-rw-r--r-- Documentation/ABI/stable/sysfs-driver-ib_srp | 25
-rw-r--r-- Documentation/scsi/libsas.txt | 82
-rw-r--r-- Documentation/scsi/scsi_mid_low_api.txt | 20
-rw-r--r-- Documentation/scsi/wd719x.txt | 21
-rw-r--r-- MAINTAINERS | 17
-rw-r--r-- arch/m68k/atari/config.c | 27
-rw-r--r-- arch/m68k/atari/stdma.c | 61
-rw-r--r-- arch/m68k/include/asm/atari_stdma.h | 4
-rw-r--r-- arch/m68k/include/asm/macintosh.h | 4
-rw-r--r-- arch/m68k/mac/config.c | 146
-rw-r--r-- arch/m68k/sun3/config.c | 60
-rw-r--r-- drivers/ata/libata-scsi.c | 17
-rw-r--r-- drivers/ata/sata_nv.c | 2
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 3
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 1076
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.h | 75
-rw-r--r-- drivers/message/fusion/mptscsih.c | 12
-rw-r--r-- drivers/message/fusion/mptscsih.h | 3
-rw-r--r-- drivers/misc/eeprom/eeprom_93cx6.c | 62
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 6
-rw-r--r-- drivers/s390/scsi/zfcp_ccw.c | 32
-rw-r--r-- drivers/s390/scsi/zfcp_def.h | 3
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 7
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 1
-rw-r--r-- drivers/s390/scsi/zfcp_fc.c | 52
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 3
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 24
-rw-r--r-- drivers/s390/scsi/zfcp_sysfs.c | 66
-rw-r--r-- drivers/scsi/3w-9xxx.c | 15
-rw-r--r-- drivers/scsi/3w-sas.c | 15
-rw-r--r-- drivers/scsi/3w-xxxx.c | 15
-rw-r--r-- drivers/scsi/53c700.c | 19
-rw-r--r-- drivers/scsi/BusLogic.c | 4
-rw-r--r-- drivers/scsi/Kconfig | 18
-rw-r--r-- drivers/scsi/Makefile | 3
-rw-r--r-- drivers/scsi/NCR5380.c | 295
-rw-r--r-- drivers/scsi/NCR5380.h | 78
-rw-r--r-- drivers/scsi/aacraid/linit.c | 18
-rw-r--r-- drivers/scsi/advansys.c | 8
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_osm.c | 4
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm.c | 4
-rw-r--r-- drivers/scsi/aic94xx/aic94xx.h | 2
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_hwi.c | 3
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_init.c | 12
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_task.c | 13
-rw-r--r-- drivers/scsi/am53c974.c | 586
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 9
-rw-r--r-- drivers/scsi/arm/cumana_1.c | 18
-rw-r--r-- drivers/scsi/arm/oak.c | 23
-rw-r--r-- drivers/scsi/atari_NCR5380.c | 981
-rw-r--r-- drivers/scsi/atari_scsi.c | 673
-rw-r--r-- drivers/scsi/atari_scsi.h | 60
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 4
-rw-r--r-- drivers/scsi/bfa/bfad_debugfs.c | 30
-rw-r--r-- drivers/scsi/bfa/bfad_im.c | 4
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 6
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 3
-rw-r--r-- drivers/scsi/csiostor/csio_scsi.c | 2
-rw-r--r-- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 3
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 3
-rw-r--r-- drivers/scsi/dmx3191d.c | 31
-rw-r--r-- drivers/scsi/dpt_i2o.c | 2
-rw-r--r-- drivers/scsi/dtc.c | 85
-rw-r--r-- drivers/scsi/dtc.h | 26
-rw-r--r-- drivers/scsi/eata.c | 6
-rw-r--r-- drivers/scsi/esas2r/esas2r.h | 1
-rw-r--r-- drivers/scsi/esas2r/esas2r_ioctl.c | 22
-rw-r--r-- drivers/scsi/esas2r/esas2r_main.c | 11
-rw-r--r-- drivers/scsi/esp_scsi.c | 411
-rw-r--r-- drivers/scsi/esp_scsi.h | 22
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 3
-rw-r--r-- drivers/scsi/fnic/fnic.h | 2
-rw-r--r-- drivers/scsi/fnic/fnic_fcs.c | 5
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 18
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 32
-rw-r--r-- drivers/scsi/fnic/fnic_trace.c | 4
-rw-r--r-- drivers/scsi/g_NCR5380.c | 224
-rw-r--r-- drivers/scsi/g_NCR5380.h | 26
-rw-r--r-- drivers/scsi/hpsa.c | 509
-rw-r--r-- drivers/scsi/hpsa.h | 33
-rw-r--r-- drivers/scsi/hpsa_cmd.h | 34
-rw-r--r-- drivers/scsi/hptiop.c | 8
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 10
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 10
-rw-r--r-- drivers/scsi/ipr.c | 116
-rw-r--r-- drivers/scsi/ipr.h | 2
-rw-r--r-- drivers/scsi/ips.c | 2
-rw-r--r-- drivers/scsi/isci/init.c | 3
-rw-r--r-- drivers/scsi/isci/task.c | 147
-rw-r--r-- drivers/scsi/isci/task.h | 1
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 3
-rw-r--r-- drivers/scsi/libfc/fc_fcp.c | 27
-rw-r--r-- drivers/scsi/libiscsi.c | 19
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 9
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 2
-rw-r--r-- drivers/scsi/libsas/sas_init.c | 21
-rw-r--r-- drivers/scsi/libsas/sas_internal.h | 2
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 203
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 113
-rw-r--r-- drivers/scsi/mac_scsi.c | 552
-rw-r--r-- drivers/scsi/mac_scsi.h | 74
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.c | 23
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 23
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 163
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fp.c | 33
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 112
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.h | 18
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 46
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 48
-rw-r--r-- drivers/scsi/mvsas/mv_init.c | 23
-rw-r--r-- drivers/scsi/mvsas/mv_sas.c | 109
-rw-r--r-- drivers/scsi/mvsas/mv_sas.h | 10
-rw-r--r-- drivers/scsi/ncr53c8xx.c | 2
-rw-r--r-- drivers/scsi/pas16.c | 106
-rw-r--r-- drivers/scsi/pas16.h | 21
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 3
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 22
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.h | 3
-rw-r--r-- drivers/scsi/pmcraid.c | 12
-rw-r--r-- drivers/scsi/qla1280.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx2.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 58
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 18
-rw-r--r-- drivers/scsi/scsi.c | 41
-rw-r--r-- drivers/scsi/scsi_debug.c | 1982
-rw-r--r-- drivers/scsi/scsi_error.c | 15
-rw-r--r-- drivers/scsi/scsi_scan.c | 2
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 5
-rw-r--r-- drivers/scsi/storvsc_drv.c | 2
-rw-r--r-- drivers/scsi/sun3_NCR5380.c | 2932
-rw-r--r-- drivers/scsi/sun3_scsi.c | 512
-rw-r--r-- drivers/scsi/sun3_scsi.h | 84
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_glue.c | 2
-rw-r--r-- drivers/scsi/t128.c | 83
-rw-r--r-- drivers/scsi/t128.h | 23
-rw-r--r-- drivers/scsi/tmscsim.c | 2626
-rw-r--r-- drivers/scsi/tmscsim.h | 551
-rw-r--r-- drivers/scsi/u14-34f.c | 10
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 25
-rw-r--r-- drivers/scsi/virtio_scsi.c | 38
-rw-r--r-- drivers/scsi/vmw_pvscsi.c | 32
-rw-r--r-- drivers/scsi/wd7000.c | 1
-rw-r--r-- drivers/scsi/wd719x.c | 996
-rw-r--r-- drivers/scsi/wd719x.h | 249
-rw-r--r-- drivers/target/loopback/tcm_loop.c | 28
-rw-r--r-- drivers/usb/storage/uas.c | 2
-rw-r--r-- include/linux/eeprom_93cx6.h | 4
-rw-r--r-- include/linux/libata.h | 4
-rw-r--r-- include/scsi/libfc.h | 1
-rw-r--r-- include/scsi/libiscsi.h | 2
-rw-r--r-- include/scsi/libsas.h | 17
-rw-r--r-- include/scsi/scsi_device.h | 2
-rw-r--r-- include/scsi/scsi_host.h | 15
154 files changed, 7004 insertions(+), 11816 deletions(-)
diff --git a/Documentation/ABI/stable/sysfs-driver-ib_srp b/Documentation/ABI/stable/sysfs-driver-ib_srp
index b9688de8455b..7049a2b50359 100644
--- a/Documentation/ABI/stable/sysfs-driver-ib_srp
+++ b/Documentation/ABI/stable/sysfs-driver-ib_srp
@@ -55,12 +55,12 @@ Description: Interface for making ib_srp connect to a new target.
 	  only safe with partial memory descriptor list support enabled
 	  (allow_ext_sg=1).
 	* comp_vector, a number in the range 0..n-1 specifying the
-	  MSI-X completion vector. Some HCA's allocate multiple (n)
-	  MSI-X vectors per HCA port. If the IRQ affinity masks of
-	  these interrupts have been configured such that each MSI-X
-	  interrupt is handled by a different CPU then the comp_vector
-	  parameter can be used to spread the SRP completion workload
-	  over multiple CPU's.
+	  MSI-X completion vector of the first RDMA channel. Some
+	  HCA's allocate multiple (n) MSI-X vectors per HCA port. If
+	  the IRQ affinity masks of these interrupts have been
+	  configured such that each MSI-X interrupt is handled by a
+	  different CPU then the comp_vector parameter can be used to
+	  spread the SRP completion workload over multiple CPU's.
 	* tl_retry_count, a number in the range 2..7 specifying the
 	  IB RC retry count.
 	* queue_size, the maximum number of commands that the
@@ -88,6 +88,13 @@ Description: Whether ib_srp is allowed to include a partial memory
 	descriptor list in an SRP_CMD when communicating with an SRP
 	target.
 
+What:		/sys/class/scsi_host/host<n>/ch_count
+Date:		April 1, 2015
+KernelVersion:	3.19
+Contact:	linux-rdma@vger.kernel.org
+Description:	Number of RDMA channels used for communication with the SRP
+		target.
+
 What:		/sys/class/scsi_host/host<n>/cmd_sg_entries
 Date:		May 19, 2011
 KernelVersion:	2.6.39
@@ -95,6 +102,12 @@ Contact: linux-rdma@vger.kernel.org
 Description:	Maximum number of data buffer descriptors that may be sent to
 		the target in a single SRP_CMD request.
 
+What:		/sys/class/scsi_host/host<n>/comp_vector
+Date:		September 2, 2013
+KernelVersion:	3.11
+Contact:	linux-rdma@vger.kernel.org
+Description:	Completion vector used for the first RDMA channel.
+
 What:		/sys/class/scsi_host/host<n>/dgid
 Date:		June 17, 2006
 KernelVersion:	2.6.17
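
Both new attributes are read-only sysfs files, so they can be inspected with any file API. A minimal user-space sketch (not part of the patch; the host0 name is an assumption for illustration):

/* Hypothetical reader for the ch_count and comp_vector attributes
 * documented above; assumes the SRP host was registered as host0. */
#include <stdio.h>

static void print_attr(const char *name)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/scsi_host/host0/%s", name);
	f = fopen(path, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("%s: %s", name, buf);	/* buf keeps its trailing newline */
	else
		printf("%s: <unreadable>\n", name);
	if (f)
		fclose(f);
}

int main(void)
{
	print_attr("ch_count");		/* RDMA channel count (since 3.19) */
	print_attr("comp_vector");	/* first channel's completion vector */
	return 0;
}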
diff --git a/Documentation/scsi/libsas.txt b/Documentation/scsi/libsas.txt
index 3cc9c7843e15..8cac6492aade 100644
--- a/Documentation/scsi/libsas.txt
+++ b/Documentation/scsi/libsas.txt
@@ -226,9 +226,6 @@ static int register_sas_ha(struct my_sas_ha *my_ha)
 	my_ha->sas_ha.lldd_dev_found = my_dev_found;
 	my_ha->sas_ha.lldd_dev_gone = my_dev_gone;
 
-	my_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num; (1)
-
-	my_ha->sas_ha.lldd_queue_size = ha_can_queue;
 	my_ha->sas_ha.lldd_execute_task = my_execute_task;
 
 	my_ha->sas_ha.lldd_abort_task = my_abort_task;
@@ -247,28 +244,6 @@ static int register_sas_ha(struct my_sas_ha *my_ha)
 	return sas_register_ha(&my_ha->sas_ha);
 }
 
-(1) This is normally a LLDD parameter, something of the
-lines of a task collector. What it tells the SAS Layer is
-whether the SAS layer should run in Direct Mode (default:
-value 0 or 1) or Task Collector Mode (value greater than 1).
-
-In Direct Mode, the SAS Layer calls Execute Task as soon as
-it has a command to send to the SDS, _and_ this is a single
-command, i.e. not linked.
-
-Some hardware (e.g. aic94xx) has the capability to DMA more
-than one task at a time (interrupt) from host memory. Task
-Collector Mode is an optional feature for HAs which support
-this in their hardware. (Again, it is completely optional
-even if your hardware supports it.)
-
-In Task Collector Mode, the SAS Layer would do _natural_
-coalescing of tasks and at the appropriate moment it would
-call your driver to DMA more than one task in a single HA
-interrupt. DMBS may want to use this by insmod/modprobe
-setting the lldd_max_execute_num to something greater than
-1.
-
 (2) SAS 1.1 does not define I_T Nexus Reset TMF.
 
 Events
@@ -325,71 +300,22 @@ PHYE_SPINUP_HOLD -- SATA is present, COMWAKE not sent.
 
 The Execute Command SCSI RPC:
 
-	int (*lldd_execute_task)(struct sas_task *, int num,
-				 unsigned long gfp_flags);
+	int (*lldd_execute_task)(struct sas_task *, gfp_t gfp_flags);
 
-Used to queue a task to the SAS LLDD. @task is the tasks to
-be executed. @num should be the number of tasks being
-queued at this function call (they are linked listed via
-task::list), @gfp_mask should be the gfp_mask defining the
-context of the caller.
+Used to queue a task to the SAS LLDD. @task is the task to be executed.
+@gfp_mask is the gfp_mask defining the context of the caller.
 
-This function should implement the Execute Command SCSI RPC,
-or if you're sending a SCSI Task as linked commands, you
-should also use this function.
+This function should implement the Execute Command SCSI RPC.
 
-That is, when lldd_execute_task() is called, the command(s)
-go out on the transport *immediately*. There is *no*
+That is, when lldd_execute_task() is called, the command
+goes out on the transport *immediately*. There is *no*
 queuing of any sort and at any level in a SAS LLDD.
 
-The use of task::list is two-fold, one for linked commands,
-the other discussed below.
-
-It is possible to queue up more than one task at a time, by
-initializing the list element of struct sas_task, and
-passing the number of tasks enlisted in this manner in num.
-
 Returns: -SAS_QUEUE_FULL, -ENOMEM, nothing was queued;
 	 0, the task(s) were queued.
 
-If you want to pass num > 1, then either
-A) you're the only caller of this function and keep track
-   of what you've queued to the LLDD, or
-B) you know what you're doing and have a strategy of
-   retrying.
-
-As opposed to queuing one task at a time (function call),
-batch queuing of tasks, by having num > 1, greatly
-simplifies LLDD code, sequencer code, and _hardware design_,
-and has some performance advantages in certain situations
-(DBMS).
-
-The LLDD advertises if it can take more than one command at
-a time at lldd_execute_task(), by setting the
-lldd_max_execute_num parameter (controlled by "collector"
-module parameter in aic94xx SAS LLDD).
-
-You should leave this to the default 1, unless you know what
-you're doing.
-
-This is a function of the LLDD, to which the SAS layer can
-cater to.
-
-int lldd_queue_size
-	The host adapter's queue size. This is the maximum
-number of commands the lldd can have pending to domain
-devices on behalf of all upper layers submitting through
-lldd_execute_task().
-
-You really want to set this to something (much) larger than
-1.
-
-This _really_ has absolutely nothing to do with queuing.
-There is no queuing in SAS LLDDs.
-
 struct sas_task {
 	dev -- the device this task is destined to
-	list -- must be initialized (INIT_LIST_HEAD)
 	task_proto -- _one_ of enum sas_proto
 	scatter -- pointer to scatter gather list array
 	num_scatter -- number of elements in scatter
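
With the task collector machinery gone, lldd_execute_task() reduces to a single-task fast path. A minimal sketch of a conforming implementation under the new prototype; the my_* helpers are hypothetical, only the signature and return convention come from the text above:

#include <scsi/libsas.h>

/* Sketch: queue exactly one task to hypothetical hardware. */
static int my_execute_task(struct sas_task *task, gfp_t gfp_flags)
{
	struct my_sas_ha *my_ha = my_ha_from_task(task);	/* hypothetical */

	if (!my_hw_can_queue(my_ha))
		return -SAS_QUEUE_FULL;		/* nothing was queued */

	/* The task goes out on the transport immediately; the LLDD
	 * performs no internal queuing of any sort. */
	return my_hw_submit(my_ha, task, gfp_flags);	/* 0 == queued */
}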
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index bee7d86b9dcc..731bc4f4c5e6 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -149,7 +149,7 @@ scsi_add_host() ---->
 scsi_scan_host()  -------+
                          |
                     slave_alloc()
-                    slave_configure() -->  scsi_adjust_queue_depth()
+                    slave_configure() -->  scsi_change_queue_depth()
                          |
                     slave_alloc()
                     slave_configure()
@@ -159,7 +159,7 @@ scsi_scan_host() -------+
 ------------------------------------------------------------
 
 If the LLD wants to adjust the default queue settings, it can invoke
-scsi_adjust_queue_depth() in its slave_configure() routine.
+scsi_change_queue_depth() in its slave_configure() routine.
 
 *** For scsi devices that the mid level tries to scan but do not
     respond, a slave_alloc(), slave_destroy() pair is called.
@@ -203,7 +203,7 @@ LLD mid level LLD
 scsi_add_device()  ------+
                          |
                     slave_alloc()
-                    slave_configure()   [--> scsi_adjust_queue_depth()]
+                    slave_configure()   [--> scsi_change_queue_depth()]
 ------------------------------------------------------------
 
 In a similar fashion, an LLD may become aware that a SCSI device has been
@@ -261,7 +261,7 @@ init_this_scsi_driver() ----+
                      |   scsi_register()
                      |
                 slave_alloc()
-                slave_configure()  -->  scsi_adjust_queue_depth()
+                slave_configure()  -->  scsi_change_queue_depth()
                 slave_alloc()   ***
                 slave_destroy() ***
                      |
@@ -271,7 +271,7 @@ init_this_scsi_driver() ----+
                 slave_destroy() ***
 ------------------------------------------------------------
 
-The mid level invokes scsi_adjust_queue_depth() with "cmd_per_lun" for that
+The mid level invokes scsi_change_queue_depth() with "cmd_per_lun" for that
 host as the queue length. These settings can be overridden by a
 slave_configure() supplied by the LLD.
 
@@ -368,7 +368,7 @@ names all start with "scsi_".
 Summary:
    scsi_add_device - creates new scsi device (lu) instance
    scsi_add_host - perform sysfs registration and set up transport class
-   scsi_adjust_queue_depth - change the queue depth on a SCSI device
+   scsi_change_queue_depth - change the queue depth on a SCSI device
    scsi_bios_ptable - return copy of block device's partition table
    scsi_block_requests - prevent further commands being queued to given host
    scsi_host_alloc - return a new scsi_host instance whose refcount==1
@@ -436,7 +436,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device * dev)
 
 
 /**
- * scsi_adjust_queue_depth - allow LLD to change queue depth on a SCSI device
+ * scsi_change_queue_depth - allow LLD to change queue depth on a SCSI device
  * @sdev: pointer to SCSI device to change queue depth on
  * @tags	Number of tags allowed if tagged queuing enabled,
  *		or number of commands the LLD can queue up
@@ -453,7 +453,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device * dev)
  *	Defined in: drivers/scsi/scsi.c [see source code for more notes]
  *
  **/
-void scsi_adjust_queue_depth(struct scsi_device *sdev, int tags)
+int scsi_change_queue_depth(struct scsi_device *sdev, int tags)
 
 
 /**
@@ -1214,7 +1214,7 @@ of interest:
                    for disk firmware uploads.
    cmd_per_lun - maximum number of commands that can be queued on devices
                  controlled by the host. Overridden by LLD calls to
-                 scsi_adjust_queue_depth().
+                 scsi_change_queue_depth().
    unchecked_isa_dma - 1=>only use bottom 16 MB of ram (ISA DMA addressing
                  restriction), 0=>can use full 32 bit (or better) DMA
                  address space
@@ -1254,7 +1254,7 @@ struct scsi_cmnd
 Instances of this structure convey SCSI commands to the LLD and responses
 back to the mid level. The SCSI mid level will ensure that no more SCSI
 commands become queued against the LLD than are indicated by
-scsi_adjust_queue_depth() (or struct Scsi_Host::cmd_per_lun). There will
+scsi_change_queue_depth() (or struct Scsi_Host::cmd_per_lun). There will
 be at least one instance of struct scsi_cmnd available for each SCSI device.
 Members of interest:
    cmnd - array containing SCSI command
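
For an LLD the rename is mechanical; the call site in slave_configure() keeps the same shape. A sketch of the usual pattern, with MY_MAX_TAGS standing in for a driver-specific limit:

#include <scsi/scsi_device.h>

#define MY_MAX_TAGS 64			/* hypothetical per-device limit */

static int my_slave_configure(struct scsi_device *sdev)
{
	/* scsi_change_queue_depth() clamps the value and returns the
	 * depth actually set, so the result can be ignored here. */
	if (sdev->tagged_supported)
		scsi_change_queue_depth(sdev, MY_MAX_TAGS);
	else
		scsi_change_queue_depth(sdev, 1);
	return 0;
}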
diff --git a/Documentation/scsi/wd719x.txt b/Documentation/scsi/wd719x.txt
new file mode 100644
index 000000000000..0816b0220238
--- /dev/null
+++ b/Documentation/scsi/wd719x.txt
@@ -0,0 +1,21 @@
+Driver for Western Digital WD7193, WD7197 and WD7296 SCSI cards
+---------------------------------------------------------------
+
+The card requires firmware that can be cut out of the Windows NT driver that
+can be downloaded from WD at:
+http://support.wdc.com/product/download.asp?groupid=801&sid=27&lang=en
+
+There is no license anywhere in the file or on the page - so the firmware
+probably cannot be added to linux-firmware.
+
+This script downloads and extracts the firmware, creating wd719x-risc.bin and
+wd719x-wcs.bin files. Put them in /lib/firmware/.
+
+#!/bin/sh
+wget http://support.wdc.com/download/archive/pciscsi.exe
+lha xi pciscsi.exe pci-scsi.exe
+lha xi pci-scsi.exe nt/wd7296a.sys
+rm pci-scsi.exe
+dd if=wd7296a.sys of=wd719x-risc.bin bs=1 skip=5760 count=14336
+dd if=wd7296a.sys of=wd719x-wcs.bin bs=1 skip=20096 count=514
+rm wd7296a.sys
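
At probe time the driver is expected to pull these two blobs in through the regular kernel firmware loader. A sketch of what that request looks like; wd719x_load_chunk() is a hypothetical stand-in for the actual download routine:

#include <linux/firmware.h>

static int wd719x_request_fw(struct device *dev)
{
	const struct firmware *risc, *wcs;
	int err;

	/* Names match the files created by the script above. */
	err = request_firmware(&risc, "wd719x-risc.bin", dev);
	if (err)
		return err;
	err = request_firmware(&wcs, "wd719x-wcs.bin", dev);
	if (err) {
		release_firmware(risc);
		return err;
	}

	err = wd719x_load_chunk(dev, risc->data, risc->size,
				wcs->data, wcs->size);	/* hypothetical */

	release_firmware(wcs);
	release_firmware(risc);
	return err;
}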
diff --git a/MAINTAINERS b/MAINTAINERS
index 0ff630de8a6d..7a95dc1a8de6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2862,11 +2862,10 @@ F: Documentation/networking/dmfe.txt
 F:	drivers/net/ethernet/dec/tulip/dmfe.c
 
 DC390/AM53C974 SCSI driver
-M:	Kurt Garloff <garloff@suse.de>
-W:	http://www.garloff.de/kurt/linux/dc390/
-M:	Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+M:	Hannes Reinecke <hare@suse.de>
+L:	linux-scsi@vger.kernel.org
 S:	Maintained
-F:	drivers/scsi/tmscsim.*
+F:	drivers/scsi/am53c974.c
 
 DC395x SCSI driver
 M:	Oliver Neukum <oliver@neukum.org>
@@ -5991,10 +5990,13 @@ W: http://linuxtv.org
 S:	Odd Fixes
 F:	drivers/media/parport/pms*
 
-MEGARAID SCSI DRIVERS
-M:	Neela Syam Kolli <megaraidlinux@lsi.com>
+MEGARAID SCSI/SAS DRIVERS
+M:	Kashyap Desai <kashyap.desai@avagotech.com>
+M:	Sumit Saxena <sumit.saxena@avagotech.com>
+M:	Uday Lingala <uday.lingala@avagotech.com>
+L:	megaraidlinux.pdl@avagotech.com
 L:	linux-scsi@vger.kernel.org
-W:	http://megaraid.lsilogic.com
+W:	http://www.lsi.com
 S:	Maintained
 F:	Documentation/scsi/megaraid.txt
 F:	drivers/scsi/megaraid.*
@@ -6305,7 +6307,6 @@ F: drivers/scsi/g_NCR5380.*
 F:	drivers/scsi/g_NCR5380_mmio.c
 F:	drivers/scsi/mac_scsi.*
 F:	drivers/scsi/pas16.*
-F:	drivers/scsi/sun3_NCR5380.c
 F:	drivers/scsi/sun3_scsi.*
 F:	drivers/scsi/sun3_scsi_vme.c
 F:	drivers/scsi/t128.*
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 01a62161b08a..192b00f098f4 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -858,6 +858,24 @@ static struct platform_device *atari_netusbee_devices[] __initdata = {
 };
 #endif /* CONFIG_ATARI_ETHERNEC */
 
+#ifdef CONFIG_ATARI_SCSI
+static const struct resource atari_scsi_st_rsrc[] __initconst = {
+	{
+		.flags = IORESOURCE_IRQ,
+		.start = IRQ_MFP_FSCSI,
+		.end = IRQ_MFP_FSCSI,
+	},
+};
+
+static const struct resource atari_scsi_tt_rsrc[] __initconst = {
+	{
+		.flags = IORESOURCE_IRQ,
+		.start = IRQ_TT_MFP_SCSI,
+		.end = IRQ_TT_MFP_SCSI,
+	},
+};
+#endif
+
 int __init atari_platform_init(void)
 {
 	int rv = 0;
@@ -892,6 +910,15 @@ int __init atari_platform_init(void)
 	}
 #endif
 
+#ifdef CONFIG_ATARI_SCSI
+	if (ATARIHW_PRESENT(ST_SCSI))
+		platform_device_register_simple("atari_scsi", -1,
+			atari_scsi_st_rsrc, ARRAY_SIZE(atari_scsi_st_rsrc));
+	else if (ATARIHW_PRESENT(TT_SCSI))
+		platform_device_register_simple("atari_scsi", -1,
+			atari_scsi_tt_rsrc, ARRAY_SIZE(atari_scsi_tt_rsrc));
+#endif
+
 	return rv;
 }
 
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c
index ddbf43ca8858..e5a66596b116 100644
--- a/arch/m68k/atari/stdma.c
+++ b/arch/m68k/atari/stdma.c
@@ -59,6 +59,31 @@ static irqreturn_t stdma_int (int irq, void *dummy);
 /************************* End of Prototypes **************************/
 
 
+/**
+ * stdma_try_lock - attempt to acquire ST DMA interrupt "lock"
+ * @handler: interrupt handler to use after acquisition
+ *
+ * Returns !0 if lock was acquired; otherwise 0.
+ */
+
+int stdma_try_lock(irq_handler_t handler, void *data)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (stdma_locked) {
+		local_irq_restore(flags);
+		return 0;
+	}
+
+	stdma_locked = 1;
+	stdma_isr = handler;
+	stdma_isr_data = data;
+	local_irq_restore(flags);
+	return 1;
+}
+EXPORT_SYMBOL(stdma_try_lock);
+
 
 /*
  * Function: void stdma_lock( isrfunc isr, void *data )
@@ -78,19 +103,10 @@ static irqreturn_t stdma_int (int irq, void *dummy);
 
 void stdma_lock(irq_handler_t handler, void *data)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);		/* protect lock */
-
 	/* Since the DMA is used for file system purposes, we
 	   have to sleep uninterruptible (there may be locked
 	   buffers) */
-	wait_event(stdma_wait, !stdma_locked);
-
-	stdma_locked = 1;
-	stdma_isr = handler;
-	stdma_isr_data = data;
-	local_irq_restore(flags);
+	wait_event(stdma_wait, stdma_try_lock(handler, data));
 }
 EXPORT_SYMBOL(stdma_lock);
 
@@ -122,22 +138,25 @@ void stdma_release(void)
 EXPORT_SYMBOL(stdma_release);
 
 
-/*
- * Function: int stdma_others_waiting( void )
- *
- * Purpose: Check if someone waits for the ST-DMA lock.
- *
- * Inputs: none
- *
- * Returns: 0 if no one is waiting, != 0 otherwise
- *
+/**
+ * stdma_is_locked_by - allow lock holder to check whether it needs to release.
+ * @handler: interrupt handler previously used to acquire lock.
+ *
+ * Returns !0 if locked for the given handler; 0 otherwise.
  */
 
-int stdma_others_waiting(void)
+int stdma_is_locked_by(irq_handler_t handler)
 {
-	return waitqueue_active(&stdma_wait);
+	unsigned long flags;
+	int result;
+
+	local_irq_save(flags);
+	result = stdma_locked && (stdma_isr == handler);
+	local_irq_restore(flags);
+
+	return result;
 }
-EXPORT_SYMBOL(stdma_others_waiting);
+EXPORT_SYMBOL(stdma_is_locked_by);
 
 
 /*
diff --git a/arch/m68k/include/asm/atari_stdma.h b/arch/m68k/include/asm/atari_stdma.h
index 8e389b7fa70c..d24e34d870dc 100644
--- a/arch/m68k/include/asm/atari_stdma.h
+++ b/arch/m68k/include/asm/atari_stdma.h
@@ -8,11 +8,11 @@
 
 /***************************** Prototypes *****************************/
 
+int stdma_try_lock(irq_handler_t, void *);
 void stdma_lock(irq_handler_t handler, void *data);
 void stdma_release( void );
-int stdma_others_waiting( void );
 int stdma_islocked( void );
-void *stdma_locked_by( void );
+int stdma_is_locked_by(irq_handler_t);
 void stdma_init( void );
 
 /************************* End of Prototypes **************************/
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index d323b2c2d07d..29c7c6c3a5f2 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -53,6 +53,10 @@ struct mac_model
 #define MAC_SCSI_QUADRA		2
 #define MAC_SCSI_QUADRA2	3
 #define MAC_SCSI_QUADRA3	4
+#define MAC_SCSI_IIFX		5
+#define MAC_SCSI_DUO		6
+#define MAC_SCSI_CCL		7
+#define MAC_SCSI_LATE		8
 
 #define MAC_IDE_NONE		0
 #define MAC_IDE_QUADRA		1
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index a471eab1a4dd..e9c3756139fc 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -278,7 +278,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "IIfx",
 		.adb_type = MAC_ADB_IOP,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_IIFX,
 		.scc_type = MAC_SCC_IOP,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_IOP,
@@ -329,7 +329,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "Color Classic",
 		.adb_type = MAC_ADB_CUDA,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_CCL,
 		.scc_type = MAC_SCC_II,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -338,7 +338,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "Color Classic II",
 		.adb_type = MAC_ADB_CUDA,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_CCL,
 		.scc_type = MAC_SCC_II,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -526,7 +526,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "Performa 520",
 		.adb_type = MAC_ADB_CUDA,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_CCL,
 		.scc_type = MAC_SCC_II,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -535,7 +535,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "Performa 550",
 		.adb_type = MAC_ADB_CUDA,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_CCL,
 		.scc_type = MAC_SCC_II,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -567,7 +567,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "TV",
 		.adb_type = MAC_ADB_CUDA,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_CCL,
 		.scc_type = MAC_SCC_II,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -712,7 +712,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "PowerBook 190",
 		.adb_type = MAC_ADB_PB2,
 		.via_type = MAC_VIA_QUADRA,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_LATE,
 		.ide_type = MAC_IDE_BABOON,
 		.scc_type = MAC_SCC_QUADRA,
 		.nubus_type = MAC_NUBUS,
@@ -722,7 +722,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "PowerBook 520",
 		.adb_type = MAC_ADB_PB2,
 		.via_type = MAC_VIA_QUADRA,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_LATE,
 		.scc_type = MAC_SCC_QUADRA,
 		.ether_type = MAC_ETHER_SONIC,
 		.nubus_type = MAC_NUBUS,
@@ -740,7 +740,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "PowerBook Duo 210",
 		.adb_type = MAC_ADB_PB2,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_DUO,
 		.scc_type = MAC_SCC_QUADRA,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -749,7 +749,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "PowerBook Duo 230",
 		.adb_type = MAC_ADB_PB2,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_DUO,
 		.scc_type = MAC_SCC_QUADRA,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -758,7 +758,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "PowerBook Duo 250",
 		.adb_type = MAC_ADB_PB2,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_DUO,
 		.scc_type = MAC_SCC_QUADRA,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -767,7 +767,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "PowerBook Duo 270c",
 		.adb_type = MAC_ADB_PB2,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_DUO,
 		.scc_type = MAC_SCC_QUADRA,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -776,7 +776,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "PowerBook Duo 280",
 		.adb_type = MAC_ADB_PB2,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_DUO,
 		.scc_type = MAC_SCC_QUADRA,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -785,7 +785,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "PowerBook Duo 280c",
 		.adb_type = MAC_ADB_PB2,
 		.via_type = MAC_VIA_IICI,
-		.scsi_type = MAC_SCSI_OLD,
+		.scsi_type = MAC_SCSI_DUO,
 		.scc_type = MAC_SCC_QUADRA,
 		.nubus_type = MAC_NUBUS,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -929,6 +929,70 @@ static struct platform_device swim_pdev = {
 	.resource = &swim_rsrc,
 };
 
+static const struct resource mac_scsi_iifx_rsrc[] __initconst = {
+	{
+		.flags = IORESOURCE_IRQ,
+		.start = IRQ_MAC_SCSI,
+		.end = IRQ_MAC_SCSI,
+	}, {
+		.flags = IORESOURCE_MEM,
+		.start = 0x50008000,
+		.end = 0x50009FFF,
+	},
+};
+
+static const struct resource mac_scsi_duo_rsrc[] __initconst = {
+	{
+		.flags = IORESOURCE_MEM,
+		.start = 0xFEE02000,
+		.end = 0xFEE03FFF,
+	},
+};
+
+static const struct resource mac_scsi_old_rsrc[] __initconst = {
+	{
+		.flags = IORESOURCE_IRQ,
+		.start = IRQ_MAC_SCSI,
+		.end = IRQ_MAC_SCSI,
+	}, {
+		.flags = IORESOURCE_MEM,
+		.start = 0x50010000,
+		.end = 0x50011FFF,
+	}, {
+		.flags = IORESOURCE_MEM,
+		.start = 0x50006000,
+		.end = 0x50007FFF,
+	},
+};
+
+static const struct resource mac_scsi_late_rsrc[] __initconst = {
+	{
+		.flags = IORESOURCE_IRQ,
+		.start = IRQ_MAC_SCSI,
+		.end = IRQ_MAC_SCSI,
+	}, {
+		.flags = IORESOURCE_MEM,
+		.start = 0x50010000,
+		.end = 0x50011FFF,
+	},
+};
+
+static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
+	{
+		.flags = IORESOURCE_IRQ,
+		.start = IRQ_MAC_SCSI,
+		.end = IRQ_MAC_SCSI,
+	}, {
+		.flags = IORESOURCE_MEM,
+		.start = 0x50F10000,
+		.end = 0x50F11FFF,
+	}, {
+		.flags = IORESOURCE_MEM,
+		.start = 0x50F06000,
+		.end = 0x50F07FFF,
+	},
+};
+
 static struct platform_device esp_0_pdev = {
 	.name = "mac_esp",
 	.id = 0,
@@ -1000,6 +1064,60 @@ int __init mac_platform_init(void)
 		    (macintosh_config->ident == MAC_MODEL_Q950))
 			platform_device_register(&esp_1_pdev);
 		break;
+	case MAC_SCSI_IIFX:
+		/* Addresses from The Guide to Mac Family Hardware.
+		 * $5000 8000 - $5000 9FFF: SCSI DMA
+		 * $5000 C000 - $5000 DFFF: Alternate SCSI (DMA)
+		 * $5000 E000 - $5000 FFFF: Alternate SCSI (Hsk)
+		 * The SCSI DMA custom IC embeds the 53C80 core. mac_scsi does
+		 * not make use of its DMA or hardware handshaking logic.
+		 */
+		platform_device_register_simple("mac_scsi", 0,
+			mac_scsi_iifx_rsrc, ARRAY_SIZE(mac_scsi_iifx_rsrc));
+		break;
+	case MAC_SCSI_DUO:
+		/* Addresses from the Duo Dock II Developer Note.
+		 * $FEE0 2000 - $FEE0 3FFF: normal mode
+		 * $FEE0 4000 - $FEE0 5FFF: pseudo DMA without /DRQ
+		 * $FEE0 6000 - $FEE0 7FFF: pseudo DMA with /DRQ
+		 * The NetBSD code indicates that both 5380 chips share
+		 * an IRQ (?) which would need careful handling (see mac_esp).
+		 */
+		platform_device_register_simple("mac_scsi", 1,
+			mac_scsi_duo_rsrc, ARRAY_SIZE(mac_scsi_duo_rsrc));
+		/* fall through */
+	case MAC_SCSI_OLD:
+		/* Addresses from Developer Notes for Duo System,
+		 * PowerBook 180 & 160, 140 & 170, Macintosh IIsi
+		 * and also from The Guide to Mac Family Hardware for
+		 * SE/30, II, IIx, IIcx, IIci.
+		 * $5000 6000 - $5000 7FFF: pseudo-DMA with /DRQ
+		 * $5001 0000 - $5001 1FFF: normal mode
+		 * $5001 2000 - $5001 3FFF: pseudo-DMA without /DRQ
+		 * GMFH says that $5000 0000 - $50FF FFFF "wraps
+		 * $5000 0000 - $5001 FFFF eight times" (!)
+		 * mess.org says IIci and Color Classic do not alias
+		 * I/O address space.
+		 */
+		platform_device_register_simple("mac_scsi", 0,
+			mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc));
+		break;
+	case MAC_SCSI_LATE:
+		/* PDMA logic in 68040 PowerBooks is somehow different to
+		 * '030 models. It's probably more like Quadras (see mac_esp).
+		 */
+		platform_device_register_simple("mac_scsi", 0,
+			mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
+		break;
+	case MAC_SCSI_CCL:
+		/* Addresses from the Color Classic Developer Note.
+		 * $50F0 6000 - $50F0 7FFF: SCSI handshake
+		 * $50F1 0000 - $50F1 1FFF: SCSI
+		 * $50F1 2000 - $50F1 3FFF: SCSI DMA
+		 */
+		platform_device_register_simple("mac_scsi", 0,
+			mac_scsi_ccl_rsrc, ARRAY_SIZE(mac_scsi_ccl_rsrc));
+		break;
 	}
 
 	/*
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index f59ec58083f8..a8b942bf7163 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -16,6 +16,7 @@
 #include <linux/console.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/platform_device.h>
 
 #include <asm/oplib.h>
 #include <asm/setup.h>
@@ -27,6 +28,7 @@
 #include <asm/sun3mmu.h>
 #include <asm/rtc.h>
 #include <asm/machdep.h>
+#include <asm/machines.h>
 #include <asm/idprom.h>
 #include <asm/intersil.h>
 #include <asm/irq.h>
@@ -169,3 +171,61 @@ static void __init sun3_sched_init(irq_handler_t timer_routine)
 	intersil_clear();
 }
 
+#ifdef CONFIG_SUN3_SCSI
+
+static const struct resource sun3_scsi_vme_rsrc[] __initconst = {
+	{
+		.flags = IORESOURCE_IRQ,
+		.start = SUN3_VEC_VMESCSI0,
+		.end = SUN3_VEC_VMESCSI0,
+	}, {
+		.flags = IORESOURCE_MEM,
+		.start = 0xff200000,
+		.end = 0xff200021,
+	}, {
+		.flags = IORESOURCE_IRQ,
+		.start = SUN3_VEC_VMESCSI1,
+		.end = SUN3_VEC_VMESCSI1,
+	}, {
+		.flags = IORESOURCE_MEM,
+		.start = 0xff204000,
+		.end = 0xff204021,
+	},
+};
+
+/*
+ * Int: level 2 autovector
+ * IO: type 1, base 0x00140000, 5 bits phys space: A<4..0>
+ */
+static const struct resource sun3_scsi_rsrc[] __initconst = {
+	{
+		.flags = IORESOURCE_IRQ,
+		.start = 2,
+		.end = 2,
+	}, {
+		.flags = IORESOURCE_MEM,
+		.start = 0x00140000,
+		.end = 0x0014001f,
+	},
+};
+
+int __init sun3_platform_init(void)
+{
+	switch (idprom->id_machtype) {
+	case SM_SUN3 | SM_3_160:
+	case SM_SUN3 | SM_3_260:
+		platform_device_register_simple("sun3_scsi_vme", -1,
+			sun3_scsi_vme_rsrc, ARRAY_SIZE(sun3_scsi_vme_rsrc));
+		break;
+	case SM_SUN3 | SM_3_50:
+	case SM_SUN3 | SM_3_60:
+		platform_device_register_simple("sun3_scsi", -1,
+			sun3_scsi_rsrc, ARRAY_SIZE(sun3_scsi_rsrc));
+		break;
+	}
+	return 0;
+}
+
+arch_initcall(sun3_platform_init);
+
+#endif
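
On the driver side these board descriptions arrive through the ordinary platform bus; nothing NCR5380-specific is involved. A sketch of the probe-side counterpart (my_scsi_setup() is hypothetical and error handling is trimmed):

#include <linux/platform_device.h>

static int my_scsi_probe(struct platform_device *pdev)
{
	struct resource *irq, *mem;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!mem)
		return -ENODEV;

	/* irq may be absent (the Duo entry above registers only a memory
	 * window), so the driver must be able to run in polled mode. */
	return my_scsi_setup(mem->start, resource_size(mem),
			     irq ? irq->start : 0);
}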
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index fc6a60abe518..dd45c6a03e5d 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1164,7 +1164,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 
 		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
 		depth = min(ATA_MAX_QUEUE - 1, depth);
-		scsi_adjust_queue_depth(sdev, depth);
+		scsi_change_queue_depth(sdev, depth);
 	}
 
 	blk_queue_flush_queueable(q, false);
@@ -1243,21 +1243,17 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
  * @ap: ATA port to which the device change the queue depth
  * @sdev: SCSI device to configure queue depth for
  * @queue_depth: new queue depth
- * @reason: calling context
  *
  * libsas and libata have different approaches for associating a sdev to
  * its ata_port.
  *
  */
 int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
-			     int queue_depth, int reason)
+			     int queue_depth)
 {
 	struct ata_device *dev;
 	unsigned long flags;
 
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
-
 	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
 		return sdev->queue_depth;
 
@@ -1282,15 +1278,13 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
 	if (sdev->queue_depth == queue_depth)
 		return -EINVAL;
 
-	scsi_adjust_queue_depth(sdev, queue_depth);
-	return queue_depth;
+	return scsi_change_queue_depth(sdev, queue_depth);
 }
 
 /**
  * ata_scsi_change_queue_depth - SCSI callback for queue depth config
  * @sdev: SCSI device to configure queue depth for
  * @queue_depth: new queue depth
- * @reason: calling context
  *
  * This is libata standard hostt->change_queue_depth callback.
  * SCSI will call into this callback when user tries to set queue
@@ -1302,12 +1296,11 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
  * RETURNS:
  *	Newly configured queue depth.
  */
-int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
-				int reason)
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
 {
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
 
-	return __ata_change_queue_depth(ap, sdev, queue_depth, reason);
+	return __ata_change_queue_depth(ap, sdev, queue_depth);
 }
 
 /**
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index cdf99fac139a..1db6f5ce5e89 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1951,7 +1951,7 @@ static int nv_swncq_slave_config(struct scsi_device *sdev)
 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
 
 	if (strncmp(model_num, "Maxtor", 6) == 0) {
-		ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
+		ata_scsi_change_queue_depth(sdev, 1);
 		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
 			       sdev->queue_depth);
 	}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index f42ab14105ac..20ca6a619476 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -911,7 +911,7 @@ static struct scsi_host_template iscsi_iser_sht = {
 	.module			= THIS_MODULE,
 	.name			= "iSCSI Initiator over iSER",
 	.queuecommand		= iscsi_queuecommand,
-	.change_queue_depth	= iscsi_change_queue_depth,
+	.change_queue_depth	= scsi_change_queue_depth,
 	.sg_tablesize		= ISCSI_ISER_SG_TABLESIZE,
 	.max_sectors		= 1024,
 	.cmd_per_lun		= ISER_DEF_CMD_PER_LUN,
@@ -922,6 +922,7 @@ static struct scsi_host_template iscsi_iser_sht = {
 	.use_clustering		= DISABLE_CLUSTERING,
 	.proc_name		= "iscsi_iser",
 	.this_id		= -1,
+	.track_queue_depth	= 1,
 };
 
 static struct iscsi_transport iscsi_iser_transport = {
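
These two hunks show the pattern the rest of the series applies tree-wide: a driver deletes its private change_queue_depth wrapper, points the host template straight at the midlayer helper, and opts into queue-depth tracking. A sketch of the resulting template for a hypothetical LLD:

static struct scsi_host_template my_lld_sht = {
	.module			= THIS_MODULE,
	.name			= "my_lld",
	.queuecommand		= my_lld_queuecommand,	/* hypothetical */
	.change_queue_depth	= scsi_change_queue_depth,
	.this_id		= -1,
	.track_queue_depth	= 1,
};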
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 023a66f5ca14..5461924c9f10 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -123,10 +123,15 @@ MODULE_PARM_DESC(dev_loss_tmo,
123 " if fast_io_fail_tmo has not been set. \"off\" means that" 123 " if fast_io_fail_tmo has not been set. \"off\" means that"
124 " this functionality is disabled."); 124 " this functionality is disabled.");
125 125
126static unsigned ch_count;
127module_param(ch_count, uint, 0444);
128MODULE_PARM_DESC(ch_count,
129 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
130
126static void srp_add_one(struct ib_device *device); 131static void srp_add_one(struct ib_device *device);
127static void srp_remove_one(struct ib_device *device); 132static void srp_remove_one(struct ib_device *device);
128static void srp_recv_completion(struct ib_cq *cq, void *target_ptr); 133static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
129static void srp_send_completion(struct ib_cq *cq, void *target_ptr); 134static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
130static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); 135static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
131 136
132static struct scsi_transport_template *ib_srp_transport_template; 137static struct scsi_transport_template *ib_srp_transport_template;
@@ -262,7 +267,7 @@ static int srp_init_qp(struct srp_target_port *target,
262 267
263 ret = ib_find_pkey(target->srp_host->srp_dev->dev, 268 ret = ib_find_pkey(target->srp_host->srp_dev->dev,
264 target->srp_host->port, 269 target->srp_host->port,
265 be16_to_cpu(target->path.pkey), 270 be16_to_cpu(target->pkey),
266 &attr->pkey_index); 271 &attr->pkey_index);
267 if (ret) 272 if (ret)
268 goto out; 273 goto out;
@@ -283,18 +288,23 @@ out:
283 return ret; 288 return ret;
284} 289}
285 290
286static int srp_new_cm_id(struct srp_target_port *target) 291static int srp_new_cm_id(struct srp_rdma_ch *ch)
287{ 292{
293 struct srp_target_port *target = ch->target;
288 struct ib_cm_id *new_cm_id; 294 struct ib_cm_id *new_cm_id;
289 295
290 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, 296 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
291 srp_cm_handler, target); 297 srp_cm_handler, ch);
292 if (IS_ERR(new_cm_id)) 298 if (IS_ERR(new_cm_id))
293 return PTR_ERR(new_cm_id); 299 return PTR_ERR(new_cm_id);
294 300
295 if (target->cm_id) 301 if (ch->cm_id)
296 ib_destroy_cm_id(target->cm_id); 302 ib_destroy_cm_id(ch->cm_id);
297 target->cm_id = new_cm_id; 303 ch->cm_id = new_cm_id;
304 ch->path.sgid = target->sgid;
305 ch->path.dgid = target->orig_dgid;
306 ch->path.pkey = target->pkey;
307 ch->path.service_id = target->service_id;
298 308
299 return 0; 309 return 0;
300} 310}
@@ -443,8 +453,44 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
443 dev->max_pages_per_mr); 453 dev->max_pages_per_mr);
444} 454}
445 455
446static int srp_create_target_ib(struct srp_target_port *target) 456/**
457 * srp_destroy_qp() - destroy an RDMA queue pair
458 * @ch: SRP RDMA channel.
459 *
460 * Change a queue pair into the error state and wait until all receive
461 * completions have been processed before destroying it. This avoids that
462 * the receive completion handler can access the queue pair while it is
463 * being destroyed.
464 */
465static void srp_destroy_qp(struct srp_rdma_ch *ch)
447{ 466{
467 struct srp_target_port *target = ch->target;
468 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
469 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
470 struct ib_recv_wr *bad_wr;
471 int ret;
472
473 /* Destroying a QP and reusing ch->done is only safe if not connected */
474 WARN_ON_ONCE(target->connected);
475
476 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
477 WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
478 if (ret)
479 goto out;
480
481 init_completion(&ch->done);
482 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
483 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
484 if (ret == 0)
485 wait_for_completion(&ch->done);
486
487out:
488 ib_destroy_qp(ch->qp);
489}
490
491static int srp_create_ch_ib(struct srp_rdma_ch *ch)
492{
493 struct srp_target_port *target = ch->target;
448 struct srp_device *dev = target->srp_host->srp_dev; 494 struct srp_device *dev = target->srp_host->srp_dev;
449 struct ib_qp_init_attr *init_attr; 495 struct ib_qp_init_attr *init_attr;
450 struct ib_cq *recv_cq, *send_cq; 496 struct ib_cq *recv_cq, *send_cq;
@@ -458,15 +504,16 @@ static int srp_create_target_ib(struct srp_target_port *target)
458 if (!init_attr) 504 if (!init_attr)
459 return -ENOMEM; 505 return -ENOMEM;
460 506
461 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, target, 507 /* + 1 for SRP_LAST_WR_ID */
462 target->queue_size, target->comp_vector); 508 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
509 target->queue_size + 1, ch->comp_vector);
463 if (IS_ERR(recv_cq)) { 510 if (IS_ERR(recv_cq)) {
464 ret = PTR_ERR(recv_cq); 511 ret = PTR_ERR(recv_cq);
465 goto err; 512 goto err;
466 } 513 }
467 514
468 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, target, 515 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
469 m * target->queue_size, target->comp_vector); 516 m * target->queue_size, ch->comp_vector);
470 if (IS_ERR(send_cq)) { 517 if (IS_ERR(send_cq)) {
471 ret = PTR_ERR(send_cq); 518 ret = PTR_ERR(send_cq);
472 goto err_recv_cq; 519 goto err_recv_cq;
@@ -476,7 +523,7 @@ static int srp_create_target_ib(struct srp_target_port *target)
476 523
477 init_attr->event_handler = srp_qp_event; 524 init_attr->event_handler = srp_qp_event;
478 init_attr->cap.max_send_wr = m * target->queue_size; 525 init_attr->cap.max_send_wr = m * target->queue_size;
479 init_attr->cap.max_recv_wr = target->queue_size; 526 init_attr->cap.max_recv_wr = target->queue_size + 1;
480 init_attr->cap.max_recv_sge = 1; 527 init_attr->cap.max_recv_sge = 1;
481 init_attr->cap.max_send_sge = 1; 528 init_attr->cap.max_send_sge = 1;
482 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; 529 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -502,9 +549,9 @@ static int srp_create_target_ib(struct srp_target_port *target)
502 "FR pool allocation failed (%d)\n", ret); 549 "FR pool allocation failed (%d)\n", ret);
503 goto err_qp; 550 goto err_qp;
504 } 551 }
505 if (target->fr_pool) 552 if (ch->fr_pool)
506 srp_destroy_fr_pool(target->fr_pool); 553 srp_destroy_fr_pool(ch->fr_pool);
507 target->fr_pool = fr_pool; 554 ch->fr_pool = fr_pool;
508 } else if (!dev->use_fast_reg && dev->has_fmr) { 555 } else if (!dev->use_fast_reg && dev->has_fmr) {
509 fmr_pool = srp_alloc_fmr_pool(target); 556 fmr_pool = srp_alloc_fmr_pool(target);
510 if (IS_ERR(fmr_pool)) { 557 if (IS_ERR(fmr_pool)) {
@@ -513,21 +560,21 @@ static int srp_create_target_ib(struct srp_target_port *target)
513 "FMR pool allocation failed (%d)\n", ret); 560 "FMR pool allocation failed (%d)\n", ret);
514 goto err_qp; 561 goto err_qp;
515 } 562 }
516 if (target->fmr_pool) 563 if (ch->fmr_pool)
517 ib_destroy_fmr_pool(target->fmr_pool); 564 ib_destroy_fmr_pool(ch->fmr_pool);
518 target->fmr_pool = fmr_pool; 565 ch->fmr_pool = fmr_pool;
519 } 566 }
520 567
521 if (target->qp) 568 if (ch->qp)
522 ib_destroy_qp(target->qp); 569 srp_destroy_qp(ch);
523 if (target->recv_cq) 570 if (ch->recv_cq)
524 ib_destroy_cq(target->recv_cq); 571 ib_destroy_cq(ch->recv_cq);
525 if (target->send_cq) 572 if (ch->send_cq)
526 ib_destroy_cq(target->send_cq); 573 ib_destroy_cq(ch->send_cq);
527 574
528 target->qp = qp; 575 ch->qp = qp;
529 target->recv_cq = recv_cq; 576 ch->recv_cq = recv_cq;
530 target->send_cq = send_cq; 577 ch->send_cq = send_cq;
531 578
532 kfree(init_attr); 579 kfree(init_attr);
533 return 0; 580 return 0;
@@ -548,93 +595,117 @@ err:
548 595
549/* 596/*
550 * Note: this function may be called without srp_alloc_iu_bufs() having been 597 * Note: this function may be called without srp_alloc_iu_bufs() having been
551 * invoked. Hence the target->[rt]x_ring checks. 598 * invoked. Hence the ch->[rt]x_ring checks.
552 */ 599 */
553static void srp_free_target_ib(struct srp_target_port *target) 600static void srp_free_ch_ib(struct srp_target_port *target,
601 struct srp_rdma_ch *ch)
554{ 602{
555 struct srp_device *dev = target->srp_host->srp_dev; 603 struct srp_device *dev = target->srp_host->srp_dev;
556 int i; 604 int i;
557 605
606 if (!ch->target)
607 return;
608
609 if (ch->cm_id) {
610 ib_destroy_cm_id(ch->cm_id);
611 ch->cm_id = NULL;
612 }
613
 614	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() failed, return. */
615 if (!ch->qp)
616 return;
617
558 if (dev->use_fast_reg) { 618 if (dev->use_fast_reg) {
559 if (target->fr_pool) 619 if (ch->fr_pool)
560 srp_destroy_fr_pool(target->fr_pool); 620 srp_destroy_fr_pool(ch->fr_pool);
561 } else { 621 } else {
562 if (target->fmr_pool) 622 if (ch->fmr_pool)
563 ib_destroy_fmr_pool(target->fmr_pool); 623 ib_destroy_fmr_pool(ch->fmr_pool);
564 } 624 }
565 ib_destroy_qp(target->qp); 625 srp_destroy_qp(ch);
566 ib_destroy_cq(target->send_cq); 626 ib_destroy_cq(ch->send_cq);
567 ib_destroy_cq(target->recv_cq); 627 ib_destroy_cq(ch->recv_cq);
568 628
569 target->qp = NULL; 629 /*
570 target->send_cq = target->recv_cq = NULL; 630 * Avoid that the SCSI error handler tries to use this channel after
 631	 * it has been freed: the SCSI error handler may keep trying to
 632	 * perform recovery actions even after scsi_remove_host() has
 633	 * returned.
634 */
635 ch->target = NULL;
636
637 ch->qp = NULL;
638 ch->send_cq = ch->recv_cq = NULL;
571 639
572 if (target->rx_ring) { 640 if (ch->rx_ring) {
573 for (i = 0; i < target->queue_size; ++i) 641 for (i = 0; i < target->queue_size; ++i)
574 srp_free_iu(target->srp_host, target->rx_ring[i]); 642 srp_free_iu(target->srp_host, ch->rx_ring[i]);
575 kfree(target->rx_ring); 643 kfree(ch->rx_ring);
576 target->rx_ring = NULL; 644 ch->rx_ring = NULL;
577 } 645 }
578 if (target->tx_ring) { 646 if (ch->tx_ring) {
579 for (i = 0; i < target->queue_size; ++i) 647 for (i = 0; i < target->queue_size; ++i)
580 srp_free_iu(target->srp_host, target->tx_ring[i]); 648 srp_free_iu(target->srp_host, ch->tx_ring[i]);
581 kfree(target->tx_ring); 649 kfree(ch->tx_ring);
582 target->tx_ring = NULL; 650 ch->tx_ring = NULL;
583 } 651 }
584} 652}
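Clearing ch->target turns the channel into a recognizably dead object: srp_free_ch_ib() and srp_free_req_data() both start with an early-out guard, because the SCSI error handler can still call into the driver after removal. The guard, distilled to a sketch:

	if (!ch->target)	/* channel already torn down */
		return;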
585 653
586static void srp_path_rec_completion(int status, 654static void srp_path_rec_completion(int status,
587 struct ib_sa_path_rec *pathrec, 655 struct ib_sa_path_rec *pathrec,
588 void *target_ptr) 656 void *ch_ptr)
589{ 657{
590 struct srp_target_port *target = target_ptr; 658 struct srp_rdma_ch *ch = ch_ptr;
659 struct srp_target_port *target = ch->target;
591 660
592 target->status = status; 661 ch->status = status;
593 if (status) 662 if (status)
594 shost_printk(KERN_ERR, target->scsi_host, 663 shost_printk(KERN_ERR, target->scsi_host,
595 PFX "Got failed path rec status %d\n", status); 664 PFX "Got failed path rec status %d\n", status);
596 else 665 else
597 target->path = *pathrec; 666 ch->path = *pathrec;
598 complete(&target->done); 667 complete(&ch->done);
599} 668}
600 669
601static int srp_lookup_path(struct srp_target_port *target) 670static int srp_lookup_path(struct srp_rdma_ch *ch)
602{ 671{
672 struct srp_target_port *target = ch->target;
603 int ret; 673 int ret;
604 674
605 target->path.numb_path = 1; 675 ch->path.numb_path = 1;
606 676
607 init_completion(&target->done); 677 init_completion(&ch->done);
608 678
609 target->path_query_id = ib_sa_path_rec_get(&srp_sa_client, 679 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
610 target->srp_host->srp_dev->dev, 680 target->srp_host->srp_dev->dev,
611 target->srp_host->port, 681 target->srp_host->port,
612 &target->path, 682 &ch->path,
613 IB_SA_PATH_REC_SERVICE_ID | 683 IB_SA_PATH_REC_SERVICE_ID |
614 IB_SA_PATH_REC_DGID | 684 IB_SA_PATH_REC_DGID |
615 IB_SA_PATH_REC_SGID | 685 IB_SA_PATH_REC_SGID |
616 IB_SA_PATH_REC_NUMB_PATH | 686 IB_SA_PATH_REC_NUMB_PATH |
617 IB_SA_PATH_REC_PKEY, 687 IB_SA_PATH_REC_PKEY,
618 SRP_PATH_REC_TIMEOUT_MS, 688 SRP_PATH_REC_TIMEOUT_MS,
619 GFP_KERNEL, 689 GFP_KERNEL,
620 srp_path_rec_completion, 690 srp_path_rec_completion,
621 target, &target->path_query); 691 ch, &ch->path_query);
622 if (target->path_query_id < 0) 692 if (ch->path_query_id < 0)
623 return target->path_query_id; 693 return ch->path_query_id;
624 694
625 ret = wait_for_completion_interruptible(&target->done); 695 ret = wait_for_completion_interruptible(&ch->done);
626 if (ret < 0) 696 if (ret < 0)
627 return ret; 697 return ret;
628 698
629 if (target->status < 0) 699 if (ch->status < 0)
630 shost_printk(KERN_WARNING, target->scsi_host, 700 shost_printk(KERN_WARNING, target->scsi_host,
631 PFX "Path record query failed\n"); 701 PFX "Path record query failed\n");
632 702
633 return target->status; 703 return ch->status;
634} 704}
635 705
636static int srp_send_req(struct srp_target_port *target) 706static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
637{ 707{
708 struct srp_target_port *target = ch->target;
638 struct { 709 struct {
639 struct ib_cm_req_param param; 710 struct ib_cm_req_param param;
640 struct srp_login_req priv; 711 struct srp_login_req priv;
@@ -645,11 +716,11 @@ static int srp_send_req(struct srp_target_port *target)
645 if (!req) 716 if (!req)
646 return -ENOMEM; 717 return -ENOMEM;
647 718
648 req->param.primary_path = &target->path; 719 req->param.primary_path = &ch->path;
649 req->param.alternate_path = NULL; 720 req->param.alternate_path = NULL;
650 req->param.service_id = target->service_id; 721 req->param.service_id = target->service_id;
651 req->param.qp_num = target->qp->qp_num; 722 req->param.qp_num = ch->qp->qp_num;
652 req->param.qp_type = target->qp->qp_type; 723 req->param.qp_type = ch->qp->qp_type;
653 req->param.private_data = &req->priv; 724 req->param.private_data = &req->priv;
654 req->param.private_data_len = sizeof req->priv; 725 req->param.private_data_len = sizeof req->priv;
655 req->param.flow_control = 1; 726 req->param.flow_control = 1;
@@ -673,6 +744,8 @@ static int srp_send_req(struct srp_target_port *target)
673 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len); 744 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
674 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | 745 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
675 SRP_BUF_FORMAT_INDIRECT); 746 SRP_BUF_FORMAT_INDIRECT);
747 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
748 SRP_MULTICHAN_SINGLE);
676 /* 749 /*
677 * In the published SRP specification (draft rev. 16a), the 750 * In the published SRP specification (draft rev. 16a), the
678 * port identifier format is 8 bytes of ID extension followed 751 * port identifier format is 8 bytes of ID extension followed
@@ -684,7 +757,7 @@ static int srp_send_req(struct srp_target_port *target)
684 */ 757 */
685 if (target->io_class == SRP_REV10_IB_IO_CLASS) { 758 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
686 memcpy(req->priv.initiator_port_id, 759 memcpy(req->priv.initiator_port_id,
687 &target->path.sgid.global.interface_id, 8); 760 &target->sgid.global.interface_id, 8);
688 memcpy(req->priv.initiator_port_id + 8, 761 memcpy(req->priv.initiator_port_id + 8,
689 &target->initiator_ext, 8); 762 &target->initiator_ext, 8);
690 memcpy(req->priv.target_port_id, &target->ioc_guid, 8); 763 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
@@ -693,7 +766,7 @@ static int srp_send_req(struct srp_target_port *target)
693 memcpy(req->priv.initiator_port_id, 766 memcpy(req->priv.initiator_port_id,
694 &target->initiator_ext, 8); 767 &target->initiator_ext, 8);
695 memcpy(req->priv.initiator_port_id + 8, 768 memcpy(req->priv.initiator_port_id + 8,
696 &target->path.sgid.global.interface_id, 8); 769 &target->sgid.global.interface_id, 8);
697 memcpy(req->priv.target_port_id, &target->id_ext, 8); 770 memcpy(req->priv.target_port_id, &target->id_ext, 8);
698 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8); 771 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
699 } 772 }
@@ -713,7 +786,7 @@ static int srp_send_req(struct srp_target_port *target)
713 &target->srp_host->srp_dev->dev->node_guid, 8); 786 &target->srp_host->srp_dev->dev->node_guid, 8);
714 } 787 }
715 788
716 status = ib_send_cm_req(target->cm_id, &req->param); 789 status = ib_send_cm_req(ch->cm_id, &req->param);
717 790
718 kfree(req); 791 kfree(req);
719 792
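srp_send_req() now tells the target how the login relates to an existing session: SRP_MULTICHAN_SINGLE for the first channel (per the SRP multi-channel action field, the target terminates any stale session for this initiator port) versus SRP_MULTICHAN_MULTI for an additional channel joining the session. Callers drive the flag per channel, as srp_rport_reconnect() does later in this patch; a sketch:

	bool multich = false;

	for (i = 0; i < target->ch_count; i++) {
		ret = srp_connect_ch(&target->ch[i], multich);
		if (ret)
			break;
		multich = true;		/* later channels join, not replace */
	}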
@@ -754,28 +827,35 @@ static bool srp_change_conn_state(struct srp_target_port *target,
754 827
755static void srp_disconnect_target(struct srp_target_port *target) 828static void srp_disconnect_target(struct srp_target_port *target)
756{ 829{
830 struct srp_rdma_ch *ch;
831 int i;
832
757 if (srp_change_conn_state(target, false)) { 833 if (srp_change_conn_state(target, false)) {
758 /* XXX should send SRP_I_LOGOUT request */ 834 /* XXX should send SRP_I_LOGOUT request */
759 835
760 if (ib_send_cm_dreq(target->cm_id, NULL, 0)) { 836 for (i = 0; i < target->ch_count; i++) {
761 shost_printk(KERN_DEBUG, target->scsi_host, 837 ch = &target->ch[i];
762 PFX "Sending CM DREQ failed\n"); 838 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
839 shost_printk(KERN_DEBUG, target->scsi_host,
840 PFX "Sending CM DREQ failed\n");
841 }
763 } 842 }
764 } 843 }
765} 844}
766 845
767static void srp_free_req_data(struct srp_target_port *target) 846static void srp_free_req_data(struct srp_target_port *target,
847 struct srp_rdma_ch *ch)
768{ 848{
769 struct srp_device *dev = target->srp_host->srp_dev; 849 struct srp_device *dev = target->srp_host->srp_dev;
770 struct ib_device *ibdev = dev->dev; 850 struct ib_device *ibdev = dev->dev;
771 struct srp_request *req; 851 struct srp_request *req;
772 int i; 852 int i;
773 853
774 if (!target->req_ring) 854 if (!ch->target || !ch->req_ring)
775 return; 855 return;
776 856
777 for (i = 0; i < target->req_ring_size; ++i) { 857 for (i = 0; i < target->req_ring_size; ++i) {
778 req = &target->req_ring[i]; 858 req = &ch->req_ring[i];
779 if (dev->use_fast_reg) 859 if (dev->use_fast_reg)
780 kfree(req->fr_list); 860 kfree(req->fr_list);
781 else 861 else
@@ -789,12 +869,13 @@ static void srp_free_req_data(struct srp_target_port *target)
789 kfree(req->indirect_desc); 869 kfree(req->indirect_desc);
790 } 870 }
791 871
792 kfree(target->req_ring); 872 kfree(ch->req_ring);
793 target->req_ring = NULL; 873 ch->req_ring = NULL;
794} 874}
795 875
796static int srp_alloc_req_data(struct srp_target_port *target) 876static int srp_alloc_req_data(struct srp_rdma_ch *ch)
797{ 877{
878 struct srp_target_port *target = ch->target;
798 struct srp_device *srp_dev = target->srp_host->srp_dev; 879 struct srp_device *srp_dev = target->srp_host->srp_dev;
799 struct ib_device *ibdev = srp_dev->dev; 880 struct ib_device *ibdev = srp_dev->dev;
800 struct srp_request *req; 881 struct srp_request *req;
@@ -802,15 +883,13 @@ static int srp_alloc_req_data(struct srp_target_port *target)
802 dma_addr_t dma_addr; 883 dma_addr_t dma_addr;
803 int i, ret = -ENOMEM; 884 int i, ret = -ENOMEM;
804 885
805 INIT_LIST_HEAD(&target->free_reqs); 886 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
806 887 GFP_KERNEL);
807 target->req_ring = kzalloc(target->req_ring_size * 888 if (!ch->req_ring)
808 sizeof(*target->req_ring), GFP_KERNEL);
809 if (!target->req_ring)
810 goto out; 889 goto out;
811 890
812 for (i = 0; i < target->req_ring_size; ++i) { 891 for (i = 0; i < target->req_ring_size; ++i) {
813 req = &target->req_ring[i]; 892 req = &ch->req_ring[i];
814 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *), 893 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
815 GFP_KERNEL); 894 GFP_KERNEL);
816 if (!mr_list) 895 if (!mr_list)
@@ -834,8 +913,6 @@ static int srp_alloc_req_data(struct srp_target_port *target)
834 goto out; 913 goto out;
835 914
836 req->indirect_dma_addr = dma_addr; 915 req->indirect_dma_addr = dma_addr;
837 req->index = i;
838 list_add_tail(&req->list, &target->free_reqs);
839 } 916 }
840 ret = 0; 917 ret = 0;
841 918
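The driver-private free list and req->index disappear here: scsi-mq hands every command a tag that is unique per hardware queue, so the tag itself addresses the request slot. Before and after, as a sketch:

	/* before: driver-managed free list */
	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);

	/* after: the blk-mq tag selects the slot directly
	 * (see srp_queuecommand() below) */
	req = &ch->req_ring[blk_mq_unique_tag_to_tag(tag)];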
@@ -860,6 +937,9 @@ static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
860 937
861static void srp_remove_target(struct srp_target_port *target) 938static void srp_remove_target(struct srp_target_port *target)
862{ 939{
940 struct srp_rdma_ch *ch;
941 int i;
942
863 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); 943 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
864 944
865 srp_del_scsi_host_attr(target->scsi_host); 945 srp_del_scsi_host_attr(target->scsi_host);
@@ -868,11 +948,18 @@ static void srp_remove_target(struct srp_target_port *target)
868 scsi_remove_host(target->scsi_host); 948 scsi_remove_host(target->scsi_host);
869 srp_stop_rport_timers(target->rport); 949 srp_stop_rport_timers(target->rport);
870 srp_disconnect_target(target); 950 srp_disconnect_target(target);
871 ib_destroy_cm_id(target->cm_id); 951 for (i = 0; i < target->ch_count; i++) {
872 srp_free_target_ib(target); 952 ch = &target->ch[i];
953 srp_free_ch_ib(target, ch);
954 }
873 cancel_work_sync(&target->tl_err_work); 955 cancel_work_sync(&target->tl_err_work);
874 srp_rport_put(target->rport); 956 srp_rport_put(target->rport);
875 srp_free_req_data(target); 957 for (i = 0; i < target->ch_count; i++) {
958 ch = &target->ch[i];
959 srp_free_req_data(target, ch);
960 }
961 kfree(target->ch);
962 target->ch = NULL;
876 963
877 spin_lock(&target->srp_host->target_lock); 964 spin_lock(&target->srp_host->target_lock);
878 list_del(&target->list); 965 list_del(&target->list);
@@ -898,25 +985,25 @@ static void srp_rport_delete(struct srp_rport *rport)
898 srp_queue_remove_work(target); 985 srp_queue_remove_work(target);
899} 986}
900 987
901static int srp_connect_target(struct srp_target_port *target) 988static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
902{ 989{
903 int retries = 3; 990 struct srp_target_port *target = ch->target;
904 int ret; 991 int ret;
905 992
906 WARN_ON_ONCE(target->connected); 993 WARN_ON_ONCE(!multich && target->connected);
907 994
908 target->qp_in_error = false; 995 target->qp_in_error = false;
909 996
910 ret = srp_lookup_path(target); 997 ret = srp_lookup_path(ch);
911 if (ret) 998 if (ret)
912 return ret; 999 return ret;
913 1000
914 while (1) { 1001 while (1) {
915 init_completion(&target->done); 1002 init_completion(&ch->done);
916 ret = srp_send_req(target); 1003 ret = srp_send_req(ch, multich);
917 if (ret) 1004 if (ret)
918 return ret; 1005 return ret;
919 ret = wait_for_completion_interruptible(&target->done); 1006 ret = wait_for_completion_interruptible(&ch->done);
920 if (ret < 0) 1007 if (ret < 0)
921 return ret; 1008 return ret;
922 1009
@@ -926,13 +1013,13 @@ static int srp_connect_target(struct srp_target_port *target)
926 * back, or SRP_DLID_REDIRECT if we get a lid/qp 1013 * back, or SRP_DLID_REDIRECT if we get a lid/qp
927 * redirect REJ back. 1014 * redirect REJ back.
928 */ 1015 */
929 switch (target->status) { 1016 switch (ch->status) {
930 case 0: 1017 case 0:
931 srp_change_conn_state(target, true); 1018 srp_change_conn_state(target, true);
932 return 0; 1019 return 0;
933 1020
934 case SRP_PORT_REDIRECT: 1021 case SRP_PORT_REDIRECT:
935 ret = srp_lookup_path(target); 1022 ret = srp_lookup_path(ch);
936 if (ret) 1023 if (ret)
937 return ret; 1024 return ret;
938 break; 1025 break;
@@ -941,27 +1028,18 @@ static int srp_connect_target(struct srp_target_port *target)
941 break; 1028 break;
942 1029
943 case SRP_STALE_CONN: 1030 case SRP_STALE_CONN:
944 /* Our current CM id was stale, and is now in timewait.
945 * Try to reconnect with a new one.
946 */
947 if (!retries-- || srp_new_cm_id(target)) {
948 shost_printk(KERN_ERR, target->scsi_host, PFX
949 "giving up on stale connection\n");
950 target->status = -ECONNRESET;
951 return target->status;
952 }
953
954 shost_printk(KERN_ERR, target->scsi_host, PFX 1031 shost_printk(KERN_ERR, target->scsi_host, PFX
955 "retrying stale connection\n"); 1032 "giving up on stale connection\n");
956 break; 1033 ch->status = -ECONNRESET;
1034 return ch->status;
957 1035
958 default: 1036 default:
959 return target->status; 1037 return ch->status;
960 } 1038 }
961 } 1039 }
962} 1040}
963 1041
964static int srp_inv_rkey(struct srp_target_port *target, u32 rkey) 1042static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
965{ 1043{
966 struct ib_send_wr *bad_wr; 1044 struct ib_send_wr *bad_wr;
967 struct ib_send_wr wr = { 1045 struct ib_send_wr wr = {
@@ -973,13 +1051,14 @@ static int srp_inv_rkey(struct srp_target_port *target, u32 rkey)
973 .ex.invalidate_rkey = rkey, 1051 .ex.invalidate_rkey = rkey,
974 }; 1052 };
975 1053
976 return ib_post_send(target->qp, &wr, &bad_wr); 1054 return ib_post_send(ch->qp, &wr, &bad_wr);
977} 1055}
978 1056
979static void srp_unmap_data(struct scsi_cmnd *scmnd, 1057static void srp_unmap_data(struct scsi_cmnd *scmnd,
980 struct srp_target_port *target, 1058 struct srp_rdma_ch *ch,
981 struct srp_request *req) 1059 struct srp_request *req)
982{ 1060{
1061 struct srp_target_port *target = ch->target;
983 struct srp_device *dev = target->srp_host->srp_dev; 1062 struct srp_device *dev = target->srp_host->srp_dev;
984 struct ib_device *ibdev = dev->dev; 1063 struct ib_device *ibdev = dev->dev;
985 int i, res; 1064 int i, res;
@@ -993,7 +1072,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
993 struct srp_fr_desc **pfr; 1072 struct srp_fr_desc **pfr;
994 1073
995 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) { 1074 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
996 res = srp_inv_rkey(target, (*pfr)->mr->rkey); 1075 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
997 if (res < 0) { 1076 if (res < 0) {
998 shost_printk(KERN_ERR, target->scsi_host, PFX 1077 shost_printk(KERN_ERR, target->scsi_host, PFX
999 "Queueing INV WR for rkey %#x failed (%d)\n", 1078 "Queueing INV WR for rkey %#x failed (%d)\n",
@@ -1003,7 +1082,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
1003 } 1082 }
1004 } 1083 }
1005 if (req->nmdesc) 1084 if (req->nmdesc)
1006 srp_fr_pool_put(target->fr_pool, req->fr_list, 1085 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1007 req->nmdesc); 1086 req->nmdesc);
1008 } else { 1087 } else {
1009 struct ib_pool_fmr **pfmr; 1088 struct ib_pool_fmr **pfmr;
@@ -1018,7 +1097,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
1018 1097
1019/** 1098/**
1020 * srp_claim_req - Take ownership of the scmnd associated with a request. 1099 * srp_claim_req - Take ownership of the scmnd associated with a request.
1021 * @target: SRP target port. 1100 * @ch: SRP RDMA channel.
1022 * @req: SRP request. 1101 * @req: SRP request.
1023 * @sdev: If not NULL, only take ownership for this SCSI device. 1102 * @sdev: If not NULL, only take ownership for this SCSI device.
1024 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take 1103 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
@@ -1027,14 +1106,14 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
1027 * Return value: 1106 * Return value:
1028 * Either NULL or a pointer to the SCSI command the caller became owner of. 1107 * Either NULL or a pointer to the SCSI command the caller became owner of.
1029 */ 1108 */
1030static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, 1109static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1031 struct srp_request *req, 1110 struct srp_request *req,
1032 struct scsi_device *sdev, 1111 struct scsi_device *sdev,
1033 struct scsi_cmnd *scmnd) 1112 struct scsi_cmnd *scmnd)
1034{ 1113{
1035 unsigned long flags; 1114 unsigned long flags;
1036 1115
1037 spin_lock_irqsave(&target->lock, flags); 1116 spin_lock_irqsave(&ch->lock, flags);
1038 if (req->scmnd && 1117 if (req->scmnd &&
1039 (!sdev || req->scmnd->device == sdev) && 1118 (!sdev || req->scmnd->device == sdev) &&
1040 (!scmnd || req->scmnd == scmnd)) { 1119 (!scmnd || req->scmnd == scmnd)) {
@@ -1043,40 +1122,37 @@ static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
1043 } else { 1122 } else {
1044 scmnd = NULL; 1123 scmnd = NULL;
1045 } 1124 }
1046 spin_unlock_irqrestore(&target->lock, flags); 1125 spin_unlock_irqrestore(&ch->lock, flags);
1047 1126
1048 return scmnd; 1127 return scmnd;
1049} 1128}
1050 1129
1051/** 1130/**
1052 * srp_free_req() - Unmap data and add request to the free request list. 1131 * srp_free_req() - Unmap data and add request to the free request list.
1053 * @target: SRP target port. 1132 * @ch: SRP RDMA channel.
1054 * @req: Request to be freed. 1133 * @req: Request to be freed.
1055 * @scmnd: SCSI command associated with @req. 1134 * @scmnd: SCSI command associated with @req.
1056 * @req_lim_delta: Amount to be added to @target->req_lim. 1135 * @req_lim_delta: Amount to be added to @target->req_lim.
1057 */ 1136 */
1058static void srp_free_req(struct srp_target_port *target, 1137static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1059 struct srp_request *req, struct scsi_cmnd *scmnd, 1138 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1060 s32 req_lim_delta)
1061{ 1139{
1062 unsigned long flags; 1140 unsigned long flags;
1063 1141
1064 srp_unmap_data(scmnd, target, req); 1142 srp_unmap_data(scmnd, ch, req);
1065 1143
1066 spin_lock_irqsave(&target->lock, flags); 1144 spin_lock_irqsave(&ch->lock, flags);
1067 target->req_lim += req_lim_delta; 1145 ch->req_lim += req_lim_delta;
1068 list_add_tail(&req->list, &target->free_reqs); 1146 spin_unlock_irqrestore(&ch->lock, flags);
1069 spin_unlock_irqrestore(&target->lock, flags);
1070} 1147}
1071 1148
1072static void srp_finish_req(struct srp_target_port *target, 1149static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1073 struct srp_request *req, struct scsi_device *sdev, 1150 struct scsi_device *sdev, int result)
1074 int result)
1075{ 1151{
1076 struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL); 1152 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1077 1153
1078 if (scmnd) { 1154 if (scmnd) {
1079 srp_free_req(target, req, scmnd, 0); 1155 srp_free_req(ch, req, scmnd, 0);
1080 scmnd->result = result; 1156 scmnd->result = result;
1081 scmnd->scsi_done(scmnd); 1157 scmnd->scsi_done(scmnd);
1082 } 1158 }
@@ -1085,9 +1161,10 @@ static void srp_finish_req(struct srp_target_port *target,
1085static void srp_terminate_io(struct srp_rport *rport) 1161static void srp_terminate_io(struct srp_rport *rport)
1086{ 1162{
1087 struct srp_target_port *target = rport->lld_data; 1163 struct srp_target_port *target = rport->lld_data;
1164 struct srp_rdma_ch *ch;
1088 struct Scsi_Host *shost = target->scsi_host; 1165 struct Scsi_Host *shost = target->scsi_host;
1089 struct scsi_device *sdev; 1166 struct scsi_device *sdev;
1090 int i; 1167 int i, j;
1091 1168
1092 /* 1169 /*
1093 * Invoking srp_terminate_io() while srp_queuecommand() is running 1170 * Invoking srp_terminate_io() while srp_queuecommand() is running
@@ -1096,9 +1173,15 @@ static void srp_terminate_io(struct srp_rport *rport)
1096 shost_for_each_device(sdev, shost) 1173 shost_for_each_device(sdev, shost)
1097 WARN_ON_ONCE(sdev->request_queue->request_fn_active); 1174 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1098 1175
1099 for (i = 0; i < target->req_ring_size; ++i) { 1176 for (i = 0; i < target->ch_count; i++) {
1100 struct srp_request *req = &target->req_ring[i]; 1177 ch = &target->ch[i];
1101 srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16); 1178
1179 for (j = 0; j < target->req_ring_size; ++j) {
1180 struct srp_request *req = &ch->req_ring[j];
1181
1182 srp_finish_req(ch, req, NULL,
1183 DID_TRANSPORT_FAILFAST << 16);
1184 }
1102 } 1185 }
1103} 1186}
1104 1187
@@ -1114,34 +1197,61 @@ static void srp_terminate_io(struct srp_rport *rport)
1114static int srp_rport_reconnect(struct srp_rport *rport) 1197static int srp_rport_reconnect(struct srp_rport *rport)
1115{ 1198{
1116 struct srp_target_port *target = rport->lld_data; 1199 struct srp_target_port *target = rport->lld_data;
1117 int i, ret; 1200 struct srp_rdma_ch *ch;
1201 int i, j, ret = 0;
1202 bool multich = false;
1118 1203
1119 srp_disconnect_target(target); 1204 srp_disconnect_target(target);
1205
1206 if (target->state == SRP_TARGET_SCANNING)
1207 return -ENODEV;
1208
1120 /* 1209 /*
1121 * Now get a new local CM ID so that we avoid confusing the target in 1210 * Now get a new local CM ID so that we avoid confusing the target in
1122 * case things are really fouled up. Doing so also ensures that all CM 1211 * case things are really fouled up. Doing so also ensures that all CM
1123 * callbacks will have finished before a new QP is allocated. 1212 * callbacks will have finished before a new QP is allocated.
1124 */ 1213 */
1125 ret = srp_new_cm_id(target); 1214 for (i = 0; i < target->ch_count; i++) {
1126 1215 ch = &target->ch[i];
1127 for (i = 0; i < target->req_ring_size; ++i) { 1216 if (!ch->target)
1128 struct srp_request *req = &target->req_ring[i]; 1217 break;
1129 srp_finish_req(target, req, NULL, DID_RESET << 16); 1218 ret += srp_new_cm_id(ch);
1130 } 1219 }
1220 for (i = 0; i < target->ch_count; i++) {
1221 ch = &target->ch[i];
1222 if (!ch->target)
1223 break;
1224 for (j = 0; j < target->req_ring_size; ++j) {
1225 struct srp_request *req = &ch->req_ring[j];
1131 1226
1132 /* 1227 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1133 * Whether or not creating a new CM ID succeeded, create a new 1228 }
1134 * QP. This guarantees that all callback functions for the old QP have 1229 }
1135 * finished before any send requests are posted on the new QP. 1230 for (i = 0; i < target->ch_count; i++) {
1136 */ 1231 ch = &target->ch[i];
1137 ret += srp_create_target_ib(target); 1232 if (!ch->target)
1138 1233 break;
1139 INIT_LIST_HEAD(&target->free_tx); 1234 /*
1140 for (i = 0; i < target->queue_size; ++i) 1235 * Whether or not creating a new CM ID succeeded, create a new
 1141	list_add(&target->tx_ring[i]->list, &target->free_tx);	 1236		 * QP. This guarantees that all completion callbacks for the old
 1237		 * QP have finished running before request resetting starts.
1238 */
1239 ret += srp_create_ch_ib(ch);
1142 1240
1143 if (ret == 0) 1241 INIT_LIST_HEAD(&ch->free_tx);
1144 ret = srp_connect_target(target); 1242 for (j = 0; j < target->queue_size; ++j)
1243 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1244 }
1245 for (i = 0; i < target->ch_count; i++) {
1246 ch = &target->ch[i];
1247 if (ret || !ch->target) {
1248 if (i > 1)
1249 ret = 0;
1250 break;
1251 }
1252 ret = srp_connect_ch(ch, multich);
1253 multich = true;
1254 }
1145 1255
1146 if (ret == 0) 1256 if (ret == 0)
1147 shost_printk(KERN_INFO, target->scsi_host, 1257 shost_printk(KERN_INFO, target->scsi_host,
@@ -1165,12 +1275,12 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1165} 1275}
1166 1276
1167static int srp_map_finish_fmr(struct srp_map_state *state, 1277static int srp_map_finish_fmr(struct srp_map_state *state,
1168 struct srp_target_port *target) 1278 struct srp_rdma_ch *ch)
1169{ 1279{
1170 struct ib_pool_fmr *fmr; 1280 struct ib_pool_fmr *fmr;
1171 u64 io_addr = 0; 1281 u64 io_addr = 0;
1172 1282
1173 fmr = ib_fmr_pool_map_phys(target->fmr_pool, state->pages, 1283 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1174 state->npages, io_addr); 1284 state->npages, io_addr);
1175 if (IS_ERR(fmr)) 1285 if (IS_ERR(fmr))
1176 return PTR_ERR(fmr); 1286 return PTR_ERR(fmr);
@@ -1184,15 +1294,16 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
1184} 1294}
1185 1295
1186static int srp_map_finish_fr(struct srp_map_state *state, 1296static int srp_map_finish_fr(struct srp_map_state *state,
1187 struct srp_target_port *target) 1297 struct srp_rdma_ch *ch)
1188{ 1298{
1299 struct srp_target_port *target = ch->target;
1189 struct srp_device *dev = target->srp_host->srp_dev; 1300 struct srp_device *dev = target->srp_host->srp_dev;
1190 struct ib_send_wr *bad_wr; 1301 struct ib_send_wr *bad_wr;
1191 struct ib_send_wr wr; 1302 struct ib_send_wr wr;
1192 struct srp_fr_desc *desc; 1303 struct srp_fr_desc *desc;
1193 u32 rkey; 1304 u32 rkey;
1194 1305
1195 desc = srp_fr_pool_get(target->fr_pool); 1306 desc = srp_fr_pool_get(ch->fr_pool);
1196 if (!desc) 1307 if (!desc)
1197 return -ENOMEM; 1308 return -ENOMEM;
1198 1309
@@ -1221,12 +1332,13 @@ static int srp_map_finish_fr(struct srp_map_state *state,
1221 srp_map_desc(state, state->base_dma_addr, state->dma_len, 1332 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1222 desc->mr->rkey); 1333 desc->mr->rkey);
1223 1334
1224 return ib_post_send(target->qp, &wr, &bad_wr); 1335 return ib_post_send(ch->qp, &wr, &bad_wr);
1225} 1336}
1226 1337
1227static int srp_finish_mapping(struct srp_map_state *state, 1338static int srp_finish_mapping(struct srp_map_state *state,
1228 struct srp_target_port *target) 1339 struct srp_rdma_ch *ch)
1229{ 1340{
1341 struct srp_target_port *target = ch->target;
1230 int ret = 0; 1342 int ret = 0;
1231 1343
1232 if (state->npages == 0) 1344 if (state->npages == 0)
@@ -1237,8 +1349,8 @@ static int srp_finish_mapping(struct srp_map_state *state,
1237 target->rkey); 1349 target->rkey);
1238 else 1350 else
1239 ret = target->srp_host->srp_dev->use_fast_reg ? 1351 ret = target->srp_host->srp_dev->use_fast_reg ?
1240 srp_map_finish_fr(state, target) : 1352 srp_map_finish_fr(state, ch) :
1241 srp_map_finish_fmr(state, target); 1353 srp_map_finish_fmr(state, ch);
1242 1354
1243 if (ret == 0) { 1355 if (ret == 0) {
1244 state->npages = 0; 1356 state->npages = 0;
@@ -1258,10 +1370,11 @@ static void srp_map_update_start(struct srp_map_state *state,
1258} 1370}
1259 1371
1260static int srp_map_sg_entry(struct srp_map_state *state, 1372static int srp_map_sg_entry(struct srp_map_state *state,
1261 struct srp_target_port *target, 1373 struct srp_rdma_ch *ch,
1262 struct scatterlist *sg, int sg_index, 1374 struct scatterlist *sg, int sg_index,
1263 bool use_mr) 1375 bool use_mr)
1264{ 1376{
1377 struct srp_target_port *target = ch->target;
1265 struct srp_device *dev = target->srp_host->srp_dev; 1378 struct srp_device *dev = target->srp_host->srp_dev;
1266 struct ib_device *ibdev = dev->dev; 1379 struct ib_device *ibdev = dev->dev;
1267 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg); 1380 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
@@ -1290,7 +1403,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
1290 */ 1403 */
1291 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) || 1404 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1292 dma_len > dev->mr_max_size) { 1405 dma_len > dev->mr_max_size) {
1293 ret = srp_finish_mapping(state, target); 1406 ret = srp_finish_mapping(state, ch);
1294 if (ret) 1407 if (ret)
1295 return ret; 1408 return ret;
1296 1409
@@ -1311,7 +1424,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
1311 while (dma_len) { 1424 while (dma_len) {
1312 unsigned offset = dma_addr & ~dev->mr_page_mask; 1425 unsigned offset = dma_addr & ~dev->mr_page_mask;
1313 if (state->npages == dev->max_pages_per_mr || offset != 0) { 1426 if (state->npages == dev->max_pages_per_mr || offset != 0) {
1314 ret = srp_finish_mapping(state, target); 1427 ret = srp_finish_mapping(state, ch);
1315 if (ret) 1428 if (ret)
1316 return ret; 1429 return ret;
1317 1430
@@ -1335,17 +1448,18 @@ static int srp_map_sg_entry(struct srp_map_state *state,
1335 */ 1448 */
1336 ret = 0; 1449 ret = 0;
1337 if (len != dev->mr_page_size) { 1450 if (len != dev->mr_page_size) {
1338 ret = srp_finish_mapping(state, target); 1451 ret = srp_finish_mapping(state, ch);
1339 if (!ret) 1452 if (!ret)
1340 srp_map_update_start(state, NULL, 0, 0); 1453 srp_map_update_start(state, NULL, 0, 0);
1341 } 1454 }
1342 return ret; 1455 return ret;
1343} 1456}
1344 1457
1345static int srp_map_sg(struct srp_map_state *state, 1458static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1346 struct srp_target_port *target, struct srp_request *req, 1459 struct srp_request *req, struct scatterlist *scat,
1347 struct scatterlist *scat, int count) 1460 int count)
1348{ 1461{
1462 struct srp_target_port *target = ch->target;
1349 struct srp_device *dev = target->srp_host->srp_dev; 1463 struct srp_device *dev = target->srp_host->srp_dev;
1350 struct ib_device *ibdev = dev->dev; 1464 struct ib_device *ibdev = dev->dev;
1351 struct scatterlist *sg; 1465 struct scatterlist *sg;
@@ -1356,14 +1470,14 @@ static int srp_map_sg(struct srp_map_state *state,
1356 state->pages = req->map_page; 1470 state->pages = req->map_page;
1357 if (dev->use_fast_reg) { 1471 if (dev->use_fast_reg) {
1358 state->next_fr = req->fr_list; 1472 state->next_fr = req->fr_list;
1359 use_mr = !!target->fr_pool; 1473 use_mr = !!ch->fr_pool;
1360 } else { 1474 } else {
1361 state->next_fmr = req->fmr_list; 1475 state->next_fmr = req->fmr_list;
1362 use_mr = !!target->fmr_pool; 1476 use_mr = !!ch->fmr_pool;
1363 } 1477 }
1364 1478
1365 for_each_sg(scat, sg, count, i) { 1479 for_each_sg(scat, sg, count, i) {
1366 if (srp_map_sg_entry(state, target, sg, i, use_mr)) { 1480 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
1367 /* 1481 /*
1368 * Memory registration failed, so backtrack to the 1482 * Memory registration failed, so backtrack to the
1369 * first unmapped entry and continue on without using 1483 * first unmapped entry and continue on without using
@@ -1385,7 +1499,7 @@ backtrack:
1385 } 1499 }
1386 } 1500 }
1387 1501
1388 if (use_mr && srp_finish_mapping(state, target)) 1502 if (use_mr && srp_finish_mapping(state, ch))
1389 goto backtrack; 1503 goto backtrack;
1390 1504
1391 req->nmdesc = state->nmdesc; 1505 req->nmdesc = state->nmdesc;
@@ -1393,9 +1507,10 @@ backtrack:
1393 return 0; 1507 return 0;
1394} 1508}
1395 1509
1396static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, 1510static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1397 struct srp_request *req) 1511 struct srp_request *req)
1398{ 1512{
1513 struct srp_target_port *target = ch->target;
1399 struct scatterlist *scat; 1514 struct scatterlist *scat;
1400 struct srp_cmd *cmd = req->cmd->buf; 1515 struct srp_cmd *cmd = req->cmd->buf;
1401 int len, nents, count; 1516 int len, nents, count;
@@ -1457,7 +1572,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
1457 target->indirect_size, DMA_TO_DEVICE); 1572 target->indirect_size, DMA_TO_DEVICE);
1458 1573
1459 memset(&state, 0, sizeof(state)); 1574 memset(&state, 0, sizeof(state));
1460 srp_map_sg(&state, target, req, scat, count); 1575 srp_map_sg(&state, ch, req, scat, count);
1461 1576
1462 /* We've mapped the request, now pull as much of the indirect 1577 /* We've mapped the request, now pull as much of the indirect
1463 * descriptor table as we can into the command buffer. If this 1578 * descriptor table as we can into the command buffer. If this
@@ -1518,20 +1633,20 @@ map_complete:
1518/* 1633/*
1519 * Return an IU and possible credit to the free pool 1634 * Return an IU and possible credit to the free pool
1520 */ 1635 */
1521static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu, 1636static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1522 enum srp_iu_type iu_type) 1637 enum srp_iu_type iu_type)
1523{ 1638{
1524 unsigned long flags; 1639 unsigned long flags;
1525 1640
1526 spin_lock_irqsave(&target->lock, flags); 1641 spin_lock_irqsave(&ch->lock, flags);
1527 list_add(&iu->list, &target->free_tx); 1642 list_add(&iu->list, &ch->free_tx);
1528 if (iu_type != SRP_IU_RSP) 1643 if (iu_type != SRP_IU_RSP)
1529 ++target->req_lim; 1644 ++ch->req_lim;
1530 spin_unlock_irqrestore(&target->lock, flags); 1645 spin_unlock_irqrestore(&ch->lock, flags);
1531} 1646}
1532 1647
1533/* 1648/*
1534 * Must be called with target->lock held to protect req_lim and free_tx. 1649 * Must be called with ch->lock held to protect req_lim and free_tx.
1535 * If IU is not sent, it must be returned using srp_put_tx_iu(). 1650 * If IU is not sent, it must be returned using srp_put_tx_iu().
1536 * 1651 *
1537 * Note: 1652 * Note:
@@ -1543,35 +1658,36 @@ static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
1543 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than 1658 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1544 * one unanswered SRP request to an initiator. 1659 * one unanswered SRP request to an initiator.
1545 */ 1660 */
1546static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, 1661static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1547 enum srp_iu_type iu_type) 1662 enum srp_iu_type iu_type)
1548{ 1663{
1664 struct srp_target_port *target = ch->target;
1549 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; 1665 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1550 struct srp_iu *iu; 1666 struct srp_iu *iu;
1551 1667
1552 srp_send_completion(target->send_cq, target); 1668 srp_send_completion(ch->send_cq, ch);
1553 1669
1554 if (list_empty(&target->free_tx)) 1670 if (list_empty(&ch->free_tx))
1555 return NULL; 1671 return NULL;
1556 1672
1557 /* Initiator responses to target requests do not consume credits */ 1673 /* Initiator responses to target requests do not consume credits */
1558 if (iu_type != SRP_IU_RSP) { 1674 if (iu_type != SRP_IU_RSP) {
1559 if (target->req_lim <= rsv) { 1675 if (ch->req_lim <= rsv) {
1560 ++target->zero_req_lim; 1676 ++target->zero_req_lim;
1561 return NULL; 1677 return NULL;
1562 } 1678 }
1563 1679
1564 --target->req_lim; 1680 --ch->req_lim;
1565 } 1681 }
1566 1682
1567 iu = list_first_entry(&target->free_tx, struct srp_iu, list); 1683 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1568 list_del(&iu->list); 1684 list_del(&iu->list);
1569 return iu; 1685 return iu;
1570} 1686}
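The rsv term reserves SRP_TSK_MGMT_SQ_SIZE request-limit credits for task-management IUs, so an abort or reset can still be sent after normal commands have drained the credit pool; responses (SRP_IU_RSP) bypass the check entirely. A worked example, assuming ch->req_lim == 2, SRP_TSK_MGMT_SQ_SIZE == 1 and ch->lock held as required:

	__srp_get_tx_iu(ch, SRP_IU_CMD);	/* 2 > 1: ok, req_lim -> 1 */
	__srp_get_tx_iu(ch, SRP_IU_CMD);	/* 1 <= 1: NULL, credit reserved */
	__srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);	/* 1 > 0: ok, req_lim -> 0 */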
1571 1687
1572static int srp_post_send(struct srp_target_port *target, 1688static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1573 struct srp_iu *iu, int len)
1574{ 1689{
1690 struct srp_target_port *target = ch->target;
1575 struct ib_sge list; 1691 struct ib_sge list;
1576 struct ib_send_wr wr, *bad_wr; 1692 struct ib_send_wr wr, *bad_wr;
1577 1693
@@ -1586,11 +1702,12 @@ static int srp_post_send(struct srp_target_port *target,
1586 wr.opcode = IB_WR_SEND; 1702 wr.opcode = IB_WR_SEND;
1587 wr.send_flags = IB_SEND_SIGNALED; 1703 wr.send_flags = IB_SEND_SIGNALED;
1588 1704
1589 return ib_post_send(target->qp, &wr, &bad_wr); 1705 return ib_post_send(ch->qp, &wr, &bad_wr);
1590} 1706}
1591 1707
1592static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu) 1708static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1593{ 1709{
1710 struct srp_target_port *target = ch->target;
1594 struct ib_recv_wr wr, *bad_wr; 1711 struct ib_recv_wr wr, *bad_wr;
1595 struct ib_sge list; 1712 struct ib_sge list;
1596 1713
@@ -1603,35 +1720,39 @@ static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
1603 wr.sg_list = &list; 1720 wr.sg_list = &list;
1604 wr.num_sge = 1; 1721 wr.num_sge = 1;
1605 1722
1606 return ib_post_recv(target->qp, &wr, &bad_wr); 1723 return ib_post_recv(ch->qp, &wr, &bad_wr);
1607} 1724}
1608 1725
1609static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) 1726static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1610{ 1727{
1728 struct srp_target_port *target = ch->target;
1611 struct srp_request *req; 1729 struct srp_request *req;
1612 struct scsi_cmnd *scmnd; 1730 struct scsi_cmnd *scmnd;
1613 unsigned long flags; 1731 unsigned long flags;
1614 1732
1615 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 1733 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1616 spin_lock_irqsave(&target->lock, flags); 1734 spin_lock_irqsave(&ch->lock, flags);
1617 target->req_lim += be32_to_cpu(rsp->req_lim_delta); 1735 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1618 spin_unlock_irqrestore(&target->lock, flags); 1736 spin_unlock_irqrestore(&ch->lock, flags);
1619 1737
1620 target->tsk_mgmt_status = -1; 1738 ch->tsk_mgmt_status = -1;
1621 if (be32_to_cpu(rsp->resp_data_len) >= 4) 1739 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1622 target->tsk_mgmt_status = rsp->data[3]; 1740 ch->tsk_mgmt_status = rsp->data[3];
1623 complete(&target->tsk_mgmt_done); 1741 complete(&ch->tsk_mgmt_done);
1624 } else { 1742 } else {
1625 req = &target->req_ring[rsp->tag]; 1743 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1626 scmnd = srp_claim_req(target, req, NULL, NULL); 1744 if (scmnd) {
1745 req = (void *)scmnd->host_scribble;
1746 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1747 }
1627 if (!scmnd) { 1748 if (!scmnd) {
1628 shost_printk(KERN_ERR, target->scsi_host, 1749 shost_printk(KERN_ERR, target->scsi_host,
1629 "Null scmnd for RSP w/tag %016llx\n", 1750 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1630 (unsigned long long) rsp->tag); 1751 rsp->tag, ch - target->ch, ch->qp->qp_num);
1631 1752
1632 spin_lock_irqsave(&target->lock, flags); 1753 spin_lock_irqsave(&ch->lock, flags);
1633 target->req_lim += be32_to_cpu(rsp->req_lim_delta); 1754 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1634 spin_unlock_irqrestore(&target->lock, flags); 1755 spin_unlock_irqrestore(&ch->lock, flags);
1635 1756
1636 return; 1757 return;
1637 } 1758 }
@@ -1653,7 +1774,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
1653 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER)) 1774 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1654 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); 1775 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1655 1776
1656 srp_free_req(target, req, scmnd, 1777 srp_free_req(ch, req, scmnd,
1657 be32_to_cpu(rsp->req_lim_delta)); 1778 be32_to_cpu(rsp->req_lim_delta));
1658 1779
1659 scmnd->host_scribble = NULL; 1780 scmnd->host_scribble = NULL;
@@ -1661,18 +1782,19 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
1661 } 1782 }
1662} 1783}
1663 1784
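Tag-based response routing replaces the old req = &target->req_ring[rsp->tag] lookup. The SRP tag sent at submit time is now the blk-mq unique tag, and the driver stores the request pointer in scmnd->host_scribble when queuing (not visible in these hunks), so the response side recovers both from the echoed tag. A sketch of the round trip:

	/* submit (srp_queuecommand): */
	cmd->tag = blk_mq_unique_tag(scmnd->request);
	scmnd->host_scribble = (void *)req;

	/* complete (srp_process_rsp): */
	scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
	if (scmnd)
		req = (void *)scmnd->host_scribble;

scsi_host_find_tag() resolves only tags the block layer still considers allocated, which is what makes the NULL check a meaningful defense against stale or corrupted tags.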
1664static int srp_response_common(struct srp_target_port *target, s32 req_delta, 1785static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1665 void *rsp, int len) 1786 void *rsp, int len)
1666{ 1787{
1788 struct srp_target_port *target = ch->target;
1667 struct ib_device *dev = target->srp_host->srp_dev->dev; 1789 struct ib_device *dev = target->srp_host->srp_dev->dev;
1668 unsigned long flags; 1790 unsigned long flags;
1669 struct srp_iu *iu; 1791 struct srp_iu *iu;
1670 int err; 1792 int err;
1671 1793
1672 spin_lock_irqsave(&target->lock, flags); 1794 spin_lock_irqsave(&ch->lock, flags);
1673 target->req_lim += req_delta; 1795 ch->req_lim += req_delta;
1674 iu = __srp_get_tx_iu(target, SRP_IU_RSP); 1796 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1675 spin_unlock_irqrestore(&target->lock, flags); 1797 spin_unlock_irqrestore(&ch->lock, flags);
1676 1798
1677 if (!iu) { 1799 if (!iu) {
1678 shost_printk(KERN_ERR, target->scsi_host, PFX 1800 shost_printk(KERN_ERR, target->scsi_host, PFX
@@ -1684,17 +1806,17 @@ static int srp_response_common(struct srp_target_port *target, s32 req_delta,
1684 memcpy(iu->buf, rsp, len); 1806 memcpy(iu->buf, rsp, len);
1685 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); 1807 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1686 1808
1687 err = srp_post_send(target, iu, len); 1809 err = srp_post_send(ch, iu, len);
1688 if (err) { 1810 if (err) {
1689 shost_printk(KERN_ERR, target->scsi_host, PFX 1811 shost_printk(KERN_ERR, target->scsi_host, PFX
1690 "unable to post response: %d\n", err); 1812 "unable to post response: %d\n", err);
1691 srp_put_tx_iu(target, iu, SRP_IU_RSP); 1813 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1692 } 1814 }
1693 1815
1694 return err; 1816 return err;
1695} 1817}
1696 1818
1697static void srp_process_cred_req(struct srp_target_port *target, 1819static void srp_process_cred_req(struct srp_rdma_ch *ch,
1698 struct srp_cred_req *req) 1820 struct srp_cred_req *req)
1699{ 1821{
1700 struct srp_cred_rsp rsp = { 1822 struct srp_cred_rsp rsp = {
@@ -1703,14 +1825,15 @@ static void srp_process_cred_req(struct srp_target_port *target,
1703 }; 1825 };
1704 s32 delta = be32_to_cpu(req->req_lim_delta); 1826 s32 delta = be32_to_cpu(req->req_lim_delta);
1705 1827
1706 if (srp_response_common(target, delta, &rsp, sizeof rsp)) 1828 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1707 shost_printk(KERN_ERR, target->scsi_host, PFX 1829 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1708 "problems processing SRP_CRED_REQ\n"); 1830 "problems processing SRP_CRED_REQ\n");
1709} 1831}
1710 1832
1711static void srp_process_aer_req(struct srp_target_port *target, 1833static void srp_process_aer_req(struct srp_rdma_ch *ch,
1712 struct srp_aer_req *req) 1834 struct srp_aer_req *req)
1713{ 1835{
1836 struct srp_target_port *target = ch->target;
1714 struct srp_aer_rsp rsp = { 1837 struct srp_aer_rsp rsp = {
1715 .opcode = SRP_AER_RSP, 1838 .opcode = SRP_AER_RSP,
1716 .tag = req->tag, 1839 .tag = req->tag,
@@ -1720,19 +1843,20 @@ static void srp_process_aer_req(struct srp_target_port *target,
1720 shost_printk(KERN_ERR, target->scsi_host, PFX 1843 shost_printk(KERN_ERR, target->scsi_host, PFX
1721 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun)); 1844 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1722 1845
1723 if (srp_response_common(target, delta, &rsp, sizeof rsp)) 1846 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1724 shost_printk(KERN_ERR, target->scsi_host, PFX 1847 shost_printk(KERN_ERR, target->scsi_host, PFX
1725 "problems processing SRP_AER_REQ\n"); 1848 "problems processing SRP_AER_REQ\n");
1726} 1849}
1727 1850
1728static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) 1851static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
1729{ 1852{
1853 struct srp_target_port *target = ch->target;
1730 struct ib_device *dev = target->srp_host->srp_dev->dev; 1854 struct ib_device *dev = target->srp_host->srp_dev->dev;
1731 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id; 1855 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1732 int res; 1856 int res;
1733 u8 opcode; 1857 u8 opcode;
1734 1858
1735 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, 1859 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1736 DMA_FROM_DEVICE); 1860 DMA_FROM_DEVICE);
1737 1861
1738 opcode = *(u8 *) iu->buf; 1862 opcode = *(u8 *) iu->buf;
@@ -1746,15 +1870,15 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1746 1870
1747 switch (opcode) { 1871 switch (opcode) {
1748 case SRP_RSP: 1872 case SRP_RSP:
1749 srp_process_rsp(target, iu->buf); 1873 srp_process_rsp(ch, iu->buf);
1750 break; 1874 break;
1751 1875
1752 case SRP_CRED_REQ: 1876 case SRP_CRED_REQ:
1753 srp_process_cred_req(target, iu->buf); 1877 srp_process_cred_req(ch, iu->buf);
1754 break; 1878 break;
1755 1879
1756 case SRP_AER_REQ: 1880 case SRP_AER_REQ:
1757 srp_process_aer_req(target, iu->buf); 1881 srp_process_aer_req(ch, iu->buf);
1758 break; 1882 break;
1759 1883
1760 case SRP_T_LOGOUT: 1884 case SRP_T_LOGOUT:
@@ -1769,10 +1893,10 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1769 break; 1893 break;
1770 } 1894 }
1771 1895
1772 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, 1896 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1773 DMA_FROM_DEVICE); 1897 DMA_FROM_DEVICE);
1774 1898
1775 res = srp_post_recv(target, iu); 1899 res = srp_post_recv(ch, iu);
1776 if (res != 0) 1900 if (res != 0)
1777 shost_printk(KERN_ERR, target->scsi_host, 1901 shost_printk(KERN_ERR, target->scsi_host,
1778 PFX "Recv failed with error code %d\n", res); 1902 PFX "Recv failed with error code %d\n", res);
@@ -1795,8 +1919,15 @@ static void srp_tl_err_work(struct work_struct *work)
1795} 1919}
1796 1920
1797static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, 1921static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1798 bool send_err, struct srp_target_port *target) 1922 bool send_err, struct srp_rdma_ch *ch)
1799{ 1923{
1924 struct srp_target_port *target = ch->target;
1925
1926 if (wr_id == SRP_LAST_WR_ID) {
1927 complete(&ch->done);
1928 return;
1929 }
1930
1800 if (target->connected && !target->qp_in_error) { 1931 if (target->connected && !target->qp_in_error) {
1801 if (wr_id & LOCAL_INV_WR_ID_MASK) { 1932 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1802 shost_printk(KERN_ERR, target->scsi_host, PFX 1933 shost_printk(KERN_ERR, target->scsi_host, PFX
@@ -1817,33 +1948,33 @@ static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1817 target->qp_in_error = true; 1948 target->qp_in_error = true;
1818} 1949}
1819 1950
1820static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) 1951static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
1821{ 1952{
1822 struct srp_target_port *target = target_ptr; 1953 struct srp_rdma_ch *ch = ch_ptr;
1823 struct ib_wc wc; 1954 struct ib_wc wc;
1824 1955
1825 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 1956 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1826 while (ib_poll_cq(cq, 1, &wc) > 0) { 1957 while (ib_poll_cq(cq, 1, &wc) > 0) {
1827 if (likely(wc.status == IB_WC_SUCCESS)) { 1958 if (likely(wc.status == IB_WC_SUCCESS)) {
1828 srp_handle_recv(target, &wc); 1959 srp_handle_recv(ch, &wc);
1829 } else { 1960 } else {
1830 srp_handle_qp_err(wc.wr_id, wc.status, false, target); 1961 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
1831 } 1962 }
1832 } 1963 }
1833} 1964}
1834 1965
1835static void srp_send_completion(struct ib_cq *cq, void *target_ptr) 1966static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
1836{ 1967{
1837 struct srp_target_port *target = target_ptr; 1968 struct srp_rdma_ch *ch = ch_ptr;
1838 struct ib_wc wc; 1969 struct ib_wc wc;
1839 struct srp_iu *iu; 1970 struct srp_iu *iu;
1840 1971
1841 while (ib_poll_cq(cq, 1, &wc) > 0) { 1972 while (ib_poll_cq(cq, 1, &wc) > 0) {
1842 if (likely(wc.status == IB_WC_SUCCESS)) { 1973 if (likely(wc.status == IB_WC_SUCCESS)) {
1843 iu = (struct srp_iu *) (uintptr_t) wc.wr_id; 1974 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1844 list_add(&iu->list, &target->free_tx); 1975 list_add(&iu->list, &ch->free_tx);
1845 } else { 1976 } else {
1846 srp_handle_qp_err(wc.wr_id, wc.status, true, target); 1977 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
1847 } 1978 }
1848 } 1979 }
1849} 1980}
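Note the asymmetry between the two handlers: srp_recv_completion() re-arms its CQ with ib_req_notify_cq(), but the send CQ is never re-armed. Send completions are instead reaped opportunistically by the srp_send_completion() call at the top of __srp_get_tx_iu(), a lazy-reaping pattern sketched as:

	/* reclaim finished sends only when a new tx IU is actually needed */
	srp_send_completion(ch->send_cq, ch);	/* refills ch->free_tx */
	if (list_empty(&ch->free_tx))
		return NULL;			/* everything still in flight */

This trades a little completion latency for zero interrupts on the send path.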
@@ -1852,11 +1983,14 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1852{ 1983{
1853 struct srp_target_port *target = host_to_target(shost); 1984 struct srp_target_port *target = host_to_target(shost);
1854 struct srp_rport *rport = target->rport; 1985 struct srp_rport *rport = target->rport;
1986 struct srp_rdma_ch *ch;
1855 struct srp_request *req; 1987 struct srp_request *req;
1856 struct srp_iu *iu; 1988 struct srp_iu *iu;
1857 struct srp_cmd *cmd; 1989 struct srp_cmd *cmd;
1858 struct ib_device *dev; 1990 struct ib_device *dev;
1859 unsigned long flags; 1991 unsigned long flags;
1992 u32 tag;
1993 u16 idx;
1860 int len, ret; 1994 int len, ret;
1861 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; 1995 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1862 1996
@@ -1873,15 +2007,22 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1873 if (unlikely(scmnd->result)) 2007 if (unlikely(scmnd->result))
1874 goto err; 2008 goto err;
1875 2009
1876 spin_lock_irqsave(&target->lock, flags); 2010 WARN_ON_ONCE(scmnd->request->tag < 0);
1877 iu = __srp_get_tx_iu(target, SRP_IU_CMD); 2011 tag = blk_mq_unique_tag(scmnd->request);
1878 if (!iu) 2012 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
1879 goto err_unlock; 2013 idx = blk_mq_unique_tag_to_tag(tag);
2014 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2015 dev_name(&shost->shost_gendev), tag, idx,
2016 target->req_ring_size);
1880 2017
1881 req = list_first_entry(&target->free_reqs, struct srp_request, list); 2018 spin_lock_irqsave(&ch->lock, flags);
1882 list_del(&req->list); 2019 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
1883 spin_unlock_irqrestore(&target->lock, flags); 2020 spin_unlock_irqrestore(&ch->lock, flags);
1884 2021
2022 if (!iu)
2023 goto err;
2024
2025 req = &ch->req_ring[idx];
1885 dev = target->srp_host->srp_dev->dev; 2026 dev = target->srp_host->srp_dev->dev;
1886 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, 2027 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
1887 DMA_TO_DEVICE); 2028 DMA_TO_DEVICE);
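blk_mq_unique_tag() packs the hardware-queue index and the per-queue tag into one 32-bit value (16 bits each, as defined by blk-mq), and the two helpers unpack it. This is what lets a single SCSI host fan commands out over multiple RDMA channels without any driver-side tag allocator; the decomposition used above, as a sketch:

	u32 utag = blk_mq_unique_tag(scmnd->request);
	u16 hwq  = blk_mq_unique_tag_to_hwq(utag);	/* utag >> 16: channel */
	u16 idx  = blk_mq_unique_tag_to_tag(utag);	/* utag & 0xffff: slot */

	ch  = &target->ch[hwq];
	req = &ch->req_ring[idx];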
@@ -1893,13 +2034,13 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1893 2034
1894 cmd->opcode = SRP_CMD; 2035 cmd->opcode = SRP_CMD;
1895 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); 2036 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
1896 cmd->tag = req->index; 2037 cmd->tag = tag;
1897 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); 2038 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1898 2039
1899 req->scmnd = scmnd; 2040 req->scmnd = scmnd;
1900 req->cmd = iu; 2041 req->cmd = iu;
1901 2042
1902 len = srp_map_data(scmnd, target, req); 2043 len = srp_map_data(scmnd, ch, req);
1903 if (len < 0) { 2044 if (len < 0) {
1904 shost_printk(KERN_ERR, target->scsi_host, 2045 shost_printk(KERN_ERR, target->scsi_host,
1905 PFX "Failed to map data (%d)\n", len); 2046 PFX "Failed to map data (%d)\n", len);
@@ -1917,7 +2058,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1917 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len, 2058 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
1918 DMA_TO_DEVICE); 2059 DMA_TO_DEVICE);
1919 2060
1920 if (srp_post_send(target, iu, len)) { 2061 if (srp_post_send(ch, iu, len)) {
1921 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); 2062 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
1922 goto err_unmap; 2063 goto err_unmap;
1923 } 2064 }
@@ -1931,10 +2072,10 @@ unlock_rport:
1931 return ret; 2072 return ret;
1932 2073
1933err_unmap: 2074err_unmap:
1934 srp_unmap_data(scmnd, target, req); 2075 srp_unmap_data(scmnd, ch, req);
1935 2076
1936err_iu: 2077err_iu:
1937 srp_put_tx_iu(target, iu, SRP_IU_CMD); 2078 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
1938 2079
1939 /* 2080 /*
1940 * Avoid that the loops that iterate over the request ring can 2081 * Avoid that the loops that iterate over the request ring can
@@ -1942,12 +2083,6 @@ err_iu:
1942 */ 2083 */
1943 req->scmnd = NULL; 2084 req->scmnd = NULL;
1944 2085
1945 spin_lock_irqsave(&target->lock, flags);
1946 list_add(&req->list, &target->free_reqs);
1947
1948err_unlock:
1949 spin_unlock_irqrestore(&target->lock, flags);
1950
1951err: 2086err:
1952 if (scmnd->result) { 2087 if (scmnd->result) {
1953 scmnd->scsi_done(scmnd); 2088 scmnd->scsi_done(scmnd);
@@ -1961,53 +2096,54 @@ err:
1961 2096
1962/* 2097/*
1963 * Note: the resources allocated in this function are freed in 2098 * Note: the resources allocated in this function are freed in
1964 * srp_free_target_ib(). 2099 * srp_free_ch_ib().
1965 */ 2100 */
1966static int srp_alloc_iu_bufs(struct srp_target_port *target) 2101static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
1967{ 2102{
2103 struct srp_target_port *target = ch->target;
1968 int i; 2104 int i;
1969 2105
1970 target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring), 2106 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
1971 GFP_KERNEL); 2107 GFP_KERNEL);
1972 if (!target->rx_ring) 2108 if (!ch->rx_ring)
1973 goto err_no_ring; 2109 goto err_no_ring;
1974 target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring), 2110 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
1975 GFP_KERNEL); 2111 GFP_KERNEL);
1976 if (!target->tx_ring) 2112 if (!ch->tx_ring)
1977 goto err_no_ring; 2113 goto err_no_ring;
1978 2114
1979 for (i = 0; i < target->queue_size; ++i) { 2115 for (i = 0; i < target->queue_size; ++i) {
1980 target->rx_ring[i] = srp_alloc_iu(target->srp_host, 2116 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
1981 target->max_ti_iu_len, 2117 ch->max_ti_iu_len,
1982 GFP_KERNEL, DMA_FROM_DEVICE); 2118 GFP_KERNEL, DMA_FROM_DEVICE);
1983 if (!target->rx_ring[i]) 2119 if (!ch->rx_ring[i])
1984 goto err; 2120 goto err;
1985 } 2121 }
1986 2122
1987 for (i = 0; i < target->queue_size; ++i) { 2123 for (i = 0; i < target->queue_size; ++i) {
1988 target->tx_ring[i] = srp_alloc_iu(target->srp_host, 2124 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
1989 target->max_iu_len, 2125 target->max_iu_len,
1990 GFP_KERNEL, DMA_TO_DEVICE); 2126 GFP_KERNEL, DMA_TO_DEVICE);
1991 if (!target->tx_ring[i]) 2127 if (!ch->tx_ring[i])
1992 goto err; 2128 goto err;
1993 2129
1994 list_add(&target->tx_ring[i]->list, &target->free_tx); 2130 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
1995 } 2131 }
1996 2132
1997 return 0; 2133 return 0;
1998 2134
1999err: 2135err:
2000 for (i = 0; i < target->queue_size; ++i) { 2136 for (i = 0; i < target->queue_size; ++i) {
2001 srp_free_iu(target->srp_host, target->rx_ring[i]); 2137 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2002 srp_free_iu(target->srp_host, target->tx_ring[i]); 2138 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2003 } 2139 }
2004 2140
2005 2141
2006err_no_ring: 2142err_no_ring:
2007 kfree(target->tx_ring); 2143 kfree(ch->tx_ring);
2008 target->tx_ring = NULL; 2144 ch->tx_ring = NULL;
2009 kfree(target->rx_ring); 2145 kfree(ch->rx_ring);
2010 target->rx_ring = NULL; 2146 ch->rx_ring = NULL;
2011 2147
2012 return -ENOMEM; 2148 return -ENOMEM;
2013} 2149}
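
Besides moving the rings into struct srp_rdma_ch, the hunk above switches from kzalloc(n * size) to kcalloc(n, size). The distinction is not cosmetic: kcalloc() checks the multiplication for overflow and returns NULL, whereas an open-coded product can wrap on 32-bit and hand kzalloc() a short length. A minimal sketch of the difference:

	/* overflow-prone: n * sizeof(*ring) may wrap before kzalloc() sees it */
	ring = kzalloc(n * sizeof(*ring), GFP_KERNEL);

	/* safer: kcalloc() fails cleanly if the product would overflow */
	ring = kcalloc(n, sizeof(*ring), GFP_KERNEL);
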
@@ -2041,23 +2177,24 @@ static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2041 2177
2042static void srp_cm_rep_handler(struct ib_cm_id *cm_id, 2178static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2043 struct srp_login_rsp *lrsp, 2179 struct srp_login_rsp *lrsp,
2044 struct srp_target_port *target) 2180 struct srp_rdma_ch *ch)
2045{ 2181{
2182 struct srp_target_port *target = ch->target;
2046 struct ib_qp_attr *qp_attr = NULL; 2183 struct ib_qp_attr *qp_attr = NULL;
2047 int attr_mask = 0; 2184 int attr_mask = 0;
2048 int ret; 2185 int ret;
2049 int i; 2186 int i;
2050 2187
2051 if (lrsp->opcode == SRP_LOGIN_RSP) { 2188 if (lrsp->opcode == SRP_LOGIN_RSP) {
2052 target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); 2189 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2053 target->req_lim = be32_to_cpu(lrsp->req_lim_delta); 2190 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2054 2191
2055 /* 2192 /*
2056 * Reserve credits for task management so we don't 2193 * Reserve credits for task management so we don't
2057 * bounce requests back to the SCSI mid-layer. 2194 * bounce requests back to the SCSI mid-layer.
2058 */ 2195 */
2059 target->scsi_host->can_queue 2196 target->scsi_host->can_queue
2060 = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE, 2197 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2061 target->scsi_host->can_queue); 2198 target->scsi_host->can_queue);
2062 target->scsi_host->cmd_per_lun 2199 target->scsi_host->cmd_per_lun
2063 = min_t(int, target->scsi_host->can_queue, 2200 = min_t(int, target->scsi_host->can_queue,
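
The clamp above keeps the task-management credits out of the mid-layer's hands so that an abort or LUN reset can always be posted. As a worked example: if the login response grants req_lim = 64 and a single task-management slot is held back, at most 63 regular commands are outstanding at any time and a task-management IU never has to wait for a credit.
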
@@ -2069,8 +2206,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2069 goto error; 2206 goto error;
2070 } 2207 }
2071 2208
2072 if (!target->rx_ring) { 2209 if (!ch->rx_ring) {
2073 ret = srp_alloc_iu_bufs(target); 2210 ret = srp_alloc_iu_bufs(ch);
2074 if (ret) 2211 if (ret)
2075 goto error; 2212 goto error;
2076 } 2213 }
@@ -2085,13 +2222,14 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2085 if (ret) 2222 if (ret)
2086 goto error_free; 2223 goto error_free;
2087 2224
2088 ret = ib_modify_qp(target->qp, qp_attr, attr_mask); 2225 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2089 if (ret) 2226 if (ret)
2090 goto error_free; 2227 goto error_free;
2091 2228
2092 for (i = 0; i < target->queue_size; i++) { 2229 for (i = 0; i < target->queue_size; i++) {
2093 struct srp_iu *iu = target->rx_ring[i]; 2230 struct srp_iu *iu = ch->rx_ring[i];
2094 ret = srp_post_recv(target, iu); 2231
2232 ret = srp_post_recv(ch, iu);
2095 if (ret) 2233 if (ret)
2096 goto error_free; 2234 goto error_free;
2097 } 2235 }
@@ -2103,7 +2241,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2103 2241
2104 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); 2242 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2105 2243
2106 ret = ib_modify_qp(target->qp, qp_attr, attr_mask); 2244 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2107 if (ret) 2245 if (ret)
2108 goto error_free; 2246 goto error_free;
2109 2247
@@ -2113,13 +2251,14 @@ error_free:
2113 kfree(qp_attr); 2251 kfree(qp_attr);
2114 2252
2115error: 2253error:
2116 target->status = ret; 2254 ch->status = ret;
2117} 2255}
2118 2256
2119static void srp_cm_rej_handler(struct ib_cm_id *cm_id, 2257static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2120 struct ib_cm_event *event, 2258 struct ib_cm_event *event,
2121 struct srp_target_port *target) 2259 struct srp_rdma_ch *ch)
2122{ 2260{
2261 struct srp_target_port *target = ch->target;
2123 struct Scsi_Host *shost = target->scsi_host; 2262 struct Scsi_Host *shost = target->scsi_host;
2124 struct ib_class_port_info *cpi; 2263 struct ib_class_port_info *cpi;
2125 int opcode; 2264 int opcode;
@@ -2127,12 +2266,12 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2127 switch (event->param.rej_rcvd.reason) { 2266 switch (event->param.rej_rcvd.reason) {
2128 case IB_CM_REJ_PORT_CM_REDIRECT: 2267 case IB_CM_REJ_PORT_CM_REDIRECT:
2129 cpi = event->param.rej_rcvd.ari; 2268 cpi = event->param.rej_rcvd.ari;
2130 target->path.dlid = cpi->redirect_lid; 2269 ch->path.dlid = cpi->redirect_lid;
2131 target->path.pkey = cpi->redirect_pkey; 2270 ch->path.pkey = cpi->redirect_pkey;
2132 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; 2271 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2133 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16); 2272 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2134 2273
2135 target->status = target->path.dlid ? 2274 ch->status = ch->path.dlid ?
2136 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; 2275 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2137 break; 2276 break;
2138 2277
@@ -2143,26 +2282,26 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2143 * reject reason code 25 when they mean 24 2282 * reject reason code 25 when they mean 24
2144 * (port redirect). 2283 * (port redirect).
2145 */ 2284 */
2146 memcpy(target->path.dgid.raw, 2285 memcpy(ch->path.dgid.raw,
2147 event->param.rej_rcvd.ari, 16); 2286 event->param.rej_rcvd.ari, 16);
2148 2287
2149 shost_printk(KERN_DEBUG, shost, 2288 shost_printk(KERN_DEBUG, shost,
2150 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", 2289 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2151 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix), 2290 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2152 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id)); 2291 be64_to_cpu(ch->path.dgid.global.interface_id));
2153 2292
2154 target->status = SRP_PORT_REDIRECT; 2293 ch->status = SRP_PORT_REDIRECT;
2155 } else { 2294 } else {
2156 shost_printk(KERN_WARNING, shost, 2295 shost_printk(KERN_WARNING, shost,
2157 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); 2296 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2158 target->status = -ECONNRESET; 2297 ch->status = -ECONNRESET;
2159 } 2298 }
2160 break; 2299 break;
2161 2300
2162 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: 2301 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2163 shost_printk(KERN_WARNING, shost, 2302 shost_printk(KERN_WARNING, shost,
2164 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); 2303 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2165 target->status = -ECONNRESET; 2304 ch->status = -ECONNRESET;
2166 break; 2305 break;
2167 2306
2168 case IB_CM_REJ_CONSUMER_DEFINED: 2307 case IB_CM_REJ_CONSUMER_DEFINED:
@@ -2177,30 +2316,31 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2177 else 2316 else
2178 shost_printk(KERN_WARNING, shost, PFX 2317 shost_printk(KERN_WARNING, shost, PFX
2179 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", 2318 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2180 target->path.sgid.raw, 2319 target->sgid.raw,
2181 target->orig_dgid, reason); 2320 target->orig_dgid.raw, reason);
2182 } else 2321 } else
2183 shost_printk(KERN_WARNING, shost, 2322 shost_printk(KERN_WARNING, shost,
2184 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," 2323 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2185 " opcode 0x%02x\n", opcode); 2324 " opcode 0x%02x\n", opcode);
2186 target->status = -ECONNRESET; 2325 ch->status = -ECONNRESET;
2187 break; 2326 break;
2188 2327
2189 case IB_CM_REJ_STALE_CONN: 2328 case IB_CM_REJ_STALE_CONN:
2190 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); 2329 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2191 target->status = SRP_STALE_CONN; 2330 ch->status = SRP_STALE_CONN;
2192 break; 2331 break;
2193 2332
2194 default: 2333 default:
2195 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", 2334 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2196 event->param.rej_rcvd.reason); 2335 event->param.rej_rcvd.reason);
2197 target->status = -ECONNRESET; 2336 ch->status = -ECONNRESET;
2198 } 2337 }
2199} 2338}
2200 2339
2201static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) 2340static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2202{ 2341{
2203 struct srp_target_port *target = cm_id->context; 2342 struct srp_rdma_ch *ch = cm_id->context;
2343 struct srp_target_port *target = ch->target;
2204 int comp = 0; 2344 int comp = 0;
2205 2345
2206 switch (event->event) { 2346 switch (event->event) {
@@ -2208,19 +2348,19 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2208 shost_printk(KERN_DEBUG, target->scsi_host, 2348 shost_printk(KERN_DEBUG, target->scsi_host,
2209 PFX "Sending CM REQ failed\n"); 2349 PFX "Sending CM REQ failed\n");
2210 comp = 1; 2350 comp = 1;
2211 target->status = -ECONNRESET; 2351 ch->status = -ECONNRESET;
2212 break; 2352 break;
2213 2353
2214 case IB_CM_REP_RECEIVED: 2354 case IB_CM_REP_RECEIVED:
2215 comp = 1; 2355 comp = 1;
2216 srp_cm_rep_handler(cm_id, event->private_data, target); 2356 srp_cm_rep_handler(cm_id, event->private_data, ch);
2217 break; 2357 break;
2218 2358
2219 case IB_CM_REJ_RECEIVED: 2359 case IB_CM_REJ_RECEIVED:
2220 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); 2360 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2221 comp = 1; 2361 comp = 1;
2222 2362
2223 srp_cm_rej_handler(cm_id, event, target); 2363 srp_cm_rej_handler(cm_id, event, ch);
2224 break; 2364 break;
2225 2365
2226 case IB_CM_DREQ_RECEIVED: 2366 case IB_CM_DREQ_RECEIVED:
@@ -2238,7 +2378,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2238 PFX "connection closed\n"); 2378 PFX "connection closed\n");
2239 comp = 1; 2379 comp = 1;
2240 2380
2241 target->status = 0; 2381 ch->status = 0;
2242 break; 2382 break;
2243 2383
2244 case IB_CM_MRA_RECEIVED: 2384 case IB_CM_MRA_RECEIVED:
@@ -2253,7 +2393,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2253 } 2393 }
2254 2394
2255 if (comp) 2395 if (comp)
2256 complete(&target->done); 2396 complete(&ch->done);
2257 2397
2258 return 0; 2398 return 0;
2259} 2399}
@@ -2262,34 +2402,21 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2262 * srp_change_queue_depth - setting device queue depth 2402 * srp_change_queue_depth - setting device queue depth
2263 * @sdev: scsi device struct 2403 * @sdev: scsi device struct
2264 * @qdepth: requested queue depth 2404 * @qdepth: requested queue depth
2265 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
2266 * (see include/scsi/scsi_host.h for definition)
2267 * 2405 *
2268 * Returns queue depth. 2406 * Returns queue depth.
2269 */ 2407 */
2270static int 2408static int
2271srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) 2409srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2272{ 2410{
2273 struct Scsi_Host *shost = sdev->host; 2411 if (!sdev->tagged_supported)
2274 int max_depth; 2412 qdepth = 1;
2275 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) { 2413 return scsi_change_queue_depth(sdev, qdepth);
2276 max_depth = shost->can_queue;
2277 if (!sdev->tagged_supported)
2278 max_depth = 1;
2279 if (qdepth > max_depth)
2280 qdepth = max_depth;
2281 scsi_adjust_queue_depth(sdev, qdepth);
2282 } else if (reason == SCSI_QDEPTH_QFULL)
2283 scsi_track_queue_full(sdev, qdepth);
2284 else
2285 return -EOPNOTSUPP;
2286
2287 return sdev->queue_depth;
2288} 2414}
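
This is one instance of the tree-wide change_queue_depth() conversion (the mptscsih and zfcp hunks below follow the same pattern): the reason argument goes away, queue-full tracking moves into the SCSI core behind the template's track_queue_depth flag, and the driver merely clamps the depth and calls scsi_change_queue_depth(). A rough sketch of a new-style callback (example_change_queue_depth is a made-up name):

	static int example_change_queue_depth(struct scsi_device *sdev, int qdepth)
	{
		if (!sdev->tagged_supported)	/* untagged devices take one command */
			qdepth = 1;
		return scsi_change_queue_depth(sdev, qdepth);
	}
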
2289 2415
2290static int srp_send_tsk_mgmt(struct srp_target_port *target, 2416static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2291 u64 req_tag, unsigned int lun, u8 func) 2417 unsigned int lun, u8 func)
2292{ 2418{
2419 struct srp_target_port *target = ch->target;
2293 struct srp_rport *rport = target->rport; 2420 struct srp_rport *rport = target->rport;
2294 struct ib_device *dev = target->srp_host->srp_dev->dev; 2421 struct ib_device *dev = target->srp_host->srp_dev->dev;
2295 struct srp_iu *iu; 2422 struct srp_iu *iu;
@@ -2298,16 +2425,16 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
2298 if (!target->connected || target->qp_in_error) 2425 if (!target->connected || target->qp_in_error)
2299 return -1; 2426 return -1;
2300 2427
2301 init_completion(&target->tsk_mgmt_done); 2428 init_completion(&ch->tsk_mgmt_done);
2302 2429
2303 /* 2430 /*
2304 * Lock the rport mutex to avoid that srp_create_target_ib() is 2431 * Lock the rport mutex to avoid that srp_create_ch_ib() is
2305 * invoked while a task management function is being sent. 2432 * invoked while a task management function is being sent.
2306 */ 2433 */
2307 mutex_lock(&rport->mutex); 2434 mutex_lock(&rport->mutex);
2308 spin_lock_irq(&target->lock); 2435 spin_lock_irq(&ch->lock);
2309 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); 2436 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2310 spin_unlock_irq(&target->lock); 2437 spin_unlock_irq(&ch->lock);
2311 2438
2312 if (!iu) { 2439 if (!iu) {
2313 mutex_unlock(&rport->mutex); 2440 mutex_unlock(&rport->mutex);
@@ -2328,15 +2455,15 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
2328 2455
2329 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, 2456 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2330 DMA_TO_DEVICE); 2457 DMA_TO_DEVICE);
2331 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) { 2458 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2332 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT); 2459 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2333 mutex_unlock(&rport->mutex); 2460 mutex_unlock(&rport->mutex);
2334 2461
2335 return -1; 2462 return -1;
2336 } 2463 }
2337 mutex_unlock(&rport->mutex); 2464 mutex_unlock(&rport->mutex);
2338 2465
2339 if (!wait_for_completion_timeout(&target->tsk_mgmt_done, 2466 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2340 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 2467 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2341 return -1; 2468 return -1;
2342 2469
@@ -2347,20 +2474,32 @@ static int srp_abort(struct scsi_cmnd *scmnd)
2347{ 2474{
2348 struct srp_target_port *target = host_to_target(scmnd->device->host); 2475 struct srp_target_port *target = host_to_target(scmnd->device->host);
2349 struct srp_request *req = (struct srp_request *) scmnd->host_scribble; 2476 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2477 u32 tag;
2478 u16 ch_idx;
2479 struct srp_rdma_ch *ch;
2350 int ret; 2480 int ret;
2351 2481
2352 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 2482 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2353 2483
2354 if (!req || !srp_claim_req(target, req, NULL, scmnd)) 2484 if (!req)
2485 return SUCCESS;
2486 tag = blk_mq_unique_tag(scmnd->request);
2487 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2488 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2355 return SUCCESS; 2489 return SUCCESS;
2356 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, 2490 ch = &target->ch[ch_idx];
2491 if (!srp_claim_req(ch, req, NULL, scmnd))
2492 return SUCCESS;
2493 shost_printk(KERN_ERR, target->scsi_host,
2494 "Sending SRP abort for tag %#x\n", tag);
2495 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2357 SRP_TSK_ABORT_TASK) == 0) 2496 SRP_TSK_ABORT_TASK) == 0)
2358 ret = SUCCESS; 2497 ret = SUCCESS;
2359 else if (target->rport->state == SRP_RPORT_LOST) 2498 else if (target->rport->state == SRP_RPORT_LOST)
2360 ret = FAST_IO_FAIL; 2499 ret = FAST_IO_FAIL;
2361 else 2500 else
2362 ret = FAILED; 2501 ret = FAILED;
2363 srp_free_req(target, req, scmnd, 0); 2502 srp_free_req(ch, req, scmnd, 0);
2364 scmnd->result = DID_ABORT << 16; 2503 scmnd->result = DID_ABORT << 16;
2365 scmnd->scsi_done(scmnd); 2504 scmnd->scsi_done(scmnd);
2366 2505
@@ -2370,19 +2509,25 @@ static int srp_abort(struct scsi_cmnd *scmnd)
2370static int srp_reset_device(struct scsi_cmnd *scmnd) 2509static int srp_reset_device(struct scsi_cmnd *scmnd)
2371{ 2510{
2372 struct srp_target_port *target = host_to_target(scmnd->device->host); 2511 struct srp_target_port *target = host_to_target(scmnd->device->host);
2512 struct srp_rdma_ch *ch;
2373 int i; 2513 int i, j;
2374 2514
2375 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 2515 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2376 2516
2377 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, 2517 ch = &target->ch[0];
2518 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2378 SRP_TSK_LUN_RESET)) 2519 SRP_TSK_LUN_RESET))
2379 return FAILED; 2520 return FAILED;
2380 if (target->tsk_mgmt_status) 2521 if (ch->tsk_mgmt_status)
2381 return FAILED; 2522 return FAILED;
2382 2523
2383 for (i = 0; i < target->req_ring_size; ++i) { 2524 for (i = 0; i < target->ch_count; i++) {
2384 struct srp_request *req = &target->req_ring[i]; 2525 ch = &target->ch[i];
2385 srp_finish_req(target, req, scmnd->device, DID_RESET << 16); 2526 for (j = 0; j < target->req_ring_size; ++j) {
2527 struct srp_request *req = &ch->req_ring[j];
2528
2529 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2530 }
2386 } 2531 }
2387 2532
2388 return SUCCESS; 2533 return SUCCESS;
@@ -2444,7 +2589,7 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2444{ 2589{
2445 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2590 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2446 2591
2447 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey)); 2592 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2448} 2593}
2449 2594
2450static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, 2595static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
@@ -2452,15 +2597,16 @@ static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2452{ 2597{
2453 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2598 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2454 2599
2455 return sprintf(buf, "%pI6\n", target->path.sgid.raw); 2600 return sprintf(buf, "%pI6\n", target->sgid.raw);
2456} 2601}
2457 2602
2458static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, 2603static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2459 char *buf) 2604 char *buf)
2460{ 2605{
2461 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2606 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2607 struct srp_rdma_ch *ch = &target->ch[0];
2462 2608
2463 return sprintf(buf, "%pI6\n", target->path.dgid.raw); 2609 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2464} 2610}
2465 2611
2466static ssize_t show_orig_dgid(struct device *dev, 2612static ssize_t show_orig_dgid(struct device *dev,
@@ -2468,15 +2614,21 @@ static ssize_t show_orig_dgid(struct device *dev,
2468{ 2614{
2469 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2615 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2470 2616
2471 return sprintf(buf, "%pI6\n", target->orig_dgid); 2617 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2472} 2618}
2473 2619
2474static ssize_t show_req_lim(struct device *dev, 2620static ssize_t show_req_lim(struct device *dev,
2475 struct device_attribute *attr, char *buf) 2621 struct device_attribute *attr, char *buf)
2476{ 2622{
2477 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2623 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2624 struct srp_rdma_ch *ch;
2625 int i, req_lim = INT_MAX;
2478 2626
2479 return sprintf(buf, "%d\n", target->req_lim); 2627 for (i = 0; i < target->ch_count; i++) {
2628 ch = &target->ch[i];
2629 req_lim = min(req_lim, ch->req_lim);
2630 }
2631 return sprintf(buf, "%d\n", req_lim);
2480} 2632}
2481 2633
2482static ssize_t show_zero_req_lim(struct device *dev, 2634static ssize_t show_zero_req_lim(struct device *dev,
@@ -2503,6 +2655,14 @@ static ssize_t show_local_ib_device(struct device *dev,
2503 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); 2655 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2504} 2656}
2505 2657
2658static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2659 char *buf)
2660{
2661 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2662
2663 return sprintf(buf, "%d\n", target->ch_count);
2664}
2665
2506static ssize_t show_comp_vector(struct device *dev, 2666static ssize_t show_comp_vector(struct device *dev,
2507 struct device_attribute *attr, char *buf) 2667 struct device_attribute *attr, char *buf)
2508{ 2668{
@@ -2546,6 +2706,7 @@ static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2546static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); 2706static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2547static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); 2707static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2548static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); 2708static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2709static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2549static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL); 2710static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2550static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL); 2711static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2551static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); 2712static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
@@ -2563,6 +2724,7 @@ static struct device_attribute *srp_host_attrs[] = {
2563 &dev_attr_zero_req_lim, 2724 &dev_attr_zero_req_lim,
2564 &dev_attr_local_ib_port, 2725 &dev_attr_local_ib_port,
2565 &dev_attr_local_ib_device, 2726 &dev_attr_local_ib_device,
2727 &dev_attr_ch_count,
2566 &dev_attr_comp_vector, 2728 &dev_attr_comp_vector,
2567 &dev_attr_tl_retry_count, 2729 &dev_attr_tl_retry_count,
2568 &dev_attr_cmd_sg_entries, 2730 &dev_attr_cmd_sg_entries,
@@ -2588,14 +2750,28 @@ static struct scsi_host_template srp_template = {
2588 .this_id = -1, 2750 .this_id = -1,
2589 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, 2751 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2590 .use_clustering = ENABLE_CLUSTERING, 2752 .use_clustering = ENABLE_CLUSTERING,
2591 .shost_attrs = srp_host_attrs 2753 .shost_attrs = srp_host_attrs,
2754 .use_blk_tags = 1,
2755 .track_queue_depth = 1,
2592}; 2756};
2593 2757
2758static int srp_sdev_count(struct Scsi_Host *host)
2759{
2760 struct scsi_device *sdev;
2761 int c = 0;
2762
2763 shost_for_each_device(sdev, host)
2764 c++;
2765
2766 return c;
2767}
2768
2594static int srp_add_target(struct srp_host *host, struct srp_target_port *target) 2769static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2595{ 2770{
2596 struct srp_rport_identifiers ids; 2771 struct srp_rport_identifiers ids;
2597 struct srp_rport *rport; 2772 struct srp_rport *rport;
2598 2773
2774 target->state = SRP_TARGET_SCANNING;
2599 sprintf(target->target_name, "SRP.T10:%016llX", 2775 sprintf(target->target_name, "SRP.T10:%016llX",
2600 (unsigned long long) be64_to_cpu(target->id_ext)); 2776 (unsigned long long) be64_to_cpu(target->id_ext));
2601 2777
@@ -2618,11 +2794,26 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2618 list_add_tail(&target->list, &host->target_list); 2794 list_add_tail(&target->list, &host->target_list);
2619 spin_unlock(&host->target_lock); 2795 spin_unlock(&host->target_lock);
2620 2796
2621 target->state = SRP_TARGET_LIVE;
2622
2623 scsi_scan_target(&target->scsi_host->shost_gendev, 2797 scsi_scan_target(&target->scsi_host->shost_gendev,
2624 0, target->scsi_id, SCAN_WILD_CARD, 0); 2798 0, target->scsi_id, SCAN_WILD_CARD, 0);
2625 2799
2800 if (!target->connected || target->qp_in_error) {
2801 shost_printk(KERN_INFO, target->scsi_host,
2802 PFX "SCSI scan failed - removing SCSI host\n");
2803 srp_queue_remove_work(target);
2804 goto out;
2805 }
2806
2807 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2808 dev_name(&target->scsi_host->shost_gendev),
2809 srp_sdev_count(target->scsi_host));
2810
2811 spin_lock_irq(&target->lock);
2812 if (target->state == SRP_TARGET_SCANNING)
2813 target->state = SRP_TARGET_LIVE;
2814 spin_unlock_irq(&target->lock);
2815
2816out:
2626 return 0; 2817 return 0;
2627} 2818}
2628 2819
@@ -2779,11 +2970,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
2779 } 2970 }
2780 2971
2781 for (i = 0; i < 16; ++i) { 2972 for (i = 0; i < 16; ++i) {
2782 strlcpy(dgid, p + i * 2, 3); 2973 strlcpy(dgid, p + i * 2, sizeof(dgid));
2783 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16); 2974 if (sscanf(dgid, "%hhx",
2975 &target->orig_dgid.raw[i]) < 1) {
2976 ret = -EINVAL;
2977 kfree(p);
2978 goto out;
2979 }
2784 } 2980 }
2785 kfree(p); 2981 kfree(p);
2786 memcpy(target->orig_dgid, target->path.dgid.raw, 16);
2787 break; 2982 break;
2788 2983
2789 case SRP_OPT_PKEY: 2984 case SRP_OPT_PKEY:
@@ -2791,7 +2986,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
2791 pr_warn("bad P_Key parameter '%s'\n", p); 2986 pr_warn("bad P_Key parameter '%s'\n", p);
2792 goto out; 2987 goto out;
2793 } 2988 }
2794 target->path.pkey = cpu_to_be16(token); 2989 target->pkey = cpu_to_be16(token);
2795 break; 2990 break;
2796 2991
2797 case SRP_OPT_SERVICE_ID: 2992 case SRP_OPT_SERVICE_ID:
@@ -2801,7 +2996,6 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
2801 goto out; 2996 goto out;
2802 } 2997 }
2803 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); 2998 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2804 target->path.service_id = target->service_id;
2805 kfree(p); 2999 kfree(p);
2806 break; 3000 break;
2807 3001
@@ -2938,9 +3132,11 @@ static ssize_t srp_create_target(struct device *dev,
2938 container_of(dev, struct srp_host, dev); 3132 container_of(dev, struct srp_host, dev);
2939 struct Scsi_Host *target_host; 3133 struct Scsi_Host *target_host;
2940 struct srp_target_port *target; 3134 struct srp_target_port *target;
3135 struct srp_rdma_ch *ch;
2941 struct srp_device *srp_dev = host->srp_dev; 3136 struct srp_device *srp_dev = host->srp_dev;
2942 struct ib_device *ibdev = srp_dev->dev; 3137 struct ib_device *ibdev = srp_dev->dev;
2943 int ret; 3138 int ret, node_idx, node, cpu, i;
3139 bool multich = false;
2944 3140
2945 target_host = scsi_host_alloc(&srp_template, 3141 target_host = scsi_host_alloc(&srp_template,
2946 sizeof (struct srp_target_port)); 3142 sizeof (struct srp_target_port));
@@ -2966,12 +3162,22 @@ static ssize_t srp_create_target(struct device *dev,
2966 target->tl_retry_count = 7; 3162 target->tl_retry_count = 7;
2967 target->queue_size = SRP_DEFAULT_QUEUE_SIZE; 3163 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
2968 3164
3165 /*
3166 * Avoid that the SCSI host can be removed by srp_remove_target()
3167 * before this function returns.
3168 */
3169 scsi_host_get(target->scsi_host);
3170
2969 mutex_lock(&host->add_target_mutex); 3171 mutex_lock(&host->add_target_mutex);
2970 3172
2971 ret = srp_parse_options(buf, target); 3173 ret = srp_parse_options(buf, target);
2972 if (ret) 3174 if (ret)
2973 goto err; 3175 goto err;
2974 3176
3177 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3178 if (ret)
3179 goto err;
3180
2975 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; 3181 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
2976 3182
2977 if (!srp_conn_unique(target->srp_host, target)) { 3183 if (!srp_conn_unique(target->srp_host, target)) {
@@ -3000,59 +3206,115 @@ static ssize_t srp_create_target(struct device *dev,
3000 INIT_WORK(&target->tl_err_work, srp_tl_err_work); 3206 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3001 INIT_WORK(&target->remove_work, srp_remove_work); 3207 INIT_WORK(&target->remove_work, srp_remove_work);
3002 spin_lock_init(&target->lock); 3208 spin_lock_init(&target->lock);
3003 INIT_LIST_HEAD(&target->free_tx); 3209 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
3004 ret = srp_alloc_req_data(target);
3005 if (ret) 3210 if (ret)
3006 goto err_free_mem; 3211 goto err;
3007 3212
3008 ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid); 3213 ret = -ENOMEM;
3009 if (ret) 3214 target->ch_count = max_t(unsigned, num_online_nodes(),
3010 goto err_free_mem; 3215 min(ch_count ? :
3216 min(4 * num_online_nodes(),
3217 ibdev->num_comp_vectors),
3218 num_online_cpus()));
3219 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3220 GFP_KERNEL);
3221 if (!target->ch)
3222 goto err;
3011 3223
3012 ret = srp_create_target_ib(target); 3224 node_idx = 0;
3013 if (ret) 3225 for_each_online_node(node) {
3014 goto err_free_mem; 3226 const int ch_start = (node_idx * target->ch_count /
3227 num_online_nodes());
3228 const int ch_end = ((node_idx + 1) * target->ch_count /
3229 num_online_nodes());
3230 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3231 num_online_nodes() + target->comp_vector)
3232 % ibdev->num_comp_vectors;
3233 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3234 num_online_nodes() + target->comp_vector)
3235 % ibdev->num_comp_vectors;
3236 int cpu_idx = 0;
3237
3238 for_each_online_cpu(cpu) {
3239 if (cpu_to_node(cpu) != node)
3240 continue;
3241 if (ch_start + cpu_idx >= ch_end)
3242 continue;
3243 ch = &target->ch[ch_start + cpu_idx];
3244 ch->target = target;
3245 ch->comp_vector = cv_start == cv_end ? cv_start :
3246 cv_start + cpu_idx % (cv_end - cv_start);
3247 spin_lock_init(&ch->lock);
3248 INIT_LIST_HEAD(&ch->free_tx);
3249 ret = srp_new_cm_id(ch);
3250 if (ret)
3251 goto err_disconnect;
3015 3252
3016 ret = srp_new_cm_id(target); 3253 ret = srp_create_ch_ib(ch);
3017 if (ret) 3254 if (ret)
3018 goto err_free_ib; 3255 goto err_disconnect;
3019 3256
3020 ret = srp_connect_target(target); 3257 ret = srp_alloc_req_data(ch);
3021 if (ret) { 3258 if (ret)
3022 shost_printk(KERN_ERR, target->scsi_host, 3259 goto err_disconnect;
3023 PFX "Connection failed\n"); 3260
3024 goto err_cm_id; 3261 ret = srp_connect_ch(ch, multich);
3262 if (ret) {
3263 shost_printk(KERN_ERR, target->scsi_host,
3264 PFX "Connection %d/%d failed\n",
3265 ch_start + cpu_idx,
3266 target->ch_count);
3267 if (node_idx == 0 && cpu_idx == 0) {
3268 goto err_disconnect;
3269 } else {
3270 srp_free_ch_ib(target, ch);
3271 srp_free_req_data(target, ch);
3272 target->ch_count = ch - target->ch;
3273 break;
3274 }
3275 }
3276
3277 multich = true;
3278 cpu_idx++;
3279 }
3280 node_idx++;
3025 } 3281 }
3026 3282
3283 target->scsi_host->nr_hw_queues = target->ch_count;
3284
3027 ret = srp_add_target(host, target); 3285 ret = srp_add_target(host, target);
3028 if (ret) 3286 if (ret)
3029 goto err_disconnect; 3287 goto err_disconnect;
3030 3288
3031 shost_printk(KERN_DEBUG, target->scsi_host, PFX 3289 if (target->state != SRP_TARGET_REMOVED) {
3032 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", 3290 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3033 be64_to_cpu(target->id_ext), 3291 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3034 be64_to_cpu(target->ioc_guid), 3292 be64_to_cpu(target->id_ext),
3035 be16_to_cpu(target->path.pkey), 3293 be64_to_cpu(target->ioc_guid),
3036 be64_to_cpu(target->service_id), 3294 be16_to_cpu(target->pkey),
3037 target->path.sgid.raw, target->path.dgid.raw); 3295 be64_to_cpu(target->service_id),
3296 target->sgid.raw, target->orig_dgid.raw);
3297 }
3038 3298
3039 ret = count; 3299 ret = count;
3040 3300
3041out: 3301out:
3042 mutex_unlock(&host->add_target_mutex); 3302 mutex_unlock(&host->add_target_mutex);
3303
3304 scsi_host_put(target->scsi_host);
3305
3043 return ret; 3306 return ret;
3044 3307
3045err_disconnect: 3308err_disconnect:
3046 srp_disconnect_target(target); 3309 srp_disconnect_target(target);
3047 3310
3048err_cm_id: 3311 for (i = 0; i < target->ch_count; i++) {
3049 ib_destroy_cm_id(target->cm_id); 3312 ch = &target->ch[i];
3050 3313 srp_free_ch_ib(target, ch);
3051err_free_ib: 3314 srp_free_req_data(target, ch);
3052 srp_free_target_ib(target); 3315 }
3053 3316
3054err_free_mem: 3317 kfree(target->ch);
3055 srp_free_req_data(target);
3056 3318
3057err: 3319err:
3058 scsi_host_put(target_host); 3320 scsi_host_put(target_host);
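
The nested loops in srp_create_target() spread channels and completion vectors over NUMA nodes by plain integer partitioning: node k of N owns channels [k * ch_count / N, (k + 1) * ch_count / N) plus the matching slice of completion vectors, rotated by the user-selected comp_vector and reduced modulo the vector count. Worked example: with 2 nodes, ch_count = 4 and 8 completion vectors, node 0 gets channels 0-1 on vectors 0-3 and node 1 gets channels 2-3 on vectors 4-7 (before rotation). If any connection after the first fails, target->ch_count is trimmed to the channels already established instead of failing the whole target.
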
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e46ecb15aa0d..a611556406ac 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -70,9 +70,12 @@ enum {
70 70
71 LOCAL_INV_WR_ID_MASK = 1, 71 LOCAL_INV_WR_ID_MASK = 1,
72 FAST_REG_WR_ID_MASK = 2, 72 FAST_REG_WR_ID_MASK = 2,
73
74 SRP_LAST_WR_ID = 0xfffffffcU,
73}; 75};
74 76
75enum srp_target_state { 77enum srp_target_state {
78 SRP_TARGET_SCANNING,
76 SRP_TARGET_LIVE, 79 SRP_TARGET_LIVE,
77 SRP_TARGET_REMOVED, 80 SRP_TARGET_REMOVED,
78}; 81};
@@ -115,7 +118,6 @@ struct srp_host {
115}; 118};
116 119
117struct srp_request { 120struct srp_request {
118 struct list_head list;
119 struct scsi_cmnd *scmnd; 121 struct scsi_cmnd *scmnd;
120 struct srp_iu *cmd; 122 struct srp_iu *cmd;
121 union { 123 union {
@@ -126,24 +128,62 @@ struct srp_request {
126 struct srp_direct_buf *indirect_desc; 128 struct srp_direct_buf *indirect_desc;
127 dma_addr_t indirect_dma_addr; 129 dma_addr_t indirect_dma_addr;
128 short nmdesc; 130 short nmdesc;
129 short index;
130}; 131};
131 132
132struct srp_target_port { 133/**
134 * struct srp_rdma_ch
135 * @comp_vector: Completion vector used by this RDMA channel.
136 */
137struct srp_rdma_ch {
133 /* These are RW in the hot path, and commonly used together */ 138 /* These are RW in the hot path, and commonly used together */
134 struct list_head free_tx; 139 struct list_head free_tx;
135 struct list_head free_reqs;
136 spinlock_t lock; 140 spinlock_t lock;
137 s32 req_lim; 141 s32 req_lim;
138 142
139 /* These are read-only in the hot path */ 143 /* These are read-only in the hot path */
140 struct ib_cq *send_cq ____cacheline_aligned_in_smp; 144 struct srp_target_port *target ____cacheline_aligned_in_smp;
145 struct ib_cq *send_cq;
141 struct ib_cq *recv_cq; 146 struct ib_cq *recv_cq;
142 struct ib_qp *qp; 147 struct ib_qp *qp;
143 union { 148 union {
144 struct ib_fmr_pool *fmr_pool; 149 struct ib_fmr_pool *fmr_pool;
145 struct srp_fr_pool *fr_pool; 150 struct srp_fr_pool *fr_pool;
146 }; 151 };
152
153 /* Everything above this point is used in the hot path of
154 * command processing. Try to keep them packed into cachelines.
155 */
156
157 struct completion done;
158 int status;
159
160 struct ib_sa_path_rec path;
161 struct ib_sa_query *path_query;
162 int path_query_id;
163
164 struct ib_cm_id *cm_id;
165 struct srp_iu **tx_ring;
166 struct srp_iu **rx_ring;
167 struct srp_request *req_ring;
168 int max_ti_iu_len;
169 int comp_vector;
170
171 struct completion tsk_mgmt_done;
172 u8 tsk_mgmt_status;
173};
174
175/**
176 * struct srp_target_port
177 * @comp_vector: Completion vector used by the first RDMA channel created for
178 * this target port.
179 */
180struct srp_target_port {
181 /* read and written in the hot path */
182 spinlock_t lock;
183
184 /* read only in the hot path */
185 struct srp_rdma_ch *ch;
186 u32 ch_count;
147 u32 lkey; 187 u32 lkey;
148 u32 rkey; 188 u32 rkey;
149 enum srp_target_state state; 189 enum srp_target_state state;
@@ -152,10 +192,8 @@ struct srp_target_port {
152 unsigned int indirect_size; 192 unsigned int indirect_size;
153 bool allow_ext_sg; 193 bool allow_ext_sg;
154 194
155 /* Everything above this point is used in the hot path of 195 /* other member variables */
156 * command processing. Try to keep them packed into cachelines. 196 union ib_gid sgid;
157 */
158
159 __be64 id_ext; 197 __be64 id_ext;
160 __be64 ioc_guid; 198 __be64 ioc_guid;
161 __be64 service_id; 199 __be64 service_id;
@@ -172,34 +210,19 @@ struct srp_target_port {
172 int comp_vector; 210 int comp_vector;
173 int tl_retry_count; 211 int tl_retry_count;
174 212
175 struct ib_sa_path_rec path; 213 union ib_gid orig_dgid;
176 __be16 orig_dgid[8]; 214 __be16 pkey;
177 struct ib_sa_query *path_query;
178 int path_query_id;
179 215
180 u32 rq_tmo_jiffies; 216 u32 rq_tmo_jiffies;
181 bool connected; 217 bool connected;
182 218
183 struct ib_cm_id *cm_id;
184
185 int max_ti_iu_len;
186
187 int zero_req_lim; 219 int zero_req_lim;
188 220
189 struct srp_iu **tx_ring;
190 struct srp_iu **rx_ring;
191 struct srp_request *req_ring;
192
193 struct work_struct tl_err_work; 221 struct work_struct tl_err_work;
194 struct work_struct remove_work; 222 struct work_struct remove_work;
195 223
196 struct list_head list; 224 struct list_head list;
197 struct completion done;
198 int status;
199 bool qp_in_error; 225 bool qp_in_error;
200
201 struct completion tsk_mgmt_done;
202 u8 tsk_mgmt_status;
203}; 226};
204 227
205struct srp_iu { 228struct srp_iu {
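
The srp_rdma_ch layout groups the fields written on every command (free_tx, lock, req_lim) ahead of a ____cacheline_aligned_in_smp marker so the read-mostly members (target, the CQs, the QP) start on a fresh cacheline and are never invalidated by those writes. A minimal illustration with a made-up struct:

	struct example {
		spinlock_t	lock;	/* dirtied on every command */
		s32		credits;

		/* the next field starts a new cacheline on SMP builds, so
		 * the read-mostly data below never shares a line with the
		 * hot writable fields above */
		void		*read_mostly ____cacheline_aligned_in_smp;
	};
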
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index dee06d6f0b68..6c9fc11efb87 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2311,12 +2311,11 @@ mptscsih_slave_destroy(struct scsi_device *sdev)
2311 * mptscsih_change_queue_depth - This function will set a devices queue depth 2311 * mptscsih_change_queue_depth - This function will set a devices queue depth
2312 * @sdev: per scsi_device pointer 2312 * @sdev: per scsi_device pointer
2313 * @qdepth: requested queue depth 2313 * @qdepth: requested queue depth
2314 * @reason: calling context
2315 * 2314 *
2316 * Adding support for new 'change_queue_depth' api. 2315 * Adding support for new 'change_queue_depth' api.
2317*/ 2316*/
2318int 2317int
2319mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) 2318mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
2320{ 2319{
2321 MPT_SCSI_HOST *hd = shost_priv(sdev->host); 2320 MPT_SCSI_HOST *hd = shost_priv(sdev->host);
2322 VirtTarget *vtarget; 2321 VirtTarget *vtarget;
@@ -2327,9 +2326,6 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2327 starget = scsi_target(sdev); 2326 starget = scsi_target(sdev);
2328 vtarget = starget->hostdata; 2327 vtarget = starget->hostdata;
2329 2328
2330 if (reason != SCSI_QDEPTH_DEFAULT)
2331 return -EOPNOTSUPP;
2332
2333 if (ioc->bus_type == SPI) { 2329 if (ioc->bus_type == SPI) {
2334 if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)) 2330 if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
2335 max_depth = 1; 2331 max_depth = 1;
@@ -2347,8 +2343,7 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2347 if (qdepth > max_depth) 2343 if (qdepth > max_depth)
2348 qdepth = max_depth; 2344 qdepth = max_depth;
2349 2345
2350 scsi_adjust_queue_depth(sdev, qdepth); 2346 return scsi_change_queue_depth(sdev, qdepth);
2351 return sdev->queue_depth;
2352} 2347}
2353 2348
2354/* 2349/*
@@ -2392,8 +2387,7 @@ mptscsih_slave_configure(struct scsi_device *sdev)
2392 ioc->name, vtarget->negoFlags, vtarget->maxOffset, 2387 ioc->name, vtarget->negoFlags, vtarget->maxOffset,
2393 vtarget->minSyncFactor)); 2388 vtarget->minSyncFactor));
2394 2389
2395 mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH, 2390 mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
2396 SCSI_QDEPTH_DEFAULT);
2397 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2391 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2398 "tagged %d, simple %d\n", 2392 "tagged %d, simple %d\n",
2399 ioc->name,sdev->tagged_supported, sdev->simple_tags)); 2393 ioc->name,sdev->tagged_supported, sdev->simple_tags));
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index e1b1a198a62a..2baeefd9be7a 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -128,8 +128,7 @@ extern int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_F
128extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); 128extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
129extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); 129extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
130extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); 130extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
131extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, 131extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
132 int reason);
133extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); 132extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
134extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); 133extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
135extern struct device_attribute *mptscsih_host_attrs[]; 134extern struct device_attribute *mptscsih_host_attrs[];
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 0ff4b02177be..0cf2c9d676be 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -170,7 +170,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
170} 170}
171 171
172/** 172/**
173 * eeprom_93cx6_read - Read multiple words from eeprom 173 * eeprom_93cx6_read - Read a word from eeprom
174 * @eeprom: Pointer to eeprom structure 174 * @eeprom: Pointer to eeprom structure
175 * @word: Word index from where we should start reading 175 * @word: Word index from where we should start reading
176 * @data: target pointer where the information will have to be stored 176 * @data: target pointer where the information will have to be stored
@@ -235,6 +235,66 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
235EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread); 235EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
236 236
237/** 237/**
238 * eeprom_93cx6_readb - Read a byte from eeprom
239 * @eeprom: Pointer to eeprom structure
240 * @byte: Byte index from where we should start reading
241 * @data: target pointer where the information will have to be stored
242 *
243 * This function will read a byte of the eeprom data
244 * into the given data pointer.
245 */
246void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, const u8 byte,
247 u8 *data)
248{
249 u16 command;
250 u16 tmp;
251
252 /*
253 * Initialize the eeprom register
254 */
255 eeprom_93cx6_startup(eeprom);
256
257 /*
258 * Select the read opcode and the byte to be read.
259 */
260 command = (PCI_EEPROM_READ_OPCODE << (eeprom->width + 1)) | byte;
261 eeprom_93cx6_write_bits(eeprom, command,
262 PCI_EEPROM_WIDTH_OPCODE + eeprom->width + 1);
263
264 /*
265 * Read the requested 8 bits.
266 */
267 eeprom_93cx6_read_bits(eeprom, &tmp, 8);
268 *data = tmp & 0xff;
269
270 /*
271 * Cleanup eeprom register.
272 */
273 eeprom_93cx6_cleanup(eeprom);
274}
275EXPORT_SYMBOL_GPL(eeprom_93cx6_readb);
276
277/**
278 * eeprom_93cx6_multireadb - Read multiple bytes from eeprom
279 * @eeprom: Pointer to eeprom structure
280 * @byte: Index from where we should start reading
281 * @data: target pointer where the information will have to be stored
282 * @bytes: Number of bytes that should be read.
283 *
284 * This function will read all requested bytes from the eeprom;
285 * this is done by calling eeprom_93cx6_readb() multiple times.
286 */
287void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom, const u8 byte,
288 u8 *data, const u16 bytes)
289{
290 unsigned int i;
291
292 for (i = 0; i < bytes; i++)
293 eeprom_93cx6_readb(eeprom, byte + i, &data[i]);
294}
295EXPORT_SYMBOL_GPL(eeprom_93cx6_multireadb);
296
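
The byte-wide helpers mirror the existing word-wide eeprom_93cx6_read()/eeprom_93cx6_multiread() pair; the read opcode is shifted one extra bit (width + 1) so the address field can index bytes rather than words, and 8 bits are clocked in per call. A hedged usage sketch from a hypothetical caller (MAC_OFFSET and the handler setup are assumptions, not part of this patch):

	struct eeprom_93cx6 eeprom;	/* .data and .register_read/.register_write
					 * are filled in by the owning driver */
	u8 mac[6];

	eeprom_93cx6_multireadb(&eeprom, MAC_OFFSET, mac, sizeof(mac));
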
297/**
238 * eeprom_93cx6_wren - set the write enable state 298 * eeprom_93cx6_wren - set the write enable state
239 * @eeprom: Pointer to eeprom structure 299 * @eeprom: Pointer to eeprom structure
240 * @enable: true to enable writes, otherwise disable writes 300 * @enable: true to enable writes, otherwise disable writes
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 8004b071a9f2..01a73395a017 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -353,9 +353,11 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
353 adapter->ccw_device = ccw_device; 353 adapter->ccw_device = ccw_device;
354 354
355 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); 355 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
356 INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports); 356 INIT_DELAYED_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
357 INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update); 357 INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
358 358
359 adapter->next_port_scan = jiffies;
360
359 if (zfcp_qdio_setup(adapter)) 361 if (zfcp_qdio_setup(adapter))
360 goto failed; 362 goto failed;
361 363
@@ -420,7 +422,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
420{ 422{
421 struct ccw_device *cdev = adapter->ccw_device; 423 struct ccw_device *cdev = adapter->ccw_device;
422 424
423 cancel_work_sync(&adapter->scan_work); 425 cancel_delayed_work_sync(&adapter->scan_work);
424 cancel_work_sync(&adapter->stat_work); 426 cancel_work_sync(&adapter->stat_work);
425 cancel_work_sync(&adapter->ns_up_work); 427 cancel_work_sync(&adapter->ns_up_work);
426 zfcp_destroy_adapter_work_queue(adapter); 428 zfcp_destroy_adapter_work_queue(adapter);
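
Because scan_work changes type from work_struct to delayed_work, every call site has to move to the delayed variants in the same patch; mixing plain and delayed workqueue calls on one item would bypass the embedded timer. The paired API as used here:

	INIT_DELAYED_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
	queue_delayed_work(wq, &adapter->scan_work, delay);	/* arm the timer */
	flush_delayed_work(&adapter->scan_work);	/* fire early, then wait */
	cancel_delayed_work_sync(&adapter->scan_work);	/* stop timer and work */
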
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index f9879d400d0e..54c7b48fdb46 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -56,8 +56,22 @@ static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
56 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); 56 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
57 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 57 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
58 tag); 58 tag);
59
60 /*
61 * We want to scan ports here, with some random backoff and without
62 * rate limit. Recovery has already scheduled a port scan for us,
63 * but with both random delay and rate limit. Nevertheless we get
64 * what we want here by flushing the scheduled work after sleeping
65 * an equivalent random time.
66 * Let the port scan random delay elapse first. If recovery finishes
67 * up to that point in time, that would be perfect for both recovery
68 * and port scan. If not, i.e. recovery takes ages, there was no
69 * point in waiting a random delay on top of the time consumed by
70 * recovery.
71 */
72 msleep(zfcp_fc_port_scan_backoff());
59 zfcp_erp_wait(adapter); 73 zfcp_erp_wait(adapter);
60 flush_work(&adapter->scan_work); /* ok to call even if nothing queued */ 74 flush_delayed_work(&adapter->scan_work);
61 75
62 zfcp_ccw_adapter_put(adapter); 76 zfcp_ccw_adapter_put(adapter);
63 77
@@ -162,11 +176,19 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
162 adapter->req_no = 0; 176 adapter->req_no = 0;
163 177
164 zfcp_ccw_activate(cdev, 0, "ccsonl1"); 178 zfcp_ccw_activate(cdev, 0, "ccsonl1");
165 /* scan for remote ports 179
166 either at the end of any successful adapter recovery 180 /*
167 or only after the adapter recovery for setting a device online */ 181 * We want to scan ports here, always, with some random delay and
182 * without rate limit - basically what zfcp_ccw_activate() has
183 * achieved for us. Not quite! That port scan depended on
184 * !no_auto_port_rescan. So let's cover the no_auto_port_rescan
185 * case here to make sure a port scan is done unconditionally.
186 * Since zfcp_ccw_activate() has waited the desired random time,
187 * we can immediately schedule and flush a port scan for the
188 * remaining cases.
189 */
168 zfcp_fc_inverse_conditional_port_scan(adapter); 190 zfcp_fc_inverse_conditional_port_scan(adapter);
169 flush_work(&adapter->scan_work); /* ok to call even if nothing queued */ 191 flush_delayed_work(&adapter->scan_work);
170 zfcp_ccw_adapter_put(adapter); 192 zfcp_ccw_adapter_put(adapter);
171 return 0; 193 return 0;
172} 194}
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index d91173f326c5..b8e853e53546 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -186,12 +186,13 @@ struct zfcp_adapter {
186 struct fc_host_statistics *fc_stats; 186 struct fc_host_statistics *fc_stats;
187 struct fsf_qtcb_bottom_port *stats_reset_data; 187 struct fsf_qtcb_bottom_port *stats_reset_data;
188 unsigned long stats_reset; 188 unsigned long stats_reset;
189 struct work_struct scan_work; 189 struct delayed_work scan_work;
190 struct work_struct ns_up_work; 190 struct work_struct ns_up_work;
191 struct service_level service_level; 191 struct service_level service_level;
192 struct workqueue_struct *work_queue; 192 struct workqueue_struct *work_queue;
193 struct device_dma_parameters dma_parms; 193 struct device_dma_parameters dma_parms;
194 struct zfcp_fc_events events; 194 struct zfcp_fc_events events;
195 unsigned long next_port_scan;
195}; 196};
196 197
197struct zfcp_port { 198struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index c82fe65c4128..2c5d4567d1da 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -821,11 +821,6 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
821 return ZFCP_ERP_CONTINUES; 821 return ZFCP_ERP_CONTINUES;
822} 822}
823 823
824static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
825{
826 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status);
827}
828
829static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) 824static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
830{ 825{
831 struct zfcp_port *port = erp_action->port; 826 struct zfcp_port *port = erp_action->port;
@@ -833,7 +828,6 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
833 828
834 switch (erp_action->step) { 829 switch (erp_action->step) {
835 case ZFCP_ERP_STEP_UNINITIALIZED: 830 case ZFCP_ERP_STEP_UNINITIALIZED:
836 zfcp_erp_port_strategy_clearstati(port);
837 if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) && 831 if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) &&
838 (status & ZFCP_STATUS_COMMON_OPEN)) 832 (status & ZFCP_STATUS_COMMON_OPEN))
839 return zfcp_erp_port_forced_strategy_close(erp_action); 833 return zfcp_erp_port_forced_strategy_close(erp_action);
@@ -933,7 +927,6 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
933 927
934 switch (erp_action->step) { 928 switch (erp_action->step) {
935 case ZFCP_ERP_STEP_UNINITIALIZED: 929 case ZFCP_ERP_STEP_UNINITIALIZED:
936 zfcp_erp_port_strategy_clearstati(port);
937 if (p_status & ZFCP_STATUS_COMMON_OPEN) 930 if (p_status & ZFCP_STATUS_COMMON_OPEN)
938 return zfcp_erp_port_strategy_close(erp_action); 931 return zfcp_erp_port_strategy_close(erp_action);
939 break; 932 break;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index a9c570a09b85..5b500652572b 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -85,6 +85,7 @@ extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
85extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); 85extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
86extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); 86extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
87extern void zfcp_fc_sym_name_update(struct work_struct *); 87extern void zfcp_fc_sym_name_update(struct work_struct *);
88extern unsigned int zfcp_fc_port_scan_backoff(void);
88extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *); 89extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
89extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *); 90extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);
90 91
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index ca28e1c66115..25d49f32ca63 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/utsname.h> 14#include <linux/utsname.h>
15#include <linux/random.h>
15#include <scsi/fc/fc_els.h> 16#include <scsi/fc/fc_els.h>
16#include <scsi/libfc.h> 17#include <scsi/libfc.h>
17#include "zfcp_ext.h" 18#include "zfcp_ext.h"
@@ -31,12 +32,54 @@ module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
31MODULE_PARM_DESC(no_auto_port_rescan, 32MODULE_PARM_DESC(no_auto_port_rescan,
32 "no automatic port_rescan (default off)"); 33 "no automatic port_rescan (default off)");
33 34
35static unsigned int port_scan_backoff = 500;
36module_param(port_scan_backoff, uint, 0600);
37MODULE_PARM_DESC(port_scan_backoff,
38 "upper limit of port scan random backoff in msecs (default 500)");
39
40static unsigned int port_scan_ratelimit = 60000;
41module_param(port_scan_ratelimit, uint, 0600);
42MODULE_PARM_DESC(port_scan_ratelimit,
43 "minimum interval between port scans in msecs (default 60000)");
44
45unsigned int zfcp_fc_port_scan_backoff(void)
46{
47 if (!port_scan_backoff)
48 return 0;
49 return get_random_int() % port_scan_backoff;
50}
51
52static void zfcp_fc_port_scan_time(struct zfcp_adapter *adapter)
53{
54 unsigned long interval = msecs_to_jiffies(port_scan_ratelimit);
55 unsigned long backoff = msecs_to_jiffies(zfcp_fc_port_scan_backoff());
56
57 adapter->next_port_scan = jiffies + interval + backoff;
58}
59
60static void zfcp_fc_port_scan(struct zfcp_adapter *adapter)
61{
62 unsigned long now = jiffies;
63 unsigned long next = adapter->next_port_scan;
64 unsigned long delay = 0, max;
65
66 /* delay only needed within waiting period */
67 if (time_before(now, next)) {
68 delay = next - now;
69 /* paranoia: never ever delay scans longer than specified */
70 max = msecs_to_jiffies(port_scan_ratelimit + port_scan_backoff);
71 delay = min(delay, max);
72 }
73
74 queue_delayed_work(adapter->work_queue, &adapter->scan_work, delay);
75}
76
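
zfcp_fc_port_scan_time() and zfcp_fc_port_scan() together implement a rate limiter with jitter: each executed scan arms next_port_scan = jiffies + ratelimit + random backoff, and a scan requested before that deadline is queued with the remaining delay rather than dropped. With the defaults (60000 ms ratelimit, up to 500 ms backoff), successive scans land 60.0-60.5 s apart, and a request arriving 10 s after a scan is simply deferred for the remaining ~50 s; the min() against ratelimit + backoff caps the delay even if next_port_scan were ever set too far ahead.
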
34void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter) 77void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
35{ 78{
36 if (no_auto_port_rescan) 79 if (no_auto_port_rescan)
37 return; 80 return;
38 81
39 queue_work(adapter->work_queue, &adapter->scan_work); 82 zfcp_fc_port_scan(adapter);
40} 83}
41 84
42void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter) 85void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
@@ -44,7 +87,7 @@ void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
 	if (!no_auto_port_rescan)
 		return;
 
-	queue_work(adapter->work_queue, &adapter->scan_work);
+	zfcp_fc_port_scan(adapter);
 }
 
 /**
@@ -680,12 +723,15 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
  */
 void zfcp_fc_scan_ports(struct work_struct *work)
 {
-	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+	struct delayed_work *dw = to_delayed_work(work);
+	struct zfcp_adapter *adapter = container_of(dw, struct zfcp_adapter,
 						    scan_work);
 	int ret, i;
 	struct zfcp_fc_req *fc_req;
 	int chain, max_entries, buf_num, max_bytes;
 
+	zfcp_fc_port_scan_time(adapter);
+
 	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
 	buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
 	max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
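
The zfcp_fc.c hunks above replace an unconditional queue_work() with a schedule that enforces a minimum interval between port scans (port_scan_ratelimit) plus a random jitter (port_scan_backoff). The following standalone C sketch models the same arithmetic; it is not driver code, and the millisecond clock, rand(), and all names are illustrative stand-ins for the kernel's jiffies and get_random_int():

/*
 * Standalone sketch (not driver code) of the scheduling policy above.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned int port_scan_backoff = 500;		/* ms, jitter upper bound */
static unsigned int port_scan_ratelimit = 60000;	/* ms, minimum gap */

/* random delay in [0, port_scan_backoff); 0 disables the jitter */
static unsigned int scan_backoff(void)
{
	if (!port_scan_backoff)
		return 0;
	return rand() % port_scan_backoff;
}

/* how long (ms) a scan requested at 'now' must still wait */
static unsigned long scan_delay(unsigned long now, unsigned long next_scan)
{
	unsigned long delay = 0;
	unsigned long max = port_scan_ratelimit + port_scan_backoff;

	if (now < next_scan) {
		delay = next_scan - now;
		if (delay > max)	/* never delay longer than specified */
			delay = max;
	}
	return delay;
}

int main(void)
{
	/* a scan just ran at t=0: the next one is ratelimit + jitter away */
	unsigned long next = port_scan_ratelimit + scan_backoff();

	printf("trigger at t=1000ms  waits %lu ms\n", scan_delay(1000, next));
	printf("trigger at t=70000ms waits %lu ms\n", scan_delay(70000, next));
	return 0;
}

Repeat triggers inside the waiting period are deferred to next_port_scan rather than dropped, which is why zfcp_fc_scan_ports() above re-arms the timestamp first thing.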
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 0fe8d5d95119..21ec5e2f584c 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1396,8 +1396,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 	port->handle = header->port_handle;
 	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
 			ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
-	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
-			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
+	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
 			  &port->status);
 	/* check whether D_ID has changed during open */
 	/*
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index b5dfa51f396f..75f4bfc2b98a 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -32,25 +32,6 @@ static bool allow_lun_scan = 1;
 module_param(allow_lun_scan, bool, 0600);
 MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
 
-static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
-					int reason)
-{
-	switch (reason) {
-	case SCSI_QDEPTH_DEFAULT:
-		scsi_adjust_queue_depth(sdev, depth);
-		break;
-	case SCSI_QDEPTH_QFULL:
-		scsi_track_queue_full(sdev, depth);
-		break;
-	case SCSI_QDEPTH_RAMP_UP:
-		scsi_adjust_queue_depth(sdev, depth);
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-	return sdev->queue_depth;
-}
-
 static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -66,7 +47,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
 static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
 {
 	if (sdp->tagged_supported)
-		scsi_adjust_queue_depth(sdp, default_depth);
+		scsi_change_queue_depth(sdp, default_depth);
 	return 0;
 }
 
@@ -305,7 +286,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
 	.slave_alloc		 = zfcp_scsi_slave_alloc,
 	.slave_configure	 = zfcp_scsi_slave_configure,
 	.slave_destroy		 = zfcp_scsi_slave_destroy,
-	.change_queue_depth	 = zfcp_scsi_change_queue_depth,
+	.change_queue_depth	 = scsi_change_queue_depth,
 	.proc_name		 = "zfcp",
 	.can_queue		 = 4096,
 	.this_id		 = -1,
@@ -320,6 +301,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
 	.use_clustering		 = 1,
 	.shost_attrs		 = zfcp_sysfs_shost_attrs,
 	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
+	.track_queue_depth	 = 1,
 };
 
 /**
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 672b57219e11..96a0be13e841 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -73,9 +73,7 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
 ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
 		 (atomic_read(&port->status) &
 		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
-ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
-		 (atomic_read(&port->status) &
-		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+ZFCP_DEFINE_ATTR_CONST(port, access_denied, "%d\n", 0);
 
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
 		 zfcp_unit_sdev_status(unit));
@@ -223,9 +221,13 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
 	if (!adapter)
 		return -ENODEV;
 
-	/* sync the user-space- with the kernel-invocation of scan_work */
-	queue_work(adapter->work_queue, &adapter->scan_work);
-	flush_work(&adapter->scan_work);
+	/*
+	 * Users wish is our command: immediately schedule and flush a
+	 * worker to conduct a synchronous port scan, that is, neither
+	 * a random delay nor a rate limit is applied here.
+	 */
+	queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
+	flush_delayed_work(&adapter->scan_work);
 	zfcp_ccw_adapter_put(adapter);
 
 	return (ssize_t) count;
@@ -439,16 +441,15 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
 {									\
 	struct scsi_device *sdev = to_scsi_device(dev);			\
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		\
-	struct zfcp_port *port = zfcp_sdev->port;			\
 									\
 	return sprintf(buf, _format, _value);				\
 }									\
 static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
 
 ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
-		      dev_name(&port->adapter->ccw_device->dev));
+		      dev_name(&zfcp_sdev->port->adapter->ccw_device->dev));
 ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
-		      (unsigned long long) port->wwpn);
+		      (unsigned long long) zfcp_sdev->port->wwpn);
 
 static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
 					    struct device_attribute *attr,
@@ -460,6 +461,49 @@ static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
 }
 static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
 
+ZFCP_DEFINE_SCSI_ATTR(zfcp_access_denied, "%d\n",
+		      (atomic_read(&zfcp_sdev->status) &
+		       ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+
+static ssize_t zfcp_sysfs_scsi_zfcp_failed_show(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	unsigned int status = atomic_read(&sdev_to_zfcp(sdev)->status);
+	unsigned int failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
+
+	return sprintf(buf, "%d\n", failed);
+}
+
+static ssize_t zfcp_sysfs_scsi_zfcp_failed_store(struct device *dev,
+						 struct device_attribute *attr,
+						 const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val) || val != 0)
+		return -EINVAL;
+
+	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+			    "syufai3");
+	zfcp_erp_wait(sdev_to_zfcp(sdev)->port->adapter);
+
+	return count;
+}
+static DEVICE_ATTR(zfcp_failed, S_IWUSR | S_IRUGO,
+		   zfcp_sysfs_scsi_zfcp_failed_show,
+		   zfcp_sysfs_scsi_zfcp_failed_store);
+
+ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n",
+		      (atomic_read(&zfcp_sdev->status) &
+		       ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+
+ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n",
+		      atomic_read(&zfcp_sdev->status));
+
 struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
 	&dev_attr_fcp_lun,
 	&dev_attr_wwpn,
@@ -467,6 +511,10 @@ struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
 	&dev_attr_read_latency,
 	&dev_attr_write_latency,
 	&dev_attr_cmd_latency,
+	&dev_attr_zfcp_access_denied,
+	&dev_attr_zfcp_failed,
+	&dev_attr_zfcp_in_recovery,
+	&dev_attr_zfcp_status,
 	NULL
 };
 
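
The new zfcp_failed store method above accepts only a written "0" (request a retry of recovery) and rejects everything else with -EINVAL. A standalone model of that validation follows; strtoul() stands in for the kernel's kstrtoul(), which additionally tolerates a single trailing newline, and all names are illustrative:

/*
 * Standalone model (not the driver) of the zfcp_failed store policy.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int failed_store(const char *buf)
{
	char *end;
	unsigned long val = strtoul(buf, &end, 0);

	if (end == buf || *end != '\0' || val != 0)
		return -EINVAL;	/* mirrors: kstrtoul(...) || val != 0 */
	return 0;		/* the driver would reopen the LUN here */
}

int main(void)
{
	printf("\"0\" -> %d\n", failed_store("0"));	/* accepted */
	printf("\"1\" -> %d\n", failed_store("1"));	/* rejected */
	printf("\"x\" -> %d\n", failed_store("x"));	/* rejected */
	return 0;
}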
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 02021f5ca866..cd4129ff7ae4 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -189,19 +189,6 @@ static ssize_t twa_show_stats(struct device *dev,
 	return len;
 } /* End twa_show_stats() */
 
-/* This function will set a devices queue depth */
-static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
-				  int reason)
-{
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
-
-	if (queue_depth > TW_Q_LENGTH-2)
-		queue_depth = TW_Q_LENGTH-2;
-	scsi_adjust_queue_depth(sdev, queue_depth);
-	return queue_depth;
-} /* End twa_change_queue_depth() */
-
 /* Create sysfs 'stats' entry */
 static struct device_attribute twa_host_stats_attr = {
 	.attr = {
@@ -2016,7 +2003,7 @@ static struct scsi_host_template driver_template = {
 	.queuecommand		= twa_scsi_queue,
 	.eh_host_reset_handler	= twa_scsi_eh_reset,
 	.bios_param		= twa_scsi_biosparam,
-	.change_queue_depth	= twa_change_queue_depth,
+	.change_queue_depth	= scsi_change_queue_depth,
 	.can_queue		= TW_Q_LENGTH-2,
 	.slave_configure	= twa_slave_configure,
 	.this_id		= -1,
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index ac0c2544a470..2361772d5909 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -191,19 +191,6 @@ static ssize_t twl_show_stats(struct device *dev,
 	return len;
 } /* End twl_show_stats() */
 
-/* This function will set a devices queue depth */
-static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth,
-				  int reason)
-{
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
-
-	if (queue_depth > TW_Q_LENGTH-2)
-		queue_depth = TW_Q_LENGTH-2;
-	scsi_adjust_queue_depth(sdev, queue_depth);
-	return queue_depth;
-} /* End twl_change_queue_depth() */
-
 /* stats sysfs attribute initializer */
 static struct device_attribute twl_host_stats_attr = {
 	.attr = {
@@ -1590,7 +1577,7 @@ static struct scsi_host_template driver_template = {
 	.queuecommand		= twl_scsi_queue,
 	.eh_host_reset_handler	= twl_scsi_eh_reset,
 	.bios_param		= twl_scsi_biosparam,
-	.change_queue_depth	= twl_change_queue_depth,
+	.change_queue_depth	= scsi_change_queue_depth,
 	.can_queue		= TW_Q_LENGTH-2,
 	.slave_configure	= twl_slave_configure,
 	.this_id		= -1,
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 1ec9ad92b6c3..c75f2048319f 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -523,19 +523,6 @@ static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr,
 	return len;
 } /* End tw_show_stats() */
 
-/* This function will set a devices queue depth */
-static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth,
-				 int reason)
-{
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
-
-	if (queue_depth > TW_Q_LENGTH-2)
-		queue_depth = TW_Q_LENGTH-2;
-	scsi_adjust_queue_depth(sdev, queue_depth);
-	return queue_depth;
-} /* End tw_change_queue_depth() */
-
 /* Create sysfs 'stats' entry */
 static struct device_attribute tw_host_stats_attr = {
 	.attr = {
@@ -2270,7 +2257,7 @@ static struct scsi_host_template driver_template = {
 	.queuecommand		= tw_scsi_queue,
 	.eh_host_reset_handler	= tw_scsi_eh_reset,
 	.bios_param		= tw_scsi_biosparam,
-	.change_queue_depth	= tw_change_queue_depth,
+	.change_queue_depth	= scsi_change_queue_depth,
 	.can_queue		= TW_Q_LENGTH-2,
 	.slave_configure	= tw_slave_configure,
 	.this_id		= -1,
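
All three 3ware drivers above delete byte-for-byte identical change_queue_depth callbacks and point their host templates at the midlayer's scsi_change_queue_depth instead. A user-space model of that deduplication pattern follows; the structs and names are illustrative stand-ins, not SCSI midlayer types:

/*
 * Standalone model of replacing duplicated per-driver callbacks with
 * one shared helper referenced from an ops table.
 */
#include <stdio.h>

struct sdev { int queue_depth; };

/* stands in for the midlayer's scsi_change_queue_depth() */
static int generic_change_queue_depth(struct sdev *sdev, int depth)
{
	sdev->queue_depth = depth;
	return depth;
}

struct host_template {
	int (*change_queue_depth)(struct sdev *, int);
};

static const struct host_template tmpl = {
	.change_queue_depth = generic_change_queue_depth,
};

int main(void)
{
	struct sdev s = { .queue_depth = 2 };

	printf("new depth %d\n", tmpl.change_queue_depth(&s, 32));
	return 0;
}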
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index d7557b932113..aa915da2a5e5 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -175,7 +175,7 @@ STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
 STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
-static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason);
+static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
 
 STATIC struct device_attribute *NCR_700_dev_attrs[];
@@ -904,7 +904,7 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
 			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
 
 			SCp->device->tagged_supported = 0;
-			scsi_adjust_queue_depth(SCp->device, host->cmd_per_lun);
+			scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
 			scsi_set_tag_type(SCp->device, 0);
 		} else {
 			shost_printk(KERN_WARNING, host,
@@ -2052,7 +2052,7 @@ NCR_700_slave_configure(struct scsi_device *SDp)
 
 	/* to do here: allocate memory; build a queue_full list */
 	if(SDp->tagged_supported) {
-		scsi_adjust_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
+		scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
 		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
 	}
 
@@ -2075,16 +2075,11 @@ NCR_700_slave_destroy(struct scsi_device *SDp)
 }
 
 static int
-NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason)
+NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
 {
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
-
 	if (depth > NCR_700_MAX_TAGS)
 		depth = NCR_700_MAX_TAGS;
-
-	scsi_adjust_queue_depth(SDp, depth);
-	return depth;
+	return scsi_change_queue_depth(SDp, depth);
 }
 
 static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
@@ -2105,12 +2100,12 @@ static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
 	if (!tag_type) {
 		/* shift back to the default unqueued number of commands
 		 * (the user can still raise this) */
-		scsi_adjust_queue_depth(SDp, SDp->host->cmd_per_lun);
+		scsi_change_queue_depth(SDp, SDp->host->cmd_per_lun);
 		hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
 	} else {
 		/* Here, we cleared the negotiation flag above, so this
 		 * will force the driver to renegotiate */
-		scsi_adjust_queue_depth(SDp, SDp->queue_depth);
+		scsi_change_queue_depth(SDp, SDp->queue_depth);
 		if (change_tag)
 			NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
 	}
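
Where a driver still has a hardware limit, the callback survives but shrinks to clamp-and-delegate, as NCR_700_change_queue_depth above shows. A runnable sketch of that shape follows; MAX_TAGS and the struct are stand-ins for the driver's types, not real ones:

/*
 * Minimal user-space model of the clamp-and-delegate pattern: bound
 * the request, then hand it to a common helper instead of adjusting
 * state locally.
 */
#include <stdio.h>

#define MAX_TAGS 16	/* stands in for NCR_700_MAX_TAGS */

struct device { int queue_depth; };

/* stands in for scsi_change_queue_depth() */
static int change_queue_depth(struct device *dev, int depth)
{
	dev->queue_depth = depth;
	return depth;
}

static int driver_change_queue_depth(struct device *dev, int depth)
{
	if (depth > MAX_TAGS)
		depth = MAX_TAGS;
	return change_queue_depth(dev, depth);
}

int main(void)
{
	struct device dev = { .queue_depth = 1 };

	printf("request 64 -> %d\n", driver_change_queue_depth(&dev, 64));
	printf("request 8  -> %d\n", driver_change_queue_depth(&dev, 8));
	return 0;
}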
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 5aa476b6b8a8..8d66a6469e29 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2327,12 +2327,12 @@ static int blogic_slaveconfig(struct scsi_device *dev)
 			if (qdepth == 0)
 				qdepth = BLOGIC_MAX_AUTO_TAG_DEPTH;
 			adapter->qdepth[tgt_id] = qdepth;
-			scsi_adjust_queue_depth(dev, qdepth);
+			scsi_change_queue_depth(dev, qdepth);
 		} else {
 			adapter->tagq_ok &= ~(1 << tgt_id);
 			qdepth = adapter->untag_qdepth;
 			adapter->qdepth[tgt_id] = qdepth;
-			scsi_adjust_queue_depth(dev, qdepth);
+			scsi_change_queue_depth(dev, qdepth);
 		}
 	qdepth = 0;
 	for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3a820f61ce65..86cf3d671eb9 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1341,13 +1341,15 @@ config SCSI_DC395x
 	  To compile this driver as a module, choose M here: the
 	  module will be called dc395x.
 
-config SCSI_DC390T
-	tristate "Tekram DC390(T) and Am53/79C974 SCSI support"
+config SCSI_AM53C974
+	tristate "Tekram DC390(T) and Am53/79C974 SCSI support (new driver)"
 	depends on PCI && SCSI
+	select SCSI_SPI_ATTRS
 	---help---
 	  This driver supports PCI SCSI host adapters based on the Am53C974A
 	  chip, e.g. Tekram DC390(T), DawiControl 2974 and some onboard
 	  PCscsi/PCnet (Am53/79C974) solutions.
+	  This is a new implementation base on the generic esp_scsi driver.
 
 	  Documentation can be found in <file:Documentation/scsi/tmscsim.txt>.
 
@@ -1355,7 +1357,7 @@ config SCSI_DC390T
 	  based on NCR/Symbios chips. Use "NCR53C8XX SCSI support" for those.
 
 	  To compile this driver as a module, choose M here: the
-	  module will be called tmscsim.
+	  module will be called am53c974.
 
 config SCSI_T128
 	tristate "Trantor T128/T128F/T228 SCSI support"
@@ -1451,6 +1453,14 @@ config SCSI_NSP32
 	  To compile this driver as a module, choose M here: the
 	  module will be called nsp32.
 
+config SCSI_WD719X
+	tristate "Western Digital WD7193/7197/7296 support"
+	depends on PCI && SCSI
+	select EEPROM_93CX6
+	---help---
+	  This is a driver for Western Digital WD7193, WD7197 and WD7296 PCI
+	  SCSI controllers (based on WD33C296A chip).
+
 config SCSI_DEBUG
 	tristate "SCSI debugging host simulator"
 	depends on SCSI
@@ -1615,7 +1625,7 @@ config ATARI_SCSI_RESET_BOOT
 	  that leave the devices with SCSI operations partway completed.
 
 config MAC_SCSI
-	bool "Macintosh NCR5380 SCSI"
+	tristate "Macintosh NCR5380 SCSI"
 	depends on MAC && SCSI=y
 	select SCSI_SPI_ATTRS
 	help
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 59f1ce6df2d6..58158f11ed7b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -100,7 +100,7 @@ obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
 obj-$(CONFIG_SCSI_7000FASST)	+= wd7000.o
 obj-$(CONFIG_SCSI_EATA)		+= eata.o
 obj-$(CONFIG_SCSI_DC395x)	+= dc395x.o
-obj-$(CONFIG_SCSI_DC390T)	+= tmscsim.o
+obj-$(CONFIG_SCSI_AM53C974)	+= esp_scsi.o	am53c974.o
 obj-$(CONFIG_MEGARAID_LEGACY)	+= megaraid.o
 obj-$(CONFIG_MEGARAID_NEWGEN)	+= megaraid/
 obj-$(CONFIG_MEGARAID_SAS)	+= megaraid/
@@ -143,6 +143,7 @@ obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o
 obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o
 obj-$(CONFIG_XEN_SCSI_FRONTEND)	+= xen-scsifront.o
 obj-$(CONFIG_HYPERV_STORAGE)	+= hv_storvsc.o
+obj-$(CONFIG_SCSI_WD719X)	+= wd719x.o
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 296c6f53605a..36244d63def2 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -11,8 +11,6 @@
  *	drew@colorado.edu
  *	+1 (303) 666-5836
  *
- * DISTRIBUTION RELEASE 6.
- *
  * For more information, please consult
  *
  * NCR 5380 Family
@@ -279,7 +277,7 @@ static void do_reset(struct Scsi_Host *host);
  *	Set up the internal fields in the SCSI command.
  */
 
-static __inline__ void initialize_SCp(Scsi_Cmnd * cmd)
+static inline void initialize_SCp(struct scsi_cmnd *cmd)
 {
 	/*
 	 * Initialize the Scsi Pointer field so that all of the commands in the
@@ -574,12 +572,12 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
 	int trying_irqs, i, mask;
 	NCR5380_setup(instance);
 
-	for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
+	for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1)
 		if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
 			trying_irqs |= mask;
 
 	timeout = jiffies + (250 * HZ / 1000);
-	probe_irq = SCSI_IRQ_NONE;
+	probe_irq = NO_IRQ;
 
 	/*
 	 * A interrupt is triggered whenever BSY = false, SEL = true
@@ -596,13 +594,13 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
 	NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
 
-	while (probe_irq == SCSI_IRQ_NONE && time_before(jiffies, timeout))
+	while (probe_irq == NO_IRQ && time_before(jiffies, timeout))
 		schedule_timeout_uninterruptible(1);
 
 	NCR5380_write(SELECT_ENABLE_REG, 0);
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 
-	for (i = 0, mask = 1; i < 16; ++i, mask <<= 1)
+	for (i = 1, mask = 2; i < 16; ++i, mask <<= 1)
 		if (trying_irqs & mask)
 			free_irq(i, NULL);
 
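
Both probe loops above now start at i = 1 with mask = 2, so IRQ 0 is never requested or freed. A small standalone model of the mask walk follows; the request_irq() outcome is simulated by the caller-supplied 'possible' bitmask, and all names are illustrative:

/*
 * User-space model of the probe loop change: bit 0 of 'possible' is
 * skipped by construction.
 */
#include <stdio.h>

static int probe_mask(int possible)
{
	int trying_irqs = 0, i, mask;

	for (i = 1, mask = 2; i < 16; ++i, mask <<= 1)
		if (mask & possible)	/* as if request_irq(i, ...) == 0 */
			trying_irqs |= mask;
	return trying_irqs;
}

int main(void)
{
	/* bit 0 set in 'possible' is ignored by design */
	printf("possible 0x%x -> trying 0x%x\n", 0x0023, probe_mask(0x0023));
	return 0;
}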
@@ -610,50 +608,70 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
 }
 
 /**
- * NCR58380_print_options - show options
- * @instance: unused for now
+ * NCR58380_info - report driver and host information
+ * @instance: relevant scsi host instance
  *
- * Called by probe code indicating the NCR5380 driver options that
- * were selected. At some point this will switch to runtime options
- * read from the adapter in question
+ * For use as the host template info() handler.
  *
  * Locks: none
  */
 
-static void __init __maybe_unused
-NCR5380_print_options(struct Scsi_Host *instance)
+static const char *NCR5380_info(struct Scsi_Host *instance)
 {
-	printk(" generic options"
-#ifdef AUTOPROBE_IRQ
-	       " AUTOPROBE_IRQ"
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+	return hostdata->info;
+}
+
+static void prepare_info(struct Scsi_Host *instance)
+{
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+	snprintf(hostdata->info, sizeof(hostdata->info),
+	         "%s, io_port 0x%lx, n_io_port %d, "
+	         "base 0x%lx, irq %d, "
+	         "can_queue %d, cmd_per_lun %d, "
+	         "sg_tablesize %d, this_id %d, "
+	         "flags { %s%s%s}, "
+#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG)
+	         "USLEEP_POLL %d, USLEEP_WAITLONG %d, "
+#endif
+	         "options { %s} ",
+	         instance->hostt->name, instance->io_port, instance->n_io_port,
+	         instance->base, instance->irq,
+	         instance->can_queue, instance->cmd_per_lun,
+	         instance->sg_tablesize, instance->this_id,
+	         hostdata->flags & FLAG_NCR53C400 ? "NCR53C400 " : "",
+	         hostdata->flags & FLAG_DTC3181E ? "DTC3181E " : "",
+	         hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "",
+#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG)
+	         USLEEP_POLL, USLEEP_WAITLONG,
 #endif
-#ifdef AUTOSENSE
-	       " AUTOSENSE"
+#ifdef AUTOPROBE_IRQ
+	         "AUTOPROBE_IRQ "
 #endif
 #ifdef DIFFERENTIAL
-	       " DIFFERENTIAL"
+	         "DIFFERENTIAL "
 #endif
 #ifdef REAL_DMA
-	       " REAL DMA"
+	         "REAL_DMA "
 #endif
 #ifdef REAL_DMA_POLL
-	       " REAL DMA POLL"
+	         "REAL_DMA_POLL "
 #endif
 #ifdef PARITY
-	       " PARITY"
+	         "PARITY "
 #endif
 #ifdef PSEUDO_DMA
-	       " PSEUDO DMA"
+	         "PSEUDO_DMA "
 #endif
 #ifdef UNSAFE
-	       " UNSAFE "
+	         "UNSAFE "
 #endif
-	    );
-	printk(" USLEEP, USLEEP_POLL=%d USLEEP_SLEEP=%d", USLEEP_POLL, USLEEP_SLEEP);
-	printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
-	if (((struct NCR5380_hostdata *) instance->hostdata)->flags & FLAG_NCR53C400) {
-		printk(" ncr53c400 release=%d", NCR53C400_PUBLIC_RELEASE);
-	}
+#ifdef NCR53C400
+	         "NCR53C400 "
+#endif
+	         "");
 }
 
 /**
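
The replacement above formats the host description once into a buffer held in the host data, so the info() handler becomes a constant-time pointer return. A user-space sketch of that caching pattern follows, with illustrative struct fields and buffer size:

/*
 * Standalone sketch of the prepare_info()/info() split: format once,
 * return the cached string thereafter.
 */
#include <stdio.h>

struct hostdata {
	char info[160];
	int irq, can_queue;
};

static void prepare_info(struct hostdata *h)
{
	/* snprintf never overruns and always NUL-terminates */
	snprintf(h->info, sizeof(h->info),
		 "demo5380, irq %d, can_queue %d", h->irq, h->can_queue);
}

static const char *host_info(struct hostdata *h)
{
	return h->info;	/* no formatting work at query time */
}

int main(void)
{
	struct hostdata h = { .irq = 10, .can_queue = 16 };

	prepare_info(&h);
	puts(host_info(&h));
	return 0;
}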
@@ -672,6 +690,7 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
 	NCR5380_dprint_phase(NDEBUG_ANY, instance);
 }
 
+#ifdef PSEUDO_DMA
 /******************************************/
 /*
  * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED]
@@ -689,19 +708,18 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
 static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
 	char *buffer, int length)
 {
-#ifdef DTC_PUBLIC_RELEASE
-	dtc_wmaxi = dtc_maxi = 0;
-#endif
-#ifdef PAS16_PUBLIC_RELEASE
-	pas_wmaxi = pas_maxi = 0;
-#endif
-	return (-ENOSYS);	/* Currently this is a no-op */
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+	hostdata->spin_max_r = 0;
+	hostdata->spin_max_w = 0;
+	return 0;
 }
+#endif
 
 #undef SPRINTF
 #define SPRINTF(args...) seq_printf(m, ## args)
 static
-void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m);
+void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m);
 static
 void lprint_command(unsigned char *cmd, struct seq_file *m);
 static
@@ -711,56 +729,31 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m,
 				struct Scsi_Host *instance)
 {
 	struct NCR5380_hostdata *hostdata;
-	Scsi_Cmnd *ptr;
+	struct scsi_cmnd *ptr;
 
 	hostdata = (struct NCR5380_hostdata *) instance->hostdata;
 
-	SPRINTF("NCR5380 core release=%d. ", NCR5380_PUBLIC_RELEASE);
-	if (((struct NCR5380_hostdata *) instance->hostdata)->flags & FLAG_NCR53C400)
-		SPRINTF("ncr53c400 release=%d. ", NCR53C400_PUBLIC_RELEASE);
-#ifdef DTC_PUBLIC_RELEASE
-	SPRINTF("DTC 3180/3280 release %d", DTC_PUBLIC_RELEASE);
-#endif
-#ifdef T128_PUBLIC_RELEASE
-	SPRINTF("T128 release %d", T128_PUBLIC_RELEASE);
-#endif
-#ifdef GENERIC_NCR5380_PUBLIC_RELEASE
-	SPRINTF("Generic5380 release %d", GENERIC_NCR5380_PUBLIC_RELEASE);
-#endif
-#ifdef PAS16_PUBLIC_RELEASE
-	SPRINTF("PAS16 release=%d", PAS16_PUBLIC_RELEASE);
-#endif
-
-	SPRINTF("\nBase Addr: 0x%05lX ", (long) instance->base);
-	SPRINTF("io_port: %04x ", (int) instance->io_port);
-	if (instance->irq == SCSI_IRQ_NONE)
-		SPRINTF("IRQ: None.\n");
-	else
-		SPRINTF("IRQ: %d.\n", instance->irq);
-
-#ifdef DTC_PUBLIC_RELEASE
-	SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n", dtc_wmaxi, dtc_maxi);
-#endif
-#ifdef PAS16_PUBLIC_RELEASE
-	SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n", pas_wmaxi, pas_maxi);
+#ifdef PSEUDO_DMA
+	SPRINTF("Highwater I/O busy spin counts: write %d, read %d\n",
+	        hostdata->spin_max_w, hostdata->spin_max_r);
 #endif
 	spin_lock_irq(instance->host_lock);
 	if (!hostdata->connected)
 		SPRINTF("scsi%d: no currently connected command\n", instance->host_no);
 	else
-		lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m);
+		lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m);
 	SPRINTF("scsi%d: issue_queue\n", instance->host_no);
-	for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+	for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
 		lprint_Scsi_Cmnd(ptr, m);
 
 	SPRINTF("scsi%d: disconnected_queue\n", instance->host_no);
-	for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+	for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
 		lprint_Scsi_Cmnd(ptr, m);
 	spin_unlock_irq(instance->host_lock);
 	return 0;
 }
 
-static void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m)
+static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
 {
 	SPRINTF("scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
 	SPRINTF("        command = ");
@@ -836,18 +829,6 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
836 829
837 INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main); 830 INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
838 831
839#ifdef NCR5380_STATS
840 for (i = 0; i < 8; ++i) {
841 hostdata->time_read[i] = 0;
842 hostdata->time_write[i] = 0;
843 hostdata->bytes_read[i] = 0;
844 hostdata->bytes_write[i] = 0;
845 }
846 hostdata->timebase = 0;
847 hostdata->pendingw = 0;
848 hostdata->pendingr = 0;
849#endif
850
851 /* The CHECK code seems to break the 53C400. Will check it later maybe */ 832 /* The CHECK code seems to break the 53C400. Will check it later maybe */
852 if (flags & FLAG_NCR53C400) 833 if (flags & FLAG_NCR53C400)
853 hostdata->flags = FLAG_HAS_LAST_BYTE_SENT | flags; 834 hostdata->flags = FLAG_HAS_LAST_BYTE_SENT | flags;
@@ -857,11 +838,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
 	hostdata->host = instance;
 	hostdata->time_expires = 0;
 
-#ifndef AUTOSENSE
-	if ((instance->cmd_per_lun > 1) || instance->can_queue > 1)
-		printk(KERN_WARNING "scsi%d : WARNING : support for multiple outstanding commands enabled\n" "         without AUTOSENSE option, contingent allegiance conditions may\n"
-		       "         be incorrectly cleared.\n", instance->host_no);
-#endif				/* def AUTOSENSE */
+	prepare_info(instance);
 
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 	NCR5380_write(MODE_REG, MR_BASE);
@@ -935,11 +912,11 @@ static void NCR5380_exit(struct Scsi_Host *instance)
  *	Locks: host lock taken by caller
  */
 
-static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, void (*done) (struct scsi_cmnd *))
 {
 	struct Scsi_Host *instance = cmd->device->host;
 	struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
-	Scsi_Cmnd *tmp;
+	struct scsi_cmnd *tmp;
 
 #if (NDEBUG & NDEBUG_NO_WRITE)
 	switch (cmd->cmnd[0]) {
@@ -952,25 +929,6 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)
 	}
 #endif				/* (NDEBUG & NDEBUG_NO_WRITE) */
 
-#ifdef NCR5380_STATS
-	switch (cmd->cmnd[0]) {
-	case WRITE:
-	case WRITE_6:
-	case WRITE_10:
-		hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
-		hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
-		hostdata->pendingw++;
-		break;
-	case READ:
-	case READ_6:
-	case READ_10:
-		hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
-		hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
-		hostdata->pendingr++;
-		break;
-	}
-#endif
-
 	/*
 	 * We use the host_scribble field as a pointer to the next command
 	 * in a queue
@@ -992,7 +950,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)
 		cmd->host_scribble = (unsigned char *) hostdata->issue_queue;
 		hostdata->issue_queue = cmd;
 	} else {
-		for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble);
+		for (tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp->host_scribble; tmp = (struct scsi_cmnd *) tmp->host_scribble);
 		LIST(cmd, tmp);
 		tmp->host_scribble = (unsigned char *) cmd;
 	}
@@ -1023,7 +981,7 @@ static void NCR5380_main(struct work_struct *work)
 	struct NCR5380_hostdata *hostdata =
 		container_of(work, struct NCR5380_hostdata, coroutine.work);
 	struct Scsi_Host *instance = hostdata->host;
-	Scsi_Cmnd *tmp, *prev;
+	struct scsi_cmnd *tmp, *prev;
 	int done;
 
 	spin_lock_irq(instance->host_lock);
@@ -1036,7 +994,7 @@ static void NCR5380_main(struct work_struct *work)
 			 * Search through the issue_queue for a command destined
 			 * for a target that's not busy.
 			 */
-			for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
+			for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (struct scsi_cmnd *) tmp->host_scribble)
 			{
 				if (prev != tmp)
 					dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
@@ -1048,7 +1006,7 @@ static void NCR5380_main(struct work_struct *work)
 						prev->host_scribble = tmp->host_scribble;
 					} else {
 						REMOVE(-1, hostdata->issue_queue, tmp, tmp->host_scribble);
-						hostdata->issue_queue = (Scsi_Cmnd *) tmp->host_scribble;
+						hostdata->issue_queue = (struct scsi_cmnd *) tmp->host_scribble;
 					}
 					tmp->host_scribble = NULL;
 
@@ -1073,14 +1031,14 @@ static void NCR5380_main(struct work_struct *work)
 					hostdata->selecting = NULL;
 					/* RvC: have to preset this to indicate a new command is being performed */
 
-					if (!NCR5380_select(instance, tmp,
-							    /*
-							     * REQUEST SENSE commands are issued without tagged
-							     * queueing, even on SCSI-II devices because the
-							     * contingent allegiance condition exists for the
-							     * entire unit.
-							     */
-							    (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : TAG_NEXT)) {
+					/*
+					 * REQUEST SENSE commands are issued without tagged
+					 * queueing, even on SCSI-II devices because the
+					 * contingent allegiance condition exists for the
+					 * entire unit.
+					 */
+
+					if (!NCR5380_select(instance, tmp)) {
 						break;
 					} else {
 						LIST(tmp, hostdata->issue_queue);
@@ -1095,9 +1053,9 @@ static void NCR5380_main(struct work_struct *work)
 			/* exited locked */
 		}	/* if (!hostdata->connected) */
 		if (hostdata->selecting) {
-			tmp = (Scsi_Cmnd *) hostdata->selecting;
+			tmp = (struct scsi_cmnd *) hostdata->selecting;
 			/* Selection will drop and retake the lock */
-			if (!NCR5380_select(instance, tmp, (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : TAG_NEXT)) {
+			if (!NCR5380_select(instance, tmp)) {
 				/* Ok ?? */
 			} else {
 				/* RvC: device failed, so we wait a long time
@@ -1216,47 +1174,16 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
 
 #endif
 
-/**
- * collect_stats - collect stats on a scsi command
- * @hostdata: adapter
- * @cmd: command being issued
- *
- * Update the statistical data by parsing the command in question
- */
-
-static void collect_stats(struct NCR5380_hostdata *hostdata, Scsi_Cmnd * cmd)
-{
-#ifdef NCR5380_STATS
-	switch (cmd->cmnd[0]) {
-	case WRITE:
-	case WRITE_6:
-	case WRITE_10:
-		hostdata->time_write[scmd_id(cmd)] += (jiffies - hostdata->timebase);
-		hostdata->pendingw--;
-		break;
-	case READ:
-	case READ_6:
-	case READ_10:
-		hostdata->time_read[scmd_id(cmd)] += (jiffies - hostdata->timebase);
-		hostdata->pendingr--;
-		break;
-	}
-#endif
-}
-
-
 /*
- * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd,
- *	int tag);
+ * Function : int NCR5380_select(struct Scsi_Host *instance,
+ *                               struct scsi_cmnd *cmd)
  *
  * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
  *	including ARBITRATION, SELECTION, and initial message out for
  *	IDENTIFY and queue messages.
  *
  * Inputs : instance - instantiation of the 5380 driver on which this
- *	target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for
- *	new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
- *	the command that is presently connected.
+ *	target lives, cmd - SCSI command to execute.
  *
  * Returns : -1 if selection could not execute for some reason,
  *	0 if selection succeeded or failed because the target
@@ -1278,7 +1205,7 @@ static void collect_stats(struct NCR5380_hostdata *hostdata, Scsi_Cmnd * cmd)
  *	Locks: caller holds hostdata lock in IRQ mode
  */
 
-static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
+static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
 {
 	NCR5380_local_declare();
 	struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
@@ -1476,7 +1403,6 @@ part2:
 		return -1;
 	}
 	cmd->result = DID_BAD_TARGET << 16;
-	collect_stats(hostdata, cmd);
 	cmd->scsi_done(cmd);
 	NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 	dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no);
@@ -1513,7 +1439,7 @@ part2:
 	}
 
 	dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id);
-	tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 0 : 1), cmd->device->lun);
+	tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
 
 	len = 1;
 	cmd->tag = 0;
@@ -2086,7 +2012,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 #endif
 	unsigned char *data;
 	unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
-	Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected;
+	struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected;
 	/* RvC: we need to set the end of the polling time */
 	unsigned long poll_time = jiffies + USLEEP_POLL;
 
@@ -2228,7 +2154,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 					cmd->next_link->tag = cmd->tag;
 					cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
 					dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun);
-					collect_stats(hostdata, cmd);
 					cmd->scsi_done(cmd);
 					cmd = hostdata->connected;
 					break;
@@ -2263,7 +2188,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 					else if (status_byte(cmd->SCp.Status) != GOOD)
 						cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
 
-#ifdef AUTOSENSE
 					if ((cmd->cmnd[0] == REQUEST_SENSE) &&
 						hostdata->ses.cmd_len) {
 						scsi_eh_restore_cmnd(cmd, &hostdata->ses);
@@ -2278,12 +2202,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 						LIST(cmd, hostdata->issue_queue);
 						cmd->host_scribble = (unsigned char *)
 						    hostdata->issue_queue;
-						hostdata->issue_queue = (Scsi_Cmnd *) cmd;
+						hostdata->issue_queue = (struct scsi_cmnd *) cmd;
 						dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no);
-					} else
-#endif				/* def AUTOSENSE */
-					{
-						collect_stats(hostdata, cmd);
+					} else {
 						cmd->scsi_done(cmd);
 					}
 
@@ -2430,7 +2351,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 					hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF));
 					hostdata->connected = NULL;
 					cmd->result = DID_ERROR << 16;
-					collect_stats(hostdata, cmd);
 					cmd->scsi_done(cmd);
 					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 					return;
@@ -2479,7 +2399,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
  * Function : void NCR5380_reselect (struct Scsi_Host *instance)
  *
  * Purpose : does reselection, initializing the instance->connected
- *	field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q
+ *	field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q
  *	nexus has been reestablished,
  *
  * Inputs : instance - this instance of the NCR5380.
@@ -2496,7 +2416,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
 	int len;
 	unsigned char msg[3];
 	unsigned char *data;
-	Scsi_Cmnd *tmp = NULL, *prev;
+	struct scsi_cmnd *tmp = NULL, *prev;
 	int abort = 0;
 	NCR5380_setup(instance);
 
@@ -2562,7 +2482,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
 	 */
 
 
-	for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
+	for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (struct scsi_cmnd *) tmp->host_scribble)
 		if ((target_mask == (1 << tmp->device->id)) && (lun == (u8)tmp->device->lun)
 		    ) {
 			if (prev) {
@@ -2570,7 +2490,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
 				prev->host_scribble = tmp->host_scribble;
 			} else {
 				REMOVE(-1, hostdata->disconnected_queue, tmp, tmp->host_scribble);
-				hostdata->disconnected_queue = (Scsi_Cmnd *) tmp->host_scribble;
+				hostdata->disconnected_queue = (struct scsi_cmnd *) tmp->host_scribble;
 			}
 			tmp->host_scribble = NULL;
 			break;
@@ -2601,7 +2521,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
  *
  * Inputs : instance - this instance of the NCR5380.
  *
- * Returns : pointer to the Scsi_Cmnd structure for which the I_T_L
+ * Returns : pointer to the scsi_cmnd structure for which the I_T_L
  *	nexus has been reestablished, on failure NULL is returned.
  */
 
@@ -2643,11 +2563,11 @@ static void NCR5380_dma_complete(NCR5380_instance * instance) {
 #endif				/* def REAL_DMA */
 
 /*
- * Function : int NCR5380_abort (Scsi_Cmnd *cmd)
+ * Function : int NCR5380_abort (struct scsi_cmnd *cmd)
  *
  * Purpose : abort a command
  *
- * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
+ * Inputs : cmd - the scsi_cmnd to abort, code - code to set the
  *	host byte of the result field to, if zero DID_ABORTED is
  *	used.
  *
@@ -2661,11 +2581,12 @@ static void NCR5380_dma_complete(NCR5380_instance * instance) {
  *	Locks: host lock taken by caller
  */
 
-static int NCR5380_abort(Scsi_Cmnd * cmd) {
+static int NCR5380_abort(struct scsi_cmnd *cmd)
+{
 	NCR5380_local_declare();
 	struct Scsi_Host *instance = cmd->device->host;
 	struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
-	Scsi_Cmnd *tmp, **prev;
+	struct scsi_cmnd *tmp, **prev;
 
 	scmd_printk(KERN_WARNING, cmd, "aborting command\n");
 
@@ -2713,10 +2634,10 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
 	 */
 
 	dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no);
-	for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble)
+	for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue), tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp; prev = (struct scsi_cmnd **) &(tmp->host_scribble), tmp = (struct scsi_cmnd *) tmp->host_scribble)
 		if (cmd == tmp) {
 			REMOVE(5, *prev, tmp, tmp->host_scribble);
-			(*prev) = (Scsi_Cmnd *) tmp->host_scribble;
+			(*prev) = (struct scsi_cmnd *) tmp->host_scribble;
 			tmp->host_scribble = NULL;
 			tmp->result = DID_ABORT << 16;
 			dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no);
@@ -2769,20 +2690,20 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
 	 * it from the disconnected queue.
 	 */
 
-	for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
+	for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; tmp = (struct scsi_cmnd *) tmp->host_scribble)
 		if (cmd == tmp) {
 			dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no);
 
-			if (NCR5380_select(instance, cmd, (int) cmd->tag))
+			if (NCR5380_select(instance, cmd))
 				return FAILED;
 			dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no);
 
 			do_abort(instance);
 
-			for (prev = (Scsi_Cmnd **) & (hostdata->disconnected_queue), tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble)
+			for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue), tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; prev = (struct scsi_cmnd **) &(tmp->host_scribble), tmp = (struct scsi_cmnd *) tmp->host_scribble)
 				if (cmd == tmp) {
 					REMOVE(5, *prev, tmp, tmp->host_scribble);
-					*prev = (Scsi_Cmnd *) tmp->host_scribble;
+					*prev = (struct scsi_cmnd *) tmp->host_scribble;
 					tmp->host_scribble = NULL;
 					tmp->result = DID_ABORT << 16;
 					tmp->scsi_done(tmp);
@@ -2805,7 +2726,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
2805 2726
2806 2727
2807/* 2728/*
2808 * Function : int NCR5380_bus_reset (Scsi_Cmnd *cmd) 2729 * Function : int NCR5380_bus_reset (struct scsi_cmnd *cmd)
2809 * 2730 *
2810 * Purpose : reset the SCSI bus. 2731 * Purpose : reset the SCSI bus.
2811 * 2732 *
@@ -2814,7 +2735,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
2814 * Locks: host lock taken by caller 2735 * Locks: host lock taken by caller
2815 */ 2736 */
2816 2737
2817static int NCR5380_bus_reset(Scsi_Cmnd * cmd) 2738static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
2818{ 2739{
2819 struct Scsi_Host *instance = cmd->device->host; 2740 struct Scsi_Host *instance = cmd->device->host;
2820 2741
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index c79ddfa6f53c..162112dd1bf8 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -7,8 +7,6 @@
7 * drew@colorado.edu 7 * drew@colorado.edu
8 * +1 (303) 666-5836 8 * +1 (303) 666-5836
9 * 9 *
10 * DISTRIBUTION RELEASE 7
11 *
12 * For more information, please consult 10 * For more information, please consult
13 * 11 *
14 * NCR 5380 Family 12 * NCR 5380 Family
@@ -25,13 +23,7 @@
25#define NCR5380_H 23#define NCR5380_H
26 24
27#include <linux/interrupt.h> 25#include <linux/interrupt.h>
28
29#ifdef AUTOSENSE
30#include <scsi/scsi_eh.h> 26#include <scsi/scsi_eh.h>
31#endif
32
33#define NCR5380_PUBLIC_RELEASE 7
34#define NCR53C400_PUBLIC_RELEASE 2
35 27
36#define NDEBUG_ARBITRATION 0x1 28#define NDEBUG_ARBITRATION 0x1
37#define NDEBUG_AUTOSENSE 0x2 29#define NDEBUG_AUTOSENSE 0x2
@@ -224,33 +216,44 @@
224#define DISCONNECT_LONG 2 216#define DISCONNECT_LONG 2
225 217
226/* 218/*
227 * These are "special" values for the tag parameter passed to NCR5380_select. 219 * "Special" value for the (unsigned char) command tag, to indicate
220 * I_T_L nexus instead of I_T_L_Q.
228 */ 221 */
229 222
230#define TAG_NEXT -1 /* Use next free tag */ 223#define TAG_NONE 0xff
231#define TAG_NONE -2 /*
232 * Establish I_T_L nexus instead of I_T_L_Q
233 * even on SCSI-II devices.
234 */
235 224
236/* 225/*
237 * These are "special" values for the irq and dma_channel fields of the 226 * These are "special" values for the irq and dma_channel fields of the
238 * Scsi_Host structure 227 * Scsi_Host structure
239 */ 228 */
240 229
241#define SCSI_IRQ_NONE 255
242#define DMA_NONE 255 230#define DMA_NONE 255
243#define IRQ_AUTO 254 231#define IRQ_AUTO 254
244#define DMA_AUTO 254 232#define DMA_AUTO 254
245#define PORT_AUTO 0xffff /* autoprobe io port for 53c400a */ 233#define PORT_AUTO 0xffff /* autoprobe io port for 53c400a */
246 234
235#ifndef NO_IRQ
236#define NO_IRQ 0
237#endif
238
247#define FLAG_HAS_LAST_BYTE_SENT 1 /* NCR53c81 or better */ 239#define FLAG_HAS_LAST_BYTE_SENT 1 /* NCR53c81 or better */
248#define FLAG_CHECK_LAST_BYTE_SENT 2 /* Only test once */ 240#define FLAG_CHECK_LAST_BYTE_SENT 2 /* Only test once */
249#define FLAG_NCR53C400 4 /* NCR53c400 */ 241#define FLAG_NCR53C400 4 /* NCR53c400 */
250#define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */ 242#define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */
251#define FLAG_DTC3181E 16 /* DTC3181E */ 243#define FLAG_DTC3181E 16 /* DTC3181E */
244#define FLAG_LATE_DMA_SETUP 32 /* Setup NCR before DMA H/W */
245#define FLAG_TAGGED_QUEUING 64 /* as X3T9.2 spelled it */
252 246
253#ifndef ASM 247#ifndef ASM
248
249#ifdef SUPPORT_TAGS
250struct tag_alloc {
251 DECLARE_BITMAP(allocated, MAX_TAGS);
252 int nr_allocated;
253 int queue_size;
254};
255#endif
256
254struct NCR5380_hostdata { 257struct NCR5380_hostdata {
255 NCR5380_implementation_fields; /* implementation specific */ 258 NCR5380_implementation_fields; /* implementation specific */
256 struct Scsi_Host *host; /* Host backpointer */ 259 struct Scsi_Host *host; /* Host backpointer */
@@ -263,9 +266,9 @@ struct NCR5380_hostdata {
263 volatile int dma_len; /* requested length of DMA */ 266 volatile int dma_len; /* requested length of DMA */
264#endif 267#endif
265 volatile unsigned char last_message; /* last message OUT */ 268 volatile unsigned char last_message; /* last message OUT */
266 volatile Scsi_Cmnd *connected; /* currently connected command */ 269 volatile struct scsi_cmnd *connected; /* currently connected command */
267 volatile Scsi_Cmnd *issue_queue; /* waiting to be issued */ 270 volatile struct scsi_cmnd *issue_queue; /* waiting to be issued */
268 volatile Scsi_Cmnd *disconnected_queue; /* waiting for reconnect */ 271 volatile struct scsi_cmnd *disconnected_queue; /* waiting for reconnect */
269 volatile int restart_select; /* we have disconnected, 272 volatile int restart_select; /* we have disconnected,
270 used to restart 273 used to restart
271 NCR5380_select() */ 274 NCR5380_select() */
@@ -273,19 +276,21 @@ struct NCR5380_hostdata {
273 int flags; 276 int flags;
274 unsigned long time_expires; /* in jiffies, set prior to sleeping */ 277 unsigned long time_expires; /* in jiffies, set prior to sleeping */
275 int select_time; /* timer in select for target response */ 278 int select_time; /* timer in select for target response */
276 volatile Scsi_Cmnd *selecting; 279 volatile struct scsi_cmnd *selecting;
277 struct delayed_work coroutine; /* our co-routine */ 280 struct delayed_work coroutine; /* our co-routine */
278#ifdef NCR5380_STATS
279 unsigned timebase; /* Base for time calcs */
280 long time_read[8]; /* time to do reads */
281 long time_write[8]; /* time to do writes */
282 unsigned long bytes_read[8]; /* bytes read */
283 unsigned long bytes_write[8]; /* bytes written */
284 unsigned pendingr;
285 unsigned pendingw;
286#endif
287#ifdef AUTOSENSE
288 struct scsi_eh_save ses; 281 struct scsi_eh_save ses;
282 char info[256];
283 int read_overruns; /* number of bytes to cut from a
284 * transfer to handle chip overruns */
285 int retain_dma_intr;
286 struct work_struct main_task;
287 volatile int main_running;
288#ifdef SUPPORT_TAGS
289 struct tag_alloc TagAlloc[8][8]; /* 8 targets and 8 LUNs */
290#endif
291#ifdef PSEUDO_DMA
292 unsigned spin_max_r;
293 unsigned spin_max_w;
289#endif 294#endif
290}; 295};
291 296
@@ -296,7 +301,8 @@ struct NCR5380_hostdata {
296#endif 301#endif
297 302
298#define dprintk(flg, fmt, ...) \ 303#define dprintk(flg, fmt, ...) \
299 do { if ((NDEBUG) & (flg)) pr_debug(fmt, ## __VA_ARGS__); } while (0) 304 do { if ((NDEBUG) & (flg)) \
305 printk(KERN_DEBUG fmt, ## __VA_ARGS__); } while (0)
300 306
301#if NDEBUG 307#if NDEBUG
302#define NCR5380_dprint(flg, arg) \ 308#define NCR5380_dprint(flg, arg) \
@@ -320,17 +326,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance);
320static irqreturn_t NCR5380_intr(int irq, void *dev_id); 326static irqreturn_t NCR5380_intr(int irq, void *dev_id);
321#endif 327#endif
322static void NCR5380_main(struct work_struct *work); 328static void NCR5380_main(struct work_struct *work);
323static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance); 329static const char *NCR5380_info(struct Scsi_Host *instance);
324static int NCR5380_abort(Scsi_Cmnd * cmd);
325static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
326static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
327static int __maybe_unused NCR5380_show_info(struct seq_file *,
328 struct Scsi_Host *);
329static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
330 char *buffer, int length);
331
332static void NCR5380_reselect(struct Scsi_Host *instance); 330static void NCR5380_reselect(struct Scsi_Host *instance);
333static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag); 331static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd);
334#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL) 332#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL)
335static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); 333static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
336#endif 334#endif
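
The hunk above moves tag bookkeeping into struct NCR5380_hostdata: each
target/LUN pair owns a struct tag_alloc with a MAX_TAGS-wide bitmap. A
minimal sketch of handing out and releasing a tag with the kernel bitmap
helpers follows; the two function names are illustrative, only the struct
fields come from the header.

	/* Sketch: hand out the lowest free tag, or TAG_NONE to fall back
	 * to an untagged (I_T_L) nexus. ta points at
	 * hostdata->TagAlloc[target][lun]. */
	static int example_tag_get(struct tag_alloc *ta)
	{
		int tag = find_first_zero_bit(ta->allocated, MAX_TAGS);

		if (tag >= ta->queue_size)
			return TAG_NONE;
		set_bit(tag, ta->allocated);
		ta->nr_allocated++;
		return tag;
	}

	static void example_tag_put(struct tag_alloc *ta, int tag)
	{
		/* Clear the bit before decrementing nr_allocated; the
		 * driver's own comments require that ordering. */
		clear_bit(tag, ta->allocated);
		ta->nr_allocated--;
	}
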
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4c340d88c33d..fdcdf9f781bc 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -462,9 +462,9 @@ static int aac_slave_configure(struct scsi_device *sdev)
462 depth = 256; 462 depth = 256;
463 else if (depth < 2) 463 else if (depth < 2)
464 depth = 2; 464 depth = 2;
465 scsi_adjust_queue_depth(sdev, depth); 465 scsi_change_queue_depth(sdev, depth);
466 } else 466 } else
467 scsi_adjust_queue_depth(sdev, 1); 467 scsi_change_queue_depth(sdev, 1);
468 468
469 return 0; 469 return 0;
470} 470}
@@ -478,12 +478,8 @@ static int aac_slave_configure(struct scsi_device *sdev)
478 * total capacity and the queue depth supported by the target device. 478 * total capacity and the queue depth supported by the target device.
479 */ 479 */
480 480
481static int aac_change_queue_depth(struct scsi_device *sdev, int depth, 481static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
482 int reason)
483{ 482{
484 if (reason != SCSI_QDEPTH_DEFAULT)
485 return -EOPNOTSUPP;
486
487 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && 483 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
488 (sdev_channel(sdev) == CONTAINER_CHANNEL)) { 484 (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
489 struct scsi_device * dev; 485 struct scsi_device * dev;
@@ -504,10 +500,10 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth,
504 depth = 256; 500 depth = 256;
505 else if (depth < 2) 501 else if (depth < 2)
506 depth = 2; 502 depth = 2;
507 scsi_adjust_queue_depth(sdev, depth); 503 return scsi_change_queue_depth(sdev, depth);
508 } else 504 }
509 scsi_adjust_queue_depth(sdev, 1); 505
510 return sdev->queue_depth; 506 return scsi_change_queue_depth(sdev, 1);
511} 507}
512 508
513static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) 509static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
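
This is the conversion pattern repeated throughout the series (arcmsr below
follows it too): the three-argument .change_queue_depth callback and its
SCSI_QDEPTH_* reason codes give way to a two-argument callback that clamps
the depth and returns the result of scsi_change_queue_depth(). In sketch
form, with FOO_MAX_DEPTH standing in for a driver-specific limit rather
than a real symbol:

	static int foo_change_queue_depth(struct scsi_device *sdev, int depth)
	{
		if (depth > FOO_MAX_DEPTH)	/* placeholder limit */
			depth = FOO_MAX_DEPTH;
		return scsi_change_queue_depth(sdev, depth);
	}
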
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index ae4840e4c1c5..6719a3390ebd 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7706,7 +7706,7 @@ advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
7706 asc_dvc->cfg->can_tagged_qng |= tid_bit; 7706 asc_dvc->cfg->can_tagged_qng |= tid_bit;
7707 asc_dvc->use_tagged_qng |= tid_bit; 7707 asc_dvc->use_tagged_qng |= tid_bit;
7708 } 7708 }
7709 scsi_adjust_queue_depth(sdev, 7709 scsi_change_queue_depth(sdev,
7710 asc_dvc->max_dvc_qng[sdev->id]); 7710 asc_dvc->max_dvc_qng[sdev->id]);
7711 } 7711 }
7712 } else { 7712 } else {
@@ -7847,10 +7847,8 @@ advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
7847 } 7847 }
7848 } 7848 }
7849 7849
7850 if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) { 7850 if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported)
7851 scsi_adjust_queue_depth(sdev, 7851 scsi_change_queue_depth(sdev, adv_dvc->max_dvc_qng);
7852 adv_dvc->max_dvc_qng);
7853 }
7854} 7852}
7855 7853
7856/* 7854/*
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 80cb4fd7caaa..d5c7b193d8d3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1470,7 +1470,7 @@ ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
1470 switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) { 1470 switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
1471 case AHD_DEV_Q_BASIC: 1471 case AHD_DEV_Q_BASIC:
1472 case AHD_DEV_Q_TAGGED: 1472 case AHD_DEV_Q_TAGGED:
1473 scsi_adjust_queue_depth(sdev, 1473 scsi_change_queue_depth(sdev,
1474 dev->openings + dev->active); 1474 dev->openings + dev->active);
1475 break; 1475 break;
1476 default: 1476 default:
@@ -1480,7 +1480,7 @@ ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
1480 * serially on the controller/device. This should 1480 * serially on the controller/device. This should
1481 * remove some latency. 1481 * remove some latency.
1482 */ 1482 */
1483 scsi_adjust_queue_depth(sdev, 1); 1483 scsi_change_queue_depth(sdev, 1);
1484 break; 1484 break;
1485 } 1485 }
1486} 1486}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index a6a27d5398dd..88360116dbcb 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1336,7 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
1336 switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) { 1336 switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
1337 case AHC_DEV_Q_BASIC: 1337 case AHC_DEV_Q_BASIC:
1338 case AHC_DEV_Q_TAGGED: 1338 case AHC_DEV_Q_TAGGED:
1339 scsi_adjust_queue_depth(sdev, 1339 scsi_change_queue_depth(sdev,
1340 dev->openings + dev->active); 1340 dev->openings + dev->active);
1341 default: 1341 default:
1342 /* 1342 /*
@@ -1345,7 +1345,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
1345 * serially on the controller/device. This should 1345 * serially on the controller/device. This should
1346 * remove some latency. 1346 * remove some latency.
1347 */ 1347 */
1348 scsi_adjust_queue_depth(sdev, 2); 1348 scsi_change_queue_depth(sdev, 2);
1349 break; 1349 break;
1350 } 1350 }
1351} 1351}
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 66cda669b417..26d4ad9ede2e 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -78,7 +78,7 @@ void asd_dev_gone(struct domain_device *dev);
78 78
79void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id); 79void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id);
80 80
81int asd_execute_task(struct sas_task *, int num, gfp_t gfp_flags); 81int asd_execute_task(struct sas_task *task, gfp_t gfp_flags);
82 82
83void asd_set_dmamode(struct domain_device *dev); 83void asd_set_dmamode(struct domain_device *dev);
84 84
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 4df867e07b20..9f636a34d595 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1200,8 +1200,7 @@ static void asd_start_scb_timers(struct list_head *list)
1200 * Case A: we can send the whole batch at once. Increment "pending" 1200 * Case A: we can send the whole batch at once. Increment "pending"
1201 * in the beginning of this function, when it is checked, in order to 1201 * in the beginning of this function, when it is checked, in order to
1202 * eliminate races when this function is called by multiple processes. 1202 * eliminate races when this function is called by multiple processes.
1203 * Case B: should never happen if the managing layer considers 1203 * Case B: should never happen.
1204 * lldd_queue_size.
1205 */ 1204 */
1206int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, 1205int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
1207 int num) 1206 int num)
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 579dc2f460c4..14fc018436c2 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -49,14 +49,6 @@ MODULE_PARM_DESC(use_msi, "\n"
49 "\tEnable(1) or disable(0) using PCI MSI.\n" 49 "\tEnable(1) or disable(0) using PCI MSI.\n"
50 "\tDefault: 0"); 50 "\tDefault: 0");
51 51
52static int lldd_max_execute_num = 0;
53module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
54MODULE_PARM_DESC(collector, "\n"
55 "\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
56 "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
57 "\tThe aic94xx SAS LLDD supports both modes.\n"
58 "\tDefault: 0 (Direct Mode).\n");
59
60static struct scsi_transport_template *aic94xx_transport_template; 52static struct scsi_transport_template *aic94xx_transport_template;
61static int asd_scan_finished(struct Scsi_Host *, unsigned long); 53static int asd_scan_finished(struct Scsi_Host *, unsigned long);
62static void asd_scan_start(struct Scsi_Host *); 54static void asd_scan_start(struct Scsi_Host *);
@@ -84,6 +76,7 @@ static struct scsi_host_template aic94xx_sht = {
84 .target_destroy = sas_target_destroy, 76 .target_destroy = sas_target_destroy,
85 .ioctl = sas_ioctl, 77 .ioctl = sas_ioctl,
86 .use_blk_tags = 1, 78 .use_blk_tags = 1,
79 .track_queue_depth = 1,
87}; 80};
88 81
89static int asd_map_memio(struct asd_ha_struct *asd_ha) 82static int asd_map_memio(struct asd_ha_struct *asd_ha)
@@ -710,9 +703,6 @@ static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
710 asd_ha->sas_ha.sas_port= sas_ports; 703 asd_ha->sas_ha.sas_port= sas_ports;
711 asd_ha->sas_ha.num_phys= ASD_MAX_PHYS; 704 asd_ha->sas_ha.num_phys= ASD_MAX_PHYS;
712 705
713 asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue;
714 asd_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num;
715
716 return sas_register_ha(&asd_ha->sas_ha); 706 return sas_register_ha(&asd_ha->sas_ha);
717} 707}
718 708
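
With lldd_queue_size and the collector module parameter gone, queue
accounting is left to the midlayer; a host template opts in through the
two flags added above. Just those fields, other members elided:

	static struct scsi_host_template example_sht = {
		/* ... */
		.use_blk_tags		= 1,	/* block layer allocates tags */
		.track_queue_depth	= 1,	/* midlayer tracks outstanding commands */
	};
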
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 59b86e260ce9..5ff1ce7ba1f4 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -543,8 +543,7 @@ static int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
543 return res; 543 return res;
544} 544}
545 545
546int asd_execute_task(struct sas_task *task, const int num, 546int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
547 gfp_t gfp_flags)
548{ 547{
549 int res = 0; 548 int res = 0;
550 LIST_HEAD(alist); 549 LIST_HEAD(alist);
@@ -553,11 +552,11 @@ int asd_execute_task(struct sas_task *task, const int num,
553 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; 552 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
554 unsigned long flags; 553 unsigned long flags;
555 554
556 res = asd_can_queue(asd_ha, num); 555 res = asd_can_queue(asd_ha, 1);
557 if (res) 556 if (res)
558 return res; 557 return res;
559 558
560 res = num; 559 res = 1;
561 ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags); 560 ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
562 if (res) { 561 if (res) {
563 res = -ENOMEM; 562 res = -ENOMEM;
@@ -568,7 +567,7 @@ int asd_execute_task(struct sas_task *task, const int num,
568 list_for_each_entry(a, &alist, list) { 567 list_for_each_entry(a, &alist, list) {
569 a->uldd_task = t; 568 a->uldd_task = t;
570 t->lldd_task = a; 569 t->lldd_task = a;
571 t = list_entry(t->list.next, struct sas_task, list); 570 break;
572 } 571 }
573 list_for_each_entry(a, &alist, list) { 572 list_for_each_entry(a, &alist, list) {
574 t = a->uldd_task; 573 t = a->uldd_task;
@@ -601,7 +600,7 @@ int asd_execute_task(struct sas_task *task, const int num,
601 } 600 }
602 list_del_init(&alist); 601 list_del_init(&alist);
603 602
604 res = asd_post_ascb_list(asd_ha, ascb, num); 603 res = asd_post_ascb_list(asd_ha, ascb, 1);
605 if (unlikely(res)) { 604 if (unlikely(res)) {
606 a = NULL; 605 a = NULL;
607 __list_add(&alist, ascb->list.prev, &ascb->list); 606 __list_add(&alist, ascb->list.prev, &ascb->list);
@@ -639,6 +638,6 @@ out_err_unmap:
639out_err: 638out_err:
640 if (ascb) 639 if (ascb)
641 asd_ascb_free_list(ascb); 640 asd_ascb_free_list(ascb);
642 asd_can_dequeue(asd_ha, num); 641 asd_can_dequeue(asd_ha, 1);
643 return res; 642 return res;
644} 643}
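
After this change asd_execute_task() always posts exactly one task, so the
old batch-oriented list walking degenerates to a single iteration. A
caller-side sketch under the one-task-per-call convention; in practice the
call arrives through the libsas lldd_execute_task pointer, and the task
setup is elided here:

	struct sas_task *task = sas_alloc_task(GFP_KERNEL);
	int res;

	if (!task)
		return -ENOMEM;
	/* ... fill in task->dev, task->task_proto and the data buffers ... */
	res = asd_execute_task(task, GFP_KERNEL);
	if (res)
		sas_free_task(task);	/* the LLDD did not take ownership */
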
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
new file mode 100644
index 000000000000..aa3e2c7cd83c
--- /dev/null
+++ b/drivers/scsi/am53c974.c
@@ -0,0 +1,586 @@
1/*
2 * AMD am53c974 driver.
3 * Copyright (c) 2014 Hannes Reinecke, SUSE Linux GmbH
4 */
5
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/delay.h>
10#include <linux/pci.h>
11#include <linux/interrupt.h>
12
13#include <scsi/scsi_host.h>
14
15#include "esp_scsi.h"
16
17#define DRV_MODULE_NAME "am53c974"
18#define DRV_MODULE_VERSION "1.00"
19
20static bool am53c974_debug;
21static bool am53c974_fenab = true;
22
23#define esp_dma_log(f, a...) \
24 do { \
25 if (am53c974_debug) \
26 shost_printk(KERN_DEBUG, esp->host, f, ##a); \
27 } while (0)
28
29#define ESP_DMA_CMD 0x10
30#define ESP_DMA_STC 0x11
31#define ESP_DMA_SPA 0x12
32#define ESP_DMA_WBC 0x13
33#define ESP_DMA_WAC 0x14
34#define ESP_DMA_STATUS 0x15
35#define ESP_DMA_SMDLA 0x16
36#define ESP_DMA_WMAC 0x17
37
38#define ESP_DMA_CMD_IDLE 0x00
39#define ESP_DMA_CMD_BLAST 0x01
40#define ESP_DMA_CMD_ABORT 0x02
41#define ESP_DMA_CMD_START 0x03
42#define ESP_DMA_CMD_MASK 0x03
43#define ESP_DMA_CMD_DIAG 0x04
44#define ESP_DMA_CMD_MDL 0x10
45#define ESP_DMA_CMD_INTE_P 0x20
46#define ESP_DMA_CMD_INTE_D 0x40
47#define ESP_DMA_CMD_DIR 0x80
48
49#define ESP_DMA_STAT_PWDN 0x01
50#define ESP_DMA_STAT_ERROR 0x02
51#define ESP_DMA_STAT_ABORT 0x04
52#define ESP_DMA_STAT_DONE 0x08
53#define ESP_DMA_STAT_SCSIINT 0x10
54#define ESP_DMA_STAT_BCMPLT 0x20
55
56/* EEPROM is accessed with 16-bit values */
57#define DC390_EEPROM_READ 0x80
58#define DC390_EEPROM_LEN 0x40
59
60/*
61 * DC390 EEPROM
62 *
63 * 8 * 4 bytes of per-device options
64 * followed by HBA specific options
65 */
66
67/* Per-device options */
68#define DC390_EE_MODE1 0x00
69#define DC390_EE_SPEED 0x01
70
71/* HBA-specific options */
72#define DC390_EE_ADAPT_SCSI_ID 0x40
73#define DC390_EE_MODE2 0x41
74#define DC390_EE_DELAY 0x42
75#define DC390_EE_TAG_CMD_NUM 0x43
76
77#define DC390_EE_MODE1_PARITY_CHK 0x01
78#define DC390_EE_MODE1_SYNC_NEGO 0x02
79#define DC390_EE_MODE1_EN_DISC 0x04
80#define DC390_EE_MODE1_SEND_START 0x08
81#define DC390_EE_MODE1_TCQ 0x10
82
83#define DC390_EE_MODE2_MORE_2DRV 0x01
84#define DC390_EE_MODE2_GREATER_1G 0x02
85#define DC390_EE_MODE2_RST_SCSI_BUS 0x04
86#define DC390_EE_MODE2_ACTIVE_NEGATION 0x08
87#define DC390_EE_MODE2_NO_SEEK 0x10
88#define DC390_EE_MODE2_LUN_CHECK 0x20
89
90struct pci_esp_priv {
91 struct esp *esp;
92 u8 dma_status;
93};
94
95static void pci_esp_dma_drain(struct esp *esp);
96
97static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
98{
99 struct pci_dev *pdev = esp->dev;
100
101 return pci_get_drvdata(pdev);
102}
103
104static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
105{
106 iowrite8(val, esp->regs + (reg * 4UL));
107}
108
109static u8 pci_esp_read8(struct esp *esp, unsigned long reg)
110{
111 return ioread8(esp->regs + (reg * 4UL));
112}
113
114static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
115{
116 return iowrite32(val, esp->regs + (reg * 4UL));
117}
118
119static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf,
120 size_t sz, int dir)
121{
122 return pci_map_single(esp->dev, buf, sz, dir);
123}
124
125static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg,
126 int num_sg, int dir)
127{
128 return pci_map_sg(esp->dev, sg, num_sg, dir);
129}
130
131static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr,
132 size_t sz, int dir)
133{
134 pci_unmap_single(esp->dev, addr, sz, dir);
135}
136
137static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
138 int num_sg, int dir)
139{
140 pci_unmap_sg(esp->dev, sg, num_sg, dir);
141}
142
143static int pci_esp_irq_pending(struct esp *esp)
144{
145 struct pci_esp_priv *pep = pci_esp_get_priv(esp);
146
147 pep->dma_status = pci_esp_read8(esp, ESP_DMA_STATUS);
148 esp_dma_log("dma intr dreg[%02x]\n", pep->dma_status);
149
150 if (pep->dma_status & (ESP_DMA_STAT_ERROR |
151 ESP_DMA_STAT_ABORT |
152 ESP_DMA_STAT_DONE |
153 ESP_DMA_STAT_SCSIINT))
154 return 1;
155
156 return 0;
157}
158
159static void pci_esp_reset_dma(struct esp *esp)
160{
 161 /* Nothing to do? */
162}
163
164static void pci_esp_dma_drain(struct esp *esp)
165{
166 u8 resid;
167 int lim = 1000;
168
169
170 if ((esp->sreg & ESP_STAT_PMASK) == ESP_DOP ||
171 (esp->sreg & ESP_STAT_PMASK) == ESP_DIP)
172 /* Data-In or Data-Out, nothing to be done */
173 return;
174
175 while (--lim > 0) {
176 resid = pci_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES;
177 if (resid <= 1)
178 break;
179 cpu_relax();
180 }
181 if (resid > 1) {
182 /* FIFO not cleared */
183 shost_printk(KERN_INFO, esp->host,
184 "FIFO not cleared, %d bytes left\n",
185 resid);
186 }
187
188 /*
 189 * When there is a residual, BCMPLT will never be set
 190 * (obviously). But we still have to issue the BLAST
 191 * command, otherwise the data will not be transferred.
 192 * However, we'll never know when the BLAST operation is
 193 * finished. So check for some time and give up eventually.
194 */
195 lim = 1000;
196 pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_BLAST, ESP_DMA_CMD);
197 while (pci_esp_read8(esp, ESP_DMA_STATUS) & ESP_DMA_STAT_BCMPLT) {
198 if (--lim == 0)
199 break;
200 cpu_relax();
201 }
202 pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
203 esp_dma_log("DMA blast done (%d tries, %d bytes left)\n", lim, resid);
204 /* BLAST residual handling is currently untested */
205 if (WARN_ON_ONCE(resid == 1)) {
206 struct esp_cmd_entry *ent = esp->active_cmd;
207
208 ent->flags |= ESP_CMD_FLAG_RESIDUAL;
209 }
210}
211
212static void pci_esp_dma_invalidate(struct esp *esp)
213{
214 struct pci_esp_priv *pep = pci_esp_get_priv(esp);
215
216 esp_dma_log("invalidate DMA\n");
217
218 pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
219 pep->dma_status = 0;
220}
221
222static int pci_esp_dma_error(struct esp *esp)
223{
224 struct pci_esp_priv *pep = pci_esp_get_priv(esp);
225
226 if (pep->dma_status & ESP_DMA_STAT_ERROR) {
227 u8 dma_cmd = pci_esp_read8(esp, ESP_DMA_CMD);
228
229 if ((dma_cmd & ESP_DMA_CMD_MASK) == ESP_DMA_CMD_START)
230 pci_esp_write8(esp, ESP_DMA_CMD_ABORT, ESP_DMA_CMD);
231
232 return 1;
233 }
234 if (pep->dma_status & ESP_DMA_STAT_ABORT) {
235 pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
236 pep->dma_status = pci_esp_read8(esp, ESP_DMA_CMD);
237 return 1;
238 }
239 return 0;
240}
241
242static void pci_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
243 u32 dma_count, int write, u8 cmd)
244{
245 struct pci_esp_priv *pep = pci_esp_get_priv(esp);
246 u32 val = 0;
247
248 BUG_ON(!(cmd & ESP_CMD_DMA));
249
250 pep->dma_status = 0;
251
252 /* Set DMA engine to IDLE */
253 if (write)
254 /* DMA write direction logic is inverted */
255 val |= ESP_DMA_CMD_DIR;
256 pci_esp_write8(esp, ESP_DMA_CMD_IDLE | val, ESP_DMA_CMD);
257
258 pci_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
259 pci_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
260 if (esp->config2 & ESP_CONFIG2_FENAB)
261 pci_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
262
263 pci_esp_write32(esp, esp_count, ESP_DMA_STC);
264 pci_esp_write32(esp, addr, ESP_DMA_SPA);
265
266 esp_dma_log("start dma addr[%x] count[%d:%d]\n",
267 addr, esp_count, dma_count);
268
269 scsi_esp_cmd(esp, cmd);
270 /* Send DMA Start command */
271 pci_esp_write8(esp, ESP_DMA_CMD_START | val, ESP_DMA_CMD);
272}
273
274static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
275{
276 int dma_limit = 16;
277 u32 base, end;
278
279 /*
 280 * If CONFIG2_FENAB is set, we can
 281 * handle up to 24-bit addresses
282 */
283 if (esp->config2 & ESP_CONFIG2_FENAB)
284 dma_limit = 24;
285
286 if (dma_len > (1U << dma_limit))
287 dma_len = (1U << dma_limit);
288
289 /*
290 * Prevent crossing a 24-bit address boundary.
291 */
292 base = dma_addr & ((1U << 24) - 1U);
293 end = base + dma_len;
294 if (end > (1U << 24))
 295 end = (1U << 24);
296 dma_len = end - base;
297
298 return dma_len;
299}
300
301static const struct esp_driver_ops pci_esp_ops = {
302 .esp_write8 = pci_esp_write8,
303 .esp_read8 = pci_esp_read8,
304 .map_single = pci_esp_map_single,
305 .map_sg = pci_esp_map_sg,
306 .unmap_single = pci_esp_unmap_single,
307 .unmap_sg = pci_esp_unmap_sg,
308 .irq_pending = pci_esp_irq_pending,
309 .reset_dma = pci_esp_reset_dma,
310 .dma_drain = pci_esp_dma_drain,
311 .dma_invalidate = pci_esp_dma_invalidate,
312 .send_dma_cmd = pci_esp_send_dma_cmd,
313 .dma_error = pci_esp_dma_error,
314 .dma_length_limit = pci_esp_dma_length_limit,
315};
316
317/*
 318 * Read the DC-390 EEPROM
319 */
320static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
321{
322 u8 carry_flag = 1, j = 0x80, bval;
323 int i;
324
325 for (i = 0; i < 9; i++) {
326 if (carry_flag) {
327 pci_write_config_byte(pdev, 0x80, 0x40);
328 bval = 0xc0;
329 } else
330 bval = 0x80;
331
332 udelay(160);
333 pci_write_config_byte(pdev, 0x80, bval);
334 udelay(160);
335 pci_write_config_byte(pdev, 0x80, 0);
336 udelay(160);
337
338 carry_flag = (cmd & j) ? 1 : 0;
339 j >>= 1;
340 }
341}
342
343static u16 dc390_eeprom_get_data(struct pci_dev *pdev)
344{
345 int i;
346 u16 wval = 0;
347 u8 bval;
348
349 for (i = 0; i < 16; i++) {
350 wval <<= 1;
351
352 pci_write_config_byte(pdev, 0x80, 0x80);
353 udelay(160);
354 pci_write_config_byte(pdev, 0x80, 0x40);
355 udelay(160);
356 pci_read_config_byte(pdev, 0x00, &bval);
357
358 if (bval == 0x22)
359 wval |= 1;
360 }
361
362 return wval;
363}
364
365static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
366{
367 u8 cmd = DC390_EEPROM_READ, i;
368
369 for (i = 0; i < DC390_EEPROM_LEN; i++) {
370 pci_write_config_byte(pdev, 0xc0, 0);
371 udelay(160);
372
373 dc390_eeprom_prepare_read(pdev, cmd++);
374 *ptr++ = dc390_eeprom_get_data(pdev);
375
376 pci_write_config_byte(pdev, 0x80, 0);
377 pci_write_config_byte(pdev, 0x80, 0);
378 udelay(160);
379 }
380}
381
382static void dc390_check_eeprom(struct esp *esp)
383{
384 u8 EEbuf[128];
385 u16 *ptr = (u16 *)EEbuf, wval = 0;
386 int i;
387
388 dc390_read_eeprom((struct pci_dev *)esp->dev, ptr);
389
390 for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
391 wval += *ptr;
392
393 /* no Tekram EEprom found */
394 if (wval != 0x1234) {
395 struct pci_dev *pdev = esp->dev;
396 dev_printk(KERN_INFO, &pdev->dev,
397 "No valid Tekram EEprom found\n");
398 return;
399 }
400 esp->scsi_id = EEbuf[DC390_EE_ADAPT_SCSI_ID];
401 esp->num_tags = 2 << EEbuf[DC390_EE_TAG_CMD_NUM];
402 if (EEbuf[DC390_EE_MODE2] & DC390_EE_MODE2_ACTIVE_NEGATION)
403 esp->config4 |= ESP_CONFIG4_RADE | ESP_CONFIG4_RAE;
404}
405
406static int pci_esp_probe_one(struct pci_dev *pdev,
407 const struct pci_device_id *id)
408{
409 struct scsi_host_template *hostt = &scsi_esp_template;
410 int err = -ENODEV;
411 struct Scsi_Host *shost;
412 struct esp *esp;
413 struct pci_esp_priv *pep;
414
415 if (pci_enable_device(pdev)) {
416 dev_printk(KERN_INFO, &pdev->dev, "cannot enable device\n");
417 return -ENODEV;
418 }
419
420 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
421 dev_printk(KERN_INFO, &pdev->dev,
422 "failed to set 32bit DMA mask\n");
423 goto fail_disable_device;
424 }
425
426 shost = scsi_host_alloc(hostt, sizeof(struct esp));
427 if (!shost) {
428 dev_printk(KERN_INFO, &pdev->dev,
429 "failed to allocate scsi host\n");
430 err = -ENOMEM;
431 goto fail_disable_device;
432 }
433
434 pep = kzalloc(sizeof(struct pci_esp_priv), GFP_KERNEL);
435 if (!pep) {
436 dev_printk(KERN_INFO, &pdev->dev,
437 "failed to allocate esp_priv\n");
438 err = -ENOMEM;
439 goto fail_host_alloc;
440 }
441
442 esp = shost_priv(shost);
443 esp->host = shost;
444 esp->dev = pdev;
445 esp->ops = &pci_esp_ops;
446 /*
447 * The am53c974 HBA has a design flaw of generating
448 * spurious DMA completion interrupts when using
449 * DMA for command submission.
450 */
451 esp->flags |= ESP_FLAG_USE_FIFO;
452 /*
453 * Enable CONFIG2_FENAB to allow for large DMA transfers
454 */
455 if (am53c974_fenab)
456 esp->config2 |= ESP_CONFIG2_FENAB;
457
458 pep->esp = esp;
459
460 if (pci_request_regions(pdev, DRV_MODULE_NAME)) {
461 dev_printk(KERN_ERR, &pdev->dev,
462 "pci memory selection failed\n");
463 goto fail_priv_alloc;
464 }
465
466 esp->regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
467 if (!esp->regs) {
468 dev_printk(KERN_ERR, &pdev->dev, "pci I/O map failed\n");
469 err = -EINVAL;
470 goto fail_release_regions;
471 }
472 esp->dma_regs = esp->regs;
473
474 pci_set_master(pdev);
475
476 esp->command_block = pci_alloc_consistent(pdev, 16,
477 &esp->command_block_dma);
478 if (!esp->command_block) {
479 dev_printk(KERN_ERR, &pdev->dev,
480 "failed to allocate command block\n");
481 err = -ENOMEM;
482 goto fail_unmap_regs;
483 }
484
485 err = request_irq(pdev->irq, scsi_esp_intr, IRQF_SHARED,
486 DRV_MODULE_NAME, esp);
487 if (err < 0) {
488 dev_printk(KERN_ERR, &pdev->dev, "failed to register IRQ\n");
489 goto fail_unmap_command_block;
490 }
491
492 esp->scsi_id = 7;
493 dc390_check_eeprom(esp);
494
495 shost->this_id = esp->scsi_id;
496 shost->max_id = 8;
497 shost->irq = pdev->irq;
498 shost->io_port = pci_resource_start(pdev, 0);
499 shost->n_io_port = pci_resource_len(pdev, 0);
500 shost->unique_id = shost->io_port;
501 esp->scsi_id_mask = (1 << esp->scsi_id);
502 /* Assume 40MHz clock */
503 esp->cfreq = 40000000;
504
505 pci_set_drvdata(pdev, pep);
506
507 err = scsi_esp_register(esp, &pdev->dev);
508 if (err)
509 goto fail_free_irq;
510
511 return 0;
512
513fail_free_irq:
514 free_irq(pdev->irq, esp);
515fail_unmap_command_block:
516 pci_free_consistent(pdev, 16, esp->command_block,
517 esp->command_block_dma);
518fail_unmap_regs:
519 pci_iounmap(pdev, esp->regs);
520fail_release_regions:
521 pci_release_regions(pdev);
522fail_priv_alloc:
523 kfree(pep);
524fail_host_alloc:
525 scsi_host_put(shost);
526fail_disable_device:
527 pci_disable_device(pdev);
528
529 return err;
530}
531
532static void pci_esp_remove_one(struct pci_dev *pdev)
533{
534 struct pci_esp_priv *pep = pci_get_drvdata(pdev);
535 struct esp *esp = pep->esp;
536
537 scsi_esp_unregister(esp);
538 free_irq(pdev->irq, esp);
539 pci_free_consistent(pdev, 16, esp->command_block,
540 esp->command_block_dma);
541 pci_iounmap(pdev, esp->regs);
542 pci_release_regions(pdev);
543 pci_disable_device(pdev);
544 kfree(pep);
545
546 scsi_host_put(esp->host);
547}
548
549static struct pci_device_id am53c974_pci_tbl[] = {
550 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI,
551 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
552 { }
553};
554MODULE_DEVICE_TABLE(pci, am53c974_pci_tbl);
555
556static struct pci_driver am53c974_driver = {
557 .name = DRV_MODULE_NAME,
558 .id_table = am53c974_pci_tbl,
559 .probe = pci_esp_probe_one,
560 .remove = pci_esp_remove_one,
561};
562
563static int __init am53c974_module_init(void)
564{
565 return pci_register_driver(&am53c974_driver);
566}
567
568static void __exit am53c974_module_exit(void)
569{
570 pci_unregister_driver(&am53c974_driver);
571}
572
573MODULE_DESCRIPTION("AM53C974 SCSI driver");
574MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
575MODULE_LICENSE("GPL");
576MODULE_VERSION(DRV_MODULE_VERSION);
577MODULE_ALIAS("tmscsim");
578
579module_param(am53c974_debug, bool, 0644);
580MODULE_PARM_DESC(am53c974_debug, "Enable debugging");
581
582module_param(am53c974_fenab, bool, 0444);
583MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes");
584
585module_init(am53c974_module_init);
586module_exit(am53c974_module_exit);
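
pci_esp_dma_length_limit() above clamps every transfer twice: to the
chip's transfer counter width (16 bits, or 24 with FENAB) and to the next
24-bit address boundary. A worked example of the boundary clamp, assuming
FENAB is enabled:

	/* dma_addr = 0x00fff000, dma_len = 0x2000, dma_limit = 24:
	 *
	 *   base    = dma_addr & 0x00ffffff = 0x00fff000
	 *   end     = base + dma_len        = 0x01001000, clamped to 0x01000000
	 *   dma_len = end - base            = 0x00001000
	 *
	 * Only 4 KiB move in this round; the esp core issues the
	 * remainder as a follow-up transfer.
	 */
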
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 209f77162d06..914c39f9f388 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -114,16 +114,11 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
114static const char *arcmsr_info(struct Scsi_Host *); 114static const char *arcmsr_info(struct Scsi_Host *);
115static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); 115static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
116static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *); 116static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
117static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, 117static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
118 int queue_depth, int reason)
119{ 118{
120 if (reason != SCSI_QDEPTH_DEFAULT)
121 return -EOPNOTSUPP;
122
123 if (queue_depth > ARCMSR_MAX_CMD_PERLUN) 119 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
124 queue_depth = ARCMSR_MAX_CMD_PERLUN; 120 queue_depth = ARCMSR_MAX_CMD_PERLUN;
125 scsi_adjust_queue_depth(sdev, queue_depth); 121 return scsi_change_queue_depth(sdev, queue_depth);
126 return queue_depth;
127} 122}
128 123
129static struct scsi_host_template arcmsr_scsi_host_template = { 124static struct scsi_host_template arcmsr_scsi_host_template = {
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 8ef810a4476e..d28d6c0f18c0 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -13,16 +13,12 @@
13#include <asm/ecard.h> 13#include <asm/ecard.h>
14#include <asm/io.h> 14#include <asm/io.h>
15 15
16#include "../scsi.h"
17#include <scsi/scsi_host.h> 16#include <scsi/scsi_host.h>
18 17
19#include <scsi/scsicam.h> 18#include <scsi/scsicam.h>
20 19
21#define AUTOSENSE
22#define PSEUDO_DMA 20#define PSEUDO_DMA
23 21
24#define CUMANASCSI_PUBLIC_RELEASE 1
25
26#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) 22#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
27#define NCR5380_local_declare() struct Scsi_Host *_instance 23#define NCR5380_local_declare() struct Scsi_Host *_instance
28#define NCR5380_setup(instance) _instance = instance 24#define NCR5380_setup(instance) _instance = instance
@@ -30,6 +26,7 @@
30#define NCR5380_write(reg, value) cumanascsi_write(_instance, reg, value) 26#define NCR5380_write(reg, value) cumanascsi_write(_instance, reg, value)
31#define NCR5380_intr cumanascsi_intr 27#define NCR5380_intr cumanascsi_intr
32#define NCR5380_queue_command cumanascsi_queue_command 28#define NCR5380_queue_command cumanascsi_queue_command
29#define NCR5380_info cumanascsi_info
33 30
34#define NCR5380_implementation_fields \ 31#define NCR5380_implementation_fields \
35 unsigned ctrl; \ 32 unsigned ctrl; \
@@ -42,11 +39,6 @@ void cumanascsi_setup(char *str, int *ints)
42{ 39{
43} 40}
44 41
45const char *cumanascsi_info(struct Scsi_Host *spnt)
46{
47 return "";
48}
49
50#define CTRL 0x16fc 42#define CTRL 0x16fc
51#define STAT 0x2004 43#define STAT 0x2004
52#define L(v) (((v)<<16)|((v) & 0x0000ffff)) 44#define L(v) (((v)<<16)|((v) & 0x0000ffff))
@@ -267,14 +259,6 @@ static int cumanascsi1_probe(struct expansion_card *ec,
267 goto out_unmap; 259 goto out_unmap;
268 } 260 }
269 261
270 printk("scsi%d: at port 0x%08lx irq %d",
271 host->host_no, host->io_port, host->irq);
272 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
273 host->can_queue, host->cmd_per_lun, CUMANASCSI_PUBLIC_RELEASE);
274 printk("\nscsi%d:", host->host_no);
275 NCR5380_print_options(host);
276 printk("\n");
277
278 ret = scsi_add_host(host, &ec->dev); 262 ret = scsi_add_host(host, &ec->dev);
279 if (ret) 263 if (ret)
280 goto out_free_irq; 264 goto out_free_irq;
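
The empty cumanascsi_info() stub goes away: the board code now points the
NCR5380_info macro at its own symbol and the core provides the one
implementation. Roughly, the core side looks like this sketch, returning
the char info[256] buffer added to the hostdata earlier in the series:

	static const char *NCR5380_info(struct Scsi_Host *instance)
	{
		struct NCR5380_hostdata *hostdata =
			(struct NCR5380_hostdata *)instance->hostdata;

		return hostdata->info;	/* formatted once during init */
	}
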
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 188e734c7ff0..7c6fa1479c9c 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -14,13 +14,9 @@
14#include <asm/ecard.h> 14#include <asm/ecard.h>
15#include <asm/io.h> 15#include <asm/io.h>
16 16
17#include "../scsi.h"
18#include <scsi/scsi_host.h> 17#include <scsi/scsi_host.h>
19 18
20#define AUTOSENSE
21/*#define PSEUDO_DMA*/ 19/*#define PSEUDO_DMA*/
22
23#define OAKSCSI_PUBLIC_RELEASE 1
24#define DONT_USE_INTR 20#define DONT_USE_INTR
25 21
26#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) 22#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
@@ -29,10 +25,9 @@
29 25
30#define NCR5380_read(reg) readb(_base + ((reg) << 2)) 26#define NCR5380_read(reg) readb(_base + ((reg) << 2))
31#define NCR5380_write(reg, value) writeb(value, _base + ((reg) << 2)) 27#define NCR5380_write(reg, value) writeb(value, _base + ((reg) << 2))
32#define NCR5380_intr oakscsi_intr
33#define NCR5380_queue_command oakscsi_queue_command 28#define NCR5380_queue_command oakscsi_queue_command
29#define NCR5380_info oakscsi_info
34#define NCR5380_show_info oakscsi_show_info 30#define NCR5380_show_info oakscsi_show_info
35#define NCR5380_write_info oakscsi_write_info
36 31
37#define NCR5380_implementation_fields \ 32#define NCR5380_implementation_fields \
38 void __iomem *base 33 void __iomem *base
@@ -42,11 +37,6 @@
42#undef START_DMA_INITIATOR_RECEIVE_REG 37#undef START_DMA_INITIATOR_RECEIVE_REG
43#define START_DMA_INITIATOR_RECEIVE_REG (128 + 7) 38#define START_DMA_INITIATOR_RECEIVE_REG (128 + 7)
44 39
45const char * oakscsi_info (struct Scsi_Host *spnt)
46{
47 return "";
48}
49
50#define STAT ((128 + 16) << 2) 40#define STAT ((128 + 16) << 2)
51#define DATA ((128 + 8) << 2) 41#define DATA ((128 + 8) << 2)
52 42
@@ -114,7 +104,6 @@ printk("reading %p len %d\n", addr, len);
114static struct scsi_host_template oakscsi_template = { 104static struct scsi_host_template oakscsi_template = {
115 .module = THIS_MODULE, 105 .module = THIS_MODULE,
116 .show_info = oakscsi_show_info, 106 .show_info = oakscsi_show_info,
117 .write_info = oakscsi_write_info,
118 .name = "Oak 16-bit SCSI", 107 .name = "Oak 16-bit SCSI",
119 .info = oakscsi_info, 108 .info = oakscsi_info,
120 .queuecommand = oakscsi_queue_command, 109 .queuecommand = oakscsi_queue_command,
@@ -150,19 +139,11 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
150 goto unreg; 139 goto unreg;
151 } 140 }
152 141
153 host->irq = IRQ_NONE; 142 host->irq = NO_IRQ;
154 host->n_io_port = 255; 143 host->n_io_port = 255;
155 144
156 NCR5380_init(host, 0); 145 NCR5380_init(host, 0);
157 146
158 printk("scsi%d: at port 0x%08lx irqs disabled",
159 host->host_no, host->io_port);
160 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
161 host->can_queue, host->cmd_per_lun, OAKSCSI_PUBLIC_RELEASE);
162 printk("\nscsi%d:", host->host_no);
163 NCR5380_print_options(host);
164 printk("\n");
165
166 ret = scsi_add_host(host, &ec->dev); 147 ret = scsi_add_host(host, &ec->dev);
167 if (ret) 148 if (ret)
168 goto out_unmap; 149 goto out_unmap;
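
oak also switches host->irq from IRQ_NONE, which is an interrupt-handler
return code rather than an IRQ number, to NO_IRQ, which the updated
NCR5380.h defaults to 0 on architectures that do not define it:

	#ifndef NO_IRQ
	#define NO_IRQ 0		/* fallback when the arch provides none */
	#endif

	host->irq = NO_IRQ;		/* no interrupt line wired up */
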
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 11e93025b87a..6daed6b386d4 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -11,8 +11,6 @@
11 * drew@colorado.edu 11 * drew@colorado.edu
12 * +1 (303) 666-5836 12 * +1 (303) 666-5836
13 * 13 *
14 * DISTRIBUTION RELEASE 6.
15 *
16 * For more information, please consult 14 * For more information, please consult
17 * 15 *
18 * NCR 5380 Family 16 * NCR 5380 Family
@@ -73,6 +71,9 @@
73 * 1. Test linked command handling code after Eric is ready with 71 * 1. Test linked command handling code after Eric is ready with
74 * the high level code. 72 * the high level code.
75 */ 73 */
74
75/* Adapted for the sun3 by Sam Creasey. */
76
76#include <scsi/scsi_dbg.h> 77#include <scsi/scsi_dbg.h>
77#include <scsi/scsi_transport_spi.h> 78#include <scsi/scsi_transport_spi.h>
78 79
@@ -103,27 +104,7 @@
103 104
104/* 105/*
105 * Design 106 * Design
106 * Issues :
107 *
108 * The other Linux SCSI drivers were written when Linux was Intel PC-only,
109 * and specifically for each board rather than each chip. This makes their
110 * adaptation to platforms like the Mac (Some of which use NCR5380's)
111 * more difficult than it has to be.
112 * 107 *
113 * Also, many of the SCSI drivers were written before the command queuing
114 * routines were implemented, meaning their implementations of queued
115 * commands were hacked on rather than designed in from the start.
116 *
117 * When I designed the Linux SCSI drivers I figured that
118 * while having two different SCSI boards in a system might be useful
119 * for debugging things, two of the same type wouldn't be used.
120 * Well, I was wrong and a number of users have mailed me about running
121 * multiple high-performance SCSI boards in a server.
122 *
123 * Finally, when I get questions from users, I have no idea what
124 * revision of my driver they are running.
125 *
126 * This driver attempts to address these problems :
127 * This is a generic 5380 driver. To use it on a different platform, 108 * This is a generic 5380 driver. To use it on a different platform,
128 * one simply writes appropriate system specific macros (ie, data 109 * one simply writes appropriate system specific macros (ie, data
129 * transfer - some PC's will use the I/O bus, 68K's must use 110 * transfer - some PC's will use the I/O bus, 68K's must use
@@ -138,17 +119,6 @@
138 * allowing multiple commands to propagate all the way to a SCSI-II device 119 * allowing multiple commands to propagate all the way to a SCSI-II device
139 * while a command is already executing. 120 * while a command is already executing.
140 * 121 *
141 * To solve the multiple-boards-in-the-same-system problem,
142 * there is a separate instance structure for each instance
143 * of a 5380 in the system. So, multiple NCR5380 drivers will
144 * be able to coexist with appropriate changes to the high level
145 * SCSI code.
146 *
147 * A NCR5380_PUBLIC_REVISION macro is provided, with the release
148 * number (updated for each public release) printed by the
149 * NCR5380_print_options command, which should be called from the
150 * wrapper detect function, so that I know what release of the driver
151 * users are using.
152 * 122 *
153 * Issues specific to the NCR5380 : 123 * Issues specific to the NCR5380 :
154 * 124 *
@@ -173,19 +143,17 @@
173 * Architecture : 143 * Architecture :
174 * 144 *
175 * At the heart of the design is a coroutine, NCR5380_main, 145 * At the heart of the design is a coroutine, NCR5380_main,
176 * which is started when not running by the interrupt handler, 146 * which is started from a workqueue for each NCR5380 host in the
177 * timer, and queue command function. It attempts to establish 147 * system. It attempts to establish I_T_L or I_T_L_Q nexuses by
178 * I_T_L or I_T_L_Q nexuses by removing the commands from the 148 * removing the commands from the issue queue and calling
179 * issue queue and calling NCR5380_select() if a nexus 149 * NCR5380_select() if a nexus is not established.
180 * is not established.
181 * 150 *
182 * Once a nexus is established, the NCR5380_information_transfer() 151 * Once a nexus is established, the NCR5380_information_transfer()
183 * phase goes through the various phases as instructed by the target. 152 * phase goes through the various phases as instructed by the target.
184 * if the target goes into MSG IN and sends a DISCONNECT message, 153 * if the target goes into MSG IN and sends a DISCONNECT message,
185 * the command structure is placed into the per instance disconnected 154 * the command structure is placed into the per instance disconnected
186 * queue, and NCR5380_main tries to find more work. If USLEEP 155 * queue, and NCR5380_main tries to find more work. If the target is
187 * was defined, and the target is idle for too long, the system 156 * idle for too long, the system will try to sleep.
188 * will try to sleep.
189 * 157 *
190 * If a command has disconnected, eventually an interrupt will trigger, 158 * If a command has disconnected, eventually an interrupt will trigger,
191 * calling NCR5380_intr() which will in turn call NCR5380_reselect 159 * calling NCR5380_intr() which will in turn call NCR5380_reselect
@@ -211,6 +179,9 @@
211 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically 179 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
212 * for commands that return with a CHECK CONDITION status. 180 * for commands that return with a CHECK CONDITION status.
213 * 181 *
182 * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
183 * transceivers.
184 *
214 * LINKED - if defined, linked commands are supported. 185 * LINKED - if defined, linked commands are supported.
215 * 186 *
216 * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. 187 * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
@@ -223,6 +194,9 @@
223 * 194 *
224 * NCR5380_write(register, value) - write to the specific register 195 * NCR5380_write(register, value) - write to the specific register
225 * 196 *
197 * NCR5380_implementation_fields - additional fields needed for this
198 * specific implementation of the NCR5380
199 *
226 * Either real DMA *or* pseudo DMA may be implemented 200 * Either real DMA *or* pseudo DMA may be implemented
227 * REAL functions : 201 * REAL functions :
228 * NCR5380_REAL_DMA should be defined if real DMA is to be used. 202 * NCR5380_REAL_DMA should be defined if real DMA is to be used.
@@ -241,40 +215,21 @@
241 * NCR5380_pwrite(instance, src, count) 215 * NCR5380_pwrite(instance, src, count)
242 * NCR5380_pread(instance, dst, count); 216 * NCR5380_pread(instance, dst, count);
243 * 217 *
244 * If nothing specific to this implementation needs doing (ie, with external
245 * hardware), you must also define
246 *
247 * NCR5380_queue_command
248 * NCR5380_reset
249 * NCR5380_abort
250 * NCR5380_proc_info
251 *
252 * to be the global entry points into the specific driver, ie
253 * #define NCR5380_queue_command t128_queue_command.
254 *
255 * If this is not done, the routines will be defined as static functions
256 * with the NCR5380* names and the user must provide a globally
257 * accessible wrapper function.
258 *
259 * The generic driver is initialized by calling NCR5380_init(instance), 218 * The generic driver is initialized by calling NCR5380_init(instance),
260 * after setting the appropriate host specific fields and ID. If the 219 * after setting the appropriate host specific fields and ID. If the
261 * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, 220 * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
262 * possible) function may be used. Before the specific driver initialization 221 * possible) function may be used.
263 * code finishes, NCR5380_print_options should be called.
264 */ 222 */
265 223
266static struct Scsi_Host *first_instance = NULL;
267static struct scsi_host_template *the_template = NULL;
268
269/* Macros ease life... :-) */ 224/* Macros ease life... :-) */
270#define SETUP_HOSTDATA(in) \ 225#define SETUP_HOSTDATA(in) \
271 struct NCR5380_hostdata *hostdata = \ 226 struct NCR5380_hostdata *hostdata = \
272 (struct NCR5380_hostdata *)(in)->hostdata 227 (struct NCR5380_hostdata *)(in)->hostdata
273#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) 228#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)
274 229
275#define NEXT(cmd) ((Scsi_Cmnd *)(cmd)->host_scribble) 230#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble)
276#define SET_NEXT(cmd,next) ((cmd)->host_scribble = (void *)(next)) 231#define SET_NEXT(cmd,next) ((cmd)->host_scribble = (void *)(next))
277#define NEXTADDR(cmd) ((Scsi_Cmnd **)&(cmd)->host_scribble) 232#define NEXTADDR(cmd) ((struct scsi_cmnd **)&(cmd)->host_scribble)
278 233
279#define HOSTNO instance->host_no 234#define HOSTNO instance->host_no
280#define H_NO(cmd) (cmd)->device->host->host_no 235#define H_NO(cmd) (cmd)->device->host->host_no
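
The issue and disconnected queues remain singly linked lists threaded
through scsi_cmnd->host_scribble, via the NEXT()/SET_NEXT()/NEXTADDR()
macros above. Pushing a command onto the head of such a queue is just,
in sketch form (locking elided):

	SET_NEXT(cmd, hostdata->issue_queue);
	hostdata->issue_queue = cmd;
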
@@ -316,30 +271,17 @@ static struct scsi_host_template *the_template = NULL;
316 * important: the tag bit must be cleared before 'nr_allocated' is decreased. 271 * important: the tag bit must be cleared before 'nr_allocated' is decreased.
317 */ 272 */
318 273
319/* -1 for TAG_NONE is not possible with unsigned char cmd->tag */ 274static void __init init_tags(struct NCR5380_hostdata *hostdata)
320#undef TAG_NONE
321#define TAG_NONE 0xff
322
323typedef struct {
324 DECLARE_BITMAP(allocated, MAX_TAGS);
325 int nr_allocated;
326 int queue_size;
327} TAG_ALLOC;
328
329static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */
330
331
332static void __init init_tags(void)
333{ 275{
334 int target, lun; 276 int target, lun;
335 TAG_ALLOC *ta; 277 struct tag_alloc *ta;
336 278
337 if (!setup_use_tagged_queuing) 279 if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
338 return; 280 return;
339 281
340 for (target = 0; target < 8; ++target) { 282 for (target = 0; target < 8; ++target) {
341 for (lun = 0; lun < 8; ++lun) { 283 for (lun = 0; lun < 8; ++lun) {
342 ta = &TagAlloc[target][lun]; 284 ta = &hostdata->TagAlloc[target][lun];
343 bitmap_zero(ta->allocated, MAX_TAGS); 285 bitmap_zero(ta->allocated, MAX_TAGS);
344 ta->nr_allocated = 0; 286 ta->nr_allocated = 0;
345 /* At the beginning, assume the maximum queue size we could 287 /* At the beginning, assume the maximum queue size we could
@@ -359,7 +301,7 @@ static void __init init_tags(void)
359 * conditions. 301 * conditions.
360 */ 302 */
361 303
362static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged) 304static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
363{ 305{
364 u8 lun = cmd->device->lun; 306 u8 lun = cmd->device->lun;
365 SETUP_HOSTDATA(cmd->device->host); 307 SETUP_HOSTDATA(cmd->device->host);
@@ -367,10 +309,11 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
367 if (hostdata->busy[cmd->device->id] & (1 << lun)) 309 if (hostdata->busy[cmd->device->id] & (1 << lun))
368 return 1; 310 return 1;
369 if (!should_be_tagged || 311 if (!should_be_tagged ||
370 !setup_use_tagged_queuing || !cmd->device->tagged_supported) 312 !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
313 !cmd->device->tagged_supported)
371 return 0; 314 return 0;
372 if (TagAlloc[cmd->device->id][lun].nr_allocated >= 315 if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >=
373 TagAlloc[cmd->device->id][lun].queue_size) { 316 hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) {
374 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", 317 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
375 H_NO(cmd), cmd->device->id, lun); 318 H_NO(cmd), cmd->device->id, lun);
376 return 1; 319 return 1;
@@ -384,7 +327,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
384 * untagged. 327 * untagged.
385 */ 328 */
386 329
387static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) 330static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
388{ 331{
389 u8 lun = cmd->device->lun; 332 u8 lun = cmd->device->lun;
390 SETUP_HOSTDATA(cmd->device->host); 333 SETUP_HOSTDATA(cmd->device->host);
@@ -393,13 +336,14 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
393 * an untagged command. 336 * an untagged command.
394 */ 337 */
395 if (!should_be_tagged || 338 if (!should_be_tagged ||
396 !setup_use_tagged_queuing || !cmd->device->tagged_supported) { 339 !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
340 !cmd->device->tagged_supported) {
397 cmd->tag = TAG_NONE; 341 cmd->tag = TAG_NONE;
398 hostdata->busy[cmd->device->id] |= (1 << lun); 342 hostdata->busy[cmd->device->id] |= (1 << lun);
399 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " 343 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
400 "command\n", H_NO(cmd), cmd->device->id, lun); 344 "command\n", H_NO(cmd), cmd->device->id, lun);
401 } else { 345 } else {
402 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; 346 struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
403 347
404 cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); 348 cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
405 set_bit(cmd->tag, ta->allocated); 349 set_bit(cmd->tag, ta->allocated);
@@ -416,7 +360,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
416 * unlock the LUN. 360 * unlock the LUN.
417 */ 361 */
418 362
419static void cmd_free_tag(Scsi_Cmnd *cmd) 363static void cmd_free_tag(struct scsi_cmnd *cmd)
420{ 364{
421 u8 lun = cmd->device->lun; 365 u8 lun = cmd->device->lun;
422 SETUP_HOSTDATA(cmd->device->host); 366 SETUP_HOSTDATA(cmd->device->host);
@@ -429,7 +373,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
429 printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", 373 printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
430 H_NO(cmd), cmd->tag); 374 H_NO(cmd), cmd->tag);
431 } else { 375 } else {
432 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; 376 struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
433 clear_bit(cmd->tag, ta->allocated); 377 clear_bit(cmd->tag, ta->allocated);
434 ta->nr_allocated--; 378 ta->nr_allocated--;
435 dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", 379 dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
@@ -438,17 +382,17 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
438} 382}
439 383
440 384
441static void free_all_tags(void) 385static void free_all_tags(struct NCR5380_hostdata *hostdata)
442{ 386{
443 int target, lun; 387 int target, lun;
444 TAG_ALLOC *ta; 388 struct tag_alloc *ta;
445 389
446 if (!setup_use_tagged_queuing) 390 if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
447 return; 391 return;
448 392
449 for (target = 0; target < 8; ++target) { 393 for (target = 0; target < 8; ++target) {
450 for (lun = 0; lun < 8; ++lun) { 394 for (lun = 0; lun < 8; ++lun) {
451 ta = &TagAlloc[target][lun]; 395 ta = &hostdata->TagAlloc[target][lun];
452 bitmap_zero(ta->allocated, MAX_TAGS); 396 bitmap_zero(ta->allocated, MAX_TAGS);
453 ta->nr_allocated = 0; 397 ta->nr_allocated = 0;
454 } 398 }
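
The tag bookkeeping being converted here is a plain per-(target, lun) bitmap moved from file scope into the host private data. A minimal self-contained sketch of the same allocate/free discipline (tag_get/tag_put are illustrative names, not functions in this driver):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define MAX_TAGS 32

struct tag_alloc {
	DECLARE_BITMAP(allocated, MAX_TAGS);	/* one bit per outstanding tag */
	int nr_allocated;
	int queue_size;				/* per-LUN depth limit */
};

static int tag_get(struct tag_alloc *ta)
{
	int tag = find_first_zero_bit(ta->allocated, MAX_TAGS);

	if (tag >= ta->queue_size)
		return -1;		/* no free tag below the depth limit */
	set_bit(tag, ta->allocated);
	ta->nr_allocated++;
	return tag;
}

static void tag_put(struct tag_alloc *ta, unsigned int tag)
{
	if (tag < MAX_TAGS && test_and_clear_bit(tag, ta->allocated))
		ta->nr_allocated--;
}
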
@@ -459,19 +403,20 @@ static void free_all_tags(void)
459 403
460 404
461/* 405/*
462 * Function: void merge_contiguous_buffers( Scsi_Cmnd *cmd ) 406 * Function: void merge_contiguous_buffers( struct scsi_cmnd *cmd )
463 * 407 *
464 * Purpose: Try to merge several scatter-gather requests into one DMA 408 * Purpose: Try to merge several scatter-gather requests into one DMA
465 * transfer. This is possible if the scatter buffers lie on 409 * transfer. This is possible if the scatter buffers lie on
466 * physical contiguous addresses. 410 * physical contiguous addresses.
467 * 411 *
468 * Parameters: Scsi_Cmnd *cmd 412 * Parameters: struct scsi_cmnd *cmd
469 * The command to work on. The first scatter buffer's data are 413 * The command to work on. The first scatter buffer's data are
470 * assumed to be already transferred into ptr/this_residual. 414 * assumed to be already transferred into ptr/this_residual.
471 */ 415 */
472 416
473static void merge_contiguous_buffers(Scsi_Cmnd *cmd) 417static void merge_contiguous_buffers(struct scsi_cmnd *cmd)
474{ 418{
419#if !defined(CONFIG_SUN3)
475 unsigned long endaddr; 420 unsigned long endaddr;
476#if (NDEBUG & NDEBUG_MERGING) 421#if (NDEBUG & NDEBUG_MERGING)
477 unsigned long oldlen = cmd->SCp.this_residual; 422 unsigned long oldlen = cmd->SCp.this_residual;
@@ -496,18 +441,17 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
496 dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n", 441 dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",
497 cnt, cmd->SCp.ptr, cmd->SCp.this_residual); 442 cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
498#endif 443#endif
444#endif /* !defined(CONFIG_SUN3) */
499} 445}
500 446
501/* 447/**
502 * Function : void initialize_SCp(Scsi_Cmnd *cmd) 448 * initialize_SCp - init the scsi pointer field
449 * @cmd: command block to set up
503 * 450 *
504 * Purpose : initialize the saved data pointers for cmd to point to the 451 * Set up the internal fields in the SCSI command.
505 * start of the buffer.
506 *
507 * Inputs : cmd - Scsi_Cmnd structure to have pointers reset.
508 */ 452 */
509 453
510static inline void initialize_SCp(Scsi_Cmnd *cmd) 454static inline void initialize_SCp(struct scsi_cmnd *cmd)
511{ 455{
512 /* 456 /*
513 * Initialize the Scsi Pointer field so that all of the commands in the 457 * Initialize the Scsi Pointer field so that all of the commands in the
@@ -557,12 +501,11 @@ static struct {
557 {0, NULL} 501 {0, NULL}
558}; 502};
559 503
560/* 504/**
561 * Function : void NCR5380_print(struct Scsi_Host *instance) 505 * NCR5380_print - print scsi bus signals
506 * @instance: adapter state to dump
562 * 507 *
563 * Purpose : print the SCSI bus signals for debugging purposes 508 * Print the SCSI bus signals for debugging purposes
564 *
565 * Input : instance - which NCR5380
566 */ 509 */
567 510
568static void NCR5380_print(struct Scsi_Host *instance) 511static void NCR5380_print(struct Scsi_Host *instance)
@@ -605,12 +548,13 @@ static struct {
605 {PHASE_UNKNOWN, "UNKNOWN"} 548 {PHASE_UNKNOWN, "UNKNOWN"}
606}; 549};
607 550
608/* 551/**
609 * Function : void NCR5380_print_phase(struct Scsi_Host *instance) 552 * NCR5380_print_phase - show SCSI phase
553 * @instance: adapter to dump
610 * 554 *
611 * Purpose : print the current SCSI phase for debugging purposes 555 * Print the current SCSI phase for debugging purposes
612 * 556 *
613 * Input : instance - which NCR5380 557 * Locks: none
614 */ 558 */
615 559
616static void NCR5380_print_phase(struct Scsi_Host *instance) 560static void NCR5380_print_phase(struct Scsi_Host *instance)
@@ -648,71 +592,75 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
648#include <linux/workqueue.h> 592#include <linux/workqueue.h>
649#include <linux/interrupt.h> 593#include <linux/interrupt.h>
650 594
651static volatile int main_running; 595static inline void queue_main(struct NCR5380_hostdata *hostdata)
652static DECLARE_WORK(NCR5380_tqueue, NCR5380_main);
653
654static inline void queue_main(void)
655{ 596{
656 if (!main_running) { 597 if (!hostdata->main_running) {
657 /* If in interrupt and NCR5380_main() not already running, 598 /* If in interrupt and NCR5380_main() not already running,
658 queue it on the 'immediate' task queue, to be processed 599 queue it on the 'immediate' task queue, to be processed
659 immediately after the current interrupt processing has 600 immediately after the current interrupt processing has
660 finished. */ 601 finished. */
661 schedule_work(&NCR5380_tqueue); 602 schedule_work(&hostdata->main_task);
662 } 603 }
663 /* else: nothing to do: the running NCR5380_main() will pick up 604 /* else: nothing to do: the running NCR5380_main() will pick up
664 any newly queued command. */ 605 any newly queued command. */
665} 606}
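
The globals removed here reappear as per-host fields; in outline, the matching additions to struct NCR5380_hostdata (in the header) are what let queue_main() serve several boards:

struct NCR5380_hostdata {
	/* ... existing fields ... */
	struct Scsi_Host *host;		/* back pointer for the work handler */
	struct work_struct main_task;	/* replaces the global NCR5380_tqueue */
	volatile int main_running;	/* replaces the global main_running flag */
};
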
666 607
667 608/**
668static inline void NCR5380_all_init(void) 609 * NCR5380_info - report driver and host information
669{ 610 * @instance: relevant scsi host instance
670 static int done = 0;
671 if (!done) {
672 dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
673 done = 1;
674 }
675}
676
677
678/*
679 * Function : void NCR58380_print_options (struct Scsi_Host *instance)
680 * 611 *
681 * Purpose : called by probe code indicating the NCR5380 driver 612 * For use as the host template info() handler.
682 * options that were selected.
683 * 613 *
684 * Inputs : instance, pointer to this instance. Unused. 614 * Locks: none
685 */ 615 */
686 616
687static void __init NCR5380_print_options(struct Scsi_Host *instance) 617static const char *NCR5380_info(struct Scsi_Host *instance)
618{
619 struct NCR5380_hostdata *hostdata = shost_priv(instance);
620
621 return hostdata->info;
622}
623
624static void prepare_info(struct Scsi_Host *instance)
688{ 625{
689 printk(" generic options" 626 struct NCR5380_hostdata *hostdata = shost_priv(instance);
690#ifdef AUTOSENSE 627
691 " AUTOSENSE" 628 snprintf(hostdata->info, sizeof(hostdata->info),
629 "%s, io_port 0x%lx, n_io_port %d, "
630 "base 0x%lx, irq %d, "
631 "can_queue %d, cmd_per_lun %d, "
632 "sg_tablesize %d, this_id %d, "
633 "flags { %s}, "
634 "options { %s} ",
635 instance->hostt->name, instance->io_port, instance->n_io_port,
636 instance->base, instance->irq,
637 instance->can_queue, instance->cmd_per_lun,
638 instance->sg_tablesize, instance->this_id,
639 hostdata->flags & FLAG_TAGGED_QUEUING ? "TAGGED_QUEUING " : "",
640#ifdef DIFFERENTIAL
641 "DIFFERENTIAL "
692#endif 642#endif
693#ifdef REAL_DMA 643#ifdef REAL_DMA
694 " REAL DMA" 644 "REAL_DMA "
695#endif 645#endif
696#ifdef PARITY 646#ifdef PARITY
697 " PARITY" 647 "PARITY "
698#endif 648#endif
699#ifdef SUPPORT_TAGS 649#ifdef SUPPORT_TAGS
700 " SCSI-2 TAGGED QUEUING" 650 "SUPPORT_TAGS "
701#endif 651#endif
702 ); 652 "");
703 printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
704} 653}
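
prepare_info() renders the description once at init time, so NCR5380_info() can return it without locking. Hooking it up in a host template looks like this (sketch; only the relevant members shown):

static struct scsi_host_template driver_template = {
	.info		= NCR5380_info,			/* returns hostdata->info */
	.queuecommand	= NCR5380_queue_command,	/* unlocked variant below */
	/* ... */
};
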
705 654
706/* 655/**
707 * Function : void NCR5380_print_status (struct Scsi_Host *instance) 656 * NCR5380_print_status - dump controller info
657 * @instance: controller to dump
708 * 658 *
709 * Purpose : print commands in the various queues, called from 659 * Print commands in the various queues, called from NCR5380_abort
710 * NCR5380_abort and NCR5380_debug to aid debugging. 660 * to aid debugging.
711 *
712 * Inputs : instance, pointer to this instance.
713 */ 661 */
714 662
715static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd) 663static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd)
716{ 664{
717 int i, s; 665 int i, s;
718 unsigned char *command; 666 unsigned char *command;
@@ -729,7 +677,7 @@ static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd)
729static void NCR5380_print_status(struct Scsi_Host *instance) 677static void NCR5380_print_status(struct Scsi_Host *instance)
730{ 678{
731 struct NCR5380_hostdata *hostdata; 679 struct NCR5380_hostdata *hostdata;
732 Scsi_Cmnd *ptr; 680 struct scsi_cmnd *ptr;
733 unsigned long flags; 681 unsigned long flags;
734 682
735 NCR5380_dprint(NDEBUG_ANY, instance); 683 NCR5380_dprint(NDEBUG_ANY, instance);
@@ -737,20 +685,19 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
737 685
738 hostdata = (struct NCR5380_hostdata *)instance->hostdata; 686 hostdata = (struct NCR5380_hostdata *)instance->hostdata;
739 687
740 printk("\nNCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
741 local_irq_save(flags); 688 local_irq_save(flags);
742 printk("NCR5380: coroutine is%s running.\n", 689 printk("NCR5380: coroutine is%s running.\n",
743 main_running ? "" : "n't"); 690 hostdata->main_running ? "" : "n't");
744 if (!hostdata->connected) 691 if (!hostdata->connected)
745 printk("scsi%d: no currently connected command\n", HOSTNO); 692 printk("scsi%d: no currently connected command\n", HOSTNO);
746 else 693 else
747 lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected); 694 lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected);
748 printk("scsi%d: issue_queue\n", HOSTNO); 695 printk("scsi%d: issue_queue\n", HOSTNO);
749 for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) 696 for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
750 lprint_Scsi_Cmnd(ptr); 697 lprint_Scsi_Cmnd(ptr);
751 698
752 printk("scsi%d: disconnected_queue\n", HOSTNO); 699 printk("scsi%d: disconnected_queue\n", HOSTNO);
753 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; 700 for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
754 ptr = NEXT(ptr)) 701 ptr = NEXT(ptr))
755 lprint_Scsi_Cmnd(ptr); 702 lprint_Scsi_Cmnd(ptr);
756 703
@@ -758,7 +705,7 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
758 printk("\n"); 705 printk("\n");
759} 706}
760 707
761static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m) 708static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
762{ 709{
763 int i, s; 710 int i, s;
764 unsigned char *command; 711 unsigned char *command;
@@ -772,28 +719,28 @@ static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m)
772 seq_printf(m, "\n"); 719 seq_printf(m, "\n");
773} 720}
774 721
775static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance) 722static int __maybe_unused NCR5380_show_info(struct seq_file *m,
723 struct Scsi_Host *instance)
776{ 724{
777 struct NCR5380_hostdata *hostdata; 725 struct NCR5380_hostdata *hostdata;
778 Scsi_Cmnd *ptr; 726 struct scsi_cmnd *ptr;
779 unsigned long flags; 727 unsigned long flags;
780 728
781 hostdata = (struct NCR5380_hostdata *)instance->hostdata; 729 hostdata = (struct NCR5380_hostdata *)instance->hostdata;
782 730
783 seq_printf(m, "NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
784 local_irq_save(flags); 731 local_irq_save(flags);
785 seq_printf(m, "NCR5380: coroutine is%s running.\n", 732 seq_printf(m, "NCR5380: coroutine is%s running.\n",
786 main_running ? "" : "n't"); 733 hostdata->main_running ? "" : "n't");
787 if (!hostdata->connected) 734 if (!hostdata->connected)
788 seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO); 735 seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO);
789 else 736 else
790 show_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m); 737 show_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m);
791 seq_printf(m, "scsi%d: issue_queue\n", HOSTNO); 738 seq_printf(m, "scsi%d: issue_queue\n", HOSTNO);
792 for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) 739 for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
793 show_Scsi_Cmnd(ptr, m); 740 show_Scsi_Cmnd(ptr, m);
794 741
795 seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO); 742 seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO);
796 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; 743 for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
797 ptr = NEXT(ptr)) 744 ptr = NEXT(ptr))
798 show_Scsi_Cmnd(ptr, m); 745 show_Scsi_Cmnd(ptr, m);
799 746
@@ -801,16 +748,18 @@ static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance)
801 return 0; 748 return 0;
802} 749}
803 750
804/* 751/**
805 * Function : void NCR5380_init (struct Scsi_Host *instance) 752 * NCR5380_init - initialise an NCR5380
753 * @instance: adapter to configure
754 * @flags: control flags
806 * 755 *
807 * Purpose : initializes *instance and corresponding 5380 chip. 756 * Initializes *instance and corresponding 5380 chip,
808 * 757 * with flags OR'd into the initial flags value.
809 * Inputs : instance - instantiation of the 5380 driver.
810 * 758 *
811 * Notes : I assume that the host, hostno, and id bits have been 759 * Notes : I assume that the host, hostno, and id bits have been
812 * set correctly. I don't care about the irq and other fields. 760 * set correctly. I don't care about the irq and other fields.
813 * 761 *
762 * Returns 0 for success
814 */ 763 */
815 764
816static int __init NCR5380_init(struct Scsi_Host *instance, int flags) 765static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
@@ -818,8 +767,7 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
818 int i; 767 int i;
819 SETUP_HOSTDATA(instance); 768 SETUP_HOSTDATA(instance);
820 769
821 NCR5380_all_init(); 770 hostdata->host = instance;
822
823 hostdata->aborted = 0; 771 hostdata->aborted = 0;
824 hostdata->id_mask = 1 << instance->this_id; 772 hostdata->id_mask = 1 << instance->this_id;
825 hostdata->id_higher_mask = 0; 773 hostdata->id_higher_mask = 0;
@@ -829,7 +777,7 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
829 for (i = 0; i < 8; ++i) 777 for (i = 0; i < 8; ++i)
830 hostdata->busy[i] = 0; 778 hostdata->busy[i] = 0;
831#ifdef SUPPORT_TAGS 779#ifdef SUPPORT_TAGS
832 init_tags(); 780 init_tags(hostdata);
833#endif 781#endif
834#if defined (REAL_DMA) 782#if defined (REAL_DMA)
835 hostdata->dma_len = 0; 783 hostdata->dma_len = 0;
@@ -838,19 +786,11 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
838 hostdata->connected = NULL; 786 hostdata->connected = NULL;
839 hostdata->issue_queue = NULL; 787 hostdata->issue_queue = NULL;
840 hostdata->disconnected_queue = NULL; 788 hostdata->disconnected_queue = NULL;
841 hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT; 789 hostdata->flags = flags;
842 790
843 if (!the_template) { 791 INIT_WORK(&hostdata->main_task, NCR5380_main);
844 the_template = instance->hostt;
845 first_instance = instance;
846 }
847 792
848#ifndef AUTOSENSE 793 prepare_info(instance);
849 if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
850 printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n"
851 " without AUTOSENSE option, contingent allegiance conditions may\n"
852 " be incorrectly cleared.\n", HOSTNO);
853#endif /* def AUTOSENSE */
854 794
855 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 795 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
856 NCR5380_write(MODE_REG, MR_BASE); 796 NCR5380_write(MODE_REG, MR_BASE);
@@ -860,33 +800,35 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
860 return 0; 800 return 0;
861} 801}
862 802
803/**
804 * NCR5380_exit - remove an NCR5380
805 * @instance: adapter to remove
806 *
807 * Assumes that no more work can be queued (e.g. by NCR5380_intr).
808 */
809
863static void NCR5380_exit(struct Scsi_Host *instance) 810static void NCR5380_exit(struct Scsi_Host *instance)
864{ 811{
865 /* Empty, as we didn't schedule any delayed work */ 812 struct NCR5380_hostdata *hostdata = shost_priv(instance);
813
814 cancel_work_sync(&hostdata->main_task);
866} 815}
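
cancel_work_sync() only guarantees the work item is idle, so the caller must already have stopped NCR5380_intr() from re-queueing it, as the comment above says. A plausible board-driver teardown order (hypothetical function, illustrating that constraint):

static void board_remove(struct Scsi_Host *instance)	/* hypothetical */
{
	scsi_remove_host(instance);		/* no new commands can arrive */
	free_irq(instance->irq, instance);	/* NCR5380_intr can no longer run */
	NCR5380_exit(instance);			/* safe: flushes main_task */
	scsi_host_put(instance);
}
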
867 816
868/* 817/**
869 * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd, 818 * NCR5380_queue_command - queue a command
870 * void (*done)(Scsi_Cmnd *)) 819 * @instance: the relevant SCSI adapter
871 * 820 * @cmd: SCSI command
872 * Purpose : enqueues a SCSI command
873 *
874 * Inputs : cmd - SCSI command, done - function called on completion, with
875 * a pointer to the command descriptor.
876 *
877 * Returns : 0
878 *
879 * Side effects :
880 * cmd is added to the per instance issue_queue, with minor
881 * twiddling done to the host specific fields of cmd. If the
882 * main coroutine is not running, it is restarted.
883 * 821 *
822 * cmd is added to the per instance issue_queue, with minor
823 * twiddling done to the host specific fields of cmd. If the
824 * main coroutine is not running, it is restarted.
884 */ 825 */
885 826
886static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) 827static int NCR5380_queue_command(struct Scsi_Host *instance,
828 struct scsi_cmnd *cmd)
887{ 829{
888 SETUP_HOSTDATA(cmd->device->host); 830 struct NCR5380_hostdata *hostdata = shost_priv(instance);
889 Scsi_Cmnd *tmp; 831 struct scsi_cmnd *tmp;
890 unsigned long flags; 832 unsigned long flags;
891 833
892#if (NDEBUG & NDEBUG_NO_WRITE) 834#if (NDEBUG & NDEBUG_NO_WRITE)
@@ -896,47 +838,17 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
896 printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n", 838 printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n",
897 H_NO(cmd)); 839 H_NO(cmd));
898 cmd->result = (DID_ERROR << 16); 840 cmd->result = (DID_ERROR << 16);
899 done(cmd); 841 cmd->scsi_done(cmd);
900 return 0; 842 return 0;
901 } 843 }
902#endif /* (NDEBUG & NDEBUG_NO_WRITE) */ 844#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
903 845
904#ifdef NCR5380_STATS
905# if 0
906 if (!hostdata->connected && !hostdata->issue_queue &&
907 !hostdata->disconnected_queue) {
908 hostdata->timebase = jiffies;
909 }
910# endif
911# ifdef NCR5380_STAT_LIMIT
912 if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
913# endif
914 switch (cmd->cmnd[0]) {
915 case WRITE:
916 case WRITE_6:
917 case WRITE_10:
918 hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
919 hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
920 hostdata->pendingw++;
921 break;
922 case READ:
923 case READ_6:
924 case READ_10:
925 hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
926 hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
927 hostdata->pendingr++;
928 break;
929 }
930#endif
931
932 /* 846 /*
933 * We use the host_scribble field as a pointer to the next command 847 * We use the host_scribble field as a pointer to the next command
934 * in a queue 848 * in a queue
935 */ 849 */
936 850
937 SET_NEXT(cmd, NULL); 851 SET_NEXT(cmd, NULL);
938 cmd->scsi_done = done;
939
940 cmd->result = 0; 852 cmd->result = 0;
941 853
942 /* 854 /*
@@ -946,7 +858,6 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
946 * sense data is only guaranteed to be valid while the condition exists. 858 * sense data is only guaranteed to be valid while the condition exists.
947 */ 859 */
948 860
949 local_irq_save(flags);
950 /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA. 861 /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA.
951 * Otherwise a running NCR5380_main may steal the lock. 862 * Otherwise a running NCR5380_main may steal the lock.
952 * Lock before actually inserting due to fairness reasons explained in 863 * Lock before actually inserting due to fairness reasons explained in
@@ -959,17 +870,24 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
959 * because a timer interrupt can also trigger an abort or reset, which would 870 * because a timer interrupt can also trigger an abort or reset, which would
960 * alter queues and touch the lock. 871 * alter queues and touch the lock.
961 */ 872 */
962 if (!IS_A_TT()) { 873 if (!NCR5380_acquire_dma_irq(instance))
963 /* perhaps stop command timer here */ 874 return SCSI_MLQUEUE_HOST_BUSY;
964 falcon_get_lock(); 875
965 /* perhaps restart command timer here */ 876 local_irq_save(flags);
966 } 877
878 /*
879 * Insert the cmd into the issue queue. Note that REQUEST SENSE
880 * commands are added to the head of the queue since any command will
881 * clear the contingent allegiance condition that exists and the
882 * sense data is only guaranteed to be valid while the condition exists.
883 */
884
967 if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { 885 if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
968 LIST(cmd, hostdata->issue_queue); 886 LIST(cmd, hostdata->issue_queue);
969 SET_NEXT(cmd, hostdata->issue_queue); 887 SET_NEXT(cmd, hostdata->issue_queue);
970 hostdata->issue_queue = cmd; 888 hostdata->issue_queue = cmd;
971 } else { 889 } else {
972 for (tmp = (Scsi_Cmnd *)hostdata->issue_queue; 890 for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
973 NEXT(tmp); tmp = NEXT(tmp)) 891 NEXT(tmp); tmp = NEXT(tmp))
974 ; 892 ;
975 LIST(cmd, tmp); 893 LIST(cmd, tmp);
@@ -987,32 +905,42 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
987 * If we're not in an interrupt, we can call NCR5380_main() 905 * If we're not in an interrupt, we can call NCR5380_main()
988 * unconditionally, because it cannot be already running. 906 * unconditionally, because it cannot be already running.
989 */ 907 */
990 if (in_interrupt() || ((flags >> 8) & 7) >= 6) 908 if (in_interrupt() || irqs_disabled())
991 queue_main(); 909 queue_main(hostdata);
992 else 910 else
993 NCR5380_main(NULL); 911 NCR5380_main(&hostdata->main_task);
994 return 0; 912 return 0;
995} 913}
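
Both queues stay singly linked lists threaded through scsi_cmnd::host_scribble; the NEXT/SET_NEXT helpers used throughout expand to roughly:

#define NEXT(cmd)	((struct scsi_cmnd *)(cmd)->host_scribble)
#define SET_NEXT(cmd, n)	((cmd)->host_scribble = (unsigned char *)(n))
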
996 914
997static DEF_SCSI_QCMD(NCR5380_queue_command) 915static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
916{
917 struct NCR5380_hostdata *hostdata = shost_priv(instance);
918
919 /* Caller does the locking needed to set & test these data atomically */
920 if (!hostdata->disconnected_queue &&
921 !hostdata->issue_queue &&
922 !hostdata->connected &&
923 !hostdata->retain_dma_intr)
924 NCR5380_release_dma_irq(instance);
925}
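
maybe_release_dma_irq() pairs with the NCR5380_acquire_dma_irq() call in NCR5380_queue_command() above: boards that share the DMA interrupt (the Falcon's ST-DMA) hold it only while commands are outstanding. A board without that constraint can supply no-op stubs (sketch):

static inline int NCR5380_acquire_dma_irq(struct Scsi_Host *instance)
{
	return 1;	/* always available */
}

static inline void NCR5380_release_dma_irq(struct Scsi_Host *instance)
{
}
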
998 926
999/* 927/**
1000 * Function : NCR5380_main (void) 928 * NCR5380_main - NCR state machines
1001 * 929 *
1002 * Purpose : NCR5380_main is a coroutine that runs as long as more work can 930 * NCR5380_main is a coroutine that runs as long as more work can
1003 * be done on the NCR5380 host adapters in a system. Both 931 * be done on the NCR5380 host adapters in a system. Both
1004 * NCR5380_queue_command() and NCR5380_intr() will try to start it 932 * NCR5380_queue_command() and NCR5380_intr() will try to start it
1005 * in case it is not running. 933 * in case it is not running.
1006 * 934 *
1007 * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should 935 * Locks: called as its own thread with no locks held.
1008 * reenable them. This prevents reentrancy and kernel stack overflow.
1009 */ 936 */
1010 937
1011static void NCR5380_main(struct work_struct *work) 938static void NCR5380_main(struct work_struct *work)
1012{ 939{
1013 Scsi_Cmnd *tmp, *prev; 940 struct NCR5380_hostdata *hostdata =
1014 struct Scsi_Host *instance = first_instance; 941 container_of(work, struct NCR5380_hostdata, main_task);
1015 struct NCR5380_hostdata *hostdata = HOSTDATA(instance); 942 struct Scsi_Host *instance = hostdata->host;
943 struct scsi_cmnd *tmp, *prev;
1016 int done; 944 int done;
1017 unsigned long flags; 945 unsigned long flags;
1018 946
@@ -1037,9 +965,9 @@ static void NCR5380_main(struct work_struct *work)
1037 'main_running' is set here, and queues/executes main via the 965 'main_running' is set here, and queues/executes main via the
1038 task queue, it doesn't do any harm, just this instance of main 966 task queue, it doesn't do any harm, just this instance of main
1039 won't find any work left to do. */ 967 won't find any work left to do. */
1040 if (main_running) 968 if (hostdata->main_running)
1041 return; 969 return;
1042 main_running = 1; 970 hostdata->main_running = 1;
1043 971
1044 local_save_flags(flags); 972 local_save_flags(flags);
1045 do { 973 do {
@@ -1053,7 +981,7 @@ static void NCR5380_main(struct work_struct *work)
1053 * for a target that's not busy. 981 * for a target that's not busy.
1054 */ 982 */
1055#if (NDEBUG & NDEBUG_LISTS) 983#if (NDEBUG & NDEBUG_LISTS)
1056 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; 984 for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL;
1057 tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp)) 985 tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
1058 ; 986 ;
1059 /*printk("%p ", tmp);*/ 987 /*printk("%p ", tmp);*/
@@ -1061,16 +989,14 @@ static void NCR5380_main(struct work_struct *work)
1061 printk(" LOOP\n"); 989 printk(" LOOP\n");
1062 /* else printk("\n"); */ 990 /* else printk("\n"); */
1063#endif 991#endif
1064 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, 992 for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
1065 prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) { 993 prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) {
1066 u8 lun = tmp->device->lun; 994 u8 lun = tmp->device->lun;
1067 995
1068#if (NDEBUG & NDEBUG_LISTS) 996 dprintk(NDEBUG_LISTS,
1069 if (prev != tmp) 997 "MAIN tmp=%p target=%d busy=%d lun=%d\n",
1070 printk("MAIN tmp=%p target=%d busy=%d lun=%llu\n", 998 tmp, scmd_id(tmp), hostdata->busy[scmd_id(tmp)],
1071 tmp, tmp->device->id, hostdata->busy[tmp->device->id], 999 lun);
1072 lun);
1073#endif
1074 /* When we find one, remove it from the issue queue. */ 1000 /* When we find one, remove it from the issue queue. */
1075 /* ++guenther: possible race with Falcon locking */ 1001 /* ++guenther: possible race with Falcon locking */
1076 if ( 1002 if (
@@ -1090,7 +1016,7 @@ static void NCR5380_main(struct work_struct *work)
1090 hostdata->issue_queue = NEXT(tmp); 1016 hostdata->issue_queue = NEXT(tmp);
1091 } 1017 }
1092 SET_NEXT(tmp, NULL); 1018 SET_NEXT(tmp, NULL);
1093 falcon_dont_release++; 1019 hostdata->retain_dma_intr++;
1094 1020
1095 /* reenable interrupts after finding one */ 1021 /* reenable interrupts after finding one */
1096 local_irq_restore(flags); 1022 local_irq_restore(flags);
@@ -1117,12 +1043,12 @@ static void NCR5380_main(struct work_struct *work)
1117#ifdef SUPPORT_TAGS 1043#ifdef SUPPORT_TAGS
1118 cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE); 1044 cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE);
1119#endif 1045#endif
1120 if (!NCR5380_select(instance, tmp, 1046 if (!NCR5380_select(instance, tmp)) {
1121 (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : 1047 local_irq_disable();
1122 TAG_NEXT)) { 1048 hostdata->retain_dma_intr--;
1123 falcon_dont_release--;
1124 /* release if target did not respond! */ 1049 /* release if target did not respond! */
1125 falcon_release_lock_if_possible(hostdata); 1050 maybe_release_dma_irq(instance);
1051 local_irq_restore(flags);
1126 break; 1052 break;
1127 } else { 1053 } else {
1128 local_irq_disable(); 1054 local_irq_disable();
@@ -1132,7 +1058,7 @@ static void NCR5380_main(struct work_struct *work)
1132#ifdef SUPPORT_TAGS 1058#ifdef SUPPORT_TAGS
1133 cmd_free_tag(tmp); 1059 cmd_free_tag(tmp);
1134#endif 1060#endif
1135 falcon_dont_release--; 1061 hostdata->retain_dma_intr--;
1136 local_irq_restore(flags); 1062 local_irq_restore(flags);
1137 dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " 1063 dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
1138 "returned to issue_queue\n", HOSTNO); 1064 "returned to issue_queue\n", HOSTNO);
@@ -1160,7 +1086,7 @@ static void NCR5380_main(struct work_struct *work)
1160 /* Better allow ints _after_ 'main_running' has been cleared, else 1086 /* Better allow ints _after_ 'main_running' has been cleared, else
1161 an interrupt could believe we'll pick up the work it left for 1087 an interrupt could believe we'll pick up the work it left for
1162 us, but we won't see it anymore here... */ 1088 us, but we won't see it anymore here... */
1163 main_running = 0; 1089 hostdata->main_running = 0;
1164 local_irq_restore(flags); 1090 local_irq_restore(flags);
1165} 1091}
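
Recovering the host from the work item itself is what finally retires the first_instance global; the pattern in isolation:

static void main_handler(struct work_struct *work)	/* illustrative name */
{
	struct NCR5380_hostdata *hostdata =
		container_of(work, struct NCR5380_hostdata, main_task);
	struct Scsi_Host *instance = hostdata->host;

	/* ... walk instance's issue and disconnected queues ... */
}
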
1166 1092
@@ -1179,9 +1105,11 @@ static void NCR5380_main(struct work_struct *work)
1179static void NCR5380_dma_complete(struct Scsi_Host *instance) 1105static void NCR5380_dma_complete(struct Scsi_Host *instance)
1180{ 1106{
1181 SETUP_HOSTDATA(instance); 1107 SETUP_HOSTDATA(instance);
1182 int transfered, saved_data = 0, overrun = 0, cnt, toPIO; 1108 int transferred;
1183 unsigned char **data, p; 1109 unsigned char **data;
1184 volatile int *count; 1110 volatile int *count;
1111 int saved_data = 0, overrun = 0;
1112 unsigned char p;
1185 1113
1186 if (!hostdata->connected) { 1114 if (!hostdata->connected) {
1187 printk(KERN_WARNING "scsi%d: received end of DMA interrupt with " 1115 printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
@@ -1189,7 +1117,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
1189 return; 1117 return;
1190 } 1118 }
1191 1119
1192 if (atari_read_overruns) { 1120 if (hostdata->read_overruns) {
1193 p = hostdata->connected->SCp.phase; 1121 p = hostdata->connected->SCp.phase;
1194 if (p & SR_IO) { 1122 if (p & SR_IO) {
1195 udelay(10); 1123 udelay(10);
@@ -1207,21 +1135,41 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
1207 HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), 1135 HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
1208 NCR5380_read(STATUS_REG)); 1136 NCR5380_read(STATUS_REG));
1209 1137
1138#if defined(CONFIG_SUN3)
1139 if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
1140 pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
1141 instance->host_no);
1142 BUG();
1143 }
1144
1145 /* make sure we're not stuck in a data phase */
1146 if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
1147 (BASR_PHASE_MATCH | BASR_ACK)) {
1148 pr_err("scsi%d: BASR %02x\n", instance->host_no,
1149 NCR5380_read(BUS_AND_STATUS_REG));
1150 pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n",
1151 instance->host_no);
1152 BUG();
1153 }
1154#endif
1155
1210 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1156 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1211 NCR5380_write(MODE_REG, MR_BASE); 1157 NCR5380_write(MODE_REG, MR_BASE);
1212 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1158 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1213 1159
1214 transfered = hostdata->dma_len - NCR5380_dma_residual(instance); 1160 transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
1215 hostdata->dma_len = 0; 1161 hostdata->dma_len = 0;
1216 1162
1217 data = (unsigned char **)&hostdata->connected->SCp.ptr; 1163 data = (unsigned char **)&hostdata->connected->SCp.ptr;
1218 count = &hostdata->connected->SCp.this_residual; 1164 count = &hostdata->connected->SCp.this_residual;
1219 *data += transfered; 1165 *data += transferred;
1220 *count -= transfered; 1166 *count -= transferred;
1167
1168 if (hostdata->read_overruns) {
1169 int cnt, toPIO;
1221 1170
1222 if (atari_read_overruns) {
1223 if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { 1171 if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
1224 cnt = toPIO = atari_read_overruns; 1172 cnt = toPIO = hostdata->read_overruns;
1225 if (overrun) { 1173 if (overrun) {
1226 dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); 1174 dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
1227 *(*data)++ = saved_data; 1175 *(*data)++ = saved_data;
@@ -1238,20 +1186,19 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
1238#endif /* REAL_DMA */ 1186#endif /* REAL_DMA */
1239 1187
1240 1188
1241/* 1189/**
1242 * Function : void NCR5380_intr (int irq) 1190 * NCR5380_intr - generic NCR5380 irq handler
1243 * 1191 * @irq: interrupt number
1244 * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses 1192 * @dev_id: device info
1245 * from the disconnected queue, and restarting NCR5380_main()
1246 * as required.
1247 *
1248 * Inputs : int irq, irq that caused this interrupt.
1249 * 1193 *
1194 * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
1195 * from the disconnected queue, and restarting NCR5380_main()
1196 * as required.
1250 */ 1197 */
1251 1198
1252static irqreturn_t NCR5380_intr(int irq, void *dev_id) 1199static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1253{ 1200{
1254 struct Scsi_Host *instance = first_instance; 1201 struct Scsi_Host *instance = dev_id;
1255 int done = 1, handled = 0; 1202 int done = 1, handled = 0;
1256 unsigned char basr; 1203 unsigned char basr;
1257 1204
@@ -1265,7 +1212,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1265 NCR5380_dprint(NDEBUG_INTR, instance); 1212 NCR5380_dprint(NDEBUG_INTR, instance);
1266 if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { 1213 if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
1267 done = 0; 1214 done = 0;
1268 ENABLE_IRQ();
1269 dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); 1215 dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
1270 NCR5380_reselect(instance); 1216 NCR5380_reselect(instance);
1271 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1217 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
@@ -1295,17 +1241,19 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1295 dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); 1241 dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
1296 NCR5380_dma_complete( instance ); 1242 NCR5380_dma_complete( instance );
1297 done = 0; 1243 done = 0;
1298 ENABLE_IRQ();
1299 } else 1244 } else
1300#endif /* REAL_DMA */ 1245#endif /* REAL_DMA */
1301 { 1246 {
1302/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ 1247/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
1303 if (basr & BASR_PHASE_MATCH) 1248 if (basr & BASR_PHASE_MATCH)
1304 printk(KERN_NOTICE "scsi%d: unknown interrupt, " 1249 dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "
1305 "BASR 0x%x, MR 0x%x, SR 0x%x\n", 1250 "BASR 0x%x, MR 0x%x, SR 0x%x\n",
1306 HOSTNO, basr, NCR5380_read(MODE_REG), 1251 HOSTNO, basr, NCR5380_read(MODE_REG),
1307 NCR5380_read(STATUS_REG)); 1252 NCR5380_read(STATUS_REG));
1308 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1253 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1254#ifdef SUN3_SCSI_VME
1255 dregs->csr |= CSR_DMA_ENABLE;
1256#endif
1309 } 1257 }
1310 } /* if !(SELECTION || PARITY) */ 1258 } /* if !(SELECTION || PARITY) */
1311 handled = 1; 1259 handled = 1;
@@ -1314,53 +1262,29 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1314 "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr, 1262 "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
1315 NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); 1263 NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
1316 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1264 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1265#ifdef SUN3_SCSI_VME
1266 dregs->csr |= CSR_DMA_ENABLE;
1267#endif
1317 } 1268 }
1318 1269
1319 if (!done) { 1270 if (!done) {
1320 dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); 1271 dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
1321 /* Put a call to NCR5380_main() on the queue... */ 1272 /* Put a call to NCR5380_main() on the queue... */
1322 queue_main(); 1273 queue_main(shost_priv(instance));
1323 } 1274 }
1324 return IRQ_RETVAL(handled); 1275 return IRQ_RETVAL(handled);
1325} 1276}
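
With dev_id carrying the Scsi_Host pointer, each board now registers the handler against its own instance (sketch; irq flags are board-specific):

if (request_irq(instance->irq, NCR5380_intr, 0,
		"NCR5380", instance))		/* dev_id == instance */
	goto out_unregister;			/* hypothetical error label */
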
1326 1277
1327#ifdef NCR5380_STATS
1328static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd)
1329{
1330# ifdef NCR5380_STAT_LIMIT
1331 if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
1332# endif
1333 switch (cmd->cmnd[0]) {
1334 case WRITE:
1335 case WRITE_6:
1336 case WRITE_10:
1337 hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
1338 /*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
1339 hostdata->pendingw--;
1340 break;
1341 case READ:
1342 case READ_6:
1343 case READ_10:
1344 hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
1345 /*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
1346 hostdata->pendingr--;
1347 break;
1348 }
1349}
1350#endif
1351
1352/* 1278/*
1353 * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, 1279 * Function : int NCR5380_select(struct Scsi_Host *instance,
1354 * int tag); 1280 * struct scsi_cmnd *cmd)
1355 * 1281 *
1356 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, 1282 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
1357 * including ARBITRATION, SELECTION, and initial message out for 1283 * including ARBITRATION, SELECTION, and initial message out for
1358 * IDENTIFY and queue messages. 1284 * IDENTIFY and queue messages.
1359 * 1285 *
1360 * Inputs : instance - instantiation of the 5380 driver on which this 1286 * Inputs : instance - instantiation of the 5380 driver on which this
1361 * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for 1287 * target lives, cmd - SCSI command to execute.
1362 * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
1363 * the command that is presently connected.
1364 * 1288 *
1365 * Returns : -1 if selection could not execute for some reason, 1289 * Returns : -1 if selection could not execute for some reason,
1366 * 0 if selection succeeded or failed because the target 1290 * 0 if selection succeeded or failed because the target
@@ -1380,7 +1304,7 @@ static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd)
1380 * cmd->result host byte set to DID_BAD_TARGET. 1304 * cmd->result host byte set to DID_BAD_TARGET.
1381 */ 1305 */
1382 1306
1383static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) 1307static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
1384{ 1308{
1385 SETUP_HOSTDATA(instance); 1309 SETUP_HOSTDATA(instance);
1386 unsigned char tmp[3], phase; 1310 unsigned char tmp[3], phase;
@@ -1562,7 +1486,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1562 * selection. 1486 * selection.
1563 */ 1487 */
1564 1488
1565 timeout = jiffies + 25; 1489 timeout = jiffies + (250 * HZ / 1000);
1566 1490
1567 /* 1491 /*
1568 * XXX very interesting - we're seeing a bounce where the BSY we 1492 * XXX very interesting - we're seeing a bounce where the BSY we
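
The selection timeout rewrite a few lines up is a units fix rather than a behaviour change: jiffies + 25 silently assumed HZ == 100 (25 ticks = 250 ms), while 250 * HZ / 1000 expresses 250 ms at any clock rate:

/* 250 ms in jiffies, independent of CONFIG_HZ:
 *   HZ = 100  -> 250 * 100  / 1000 = 25 ticks
 *   HZ = 250  -> 250 * 250  / 1000 = 62 ticks
 *   HZ = 1000 -> 250 * 1000 / 1000 = 250 ticks
 * msecs_to_jiffies(250) is the equivalent helper.
 */
timeout = jiffies + msecs_to_jiffies(250);
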
@@ -1616,9 +1540,6 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1616 return -1; 1540 return -1;
1617 } 1541 }
1618 cmd->result = DID_BAD_TARGET << 16; 1542 cmd->result = DID_BAD_TARGET << 16;
1619#ifdef NCR5380_STATS
1620 collect_stats(hostdata, cmd);
1621#endif
1622#ifdef SUPPORT_TAGS 1543#ifdef SUPPORT_TAGS
1623 cmd_free_tag(cmd); 1544 cmd_free_tag(cmd);
1624#endif 1545#endif
@@ -1676,6 +1597,9 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1676#ifndef SUPPORT_TAGS 1597#ifndef SUPPORT_TAGS
1677 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); 1598 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
1678#endif 1599#endif
1600#ifdef SUN3_SCSI_VME
1601 dregs->csr |= CSR_INTR;
1602#endif
1679 1603
1680 initialize_SCp(cmd); 1604 initialize_SCp(cmd);
1681 1605
@@ -1826,7 +1750,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
1826 * Returns : 0 on success, -1 on failure. 1750 * Returns : 0 on success, -1 on failure.
1827 */ 1751 */
1828 1752
1829static int do_abort(struct Scsi_Host *host) 1753static int do_abort(struct Scsi_Host *instance)
1830{ 1754{
1831 unsigned char tmp, *msgptr, phase; 1755 unsigned char tmp, *msgptr, phase;
1832 int len; 1756 int len;
@@ -1861,7 +1785,7 @@ static int do_abort(struct Scsi_Host *host)
1861 msgptr = &tmp; 1785 msgptr = &tmp;
1862 len = 1; 1786 len = 1;
1863 phase = PHASE_MSGOUT; 1787 phase = PHASE_MSGOUT;
1864 NCR5380_transfer_pio(host, &phase, &len, &msgptr); 1788 NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
1865 1789
1866 /* 1790 /*
1867 * If we got here, and the command completed successfully, 1791 * If we got here, and the command completed successfully,
@@ -1899,17 +1823,62 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
1899 SETUP_HOSTDATA(instance); 1823 SETUP_HOSTDATA(instance);
1900 register int c = *count; 1824 register int c = *count;
1901 register unsigned char p = *phase; 1825 register unsigned char p = *phase;
1826 unsigned long flags;
1827
1828#if defined(CONFIG_SUN3)
1829 /* sanity check */
1830 if (!sun3_dma_setup_done) {
1831 pr_err("scsi%d: transfer_dma without setup!\n",
1832 instance->host_no);
1833 BUG();
1834 }
1835 hostdata->dma_len = c;
1836
1837 dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
1838 instance->host_no, (p & SR_IO) ? "reading" : "writing",
1839 c, (p & SR_IO) ? "to" : "from", *data);
1840
1841 /* netbsd turns off ints here, why not be safe and do it too */
1842 local_irq_save(flags);
1843
1844 /* send start chain */
1845 sun3scsi_dma_start(c, *data);
1846
1847 if (p & SR_IO) {
1848 NCR5380_write(TARGET_COMMAND_REG, 1);
1849 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1850 NCR5380_write(INITIATOR_COMMAND_REG, 0);
1851 NCR5380_write(MODE_REG,
1852 (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
1853 NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
1854 } else {
1855 NCR5380_write(TARGET_COMMAND_REG, 0);
1856 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1857 NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
1858 NCR5380_write(MODE_REG,
1859 (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
1860 NCR5380_write(START_DMA_SEND_REG, 0);
1861 }
1862
1863#ifdef SUN3_SCSI_VME
1864 dregs->csr |= CSR_DMA_ENABLE;
1865#endif
1866
1867 local_irq_restore(flags);
1868
1869 sun3_dma_active = 1;
1870
1871#else /* !defined(CONFIG_SUN3) */
1902 register unsigned char *d = *data; 1872 register unsigned char *d = *data;
1903 unsigned char tmp; 1873 unsigned char tmp;
1904 unsigned long flags;
1905 1874
1906 if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { 1875 if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
1907 *phase = tmp; 1876 *phase = tmp;
1908 return -1; 1877 return -1;
1909 } 1878 }
1910 1879
1911 if (atari_read_overruns && (p & SR_IO)) 1880 if (hostdata->read_overruns && (p & SR_IO))
1912 c -= atari_read_overruns; 1881 c -= hostdata->read_overruns;
1913 1882
1914 dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", 1883 dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
1915 HOSTNO, (p & SR_IO) ? "reading" : "writing", 1884 HOSTNO, (p & SR_IO) ? "reading" : "writing",
@@ -1921,7 +1890,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
1921 NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY); 1890 NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
1922#endif /* def REAL_DMA */ 1891#endif /* def REAL_DMA */
1923 1892
1924 if (IS_A_TT()) { 1893 if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) {
1925 /* On the Medusa, it is a must to initialize the DMA before 1894 /* On the Medusa, it is a must to initialize the DMA before
1926 * starting the NCR. This is also the cleaner way for the TT. 1895 * starting the NCR. This is also the cleaner way for the TT.
1927 */ 1896 */
@@ -1939,7 +1908,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
1939 NCR5380_write(START_DMA_SEND_REG, 0); 1908 NCR5380_write(START_DMA_SEND_REG, 0);
1940 } 1909 }
1941 1910
1942 if (!IS_A_TT()) { 1911 if (hostdata->flags & FLAG_LATE_DMA_SETUP) {
1943 /* On the Falcon, the DMA setup must be done after the last */ 1912 /* On the Falcon, the DMA setup must be done after the last */
1944 /* NCR access, else the DMA setup gets trashed! 1913 /* NCR access, else the DMA setup gets trashed!
1945 */ 1914 */
@@ -1949,6 +1918,8 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
1949 NCR5380_dma_write_setup(instance, d, c); 1918 NCR5380_dma_write_setup(instance, d, c);
1950 local_irq_restore(flags); 1919 local_irq_restore(flags);
1951 } 1920 }
1921#endif /* !defined(CONFIG_SUN3) */
1922
1952 return 0; 1923 return 0;
1953} 1924}
1954#endif /* defined(REAL_DMA) */ 1925#endif /* defined(REAL_DMA) */
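
hostdata->read_overruns (the former atari_read_overruns global) is a defensive trick: hardware DMA reads are shortened by a few bytes so a handshake overrun cannot run past the buffer, and NCR5380_dma_complete() drains the reserved tail by PIO. A sketch of the read-length side, assuming the driver's locals:

static int dma_read_len(struct NCR5380_hostdata *hostdata, int c,
			unsigned char p)
{
	/* transfer_dma: stop the DMA engine short on reads; dma_complete
	 * later finishes the last read_overruns bytes with
	 * NCR5380_transfer_pio(), prepending the one byte the chip may
	 * already have latched (the saved_data/overrun case above). */
	if (hostdata->read_overruns && (p & SR_IO))
		c -= hostdata->read_overruns;
	return c;
}
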
@@ -1982,7 +1953,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
1982#endif 1953#endif
1983 unsigned char *data; 1954 unsigned char *data;
1984 unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; 1955 unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
1985 Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected; 1956 struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected;
1957
1958#ifdef SUN3_SCSI_VME
1959 dregs->csr |= CSR_INTR;
1960#endif
1986 1961
1987 while (1) { 1962 while (1) {
1988 tmp = NCR5380_read(STATUS_REG); 1963 tmp = NCR5380_read(STATUS_REG);
@@ -1993,6 +1968,33 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
1993 old_phase = phase; 1968 old_phase = phase;
1994 NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); 1969 NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
1995 } 1970 }
1971#if defined(CONFIG_SUN3)
1972 if (phase == PHASE_CMDOUT) {
1973#if defined(REAL_DMA)
1974 void *d;
1975 unsigned long count;
1976
1977 if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
1978 count = cmd->SCp.buffer->length;
1979 d = sg_virt(cmd->SCp.buffer);
1980 } else {
1981 count = cmd->SCp.this_residual;
1982 d = cmd->SCp.ptr;
1983 }
1984 /* this command setup for dma yet? */
1985 if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) {
1986 if (cmd->request->cmd_type == REQ_TYPE_FS) {
1987 sun3scsi_dma_setup(d, count,
1988 rq_data_dir(cmd->request));
1989 sun3_dma_setup_done = cmd;
1990 }
1991 }
1992#endif
1993#ifdef SUN3_SCSI_VME
1994 dregs->csr |= CSR_INTR;
1995#endif
1996 }
1997#endif /* CONFIG_SUN3 */
1996 1998
1997 if (sink && (phase != PHASE_MSGOUT)) { 1999 if (sink && (phase != PHASE_MSGOUT)) {
1998 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); 2000 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
@@ -2054,8 +2056,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2054 */ 2056 */
2055 2057
2056#if defined(REAL_DMA) 2058#if defined(REAL_DMA)
2057 if (!cmd->device->borken && 2059 if (
2058 (transfersize = NCR5380_dma_xfer_len(instance,cmd,phase)) > 31) { 2060#if !defined(CONFIG_SUN3)
2061 !cmd->device->borken &&
2062#endif
2063 (transfersize = NCR5380_dma_xfer_len(instance, cmd, phase)) >= DMA_MIN_SIZE) {
2059 len = transfersize; 2064 len = transfersize;
2060 cmd->SCp.phase = phase; 2065 cmd->SCp.phase = phase;
2061 if (NCR5380_transfer_dma(instance, &phase, 2066 if (NCR5380_transfer_dma(instance, &phase,
@@ -2064,9 +2069,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2064 * If the watchdog timer fires, all future 2069 * If the watchdog timer fires, all future
2065 * accesses to this device will use the 2070 * accesses to this device will use the
2066 * polled-IO. */ 2071 * polled-IO. */
2067 printk(KERN_NOTICE "scsi%d: switching target %d " 2072 scmd_printk(KERN_INFO, cmd,
2068 "lun %llu to slow handshake\n", HOSTNO, 2073 "switching to slow handshake\n");
2069 cmd->device->id, cmd->device->lun);
2070 cmd->device->borken = 1; 2074 cmd->device->borken = 1;
2071 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 2075 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
2072 ICR_ASSERT_ATN); 2076 ICR_ASSERT_ATN);
@@ -2092,6 +2096,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2092 NCR5380_transfer_pio(instance, &phase, 2096 NCR5380_transfer_pio(instance, &phase,
2093 (int *)&cmd->SCp.this_residual, 2097 (int *)&cmd->SCp.this_residual,
2094 (unsigned char **)&cmd->SCp.ptr); 2098 (unsigned char **)&cmd->SCp.ptr);
2099#if defined(CONFIG_SUN3) && defined(REAL_DMA)
2100 /* if we had intended to dma that command clear it */
2101 if (sun3_dma_setup_done == cmd)
2102 sun3_dma_setup_done = NULL;
2103#endif
2095 break; 2104 break;
2096 case PHASE_MSGIN: 2105 case PHASE_MSGIN:
2097 len = 1; 2106 len = 1;
@@ -2145,9 +2154,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2145 dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request " 2154 dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request "
2146 "done, calling scsi_done().\n", 2155 "done, calling scsi_done().\n",
2147 HOSTNO, cmd->device->id, cmd->device->lun); 2156 HOSTNO, cmd->device->id, cmd->device->lun);
2148#ifdef NCR5380_STATS
2149 collect_stats(hostdata, cmd);
2150#endif
2151 cmd->scsi_done(cmd); 2157 cmd->scsi_done(cmd);
2152 cmd = hostdata->connected; 2158 cmd = hostdata->connected;
2153 break; 2159 break;
@@ -2156,11 +2162,12 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2156 case COMMAND_COMPLETE: 2162 case COMMAND_COMPLETE:
2157 /* Accept message by clearing ACK */ 2163 /* Accept message by clearing ACK */
2158 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2164 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2159 /* ++guenther: possible race with Falcon locking */
2160 falcon_dont_release++;
2161 hostdata->connected = NULL;
2162 dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu " 2165 dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu "
2163 "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); 2166 "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
2167
2168 local_irq_save(flags);
2169 hostdata->retain_dma_intr++;
2170 hostdata->connected = NULL;
2164#ifdef SUPPORT_TAGS 2171#ifdef SUPPORT_TAGS
2165 cmd_free_tag(cmd); 2172 cmd_free_tag(cmd);
2166 if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { 2173 if (status_byte(cmd->SCp.Status) == QUEUE_FULL) {
@@ -2172,7 +2179,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2172 */ 2179 */
2173 /* ++Andreas: the mid level code knows about 2180 /* ++Andreas: the mid level code knows about
2174 QUEUE_FULL now. */ 2181 QUEUE_FULL now. */
2175 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; 2182 struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][cmd->device->lun];
2176 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned " 2183 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned "
2177 "QUEUE_FULL after %d commands\n", 2184 "QUEUE_FULL after %d commands\n",
2178 HOSTNO, cmd->device->id, cmd->device->lun, 2185 HOSTNO, cmd->device->id, cmd->device->lun,
@@ -2207,7 +2214,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2207 else if (status_byte(cmd->SCp.Status) != GOOD) 2214 else if (status_byte(cmd->SCp.Status) != GOOD)
2208 cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); 2215 cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
2209 2216
2210#ifdef AUTOSENSE
2211 if ((cmd->cmnd[0] == REQUEST_SENSE) && 2217 if ((cmd->cmnd[0] == REQUEST_SENSE) &&
2212 hostdata->ses.cmd_len) { 2218 hostdata->ses.cmd_len) {
2213 scsi_eh_restore_cmnd(cmd, &hostdata->ses); 2219 scsi_eh_restore_cmnd(cmd, &hostdata->ses);
@@ -2220,22 +2226,17 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2220 2226
2221 dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO); 2227 dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO);
2222 2228
2223 local_irq_save(flags);
2224 LIST(cmd,hostdata->issue_queue); 2229 LIST(cmd,hostdata->issue_queue);
2225 SET_NEXT(cmd, hostdata->issue_queue); 2230 SET_NEXT(cmd, hostdata->issue_queue);
2226 hostdata->issue_queue = (Scsi_Cmnd *) cmd; 2231 hostdata->issue_queue = (struct scsi_cmnd *) cmd;
2227 local_irq_restore(flags);
2228 dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " 2232 dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
2229 "issue queue\n", H_NO(cmd)); 2233 "issue queue\n", H_NO(cmd));
2230 } else 2234 } else {
2231#endif /* def AUTOSENSE */
2232 {
2233#ifdef NCR5380_STATS
2234 collect_stats(hostdata, cmd);
2235#endif
2236 cmd->scsi_done(cmd); 2235 cmd->scsi_done(cmd);
2237 } 2236 }
2238 2237
2238 local_irq_restore(flags);
2239
2239 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2240 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2240 /* 2241 /*
2241 * Restore phase bits to 0 so an interrupted selection, 2242 * Restore phase bits to 0 so an interrupted selection,
@@ -2246,12 +2247,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2246 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) 2247 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
2247 barrier(); 2248 barrier();
2248 2249
2249 falcon_dont_release--; 2250 local_irq_save(flags);
2251 hostdata->retain_dma_intr--;
2250 /* ++roman: For Falcon SCSI, release the lock on the 2252 /* ++roman: For Falcon SCSI, release the lock on the
2251 * ST-DMA here if no other commands are waiting on the 2253 * ST-DMA here if no other commands are waiting on the
2252 * disconnected queue. 2254 * disconnected queue.
2253 */ 2255 */
2254 falcon_release_lock_if_possible(hostdata); 2256 maybe_release_dma_irq(instance);
2257 local_irq_restore(flags);
2255 return; 2258 return;
2256 case MESSAGE_REJECT: 2259 case MESSAGE_REJECT:
2257 /* Accept message by clearing ACK */ 2260 /* Accept message by clearing ACK */
@@ -2303,6 +2306,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2303 /* Wait for bus free to avoid nasty timeouts */ 2306 /* Wait for bus free to avoid nasty timeouts */
2304 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) 2307 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
2305 barrier(); 2308 barrier();
2309#ifdef SUN3_SCSI_VME
2310 dregs->csr |= CSR_DMA_ENABLE;
2311#endif
2306 return; 2312 return;
2307 /* 2313 /*
2308 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect 2314 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
@@ -2384,20 +2390,18 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2384 */ 2390 */
2385 default: 2391 default:
2386 if (!tmp) { 2392 if (!tmp) {
2387 printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO); 2393 printk(KERN_INFO "scsi%d: rejecting message ",
2394 instance->host_no);
2388 spi_print_msg(extended_msg); 2395 spi_print_msg(extended_msg);
2389 printk("\n"); 2396 printk("\n");
2390 } else if (tmp != EXTENDED_MESSAGE) 2397 } else if (tmp != EXTENDED_MESSAGE)
2391 printk(KERN_DEBUG "scsi%d: rejecting unknown " 2398 scmd_printk(KERN_INFO, cmd,
2392 "message %02x from target %d, lun %llu\n", 2399 "rejecting unknown message %02x\n",
2393 HOSTNO, tmp, cmd->device->id, cmd->device->lun); 2400 tmp);
2394 else 2401 else
2395 printk(KERN_DEBUG "scsi%d: rejecting unknown " 2402 scmd_printk(KERN_INFO, cmd,
2396 "extended message " 2403 "rejecting unknown extended message code %02x, length %d\n",
2397 "code %02x, length %d from target %d, lun %llu\n", 2404 extended_msg[1], extended_msg[0]);
2398 HOSTNO, extended_msg[1], extended_msg[0],
2399 cmd->device->id, cmd->device->lun);
2400
2401 2405
2402 msgout = MESSAGE_REJECT; 2406 msgout = MESSAGE_REJECT;
2403 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); 2407 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -2410,6 +2414,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2410 hostdata->last_message = msgout; 2414 hostdata->last_message = msgout;
2411 NCR5380_transfer_pio(instance, &phase, &len, &data); 2415 NCR5380_transfer_pio(instance, &phase, &len, &data);
2412 if (msgout == ABORT) { 2416 if (msgout == ABORT) {
2417 local_irq_save(flags);
2413#ifdef SUPPORT_TAGS 2418#ifdef SUPPORT_TAGS
2414 cmd_free_tag(cmd); 2419 cmd_free_tag(cmd);
2415#else 2420#else
@@ -2417,12 +2422,10 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2417#endif 2422#endif
2418 hostdata->connected = NULL; 2423 hostdata->connected = NULL;
2419 cmd->result = DID_ERROR << 16; 2424 cmd->result = DID_ERROR << 16;
2420#ifdef NCR5380_STATS
2421 collect_stats(hostdata, cmd);
2422#endif
2423 cmd->scsi_done(cmd);
2424 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2425 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2425 falcon_release_lock_if_possible(hostdata); 2426 maybe_release_dma_irq(instance);
2427 local_irq_restore(flags);
2428 cmd->scsi_done(cmd);
2426 return; 2429 return;
2427 } 2430 }
2428 msgout = NOP; 2431 msgout = NOP;
@@ -2455,7 +2458,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2455 * Function : void NCR5380_reselect (struct Scsi_Host *instance) 2458 * Function : void NCR5380_reselect (struct Scsi_Host *instance)
2456 * 2459 *
2457 * Purpose : does reselection, initializing the instance->connected 2460 * Purpose : does reselection, initializing the instance->connected
2458 * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q 2461 * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q
2459 * nexus has been reestablished, 2462 * nexus has been reestablished,
2460 * 2463 *
2461 * Inputs : instance - this instance of the NCR5380. 2464 * Inputs : instance - this instance of the NCR5380.
@@ -2463,19 +2466,21 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2463 */ 2466 */
2464 2467
2465 2468
2469/* it might eventually prove necessary to do a dma setup on
2470 reselection, but it doesn't seem to be needed now -- sam */
2471
2466static void NCR5380_reselect(struct Scsi_Host *instance) 2472static void NCR5380_reselect(struct Scsi_Host *instance)
2467{ 2473{
2468 SETUP_HOSTDATA(instance); 2474 SETUP_HOSTDATA(instance);
2469 unsigned char target_mask; 2475 unsigned char target_mask;
2470 unsigned char lun, phase; 2476 unsigned char lun;
2471 int len;
2472#ifdef SUPPORT_TAGS 2477#ifdef SUPPORT_TAGS
2473 unsigned char tag; 2478 unsigned char tag;
2474#endif 2479#endif
2475 unsigned char msg[3]; 2480 unsigned char msg[3];
2476 unsigned char *data; 2481 int __maybe_unused len;
2477 Scsi_Cmnd *tmp = NULL, *prev; 2482 unsigned char __maybe_unused *data, __maybe_unused phase;
2478/* unsigned long flags; */ 2483 struct scsi_cmnd *tmp = NULL, *prev;
2479 2484
2480 /* 2485 /*
2481 * Disable arbitration, etc. since the host adapter obviously 2486 * Disable arbitration, etc. since the host adapter obviously
@@ -2511,10 +2516,18 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2511 while (!(NCR5380_read(STATUS_REG) & SR_REQ)) 2516 while (!(NCR5380_read(STATUS_REG) & SR_REQ))
2512 ; 2517 ;
2513 2518
2519#if defined(CONFIG_SUN3) && defined(REAL_DMA)
2520 /* acknowledge toggle to MSGIN */
2521 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN));
2522
2523 /* peek at the byte without really hitting the bus */
2524 msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG);
2525#else
2514 len = 1; 2526 len = 1;
2515 data = msg; 2527 data = msg;
2516 phase = PHASE_MSGIN; 2528 phase = PHASE_MSGIN;
2517 NCR5380_transfer_pio(instance, &phase, &len, &data); 2529 NCR5380_transfer_pio(instance, &phase, &len, &data);
2530#endif
2518 2531
2519 if (!(msg[0] & 0x80)) { 2532 if (!(msg[0] & 0x80)) {
2520 printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); 2533 printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO);
@@ -2524,13 +2537,13 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2524 } 2537 }
2525 lun = (msg[0] & 0x07); 2538 lun = (msg[0] & 0x07);
2526 2539
2527#ifdef SUPPORT_TAGS 2540#if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3)
2528 /* If the phase is still MSGIN, the target wants to send some more 2541 /* If the phase is still MSGIN, the target wants to send some more
2529 * messages. In case it supports tagged queuing, this is probably a 2542 * messages. In case it supports tagged queuing, this is probably a
2530 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. 2543 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
2531 */ 2544 */
2532 tag = TAG_NONE; 2545 tag = TAG_NONE;
2533 if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { 2546 if (phase == PHASE_MSGIN && (hostdata->flags & FLAG_TAGGED_QUEUING)) {
2534 /* Accept previous IDENTIFY message by clearing ACK */ 2547 /* Accept previous IDENTIFY message by clearing ACK */
2535 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2548 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2536 len = 2; 2549 len = 2;
@@ -2548,15 +2561,13 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2548 * just reestablished, and remove it from the disconnected queue. 2561 * just reestablished, and remove it from the disconnected queue.
2549 */ 2562 */
2550 2563
2551 for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; 2564 for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL;
2552 tmp; prev = tmp, tmp = NEXT(tmp)) { 2565 tmp; prev = tmp, tmp = NEXT(tmp)) {
2553 if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) 2566 if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun)
2554#ifdef SUPPORT_TAGS 2567#ifdef SUPPORT_TAGS
2555 && (tag == tmp->tag) 2568 && (tag == tmp->tag)
2556#endif 2569#endif
2557 ) { 2570 ) {
2558 /* ++guenther: prevent race with falcon_release_lock */
2559 falcon_dont_release++;
2560 if (prev) { 2571 if (prev) {
2561 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); 2572 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
2562 SET_NEXT(prev, NEXT(tmp)); 2573 SET_NEXT(prev, NEXT(tmp));
@@ -2588,22 +2599,63 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2588 return; 2599 return;
2589 } 2600 }
2590 2601
2602#if defined(CONFIG_SUN3) && defined(REAL_DMA)
2603 /* engage dma setup for the command we just saw */
2604 {
2605 void *d;
2606 unsigned long count;
2607
2608 if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
2609 count = tmp->SCp.buffer->length;
2610 d = sg_virt(tmp->SCp.buffer);
2611 } else {
2612 count = tmp->SCp.this_residual;
2613 d = tmp->SCp.ptr;
2614 }
2615 /* setup this command for dma if not already */
2616 if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) {
2617 sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request));
2618 sun3_dma_setup_done = tmp;
2619 }
2620 }
2621
2622 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
2623#endif
2624
2591 /* Accept message by clearing ACK */ 2625 /* Accept message by clearing ACK */
2592 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2626 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2593 2627
2628#if defined(SUPPORT_TAGS) && defined(CONFIG_SUN3)
2629 /* If the phase is still MSGIN, the target wants to send some more
2630 * messages. In case it supports tagged queuing, this is probably a
2631 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
2632 */
2633 tag = TAG_NONE;
2634 if (phase == PHASE_MSGIN && setup_use_tagged_queuing) {
2635 /* Accept previous IDENTIFY message by clearing ACK */
2636 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2637 len = 2;
2638 data = msg + 1;
2639 if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
2640 msg[1] == SIMPLE_QUEUE_TAG)
2641 tag = msg[2];
 2642 dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at reselection\n",
 2643 HOSTNO, target_mask, lun, tag);
2644 }
2645#endif
2646
2594 hostdata->connected = tmp; 2647 hostdata->connected = tmp;
2595 dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n", 2648 dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n",
2596 HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); 2649 HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
2597 falcon_dont_release--;
2598} 2650}
2599 2651
2600 2652
2601/* 2653/*
2602 * Function : int NCR5380_abort (Scsi_Cmnd *cmd) 2654 * Function : int NCR5380_abort (struct scsi_cmnd *cmd)
2603 * 2655 *
2604 * Purpose : abort a command 2656 * Purpose : abort a command
2605 * 2657 *
2606 * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the 2658 * Inputs : cmd - the scsi_cmnd to abort, code - code to set the
2607 * host byte of the result field to, if zero DID_ABORTED is 2659 * host byte of the result field to, if zero DID_ABORTED is
2608 * used. 2660 * used.
2609 * 2661 *
@@ -2616,11 +2668,11 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2616 */ 2668 */
2617 2669
2618static 2670static
2619int NCR5380_abort(Scsi_Cmnd *cmd) 2671int NCR5380_abort(struct scsi_cmnd *cmd)
2620{ 2672{
2621 struct Scsi_Host *instance = cmd->device->host; 2673 struct Scsi_Host *instance = cmd->device->host;
2622 SETUP_HOSTDATA(instance); 2674 SETUP_HOSTDATA(instance);
2623 Scsi_Cmnd *tmp, **prev; 2675 struct scsi_cmnd *tmp, **prev;
2624 unsigned long flags; 2676 unsigned long flags;
2625 2677
2626 scmd_printk(KERN_NOTICE, cmd, "aborting command\n"); 2678 scmd_printk(KERN_NOTICE, cmd, "aborting command\n");
@@ -2629,10 +2681,6 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2629 2681
2630 local_irq_save(flags); 2682 local_irq_save(flags);
2631 2683
2632 if (!IS_A_TT() && !falcon_got_lock)
2633 printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n",
2634 HOSTNO);
2635
2636 dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, 2684 dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
2637 NCR5380_read(BUS_AND_STATUS_REG), 2685 NCR5380_read(BUS_AND_STATUS_REG),
2638 NCR5380_read(STATUS_REG)); 2686 NCR5380_read(STATUS_REG));
@@ -2673,12 +2721,12 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2673#else 2721#else
2674 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 2722 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2675#endif 2723#endif
2724 maybe_release_dma_irq(instance);
2676 local_irq_restore(flags); 2725 local_irq_restore(flags);
2677 cmd->scsi_done(cmd); 2726 cmd->scsi_done(cmd);
2678 falcon_release_lock_if_possible(hostdata);
2679 return SUCCESS; 2727 return SUCCESS;
2680 } else { 2728 } else {
2681/* local_irq_restore(flags); */ 2729 local_irq_restore(flags);
2682 printk("scsi%d: abort of connected command failed!\n", HOSTNO); 2730 printk("scsi%d: abort of connected command failed!\n", HOSTNO);
2683 return FAILED; 2731 return FAILED;
2684 } 2732 }
@@ -2689,21 +2737,21 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2689 * Case 2 : If the command hasn't been issued yet, we simply remove it 2737 * Case 2 : If the command hasn't been issued yet, we simply remove it
2690 * from the issue queue. 2738 * from the issue queue.
2691 */ 2739 */
2692 for (prev = (Scsi_Cmnd **)&(hostdata->issue_queue), 2740 for (prev = (struct scsi_cmnd **)&(hostdata->issue_queue),
2693 tmp = (Scsi_Cmnd *)hostdata->issue_queue; 2741 tmp = (struct scsi_cmnd *)hostdata->issue_queue;
2694 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { 2742 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
2695 if (cmd == tmp) { 2743 if (cmd == tmp) {
2696 REMOVE(5, *prev, tmp, NEXT(tmp)); 2744 REMOVE(5, *prev, tmp, NEXT(tmp));
2697 (*prev) = NEXT(tmp); 2745 (*prev) = NEXT(tmp);
2698 SET_NEXT(tmp, NULL); 2746 SET_NEXT(tmp, NULL);
2699 tmp->result = DID_ABORT << 16; 2747 tmp->result = DID_ABORT << 16;
2748 maybe_release_dma_irq(instance);
2700 local_irq_restore(flags); 2749 local_irq_restore(flags);
2701 dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n", 2750 dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
2702 HOSTNO); 2751 HOSTNO);
2703 /* Tagged queuing note: no tag to free here, hasn't been assigned 2752 /* Tagged queuing note: no tag to free here, hasn't been assigned
2704 * yet... */ 2753 * yet... */
2705 tmp->scsi_done(tmp); 2754 tmp->scsi_done(tmp);
2706 falcon_release_lock_if_possible(hostdata);
2707 return SUCCESS; 2755 return SUCCESS;
2708 } 2756 }
2709 } 2757 }
@@ -2750,13 +2798,13 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2750 * it from the disconnected queue. 2798 * it from the disconnected queue.
2751 */ 2799 */
2752 2800
2753 for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; 2801 for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp;
2754 tmp = NEXT(tmp)) { 2802 tmp = NEXT(tmp)) {
2755 if (cmd == tmp) { 2803 if (cmd == tmp) {
2756 local_irq_restore(flags); 2804 local_irq_restore(flags);
2757 dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO); 2805 dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
2758 2806
2759 if (NCR5380_select(instance, cmd, (int)cmd->tag)) 2807 if (NCR5380_select(instance, cmd))
2760 return FAILED; 2808 return FAILED;
2761 2809
2762 dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO); 2810 dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
@@ -2764,8 +2812,8 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2764 do_abort(instance); 2812 do_abort(instance);
2765 2813
2766 local_irq_save(flags); 2814 local_irq_save(flags);
2767 for (prev = (Scsi_Cmnd **)&(hostdata->disconnected_queue), 2815 for (prev = (struct scsi_cmnd **)&(hostdata->disconnected_queue),
2768 tmp = (Scsi_Cmnd *)hostdata->disconnected_queue; 2816 tmp = (struct scsi_cmnd *)hostdata->disconnected_queue;
2769 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { 2817 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
2770 if (cmd == tmp) { 2818 if (cmd == tmp) {
2771 REMOVE(5, *prev, tmp, NEXT(tmp)); 2819 REMOVE(5, *prev, tmp, NEXT(tmp));
@@ -2781,15 +2829,22 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2781#else 2829#else
2782 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 2830 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2783#endif 2831#endif
2832 maybe_release_dma_irq(instance);
2784 local_irq_restore(flags); 2833 local_irq_restore(flags);
2785 tmp->scsi_done(tmp); 2834 tmp->scsi_done(tmp);
2786 falcon_release_lock_if_possible(hostdata);
2787 return SUCCESS; 2835 return SUCCESS;
2788 } 2836 }
2789 } 2837 }
2790 } 2838 }
2791 } 2839 }
2792 2840
2841 /* Maybe it is sufficient just to release the ST-DMA lock... (if
2842 * possible at all) At least, we should check if the lock could be
2843 * released after the abort, in case it is kept due to some bug.
2844 */
2845 maybe_release_dma_irq(instance);
2846 local_irq_restore(flags);
2847
2793 /* 2848 /*
2794 * Case 5 : If we reached this point, the command was not found in any of 2849 * Case 5 : If we reached this point, the command was not found in any of
2795 * the queues. 2850 * the queues.
@@ -2800,21 +2855,14 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2800 * broke. 2855 * broke.
2801 */ 2856 */
2802 2857
2803 local_irq_restore(flags);
2804 printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); 2858 printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);
2805 2859
2806 /* Maybe it is sufficient just to release the ST-DMA lock... (if
2807 * possible at all) At least, we should check if the lock could be
2808 * released after the abort, in case it is kept due to some bug.
2809 */
2810 falcon_release_lock_if_possible(hostdata);
2811
2812 return FAILED; 2860 return FAILED;
2813} 2861}
2814 2862
2815 2863
2816/* 2864/*
2817 * Function : int NCR5380_reset (Scsi_Cmnd *cmd) 2865 * Function : int NCR5380_reset (struct scsi_cmnd *cmd)
2818 * 2866 *
2819 * Purpose : reset the SCSI bus. 2867 * Purpose : reset the SCSI bus.
2820 * 2868 *
@@ -2822,20 +2870,14 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2822 * 2870 *
2823 */ 2871 */
2824 2872
2825static int NCR5380_bus_reset(Scsi_Cmnd *cmd) 2873static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
2826{ 2874{
2827 SETUP_HOSTDATA(cmd->device->host); 2875 struct Scsi_Host *instance = cmd->device->host;
2876 struct NCR5380_hostdata *hostdata = shost_priv(instance);
2828 int i; 2877 int i;
2829 unsigned long flags; 2878 unsigned long flags;
2830#if defined(RESET_RUN_DONE)
2831 Scsi_Cmnd *connected, *disconnected_queue;
2832#endif
2833
2834 if (!IS_A_TT() && !falcon_got_lock)
2835 printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_reset\n",
2836 H_NO(cmd));
2837 2879
2838 NCR5380_print_status(cmd->device->host); 2880 NCR5380_print_status(instance);
2839 2881
2840 /* get in phase */ 2882 /* get in phase */
2841 NCR5380_write(TARGET_COMMAND_REG, 2883 NCR5380_write(TARGET_COMMAND_REG,
@@ -2852,89 +2894,6 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2852 * through anymore ... */ 2894 * through anymore ... */
2853 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 2895 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
2854 2896
2855 /* MSch 20140115 - looking at the generic NCR5380 driver, all of this
2856 * should go.
2857 * Catch-22: if we don't clear all queues, the SCSI driver lock will
2858 * not be reset by atari_scsi_reset()!
2859 */
2860
2861#if defined(RESET_RUN_DONE)
2862 /* XXX Should now be done by midlevel code, but it's broken XXX */
2863 /* XXX see below XXX */
2864
2865 /* MSch: old-style reset: actually abort all command processing here */
2866
2867 /* After the reset, there are no more connected or disconnected commands
2868 * and no busy units; to avoid problems with re-inserting the commands
2869 * into the issue_queue (via scsi_done()), the aborted commands are
2870 * remembered in local variables first.
2871 */
2872 local_irq_save(flags);
2873 connected = (Scsi_Cmnd *)hostdata->connected;
2874 hostdata->connected = NULL;
2875 disconnected_queue = (Scsi_Cmnd *)hostdata->disconnected_queue;
2876 hostdata->disconnected_queue = NULL;
2877#ifdef SUPPORT_TAGS
2878 free_all_tags();
2879#endif
2880 for (i = 0; i < 8; ++i)
2881 hostdata->busy[i] = 0;
2882#ifdef REAL_DMA
2883 hostdata->dma_len = 0;
2884#endif
2885 local_irq_restore(flags);
2886
2887 /* In order to tell the mid-level code which commands were aborted,
2888 * set the command status to DID_RESET and call scsi_done() !!!
2889 * This ultimately aborts processing of these commands in the mid-level.
2890 */
2891
2892 if ((cmd = connected)) {
2893 dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
2894 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
2895 cmd->scsi_done(cmd);
2896 }
2897
2898 for (i = 0; (cmd = disconnected_queue); ++i) {
2899 disconnected_queue = NEXT(cmd);
2900 SET_NEXT(cmd, NULL);
2901 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
2902 cmd->scsi_done(cmd);
2903 }
2904 if (i > 0)
2905 dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);
2906
2907 /* The Falcon lock should be released after a reset...
2908 */
2909 /* ++guenther: moved to atari_scsi_reset(), to prevent a race between
2910 * unlocking and enabling dma interrupt.
2911 */
2912/* falcon_release_lock_if_possible( hostdata );*/
2913
2914 /* since all commands have been explicitly terminated, we need to tell
2915 * the midlevel code that the reset was SUCCESSFUL, and there is no
2916 * need to 'wake up' the commands by a request_sense
2917 */
2918 return SUCCESS;
2919#else /* 1 */
2920
2921 /* MSch: new-style reset handling: let the mid-level do what it can */
2922
2923 /* ++guenther: MID-LEVEL IS STILL BROKEN.
2924 * Mid-level is supposed to requeue all commands that were active on the
2925 * various low-level queues. In fact it does this, but that's not enough
2926 * because all these commands are subject to timeout. And if a timeout
2927 * happens for any removed command, *_abort() is called but all queues
2928 * are now empty. Abort then gives up the falcon lock, which is fatal,
2929 * since the mid-level will queue more commands and must have the lock
2930 * (it's all happening inside timer interrupt handler!!).
2931 * Even worse, abort will return NOT_RUNNING for all those commands not
2932 * on any queue, so they won't be retried ...
2933 *
2934 * Conclusion: either scsi.c disables timeout for all resetted commands
2935 * immediately, or we lose! As of linux-2.0.20 it doesn't.
2936 */
2937
2938 /* After the reset, there are no more connected or disconnected commands 2897 /* After the reset, there are no more connected or disconnected commands
2939 * and no busy units; so clear the low-level status here to avoid 2898 * and no busy units; so clear the low-level status here to avoid
2940 * conflicts when the mid-level code tries to wake up the affected 2899 * conflicts when the mid-level code tries to wake up the affected
@@ -2953,16 +2912,16 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2953 hostdata->connected = NULL; 2912 hostdata->connected = NULL;
2954 hostdata->disconnected_queue = NULL; 2913 hostdata->disconnected_queue = NULL;
2955#ifdef SUPPORT_TAGS 2914#ifdef SUPPORT_TAGS
2956 free_all_tags(); 2915 free_all_tags(hostdata);
2957#endif 2916#endif
2958 for (i = 0; i < 8; ++i) 2917 for (i = 0; i < 8; ++i)
2959 hostdata->busy[i] = 0; 2918 hostdata->busy[i] = 0;
2960#ifdef REAL_DMA 2919#ifdef REAL_DMA
2961 hostdata->dma_len = 0; 2920 hostdata->dma_len = 0;
2962#endif 2921#endif
2922
2923 maybe_release_dma_irq(instance);
2963 local_irq_restore(flags); 2924 local_irq_restore(flags);
2964 2925
2965 /* we did no complete reset of all commands, so a wakeup is required */
2966 return SUCCESS; 2926 return SUCCESS;
2967#endif /* 1 */
2968} 2927}
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index b522134528d6..d1c37a386947 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -64,45 +64,57 @@
64/**************************************************************************/ 64/**************************************************************************/
65 65
66 66
67
68#include <linux/module.h> 67#include <linux/module.h>
69
70#define AUTOSENSE
71/* For the Atari version, use only polled IO or REAL_DMA */
72#define REAL_DMA
73/* Support tagged queuing? (on devices that are able to... :-) */
74#define SUPPORT_TAGS
75#define MAX_TAGS 32
76
77#include <linux/types.h> 68#include <linux/types.h>
78#include <linux/stddef.h>
79#include <linux/ctype.h>
80#include <linux/delay.h> 69#include <linux/delay.h>
81#include <linux/mm.h>
82#include <linux/blkdev.h> 70#include <linux/blkdev.h>
83#include <linux/interrupt.h> 71#include <linux/interrupt.h>
84#include <linux/init.h> 72#include <linux/init.h>
85#include <linux/nvram.h> 73#include <linux/nvram.h>
86#include <linux/bitops.h> 74#include <linux/bitops.h>
87#include <linux/wait.h> 75#include <linux/wait.h>
76#include <linux/platform_device.h>
88 77
89#include <asm/setup.h> 78#include <asm/setup.h>
90#include <asm/atarihw.h> 79#include <asm/atarihw.h>
91#include <asm/atariints.h> 80#include <asm/atariints.h>
92#include <asm/page.h>
93#include <asm/pgtable.h>
94#include <asm/irq.h>
95#include <asm/traps.h>
96
97#include "scsi.h"
98#include <scsi/scsi_host.h>
99#include "atari_scsi.h"
100#include "NCR5380.h"
101#include <asm/atari_stdma.h> 81#include <asm/atari_stdma.h>
102#include <asm/atari_stram.h> 82#include <asm/atari_stram.h>
103#include <asm/io.h> 83#include <asm/io.h>
104 84
105#include <linux/stat.h> 85#include <scsi/scsi_host.h>
86
87/* Definitions for the core NCR5380 driver. */
88
89#define REAL_DMA
90#define SUPPORT_TAGS
91#define MAX_TAGS 32
92#define DMA_MIN_SIZE 32
93
94#define NCR5380_implementation_fields /* none */
95
96#define NCR5380_read(reg) atari_scsi_reg_read(reg)
97#define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value)
98
99#define NCR5380_queue_command atari_scsi_queue_command
100#define NCR5380_abort atari_scsi_abort
101#define NCR5380_show_info atari_scsi_show_info
102#define NCR5380_info atari_scsi_info
103
104#define NCR5380_dma_read_setup(instance, data, count) \
105 atari_scsi_dma_setup(instance, data, count, 0)
106#define NCR5380_dma_write_setup(instance, data, count) \
107 atari_scsi_dma_setup(instance, data, count, 1)
108#define NCR5380_dma_residual(instance) \
109 atari_scsi_dma_residual(instance)
110#define NCR5380_dma_xfer_len(instance, cmd, phase) \
111 atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
112
113#define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance)
114#define NCR5380_release_dma_irq(instance) falcon_release_lock()
115
116#include "NCR5380.h"
117
106 118
107#define IS_A_TT() ATARIHW_PRESENT(TT_SCSI) 119#define IS_A_TT() ATARIHW_PRESENT(TT_SCSI)
108 120
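These NCR5380_* macros are the binding layer consumed by the shared core once NCR5380.h (and later atari_NCR5380.c) is included: the core calls the generic names and each board supplies register access, DMA setup and transfer-length policy. One detail worth spelling out is the write-flag derivation in NCR5380_dma_xfer_len(): SR_IO set in the phase code means DATA IN (target to initiator), so !((phase) & SR_IO) yields 0 for a read and 1 for a write, e.g.:

	/* DATA IN phase: SR_IO set, so write_flag == 0 (device-to-memory) */
	atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !(PHASE_DATAIN & SR_IO));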
@@ -149,23 +161,6 @@ static inline unsigned long SCSI_DMA_GETADR(void)
149 return adr; 161 return adr;
150} 162}
151 163
152static inline void ENABLE_IRQ(void)
153{
154 if (IS_A_TT())
155 atari_enable_irq(IRQ_TT_MFP_SCSI);
156 else
157 atari_enable_irq(IRQ_MFP_FSCSI);
158}
159
160static inline void DISABLE_IRQ(void)
161{
162 if (IS_A_TT())
163 atari_disable_irq(IRQ_TT_MFP_SCSI);
164 else
165 atari_disable_irq(IRQ_MFP_FSCSI);
166}
167
168
169#define HOSTDATA_DMALEN (((struct NCR5380_hostdata *) \ 164#define HOSTDATA_DMALEN (((struct NCR5380_hostdata *) \
170 (atari_scsi_host->hostdata))->dma_len) 165 (atari_scsi_host->hostdata))->dma_len)
171 166
@@ -178,30 +173,9 @@ static inline void DISABLE_IRQ(void)
178#define AFTER_RESET_DELAY (5*HZ/2) 173#define AFTER_RESET_DELAY (5*HZ/2)
179#endif 174#endif
180 175
181/***************************** Prototypes *****************************/
182
183#ifdef REAL_DMA 176#ifdef REAL_DMA
184static int scsi_dma_is_ignored_buserr(unsigned char dma_stat);
185static void atari_scsi_fetch_restbytes(void); 177static void atari_scsi_fetch_restbytes(void);
186static long atari_scsi_dma_residual(struct Scsi_Host *instance);
187static int falcon_classify_cmd(Scsi_Cmnd *cmd);
188static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
189 Scsi_Cmnd *cmd, int write_flag);
190#endif
191static irqreturn_t scsi_tt_intr(int irq, void *dummy);
192static irqreturn_t scsi_falcon_intr(int irq, void *dummy);
193static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata);
194static void falcon_get_lock(void);
195#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
196static void atari_scsi_reset_boot(void);
197#endif 178#endif
198static unsigned char atari_scsi_tt_reg_read(unsigned char reg);
199static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value);
200static unsigned char atari_scsi_falcon_reg_read(unsigned char reg);
201static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value);
202
203/************************* End of Prototypes **************************/
204
205 179
206static struct Scsi_Host *atari_scsi_host; 180static struct Scsi_Host *atari_scsi_host;
207static unsigned char (*atari_scsi_reg_read)(unsigned char reg); 181static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
@@ -226,8 +200,6 @@ static char *atari_dma_orig_addr;
226/* mask for address bits that can't be used with the ST-DMA */ 200/* mask for address bits that can't be used with the ST-DMA */
227static unsigned long atari_dma_stram_mask; 201static unsigned long atari_dma_stram_mask;
228#define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) 202#define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0)
229/* number of bytes to cut from a transfer to handle NCR overruns */
230static int atari_read_overruns;
231#endif 203#endif
232 204
233static int setup_can_queue = -1; 205static int setup_can_queue = -1;
@@ -386,10 +358,6 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
386 358
387 NCR5380_intr(irq, dummy); 359 NCR5380_intr(irq, dummy);
388 360
389#if 0
390 /* To be sure the int is not masked */
391 atari_enable_irq(IRQ_TT_MFP_SCSI);
392#endif
393 return IRQ_HANDLED; 361 return IRQ_HANDLED;
394} 362}
395 363
@@ -480,257 +448,35 @@ static void atari_scsi_fetch_restbytes(void)
480#endif /* REAL_DMA */ 448#endif /* REAL_DMA */
481 449
482 450
483static int falcon_got_lock = 0;
484static DECLARE_WAIT_QUEUE_HEAD(falcon_fairness_wait);
485static int falcon_trying_lock = 0;
486static DECLARE_WAIT_QUEUE_HEAD(falcon_try_wait);
487static int falcon_dont_release = 0;
488
489/* This function releases the lock on the DMA chip if there is no 451/* This function releases the lock on the DMA chip if there is no
490 * connected command and the disconnected queue is empty. On 452 * connected command and the disconnected queue is empty.
491 * releasing, instances of falcon_get_lock are awoken, that put
492 * themselves to sleep for fairness. They can now try to get the lock
493 * again (but others waiting longer more probably will win).
494 */ 453 */
495 454
496static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata) 455static void falcon_release_lock(void)
497{ 456{
498 unsigned long flags;
499
500 if (IS_A_TT()) 457 if (IS_A_TT())
501 return; 458 return;
502 459
503 local_irq_save(flags); 460 if (stdma_is_locked_by(scsi_falcon_intr))
504
505 if (falcon_got_lock && !hostdata->disconnected_queue &&
506 !hostdata->issue_queue && !hostdata->connected) {
507
508 if (falcon_dont_release) {
509#if 0
510 printk("WARNING: Lock release not allowed. Ignored\n");
511#endif
512 local_irq_restore(flags);
513 return;
514 }
515 falcon_got_lock = 0;
516 stdma_release(); 461 stdma_release();
517 wake_up(&falcon_fairness_wait);
518 }
519
520 local_irq_restore(flags);
521} 462}
522 463
523/* This function manages the locking of the ST-DMA. 464/* This function manages the locking of the ST-DMA.
524 * If the DMA isn't locked already for SCSI, it tries to lock it by 465 * If the DMA isn't locked already for SCSI, it tries to lock it by
525 * calling stdma_lock(). But if the DMA is locked by the SCSI code and 466 * calling stdma_lock(). But if the DMA is locked by the SCSI code and
526 * there are other drivers waiting for the chip, we do not issue the 467 * there are other drivers waiting for the chip, we do not issue the
527 * command immediately but wait on 'falcon_fairness_queue'. We will be 468 * command immediately but tell the SCSI mid-layer to defer.
528 * waked up when the DMA is unlocked by some SCSI interrupt. After that
529 * we try to get the lock again.
530 * But we must be prepared that more than one instance of
531 * falcon_get_lock() is waiting on the fairness queue. They should not
532 * try all at once to call stdma_lock(), one is enough! For that, the
533 * first one sets 'falcon_trying_lock', others that see that variable
534 * set wait on the queue 'falcon_try_wait'.
535 * Complicated, complicated.... Sigh...
536 */ 469 */
537 470
538static void falcon_get_lock(void) 471static int falcon_get_lock(struct Scsi_Host *instance)
539{ 472{
540 unsigned long flags;
541
542 if (IS_A_TT()) 473 if (IS_A_TT())
543 return; 474 return 1;
544
545 local_irq_save(flags);
546
547 wait_event_cmd(falcon_fairness_wait,
548 in_interrupt() || !falcon_got_lock || !stdma_others_waiting(),
549 local_irq_restore(flags),
550 local_irq_save(flags));
551
552 while (!falcon_got_lock) {
553 if (in_irq())
554 panic("Falcon SCSI hasn't ST-DMA lock in interrupt");
555 if (!falcon_trying_lock) {
556 falcon_trying_lock = 1;
557 stdma_lock(scsi_falcon_intr, NULL);
558 falcon_got_lock = 1;
559 falcon_trying_lock = 0;
560 wake_up(&falcon_try_wait);
561 } else {
562 wait_event_cmd(falcon_try_wait,
563 falcon_got_lock && !falcon_trying_lock,
564 local_irq_restore(flags),
565 local_irq_save(flags));
566 }
567 }
568
569 local_irq_restore(flags);
570 if (!falcon_got_lock)
571 panic("Falcon SCSI: someone stole the lock :-(\n");
572}
573
574
575static int __init atari_scsi_detect(struct scsi_host_template *host)
576{
577 static int called = 0;
578 struct Scsi_Host *instance;
579
580 if (!MACH_IS_ATARI ||
581 (!ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(TT_SCSI)) ||
582 called)
583 return 0;
584
585 host->proc_name = "Atari";
586
587 atari_scsi_reg_read = IS_A_TT() ? atari_scsi_tt_reg_read :
588 atari_scsi_falcon_reg_read;
589 atari_scsi_reg_write = IS_A_TT() ? atari_scsi_tt_reg_write :
590 atari_scsi_falcon_reg_write;
591
592 /* setup variables */
593 host->can_queue =
594 (setup_can_queue > 0) ? setup_can_queue :
595 IS_A_TT() ? ATARI_TT_CAN_QUEUE : ATARI_FALCON_CAN_QUEUE;
596 host->cmd_per_lun =
597 (setup_cmd_per_lun > 0) ? setup_cmd_per_lun :
598 IS_A_TT() ? ATARI_TT_CMD_PER_LUN : ATARI_FALCON_CMD_PER_LUN;
599 /* Force sg_tablesize to 0 on a Falcon! */
600 host->sg_tablesize =
601 !IS_A_TT() ? ATARI_FALCON_SG_TABLESIZE :
602 (setup_sg_tablesize >= 0) ? setup_sg_tablesize : ATARI_TT_SG_TABLESIZE;
603
604 if (setup_hostid >= 0)
605 host->this_id = setup_hostid;
606 else {
607 /* use 7 as default */
608 host->this_id = 7;
609 /* Test if a host id is set in the NVRam */
610 if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) {
611 unsigned char b = nvram_read_byte( 14 );
612 /* Arbitration enabled? (for TOS) If yes, use configured host ID */
613 if (b & 0x80)
614 host->this_id = b & 7;
615 }
616 }
617 475
618#ifdef SUPPORT_TAGS 476 if (in_interrupt())
619 if (setup_use_tagged_queuing < 0) 477 return stdma_try_lock(scsi_falcon_intr, instance);
620 setup_use_tagged_queuing = DEFAULT_USE_TAGGED_QUEUING;
621#endif
622#ifdef REAL_DMA
623 /* If running on a Falcon and if there's TT-Ram (i.e., more than one
624 * memory block, since there's always ST-Ram in a Falcon), then allocate a
625 * STRAM_BUFFER_SIZE byte dribble buffer for transfers from/to alternative
626 * Ram.
627 */
628 if (MACH_IS_ATARI && ATARIHW_PRESENT(ST_SCSI) &&
629 !ATARIHW_PRESENT(EXTD_DMA) && m68k_num_memory > 1) {
630 atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI");
631 if (!atari_dma_buffer) {
632 printk(KERN_ERR "atari_scsi_detect: can't allocate ST-RAM "
633 "double buffer\n");
634 return 0;
635 }
636 atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
637 atari_dma_orig_addr = 0;
638 }
639#endif
640 instance = scsi_register(host, sizeof(struct NCR5380_hostdata));
641 if (instance == NULL) {
642 atari_stram_free(atari_dma_buffer);
643 atari_dma_buffer = 0;
644 return 0;
645 }
646 atari_scsi_host = instance;
647 /*
648 * Set irq to 0, to avoid that the mid-level code disables our interrupt
649 * during queue_command calls. This is completely unnecessary, and even
650 * worse causes bad problems on the Falcon, where the int is shared with
651 * IDE and floppy!
652 */
653 instance->irq = 0;
654
655#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
656 atari_scsi_reset_boot();
657#endif
658 NCR5380_init(instance, 0);
659
660 if (IS_A_TT()) {
661
662 /* This int is actually "pseudo-slow", i.e. it acts like a slow
663 * interrupt after having cleared the pending flag for the DMA
664 * interrupt. */
665 if (request_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr, IRQ_TYPE_SLOW,
666 "SCSI NCR5380", instance)) {
667 printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting",IRQ_TT_MFP_SCSI);
668 scsi_unregister(atari_scsi_host);
669 atari_stram_free(atari_dma_buffer);
670 atari_dma_buffer = 0;
671 return 0;
672 }
673 tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */
674#ifdef REAL_DMA
675 tt_scsi_dma.dma_ctrl = 0;
676 atari_dma_residual = 0;
677
678 if (MACH_IS_MEDUSA) {
679 /* While the read overruns (described by Drew Eckhardt in
680 * NCR5380.c) never happened on TTs, they do in fact on the Medusa
681 * (This was the cause why SCSI didn't work right for so long
682 * there.) Since handling the overruns slows down a bit, I turned
683 * the #ifdef's into a runtime condition.
684 *
685 * In principle it should be sufficient to do max. 1 byte with
686 * PIO, but there is another problem on the Medusa with the DMA
687 * rest data register. So 'atari_read_overruns' is currently set
688 * to 4 to avoid having transfers that aren't a multiple of 4. If
689 * the rest data bug is fixed, this can be lowered to 1.
690 */
691 atari_read_overruns = 4;
692 }
693#endif /*REAL_DMA*/
694 } else { /* ! IS_A_TT */
695
696 /* Nothing to do for the interrupt: the ST-DMA is initialized
697 * already by atari_init_INTS()
698 */
699
700#ifdef REAL_DMA
701 atari_dma_residual = 0;
702 atari_dma_active = 0;
703 atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000
704 : 0xff000000);
705#endif
706 }
707 478
708 printk(KERN_INFO "scsi%d: options CAN_QUEUE=%d CMD_PER_LUN=%d SCAT-GAT=%d " 479 stdma_lock(scsi_falcon_intr, instance);
709#ifdef SUPPORT_TAGS
710 "TAGGED-QUEUING=%s "
711#endif
712 "HOSTID=%d",
713 instance->host_no, instance->hostt->can_queue,
714 instance->hostt->cmd_per_lun,
715 instance->hostt->sg_tablesize,
716#ifdef SUPPORT_TAGS
717 setup_use_tagged_queuing ? "yes" : "no",
718#endif
719 instance->hostt->this_id );
720 NCR5380_print_options(instance);
721 printk("\n");
722
723 called = 1;
724 return 1;
725}
726
727static int atari_scsi_release(struct Scsi_Host *sh)
728{
729 if (IS_A_TT())
730 free_irq(IRQ_TT_MFP_SCSI, sh);
731 if (atari_dma_buffer)
732 atari_stram_free(atari_dma_buffer);
733 NCR5380_exit(sh);
734 return 1; 480 return 1;
735} 481}
736 482
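falcon_get_lock() now reports success instead of sleeping on the old fairness queues; in interrupt context it falls back to the non-blocking stdma_try_lock(). Presumably the core driver consumes this through the NCR5380_acquire_dma_irq() hook defined earlier in this file, along the lines of:

	/* hypothetical call site in the core's command dispatch path */
	if (!NCR5380_acquire_dma_irq(instance))
		return SCSI_MLQUEUE_HOST_BUSY;	/* ST-DMA busy; mid-layer retries */

which matches the rewritten comment above about telling the SCSI mid-layer to defer.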
@@ -739,7 +485,7 @@ static int __init atari_scsi_setup(char *str)
739{ 485{
740 /* Format of atascsi parameter is: 486 /* Format of atascsi parameter is:
741 * atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> 487 * atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
742 * Defaults depend on TT or Falcon, hostid determined at run time. 488 * Defaults depend on TT or Falcon, determined at run time.
743 * Negative values mean don't change. 489 * Negative values mean don't change.
744 */ 490 */
745 int ints[6]; 491 int ints[6];
@@ -750,36 +496,17 @@ static int __init atari_scsi_setup(char *str)
750 printk("atari_scsi_setup: no arguments!\n"); 496 printk("atari_scsi_setup: no arguments!\n");
751 return 0; 497 return 0;
752 } 498 }
753 499 if (ints[0] >= 1)
754 if (ints[0] >= 1) { 500 setup_can_queue = ints[1];
755 if (ints[1] > 0) 501 if (ints[0] >= 2)
756 /* no limits on this, just > 0 */ 502 setup_cmd_per_lun = ints[2];
757 setup_can_queue = ints[1]; 503 if (ints[0] >= 3)
758 } 504 setup_sg_tablesize = ints[3];
759 if (ints[0] >= 2) { 505 if (ints[0] >= 4)
760 if (ints[2] > 0) 506 setup_hostid = ints[4];
761 setup_cmd_per_lun = ints[2];
762 }
763 if (ints[0] >= 3) {
764 if (ints[3] >= 0) {
765 setup_sg_tablesize = ints[3];
766 /* Must be <= SG_ALL (255) */
767 if (setup_sg_tablesize > SG_ALL)
768 setup_sg_tablesize = SG_ALL;
769 }
770 }
771 if (ints[0] >= 4) {
772 /* Must be between 0 and 7 */
773 if (ints[4] >= 0 && ints[4] <= 7)
774 setup_hostid = ints[4];
775 else if (ints[4] > 7)
776 printk("atari_scsi_setup: invalid host ID %d !\n", ints[4]);
777 }
778#ifdef SUPPORT_TAGS 507#ifdef SUPPORT_TAGS
779 if (ints[0] >= 5) { 508 if (ints[0] >= 5)
780 if (ints[5] >= 0) 509 setup_use_tagged_queuing = ints[5];
781 setup_use_tagged_queuing = !!ints[5];
782 }
783#endif 510#endif
784 511
785 return 1; 512 return 1;
@@ -788,45 +515,6 @@ static int __init atari_scsi_setup(char *str)
788__setup("atascsi=", atari_scsi_setup); 515__setup("atascsi=", atari_scsi_setup);
789#endif /* !MODULE */ 516#endif /* !MODULE */
790 517
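As a worked example of the simplified parser: booting with atascsi=16,8,255,7,1 requests can_queue=16, cmd_per_lun=8, sg_tablesize=255, host ID 7 and tagged queueing enabled. Note the old range checks are gone: the host ID is still masked to 0-7 later in atari_scsi_probe() (setup_hostid & 7), but sg_tablesize is no longer clamped to SG_ALL, so out-of-range values now reach the template unchecked.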
791static int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
792{
793 int rv;
794 struct NCR5380_hostdata *hostdata =
795 (struct NCR5380_hostdata *)cmd->device->host->hostdata;
796
797 /* For doing the reset, SCSI interrupts must be disabled first,
798 * since the 5380 raises its IRQ line while _RST is active and we
799 * can't disable interrupts completely, since we need the timer.
800 */
801 /* And abort a maybe active DMA transfer */
802 if (IS_A_TT()) {
803 atari_turnoff_irq(IRQ_TT_MFP_SCSI);
804#ifdef REAL_DMA
805 tt_scsi_dma.dma_ctrl = 0;
806#endif /* REAL_DMA */
807 } else {
808 atari_turnoff_irq(IRQ_MFP_FSCSI);
809#ifdef REAL_DMA
810 st_dma.dma_mode_status = 0x90;
811 atari_dma_active = 0;
812 atari_dma_orig_addr = NULL;
813#endif /* REAL_DMA */
814 }
815
816 rv = NCR5380_bus_reset(cmd);
817
818 /* Re-enable ints */
819 if (IS_A_TT()) {
820 atari_turnon_irq(IRQ_TT_MFP_SCSI);
821 } else {
822 atari_turnon_irq(IRQ_MFP_FSCSI);
823 }
824 if (rv == SUCCESS)
825 falcon_release_lock_if_possible(hostdata);
826
827 return rv;
828}
829
830 518
831#ifdef CONFIG_ATARI_SCSI_RESET_BOOT 519#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
832static void __init atari_scsi_reset_boot(void) 520static void __init atari_scsi_reset_boot(void)
@@ -860,15 +548,6 @@ static void __init atari_scsi_reset_boot(void)
860} 548}
861#endif 549#endif
862 550
863
864static const char *atari_scsi_info(struct Scsi_Host *host)
865{
866 /* atari_scsi_detect() is verbose enough... */
867 static const char string[] = "Atari native SCSI";
868 return string;
869}
870
871
872#if defined(REAL_DMA) 551#if defined(REAL_DMA)
873 552
874static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, 553static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
@@ -949,7 +628,7 @@ static long atari_scsi_dma_residual(struct Scsi_Host *instance)
949#define CMD_SURELY_BYTE_MODE 1 628#define CMD_SURELY_BYTE_MODE 1
950#define CMD_MODE_UNKNOWN 2 629#define CMD_MODE_UNKNOWN 2
951 630
952static int falcon_classify_cmd(Scsi_Cmnd *cmd) 631static int falcon_classify_cmd(struct scsi_cmnd *cmd)
953{ 632{
954 unsigned char opcode = cmd->cmnd[0]; 633 unsigned char opcode = cmd->cmnd[0];
955 634
@@ -981,7 +660,7 @@ static int falcon_classify_cmd(Scsi_Cmnd *cmd)
981 */ 660 */
982 661
983static unsigned long atari_dma_xfer_len(unsigned long wanted_len, 662static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
984 Scsi_Cmnd *cmd, int write_flag) 663 struct scsi_cmnd *cmd, int write_flag)
985{ 664{
986 unsigned long possible_len, limit; 665 unsigned long possible_len, limit;
987 666
@@ -1099,23 +778,247 @@ static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
1099 778
1100#include "atari_NCR5380.c" 779#include "atari_NCR5380.c"
1101 780
1102static struct scsi_host_template driver_template = { 781static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
782{
783 int rv;
784 unsigned long flags;
785
786 local_irq_save(flags);
787
788#ifdef REAL_DMA
789 /* Abort a maybe active DMA transfer */
790 if (IS_A_TT()) {
791 tt_scsi_dma.dma_ctrl = 0;
792 } else {
793 st_dma.dma_mode_status = 0x90;
794 atari_dma_active = 0;
795 atari_dma_orig_addr = NULL;
796 }
797#endif
798
799 rv = NCR5380_bus_reset(cmd);
800
801 /* The 5380 raises its IRQ line while _RST is active but the ST DMA
802 * "lock" has been released so this interrupt may end up handled by
803 * floppy or IDE driver (if one of them holds the lock). The NCR5380
804 * interrupt flag has been cleared already.
805 */
806
807 local_irq_restore(flags);
808
809 return rv;
810}
811
812#define DRV_MODULE_NAME "atari_scsi"
813#define PFX DRV_MODULE_NAME ": "
814
815static struct scsi_host_template atari_scsi_template = {
816 .module = THIS_MODULE,
817 .proc_name = DRV_MODULE_NAME,
1103 .show_info = atari_scsi_show_info, 818 .show_info = atari_scsi_show_info,
1104 .name = "Atari native SCSI", 819 .name = "Atari native SCSI",
1105 .detect = atari_scsi_detect,
1106 .release = atari_scsi_release,
1107 .info = atari_scsi_info, 820 .info = atari_scsi_info,
1108 .queuecommand = atari_scsi_queue_command, 821 .queuecommand = atari_scsi_queue_command,
1109 .eh_abort_handler = atari_scsi_abort, 822 .eh_abort_handler = atari_scsi_abort,
1110 .eh_bus_reset_handler = atari_scsi_bus_reset, 823 .eh_bus_reset_handler = atari_scsi_bus_reset,
1111 .can_queue = 0, /* initialized at run-time */ 824 .this_id = 7,
1112 .this_id = 0, /* initialized at run-time */
1113 .sg_tablesize = 0, /* initialized at run-time */
1114 .cmd_per_lun = 0, /* initialized at run-time */
1115 .use_clustering = DISABLE_CLUSTERING 825 .use_clustering = DISABLE_CLUSTERING
1116}; 826};
1117 827
828static int __init atari_scsi_probe(struct platform_device *pdev)
829{
830 struct Scsi_Host *instance;
831 int error;
832 struct resource *irq;
833 int host_flags = 0;
834
835 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
836 if (!irq)
837 return -ENODEV;
838
839 if (ATARIHW_PRESENT(TT_SCSI)) {
840 atari_scsi_reg_read = atari_scsi_tt_reg_read;
841 atari_scsi_reg_write = atari_scsi_tt_reg_write;
842 } else {
843 atari_scsi_reg_read = atari_scsi_falcon_reg_read;
844 atari_scsi_reg_write = atari_scsi_falcon_reg_write;
845 }
846
 847 /* The values for CMD_PER_LUN and CAN_QUEUE are somewhat arbitrary.
848 * Higher values should work, too; try it!
849 * (But cmd_per_lun costs memory!)
850 *
851 * But there seems to be a bug somewhere that requires CAN_QUEUE to be
852 * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts seen since
853 * changed CMD_PER_LUN...
854 *
855 * Note: The Falcon currently uses 8/1 setting due to unsolved problems
856 * with cmd_per_lun != 1
857 */
858 if (ATARIHW_PRESENT(TT_SCSI)) {
859 atari_scsi_template.can_queue = 16;
860 atari_scsi_template.cmd_per_lun = 8;
861 atari_scsi_template.sg_tablesize = SG_ALL;
862 } else {
863 atari_scsi_template.can_queue = 8;
864 atari_scsi_template.cmd_per_lun = 1;
865 atari_scsi_template.sg_tablesize = SG_NONE;
866 }
867
868 if (setup_can_queue > 0)
869 atari_scsi_template.can_queue = setup_can_queue;
870
871 if (setup_cmd_per_lun > 0)
872 atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
873
874 /* Leave sg_tablesize at 0 on a Falcon! */
875 if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
876 atari_scsi_template.sg_tablesize = setup_sg_tablesize;
877
878 if (setup_hostid >= 0) {
879 atari_scsi_template.this_id = setup_hostid & 7;
880 } else {
881 /* Test if a host id is set in the NVRam */
882 if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) {
883 unsigned char b = nvram_read_byte(14);
884
885 /* Arbitration enabled? (for TOS)
886 * If yes, use configured host ID
887 */
888 if (b & 0x80)
889 atari_scsi_template.this_id = b & 7;
890 }
891 }
892
893
894#ifdef REAL_DMA
895 /* If running on a Falcon and if there's TT-Ram (i.e., more than one
896 * memory block, since there's always ST-Ram in a Falcon), then
897 * allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers
898 * from/to alternative Ram.
899 */
900 if (ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) &&
901 m68k_num_memory > 1) {
902 atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI");
903 if (!atari_dma_buffer) {
904 pr_err(PFX "can't allocate ST-RAM double buffer\n");
905 return -ENOMEM;
906 }
907 atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
908 atari_dma_orig_addr = 0;
909 }
910#endif
911
912 instance = scsi_host_alloc(&atari_scsi_template,
913 sizeof(struct NCR5380_hostdata));
914 if (!instance) {
915 error = -ENOMEM;
916 goto fail_alloc;
917 }
918 atari_scsi_host = instance;
919
920#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
921 atari_scsi_reset_boot();
922#endif
923
924 instance->irq = irq->start;
925
926 host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP;
927
928#ifdef SUPPORT_TAGS
929 host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
930#endif
931
932 NCR5380_init(instance, host_flags);
933
934 if (IS_A_TT()) {
935 error = request_irq(instance->irq, scsi_tt_intr, 0,
936 "NCR5380", instance);
937 if (error) {
938 pr_err(PFX "request irq %d failed, aborting\n",
939 instance->irq);
940 goto fail_irq;
941 }
942 tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */
943#ifdef REAL_DMA
944 tt_scsi_dma.dma_ctrl = 0;
945 atari_dma_residual = 0;
946
947 /* While the read overruns (described by Drew Eckhardt in
948 * NCR5380.c) never happened on TTs, they do in fact on the
949 * Medusa (This was the cause why SCSI didn't work right for
950 * so long there.) Since handling the overruns slows down
951 * a bit, I turned the #ifdef's into a runtime condition.
952 *
953 * In principle it should be sufficient to do max. 1 byte with
954 * PIO, but there is another problem on the Medusa with the DMA
955 * rest data register. So read_overruns is currently set
956 * to 4 to avoid having transfers that aren't a multiple of 4.
957 * If the rest data bug is fixed, this can be lowered to 1.
958 */
959 if (MACH_IS_MEDUSA) {
960 struct NCR5380_hostdata *hostdata =
961 shost_priv(instance);
962
963 hostdata->read_overruns = 4;
964 }
965#endif
966 } else {
967 /* Nothing to do for the interrupt: the ST-DMA is initialized
968 * already.
969 */
970#ifdef REAL_DMA
971 atari_dma_residual = 0;
972 atari_dma_active = 0;
973 atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000
974 : 0xff000000);
975#endif
976 }
977
978 error = scsi_add_host(instance, NULL);
979 if (error)
980 goto fail_host;
981
982 platform_set_drvdata(pdev, instance);
983
984 scsi_scan_host(instance);
985 return 0;
986
987fail_host:
988 if (IS_A_TT())
989 free_irq(instance->irq, instance);
990fail_irq:
991 NCR5380_exit(instance);
992 scsi_host_put(instance);
993fail_alloc:
994 if (atari_dma_buffer)
995 atari_stram_free(atari_dma_buffer);
996 return error;
997}
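atari_scsi_probe() bails out with -ENODEV unless the platform device carries an IRQ resource, so a matching device named "atari_scsi" must be registered by the Atari platform setup code. A hedged sketch of that arch-side registration (not part of this file; the Falcon case would presumably pass the ST-DMA interrupt instead of IRQ_TT_MFP_SCSI):

	static const struct resource atari_scsi_rsrc __initconst = {
		.flags = IORESOURCE_IRQ,
		.start = IRQ_TT_MFP_SCSI,
		.end   = IRQ_TT_MFP_SCSI,
	};

	platform_device_register_simple("atari_scsi", -1, &atari_scsi_rsrc, 1);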
998
999static int __exit atari_scsi_remove(struct platform_device *pdev)
1000{
1001 struct Scsi_Host *instance = platform_get_drvdata(pdev);
1002
1003 scsi_remove_host(instance);
1004 if (IS_A_TT())
1005 free_irq(instance->irq, instance);
1006 NCR5380_exit(instance);
1007 scsi_host_put(instance);
1008 if (atari_dma_buffer)
1009 atari_stram_free(atari_dma_buffer);
1010 return 0;
1011}
1012
1013static struct platform_driver atari_scsi_driver = {
1014 .remove = __exit_p(atari_scsi_remove),
1015 .driver = {
1016 .name = DRV_MODULE_NAME,
1017 .owner = THIS_MODULE,
1018 },
1019};
1118 1020
1119#include "scsi_module.c" 1021module_platform_driver_probe(atari_scsi_driver, atari_scsi_probe);
1120 1022
1023MODULE_ALIAS("platform:" DRV_MODULE_NAME);
1121MODULE_LICENSE("GPL"); 1024MODULE_LICENSE("GPL");
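module_platform_driver_probe() replaces the old scsi_module.c boilerplate and roughly expands to:

	static int __init atari_scsi_driver_init(void)
	{
		return platform_driver_probe(&atari_scsi_driver, atari_scsi_probe);
	}
	module_init(atari_scsi_driver_init);

	static void __exit atari_scsi_driver_exit(void)
	{
		platform_driver_unregister(&atari_scsi_driver);
	}
	module_exit(atari_scsi_driver_exit);

platform_driver_probe() is the one-shot, non-hotplug registration variant, which is why atari_scsi_probe() can live in __init memory and the driver struct sets only .remove.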
diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h
deleted file mode 100644
index 3299d91d7336..000000000000
--- a/drivers/scsi/atari_scsi.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * atari_scsi.h -- Header file for the Atari native SCSI driver
3 *
4 * Copyright 1994 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
5 *
6 * (Loosely based on the work of Robert De Vries' team)
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file COPYING in the main directory of this archive
10 * for more details.
11 *
12 */
13
14
15#ifndef ATARI_SCSI_H
16#define ATARI_SCSI_H
17
18/* (I_HAVE_OVERRUNS stuff removed) */
19
20#ifndef ASM
21/* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. Higher
22 * values should work, too; try it! (but cmd_per_lun costs memory!) */
23
24/* But there seems to be a bug somewhere that requires CAN_QUEUE to be
25 * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts seen since
26 * changed CMD_PER_LUN... */
27
28/* Note: The Falcon currently uses 8/1 setting due to unsolved problems with
29 * cmd_per_lun != 1 */
30
31#define ATARI_TT_CAN_QUEUE 16
32#define ATARI_TT_CMD_PER_LUN 8
33#define ATARI_TT_SG_TABLESIZE SG_ALL
34
35#define ATARI_FALCON_CAN_QUEUE 8
36#define ATARI_FALCON_CMD_PER_LUN 1
37#define ATARI_FALCON_SG_TABLESIZE SG_NONE
38
39#define DEFAULT_USE_TAGGED_QUEUING 0
40
41
42#define NCR5380_implementation_fields /* none */
43
44#define NCR5380_read(reg) atari_scsi_reg_read( reg )
45#define NCR5380_write(reg, value) atari_scsi_reg_write( reg, value )
46
47#define NCR5380_intr atari_scsi_intr
48#define NCR5380_queue_command atari_scsi_queue_command
49#define NCR5380_abort atari_scsi_abort
50#define NCR5380_show_info atari_scsi_show_info
51#define NCR5380_dma_read_setup(inst,d,c) atari_scsi_dma_setup (inst, d, c, 0)
52#define NCR5380_dma_write_setup(inst,d,c) atari_scsi_dma_setup (inst, d, c, 1)
53#define NCR5380_dma_residual(inst) atari_scsi_dma_residual( inst )
54#define NCR5380_dma_xfer_len(i,cmd,phase) \
55 atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1)
56
57#endif /* ndef ASM */
58#endif /* ATARI_SCSI_H */
59
60
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 30d74a06b993..f3193406776c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -556,7 +556,7 @@ static struct scsi_host_template beiscsi_sht = {
556 .name = "Emulex 10Gbe open-iscsi Initiator Driver", 556 .name = "Emulex 10Gbe open-iscsi Initiator Driver",
557 .proc_name = DRV_NAME, 557 .proc_name = DRV_NAME,
558 .queuecommand = iscsi_queuecommand, 558 .queuecommand = iscsi_queuecommand,
559 .change_queue_depth = iscsi_change_queue_depth, 559 .change_queue_depth = scsi_change_queue_depth,
560 .slave_configure = beiscsi_slave_configure, 560 .slave_configure = beiscsi_slave_configure,
561 .target_alloc = iscsi_target_alloc, 561 .target_alloc = iscsi_target_alloc,
562 .eh_abort_handler = beiscsi_eh_abort, 562 .eh_abort_handler = beiscsi_eh_abort,
@@ -570,7 +570,7 @@ static struct scsi_host_template beiscsi_sht = {
570 .cmd_per_lun = BEISCSI_CMD_PER_LUN, 570 .cmd_per_lun = BEISCSI_CMD_PER_LUN,
571 .use_clustering = ENABLE_CLUSTERING, 571 .use_clustering = ENABLE_CLUSTERING,
572 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, 572 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
573 573 .track_queue_depth = 1,
574}; 574};
575 575
576static struct scsi_transport_template *beiscsi_scsi_transport; 576static struct scsi_transport_template *beiscsi_scsi_transport;
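This hunk is one instance of a tree-wide conversion: transport-specific queue-depth callbacks (here iscsi_change_queue_depth) are dropped in favor of the midlayer's scsi_change_queue_depth(), and .track_queue_depth = 1 opts the host into the midlayer's QUEUE_FULL tracking that the old callbacks handled themselves. The replacement call pattern, with an illustrative depth value:

	/* e.g. in a slave_configure() hook; returns the depth actually set */
	scsi_change_queue_depth(sdev, EXAMPLE_QUEUE_DEPTH);	/* EXAMPLE_QUEUE_DEPTH is illustrative */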
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 8e83d0474fe7..74a307c0a240 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -260,18 +260,9 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
260 unsigned long flags; 260 unsigned long flags;
261 void *kern_buf; 261 void *kern_buf;
262 262
263 kern_buf = kzalloc(nbytes, GFP_KERNEL); 263 kern_buf = memdup_user(buf, nbytes);
264 264 if (IS_ERR(kern_buf))
265 if (!kern_buf) { 265 return PTR_ERR(kern_buf);
266 printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
267 bfad->inst_no);
268 return -ENOMEM;
269 }
270
271 if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
272 kfree(kern_buf);
273 return -ENOMEM;
274 }
275 266
276 rc = sscanf(kern_buf, "%x:%x", &addr, &len); 267 rc = sscanf(kern_buf, "%x:%x", &addr, &len);
277 if (rc < 2) { 268 if (rc < 2) {
@@ -336,18 +327,9 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
336 unsigned long flags; 327 unsigned long flags;
337 void *kern_buf; 328 void *kern_buf;
338 329
339 kern_buf = kzalloc(nbytes, GFP_KERNEL); 330 kern_buf = memdup_user(buf, nbytes);
340 331 if (IS_ERR(kern_buf))
341 if (!kern_buf) { 332 return PTR_ERR(kern_buf);
342 printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
343 bfad->inst_no);
344 return -ENOMEM;
345 }
346
347 if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
348 kfree(kern_buf);
349 return -ENOMEM;
350 }
351 333
352 rc = sscanf(kern_buf, "%x:%x", &addr, &val); 334 rc = sscanf(kern_buf, "%x:%x", &addr, &val);
353 if (rc < 2) { 335 if (rc < 2) {
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 87b09cd232cc..7223b0006740 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -776,7 +776,7 @@ bfad_thread_workq(struct bfad_s *bfad)
776static int 776static int
777bfad_im_slave_configure(struct scsi_device *sdev) 777bfad_im_slave_configure(struct scsi_device *sdev)
778{ 778{
779 scsi_adjust_queue_depth(sdev, bfa_lun_queue_depth); 779 scsi_change_queue_depth(sdev, bfa_lun_queue_depth);
780 return 0; 780 return 0;
781} 781}
782 782
@@ -866,7 +866,7 @@ bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
866 if (bfa_lun_queue_depth > tmp_sdev->queue_depth) { 866 if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
867 if (tmp_sdev->id != sdev->id) 867 if (tmp_sdev->id != sdev->id)
868 continue; 868 continue;
869 scsi_adjust_queue_depth(tmp_sdev, 869 scsi_change_queue_depth(tmp_sdev,
870 tmp_sdev->queue_depth + 1); 870 tmp_sdev->queue_depth + 1);
871 871
872 itnim->last_ramp_up_time = jiffies; 872 itnim->last_ramp_up_time = jiffies;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 3c7db3e8aa76..e861f286b42e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1088,7 +1088,7 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
1088 mutex_unlock(&bnx2fc_dev_lock); 1088 mutex_unlock(&bnx2fc_dev_lock);
1089 rtnl_unlock(); 1089 rtnl_unlock();
1090 1090
1091 if (IS_ERR(vn_port)) { 1091 if (!vn_port) {
1092 printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n", 1092 printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
1093 netdev->name); 1093 netdev->name);
1094 return -EIO; 1094 return -EIO;
@@ -2202,6 +2202,7 @@ static int _bnx2fc_create(struct net_device *netdev,
2202 interface = bnx2fc_interface_create(hba, netdev, fip_mode); 2202 interface = bnx2fc_interface_create(hba, netdev, fip_mode);
2203 if (!interface) { 2203 if (!interface) {
2204 printk(KERN_ERR PFX "bnx2fc_interface_create failed\n"); 2204 printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
2205 rc = -ENOMEM;
2205 goto ifput_err; 2206 goto ifput_err;
2206 } 2207 }
2207 2208
@@ -2790,7 +2791,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
2790 .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */ 2791 .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
2791 .eh_host_reset_handler = fc_eh_host_reset, 2792 .eh_host_reset_handler = fc_eh_host_reset,
2792 .slave_alloc = fc_slave_alloc, 2793 .slave_alloc = fc_slave_alloc,
2793 .change_queue_depth = fc_change_queue_depth, 2794 .change_queue_depth = scsi_change_queue_depth,
2794 .change_queue_type = scsi_change_queue_type, 2795 .change_queue_type = scsi_change_queue_type,
2795 .this_id = -1, 2796 .this_id = -1,
2796 .cmd_per_lun = 3, 2797 .cmd_per_lun = 3,
@@ -2798,6 +2799,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
2798 .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, 2799 .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
2799 .max_sectors = 1024, 2800 .max_sectors = 1024,
2800 .use_blk_tags = 1, 2801 .use_blk_tags = 1,
2802 .track_queue_depth = 1,
2801}; 2803};
2802 2804
2803static struct libfc_function_template bnx2fc_libfc_fcn_templ = { 2805static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
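
The vport_create fix above works because libfc_vport_create() reports failure with a NULL return rather than an ERR_PTR(), so the old IS_ERR() test could never fire. The general rule is that the error check must match the constructor's convention; a sketch contrasting the two styles with calls used in this series (the privsize argument is illustrative):

	/* NULL-on-failure constructor: plain NULL test */
	vn_port = libfc_vport_create(vport, privsize);
	if (!vn_port)
		return -EIO;

	/* ERR_PTR-on-failure constructor: IS_ERR()/PTR_ERR() */
	kern_buf = memdup_user(buf, nbytes);
	if (IS_ERR(kern_buf))
		return PTR_ERR(kern_buf);
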
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 7a36388822aa..e53078d03309 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2259,7 +2259,7 @@ static struct scsi_host_template bnx2i_host_template = {
2259 .eh_abort_handler = iscsi_eh_abort, 2259 .eh_abort_handler = iscsi_eh_abort,
2260 .eh_device_reset_handler = iscsi_eh_device_reset, 2260 .eh_device_reset_handler = iscsi_eh_device_reset,
2261 .eh_target_reset_handler = iscsi_eh_recover_target, 2261 .eh_target_reset_handler = iscsi_eh_recover_target,
2262 .change_queue_depth = iscsi_change_queue_depth, 2262 .change_queue_depth = scsi_change_queue_depth,
2263 .target_alloc = iscsi_target_alloc, 2263 .target_alloc = iscsi_target_alloc,
2264 .can_queue = 2048, 2264 .can_queue = 2048,
2265 .max_sectors = 127, 2265 .max_sectors = 127,
@@ -2268,6 +2268,7 @@ static struct scsi_host_template bnx2i_host_template = {
2268 .use_clustering = ENABLE_CLUSTERING, 2268 .use_clustering = ENABLE_CLUSTERING,
2269 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, 2269 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
2270 .shost_attrs = bnx2i_dev_attributes, 2270 .shost_attrs = bnx2i_dev_attributes,
2271 .track_queue_depth = 1,
2271}; 2272};
2272 2273
2273struct iscsi_transport bnx2i_iscsi_transport = { 2274struct iscsi_transport bnx2i_iscsi_transport = {
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 44a8cc51428f..4d0b6ce55f20 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -2241,7 +2241,7 @@ csio_slave_alloc(struct scsi_device *sdev)
2241static int 2241static int
2242csio_slave_configure(struct scsi_device *sdev) 2242csio_slave_configure(struct scsi_device *sdev)
2243{ 2243{
2244 scsi_adjust_queue_depth(sdev, csio_lun_qdepth); 2244 scsi_change_queue_depth(sdev, csio_lun_qdepth);
2245 return 0; 2245 return 0;
2246} 2246}
2247 2247
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 49692a1ac44a..3db4c63978c5 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -86,7 +86,7 @@ static struct scsi_host_template cxgb3i_host_template = {
86 .proc_name = DRV_MODULE_NAME, 86 .proc_name = DRV_MODULE_NAME,
87 .can_queue = CXGB3I_SCSI_HOST_QDEPTH, 87 .can_queue = CXGB3I_SCSI_HOST_QDEPTH,
88 .queuecommand = iscsi_queuecommand, 88 .queuecommand = iscsi_queuecommand,
89 .change_queue_depth = iscsi_change_queue_depth, 89 .change_queue_depth = scsi_change_queue_depth,
90 .sg_tablesize = SG_ALL, 90 .sg_tablesize = SG_ALL,
91 .max_sectors = 0xFFFF, 91 .max_sectors = 0xFFFF,
92 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 92 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
@@ -96,6 +96,7 @@ static struct scsi_host_template cxgb3i_host_template = {
96 .target_alloc = iscsi_target_alloc, 96 .target_alloc = iscsi_target_alloc,
97 .use_clustering = DISABLE_CLUSTERING, 97 .use_clustering = DISABLE_CLUSTERING,
98 .this_id = -1, 98 .this_id = -1,
99 .track_queue_depth = 1,
99}; 100};
100 101
101static struct iscsi_transport cxgb3i_iscsi_transport = { 102static struct iscsi_transport cxgb3i_iscsi_transport = {
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 15081257cfc8..e6c3f55d9d36 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -89,7 +89,7 @@ static struct scsi_host_template cxgb4i_host_template = {
89 .proc_name = DRV_MODULE_NAME, 89 .proc_name = DRV_MODULE_NAME,
90 .can_queue = CXGB4I_SCSI_HOST_QDEPTH, 90 .can_queue = CXGB4I_SCSI_HOST_QDEPTH,
91 .queuecommand = iscsi_queuecommand, 91 .queuecommand = iscsi_queuecommand,
92 .change_queue_depth = iscsi_change_queue_depth, 92 .change_queue_depth = scsi_change_queue_depth,
93 .sg_tablesize = SG_ALL, 93 .sg_tablesize = SG_ALL,
94 .max_sectors = 0xFFFF, 94 .max_sectors = 0xFFFF,
95 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 95 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
@@ -99,6 +99,7 @@ static struct scsi_host_template cxgb4i_host_template = {
99 .target_alloc = iscsi_target_alloc, 99 .target_alloc = iscsi_target_alloc,
100 .use_clustering = DISABLE_CLUSTERING, 100 .use_clustering = DISABLE_CLUSTERING,
101 .this_id = -1, 101 .this_id = -1,
102 .track_queue_depth = 1,
102}; 103};
103 104
104static struct iscsi_transport cxgb4i_iscsi_transport = { 105static struct iscsi_transport cxgb4i_iscsi_transport = {
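
All four iSCSI/FCoE host templates in this stretch get the same two-line treatment: .change_queue_depth now points straight at the midlayer's scsi_change_queue_depth(), and .track_queue_depth is set so the core keeps the per-device accounting that queue-full tracking relies on. Trimmed to just the affected fields (other members elided, names illustrative):

	static struct scsi_host_template example_host_template = {
		.proc_name		= "example",
		.queuecommand		= iscsi_queuecommand,
		/* delegate depth changes to the SCSI midlayer */
		.change_queue_depth	= scsi_change_queue_depth,
		/* ask the core to track outstanding commands per device */
		.track_queue_depth	= 1,
	};
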
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 4b0dd8c56707..3e088125a8be 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -33,20 +33,20 @@
33/* 33/*
34 * Definitions for the generic 5380 driver. 34 * Definitions for the generic 5380 driver.
35 */ 35 */
36#define AUTOSENSE 36
37#define DONT_USE_INTR
37 38
38#define NCR5380_read(reg) inb(port + reg) 39#define NCR5380_read(reg) inb(port + reg)
39#define NCR5380_write(reg, value) outb(value, port + reg) 40#define NCR5380_write(reg, value) outb(value, port + reg)
40 41
41#define NCR5380_implementation_fields unsigned int port 42#define NCR5380_implementation_fields /* none */
42#define NCR5380_local_declare() NCR5380_implementation_fields 43#define NCR5380_local_declare() unsigned int port
43#define NCR5380_setup(instance) port = instance->io_port 44#define NCR5380_setup(instance) port = instance->io_port
44 45
45/* 46/*
46 * Includes needed for NCR5380.[ch] (XXX: Move them to NCR5380.h) 47 * Includes needed for NCR5380.[ch] (XXX: Move them to NCR5380.h)
47 */ 48 */
48#include <linux/delay.h> 49#include <linux/delay.h>
49#include "scsi.h"
50 50
51#include "NCR5380.h" 51#include "NCR5380.h"
52#include "NCR5380.c" 52#include "NCR5380.c"
@@ -58,6 +58,7 @@
58static struct scsi_host_template dmx3191d_driver_template = { 58static struct scsi_host_template dmx3191d_driver_template = {
59 .proc_name = DMX3191D_DRIVER_NAME, 59 .proc_name = DMX3191D_DRIVER_NAME,
60 .name = "Domex DMX3191D", 60 .name = "Domex DMX3191D",
61 .info = NCR5380_info,
61 .queuecommand = NCR5380_queue_command, 62 .queuecommand = NCR5380_queue_command,
62 .eh_abort_handler = NCR5380_abort, 63 .eh_abort_handler = NCR5380_abort,
63 .eh_bus_reset_handler = NCR5380_bus_reset, 64 .eh_bus_reset_handler = NCR5380_bus_reset,
@@ -90,31 +91,23 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
90 if (!shost) 91 if (!shost)
91 goto out_release_region; 92 goto out_release_region;
92 shost->io_port = io; 93 shost->io_port = io;
93 shost->irq = pdev->irq;
94 94
95 NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E); 95 /* This card does not seem to raise an interrupt on pdev->irq.
96 * Steam-powered SCSI controllers run without an IRQ anyway.
97 */
98 shost->irq = NO_IRQ;
96 99
97 if (request_irq(pdev->irq, NCR5380_intr, IRQF_SHARED, 100 NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E);
98 DMX3191D_DRIVER_NAME, shost)) {
99 /*
100 * Steam powered scsi controllers run without an IRQ anyway
101 */
102 printk(KERN_WARNING "dmx3191: IRQ %d not available - "
103 "switching to polled mode.\n", pdev->irq);
104 shost->irq = SCSI_IRQ_NONE;
105 }
106 101
107 pci_set_drvdata(pdev, shost); 102 pci_set_drvdata(pdev, shost);
108 103
109 error = scsi_add_host(shost, &pdev->dev); 104 error = scsi_add_host(shost, &pdev->dev);
110 if (error) 105 if (error)
111 goto out_free_irq; 106 goto out_release_region;
112 107
113 scsi_scan_host(shost); 108 scsi_scan_host(shost);
114 return 0; 109 return 0;
115 110
116 out_free_irq:
117 free_irq(shost->irq, shost);
118 out_release_region: 111 out_release_region:
119 release_region(io, DMX3191D_REGION_LEN); 112 release_region(io, DMX3191D_REGION_LEN);
120 out_disable_device: 113 out_disable_device:
@@ -131,8 +124,6 @@ static void dmx3191d_remove_one(struct pci_dev *pdev)
131 124
132 NCR5380_exit(shost); 125 NCR5380_exit(shost);
133 126
134 if (shost->irq != SCSI_IRQ_NONE)
135 free_irq(shost->irq, shost);
136 release_region(shost->io_port, DMX3191D_REGION_LEN); 127 release_region(shost->io_port, DMX3191D_REGION_LEN);
137 pci_disable_device(pdev); 128 pci_disable_device(pdev);
138 129
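
The dmx3191d conversion leans on the NCR5380 core's macro glue: NCR5380_implementation_fields, which is embedded in the per-host data, shrinks to nothing, and NCR5380_local_declare()/NCR5380_setup() now declare and initialize `port` as a local in each core routine that touches the registers. Roughly how the glue expands with the definitions above (example_fn is illustrative):

	static void example_fn(struct Scsi_Host *instance)
	{
		NCR5380_local_declare();	/* expands to: unsigned int port; */
		NCR5380_setup(instance);	/* expands to: port = instance->io_port; */

		/* NCR5380_write(reg, v) expands to: outb(v, port + reg) */
		NCR5380_write(MODE_REG, 0);
	}
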
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 1af8d54bcded..0bf976936a10 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -415,7 +415,7 @@ static int adpt_slave_configure(struct scsi_device * device)
415 pHba = (adpt_hba *) host->hostdata[0]; 415 pHba = (adpt_hba *) host->hostdata[0];
416 416
417 if (host->can_queue && device->tagged_supported) { 417 if (host->can_queue && device->tagged_supported) {
418 scsi_adjust_queue_depth(device, 418 scsi_change_queue_depth(device,
419 host->can_queue - 1); 419 host->can_queue - 1);
420 } 420 }
421 return 0; 421 return 0;
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index 0a667fe05006..4c74c7ba2dff 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -1,5 +1,4 @@
1 1
2#define AUTOSENSE
3#define PSEUDO_DMA 2#define PSEUDO_DMA
4#define DONT_USE_INTR 3#define DONT_USE_INTR
5#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */ 4#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */
@@ -18,29 +17,9 @@
18 * (Unix and Linux consulting and custom programming) 17 * (Unix and Linux consulting and custom programming)
19 * drew@colorado.edu 18 * drew@colorado.edu
20 * +1 (303) 440-4894 19 * +1 (303) 440-4894
21 * 20 */
22 * DISTRIBUTION RELEASE 1.
23 *
24 * For more information, please consult
25 *
26 * NCR 5380 Family
27 * SCSI Protocol Controller
28 * Databook
29*/
30 21
31/* 22/*
32 * Options :
33 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
34 * for commands that return with a CHECK CONDITION status.
35 *
36 * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
37 * increase compared to polled I/O.
38 *
39 * PARITY - enable parity checking. Not supported.
40 *
41 * UNSAFE - leave interrupts enabled during pseudo-DMA transfers.
42 * You probably want this.
43 *
44 * The card is detected and initialized in one of several ways : 23 * The card is detected and initialized in one of several ways :
45 * 1. Autoprobe (default) - since the board is memory mapped, 24 * 1. Autoprobe (default) - since the board is memory mapped,
46 * a BIOS signature is scanned for to locate the registers. 25 * a BIOS signature is scanned for to locate the registers.
@@ -79,15 +58,11 @@
79#include <linux/init.h> 58#include <linux/init.h>
80#include <linux/interrupt.h> 59#include <linux/interrupt.h>
81#include <linux/io.h> 60#include <linux/io.h>
82#include "scsi.h"
83#include <scsi/scsi_host.h> 61#include <scsi/scsi_host.h>
84#include "dtc.h" 62#include "dtc.h"
85#define AUTOPROBE_IRQ 63#define AUTOPROBE_IRQ
86#include "NCR5380.h" 64#include "NCR5380.h"
87 65
88
89#define DTC_PUBLIC_RELEASE 2
90
91/* 66/*
92 * The DTC3180 & 3280 boards are memory mapped. 67 * The DTC3180 & 3280 boards are memory mapped.
93 * 68 *
@@ -173,10 +148,13 @@ static const struct signature {
173 * 148 *
174 */ 149 */
175 150
176static void __init dtc_setup(char *str, int *ints) 151static int __init dtc_setup(char *str)
177{ 152{
178 static int commandline_current = 0; 153 static int commandline_current = 0;
179 int i; 154 int i;
155 int ints[10];
156
157 get_options(str, ARRAY_SIZE(ints), ints);
180 if (ints[0] != 2) 158 if (ints[0] != 2)
181 printk("dtc_setup: usage dtc=address,irq\n"); 159 printk("dtc_setup: usage dtc=address,irq\n");
182 else if (commandline_current < NO_OVERRIDES) { 160 else if (commandline_current < NO_OVERRIDES) {
@@ -189,7 +167,10 @@ static void __init dtc_setup(char *str, int *ints)
189 } 167 }
190 ++commandline_current; 168 ++commandline_current;
191 } 169 }
170 return 1;
192} 171}
172
173__setup("dtc=", dtc_setup);
193#endif 174#endif
194 175
195/* 176/*
@@ -213,10 +194,6 @@ static int __init dtc_detect(struct scsi_host_template * tpnt)
213 void __iomem *base; 194 void __iomem *base;
214 int sig, count; 195 int sig, count;
215 196
216 tpnt->proc_name = "dtc3x80";
217 tpnt->show_info = dtc_show_info;
218 tpnt->write_info = dtc_write_info;
219
220 for (count = 0; current_override < NO_OVERRIDES; ++current_override) { 197 for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
221 addr = 0; 198 addr = 0;
222 base = NULL; 199 base = NULL;
@@ -271,38 +248,33 @@ found:
271 else 248 else
272 instance->irq = NCR5380_probe_irq(instance, DTC_IRQS); 249 instance->irq = NCR5380_probe_irq(instance, DTC_IRQS);
273 250
251 /* Compatibility with documented NCR5380 kernel parameters */
252 if (instance->irq == 255)
253 instance->irq = NO_IRQ;
254
274#ifndef DONT_USE_INTR 255#ifndef DONT_USE_INTR
275 /* With interrupts enabled, it will sometimes hang when doing heavy 256 /* With interrupts enabled, it will sometimes hang when doing heavy
276 * reads. So better not enable them until I figure it out. */ 257 * reads. So better not enable them until I figure it out. */
277 if (instance->irq != SCSI_IRQ_NONE) 258 if (instance->irq != NO_IRQ)
278 if (request_irq(instance->irq, dtc_intr, 0, 259 if (request_irq(instance->irq, dtc_intr, 0,
279 "dtc", instance)) { 260 "dtc", instance)) {
280 printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); 261 printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
281 instance->irq = SCSI_IRQ_NONE; 262 instance->irq = NO_IRQ;
282 } 263 }
283 264
284 if (instance->irq == SCSI_IRQ_NONE) { 265 if (instance->irq == NO_IRQ) {
285 printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); 266 printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
286 printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); 267 printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
287 } 268 }
288#else 269#else
289 if (instance->irq != SCSI_IRQ_NONE) 270 if (instance->irq != NO_IRQ)
290 printk(KERN_WARNING "scsi%d : interrupts not used. Might as well not jumper it.\n", instance->host_no); 271 printk(KERN_WARNING "scsi%d : interrupts not used. Might as well not jumper it.\n", instance->host_no);
291 instance->irq = SCSI_IRQ_NONE; 272 instance->irq = NO_IRQ;
292#endif 273#endif
293#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT) 274#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT)
294 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); 275 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
295#endif 276#endif
296 277
297 printk(KERN_INFO "scsi%d : at 0x%05X", instance->host_no, (int) instance->base);
298 if (instance->irq == SCSI_IRQ_NONE)
299 printk(" interrupts disabled");
300 else
301 printk(" irq %d", instance->irq);
302 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", CAN_QUEUE, CMD_PER_LUN, DTC_PUBLIC_RELEASE);
303 NCR5380_print_options(instance);
304 printk("\n");
305
306 ++current_override; 278 ++current_override;
307 ++count; 279 ++count;
308 } 280 }
@@ -354,20 +326,18 @@ static int dtc_biosparam(struct scsi_device *sdev, struct block_device *dev,
354 * timeout. 326 * timeout.
355*/ 327*/
356 328
357static int dtc_maxi = 0;
358static int dtc_wmaxi = 0;
359
360static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) 329static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
361{ 330{
362 unsigned char *d = dst; 331 unsigned char *d = dst;
363 int i; /* For counting time spent in the poll-loop */ 332 int i; /* For counting time spent in the poll-loop */
333 struct NCR5380_hostdata *hostdata = shost_priv(instance);
364 NCR5380_local_declare(); 334 NCR5380_local_declare();
365 NCR5380_setup(instance); 335 NCR5380_setup(instance);
366 336
367 i = 0; 337 i = 0;
368 NCR5380_read(RESET_PARITY_INTERRUPT_REG); 338 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
369 NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE); 339 NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE);
370 if (instance->irq == SCSI_IRQ_NONE) 340 if (instance->irq == NO_IRQ)
371 NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ); 341 NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ);
372 else 342 else
373 NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ | CSR_INT_BASE); 343 NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ | CSR_INT_BASE);
@@ -391,8 +361,8 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
391 NCR5380_write(MODE_REG, 0); /* Clear the operating mode */ 361 NCR5380_write(MODE_REG, 0); /* Clear the operating mode */
392 rtrc(0); 362 rtrc(0);
393 NCR5380_read(RESET_PARITY_INTERRUPT_REG); 363 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
394 if (i > dtc_maxi) 364 if (i > hostdata->spin_max_r)
395 dtc_maxi = i; 365 hostdata->spin_max_r = i;
396 return (0); 366 return (0);
397} 367}
398 368
@@ -412,13 +382,14 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
412static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) 382static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len)
413{ 383{
414 int i; 384 int i;
385 struct NCR5380_hostdata *hostdata = shost_priv(instance);
415 NCR5380_local_declare(); 386 NCR5380_local_declare();
416 NCR5380_setup(instance); 387 NCR5380_setup(instance);
417 388
418 NCR5380_read(RESET_PARITY_INTERRUPT_REG); 389 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
419 NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE); 390 NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE);
420 /* set direction (write) */ 391 /* set direction (write) */
421 if (instance->irq == SCSI_IRQ_NONE) 392 if (instance->irq == NO_IRQ)
422 NCR5380_write(DTC_CONTROL_REG, 0); 393 NCR5380_write(DTC_CONTROL_REG, 0);
423 else 394 else
424 NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR); 395 NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR);
@@ -444,8 +415,8 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src,
444 /* Check for parity error here. fixme. */ 415 /* Check for parity error here. fixme. */
445 NCR5380_write(MODE_REG, 0); /* Clear the operating mode */ 416 NCR5380_write(MODE_REG, 0); /* Clear the operating mode */
446 rtrc(0); 417 rtrc(0);
447 if (i > dtc_wmaxi) 418 if (i > hostdata->spin_max_w)
448 dtc_wmaxi = i; 419 hostdata->spin_max_w = i;
449 return (0); 420 return (0);
450} 421}
451 422
@@ -457,7 +428,7 @@ static int dtc_release(struct Scsi_Host *shost)
457{ 428{
458 NCR5380_local_declare(); 429 NCR5380_local_declare();
459 NCR5380_setup(shost); 430 NCR5380_setup(shost);
460 if (shost->irq) 431 if (shost->irq != NO_IRQ)
461 free_irq(shost->irq, shost); 432 free_irq(shost->irq, shost);
462 NCR5380_exit(shost); 433 NCR5380_exit(shost);
463 if (shost->io_port && shost->n_io_port) 434 if (shost->io_port && shost->n_io_port)
@@ -471,6 +442,10 @@ static struct scsi_host_template driver_template = {
471 .name = "DTC 3180/3280 ", 442 .name = "DTC 3180/3280 ",
472 .detect = dtc_detect, 443 .detect = dtc_detect,
473 .release = dtc_release, 444 .release = dtc_release,
445 .proc_name = "dtc3x80",
446 .show_info = dtc_show_info,
447 .write_info = dtc_write_info,
448 .info = dtc_info,
474 .queuecommand = dtc_queue_command, 449 .queuecommand = dtc_queue_command,
475 .eh_abort_handler = dtc_abort, 450 .eh_abort_handler = dtc_abort,
476 .eh_bus_reset_handler = dtc_bus_reset, 451 .eh_bus_reset_handler = dtc_bus_reset,
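
dtc.c's command-line hook moves from the ancient setup interface, which handed the driver a pre-parsed ints[] array, to a modern __setup() registration whose handler parses its own string with get_options() (ints[0] receives the count of integers parsed). The idiom in isolation, with a placeholder driver name:

	static int __init example_setup(char *str)
	{
		int ints[10];

		/* parse "example=<address>,<irq>" into ints[1..] */
		get_options(str, ARRAY_SIZE(ints), ints);
		if (ints[0] != 2)
			printk("example_setup: usage example=address,irq\n");
		/* ... otherwise stash ints[1] and ints[2] for the probe ... */
		return 1;
	}

	__setup("example=", example_setup);
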
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h
index 92d7cfc3f4fc..78a2332e9064 100644
--- a/drivers/scsi/dtc.h
+++ b/drivers/scsi/dtc.h
@@ -5,24 +5,6 @@
5 * (Unix and Linux consulting and custom programming) 5 * (Unix and Linux consulting and custom programming)
6 * drew@colorado.edu 6 * drew@colorado.edu
7 * +1 (303) 440-4894 7 * +1 (303) 440-4894
8 *
9 * DISTRIBUTION RELEASE 2.
10 *
11 * For more information, please consult
12 *
13 *
14 *
15 * and
16 *
17 * NCR 5380 Family
18 * SCSI Protocol Controller
19 * Databook
20 *
21 * NCR Microelectronics
22 * 1635 Aeroplaza Drive
23 * Colorado Springs, CO 80916
24 * 1+ (719) 578-3400
25 * 1+ (800) 334-5454
26 */ 8 */
27 9
28#ifndef DTC3280_H 10#ifndef DTC3280_H
@@ -32,13 +14,6 @@
32#define DTCDEBUG_INIT 0x1 14#define DTCDEBUG_INIT 0x1
33#define DTCDEBUG_TRANSFER 0x2 15#define DTCDEBUG_TRANSFER 0x2
34 16
35static int dtc_abort(Scsi_Cmnd *);
36static int dtc_biosparam(struct scsi_device *, struct block_device *,
37 sector_t, int*);
38static int dtc_detect(struct scsi_host_template *);
39static int dtc_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
40static int dtc_bus_reset(Scsi_Cmnd *);
41
42#ifndef CMD_PER_LUN 17#ifndef CMD_PER_LUN
43#define CMD_PER_LUN 2 18#define CMD_PER_LUN 2
44#endif 19#endif
@@ -88,6 +63,7 @@ static int dtc_bus_reset(Scsi_Cmnd *);
88#define NCR5380_queue_command dtc_queue_command 63#define NCR5380_queue_command dtc_queue_command
89#define NCR5380_abort dtc_abort 64#define NCR5380_abort dtc_abort
90#define NCR5380_bus_reset dtc_bus_reset 65#define NCR5380_bus_reset dtc_bus_reset
66#define NCR5380_info dtc_info
91#define NCR5380_show_info dtc_show_info 67#define NCR5380_show_info dtc_show_info
92#define NCR5380_write_info dtc_write_info 68#define NCR5380_write_info dtc_write_info
93 69
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index bc0f918f1729..227dd2c2ec2f 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -952,12 +952,12 @@ static int eata2x_slave_configure(struct scsi_device *dev)
952 } else { 952 } else {
953 tag_suffix = ", no tags"; 953 tag_suffix = ", no tags";
954 } 954 }
955 scsi_adjust_queue_depth(dev, tqd); 955 scsi_change_queue_depth(dev, tqd);
956 } else if (TLDEV(dev->type) && linked_comm) { 956 } else if (TLDEV(dev->type) && linked_comm) {
957 scsi_adjust_queue_depth(dev, tqd); 957 scsi_change_queue_depth(dev, tqd);
958 tag_suffix = ", untagged"; 958 tag_suffix = ", untagged";
959 } else { 959 } else {
960 scsi_adjust_queue_depth(dev, utqd); 960 scsi_change_queue_depth(dev, utqd);
961 tag_suffix = ""; 961 tag_suffix = "";
962 } 962 }
963 963
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index 1941d837f6f2..b6030e3edd01 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -972,7 +972,6 @@ u8 handle_hba_ioctl(struct esas2r_adapter *a,
972 struct atto_ioctl *ioctl_hba); 972 struct atto_ioctl *ioctl_hba);
973int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd); 973int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
974int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh); 974int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
975int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason);
976long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); 975long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
977 976
978/* SCSI error handler (eh) functions */ 977/* SCSI error handler (eh) functions */
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index d89a0277a8e1..baf913047b48 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -117,9 +117,8 @@ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
117 117
118 rq = esas2r_alloc_request(a); 118 rq = esas2r_alloc_request(a);
119 if (rq == NULL) { 119 if (rq == NULL) {
120 up(&a->fm_api_semaphore);
121 fi->status = FI_STAT_BUSY; 120 fi->status = FI_STAT_BUSY;
122 return; 121 goto free_sem;
123 } 122 }
124 123
125 if (fi == &a->firmware.header) { 124 if (fi == &a->firmware.header) {
@@ -135,7 +134,7 @@ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
135 if (a->firmware.header_buff == NULL) { 134 if (a->firmware.header_buff == NULL) {
136 esas2r_debug("failed to allocate header buffer!"); 135 esas2r_debug("failed to allocate header buffer!");
137 fi->status = FI_STAT_BUSY; 136 fi->status = FI_STAT_BUSY;
138 return; 137 goto free_req;
139 } 138 }
140 139
141 memcpy(a->firmware.header_buff, fi, 140 memcpy(a->firmware.header_buff, fi,
@@ -171,9 +170,10 @@ all_done:
171 a->firmware.header_buff, 170 a->firmware.header_buff,
172 (dma_addr_t)a->firmware.header_buff_phys); 171 (dma_addr_t)a->firmware.header_buff_phys);
173 } 172 }
174 173free_req:
175 up(&a->fm_api_semaphore);
176 esas2r_free_request(a, (struct esas2r_request *)rq); 174 esas2r_free_request(a, (struct esas2r_request *)rq);
175free_sem:
176 up(&a->fm_api_semaphore);
177 return; 177 return;
178 178
179} 179}
@@ -1420,9 +1420,10 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
1420 1420
1421 rq = esas2r_alloc_request(a); 1421 rq = esas2r_alloc_request(a);
1422 if (rq == NULL) { 1422 if (rq == NULL) {
1423 up(&a->nvram_semaphore); 1423 kfree(ioctl);
1424 ioctl->data.prw.code = 0; 1424 esas2r_log(ESAS2R_LOG_WARN,
1425 break; 1425 "could not allocate an internal request");
1426 return -ENOMEM;
1426 } 1427 }
1427 1428
1428 code = esas2r_write_params(a, rq, 1429 code = esas2r_write_params(a, rq,
@@ -1523,9 +1524,12 @@ ioctl_done:
1523 case -EINVAL: 1524 case -EINVAL:
1524 ioctl->header.return_code = IOCTL_INVALID_PARAM; 1525 ioctl->header.return_code = IOCTL_INVALID_PARAM;
1525 break; 1526 break;
1527
1528 default:
1529 ioctl->header.return_code = IOCTL_GENERAL_ERROR;
1530 break;
1526 } 1531 }
1527 1532
1528 ioctl->header.return_code = IOCTL_GENERAL_ERROR;
1529 } 1533 }
1530 1534
1531 /* Always copy the buffer back, if only to pick up the status */ 1535 /* Always copy the buffer back, if only to pick up the status */
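
The esas2r_ioctl fixes share one theme: every failure path must release exactly what was acquired, in reverse order. do_fm_api() previously returned early without freeing the request (and, on the header-buffer path, without releasing the semaphore either); the free_req/free_sem labels now unwind both. The skeleton of that ordering, using the driver's own helpers with the middle elided:

	down(&a->fm_api_semaphore);		/* acquired first */

	rq = esas2r_alloc_request(a);		/* acquired second */
	if (rq == NULL) {
		fi->status = FI_STAT_BUSY;
		goto free_sem;			/* undo only the semaphore */
	}

	/* ... firmware image work; later failures jump to free_req ... */

free_req:
	esas2r_free_request(a, rq);		/* released in reverse order */
free_sem:
	up(&a->fm_api_semaphore);
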
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 30fce64faf75..593ff8a63c70 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -254,7 +254,7 @@ static struct scsi_host_template driver_template = {
254 .use_clustering = ENABLE_CLUSTERING, 254 .use_clustering = ENABLE_CLUSTERING,
255 .emulated = 0, 255 .emulated = 0,
256 .proc_name = ESAS2R_DRVR_NAME, 256 .proc_name = ESAS2R_DRVR_NAME,
257 .change_queue_depth = esas2r_change_queue_depth, 257 .change_queue_depth = scsi_change_queue_depth,
258 .change_queue_type = scsi_change_queue_type, 258 .change_queue_type = scsi_change_queue_type,
259 .max_sectors = 0xFFFF, 259 .max_sectors = 0xFFFF,
260 .use_blk_tags = 1, 260 .use_blk_tags = 1,
@@ -1257,15 +1257,6 @@ int esas2r_target_reset(struct scsi_cmnd *cmd)
1257 return esas2r_dev_targ_reset(cmd, true); 1257 return esas2r_dev_targ_reset(cmd, true);
1258} 1258}
1259 1259
1260int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason)
1261{
1262 esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth);
1263
1264 scsi_adjust_queue_depth(dev, depth);
1265
1266 return dev->queue_depth;
1267}
1268
1269void esas2r_log_request_failure(struct esas2r_adapter *a, 1260void esas2r_log_request_failure(struct esas2r_adapter *a,
1270 struct esas2r_request *rq) 1261 struct esas2r_request *rq)
1271{ 1262{
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 38c23e0b73af..ce5bd52fe692 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -49,55 +49,67 @@ static u32 esp_debug;
49#define ESP_DEBUG_DATADONE 0x00000100 49#define ESP_DEBUG_DATADONE 0x00000100
50#define ESP_DEBUG_RECONNECT 0x00000200 50#define ESP_DEBUG_RECONNECT 0x00000200
51#define ESP_DEBUG_AUTOSENSE 0x00000400 51#define ESP_DEBUG_AUTOSENSE 0x00000400
52#define ESP_DEBUG_EVENT 0x00000800
53#define ESP_DEBUG_COMMAND 0x00001000
52 54
53#define esp_log_intr(f, a...) \ 55#define esp_log_intr(f, a...) \
54do { if (esp_debug & ESP_DEBUG_INTR) \ 56do { if (esp_debug & ESP_DEBUG_INTR) \
55 printk(f, ## a); \ 57 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
56} while (0) 58} while (0)
57 59
58#define esp_log_reset(f, a...) \ 60#define esp_log_reset(f, a...) \
59do { if (esp_debug & ESP_DEBUG_RESET) \ 61do { if (esp_debug & ESP_DEBUG_RESET) \
60 printk(f, ## a); \ 62 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
61} while (0) 63} while (0)
62 64
63#define esp_log_msgin(f, a...) \ 65#define esp_log_msgin(f, a...) \
64do { if (esp_debug & ESP_DEBUG_MSGIN) \ 66do { if (esp_debug & ESP_DEBUG_MSGIN) \
65 printk(f, ## a); \ 67 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
66} while (0) 68} while (0)
67 69
68#define esp_log_msgout(f, a...) \ 70#define esp_log_msgout(f, a...) \
69do { if (esp_debug & ESP_DEBUG_MSGOUT) \ 71do { if (esp_debug & ESP_DEBUG_MSGOUT) \
70 printk(f, ## a); \ 72 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
71} while (0) 73} while (0)
72 74
73#define esp_log_cmddone(f, a...) \ 75#define esp_log_cmddone(f, a...) \
74do { if (esp_debug & ESP_DEBUG_CMDDONE) \ 76do { if (esp_debug & ESP_DEBUG_CMDDONE) \
75 printk(f, ## a); \ 77 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
76} while (0) 78} while (0)
77 79
78#define esp_log_disconnect(f, a...) \ 80#define esp_log_disconnect(f, a...) \
79do { if (esp_debug & ESP_DEBUG_DISCONNECT) \ 81do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
80 printk(f, ## a); \ 82 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
81} while (0) 83} while (0)
82 84
83#define esp_log_datastart(f, a...) \ 85#define esp_log_datastart(f, a...) \
84do { if (esp_debug & ESP_DEBUG_DATASTART) \ 86do { if (esp_debug & ESP_DEBUG_DATASTART) \
85 printk(f, ## a); \ 87 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
86} while (0) 88} while (0)
87 89
88#define esp_log_datadone(f, a...) \ 90#define esp_log_datadone(f, a...) \
89do { if (esp_debug & ESP_DEBUG_DATADONE) \ 91do { if (esp_debug & ESP_DEBUG_DATADONE) \
90 printk(f, ## a); \ 92 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
91} while (0) 93} while (0)
92 94
93#define esp_log_reconnect(f, a...) \ 95#define esp_log_reconnect(f, a...) \
94do { if (esp_debug & ESP_DEBUG_RECONNECT) \ 96do { if (esp_debug & ESP_DEBUG_RECONNECT) \
95 printk(f, ## a); \ 97 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
96} while (0) 98} while (0)
97 99
98#define esp_log_autosense(f, a...) \ 100#define esp_log_autosense(f, a...) \
99do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \ 101do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
100 printk(f, ## a); \ 102 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
103} while (0)
104
105#define esp_log_event(f, a...) \
106do { if (esp_debug & ESP_DEBUG_EVENT) \
107 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
108} while (0)
109
110#define esp_log_command(f, a...) \
111do { if (esp_debug & ESP_DEBUG_COMMAND) \
112 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
101} while (0) 113} while (0)
102 114
103#define esp_read8(REG) esp->ops->esp_read8(esp, REG) 115#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
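
Every esp_log_* macro above now routes through shost_printk(), the scsi_host.h wrapper that prefixes each message with the SCSI host's device name, which is what makes the hand-maintained PFX and "esp%d:" prefixes in the hunks below redundant. Each converted macro has the same shape (ESP_DEBUG_EXAMPLE stands in for one of the flag bits above):

	#define esp_log_example(f, a...) \
	do { if (esp_debug & ESP_DEBUG_EXAMPLE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
	} while (0)
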
@@ -126,10 +138,29 @@ void scsi_esp_cmd(struct esp *esp, u8 val)
126 138
127 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); 139 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
128 140
141 esp_log_command("cmd[%02x]\n", val);
129 esp_write8(val, ESP_CMD); 142 esp_write8(val, ESP_CMD);
130} 143}
131EXPORT_SYMBOL(scsi_esp_cmd); 144EXPORT_SYMBOL(scsi_esp_cmd);
132 145
146static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
147{
148 if (esp->flags & ESP_FLAG_USE_FIFO) {
149 int i;
150
151 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
152 for (i = 0; i < len; i++)
153 esp_write8(esp->command_block[i], ESP_FDATA);
154 scsi_esp_cmd(esp, cmd);
155 } else {
156 if (esp->rev == FASHME)
157 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
158 cmd |= ESP_CMD_DMA;
159 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
160 len, max_len, 0, cmd);
161 }
162}
163
133static void esp_event(struct esp *esp, u8 val) 164static void esp_event(struct esp *esp, u8 val)
134{ 165{
135 struct esp_event_ent *p; 166 struct esp_event_ent *p;
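
The new esp_send_dma_cmd() helper centralizes a decision previously repeated at every call site: with ESP_FLAG_USE_FIFO set the command block is pushed byte-by-byte through ESP_FDATA and the bare command is issued, otherwise the helper ORs in ESP_CMD_DMA itself (flushing the FIFO first on FASHME) and hands off to the board-specific send_dma_cmd hook. Callers therefore pass the plain select/transfer command, as the later hunks show:

	/* one call replaces the FASHME flush plus explicit ESP_CMD_DMA dance */
	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
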
@@ -150,19 +181,17 @@ static void esp_dump_cmd_log(struct esp *esp)
150 int idx = esp->esp_event_cur; 181 int idx = esp->esp_event_cur;
151 int stop = idx; 182 int stop = idx;
152 183
153 printk(KERN_INFO PFX "esp%d: Dumping command log\n", 184 shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
154 esp->host->unique_id);
155 do { 185 do {
156 struct esp_event_ent *p = &esp->esp_event_log[idx]; 186 struct esp_event_ent *p = &esp->esp_event_log[idx];
157 187
158 printk(KERN_INFO PFX "esp%d: ent[%d] %s ", 188 shost_printk(KERN_INFO, esp->host,
159 esp->host->unique_id, idx, 189 "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
160 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT"); 190 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
161 191 idx,
162 printk("val[%02x] sreg[%02x] seqreg[%02x] " 192 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
163 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n", 193 p->val, p->sreg, p->seqreg,
164 p->val, p->sreg, p->seqreg, 194 p->sreg2, p->ireg, p->select_state, p->event);
165 p->sreg2, p->ireg, p->select_state, p->event);
166 195
167 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); 196 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
168 } while (idx != stop); 197 } while (idx != stop);
@@ -176,9 +205,8 @@ static void esp_flush_fifo(struct esp *esp)
176 205
177 while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) { 206 while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
178 if (--lim == 0) { 207 if (--lim == 0) {
179 printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES " 208 shost_printk(KERN_ALERT, esp->host,
180 "will not clear!\n", 209 "ESP_FF_BYTES will not clear!\n");
181 esp->host->unique_id);
182 break; 210 break;
183 } 211 }
184 udelay(1); 212 udelay(1);
@@ -240,6 +268,19 @@ static void esp_reset_esp(struct esp *esp)
240 } else { 268 } else {
241 esp->min_period = ((5 * esp->ccycle) / 1000); 269 esp->min_period = ((5 * esp->ccycle) / 1000);
242 } 270 }
271 if (esp->rev == FAS236) {
272 /*
273 * The AM53c974 chip returns the same ID as FAS236;
274 * try to configure glitch eater.
275 */
276 u8 config4 = ESP_CONFIG4_GE1;
277 esp_write8(config4, ESP_CFG4);
278 config4 = esp_read8(ESP_CFG4);
279 if (config4 & ESP_CONFIG4_GE1) {
280 esp->rev = PCSCSI;
281 esp_write8(esp->config4, ESP_CFG4);
282 }
283 }
243 esp->max_period = (esp->max_period + 3)>>2; 284 esp->max_period = (esp->max_period + 3)>>2;
244 esp->min_period = (esp->min_period + 3)>>2; 285 esp->min_period = (esp->min_period + 3)>>2;
245 286
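
The glitch-eater probe is a write/read-back trick: ESP_CONFIG4_GE1 lives in a register the plain FAS236 does not implement, so if the bit survives a round trip through ESP_CFG4 the chip must be the AM53c974 and the revision is promoted to PCSCSI. Reduced to its essentials:

	esp_write8(ESP_CONFIG4_GE1, ESP_CFG4);		/* write a 974-only bit */
	if (esp_read8(ESP_CFG4) & ESP_CONFIG4_GE1) {
		esp->rev = PCSCSI;			/* it stuck: AM53c974 */
		esp_write8(esp->config4, ESP_CFG4);	/* restore driver state */
	}
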
@@ -265,7 +306,8 @@ static void esp_reset_esp(struct esp *esp)
265 /* fallthrough... */ 306 /* fallthrough... */
266 307
267 case FAS236: 308 case FAS236:
268 /* Fast 236 or HME */ 309 case PCSCSI:
310 /* Fast 236, AM53c974 or HME */
269 esp_write8(esp->config2, ESP_CFG2); 311 esp_write8(esp->config2, ESP_CFG2);
270 if (esp->rev == FASHME) { 312 if (esp->rev == FASHME) {
271 u8 cfg3 = esp->target[0].esp_config3; 313 u8 cfg3 = esp->target[0].esp_config3;
@@ -383,12 +425,11 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
383 p->cur_residue -= len; 425 p->cur_residue -= len;
384 p->tot_residue -= len; 426 p->tot_residue -= len;
385 if (p->cur_residue < 0 || p->tot_residue < 0) { 427 if (p->cur_residue < 0 || p->tot_residue < 0) {
386 printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n", 428 shost_printk(KERN_ERR, esp->host,
387 esp->host->unique_id); 429 "Data transfer overflow.\n");
388 printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] " 430 shost_printk(KERN_ERR, esp->host,
389 "len[%u]\n", 431 "cur_residue[%d] tot_residue[%d] len[%u]\n",
390 esp->host->unique_id, 432 p->cur_residue, p->tot_residue, len);
391 p->cur_residue, p->tot_residue, len);
392 p->cur_residue = 0; 433 p->cur_residue = 0;
393 p->tot_residue = 0; 434 p->tot_residue = 0;
394 } 435 }
@@ -604,9 +645,8 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
604 645
605 646
606 if (!ent->sense_ptr) { 647 if (!ent->sense_ptr) {
607 esp_log_autosense("esp%d: Doing auto-sense for " 648 esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
608 "tgt[%d] lun[%d]\n", 649 tgt, lun);
609 esp->host->unique_id, tgt, lun);
610 650
611 ent->sense_ptr = cmd->sense_buffer; 651 ent->sense_ptr = cmd->sense_buffer;
612 ent->sense_dma = esp->ops->map_single(esp, 652 ent->sense_dma = esp->ops->map_single(esp,
@@ -642,10 +682,7 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
642 682
643 val = (p - esp->command_block); 683 val = (p - esp->command_block);
644 684
645 if (esp->rev == FASHME) 685 esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
646 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
647 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
648 val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
649} 686}
650 687
651static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) 688static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
@@ -781,12 +818,12 @@ build_identify:
781 } 818 }
782 819
783 if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) { 820 if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
784 start_cmd = ESP_CMD_DMA | ESP_CMD_SELA; 821 start_cmd = ESP_CMD_SELA;
785 if (ent->tag[0]) { 822 if (ent->tag[0]) {
786 *p++ = ent->tag[0]; 823 *p++ = ent->tag[0];
787 *p++ = ent->tag[1]; 824 *p++ = ent->tag[1];
788 825
789 start_cmd = ESP_CMD_DMA | ESP_CMD_SA3; 826 start_cmd = ESP_CMD_SA3;
790 } 827 }
791 828
792 for (i = 0; i < cmd->cmd_len; i++) 829 for (i = 0; i < cmd->cmd_len; i++)
@@ -806,7 +843,7 @@ build_identify:
806 esp->msg_out_len += 2; 843 esp->msg_out_len += 2;
807 } 844 }
808 845
809 start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS; 846 start_cmd = ESP_CMD_SELAS;
810 esp->select_state = ESP_SELECT_MSGOUT; 847 esp->select_state = ESP_SELECT_MSGOUT;
811 } 848 }
812 val = tgt; 849 val = tgt;
@@ -826,10 +863,7 @@ build_identify:
826 printk("]\n"); 863 printk("]\n");
827 } 864 }
828 865
829 if (esp->rev == FASHME) 866 esp_send_dma_cmd(esp, val, 16, start_cmd);
830 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
831 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
832 val, 16, 0, start_cmd);
833} 867}
834 868
835static struct esp_cmd_entry *esp_get_ent(struct esp *esp) 869static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
@@ -953,8 +987,8 @@ static int esp_check_gross_error(struct esp *esp)
953 * - DMA programmed with wrong direction 987 * - DMA programmed with wrong direction
954 * - improper phase change 988 * - improper phase change
955 */ 989 */
956 printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n", 990 shost_printk(KERN_ERR, esp->host,
957 esp->host->unique_id, esp->sreg); 991 "Gross error sreg[%02x]\n", esp->sreg);
958 /* XXX Reset the chip. XXX */ 992 /* XXX Reset the chip. XXX */
959 return 1; 993 return 1;
960 } 994 }
@@ -974,7 +1008,6 @@ static int esp_check_spur_intr(struct esp *esp)
974 1008
975 default: 1009 default:
976 if (!(esp->sreg & ESP_STAT_INTR)) { 1010 if (!(esp->sreg & ESP_STAT_INTR)) {
977 esp->ireg = esp_read8(ESP_INTRPT);
978 if (esp->ireg & ESP_INTR_SR) 1011 if (esp->ireg & ESP_INTR_SR)
979 return 1; 1012 return 1;
980 1013
@@ -982,14 +1015,13 @@ static int esp_check_spur_intr(struct esp *esp)
982 * ESP is not, the only possibility is a DMA error. 1015 * ESP is not, the only possibility is a DMA error.
983 */ 1016 */
984 if (!esp->ops->dma_error(esp)) { 1017 if (!esp->ops->dma_error(esp)) {
985 printk(KERN_ERR PFX "esp%d: Spurious irq, " 1018 shost_printk(KERN_ERR, esp->host,
986 "sreg=%02x.\n", 1019 "Spurious irq, sreg=%02x.\n",
987 esp->host->unique_id, esp->sreg); 1020 esp->sreg);
988 return -1; 1021 return -1;
989 } 1022 }
990 1023
991 printk(KERN_ERR PFX "esp%d: DMA error\n", 1024 shost_printk(KERN_ERR, esp->host, "DMA error\n");
992 esp->host->unique_id);
993 1025
994 /* XXX Reset the chip. XXX */ 1026 /* XXX Reset the chip. XXX */
995 return -1; 1027 return -1;
@@ -1002,7 +1034,7 @@ static int esp_check_spur_intr(struct esp *esp)
1002 1034
1003static void esp_schedule_reset(struct esp *esp) 1035static void esp_schedule_reset(struct esp *esp)
1004{ 1036{
1005 esp_log_reset("ESP: esp_schedule_reset() from %pf\n", 1037 esp_log_reset("esp_schedule_reset() from %pf\n",
1006 __builtin_return_address(0)); 1038 __builtin_return_address(0));
1007 esp->flags |= ESP_FLAG_RESETTING; 1039 esp->flags |= ESP_FLAG_RESETTING;
1008 esp_event(esp, ESP_EVENT_RESET); 1040 esp_event(esp, ESP_EVENT_RESET);
@@ -1019,20 +1051,20 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1019 int i; 1051 int i;
1020 1052
1021 if (!lp->num_tagged) { 1053 if (!lp->num_tagged) {
1022 printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n", 1054 shost_printk(KERN_ERR, esp->host,
1023 esp->host->unique_id); 1055 "Reconnect w/num_tagged==0\n");
1024 return NULL; 1056 return NULL;
1025 } 1057 }
1026 1058
1027 esp_log_reconnect("ESP: reconnect tag, "); 1059 esp_log_reconnect("reconnect tag, ");
1028 1060
1029 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { 1061 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
1030 if (esp->ops->irq_pending(esp)) 1062 if (esp->ops->irq_pending(esp))
1031 break; 1063 break;
1032 } 1064 }
1033 if (i == ESP_QUICKIRQ_LIMIT) { 1065 if (i == ESP_QUICKIRQ_LIMIT) {
1034 printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n", 1066 shost_printk(KERN_ERR, esp->host,
1035 esp->host->unique_id); 1067 "Reconnect IRQ1 timeout\n");
1036 return NULL; 1068 return NULL;
1037 } 1069 }
1038 1070
@@ -1043,14 +1075,14 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1043 i, esp->ireg, esp->sreg); 1075 i, esp->ireg, esp->sreg);
1044 1076
1045 if (esp->ireg & ESP_INTR_DC) { 1077 if (esp->ireg & ESP_INTR_DC) {
1046 printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n", 1078 shost_printk(KERN_ERR, esp->host,
1047 esp->host->unique_id); 1079 "Reconnect, got disconnect.\n");
1048 return NULL; 1080 return NULL;
1049 } 1081 }
1050 1082
1051 if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) { 1083 if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
1052 printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n", 1084 shost_printk(KERN_ERR, esp->host,
1053 esp->host->unique_id, esp->sreg); 1085 "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
1054 return NULL; 1086 return NULL;
1055 } 1087 }
1056 1088
@@ -1073,8 +1105,7 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1073 udelay(1); 1105 udelay(1);
1074 } 1106 }
1075 if (i == ESP_RESELECT_TAG_LIMIT) { 1107 if (i == ESP_RESELECT_TAG_LIMIT) {
1076 printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n", 1108 shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
1077 esp->host->unique_id);
1078 return NULL; 1109 return NULL;
1079 } 1110 }
1080 esp->ops->dma_drain(esp); 1111 esp->ops->dma_drain(esp);
@@ -1087,17 +1118,17 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1087 1118
1088 if (esp->command_block[0] < SIMPLE_QUEUE_TAG || 1119 if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
1089 esp->command_block[0] > ORDERED_QUEUE_TAG) { 1120 esp->command_block[0] > ORDERED_QUEUE_TAG) {
1090 printk(KERN_ERR PFX "esp%d: Reconnect, bad tag " 1121 shost_printk(KERN_ERR, esp->host,
1091 "type %02x.\n", 1122 "Reconnect, bad tag type %02x.\n",
1092 esp->host->unique_id, esp->command_block[0]); 1123 esp->command_block[0]);
1093 return NULL; 1124 return NULL;
1094 } 1125 }
1095 1126
1096 ent = lp->tagged_cmds[esp->command_block[1]]; 1127 ent = lp->tagged_cmds[esp->command_block[1]];
1097 if (!ent) { 1128 if (!ent) {
1098 printk(KERN_ERR PFX "esp%d: Reconnect, no entry for " 1129 shost_printk(KERN_ERR, esp->host,
1099 "tag %02x.\n", 1130 "Reconnect, no entry for tag %02x.\n",
1100 esp->host->unique_id, esp->command_block[1]); 1131 esp->command_block[1]);
1101 return NULL; 1132 return NULL;
1102 } 1133 }
1103 1134
@@ -1163,9 +1194,9 @@ static int esp_reconnect(struct esp *esp)
1163 tp = &esp->target[target]; 1194 tp = &esp->target[target];
1164 dev = __scsi_device_lookup_by_target(tp->starget, lun); 1195 dev = __scsi_device_lookup_by_target(tp->starget, lun);
1165 if (!dev) { 1196 if (!dev) {
1166 printk(KERN_ERR PFX "esp%d: Reconnect, no lp " 1197 shost_printk(KERN_ERR, esp->host,
1167 "tgt[%u] lun[%u]\n", 1198 "Reconnect, no lp tgt[%u] lun[%u]\n",
1168 esp->host->unique_id, target, lun); 1199 target, lun);
1169 goto do_reset; 1200 goto do_reset;
1170 } 1201 }
1171 lp = dev->hostdata; 1202 lp = dev->hostdata;
@@ -1291,8 +1322,8 @@ static int esp_finish_select(struct esp *esp)
1291 return 0; 1322 return 0;
1292 } 1323 }
1293 1324
1294 printk("ESP: Unexpected selection completion ireg[%x].\n", 1325 shost_printk(KERN_INFO, esp->host,
1295 esp->ireg); 1326 "Unexpected selection completion ireg[%x]\n", esp->ireg);
1296 esp_schedule_reset(esp); 1327 esp_schedule_reset(esp);
1297 return 0; 1328 return 0;
1298} 1329}
@@ -1312,11 +1343,42 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
1312 (((unsigned int)esp_read8(ESP_TCMED)) << 8)); 1343 (((unsigned int)esp_read8(ESP_TCMED)) << 8));
1313 if (esp->rev == FASHME) 1344 if (esp->rev == FASHME)
1314 ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16; 1345 ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
1346 if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
1347 ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
1315 } 1348 }
1316 1349
1317 bytes_sent = esp->data_dma_len; 1350 bytes_sent = esp->data_dma_len;
1318 bytes_sent -= ecount; 1351 bytes_sent -= ecount;
1319 1352
1353 /*
1354 * The am53c974 has a DMA 'peculiarity'. The doc states:
1355 * In some odd byte conditions, one residual byte will
1356 * be left in the SCSI FIFO, and the FIFO Flags will
1357 * never count to '0 '. When this happens, the residual
1358 * byte should be retrieved via PIO following completion
1359 * of the BLAST operation.
1360 */
1361 if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
1362 size_t count = 1;
1363 size_t offset = bytes_sent;
1364 u8 bval = esp_read8(ESP_FDATA);
1365
1366 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
1367 ent->sense_ptr[bytes_sent] = bval;
1368 else {
1369 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
1370 u8 *ptr;
1371
1372 ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
1373 &offset, &count);
1374 if (likely(ptr)) {
1375 *(ptr + offset) = bval;
1376 scsi_kunmap_atomic_sg(ptr);
1377 }
1378 }
1379 bytes_sent += fifo_cnt;
1380 ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
1381 }
1320 if (!(ent->flags & ESP_CMD_FLAG_WRITE)) 1382 if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1321 bytes_sent -= fifo_cnt; 1383 bytes_sent -= fifo_cnt;
1322 1384
@@ -1556,8 +1618,8 @@ static void esp_msgin_extended(struct esp *esp)
1556 return; 1618 return;
1557 } 1619 }
1558 1620
1559 printk("ESP: Unexpected extended msg type %x\n", 1621 shost_printk(KERN_INFO, esp->host,
1560 esp->msg_in[2]); 1622 "Unexpected extended msg type %x\n", esp->msg_in[2]);
1561 1623
1562 esp->msg_out[0] = ABORT_TASK_SET; 1624 esp->msg_out[0] = ABORT_TASK_SET;
1563 esp->msg_out_len = 1; 1625 esp->msg_out_len = 1;
@@ -1574,7 +1636,8 @@ static int esp_msgin_process(struct esp *esp)
1574 1636
1575 if (msg0 & 0x80) { 1637 if (msg0 & 0x80) {
1576 /* Identify */ 1638 /* Identify */
1577 printk("ESP: Unexpected msgin identify\n"); 1639 shost_printk(KERN_INFO, esp->host,
1640 "Unexpected msgin identify\n");
1578 return 0; 1641 return 0;
1579 } 1642 }
1580 1643
@@ -1640,10 +1703,12 @@ static int esp_msgin_process(struct esp *esp)
1640 1703
1641static int esp_process_event(struct esp *esp) 1704static int esp_process_event(struct esp *esp)
1642{ 1705{
1643 int write; 1706 int write, i;
1644 1707
1645again: 1708again:
1646 write = 0; 1709 write = 0;
1710 esp_log_event("process event %d phase %x\n",
1711 esp->event, esp->sreg & ESP_STAT_PMASK);
1647 switch (esp->event) { 1712 switch (esp->event) {
1648 case ESP_EVENT_CHECK_PHASE: 1713 case ESP_EVENT_CHECK_PHASE:
1649 switch (esp->sreg & ESP_STAT_PMASK) { 1714 switch (esp->sreg & ESP_STAT_PMASK) {
@@ -1673,8 +1738,9 @@ again:
1673 break; 1738 break;
1674 1739
1675 default: 1740 default:
1676 printk("ESP: Unexpected phase, sreg=%02x\n", 1741 shost_printk(KERN_INFO, esp->host,
1677 esp->sreg); 1742 "Unexpected phase, sreg=%02x\n",
1743 esp->sreg);
1678 esp_schedule_reset(esp); 1744 esp_schedule_reset(esp);
1679 return 0; 1745 return 0;
1680 } 1746 }
@@ -1708,18 +1774,17 @@ again:
1708 esp->data_dma_len = dma_len; 1774 esp->data_dma_len = dma_len;
1709 1775
1710 if (!dma_len) { 1776 if (!dma_len) {
1711 printk(KERN_ERR PFX "esp%d: DMA length is zero!\n", 1777 shost_printk(KERN_ERR, esp->host,
1712 esp->host->unique_id); 1778 "DMA length is zero!\n");
1713 printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n", 1779 shost_printk(KERN_ERR, esp->host,
1714 esp->host->unique_id, 1780 "cur adr[%08llx] len[%08x]\n",
1715 (unsigned long long)esp_cur_dma_addr(ent, cmd), 1781 (unsigned long long)esp_cur_dma_addr(ent, cmd),
1716 esp_cur_dma_len(ent, cmd)); 1782 esp_cur_dma_len(ent, cmd));
1717 esp_schedule_reset(esp); 1783 esp_schedule_reset(esp);
1718 return 0; 1784 return 0;
1719 } 1785 }
1720 1786
1721 esp_log_datastart("ESP: start data addr[%08llx] len[%u] " 1787 esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
1722 "write(%d)\n",
1723 (unsigned long long)dma_addr, dma_len, write); 1788 (unsigned long long)dma_addr, dma_len, write);
1724 1789
1725 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len, 1790 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
@@ -1733,7 +1798,8 @@ again:
1733 int bytes_sent; 1798 int bytes_sent;
1734 1799
1735 if (esp->ops->dma_error(esp)) { 1800 if (esp->ops->dma_error(esp)) {
1736 printk("ESP: data done, DMA error, resetting\n"); 1801 shost_printk(KERN_INFO, esp->host,
1802 "data done, DMA error, resetting\n");
1737 esp_schedule_reset(esp); 1803 esp_schedule_reset(esp);
1738 return 0; 1804 return 0;
1739 } 1805 }
@@ -1749,14 +1815,15 @@ again:
1749 /* We should always see exactly a bus-service 1815 /* We should always see exactly a bus-service
1750 * interrupt at the end of a successful transfer. 1816 * interrupt at the end of a successful transfer.
1751 */ 1817 */
1752 printk("ESP: data done, not BSERV, resetting\n"); 1818 shost_printk(KERN_INFO, esp->host,
1819 "data done, not BSERV, resetting\n");
1753 esp_schedule_reset(esp); 1820 esp_schedule_reset(esp);
1754 return 0; 1821 return 0;
1755 } 1822 }
1756 1823
1757 bytes_sent = esp_data_bytes_sent(esp, ent, cmd); 1824 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1758 1825
1759 esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n", 1826 esp_log_datadone("data done flgs[%x] sent[%d]\n",
1760 ent->flags, bytes_sent); 1827 ent->flags, bytes_sent);
1761 1828
1762 if (bytes_sent < 0) { 1829 if (bytes_sent < 0) {
@@ -1785,8 +1852,9 @@ again:
1785 } 1852 }
1786 1853
1787 if (ent->message != COMMAND_COMPLETE) { 1854 if (ent->message != COMMAND_COMPLETE) {
1788 printk("ESP: Unexpected message %x in status\n", 1855 shost_printk(KERN_INFO, esp->host,
1789 ent->message); 1856 "Unexpected message %x in status\n",
1857 ent->message);
1790 esp_schedule_reset(esp); 1858 esp_schedule_reset(esp);
1791 return 0; 1859 return 0;
1792 } 1860 }
@@ -1804,8 +1872,7 @@ again:
1804 scsi_esp_cmd(esp, ESP_CMD_ESEL); 1872 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1805 1873
1806 if (ent->message == COMMAND_COMPLETE) { 1874 if (ent->message == COMMAND_COMPLETE) {
1807 esp_log_cmddone("ESP: Command done status[%x] " 1875 esp_log_cmddone("Command done status[%x] message[%x]\n",
1808 "message[%x]\n",
1809 ent->status, ent->message); 1876 ent->status, ent->message);
1810 if (ent->status == SAM_STAT_TASK_SET_FULL) 1877 if (ent->status == SAM_STAT_TASK_SET_FULL)
1811 esp_event_queue_full(esp, ent); 1878 esp_event_queue_full(esp, ent);
@@ -1821,16 +1888,16 @@ again:
1821 DID_OK)); 1888 DID_OK));
1822 } 1889 }
1823 } else if (ent->message == DISCONNECT) { 1890 } else if (ent->message == DISCONNECT) {
1824 esp_log_disconnect("ESP: Disconnecting tgt[%d] " 1891 esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
1825 "tag[%x:%x]\n",
1826 cmd->device->id, 1892 cmd->device->id,
1827 ent->tag[0], ent->tag[1]); 1893 ent->tag[0], ent->tag[1]);
1828 1894
1829 esp->active_cmd = NULL; 1895 esp->active_cmd = NULL;
1830 esp_maybe_execute_command(esp); 1896 esp_maybe_execute_command(esp);
1831 } else { 1897 } else {
1832 printk("ESP: Unexpected message %x in freebus\n", 1898 shost_printk(KERN_INFO, esp->host,
1833 ent->message); 1899 "Unexpected message %x in freebus\n",
1900 ent->message);
1834 esp_schedule_reset(esp); 1901 esp_schedule_reset(esp);
1835 return 0; 1902 return 0;
1836 } 1903 }
@@ -1862,6 +1929,10 @@ again:
1862 if (esp->msg_out_len == 1) { 1929 if (esp->msg_out_len == 1) {
1863 esp_write8(esp->msg_out[0], ESP_FDATA); 1930 esp_write8(esp->msg_out[0], ESP_FDATA);
1864 scsi_esp_cmd(esp, ESP_CMD_TI); 1931 scsi_esp_cmd(esp, ESP_CMD_TI);
1932 } else if (esp->flags & ESP_FLAG_USE_FIFO) {
1933 for (i = 0; i < esp->msg_out_len; i++)
1934 esp_write8(esp->msg_out[i], ESP_FDATA);
1935 scsi_esp_cmd(esp, ESP_CMD_TI);
1865 } else { 1936 } else {
1866 /* Use DMA. */ 1937 /* Use DMA. */
1867 memcpy(esp->command_block, 1938 memcpy(esp->command_block,
@@ -1917,7 +1988,7 @@ again:
1917 val = esp_read8(ESP_FDATA); 1988 val = esp_read8(ESP_FDATA);
1918 esp->msg_in[esp->msg_in_len++] = val; 1989 esp->msg_in[esp->msg_in_len++] = val;
1919 1990
1920 esp_log_msgin("ESP: Got msgin byte %x\n", val); 1991 esp_log_msgin("Got msgin byte %x\n", val);
1921 1992
1922 if (!esp_msgin_process(esp)) 1993 if (!esp_msgin_process(esp))
1923 esp->msg_in_len = 0; 1994 esp->msg_in_len = 0;
@@ -1930,7 +2001,8 @@ again:
1930 if (esp->event != ESP_EVENT_FREE_BUS) 2001 if (esp->event != ESP_EVENT_FREE_BUS)
1931 esp_event(esp, ESP_EVENT_CHECK_PHASE); 2002 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1932 } else { 2003 } else {
1933 printk("ESP: MSGIN neither BSERV nor FDON, resetting"); 2004 shost_printk(KERN_INFO, esp->host,
2005 "MSGIN neither BSERV nor FDON, resetting");
1934 esp_schedule_reset(esp); 2006 esp_schedule_reset(esp);
1935 return 0; 2007 return 0;
1936 } 2008 }
@@ -1938,11 +2010,7 @@ again:
1938 case ESP_EVENT_CMD_START: 2010 case ESP_EVENT_CMD_START:
1939 memcpy(esp->command_block, esp->cmd_bytes_ptr, 2011 memcpy(esp->command_block, esp->cmd_bytes_ptr,
1940 esp->cmd_bytes_left); 2012 esp->cmd_bytes_left);
1941 if (esp->rev == FASHME) 2013 esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
1942 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1943 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1944 esp->cmd_bytes_left, 16, 0,
1945 ESP_CMD_DMA | ESP_CMD_TI);
1946 esp_event(esp, ESP_EVENT_CMD_DONE); 2014 esp_event(esp, ESP_EVENT_CMD_DONE);
1947 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; 2015 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1948 break; 2016 break;
@@ -1961,8 +2029,8 @@ again:
1961 break; 2029 break;
1962 2030
1963 default: 2031 default:
1964 printk("ESP: Unexpected event %x, resetting\n", 2032 shost_printk(KERN_INFO, esp->host,
1965 esp->event); 2033 "Unexpected event %x, resetting\n", esp->event);
1966 esp_schedule_reset(esp); 2034 esp_schedule_reset(esp);
1967 return 0; 2035 return 0;
1968 break; 2036 break;
@@ -2044,7 +2112,12 @@ static void __esp_interrupt(struct esp *esp)
2044 int finish_reset, intr_done; 2112 int finish_reset, intr_done;
2045 u8 phase; 2113 u8 phase;
2046 2114
2115 /*
2116 * Once INTRPT is read STATUS and SSTEP are cleared.
2117 */
2047 esp->sreg = esp_read8(ESP_STATUS); 2118 esp->sreg = esp_read8(ESP_STATUS);
2119 esp->seqreg = esp_read8(ESP_SSTEP);
2120 esp->ireg = esp_read8(ESP_INTRPT);
2048 2121
2049 if (esp->flags & ESP_FLAG_RESETTING) { 2122 if (esp->flags & ESP_FLAG_RESETTING) {
2050 finish_reset = 1; 2123 finish_reset = 1;
@@ -2057,8 +2130,6 @@ static void __esp_interrupt(struct esp *esp)
2057 return; 2130 return;
2058 } 2131 }
2059 2132
2060 esp->ireg = esp_read8(ESP_INTRPT);
2061
2062 if (esp->ireg & ESP_INTR_SR) 2133 if (esp->ireg & ESP_INTR_SR)
2063 finish_reset = 1; 2134 finish_reset = 1;
2064 2135
@@ -2085,14 +2156,15 @@ static void __esp_interrupt(struct esp *esp)
2085 } 2156 }
2086 } 2157 }
2087 2158
2088 esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] " 2159 esp_log_intr("intr sreg[%02x] seqreg[%02x] "
2089 "sreg2[%02x] ireg[%02x]\n", 2160 "sreg2[%02x] ireg[%02x]\n",
2090 esp->sreg, esp->seqreg, esp->sreg2, esp->ireg); 2161 esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2091 2162
2092 intr_done = 0; 2163 intr_done = 0;
2093 2164
2094 if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) { 2165 if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
2095 printk("ESP: unexpected IREG %02x\n", esp->ireg); 2166 shost_printk(KERN_INFO, esp->host,
2167 "unexpected IREG %02x\n", esp->ireg);
2096 if (esp->ireg & ESP_INTR_IC) 2168 if (esp->ireg & ESP_INTR_IC)
2097 esp_dump_cmd_log(esp); 2169 esp_dump_cmd_log(esp);
2098 2170
@@ -2149,46 +2221,50 @@ static void esp_get_revision(struct esp *esp)
2149 u8 val; 2221 u8 val;
2150 2222
2151 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); 2223 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2152 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); 2224 if (esp->config2 == 0) {
2225 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2226 esp_write8(esp->config2, ESP_CFG2);
2227
2228 val = esp_read8(ESP_CFG2);
2229 val &= ~ESP_CONFIG2_MAGIC;
2230
2231 esp->config2 = 0;
2232 if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
2233 /*
2234 * If what we write to cfg2 does not come back,
2235 * cfg2 is not implemented.
2236 * Therefore this must be a plain esp100.
2237 */
2238 esp->rev = ESP100;
2239 return;
2240 }
2241 }
2242
2243 esp_set_all_config3(esp, 5);
2244 esp->prev_cfg3 = 5;
2153 esp_write8(esp->config2, ESP_CFG2); 2245 esp_write8(esp->config2, ESP_CFG2);
2246 esp_write8(0, ESP_CFG3);
2247 esp_write8(esp->prev_cfg3, ESP_CFG3);
2154 2248
2155 val = esp_read8(ESP_CFG2); 2249 val = esp_read8(ESP_CFG3);
2156 val &= ~ESP_CONFIG2_MAGIC; 2250 if (val != 5) {
2157 if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { 2251 /* The cfg2 register is implemented, however
2158 /* If what we write to cfg2 does not come back, cfg2 is not 2252 * cfg3 is not, must be esp100a.
2159 * implemented, therefore this must be a plain esp100.
2160 */ 2253 */
2161 esp->rev = ESP100; 2254 esp->rev = ESP100A;
2162 } else { 2255 } else {
2163 esp->config2 = 0; 2256 esp_set_all_config3(esp, 0);
2164 esp_set_all_config3(esp, 5); 2257 esp->prev_cfg3 = 0;
2165 esp->prev_cfg3 = 5;
2166 esp_write8(esp->config2, ESP_CFG2);
2167 esp_write8(0, ESP_CFG3);
2168 esp_write8(esp->prev_cfg3, ESP_CFG3); 2258 esp_write8(esp->prev_cfg3, ESP_CFG3);
2169 2259
2170 val = esp_read8(ESP_CFG3); 2260 /* All of cfg{1,2,3} implemented, must be one of
2171 if (val != 5) { 2261 * the fas variants, figure out which one.
2172 /* The cfg2 register is implemented, however 2262 */
2173 * cfg3 is not, must be esp100a. 2263 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2174 */ 2264 esp->rev = FAST;
2175 esp->rev = ESP100A; 2265 esp->sync_defp = SYNC_DEFP_FAST;
2176 } else { 2266 } else {
2177 esp_set_all_config3(esp, 0); 2267 esp->rev = ESP236;
2178 esp->prev_cfg3 = 0;
2179 esp_write8(esp->prev_cfg3, ESP_CFG3);
2180
2181 /* All of cfg{1,2,3} implemented, must be one of
2182 * the fas variants, figure out which one.
2183 */
2184 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2185 esp->rev = FAST;
2186 esp->sync_defp = SYNC_DEFP_FAST;
2187 } else {
2188 esp->rev = ESP236;
2189 }
2190 esp->config2 = 0;
2191 esp_write8(esp->config2, ESP_CFG2);
2192 } 2268 }
2193 } 2269 }
2194} 2270}
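esp_get_revision() distinguishes chip generations with a write/read-back probe: a register that is not wired up will not echo what was written to it. A condensed sketch of that idea, assuming the driver's esp_read8()/esp_write8() accessors (the real code additionally masks the self-clearing ESP_CONFIG2_MAGIC bits before comparing):

	/* True if the register at 'reg' is implemented, i.e. a test
	 * pattern written to it reads back unchanged.
	 */
	static bool esp_reg_echoes(struct esp *esp, unsigned long reg, u8 pattern)
	{
		esp_write8(pattern, reg);
		return esp_read8(reg) == pattern;
	}

Applied in sequence (CFG2, then CFG3), each failed echo pins the revision to the oldest chip lacking that register.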
@@ -2308,6 +2384,7 @@ static const char *esp_chip_names[] = {
2308 "FAS100A", 2384 "FAS100A",
2309 "FAST", 2385 "FAST",
2310 "FASHME", 2386 "FASHME",
2387 "AM53C974",
2311}; 2388};
2312 2389
2313static struct scsi_transport_template *esp_transport_template; 2390static struct scsi_transport_template *esp_transport_template;
@@ -2317,6 +2394,10 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
2317 static int instance; 2394 static int instance;
2318 int err; 2395 int err;
2319 2396
2397 if (!esp->num_tags)
2398 esp->num_tags = ESP_DEFAULT_TAGS;
2399 else if (esp->num_tags >= ESP_MAX_TAG)
2400 esp->num_tags = ESP_MAX_TAG - 1;
2320 esp->host->transportt = esp_transport_template; 2401 esp->host->transportt = esp_transport_template;
2321 esp->host->max_lun = ESP_MAX_LUN; 2402 esp->host->max_lun = ESP_MAX_LUN;
2322 esp->host->cmd_per_lun = 2; 2403 esp->host->cmd_per_lun = 2;
@@ -2330,12 +2411,13 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
2330 2411
2331 esp_bootup_reset(esp); 2412 esp_bootup_reset(esp);
2332 2413
2333 printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n", 2414 dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
2334 esp->host->unique_id, esp->regs, esp->dma_regs, 2415 esp->host->unique_id, esp->regs, esp->dma_regs,
2335 esp->host->irq); 2416 esp->host->irq);
2336 printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n", 2417 dev_printk(KERN_INFO, dev,
2337 esp->host->unique_id, esp_chip_names[esp->rev], 2418 "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2338 esp->cfreq / 1000000, esp->cfact, esp->scsi_id); 2419 esp->host->unique_id, esp_chip_names[esp->rev],
2420 esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2339 2421
2340 /* Let the SCSI bus reset settle. */ 2422 /* Let the SCSI bus reset settle. */
2341 ssleep(esp_bus_reset_settle); 2423 ssleep(esp_bus_reset_settle);
@@ -2403,12 +2485,8 @@ static int esp_slave_configure(struct scsi_device *dev)
2403 struct esp *esp = shost_priv(dev->host); 2485 struct esp *esp = shost_priv(dev->host);
2404 struct esp_target_data *tp = &esp->target[dev->id]; 2486 struct esp_target_data *tp = &esp->target[dev->id];
2405 2487
2406 if (dev->tagged_supported) { 2488 if (dev->tagged_supported)
2407 /* XXX make this configurable somehow XXX */ 2489 scsi_change_queue_depth(dev, esp->num_tags);
2408 int goal_tags = min(ESP_DEFAULT_TAGS, ESP_MAX_TAG);
2409
2410 scsi_adjust_queue_depth(dev, goal_tags);
2411 }
2412 2490
2413 tp->flags |= ESP_TGT_DISCONNECT; 2491 tp->flags |= ESP_TGT_DISCONNECT;
2414 2492
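Together with the scsi_esp_register() hunk earlier, this replaces the compile-time tag count with a per-host esp->num_tags: a sub-driver may set it before registration, zero falls back to ESP_DEFAULT_TAGS, values at or above ESP_MAX_TAG are clamped to ESP_MAX_TAG - 1, and slave_configure simply passes the result to scsi_change_queue_depth(). A hypothetical caller (sketch):

	esp->num_tags = 32;	/* 0 would mean ESP_DEFAULT_TAGS */
	err = scsi_esp_register(esp, &pdev->dev);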
@@ -2437,19 +2515,20 @@ static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
2437 * XXX much for the final driver. 2515 * XXX much for the final driver.
2438 */ 2516 */
2439 spin_lock_irqsave(esp->host->host_lock, flags); 2517 spin_lock_irqsave(esp->host->host_lock, flags);
2440 printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n", 2518 shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
2441 esp->host->unique_id, cmd, cmd->cmnd[0]); 2519 cmd, cmd->cmnd[0]);
2442 ent = esp->active_cmd; 2520 ent = esp->active_cmd;
2443 if (ent) 2521 if (ent)
2444 printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n", 2522 shost_printk(KERN_ERR, esp->host,
2445 esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); 2523 "Current command [%p:%02x]\n",
2524 ent->cmd, ent->cmd->cmnd[0]);
2446 list_for_each_entry(ent, &esp->queued_cmds, list) { 2525 list_for_each_entry(ent, &esp->queued_cmds, list) {
2447 printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n", 2526 shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
2448 esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); 2527 ent->cmd, ent->cmd->cmnd[0]);
2449 } 2528 }
2450 list_for_each_entry(ent, &esp->active_cmds, list) { 2529 list_for_each_entry(ent, &esp->active_cmds, list) {
2451 printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n", 2530 shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
2452 esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); 2531 ent->cmd, ent->cmd->cmnd[0]);
2453 } 2532 }
2454 esp_dump_cmd_log(esp); 2533 esp_dump_cmd_log(esp);
2455 spin_unlock_irqrestore(esp->host->host_lock, flags); 2534 spin_unlock_irqrestore(esp->host->host_lock, flags);
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index cd68805e8d78..84dcbe4a6268 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -1,4 +1,4 @@
1/* esp_scsi.h: Defines and structures for the ESP drier. 1/* esp_scsi.h: Defines and structures for the ESP driver.
2 * 2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */ 4 */
@@ -25,6 +25,7 @@
25#define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */ 25#define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */
26#define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */ 26#define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */
27#define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */ 27#define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */
28#define ESP_CFG4 0x0dUL /* rw Fourth cfg register 0x34 */
28#define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */ 29#define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */
29#define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ 30#define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */
30#define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ 31#define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */
@@ -76,6 +77,18 @@
76#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ 77#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */
77#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ 78#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */
78 79
80/* ESP config register 4 read-write, found only on am53c974 chips */
81#define ESP_CONFIG4_RADE 0x04 /* Active negation */
82#define ESP_CONFIG4_RAE 0x08 /* Active negation on REQ and ACK */
83#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature */
84#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 */
85#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 */
86
87#define ESP_CONFIG_GE_12NS (0)
88#define ESP_CONFIG_GE_25NS (ESP_CONFIG4_GE1)
89#define ESP_CONFIG_GE_35NS (ESP_CONFIG4_GE0)
90#define ESP_CONFIG_GE_0NS (ESP_CONFIG4_GE0 | ESP_CONFIG4_GE1)
91
79/* ESP command register read-write */ 92/* ESP command register read-write */
80/* Group 1 commands: These may be sent at any point in time to the ESP 93/* Group 1 commands: These may be sent at any point in time to the ESP
81 * chip. None of them can generate interrupts 'cept 94 * chip. None of them can generate interrupts 'cept
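The two glitch-eater bits encode how wide a glitch the am53c974 filters on the SCSI signals; the four ESP_CONFIG_GE_* values are the GE1:GE0 encodings (the macro bodies are normalized above to reference the ESP_CONFIG4_GE* bits they clearly intend). Programming it might look like this sketch, assuming the driver's esp_write8() and the config4 shadow field introduced below:

	/* e.g. have the chip swallow glitches up to 25ns wide */
	esp->config4 &= ~(ESP_CONFIG4_GE0 | ESP_CONFIG4_GE1);
	esp->config4 |= ESP_CONFIG_GE_25NS;
	esp_write8(esp->config4, ESP_CFG4);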
@@ -254,6 +267,7 @@ enum esp_rev {
254 FAS100A = 0x04, 267 FAS100A = 0x04,
255 FAST = 0x05, 268 FAST = 0x05,
256 FASHME = 0x06, 269 FASHME = 0x06,
270 PCSCSI = 0x07, /* AM53c974 */
257}; 271};
258 272
259struct esp_cmd_entry { 273struct esp_cmd_entry {
@@ -269,6 +283,7 @@ struct esp_cmd_entry {
269#define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */ 283#define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */
270#define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */ 284#define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */
271#define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ 285#define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
286#define ESP_CMD_FLAG_RESIDUAL 0x08 /* AM53c974 BLAST residual */
272 287
273 u8 tag[2]; 288 u8 tag[2];
274 u8 orig_tag[2]; 289 u8 orig_tag[2];
@@ -283,7 +298,6 @@ struct esp_cmd_entry {
283 struct completion *eh_done; 298 struct completion *eh_done;
284}; 299};
285 300
286/* XXX make this configurable somehow XXX */
287#define ESP_DEFAULT_TAGS 16 301#define ESP_DEFAULT_TAGS 16
288 302
289#define ESP_MAX_TARGET 16 303#define ESP_MAX_TARGET 16
@@ -445,7 +459,7 @@ struct esp {
445 u8 prev_soff; 459 u8 prev_soff;
446 u8 prev_stp; 460 u8 prev_stp;
447 u8 prev_cfg3; 461 u8 prev_cfg3;
448 u8 __pad; 462 u8 num_tags;
449 463
450 struct list_head esp_cmd_pool; 464 struct list_head esp_cmd_pool;
451 465
@@ -466,6 +480,7 @@ struct esp {
466 u8 bursts; 480 u8 bursts;
467 u8 config1; 481 u8 config1;
468 u8 config2; 482 u8 config2;
483 u8 config4;
469 484
470 u8 scsi_id; 485 u8 scsi_id;
471 u32 scsi_id_mask; 486 u32 scsi_id_mask;
@@ -479,6 +494,7 @@ struct esp {
479#define ESP_FLAG_WIDE_CAPABLE 0x00000008 494#define ESP_FLAG_WIDE_CAPABLE 0x00000008
480#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010 495#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
481#define ESP_FLAG_DISABLE_SYNC 0x00000020 496#define ESP_FLAG_DISABLE_SYNC 0x00000020
497#define ESP_FLAG_USE_FIFO 0x00000040
482 498
483 u8 select_state; 499 u8 select_state;
484#define ESP_SELECT_NONE 0x00 /* Not selecting */ 500#define ESP_SELECT_NONE 0x00 /* Not selecting */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index a3eeb6842499..308a016fdaea 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -280,7 +280,7 @@ static struct scsi_host_template fcoe_shost_template = {
280 .eh_device_reset_handler = fc_eh_device_reset, 280 .eh_device_reset_handler = fc_eh_device_reset,
281 .eh_host_reset_handler = fc_eh_host_reset, 281 .eh_host_reset_handler = fc_eh_host_reset,
282 .slave_alloc = fc_slave_alloc, 282 .slave_alloc = fc_slave_alloc,
283 .change_queue_depth = fc_change_queue_depth, 283 .change_queue_depth = scsi_change_queue_depth,
284 .change_queue_type = scsi_change_queue_type, 284 .change_queue_type = scsi_change_queue_type,
285 .this_id = -1, 285 .this_id = -1,
286 .cmd_per_lun = 3, 286 .cmd_per_lun = 3,
@@ -289,6 +289,7 @@ static struct scsi_host_template fcoe_shost_template = {
289 .sg_tablesize = SG_ALL, 289 .sg_tablesize = SG_ALL,
290 .max_sectors = 0xffff, 290 .max_sectors = 0xffff,
291 .use_blk_tags = 1, 291 .use_blk_tags = 1,
292 .track_queue_depth = 1,
292}; 293};
293 294
294/** 295/**
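This hunk is one instance of a tree-wide conversion: drivers whose change_queue_depth callback only forwarded to the midlayer now point it directly at scsi_change_queue_depth() and set track_queue_depth so the core does the busy/ramp accounting itself. The per-driver pattern, in sketch form:

	static struct scsi_host_template example_sht = {
		/* ... */
		.change_queue_depth	= scsi_change_queue_depth,
		.track_queue_depth	= 1,
	};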
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index bf8d34c26f13..3b73b96619e2 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
39 39
40#define DRV_NAME "fnic" 40#define DRV_NAME "fnic"
41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
42#define DRV_VERSION "1.6.0.11" 42#define DRV_VERSION "1.6.0.16"
43#define PFX DRV_NAME ": " 43#define PFX DRV_NAME ": "
44#define DFX DRV_NAME "%d: " 44#define DFX DRV_NAME "%d: "
45 45
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index f3984b48f8e9..bf0bbd42efb5 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -135,6 +135,11 @@ void fnic_handle_link(struct work_struct *work)
135 fnic->lport->host->host_no, FNIC_FC_LE, 135 fnic->lport->host->host_no, FNIC_FC_LE,
136 "Link Status: UP_DOWN", 136 "Link Status: UP_DOWN",
137 strlen("Link Status: UP_DOWN")); 137 strlen("Link Status: UP_DOWN"));
138 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
139 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
140 "deleting fip-timer during link-down\n");
141 del_timer_sync(&fnic->fip_timer);
142 }
138 fcoe_ctlr_link_down(&fnic->ctlr); 143 fcoe_ctlr_link_down(&fnic->ctlr);
139 } 144 }
140 145
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index cf1560c30b7f..0c1f8177b5b7 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -98,7 +98,7 @@ static int fnic_slave_alloc(struct scsi_device *sdev)
98 if (!rport || fc_remote_port_chkready(rport)) 98 if (!rport || fc_remote_port_chkready(rport))
99 return -ENXIO; 99 return -ENXIO;
100 100
101 scsi_adjust_queue_depth(sdev, fnic_max_qdepth); 101 scsi_change_queue_depth(sdev, fnic_max_qdepth);
102 return 0; 102 return 0;
103} 103}
104 104
@@ -110,7 +110,7 @@ static struct scsi_host_template fnic_host_template = {
110 .eh_device_reset_handler = fnic_device_reset, 110 .eh_device_reset_handler = fnic_device_reset,
111 .eh_host_reset_handler = fnic_host_reset, 111 .eh_host_reset_handler = fnic_host_reset,
112 .slave_alloc = fnic_slave_alloc, 112 .slave_alloc = fnic_slave_alloc,
113 .change_queue_depth = fc_change_queue_depth, 113 .change_queue_depth = scsi_change_queue_depth,
114 .change_queue_type = scsi_change_queue_type, 114 .change_queue_type = scsi_change_queue_type,
115 .this_id = -1, 115 .this_id = -1,
116 .cmd_per_lun = 3, 116 .cmd_per_lun = 3,
@@ -120,6 +120,7 @@ static struct scsi_host_template fnic_host_template = {
120 .max_sectors = 0xffff, 120 .max_sectors = 0xffff,
121 .shost_attrs = fnic_attrs, 121 .shost_attrs = fnic_attrs,
122 .use_blk_tags = 1, 122 .use_blk_tags = 1,
123 .track_queue_depth = 1,
123}; 124};
124 125
125static void 126static void
@@ -437,21 +438,30 @@ static int fnic_dev_wait(struct vnic_dev *vdev,
437 unsigned long time; 438 unsigned long time;
438 int done; 439 int done;
439 int err; 440 int err;
441 int count;
442
443 count = 0;
440 444
441 err = start(vdev, arg); 445 err = start(vdev, arg);
442 if (err) 446 if (err)
443 return err; 447 return err;
444 448
445 /* Wait for func to complete...2 seconds max */ 449 /* Wait for func to complete.
450 * schedule_timeout_uninterruptible can sometimes take a long
451 * time to wake up, so the 2-second deadline alone would allow
452 * no retries. The count makes sure we poll at least three
453 * times before returning -ETIMEDOUT.
454 */
446 time = jiffies + (HZ * 2); 455 time = jiffies + (HZ * 2);
447 do { 456 do {
448 err = finished(vdev, &done); 457 err = finished(vdev, &done);
458 count++;
449 if (err) 459 if (err)
450 return err; 460 return err;
451 if (done) 461 if (done)
452 return 0; 462 return 0;
453 schedule_timeout_uninterruptible(HZ / 10); 463 schedule_timeout_uninterruptible(HZ / 10);
454 } while (time_after(time, jiffies)); 464 } while (time_after(time, jiffies) || (count < 3));
455 465
456 return -ETIMEDOUT; 466 return -ETIMEDOUT;
457} 467}
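The guard against a single oversleep eating the whole window works because the loop now continues while either the 2-second deadline is unexpired or fewer than three polls have run. The same pattern in isolation (start()/finished() stand in for the vnic callbacks):

	unsigned long deadline = jiffies + 2 * HZ;
	int tries = 0;

	do {
		err = finished(vdev, &done);
		tries++;
		if (err)
			return err;
		if (done)
			return 0;
		schedule_timeout_uninterruptible(HZ / 10);
	} while (time_after(deadline, jiffies) || tries < 3);

	return -ETIMEDOUT;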
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 10d5c6bbc9e7..2097de42a147 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -421,8 +421,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
421 int ret; 421 int ret;
422 u64 cmd_trace; 422 u64 cmd_trace;
423 int sg_count = 0; 423 int sg_count = 0;
424 unsigned long flags; 424 unsigned long flags = 0;
425 unsigned long ptr; 425 unsigned long ptr;
426 struct fc_rport_priv *rdata;
427 spinlock_t *io_lock = NULL;
426 428
427 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) 429 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
428 return SCSI_MLQUEUE_HOST_BUSY; 430 return SCSI_MLQUEUE_HOST_BUSY;
@@ -436,6 +438,16 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
436 return 0; 438 return 0;
437 } 439 }
438 440
441 rdata = lp->tt.rport_lookup(lp, rport->port_id);
442 if (!rdata || (rdata->rp_state == RPORT_ST_DELETE)) {
443 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
444 "returning IO as rport is removed\n");
445 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
446 sc->result = DID_NO_CONNECT << 16;
447 done(sc);
448 return 0;
449 }
450
439 if (lp->state != LPORT_ST_READY || !(lp->link_up)) 451 if (lp->state != LPORT_ST_READY || !(lp->link_up))
440 return SCSI_MLQUEUE_HOST_BUSY; 452 return SCSI_MLQUEUE_HOST_BUSY;
441 453
@@ -498,6 +510,13 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
498 } 510 }
499 } 511 }
500 512
513 /*
514 * Acquire the io_lock before marking the IO as initialized.
515 */
516
517 io_lock = fnic_io_lock_hash(fnic, sc);
518 spin_lock_irqsave(io_lock, flags);
519
501 /* initialize rest of io_req */ 520 /* initialize rest of io_req */
502 io_req->port_id = rport->port_id; 521 io_req->port_id = rport->port_id;
503 io_req->start_time = jiffies; 522 io_req->start_time = jiffies;
@@ -514,11 +533,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
514 * In case another thread cancelled the request, 533 * In case another thread cancelled the request,
515 * refetch the pointer under the lock. 534 * refetch the pointer under the lock.
516 */ 535 */
517 spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
518 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, 536 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
519 sc->request->tag, sc, 0, 0, 0, 537 sc->request->tag, sc, 0, 0, 0,
520 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 538 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
521 spin_lock_irqsave(io_lock, flags);
522 io_req = (struct fnic_io_req *)CMD_SP(sc); 539 io_req = (struct fnic_io_req *)CMD_SP(sc);
523 CMD_SP(sc) = NULL; 540 CMD_SP(sc) = NULL;
524 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; 541 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
@@ -527,6 +544,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
527 fnic_release_ioreq_buf(fnic, io_req, sc); 544 fnic_release_ioreq_buf(fnic, io_req, sc);
528 mempool_free(io_req, fnic->io_req_pool); 545 mempool_free(io_req, fnic->io_req_pool);
529 } 546 }
547 atomic_dec(&fnic->in_flight);
548 /* acquire host lock before returning to SCSI */
549 spin_lock(lp->host->host_lock);
550 return ret;
530 } else { 551 } else {
531 atomic64_inc(&fnic_stats->io_stats.active_ios); 552 atomic64_inc(&fnic_stats->io_stats.active_ios);
532 atomic64_inc(&fnic_stats->io_stats.num_ios); 553 atomic64_inc(&fnic_stats->io_stats.num_ios);
@@ -548,6 +569,11 @@ out:
548 sc->request->tag, sc, io_req, 569 sc->request->tag, sc, io_req,
549 sg_count, cmd_trace, 570 sg_count, cmd_trace,
550 (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); 571 (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
572
573 /* we hold the io_lock only if this path initialized the IO */
574 if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
575 spin_unlock_irqrestore(io_lock, flags);
576
551 atomic_dec(&fnic->in_flight); 577 atomic_dec(&fnic->in_flight);
552 /* acquire host lock before returning to SCSI */ 578 /* acquire host lock before returning to SCSI */
553 spin_lock(lp->host->host_lock); 579 spin_lock(lp->host->host_lock);
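fnic guards per-command state with a fixed pool of spinlocks indexed by a hash of the command rather than a lock per IO; the change above takes that lock before the io_req is published and, on the exit paths, drops it only when FNIC_IO_INITIALIZED shows this path took it. The hashed-lock idea reduces to something like this sketch (size and names are illustrative, spin_lock_init() on each entry assumed elsewhere):

	#define IO_LOCK_COUNT	64	/* power of two */

	static spinlock_t io_locks[IO_LOCK_COUNT];

	static spinlock_t *io_lock_hash(unsigned int tag)
	{
		return &io_locks[tag & (IO_LOCK_COUNT - 1)];
	}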
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index acf1f95cb5c5..65a9bde26974 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -624,12 +624,12 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
624 if (frame_type == FNIC_FC_RECV) { 624 if (frame_type == FNIC_FC_RECV) {
625 eth_fcoe_hdr_len = sizeof(struct ethhdr) + 625 eth_fcoe_hdr_len = sizeof(struct ethhdr) +
626 sizeof(struct fcoe_hdr); 626 sizeof(struct fcoe_hdr);
627 fc_trc_frame_len = fc_trc_frame_len + eth_fcoe_hdr_len;
628 memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len); 627 memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
629 /* Copy the rest of data frame */ 628 /* Copy the rest of data frame */
630 memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame, 629 memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
631 min_t(u8, fc_trc_frame_len, 630 min_t(u8, fc_trc_frame_len,
632 (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); 631 (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE
632 - eth_fcoe_hdr_len)));
633 } else { 633 } else {
634 memcpy((char *)fc_trace, (void *)frame, 634 memcpy((char *)fc_trace, (void *)frame,
635 min_t(u8, fc_trc_frame_len, 635 min_t(u8, fc_trc_frame_len,
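The trace fix stops counting the synthesized Ethernet+FCoE header twice: the header bytes are memset separately, so the copy bound must subtract the header length from the space left in the trace record instead of inflating the frame length. In sketch form (TRACE_BUF_SIZE/TRACE_HDR_SIZE are stand-ins for the FC_TRC_* constants):

	size_t hdr_len = sizeof(struct ethhdr) + sizeof(struct fcoe_hdr);

	memset(dst, 0xff, hdr_len);		/* synthesized header */
	memcpy(dst + hdr_len, frame,
	       min_t(u8, frame_len,
		     (u8)(TRACE_BUF_SIZE - TRACE_HDR_SIZE - hdr_len)));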
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index b331272e93bc..f35792f7051c 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -18,20 +18,6 @@
18 * 18 *
19 * Added ISAPNP support for DTC436 adapters, 19 * Added ISAPNP support for DTC436 adapters,
20 * Thomas Sailer, sailer@ife.ee.ethz.ch 20 * Thomas Sailer, sailer@ife.ee.ethz.ch
21 *
22 * ALPHA RELEASE 1.
23 *
24 * For more information, please consult
25 *
26 * NCR 5380 Family
27 * SCSI Protocol Controller
28 * Databook
29 *
30 * NCR Microelectronics
31 * 1635 Aeroplaza Drive
32 * Colorado Springs, CO 80916
33 * 1+ (719) 578-3400
34 * 1+ (800) 334-5454
35 */ 21 */
36 22
37/* 23/*
@@ -40,14 +26,6 @@
40 */ 26 */
41 27
42/* 28/*
43 * Options :
44 *
45 * PARITY - enable parity checking. Not supported.
46 *
47 * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
48 *
49 * USLEEP - enable support for devices that don't disconnect. Untested.
50 *
51 * The card is detected and initialized in one of several ways : 29 * The card is detected and initialized in one of several ways :
52 * 1. With command line overrides - NCR5380=port,irq may be 30 * 1. With command line overrides - NCR5380=port,irq may be
53 * used on the LILO command line to override the defaults. 31 * used on the LILO command line to override the defaults.
@@ -79,27 +57,21 @@
79 */ 57 */
80 58
81/* settings for DTC3181E card with only Mustek scanner attached */ 59/* settings for DTC3181E card with only Mustek scanner attached */
82#define USLEEP
83#define USLEEP_POLL 1 60#define USLEEP_POLL 1
84#define USLEEP_SLEEP 20 61#define USLEEP_SLEEP 20
85#define USLEEP_WAITLONG 500 62#define USLEEP_WAITLONG 500
86 63
87#define AUTOPROBE_IRQ 64#define AUTOPROBE_IRQ
88#define AUTOSENSE
89
90 65
91#ifdef CONFIG_SCSI_GENERIC_NCR53C400 66#ifdef CONFIG_SCSI_GENERIC_NCR53C400
92#define NCR53C400_PSEUDO_DMA 1 67#define NCR53C400_PSEUDO_DMA 1
93#define PSEUDO_DMA 68#define PSEUDO_DMA
94#define NCR53C400 69#define NCR53C400
95#define NCR5380_STATS
96#undef NCR5380_STAT_LIMIT
97#endif 70#endif
98 71
99#include <asm/io.h> 72#include <asm/io.h>
100#include <linux/signal.h> 73#include <linux/signal.h>
101#include <linux/blkdev.h> 74#include <linux/blkdev.h>
102#include "scsi.h"
103#include <scsi/scsi_host.h> 75#include <scsi/scsi_host.h>
104#include "g_NCR5380.h" 76#include "g_NCR5380.h"
105#include "NCR5380.h" 77#include "NCR5380.h"
@@ -277,7 +249,7 @@ static int __init do_DTC3181E_setup(char *str)
277 * Locks: none 249 * Locks: none
278 */ 250 */
279 251
280int __init generic_NCR5380_detect(struct scsi_host_template * tpnt) 252static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
281{ 253{
282 static int current_override = 0; 254 static int current_override = 0;
283 int count; 255 int count;
@@ -335,7 +307,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
335 if (pnp_irq_valid(dev, 0)) 307 if (pnp_irq_valid(dev, 0))
336 overrides[count].irq = pnp_irq(dev, 0); 308 overrides[count].irq = pnp_irq(dev, 0);
337 else 309 else
338 overrides[count].irq = SCSI_IRQ_NONE; 310 overrides[count].irq = NO_IRQ;
339 if (pnp_dma_valid(dev, 0)) 311 if (pnp_dma_valid(dev, 0))
340 overrides[count].dma = pnp_dma(dev, 0); 312 overrides[count].dma = pnp_dma(dev, 0);
341 else 313 else
@@ -455,27 +427,22 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
455 else 427 else
456 instance->irq = NCR5380_probe_irq(instance, 0xffff); 428 instance->irq = NCR5380_probe_irq(instance, 0xffff);
457 429
458 if (instance->irq != SCSI_IRQ_NONE) 430 /* Compatibility with documented NCR5380 kernel parameters */
431 if (instance->irq == 255)
432 instance->irq = NO_IRQ;
433
434 if (instance->irq != NO_IRQ)
459 if (request_irq(instance->irq, generic_NCR5380_intr, 435 if (request_irq(instance->irq, generic_NCR5380_intr,
460 0, "NCR5380", instance)) { 436 0, "NCR5380", instance)) {
461 printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); 437 printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
462 instance->irq = SCSI_IRQ_NONE; 438 instance->irq = NO_IRQ;
463 } 439 }
464 440
465 if (instance->irq == SCSI_IRQ_NONE) { 441 if (instance->irq == NO_IRQ) {
466 printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); 442 printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
467 printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); 443 printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
468 } 444 }
469 445
470 printk(KERN_INFO "scsi%d : at " STRVAL(NCR5380_map_name) " 0x%x", instance->host_no, (unsigned int) instance->NCR5380_instance_name);
471 if (instance->irq == SCSI_IRQ_NONE)
472 printk(" interrupts disabled");
473 else
474 printk(" irq %d", instance->irq);
475 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", CAN_QUEUE, CMD_PER_LUN, GENERIC_NCR5380_PUBLIC_RELEASE);
476 NCR5380_print_options(instance);
477 printk("\n");
478
479 ++current_override; 446 ++current_override;
480 ++count; 447 ++count;
481 } 448 }
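With the driver-private SCSI_IRQ_NONE gone, the generic driver adopts the arch-provided NO_IRQ sentinel and keeps the documented irq=255 kernel parameter as an alias for it. The resulting registration flow, condensed:

	if (instance->irq == 255)	/* documented "no irq" value */
		instance->irq = NO_IRQ;

	if (instance->irq != NO_IRQ &&
	    request_irq(instance->irq, generic_NCR5380_intr, 0,
			"NCR5380", instance))
		instance->irq = NO_IRQ;	/* fall back to polled mode */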
@@ -483,19 +450,6 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
483} 450}
484 451
485/** 452/**
486 * generic_NCR5380_info - reporting string
487 * @host: NCR5380 to report on
488 *
489 * Report driver information for the NCR5380
490 */
491
492const char *generic_NCR5380_info(struct Scsi_Host *host)
493{
494 static const char string[] = "Generic NCR5380/53C400 Driver";
495 return string;
496}
497
498/**
499 * generic_NCR5380_release_resources - free resources 453 * generic_NCR5380_release_resources - free resources
500 * @instance: host adapter to clean up 454 * @instance: host adapter to clean up
501 * 455 *
@@ -504,12 +458,12 @@ const char *generic_NCR5380_info(struct Scsi_Host *host)
504 * Locks: none 458 * Locks: none
505 */ 459 */
506 460
507int generic_NCR5380_release_resources(struct Scsi_Host *instance) 461static int generic_NCR5380_release_resources(struct Scsi_Host *instance)
508{ 462{
509 NCR5380_local_declare(); 463 NCR5380_local_declare();
510 NCR5380_setup(instance); 464 NCR5380_setup(instance);
511 465
512 if (instance->irq != SCSI_IRQ_NONE) 466 if (instance->irq != NO_IRQ)
513 free_irq(instance->irq, instance); 467 free_irq(instance->irq, instance);
514 NCR5380_exit(instance); 468 NCR5380_exit(instance);
515 469
@@ -741,163 +695,9 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src,
741 695
742#include "NCR5380.c" 696#include "NCR5380.c"
743 697
744#define PRINTP(x) seq_printf(m, x)
745#define ANDP ,
746
747static void sprint_opcode(struct seq_file *m, int opcode)
748{
749 PRINTP("0x%02x " ANDP opcode);
750}
751
752static void sprint_command(struct seq_file *m, unsigned char *command)
753{
754 int i, s;
755 sprint_opcode(m, command[0]);
756 for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
757 PRINTP("%02x " ANDP command[i]);
758 PRINTP("\n");
759}
760
761/**
762 * sprintf_Scsi_Cmnd - print a scsi command
763 * @m: seq_fil to print into
764 * @cmd: SCSI command block
765 *
766 * Print out the target and command data in hex
767 */
768
769static void sprint_Scsi_Cmnd(struct seq_file *m, Scsi_Cmnd * cmd)
770{
771 PRINTP("host number %d destination target %d, lun %llu\n" ANDP cmd->device->host->host_no ANDP cmd->device->id ANDP cmd->device->lun);
772 PRINTP(" command = ");
773 sprint_command(m, cmd->cmnd);
774}
775
776/**
777 * generic_NCR5380_proc_info - /proc for NCR5380 driver
778 * @buffer: buffer to print into
779 * @start: start position
780 * @offset: offset into buffer
781 * @len: length
782 * @hostno: instance to affect
783 * @inout: read/write
784 *
785 * Provide the procfs information for the 5380 controller. We fill
786 * this with useful debugging information including the commands
787 * being executed, disconnected command queue and the statistical
788 * data
789 *
790 * Locks: global cli/lock for queue walk
791 */
792
793static int generic_NCR5380_show_info(struct seq_file *m, struct Scsi_Host *scsi_ptr)
794{
795 NCR5380_local_declare();
796 unsigned long flags;
797 unsigned char status;
798 int i;
799 Scsi_Cmnd *ptr;
800 struct NCR5380_hostdata *hostdata;
801#ifdef NCR5380_STATS
802 struct scsi_device *dev;
803#endif
804
805 NCR5380_setup(scsi_ptr);
806 hostdata = (struct NCR5380_hostdata *) scsi_ptr->hostdata;
807
808 spin_lock_irqsave(scsi_ptr->host_lock, flags);
809 PRINTP("SCSI host number %d : %s\n" ANDP scsi_ptr->host_no ANDP scsi_ptr->hostt->name);
810 PRINTP("Generic NCR5380 driver version %d\n" ANDP GENERIC_NCR5380_PUBLIC_RELEASE);
811 PRINTP("NCR5380 core version %d\n" ANDP NCR5380_PUBLIC_RELEASE);
812#ifdef NCR53C400
813 PRINTP("NCR53C400 extension version %d\n" ANDP NCR53C400_PUBLIC_RELEASE);
814 PRINTP("NCR53C400 card%s detected\n" ANDP(((struct NCR5380_hostdata *) scsi_ptr->hostdata)->flags & FLAG_NCR53C400) ? "" : " not");
815# if NCR53C400_PSEUDO_DMA
816 PRINTP("NCR53C400 pseudo DMA used\n");
817# endif
818#else
819 PRINTP("NO NCR53C400 driver extensions\n");
820#endif
821 PRINTP("Using %s mapping at %s 0x%lx, " ANDP STRVAL(NCR5380_map_config) ANDP STRVAL(NCR5380_map_name) ANDP scsi_ptr->NCR5380_instance_name);
822 if (scsi_ptr->irq == SCSI_IRQ_NONE)
823 PRINTP("no interrupt\n");
824 else
825 PRINTP("on interrupt %d\n" ANDP scsi_ptr->irq);
826
827#ifdef NCR5380_STATS
828 if (hostdata->connected || hostdata->issue_queue || hostdata->disconnected_queue)
829 PRINTP("There are commands pending, transfer rates may be crud\n");
830 if (hostdata->pendingr)
831 PRINTP(" %d pending reads" ANDP hostdata->pendingr);
832 if (hostdata->pendingw)
833 PRINTP(" %d pending writes" ANDP hostdata->pendingw);
834 if (hostdata->pendingr || hostdata->pendingw)
835 PRINTP("\n");
836 shost_for_each_device(dev, scsi_ptr) {
837 unsigned long br = hostdata->bytes_read[dev->id];
838 unsigned long bw = hostdata->bytes_write[dev->id];
839 long tr = hostdata->time_read[dev->id] / HZ;
840 long tw = hostdata->time_write[dev->id] / HZ;
841
842 PRINTP(" T:%d %s " ANDP dev->id ANDP scsi_device_type(dev->type));
843 for (i = 0; i < 8; i++)
844 if (dev->vendor[i] >= 0x20)
845 seq_putc(m, dev->vendor[i]);
846 seq_putc(m, ' ');
847 for (i = 0; i < 16; i++)
848 if (dev->model[i] >= 0x20)
849 seq_putc(m, dev->model[i]);
850 seq_putc(m, ' ');
851 for (i = 0; i < 4; i++)
852 if (dev->rev[i] >= 0x20)
853 seq_putc(m, dev->rev[i]);
854 seq_putc(m, ' ');
855
856 PRINTP("\n%10ld kb read in %5ld secs" ANDP br / 1024 ANDP tr);
857 if (tr)
858 PRINTP(" @ %5ld bps" ANDP br / tr);
859
860 PRINTP("\n%10ld kb written in %5ld secs" ANDP bw / 1024 ANDP tw);
861 if (tw)
862 PRINTP(" @ %5ld bps" ANDP bw / tw);
863 PRINTP("\n");
864 }
865#endif
866
867 status = NCR5380_read(STATUS_REG);
868 if (!(status & SR_REQ))
869 PRINTP("REQ not asserted, phase unknown.\n");
870 else {
871 for (i = 0; (phases[i].value != PHASE_UNKNOWN) && (phases[i].value != (status & PHASE_MASK)); ++i);
872 PRINTP("Phase %s\n" ANDP phases[i].name);
873 }
874
875 if (!hostdata->connected) {
876 PRINTP("No currently connected command\n");
877 } else {
878 sprint_Scsi_Cmnd(m, (Scsi_Cmnd *) hostdata->connected);
879 }
880
881 PRINTP("issue_queue\n");
882
883 for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
884 sprint_Scsi_Cmnd(m, ptr);
885
886 PRINTP("disconnected_queue\n");
887
888 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
889 sprint_Scsi_Cmnd(m, ptr);
890
891 spin_unlock_irqrestore(scsi_ptr->host_lock, flags);
892 return 0;
893}
894
895#undef PRINTP
896#undef ANDP
897
898static struct scsi_host_template driver_template = { 698static struct scsi_host_template driver_template = {
899 .show_info = generic_NCR5380_show_info, 699 .show_info = generic_NCR5380_show_info,
900 .name = "Generic NCR5380/NCR53C400 Scsi Driver", 700 .name = "Generic NCR5380/NCR53C400 SCSI",
901 .detect = generic_NCR5380_detect, 701 .detect = generic_NCR5380_detect,
902 .release = generic_NCR5380_release_resources, 702 .release = generic_NCR5380_release_resources,
903 .info = generic_NCR5380_info, 703 .info = generic_NCR5380_info,
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 703adf78e0b2..bea1a3b9b862 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -9,28 +9,11 @@
9 * 9 *
10 * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin 10 * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
11 * K.Lentin@cs.monash.edu.au 11 * K.Lentin@cs.monash.edu.au
12 *
13 * ALPHA RELEASE 1.
14 *
15 * For more information, please consult
16 *
17 * NCR 5380 Family
18 * SCSI Protocol Controller
19 * Databook
20 *
21 * NCR Microelectronics
22 * 1635 Aeroplaza Drive
23 * Colorado Springs, CO 80916
24 * 1+ (719) 578-3400
25 * 1+ (800) 334-5454
26 */ 12 */
27 13
28#ifndef GENERIC_NCR5380_H 14#ifndef GENERIC_NCR5380_H
29#define GENERIC_NCR5380_H 15#define GENERIC_NCR5380_H
30 16
31
32#define GENERIC_NCR5380_PUBLIC_RELEASE 1
33
34#ifdef NCR53C400 17#ifdef NCR53C400
35#define BIOSPARAM 18#define BIOSPARAM
36#define NCR5380_BIOSPARAM generic_NCR5380_biosparam 19#define NCR5380_BIOSPARAM generic_NCR5380_biosparam
@@ -39,12 +22,6 @@
39#endif 22#endif
40 23
41#ifndef ASM 24#ifndef ASM
42static int generic_NCR5380_abort(Scsi_Cmnd *);
43static int generic_NCR5380_detect(struct scsi_host_template *);
44static int generic_NCR5380_release_resources(struct Scsi_Host *);
45static int generic_NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
46static int generic_NCR5380_bus_reset(Scsi_Cmnd *);
47static const char* generic_NCR5380_info(struct Scsi_Host *);
48 25
49#ifndef CMD_PER_LUN 26#ifndef CMD_PER_LUN
50#define CMD_PER_LUN 2 27#define CMD_PER_LUN 2
@@ -118,7 +95,8 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
118#define NCR5380_bus_reset generic_NCR5380_bus_reset 95#define NCR5380_bus_reset generic_NCR5380_bus_reset
119#define NCR5380_pread generic_NCR5380_pread 96#define NCR5380_pread generic_NCR5380_pread
120#define NCR5380_pwrite generic_NCR5380_pwrite 97#define NCR5380_pwrite generic_NCR5380_pwrite
121#define NCR5380_proc_info notyet_generic_proc_info 98#define NCR5380_info generic_NCR5380_info
99#define NCR5380_show_info generic_NCR5380_show_info
122 100
123#define BOARD_NCR5380 0 101#define BOARD_NCR5380 0
124#define BOARD_NCR53C400 1 102#define BOARD_NCR53C400 1
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 18ea2e16e34f..6bb4611b238a 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -48,6 +48,7 @@
48#include <linux/bitmap.h> 48#include <linux/bitmap.h>
49#include <linux/atomic.h> 49#include <linux/atomic.h>
50#include <linux/jiffies.h> 50#include <linux/jiffies.h>
51#include <linux/percpu-defs.h>
51#include <linux/percpu.h> 52#include <linux/percpu.h>
52#include <asm/div64.h> 53#include <asm/div64.h>
53#include "hpsa_cmd.h" 54#include "hpsa_cmd.h"
@@ -103,7 +104,6 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, 104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, 105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924}, 106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, 107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, 108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, 109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
@@ -149,6 +149,7 @@ static struct board_type products[] = {
149 {0x3249103C, "Smart Array P812", &SA5_access}, 149 {0x3249103C, "Smart Array P812", &SA5_access},
150 {0x324A103C, "Smart Array P712m", &SA5_access}, 150 {0x324A103C, "Smart Array P712m", &SA5_access},
151 {0x324B103C, "Smart Array P711m", &SA5_access}, 151 {0x324B103C, "Smart Array P711m", &SA5_access},
152 {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
152 {0x3350103C, "Smart Array P222", &SA5_access}, 153 {0x3350103C, "Smart Array P222", &SA5_access},
153 {0x3351103C, "Smart Array P420", &SA5_access}, 154 {0x3351103C, "Smart Array P420", &SA5_access},
154 {0x3352103C, "Smart Array P421", &SA5_access}, 155 {0x3352103C, "Smart Array P421", &SA5_access},
@@ -193,12 +194,13 @@ static int number_of_controllers;
193 194
194static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); 195static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
195static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); 196static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
196static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); 197static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
197static void lock_and_start_io(struct ctlr_info *h); 198static void lock_and_start_io(struct ctlr_info *h);
198static void start_io(struct ctlr_info *h, unsigned long *flags); 199static void start_io(struct ctlr_info *h, unsigned long *flags);
199 200
200#ifdef CONFIG_COMPAT 201#ifdef CONFIG_COMPAT
201static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); 202static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
203 void __user *arg);
202#endif 204#endif
203 205
204static void cmd_free(struct ctlr_info *h, struct CommandList *c); 206static void cmd_free(struct ctlr_info *h, struct CommandList *c);
@@ -214,8 +216,6 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
214static void hpsa_scan_start(struct Scsi_Host *); 216static void hpsa_scan_start(struct Scsi_Host *);
215static int hpsa_scan_finished(struct Scsi_Host *sh, 217static int hpsa_scan_finished(struct Scsi_Host *sh,
216 unsigned long elapsed_time); 218 unsigned long elapsed_time);
217static int hpsa_change_queue_depth(struct scsi_device *sdev,
218 int qdepth, int reason);
219 219
220static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); 220static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
221static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); 221static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
@@ -274,12 +274,12 @@ static int check_for_unit_attention(struct ctlr_info *h,
274 "detected, command retried\n", h->ctlr); 274 "detected, command retried\n", h->ctlr);
275 break; 275 break;
276 case LUN_FAILED: 276 case LUN_FAILED:
277 dev_warn(&h->pdev->dev, HPSA "%d: LUN failure " 277 dev_warn(&h->pdev->dev,
278 "detected, action required\n", h->ctlr); 278 HPSA "%d: LUN failure detected\n", h->ctlr);
279 break; 279 break;
280 case REPORT_LUNS_CHANGED: 280 case REPORT_LUNS_CHANGED:
281 dev_warn(&h->pdev->dev, HPSA "%d: report LUN data " 281 dev_warn(&h->pdev->dev,
282 "changed, action required\n", h->ctlr); 282 HPSA "%d: report LUN data changed\n", h->ctlr);
283 /* 283 /*
284 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external 284 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
285 * target (array) devices. 285 * target (array) devices.
@@ -392,7 +392,8 @@ static ssize_t host_show_commands_outstanding(struct device *dev,
392 struct Scsi_Host *shost = class_to_shost(dev); 392 struct Scsi_Host *shost = class_to_shost(dev);
393 struct ctlr_info *h = shost_to_hba(shost); 393 struct ctlr_info *h = shost_to_hba(shost);
394 394
395 return snprintf(buf, 20, "%d\n", h->commands_outstanding); 395 return snprintf(buf, 20, "%d\n",
396 atomic_read(&h->commands_outstanding));
396} 397}
397 398
398static ssize_t host_show_transport_mode(struct device *dev, 399static ssize_t host_show_transport_mode(struct device *dev,
@@ -670,7 +671,7 @@ static struct scsi_host_template hpsa_driver_template = {
670 .queuecommand = hpsa_scsi_queue_command, 671 .queuecommand = hpsa_scsi_queue_command,
671 .scan_start = hpsa_scan_start, 672 .scan_start = hpsa_scan_start,
672 .scan_finished = hpsa_scan_finished, 673 .scan_finished = hpsa_scan_finished,
673 .change_queue_depth = hpsa_change_queue_depth, 674 .change_queue_depth = scsi_change_queue_depth,
674 .this_id = -1, 675 .this_id = -1,
675 .use_clustering = ENABLE_CLUSTERING, 676 .use_clustering = ENABLE_CLUSTERING,
676 .eh_abort_handler = hpsa_eh_abort_handler, 677 .eh_abort_handler = hpsa_eh_abort_handler,
@@ -698,7 +699,6 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
698{ 699{
699 u32 a; 700 u32 a;
700 struct reply_queue_buffer *rq = &h->reply_queue[q]; 701 struct reply_queue_buffer *rq = &h->reply_queue[q];
701 unsigned long flags;
702 702
703 if (h->transMethod & CFGTBL_Trans_io_accel1) 703 if (h->transMethod & CFGTBL_Trans_io_accel1)
704 return h->access.command_completed(h, q); 704 return h->access.command_completed(h, q);
@@ -709,9 +709,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
709 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { 709 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
710 a = rq->head[rq->current_entry]; 710 a = rq->head[rq->current_entry];
711 rq->current_entry++; 711 rq->current_entry++;
712 spin_lock_irqsave(&h->lock, flags); 712 atomic_dec(&h->commands_outstanding);
713 h->commands_outstanding--;
714 spin_unlock_irqrestore(&h->lock, flags);
715 } else { 713 } else {
716 a = FIFO_EMPTY; 714 a = FIFO_EMPTY;
717 } 715 }
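Making commands_outstanding an atomic_t removes a h->lock acquire/release pair from the completion hot path; the sysfs handler earlier in this patch reads it with atomic_read(). The conversion pattern, reduced to a sketch:

	atomic_t commands_outstanding;	/* in struct ctlr_info; was int + h->lock */

	atomic_inc(&h->commands_outstanding);	/* when a command is issued */
	atomic_dec(&h->commands_outstanding);	/* when a reply is consumed */
	count = atomic_read(&h->commands_outstanding);	/* lock-free observers */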
@@ -1500,22 +1498,22 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1500{ 1498{
1501 struct SGDescriptor *chain_sg, *chain_block; 1499 struct SGDescriptor *chain_sg, *chain_block;
1502 u64 temp64; 1500 u64 temp64;
1501 u32 chain_len;
1503 1502
1504 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 1503 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1505 chain_block = h->cmd_sg_list[c->cmdindex]; 1504 chain_block = h->cmd_sg_list[c->cmdindex];
1506 chain_sg->Ext = HPSA_SG_CHAIN; 1505 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1507 chain_sg->Len = sizeof(*chain_sg) * 1506 chain_len = sizeof(*chain_sg) *
1508 (c->Header.SGTotal - h->max_cmd_sg_entries); 1507 (c->Header.SGTotal - h->max_cmd_sg_entries);
1509 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len, 1508 chain_sg->Len = cpu_to_le32(chain_len);
1509 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
1510 PCI_DMA_TODEVICE); 1510 PCI_DMA_TODEVICE);
1511 if (dma_mapping_error(&h->pdev->dev, temp64)) { 1511 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1512 /* prevent subsequent unmapping */ 1512 /* prevent subsequent unmapping */
1513 chain_sg->Addr.lower = 0; 1513 chain_sg->Addr = cpu_to_le64(0);
1514 chain_sg->Addr.upper = 0;
1515 return -1; 1514 return -1;
1516 } 1515 }
1517 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL); 1516 chain_sg->Addr = cpu_to_le64(temp64);
1518 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
1519 return 0; 1517 return 0;
1520} 1518}
1521 1519
@@ -1523,15 +1521,13 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1523 struct CommandList *c) 1521 struct CommandList *c)
1524{ 1522{
1525 struct SGDescriptor *chain_sg; 1523 struct SGDescriptor *chain_sg;
1526 union u64bit temp64;
1527 1524
1528 if (c->Header.SGTotal <= h->max_cmd_sg_entries) 1525 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
1529 return; 1526 return;
1530 1527
1531 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 1528 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1532 temp64.val32.lower = chain_sg->Addr.lower; 1529 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1533 temp64.val32.upper = chain_sg->Addr.upper; 1530 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
1534 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
1535} 1531}
1536 1532
1537 1533
@@ -1732,8 +1728,7 @@ static void complete_scsi_command(struct CommandList *cp)
1732 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; 1728 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1733 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd); 1729 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
1734 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK; 1730 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
1735 cp->Header.Tag.lower = c->Tag.lower; 1731 cp->Header.tag = c->tag;
1736 cp->Header.Tag.upper = c->Tag.upper;
1737 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); 1732 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1738 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); 1733 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
1739 1734
@@ -1763,72 +1758,13 @@ static void complete_scsi_command(struct CommandList *cp)
1763 /* Get addition sense code qualifier */ 1758 /* Get addition sense code qualifier */
1764 ascq = ei->SenseInfo[13]; 1759 ascq = ei->SenseInfo[13];
1765 } 1760 }
1766
1767 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { 1761 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1768 if (check_for_unit_attention(h, cp))
1769 break;
1770 if (sense_key == ILLEGAL_REQUEST) {
1771 /*
1772 * SCSI REPORT_LUNS is commonly unsupported on
1773 * Smart Array. Suppress noisy complaint.
1774 */
1775 if (cp->Request.CDB[0] == REPORT_LUNS)
1776 break;
1777
1778 /* If ASC/ASCQ indicate Logical Unit
1779 * Not Supported condition,
1780 */
1781 if ((asc == 0x25) && (ascq == 0x0)) {
1782 dev_warn(&h->pdev->dev, "cp %p "
1783 "has check condition\n", cp);
1784 break;
1785 }
1786 }
1787
1788 if (sense_key == NOT_READY) {
1789 /* If Sense is Not Ready, Logical Unit
1790 * Not ready, Manual Intervention
1791 * required
1792 */
1793 if ((asc == 0x04) && (ascq == 0x03)) {
1794 dev_warn(&h->pdev->dev, "cp %p "
1795 "has check condition: unit "
1796 "not ready, manual "
1797 "intervention required\n", cp);
1798 break;
1799 }
1800 }
1801 if (sense_key == ABORTED_COMMAND) { 1762 if (sense_key == ABORTED_COMMAND) {
1802 /* Aborted command is retryable */
1803 dev_warn(&h->pdev->dev, "cp %p "
1804 "has check condition: aborted command: "
1805 "ASC: 0x%x, ASCQ: 0x%x\n",
1806 cp, asc, ascq);
1807 cmd->result |= DID_SOFT_ERROR << 16; 1763 cmd->result |= DID_SOFT_ERROR << 16;
1808 break; 1764 break;
1809 } 1765 }
1810 /* Must be some other type of check condition */
1811 dev_dbg(&h->pdev->dev, "cp %p has check condition: "
1812 "unknown type: "
1813 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1814 "Returning result: 0x%x, "
1815 "cmd=[%02x %02x %02x %02x %02x "
1816 "%02x %02x %02x %02x %02x %02x "
1817 "%02x %02x %02x %02x %02x]\n",
1818 cp, sense_key, asc, ascq,
1819 cmd->result,
1820 cmd->cmnd[0], cmd->cmnd[1],
1821 cmd->cmnd[2], cmd->cmnd[3],
1822 cmd->cmnd[4], cmd->cmnd[5],
1823 cmd->cmnd[6], cmd->cmnd[7],
1824 cmd->cmnd[8], cmd->cmnd[9],
1825 cmd->cmnd[10], cmd->cmnd[11],
1826 cmd->cmnd[12], cmd->cmnd[13],
1827 cmd->cmnd[14], cmd->cmnd[15]);
1828 break; 1766 break;
1829 } 1767 }
1830
1831
1832 /* Problem was not a check condition 1768 /* Problem was not a check condition
1833 * Pass it up to the upper layers... 1769 * Pass it up to the upper layers...
1834 */ 1770 */
@@ -1934,14 +1870,11 @@ static void hpsa_pci_unmap(struct pci_dev *pdev,
1934 struct CommandList *c, int sg_used, int data_direction) 1870 struct CommandList *c, int sg_used, int data_direction)
1935{ 1871{
1936 int i; 1872 int i;
1937 union u64bit addr64;
1938 1873
1939 for (i = 0; i < sg_used; i++) { 1874 for (i = 0; i < sg_used; i++)
1940 addr64.val32.lower = c->SG[i].Addr.lower; 1875 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
1941 addr64.val32.upper = c->SG[i].Addr.upper; 1876 le32_to_cpu(c->SG[i].Len),
1942 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len, 1877 data_direction);
1943 data_direction);
1944 }
1945} 1878}
1946 1879
1947static int hpsa_map_one(struct pci_dev *pdev, 1880static int hpsa_map_one(struct pci_dev *pdev,
@@ -1954,25 +1887,22 @@ static int hpsa_map_one(struct pci_dev *pdev,
1954 1887
1955 if (buflen == 0 || data_direction == PCI_DMA_NONE) { 1888 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1956 cp->Header.SGList = 0; 1889 cp->Header.SGList = 0;
1957 cp->Header.SGTotal = 0; 1890 cp->Header.SGTotal = cpu_to_le16(0);
1958 return 0; 1891 return 0;
1959 } 1892 }
1960 1893
1961 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); 1894 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
1962 if (dma_mapping_error(&pdev->dev, addr64)) { 1895 if (dma_mapping_error(&pdev->dev, addr64)) {
1963 /* Prevent subsequent unmap of something never mapped */ 1896 /* Prevent subsequent unmap of something never mapped */
1964 cp->Header.SGList = 0; 1897 cp->Header.SGList = 0;
1965 cp->Header.SGTotal = 0; 1898 cp->Header.SGTotal = cpu_to_le16(0);
1966 return -1; 1899 return -1;
1967 } 1900 }
1968 cp->SG[0].Addr.lower = 1901 cp->SG[0].Addr = cpu_to_le64(addr64);
1969 (u32) (addr64 & (u64) 0x00000000FFFFFFFF); 1902 cp->SG[0].Len = cpu_to_le32(buflen);
1970 cp->SG[0].Addr.upper = 1903 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
1971 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); 1904 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
1972 cp->SG[0].Len = buflen; 1905 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
1973 cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
1974 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1975 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1976 return 0; 1906 return 0;
1977} 1907}
1978 1908
@@ -2830,8 +2760,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2830 if (d == NULL) 2760 if (d == NULL)
2831 return 0; /* no match */ 2761 return 0; /* no match */
2832 2762
2833 it_nexus = cpu_to_le32((u32) d->ioaccel_handle); 2763 it_nexus = cpu_to_le32(d->ioaccel_handle);
2834 scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus); 2764 scsi_nexus = cpu_to_le32(c2a->scsi_nexus);
2835 find = c2a->scsi_nexus; 2765 find = c2a->scsi_nexus;
2836 2766
2837 if (h->raid_offload_debug > 0) 2767 if (h->raid_offload_debug > 0)
@@ -2891,7 +2821,7 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2891 * Returns 0 on success, -1 otherwise. 2821 * Returns 0 on success, -1 otherwise.
2892 */ 2822 */
2893static int hpsa_gather_lun_info(struct ctlr_info *h, 2823static int hpsa_gather_lun_info(struct ctlr_info *h,
2894 int reportlunsize, 2824 int reportphyslunsize, int reportloglunsize,
2895 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode, 2825 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
2896 struct ReportLUNdata *logdev, u32 *nlogicals) 2826 struct ReportLUNdata *logdev, u32 *nlogicals)
2897{ 2827{
@@ -2905,7 +2835,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
2905 *physical_mode = HPSA_REPORT_PHYS_EXTENDED; 2835 *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
2906 physical_entry_size = 24; 2836 physical_entry_size = 24;
2907 } 2837 }
2908 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 2838 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize,
2909 *physical_mode)) { 2839 *physical_mode)) {
2910 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 2840 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2911 return -1; 2841 return -1;
@@ -2918,7 +2848,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
2918 *nphysicals - HPSA_MAX_PHYS_LUN); 2848 *nphysicals - HPSA_MAX_PHYS_LUN);
2919 *nphysicals = HPSA_MAX_PHYS_LUN; 2849 *nphysicals = HPSA_MAX_PHYS_LUN;
2920 } 2850 }
2921 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { 2851 if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) {
2922 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 2852 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2923 return -1; 2853 return -1;
2924 } 2854 }
@@ -2941,8 +2871,8 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
2941 return 0; 2871 return 0;
2942} 2872}
2943 2873
2944u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, 2874static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
2945 int nphysicals, int nlogicals, 2875 int i, int nphysicals, int nlogicals,
2946 struct ReportExtendedLUNdata *physdev_list, 2876 struct ReportExtendedLUNdata *physdev_list,
2947 struct ReportLUNdata *logdev_list) 2877 struct ReportLUNdata *logdev_list)
2948{ 2878{
@@ -3011,15 +2941,14 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3011 u32 ndev_allocated = 0; 2941 u32 ndev_allocated = 0;
3012 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 2942 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3013 int ncurrent = 0; 2943 int ncurrent = 0;
3014 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
3015 int i, n_ext_target_devs, ndevs_to_allocate; 2944 int i, n_ext_target_devs, ndevs_to_allocate;
3016 int raid_ctlr_position; 2945 int raid_ctlr_position;
3017 int rescan_hba_mode; 2946 int rescan_hba_mode;
3018 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); 2947 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3019 2948
3020 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 2949 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3021 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 2950 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3022 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 2951 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3023 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 2952 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3024 2953
3025 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { 2954 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
@@ -3039,7 +2968,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3039 2968
3040 h->hba_mode_enabled = rescan_hba_mode; 2969 h->hba_mode_enabled = rescan_hba_mode;
3041 2970
3042 if (hpsa_gather_lun_info(h, reportlunsize, 2971 if (hpsa_gather_lun_info(h,
2972 sizeof(*physdev_list), sizeof(*logdev_list),
3043 (struct ReportLUNdata *) physdev_list, &nphysicals, 2973 (struct ReportLUNdata *) physdev_list, &nphysicals,
3044 &physical_mode, logdev_list, &nlogicals)) 2974 &physical_mode, logdev_list, &nlogicals))
3045 goto out; 2975 goto out;
@@ -3210,19 +3140,19 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
 		}
 		addr64 = (u64) sg_dma_address(sg);
 		len = sg_dma_len(sg);
-		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
-		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
-		curr_sg->Len = len;
-		curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
+		curr_sg->Addr = cpu_to_le64(addr64);
+		curr_sg->Len = cpu_to_le32(len);
+		curr_sg->Ext = cpu_to_le32(0);
 		curr_sg++;
 	}
+	(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
 
 	if (use_sg + chained > h->maxSG)
 		h->maxSG = use_sg + chained;
 
 	if (chained) {
 		cp->Header.SGList = h->max_cmd_sg_entries;
-		cp->Header.SGTotal = (u16) (use_sg + 1);
+		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
 		if (hpsa_map_sg_chain_block(h, cp)) {
 			scsi_dma_unmap(cmd);
 			return -1;
@@ -3233,7 +3163,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
 sglist_finished:
 
 	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
-	cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
+	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in this cmd list */
 	return 0;
 }
 
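The two hpsa_scatter_gather() hunks above replace the hand-split Addr.lower/Addr.upper pair with a single little-endian store via cpu_to_le64(), and defer the HPSA_SG_LAST flag to one assignment after the loop. A standalone sketch (the helpers below are illustrative, not the driver's) of why the two encodings produce identical bytes for the device:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	for (int i = 0; i < 4; i++)
		p[i] = (uint8_t)(v >> (8 * i));	/* little-endian bytes */
}

static void put_le64(uint8_t *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (8 * i));
}

int main(void)
{
	uint64_t addr64 = 0x123456789abcdef0ULL;	/* made-up DMA address */
	uint8_t split[8], whole[8];

	/* old encoding: lower/upper 32-bit halves, each little-endian */
	put_le32(split, (uint32_t)(addr64 & 0x0ffffffffULL));
	put_le32(split + 4, (uint32_t)(addr64 >> 32));
	/* new encoding: one 64-bit little-endian store (cpu_to_le64) */
	put_le64(whole, addr64);

	assert(memcmp(split, whole, sizeof(whole)) == 0);
	return 0;
}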
@@ -3325,17 +3255,12 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
 		addr64 = (u64) sg_dma_address(sg);
 		len = sg_dma_len(sg);
 		total_len += len;
-		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
-		curr_sg->Addr.upper =
-			(u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
-		curr_sg->Len = len;
-
-		if (i == (scsi_sg_count(cmd) - 1))
-			curr_sg->Ext = HPSA_SG_LAST;
-		else
-			curr_sg->Ext = 0;  /* we are not chaining */
+		curr_sg->Addr = cpu_to_le64(addr64);
+		curr_sg->Len = cpu_to_le32(len);
+		curr_sg->Ext = cpu_to_le32(0);
 		curr_sg++;
 	}
+	(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
 
 	switch (cmd->sc_data_direction) {
 	case DMA_TO_DEVICE:
@@ -3592,7 +3517,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
 	cp->data_len = cpu_to_le32(total_len);
 	cp->err_ptr = cpu_to_le64(c->busaddr +
 			offsetof(struct io_accel2_cmd, error_data));
-	cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
+	cp->err_len = cpu_to_le32(sizeof(cp->error_data));
 
 	enqueue_cmd_and_start_io(h, c);
 	return 0;
@@ -3809,11 +3734,6 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 		offload_to_mirror =
 			(offload_to_mirror >= map->layout_map_count - 1)
 			? 0 : offload_to_mirror + 1;
-		/* FIXME: remove after debug/dev */
-		BUG_ON(offload_to_mirror >= map->layout_map_count);
-		dev_warn(&h->pdev->dev,
-			"DEBUG: Using physical disk map index %d from mirror group %d\n",
-			map_index, offload_to_mirror);
 		dev->offload_to_mirror = offload_to_mirror;
 		/* Avoid direct use of dev->offload_to_mirror within this
 		 * function since multiple threads might simultaneously
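The round-robin step retained above, (x >= count - 1) ? 0 : x + 1, cycles the mirror group index through 0 .. layout_map_count - 1. A minimal standalone check, with a made-up layout_map_count:

#include <assert.h>

int main(void)
{
	int layout_map_count = 3;	/* hypothetical mirror group count */
	int offload_to_mirror = 0;

	for (int i = 0; i < 9; i++) {
		assert(offload_to_mirror == i % layout_map_count);
		/* same update expression as the hunk above */
		offload_to_mirror =
			(offload_to_mirror >= layout_map_count - 1)
			? 0 : offload_to_mirror + 1;
	}
	return 0;
}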
@@ -3959,8 +3879,11 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 						dev->scsi3addr);
 }
 
-static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
-	void (*done)(struct scsi_cmnd *))
+/*
+ * Running in struct Scsi_Host->host_lock less mode using LLD internal
+ * struct ctlr_info *h->lock w/ spin_lock_irqsave() protection.
+ */
+static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 {
 	struct ctlr_info *h;
 	struct hpsa_scsi_dev_t *dev;
@@ -3973,14 +3896,14 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	dev = cmd->device->hostdata;
 	if (!dev) {
 		cmd->result = DID_NO_CONNECT << 16;
-		done(cmd);
+		cmd->scsi_done(cmd);
 		return 0;
 	}
 	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
 
 	if (unlikely(lockup_detected(h))) {
 		cmd->result = DID_ERROR << 16;
-		done(cmd);
+		cmd->scsi_done(cmd);
 		return 0;
 	}
 	c = cmd_alloc(h);
@@ -3990,9 +3913,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	}
 
 	/* Fill in the command list header */
-
-	cmd->scsi_done = done;    /* save this for use by completion code */
-
 	/* save c in case we have to abort it */
 	cmd->host_scribble = (unsigned char *) c;
 
@@ -4026,8 +3946,8 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 
 	c->Header.ReplyQueue = 0;  /* unused in simple mode */
 	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
-	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
-	c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
+	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT) |
+					DIRECT_LOOKUP_BIT);
 
 	/* Fill in the request block... */
 
@@ -4036,17 +3956,18 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
 	c->Request.CDBLen = cmd->cmd_len;
 	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
-	c->Request.Type.Type = TYPE_CMD;
-	c->Request.Type.Attribute = ATTR_SIMPLE;
 	switch (cmd->sc_data_direction) {
 	case DMA_TO_DEVICE:
-		c->Request.Type.Direction = XFER_WRITE;
+		c->Request.type_attr_dir =
+			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
 		break;
 	case DMA_FROM_DEVICE:
-		c->Request.Type.Direction = XFER_READ;
+		c->Request.type_attr_dir =
+			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
 		break;
 	case DMA_NONE:
-		c->Request.Type.Direction = XFER_NONE;
+		c->Request.type_attr_dir =
+			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
 		break;
 	case DMA_BIDIRECTIONAL:
 		/* This can happen if a buggy application does a scsi passthru
@@ -4054,7 +3975,8 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 		 *   ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
 		 */
 
-		c->Request.Type.Direction = XFER_RSVD;
+		c->Request.type_attr_dir =
+			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
 		/* This is technically wrong, and hpsa controllers should
 		 * reject it with CMD_INVALID, which is the most correct
 		 * response, but non-fibre backends appear to let it
@@ -4081,8 +4003,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	return 0;
 }
 
-static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
-
 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
 {
 	unsigned long flags;
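For context on the deleted line: DEF_SCSI_QCMD() is the midlayer helper that wraps a *_lck routine with the Scsi_Host lock. In kernels of this vintage it expanded to roughly the following (a paraphrase from memory of include/scsi/scsi_cmnd.h, not the verbatim macro); dropping it is what lets hpsa_scsi_queue_command() run host_lock-less, per the new comment added earlier:

/* Rough paraphrase of what DEF_SCSI_QCMD(func_name) generated:
 * a host_lock-holding wrapper around the func_name##_lck routine. */
#define DEF_SCSI_QCMD(func_name)					\
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		rc = func_name##_lck(cmd, cmd->scsi_done);		\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}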
@@ -4152,23 +4072,6 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
 	return finished;
 }
 
-static int hpsa_change_queue_depth(struct scsi_device *sdev,
-	int qdepth, int reason)
-{
-	struct ctlr_info *h = sdev_to_hba(sdev);
-
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -ENOTSUPP;
-
-	if (qdepth < 1)
-		qdepth = 1;
-	else
-		if (qdepth > h->nr_cmds)
-			qdepth = h->nr_cmds;
-	scsi_adjust_queue_depth(sdev, qdepth);
-	return sdev->queue_depth;
-}
-
 static void hpsa_unregister_scsi(struct ctlr_info *h)
 {
 	/* we are being forcibly unloaded, and may not refuse. */
@@ -4329,8 +4232,8 @@ static void hpsa_get_tag(struct ctlr_info *h,
 	if (c->cmd_type == CMD_IOACCEL1) {
 		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
 			&h->ioaccel_cmd_pool[c->cmdindex];
-		*tagupper = cm1->Tag.upper;
-		*taglower = cm1->Tag.lower;
+		*tagupper = (u32) (cm1->tag >> 32);
+		*taglower = (u32) (cm1->tag & 0x0ffffffffULL);
 		return;
 	}
 	if (c->cmd_type == CMD_IOACCEL2) {
@@ -4341,11 +4244,10 @@ static void hpsa_get_tag(struct ctlr_info *h,
 		*taglower = cm2->Tag;
 		return;
 	}
-	*tagupper = c->Header.Tag.upper;
-	*taglower = c->Header.Tag.lower;
+	*tagupper = (u32) (c->Header.tag >> 32);
+	*taglower = (u32) (c->Header.tag & 0x0ffffffffULL);
 }
 
-
 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
 	struct CommandList *abort, int swizzle)
 {
@@ -4410,7 +4312,7 @@ static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
 	struct CommandList *c = NULL;	/* ptr into cmpQ */
 
 	if (!find)
-		return 0;
+		return NULL;
 	spin_lock_irqsave(&h->lock, flags);
 	list_for_each_entry(c, queue_head, list) {
 		if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
@@ -4432,7 +4334,7 @@ static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
 
 	spin_lock_irqsave(&h->lock, flags);
 	list_for_each_entry(c, queue_head, list) {
-		if (memcmp(&c->Header.Tag, tag, 8) != 0)
+		if (memcmp(&c->Header.tag, tag, 8) != 0)
 			continue;
 		spin_unlock_irqrestore(&h->lock, flags);
 		return c;
@@ -4686,19 +4588,32 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 	int i;
 	union u64bit temp64;
 	dma_addr_t cmd_dma_handle, err_dma_handle;
-	unsigned long flags;
+	int loopcount;
+
+	/* There is some *extremely* small but non-zero chance that
+	 * multiple threads could get in here, and one thread could
+	 * be scanning through the list of bits looking for a free
+	 * one, but the free ones are always behind him, and other
+	 * threads sneak in behind him and eat them before he can
+	 * get to them, so that while there is always a free one, a
+	 * very unlucky thread might be starved anyway, never able to
+	 * beat the other threads.  In reality, this happens so
+	 * infrequently as to be indistinguishable from never.
+	 */
 
-	spin_lock_irqsave(&h->lock, flags);
+	loopcount = 0;
 	do {
 		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
-		if (i == h->nr_cmds) {
-			spin_unlock_irqrestore(&h->lock, flags);
-			return NULL;
-		}
-	} while (test_and_set_bit
-		 (i & (BITS_PER_LONG - 1),
-		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
-	spin_unlock_irqrestore(&h->lock, flags);
+		if (i == h->nr_cmds)
+			i = 0;
+		loopcount++;
+	} while (test_and_set_bit(i & (BITS_PER_LONG - 1),
+		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0 &&
+		 loopcount < 10);
+
+	/* Thread got starved?  We do not expect this to ever happen. */
+	if (loopcount >= 10)
+		return NULL;
 
 	c = h->cmd_pool + i;
 	memset(c, 0, sizeof(*c));
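The rewritten cmd_alloc() above leans entirely on the atomicity of test_and_set_bit() and bounds the retry at ten passes. A userspace analogue with C11 atomics, using one flag per slot instead of a shared bitmap (all names here are illustrative):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define NR_CMDS 16
static atomic_bool slot_busy[NR_CMDS];

static int cmd_slot_alloc(void)
{
	int loopcount = 0;

	do {
		for (int i = 0; i < NR_CMDS; i++) {
			/* the claim is atomic; false means we won the slot */
			if (!atomic_exchange(&slot_busy[i], true))
				return i;
		}
		loopcount++;	/* all slots taken, or we were starved */
	} while (loopcount < 10);
	return -1;		/* give up, as cmd_alloc() returns NULL */
}

static void cmd_slot_free(int i)
{
	atomic_store(&slot_busy[i], false);
}

int main(void)
{
	int a = cmd_slot_alloc(), b = cmd_slot_alloc();

	assert(a == 0 && b == 1);
	cmd_slot_free(a);
	assert(cmd_slot_alloc() == 0);	/* a freed slot is reusable */
	return 0;
}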
@@ -4714,9 +4629,8 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 	INIT_LIST_HEAD(&c->list);
 	c->busaddr = (u32) cmd_dma_handle;
 	temp64.val = (u64) err_dma_handle;
-	c->ErrDesc.Addr.lower = temp64.val32.lower;
-	c->ErrDesc.Addr.upper = temp64.val32.upper;
-	c->ErrDesc.Len = sizeof(*c->err_info);
+	c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
+	c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
 
 	c->h = h;
 	return c;
@@ -4729,7 +4643,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
 {
 	struct CommandList *c;
-	union u64bit temp64;
 	dma_addr_t cmd_dma_handle, err_dma_handle;
 
 	c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
@@ -4750,10 +4663,8 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
 
 	INIT_LIST_HEAD(&c->list);
 	c->busaddr = (u32) cmd_dma_handle;
-	temp64.val = (u64) err_dma_handle;
-	c->ErrDesc.Addr.lower = temp64.val32.lower;
-	c->ErrDesc.Addr.upper = temp64.val32.upper;
-	c->ErrDesc.Len = sizeof(*c->err_info);
+	c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
+	c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
 
 	c->h = h;
 	return c;
@@ -4762,30 +4673,25 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
 {
 	int i;
-	unsigned long flags;
 
 	i = c - h->cmd_pool;
-	spin_lock_irqsave(&h->lock, flags);
 	clear_bit(i & (BITS_PER_LONG - 1),
 		  h->cmd_pool_bits + (i / BITS_PER_LONG));
-	spin_unlock_irqrestore(&h->lock, flags);
 }
 
 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
 {
-	union u64bit temp64;
-
-	temp64.val32.lower = c->ErrDesc.Addr.lower;
-	temp64.val32.upper = c->ErrDesc.Addr.upper;
 	pci_free_consistent(h->pdev, sizeof(*c->err_info),
-		c->err_info, (dma_addr_t) temp64.val);
+		c->err_info,
+		(dma_addr_t) le64_to_cpu(c->ErrDesc.Addr));
 	pci_free_consistent(h->pdev, sizeof(*c),
 		c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
 }
 
 #ifdef CONFIG_COMPAT
 
-static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
+static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
+	void __user *arg)
 {
 	IOCTL32_Command_struct __user *arg32 =
 	    (IOCTL32_Command_struct __user *) arg;
@@ -4810,7 +4716,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
 	if (err)
 		return -EFAULT;
 
-	err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
+	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
 	if (err)
 		return err;
 	err |= copy_in_user(&arg32->error_info, &p->error_info,
@@ -4821,7 +4727,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
 }
 
 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
-	int cmd, void *arg)
+	int cmd, void __user *arg)
 {
 	BIG_IOCTL32_Command_struct __user *arg32 =
 	    (BIG_IOCTL32_Command_struct __user *) arg;
@@ -4848,7 +4754,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
 	if (err)
 		return -EFAULT;
 
-	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
+	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
 	if (err)
 		return err;
 	err |= copy_in_user(&arg32->error_info, &p->error_info,
@@ -4858,7 +4764,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
 	return err;
 }
 
-static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
+static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
 {
 	switch (cmd) {
 	case CCISS_GETPCIINFO:
@@ -4932,7 +4838,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 	IOCTL_Command_struct iocommand;
 	struct CommandList *c;
 	char *buff = NULL;
-	union u64bit temp64;
+	u64 temp64;
 	int rc = 0;
 
 	if (!argp)
@@ -4971,14 +4877,14 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 	c->Header.ReplyQueue = 0; /* unused in simple mode */
 	if (iocommand.buf_size > 0) {	/* buffer to fill */
 		c->Header.SGList = 1;
-		c->Header.SGTotal = 1;
+		c->Header.SGTotal = cpu_to_le16(1);
 	} else	{ /* no buffers to fill */
 		c->Header.SGList = 0;
-		c->Header.SGTotal = 0;
+		c->Header.SGTotal = cpu_to_le16(0);
 	}
 	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
 	/* use the kernel address the cmd block for tag */
-	c->Header.Tag.lower = c->busaddr;
+	c->Header.tag = c->busaddr;
 
 	/* Fill in Request block */
 	memcpy(&c->Request, &iocommand.Request,
@@ -4986,19 +4892,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 
 	/* Fill in the scatter gather information */
 	if (iocommand.buf_size > 0) {
-		temp64.val = pci_map_single(h->pdev, buff,
+		temp64 = pci_map_single(h->pdev, buff,
 			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
-			c->SG[0].Addr.lower = 0;
-			c->SG[0].Addr.upper = 0;
-			c->SG[0].Len = 0;
+		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
+			c->SG[0].Addr = cpu_to_le64(0);
+			c->SG[0].Len = cpu_to_le32(0);
 			rc = -ENOMEM;
 			goto out;
 		}
-		c->SG[0].Addr.lower = temp64.val32.lower;
-		c->SG[0].Addr.upper = temp64.val32.upper;
-		c->SG[0].Len = iocommand.buf_size;
-		c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
+		c->SG[0].Addr = cpu_to_le64(temp64);
+		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
+		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
 	}
 	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
 	if (iocommand.buf_size > 0)
@@ -5033,7 +4937,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 	struct CommandList *c;
 	unsigned char **buff = NULL;
 	int *buff_size = NULL;
-	union u64bit temp64;
+	u64 temp64;
 	BYTE sg_used = 0;
 	int status = 0;
 	int i;
@@ -5107,29 +5011,30 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 	}
 	c->cmd_type = CMD_IOCTL_PEND;
 	c->Header.ReplyQueue = 0;
-	c->Header.SGList = c->Header.SGTotal = sg_used;
+	c->Header.SGList = (u8) sg_used;
+	c->Header.SGTotal = cpu_to_le16(sg_used);
 	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
-	c->Header.Tag.lower = c->busaddr;
+	c->Header.tag = c->busaddr;
 	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
 	if (ioc->buf_size > 0) {
 		int i;
 		for (i = 0; i < sg_used; i++) {
-			temp64.val = pci_map_single(h->pdev, buff[i],
+			temp64 = pci_map_single(h->pdev, buff[i],
 				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
-			if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
-				c->SG[i].Addr.lower = 0;
-				c->SG[i].Addr.upper = 0;
-				c->SG[i].Len = 0;
+			if (dma_mapping_error(&h->pdev->dev,
+							(dma_addr_t) temp64)) {
+				c->SG[i].Addr = cpu_to_le64(0);
+				c->SG[i].Len = cpu_to_le32(0);
 				hpsa_pci_unmap(h->pdev, c, i,
 					PCI_DMA_BIDIRECTIONAL);
 				status = -ENOMEM;
 				goto cleanup0;
 			}
-			c->SG[i].Addr.lower = temp64.val32.lower;
-			c->SG[i].Addr.upper = temp64.val32.upper;
-			c->SG[i].Len = buff_size[i];
-			c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
+			c->SG[i].Addr = cpu_to_le64(temp64);
+			c->SG[i].Len = cpu_to_le32(buff_size[i]);
+			c->SG[i].Ext = cpu_to_le32(0);
 		}
+		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
 	}
 	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
 	if (sg_used)
@@ -5206,7 +5111,7 @@ static void decrement_passthru_count(struct ctlr_info *h)
 /*
  * ioctl
  */
-static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
+static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
 {
 	struct ctlr_info *h;
 	void __user *argp = (void __user *)arg;
@@ -5268,20 +5173,20 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 {
 	int pci_dir = XFER_NONE;
 	struct CommandList *a; /* for commands to be aborted */
+	u32 tupper, tlower;
 
 	c->cmd_type = CMD_IOCTL_PEND;
 	c->Header.ReplyQueue = 0;
 	if (buff != NULL && size > 0) {
 		c->Header.SGList = 1;
-		c->Header.SGTotal = 1;
+		c->Header.SGTotal = cpu_to_le16(1);
 	} else {
 		c->Header.SGList = 0;
-		c->Header.SGTotal = 0;
+		c->Header.SGTotal = cpu_to_le16(0);
 	}
-	c->Header.Tag.lower = c->busaddr;
+	c->Header.tag = c->busaddr;
 	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
 
-	c->Request.Type.Type = cmd_type;
 	if (cmd_type == TYPE_CMD) {
 		switch (cmd) {
 		case HPSA_INQUIRY:
@@ -5291,8 +5196,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 				c->Request.CDB[2] = (page_code & 0xff);
 			}
 			c->Request.CDBLen = 6;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_READ;
+			c->Request.type_attr_dir =
+				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 			c->Request.Timeout = 0;
 			c->Request.CDB[0] = HPSA_INQUIRY;
 			c->Request.CDB[4] = size & 0xFF;
@@ -5303,8 +5208,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 			   mode = 00 target = 0.  Nothing to write.
 			*/
 			c->Request.CDBLen = 12;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_READ;
+			c->Request.type_attr_dir =
+				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 			c->Request.Timeout = 0;
 			c->Request.CDB[0] = cmd;
 			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
@@ -5314,8 +5219,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 			break;
 		case HPSA_CACHE_FLUSH:
 			c->Request.CDBLen = 12;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_WRITE;
+			c->Request.type_attr_dir =
+					TYPE_ATTR_DIR(cmd_type,
+						ATTR_SIMPLE, XFER_WRITE);
 			c->Request.Timeout = 0;
 			c->Request.CDB[0] = BMIC_WRITE;
 			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
@@ -5324,14 +5230,14 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 			break;
 		case TEST_UNIT_READY:
 			c->Request.CDBLen = 6;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_NONE;
+			c->Request.type_attr_dir =
+				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
 			c->Request.Timeout = 0;
 			break;
 		case HPSA_GET_RAID_MAP:
 			c->Request.CDBLen = 12;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_READ;
+			c->Request.type_attr_dir =
+				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 			c->Request.Timeout = 0;
 			c->Request.CDB[0] = HPSA_CISS_READ;
 			c->Request.CDB[1] = cmd;
@@ -5342,8 +5248,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 			break;
 		case BMIC_SENSE_CONTROLLER_PARAMETERS:
 			c->Request.CDBLen = 10;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_READ;
+			c->Request.type_attr_dir =
+				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
 			c->Request.Timeout = 0;
 			c->Request.CDB[0] = BMIC_READ;
 			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
@@ -5360,9 +5266,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 
 		case  HPSA_DEVICE_RESET_MSG:
 			c->Request.CDBLen = 16;
-			c->Request.Type.Type =  1; /* It is a MSG not a CMD */
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_NONE;
+			c->Request.type_attr_dir =
+				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
 			c->Request.Timeout = 0; /* Don't time out */
 			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
 			c->Request.CDB[0] =  cmd;
@@ -5376,27 +5281,28 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 			break;
 		case  HPSA_ABORT_MSG:
 			a = buff;       /* point to command to be aborted */
-			dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
-				a->Header.Tag.upper, a->Header.Tag.lower,
-				c->Header.Tag.upper, c->Header.Tag.lower);
+			dev_dbg(&h->pdev->dev, "Abort Tag:0x%016llx using request Tag:0x%016llx",
+				a->Header.tag, c->Header.tag);
+			tlower = (u32) (a->Header.tag >> 32);
+			tupper = (u32) (a->Header.tag & 0x0ffffffffULL);
 			c->Request.CDBLen = 16;
-			c->Request.Type.Type = TYPE_MSG;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_WRITE;
+			c->Request.type_attr_dir =
+					TYPE_ATTR_DIR(cmd_type,
+						ATTR_SIMPLE, XFER_WRITE);
 			c->Request.Timeout = 0; /* Don't time out */
 			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
 			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
 			c->Request.CDB[2] = 0x00; /* reserved */
 			c->Request.CDB[3] = 0x00; /* reserved */
 			/* Tag to abort goes in CDB[4]-CDB[11] */
-			c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
-			c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
-			c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
-			c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
-			c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
-			c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
-			c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
-			c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
+			c->Request.CDB[4] = tlower & 0xFF;
+			c->Request.CDB[5] = (tlower >> 8) & 0xFF;
+			c->Request.CDB[6] = (tlower >> 16) & 0xFF;
+			c->Request.CDB[7] = (tlower >> 24) & 0xFF;
+			c->Request.CDB[8] = tupper & 0xFF;
+			c->Request.CDB[9] = (tupper >> 8) & 0xFF;
+			c->Request.CDB[10] = (tupper >> 16) & 0xFF;
+			c->Request.CDB[11] = (tupper >> 24) & 0xFF;
 			c->Request.CDB[12] = 0x00; /* reserved */
 			c->Request.CDB[13] = 0x00; /* reserved */
 			c->Request.CDB[14] = 0x00; /* reserved */
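The abort CDB carries the 64-bit cciss tag as eight discrete bytes in CDB[4]..CDB[11], extracted by shift-and-mask from the two 32-bit halves. A standalone check of that extraction (the tag value is made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t tag = 0x1122334455667788ULL;
	uint32_t half = (uint32_t)(tag & 0x0ffffffffULL);	/* low half */
	uint8_t cdb[4];

	/* same shift-and-mask pattern as the hunk above */
	cdb[0] = half & 0xFF;
	cdb[1] = (half >> 8) & 0xFF;
	cdb[2] = (half >> 16) & 0xFF;
	cdb[3] = (half >> 24) & 0xFF;

	assert(cdb[0] == 0x88 && cdb[1] == 0x77);
	assert(cdb[2] == 0x66 && cdb[3] == 0x55);
	return 0;
}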
@@ -5412,7 +5318,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 		BUG();
 	}
 
-	switch (c->Request.Type.Direction) {
+	switch (GET_DIR(c->Request.type_attr_dir)) {
 	case XFER_READ:
 		pci_dir = PCI_DMA_FROMDEVICE;
 		break;
@@ -5467,15 +5373,9 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
 
 		/* Put job onto the completed Q */
 		addQ(&h->cmpQ, c);
-
-		/* Must increment commands_outstanding before unlocking
-		 * and submitting to avoid race checking for fifo full
-		 * condition.
-		 */
-		h->commands_outstanding++;
-
-		/* Tell the controller execute command */
+		atomic_inc(&h->commands_outstanding);
 		spin_unlock_irqrestore(&h->lock, *flags);
+		/* Tell the controller execute command */
 		h->access.submit_command(h, c);
 		spin_lock_irqsave(&h->lock, *flags);
 	}
@@ -5521,6 +5421,7 @@ static inline void finish_cmd(struct CommandList *c)
 	unsigned long flags;
 	int io_may_be_stalled = 0;
 	struct ctlr_info *h = c->h;
+	int count;
 
 	spin_lock_irqsave(&h->lock, flags);
 	removeQ(c);
@@ -5541,11 +5442,10 @@ static inline void finish_cmd(struct CommandList *c)
 	 * want to get in a cycle where we call start_io every time
 	 * through here.
 	 */
-	if (unlikely(h->fifo_recently_full) &&
-		h->commands_outstanding < 5)
-		io_may_be_stalled = 1;
-
+	count = atomic_read(&h->commands_outstanding);
 	spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(h->fifo_recently_full) && count < 5)
+		io_may_be_stalled = 1;
 
 	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
 	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
@@ -5765,22 +5665,20 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 
 	cmd->CommandHeader.ReplyQueue = 0;
 	cmd->CommandHeader.SGList = 0;
-	cmd->CommandHeader.SGTotal = 0;
-	cmd->CommandHeader.Tag.lower = paddr32;
-	cmd->CommandHeader.Tag.upper = 0;
+	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
+	cmd->CommandHeader.tag = paddr32;
 	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
 
 	cmd->Request.CDBLen = 16;
-	cmd->Request.Type.Type = TYPE_MSG;
-	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
-	cmd->Request.Type.Direction = XFER_NONE;
+	cmd->Request.type_attr_dir =
+			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
 	cmd->Request.Timeout = 0; /* Don't time out */
 	cmd->Request.CDB[0] = opcode;
 	cmd->Request.CDB[1] = type;
 	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
-	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
-	cmd->ErrorDescriptor.Addr.upper = 0;
-	cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
+	cmd->ErrorDescriptor.Addr =
+			cpu_to_le64((paddr32 + sizeof(*cmd)));
+	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
 
 	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
 
@@ -5818,7 +5716,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 #define hpsa_noop(p) hpsa_message(p, 3, 0)
 
 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
-	void * __iomem vaddr, u32 use_doorbell)
+	void __iomem *vaddr, u32 use_doorbell)
 {
 	u16 pmcsr;
 	int pos;
@@ -6056,7 +5954,7 @@ unmap_vaddr:
  * the io functions.
  * This is for debug only.
  */
-static void print_cfg_table(struct device *dev, struct CfgTable *tb)
+static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
 {
 #ifdef HPSA_DEBUG
 	int i;
@@ -6323,11 +6221,11 @@ static void hpsa_find_board_params(struct ctlr_info *h)
 	h->max_cmd_sg_entries = 31;
 	if (h->maxsgentries > 512) {
 		h->max_cmd_sg_entries = 32;
-		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
+		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
 		h->maxsgentries--; /* save one for chain pointer */
 	} else {
-		h->maxsgentries = 31; /* default to traditional values */
 		h->chainsize = 0;
+		h->maxsgentries = 31; /* default to traditional values */
 	}
 
 	/* Find out what task management functions are supported and cache */
@@ -6456,15 +6354,15 @@ static int hpsa_pci_init(struct ctlr_info *h)
 		return err;
 	}
 
-	/* Enable bus mastering (pci_disable_device may disable this) */
-	pci_set_master(h->pdev);
-
 	err = pci_request_regions(h->pdev, HPSA);
 	if (err) {
 		dev_err(&h->pdev->dev,
 			"cannot obtain PCI resources, aborting\n");
 		return err;
 	}
+
+	pci_set_master(h->pdev);
+
 	hpsa_interrupt_mode(h);
 	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
 	if (err)
@@ -6544,7 +6442,9 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
 		dev_warn(&pdev->dev, "failed to enable device.\n");
 		return -ENODEV;
 	}
+
 	pci_set_master(pdev);
+
 	/* Reset the controller with a PCI power-cycle or via doorbell */
 	rc = hpsa_kdump_hard_reset_controller(pdev);
 
@@ -7431,13 +7331,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 			cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
 			cp->timeout_sec = 0;
 			cp->ReplyQueue = 0;
-			cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
-						DIRECT_LOOKUP_BIT;
-			cp->Tag.upper = 0;
-			cp->host_addr.lower =
-				(u32) (h->ioaccel_cmd_pool_dhandle +
+			cp->tag =
+				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT) |
+						DIRECT_LOOKUP_BIT);
+			cp->host_addr =
+				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
 					(i * sizeof(struct io_accel1_cmd)));
-			cp->host_addr.upper = 0;
 		}
 	} else if (trans_support & CFGTBL_Trans_io_accel2) {
 		u64 cfg_offset, cfg_base_addr_index;
@@ -7711,7 +7610,7 @@ static void __attribute__((unused)) verify_offsets(void)
 	VERIFY_OFFSET(timeout_sec, 0x62);
 	VERIFY_OFFSET(ReplyQueue, 0x64);
 	VERIFY_OFFSET(reserved9, 0x65);
-	VERIFY_OFFSET(Tag, 0x68);
+	VERIFY_OFFSET(tag, 0x68);
 	VERIFY_OFFSET(host_addr, 0x70);
 	VERIFY_OFFSET(CISS_LUN, 0x78);
 	VERIFY_OFFSET(SG, 0x78 + 8);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 24472cec7de3..8e06d9e280ec 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -118,7 +118,7 @@ struct ctlr_info {
 	struct CfgTable __iomem *cfgtable;
 	int	interrupts_enabled;
 	int	max_commands;
-	int	commands_outstanding;
+	atomic_t commands_outstanding;
 #	define PERF_MODE_INT	0
 #	define DOORBELL_INT	1
 #	define SIMPLE_MODE_INT	2
@@ -164,7 +164,7 @@ struct ctlr_info {
 	 */
 	u32	trans_support;
 	u32	trans_offset;
-	struct TransTable_struct *transtable;
+	struct TransTable_struct __iomem *transtable;
 	unsigned long transMethod;
 
 	/* cap concurrent passthrus at some reasonable maximum */
@@ -181,7 +181,7 @@ struct ctlr_info {
 	u32 *blockFetchTable;
 	u32 *ioaccel1_blockFetchTable;
 	u32 *ioaccel2_blockFetchTable;
-	u32 *ioaccel2_bft2_regs;
+	u32 __iomem *ioaccel2_bft2_regs;
 	unsigned char *hba_inquiry_data;
 	u32 driver_support;
 	u32 fw_support;
@@ -192,7 +192,7 @@ struct ctlr_info {
 	u64 last_heartbeat_timestamp;
 	u32 heartbeat_sample_interval;
 	atomic_t firmware_flash_in_progress;
-	u32 *lockup_detected;
+	u32 __percpu *lockup_detected;
 	struct delayed_work monitor_ctlr_work;
 	int remove_in_progress;
 	u32 fifo_recently_full;
@@ -395,7 +395,7 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
 static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
 {
 	struct reply_queue_buffer *rq = &h->reply_queue[q];
-	unsigned long flags, register_value = FIFO_EMPTY;
+	unsigned long register_value = FIFO_EMPTY;
 
 	/* msi auto clears the interrupt pending bit. */
 	if (!(h->msi_vector || h->msix_vector)) {
@@ -413,9 +413,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
 	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
 		register_value = rq->head[rq->current_entry];
 		rq->current_entry++;
-		spin_lock_irqsave(&h->lock, flags);
-		h->commands_outstanding--;
-		spin_unlock_irqrestore(&h->lock, flags);
+		atomic_dec(&h->commands_outstanding);
 	} else {
 		register_value = FIFO_EMPTY;
 	}
@@ -433,11 +431,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
  */
 static unsigned long SA5_fifo_full(struct ctlr_info *h)
 {
-	if (h->commands_outstanding >= h->max_commands)
-		return 1;
-	else
-		return 0;
-
+	return atomic_read(&h->commands_outstanding) >= h->max_commands;
 }
 /*
  * returns value read from hardware.
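Each commands_outstanding hunk in this file swaps a spin_lock_irqsave()/count/unlock sequence for a single atomic operation, and SA5_fifo_full() collapses to one atomic_read() comparison. A userspace analogue with C11 atomics (names and the limit are illustrative):

#include <assert.h>
#include <stdatomic.h>

static atomic_int commands_outstanding;
static const int max_commands = 32;	/* made-up controller limit */

static int fifo_full(void)
{
	/* no lock needed: load, add and sub are each atomic */
	return atomic_load(&commands_outstanding) >= max_commands;
}

int main(void)
{
	for (int i = 0; i < max_commands; i++)
		atomic_fetch_add(&commands_outstanding, 1);
	assert(fifo_full());
	atomic_fetch_sub(&commands_outstanding, 1);
	assert(!fifo_full());
	return 0;
}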
@@ -448,13 +442,9 @@ static unsigned long SA5_completed(struct ctlr_info *h,
 {
 	unsigned long register_value
 		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
-	unsigned long flags;
 
-	if (register_value != FIFO_EMPTY) {
-		spin_lock_irqsave(&h->lock, flags);
-		h->commands_outstanding--;
-		spin_unlock_irqrestore(&h->lock, flags);
-	}
+	if (register_value != FIFO_EMPTY)
+		atomic_dec(&h->commands_outstanding);
 
 #ifdef HPSA_DEBUG
 	if (register_value != FIFO_EMPTY)
@@ -510,7 +500,6 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 {
 	u64 register_value;
 	struct reply_queue_buffer *rq = &h->reply_queue[q];
-	unsigned long flags;
 
 	BUG_ON(q >= h->nreply_queues);
 
@@ -528,9 +517,7 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 		wmb();
 		writel((q << 24) | rq->current_entry, h->vaddr +
 				IOACCEL_MODE1_CONSUMER_INDEX);
-		spin_lock_irqsave(&h->lock, flags);
-		h->commands_outstanding--;
-		spin_unlock_irqrestore(&h->lock, flags);
+		atomic_dec(&h->commands_outstanding);
 	}
 	return (unsigned long) register_value;
 }
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index b5125dc31439..cb988c41cad9 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -252,7 +252,7 @@ struct ReportExtendedLUNdata {
 	u8 LUNListLength[4];
 	u8 extended_response_flag;
 	u8 reserved[3];
-	struct ext_report_lun_entry LUN[HPSA_MAX_LUN];
+	struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN];
 };
 
 struct SenseSubsystem_info {
@@ -314,28 +314,36 @@ struct CommandListHeader {
 	u8              ReplyQueue;
 	u8              SGList;
 	u16             SGTotal;
-	struct vals32     Tag;
+	u64		tag;
 	union LUNAddr     LUN;
 };
 
 struct RequestBlock {
 	u8   CDBLen;
-	struct {
-		u8 Type:3;
-		u8 Attribute:3;
-		u8 Direction:2;
-	} Type;
+	/*
+	 * type_attr_dir:
+	 * type: low 3 bits
+	 * attr: middle 3 bits
+	 * dir: high 2 bits
+	 */
+	u8	type_attr_dir;
+#define TYPE_ATTR_DIR(t, a, d) ((((d) & 0x03) << 6) |\
+				(((a) & 0x07) << 3) |\
+				((t) & 0x07))
+#define GET_TYPE(tad) ((tad) & 0x07)
+#define GET_ATTR(tad) (((tad) >> 3) & 0x07)
+#define GET_DIR(tad) (((tad) >> 6) & 0x03)
 	u16  Timeout;
 	u8   CDB[16];
 };
 
 struct ErrDescriptor {
-	struct vals32 Addr;
+	u64 Addr;
 	u32  Len;
 };
 
 struct SGDescriptor {
-	struct vals32 Addr;
+	u64 Addr;
 	u32  Len;
 	u32  Ext;
 };
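The TYPE_ATTR_DIR()/GET_*() macros above replace the old C bitfield with explicit packing, so the byte handed to the controller no longer depends on the compiler's bitfield layout. Copying the macros verbatim into a standalone program verifies the round trip over every encodable value:

#include <assert.h>
#include <stdint.h>

/* macros copied from the hunk above */
#define TYPE_ATTR_DIR(t, a, d) ((((d) & 0x03) << 6) |\
				(((a) & 0x07) << 3) |\
				((t) & 0x07))
#define GET_TYPE(tad) ((tad) & 0x07)
#define GET_ATTR(tad) (((tad) >> 3) & 0x07)
#define GET_DIR(tad) (((tad) >> 6) & 0x03)

int main(void)
{
	uint8_t tad;

	for (int t = 0; t < 8; t++)		/* 3-bit type */
		for (int a = 0; a < 8; a++)	/* 3-bit attribute */
			for (int d = 0; d < 4; d++) {	/* 2-bit direction */
				tad = TYPE_ATTR_DIR(t, a, d);
				assert(GET_TYPE(tad) == t);
				assert(GET_ATTR(tad) == a);
				assert(GET_DIR(tad) == d);
			}
	return 0;
}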
@@ -434,8 +442,8 @@ struct io_accel1_cmd {
 	u16 timeout_sec;		/* 0x62 - 0x63 */
 	u8  ReplyQueue;			/* 0x64 */
 	u8  reserved9[3];		/* 0x65 - 0x67 */
-	struct vals32 Tag;		/* 0x68 - 0x6F */
-	struct vals32 host_addr;	/* 0x70 - 0x77 */
+	u64 tag;			/* 0x68 - 0x6F */
+	u64 host_addr;			/* 0x70 - 0x77 */
 	u8  CISS_LUN[8];		/* 0x78 - 0x7F */
 	struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
 } __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
@@ -555,8 +563,8 @@ struct hpsa_tmf_struct {
 	u8 reserved1;		/* byte 3 Reserved */
 	u32 it_nexus;		/* SCSI I-T Nexus */
 	u8 lun_id[8];		/* LUN ID for TMF request */
-	struct vals32 Tag;	/* cciss tag associated w/ request */
-	struct vals32 abort_tag;/* cciss tag of SCSI cmd or task to abort */
+	u64 tag;		/* cciss tag associated w/ request */
+	u64 abort_tag;		/* cciss tag of SCSI cmd or task to abort */
 	u64 error_ptr;		/* Error Pointer */
 	u32 error_len;		/* Error Length */
 };
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 151893148abd..e995218476ed 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1118,17 +1118,13 @@ static int hptiop_reset(struct scsi_cmnd *scp)
 }
 
 static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
-					  int queue_depth, int reason)
+					  int queue_depth)
 {
 	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
 
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
-
 	if (queue_depth > hba->max_requests)
 		queue_depth = hba->max_requests;
-	scsi_adjust_queue_depth(sdev, queue_depth);
-	return queue_depth;
+	return scsi_change_queue_depth(sdev, queue_depth);
 }
 
 static ssize_t hptiop_show_version(struct device *dev,
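The same mechanical conversion is applied to ibmvfc, ibmvscsi and ipr below: the reason parameter and its SCSI_QDEPTH_DEFAULT check go away, and scsi_change_queue_depth() both applies and returns the depth. For a hypothetical driver the hook reduces to this shape (the MYDRV_*/mydrv_* names are invented):

/* Sketch of the post-conversion .change_queue_depth shape shared by
 * the drivers in this series; not taken from any one driver. */
#include <scsi/scsi_device.h>

#define MYDRV_MAX_CMDS_PER_LUN	64	/* invented hardware limit */

static int mydrv_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > MYDRV_MAX_CMDS_PER_LUN)	/* clamp to the HW limit */
		qdepth = MYDRV_MAX_CMDS_PER_LUN;
	return scsi_change_queue_depth(sdev, qdepth);
}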
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 147b80e07b00..f58c6d8e0264 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2900,17 +2900,12 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
  * Return value:
  * 	actual depth set
  **/
-static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth,
-				     int reason)
+static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
-
 	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
 		qdepth = IBMVFC_MAX_CMDS_PER_LUN;
 
-	scsi_adjust_queue_depth(sdev, qdepth);
-	return sdev->queue_depth;
+	return scsi_change_queue_depth(sdev, qdepth);
 }
 
 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
@@ -3103,6 +3098,7 @@ static struct scsi_host_template driver_template = {
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = ibmvfc_attrs,
 	.use_blk_tags = 1,
+	.track_queue_depth = 1,
 };
 
 /**
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e8c3cdf0d03b..acea5d6eebd0 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1941,17 +1941,11 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
  * Return value:
  * 	actual depth set
  **/
-static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth,
-				       int reason)
+static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
-
 	if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
 		qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
-
-	scsi_adjust_queue_depth(sdev, qdepth);
-	return sdev->queue_depth;
+	return scsi_change_queue_depth(sdev, qdepth);
 }
 
 /* ------------------------------------------------------------
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 256ef98f5c29..540294389355 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3942,8 +3942,9 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
 		return -EIO;
 	}
 
-	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
-					sglist->num_sg, DMA_TO_DEVICE);
+	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
+					sglist->scatterlist, sglist->num_sg,
+					DMA_TO_DEVICE);
 
 	if (!sglist->num_dma_sg) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -4327,16 +4328,12 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
  * Return value:
  * 	actual depth set
  **/
-static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
-				  int reason)
+static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
 	struct ipr_resource_entry *res;
 	unsigned long lock_flags = 0;
 
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
-
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	res = (struct ipr_resource_entry *)sdev->hostdata;
 
@@ -4344,7 +4341,7 @@ static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
 		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
-	scsi_adjust_queue_depth(sdev, qdepth);
+	scsi_change_queue_depth(sdev, qdepth);
 	return sdev->queue_depth;
 }
 
@@ -4751,7 +4748,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
 	if (ap) {
-		scsi_adjust_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
+		scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
 		ata_sas_slave_configure(sdev, ap);
 	}
 
@@ -5571,7 +5568,7 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5571 nseg = scsi_dma_map(scsi_cmd); 5568 nseg = scsi_dma_map(scsi_cmd);
5572 if (nseg < 0) { 5569 if (nseg < 0) {
5573 if (printk_ratelimit()) 5570 if (printk_ratelimit())
5574 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); 5571 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5575 return -1; 5572 return -1;
5576 } 5573 }
5577 5574
@@ -5622,7 +5619,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5622 5619
5623 nseg = scsi_dma_map(scsi_cmd); 5620 nseg = scsi_dma_map(scsi_cmd);
5624 if (nseg < 0) { 5621 if (nseg < 0) {
5625 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); 5622 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5626 return -1; 5623 return -1;
5627 } 5624 }
5628 5625
@@ -8392,7 +8389,7 @@ static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8392 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8389 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8393 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 8390 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8394 8391
8395 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist, 8392 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8396 sglist->num_sg, DMA_TO_DEVICE); 8393 sglist->num_sg, DMA_TO_DEVICE);
8397 8394
8398 ipr_cmd->job_step = ipr_reset_alert; 8395 ipr_cmd->job_step = ipr_reset_alert;
@@ -8832,7 +8829,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8832 8829
8833 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 8830 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8834 if (ioa_cfg->ipr_cmnd_list[i]) 8831 if (ioa_cfg->ipr_cmnd_list[i])
8835 pci_pool_free(ioa_cfg->ipr_cmd_pool, 8832 dma_pool_free(ioa_cfg->ipr_cmd_pool,
8836 ioa_cfg->ipr_cmnd_list[i], 8833 ioa_cfg->ipr_cmnd_list[i],
8837 ioa_cfg->ipr_cmnd_list_dma[i]); 8834 ioa_cfg->ipr_cmnd_list_dma[i]);
8838 8835
@@ -8840,7 +8837,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8840 } 8837 }
8841 8838
8842 if (ioa_cfg->ipr_cmd_pool) 8839 if (ioa_cfg->ipr_cmd_pool)
8843 pci_pool_destroy(ioa_cfg->ipr_cmd_pool); 8840 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
8844 8841
8845 kfree(ioa_cfg->ipr_cmnd_list); 8842 kfree(ioa_cfg->ipr_cmnd_list);
8846 kfree(ioa_cfg->ipr_cmnd_list_dma); 8843 kfree(ioa_cfg->ipr_cmnd_list_dma);
@@ -8861,25 +8858,24 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8861 int i; 8858 int i;
8862 8859
8863 kfree(ioa_cfg->res_entries); 8860 kfree(ioa_cfg->res_entries);
8864 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs), 8861 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8865 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 8862 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8866 ipr_free_cmd_blks(ioa_cfg); 8863 ipr_free_cmd_blks(ioa_cfg);
8867 8864
8868 for (i = 0; i < ioa_cfg->hrrq_num; i++) 8865 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8869 pci_free_consistent(ioa_cfg->pdev, 8866 dma_free_coherent(&ioa_cfg->pdev->dev,
8870 sizeof(u32) * ioa_cfg->hrrq[i].size, 8867 sizeof(u32) * ioa_cfg->hrrq[i].size,
8871 ioa_cfg->hrrq[i].host_rrq, 8868 ioa_cfg->hrrq[i].host_rrq,
8872 ioa_cfg->hrrq[i].host_rrq_dma); 8869 ioa_cfg->hrrq[i].host_rrq_dma);
8873 8870
8874 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size, 8871 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8875 ioa_cfg->u.cfg_table, 8872 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
8876 ioa_cfg->cfg_table_dma);
8877 8873
8878 for (i = 0; i < IPR_NUM_HCAMS; i++) { 8874 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8879 pci_free_consistent(ioa_cfg->pdev, 8875 dma_free_coherent(&ioa_cfg->pdev->dev,
8880 sizeof(struct ipr_hostrcb), 8876 sizeof(struct ipr_hostrcb),
8881 ioa_cfg->hostrcb[i], 8877 ioa_cfg->hostrcb[i],
8882 ioa_cfg->hostrcb_dma[i]); 8878 ioa_cfg->hostrcb_dma[i]);
8883 } 8879 }
8884 8880
8885 ipr_free_dump(ioa_cfg); 8881 ipr_free_dump(ioa_cfg);
@@ -8940,7 +8936,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8940 dma_addr_t dma_addr; 8936 dma_addr_t dma_addr;
8941 int i, entries_each_hrrq, hrrq_id = 0; 8937 int i, entries_each_hrrq, hrrq_id = 0;
8942 8938
8943 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev, 8939 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
8944 sizeof(struct ipr_cmnd), 512, 0); 8940 sizeof(struct ipr_cmnd), 512, 0);
8945 8941
8946 if (!ioa_cfg->ipr_cmd_pool) 8942 if (!ioa_cfg->ipr_cmd_pool)
@@ -8990,7 +8986,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8990 } 8986 }
8991 8987
8992 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 8988 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8993 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); 8989 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8994 8990
8995 if (!ipr_cmd) { 8991 if (!ipr_cmd) {
8996 ipr_free_cmd_blks(ioa_cfg); 8992 ipr_free_cmd_blks(ioa_cfg);
@@ -9061,9 +9057,10 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9061 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; 9057 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9062 } 9058 }
9063 9059
9064 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev, 9060 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9065 sizeof(struct ipr_misc_cbs), 9061 sizeof(struct ipr_misc_cbs),
9066 &ioa_cfg->vpd_cbs_dma); 9062 &ioa_cfg->vpd_cbs_dma,
9063 GFP_KERNEL);
9067 9064
9068 if (!ioa_cfg->vpd_cbs) 9065 if (!ioa_cfg->vpd_cbs)
9069 goto out_free_res_entries; 9066 goto out_free_res_entries;
@@ -9072,13 +9069,14 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9072 goto out_free_vpd_cbs; 9069 goto out_free_vpd_cbs;
9073 9070
9074 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9071 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9075 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev, 9072 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9076 sizeof(u32) * ioa_cfg->hrrq[i].size, 9073 sizeof(u32) * ioa_cfg->hrrq[i].size,
9077 &ioa_cfg->hrrq[i].host_rrq_dma); 9074 &ioa_cfg->hrrq[i].host_rrq_dma,
9075 GFP_KERNEL);
9078 9076
9079 if (!ioa_cfg->hrrq[i].host_rrq) { 9077 if (!ioa_cfg->hrrq[i].host_rrq) {
9080 while (--i > 0) 9078 while (--i > 0)
9081 pci_free_consistent(pdev, 9079 dma_free_coherent(&pdev->dev,
9082 sizeof(u32) * ioa_cfg->hrrq[i].size, 9080 sizeof(u32) * ioa_cfg->hrrq[i].size,
9083 ioa_cfg->hrrq[i].host_rrq, 9081 ioa_cfg->hrrq[i].host_rrq,
9084 ioa_cfg->hrrq[i].host_rrq_dma); 9082 ioa_cfg->hrrq[i].host_rrq_dma);
@@ -9087,17 +9085,19 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9087 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; 9085 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9088 } 9086 }
9089 9087
9090 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev, 9088 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9091 ioa_cfg->cfg_table_size, 9089 ioa_cfg->cfg_table_size,
9092 &ioa_cfg->cfg_table_dma); 9090 &ioa_cfg->cfg_table_dma,
9091 GFP_KERNEL);
9093 9092
9094 if (!ioa_cfg->u.cfg_table) 9093 if (!ioa_cfg->u.cfg_table)
9095 goto out_free_host_rrq; 9094 goto out_free_host_rrq;
9096 9095
9097 for (i = 0; i < IPR_NUM_HCAMS; i++) { 9096 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9098 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev, 9097 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9099 sizeof(struct ipr_hostrcb), 9098 sizeof(struct ipr_hostrcb),
9100 &ioa_cfg->hostrcb_dma[i]); 9099 &ioa_cfg->hostrcb_dma[i],
9100 GFP_KERNEL);
9101 9101
9102 if (!ioa_cfg->hostrcb[i]) 9102 if (!ioa_cfg->hostrcb[i])
9103 goto out_free_hostrcb_dma; 9103 goto out_free_hostrcb_dma;
@@ -9121,25 +9121,24 @@ out:
9121 9121
9122out_free_hostrcb_dma: 9122out_free_hostrcb_dma:
9123 while (i-- > 0) { 9123 while (i-- > 0) {
9124 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb), 9124 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9125 ioa_cfg->hostrcb[i], 9125 ioa_cfg->hostrcb[i],
9126 ioa_cfg->hostrcb_dma[i]); 9126 ioa_cfg->hostrcb_dma[i]);
9127 } 9127 }
9128 pci_free_consistent(pdev, ioa_cfg->cfg_table_size, 9128 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9129 ioa_cfg->u.cfg_table, 9129 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9130 ioa_cfg->cfg_table_dma);
9131out_free_host_rrq: 9130out_free_host_rrq:
9132 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9131 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9133 pci_free_consistent(pdev, 9132 dma_free_coherent(&pdev->dev,
9134 sizeof(u32) * ioa_cfg->hrrq[i].size, 9133 sizeof(u32) * ioa_cfg->hrrq[i].size,
9135 ioa_cfg->hrrq[i].host_rrq, 9134 ioa_cfg->hrrq[i].host_rrq,
9136 ioa_cfg->hrrq[i].host_rrq_dma); 9135 ioa_cfg->hrrq[i].host_rrq_dma);
9137 } 9136 }
9138out_ipr_free_cmd_blocks: 9137out_ipr_free_cmd_blocks:
9139 ipr_free_cmd_blks(ioa_cfg); 9138 ipr_free_cmd_blks(ioa_cfg);
9140out_free_vpd_cbs: 9139out_free_vpd_cbs:
9141 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs), 9140 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9142 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 9141 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9143out_free_res_entries: 9142out_free_res_entries:
9144 kfree(ioa_cfg->res_entries); 9143 kfree(ioa_cfg->res_entries);
9145 goto out; 9144 goto out;
@@ -9579,16 +9578,17 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9579 ipr_init_regs(ioa_cfg); 9578 ipr_init_regs(ioa_cfg);
9580 9579
9581 if (ioa_cfg->sis64) { 9580 if (ioa_cfg->sis64) {
9582 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 9581 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9583 if (rc < 0) { 9582 if (rc < 0) {
9584 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n"); 9583 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9585 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9584 rc = dma_set_mask_and_coherent(&pdev->dev,
9585 DMA_BIT_MASK(32));
9586 } 9586 }
9587 } else 9587 } else
9588 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9588 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9589 9589
9590 if (rc < 0) { 9590 if (rc < 0) {
9591 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); 9591 dev_err(&pdev->dev, "Failed to set DMA mask\n");
9592 goto cleanup_nomem; 9592 goto cleanup_nomem;
9593 } 9593 }
9594 9594
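
Alongside the queue-depth change, the ipr hunks above move from the legacy PCI DMA wrappers to the generic DMA API: every call now takes &pdev->dev instead of the pci_dev, coherent allocations pass an explicit GFP flag, and dma_set_mask_and_coherent() sets the streaming and coherent masks together, where pci_set_dma_mask() covered only the former. A sketch of the old-to-new mapping, with buf, bus and SIZE as illustrative placeholders:

    void *buf;
    dma_addr_t bus;
    int rc;

    /* was: buf = pci_alloc_consistent(pdev, SIZE, &bus); */
    buf = dma_alloc_coherent(&pdev->dev, SIZE, &bus, GFP_KERNEL);

    /* was: rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); */
    rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (rc < 0)
            rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

    /* was: pci_free_consistent(pdev, SIZE, buf, bus); */
    dma_free_coherent(&pdev->dev, SIZE, buf, bus);
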
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index d0201ceb4aac..9ebdebd944e7 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1549,7 +1549,7 @@ struct ipr_ioa_cfg {
1549 struct ipr_misc_cbs *vpd_cbs; 1549 struct ipr_misc_cbs *vpd_cbs;
1550 dma_addr_t vpd_cbs_dma; 1550 dma_addr_t vpd_cbs_dma;
1551 1551
1552 struct pci_pool *ipr_cmd_pool; 1552 struct dma_pool *ipr_cmd_pool;
1553 1553
1554 struct ipr_cmnd *reset_cmd; 1554 struct ipr_cmnd *reset_cmd;
1555 int (*reset) (struct ipr_cmnd *); 1555 int (*reset) (struct ipr_cmnd *);
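
The header change is mechanical: struct pci_pool is only a #define alias for struct dma_pool, and the pci_pool_* calls converted in ipr_alloc_cmd_blks()/ipr_free_cmd_blks() above map one-for-one onto the dma_pool_* API. In sketch form (pool name and sizes illustrative):

    struct dma_pool *pool;
    dma_addr_t cmd_dma;
    void *cmd;

    pool = dma_pool_create("mydrv_cmds", &pdev->dev,
                           sizeof(struct mydrv_cmd), 512, 0);
    cmd = dma_pool_alloc(pool, GFP_KERNEL, &cmd_dma);
    /* hand cmd_dma to the adapter, touch cmd from the CPU */
    dma_pool_free(pool, cmd, cmd_dma);
    dma_pool_destroy(pool);
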
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 454741a8da45..e5c28435d768 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1210,7 +1210,7 @@ ips_slave_configure(struct scsi_device * SDptr)
1210 min = ha->max_cmds / 2; 1210 min = ha->max_cmds / 2;
1211 if (ha->enq->ucLogDriveCount <= 2) 1211 if (ha->enq->ucLogDriveCount <= 2)
1212 min = ha->max_cmds - 1; 1212 min = ha->max_cmds - 1;
1213 scsi_adjust_queue_depth(SDptr, min); 1213 scsi_change_queue_depth(SDptr, min);
1214 } 1214 }
1215 1215
1216 SDptr->skip_ms_page_8 = 1; 1216 SDptr->skip_ms_page_8 = 1;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 897562056018..724c6265b667 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -173,6 +173,7 @@ static struct scsi_host_template isci_sht = {
173 .ioctl = sas_ioctl, 173 .ioctl = sas_ioctl,
174 .shost_attrs = isci_host_attrs, 174 .shost_attrs = isci_host_attrs,
175 .use_blk_tags = 1, 175 .use_blk_tags = 1,
176 .track_queue_depth = 1,
176}; 177};
177 178
178static struct sas_domain_function_template isci_transport_ops = { 179static struct sas_domain_function_template isci_transport_ops = {
@@ -259,8 +260,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
259 sas_ha->sas_port = sas_ports; 260 sas_ha->sas_port = sas_ports;
260 sas_ha->num_phys = SCI_MAX_PHYS; 261 sas_ha->num_phys = SCI_MAX_PHYS;
261 262
262 sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
263 sas_ha->lldd_max_execute_num = 1;
264 sas_ha->strict_wide_ports = 1; 263 sas_ha->strict_wide_ports = 1;
265 264
266 sas_register_ha(sas_ha); 265 sas_register_ha(sas_ha);
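
Two related cleanups meet here: QUEUE FULL handling moves into the midlayer, which a host opts into with the new track_queue_depth template flag, and libsas drops the per-HA queue sizing fields because its internal task queue is removed later in this series. After conversion a SAS host template looks roughly like this (fields abridged):

    static struct scsi_host_template mydrv_sht = {
            /* ... */
            .change_queue_depth     = sas_change_queue_depth,
            .use_blk_tags           = 1,
            /* let the core run scsi_track_queue_full() on QUEUE FULL */
            .track_queue_depth      = 1,
    };
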
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 5d6fda72d659..3f63c6318b0d 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -117,104 +117,97 @@ static inline int isci_device_io_ready(struct isci_remote_device *idev,
117 * functions. This function is called by libsas to send a task down to 117 * functions. This function is called by libsas to send a task down to
118 * hardware. 118 * hardware.
119 * @task: This parameter specifies the SAS task to send. 119 * @task: This parameter specifies the SAS task to send.
120 * @num: This parameter specifies the number of tasks to queue.
121 * @gfp_flags: This parameter specifies the context of this call. 120 * @gfp_flags: This parameter specifies the context of this call.
122 * 121 *
123 * status, zero indicates success. 122 * status, zero indicates success.
124 */ 123 */
125int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) 124int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
126{ 125{
127 struct isci_host *ihost = dev_to_ihost(task->dev); 126 struct isci_host *ihost = dev_to_ihost(task->dev);
128 struct isci_remote_device *idev; 127 struct isci_remote_device *idev;
129 unsigned long flags; 128 unsigned long flags;
129 enum sci_status status = SCI_FAILURE;
130 bool io_ready; 130 bool io_ready;
131 u16 tag; 131 u16 tag;
132 132
133 dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num); 133 spin_lock_irqsave(&ihost->scic_lock, flags);
134 idev = isci_lookup_device(task->dev);
135 io_ready = isci_device_io_ready(idev, task);
136 tag = isci_alloc_tag(ihost);
137 spin_unlock_irqrestore(&ihost->scic_lock, flags);
134 138
135 for_each_sas_task(num, task) { 139 dev_dbg(&ihost->pdev->dev,
136 enum sci_status status = SCI_FAILURE; 140 "task: %p, dev: %p idev: %p:%#lx cmd = %p\n",
141 task, task->dev, idev, idev ? idev->flags : 0,
142 task->uldd_task);
137 143
138 spin_lock_irqsave(&ihost->scic_lock, flags); 144 if (!idev) {
139 idev = isci_lookup_device(task->dev); 145 isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
140 io_ready = isci_device_io_ready(idev, task); 146 SAS_DEVICE_UNKNOWN);
141 tag = isci_alloc_tag(ihost); 147 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
142 spin_unlock_irqrestore(&ihost->scic_lock, flags); 148 /* Indicate QUEUE_FULL so that the scsi midlayer
149 * retries.
150 */
151 isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
152 SAS_QUEUE_FULL);
153 } else {
154 /* There is a device and it's ready for I/O. */
155 spin_lock_irqsave(&task->task_state_lock, flags);
143 156
144 dev_dbg(&ihost->pdev->dev, 157 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
145 "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n", 158 /* The I/O was aborted. */
146 task, num, task->dev, idev, idev ? idev->flags : 0, 159 spin_unlock_irqrestore(&task->task_state_lock, flags);
147 task->uldd_task); 160
148 161 isci_task_refuse(ihost, task,
149 if (!idev) { 162 SAS_TASK_UNDELIVERED,
150 isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, 163 SAM_STAT_TASK_ABORTED);
151 SAS_DEVICE_UNKNOWN);
152 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
153 /* Indicate QUEUE_FULL so that the scsi midlayer
154 * retries.
155 */
156 isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
157 SAS_QUEUE_FULL);
158 } else { 164 } else {
159 /* There is a device and it's ready for I/O. */ 165 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
160 spin_lock_irqsave(&task->task_state_lock, flags); 166 spin_unlock_irqrestore(&task->task_state_lock, flags);
161 167
162 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { 168 /* build and send the request. */
163 /* The I/O was aborted. */ 169 status = isci_request_execute(ihost, idev, task, tag);
164 spin_unlock_irqrestore(&task->task_state_lock, 170
165 flags); 171 if (status != SCI_SUCCESS) {
166 172 spin_lock_irqsave(&task->task_state_lock, flags);
167 isci_task_refuse(ihost, task, 173 /* Did not really start this command. */
168 SAS_TASK_UNDELIVERED, 174 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
169 SAM_STAT_TASK_ABORTED);
170 } else {
171 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
172 spin_unlock_irqrestore(&task->task_state_lock, flags); 175 spin_unlock_irqrestore(&task->task_state_lock, flags);
173 176
174 /* build and send the request. */ 177 if (test_bit(IDEV_GONE, &idev->flags)) {
175 status = isci_request_execute(ihost, idev, task, tag); 178 /* Indicate that the device
176 179 * is gone.
177 if (status != SCI_SUCCESS) { 180 */
178 181 isci_task_refuse(ihost, task,
179 spin_lock_irqsave(&task->task_state_lock, flags); 182 SAS_TASK_UNDELIVERED,
180 /* Did not really start this command. */ 183 SAS_DEVICE_UNKNOWN);
181 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 184 } else {
182 spin_unlock_irqrestore(&task->task_state_lock, flags); 185 /* Indicate QUEUE_FULL so that
183 186 * the scsi midlayer retries.
184 if (test_bit(IDEV_GONE, &idev->flags)) { 187 * If the request failed for
185 188 * remote device reasons, it
186 /* Indicate that the device 189 * gets returned as
187 * is gone. 190 * SAS_TASK_UNDELIVERED next
188 */ 191 * time through.
189 isci_task_refuse(ihost, task, 192 */
190 SAS_TASK_UNDELIVERED, 193 isci_task_refuse(ihost, task,
191 SAS_DEVICE_UNKNOWN); 194 SAS_TASK_COMPLETE,
192 } else { 195 SAS_QUEUE_FULL);
193 /* Indicate QUEUE_FULL so that
194 * the scsi midlayer retries.
195 * If the request failed for
196 * remote device reasons, it
197 * gets returned as
198 * SAS_TASK_UNDELIVERED next
199 * time through.
200 */
201 isci_task_refuse(ihost, task,
202 SAS_TASK_COMPLETE,
203 SAS_QUEUE_FULL);
204 }
205 } 196 }
206 } 197 }
207 } 198 }
208 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
209 spin_lock_irqsave(&ihost->scic_lock, flags);
210 /* command never hit the device, so just free
211 * the tci and skip the sequence increment
212 */
213 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
214 spin_unlock_irqrestore(&ihost->scic_lock, flags);
215 }
216 isci_put_device(idev);
217 } 199 }
200
201 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
202 spin_lock_irqsave(&ihost->scic_lock, flags);
203 /* command never hit the device, so just free
204 * the tci and skip the sequence increment
205 */
206 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
207 spin_unlock_irqrestore(&ihost->scic_lock, flags);
208 }
209
210 isci_put_device(idev);
218 return 0; 211 return 0;
219} 212}
220 213
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 9c06cbad1d26..8f4531f22ac2 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -131,7 +131,6 @@ static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
131 131
132int isci_task_execute_task( 132int isci_task_execute_task(
133 struct sas_task *task, 133 struct sas_task *task,
134 int num,
135 gfp_t gfp_flags); 134 gfp_t gfp_flags);
136 135
137int isci_task_abort_task( 136int isci_task_abort_task(
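
With batching gone the execute hook takes exactly one task per call; presumably the matching callback in struct sas_domain_function_template (its header is not part of this excerpt) loses the num parameter the same way:

    /* before: the task collector could hand over a batch */
    int (*lldd_execute_task)(struct sas_task *, int num, gfp_t gfp_flags);

    /* after: direct mode only, one task per call */
    int (*lldd_execute_task)(struct sas_task *, gfp_t gfp_flags);
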
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 427af0f24b0f..0b8af186e707 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -952,7 +952,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
952 .module = THIS_MODULE, 952 .module = THIS_MODULE,
953 .name = "iSCSI Initiator over TCP/IP", 953 .name = "iSCSI Initiator over TCP/IP",
954 .queuecommand = iscsi_queuecommand, 954 .queuecommand = iscsi_queuecommand,
955 .change_queue_depth = iscsi_change_queue_depth, 955 .change_queue_depth = scsi_change_queue_depth,
956 .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1, 956 .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
957 .sg_tablesize = 4096, 957 .sg_tablesize = 4096,
958 .max_sectors = 0xFFFF, 958 .max_sectors = 0xFFFF,
@@ -966,6 +966,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
966 .target_alloc = iscsi_target_alloc, 966 .target_alloc = iscsi_target_alloc,
967 .proc_name = "iscsi_tcp", 967 .proc_name = "iscsi_tcp",
968 .this_id = -1, 968 .this_id = -1,
969 .track_queue_depth = 1,
969}; 970};
970 971
971static struct iscsi_transport iscsi_sw_tcp_transport = { 972static struct iscsi_transport iscsi_sw_tcp_transport = {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index bf954ee050f8..c6795941b45d 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -2160,37 +2160,12 @@ int fc_slave_alloc(struct scsi_device *sdev)
2160 if (!rport || fc_remote_port_chkready(rport)) 2160 if (!rport || fc_remote_port_chkready(rport))
2161 return -ENXIO; 2161 return -ENXIO;
2162 2162
2163 scsi_adjust_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH); 2163 scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
2164 return 0; 2164 return 0;
2165} 2165}
2166EXPORT_SYMBOL(fc_slave_alloc); 2166EXPORT_SYMBOL(fc_slave_alloc);
2167 2167
2168/** 2168/**
2169 * fc_change_queue_depth() - Change a device's queue depth
2170 * @sdev: The SCSI device whose queue depth is to change
2171 * @qdepth: The new queue depth
2172 * @reason: The reason for the change
2173 */
2174int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2175{
2176 switch (reason) {
2177 case SCSI_QDEPTH_DEFAULT:
2178 scsi_adjust_queue_depth(sdev, qdepth);
2179 break;
2180 case SCSI_QDEPTH_QFULL:
2181 scsi_track_queue_full(sdev, qdepth);
2182 break;
2183 case SCSI_QDEPTH_RAMP_UP:
2184 scsi_adjust_queue_depth(sdev, qdepth);
2185 break;
2186 default:
2187 return -EOPNOTSUPP;
2188 }
2189 return sdev->queue_depth;
2190}
2191EXPORT_SYMBOL(fc_change_queue_depth);
2192
2193/**
2194 * fc_fcp_destroy() - Tear down the FCP layer for a given local port 2169 * fc_fcp_destroy() - Tear down the FCP layer for a given local port
2195 * @lport: The local port that no longer needs the FCP layer 2170 * @lport: The local port that no longer needs the FCP layer
2196 */ 2171 */
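
fc_change_queue_depth() existed only to dispatch on the reason code. With QUEUE FULL tracking done by the core for hosts that set track_queue_depth, and with ramp-up and default both reducing to a plain depth change, the whole switch collapses into the generic helper, so former callers can point ->change_queue_depth at scsi_change_queue_depth directly; the identical iscsi wrapper is removed in the libiscsi hunk below. In outline:

    /* the old per-transport dispatch ... */
    switch (reason) {
    case SCSI_QDEPTH_DEFAULT:
    case SCSI_QDEPTH_RAMP_UP:
            scsi_adjust_queue_depth(sdev, qdepth);
            break;
    case SCSI_QDEPTH_QFULL:
            scsi_track_queue_full(sdev, qdepth);
            break;
    }

    /* ... becomes a single core call */
    scsi_change_queue_depth(sdev, qdepth);
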
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d521624dedfb..8053f24f0349 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1771,25 +1771,6 @@ fault:
1771} 1771}
1772EXPORT_SYMBOL_GPL(iscsi_queuecommand); 1772EXPORT_SYMBOL_GPL(iscsi_queuecommand);
1773 1773
1774int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
1775{
1776 switch (reason) {
1777 case SCSI_QDEPTH_DEFAULT:
1778 scsi_adjust_queue_depth(sdev, depth);
1779 break;
1780 case SCSI_QDEPTH_QFULL:
1781 scsi_track_queue_full(sdev, depth);
1782 break;
1783 case SCSI_QDEPTH_RAMP_UP:
1784 scsi_adjust_queue_depth(sdev, depth);
1785 break;
1786 default:
1787 return -EOPNOTSUPP;
1788 }
1789 return sdev->queue_depth;
1790}
1791EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
1792
1793int iscsi_target_alloc(struct scsi_target *starget) 1774int iscsi_target_alloc(struct scsi_target *starget)
1794{ 1775{
1795 struct iscsi_cls_session *cls_session = starget_to_session(starget); 1776 struct iscsi_cls_session *cls_session = starget_to_session(starget);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 766098af4eb7..577770fdee86 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -171,7 +171,6 @@ static void sas_ata_task_done(struct sas_task *task)
171 spin_unlock_irqrestore(ap->lock, flags); 171 spin_unlock_irqrestore(ap->lock, flags);
172 172
173qc_already_gone: 173qc_already_gone:
174 list_del_init(&task->list);
175 sas_free_task(task); 174 sas_free_task(task);
176} 175}
177 176
@@ -244,12 +243,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
244 if (qc->scsicmd) 243 if (qc->scsicmd)
245 ASSIGN_SAS_TASK(qc->scsicmd, task); 244 ASSIGN_SAS_TASK(qc->scsicmd, task);
246 245
247 if (sas_ha->lldd_max_execute_num < 2) 246 ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
248 ret = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
249 else
250 ret = sas_queue_up(task);
251
252 /* Examine */
253 if (ret) { 247 if (ret) {
254 SAS_DPRINTK("lldd_execute_task returned: %d\n", ret); 248 SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);
255 249
@@ -485,7 +479,6 @@ static void sas_ata_internal_abort(struct sas_task *task)
485 479
486 return; 480 return;
487 out: 481 out:
488 list_del_init(&task->list);
489 sas_free_task(task); 482 sas_free_task(task);
490} 483}
491 484
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 0cac7d8fd0f7..022bb6e10d98 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -96,7 +96,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
96 task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; 96 task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
97 add_timer(&task->slow_task->timer); 97 add_timer(&task->slow_task->timer);
98 98
99 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); 99 res = i->dft->lldd_execute_task(task, GFP_KERNEL);
100 100
101 if (res) { 101 if (res) {
102 del_timer(&task->slow_task->timer); 102 del_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index dbc8a793fd86..362da44f2948 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -45,7 +45,6 @@ struct sas_task *sas_alloc_task(gfp_t flags)
45 struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags); 45 struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
46 46
47 if (task) { 47 if (task) {
48 INIT_LIST_HEAD(&task->list);
49 spin_lock_init(&task->task_state_lock); 48 spin_lock_init(&task->task_state_lock);
50 task->task_state_flags = SAS_TASK_STATE_PENDING; 49 task->task_state_flags = SAS_TASK_STATE_PENDING;
51 } 50 }
@@ -77,7 +76,6 @@ EXPORT_SYMBOL_GPL(sas_alloc_slow_task);
77void sas_free_task(struct sas_task *task) 76void sas_free_task(struct sas_task *task)
78{ 77{
79 if (task) { 78 if (task) {
80 BUG_ON(!list_empty(&task->list));
81 kfree(task->slow_task); 79 kfree(task->slow_task);
82 kmem_cache_free(sas_task_cache, task); 80 kmem_cache_free(sas_task_cache, task);
83 } 81 }
@@ -127,11 +125,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
127 spin_lock_init(&sas_ha->phy_port_lock); 125 spin_lock_init(&sas_ha->phy_port_lock);
128 sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr); 126 sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);
129 127
130 if (sas_ha->lldd_queue_size == 0)
131 sas_ha->lldd_queue_size = 1;
132 else if (sas_ha->lldd_queue_size == -1)
133 sas_ha->lldd_queue_size = 128; /* Sanity */
134
135 set_bit(SAS_HA_REGISTERED, &sas_ha->state); 128 set_bit(SAS_HA_REGISTERED, &sas_ha->state);
136 spin_lock_init(&sas_ha->lock); 129 spin_lock_init(&sas_ha->lock);
137 mutex_init(&sas_ha->drain_mutex); 130 mutex_init(&sas_ha->drain_mutex);
@@ -157,15 +150,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
157 goto Undo_ports; 150 goto Undo_ports;
158 } 151 }
159 152
160 if (sas_ha->lldd_max_execute_num > 1) {
161 error = sas_init_queue(sas_ha);
162 if (error) {
163 printk(KERN_NOTICE "couldn't start queue thread:%d, "
164 "running in direct mode\n", error);
165 sas_ha->lldd_max_execute_num = 1;
166 }
167 }
168
169 INIT_LIST_HEAD(&sas_ha->eh_done_q); 153 INIT_LIST_HEAD(&sas_ha->eh_done_q);
170 INIT_LIST_HEAD(&sas_ha->eh_ata_q); 154 INIT_LIST_HEAD(&sas_ha->eh_ata_q);
171 155
@@ -201,11 +185,6 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
201 __sas_drain_work(sas_ha); 185 __sas_drain_work(sas_ha);
202 mutex_unlock(&sas_ha->drain_mutex); 186 mutex_unlock(&sas_ha->drain_mutex);
203 187
204 if (sas_ha->lldd_max_execute_num > 1) {
205 sas_shutdown_queue(sas_ha);
206 sas_ha->lldd_max_execute_num = 1;
207 }
208
209 return 0; 188 return 0;
210} 189}
211 190
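
sas_register_ha() and sas_unregister_ha() no longer size or start a task-collector queue, so LLDD registration shrinks to filling in the address and phy/port tables. A sketch for a hypothetical LLDD (mydrv and MYDRV_MAX_PHYS are illustrative):

    struct sas_ha_struct *sas_ha = &mydrv->sas_ha;

    sas_ha->sas_addr          = mydrv->sas_addr;
    sas_ha->sas_phy           = mydrv->sas_phys;
    sas_ha->sas_port          = mydrv->sas_ports;
    sas_ha->num_phys          = MYDRV_MAX_PHYS;
    sas_ha->strict_wide_ports = 1;  /* optional */

    /* lldd_queue_size and lldd_max_execute_num no longer exist */
    return sas_register_ha(sas_ha);
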
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 7e7ba83f0a21..9cf0bc260b0e 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -66,9 +66,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha);
66 66
67enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); 67enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
68 68
69int sas_init_queue(struct sas_ha_struct *sas_ha);
70int sas_init_events(struct sas_ha_struct *sas_ha); 69int sas_init_events(struct sas_ha_struct *sas_ha);
71void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
72void sas_disable_revalidation(struct sas_ha_struct *ha); 70void sas_disable_revalidation(struct sas_ha_struct *ha);
73void sas_enable_revalidation(struct sas_ha_struct *ha); 71void sas_enable_revalidation(struct sas_ha_struct *ha);
74void __sas_drain_work(struct sas_ha_struct *ha); 72void __sas_drain_work(struct sas_ha_struct *ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 89e8b687a679..72918d227ead 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -112,7 +112,6 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
112 112
113 sc->result = (hs << 16) | stat; 113 sc->result = (hs << 16) | stat;
114 ASSIGN_SAS_TASK(sc, NULL); 114 ASSIGN_SAS_TASK(sc, NULL);
115 list_del_init(&task->list);
116 sas_free_task(task); 115 sas_free_task(task);
117} 116}
118 117
@@ -138,7 +137,6 @@ static void sas_scsi_task_done(struct sas_task *task)
138 137
139 if (unlikely(!sc)) { 138 if (unlikely(!sc)) {
140 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); 139 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
141 list_del_init(&task->list);
142 sas_free_task(task); 140 sas_free_task(task);
143 return; 141 return;
144 } 142 }
@@ -179,31 +177,10 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
179 return task; 177 return task;
180} 178}
181 179
182int sas_queue_up(struct sas_task *task)
183{
184 struct sas_ha_struct *sas_ha = task->dev->port->ha;
185 struct scsi_core *core = &sas_ha->core;
186 unsigned long flags;
187 LIST_HEAD(list);
188
189 spin_lock_irqsave(&core->task_queue_lock, flags);
190 if (sas_ha->lldd_queue_size < core->task_queue_size + 1) {
191 spin_unlock_irqrestore(&core->task_queue_lock, flags);
192 return -SAS_QUEUE_FULL;
193 }
194 list_add_tail(&task->list, &core->task_queue);
195 core->task_queue_size += 1;
196 spin_unlock_irqrestore(&core->task_queue_lock, flags);
197 wake_up_process(core->queue_thread);
198
199 return 0;
200}
201
202int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 180int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
203{ 181{
204 struct sas_internal *i = to_sas_internal(host->transportt); 182 struct sas_internal *i = to_sas_internal(host->transportt);
205 struct domain_device *dev = cmd_to_domain_dev(cmd); 183 struct domain_device *dev = cmd_to_domain_dev(cmd);
206 struct sas_ha_struct *sas_ha = dev->port->ha;
207 struct sas_task *task; 184 struct sas_task *task;
208 int res = 0; 185 int res = 0;
209 186
@@ -224,12 +201,7 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
224 if (!task) 201 if (!task)
225 return SCSI_MLQUEUE_HOST_BUSY; 202 return SCSI_MLQUEUE_HOST_BUSY;
226 203
227 /* Queue up, Direct Mode or Task Collector Mode. */ 204 res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
228 if (sas_ha->lldd_max_execute_num < 2)
229 res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
230 else
231 res = sas_queue_up(task);
232
233 if (res) 205 if (res)
234 goto out_free_task; 206 goto out_free_task;
235 return 0; 207 return 0;
@@ -323,37 +295,17 @@ enum task_disposition {
323 TASK_IS_DONE, 295 TASK_IS_DONE,
324 TASK_IS_ABORTED, 296 TASK_IS_ABORTED,
325 TASK_IS_AT_LU, 297 TASK_IS_AT_LU,
326 TASK_IS_NOT_AT_HA,
327 TASK_IS_NOT_AT_LU, 298 TASK_IS_NOT_AT_LU,
328 TASK_ABORT_FAILED, 299 TASK_ABORT_FAILED,
329}; 300};
330 301
331static enum task_disposition sas_scsi_find_task(struct sas_task *task) 302static enum task_disposition sas_scsi_find_task(struct sas_task *task)
332{ 303{
333 struct sas_ha_struct *ha = task->dev->port->ha;
334 unsigned long flags; 304 unsigned long flags;
335 int i, res; 305 int i, res;
336 struct sas_internal *si = 306 struct sas_internal *si =
337 to_sas_internal(task->dev->port->ha->core.shost->transportt); 307 to_sas_internal(task->dev->port->ha->core.shost->transportt);
338 308
339 if (ha->lldd_max_execute_num > 1) {
340 struct scsi_core *core = &ha->core;
341 struct sas_task *t, *n;
342
343 mutex_lock(&core->task_queue_flush);
344 spin_lock_irqsave(&core->task_queue_lock, flags);
345 list_for_each_entry_safe(t, n, &core->task_queue, list)
346 if (task == t) {
347 list_del_init(&t->list);
348 break;
349 }
350 spin_unlock_irqrestore(&core->task_queue_lock, flags);
351 mutex_unlock(&core->task_queue_flush);
352
353 if (task == t)
354 return TASK_IS_NOT_AT_HA;
355 }
356
357 for (i = 0; i < 5; i++) { 309 for (i = 0; i < 5; i++) {
358 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task); 310 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
359 res = si->dft->lldd_abort_task(task); 311 res = si->dft->lldd_abort_task(task);
@@ -667,14 +619,6 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
667 cmd->eh_eflags = 0; 619 cmd->eh_eflags = 0;
668 620
669 switch (res) { 621 switch (res) {
670 case TASK_IS_NOT_AT_HA:
671 SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n",
672 __func__, task,
673 cmd->retries ? "retry" : "aborted");
674 if (cmd->retries)
675 cmd->retries--;
676 sas_eh_finish_cmd(cmd);
677 continue;
678 case TASK_IS_DONE: 622 case TASK_IS_DONE:
679 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 623 SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
680 task); 624 task);
@@ -836,9 +780,6 @@ retry:
836 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); 780 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
837 781
838out: 782out:
839 if (ha->lldd_max_execute_num > 1)
840 wake_up_process(ha->core.queue_thread);
841
842 sas_eh_handle_resets(shost); 783 sas_eh_handle_resets(shost);
843 784
844 /* now link into libata eh --- if we have any ata devices */ 785 /* now link into libata eh --- if we have any ata devices */
@@ -940,12 +881,12 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
940 sas_read_port_mode_page(scsi_dev); 881 sas_read_port_mode_page(scsi_dev);
941 882
942 if (scsi_dev->tagged_supported) { 883 if (scsi_dev->tagged_supported) {
943 scsi_adjust_queue_depth(scsi_dev, SAS_DEF_QD); 884 scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
944 } else { 885 } else {
945 SAS_DPRINTK("device %llx, LUN %llx doesn't support " 886 SAS_DPRINTK("device %llx, LUN %llx doesn't support "
946 "TCQ\n", SAS_ADDR(dev->sas_addr), 887 "TCQ\n", SAS_ADDR(dev->sas_addr),
947 scsi_dev->lun); 888 scsi_dev->lun);
948 scsi_adjust_queue_depth(scsi_dev, 1); 889 scsi_change_queue_depth(scsi_dev, 1);
949 } 890 }
950 891
951 scsi_dev->allow_restart = 1; 892 scsi_dev->allow_restart = 1;
@@ -953,29 +894,16 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
953 return 0; 894 return 0;
954} 895}
955 896
956int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason) 897int sas_change_queue_depth(struct scsi_device *sdev, int depth)
957{ 898{
958 struct domain_device *dev = sdev_to_domain_dev(sdev); 899 struct domain_device *dev = sdev_to_domain_dev(sdev);
959 900
960 if (dev_is_sata(dev)) 901 if (dev_is_sata(dev))
961 return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth, 902 return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
962 reason);
963
964 switch (reason) {
965 case SCSI_QDEPTH_DEFAULT:
966 case SCSI_QDEPTH_RAMP_UP:
967 if (!sdev->tagged_supported)
968 depth = 1;
969 scsi_adjust_queue_depth(sdev, depth);
970 break;
971 case SCSI_QDEPTH_QFULL:
972 scsi_track_queue_full(sdev, depth);
973 break;
974 default:
975 return -EOPNOTSUPP;
976 }
977 903
978 return depth; 904 if (!sdev->tagged_supported)
905 depth = 1;
906 return scsi_change_queue_depth(sdev, depth);
979} 907}
980 908
981int sas_change_queue_type(struct scsi_device *scsi_dev, int type) 909int sas_change_queue_type(struct scsi_device *scsi_dev, int type)
@@ -997,121 +925,6 @@ int sas_bios_param(struct scsi_device *scsi_dev,
997 return 0; 925 return 0;
998} 926}
999 927
1000/* ---------- Task Collector Thread implementation ---------- */
1001
1002static void sas_queue(struct sas_ha_struct *sas_ha)
1003{
1004 struct scsi_core *core = &sas_ha->core;
1005 unsigned long flags;
1006 LIST_HEAD(q);
1007 int can_queue;
1008 int res;
1009 struct sas_internal *i = to_sas_internal(core->shost->transportt);
1010
1011 mutex_lock(&core->task_queue_flush);
1012 spin_lock_irqsave(&core->task_queue_lock, flags);
1013 while (!kthread_should_stop() &&
1014 !list_empty(&core->task_queue) &&
1015 !test_bit(SAS_HA_FROZEN, &sas_ha->state)) {
1016
1017 can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
1018 if (can_queue >= 0) {
1019 can_queue = core->task_queue_size;
1020 list_splice_init(&core->task_queue, &q);
1021 } else {
1022 struct list_head *a, *n;
1023
1024 can_queue = sas_ha->lldd_queue_size;
1025 list_for_each_safe(a, n, &core->task_queue) {
1026 list_move_tail(a, &q);
1027 if (--can_queue == 0)
1028 break;
1029 }
1030 can_queue = sas_ha->lldd_queue_size;
1031 }
1032 core->task_queue_size -= can_queue;
1033 spin_unlock_irqrestore(&core->task_queue_lock, flags);
1034 {
1035 struct sas_task *task = list_entry(q.next,
1036 struct sas_task,
1037 list);
1038 list_del_init(&q);
1039 res = i->dft->lldd_execute_task(task, can_queue,
1040 GFP_KERNEL);
1041 if (unlikely(res))
1042 __list_add(&q, task->list.prev, &task->list);
1043 }
1044 spin_lock_irqsave(&core->task_queue_lock, flags);
1045 if (res) {
1046 list_splice_init(&q, &core->task_queue); /*at head*/
1047 core->task_queue_size += can_queue;
1048 }
1049 }
1050 spin_unlock_irqrestore(&core->task_queue_lock, flags);
1051 mutex_unlock(&core->task_queue_flush);
1052}
1053
1054/**
1055 * sas_queue_thread -- The Task Collector thread
1056 * @_sas_ha: pointer to struct sas_ha
1057 */
1058static int sas_queue_thread(void *_sas_ha)
1059{
1060 struct sas_ha_struct *sas_ha = _sas_ha;
1061
1062 while (1) {
1063 set_current_state(TASK_INTERRUPTIBLE);
1064 schedule();
1065 sas_queue(sas_ha);
1066 if (kthread_should_stop())
1067 break;
1068 }
1069
1070 return 0;
1071}
1072
1073int sas_init_queue(struct sas_ha_struct *sas_ha)
1074{
1075 struct scsi_core *core = &sas_ha->core;
1076
1077 spin_lock_init(&core->task_queue_lock);
1078 mutex_init(&core->task_queue_flush);
1079 core->task_queue_size = 0;
1080 INIT_LIST_HEAD(&core->task_queue);
1081
1082 core->queue_thread = kthread_run(sas_queue_thread, sas_ha,
1083 "sas_queue_%d", core->shost->host_no);
1084 if (IS_ERR(core->queue_thread))
1085 return PTR_ERR(core->queue_thread);
1086 return 0;
1087}
1088
1089void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
1090{
1091 unsigned long flags;
1092 struct scsi_core *core = &sas_ha->core;
1093 struct sas_task *task, *n;
1094
1095 kthread_stop(core->queue_thread);
1096
1097 if (!list_empty(&core->task_queue))
1098 SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
1099 SAS_ADDR(sas_ha->sas_addr));
1100
1101 spin_lock_irqsave(&core->task_queue_lock, flags);
1102 list_for_each_entry_safe(task, n, &core->task_queue, list) {
1103 struct scsi_cmnd *cmd = task->uldd_task;
1104
1105 list_del_init(&task->list);
1106
1107 ASSIGN_SAS_TASK(cmd, NULL);
1108 sas_free_task(task);
1109 cmd->result = DID_ABORT << 16;
1110 cmd->scsi_done(cmd);
1111 }
1112 spin_unlock_irqrestore(&core->task_queue_lock, flags);
1113}
1114
1115/* 928/*
1116 * Tell an upper layer that it needs to initiate an abort for a given task. 929 * Tell an upper layer that it needs to initiate an abort for a given task.
1117 * This should only ever be called by an LLDD. 930 * This should only ever be called by an LLDD.
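
This hunk retires Task Collector Mode completely: the queue thread, sas_queue_up() and the TASK_IS_NOT_AT_HA disposition existed only to batch tasks for lldd_execute_task(), and with every LLDD converted to one task per call the submit path reduces to (abridged from the hunk above):

    task = sas_create_task(cmd, dev, GFP_ATOMIC);
    if (!task)
            return SCSI_MLQUEUE_HOST_BUSY;

    res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
    if (res)
            goto out_free_task;
    return 0;
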
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 8533ee9b818d..fd85952b621d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -243,108 +243,6 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
243} 243}
244 244
245/** 245/**
246 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
247 * @phba: Pointer to HBA context object.
248 * @vport: Pointer to vport object.
249 * @ndlp: Pointer to FC node associated with the target.
250 * @lun: Lun number of the scsi device.
251 * @old_val: Old value of the queue depth.
252 * @new_val: New value of the queue depth.
253 *
254 * This function sends an event to the mgmt application indicating
255 * there is a change in the scsi device queue depth.
256 **/
257static void
258lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
259 struct lpfc_vport *vport,
260 struct lpfc_nodelist *ndlp,
261 uint64_t lun,
262 uint32_t old_val,
263 uint32_t new_val)
264{
265 struct lpfc_fast_path_event *fast_path_evt;
266 unsigned long flags;
267
268 fast_path_evt = lpfc_alloc_fast_evt(phba);
269 if (!fast_path_evt)
270 return;
271
272 fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
273 FC_REG_SCSI_EVENT;
274 fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
275 LPFC_EVENT_VARQUEDEPTH;
276
277 /* Report all luns with change in queue depth */
278 fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
279 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
280 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
281 &ndlp->nlp_portname, sizeof(struct lpfc_name));
282 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
283 &ndlp->nlp_nodename, sizeof(struct lpfc_name));
284 }
285
286 fast_path_evt->un.queue_depth_evt.oldval = old_val;
287 fast_path_evt->un.queue_depth_evt.newval = new_val;
288 fast_path_evt->vport = vport;
289
290 fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
291 spin_lock_irqsave(&phba->hbalock, flags);
292 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
293 spin_unlock_irqrestore(&phba->hbalock, flags);
294 lpfc_worker_wake_up(phba);
295
296 return;
297}
298
299/**
300 * lpfc_change_queue_depth - Alter scsi device queue depth
301 * @sdev: Pointer the scsi device on which to change the queue depth.
302 * @qdepth: New queue depth to set the sdev to.
303 * @reason: The reason for the queue depth change.
304 *
305 * This function is called by the midlayer and the LLD to alter the queue
306 * depth for a scsi device. This function sets the queue depth to the new
307 * value and sends an event out to log the queue depth change.
308 **/
309static int
310lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
311{
312 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
313 struct lpfc_hba *phba = vport->phba;
314 struct lpfc_rport_data *rdata;
315 unsigned long new_queue_depth, old_queue_depth;
316
317 old_queue_depth = sdev->queue_depth;
318
319 switch (reason) {
320 case SCSI_QDEPTH_DEFAULT:
321 /* change request from sysfs, fall through */
322 case SCSI_QDEPTH_RAMP_UP:
323 scsi_adjust_queue_depth(sdev, qdepth);
324 break;
325 case SCSI_QDEPTH_QFULL:
326 if (scsi_track_queue_full(sdev, qdepth) == 0)
327 return sdev->queue_depth;
328
329 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
330 "0711 detected queue full - lun queue "
331 "depth adjusted to %d.\n", sdev->queue_depth);
332 break;
333 default:
334 return -EOPNOTSUPP;
335 }
336
337 new_queue_depth = sdev->queue_depth;
338 rdata = lpfc_rport_data_from_scsi_device(sdev);
339 if (rdata)
340 lpfc_send_sdev_queuedepth_change_event(phba, vport,
341 rdata->pnode, sdev->lun,
342 old_queue_depth,
343 new_queue_depth);
344 return sdev->queue_depth;
345}
346
347/**
348 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread 246 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
349 * @phba: The Hba for which this call is being executed. 247 * @phba: The Hba for which this call is being executed.
350 * 248 *
@@ -429,8 +327,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
429 else 327 else
430 new_queue_depth = sdev->queue_depth - 328 new_queue_depth = sdev->queue_depth -
431 new_queue_depth; 329 new_queue_depth;
432 lpfc_change_queue_depth(sdev, new_queue_depth, 330 scsi_change_queue_depth(sdev, new_queue_depth);
433 SCSI_QDEPTH_DEFAULT);
434 } 331 }
435 } 332 }
436 lpfc_destroy_vport_work_array(phba, vports); 333 lpfc_destroy_vport_work_array(phba, vports);
@@ -5598,7 +5495,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
5598 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 5495 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5599 struct lpfc_hba *phba = vport->phba; 5496 struct lpfc_hba *phba = vport->phba;
5600 5497
5601 scsi_adjust_queue_depth(sdev, vport->cfg_lun_queue_depth); 5498 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
5602 5499
5603 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5500 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5604 lpfc_sli_handle_fast_ring_event(phba, 5501 lpfc_sli_handle_fast_ring_event(phba,
@@ -5981,9 +5878,10 @@ struct scsi_host_template lpfc_template = {
5981 .shost_attrs = lpfc_hba_attrs, 5878 .shost_attrs = lpfc_hba_attrs,
5982 .max_sectors = 0xFFFF, 5879 .max_sectors = 0xFFFF,
5983 .vendor_id = LPFC_NL_VENDOR_ID, 5880 .vendor_id = LPFC_NL_VENDOR_ID,
5984 .change_queue_depth = lpfc_change_queue_depth, 5881 .change_queue_depth = scsi_change_queue_depth,
5985 .change_queue_type = scsi_change_queue_type, 5882 .change_queue_type = scsi_change_queue_type,
5986 .use_blk_tags = 1, 5883 .use_blk_tags = 1,
5884 .track_queue_depth = 1,
5987}; 5885};
5988 5886
5989struct scsi_host_template lpfc_vport_template = { 5887struct scsi_host_template lpfc_vport_template = {
@@ -6005,7 +5903,8 @@ struct scsi_host_template lpfc_vport_template = {
6005 .use_clustering = ENABLE_CLUSTERING, 5903 .use_clustering = ENABLE_CLUSTERING,
6006 .shost_attrs = lpfc_vport_attrs, 5904 .shost_attrs = lpfc_vport_attrs,
6007 .max_sectors = 0xFFFF, 5905 .max_sectors = 0xFFFF,
6008 .change_queue_depth = lpfc_change_queue_depth, 5906 .change_queue_depth = scsi_change_queue_depth,
6009 .change_queue_type = scsi_change_queue_type, 5907 .change_queue_type = scsi_change_queue_type,
6010 .use_blk_tags = 1, 5908 .use_blk_tags = 1,
5909 .track_queue_depth = 1,
6011}; 5910};
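
lpfc's private queue-depth machinery goes the same way: the VARQUEDEPTH event posting and the reason switch are deleted, both host templates point ->change_queue_depth at the core helper, and the ramp-down worker above now hands its computed depth straight to scsi_change_queue_depth(). A driver that still wants its own logging around a depth change can wrap the helper; a sketch (mydrv is illustrative, lpfc itself no longer wraps it):

    static int mydrv_change_queue_depth(struct scsi_device *sdev, int qdepth)
    {
            int depth = scsi_change_queue_depth(sdev, qdepth);

            sdev_printk(KERN_INFO, sdev, "queue depth now %d\n", depth);
            return depth;
    }
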
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 6a039eb1cbce..953fd9b953c7 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -9,69 +9,62 @@
9 * Generic Generic NCR5380 driver 9 * Generic Generic NCR5380 driver
10 * 10 *
11 * Copyright 1995, Russell King 11 * Copyright 1995, Russell King
12 *
13 * ALPHA RELEASE 1.
14 *
15 * For more information, please consult
16 *
17 * NCR 5380 Family
18 * SCSI Protocol Controller
19 * Databook
20 *
21 * NCR Microelectronics
22 * 1635 Aeroplaza Drive
23 * Colorado Springs, CO 80916
24 * 1+ (719) 578-3400
25 * 1+ (800) 334-5454
26 */ 12 */
27 13
28#include <linux/types.h> 14#include <linux/types.h>
29#include <linux/stddef.h>
30#include <linux/ctype.h>
31#include <linux/delay.h> 15#include <linux/delay.h>
32
33#include <linux/module.h> 16#include <linux/module.h>
34#include <linux/signal.h>
35#include <linux/ioport.h> 17#include <linux/ioport.h>
36#include <linux/init.h> 18#include <linux/init.h>
37#include <linux/blkdev.h> 19#include <linux/blkdev.h>
38#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
39 22
23#include <asm/hwtest.h>
40#include <asm/io.h> 24#include <asm/io.h>
41#include <asm/irq.h>
42
43#include <asm/macintosh.h>
44#include <asm/macints.h> 25#include <asm/macints.h>
45#include <asm/mac_via.h> 26#include <asm/setup.h>
46 27
47#include "scsi.h"
48#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
49#include "mac_scsi.h"
50 29
51/* These control the behaviour of the generic 5380 core */ 30/* Definitions for the core NCR5380 driver. */
52#define AUTOSENSE 31
53#define PSEUDO_DMA 32#define PSEUDO_DMA
54 33
55#include "NCR5380.h" 34#define NCR5380_implementation_fields unsigned char *pdma_base
35#define NCR5380_local_declare() struct Scsi_Host *_instance
36#define NCR5380_setup(instance) _instance = instance
56 37
57#define RESET_BOOT 38#define NCR5380_read(reg) macscsi_read(_instance, reg)
58#define DRIVER_SETUP 39#define NCR5380_write(reg, value) macscsi_write(_instance, reg, value)
59 40
60extern void via_scsi_clear(void); 41#define NCR5380_pread macscsi_pread
42#define NCR5380_pwrite macscsi_pwrite
61 43
62#ifdef RESET_BOOT 44#define NCR5380_intr macscsi_intr
63static void mac_scsi_reset_boot(struct Scsi_Host *instance); 45#define NCR5380_queue_command macscsi_queue_command
64#endif 46#define NCR5380_abort macscsi_abort
47#define NCR5380_bus_reset macscsi_bus_reset
48#define NCR5380_info macscsi_info
49#define NCR5380_show_info macscsi_show_info
50#define NCR5380_write_info macscsi_write_info
51
52#include "NCR5380.h"
53
54#define RESET_BOOT
65 55
66static int setup_called = 0;
67static int setup_can_queue = -1; 56static int setup_can_queue = -1;
57module_param(setup_can_queue, int, 0);
68static int setup_cmd_per_lun = -1; 58static int setup_cmd_per_lun = -1;
59module_param(setup_cmd_per_lun, int, 0);
69static int setup_sg_tablesize = -1; 60static int setup_sg_tablesize = -1;
61module_param(setup_sg_tablesize, int, 0);
70static int setup_use_pdma = -1; 62static int setup_use_pdma = -1;
71#ifdef SUPPORT_TAGS 63module_param(setup_use_pdma, int, 0);
72static int setup_use_tagged_queuing = -1; 64static int setup_use_tagged_queuing = -1;
73#endif 65module_param(setup_use_tagged_queuing, int, 0);
74static int setup_hostid = -1; 66static int setup_hostid = -1;
67module_param(setup_hostid, int, 0);
75 68
76/* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms, 69/* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms,
77 * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more 70 * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more
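
This first mac_scsi hunk replaces the setup_called command-line parsing with module_param() (keeping the mac5380= boot option for built-in kernels) and, judging by the new platform_device.h include, moves the driver to the platform bus in place of the detect/release pair removed further down. It also supplies the core driver's glue as macros ahead of the NCR5380.h include, the pattern every 5380 board driver follows; the shape, taken from the hunk above:

    #define NCR5380_implementation_fields   unsigned char *pdma_base
    #define NCR5380_local_declare()         struct Scsi_Host *_instance
    #define NCR5380_setup(instance)         _instance = instance

    #define NCR5380_read(reg)               macscsi_read(_instance, reg)
    #define NCR5380_write(reg, value)       macscsi_write(_instance, reg, value)
    #define NCR5380_queue_command           macscsi_queue_command
    #define NCR5380_abort                   macscsi_abort

    #include "NCR5380.h"
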
@@ -84,232 +77,48 @@ static int setup_hostid = -1;
84#define AFTER_RESET_DELAY (HZ/2) 77#define AFTER_RESET_DELAY (HZ/2)
85#endif 78#endif
86 79
87static volatile unsigned char *mac_scsi_regp = NULL;
88static volatile unsigned char *mac_scsi_drq = NULL;
89static volatile unsigned char *mac_scsi_nodrq = NULL;
90
91
92/* 80/*
93 * NCR 5380 register access functions 81 * NCR 5380 register access functions
94 */ 82 */
95 83
96#if 0 84static inline char macscsi_read(struct Scsi_Host *instance, int reg)
97/* Debug versions */
98#define CTRL(p,v) (*ctrl = (v))
99
100static char macscsi_read(struct Scsi_Host *instance, int reg)
101{ 85{
102 int iobase = instance->io_port; 86 return in_8(instance->base + (reg << 4));
103 int i;
104 int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;
105
106 CTRL(iobase, 0);
107 i = in_8(iobase + (reg<<4));
108 CTRL(iobase, 0x40);
109
110 return i;
111} 87}
112 88
113static void macscsi_write(struct Scsi_Host *instance, int reg, int value) 89static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value)
114{
115 int iobase = instance->io_port;
116 int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;
117
118 CTRL(iobase, 0);
119 out_8(iobase + (reg<<4), value);
120 CTRL(iobase, 0x40);
121}
122#else
123
124/* Fast versions */
125static __inline__ char macscsi_read(struct Scsi_Host *instance, int reg)
126{ 90{
127 return in_8(instance->io_port + (reg<<4)); 91 out_8(instance->base + (reg << 4), value);
128} 92}
129 93
130static __inline__ void macscsi_write(struct Scsi_Host *instance, int reg, int value) 94#ifndef MODULE
95static int __init mac_scsi_setup(char *str)
131{ 96{
132 out_8(instance->io_port + (reg<<4), value); 97 int ints[7];
133}
134#endif
135
136 98
137/* 99 (void)get_options(str, ARRAY_SIZE(ints), ints);
138 * Function : mac_scsi_setup(char *str)
139 *
140 * Purpose : booter command line initialization of the overrides array,
141 *
142 * Inputs : str - comma delimited list of options
143 *
144 */
145 100
146static int __init mac_scsi_setup(char *str) { 101 if (ints[0] < 1 || ints[0] > 6) {
147#ifdef DRIVER_SETUP 102 pr_err("Usage: mac5380=<can_queue>[,<cmd_per_lun>[,<sg_tablesize>[,<hostid>[,<use_tags>[,<use_pdma>]]]]]\n");
148 int ints[7]; 103 return 0;
149
150 (void)get_options( str, ARRAY_SIZE(ints), ints);
151
152 if (setup_called++ || ints[0] < 1 || ints[0] > 6) {
153 printk(KERN_WARNING "scsi: <mac5380>"
154 " Usage: mac5380=<can_queue>[,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>,<use_pdma>]\n");
155 printk(KERN_ALERT "scsi: <mac5380> Bad Penguin parameters?\n");
156 return 0;
157 }
158
159 if (ints[0] >= 1) {
160 if (ints[1] > 0)
161 /* no limits on this, just > 0 */
162 setup_can_queue = ints[1];
163 }
164 if (ints[0] >= 2) {
165 if (ints[2] > 0)
166 setup_cmd_per_lun = ints[2];
167 }
168 if (ints[0] >= 3) {
169 if (ints[3] >= 0) {
170 setup_sg_tablesize = ints[3];
171 /* Must be <= SG_ALL (255) */
172 if (setup_sg_tablesize > SG_ALL)
173 setup_sg_tablesize = SG_ALL;
174 }
175 }
176 if (ints[0] >= 4) {
177 /* Must be between 0 and 7 */
178 if (ints[4] >= 0 && ints[4] <= 7)
179 setup_hostid = ints[4];
180 else if (ints[4] > 7)
181 printk(KERN_WARNING "mac_scsi_setup: invalid host ID %d !\n", ints[4] );
182 }
183#ifdef SUPPORT_TAGS
184 if (ints[0] >= 5) {
185 if (ints[5] >= 0)
186 setup_use_tagged_queuing = !!ints[5];
187 } 104 }
188 105 if (ints[0] >= 1)
189 if (ints[0] == 6) { 106 setup_can_queue = ints[1];
190 if (ints[6] >= 0) 107 if (ints[0] >= 2)
108 setup_cmd_per_lun = ints[2];
109 if (ints[0] >= 3)
110 setup_sg_tablesize = ints[3];
111 if (ints[0] >= 4)
112 setup_hostid = ints[4];
113 if (ints[0] >= 5)
114 setup_use_tagged_queuing = ints[5];
115 if (ints[0] >= 6)
191 setup_use_pdma = ints[6]; 116 setup_use_pdma = ints[6];
192 }
193#else
194 if (ints[0] == 5) {
195 if (ints[5] >= 0)
196 setup_use_pdma = ints[5];
197 }
198#endif /* SUPPORT_TAGS */
199
200#endif /* DRIVER_SETUP */
201 return 1; 117 return 1;
202} 118}
203 119
204__setup("mac5380=", mac_scsi_setup); 120__setup("mac5380=", mac_scsi_setup);
205 121#endif /* !MODULE */
206/*
207 * Function : int macscsi_detect(struct scsi_host_template * tpnt)
208 *
209 * Purpose : initializes mac NCR5380 driver based on the
210 * command line / compile time port and irq definitions.
211 *
212 * Inputs : tpnt - template for this SCSI adapter.
213 *
214 * Returns : 1 if a host adapter was found, 0 if not.
215 *
216 */
217
218int __init macscsi_detect(struct scsi_host_template * tpnt)
219{
220 static int called = 0;
221 int flags = 0;
222 struct Scsi_Host *instance;
223
224 if (!MACH_IS_MAC || called)
225 return( 0 );
226
227 if (macintosh_config->scsi_type != MAC_SCSI_OLD)
228 return( 0 );
229
230 /* setup variables */
231 tpnt->can_queue =
232 (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE;
233 tpnt->cmd_per_lun =
234 (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN;
235 tpnt->sg_tablesize =
236 (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE;
237
238 if (setup_hostid >= 0)
239 tpnt->this_id = setup_hostid;
240 else {
241 /* use 7 as default */
242 tpnt->this_id = 7;
243 }
244
245#ifdef SUPPORT_TAGS
246 if (setup_use_tagged_queuing < 0)
247 setup_use_tagged_queuing = USE_TAGGED_QUEUING;
248#endif
249
250 /* Once we support multiple 5380s (e.g. DuoDock) we'll do
251 something different here */
252 instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
253 if (instance == NULL)
254 return 0;
255
256 if (macintosh_config->ident == MAC_MODEL_IIFX) {
257 mac_scsi_regp = via1+0x8000;
258 mac_scsi_drq = via1+0xE000;
259 mac_scsi_nodrq = via1+0xC000;
260 /* The IIFX should be able to do true DMA, but pseudo-dma doesn't work */
261 flags = FLAG_NO_PSEUDO_DMA;
262 } else {
263 mac_scsi_regp = via1+0x10000;
264 mac_scsi_drq = via1+0x6000;
265 mac_scsi_nodrq = via1+0x12000;
266 }
267
268 if (! setup_use_pdma)
269 flags = FLAG_NO_PSEUDO_DMA;
270
271 instance->io_port = (unsigned long) mac_scsi_regp;
272 instance->irq = IRQ_MAC_SCSI;
273
274#ifdef RESET_BOOT
275 mac_scsi_reset_boot(instance);
276#endif
277
278 NCR5380_init(instance, flags);
279
280 instance->n_io_port = 255;
281
282 ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
283
284 if (instance->irq != SCSI_IRQ_NONE)
285 if (request_irq(instance->irq, NCR5380_intr, 0, "ncr5380", instance)) {
286 printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
287 instance->host_no, instance->irq);
288 instance->irq = SCSI_IRQ_NONE;
289 }
290
291 printk(KERN_INFO "scsi%d: generic 5380 at port %lX irq", instance->host_no, instance->io_port);
292 if (instance->irq == SCSI_IRQ_NONE)
293 printk (KERN_INFO "s disabled");
294 else
295 printk (KERN_INFO " %d", instance->irq);
296 printk(KERN_INFO " options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
297 instance->can_queue, instance->cmd_per_lun, MACSCSI_PUBLIC_RELEASE);
298 printk(KERN_INFO "\nscsi%d:", instance->host_no);
299 NCR5380_print_options(instance);
300 printk("\n");
301 called = 1;
302 return 1;
303}
304
305int macscsi_release (struct Scsi_Host *shpnt)
306{
307 if (shpnt->irq != SCSI_IRQ_NONE)
308 free_irq(shpnt->irq, shpnt);
309 NCR5380_exit(shpnt);
310
311 return 0;
312}
313 122
314#ifdef RESET_BOOT 123#ifdef RESET_BOOT
315/* 124/*
@@ -349,10 +158,7 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
349} 158}
350#endif 159#endif
351 160
352const char * macscsi_info (struct Scsi_Host *spnt) { 161#ifdef PSEUDO_DMA
353 return "";
354}
355
356/* 162/*
357 Pseudo-DMA: (Ove Edlund) 163 Pseudo-DMA: (Ove Edlund)
358 The code attempts to catch bus errors that occur if one for example 164 The code attempts to catch bus errors that occur if one for example
@@ -422,38 +228,39 @@ __asm__ __volatile__ \
422 : "0"(s), "1"(d), "2"(len) \ 228 : "0"(s), "1"(d), "2"(len) \
423 : "d0") 229 : "d0")
424 230
425 231static int macscsi_pread(struct Scsi_Host *instance,
426static int macscsi_pread (struct Scsi_Host *instance, 232 unsigned char *dst, int len)
427 unsigned char *dst, int len)
428{ 233{
429 unsigned char *d; 234 struct NCR5380_hostdata *hostdata = shost_priv(instance);
430 volatile unsigned char *s; 235 unsigned char *d;
431 236 unsigned char *s;
432 NCR5380_local_declare(); 237
433 NCR5380_setup(instance); 238 NCR5380_local_declare();
434 239 NCR5380_setup(instance);
435 s = mac_scsi_drq+0x60; 240
436 d = dst; 241 s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
437 242 d = dst;
438/* These conditions are derived from MacOS */ 243
439 244 /* These conditions are derived from MacOS */
440 while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) 245
441 && !(NCR5380_read(STATUS_REG) & SR_REQ)) 246 while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
442 ; 247 !(NCR5380_read(STATUS_REG) & SR_REQ))
443 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) 248 ;
444 && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { 249
445 printk(KERN_ERR "Error in macscsi_pread\n"); 250 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
446 return -1; 251 (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
447 } 252 pr_err("Error in macscsi_pread\n");
448 253 return -1;
449 CP_IO_TO_MEM(s, d, len); 254 }
450 255
451 if (len != 0) { 256 CP_IO_TO_MEM(s, d, len);
452 printk(KERN_NOTICE "Bus error in macscsi_pread\n"); 257
453 return -1; 258 if (len != 0) {
454 } 259 pr_notice("Bus error in macscsi_pread\n");
455 260 return -1;
456 return 0; 261 }
262
263 return 0;
457} 264}
458 265
459 266
@@ -515,59 +322,172 @@ __asm__ __volatile__ \
515 : "0"(s), "1"(d), "2"(len) \ 322 : "0"(s), "1"(d), "2"(len) \
516 : "d0") 323 : "d0")
517 324
518static int macscsi_pwrite (struct Scsi_Host *instance, 325static int macscsi_pwrite(struct Scsi_Host *instance,
519 unsigned char *src, int len) 326 unsigned char *src, int len)
520{ 327{
521 unsigned char *s; 328 struct NCR5380_hostdata *hostdata = shost_priv(instance);
522 volatile unsigned char *d; 329 unsigned char *s;
523 330 unsigned char *d;
524 NCR5380_local_declare(); 331
525 NCR5380_setup(instance); 332 NCR5380_local_declare();
526 333 NCR5380_setup(instance);
527 s = src; 334
528 d = mac_scsi_drq; 335 s = src;
529 336 d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
530/* These conditions are derived from MacOS */ 337
531 338 /* These conditions are derived from MacOS */
532 while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) 339
533 && (!(NCR5380_read(STATUS_REG) & SR_REQ) 340 while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
534 || (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))) 341 (!(NCR5380_read(STATUS_REG) & SR_REQ) ||
535 ; 342 (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)))
536 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) { 343 ;
537 printk(KERN_ERR "Error in macscsi_pwrite\n"); 344
538 return -1; 345 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) {
539 } 346 pr_err("Error in macscsi_pwrite\n");
540 347 return -1;
541 CP_MEM_TO_IO(s, d, len); 348 }
542 349
543 if (len != 0) { 350 CP_MEM_TO_IO(s, d, len);
544 printk(KERN_NOTICE "Bus error in macscsi_pwrite\n"); 351
545 return -1; 352 if (len != 0) {
546 } 353 pr_notice("Bus error in macscsi_pwrite\n");
547 354 return -1;
548 return 0; 355 }
549}
550 356
357 return 0;
358}
359#endif
551 360
552#include "NCR5380.c" 361#include "NCR5380.c"
553 362
554static struct scsi_host_template driver_template = { 363#define DRV_MODULE_NAME "mac_scsi"
555 .proc_name = "Mac5380", 364#define PFX DRV_MODULE_NAME ": "
365
366static struct scsi_host_template mac_scsi_template = {
367 .module = THIS_MODULE,
368 .proc_name = DRV_MODULE_NAME,
556 .show_info = macscsi_show_info, 369 .show_info = macscsi_show_info,
557 .write_info = macscsi_write_info, 370 .write_info = macscsi_write_info,
558 .name = "Macintosh NCR5380 SCSI", 371 .name = "Macintosh NCR5380 SCSI",
559 .detect = macscsi_detect,
560 .release = macscsi_release,
561 .info = macscsi_info, 372 .info = macscsi_info,
562 .queuecommand = macscsi_queue_command, 373 .queuecommand = macscsi_queue_command,
563 .eh_abort_handler = macscsi_abort, 374 .eh_abort_handler = macscsi_abort,
564 .eh_bus_reset_handler = macscsi_bus_reset, 375 .eh_bus_reset_handler = macscsi_bus_reset,
565 .can_queue = CAN_QUEUE, 376 .can_queue = 16,
566 .this_id = 7, 377 .this_id = 7,
567 .sg_tablesize = SG_ALL, 378 .sg_tablesize = SG_ALL,
568 .cmd_per_lun = CMD_PER_LUN, 379 .cmd_per_lun = 2,
569 .use_clustering = DISABLE_CLUSTERING 380 .use_clustering = DISABLE_CLUSTERING
570}; 381};
571 382
383static int __init mac_scsi_probe(struct platform_device *pdev)
384{
385 struct Scsi_Host *instance;
386 int error;
387 int host_flags = 0;
388 struct resource *irq, *pio_mem, *pdma_mem = NULL;
389
390 pio_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
391 if (!pio_mem)
392 return -ENODEV;
393
394#ifdef PSEUDO_DMA
395 pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
396#endif
397
398 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
399
400 if (!hwreg_present((unsigned char *)pio_mem->start +
401 (STATUS_REG << 4))) {
402 pr_info(PFX "no device detected at %pap\n", &pio_mem->start);
403 return -ENODEV;
404 }
405
406 if (setup_can_queue > 0)
407 mac_scsi_template.can_queue = setup_can_queue;
408 if (setup_cmd_per_lun > 0)
409 mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
410 if (setup_sg_tablesize >= 0)
411 mac_scsi_template.sg_tablesize = setup_sg_tablesize;
412 if (setup_hostid >= 0)
413 mac_scsi_template.this_id = setup_hostid & 7;
414 if (setup_use_pdma < 0)
415 setup_use_pdma = 0;
416
417 instance = scsi_host_alloc(&mac_scsi_template,
418 sizeof(struct NCR5380_hostdata));
419 if (!instance)
420 return -ENOMEM;
421
422 instance->base = pio_mem->start;
423 if (irq)
424 instance->irq = irq->start;
425 else
426 instance->irq = NO_IRQ;
427
428 if (pdma_mem && setup_use_pdma) {
429 struct NCR5380_hostdata *hostdata = shost_priv(instance);
430
431 hostdata->pdma_base = (unsigned char *)pdma_mem->start;
432 } else
433 host_flags |= FLAG_NO_PSEUDO_DMA;
434
435#ifdef RESET_BOOT
436 mac_scsi_reset_boot(instance);
437#endif
438
439#ifdef SUPPORT_TAGS
440 host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
441#endif
442
443 NCR5380_init(instance, host_flags);
444
445 if (instance->irq != NO_IRQ) {
446 error = request_irq(instance->irq, macscsi_intr, IRQF_SHARED,
447 "NCR5380", instance);
448 if (error)
449 goto fail_irq;
450 }
451
452 error = scsi_add_host(instance, NULL);
453 if (error)
454 goto fail_host;
455
456 platform_set_drvdata(pdev, instance);
457
458 scsi_scan_host(instance);
459 return 0;
460
461fail_host:
462 if (instance->irq != NO_IRQ)
463 free_irq(instance->irq, instance);
464fail_irq:
465 NCR5380_exit(instance);
466 scsi_host_put(instance);
467 return error;
468}
469
470static int __exit mac_scsi_remove(struct platform_device *pdev)
471{
472 struct Scsi_Host *instance = platform_get_drvdata(pdev);
473
474 scsi_remove_host(instance);
475 if (instance->irq != NO_IRQ)
476 free_irq(instance->irq, instance);
477 NCR5380_exit(instance);
478 scsi_host_put(instance);
479 return 0;
480}
481
482static struct platform_driver mac_scsi_driver = {
483 .remove = __exit_p(mac_scsi_remove),
484 .driver = {
485 .name = DRV_MODULE_NAME,
486 .owner = THIS_MODULE,
487 },
488};
489
490module_platform_driver_probe(mac_scsi_driver, mac_scsi_probe);
572 491
573#include "scsi_module.c" 492MODULE_ALIAS("platform:" DRV_MODULE_NAME);
493MODULE_LICENSE("GPL");
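With detect/release gone, the driver depends on the m68k board code to publish the register block, the pseudo-DMA window and the IRQ as platform resources, which mac_scsi_probe() above fetches with platform_get_resource(). A hedged sketch of such a registration (the addresses are illustrative, derived from the old via1-relative offsets deleted earlier; the real tables live in arch/m68k/mac/config.c):

static struct resource mac_scsi_rsrc[] = {
	{	/* NCR5380 register block (was via1 + 0x10000) */
		.flags = IORESOURCE_MEM,
		.start = 0x50010000, .end = 0x50011fff,
	}, {	/* pseudo-DMA window (was via1 + 0x6000) */
		.flags = IORESOURCE_MEM,
		.start = 0x50006000, .end = 0x50007fff,
	}, {
		.flags = IORESOURCE_IRQ,
		.start = IRQ_MAC_SCSI, .end = IRQ_MAC_SCSI,
	},
};

platform_device_register_simple("mac_scsi", -1, mac_scsi_rsrc,
				ARRAY_SIZE(mac_scsi_rsrc));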
diff --git a/drivers/scsi/mac_scsi.h b/drivers/scsi/mac_scsi.h
deleted file mode 100644
index 06969b06e54b..000000000000
--- a/drivers/scsi/mac_scsi.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * Cumana Generic NCR5380 driver defines
3 *
4 * Copyright 1993, Drew Eckhardt
5 * Visionary Computing
6 * (Unix and Linux consulting and custom programming)
7 * drew@colorado.edu
8 * +1 (303) 440-4894
9 *
10 * ALPHA RELEASE 1.
11 *
12 * For more information, please consult
13 *
14 * NCR 5380 Family
15 * SCSI Protocol Controller
16 * Databook
17 *
18 * NCR Microelectronics
19 * 1635 Aeroplaza Drive
20 * Colorado Springs, CO 80916
21 * 1+ (719) 578-3400
22 * 1+ (800) 334-5454
23 */
24
25#ifndef MAC_NCR5380_H
26#define MAC_NCR5380_H
27
28#define MACSCSI_PUBLIC_RELEASE 2
29
30#ifndef ASM
31
32#ifndef CMD_PER_LUN
33#define CMD_PER_LUN 2
34#endif
35
36#ifndef CAN_QUEUE
37#define CAN_QUEUE 16
38#endif
39
40#ifndef SG_TABLESIZE
41#define SG_TABLESIZE SG_NONE
42#endif
43
44#ifndef USE_TAGGED_QUEUING
45#define USE_TAGGED_QUEUING 0
46#endif
47
48#include <scsi/scsicam.h>
49
50#define NCR5380_implementation_fields \
51 int port, ctrl
52
53#define NCR5380_local_declare() \
54 struct Scsi_Host *_instance
55
56#define NCR5380_setup(instance) \
57 _instance = instance
58
59#define NCR5380_read(reg) macscsi_read(_instance, reg)
60#define NCR5380_write(reg, value) macscsi_write(_instance, reg, value)
61
62#define NCR5380_pread macscsi_pread
63#define NCR5380_pwrite macscsi_pwrite
64
65#define NCR5380_intr macscsi_intr
66#define NCR5380_queue_command macscsi_queue_command
67#define NCR5380_abort macscsi_abort
68#define NCR5380_bus_reset macscsi_bus_reset
69#define NCR5380_show_info macscsi_show_info
70#define NCR5380_write_info macscsi_write_info
71
72#endif /* ndef ASM */
73#endif /* MAC_NCR5380_H */
74
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 6b077d839f2b..f0987f22ea70 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -332,27 +332,6 @@ static struct device_attribute *megaraid_sdev_attrs[] = {
332 NULL, 332 NULL,
333}; 333};
334 334
335/**
336 * megaraid_change_queue_depth - Change the device's queue depth
337 * @sdev: scsi device struct
338 * @qdepth: depth to set
339 * @reason: calling context
340 *
341 * Return value:
342 * actual depth set
343 */
344static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth,
345 int reason)
346{
347 if (reason != SCSI_QDEPTH_DEFAULT)
348 return -EOPNOTSUPP;
349
350 if (qdepth > MBOX_MAX_SCSI_CMDS)
351 qdepth = MBOX_MAX_SCSI_CMDS;
352 scsi_adjust_queue_depth(sdev, qdepth);
353 return sdev->queue_depth;
354}
355
356/* 335/*
357 * Scsi host template for megaraid unified driver 336 * Scsi host template for megaraid unified driver
358 */ 337 */
@@ -365,7 +344,7 @@ static struct scsi_host_template megaraid_template_g = {
365 .eh_device_reset_handler = megaraid_reset_handler, 344 .eh_device_reset_handler = megaraid_reset_handler,
366 .eh_bus_reset_handler = megaraid_reset_handler, 345 .eh_bus_reset_handler = megaraid_reset_handler,
367 .eh_host_reset_handler = megaraid_reset_handler, 346 .eh_host_reset_handler = megaraid_reset_handler,
368 .change_queue_depth = megaraid_change_queue_depth, 347 .change_queue_depth = scsi_change_queue_depth,
369 .use_clustering = ENABLE_CLUSTERING, 348 .use_clustering = ENABLE_CLUSTERING,
370 .no_write_same = 1, 349 .no_write_same = 1,
371 .sdev_attrs = megaraid_sdev_attrs, 350 .sdev_attrs = megaraid_sdev_attrs,
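The deleted callback only clamped the requested depth and forwarded it, which is exactly what the mid-layer's scsi_change_queue_depth() now provides, so the template can reference the core helper directly. A driver that still needs a private ceiling would keep a thin new-style wrapper, sketched here after the removed clamp (illustrative, not from the patch):

static int example_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	/* enforce the driver's hard limit, then let the core do the rest */
	if (qdepth > MBOX_MAX_SCSI_CMDS)
		qdepth = MBOX_MAX_SCSI_CMDS;
	return scsi_change_queue_depth(sdev, qdepth);
}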
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a49914de4b95..0d44d91c2fce 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2003-2012 LSI Corporation. 4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -14,17 +15,18 @@
14 * GNU General Public License for more details. 15 * GNU General Public License for more details.
15 * 16 *
16 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas.h 20 * FILE: megaraid_sas.h
21 * 21 *
22 * Authors: LSI Corporation 22 * Authors: Avago Technologies
23 * Kashyap Desai <kashyap.desai@avagotech.com>
24 * Sumit Saxena <sumit.saxena@avagotech.com>
23 * 25 *
24 * Send feedback to: <megaraidlinux@lsi.com> 26 * Send feedback to: megaraidlinux.pdl@avagotech.com
25 * 27 *
26 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 28 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
27 * ATTN: Linuxraid 29 * San Jose, California 95131
28 */ 30 */
29 31
30#ifndef LSI_MEGARAID_SAS_H 32#ifndef LSI_MEGARAID_SAS_H
@@ -33,9 +35,7 @@
33/* 35/*
34 * MegaRAID SAS Driver meta data 36 * MegaRAID SAS Driver meta data
35 */ 37 */
36#define MEGASAS_VERSION "06.805.06.00-rc1" 38#define MEGASAS_VERSION "06.805.06.01-rc1"
37#define MEGASAS_RELDATE "Sep. 4, 2014"
38#define MEGASAS_EXT_VERSION "Thu. Sep. 4 17:00:00 PDT 2014"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -1931,8 +1931,7 @@ u16 get_updated_dev_handle(struct megasas_instance *instance,
1931 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); 1931 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
1932void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, 1932void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
1933 struct LD_LOAD_BALANCE_INFO *lbInfo); 1933 struct LD_LOAD_BALANCE_INFO *lbInfo);
1934int megasas_get_ctrl_info(struct megasas_instance *instance, 1934int megasas_get_ctrl_info(struct megasas_instance *instance);
1935 struct megasas_ctrl_info *ctrl_info);
1936int megasas_set_crash_dump_params(struct megasas_instance *instance, 1935int megasas_set_crash_dump_params(struct megasas_instance *instance,
1937 u8 crash_buf_state); 1936 u8 crash_buf_state);
1938void megasas_free_host_crash_buffer(struct megasas_instance *instance); 1937void megasas_free_host_crash_buffer(struct megasas_instance *instance);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 107244cebd22..f05580e693d0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2003-2012 LSI Corporation. 4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -14,22 +15,20 @@
14 * GNU General Public License for more details. 15 * GNU General Public License for more details.
15 * 16 *
16 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * Authors: Avago Technologies
21 * Version : 06.805.06.00-rc1
22 *
23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 21 * Sreenivas Bagalkote
25 * Sumant Patro 22 * Sumant Patro
26 * Bo Yang 23 * Bo Yang
27 * Adam Radford <linuxraid@lsi.com> 24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
28 * 27 *
29 * Send feedback to: <megaraidlinux@lsi.com> 28 * Send feedback to: megaraidlinux.pdl@avagotech.com
30 * 29 *
31 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
32 * ATTN: Linuxraid 31 * San Jose, California 95131
33 */ 32 */
34 33
35#include <linux/kernel.h> 34#include <linux/kernel.h>
@@ -1008,7 +1007,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1008 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); 1007 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1009 1008
1010 cmd->sync_cmd = 1; 1009 cmd->sync_cmd = 1;
1011 cmd->cmd_status = 0xFF; 1010 cmd->cmd_status = ENODATA;
1012 1011
1013 instance->instancet->issue_dcmd(instance, cmd); 1012 instance->instancet->issue_dcmd(instance, cmd);
1014 1013
@@ -1572,6 +1571,12 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1572 instance = (struct megasas_instance *) 1571 instance = (struct megasas_instance *)
1573 scmd->device->host->hostdata; 1572 scmd->device->host->hostdata;
1574 1573
1574 if (instance->unload == 1) {
1575 scmd->result = DID_NO_CONNECT << 16;
1576 scmd->scsi_done(scmd);
1577 return 0;
1578 }
1579
1575 if (instance->issuepend_done == 0) 1580 if (instance->issuepend_done == 0)
1576 return SCSI_MLQUEUE_HOST_BUSY; 1581 return SCSI_MLQUEUE_HOST_BUSY;
1577 1582
@@ -2586,19 +2591,6 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2586 } 2591 }
2587} 2592}
2588 2593
2589static int megasas_change_queue_depth(struct scsi_device *sdev,
2590 int queue_depth, int reason)
2591{
2592 if (reason != SCSI_QDEPTH_DEFAULT)
2593 return -EOPNOTSUPP;
2594
2595 if (queue_depth > sdev->host->can_queue)
2596 queue_depth = sdev->host->can_queue;
2597 scsi_adjust_queue_depth(sdev, queue_depth);
2598
2599 return queue_depth;
2600}
2601
2602static ssize_t 2594static ssize_t
2603megasas_fw_crash_buffer_store(struct device *cdev, 2595megasas_fw_crash_buffer_store(struct device *cdev,
2604 struct device_attribute *attr, const char *buf, size_t count) 2596 struct device_attribute *attr, const char *buf, size_t count)
@@ -2763,7 +2755,7 @@ static struct scsi_host_template megasas_template = {
2763 .shost_attrs = megaraid_host_attrs, 2755 .shost_attrs = megaraid_host_attrs,
2764 .bios_param = megasas_bios_param, 2756 .bios_param = megasas_bios_param,
2765 .use_clustering = ENABLE_CLUSTERING, 2757 .use_clustering = ENABLE_CLUSTERING,
2766 .change_queue_depth = megasas_change_queue_depth, 2758 .change_queue_depth = scsi_change_queue_depth,
2767 .no_write_same = 1, 2759 .no_write_same = 1,
2768}; 2760};
2769 2761
@@ -4027,25 +4019,83 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4027 return ret; 4019 return ret;
4028} 4020}
4029 4021
4022/*
4023 * megasas_update_ext_vd_details : Update details w.r.t Extended VD
4024 * instance : Controller's instance
4025*/
4026static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4027{
4028 struct fusion_context *fusion;
4029 u32 old_map_sz;
4030 u32 new_map_sz;
4031
4032 fusion = instance->ctrl_context;
4033 /* For MFI based controllers return dummy success */
4034 if (!fusion)
4035 return;
4036
4037 instance->supportmax256vd =
4038 instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
4039 /* Below is additional check to address future FW enhancement */
4040 if (instance->ctrl_info->max_lds > 64)
4041 instance->supportmax256vd = 1;
4042
4043 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4044 * MEGASAS_MAX_DEV_PER_CHANNEL;
4045 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4046 * MEGASAS_MAX_DEV_PER_CHANNEL;
4047 if (instance->supportmax256vd) {
4048 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4049 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4050 } else {
4051 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4052 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4053 }
4054 dev_info(&instance->pdev->dev, "Firmware supports %d VD %d PD\n",
4055 instance->fw_supported_vd_count,
4056 instance->fw_supported_pd_count);
4057 dev_info(&instance->pdev->dev, "Driver supports %d VD %d PD\n",
4058 instance->drv_supported_vd_count,
4059 instance->drv_supported_pd_count);
4060
4061 old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
4062 (sizeof(struct MR_LD_SPAN_MAP) *
4063 (instance->fw_supported_vd_count - 1));
4064 new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
4065 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
4066 (sizeof(struct MR_LD_SPAN_MAP) *
4067 (instance->drv_supported_vd_count - 1));
4068
4069 fusion->max_map_sz = max(old_map_sz, new_map_sz);
4070
4071
4072 if (instance->supportmax256vd)
4073 fusion->current_map_sz = new_map_sz;
4074 else
4075 fusion->current_map_sz = old_map_sz;
4076
4077}
4078
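megasas_update_ext_vd_details() centralises the raid-map sizing that a later hunk removes from megasas_init_adapter_fusion(). A worked example of the arithmetic, assuming the usual header values MAX_LOGICAL_DRIVES = 64 and MAX_LOGICAL_DRIVES_EXT = 256:

/* Non-extended firmware: one MR_LD_SPAN_MAP is embedded in the map
 * and the remaining 63 follow it; extended firmware is fixed-size. */
old_map_sz = sizeof(struct MR_FW_RAID_MAP)
	   + 63 * sizeof(struct MR_LD_SPAN_MAP);
new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
max_map_sz = max(old_map_sz, new_map_sz);	/* one buffer fits either */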
4030/** 4079/**
4031 * megasas_get_controller_info - Returns FW's controller structure 4080 * megasas_get_controller_info - Returns FW's controller structure
4032 * @instance: Adapter soft state 4081 * @instance: Adapter soft state
4033 * @ctrl_info: Controller information structure
4034 * 4082 *
4035 * Issues an internal command (DCMD) to get the FW's controller structure. 4083 * Issues an internal command (DCMD) to get the FW's controller structure.
4036 * This information is mainly used to find out the maximum IO transfer per 4084 * This information is mainly used to find out the maximum IO transfer per
4037 * command supported by the FW. 4085 * command supported by the FW.
4038 */ 4086 */
4039int 4087int
4040megasas_get_ctrl_info(struct megasas_instance *instance, 4088megasas_get_ctrl_info(struct megasas_instance *instance)
4041 struct megasas_ctrl_info *ctrl_info)
4042{ 4089{
4043 int ret = 0; 4090 int ret = 0;
4044 struct megasas_cmd *cmd; 4091 struct megasas_cmd *cmd;
4045 struct megasas_dcmd_frame *dcmd; 4092 struct megasas_dcmd_frame *dcmd;
4046 struct megasas_ctrl_info *ci; 4093 struct megasas_ctrl_info *ci;
4094 struct megasas_ctrl_info *ctrl_info;
4047 dma_addr_t ci_h = 0; 4095 dma_addr_t ci_h = 0;
4048 4096
4097 ctrl_info = instance->ctrl_info;
4098
4049 cmd = megasas_get_cmd(instance); 4099 cmd = megasas_get_cmd(instance);
4050 4100
4051 if (!cmd) { 4101 if (!cmd) {
@@ -4085,8 +4135,13 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
4085 else 4135 else
4086 ret = megasas_issue_polled(instance, cmd); 4136 ret = megasas_issue_polled(instance, cmd);
4087 4137
4088 if (!ret) 4138 if (!ret) {
4089 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); 4139 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
4140 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4141 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4142 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4143 megasas_update_ext_vd_details(instance);
4144 }
4090 4145
4091 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), 4146 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
4092 ci, ci_h); 4147 ci, ci_h);
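Folding the le32_to_cpus() calls into megasas_get_ctrl_info() swaps the firmware structure exactly once, right after the DCMD copy-out, instead of piecemeal at each consumer; the later hunks in megasas_init_fw() drop the now-redundant scattered swaps. The in-place helper differs from the value-returning one roughly like this (illustrative):

__le32 wire = cpu_to_le32(0x12345678);
u32 copy = le32_to_cpu(wire);	/* wire unchanged, copy is host order */
le32_to_cpus((u32 *)&wire);	/* wire itself now holds host order   */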
@@ -4288,7 +4343,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
4288 if (megasas_issue_init_mfi(instance)) 4343 if (megasas_issue_init_mfi(instance))
4289 goto fail_fw_init; 4344 goto fail_fw_init;
4290 4345
4291 if (megasas_get_ctrl_info(instance, instance->ctrl_info)) { 4346 if (megasas_get_ctrl_info(instance)) {
4292 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 4347 dev_err(&instance->pdev->dev, "(%d): Could get controller info "
4293 "Fail from %s %d\n", instance->unique_id, 4348 "Fail from %s %d\n", instance->unique_id,
4294 __func__, __LINE__); 4349 __func__, __LINE__);
@@ -4526,12 +4581,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
4526 dev_info(&instance->pdev->dev, 4581 dev_info(&instance->pdev->dev,
4527 "Controller type: iMR\n"); 4582 "Controller type: iMR\n");
4528 } 4583 }
4529 /* OnOffProperties are converted into CPU arch*/
4530 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4531 instance->disableOnlineCtrlReset = 4584 instance->disableOnlineCtrlReset =
4532 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 4585 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
4533 /* adapterOperations2 are converted into CPU arch*/
4534 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4535 instance->mpio = ctrl_info->adapterOperations2.mpio; 4586 instance->mpio = ctrl_info->adapterOperations2.mpio;
4536 instance->UnevenSpanSupport = 4587 instance->UnevenSpanSupport =
4537 ctrl_info->adapterOperations2.supportUnevenSpans; 4588 ctrl_info->adapterOperations2.supportUnevenSpans;
@@ -4561,7 +4612,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
4561 "requestorId %d\n", instance->requestorId); 4612 "requestorId %d\n", instance->requestorId);
4562 } 4613 }
4563 4614
4564 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4565 instance->crash_dump_fw_support = 4615 instance->crash_dump_fw_support =
4566 ctrl_info->adapterOperations3.supportCrashDump; 4616 ctrl_info->adapterOperations3.supportCrashDump;
4567 instance->crash_dump_drv_support = 4617 instance->crash_dump_drv_support =
@@ -4586,8 +4636,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
4586 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 4636 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
4587 instance->max_sectors_per_req = tmp_sectors; 4637 instance->max_sectors_per_req = tmp_sectors;
4588 4638
4589 kfree(ctrl_info);
4590
4591 /* Check for valid throttlequeuedepth module parameter */ 4639 /* Check for valid throttlequeuedepth module parameter */
4592 if (instance->is_imr) { 4640 if (instance->is_imr) {
4593 if (throttlequeuedepth > (instance->max_fw_cmds - 4641 if (throttlequeuedepth > (instance->max_fw_cmds -
@@ -4956,10 +5004,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
4956 return -ENODEV; 5004 return -ENODEV;
4957 } 5005 }
4958 5006
4959 /*
4960 * Trigger SCSI to scan our drives
4961 */
4962 scsi_scan_host(host);
4963 return 0; 5007 return 0;
4964} 5008}
4965 5009
@@ -5082,6 +5126,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
5082 goto fail_alloc_dma_buf; 5126 goto fail_alloc_dma_buf;
5083 } 5127 }
5084 fusion = instance->ctrl_context; 5128 fusion = instance->ctrl_context;
5129 memset(fusion, 0,
5130 ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
5085 INIT_LIST_HEAD(&fusion->cmd_pool); 5131 INIT_LIST_HEAD(&fusion->cmd_pool);
5086 spin_lock_init(&fusion->mpt_pool_lock); 5132 spin_lock_init(&fusion->mpt_pool_lock);
5087 memset(fusion->load_balance_info, 0, 5133 memset(fusion->load_balance_info, 0,
@@ -5287,6 +5333,10 @@ retry_irq_register:
5287 goto fail_io_attach; 5333 goto fail_io_attach;
5288 5334
5289 instance->unload = 0; 5335 instance->unload = 0;
5336 /*
5337 * Trigger SCSI to scan our drives
5338 */
5339 scsi_scan_host(host);
5290 5340
5291 /* 5341 /*
5292 * Initiate AEN (Asynchronous Event Notification) 5342 * Initiate AEN (Asynchronous Event Notification)
@@ -6050,6 +6100,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6050 megasas_issue_blocked_cmd(instance, cmd, 0); 6100 megasas_issue_blocked_cmd(instance, cmd, 0);
6051 cmd->sync_cmd = 0; 6101 cmd->sync_cmd = 0;
6052 6102
6103 if (instance->unload == 1) {
6104 dev_info(&instance->pdev->dev, "Driver unload is in progress "
6105 "don't submit data to application\n");
6106 goto out;
6107 }
6053 /* 6108 /*
6054 * copy out the kernel buffers to user buffers 6109 * copy out the kernel buffers to user buffers
6055 */ 6110 */
@@ -6399,16 +6454,6 @@ static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
6399static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); 6454static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
6400 6455
6401static ssize_t 6456static ssize_t
6402megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
6403{
6404 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
6405 MEGASAS_RELDATE);
6406}
6407
6408static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
6409 NULL);
6410
6411static ssize_t
6412megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) 6457megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
6413{ 6458{
6414 return sprintf(buf, "%u\n", support_poll_for_event); 6459 return sprintf(buf, "%u\n", support_poll_for_event);
@@ -6711,8 +6756,7 @@ static int __init megasas_init(void)
6711 /* 6756 /*
6712 * Announce driver version and other information 6757 * Announce driver version and other information
6713 */ 6758 */
6714 printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, 6759 pr_info("megasas: %s\n", MEGASAS_VERSION);
6715 MEGASAS_EXT_VERSION);
6716 6760
6717 spin_lock_init(&poll_aen_lock); 6761 spin_lock_init(&poll_aen_lock);
6718 6762
@@ -6747,10 +6791,6 @@ static int __init megasas_init(void)
6747 &driver_attr_version); 6791 &driver_attr_version);
6748 if (rval) 6792 if (rval)
6749 goto err_dcf_attr_ver; 6793 goto err_dcf_attr_ver;
6750 rval = driver_create_file(&megasas_pci_driver.driver,
6751 &driver_attr_release_date);
6752 if (rval)
6753 goto err_dcf_rel_date;
6754 6794
6755 rval = driver_create_file(&megasas_pci_driver.driver, 6795 rval = driver_create_file(&megasas_pci_driver.driver,
6756 &driver_attr_support_poll_for_event); 6796 &driver_attr_support_poll_for_event);
@@ -6774,12 +6814,7 @@ err_dcf_support_device_change:
6774err_dcf_dbg_lvl: 6814err_dcf_dbg_lvl:
6775 driver_remove_file(&megasas_pci_driver.driver, 6815 driver_remove_file(&megasas_pci_driver.driver,
6776 &driver_attr_support_poll_for_event); 6816 &driver_attr_support_poll_for_event);
6777
6778err_dcf_support_poll_for_event: 6817err_dcf_support_poll_for_event:
6779 driver_remove_file(&megasas_pci_driver.driver,
6780 &driver_attr_release_date);
6781
6782err_dcf_rel_date:
6783 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 6818 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
6784err_dcf_attr_ver: 6819err_dcf_attr_ver:
6785 pci_unregister_driver(&megasas_pci_driver); 6820 pci_unregister_driver(&megasas_pci_driver);
@@ -6799,8 +6834,6 @@ static void __exit megasas_exit(void)
6799 &driver_attr_support_poll_for_event); 6834 &driver_attr_support_poll_for_event);
6800 driver_remove_file(&megasas_pci_driver.driver, 6835 driver_remove_file(&megasas_pci_driver.driver,
6801 &driver_attr_support_device_change); 6836 &driver_attr_support_device_change);
6802 driver_remove_file(&megasas_pci_driver.driver,
6803 &driver_attr_release_date);
6804 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 6837 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
6805 6838
6806 pci_unregister_driver(&megasas_pci_driver); 6839 pci_unregister_driver(&megasas_pci_driver);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 685e6f391fe4..460c6a3d4ade 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2012 LSI Corporation. 4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -14,20 +15,21 @@
14 * GNU General Public License for more details. 15 * GNU General Public License for more details.
15 * 16 *
16 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_fp.c 20 * FILE: megaraid_sas_fp.c
21 * 21 *
22 * Authors: LSI Corporation 22 * Authors: Avago Technologies
23 * Sumant Patro 23 * Sumant Patro
24 * Varad Talamacki 24 * Varad Talamacki
25 * Manoj Jose 25 * Manoj Jose
26 * Kashyap Desai <kashyap.desai@avagotech.com>
27 * Sumit Saxena <sumit.saxena@avagotech.com>
26 * 28 *
27 * Send feedback to: <megaraidlinux@lsi.com> 29 * Send feedback to: megaraidlinux.pdl@avagotech.com
28 * 30 *
29 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 31 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
30 * ATTN: Linuxraid 32 * San Jose, California 95131
31 */ 33 */
32 34
33#include <linux/kernel.h> 35#include <linux/kernel.h>
@@ -183,14 +185,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
183 /* New Raid map will not set totalSize, so keep expected value 185 /* New Raid map will not set totalSize, so keep expected value
184 * for legacy code in ValidateMapInfo 186 * for legacy code in ValidateMapInfo
185 */ 187 */
186 pDrvRaidMap->totalSize = sizeof(struct MR_FW_RAID_MAP_EXT); 188 pDrvRaidMap->totalSize =
189 cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
187 } else { 190 } else {
188 fw_map_old = (struct MR_FW_RAID_MAP_ALL *) 191 fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
189 fusion->ld_map[(instance->map_id & 1)]; 192 fusion->ld_map[(instance->map_id & 1)];
190 pFwRaidMap = &fw_map_old->raidMap; 193 pFwRaidMap = &fw_map_old->raidMap;
191 194
192#if VD_EXT_DEBUG 195#if VD_EXT_DEBUG
193 for (i = 0; i < pFwRaidMap->ldCount; i++) { 196 for (i = 0; i < le16_to_cpu(pFwRaidMap->ldCount); i++) {
194 dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x " 197 dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
195 "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n", 198 "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
196 instance->unique_id, i, 199 instance->unique_id, i,
@@ -202,12 +205,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
202 205
203 memset(drv_map, 0, fusion->drv_map_sz); 206 memset(drv_map, 0, fusion->drv_map_sz);
204 pDrvRaidMap->totalSize = pFwRaidMap->totalSize; 207 pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
205 pDrvRaidMap->ldCount = pFwRaidMap->ldCount; 208 pDrvRaidMap->ldCount = (__le16)pFwRaidMap->ldCount;
206 pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; 209 pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
207 for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) 210 for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
208 pDrvRaidMap->ldTgtIdToLd[i] = 211 pDrvRaidMap->ldTgtIdToLd[i] =
209 (u8)pFwRaidMap->ldTgtIdToLd[i]; 212 (u8)pFwRaidMap->ldTgtIdToLd[i];
210 for (i = 0; i < pDrvRaidMap->ldCount; i++) { 213 for (i = 0; i < le16_to_cpu(pDrvRaidMap->ldCount); i++) {
211 pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; 214 pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
212#if VD_EXT_DEBUG 215#if VD_EXT_DEBUG
213 dev_dbg(&instance->pdev->dev, 216 dev_dbg(&instance->pdev->dev,
@@ -268,7 +271,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
268 else 271 else
269 expected_size = 272 expected_size =
270 (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) + 273 (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
271 (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pDrvRaidMap->ldCount))); 274 (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
272 275
273 if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) { 276 if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
274 dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n", 277 dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
@@ -284,7 +287,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
284 287
285 mr_update_load_balance_params(drv_map, lbInfo); 288 mr_update_load_balance_params(drv_map, lbInfo);
286 289
287 num_lds = le32_to_cpu(drv_map->raidMap.ldCount); 290 num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
288 291
289 /*Convert Raid capability values to CPU arch */ 292 /*Convert Raid capability values to CPU arch */
290 for (ldCount = 0; ldCount < num_lds; ldCount++) { 293 for (ldCount = 0; ldCount < num_lds; ldCount++) {
@@ -457,7 +460,7 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance,
457 quad = &map->raidMap.ldSpanMap[ld]. 460 quad = &map->raidMap.ldSpanMap[ld].
458 spanBlock[span]. 461 spanBlock[span].
459 block_span_info.quad[info]; 462 block_span_info.quad[info];
460 if (le32_to_cpu(quad->diff == 0)) 463 if (le32_to_cpu(quad->diff) == 0)
461 return SPAN_INVALID; 464 return SPAN_INVALID;
462 if (le64_to_cpu(quad->logStart) <= row && 465 if (le64_to_cpu(quad->logStart) <= row &&
463 row <= le64_to_cpu(quad->logEnd) && 466 row <= le64_to_cpu(quad->logEnd) &&
@@ -520,7 +523,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
520 span_set->span_row_data_width) * span_set->diff; 523 span_set->span_row_data_width) * span_set->diff;
521 for (span = 0, span_offset = 0; span < raid->spanDepth; span++) 524 for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
522 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 525 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
523 block_span_info.noElements >= info+1)) { 526 block_span_info.noElements) >= info+1) {
524 if (strip_offset >= 527 if (strip_offset >=
525 span_set->strip_offset[span]) 528 span_set->strip_offset[span])
526 span_offset++; 529 span_offset++;
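Both fixes above correct the same misplaced parenthesis: the comparison ran on the raw little-endian field, and only its 0/1 result was byte-swapped. For the == 0 test this is accidentally harmless, since zero has the same representation in either byte order, but the noElements >= info + 1 test compared an unswapped value on big-endian hosts. An illustration (assumes a big-endian host; do_something() is a placeholder):

__le32 n = cpu_to_le32(1);	/* raw bytes read back as 0x01000000 */

if (le32_to_cpu(n >= 2))	/* broken: compares 0x01000000 >= 2  */
	do_something();
if (le32_to_cpu(n) >= 2)	/* fixed: compares 1 >= 2            */
	do_something();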
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f37eed682c75..71557f64bb5e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2012 LSI Corporation. 4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -14,19 +15,20 @@
14 * GNU General Public License for more details. 15 * GNU General Public License for more details.
15 * 16 *
16 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_fusion.c 20 * FILE: megaraid_sas_fusion.c
21 * 21 *
22 * Authors: LSI Corporation 22 * Authors: Avago Technologies
23 * Sumant Patro 23 * Sumant Patro
24 * Adam Radford <linuxraid@lsi.com> 24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
25 * 27 *
26 * Send feedback to: <megaraidlinux@lsi.com> 28 * Send feedback to: megaraidlinux.pdl@avagotech.com
27 * 29 *
28 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
29 * ATTN: Linuxraid 31 * San Jose, California 95131
30 */ 32 */
31 33
32#include <linux/kernel.h> 34#include <linux/kernel.h>
@@ -880,7 +882,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
880 882
881 map = fusion->ld_drv_map[instance->map_id & 1]; 883 map = fusion->ld_drv_map[instance->map_id & 1];
882 884
883 num_lds = le32_to_cpu(map->raidMap.ldCount); 885 num_lds = le16_to_cpu(map->raidMap.ldCount);
884 886
885 dcmd = &cmd->frame->dcmd; 887 dcmd = &cmd->frame->dcmd;
886 888
@@ -1065,48 +1067,16 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1065 goto fail_ioc_init; 1067 goto fail_ioc_init;
1066 1068
1067 megasas_display_intel_branding(instance); 1069 megasas_display_intel_branding(instance);
1068 if (megasas_get_ctrl_info(instance, instance->ctrl_info)) { 1070 if (megasas_get_ctrl_info(instance)) {
1069 dev_err(&instance->pdev->dev, 1071 dev_err(&instance->pdev->dev,
1070 "Could not get controller info. Fail from %s %d\n", 1072 "Could not get controller info. Fail from %s %d\n",
1071 __func__, __LINE__); 1073 __func__, __LINE__);
1072 goto fail_ioc_init; 1074 goto fail_ioc_init;
1073 } 1075 }
1074 1076
1075 instance->supportmax256vd =
1076 instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
1077 /* Below is additional check to address future FW enhancement */
1078 if (instance->ctrl_info->max_lds > 64)
1079 instance->supportmax256vd = 1;
1080 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
1081 * MEGASAS_MAX_DEV_PER_CHANNEL;
1082 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
1083 * MEGASAS_MAX_DEV_PER_CHANNEL;
1084 if (instance->supportmax256vd) {
1085 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
1086 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
1087 } else {
1088 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
1089 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
1090 }
1091 dev_info(&instance->pdev->dev, "Firmware supports %d VDs %d PDs\n"
1092 "Driver supports %d VDs %d PDs\n",
1093 instance->fw_supported_vd_count,
1094 instance->fw_supported_pd_count,
1095 instance->drv_supported_vd_count,
1096 instance->drv_supported_pd_count);
1097
1098 instance->flag_ieee = 1; 1077 instance->flag_ieee = 1;
1099 fusion->fast_path_io = 0; 1078 fusion->fast_path_io = 0;
1100 1079
1101 fusion->old_map_sz =
1102 sizeof(struct MR_FW_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
1103 (instance->fw_supported_vd_count - 1));
1104 fusion->new_map_sz =
1105 sizeof(struct MR_FW_RAID_MAP_EXT);
1106 fusion->drv_map_sz =
1107 sizeof(struct MR_DRV_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
1108 (instance->drv_supported_vd_count - 1));
1109
1110 fusion->drv_map_pages = get_order(fusion->drv_map_sz); 1080 fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1111 for (i = 0; i < 2; i++) { 1081 for (i = 0; i < 2; i++) {
1112 fusion->ld_map[i] = NULL; 1082 fusion->ld_map[i] = NULL;
@@ -1121,16 +1091,10 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1121 fusion->drv_map_pages); 1091 fusion->drv_map_pages);
1122 goto fail_ioc_init; 1092 goto fail_ioc_init;
1123 } 1093 }
1094 memset(fusion->ld_drv_map[i], 0,
1095 ((1 << PAGE_SHIFT) << fusion->drv_map_pages));
1124 } 1096 }
1125 1097
1126 fusion->max_map_sz = max(fusion->old_map_sz, fusion->new_map_sz);
1127
1128 if (instance->supportmax256vd)
1129 fusion->current_map_sz = fusion->new_map_sz;
1130 else
1131 fusion->current_map_sz = fusion->old_map_sz;
1132
1133
1134 for (i = 0; i < 2; i++) { 1098 for (i = 0; i < 2; i++) {
1135 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, 1099 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1136 fusion->max_map_sz, 1100 fusion->max_map_sz,
@@ -1173,9 +1137,10 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
1173 struct megasas_register_set __iomem *regs) 1137 struct megasas_register_set __iomem *regs)
1174{ 1138{
1175#if defined(writeq) && defined(CONFIG_64BIT) 1139#if defined(writeq) && defined(CONFIG_64BIT)
1176 u64 req_data = (((u64)req_desc_hi << 32) | (u32)req_desc_lo); 1140 u64 req_data = (((u64)le32_to_cpu(req_desc_hi) << 32) |
1141 le32_to_cpu(req_desc_lo));
1177 1142
1178 writeq(le64_to_cpu(req_data), &(regs)->inbound_low_queue_port); 1143 writeq(req_data, &(regs)->inbound_low_queue_port);
1179#else 1144#else
1180 unsigned long flags; 1145 unsigned long flags;
1181 1146
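The old code composed the two raw little-endian descriptor halves into a u64 and byte-swapped the result; on a big-endian host, le64_to_cpu() reverses all eight bytes, which also exchanges the two 32-bit halves rather than just the bytes within each. Converting each half first, as the replacement does, keeps the halves in place, and writeq() performs any remaining I/O byte-swapping itself. Roughly (assumed big-endian host, illustrative values):

__le32 hi = cpu_to_le32(1), lo = cpu_to_le32(2);

/* broken: yields (2ULL << 32) | 1 on big-endian - halves swapped */
u64 bad  = le64_to_cpu(((u64)hi << 32) | lo);
/* fixed: yields (1ULL << 32) | 2, the value the doorbell expects  */
u64 good = ((u64)le32_to_cpu(hi) << 32) | le32_to_cpu(lo);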
@@ -1373,7 +1338,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1373 /* Logical block reference tag */ 1338 /* Logical block reference tag */
1374 io_request->CDB.EEDP32.PrimaryReferenceTag = 1339 io_request->CDB.EEDP32.PrimaryReferenceTag =
1375 cpu_to_be32(ref_tag); 1340 cpu_to_be32(ref_tag);
1376 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; 1341 io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
1377 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ 1342 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
1378 1343
1379 /* Transfer length */ 1344 /* Transfer length */
@@ -1769,7 +1734,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1769 1734
1770 /* set RAID context values */ 1735 /* set RAID context values */
1771 pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; 1736 pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
1772 pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd; 1737 pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
1773 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 1738 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1774 pRAID_Context->regLockRowLBA = 0; 1739 pRAID_Context->regLockRowLBA = 0;
1775 pRAID_Context->regLockLength = 0; 1740 pRAID_Context->regLockLength = 0;
@@ -2254,7 +2219,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2254 * megasas_complete_cmd 2219 * megasas_complete_cmd
2255 */ 2220 */
2256 2221
2257 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) 2222 if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
2258 cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 2223 cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2259 2224
2260 fusion = instance->ctrl_context; 2225 fusion = instance->ctrl_context;
@@ -2385,6 +2350,8 @@ megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
2385 "memory allocation failed at index %d\n", i); 2350 "memory allocation failed at index %d\n", i);
2386 break; 2351 break;
2387 } 2352 }
2353 memset(instance->crash_buf[i], 0,
2354 ((1 << PAGE_SHIFT) << instance->crash_buf_pages));
2388 } 2355 }
2389 instance->drv_buf_alloc = i; 2356 instance->drv_buf_alloc = i;
2390} 2357}
@@ -2837,11 +2804,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2837 } 2804 }
2838 } 2805 }
2839 2806
2840	clear_bit(MEGASAS_FUSION_IN_RESET,
2841		  &instance->reset_flags);
2842	instance->instancet->enable_intr(instance);
2843	instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2844
 2807	if (megasas_get_ctrl_info(instance)) {
 2808		dev_info(&instance->pdev->dev,
 2809			"Failed from %s %d\n",
 2810			__func__, __LINE__);
 2811		instance->adprecovery =
 2812			MEGASAS_HW_CRITICAL_ERROR;
 2813		megaraid_sas_kill_hba(instance);
 2814		retval = FAILED;
 2815	}
2845 /* Reset load balance info */ 2816 /* Reset load balance info */
2846 memset(fusion->load_balance_info, 0, 2817 memset(fusion->load_balance_info, 0,
2847 sizeof(struct LD_LOAD_BALANCE_INFO) 2818 sizeof(struct LD_LOAD_BALANCE_INFO)
@@ -2850,6 +2821,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2850 if (!megasas_get_map_info(instance)) 2821 if (!megasas_get_map_info(instance))
2851 megasas_sync_map_info(instance); 2822 megasas_sync_map_info(instance);
2852 2823
2824 clear_bit(MEGASAS_FUSION_IN_RESET,
2825 &instance->reset_flags);
2826 instance->instancet->enable_intr(instance);
2827 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2828
2853 /* Restart SR-IOV heartbeat */ 2829 /* Restart SR-IOV heartbeat */
2854 if (instance->requestorId) { 2830 if (instance->requestorId) {
2855 if (!megasas_sriov_start_heartbeat(instance, 0)) 2831 if (!megasas_sriov_start_heartbeat(instance, 0))
@@ -2866,14 +2842,14 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2866 "successful for scsi%d.\n", 2842 "successful for scsi%d.\n",
2867 instance->host->host_no); 2843 instance->host->host_no);
2868 2844
2869 if (instance->crash_dump_drv_support) { 2845 if (instance->crash_dump_drv_support &&
2870 if (instance->crash_dump_app_support) 2846 instance->crash_dump_app_support)
2871 megasas_set_crash_dump_params(instance, 2847 megasas_set_crash_dump_params(instance,
2872 MR_CRASH_BUF_TURN_ON); 2848 MR_CRASH_BUF_TURN_ON);
2873 else 2849 else
2874 megasas_set_crash_dump_params(instance, 2850 megasas_set_crash_dump_params(instance,
2875 MR_CRASH_BUF_TURN_OFF); 2851 MR_CRASH_BUF_TURN_OFF);
2876 } 2852
2877 retval = SUCCESS; 2853 retval = SUCCESS;
2878 goto out; 2854 goto out;
2879 } 2855 }
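Collapsing the nested crash-dump test changes one corner case worth spelling out: a controller without driver-side support previously skipped the call entirely, while the combined condition now explicitly turns the firmware crash buffer off. As a truth table (illustrative):

/* drv_support  app_support | old behaviour   new behaviour
 *      1            1      | TURN_ON         TURN_ON
 *      1            0      | TURN_OFF        TURN_OFF
 *      0            -      | no call         TURN_OFF  (changed)
 */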
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 0d183d521bdd..5ab7daee11be 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2012 LSI Corporation. 4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -14,19 +15,20 @@
14 * GNU General Public License for more details. 15 * GNU General Public License for more details.
15 * 16 *
16 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_fusion.h 20 * FILE: megaraid_sas_fusion.h
21 * 21 *
22 * Authors: LSI Corporation 22 * Authors: Avago Technologies
23 * Manoj Jose 23 * Manoj Jose
24 * Sumant Patro 24 * Sumant Patro
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
25 * 27 *
26 * Send feedback to: <megaraidlinux@lsi.com> 28 * Send feedback to: megaraidlinux.pdl@avagotech.com
27 * 29 *
28 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
29 * ATTN: Linuxraid 31 * San Jose, California 95131
30 */ 32 */
31 33
32#ifndef _MEGARAID_SAS_FUSION_H_ 34#ifndef _MEGARAID_SAS_FUSION_H_
@@ -834,8 +836,6 @@ struct fusion_context {
834 836
835 u32 max_map_sz; 837 u32 max_map_sz;
836 u32 current_map_sz; 838 u32 current_map_sz;
837 u32 old_map_sz;
838 u32 new_map_sz;
839 u32 drv_map_sz; 839 u32 drv_map_sz;
840 u32 drv_map_pages; 840 u32 drv_map_pages;
841 u8 fast_path_io; 841 u8 fast_path_io;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 42fef914d441..8431eb10bbb1 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -1179,15 +1179,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
1179} 1179}
1180 1180
1181/** 1181/**
1182 * _scsih_adjust_queue_depth - setting device queue depth 1182 * _scsih_change_queue_depth - setting device queue depth
1183 * @sdev: scsi device struct 1183 * @sdev: scsi device struct
1184 * @qdepth: requested queue depth 1184 * @qdepth: requested queue depth
1185 * 1185 *
1186 * 1186 * Returns queue depth.
1187 * Returns nothing
1188 */ 1187 */
1189static void 1188static int
1190_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) 1189_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1191{ 1190{
1192 struct Scsi_Host *shost = sdev->host; 1191 struct Scsi_Host *shost = sdev->host;
1193 int max_depth; 1192 int max_depth;
@@ -1217,41 +1216,11 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
1217 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1216 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1218 1217
1219 not_sata: 1218 not_sata:
1220
1221 if (!sdev->tagged_supported) 1219 if (!sdev->tagged_supported)
1222 max_depth = 1; 1220 max_depth = 1;
1223 if (qdepth > max_depth) 1221 if (qdepth > max_depth)
1224 qdepth = max_depth; 1222 qdepth = max_depth;
1225 scsi_adjust_queue_depth(sdev, qdepth); 1223 return scsi_change_queue_depth(sdev, qdepth);
1226}
1227
1228/**
1229 * _scsih_change_queue_depth - setting device queue depth
1230 * @sdev: scsi device struct
1231 * @qdepth: requested queue depth
1232 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
1233 * (see include/scsi/scsi_host.h for definition)
1234 *
1235 * Returns queue depth.
1236 */
1237static int
1238_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1239{
1240 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP)
1241 _scsih_adjust_queue_depth(sdev, qdepth);
1242 else if (reason == SCSI_QDEPTH_QFULL)
1243 scsi_track_queue_full(sdev, qdepth);
1244 else
1245 return -EOPNOTSUPP;
1246
1247 if (sdev->inquiry_len > 7)
1248 sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), "
1249 "simple(%d), scsi_level(%d), cmd_que(%d)\n",
1250 sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags,
1251 sdev->scsi_level,
1252 (sdev->inquiry[7] & 2) >> 1);
1253
1254 return sdev->queue_depth;
1255} 1224}
1256 1225
1257/** 1226/**
@@ -2082,7 +2051,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
2082 r_level, raid_device->handle, 2051 r_level, raid_device->handle,
2083 (unsigned long long)raid_device->wwid, 2052 (unsigned long long)raid_device->wwid,
2084 raid_device->num_pds, ds); 2053 raid_device->num_pds, ds);
2085 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 2054 _scsih_change_queue_depth(sdev, qdepth);
2086 /* raid transport support */ 2055 /* raid transport support */
2087 if (!ioc->is_warpdrive) 2056 if (!ioc->is_warpdrive)
2088 _scsih_set_level(sdev, raid_device->volume_type); 2057 _scsih_set_level(sdev, raid_device->volume_type);
@@ -2147,7 +2116,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
2147 _scsih_display_sata_capabilities(ioc, handle, sdev); 2116 _scsih_display_sata_capabilities(ioc, handle, sdev);
2148 2117
2149 2118
2150 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 2119 _scsih_change_queue_depth(sdev, qdepth);
2151 2120
2152 if (ssp_target) { 2121 if (ssp_target) {
2153 sas_read_port_mode_page(sdev); 2122 sas_read_port_mode_page(sdev);
@@ -7637,6 +7606,7 @@ static struct scsi_host_template scsih_driver_template = {
7637 .use_clustering = ENABLE_CLUSTERING, 7606 .use_clustering = ENABLE_CLUSTERING,
7638 .shost_attrs = mpt2sas_host_attrs, 7607 .shost_attrs = mpt2sas_host_attrs,
7639 .sdev_attrs = mpt2sas_dev_attrs, 7608 .sdev_attrs = mpt2sas_dev_attrs,
7609 .track_queue_depth = 1,
7640}; 7610};
7641 7611
7642/** 7612/**
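The SCSI_QDEPTH_QFULL branch removed above did not simply vanish: QUEUE FULL tracking moved into the mid-layer, and a driver opts back in with the .track_queue_depth flag this hunk adds to the host template. The resulting template shape, sketched with unrelated fields elided (illustrative):

static struct scsi_host_template example_template = {
	/* ... */
	.change_queue_depth	= _scsih_change_queue_depth,
	.track_queue_depth	= 1,	/* core calls scsi_track_queue_full() */
};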
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b23c2e7588e5..a2b60991efd4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1053,9 +1053,15 @@ _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1053 return found; 1053 return found;
1054} 1054}
1055 1055
1056 1056/**
1057static void 1057 * _scsih_change_queue_depth - setting device queue depth
1058_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) 1058 * @sdev: scsi device struct
1059 * @qdepth: requested queue depth
1060 *
1061 * Returns queue depth.
1062 */
1063static int
1064_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1059{ 1065{
1060 struct Scsi_Host *shost = sdev->host; 1066 struct Scsi_Host *shost = sdev->host;
1061 int max_depth; 1067 int max_depth;
@@ -1090,36 +1096,7 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
1090 max_depth = 1; 1096 max_depth = 1;
1091 if (qdepth > max_depth) 1097 if (qdepth > max_depth)
1092 qdepth = max_depth; 1098 qdepth = max_depth;
1093 scsi_adjust_queue_depth(sdev, qdepth); 1099 return scsi_change_queue_depth(sdev, qdepth);
1094}
1095
1096/**
1097 * _scsih_change_queue_depth - setting device queue depth
1098 * @sdev: scsi device struct
1099 * @qdepth: requested queue depth
1100 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
1101 * (see include/scsi/scsi_host.h for definition)
1102 *
1103 * Returns queue depth.
1104 */
1105static int
1106_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1107{
1108 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP)
1109 _scsih_adjust_queue_depth(sdev, qdepth);
1110 else if (reason == SCSI_QDEPTH_QFULL)
1111 scsi_track_queue_full(sdev, qdepth);
1112 else
1113 return -EOPNOTSUPP;
1114
1115 if (sdev->inquiry_len > 7)
1116 sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " \
1117 "simple(%d), scsi_level(%d), cmd_que(%d)\n",
1118 sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags,
1119 sdev->scsi_level,
1120 (sdev->inquiry[7] & 2) >> 1);
1121
1122 return sdev->queue_depth;
1123} 1100}
1124 1101
1125/** 1102/**
@@ -1739,7 +1716,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
1739 raid_device->num_pds, ds); 1716 raid_device->num_pds, ds);
1740 1717
1741 1718
1742 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 1719 _scsih_change_queue_depth(sdev, qdepth);
1743 1720
1744/* raid transport support */ 1721/* raid transport support */
1745 _scsih_set_level(sdev, raid_device->volume_type); 1722 _scsih_set_level(sdev, raid_device->volume_type);
@@ -1805,7 +1782,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
1805 _scsih_display_sata_capabilities(ioc, handle, sdev); 1782 _scsih_display_sata_capabilities(ioc, handle, sdev);
1806 1783
1807 1784
1808 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 1785 _scsih_change_queue_depth(sdev, qdepth);
1809 1786
1810 if (ssp_target) { 1787 if (ssp_target) {
1811 sas_read_port_mode_page(sdev); 1788 sas_read_port_mode_page(sdev);
@@ -7266,6 +7243,7 @@ static struct scsi_host_template scsih_driver_template = {
7266 .use_clustering = ENABLE_CLUSTERING, 7243 .use_clustering = ENABLE_CLUSTERING,
7267 .shost_attrs = mpt3sas_host_attrs, 7244 .shost_attrs = mpt3sas_host_attrs,
7268 .sdev_attrs = mpt3sas_dev_attrs, 7245 .sdev_attrs = mpt3sas_dev_attrs,
7246 .track_queue_depth = 1,
7269}; 7247};
7270 7248
7271/** 7249/**
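
Both mpt drivers also set the new track_queue_depth template flag, the declarative replacement for the SCSI_QDEPTH_QFULL and SCSI_QDEPTH_RAMP_UP cases they just deleted; with it set, the midlayer (in code outside these hunks) calls scsi_track_queue_full() on QUEUE FULL itself. A sketch of the resulting wiring, reusing the hypothetical callback above:

#include <linux/module.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template example_sht = {
        .module                 = THIS_MODULE,
        .name                   = "example",
        .change_queue_depth     = example_change_queue_depth,
        /* Opt in to generic QUEUE FULL tracking and ramp-up; the
         * driver never sees a "reason" argument again. */
        .track_queue_depth      = 1,
};
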
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index d3c1fa5e76fb..f15df3de6790 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -26,18 +26,9 @@
26 26
27#include "mv_sas.h" 27#include "mv_sas.h"
28 28
29static int lldd_max_execute_num = 1;
30module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
31MODULE_PARM_DESC(collector, "\n"
32 "\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
33 "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
34 "\tThe mvsas SAS LLDD supports both modes.\n"
35 "\tDefault: 1 (Direct Mode).\n");
36
37int interrupt_coalescing = 0x80; 29int interrupt_coalescing = 0x80;
38 30
39static struct scsi_transport_template *mvs_stt; 31static struct scsi_transport_template *mvs_stt;
40struct kmem_cache *mvs_task_list_cache;
41static const struct mvs_chip_info mvs_chips[] = { 32static const struct mvs_chip_info mvs_chips[] = {
42 [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, 33 [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
43 [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, 34 [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
@@ -77,6 +68,7 @@ static struct scsi_host_template mvs_sht = {
77 .ioctl = sas_ioctl, 68 .ioctl = sas_ioctl,
78 .shost_attrs = mvst_host_attrs, 69 .shost_attrs = mvst_host_attrs,
79 .use_blk_tags = 1, 70 .use_blk_tags = 1,
71 .track_queue_depth = 1,
80}; 72};
81 73
82static struct sas_domain_function_template mvs_transport_ops = { 74static struct sas_domain_function_template mvs_transport_ops = {
@@ -512,14 +504,11 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
512 504
513 sha->num_phys = nr_core * chip_info->n_phy; 505 sha->num_phys = nr_core * chip_info->n_phy;
514 506
515 sha->lldd_max_execute_num = lldd_max_execute_num;
516
517 if (mvi->flags & MVF_FLAG_SOC) 507 if (mvi->flags & MVF_FLAG_SOC)
518 can_queue = MVS_SOC_CAN_QUEUE; 508 can_queue = MVS_SOC_CAN_QUEUE;
519 else 509 else
520 can_queue = MVS_CHIP_SLOT_SZ; 510 can_queue = MVS_CHIP_SLOT_SZ;
521 511
522 sha->lldd_queue_size = can_queue;
523 shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG); 512 shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
524 shost->can_queue = can_queue; 513 shost->can_queue = can_queue;
525 mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE; 514 mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
@@ -832,16 +821,7 @@ static int __init mvs_init(void)
832 if (!mvs_stt) 821 if (!mvs_stt)
833 return -ENOMEM; 822 return -ENOMEM;
834 823
835 mvs_task_list_cache = kmem_cache_create("mvs_task_list", sizeof(struct mvs_task_list),
836 0, SLAB_HWCACHE_ALIGN, NULL);
837 if (!mvs_task_list_cache) {
838 rc = -ENOMEM;
839 mv_printk("%s: mvs_task_list_cache alloc failed! \n", __func__);
840 goto err_out;
841 }
842
843 rc = pci_register_driver(&mvs_pci_driver); 824 rc = pci_register_driver(&mvs_pci_driver);
844
845 if (rc) 825 if (rc)
846 goto err_out; 826 goto err_out;
847 827
@@ -856,7 +836,6 @@ static void __exit mvs_exit(void)
856{ 836{
857 pci_unregister_driver(&mvs_pci_driver); 837 pci_unregister_driver(&mvs_pci_driver);
858 sas_release_transport(mvs_stt); 838 sas_release_transport(mvs_stt);
859 kmem_cache_destroy(mvs_task_list_cache);
860} 839}
861 840
862struct device_attribute *mvst_host_attrs[] = { 841struct device_attribute *mvst_host_attrs[] = {
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index ac52f7c99513..85d86a5cdb60 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -852,43 +852,7 @@ prep_out:
852 return rc; 852 return rc;
853} 853}
854 854
855static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags) 855static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
856{
857 struct mvs_task_list *first = NULL;
858
859 for (; *num > 0; --*num) {
860 struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);
861
862 if (!mvs_list)
863 break;
864
865 INIT_LIST_HEAD(&mvs_list->list);
866 if (!first)
867 first = mvs_list;
868 else
869 list_add_tail(&mvs_list->list, &first->list);
870
871 }
872
873 return first;
874}
875
876static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
877{
878 LIST_HEAD(list);
879 struct list_head *pos, *a;
880 struct mvs_task_list *mlist = NULL;
881
882 __list_add(&list, mvs_list->list.prev, &mvs_list->list);
883
884 list_for_each_safe(pos, a, &list) {
885 list_del_init(pos);
886 mlist = list_entry(pos, struct mvs_task_list, list);
887 kmem_cache_free(mvs_task_list_cache, mlist);
888 }
889}
890
891static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
892 struct completion *completion, int is_tmf, 856 struct completion *completion, int is_tmf,
893 struct mvs_tmf_task *tmf) 857 struct mvs_tmf_task *tmf)
894{ 858{
@@ -912,74 +876,9 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
912 return rc; 876 return rc;
913} 877}
914 878
915static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, 879int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
916 struct completion *completion, int is_tmf,
917 struct mvs_tmf_task *tmf)
918{ 880{
919 struct domain_device *dev = task->dev; 881 return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
920 struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
921 struct mvs_info *mvi = NULL;
922 struct sas_task *t = task;
923 struct mvs_task_list *mvs_list = NULL, *a;
924 LIST_HEAD(q);
925 int pass[2] = {0};
926 u32 rc = 0;
927 u32 n = num;
928 unsigned long flags = 0;
929
930 mvs_list = mvs_task_alloc_list(&n, gfp_flags);
931 if (n) {
932 printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
933 rc = -ENOMEM;
934 goto free_list;
935 }
936
937 __list_add(&q, mvs_list->list.prev, &mvs_list->list);
938
939 list_for_each_entry(a, &q, list) {
940 a->task = t;
941 t = list_entry(t->list.next, struct sas_task, list);
942 }
943
944 list_for_each_entry(a, &q , list) {
945
946 t = a->task;
947 mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;
948
949 spin_lock_irqsave(&mvi->lock, flags);
950 rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
951 if (rc)
952 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
953 spin_unlock_irqrestore(&mvi->lock, flags);
954 }
955
956 if (likely(pass[0]))
957 MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
958 (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
959
960 if (likely(pass[1]))
961 MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
962 (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
963
964 list_del_init(&q);
965
966free_list:
967 if (mvs_list)
968 mvs_task_free_list(mvs_list);
969
970 return rc;
971}
972
973int mvs_queue_command(struct sas_task *task, const int num,
974 gfp_t gfp_flags)
975{
976 struct mvs_device *mvi_dev = task->dev->lldd_dev;
977 struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;
978
979 if (sas->lldd_max_execute_num < 2)
980 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
981 else
982 return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
983} 882}
984 883
985static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) 884static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
@@ -1411,7 +1310,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1411 task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; 1310 task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1412 add_timer(&task->slow_task->timer); 1311 add_timer(&task->slow_task->timer);
1413 1312
1414 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf); 1313 res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);
1415 1314
1416 if (res) { 1315 if (res) {
1417 del_timer(&task->slow_task->timer); 1316 del_timer(&task->slow_task->timer);
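
With the collector machinery gone, mvs_task_exec() handles exactly one sas_task, internal TMFs reuse it with is_tmf set, and mvs_queue_command() is a one-line wrapper. Assuming the libsas lldd_execute_task hook lost its num argument in the same series (its mvsas implementation above clearly did), the transport registration reduces to:

#include <scsi/libsas.h>

/* Hedged sketch; the two-argument prototype for .lldd_execute_task is
 * assumed from the matching libsas change, which is not shown in this
 * hunk. */
static struct sas_domain_function_template example_transport_ops = {
        .lldd_execute_task      = mvs_queue_command, /* (task, gfp_flags) */
        .lldd_abort_task        = mvs_abort_task,
};
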
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index d6b19dc80bee..dc409c04747a 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -65,7 +65,6 @@ extern struct mvs_tgt_initiator mvs_tgt;
65extern struct mvs_info *tgt_mvi; 65extern struct mvs_info *tgt_mvi;
66extern const struct mvs_dispatch mvs_64xx_dispatch; 66extern const struct mvs_dispatch mvs_64xx_dispatch;
67extern const struct mvs_dispatch mvs_94xx_dispatch; 67extern const struct mvs_dispatch mvs_94xx_dispatch;
68extern struct kmem_cache *mvs_task_list_cache;
69 68
70#define DEV_IS_EXPANDER(type) \ 69#define DEV_IS_EXPANDER(type) \
71 ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) 70 ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
@@ -440,12 +439,6 @@ struct mvs_task_exec_info {
440 int n_elem; 439 int n_elem;
441}; 440};
442 441
443struct mvs_task_list {
444 struct sas_task *task;
445 struct list_head list;
446};
447
448
449/******************** function prototype *********************/ 442/******************** function prototype *********************/
450void mvs_get_sas_addr(void *buf, u32 buflen); 443void mvs_get_sas_addr(void *buf, u32 buflen);
451void mvs_tag_clear(struct mvs_info *mvi, u32 tag); 444void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
@@ -462,8 +455,7 @@ void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
462 u32 off_hi, u64 sas_addr); 455 u32 off_hi, u64 sas_addr);
463void mvs_scan_start(struct Scsi_Host *shost); 456void mvs_scan_start(struct Scsi_Host *shost);
464int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); 457int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
465int mvs_queue_command(struct sas_task *task, const int num, 458int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags);
466 gfp_t gfp_flags);
467int mvs_abort_task(struct sas_task *task); 459int mvs_abort_task(struct sas_task *task);
468int mvs_abort_task_set(struct domain_device *dev, u8 *lun); 460int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
469int mvs_clear_aca(struct domain_device *dev, u8 *lun); 461int mvs_clear_aca(struct domain_device *dev, u8 *lun);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 9c331b7bfdcd..5b93ed810f6e 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -7997,7 +7997,7 @@ static int ncr53c8xx_slave_configure(struct scsi_device *device)
7997 if (depth_to_use > MAX_TAGS) 7997 if (depth_to_use > MAX_TAGS)
7998 depth_to_use = MAX_TAGS; 7998 depth_to_use = MAX_TAGS;
7999 7999
8000 scsi_adjust_queue_depth(device, depth_to_use); 8000 scsi_change_queue_depth(device, depth_to_use);
8001 8001
8002 /* 8002 /*
8003 ** Since the queue depth is not tunable under Linux, 8003 ** Since the queue depth is not tunable under Linux,
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 80bacb5dc1d4..e81eadd08afc 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -1,6 +1,4 @@
1#define AUTOSENSE
2#define PSEUDO_DMA 1#define PSEUDO_DMA
3#define FOO
4#define UNSAFE /* Not unsafe for PAS16 -- use it */ 2#define UNSAFE /* Not unsafe for PAS16 -- use it */
5#define PDEBUG 0 3#define PDEBUG 0
6 4
@@ -24,47 +22,9 @@
24 * Media Vision 22 * Media Vision
25 * (510) 770-8600 23 * (510) 770-8600
26 * (800) 348-7116 24 * (800) 348-7116
27 *
28 * and
29 *
30 * NCR 5380 Family
31 * SCSI Protocol Controller
32 * Databook
33 *
34 * NCR Microelectronics
35 * 1635 Aeroplaza Drive
36 * Colorado Springs, CO 80916
37 * 1+ (719) 578-3400
38 * 1+ (800) 334-5454
39 */ 25 */
40 26
41/* 27/*
42 * Options :
43 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
44 * for commands that return with a CHECK CONDITION status.
45 *
46 * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512
47 * bytes at a time. Since interrupts are disabled by default during
48 * these transfers, we might need this to give reasonable interrupt
49 * service time if the transfer size gets too large.
50 *
51 * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
52 * increase compared to polled I/O.
53 *
54 * PARITY - enable parity checking. Not supported.
55 *
56 * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
57 *
58 * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. This
59 * parameter comes from the NCR5380 code. It is NOT unsafe with
60 * the PAS16 and you should use it. If you don't you will have
61 * a problem with dropped characters during high speed
62 * communications during SCSI transfers. If you really don't
63 * want to use UNSAFE you can try defining LIMIT_TRANSFERSIZE or
64 * twiddle with the transfer size in the high level code.
65 *
66 * USLEEP - enable support for devices that don't disconnect. Untested.
67 *
68 * The card is detected and initialized in one of several ways : 28 * The card is detected and initialized in one of several ways :
69 * 1. Autoprobe (default) - There are many different models of 29 * 1. Autoprobe (default) - There are many different models of
70 * the Pro Audio Spectrum/Studio 16, and I only have one of 30 * the Pro Audio Spectrum/Studio 16, and I only have one of
@@ -102,13 +62,11 @@
102 * If you have problems with your card not being recognized, use 62 * If you have problems with your card not being recognized, use
103 * the LILO command line override. Try to get it recognized without 63 * the LILO command line override. Try to get it recognized without
104 * interrupts. Ie, for a board at the default 0x388 base port, 64 * interrupts. Ie, for a board at the default 0x388 base port,
105 * boot: linux pas16=0x388,255 65 * boot: linux pas16=0x388,0
106 * 66 *
107 * SCSI_IRQ_NONE (255) should be specified for no interrupt, 67 * NO_IRQ (0) should be specified for no interrupt,
108 * IRQ_AUTO (254) to autoprobe for an IRQ line if overridden 68 * IRQ_AUTO (254) to autoprobe for an IRQ line if overridden
109 * on the command line. 69 * on the command line.
110 *
111 * (IRQ_AUTO == 254, SCSI_IRQ_NONE == 255 in NCR5380.h)
112 */ 70 */
113 71
114#include <linux/module.h> 72#include <linux/module.h>
@@ -123,15 +81,12 @@
123#include <linux/stat.h> 81#include <linux/stat.h>
124#include <linux/init.h> 82#include <linux/init.h>
125 83
126#include "scsi.h"
127#include <scsi/scsi_host.h> 84#include <scsi/scsi_host.h>
128#include "pas16.h" 85#include "pas16.h"
129#define AUTOPROBE_IRQ 86#define AUTOPROBE_IRQ
130#include "NCR5380.h" 87#include "NCR5380.h"
131 88
132 89
133static int pas_maxi = 0;
134static int pas_wmaxi = 0;
135static unsigned short pas16_addr = 0; 90static unsigned short pas16_addr = 0;
136static int pas16_irq = 0; 91static int pas16_irq = 0;
137 92
@@ -337,6 +292,7 @@ static int __init
337} 292}
338 293
339 294
295#ifndef MODULE
340/* 296/*
341 * Function : pas16_setup(char *str, int *ints) 297 * Function : pas16_setup(char *str, int *ints)
342 * 298 *
@@ -347,10 +303,13 @@ static int __init
347 * 303 *
348 */ 304 */
349 305
350void __init pas16_setup(char *str, int *ints) 306static int __init pas16_setup(char *str)
351{ 307{
352 static int commandline_current = 0; 308 static int commandline_current = 0;
353 int i; 309 int i;
310 int ints[10];
311
312 get_options(str, ARRAY_SIZE(ints), ints);
354 if (ints[0] != 2) 313 if (ints[0] != 2)
355 printk("pas16_setup : usage pas16=io_port,irq\n"); 314 printk("pas16_setup : usage pas16=io_port,irq\n");
356 else 315 else
@@ -364,8 +323,12 @@ void __init pas16_setup(char *str, int *ints)
364 } 323 }
365 ++commandline_current; 324 ++commandline_current;
366 } 325 }
326 return 1;
367} 327}
368 328
329__setup("pas16=", pas16_setup);
330#endif
331
369/* 332/*
370 * Function : int pas16_detect(struct scsi_host_template * tpnt) 333 * Function : int pas16_detect(struct scsi_host_template * tpnt)
371 * 334 *
@@ -379,7 +342,7 @@ void __init pas16_setup(char *str, int *ints)
379 * 342 *
380 */ 343 */
381 344
382int __init pas16_detect(struct scsi_host_template * tpnt) 345static int __init pas16_detect(struct scsi_host_template *tpnt)
383{ 346{
384 static int current_override = 0; 347 static int current_override = 0;
385 static unsigned short current_base = 0; 348 static unsigned short current_base = 0;
@@ -387,10 +350,6 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
387 unsigned short io_port; 350 unsigned short io_port;
388 int count; 351 int count;
389 352
390 tpnt->proc_name = "pas16";
391 tpnt->show_info = pas16_show_info;
392 tpnt->write_info = pas16_write_info;
393
394 if (pas16_addr != 0) { 353 if (pas16_addr != 0) {
395 overrides[0].io_port = pas16_addr; 354 overrides[0].io_port = pas16_addr;
396 /* 355 /*
@@ -452,15 +411,19 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
452 else 411 else
453 instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS); 412 instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
454 413
455 if (instance->irq != SCSI_IRQ_NONE) 414 /* Compatibility with documented NCR5380 kernel parameters */
415 if (instance->irq == 255)
416 instance->irq = NO_IRQ;
417
418 if (instance->irq != NO_IRQ)
456 if (request_irq(instance->irq, pas16_intr, 0, 419 if (request_irq(instance->irq, pas16_intr, 0,
457 "pas16", instance)) { 420 "pas16", instance)) {
458 printk("scsi%d : IRQ%d not free, interrupts disabled\n", 421 printk("scsi%d : IRQ%d not free, interrupts disabled\n",
459 instance->host_no, instance->irq); 422 instance->host_no, instance->irq);
460 instance->irq = SCSI_IRQ_NONE; 423 instance->irq = NO_IRQ;
461 } 424 }
462 425
463 if (instance->irq == SCSI_IRQ_NONE) { 426 if (instance->irq == NO_IRQ) {
464 printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); 427 printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
465 printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); 428 printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
466 /* Disable 5380 interrupts, leave drive params the same */ 429 /* Disable 5380 interrupts, leave drive params the same */
@@ -472,17 +435,6 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
472 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); 435 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
473#endif 436#endif
474 437
475 printk("scsi%d : at 0x%04x", instance->host_no, (int)
476 instance->io_port);
477 if (instance->irq == SCSI_IRQ_NONE)
478 printk (" interrupts disabled");
479 else
480 printk (" irq %d", instance->irq);
481 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
482 CAN_QUEUE, CMD_PER_LUN, PAS16_PUBLIC_RELEASE);
483 NCR5380_print_options(instance);
484 printk("\n");
485
486 ++current_override; 438 ++current_override;
487 ++count; 439 ++count;
488 } 440 }
@@ -509,8 +461,8 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
509 * and matching the H_C_S coordinates to what DOS uses. 461 * and matching the H_C_S coordinates to what DOS uses.
510 */ 462 */
511 463
512int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev, 464static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev,
513 sector_t capacity, int * ip) 465 sector_t capacity, int *ip)
514{ 466{
515 int size = capacity; 467 int size = capacity;
516 ip[0] = 64; 468 ip[0] = 64;
@@ -547,6 +499,7 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
547 P_DATA_REG_OFFSET); 499 P_DATA_REG_OFFSET);
548 register int i = len; 500 register int i = len;
549 int ii = 0; 501 int ii = 0;
502 struct NCR5380_hostdata *hostdata = shost_priv(instance);
550 503
551 while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) ) 504 while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) )
552 ++ii; 505 ++ii;
@@ -559,8 +512,8 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
559 instance->host_no); 512 instance->host_no);
560 return -1; 513 return -1;
561 } 514 }
562 if (ii > pas_maxi) 515 if (ii > hostdata->spin_max_r)
563 pas_maxi = ii; 516 hostdata->spin_max_r = ii;
564 return 0; 517 return 0;
565} 518}
566 519
@@ -583,6 +536,7 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src
583 register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET); 536 register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET);
584 register int i = len; 537 register int i = len;
585 int ii = 0; 538 int ii = 0;
539 struct NCR5380_hostdata *hostdata = shost_priv(instance);
586 540
587 while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) ) 541 while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) )
588 ++ii; 542 ++ii;
@@ -595,8 +549,8 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src
595 instance->host_no); 549 instance->host_no);
596 return -1; 550 return -1;
597 } 551 }
598 if (ii > pas_maxi) 552 if (ii > hostdata->spin_max_w)
599 pas_wmaxi = ii; 553 hostdata->spin_max_w = ii;
600 return 0; 554 return 0;
601} 555}
602 556
@@ -604,7 +558,7 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src
604 558
605static int pas16_release(struct Scsi_Host *shost) 559static int pas16_release(struct Scsi_Host *shost)
606{ 560{
607 if (shost->irq) 561 if (shost->irq != NO_IRQ)
608 free_irq(shost->irq, shost); 562 free_irq(shost->irq, shost);
609 NCR5380_exit(shost); 563 NCR5380_exit(shost);
610 if (shost->io_port && shost->n_io_port) 564 if (shost->io_port && shost->n_io_port)
@@ -617,6 +571,10 @@ static struct scsi_host_template driver_template = {
617 .name = "Pro Audio Spectrum-16 SCSI", 571 .name = "Pro Audio Spectrum-16 SCSI",
618 .detect = pas16_detect, 572 .detect = pas16_detect,
619 .release = pas16_release, 573 .release = pas16_release,
574 .proc_name = "pas16",
575 .show_info = pas16_show_info,
576 .write_info = pas16_write_info,
577 .info = pas16_info,
620 .queuecommand = pas16_queue_command, 578 .queuecommand = pas16_queue_command,
621 .eh_abort_handler = pas16_abort, 579 .eh_abort_handler = pas16_abort,
622 .eh_bus_reset_handler = pas16_bus_reset, 580 .eh_bus_reset_handler = pas16_bus_reset,
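
The pas16_setup() conversion is the standard pattern for moving an old-style command-line hook to __setup(): the handler parses its own integers with get_options() and is compiled out of modular builds. A self-contained sketch of the same pattern with illustrative names:

#include <linux/init.h>
#include <linux/kernel.h>

#ifndef MODULE
static int __init example_setup(char *str)
{
        int ints[3];    /* ints[0] receives the number of values parsed */

        get_options(str, ARRAY_SIZE(ints), ints);
        if (ints[0] != 2)
                pr_info("example: usage example=io_port,irq\n");
        else
                pr_info("example: io=0x%x irq=%d\n", ints[1], ints[2]);
        return 1;       /* non-zero: option consumed */
}

__setup("example=", example_setup);
#endif
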
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h
index aa528f53c533..c6109c80050b 100644
--- a/drivers/scsi/pas16.h
+++ b/drivers/scsi/pas16.h
@@ -18,26 +18,12 @@
18 * Media Vision 18 * Media Vision
19 * (510) 770-8600 19 * (510) 770-8600
20 * (800) 348-7116 20 * (800) 348-7116
21 *
22 * and
23 *
24 * NCR 5380 Family
25 * SCSI Protocol Controller
26 * Databook
27 *
28 * NCR Microelectronics
29 * 1635 Aeroplaza Drive
30 * Colorado Springs, CO 80916
31 * 1+ (719) 578-3400
32 * 1+ (800) 334-5454
33 */ 21 */
34 22
35 23
36#ifndef PAS16_H 24#ifndef PAS16_H
37#define PAS16_H 25#define PAS16_H
38 26
39#define PAS16_PUBLIC_RELEASE 3
40
41#define PDEBUG_INIT 0x1 27#define PDEBUG_INIT 0x1
42#define PDEBUG_TRANSFER 0x2 28#define PDEBUG_TRANSFER 0x2
43 29
@@ -114,12 +100,6 @@
114 100
115 101
116#ifndef ASM 102#ifndef ASM
117static int pas16_abort(Scsi_Cmnd *);
118static int pas16_biosparam(struct scsi_device *, struct block_device *,
119 sector_t, int*);
120static int pas16_detect(struct scsi_host_template *);
121static int pas16_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
122static int pas16_bus_reset(Scsi_Cmnd *);
123 103
124#ifndef CMD_PER_LUN 104#ifndef CMD_PER_LUN
125#define CMD_PER_LUN 2 105#define CMD_PER_LUN 2
@@ -161,6 +141,7 @@ static int pas16_bus_reset(Scsi_Cmnd *);
161#define NCR5380_queue_command pas16_queue_command 141#define NCR5380_queue_command pas16_queue_command
162#define NCR5380_abort pas16_abort 142#define NCR5380_abort pas16_abort
163#define NCR5380_bus_reset pas16_bus_reset 143#define NCR5380_bus_reset pas16_bus_reset
144#define NCR5380_info pas16_info
164#define NCR5380_show_info pas16_show_info 145#define NCR5380_show_info pas16_show_info
165#define NCR5380_write_info pas16_write_info 146#define NCR5380_write_info pas16_write_info
166 147
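
The pas16.h hunk extends the NCR5380 core's macro-binding convention: a board header defines NCR5380_<method> names (here adding NCR5380_info) before including NCR5380.h, and the shared core then emits its implementations under the board's names. A hedged sketch of that convention with illustrative names, not taken from this patch:

/* myboard.h -- bind the shared NCR5380 core's methods to this board */
#define NCR5380_queue_command   myboard_queue_command
#define NCR5380_abort           myboard_abort
#define NCR5380_bus_reset       myboard_bus_reset
#define NCR5380_info            myboard_info
#include "NCR5380.h"
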
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3ff759a3b74d..329aba0083ab 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -90,6 +90,7 @@ static struct scsi_host_template pm8001_sht = {
90 .ioctl = sas_ioctl, 90 .ioctl = sas_ioctl,
91 .shost_attrs = pm8001_host_attrs, 91 .shost_attrs = pm8001_host_attrs,
92 .use_blk_tags = 1, 92 .use_blk_tags = 1,
93 .track_queue_depth = 1,
93}; 94};
94 95
95/** 96/**
@@ -600,8 +601,6 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
600 sha->lldd_module = THIS_MODULE; 601 sha->lldd_module = THIS_MODULE;
601 sha->sas_addr = &pm8001_ha->sas_addr[0]; 602 sha->sas_addr = &pm8001_ha->sas_addr[0];
602 sha->num_phys = chip_info->n_phy; 603 sha->num_phys = chip_info->n_phy;
603 sha->lldd_max_execute_num = 1;
604 sha->lldd_queue_size = PM8001_CAN_QUEUE;
605 sha->core.shost = shost; 604 sha->core.shost = shost;
606} 605}
607 606
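
pm8001 drops the same two sas_ha_struct fields as mvsas, which this series appears to remove from libsas altogether: queue sizing is now expressed only through the Scsi_Host. A sketch of what remains of a libsas LLDD's host setup (constant and names illustrative):

#include <scsi/libsas.h>

#define EXAMPLE_CAN_QUEUE 512   /* illustrative, not a pm8001 value */

static void example_post_sas_ha_init(struct Scsi_Host *shost, int n_phy)
{
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

        sha->num_phys = n_phy;
        sha->core.shost = shost;
        /* The only queue-depth knob left is the host's own can_queue;
         * lldd_max_execute_num/lldd_queue_size no longer exist. */
        shost->can_queue = EXAMPLE_CAN_QUEUE;
}
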
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 76570e6a547d..b93f289b42b3 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -350,7 +350,7 @@ static int sas_find_local_port_id(struct domain_device *dev)
350 */ 350 */
351#define DEV_IS_GONE(pm8001_dev) \ 351#define DEV_IS_GONE(pm8001_dev) \
352 ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) 352 ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
353static int pm8001_task_exec(struct sas_task *task, const int num, 353static int pm8001_task_exec(struct sas_task *task,
354 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) 354 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
355{ 355{
356 struct domain_device *dev = task->dev; 356 struct domain_device *dev = task->dev;
@@ -360,7 +360,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
360 struct sas_task *t = task; 360 struct sas_task *t = task;
361 struct pm8001_ccb_info *ccb; 361 struct pm8001_ccb_info *ccb;
362 u32 tag = 0xdeadbeef, rc, n_elem = 0; 362 u32 tag = 0xdeadbeef, rc, n_elem = 0;
363 u32 n = num;
364 unsigned long flags = 0; 363 unsigned long flags = 0;
365 364
366 if (!dev->port) { 365 if (!dev->port) {
@@ -387,18 +386,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
387 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 386 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
388 t->task_done(t); 387 t->task_done(t);
389 spin_lock_irqsave(&pm8001_ha->lock, flags); 388 spin_lock_irqsave(&pm8001_ha->lock, flags);
390 if (n > 1)
391 t = list_entry(t->list.next,
392 struct sas_task, list);
393 continue; 389 continue;
394 } else { 390 } else {
395 struct task_status_struct *ts = &t->task_status; 391 struct task_status_struct *ts = &t->task_status;
396 ts->resp = SAS_TASK_UNDELIVERED; 392 ts->resp = SAS_TASK_UNDELIVERED;
397 ts->stat = SAS_PHY_DOWN; 393 ts->stat = SAS_PHY_DOWN;
398 t->task_done(t); 394 t->task_done(t);
399 if (n > 1)
400 t = list_entry(t->list.next,
401 struct sas_task, list);
402 continue; 395 continue;
403 } 396 }
404 } 397 }
@@ -460,9 +453,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
460 t->task_state_flags |= SAS_TASK_AT_INITIATOR; 453 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
461 spin_unlock(&t->task_state_lock); 454 spin_unlock(&t->task_state_lock);
462 pm8001_dev->running_req++; 455 pm8001_dev->running_req++;
463 if (n > 1) 456 } while (0);
464 t = list_entry(t->list.next, struct sas_task, list);
465 } while (--n);
466 rc = 0; 457 rc = 0;
467 goto out_done; 458 goto out_done;
468 459
@@ -483,14 +474,11 @@ out_done:
483 * pm8001_queue_command - register for upper layer used, all IO commands sent 474 * pm8001_queue_command - register for upper layer used, all IO commands sent
484 * to HBA are from this interface. 475 * to HBA are from this interface.
485 * @task: the task to be execute. 476 * @task: the task to be execute.
486 * @num: if can_queue great than 1, the task can be queued up. for SMP task,
487 * we always execute one one time
488 * @gfp_flags: gfp_flags 477 * @gfp_flags: gfp_flags
489 */ 478 */
490int pm8001_queue_command(struct sas_task *task, const int num, 479int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
491 gfp_t gfp_flags)
492{ 480{
493 return pm8001_task_exec(task, num, gfp_flags, 0, NULL); 481 return pm8001_task_exec(task, gfp_flags, 0, NULL);
494} 482}
495 483
496/** 484/**
@@ -708,7 +696,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
708 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; 696 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
709 add_timer(&task->slow_task->timer); 697 add_timer(&task->slow_task->timer);
710 698
711 res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf); 699 res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);
712 700
713 if (res) { 701 if (res) {
714 del_timer(&task->slow_task->timer); 702 del_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index f6b2ac59dae4..8dd8b7840f04 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -623,8 +623,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
623 void *funcdata); 623 void *funcdata);
624void pm8001_scan_start(struct Scsi_Host *shost); 624void pm8001_scan_start(struct Scsi_Host *shost);
625int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time); 625int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
626int pm8001_queue_command(struct sas_task *task, const int num, 626int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags);
627 gfp_t gfp_flags);
628int pm8001_abort_task(struct sas_task *task); 627int pm8001_abort_task(struct sas_task *task);
629int pm8001_abort_task_set(struct domain_device *dev, u8 *lun); 628int pm8001_abort_task_set(struct domain_device *dev, u8 *lun);
630int pm8001_clear_aca(struct domain_device *dev, u8 *lun); 629int pm8001_clear_aca(struct domain_device *dev, u8 *lun);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index d8b9ba251fbd..b1b1f66b1ab7 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -285,23 +285,15 @@ static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
285 * pmcraid_change_queue_depth - Change the device's queue depth 285 * pmcraid_change_queue_depth - Change the device's queue depth
286 * @scsi_dev: scsi device struct 286 * @scsi_dev: scsi device struct
287 * @depth: depth to set 287 * @depth: depth to set
288 * @reason: calling context
289 * 288 *
290 * Return value 289 * Return value
291 * actual depth set 290 * actual depth set
292 */ 291 */
293static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth, 292static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
294 int reason)
295{ 293{
296 if (reason != SCSI_QDEPTH_DEFAULT)
297 return -EOPNOTSUPP;
298
299 if (depth > PMCRAID_MAX_CMD_PER_LUN) 294 if (depth > PMCRAID_MAX_CMD_PER_LUN)
300 depth = PMCRAID_MAX_CMD_PER_LUN; 295 depth = PMCRAID_MAX_CMD_PER_LUN;
301 296 return scsi_change_queue_depth(scsi_dev, depth);
302 scsi_adjust_queue_depth(scsi_dev, depth);
303
304 return scsi_dev->queue_depth;
305} 297}
306 298
307/** 299/**
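
pmcraid's conversion is the minimal clamp case. An equivalent, purely illustrative one-liner using min_t() shows how little is left of these callbacks (PMCRAID_MAX_CMD_PER_LUN is the driver's own constant):

#include <linux/kernel.h>
#include <scsi/scsi_device.h>

static int example_pmcraid_style_cqd(struct scsi_device *sdev, int depth)
{
        return scsi_change_queue_depth(sdev,
                        min_t(int, depth, PMCRAID_MAX_CMD_PER_LUN));
}
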
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index adedb6ef8eec..c68a66e8cfc1 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1224,9 +1224,9 @@ qla1280_slave_configure(struct scsi_device *device)
1224 1224
1225 if (device->tagged_supported && 1225 if (device->tagged_supported &&
1226 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) { 1226 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1227 scsi_adjust_queue_depth(device, ha->bus_settings[bus].hiwat); 1227 scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat);
1228 } else { 1228 } else {
1229 scsi_adjust_queue_depth(device, default_depth); 1229 scsi_change_queue_depth(device, default_depth);
1230 } 1230 }
1231 1231
1232 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr; 1232 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b1865a72ce59..7686bfe9a4a9 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -752,8 +752,6 @@ extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha);
752extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg); 752extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg);
753extern void qla8044_wr_direct(struct scsi_qla_host *vha, 753extern void qla8044_wr_direct(struct scsi_qla_host *vha,
754 const uint32_t crb_reg, const uint32_t value); 754 const uint32_t crb_reg, const uint32_t value);
755extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha);
756extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha);
757extern int qla8044_device_state_handler(struct scsi_qla_host *vha); 755extern int qla8044_device_state_handler(struct scsi_qla_host *vha);
758extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha); 756extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha);
759extern void qla8044_clear_drv_active(struct qla_hw_data *); 757extern void qla8044_clear_drv_active(struct qla_hw_data *);
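
Dropping the extern inline prototypes removes a linkage wart: gnu89 and C99 disagree about what extern inline means, and either way a header declaration for a helper defined and used in a single .c file defeats sparse's static checking for no benefit. Making the function static inline in its only user, as the qla_nx2.c hunk below does, is the idiomatic fix; a reduced illustration with a hypothetical type:

/* foo.c -- no header declaration at all */
struct example_host { unsigned int qsnt_state; };

static inline void example_set_qsnt_ready(struct example_host *ha)
{
        ha->qsnt_state |= 0x1;  /* illustrative "quiescent ready" bit */
}
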
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 24a852828b5d..ed4d6b6b53e3 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -238,7 +238,7 @@ qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
238 return; 238 return;
239} 239}
240 240
241inline void 241static inline void
242qla8044_set_qsnt_ready(struct scsi_qla_host *vha) 242qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
243{ 243{
244 uint32_t qsnt_state; 244 uint32_t qsnt_state;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 33166ebec7d8..6b4d9235368a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -236,7 +236,6 @@ static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
236static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); 236static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
237static int qla2xxx_eh_host_reset(struct scsi_cmnd *); 237static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
238 238
239static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
240static void qla2x00_clear_drv_active(struct qla_hw_data *); 239static void qla2x00_clear_drv_active(struct qla_hw_data *);
241static void qla2x00_free_device(scsi_qla_host_t *); 240static void qla2x00_free_device(scsi_qla_host_t *);
242static void qla83xx_disable_laser(scsi_qla_host_t *vha); 241static void qla83xx_disable_laser(scsi_qla_host_t *vha);
@@ -258,7 +257,7 @@ struct scsi_host_template qla2xxx_driver_template = {
258 .slave_destroy = qla2xxx_slave_destroy, 257 .slave_destroy = qla2xxx_slave_destroy,
259 .scan_finished = qla2xxx_scan_finished, 258 .scan_finished = qla2xxx_scan_finished,
260 .scan_start = qla2xxx_scan_start, 259 .scan_start = qla2xxx_scan_start,
261 .change_queue_depth = qla2x00_change_queue_depth, 260 .change_queue_depth = scsi_change_queue_depth,
262 .change_queue_type = scsi_change_queue_type, 261 .change_queue_type = scsi_change_queue_type,
263 .this_id = -1, 262 .this_id = -1,
264 .cmd_per_lun = 3, 263 .cmd_per_lun = 3,
@@ -270,6 +269,7 @@ struct scsi_host_template qla2xxx_driver_template = {
270 269
271 .supported_mode = MODE_INITIATOR, 270 .supported_mode = MODE_INITIATOR,
272 .use_blk_tags = 1, 271 .use_blk_tags = 1,
272 .track_queue_depth = 1,
273}; 273};
274 274
275static struct scsi_transport_template *qla2xxx_transport_template = NULL; 275static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -1405,7 +1405,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1405 if (IS_T10_PI_CAPABLE(vha->hw)) 1405 if (IS_T10_PI_CAPABLE(vha->hw))
1406 blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1406 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1407 1407
1408 scsi_adjust_queue_depth(sdev, req->max_q_depth); 1408 scsi_change_queue_depth(sdev, req->max_q_depth);
1409 return 0; 1409 return 0;
1410} 1410}
1411 1411
@@ -1415,58 +1415,6 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
1415 sdev->hostdata = NULL; 1415 sdev->hostdata = NULL;
1416} 1416}
1417 1417
1418static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1419{
1420 fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1421
1422 if (!scsi_track_queue_full(sdev, qdepth))
1423 return;
1424
1425 ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
1426 "Queue depth adjusted-down to %d for nexus=%ld:%d:%llu.\n",
1427 sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
1428}
1429
1430static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1431{
1432 fc_port_t *fcport = sdev->hostdata;
1433 struct scsi_qla_host *vha = fcport->vha;
1434 struct req_que *req = NULL;
1435
1436 req = vha->req;
1437 if (!req)
1438 return;
1439
1440 if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
1441 return;
1442
1443 scsi_adjust_queue_depth(sdev, qdepth);
1444
1445 ql_dbg(ql_dbg_io, vha, 0x302a,
1446 "Queue depth adjusted-up to %d for nexus=%ld:%d:%llu.\n",
1447 sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
1448}
1449
1450static int
1451qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1452{
1453 switch (reason) {
1454 case SCSI_QDEPTH_DEFAULT:
1455 scsi_adjust_queue_depth(sdev, qdepth);
1456 break;
1457 case SCSI_QDEPTH_QFULL:
1458 qla2x00_handle_queue_full(sdev, qdepth);
1459 break;
1460 case SCSI_QDEPTH_RAMP_UP:
1461 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1462 break;
1463 default:
1464 return -EOPNOTSUPP;
1465 }
1466
1467 return sdev->queue_depth;
1468}
1469
1470/** 1418/**
1471 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. 1419 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1472 * @ha: HA context 1420 * @ha: HA context
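
After deleting the reason switch and its QFULL/ramp-up helpers, qla2xxx needs no change_queue_depth method of its own: the template points straight at the core helper. Any driver whose only policy is "accept the requested depth" can do the same, as in this sketch:

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template generic_sht = {
        .name                   = "example",
        /* No driver-side clamp: delegate directly to the midlayer. */
        .change_queue_depth     = scsi_change_queue_depth,
        .track_queue_depth      = 1,
};
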
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index f8724f2e0158..6d25879d87c8 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -164,8 +164,6 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
164static int qla4xxx_slave_alloc(struct scsi_device *device); 164static int qla4xxx_slave_alloc(struct scsi_device *device);
165static umode_t qla4_attr_is_visible(int param_type, int param); 165static umode_t qla4_attr_is_visible(int param_type, int param);
166static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 166static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
167static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
168 int reason);
169 167
170/* 168/*
171 * iSCSI Flash DDB sysfs entry points 169 * iSCSI Flash DDB sysfs entry points
@@ -203,7 +201,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
203 .eh_timed_out = qla4xxx_eh_cmd_timed_out, 201 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
204 202
205 .slave_alloc = qla4xxx_slave_alloc, 203 .slave_alloc = qla4xxx_slave_alloc,
206 .change_queue_depth = qla4xxx_change_queue_depth, 204 .change_queue_depth = scsi_change_queue_depth,
207 205
208 .this_id = -1, 206 .this_id = -1,
209 .cmd_per_lun = 3, 207 .cmd_per_lun = 3,
@@ -9061,19 +9059,10 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev)
9061 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) 9059 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
9062 queue_depth = ql4xmaxqdepth; 9060 queue_depth = ql4xmaxqdepth;
9063 9061
9064 scsi_adjust_queue_depth(sdev, queue_depth); 9062 scsi_change_queue_depth(sdev, queue_depth);
9065 return 0; 9063 return 0;
9066} 9064}
9067 9065
9068static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
9069 int reason)
9070{
9071 if (!ql4xqfulltracking)
9072 return -EOPNOTSUPP;
9073
9074 return iscsi_change_queue_depth(sdev, qdepth, reason);
9075}
9076
9077/** 9066/**
9078 * qla4xxx_del_from_active_array - returns an active srb 9067 * qla4xxx_del_from_active_array - returns an active srb
9079 * @ha: Pointer to host adapter structure. 9068 * @ha: Pointer to host adapter structure.
@@ -9873,6 +9862,9 @@ static int __init qla4xxx_module_init(void)
9873{ 9862{
9874 int ret; 9863 int ret;
9875 9864
9865 if (ql4xqfulltracking)
9866 qla4xxx_driver_template.track_queue_depth = 1;
9867
9876 /* Allocate cache for SRBs. */ 9868 /* Allocate cache for SRBs. */
9877 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, 9869 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
9878 SLAB_HWCACHE_ALIGN, NULL); 9870 SLAB_HWCACHE_ALIGN, NULL);
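
qla4xxx shows the runtime-conditional variant: a module parameter that used to gate the QFULL path call-by-call now flips the template flag once at module init, before any host is registered. A sketch with illustrative names:

#include <linux/init.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template example_driver_template = {
        .name                   = "example",
        .change_queue_depth     = scsi_change_queue_depth,
};

static bool example_qfulltracking = true;       /* stand-in module param */

static int __init example_module_init(void)
{
        /* Set before the first scsi_host_alloc() against this template,
         * so the flag is stable for every host's lifetime. */
        if (example_qfulltracking)
                example_driver_template.track_queue_depth = 1;
        return 0;       /* cache allocation and registration elided */
}
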
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2d9730432233..1ad0c36375b8 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -661,30 +661,18 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
661} 661}
662 662
663/** 663/**
664 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth 664 * scsi_change_queue_depth - change a device's queue depth
665 * @sdev: SCSI Device in question 665 * @sdev: SCSI Device in question
666 * @tags: Number of tags allowed if tagged queueing enabled, 666 * @depth: number of commands allowed to be queued to the driver
667 * or number of commands the low level driver can
668 * queue up in non-tagged mode (as per cmd_per_lun).
669 * 667 *
670 * Returns: Nothing 668 * Sets the device queue depth and returns the new value.
671 *
672 * Lock Status: None held on entry
673 *
674 * Notes: Low level drivers may call this at any time and we will do
675 * the right thing depending on whether or not the device is
676 * currently active and whether or not it even has the
677 * command blocks built yet.
678 */ 669 */
679void scsi_adjust_queue_depth(struct scsi_device *sdev, int tags) 670int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
680{ 671{
681 unsigned long flags; 672 unsigned long flags;
682 673
683 /* 674 if (depth <= 0)
684 * refuse to set tagged depth to an unworkable size 675 goto out;
685 */
686 if (tags <= 0)
687 return;
688 676
689 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 677 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
690 678
@@ -699,15 +687,17 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tags)
699 */ 687 */
700 if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) { 688 if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) {
701 if (blk_queue_tagged(sdev->request_queue) && 689 if (blk_queue_tagged(sdev->request_queue) &&
702 blk_queue_resize_tags(sdev->request_queue, tags) != 0) 690 blk_queue_resize_tags(sdev->request_queue, depth) != 0)
703 goto out; 691 goto out_unlock;
704 } 692 }
705 693
706 sdev->queue_depth = tags; 694 sdev->queue_depth = depth;
707 out: 695out_unlock:
708 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 696 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
697out:
698 return sdev->queue_depth;
709} 699}
710EXPORT_SYMBOL(scsi_adjust_queue_depth); 700EXPORT_SYMBOL(scsi_change_queue_depth);
711 701
712/** 702/**
713 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth 703 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
@@ -752,12 +742,11 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
752 if (sdev->last_queue_full_depth < 8) { 742 if (sdev->last_queue_full_depth < 8) {
753 /* Drop back to untagged */ 743 /* Drop back to untagged */
754 scsi_set_tag_type(sdev, 0); 744 scsi_set_tag_type(sdev, 0);
755 scsi_adjust_queue_depth(sdev, sdev->host->cmd_per_lun); 745 scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
756 return -1; 746 return -1;
757 } 747 }
758 748
759 scsi_adjust_queue_depth(sdev, depth); 749 return scsi_change_queue_depth(sdev, depth);
760 return depth;
761} 750}
762EXPORT_SYMBOL(scsi_track_queue_full); 751EXPORT_SYMBOL(scsi_track_queue_full);
763 752
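
The scsi.c hunks define the new core contract: scsi_change_queue_depth() returns the depth actually in effect, and a request of zero or less is ignored rather than clamped (the goto out path returns the current depth unchanged); scsi_track_queue_full() now simply forwards that result. Caller side, the return value replaces re-reading the sdev:

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Halve a device's queue depth and report what actually took effect.
 * If the halved value reaches 0, the depth is left as-is, per the
 * depth <= 0 bail-out in the helper above. */
static int example_shrink_depth(struct scsi_device *sdev)
{
        return scsi_change_queue_depth(sdev, sdev->queue_depth / 2);
}
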
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 378e0aae29ca..aa4b6b80aade 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -63,8 +63,8 @@
63#include "sd.h" 63#include "sd.h"
64#include "scsi_logging.h" 64#include "scsi_logging.h"
65 65
66#define SCSI_DEBUG_VERSION "1.84" 66#define SCSI_DEBUG_VERSION "1.85"
67static const char *scsi_debug_version_date = "20140706"; 67static const char *scsi_debug_version_date = "20141022";
68 68
69#define MY_NAME "scsi_debug" 69#define MY_NAME "scsi_debug"
70 70
@@ -75,19 +75,22 @@ static const char *scsi_debug_version_date = "20140706";
75#define UNRECOVERED_READ_ERR 0x11 75#define UNRECOVERED_READ_ERR 0x11
76#define PARAMETER_LIST_LENGTH_ERR 0x1a 76#define PARAMETER_LIST_LENGTH_ERR 0x1a
77#define INVALID_OPCODE 0x20 77#define INVALID_OPCODE 0x20
78#define ADDR_OUT_OF_RANGE 0x21 78#define LBA_OUT_OF_RANGE 0x21
79#define INVALID_COMMAND_OPCODE 0x20
80#define INVALID_FIELD_IN_CDB 0x24 79#define INVALID_FIELD_IN_CDB 0x24
81#define INVALID_FIELD_IN_PARAM_LIST 0x26 80#define INVALID_FIELD_IN_PARAM_LIST 0x26
82#define UA_RESET_ASC 0x29 81#define UA_RESET_ASC 0x29
83#define UA_CHANGED_ASC 0x2a 82#define UA_CHANGED_ASC 0x2a
83#define INSUFF_RES_ASC 0x55
84#define INSUFF_RES_ASCQ 0x3
84#define POWER_ON_RESET_ASCQ 0x0 85#define POWER_ON_RESET_ASCQ 0x0
85#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */ 86#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
86#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */ 87#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88#define CAPACITY_CHANGED_ASCQ 0x9
87#define SAVING_PARAMS_UNSUP 0x39 89#define SAVING_PARAMS_UNSUP 0x39
88#define TRANSPORT_PROBLEM 0x4b 90#define TRANSPORT_PROBLEM 0x4b
89#define THRESHOLD_EXCEEDED 0x5d 91#define THRESHOLD_EXCEEDED 0x5d
90#define LOW_POWER_COND_ON 0x5e 92#define LOW_POWER_COND_ON 0x5e
93#define MISCOMPARE_VERIFY_ASC 0x1d
91 94
92/* Additional Sense Code Qualifier (ASCQ) */ 95/* Additional Sense Code Qualifier (ASCQ) */
93#define ACK_NAK_TO 0x3 96#define ACK_NAK_TO 0x3
@@ -133,6 +136,7 @@ static const char *scsi_debug_version_date = "20140706";
133#define DEF_VIRTUAL_GB 0 136#define DEF_VIRTUAL_GB 0
134#define DEF_VPD_USE_HOSTNO 1 137#define DEF_VPD_USE_HOSTNO 1
135#define DEF_WRITESAME_LENGTH 0xFFFF 138#define DEF_WRITESAME_LENGTH 0xFFFF
139#define DEF_STRICT 0
136#define DELAY_OVERRIDDEN -9999 140#define DELAY_OVERRIDDEN -9999
137 141
138/* bit mask values for scsi_debug_opts */ 142/* bit mask values for scsi_debug_opts */
@@ -176,11 +180,12 @@ static const char *scsi_debug_version_date = "20140706";
176#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */ 180#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
177#define SDEBUG_UA_BUS_RESET 1 181#define SDEBUG_UA_BUS_RESET 1
178#define SDEBUG_UA_MODE_CHANGED 2 182#define SDEBUG_UA_MODE_CHANGED 2
179#define SDEBUG_NUM_UAS 3 183#define SDEBUG_UA_CAPACITY_CHANGED 3
184#define SDEBUG_NUM_UAS 4
180 185
181/* for check_readiness() */ 186/* for check_readiness() */
182#define UAS_ONLY 1 187#define UAS_ONLY 1 /* check for UAs only */
183#define UAS_TUR 0 188#define UAS_TUR 0 /* if no UAs then check if media access possible */
184 189
185/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this 190/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
186 * sector on read commands: */ 191 * sector on read commands: */
@@ -206,6 +211,301 @@ static const char *scsi_debug_version_date = "20140706";
206#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE" 211#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
207#endif 212#endif
208 213
214/* SCSI opcodes (first byte of cdb) mapped onto these indexes */
215enum sdeb_opcode_index {
216 SDEB_I_INVALID_OPCODE = 0,
217 SDEB_I_INQUIRY = 1,
218 SDEB_I_REPORT_LUNS = 2,
219 SDEB_I_REQUEST_SENSE = 3,
220 SDEB_I_TEST_UNIT_READY = 4,
221 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
222 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
223 SDEB_I_LOG_SENSE = 7,
224 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
225 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
226 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
227 SDEB_I_START_STOP = 11,
228 SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
229 SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
230 SDEB_I_MAINT_IN = 14,
231 SDEB_I_MAINT_OUT = 15,
232 SDEB_I_VERIFY = 16, /* 10 only */
233 SDEB_I_VARIABLE_LEN = 17,
234 SDEB_I_RESERVE = 18, /* 6, 10 */
235 SDEB_I_RELEASE = 19, /* 6, 10 */
236 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
237 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
238 SDEB_I_ATA_PT = 22, /* 12, 16 */
239 SDEB_I_SEND_DIAG = 23,
240 SDEB_I_UNMAP = 24,
241 SDEB_I_XDWRITEREAD = 25, /* 10 only */
242 SDEB_I_WRITE_BUFFER = 26,
243 SDEB_I_WRITE_SAME = 27, /* 10, 16 */
244 SDEB_I_SYNC_CACHE = 28, /* 10 only */
245 SDEB_I_COMP_WRITE = 29,
246 SDEB_I_LAST_ELEMENT = 30, /* keep this last */
247};
248
249static const unsigned char opcode_ind_arr[256] = {
250/* 0x0; 0x0->0x1f: 6 byte cdbs */
251 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
252 0, 0, 0, 0,
253 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
254 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
255 SDEB_I_RELEASE,
256 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
257 SDEB_I_ALLOW_REMOVAL, 0,
258/* 0x20; 0x20->0x3f: 10 byte cdbs */
259 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
260 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
261 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
262 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
263/* 0x40; 0x40->0x5f: 10 byte cdbs */
264 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
265 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
266 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
267 SDEB_I_RELEASE,
268 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
269/* 0x60; 0x60->0x7d are reserved */
270 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
271 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
272 0, SDEB_I_VARIABLE_LEN,
273/* 0x80; 0x80->0x9f: 16 byte cdbs */
274 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
275 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
276 0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
277 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
278/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
279 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
280 SDEB_I_MAINT_OUT, 0, 0, 0,
281 SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
282 0, 0, 0, 0,
283 0, 0, 0, 0, 0, 0, 0, 0,
284 0, 0, 0, 0, 0, 0, 0, 0,
285/* 0xc0; 0xc0->0xff: vendor specific */
286 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
287 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
288 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
289 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
290};
291
292#define F_D_IN 1
293#define F_D_OUT 2
294#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
295#define F_D_UNKN 8
296#define F_RL_WLUN_OK 0x10
297#define F_SKIP_UA 0x20
298#define F_DELAY_OVERR 0x40
299#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
300#define F_SA_HIGH 0x100 /* as used by variable length cdbs */
301#define F_INV_OP 0x200
302#define F_FAKE_RW 0x400
303#define F_M_ACCESS 0x800 /* media access */
304
305#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
306#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
307#define FF_SA (F_SA_HIGH | F_SA_LOW)
308
309struct sdebug_dev_info;
310static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
311static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
312static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
313static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
314static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
315static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
316static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
317static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
318static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
319static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
320static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
321static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
322static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
323static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
324static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
325static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
326static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
327static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
328static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
329static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
330
331struct opcode_info_t {
332 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff
333 * for terminating element */
334 u8 opcode; /* if num_attached > 0, preferred */
335 u16 sa; /* service action */
336 u32 flags; /* OR-ed set of SDEB_F_* */
337 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
338 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
339 u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
340 /* ignore cdb bytes after position 15 */
341};
342
343static const struct opcode_info_t msense_iarr[1] = {
344 {0, 0x1a, 0, F_D_IN, NULL, NULL,
345 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
346};
347
348static const struct opcode_info_t mselect_iarr[1] = {
349 {0, 0x15, 0, F_D_OUT, NULL, NULL,
350 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
351};
352
353static const struct opcode_info_t read_iarr[3] = {
354 {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
355 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
356 0, 0, 0, 0} },
357 {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
358 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
359 {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
360 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
361 0xc7, 0, 0, 0, 0} },
362};
363
364static const struct opcode_info_t write_iarr[3] = {
365 {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
366 {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
367 0, 0, 0, 0} },
368 {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
369 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
370 {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
371 {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
372 0xc7, 0, 0, 0, 0} },
373};
374
375static const struct opcode_info_t sa_in_iarr[1] = {
376 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
377 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
378 0xff, 0xff, 0xff, 0, 0xc7} },
379};
380
381static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
382 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
383 NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
384 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
385};
386
387static const struct opcode_info_t maint_in_iarr[2] = {
388 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
389 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
390 0xc7, 0, 0, 0, 0} },
391 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
392 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
393 0, 0} },
394};
395
396static const struct opcode_info_t write_same_iarr[1] = {
397 {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
398 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
399 0xff, 0xff, 0xff, 0x1f, 0xc7} },
400};
401
402static const struct opcode_info_t reserve_iarr[1] = {
403 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
404 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
405};
406
407static const struct opcode_info_t release_iarr[1] = {
408 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
409 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
410};
411
412
413/* This array is accessed via SDEB_I_* values. Make sure every SDEB_I_*
414 * value maps to an entry, plus the terminating element, since logic such
415 * as REPORT SUPPORTED OPERATION CODES scans this table. */
416static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
417/* 0 */
418 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
419 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
420 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
421 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
422 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
423 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
424 0, 0} },
425 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
426 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
427 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
428 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
429 {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
430 {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
431 0} },
432 {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
433 {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
434 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
435 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
436 0, 0, 0} },
437 {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
438 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
439 0, 0} },
440 {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
441 {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
442 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
443/* 10 */
444 {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
445 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
446 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
447 {0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
448 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
449 {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
450 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
451 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
452 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
453 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
454 {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
455 {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
456 0} },
457 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
458 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
459 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
460 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
461 {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
462 vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
463 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
464 {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
465 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
466 0} },
467 {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
468 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
469 0} },
470/* 20 */
471 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
472 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
473 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
474 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
475 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
476 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
477	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
478 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
479 {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
480 {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
481 {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
482 NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
483 0, 0, 0, 0, 0, 0} },
484 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */
485 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
486 {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
487 write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
488 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
489 {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
490 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
491 0, 0, 0, 0} },
492 {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
493 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
494 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
495
496/* 30 */
497 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
498 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
499};
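
Command dispatch is a two-level lookup: the CDB opcode indexes the 256-entry opcode_ind_arr earlier in this file to obtain an SDEB_I_* slot, that slot indexes opcode_info_arr, and commands sharing a slot but differing by service action are resolved by scanning arrp. A hedged sketch of that resolution using the types above (find_opcode_info is an illustrative name; the driver's own queuecommand logic is more involved):

	static const struct opcode_info_t *
	find_opcode_info(const unsigned char *cmd)
	{
		const struct opcode_info_t *oip;
		u16 sa;
		int k, na;

		oip = &opcode_info_arr[opcode_ind_arr[cmd[0]]];
		if (!(FF_SA & oip->flags))
			return oip;	/* no service action to match */
		/* sa is cdb[1] bits 4:0, or bytes 8-9 of a variable length cdb */
		sa = (F_SA_LOW & oip->flags) ? (cmd[1] & 0x1f) :
					       get_unaligned_be16(cmd + 8);
		if (sa == oip->sa && cmd[0] == oip->opcode)
			return oip;
		na = oip->num_attached;
		for (k = 0; k < na; ++k)	/* scan attached variants */
			if (sa == oip->arrp[k].sa && cmd[0] == oip->arrp[k].opcode)
				return &oip->arrp[k];
		return oip;	/* fall back to the slot's head entry */
	}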
500
501struct sdebug_scmd_extra_t {
502 bool inj_recovered;
503 bool inj_transport;
504 bool inj_dif;
505 bool inj_dix;
506 bool inj_short;
507};
508
209static int scsi_debug_add_host = DEF_NUM_HOST; 509static int scsi_debug_add_host = DEF_NUM_HOST;
210static int scsi_debug_ato = DEF_ATO; 510static int scsi_debug_ato = DEF_ATO;
211static int scsi_debug_delay = DEF_DELAY; 511static int scsi_debug_delay = DEF_DELAY;
@@ -245,6 +545,8 @@ static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
245static bool scsi_debug_removable = DEF_REMOVABLE; 545static bool scsi_debug_removable = DEF_REMOVABLE;
246static bool scsi_debug_clustering; 546static bool scsi_debug_clustering;
247static bool scsi_debug_host_lock = DEF_HOST_LOCK; 547static bool scsi_debug_host_lock = DEF_HOST_LOCK;
548static bool scsi_debug_strict = DEF_STRICT;
549static bool sdebug_any_injecting_opt;
248 550
249static atomic_t sdebug_cmnd_count; 551static atomic_t sdebug_cmnd_count;
250static atomic_t sdebug_completions; 552static atomic_t sdebug_completions;
@@ -277,11 +579,10 @@ struct sdebug_dev_info {
277 unsigned int target; 579 unsigned int target;
278 u64 lun; 580 u64 lun;
279 struct sdebug_host_info *sdbg_host; 581 struct sdebug_host_info *sdbg_host;
280 u64 wlun;
281 unsigned long uas_bm[1]; 582 unsigned long uas_bm[1];
282 atomic_t num_in_q; 583 atomic_t num_in_q;
283 char stopped; 584 char stopped; /* TODO: should be atomic */
284 char used; 585 bool used;
285}; 586};
286 587
287struct sdebug_host_info { 588struct sdebug_host_info {
@@ -394,6 +695,50 @@ static void sdebug_max_tgts_luns(void)
394 spin_unlock(&sdebug_host_list_lock); 695 spin_unlock(&sdebug_host_list_lock);
395} 696}
396 697
698enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
699
700/* Set in_bit to -1 to indicate no bit position of invalid field */
701static void
702mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
703 int in_byte, int in_bit)
704{
705 unsigned char *sbuff;
706 u8 sks[4];
707 int sl, asc;
708
709 sbuff = scp->sense_buffer;
710 if (!sbuff) {
711 sdev_printk(KERN_ERR, scp->device,
712 "%s: sense_buffer is NULL\n", __func__);
713 return;
714 }
715 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
716 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
717 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
718 asc, 0);
719 memset(sks, 0, sizeof(sks));
720 sks[0] = 0x80;
721 if (c_d)
722 sks[0] |= 0x40;
723 if (in_bit >= 0) {
724 sks[0] |= 0x8;
725 sks[0] |= 0x7 & in_bit;
726 }
727 put_unaligned_be16(in_byte, sks + 1);
728 if (scsi_debug_dsense) {
729 sl = sbuff[7] + 8;
730 sbuff[7] = sl;
731 sbuff[sl] = 0x2;
732 sbuff[sl + 1] = 0x6;
733 memcpy(sbuff + sl + 4, sks, 3);
734 } else
735 memcpy(sbuff + 15, sks, 3);
736 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
737 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
738 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
739 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
740}
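
The sks[] bytes built above follow the SPC sense-key-specific "field pointer" format: bit 7 SKSV, bit 6 C/D (1 = error in the CDB, 0 = in the parameter data), bit 3 BPV with bits 2:0 as the bit pointer, then a big-endian 16-bit byte pointer. Two calls and the bytes they should produce, as implied by the code above (not separately verified against a trace):

	/* unsupported EVPD page: error in CDB byte 2, no bit pointer */
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
	/* -> sks[] = { 0xc0, 0x00, 0x02 }  (SKSV | C/D) */

	/* PS bit illegally set in a MODE SELECT parameter list: byte off, bit 7 */
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
	/* -> sks[] = { 0x8f, off >> 8, off & 0xff }  (SKSV | BPV, bit 7) */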
741
397static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq) 742static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
398{ 743{
399 unsigned char *sbuff; 744 unsigned char *sbuff;
@@ -414,63 +759,10 @@ static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
414 my_name, key, asc, asq); 759 my_name, key, asc, asq);
415} 760}
416 761
417static void get_data_transfer_info(unsigned char *cmd, 762static void
418 unsigned long long *lba, unsigned int *num, 763mk_sense_invalid_opcode(struct scsi_cmnd *scp)
419 u32 *ei_lba)
420{ 764{
421 *ei_lba = 0; 765 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
422
423 switch (*cmd) {
424 case VARIABLE_LENGTH_CMD:
425 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
426 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
427 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
428 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
429
430 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
431 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
432
433 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
434 (u32)cmd[28] << 24;
435 break;
436
437 case WRITE_SAME_16:
438 case WRITE_16:
439 case READ_16:
440 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
441 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
442 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
443 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
444
445 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
446 (u32)cmd[10] << 24;
447 break;
448 case WRITE_12:
449 case READ_12:
450 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
451 (u32)cmd[2] << 24;
452
453 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
454 (u32)cmd[6] << 24;
455 break;
456 case WRITE_SAME:
457 case WRITE_10:
458 case READ_10:
459 case XDWRITEREAD_10:
460 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
461 (u32)cmd[2] << 24;
462
463 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
464 break;
465 case WRITE_6:
466 case READ_6:
467 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
468 (u32)(cmd[1] & 0x1f) << 16;
469 *num = (0 == cmd[4]) ? 256 : cmd[4];
470 break;
471 default:
472 break;
473 }
474} 766}
475 767
476static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 768static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
@@ -520,6 +812,11 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
520 if (debug) 812 if (debug)
521 cp = "mode parameters changed"; 813 cp = "mode parameters changed";
522 break; 814 break;
815 case SDEBUG_UA_CAPACITY_CHANGED:
816 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
817 UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
818 if (debug)
819			cp = "capacity data changed";
			break;
523 default: 820 default:
524 pr_warn("%s: unexpected unit attention code=%d\n", 821 pr_warn("%s: unexpected unit attention code=%d\n",
525 __func__, k); 822 __func__, k);
@@ -924,19 +1221,20 @@ static int inquiry_evpd_b2(unsigned char *arr)
924#define SDEBUG_LONG_INQ_SZ 96 1221#define SDEBUG_LONG_INQ_SZ 96
925#define SDEBUG_MAX_INQ_ARR_SZ 584 1222#define SDEBUG_MAX_INQ_ARR_SZ 584
926 1223
927static int resp_inquiry(struct scsi_cmnd *scp, int target, 1224static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
928 struct sdebug_dev_info * devip)
929{ 1225{
930 unsigned char pq_pdt; 1226 unsigned char pq_pdt;
931 unsigned char * arr; 1227 unsigned char * arr;
932 unsigned char *cmd = scp->cmnd; 1228 unsigned char *cmd = scp->cmnd;
933 int alloc_len, n, ret; 1229 int alloc_len, n, ret;
1230 bool have_wlun;
934 1231
935 alloc_len = (cmd[3] << 8) + cmd[4]; 1232 alloc_len = (cmd[3] << 8) + cmd[4];
936 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); 1233 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
937 if (! arr) 1234 if (! arr)
938 return DID_REQUEUE << 16; 1235 return DID_REQUEUE << 16;
939 if (devip->wlun) 1236 have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
1237 if (have_wlun)
940 pq_pdt = 0x1e; /* present, wlun */ 1238 pq_pdt = 0x1e; /* present, wlun */
941 else if (scsi_debug_no_lun_0 && (0 == devip->lun)) 1239 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
942 pq_pdt = 0x7f; /* not present, no device type */ 1240 pq_pdt = 0x7f; /* not present, no device type */
@@ -944,8 +1242,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target,
944 pq_pdt = (scsi_debug_ptype & 0x1f); 1242 pq_pdt = (scsi_debug_ptype & 0x1f);
945 arr[0] = pq_pdt; 1243 arr[0] = pq_pdt;
946 if (0x2 & cmd[1]) { /* CMDDT bit set */ 1244 if (0x2 & cmd[1]) { /* CMDDT bit set */
947 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 1245 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
948 0);
949 kfree(arr); 1246 kfree(arr);
950 return check_condition_result; 1247 return check_condition_result;
951 } else if (0x1 & cmd[1]) { /* EVPD bit set */ 1248 } else if (0x1 & cmd[1]) { /* EVPD bit set */
@@ -957,7 +1254,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target,
957 (devip->channel & 0x7f); 1254 (devip->channel & 0x7f);
958 if (0 == scsi_debug_vpd_use_hostno) 1255 if (0 == scsi_debug_vpd_use_hostno)
959 host_no = 0; 1256 host_no = 0;
960 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) + 1257 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
961 (devip->target * 1000) + devip->lun); 1258 (devip->target * 1000) + devip->lun);
962 target_dev_id = ((host_no + 1) * 2000) + 1259 target_dev_id = ((host_no + 1) * 2000) +
963 (devip->target * 1000) - 3; 1260 (devip->target * 1000) - 3;
@@ -1029,9 +1326,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target,
1029 arr[1] = cmd[2]; /*sanity */ 1326 arr[1] = cmd[2]; /*sanity */
1030 arr[3] = inquiry_evpd_b2(&arr[4]); 1327 arr[3] = inquiry_evpd_b2(&arr[4]);
1031 } else { 1328 } else {
1032 /* Illegal request, invalid field in cdb */ 1329 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1033 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1034 INVALID_FIELD_IN_CDB, 0);
1035 kfree(arr); 1330 kfree(arr);
1036 return check_condition_result; 1331 return check_condition_result;
1037 } 1332 }
@@ -1077,18 +1372,20 @@ static int resp_requests(struct scsi_cmnd * scp,
1077 unsigned char * sbuff; 1372 unsigned char * sbuff;
1078 unsigned char *cmd = scp->cmnd; 1373 unsigned char *cmd = scp->cmnd;
1079 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; 1374 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1080 int want_dsense; 1375 bool dsense, want_dsense;
1081 int len = 18; 1376 int len = 18;
1082 1377
1083 memset(arr, 0, sizeof(arr)); 1378 memset(arr, 0, sizeof(arr));
1084 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense; 1379 dsense = !!(cmd[1] & 1);
1380 want_dsense = dsense || scsi_debug_dsense;
1085 sbuff = scp->sense_buffer; 1381 sbuff = scp->sense_buffer;
1086 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) { 1382 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1087 if (want_dsense) { 1383 if (dsense) {
1088 arr[0] = 0x72; 1384 arr[0] = 0x72;
1089 arr[1] = 0x0; /* NO_SENSE in sense_key */ 1385 arr[1] = 0x0; /* NO_SENSE in sense_key */
1090 arr[2] = THRESHOLD_EXCEEDED; 1386 arr[2] = THRESHOLD_EXCEEDED;
1091 arr[3] = 0xff; /* TEST set and MRIE==6 */ 1387 arr[3] = 0xff; /* TEST set and MRIE==6 */
1388 len = 8;
1092 } else { 1389 } else {
1093 arr[0] = 0x70; 1390 arr[0] = 0x70;
1094 arr[2] = 0x0; /* NO_SENSE in sense_key */ 1391 arr[2] = 0x0; /* NO_SENSE in sense_key */
@@ -1098,15 +1395,34 @@ static int resp_requests(struct scsi_cmnd * scp,
1098 } 1395 }
1099 } else { 1396 } else {
1100 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE); 1397 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1101 if ((cmd[1] & 1) && (! scsi_debug_dsense)) { 1398 if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1102 /* DESC bit set and sense_buff in fixed format */ 1399 ; /* have sense and formats match */
1103 memset(arr, 0, sizeof(arr)); 1400 else if (arr[0] <= 0x70) {
1401 if (dsense) {
1402 memset(arr, 0, 8);
1403 arr[0] = 0x72;
1404 len = 8;
1405 } else {
1406 memset(arr, 0, 18);
1407 arr[0] = 0x70;
1408 arr[7] = 0xa;
1409 }
1410 } else if (dsense) {
1411 memset(arr, 0, 8);
1104 arr[0] = 0x72; 1412 arr[0] = 0x72;
1105 arr[1] = sbuff[2]; /* sense key */ 1413 arr[1] = sbuff[2]; /* sense key */
1106 arr[2] = sbuff[12]; /* asc */ 1414 arr[2] = sbuff[12]; /* asc */
1107 arr[3] = sbuff[13]; /* ascq */ 1415 arr[3] = sbuff[13]; /* ascq */
1108 len = 8; 1416 len = 8;
1417 } else {
1418 memset(arr, 0, 18);
1419 arr[0] = 0x70;
1420 arr[2] = sbuff[1];
1421 arr[7] = 0xa;
1422 arr[12] = sbuff[1];
1423 arr[13] = sbuff[3];
1109 } 1424 }
1425
1110 } 1426 }
1111 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0); 1427 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1112 return fill_from_dev_buffer(scp, arr, len); 1428 return fill_from_dev_buffer(scp, arr, len);
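
The branches above translate between the two sense formats: fixed (response code 0x70, key at byte 2, ASC/ASCQ at bytes 12/13, additional length 0xa at byte 7) and descriptor (0x72, key/ASC/ASCQ at bytes 1/2/3 of an 8-byte header). A minimal standalone sketch of the same mapping in one direction:

	#include <stdint.h>
	#include <string.h>

	/* Convert fixed-format sense (0x70) to a minimal descriptor-format
	 * header (0x72), mirroring the field mapping used above. */
	static void fixed_to_desc_sense(const uint8_t *fixed, uint8_t *desc)
	{
		memset(desc, 0, 8);
		desc[0] = 0x72;
		desc[1] = fixed[2] & 0xf;	/* sense key */
		desc[2] = fixed[12];		/* asc */
		desc[3] = fixed[13];		/* ascq */
	}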
@@ -1116,15 +1432,11 @@ static int resp_start_stop(struct scsi_cmnd * scp,
1116 struct sdebug_dev_info * devip) 1432 struct sdebug_dev_info * devip)
1117{ 1433{
1118 unsigned char *cmd = scp->cmnd; 1434 unsigned char *cmd = scp->cmnd;
1119 int power_cond, errsts, start; 1435 int power_cond, start;
1120 1436
1121 errsts = check_readiness(scp, UAS_ONLY, devip);
1122 if (errsts)
1123 return errsts;
1124 power_cond = (cmd[4] & 0xf0) >> 4; 1437 power_cond = (cmd[4] & 0xf0) >> 4;
1125 if (power_cond) { 1438 if (power_cond) {
1126 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 1439 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1127 0);
1128 return check_condition_result; 1440 return check_condition_result;
1129 } 1441 }
1130 start = cmd[4] & 1; 1442 start = cmd[4] & 1;
@@ -1148,11 +1460,7 @@ static int resp_readcap(struct scsi_cmnd * scp,
1148{ 1460{
1149 unsigned char arr[SDEBUG_READCAP_ARR_SZ]; 1461 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1150 unsigned int capac; 1462 unsigned int capac;
1151 int errsts;
1152 1463
1153 errsts = check_readiness(scp, UAS_ONLY, devip);
1154 if (errsts)
1155 return errsts;
1156 /* following just in case virtual_gb changed */ 1464 /* following just in case virtual_gb changed */
1157 sdebug_capacity = get_sdebug_capacity(); 1465 sdebug_capacity = get_sdebug_capacity();
1158 memset(arr, 0, SDEBUG_READCAP_ARR_SZ); 1466 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
@@ -1180,11 +1488,8 @@ static int resp_readcap16(struct scsi_cmnd * scp,
1180 unsigned char *cmd = scp->cmnd; 1488 unsigned char *cmd = scp->cmnd;
1181 unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; 1489 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1182 unsigned long long capac; 1490 unsigned long long capac;
1183 int errsts, k, alloc_len; 1491 int k, alloc_len;
1184 1492
1185 errsts = check_readiness(scp, UAS_ONLY, devip);
1186 if (errsts)
1187 return errsts;
1188 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8) 1493 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1189 + cmd[13]); 1494 + cmd[13]);
1190 /* following just in case virtual_gb changed */ 1495 /* following just in case virtual_gb changed */
@@ -1300,6 +1605,184 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1300 return ret; 1605 return ret;
1301} 1606}
1302 1607
1608static int
1609resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1610{
1611 bool rctd;
1612 u8 reporting_opts, req_opcode, sdeb_i, supp;
1613 u16 req_sa, u;
1614 u32 alloc_len, a_len;
1615 int k, offset, len, errsts, count, bump, na;
1616 const struct opcode_info_t *oip;
1617 const struct opcode_info_t *r_oip;
1618 u8 *arr;
1619 u8 *cmd = scp->cmnd;
1620
1621 rctd = !!(cmd[2] & 0x80);
1622 reporting_opts = cmd[2] & 0x7;
1623 req_opcode = cmd[3];
1624 req_sa = get_unaligned_be16(cmd + 4);
1625 alloc_len = get_unaligned_be32(cmd + 6);
1626	if (alloc_len < 4 || alloc_len > 0xffff) {
1627 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1628 return check_condition_result;
1629 }
1630 if (alloc_len > 8192)
1631 a_len = 8192;
1632 else
1633 a_len = alloc_len;
1634 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL);
1635 if (NULL == arr) {
1636 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1637 INSUFF_RES_ASCQ);
1638 return check_condition_result;
1639 }
1640 switch (reporting_opts) {
1641 case 0: /* all commands */
1642 /* count number of commands */
1643 for (count = 0, oip = opcode_info_arr;
1644 oip->num_attached != 0xff; ++oip) {
1645 if (F_INV_OP & oip->flags)
1646 continue;
1647 count += (oip->num_attached + 1);
1648 }
1649 bump = rctd ? 20 : 8;
1650 put_unaligned_be32(count * bump, arr);
1651 for (offset = 4, oip = opcode_info_arr;
1652 oip->num_attached != 0xff && offset < a_len; ++oip) {
1653 if (F_INV_OP & oip->flags)
1654 continue;
1655 na = oip->num_attached;
1656 arr[offset] = oip->opcode;
1657 put_unaligned_be16(oip->sa, arr + offset + 2);
1658 if (rctd)
1659 arr[offset + 5] |= 0x2;
1660 if (FF_SA & oip->flags)
1661 arr[offset + 5] |= 0x1;
1662 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1663 if (rctd)
1664 put_unaligned_be16(0xa, arr + offset + 8);
1665 r_oip = oip;
1666 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1667 if (F_INV_OP & oip->flags)
1668 continue;
1669 offset += bump;
1670 arr[offset] = oip->opcode;
1671 put_unaligned_be16(oip->sa, arr + offset + 2);
1672 if (rctd)
1673 arr[offset + 5] |= 0x2;
1674 if (FF_SA & oip->flags)
1675 arr[offset + 5] |= 0x1;
1676 put_unaligned_be16(oip->len_mask[0],
1677 arr + offset + 6);
1678 if (rctd)
1679 put_unaligned_be16(0xa,
1680 arr + offset + 8);
1681 }
1682 oip = r_oip;
1683 offset += bump;
1684 }
1685 break;
1686 case 1: /* one command: opcode only */
1687 case 2: /* one command: opcode plus service action */
1688 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1689 sdeb_i = opcode_ind_arr[req_opcode];
1690 oip = &opcode_info_arr[sdeb_i];
1691 if (F_INV_OP & oip->flags) {
1692 supp = 1;
1693 offset = 4;
1694 } else {
1695 if (1 == reporting_opts) {
1696 if (FF_SA & oip->flags) {
1697 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1698 2, 2);
1699 kfree(arr);
1700 return check_condition_result;
1701 }
1702 req_sa = 0;
1703 } else if (2 == reporting_opts &&
1704 0 == (FF_SA & oip->flags)) {
1705				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
1706				kfree(arr);
1707 return check_condition_result;
1708 }
1709 if (0 == (FF_SA & oip->flags) &&
1710 req_opcode == oip->opcode)
1711 supp = 3;
1712 else if (0 == (FF_SA & oip->flags)) {
1713 na = oip->num_attached;
1714 for (k = 0, oip = oip->arrp; k < na;
1715 ++k, ++oip) {
1716 if (req_opcode == oip->opcode)
1717 break;
1718 }
1719 supp = (k >= na) ? 1 : 3;
1720 } else if (req_sa != oip->sa) {
1721 na = oip->num_attached;
1722 for (k = 0, oip = oip->arrp; k < na;
1723 ++k, ++oip) {
1724 if (req_sa == oip->sa)
1725 break;
1726 }
1727 supp = (k >= na) ? 1 : 3;
1728 } else
1729 supp = 3;
1730 if (3 == supp) {
1731 u = oip->len_mask[0];
1732 put_unaligned_be16(u, arr + 2);
1733 arr[4] = oip->opcode;
1734 for (k = 1; k < u; ++k)
1735 arr[4 + k] = (k < 16) ?
1736 oip->len_mask[k] : 0xff;
1737 offset = 4 + u;
1738 } else
1739 offset = 4;
1740 }
1741 arr[1] = (rctd ? 0x80 : 0) | supp;
1742 if (rctd) {
1743 put_unaligned_be16(0xa, arr + offset);
1744 offset += 12;
1745 }
1746 break;
1747 default:
1748 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1749 kfree(arr);
1750 return check_condition_result;
1751 }
1752 offset = (offset < a_len) ? offset : a_len;
1753 len = (offset < alloc_len) ? offset : alloc_len;
1754 errsts = fill_from_dev_buffer(scp, arr, len);
1755 kfree(arr);
1756 return errsts;
1757}
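
In the "all commands" case the response is a 4-byte big-endian length header followed by one descriptor per command: opcode at byte 0, service action at bytes 2-3, CTDP/SERVACTV flags in byte 5, CDB length at bytes 6-7, with 8 bytes per descriptor (20 with RCTD, which appends a timeouts descriptor). A standalone parsing sketch under those assumptions (dump_rsoc_all is an illustrative name):

	#include <stdint.h>
	#include <stdio.h>

	static void dump_rsoc_all(const uint8_t *rsp, int rctd)
	{
		uint32_t cdl = ((uint32_t)rsp[0] << 24) | (rsp[1] << 16) |
			       (rsp[2] << 8) | rsp[3];
		int bump = rctd ? 20 : 8;
		uint32_t off;

		for (off = 4; off < cdl + 4; off += bump) {
			const uint8_t *d = rsp + off;

			printf("opcode=0x%02x sa=0x%x servactv=%d cdb_len=%u\n",
			       d[0], (d[2] << 8) | d[3], d[5] & 0x1,
			       (d[6] << 8) | d[7]);
		}
	}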
1758
1759static int
1760resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1761{
1762 bool repd;
1763 u32 alloc_len, len;
1764 u8 arr[16];
1765 u8 *cmd = scp->cmnd;
1766
1767 memset(arr, 0, sizeof(arr));
1768 repd = !!(cmd[2] & 0x80);
1769 alloc_len = get_unaligned_be32(cmd + 6);
1770 if (alloc_len < 4) {
1771 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1772 return check_condition_result;
1773 }
1774 arr[0] = 0xc8; /* ATS | ATSS | LURS */
1775 arr[1] = 0x1; /* ITNRS */
1776 if (repd) {
1777 arr[3] = 0xc;
1778 len = 16;
1779 } else
1780 len = 4;
1781
1782 len = (len < alloc_len) ? len : alloc_len;
1783 return fill_from_dev_buffer(scp, arr, len);
1784}
1785
1303/* <<Following mode page info copied from ST318451LW>> */ 1786/* <<Following mode page info copied from ST318451LW>> */
1304 1787
1305static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target) 1788static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
@@ -1459,20 +1942,18 @@ static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1459 1942
1460#define SDEBUG_MAX_MSENSE_SZ 256 1943#define SDEBUG_MAX_MSENSE_SZ 256
1461 1944
1462static int resp_mode_sense(struct scsi_cmnd * scp, int target, 1945static int
1463 struct sdebug_dev_info * devip) 1946resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1464{ 1947{
1465 unsigned char dbd, llbaa; 1948 unsigned char dbd, llbaa;
1466 int pcontrol, pcode, subpcode, bd_len; 1949 int pcontrol, pcode, subpcode, bd_len;
1467 unsigned char dev_spec; 1950 unsigned char dev_spec;
1468 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id; 1951 int k, alloc_len, msense_6, offset, len, target_dev_id;
1952 int target = scp->device->id;
1469 unsigned char * ap; 1953 unsigned char * ap;
1470 unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; 1954 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1471 unsigned char *cmd = scp->cmnd; 1955 unsigned char *cmd = scp->cmnd;
1472 1956
1473 errsts = check_readiness(scp, UAS_ONLY, devip);
1474 if (errsts)
1475 return errsts;
1476 dbd = !!(cmd[1] & 0x8); 1957 dbd = !!(cmd[1] & 0x8);
1477 pcontrol = (cmd[2] & 0xc0) >> 6; 1958 pcontrol = (cmd[2] & 0xc0) >> 6;
1478 pcode = cmd[2] & 0x3f; 1959 pcode = cmd[2] & 0x3f;
@@ -1542,8 +2023,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1542 2023
1543 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) { 2024 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1544 /* TODO: Control Extension page */ 2025 /* TODO: Control Extension page */
1545 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 2026 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1546 0);
1547 return check_condition_result; 2027 return check_condition_result;
1548 } 2028 }
1549 switch (pcode) { 2029 switch (pcode) {
@@ -1569,8 +2049,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1569 break; 2049 break;
1570 case 0x19: /* if spc==1 then sas phy, control+discover */ 2050 case 0x19: /* if spc==1 then sas phy, control+discover */
1571 if ((subpcode > 0x2) && (subpcode < 0xff)) { 2051 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1572 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2052 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1573 INVALID_FIELD_IN_CDB, 0);
1574 return check_condition_result; 2053 return check_condition_result;
1575 } 2054 }
1576 len = 0; 2055 len = 0;
@@ -1602,15 +2081,13 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1602 } 2081 }
1603 len += resp_iec_m_pg(ap + len, pcontrol, target); 2082 len += resp_iec_m_pg(ap + len, pcontrol, target);
1604 } else { 2083 } else {
1605 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2084 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1606 INVALID_FIELD_IN_CDB, 0);
1607 return check_condition_result; 2085 return check_condition_result;
1608 } 2086 }
1609 offset += len; 2087 offset += len;
1610 break; 2088 break;
1611 default: 2089 default:
1612 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 2090 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1613 0);
1614 return check_condition_result; 2091 return check_condition_result;
1615 } 2092 }
1616 if (msense_6) 2093 if (msense_6)
@@ -1624,24 +2101,21 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1624 2101
1625#define SDEBUG_MAX_MSELECT_SZ 512 2102#define SDEBUG_MAX_MSELECT_SZ 512
1626 2103
1627static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, 2104static int
1628 struct sdebug_dev_info * devip) 2105resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1629{ 2106{
1630 int pf, sp, ps, md_len, bd_len, off, spf, pg_len; 2107 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1631 int param_len, res, errsts, mpage; 2108 int param_len, res, mpage;
1632 unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; 2109 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1633 unsigned char *cmd = scp->cmnd; 2110 unsigned char *cmd = scp->cmnd;
2111 int mselect6 = (MODE_SELECT == cmd[0]);
1634 2112
1635 errsts = check_readiness(scp, UAS_ONLY, devip);
1636 if (errsts)
1637 return errsts;
1638 memset(arr, 0, sizeof(arr)); 2113 memset(arr, 0, sizeof(arr));
1639 pf = cmd[1] & 0x10; 2114 pf = cmd[1] & 0x10;
1640 sp = cmd[1] & 0x1; 2115 sp = cmd[1] & 0x1;
1641 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]); 2116 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1642 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { 2117 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1643 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2118 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
1644 INVALID_FIELD_IN_CDB, 0);
1645 return check_condition_result; 2119 return check_condition_result;
1646 } 2120 }
1647 res = fetch_to_dev_buffer(scp, arr, param_len); 2121 res = fetch_to_dev_buffer(scp, arr, param_len);
@@ -1655,16 +2129,14 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1655 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2); 2129 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1656 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]); 2130 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1657 if (md_len > 2) { 2131 if (md_len > 2) {
1658 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2132 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
1659 INVALID_FIELD_IN_PARAM_LIST, 0);
1660 return check_condition_result; 2133 return check_condition_result;
1661 } 2134 }
1662 off = bd_len + (mselect6 ? 4 : 8); 2135 off = bd_len + (mselect6 ? 4 : 8);
1663 mpage = arr[off] & 0x3f; 2136 mpage = arr[off] & 0x3f;
1664 ps = !!(arr[off] & 0x80); 2137 ps = !!(arr[off] & 0x80);
1665 if (ps) { 2138 if (ps) {
1666 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2139 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
1667 INVALID_FIELD_IN_PARAM_LIST, 0);
1668 return check_condition_result; 2140 return check_condition_result;
1669 } 2141 }
1670 spf = !!(arr[off] & 0x40); 2142 spf = !!(arr[off] & 0x40);
@@ -1701,8 +2173,7 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1701 default: 2173 default:
1702 break; 2174 break;
1703 } 2175 }
1704 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2176 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
1705 INVALID_FIELD_IN_PARAM_LIST, 0);
1706 return check_condition_result; 2177 return check_condition_result;
1707set_mode_changed_ua: 2178set_mode_changed_ua:
1708 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm); 2179 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
@@ -1737,19 +2208,15 @@ static int resp_ie_l_pg(unsigned char * arr)
1737static int resp_log_sense(struct scsi_cmnd * scp, 2208static int resp_log_sense(struct scsi_cmnd * scp,
1738 struct sdebug_dev_info * devip) 2209 struct sdebug_dev_info * devip)
1739{ 2210{
1740 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n; 2211 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
1741 unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; 2212 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1742 unsigned char *cmd = scp->cmnd; 2213 unsigned char *cmd = scp->cmnd;
1743 2214
1744 errsts = check_readiness(scp, UAS_ONLY, devip);
1745 if (errsts)
1746 return errsts;
1747 memset(arr, 0, sizeof(arr)); 2215 memset(arr, 0, sizeof(arr));
1748 ppc = cmd[1] & 0x2; 2216 ppc = cmd[1] & 0x2;
1749 sp = cmd[1] & 0x1; 2217 sp = cmd[1] & 0x1;
1750 if (ppc || sp) { 2218 if (ppc || sp) {
1751 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2219 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
1752 INVALID_FIELD_IN_CDB, 0);
1753 return check_condition_result; 2220 return check_condition_result;
1754 } 2221 }
1755 pcontrol = (cmd[2] & 0xc0) >> 6; 2222 pcontrol = (cmd[2] & 0xc0) >> 6;
@@ -1773,8 +2240,7 @@ static int resp_log_sense(struct scsi_cmnd * scp,
1773 arr[3] = resp_ie_l_pg(arr + 4); 2240 arr[3] = resp_ie_l_pg(arr + 4);
1774 break; 2241 break;
1775 default: 2242 default:
1776 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2243 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1777 INVALID_FIELD_IN_CDB, 0);
1778 return check_condition_result; 2244 return check_condition_result;
1779 } 2245 }
1780 } else if (0xff == subpcode) { 2246 } else if (0xff == subpcode) {
@@ -1806,13 +2272,11 @@ static int resp_log_sense(struct scsi_cmnd * scp,
1806 arr[3] = n - 4; 2272 arr[3] = n - 4;
1807 break; 2273 break;
1808 default: 2274 default:
1809 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2275 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1810 INVALID_FIELD_IN_CDB, 0);
1811 return check_condition_result; 2276 return check_condition_result;
1812 } 2277 }
1813 } else { 2278 } else {
1814 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2279 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1815 INVALID_FIELD_IN_CDB, 0);
1816 return check_condition_result; 2280 return check_condition_result;
1817 } 2281 }
1818 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len); 2282 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
@@ -1824,11 +2288,12 @@ static int check_device_access_params(struct scsi_cmnd *scp,
1824 unsigned long long lba, unsigned int num) 2288 unsigned long long lba, unsigned int num)
1825{ 2289{
1826 if (lba + num > sdebug_capacity) { 2290 if (lba + num > sdebug_capacity) {
1827 mk_sense_buffer(scp, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0); 2291 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
1828 return check_condition_result; 2292 return check_condition_result;
1829 } 2293 }
1830 /* transfer length excessive (tie in to block limits VPD page) */ 2294 /* transfer length excessive (tie in to block limits VPD page) */
1831 if (num > sdebug_store_sectors) { 2295 if (num > sdebug_store_sectors) {
2296 /* needs work to find which cdb byte 'num' comes from */
1832 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); 2297 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1833 return check_condition_result; 2298 return check_condition_result;
1834 } 2299 }
@@ -1836,17 +2301,17 @@ static int check_device_access_params(struct scsi_cmnd *scp,
1836} 2301}
1837 2302
1838/* Returns number of bytes copied or -1 if error. */ 2303/* Returns number of bytes copied or -1 if error. */
1839static int do_device_access(struct scsi_cmnd *scmd, 2304static int
1840 unsigned long long lba, unsigned int num, int write) 2305do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
1841{ 2306{
1842 int ret; 2307 int ret;
1843 unsigned long long block, rest = 0; 2308 u64 block, rest = 0;
1844 struct scsi_data_buffer *sdb; 2309 struct scsi_data_buffer *sdb;
1845 enum dma_data_direction dir; 2310 enum dma_data_direction dir;
1846 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t, 2311 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1847 off_t); 2312 off_t);
1848 2313
1849 if (write) { 2314 if (do_write) {
1850 sdb = scsi_out(scmd); 2315 sdb = scsi_out(scmd);
1851 dir = DMA_TO_DEVICE; 2316 dir = DMA_TO_DEVICE;
1852 func = sg_pcopy_to_buffer; 2317 func = sg_pcopy_to_buffer;
@@ -1880,6 +2345,38 @@ static int do_device_access(struct scsi_cmnd *scmd,
1880 return ret; 2345 return ret;
1881} 2346}
1882 2347
2348/* If fake_store(lba,num) compares equal to the first half of arr, copy the
2349 * second (write) half of arr into fake_store(lba,num) and return true. If
2350 * the comparison fails then return false. */
2351static bool
2352comp_write_worker(u64 lba, u32 num, const u8 *arr)
2353{
2354 bool res;
2355 u64 block, rest = 0;
2356 u32 store_blks = sdebug_store_sectors;
2357 u32 lb_size = scsi_debug_sector_size;
2358
2359 block = do_div(lba, store_blks);
2360 if (block + num > store_blks)
2361 rest = block + num - store_blks;
2362
2363 res = !memcmp(fake_storep + (block * lb_size), arr,
2364 (num - rest) * lb_size);
2365 if (!res)
2366 return res;
2367 if (rest)
2368		res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2369 rest * lb_size);
2370 if (!res)
2371 return res;
2372 arr += num * lb_size;
2373 memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2374 if (rest)
2375 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2376 rest * lb_size);
2377 return res;
2378}
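
The backing store is treated as circular: do_div() reduces lba modulo sdebug_store_sectors (it divides its first argument in place and returns the remainder), so an access running past the end wraps to the start, with rest counting the wrapped blocks. The same split as a standalone sketch:

	#include <stdint.h>
	#include <string.h>

	/* Copy num blocks into a circular store of store_blks blocks,
	 * wrapping at the end as comp_write_worker() does above. */
	static void circ_store_write(uint8_t *store, uint32_t store_blks,
				     uint32_t lb_size, uint64_t lba,
				     uint32_t num, const uint8_t *src)
	{
		uint64_t block = lba % store_blks;
		uint32_t rest = 0;

		if (block + num > store_blks)
			rest = (uint32_t)(block + num - store_blks);
		memcpy(store + block * lb_size, src, (num - rest) * lb_size);
		if (rest)	/* wrapped tail lands at the store's start */
			memcpy(store, src + (num - rest) * lb_size,
			       rest * lb_size);
	}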
2379
1883static __be16 dif_compute_csum(const void *buf, int len) 2380static __be16 dif_compute_csum(const void *buf, int len)
1884{ 2381{
1885 __be16 csum; 2382 __be16 csum;
@@ -1992,55 +2489,143 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1992 return 0; 2489 return 0;
1993} 2490}
1994 2491
1995static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, 2492static int
1996 unsigned int num, u32 ei_lba) 2493resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1997{ 2494{
2495 u8 *cmd = scp->cmnd;
2496 u64 lba;
2497 u32 num;
2498 u32 ei_lba;
1998 unsigned long iflags; 2499 unsigned long iflags;
1999 int ret; 2500 int ret;
2501 bool check_prot;
2000 2502
2001 ret = check_device_access_params(SCpnt, lba, num); 2503 switch (cmd[0]) {
2002 if (ret) 2504 case READ_16:
2003 return ret; 2505 ei_lba = 0;
2506 lba = get_unaligned_be64(cmd + 2);
2507 num = get_unaligned_be32(cmd + 10);
2508 check_prot = true;
2509 break;
2510 case READ_10:
2511 ei_lba = 0;
2512 lba = get_unaligned_be32(cmd + 2);
2513 num = get_unaligned_be16(cmd + 7);
2514 check_prot = true;
2515 break;
2516 case READ_6:
2517 ei_lba = 0;
2518 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2519 (u32)(cmd[1] & 0x1f) << 16;
2520 num = (0 == cmd[4]) ? 256 : cmd[4];
2521 check_prot = true;
2522 break;
2523 case READ_12:
2524 ei_lba = 0;
2525 lba = get_unaligned_be32(cmd + 2);
2526 num = get_unaligned_be32(cmd + 6);
2527 check_prot = true;
2528 break;
2529 case XDWRITEREAD_10:
2530 ei_lba = 0;
2531 lba = get_unaligned_be32(cmd + 2);
2532 num = get_unaligned_be16(cmd + 7);
2533 check_prot = false;
2534 break;
2535 default: /* assume READ(32) */
2536 lba = get_unaligned_be64(cmd + 12);
2537 ei_lba = get_unaligned_be32(cmd + 20);
2538 num = get_unaligned_be32(cmd + 28);
2539 check_prot = false;
2540 break;
2541 }
2542 if (check_prot) {
2543 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2544 (cmd[1] & 0xe0)) {
2545 mk_sense_invalid_opcode(scp);
2546 return check_condition_result;
2547 }
2548 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2549 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2550 (cmd[1] & 0xe0) == 0)
2551 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2552 "to DIF device\n");
2553 }
2554 if (sdebug_any_injecting_opt) {
2555 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2556
2557 if (ep->inj_short)
2558 num /= 2;
2559 }
2560
2561 /* inline check_device_access_params() */
2562 if (lba + num > sdebug_capacity) {
2563 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2564 return check_condition_result;
2565 }
2566 /* transfer length excessive (tie in to block limits VPD page) */
2567 if (num > sdebug_store_sectors) {
2568 /* needs work to find which cdb byte 'num' comes from */
2569 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2570 return check_condition_result;
2571 }
2004 2572
2005 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && 2573 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2006 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && 2574 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2007 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { 2575 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2008 /* claim unrecoverable read error */ 2576 /* claim unrecoverable read error */
2009 mk_sense_buffer(SCpnt, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); 2577 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2010 /* set info field and valid bit for fixed descriptor */ 2578 /* set info field and valid bit for fixed descriptor */
2011 if (0x70 == (SCpnt->sense_buffer[0] & 0x7f)) { 2579 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2012 SCpnt->sense_buffer[0] |= 0x80; /* Valid bit */ 2580 scp->sense_buffer[0] |= 0x80; /* Valid bit */
2013 ret = (lba < OPT_MEDIUM_ERR_ADDR) 2581 ret = (lba < OPT_MEDIUM_ERR_ADDR)
2014 ? OPT_MEDIUM_ERR_ADDR : (int)lba; 2582 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2015 SCpnt->sense_buffer[3] = (ret >> 24) & 0xff; 2583 put_unaligned_be32(ret, scp->sense_buffer + 3);
2016 SCpnt->sense_buffer[4] = (ret >> 16) & 0xff;
2017 SCpnt->sense_buffer[5] = (ret >> 8) & 0xff;
2018 SCpnt->sense_buffer[6] = ret & 0xff;
2019 } 2584 }
2020 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); 2585 scsi_set_resid(scp, scsi_bufflen(scp));
2021 return check_condition_result; 2586 return check_condition_result;
2022 } 2587 }
2023 2588
2024 read_lock_irqsave(&atomic_rw, iflags); 2589 read_lock_irqsave(&atomic_rw, iflags);
2025 2590
2026 /* DIX + T10 DIF */ 2591 /* DIX + T10 DIF */
2027 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { 2592 if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2028 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba); 2593 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2029 2594
2030 if (prot_ret) { 2595 if (prot_ret) {
2031 read_unlock_irqrestore(&atomic_rw, iflags); 2596 read_unlock_irqrestore(&atomic_rw, iflags);
2032 mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, prot_ret); 2597 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2033 return illegal_condition_result; 2598 return illegal_condition_result;
2034 } 2599 }
2035 } 2600 }
2036 2601
2037 ret = do_device_access(SCpnt, lba, num, 0); 2602 ret = do_device_access(scp, lba, num, false);
2038 read_unlock_irqrestore(&atomic_rw, iflags); 2603 read_unlock_irqrestore(&atomic_rw, iflags);
2039 if (ret == -1) 2604 if (ret == -1)
2040 return DID_ERROR << 16; 2605 return DID_ERROR << 16;
2041 2606
2042 scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret; 2607 scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2043 2608
2609 if (sdebug_any_injecting_opt) {
2610 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2611
2612 if (ep->inj_recovered) {
2613 mk_sense_buffer(scp, RECOVERED_ERROR,
2614 THRESHOLD_EXCEEDED, 0);
2615 return check_condition_result;
2616 } else if (ep->inj_transport) {
2617 mk_sense_buffer(scp, ABORTED_COMMAND,
2618 TRANSPORT_PROBLEM, ACK_NAK_TO);
2619 return check_condition_result;
2620 } else if (ep->inj_dif) {
2621 /* Logical block guard check failed */
2622 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2623 return illegal_condition_result;
2624 } else if (ep->inj_dix) {
2625 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2626 return illegal_condition_result;
2627 }
2628 }
2044 return 0; 2629 return 0;
2045} 2630}
2046 2631
@@ -2223,31 +2808,95 @@ static void unmap_region(sector_t lba, unsigned int len)
2223 } 2808 }
2224} 2809}
2225 2810
2226static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, 2811static int
2227 unsigned int num, u32 ei_lba) 2812resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2228{ 2813{
2814 u8 *cmd = scp->cmnd;
2815 u64 lba;
2816 u32 num;
2817 u32 ei_lba;
2229 unsigned long iflags; 2818 unsigned long iflags;
2230 int ret; 2819 int ret;
2820 bool check_prot;
2231 2821
2232 ret = check_device_access_params(SCpnt, lba, num); 2822 switch (cmd[0]) {
2233 if (ret) 2823 case WRITE_16:
2234 return ret; 2824 ei_lba = 0;
2825 lba = get_unaligned_be64(cmd + 2);
2826 num = get_unaligned_be32(cmd + 10);
2827 check_prot = true;
2828 break;
2829 case WRITE_10:
2830 ei_lba = 0;
2831 lba = get_unaligned_be32(cmd + 2);
2832 num = get_unaligned_be16(cmd + 7);
2833 check_prot = true;
2834 break;
2835 case WRITE_6:
2836 ei_lba = 0;
2837 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2838 (u32)(cmd[1] & 0x1f) << 16;
2839 num = (0 == cmd[4]) ? 256 : cmd[4];
2840 check_prot = true;
2841 break;
2842 case WRITE_12:
2843 ei_lba = 0;
2844 lba = get_unaligned_be32(cmd + 2);
2845 num = get_unaligned_be32(cmd + 6);
2846 check_prot = true;
2847 break;
2848 case 0x53: /* XDWRITEREAD(10) */
2849 ei_lba = 0;
2850 lba = get_unaligned_be32(cmd + 2);
2851 num = get_unaligned_be16(cmd + 7);
2852 check_prot = false;
2853 break;
2854 default: /* assume WRITE(32) */
2855 lba = get_unaligned_be64(cmd + 12);
2856 ei_lba = get_unaligned_be32(cmd + 20);
2857 num = get_unaligned_be32(cmd + 28);
2858 check_prot = false;
2859 break;
2860 }
2861 if (check_prot) {
2862 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2863 (cmd[1] & 0xe0)) {
2864 mk_sense_invalid_opcode(scp);
2865 return check_condition_result;
2866 }
2867 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2868 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2869 (cmd[1] & 0xe0) == 0)
2870 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2871 "to DIF device\n");
2872 }
2873
2874 /* inline check_device_access_params() */
2875 if (lba + num > sdebug_capacity) {
2876 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2877 return check_condition_result;
2878 }
2879 /* transfer length excessive (tie in to block limits VPD page) */
2880 if (num > sdebug_store_sectors) {
2881 /* needs work to find which cdb byte 'num' comes from */
2882 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2883 return check_condition_result;
2884 }
2235 2885
2236 write_lock_irqsave(&atomic_rw, iflags); 2886 write_lock_irqsave(&atomic_rw, iflags);
2237 2887
2238 /* DIX + T10 DIF */ 2888 /* DIX + T10 DIF */
2239 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { 2889 if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2240 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba); 2890 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2241 2891
2242 if (prot_ret) { 2892 if (prot_ret) {
2243 write_unlock_irqrestore(&atomic_rw, iflags); 2893 write_unlock_irqrestore(&atomic_rw, iflags);
2244 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 2894 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2245 prot_ret);
2246 return illegal_condition_result; 2895 return illegal_condition_result;
2247 } 2896 }
2248 } 2897 }
2249 2898
2250 ret = do_device_access(SCpnt, lba, num, 1); 2899 ret = do_device_access(scp, lba, num, true);
2251 if (scsi_debug_lbp()) 2900 if (scsi_debug_lbp())
2252 map_region(lba, num); 2901 map_region(lba, num);
2253 write_unlock_irqrestore(&atomic_rw, iflags); 2902 write_unlock_irqrestore(&atomic_rw, iflags);
@@ -2255,30 +2904,41 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2255 return (DID_ERROR << 16); 2904 return (DID_ERROR << 16);
2256 else if ((ret < (num * scsi_debug_sector_size)) && 2905 else if ((ret < (num * scsi_debug_sector_size)) &&
2257 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 2906 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2258 sdev_printk(KERN_INFO, SCpnt->device, 2907 sdev_printk(KERN_INFO, scp->device,
2259 "%s: write: cdb indicated=%u, IO sent=%d bytes\n", 2908 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2260 my_name, num * scsi_debug_sector_size, ret); 2909 my_name, num * scsi_debug_sector_size, ret);
2261 2910
2911 if (sdebug_any_injecting_opt) {
2912 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2913
2914 if (ep->inj_recovered) {
2915 mk_sense_buffer(scp, RECOVERED_ERROR,
2916 THRESHOLD_EXCEEDED, 0);
2917 return check_condition_result;
2918 } else if (ep->inj_dif) {
2919 /* Logical block guard check failed */
2920 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2921 return illegal_condition_result;
2922 } else if (ep->inj_dix) {
2923 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2924 return illegal_condition_result;
2925 }
2926 }
2262 return 0; 2927 return 0;
2263} 2928}
2264 2929
2265static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, 2930static int
2266 unsigned int num, u32 ei_lba, unsigned int unmap) 2931resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2932 bool unmap, bool ndob)
2267{ 2933{
2268 unsigned long iflags; 2934 unsigned long iflags;
2269 unsigned long long i; 2935 unsigned long long i;
2270 int ret; 2936 int ret;
2271 2937
2272 ret = check_device_access_params(scmd, lba, num); 2938 ret = check_device_access_params(scp, lba, num);
2273 if (ret) 2939 if (ret)
2274 return ret; 2940 return ret;
2275 2941
2276 if (num > scsi_debug_write_same_length) {
2277 mk_sense_buffer(scmd, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2278 0);
2279 return check_condition_result;
2280 }
2281
2282 write_lock_irqsave(&atomic_rw, iflags); 2942 write_lock_irqsave(&atomic_rw, iflags);
2283 2943
2284 if (unmap && scsi_debug_lbp()) { 2944 if (unmap && scsi_debug_lbp()) {
@@ -2286,17 +2946,22 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2286 goto out; 2946 goto out;
2287 } 2947 }
2288 2948
2289 /* Else fetch one logical block */ 2949 /* if ndob then zero 1 logical block, else fetch 1 logical block */
2290 ret = fetch_to_dev_buffer(scmd, 2950 if (ndob) {
2291 fake_storep + (lba * scsi_debug_sector_size), 2951 memset(fake_storep + (lba * scsi_debug_sector_size), 0,
2292 scsi_debug_sector_size); 2952 scsi_debug_sector_size);
2953 ret = 0;
2954 } else
2955 ret = fetch_to_dev_buffer(scp, fake_storep +
2956 (lba * scsi_debug_sector_size),
2957 scsi_debug_sector_size);
2293 2958
2294 if (-1 == ret) { 2959 if (-1 == ret) {
2295 write_unlock_irqrestore(&atomic_rw, iflags); 2960 write_unlock_irqrestore(&atomic_rw, iflags);
2296 return (DID_ERROR << 16); 2961 return (DID_ERROR << 16);
2297 } else if ((ret < (num * scsi_debug_sector_size)) && 2962 } else if ((ret < (num * scsi_debug_sector_size)) &&
2298 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 2963 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2299 sdev_printk(KERN_INFO, scmd->device, 2964 sdev_printk(KERN_INFO, scp->device,
2300 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", 2965 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2301 my_name, "write same", 2966 my_name, "write same",
2302 num * scsi_debug_sector_size, ret); 2967 num * scsi_debug_sector_size, ret);
@@ -2315,13 +2980,143 @@ out:
2315 return 0; 2980 return 0;
2316} 2981}
2317 2982
2983static int
2984resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2985{
2986 u8 *cmd = scp->cmnd;
2987 u32 lba;
2988 u16 num;
2989 u32 ei_lba = 0;
2990 bool unmap = false;
2991
2992 if (cmd[1] & 0x8) {
2993 if (scsi_debug_lbpws10 == 0) {
2994 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2995 return check_condition_result;
2996 } else
2997 unmap = true;
2998 }
2999 lba = get_unaligned_be32(cmd + 2);
3000 num = get_unaligned_be16(cmd + 7);
3001 if (num > scsi_debug_write_same_length) {
3002 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3003 return check_condition_result;
3004 }
3005 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3006}
3007
3008static int
3009resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3010{
3011 u8 *cmd = scp->cmnd;
3012 u64 lba;
3013 u32 num;
3014 u32 ei_lba = 0;
3015 bool unmap = false;
3016 bool ndob = false;
3017
3018 if (cmd[1] & 0x8) { /* UNMAP */
3019 if (scsi_debug_lbpws == 0) {
3020 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3021 return check_condition_result;
3022 } else
3023 unmap = true;
3024 }
3025 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3026 ndob = true;
3027 lba = get_unaligned_be64(cmd + 2);
3028 num = get_unaligned_be32(cmd + 10);
3029 if (num > scsi_debug_write_same_length) {
3030 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3031 return check_condition_result;
3032 }
3033 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3034}
3035
3036static int
3037resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3038{
3039 u8 *cmd = scp->cmnd;
3040 u8 *arr;
3041 u8 *fake_storep_hold;
3042 u64 lba;
3043 u32 dnum;
3044 u32 lb_size = scsi_debug_sector_size;
3045 u8 num;
3046 unsigned long iflags;
3047 int ret;
3048
3049	lba = get_unaligned_be64(cmd + 2);
3050 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3051 if (0 == num)
3052 return 0; /* degenerate case, not an error */
3053 dnum = 2 * num;
3054 arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3055 if (NULL == arr) {
3056 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3057 INSUFF_RES_ASCQ);
3058 return check_condition_result;
3059 }
3060 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3061 (cmd[1] & 0xe0)) {
3062		mk_sense_invalid_opcode(scp);
		kfree(arr);
3063		return check_condition_result;
3064 }
3065 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3066 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3067 (cmd[1] & 0xe0) == 0)
3068 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3069 "to DIF device\n");
3070
3071 /* inline check_device_access_params() */
3072 if (lba + num > sdebug_capacity) {
3073 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3074 return check_condition_result;
3075 }
3076 /* transfer length excessive (tie in to block limits VPD page) */
3077 if (num > sdebug_store_sectors) {
3078 /* needs work to find which cdb byte 'num' comes from */
3079 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3080 return check_condition_result;
3081 }
3082
3083 write_lock_irqsave(&atomic_rw, iflags);
3084
3085 /* trick do_device_access() to fetch both compare and write buffers
3086 * from data-in into arr. Safe (atomic) since write_lock held. */
3087 fake_storep_hold = fake_storep;
3088 fake_storep = arr;
3089 ret = do_device_access(scp, 0, dnum, true);
3090 fake_storep = fake_storep_hold;
3091 if (ret == -1) {
3092 write_unlock_irqrestore(&atomic_rw, iflags);
3093 kfree(arr);
3094 return DID_ERROR << 16;
3095 } else if ((ret < (dnum * lb_size)) &&
3096 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3097 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3098 "indicated=%u, IO sent=%d bytes\n", my_name,
3099 dnum * lb_size, ret);
3100 if (!comp_write_worker(lba, num, arr)) {
3101 write_unlock_irqrestore(&atomic_rw, iflags);
3102 kfree(arr);
3103 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3104 return check_condition_result;
3105 }
3106 if (scsi_debug_lbp())
3107 map_region(lba, num);
3108 write_unlock_irqrestore(&atomic_rw, iflags);
3109 return 0;
3110}
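
From the initiator's side, COMPARE AND WRITE carries both halves in one data-out buffer: the first num blocks are the verify data and the next num blocks the replacement data, which is why the code fetches dnum = 2 * num blocks before comparing. A hedged sketch of building such a buffer (build_caw_buffer is an illustrative name):

	#include <stdint.h>
	#include <string.h>

	/* Lay out a COMPARE AND WRITE data-out buffer: verify data first,
	 * then the replacement data, each num blocks of lb_size bytes. */
	static void build_caw_buffer(uint8_t *out, const uint8_t *verify,
				     const uint8_t *replace,
				     uint32_t num, uint32_t lb_size)
	{
		memcpy(out, verify, num * lb_size);
		memcpy(out + num * lb_size, replace, num * lb_size);
	}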
3111
2318struct unmap_block_desc { 3112struct unmap_block_desc {
2319 __be64 lba; 3113 __be64 lba;
2320 __be32 blocks; 3114 __be32 blocks;
2321 __be32 __reserved; 3115 __be32 __reserved;
2322}; 3116};
2323 3117
2324static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) 3118static int
3119resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2325{ 3120{
2326 unsigned char *buf; 3121 unsigned char *buf;
2327 struct unmap_block_desc *desc; 3122 struct unmap_block_desc *desc;
@@ -2329,20 +3124,26 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2329 int ret; 3124 int ret;
2330 unsigned long iflags; 3125 unsigned long iflags;
2331 3126
2332 ret = check_readiness(scmd, UAS_ONLY, devip);
2333 if (ret)
2334 return ret;
2335 3127
2336 payload_len = get_unaligned_be16(&scmd->cmnd[7]); 3128 if (!scsi_debug_lbp())
2337	BUG_ON(scsi_bufflen(scmd) != payload_len); 3129		return 0;	/* fib and say it's done */
3130 payload_len = get_unaligned_be16(scp->cmnd + 7);
3131 BUG_ON(scsi_bufflen(scp) != payload_len);
2338 3132
2339 descriptors = (payload_len - 8) / 16; 3133 descriptors = (payload_len - 8) / 16;
3134 if (descriptors > scsi_debug_unmap_max_desc) {
3135 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3136 return check_condition_result;
3137 }
2340 3138
2341 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC); 3139 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2342 if (!buf) 3140 if (!buf) {
3141 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3142 INSUFF_RES_ASCQ);
2343 return check_condition_result; 3143 return check_condition_result;
3144 }
2344 3145
2345 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); 3146 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2346 3147
2347 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2); 3148 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2348 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16); 3149 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
@@ -2355,7 +3156,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2355 unsigned long long lba = get_unaligned_be64(&desc[i].lba); 3156 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2356 unsigned int num = get_unaligned_be32(&desc[i].blocks); 3157 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2357 3158
2358 ret = check_device_access_params(scmd, lba, num); 3159 ret = check_device_access_params(scp, lba, num);
2359 if (ret) 3160 if (ret)
2360 goto out; 3161 goto out;
2361 3162
@@ -2373,37 +3174,44 @@ out:
2373 3174
2374 3175  #define SDEBUG_GET_LBA_STATUS_LEN 32
2375 3176
2376      -static int resp_get_lba_status(struct scsi_cmnd * scmd,
2377      -			       struct sdebug_dev_info * devip)
     3177 +static int
     3178 +resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2378 3179  {
2379      -	unsigned long long lba;
2380      -	unsigned int alloc_len, mapped, num;
2381      -	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
     3180 +	u8 *cmd = scp->cmnd;
     3181 +	u64 lba;
     3182 +	u32 alloc_len, mapped, num;
     3183 +	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
2382 3184  	int ret;
2383 3185
2384      -	ret = check_readiness(scmd, UAS_ONLY, devip);
2385      -	if (ret)
2386      -		return ret;
2387      -
2388      -	lba = get_unaligned_be64(&scmd->cmnd[2]);
2389      -	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
     3186 +	lba = get_unaligned_be64(cmd + 2);
     3187 +	alloc_len = get_unaligned_be32(cmd + 10);
2390 3188
2391 3189  	if (alloc_len < 24)
2392 3190  		return 0;
2393 3191
2394      -	ret = check_device_access_params(scmd, lba, 1);
     3192 +	ret = check_device_access_params(scp, lba, 1);
2395 3193  	if (ret)
2396 3194  		return ret;
2397 3195
2398      -	mapped = map_state(lba, &num);
     3196 +	if (scsi_debug_lbp())
     3197 +		mapped = map_state(lba, &num);
     3198 +	else {
     3199 +		mapped = 1;
     3200 +		/* following just in case virtual_gb changed */
     3201 +		sdebug_capacity = get_sdebug_capacity();
     3202 +		if (sdebug_capacity - lba <= 0xffffffff)
     3203 +			num = sdebug_capacity - lba;
     3204 +		else
     3205 +			num = 0xffffffff;
     3206 +	}
2399 3207
2400 3208  	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2401      -	put_unaligned_be32(20, &arr[0]);	/* Parameter Data Length */
2402      -	put_unaligned_be64(lba, &arr[8]);	/* LBA */
2403      -	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
2404      -	arr[20] = !mapped;		/* mapped = 0, unmapped = 1 */
     3209 +	put_unaligned_be32(20, arr);		/* Parameter Data Length */
     3210 +	put_unaligned_be64(lba, arr + 8);	/* LBA */
     3211 +	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
     3212 +	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
2405 3213
2406      -	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
     3214 +	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
2407 3215  }
2408 3216
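Decoded, the 32-byte response built above carries a single LBA status
descriptor: bytes 0..3 hold the parameter data length (20), bytes 8..15 echo
the queried LBA, bytes 16..19 give the extent length in blocks, and byte 20
carries the provisioning status (0 = mapped, 1 = deallocated).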
2409 3217  #define SDEBUG_RLUN_ARR_SZ 256
@@ -2412,8 +3220,8 @@ static int resp_report_luns(struct scsi_cmnd * scp,
2412 3220  			    struct sdebug_dev_info * devip)
2413 3221  {
2414 3222  	unsigned int alloc_len;
2415      -	int lun_cnt, i, upper, num, n;
2416      -	u64 wlun, lun;
     3223 +	int lun_cnt, i, upper, num, n, want_wlun, shortish;
     3224 +	u64 lun;
2417 3225  	unsigned char *cmd = scp->cmnd;
2418 3226  	int select_report = (int)cmd[2];
2419 3227  	struct scsi_lun *one_lun;
@@ -2421,9 +3229,9 @@ static int resp_report_luns(struct scsi_cmnd * scp,
2421 3229  	unsigned char * max_addr;
2422 3230
2423 3231  	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2424      -	if ((alloc_len < 4) || (select_report > 2)) {
2425      -		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2426      -				0);
     3232 +	shortish = (alloc_len < 4);
     3233 +	if (shortish || (select_report > 2)) {
     3234 +		mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
2427 3235  		return check_condition_result;
2428 3236  	}
2429 3237  	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
@@ -2433,14 +3241,14 @@ static int resp_report_luns(struct scsi_cmnd * scp,
2433 3241  		lun_cnt = 0;
2434 3242  	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2435 3243  		--lun_cnt;
2436      -	wlun = (select_report > 0) ? 1 : 0;
2437      -	num = lun_cnt + wlun;
     3244 +	want_wlun = (select_report > 0) ? 1 : 0;
     3245 +	num = lun_cnt + want_wlun;
2438 3246  	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2439 3247  	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2440 3248  	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2441 3249  		      sizeof(struct scsi_lun)), num);
2442 3250  	if (n < num) {
2443      -		wlun = 0;
     3251 +		want_wlun = 0;
2444 3252  		lun_cnt = n;
2445 3253  	}
2446 3254  	one_lun = (struct scsi_lun *) &arr[8];
@@ -2454,7 +3262,7 @@ static int resp_report_luns(struct scsi_cmnd * scp,
2454 3262  			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2455 3263  		one_lun[i].scsi_lun[1] = lun & 0xff;
2456 3264  	}
2457      -	if (wlun) {
     3265 +	if (want_wlun) {
2458 3266  		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2459 3267  		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2460 3268  		i++;
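For reference, the per-LUN encoding performed by the loop above (flat LUNs in
SAM-2 peripheral style) can be condensed into a standalone helper; the
function name is illustrative only:

	static void sdeb_encode_lun(u16 lun, u8 sl[8])
	{
		memset(sl, 0, 8);	/* unused LUN bytes must be zero */
		sl[0] = ((lun >> 8) & 0x3f) | (SAM2_LUN_ADDRESS_METHOD << 6);
		sl[1] = lun & 0xff;
	}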
@@ -2476,8 +3284,8 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2476 3284  	/* better not to use temporary buffer. */
2477 3285  	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2478 3286  	if (!buf) {
2479      -		mk_sense_buffer(scp, NOT_READY,
2480      -				LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
     3287 +		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
     3288 +				INSUFF_RES_ASCQ);
2481 3289  		return check_condition_result;
2482 3290  	}
2483 3291
@@ -2500,6 +3308,32 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2500 3308  	return 0;
2501 3309  }
2502 3310
3311static int
3312resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3313{
3314 u8 *cmd = scp->cmnd;
3315 u64 lba;
3316 u32 num;
3317 int errsts;
3318
3319 if (!scsi_bidi_cmnd(scp)) {
3320 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3321 INSUFF_RES_ASCQ);
3322 return check_condition_result;
3323 }
3324 errsts = resp_read_dt0(scp, devip);
3325 if (errsts)
3326 return errsts;
3327 if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
3328 errsts = resp_write_dt0(scp, devip);
3329 if (errsts)
3330 return errsts;
3331 }
3332 lba = get_unaligned_be32(cmd + 2);
3333 num = get_unaligned_be16(cmd + 7);
3334 return resp_xdwriteread(scp, lba, num, devip);
3335}
3336
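resp_xdwriteread(), partially visible in the hunk above, supplies the XOR
semantics of XDWRITEREAD(10): the data-out payload is combined with the prior
medium contents and the product travels back on the bidirectional data-in
leg. Reduced to a hypothetical helper, the core step is:

	static void xdwr_xor(u8 *dout, const u8 *prior, u32 len)
	{
		u32 i;

		for (i = 0; i < len; i++)
			dout[i] ^= prior[i];	/* XOR product returned to the initiator */
	}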
2503 3337  /* When timer or tasklet goes off this function is called. */
2504 3338  static void sdebug_q_cmd_complete(unsigned long indx)
2505 3339  {
@@ -2672,10 +3506,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2672 3506  	open_devip->sdbg_host = sdbg_host;
2673 3507  	atomic_set(&open_devip->num_in_q, 0);
2674 3508  	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
2675      -	open_devip->used = 1;
     3509 +	open_devip->used = true;
2676      -	if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2677      -		open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2678      -
2679 3510  	return open_devip;
2680 3511  }
2681 3512
@@ -2717,7 +3548,7 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2717 3548  		sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2718 3549  	if (devip) {
2719 3550  		/* make this slot available for re-use */
2720      -		devip->used = 0;
     3551 +		devip->used = false;
2721 3552  		sdp->hostdata = NULL;
2722 3553  	}
2723 3554  }
@@ -3162,6 +3993,7 @@ module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3162 3993  module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3163 3994  module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3164 3995  module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
     3996 +module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
3165 3997  module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3166 3998  module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
3167 3999  module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
@@ -3181,7 +4013,7 @@ MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
3181 4013  MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
3182 4014  MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
3183 4015  MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
3184      -MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
     4016 +MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
3185 4017  MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
3186 4018  MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
3187 4019  MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
@@ -3208,11 +4040,12 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
3208 4040  MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
3209 4041  MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
3210 4042  MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
     4043 +MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
3211 4044  MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
3212 4045  MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
3213 4046  MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
3214 4047  MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
3215      -MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
     4048 +MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
3216 4049  MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
3217 4050  MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
3218 4051
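The new flag slots in alongside the existing load-time parameters; an
illustrative invocation (values arbitrary):

	modprobe scsi_debug strict=1 dev_size_mb=16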
@@ -3378,6 +4211,16 @@ static ssize_t opts_store(struct device_driver *ddp, const char *buf,
3378 4211  		return -EINVAL;
3379 4212  opts_done:
3380 4213  	scsi_debug_opts = opts;
     4214 +	if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
     4215 +		sdebug_any_injecting_opt = true;
     4216 +	else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
     4217 +		sdebug_any_injecting_opt = true;
     4218 +	else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
     4219 +		sdebug_any_injecting_opt = true;
     4220 +	else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
     4221 +		sdebug_any_injecting_opt = true;
     4222 +	else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
     4223 +		sdebug_any_injecting_opt = true;
3381 4224  	atomic_set(&sdebug_cmnd_count, 0);
3382 4225  	atomic_set(&sdebug_a_tsf, 0);
3383 4226  	return count;
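Each branch of the cascade added above sets the same flag, so the chain is
equivalent to one test against the OR of the masks; a more compact form
(sketch, using this driver's option bits) would be:

	if (opts & (SCSI_DEBUG_OPT_RECOVERED_ERR | SCSI_DEBUG_OPT_TRANSPORT_ERR |
		    SCSI_DEBUG_OPT_DIF_ERR | SCSI_DEBUG_OPT_DIX_ERR |
		    SCSI_DEBUG_OPT_SHORT_TRANSFER))
		sdebug_any_injecting_opt = true;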
@@ -3585,12 +4428,25 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3585 4428  			      size_t count)
3586 4429  {
3587 4430  	int n;
     4431 +	bool changed;
3588 4432
3589 4433  	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
     4434 +		changed = (scsi_debug_virtual_gb != n);
3590 4435  		scsi_debug_virtual_gb = n;
3591      -
3592 4436  		sdebug_capacity = get_sdebug_capacity();
3593      -
     4437 +		if (changed) {
     4438 +			struct sdebug_host_info *sdhp;
     4439 +			struct sdebug_dev_info *dp;
     4440 +
     4441 +			list_for_each_entry(sdhp, &sdebug_host_list,
     4442 +					    host_list) {
     4443 +				list_for_each_entry(dp, &sdhp->dev_info_list,
     4444 +						    dev_list) {
     4445 +					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
     4446 +						dp->uas_bm);
     4447 +				}
     4448 +			}
     4449 +		}
3594 4450  		return count;
3595 4451  	}
3596 4452  	return -EINVAL;
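Each SDEBUG_UA_CAPACITY_CHANGED bit set above surfaces on the next command to
that device. The consuming side, check_readiness() elsewhere in this file,
turns it into a unit attention roughly as follows (sketch; 0x2a/0x09 is the
SPC "CAPACITY DATA HAS CHANGED" additional sense code pair):

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k == SDEBUG_UA_CAPACITY_CHANGED) {
		mk_sense_buffer(scp, UNIT_ATTENTION, 0x2a, 0x9);
		clear_bit(k, devip->uas_bm);
		return check_condition_result;
	}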
@@ -3736,6 +4592,23 @@ static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
3736 4592  }
3737 4593  static DRIVER_ATTR_RW(host_lock);
3738 4594
     4595 +static ssize_t strict_show(struct device_driver *ddp, char *buf)
     4596 +{
     4597 +	return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
     4598 +}
     4599 +static ssize_t strict_store(struct device_driver *ddp, const char *buf,
     4600 +			    size_t count)
     4601 +{
     4602 +	int n;
     4603 +
     4604 +	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
     4605 +		scsi_debug_strict = (n > 0);
     4606 +		return count;
     4607 +	}
     4608 +	return -EINVAL;
     4609 +}
     4610 +static DRIVER_ATTR_RW(strict);
     4611 +
3739 4612
3740 4613  /* Note: The following array creates attribute files in the
3741 4614     /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -3771,6 +4644,7 @@ static struct attribute *sdebug_drv_attrs[] = {
3771 4644  	&driver_attr_removable.attr,
3772 4645  	&driver_attr_host_lock.attr,
3773 4646  	&driver_attr_ndelay.attr,
     4647 +	&driver_attr_strict.attr,
3774 4648  	NULL,
3775 4649  };
3776 4650  ATTRIBUTE_GROUPS(sdebug_drv);
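Since the attribute lands in the /sys/bus/pseudo/drivers/scsi_debug directory
named in the comment above, the check can also be toggled at run time, e.g.
(illustrative):

	echo 1 > /sys/bus/pseudo/drivers/scsi_debug/strict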
@@ -4083,396 +4957,9 @@ static void sdebug_remove_adapter(void)
4083 4957  }
4084 4958
4085 4959  static int
4086      -scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
     4960 +sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4087{
4088 unsigned char *cmd = SCpnt->cmnd;
4089 int len, k;
4090 unsigned int num;
4091 unsigned long long lba;
4092 u32 ei_lba;
4093 int errsts = 0;
4094 int target = SCpnt->device->id;
4095 struct sdebug_dev_info *devip = NULL;
4096 int inj_recovered = 0;
4097 int inj_transport = 0;
4098 int inj_dif = 0;
4099 int inj_dix = 0;
4100 int inj_short = 0;
4101 int delay_override = 0;
4102 int unmap = 0;
4103
4104 scsi_set_resid(SCpnt, 0);
4105 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) &&
4106 !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
4107 char b[120];
4108 int n;
4109
4110 len = SCpnt->cmd_len;
4111 if (len > 32)
4112 strcpy(b, "too long, over 32 bytes");
4113 else {
4114 for (k = 0, n = 0; k < len; ++k)
4115 n += scnprintf(b + n, sizeof(b) - n, "%02x ",
4116 (unsigned int)cmd[k]);
4117 }
4118 sdev_printk(KERN_INFO, SCpnt->device, "%s: cmd %s\n", my_name,
4119 b);
4120 }
4121
4122 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
4123 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
4124 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
4125 devip = devInfoReg(SCpnt->device);
4126 if (NULL == devip)
4127 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
4128
4129 if ((scsi_debug_every_nth != 0) &&
4130 (atomic_inc_return(&sdebug_cmnd_count) >=
4131 abs(scsi_debug_every_nth))) {
4132 atomic_set(&sdebug_cmnd_count, 0);
4133 if (scsi_debug_every_nth < -1)
4134 scsi_debug_every_nth = -1;
4135 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
4136 return 0; /* ignore command causing timeout */
4137 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
4138 scsi_medium_access_command(SCpnt))
4139 return 0; /* time out reads and writes */
4140 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
4141 inj_recovered = 1; /* to reads and writes below */
4142 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
4143 inj_transport = 1; /* to reads and writes below */
4144 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
4145 inj_dif = 1; /* to reads and writes below */
4146 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
4147 inj_dix = 1; /* to reads and writes below */
4148 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
4149 inj_short = 1;
4150 }
4151
4152 if (devip->wlun) {
4153 switch (*cmd) {
4154 case INQUIRY:
4155 case REQUEST_SENSE:
4156 case TEST_UNIT_READY:
4157 case REPORT_LUNS:
4158 break; /* only allowable wlun commands */
4159 default:
4160 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4161 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
4162 "not supported for wlun\n", *cmd);
4163 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4164 INVALID_OPCODE, 0);
4165 errsts = check_condition_result;
4166 return schedule_resp(SCpnt, devip, errsts, 0);
4167 }
4168 }
4169
4170 switch (*cmd) {
4171 case INQUIRY: /* mandatory, ignore unit attention */
4172 delay_override = 1;
4173 errsts = resp_inquiry(SCpnt, target, devip);
4174 break;
4175 case REQUEST_SENSE: /* mandatory, ignore unit attention */
4176 delay_override = 1;
4177 errsts = resp_requests(SCpnt, devip);
4178 break;
4179 case REZERO_UNIT: /* actually this is REWIND for SSC */
4180 case START_STOP:
4181 errsts = resp_start_stop(SCpnt, devip);
4182 break;
4183 case ALLOW_MEDIUM_REMOVAL:
4184 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4185 if (errsts)
4186 break;
4187 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4188 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
4189 cmd[4] ? "inhibited" : "enabled");
4190 break;
4191 case SEND_DIAGNOSTIC: /* mandatory */
4192 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4193 break;
4194 case TEST_UNIT_READY: /* mandatory */
4195 /* delay_override = 1; */
4196 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4197 break;
4198 case RESERVE:
4199 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4200 break;
4201 case RESERVE_10:
4202 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4203 break;
4204 case RELEASE:
4205 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4206 break;
4207 case RELEASE_10:
4208 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4209 break;
4210 case READ_CAPACITY:
4211 errsts = resp_readcap(SCpnt, devip);
4212 break;
4213 case SERVICE_ACTION_IN_16:
4214 if (cmd[1] == SAI_READ_CAPACITY_16)
4215 errsts = resp_readcap16(SCpnt, devip);
4216 else if (cmd[1] == SAI_GET_LBA_STATUS) {
4217
4218 if (scsi_debug_lbp() == 0) {
4219 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4220 INVALID_COMMAND_OPCODE, 0);
4221 errsts = check_condition_result;
4222 } else
4223 errsts = resp_get_lba_status(SCpnt, devip);
4224 } else {
4225 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4226 INVALID_OPCODE, 0);
4227 errsts = check_condition_result;
4228 }
4229 break;
4230 case MAINTENANCE_IN:
4231 if (MI_REPORT_TARGET_PGS != cmd[1]) {
4232 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4233 INVALID_OPCODE, 0);
4234 errsts = check_condition_result;
4235 break;
4236 }
4237 errsts = resp_report_tgtpgs(SCpnt, devip);
4238 break;
4239 case READ_16:
4240 case READ_12:
4241 case READ_10:
4242 /* READ{10,12,16} and DIF Type 2 are natural enemies */
4243 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4244 cmd[1] & 0xe0) {
4245 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4246 INVALID_COMMAND_OPCODE, 0);
4247 errsts = check_condition_result;
4248 break;
4249 }
4250
4251 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4252 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4253 (cmd[1] & 0xe0) == 0)
4254 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4255
4256 /* fall through */
4257 case READ_6:
4258read:
4259 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4260 if (errsts)
4261 break;
4262 if (scsi_debug_fake_rw)
4263 break;
4264 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4265
4266 if (inj_short)
4267 num /= 2;
4268
4269 errsts = resp_read(SCpnt, lba, num, ei_lba);
4270 if (inj_recovered && (0 == errsts)) {
4271 mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4272 THRESHOLD_EXCEEDED, 0);
4273 errsts = check_condition_result;
4274 } else if (inj_transport && (0 == errsts)) {
4275 mk_sense_buffer(SCpnt, ABORTED_COMMAND,
4276 TRANSPORT_PROBLEM, ACK_NAK_TO);
4277 errsts = check_condition_result;
4278 } else if (inj_dif && (0 == errsts)) {
4279 /* Logical block guard check failed */
4280 mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4281 errsts = illegal_condition_result;
4282 } else if (inj_dix && (0 == errsts)) {
4283 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4284 errsts = illegal_condition_result;
4285 }
4286 break;
4287 case REPORT_LUNS: /* mandatory, ignore unit attention */
4288 delay_override = 1;
4289 errsts = resp_report_luns(SCpnt, devip);
4290 break;
4291 case VERIFY: /* 10 byte SBC-2 command */
4292 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4293 break;
4294 case WRITE_16:
4295 case WRITE_12:
4296 case WRITE_10:
4297 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
4298 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4299 cmd[1] & 0xe0) {
4300 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4301 INVALID_COMMAND_OPCODE, 0);
4302 errsts = check_condition_result;
4303 break;
4304 }
4305
4306 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4307 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4308 (cmd[1] & 0xe0) == 0)
4309 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4310
4311 /* fall through */
4312 case WRITE_6:
4313write:
4314 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4315 if (errsts)
4316 break;
4317 if (scsi_debug_fake_rw)
4318 break;
4319 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4320 errsts = resp_write(SCpnt, lba, num, ei_lba);
4321 if (inj_recovered && (0 == errsts)) {
4322 mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4323 THRESHOLD_EXCEEDED, 0);
4324 errsts = check_condition_result;
4325 } else if (inj_dif && (0 == errsts)) {
4326 mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4327 errsts = illegal_condition_result;
4328 } else if (inj_dix && (0 == errsts)) {
4329 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4330 errsts = illegal_condition_result;
4331 }
4332 break;
4333 case WRITE_SAME_16:
4334 case WRITE_SAME:
4335 if (cmd[1] & 0x8) {
4336 if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
4337 (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
4338 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4339 INVALID_FIELD_IN_CDB, 0);
4340 errsts = check_condition_result;
4341 } else
4342 unmap = 1;
4343 }
4344 if (errsts)
4345 break;
4346 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4347 if (errsts)
4348 break;
4349 if (scsi_debug_fake_rw)
4350 break;
4351 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4352 errsts = resp_write_same(SCpnt, lba, num, ei_lba, unmap);
4353 break;
4354 case UNMAP:
4355 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4356 if (errsts)
4357 break;
4358 if (scsi_debug_fake_rw)
4359 break;
4360
4361 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
4362 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4363 INVALID_COMMAND_OPCODE, 0);
4364 errsts = check_condition_result;
4365 } else
4366 errsts = resp_unmap(SCpnt, devip);
4367 break;
4368 case MODE_SENSE:
4369 case MODE_SENSE_10:
4370 errsts = resp_mode_sense(SCpnt, target, devip);
4371 break;
4372 case MODE_SELECT:
4373 errsts = resp_mode_select(SCpnt, 1, devip);
4374 break;
4375 case MODE_SELECT_10:
4376 errsts = resp_mode_select(SCpnt, 0, devip);
4377 break;
4378 case LOG_SENSE:
4379 errsts = resp_log_sense(SCpnt, devip);
4380 break;
4381 case SYNCHRONIZE_CACHE:
4382 delay_override = 1;
4383 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4384 break;
4385 case WRITE_BUFFER:
4386 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4387 break;
4388 case XDWRITEREAD_10:
4389 if (!scsi_bidi_cmnd(SCpnt)) {
4390 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4391 INVALID_FIELD_IN_CDB, 0);
4392 errsts = check_condition_result;
4393 break;
4394 }
4395
4396 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4397 if (errsts)
4398 break;
4399 if (scsi_debug_fake_rw)
4400 break;
4401 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4402 errsts = resp_read(SCpnt, lba, num, ei_lba);
4403 if (errsts)
4404 break;
4405 errsts = resp_write(SCpnt, lba, num, ei_lba);
4406 if (errsts)
4407 break;
4408 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
4409 break;
4410 case VARIABLE_LENGTH_CMD:
4411 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
4412
4413 if ((cmd[10] & 0xe0) == 0)
4414 printk(KERN_ERR
4415 "Unprotected RD/WR to DIF device\n");
4416
4417 if (cmd[9] == READ_32) {
4418 BUG_ON(SCpnt->cmd_len < 32);
4419 goto read;
4420 }
4421
4422 if (cmd[9] == WRITE_32) {
4423 BUG_ON(SCpnt->cmd_len < 32);
4424 goto write;
4425 }
4426 }
4427
4428 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4429 INVALID_FIELD_IN_CDB, 0);
4430 errsts = check_condition_result;
4431 break;
4432 case 0x85:
4433 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4434 sdev_printk(KERN_INFO, SCpnt->device,
4435 "%s: ATA PASS-THROUGH(16) not supported\n", my_name);
4436 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4437 INVALID_OPCODE, 0);
4438 errsts = check_condition_result;
4439 break;
4440 default:
4441 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4442 sdev_printk(KERN_INFO, SCpnt->device,
4443 "%s: Opcode: 0x%x not supported\n",
4444 my_name, *cmd);
4445 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4446 if (errsts)
4447 break; /* Unit attention takes precedence */
4448 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
4449 errsts = check_condition_result;
4450 break;
4451 }
4452 return schedule_resp(SCpnt, devip, errsts,
4453 (delay_override ? 0 : scsi_debug_delay));
4454}
4455
4456static int
4457sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
4458{
4459 if (scsi_debug_host_lock) {
4460 unsigned long iflags;
4461 int rc;
4462
4463 spin_lock_irqsave(shost->host_lock, iflags);
4464 rc = scsi_debug_queuecommand(cmd);
4465 spin_unlock_irqrestore(shost->host_lock, iflags);
4466 return rc;
4467 } else
4468 return scsi_debug_queuecommand(cmd);
4469}
4470
4471static int
4472sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason)
4473 4961  {
4474 4962  	int num_in_q = 0;
4475      -	int bad = 0;
4476 4963  	unsigned long iflags;
4477 4964  	struct sdebug_dev_info *devip;
4478 4965
@@ -4484,43 +4971,18 @@ sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason)
4484 4971  	}
4485 4972  	num_in_q = atomic_read(&devip->num_in_q);
4486 4973  	spin_unlock_irqrestore(&queued_arr_lock, iflags);
4487      -	if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
     4974 +
4488 4975  	if (qdepth < 1)
4489 4976  		qdepth = 1;
4490 4977  	/* allow to exceed max host queued_arr elements for testing */
4491 4978  	if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4492 4979  		qdepth = SCSI_DEBUG_CANQUEUE + 10;
4493      -		scsi_adjust_queue_depth(sdev, qdepth);
     4980 +	scsi_change_queue_depth(sdev, qdepth);
4494      -	} else if (reason == SCSI_QDEPTH_QFULL)
     4981 +
4495      -		scsi_track_queue_full(sdev, qdepth);
4496      -	else
4497      -		bad = 1;
4498      -	if (bad)
4499      -		sdev_printk(KERN_WARNING, sdev,
4500      -			    "%s: unknown reason=0x%x\n", __func__, reason);
4501 4982  	if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4502      -		if (SCSI_QDEPTH_QFULL == reason)
4503      -			sdev_printk(KERN_INFO, sdev,
4504      -				    "%s: -> %d, num_in_q=%d, reason: queue full\n",
4505      -				    __func__, qdepth, num_in_q);
     4983 +		sdev_printk(KERN_INFO, sdev,
     4984 +			    "%s: qdepth=%d, num_in_q=%d\n",
     4985 +			    __func__, qdepth, num_in_q);
4506      -		else {
4507      -			const char *cp;
4508      -
4509      -			switch (reason) {
4510      -			case SCSI_QDEPTH_DEFAULT:
4511      -				cp = "default (sysfs ?)";
4512      -				break;
4513      -			case SCSI_QDEPTH_RAMP_UP:
4514      -				cp = "ramp up";
4515      -				break;
4516      -			default:
4517      -				cp = "unknown";
4518      -				break;
4519      -			}
4520      -			sdev_printk(KERN_INFO, sdev,
4521      -				    "%s: qdepth=%d, num_in_q=%d, reason: %s\n",
4522      -				    __func__, qdepth, num_in_q, cp);
4523      -		}
4524 4986  	}
4525 4987  	return sdev->queue_depth;
4526 4988  }
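With the reason codes gone, ramp-up and queue-full tracking move into the
midlayer (see the scsi_error.c hunk below) and a LLD hook reduces to clamping
and applying the depth. A minimal sketch of the new contract (function name
hypothetical; scsi_change_queue_depth() returns the depth actually set):

	static int my_change_queue_depth(struct scsi_device *sdev, int qdepth)
	{
		if (qdepth < 1)
			qdepth = 1;
		return scsi_change_queue_depth(sdev, qdepth);
	}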
@@ -4551,6 +5013,193 @@ sdebug_change_qtype(struct scsi_device *sdev, int qtype)
4551 5013  	return qtype;
4552 5014  }
4553 5015
5016static int
5017check_inject(struct scsi_cmnd *scp)
5018{
5019 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
5020
5021 memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
5022
5023 if (atomic_inc_return(&sdebug_cmnd_count) >=
5024 abs(scsi_debug_every_nth)) {
5025 atomic_set(&sdebug_cmnd_count, 0);
5026 if (scsi_debug_every_nth < -1)
5027 scsi_debug_every_nth = -1;
5028 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
5029 return 1; /* ignore command causing timeout */
5030 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
5031 scsi_medium_access_command(scp))
5032 return 1; /* time out reads and writes */
5033 if (sdebug_any_injecting_opt) {
5034 int opts = scsi_debug_opts;
5035
5036 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5037 ep->inj_recovered = true;
5038 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5039 ep->inj_transport = true;
5040 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5041 ep->inj_dif = true;
5042 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5043 ep->inj_dix = true;
5044 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5045 ep->inj_short = true;
5046 }
5047 }
5048 return 0;
5049}
5050
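In practice the injection is periodic: loading the driver with, say,
every_nth=100 and the recovered-error bit set in opts (values illustrative)
marks every 100th command through the per-command sdebug_scmd_extra_t flags
cleared and set here, and the resp_* handlers consult those flags when
building their responses.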
5051static int
5052scsi_debug_queuecommand(struct scsi_cmnd *scp)
5053{
5054 u8 sdeb_i;
5055 struct scsi_device *sdp = scp->device;
5056 const struct opcode_info_t *oip;
5057 const struct opcode_info_t *r_oip;
5058 struct sdebug_dev_info *devip;
5059 u8 *cmd = scp->cmnd;
5060 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5061 int k, na;
5062 int errsts = 0;
5063 int errsts_no_connect = DID_NO_CONNECT << 16;
5064 u32 flags;
5065 u16 sa;
5066 u8 opcode = cmd[0];
5067 bool has_wlun_rl;
5068 bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
5069
5070 scsi_set_resid(scp, 0);
5071 if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
5072 char b[120];
5073 int n, len, sb;
5074
5075 len = scp->cmd_len;
5076 sb = (int)sizeof(b);
5077 if (len > 32)
5078 strcpy(b, "too long, over 32 bytes");
5079 else {
5080 for (k = 0, n = 0; k < len && n < sb; ++k)
5081 n += scnprintf(b + n, sb - n, "%02x ",
5082 (u32)cmd[k]);
5083 }
5084 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
5085 }
5086 has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
5087 if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
5088 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5089
5090 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
5091 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
5092 devip = (struct sdebug_dev_info *)sdp->hostdata;
5093 if (!devip) {
5094 devip = devInfoReg(sdp);
5095 if (NULL == devip)
5096 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5097 }
5098 na = oip->num_attached;
5099 r_pfp = oip->pfp;
5100 if (na) { /* multiple commands with this opcode */
5101 r_oip = oip;
5102 if (FF_SA & r_oip->flags) {
5103 if (F_SA_LOW & oip->flags)
5104 sa = 0x1f & cmd[1];
5105 else
5106 sa = get_unaligned_be16(cmd + 8);
5107 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5108 if (opcode == oip->opcode && sa == oip->sa)
5109 break;
5110 }
5111 } else { /* since no service action only check opcode */
5112 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5113 if (opcode == oip->opcode)
5114 break;
5115 }
5116 }
5117 if (k > na) {
5118 if (F_SA_LOW & r_oip->flags)
5119 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5120 else if (F_SA_HIGH & r_oip->flags)
5121 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5122 else
5123 mk_sense_invalid_opcode(scp);
5124 goto check_cond;
5125 }
5126 } /* else (when na==0) we assume the oip is a match */
5127 flags = oip->flags;
5128 if (F_INV_OP & flags) {
5129 mk_sense_invalid_opcode(scp);
5130 goto check_cond;
5131 }
5132 if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
5133 if (debug)
5134 sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
5135 "0x%x not supported for wlun\n", opcode);
5136 mk_sense_invalid_opcode(scp);
5137 goto check_cond;
5138 }
5139 if (scsi_debug_strict) { /* check cdb against mask */
5140 u8 rem;
5141 int j;
5142
5143 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5144 rem = ~oip->len_mask[k] & cmd[k];
5145 if (rem) {
5146 for (j = 7; j >= 0; --j, rem <<= 1) {
5147 if (0x80 & rem)
5148 break;
5149 }
5150 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5151 goto check_cond;
5152 }
5153 }
5154 }
5155 if (!(F_SKIP_UA & flags) &&
5156 SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
5157 errsts = check_readiness(scp, UAS_ONLY, devip);
5158 if (errsts)
5159 goto check_cond;
5160 }
5161 if ((F_M_ACCESS & flags) && devip->stopped) {
5162 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5163 if (debug)
5164 sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5165 "%s\n", my_name, "initializing command "
5166 "required");
5167 errsts = check_condition_result;
5168 goto fini;
5169 }
5170 if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
5171 goto fini;
5172 if (scsi_debug_every_nth) {
5173 if (check_inject(scp))
5174 return 0; /* ignore command: make trouble */
5175 }
5176 if (oip->pfp) /* if this command has a resp_* function, call it */
5177 errsts = oip->pfp(scp, devip);
5178 else if (r_pfp) /* if leaf function ptr NULL, try the root's */
5179 errsts = r_pfp(scp, devip);
5180
5181fini:
5182 return schedule_resp(scp, devip, errsts,
5183 ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
5184check_cond:
5185 return schedule_resp(scp, devip, check_condition_result, 0);
5186}
5187
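A worked example of the strict-mode walk above: if an opcode's len_mask[1]
were 0x1f (a hypothetical entry with the top three bits of CDB byte 1
reserved) and a command arrived with 0x20 set in that byte, then
rem = ~0x1f & 0x20 = 0x20; the inner loop shifts rem left until bit 7 is
set, stopping at j = 5, and the sense data points at byte 1, bit 5 of the CDB.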
5188static int
5189sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5190{
5191 if (scsi_debug_host_lock) {
5192 unsigned long iflags;
5193 int rc;
5194
5195 spin_lock_irqsave(shost->host_lock, iflags);
5196 rc = scsi_debug_queuecommand(cmd);
5197 spin_unlock_irqrestore(shost->host_lock, iflags);
5198 return rc;
5199 } else
5200 return scsi_debug_queuecommand(cmd);
5201}
5202
4554 5203  static struct scsi_host_template sdebug_driver_template = {
4555 5204  	.show_info = scsi_debug_show_info,
4556 5205  	.write_info = scsi_debug_write_info,
@@ -4576,13 +5225,16 @@ static struct scsi_host_template sdebug_driver_template = {
4576 5225  	.max_sectors = -1U,
4577 5226  	.use_clustering = DISABLE_CLUSTERING,
4578 5227  	.module = THIS_MODULE,
     5228 +	.track_queue_depth = 1,
     5229 +	.cmd_size = sizeof(struct sdebug_scmd_extra_t),
4579 5230  };
4580 5231
4581 5232  static int sdebug_driver_probe(struct device * dev)
4582 5233  {
4583 5234  	int error = 0;
4584      -	struct sdebug_host_info *sdbg_host;
4585      -	struct Scsi_Host *hpnt;
     5235 +	int opts;
     5236 +	struct sdebug_host_info *sdbg_host;
     5237 +	struct Scsi_Host *hpnt;
4586 5238  	int host_prot;
4587 5239
4588 5240  	sdbg_host = to_sdebug_host(dev);
@@ -4592,7 +5244,7 @@ static int sdebug_driver_probe(struct device * dev)
4592 5244  		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
4593 5245  	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
4594 5246  	if (NULL == hpnt) {
4595      -		printk(KERN_ERR "%s: scsi_register failed\n", __func__);
     5247 +		pr_err("%s: scsi_host_alloc failed\n", __func__);
4596 5248  		error = -ENODEV;
4597 5249  		return error;
4598 5250  	}
@@ -4649,6 +5301,18 @@ static int sdebug_driver_probe(struct device * dev)
4649 5301  	else
4650 5302  		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4651 5303
     5304 +	opts = scsi_debug_opts;
     5305 +	if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
     5306 +		sdebug_any_injecting_opt = true;
     5307 +	else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
     5308 +		sdebug_any_injecting_opt = true;
     5309 +	else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
     5310 +		sdebug_any_injecting_opt = true;
     5311 +	else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
     5312 +		sdebug_any_injecting_opt = true;
     5313 +	else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
     5314 +		sdebug_any_injecting_opt = true;
     5315 +
4652 5316  	error = scsi_add_host(hpnt, &sdbg_host->dev);
4653 5317  	if (error) {
4654 5318  		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 96627bae753c..e42fff6e8c10 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -610,7 +610,7 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
610 610  	struct scsi_host_template *sht = sdev->host->hostt;
611 611  	struct scsi_device *tmp_sdev;
612 612
613     -	if (!sht->change_queue_depth ||
    613 +	if (!sht->track_queue_depth ||
614 614  	    sdev->queue_depth >= sdev->max_queue_depth)
615 615  		return;
616 616
@@ -631,12 +631,8 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
631 631  		    tmp_sdev->id != sdev->id ||
632 632  		    tmp_sdev->queue_depth == sdev->max_queue_depth)
633 633  			continue;
634     -		/*
635     -		 * call back into LLD to increase queue_depth by one
636     -		 * with ramp up reason code.
637     -		 */
638     -		sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1,
639     -					SCSI_QDEPTH_RAMP_UP);
    634 +
    635 +		scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1);
640 636  		sdev->last_queue_ramp_up = jiffies;
641 637  	}
642 638  }
@@ -646,7 +642,7 @@ static void scsi_handle_queue_full(struct scsi_device *sdev)
646 642  	struct scsi_host_template *sht = sdev->host->hostt;
647 643  	struct scsi_device *tmp_sdev;
648 644
649     -	if (!sht->change_queue_depth)
    645 +	if (!sht->track_queue_depth)
650 646  		return;
651 647
652 648  	shost_for_each_device(tmp_sdev, sdev->host) {
@@ -658,8 +654,7 @@ static void scsi_handle_queue_full(struct scsi_device *sdev)
658 654  		 * the device when we got the queue full so we start
659 655  		 * from the highest possible value and work our way down.
660 656  		 */
661     -		sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth - 1,
662     -					SCSI_QDEPTH_QFULL);
    657 +		scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
663 658  	}
664 659  }
665 660
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 0cda53adfd35..983aed10ff2f 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -292,7 +292,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
292 292  		blk_queue_init_tags(sdev->request_queue,
293 293  				    sdev->host->cmd_per_lun, shost->bqt);
294 294  	}
295     -	scsi_adjust_queue_depth(sdev, sdev->host->cmd_per_lun);
    295 +	scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
296 296
297 297  	scsi_sysfs_device_initialize(sdev);
298 298
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 35d93b0af82b..1cb64a8e18c9 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -877,11 +877,10 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
877 877
878 878  	depth = simple_strtoul(buf, NULL, 0);
879 879
880     -	if (depth < 1)
    880 +	if (depth < 1 || depth > sht->can_queue)
881 881  		return -EINVAL;
882 882
883     -	retval = sht->change_queue_depth(sdev, depth,
884     -					 SCSI_QDEPTH_DEFAULT);
    883 +	retval = sht->change_queue_depth(sdev, depth);
885 884  	if (retval < 0)
886 885  		return retval;
887 886
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index ff8befbdf17c..e3ba251fb6e7 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1429,7 +1429,7 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
1429 1429
1430 1430  static int storvsc_device_configure(struct scsi_device *sdevice)
1431 1431  {
1432      -	scsi_adjust_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS);
     1432 +	scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS);
1433 1433
1434 1434  	blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
1435 1435
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
deleted file mode 100644
index 835bd8dafe0a..000000000000
--- a/drivers/scsi/sun3_NCR5380.c
+++ /dev/null
@@ -1,2932 +0,0 @@
1/* sun3_NCR5380.c -- adapted from atari_NCR5380.c for the sun3 by
2 Sam Creasey. */
3/*
4 * NCR 5380 generic driver routines. These should make it *trivial*
5 * to implement 5380 SCSI drivers under Linux with a non-trantor
6 * architecture.
7 *
8 * Note that these routines also work with NR53c400 family chips.
9 *
10 * Copyright 1993, Drew Eckhardt
11 * Visionary Computing
12 * (Unix and Linux consulting and custom programming)
13 * drew@colorado.edu
14 * +1 (303) 666-5836
15 *
16 * DISTRIBUTION RELEASE 6.
17 *
18 * For more information, please consult
19 *
20 * NCR 5380 Family
21 * SCSI Protocol Controller
22 * Databook
23 *
24 * NCR Microelectronics
25 * 1635 Aeroplaza Drive
26 * Colorado Springs, CO 80916
27 * 1+ (719) 578-3400
28 * 1+ (800) 334-5454
29 */
30
31/*
32 * ++roman: To port the 5380 driver to the Atari, I had to do some changes in
33 * this file, too:
34 *
35 * - Some of the debug statements were incorrect (undefined variables and the
36 * like). I fixed that.
37 *
38 * - In information_transfer(), I think a #ifdef was wrong. Looking at the
39 * possible DMA transfer size should also happen for REAL_DMA. I added this
40 * in the #if statement.
41 *
42 * - When using real DMA, information_transfer() should return in a DATAOUT
43 * phase after starting the DMA. It has nothing more to do.
44 *
45 * - The interrupt service routine should run main after end of DMA, too (not
46 * only after RESELECTION interrupts). Additionally, it should _not_ test
47 * for more interrupts after running main, since a DMA process may have
48 * been started and interrupts are turned on now. The new int could happen
49 * inside the execution of NCR5380_intr(), leading to recursive
50 * calls.
51 *
52 * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA
53 * and USLEEP, because these were messing up readability and will never be
54 * needed for Atari SCSI.
55 *
56 * - I've revised the NCR5380_main() calling scheme (relax the 'main_running'
57 * stuff), and 'main' is executed in a bottom half if awoken by an
58 * interrupt.
59 *
60 * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..."
61 * constructs. In my eyes, this made the source rather unreadable, so I
62 * finally replaced that by the *_PRINTK() macros.
63 *
64 */
65#include <scsi/scsi_dbg.h>
66#include <scsi/scsi_transport_spi.h>
67
68/*
69 * Further development / testing that should be done :
70 * 1. Test linked command handling code after Eric is ready with
71 * the high level code.
72 */
73
74#if (NDEBUG & NDEBUG_LISTS)
75#define LIST(x,y) \
76 { printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); \
77 if ((x)==(y)) udelay(5); }
78#define REMOVE(w,x,y,z) \
79 { printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, \
80 (void*)(w), (void*)(x), (void*)(y), (void*)(z)); \
81 if ((x)==(y)) udelay(5); }
82#else
83#define LIST(x,y)
84#define REMOVE(w,x,y,z)
85#endif
86
87#ifndef notyet
88#undef LINKED
89#endif
90
91/*
92 * Design
93 * Issues :
94 *
95 * The other Linux SCSI drivers were written when Linux was Intel PC-only,
96 * and specifically for each board rather than each chip. This makes their
97 * adaptation to platforms like the Mac (Some of which use NCR5380's)
98 * more difficult than it has to be.
99 *
100 * Also, many of the SCSI drivers were written before the command queuing
101 * routines were implemented, meaning their implementations of queued
102 * commands were hacked on rather than designed in from the start.
103 *
104 * When I designed the Linux SCSI drivers I figured that
105 * while having two different SCSI boards in a system might be useful
106 * for debugging things, two of the same type wouldn't be used.
107 * Well, I was wrong and a number of users have mailed me about running
108 * multiple high-performance SCSI boards in a server.
109 *
110 * Finally, when I get questions from users, I have no idea what
111 * revision of my driver they are running.
112 *
113 * This driver attempts to address these problems :
114 * This is a generic 5380 driver. To use it on a different platform,
115 * one simply writes appropriate system specific macros (ie, data
116 * transfer - some PC's will use the I/O bus, 68K's must use
117 * memory mapped) and drops this file in their 'C' wrapper.
118 *
119 * As far as command queueing, two queues are maintained for
120 * each 5380 in the system - commands that haven't been issued yet,
121 * and commands that are currently executing. This means that an
122 * unlimited number of commands may be queued, letting
123 * more commands propagate from the higher driver levels giving higher
124 * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported,
125 * allowing multiple commands to propagate all the way to a SCSI-II device
126 * while a command is already executing.
127 *
128 * To solve the multiple-boards-in-the-same-system problem,
129 * there is a separate instance structure for each instance
130 * of a 5380 in the system. So, multiple NCR5380 drivers will
131 * be able to coexist with appropriate changes to the high level
132 * SCSI code.
133 *
134 * A NCR5380_PUBLIC_REVISION macro is provided, with the release
135 * number (updated for each public release) printed by the
136 * NCR5380_print_options command, which should be called from the
137 * wrapper detect function, so that I know what release of the driver
138 * users are using.
139 *
140 * Issues specific to the NCR5380 :
141 *
142 * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
143 * piece of hardware that requires you to sit in a loop polling for
144 * the REQ signal as long as you are connected. Some devices are
145 * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
146 * while doing long seek operations.
147 *
148 * The workaround for this is to keep track of devices that have
149 * disconnected. If the device hasn't disconnected, for commands that
150 * should disconnect, we do something like
151 *
152 * while (!REQ is asserted) { sleep for N usecs; poll for M usecs }
153 *
154 * Some tweaking of N and M needs to be done. An algorithm based
155 * on "time to data" would give the best results as long as short time
156 * to datas (ie, on the same track) were considered, however these
157 * broken devices are the exception rather than the rule and I'd rather
158 * spend my time optimizing for the normal case.
159 *
160 * Architecture :
161 *
162 * At the heart of the design is a coroutine, NCR5380_main,
163 * which is started when not running by the interrupt handler,
164 * timer, and queue command function. It attempts to establish
165 * I_T_L or I_T_L_Q nexuses by removing the commands from the
166 * issue queue and calling NCR5380_select() if a nexus
167 * is not established.
168 *
169 * Once a nexus is established, the NCR5380_information_transfer()
170 * phase goes through the various phases as instructed by the target.
171 * if the target goes into MSG IN and sends a DISCONNECT message,
172 * the command structure is placed into the per instance disconnected
173 * queue, and NCR5380_main tries to find more work. If USLEEP
174 * was defined, and the target is idle for too long, the system
175 * will try to sleep.
176 *
177 * If a command has disconnected, eventually an interrupt will trigger,
178 * calling NCR5380_intr() which will in turn call NCR5380_reselect
179 * to reestablish a nexus. This will run main if necessary.
180 *
181 * On command termination, the done function will be called as
182 * appropriate.
183 *
184 * SCSI pointers are maintained in the SCp field of SCSI command
185 * structures, being initialized after the command is connected
186 * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
187 * Note that in violation of the standard, an implicit SAVE POINTERS operation
188 * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
189 */
190
191/*
192 * Using this file :
193 * This file a skeleton Linux SCSI driver for the NCR 5380 series
194 * of chips. To use it, you write an architecture specific functions
195 * and macros and include this file in your driver.
196 *
197 * These macros control options :
198 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
199 * for commands that return with a CHECK CONDITION status.
200 *
201 * LINKED - if defined, linked commands are supported.
202 *
203 * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
204 *
205 * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible
206 *
207 * These macros MUST be defined :
208 *
209 * NCR5380_read(register) - read from the specified register
210 *
211 * NCR5380_write(register, value) - write to the specific register
212 *
213 * Either real DMA *or* pseudo DMA may be implemented
214 * REAL functions :
215 * NCR5380_REAL_DMA should be defined if real DMA is to be used.
216 * Note that the DMA setup functions should return the number of bytes
217 * that they were able to program the controller for.
218 *
219 * Also note that generic i386/PC versions of these macros are
220 * available as NCR5380_i386_dma_write_setup,
221 * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
222 *
223 * NCR5380_dma_write_setup(instance, src, count) - initialize
224 * NCR5380_dma_read_setup(instance, dst, count) - initialize
225 * NCR5380_dma_residual(instance); - residual count
226 *
227 * PSEUDO functions :
228 * NCR5380_pwrite(instance, src, count)
229 * NCR5380_pread(instance, dst, count);
230 *
231 * If nothing specific to this implementation needs doing (ie, with external
232 * hardware), you must also define
233 *
234 * NCR5380_queue_command
235 * NCR5380_reset
236 * NCR5380_abort
237 * NCR5380_proc_info
238 *
239 * to be the global entry points into the specific driver, ie
240 * #define NCR5380_queue_command t128_queue_command.
241 *
242 * If this is not done, the routines will be defined as static functions
243 * with the NCR5380* names and the user must provide a globally
244 * accessible wrapper function.
245 *
246 * The generic driver is initialized by calling NCR5380_init(instance),
247 * after setting the appropriate host specific fields and ID. If the
248 * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
249 * possible) function may be used. Before the specific driver initialization
250 * code finishes, NCR5380_print_options should be called.
251 */
252
253static struct Scsi_Host *first_instance = NULL;
254static struct scsi_host_template *the_template = NULL;
255
256/* Macros ease life... :-) */
257#define SETUP_HOSTDATA(in) \
258 struct NCR5380_hostdata *hostdata = \
259 (struct NCR5380_hostdata *)(in)->hostdata
260#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)
261
262#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble)
263#define SET_NEXT(cmd, next) ((cmd)->host_scribble = (void *)(next))
264#define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble))
265
266#define HOSTNO instance->host_no
267#define H_NO(cmd) (cmd)->device->host->host_no
268
269#define SGADDR(buffer) (void *)(((unsigned long)sg_virt(((buffer)))))
270
271#ifdef SUPPORT_TAGS
272
273/*
274 * Functions for handling tagged queuing
275 * =====================================
276 *
277 * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes:
278 *
279 * Using consecutive numbers for the tags is no good idea in my eyes. There
280 * could be wrong re-usings if the counter (8 bit!) wraps and some early
281 * command has been preempted for a long time. My solution: a bitfield for
282 * remembering used tags.
283 *
284 * There's also the problem that each target has a certain queue size, but we
285 * cannot know it in advance :-( We just see a QUEUE_FULL status being
286 * returned. So, in this case, the driver internal queue size assumption is
287 * reduced to the number of active tags if QUEUE_FULL is returned by the
288 * target. The command is returned to the mid-level, but with status changed
289 * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL
290 * correctly.
291 *
292 * We're also not allowed running tagged commands as long as an untagged
293 * command is active. And REQUEST SENSE commands after a contingent allegiance
294 * condition _must_ be untagged. To keep track whether an untagged command has
295 * been issued, the host->busy array is still employed, as it is without
296 * support for tagged queuing.
297 *
 * One could suspect that there are possible race conditions between
 * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the
 * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(),
 * which is already guaranteed to be running at most once. It is also the only
 * place where tags/LUNs are allocated. So no other allocation can slip in
 * between that pair; all that could happen is a reselection, which can free a
 * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes
 * important: the tag bit must be cleared before 'nr_allocated' is decreased.
306 */
307
308/* -1 for TAG_NONE is not possible with unsigned char cmd->tag */
309#undef TAG_NONE
310#define TAG_NONE 0xff
311
312/* For the m68k, the number of bits in 'allocated' must be a multiple of 32! */
313#if (MAX_TAGS % 32) != 0
314#error "MAX_TAGS must be a multiple of 32!"
315#endif
316
317typedef struct {
318 char allocated[MAX_TAGS/8];
319 int nr_allocated;
320 int queue_size;
321} TAG_ALLOC;
322
323static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */
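
/* One TAG_ALLOC per I_T_L nexus: 'allocated' is a bitmap of the tags
 * currently in flight, 'nr_allocated' its population count, and 'queue_size'
 * our running estimate of the target's queue depth (shrunk on QUEUE_FULL).
 */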
324
325
static void __init init_tags(void)
{
	int target, lun;
	TAG_ALLOC *ta;

	if (!setup_use_tagged_queuing)
		return;

	for (target = 0; target < 8; ++target) {
		for (lun = 0; lun < 8; ++lun) {
			ta = &TagAlloc[target][lun];
			memset(&ta->allocated, 0, MAX_TAGS/8);
			ta->nr_allocated = 0;
			/* At the beginning, assume the maximum queue size we
			 * could support (MAX_TAGS). This value will be
			 * decreased if the target returns QUEUE_FULL status.
			 */
			ta->queue_size = MAX_TAGS;
		}
	}
}
347
348
349/* Check if we can issue a command to this LUN: First see if the LUN is marked
350 * busy by an untagged command. If the command should use tagged queuing, also
351 * check that there is a free tag and the target's queue won't overflow. This
352 * function should be called with interrupts disabled to avoid race
353 * conditions.
354 */
355
356static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
357{
358 u8 lun = cmd->device->lun;
359 SETUP_HOSTDATA(cmd->device->host);
360
	if (hostdata->busy[cmd->device->id] & (1 << lun))
		return 1;
	if (!should_be_tagged ||
	    !setup_use_tagged_queuing || !cmd->device->tagged_supported)
		return 0;
	if (TagAlloc[cmd->device->id][lun].nr_allocated >=
	    TagAlloc[cmd->device->id][lun].queue_size) {
		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
			H_NO(cmd), cmd->device->id, lun);
		return 1;
	}
	return 0;
373}
374
375
/* Allocate a tag for a command (there are no checks anymore; is_lun_busy()
 * must be called beforehand!), or reserve the LUN in 'busy' if the command is
 * untagged.
 */
379 */
380
381static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
382{
383 u8 lun = cmd->device->lun;
384 SETUP_HOSTDATA(cmd->device->host);
385
386 /* If we or the target don't support tagged queuing, allocate the LUN for
387 * an untagged command.
388 */
	if (!should_be_tagged ||
	    !setup_use_tagged_queuing || !cmd->device->tagged_supported) {
		cmd->tag = TAG_NONE;
		hostdata->busy[cmd->device->id] |= (1 << lun);
		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by "
			"untagged command\n", H_NO(cmd), cmd->device->id, lun);
	} else {
		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun];

		cmd->tag = find_first_zero_bit(&ta->allocated, MAX_TAGS);
		set_bit(cmd->tag, &ta->allocated);
		ta->nr_allocated++;
		dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
			"(now %d tags in use)\n",
			H_NO(cmd), cmd->tag, cmd->device->id, lun,
			ta->nr_allocated);
	}
407}
408
409
410/* Mark the tag of command 'cmd' as free, or in case of an untagged command,
411 * unlock the LUN.
412 */
413
414static void cmd_free_tag(struct scsi_cmnd *cmd)
415{
416 u8 lun = cmd->device->lun;
417 SETUP_HOSTDATA(cmd->device->host);
418
419 if (cmd->tag == TAG_NONE) {
420 hostdata->busy[cmd->device->id] &= ~(1 << lun);
421 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
422 H_NO(cmd), cmd->device->id, lun );
423 }
424 else if (cmd->tag >= MAX_TAGS) {
425 printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
426 H_NO(cmd), cmd->tag );
427 }
428 else {
429 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun];
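		/* Ordering matters: clear the tag bit before decreasing
		 * 'nr_allocated' (see the tagged queuing notes above). */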
		clear_bit(cmd->tag, &ta->allocated);
431 ta->nr_allocated--;
432 dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
433 H_NO(cmd), cmd->tag, cmd->device->id, lun );
434 }
435}
436
437
static void free_all_tags(void)
{
	int target, lun;
	TAG_ALLOC *ta;

	if (!setup_use_tagged_queuing)
		return;

	for (target = 0; target < 8; ++target) {
		for (lun = 0; lun < 8; ++lun) {
			ta = &TagAlloc[target][lun];
			memset(&ta->allocated, 0, MAX_TAGS/8);
			ta->nr_allocated = 0;
		}
	}
}
454
455#endif /* SUPPORT_TAGS */
456
457
458/*
459 * Function : void initialize_SCp(struct scsi_cmnd *cmd)
460 *
461 * Purpose : initialize the saved data pointers for cmd to point to the
462 * start of the buffer.
463 *
464 * Inputs : cmd - struct scsi_cmnd structure to have pointers reset.
465 */
466
467static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
468{
469 /*
470 * Initialize the Scsi Pointer field so that all of the commands in the
471 * various queues are valid.
472 */
473
474 if (scsi_bufflen(cmd)) {
475 cmd->SCp.buffer = scsi_sglist(cmd);
476 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
477 cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
478 cmd->SCp.this_residual = cmd->SCp.buffer->length;
479 } else {
480 cmd->SCp.buffer = NULL;
481 cmd->SCp.buffers_residual = 0;
482 cmd->SCp.ptr = NULL;
483 cmd->SCp.this_residual = 0;
484 }
485
486}
487
488#include <linux/delay.h>
489
490#if NDEBUG
static struct {
	unsigned char mask;
	const char *name;
} signals[] = {
	{SR_DBP, "PARITY"}, {SR_RST, "RST"}, {SR_BSY, "BSY"},
	{SR_REQ, "REQ"}, {SR_MSG, "MSG"}, {SR_CD, "CD"}, {SR_IO, "IO"},
	{SR_SEL, "SEL"}, {0, NULL}
}, basrs[] = {
	{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}
}, icrs[] = {
	{ICR_ASSERT_RST, "ASSERT RST"}, {ICR_ASSERT_ACK, "ASSERT ACK"},
	{ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
	{ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
	{0, NULL}
}, mrs[] = {
	{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
	{MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"},
	{MR_ENABLE_PAR_INTR, "MODE PARITY INTR"},
	{MR_ENABLE_EOP_INTR, "MODE EOP INTR"},
	{MR_MONITOR_BSY, "MODE MONITOR BSY"},
	{MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"},
	{0, NULL}
};
508
509/*
510 * Function : void NCR5380_print(struct Scsi_Host *instance)
511 *
512 * Purpose : print the SCSI bus signals for debugging purposes
513 *
514 * Input : instance - which NCR5380
515 */
516
static void NCR5380_print(struct Scsi_Host *instance)
{
518 unsigned char status, data, basr, mr, icr, i;
519 unsigned long flags;
520
521 local_irq_save(flags);
522 data = NCR5380_read(CURRENT_SCSI_DATA_REG);
523 status = NCR5380_read(STATUS_REG);
524 mr = NCR5380_read(MODE_REG);
525 icr = NCR5380_read(INITIATOR_COMMAND_REG);
526 basr = NCR5380_read(BUS_AND_STATUS_REG);
527 local_irq_restore(flags);
528 printk("STATUS_REG: %02x ", status);
529 for (i = 0; signals[i].mask ; ++i)
530 if (status & signals[i].mask)
531 printk(",%s", signals[i].name);
532 printk("\nBASR: %02x ", basr);
533 for (i = 0; basrs[i].mask ; ++i)
534 if (basr & basrs[i].mask)
535 printk(",%s", basrs[i].name);
536 printk("\nICR: %02x ", icr);
537 for (i = 0; icrs[i].mask; ++i)
538 if (icr & icrs[i].mask)
539 printk(",%s", icrs[i].name);
540 printk("\nMODE: %02x ", mr);
541 for (i = 0; mrs[i].mask; ++i)
542 if (mr & mrs[i].mask)
543 printk(",%s", mrs[i].name);
544 printk("\n");
545}
546
547static struct {
548 unsigned char value;
549 const char *name;
550} phases[] = {
551 {PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"},
552 {PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
553 {PHASE_UNKNOWN, "UNKNOWN"}};
554
555/*
556 * Function : void NCR5380_print_phase(struct Scsi_Host *instance)
557 *
558 * Purpose : print the current SCSI phase for debugging purposes
559 *
560 * Input : instance - which NCR5380
561 */
562
563static void NCR5380_print_phase(struct Scsi_Host *instance)
564{
565 unsigned char status;
566 int i;
567
568 status = NCR5380_read(STATUS_REG);
569 if (!(status & SR_REQ))
570 printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO);
571 else {
572 for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
573 (phases[i].value != (status & PHASE_MASK)); ++i);
574 printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
575 }
576}
577
578#endif
579
580/*
581 * ++roman: New scheme of calling NCR5380_main()
582 *
 * If we're not in an interrupt, we can call our main directly; it cannot
 * already be running. Otherwise, we queue it on a task queue, unless
 * 'main_running' tells us that a lower level is already executing it. This
 * way, 'main_running' need not be protected in a special way.
 *
 * queue_main() is a utility function for putting our main onto the task
 * queue, if main_running is false. It should be called only from an
 * interrupt or bottom half.
591 */
592
593#include <linux/gfp.h>
594#include <linux/workqueue.h>
595#include <linux/interrupt.h>
596
597static volatile int main_running = 0;
598static DECLARE_WORK(NCR5380_tqueue, NCR5380_main);
599
600static __inline__ void queue_main(void)
601{
602 if (!main_running) {
603 /* If in interrupt and NCR5380_main() not already running,
604 queue it on the 'immediate' task queue, to be processed
605 immediately after the current interrupt processing has
606 finished. */
607 schedule_work(&NCR5380_tqueue);
608 }
609 /* else: nothing to do: the running NCR5380_main() will pick up
610 any newly queued command. */
611}
612
613
614static inline void NCR5380_all_init (void)
615{
616 static int done = 0;
617 if (!done) {
618 dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
619 done = 1;
620 }
621}
622
623
624/*
 * Function : void NCR5380_print_options (struct Scsi_Host *instance)
 *
 * Purpose : called by probe code to report the NCR5380 driver
 * options that were selected.
 *
 * Inputs : instance, pointer to this instance. Unused.
631 */
632
633static void __init NCR5380_print_options (struct Scsi_Host *instance)
634{
635 printk(" generic options"
636#ifdef AUTOSENSE
637 " AUTOSENSE"
638#endif
639#ifdef REAL_DMA
640 " REAL DMA"
641#endif
642#ifdef PARITY
643 " PARITY"
644#endif
645#ifdef SUPPORT_TAGS
646 " SCSI-2 TAGGED QUEUING"
647#endif
648 );
649 printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
650}
651
652/*
653 * Function : void NCR5380_print_status (struct Scsi_Host *instance)
654 *
655 * Purpose : print commands in the various queues, called from
656 * NCR5380_abort and NCR5380_debug to aid debugging.
657 *
658 * Inputs : instance, pointer to this instance.
659 */
660
661static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd)
662{
663 int i, s;
664 unsigned char *command;
665 printk("scsi%d: destination target %d, lun %llu\n",
666 H_NO(cmd), cmd->device->id, cmd->device->lun);
667 printk(KERN_CONT " command = ");
668 command = cmd->cmnd;
669 printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]);
670 for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
671 printk(KERN_CONT " %02x", command[i]);
672 printk("\n");
673}
674
675static void NCR5380_print_status(struct Scsi_Host *instance)
676{
677 struct NCR5380_hostdata *hostdata;
678 Scsi_Cmnd *ptr;
679 unsigned long flags;
680
681 NCR5380_dprint(NDEBUG_ANY, instance);
682 NCR5380_dprint_phase(NDEBUG_ANY, instance);
683
684 hostdata = (struct NCR5380_hostdata *)instance->hostdata;
685
686 printk("\nNCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
687 local_irq_save(flags);
688 printk("NCR5380: coroutine is%s running.\n",
689 main_running ? "" : "n't");
690 if (!hostdata->connected)
691 printk("scsi%d: no currently connected command\n", HOSTNO);
692 else
693 lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected);
694 printk("scsi%d: issue_queue\n", HOSTNO);
695 for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
696 lprint_Scsi_Cmnd(ptr);
697
698 printk("scsi%d: disconnected_queue\n", HOSTNO);
699 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
700 ptr = NEXT(ptr))
701 lprint_Scsi_Cmnd(ptr);
702
703 local_irq_restore(flags);
704 printk("\n");
705}
706
707static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m)
708{
709 int i, s;
710 unsigned char *command;
711 seq_printf(m, "scsi%d: destination target %d, lun %llu\n",
712 H_NO(cmd), cmd->device->id, cmd->device->lun);
713 seq_printf(m, " command = ");
714 command = cmd->cmnd;
715 seq_printf(m, "%2d (0x%02x)", command[0], command[0]);
716 for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
717 seq_printf(m, " %02x", command[i]);
718 seq_printf(m, "\n");
719}
720
721static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance)
722{
723 struct NCR5380_hostdata *hostdata;
724 Scsi_Cmnd *ptr;
725 unsigned long flags;
726
727 hostdata = (struct NCR5380_hostdata *)instance->hostdata;
728
729 seq_printf(m, "NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
730 local_irq_save(flags);
731 seq_printf(m, "NCR5380: coroutine is%s running.\n",
732 main_running ? "" : "n't");
733 if (!hostdata->connected)
734 seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO);
735 else
736 show_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m);
737 seq_printf(m, "scsi%d: issue_queue\n", HOSTNO);
738 for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
739 show_Scsi_Cmnd(ptr, m);
740
741 seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO);
742 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
743 ptr = NEXT(ptr))
744 show_Scsi_Cmnd(ptr, m);
745
746 local_irq_restore(flags);
747 return 0;
748}
749
750/*
751 * Function : void NCR5380_init (struct Scsi_Host *instance)
752 *
753 * Purpose : initializes *instance and corresponding 5380 chip.
754 *
755 * Inputs : instance - instantiation of the 5380 driver.
756 *
757 * Notes : I assume that the host, hostno, and id bits have been
758 * set correctly. I don't care about the irq and other fields.
759 *
760 */
761
762static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
763{
764 int i;
765 SETUP_HOSTDATA(instance);
766
767 NCR5380_all_init();
768
769 hostdata->aborted = 0;
770 hostdata->id_mask = 1 << instance->this_id;
771 hostdata->id_higher_mask = 0;
772 for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
773 if (i > hostdata->id_mask)
774 hostdata->id_higher_mask |= i;
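	/* id_higher_mask now has a bit set for every bus ID that would beat
	 * us in arbitration; NCR5380_select() checks it against
	 * CURRENT_SCSI_DATA_REG after asserting our own ID. */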
775 for (i = 0; i < 8; ++i)
776 hostdata->busy[i] = 0;
777#ifdef SUPPORT_TAGS
778 init_tags();
779#endif
780#if defined (REAL_DMA)
781 hostdata->dma_len = 0;
782#endif
783 hostdata->targets_present = 0;
784 hostdata->connected = NULL;
785 hostdata->issue_queue = NULL;
786 hostdata->disconnected_queue = NULL;
787 hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT;
788
789 if (!the_template) {
790 the_template = instance->hostt;
791 first_instance = instance;
792 }
793
794
795#ifndef AUTOSENSE
796 if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
797 printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n"
798 " without AUTOSENSE option, contingent allegiance conditions may\n"
799 " be incorrectly cleared.\n", HOSTNO);
800#endif /* def AUTOSENSE */
801
	/* Quiesce the chip: deassert all bus signals, leave DMA and
	 * arbitration modes, zero the phase comparison bits, and keep
	 * (re)selection detection disabled until we have something to do. */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(TARGET_COMMAND_REG, 0);
	NCR5380_write(SELECT_ENABLE_REG, 0);
806
807 return 0;
808}
809
810static void NCR5380_exit(struct Scsi_Host *instance)
811{
812 /* Empty, as we didn't schedule any delayed work */
813}
814
815/*
816 * Function : int NCR5380_queue_command (struct scsi_cmnd *cmd,
817 * void (*done)(struct scsi_cmnd *))
818 *
819 * Purpose : enqueues a SCSI command
820 *
821 * Inputs : cmd - SCSI command, done - function called on completion, with
822 * a pointer to the command descriptor.
823 *
824 * Returns : 0
825 *
826 * Side effects :
827 * cmd is added to the per instance issue_queue, with minor
828 * twiddling done to the host specific fields of cmd. If the
829 * main coroutine is not running, it is restarted.
830 *
831 */
832
833/* Only make static if a wrapper function is used */
834static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
835 void (*done)(struct scsi_cmnd *))
836{
837 SETUP_HOSTDATA(cmd->device->host);
838 struct scsi_cmnd *tmp;
839 unsigned long flags;
840
841#if (NDEBUG & NDEBUG_NO_WRITE)
842 switch (cmd->cmnd[0]) {
843 case WRITE_6:
844 case WRITE_10:
845 printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n",
846 H_NO(cmd));
847 cmd->result = (DID_ERROR << 16);
848 done(cmd);
849 return 0;
850 }
851#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
852
853
854#ifdef NCR5380_STATS
855# if 0
856 if (!hostdata->connected && !hostdata->issue_queue &&
857 !hostdata->disconnected_queue) {
858 hostdata->timebase = jiffies;
859 }
860# endif
861# ifdef NCR5380_STAT_LIMIT
862 if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
863# endif
864 switch (cmd->cmnd[0])
865 {
866 case WRITE:
867 case WRITE_6:
868 case WRITE_10:
869 hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
870 hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
871 hostdata->pendingw++;
872 break;
873 case READ:
874 case READ_6:
875 case READ_10:
876 hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
877 hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
878 hostdata->pendingr++;
879 break;
880 }
881#endif
882
883 /*
884 * We use the host_scribble field as a pointer to the next command
885 * in a queue
886 */
887
888 SET_NEXT(cmd, NULL);
889 cmd->scsi_done = done;
890
891 cmd->result = 0;
892
893
894 /*
895 * Insert the cmd into the issue queue. Note that REQUEST SENSE
896 * commands are added to the head of the queue since any command will
897 * clear the contingent allegiance condition that exists and the
898 * sense data is only guaranteed to be valid while the condition exists.
899 */
900
901 local_irq_save(flags);
	/* ++guenther: now that the issue queue is being set up, we can lock
	 * ST-DMA. Otherwise a running NCR5380_main may steal the lock.
	 * Lock before actually inserting, for the fairness reasons explained
	 * in atari_scsi.c. If we inserted first, it would be impossible for
	 * this driver to release the lock.
	 * Stop the timer for this command while waiting for the lock, or
	 * timeouts may happen (and they really do), and it's no good if the
	 * command doesn't appear in any of the queues.
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because a timer interrupt can also trigger an abort or reset, which
	 * would alter the queues and touch the lock.
	 */
914 if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
915 LIST(cmd, hostdata->issue_queue);
916 SET_NEXT(cmd, hostdata->issue_queue);
917 hostdata->issue_queue = cmd;
918 } else {
919 for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
920 NEXT(tmp); tmp = NEXT(tmp))
921 ;
922 LIST(cmd, tmp);
923 SET_NEXT(tmp, cmd);
924 }
925
926 local_irq_restore(flags);
927
928 dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
929 (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
930
931 /* If queue_command() is called from an interrupt (real one or bottom
932 * half), we let queue_main() do the job of taking care about main. If it
933 * is already running, this is a no-op, else main will be queued.
934 *
935 * If we're not in an interrupt, we can call NCR5380_main()
936 * unconditionally, because it cannot be already running.
937 */
938 if (in_interrupt() || ((flags >> 8) & 7) >= 6)
939 queue_main();
940 else
941 NCR5380_main(NULL);
942 return 0;
943}
944
945static DEF_SCSI_QCMD(NCR5380_queue_command)
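
/* DEF_SCSI_QCMD() generates the NCR5380_queue_command() entry point as a
 * wrapper that takes the host lock around NCR5380_queue_command_lck(). */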
946
947/*
948 * Function : NCR5380_main (void)
949 *
950 * Purpose : NCR5380_main is a coroutine that runs as long as more work can
951 * be done on the NCR5380 host adapters in a system. Both
952 * NCR5380_queue_command() and NCR5380_intr() will try to start it
953 * in case it is not running.
954 *
955 * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should
956 * reenable them. This prevents reentrancy and kernel stack overflow.
957 */
958
959static void NCR5380_main (struct work_struct *bl)
960{
961 struct scsi_cmnd *tmp, *prev;
962 struct Scsi_Host *instance = first_instance;
963 struct NCR5380_hostdata *hostdata = HOSTDATA(instance);
964 int done;
965 unsigned long flags;
966
967 /*
968 * We run (with interrupts disabled) until we're sure that none of
969 * the host adapters have anything that can be done, at which point
970 * we set main_running to 0 and exit.
971 *
972 * Interrupts are enabled before doing various other internal
973 * instructions, after we've decided that we need to run through
974 * the loop again.
975 *
	 * This should prevent any race conditions.
	 *
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because a timer interrupt can also trigger an abort or reset, which
	 * can alter the queues and touch the Falcon lock.
981 */
982
	/* Tell the interrupt handlers that main() is now executing. Note that
	   no races are possible here: if an interrupt comes in before
	   'main_running' is set here and queues/executes main via the
	   task queue, it does no harm; this instance of main just
	   won't find any work left to do. */
988 if (main_running)
989 return;
990 main_running = 1;
991
992 local_save_flags(flags);
993 do {
994 local_irq_disable(); /* Freeze request queues */
995 done = 1;
996
997 if (!hostdata->connected) {
998 dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO );
999 /*
1000 * Search through the issue_queue for a command destined
1001 * for a target that's not busy.
1002 */
1003#if (NDEBUG & NDEBUG_LISTS)
1004 for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL;
1005 tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
1006 ;
1007 if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/
1008#endif
1009 for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
1010 prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) {
1011
1012 if (prev != tmp)
1013 dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
1014 /* When we find one, remove it from the issue queue. */
1015 /* ++guenther: possible race with Falcon locking */
1016 if (
1017#ifdef SUPPORT_TAGS
1018 !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE)
1019#else
1020 !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))
1021#endif
1022 ) {
1023 /* ++guenther: just to be sure, this must be atomic */
1024 local_irq_disable();
1025 if (prev) {
1026 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
1027 SET_NEXT(prev, NEXT(tmp));
1028 } else {
1029 REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
1030 hostdata->issue_queue = NEXT(tmp);
1031 }
1032 SET_NEXT(tmp, NULL);
1033
1034 /* reenable interrupts after finding one */
1035 local_irq_restore(flags);
1036
1037 /*
1038 * Attempt to establish an I_T_L nexus here.
1039 * On success, instance->hostdata->connected is set.
1040 * On failure, we must add the command back to the
1041 * issue queue so we can keep trying.
1042 */
1043 dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
1044 "lun %llu removed from issue_queue\n",
1045 HOSTNO, tmp->device->id, tmp->device->lun);
1046 /*
1047 * REQUEST SENSE commands are issued without tagged
1048 * queueing, even on SCSI-II devices because the
1049 * contingent allegiance condition exists for the
1050 * entire unit.
1051 */
1052 /* ++roman: ...and the standard also requires that
	 * REQUEST SENSE commands are untagged.
1054 */
1055
1056#ifdef SUPPORT_TAGS
1057 cmd_get_tag( tmp, tmp->cmnd[0] != REQUEST_SENSE );
1058#endif
1059 if (!NCR5380_select(instance, tmp,
1060 (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE :
1061 TAG_NEXT)) {
1062 break;
1063 } else {
1064 local_irq_disable();
1065 LIST(tmp, hostdata->issue_queue);
1066 SET_NEXT(tmp, hostdata->issue_queue);
1067 hostdata->issue_queue = tmp;
1068#ifdef SUPPORT_TAGS
1069 cmd_free_tag( tmp );
1070#endif
1071 local_irq_restore(flags);
1072 dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
1073 "returned to issue_queue\n", HOSTNO);
1074 if (hostdata->connected)
1075 break;
1076 }
1077 } /* if target/lun/target queue is not busy */
1078 } /* for issue_queue */
1079 } /* if (!hostdata->connected) */
1080 if (hostdata->connected
1081#ifdef REAL_DMA
1082 && !hostdata->dma_len
1083#endif
1084 ) {
1085 local_irq_restore(flags);
1086 dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
1087 HOSTNO);
1088 NCR5380_information_transfer(instance);
1089 dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
1090 done = 0;
1091 }
1092 } while (!done);
1093
1094 /* Better allow ints _after_ 'main_running' has been cleared, else
1095 an interrupt could believe we'll pick up the work it left for
1096 us, but we won't see it anymore here... */
1097 main_running = 0;
1098 local_irq_restore(flags);
1099}
1100
1101
1102#ifdef REAL_DMA
1103/*
1104 * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
1105 *
1106 * Purpose : Called by interrupt handler when DMA finishes or a phase
1107 * mismatch occurs (which would finish the DMA transfer).
1108 *
1109 * Inputs : instance - this instance of the NCR5380.
1110 *
1111 */
1112
1113static void NCR5380_dma_complete( struct Scsi_Host *instance )
1114{
1115 SETUP_HOSTDATA(instance);
	int transferred;
1117 unsigned char **data;
1118 volatile int *count;
1119
1120 if (!hostdata->connected) {
1121 printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
1122 "no connected cmd\n", HOSTNO);
1123 return;
1124 }
1125
1126 dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
1127 HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
1128 NCR5380_read(STATUS_REG));
1129
1130 if((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
1131 printk("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", HOSTNO);
1132 printk("please e-mail sammy@sammy.net with a description of how this\n");
1133 printk("error was produced.\n");
1134 BUG();
1135 }
1136
1137 /* make sure we're not stuck in a data phase */
1138 if((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH |
1139 BASR_ACK)) ==
1140 (BASR_PHASE_MATCH | BASR_ACK)) {
1141 printk("scsi%d: BASR %02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG));
1142 printk("scsi%d: bus stuck in data phase -- probably a single byte "
1143 "overrun!\n", HOSTNO);
1144 printk("not prepared for this error!\n");
1145 printk("please e-mail sammy@sammy.net with a description of how this\n");
1146 printk("error was produced.\n");
1147 BUG();
1148 }
1149
1150
1151
1152 (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1153 NCR5380_write(MODE_REG, MR_BASE);
1154 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1155
	transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
1157 hostdata->dma_len = 0;
1158
	/* Advance the connected command's saved data pointer and shrink its
	 * residual by the number of bytes actually transferred. */
	data = (unsigned char **)&(hostdata->connected->SCp.ptr);
	count = &(hostdata->connected->SCp.this_residual);
	*data += transferred;
	*count -= transferred;
1163
1164}
1165#endif /* REAL_DMA */
1166
1167
1168/*
1169 * Function : void NCR5380_intr (int irq)
1170 *
1171 * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
1172 * from the disconnected queue, and restarting NCR5380_main()
1173 * as required.
1174 *
1175 * Inputs : int irq, irq that caused this interrupt.
1176 *
1177 */
1178
1179static irqreturn_t NCR5380_intr (int irq, void *dev_id)
1180{
1181 struct Scsi_Host *instance = first_instance;
1182 int done = 1, handled = 0;
1183 unsigned char basr;
1184
1185 dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);
1186
1187 /* Look for pending interrupts */
1188 basr = NCR5380_read(BUS_AND_STATUS_REG);
1189 dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
1190 /* dispatch to appropriate routine if found and done=0 */
1191 if (basr & BASR_IRQ) {
1192 NCR5380_dprint(NDEBUG_INTR, instance);
1193 if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
1194 done = 0;
1195// ENABLE_IRQ();
1196 dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
1197 NCR5380_reselect(instance);
1198 (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1199 }
1200 else if (basr & BASR_PARITY_ERROR) {
1201 dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
1202 (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1203 }
1204 else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
1205 dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
1206 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1207 }
1208 else {
1209 /*
1210 * The rest of the interrupt conditions can occur only during a
1211 * DMA transfer
1212 */
1213
1214#if defined(REAL_DMA)
1215 /*
1216 * We should only get PHASE MISMATCH and EOP interrupts if we have
1217 * DMA enabled, so do a sanity check based on the current setting
1218 * of the MODE register.
1219 */
1220
1221 if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) &&
1222 ((basr & BASR_END_DMA_TRANSFER) ||
1223 !(basr & BASR_PHASE_MATCH))) {
1224
1225 dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
1226 NCR5380_dma_complete( instance );
1227 done = 0;
1228// ENABLE_IRQ();
1229 } else
1230#endif /* REAL_DMA */
1231 {
1232/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
1233 if (basr & BASR_PHASE_MATCH)
1234 dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "
1235 "BASR 0x%x, MR 0x%x, SR 0x%x\n",
1236 HOSTNO, basr, NCR5380_read(MODE_REG),
1237 NCR5380_read(STATUS_REG));
1238 (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1239#ifdef SUN3_SCSI_VME
1240 dregs->csr |= CSR_DMA_ENABLE;
1241#endif
1242 }
1243 } /* if !(SELECTION || PARITY) */
1244 handled = 1;
1245 } /* BASR & IRQ */
1246 else {
1247
1248 printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, "
1249 "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
1250 NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
1251 (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1252#ifdef SUN3_SCSI_VME
1253 dregs->csr |= CSR_DMA_ENABLE;
1254#endif
1255 }
1256
1257 if (!done) {
1258 dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
1259 /* Put a call to NCR5380_main() on the queue... */
1260 queue_main();
1261 }
1262 return IRQ_RETVAL(handled);
1263}
1264
1265#ifdef NCR5380_STATS
1266static void collect_stats(struct NCR5380_hostdata *hostdata,
1267 struct scsi_cmnd *cmd)
1268{
1269# ifdef NCR5380_STAT_LIMIT
1270 if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
1271# endif
1272 switch (cmd->cmnd[0])
1273 {
1274 case WRITE:
1275 case WRITE_6:
1276 case WRITE_10:
1277 hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
1278 /*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
1279 hostdata->pendingw--;
1280 break;
1281 case READ:
1282 case READ_6:
1283 case READ_10:
1284 hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
1285 /*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
1286 hostdata->pendingr--;
1287 break;
1288 }
1289}
1290#endif
1291
1292/*
1293 * Function : int NCR5380_select(struct Scsi_Host *instance,
1294 * struct scsi_cmnd *cmd, int tag);
1295 *
1296 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
1297 * including ARBITRATION, SELECTION, and initial message out for
1298 * IDENTIFY and queue messages.
1299 *
1300 * Inputs : instance - instantiation of the 5380 driver on which this
1301 * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for
1302 * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
1303 * the command that is presently connected.
1304 *
1305 * Returns : -1 if selection could not execute for some reason,
1306 * 0 if selection succeeded or failed because the target
1307 * did not respond.
1308 *
1309 * Side effects :
1310 * If bus busy, arbitration failed, etc, NCR5380_select() will exit
1311 * with registers as they should have been on entry - ie
1312 * SELECT_ENABLE will be set appropriately, the NCR5380
1313 * will cease to drive any SCSI bus signals.
1314 *
1315 * If successful : I_T_L or I_T_L_Q nexus will be established,
1316 * instance->connected will be set to cmd.
1317 * SELECT interrupt will be disabled.
1318 *
1319 * If failed (no target) : cmd->scsi_done() will be called, and the
1320 * cmd->result host byte set to DID_BAD_TARGET.
1321 */
1322
1323static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
1324 int tag)
1325{
1326 SETUP_HOSTDATA(instance);
1327 unsigned char tmp[3], phase;
1328 unsigned char *data;
1329 int len;
1330 unsigned long timeout;
1331 unsigned long flags;
1332
1333 hostdata->restart_select = 0;
1334 NCR5380_dprint(NDEBUG_ARBITRATION, instance);
1335 dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
1336 instance->this_id);
1337
1338 /*
1339 * Set the phase bits to 0, otherwise the NCR5380 won't drive the
1340 * data bus during SELECTION.
1341 */
1342
1343 local_irq_save(flags);
1344 if (hostdata->connected) {
1345 local_irq_restore(flags);
1346 return -1;
1347 }
1348 NCR5380_write(TARGET_COMMAND_REG, 0);
1349
1350
1351 /*
1352 * Start arbitration.
1353 */
1354
1355 NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
1356 NCR5380_write(MODE_REG, MR_ARBITRATE);
1357
1358 local_irq_restore(flags);
1359
1360 /* Wait for arbitration logic to complete */
1361#ifdef NCR_TIMEOUT
1362 {
1363 unsigned long timeout = jiffies + 2*NCR_TIMEOUT;
1364
1365 while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
1366 && time_before(jiffies, timeout) && !hostdata->connected)
1367 ;
1368 if (time_after_eq(jiffies, timeout))
1369 {
1370 printk("scsi : arbitration timeout at %d\n", __LINE__);
1371 NCR5380_write(MODE_REG, MR_BASE);
1372 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1373 return -1;
1374 }
1375 }
1376#else /* NCR_TIMEOUT */
1377 while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
1378 && !hostdata->connected);
1379#endif
1380
1381 dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);
1382
1383 if (hostdata->connected) {
1384 NCR5380_write(MODE_REG, MR_BASE);
1385 return -1;
1386 }
1387 /*
1388 * The arbitration delay is 2.2us, but this is a minimum and there is
1389 * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
1390 * the integral nature of udelay().
1391 *
1392 */
1393
1394 udelay(3);
1395
1396 /* Check for lost arbitration */
1397 if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1398 (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
1399 (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1400 hostdata->connected) {
1401 NCR5380_write(MODE_REG, MR_BASE);
1402 dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
1403 HOSTNO);
1404 return -1;
1405 }
1406
1407 /* after/during arbitration, BSY should be asserted.
1408 IBM DPES-31080 Version S31Q works now */
1409 /* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */
1410 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL |
1411 ICR_ASSERT_BSY ) ;
1412
1413 if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1414 hostdata->connected) {
1415 NCR5380_write(MODE_REG, MR_BASE);
1416 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1417 dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
1418 HOSTNO);
1419 return -1;
1420 }
1421
1422 /*
1423 * Again, bus clear + bus settle time is 1.2us, however, this is
1424 * a minimum so we'll udelay ceil(1.2)
1425 */
1426
1427#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY
1428 /* ++roman: But some targets (see above :-) seem to need a bit more... */
1429 udelay(15);
1430#else
1431 udelay(2);
1432#endif
1433
1434 if (hostdata->connected) {
1435 NCR5380_write(MODE_REG, MR_BASE);
1436 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1437 return -1;
1438 }
1439
1440 dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);
1441
1442 /*
1443 * Now that we have won arbitration, start Selection process, asserting
1444 * the host and target ID's on the SCSI bus.
1445 */
1446
1447 NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id)));
1448
1449 /*
1450 * Raise ATN while SEL is true before BSY goes false from arbitration,
1451 * since this is the only way to guarantee that we'll get a MESSAGE OUT
1452 * phase immediately after selection.
1453 */
1454
1455 NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY |
1456 ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL ));
1457 NCR5380_write(MODE_REG, MR_BASE);
1458
1459 /*
1460 * Reselect interrupts must be turned off prior to the dropping of BSY,
1461 * otherwise we will trigger an interrupt.
1462 */
1463
1464 if (hostdata->connected) {
1465 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1466 return -1;
1467 }
1468
1469 NCR5380_write(SELECT_ENABLE_REG, 0);
1470
1471 /*
1472 * The initiator shall then wait at least two deskew delays and release
1473 * the BSY signal.
1474 */
1475 udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */
1476
1477 /* Reset BSY */
1478 NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
1479 ICR_ASSERT_ATN | ICR_ASSERT_SEL));
1480
1481 /*
1482 * Something weird happens when we cease to drive BSY - looks
1483 * like the board/chip is letting us do another read before the
1484 * appropriate propagation delay has expired, and we're confusing
1485 * a BSY signal from ourselves as the target's response to SELECTION.
1486 *
1487 * A small delay (the 'C++' frontend breaks the pipeline with an
1488 * unnecessary jump, making it work on my 386-33/Trantor T128, the
1489 * tighter 'C' code breaks and requires this) solves the problem -
1490 * the 1 us delay is arbitrary, and only used because this delay will
1491 * be the same on other platforms and since it works here, it should
1492 * work there.
1493 *
1494 * wingel suggests that this could be due to failing to wait
1495 * one deskew delay.
1496 */
1497
1498 udelay(1);
1499
1500 dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
1501
1502 /*
1503 * The SCSI specification calls for a 250 ms timeout for the actual
1504 * selection.
1505 */
1506
1507 timeout = jiffies + 25;
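	/* NB: 25 jiffies equals the spec's 250 ms only with HZ=100, as on the
	 * m68k platforms this code targets. */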
1508
1509 /*
1510 * XXX very interesting - we're seeing a bounce where the BSY we
1511 * asserted is being reflected / still asserted (propagation delay?)
1512 * and it's detecting as true. Sigh.
1513 */
1514
1515#if 0
	/* ++roman: If a target conformed to the SCSI standard, it wouldn't
	 * assert IO while SEL is true. But again, there are some disks out
	 * there in the world that do that nevertheless. (Somebody claimed
	 * that this announces reselection capability of the target.) So we
	 * better skip that test and only wait for BSY... (Famous German
	 * words: "Der Klügere gibt nach" -- the wiser one gives in :-)
	 */
1522
1523 while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) &
1524 (SR_BSY | SR_IO)));
1525
1526 if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) ==
1527 (SR_SEL | SR_IO)) {
1528 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1529 NCR5380_reselect(instance);
1530 printk (KERN_ERR "scsi%d: reselection after won arbitration?\n",
1531 HOSTNO);
1532 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1533 return -1;
1534 }
1535#else
1536 while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY));
1537#endif
1538
1539 /*
1540 * No less than two deskew delays after the initiator detects the
1541 * BSY signal is true, it shall release the SEL signal and may
1542 * change the DATA BUS. -wingel
1543 */
1544
1545 udelay(1);
1546
1547 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
1548
1549 if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
1550 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1551 if (hostdata->targets_present & (1 << cmd->device->id)) {
1552 printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
1553 if (hostdata->restart_select)
1554 printk(KERN_NOTICE "\trestart select\n");
1555 NCR5380_dprint(NDEBUG_ANY, instance);
1556 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1557 return -1;
1558 }
1559 cmd->result = DID_BAD_TARGET << 16;
1560#ifdef NCR5380_STATS
1561 collect_stats(hostdata, cmd);
1562#endif
1563#ifdef SUPPORT_TAGS
1564 cmd_free_tag( cmd );
1565#endif
1566 cmd->scsi_done(cmd);
1567 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1568 dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
1569 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1570 return 0;
1571 }
1572
1573 hostdata->targets_present |= (1 << cmd->device->id);
1574
1575 /*
1576 * Since we followed the SCSI spec, and raised ATN while SEL
1577 * was true but before BSY was false during selection, the information
1578 * transfer phase should be a MESSAGE OUT phase so that we can send the
1579 * IDENTIFY message.
1580 *
1581 * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
1582 * message (2 bytes) with a tag ID that we increment with every command
1583 * until it wraps back to 0.
1584 *
1585 * XXX - it turns out that there are some broken SCSI-II devices,
1586 * which claim to support tagged queuing but fail when more than
1587 * some number of commands are issued at once.
1588 */
1589
1590 /* Wait for start of REQ/ACK handshake */
1591 while (!(NCR5380_read(STATUS_REG) & SR_REQ));
1592
1593 dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
1594 HOSTNO, cmd->device->id);
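	/* The IDENTIFY message's first argument of 1 grants the target the
	 * privilege to disconnect. */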
1595 tmp[0] = IDENTIFY(1, cmd->device->lun);
1596
1597#ifdef SUPPORT_TAGS
1598 if (cmd->tag != TAG_NONE) {
1599 tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG;
1600 tmp[2] = cmd->tag;
1601 len = 3;
1602 } else
1603 len = 1;
1604#else
1605 len = 1;
1606 cmd->tag=0;
1607#endif /* SUPPORT_TAGS */
1608
1609 /* Send message(s) */
1610 data = tmp;
1611 phase = PHASE_MSGOUT;
1612 NCR5380_transfer_pio(instance, &phase, &len, &data);
1613 dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
1614 /* XXX need to handle errors here */
1615 hostdata->connected = cmd;
1616#ifndef SUPPORT_TAGS
1617 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
1618#endif
1619#ifdef SUN3_SCSI_VME
1620 dregs->csr |= CSR_INTR;
1621#endif
1622 initialize_SCp(cmd);
1623
1624
1625 return 0;
1626}
1627
1628/*
1629 * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
1630 * unsigned char *phase, int *count, unsigned char **data)
1631 *
1632 * Purpose : transfers data in given phase using polled I/O
1633 *
1634 * Inputs : instance - instance of driver, *phase - pointer to
1635 * what phase is expected, *count - pointer to number of
1636 * bytes to transfer, **data - pointer to data pointer.
1637 *
 * Returns : -1 when a different phase is entered without transferring the
 * maximum number of bytes, 0 if all bytes are transferred or exit
 * is in the same phase.
1641 *
1642 * Also, *phase, *count, *data are modified in place.
1643 *
1644 * XXX Note : handling for bus free may be useful.
1645 */
1646
1647/*
1648 * Note : this code is not as quick as it could be, however it
1649 * IS 100% reliable, and for the actual data transfer where speed
1650 * counts, we will always do a pseudo DMA or DMA transfer.
1651 */
1652
1653static int NCR5380_transfer_pio( struct Scsi_Host *instance,
1654 unsigned char *phase, int *count,
1655 unsigned char **data)
1656{
1657 register unsigned char p = *phase, tmp;
1658 register int c = *count;
1659 register unsigned char *d = *data;
1660
1661 /*
1662 * The NCR5380 chip will only drive the SCSI bus when the
1663 * phase specified in the appropriate bits of the TARGET COMMAND
	 * REGISTER matches the STATUS REGISTER.
1665 */
1666
1667 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
1668
1669 do {
1670 /*
1671 * Wait for assertion of REQ, after which the phase bits will be
1672 * valid
1673 */
1674 while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
1675
1676 dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);
1677
1678 /* Check for phase mismatch */
1679 if ((tmp & PHASE_MASK) != p) {
1680 dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
1681 NCR5380_dprint_phase(NDEBUG_PIO, instance);
1682 break;
1683 }
1684
1685 /* Do actual transfer from SCSI bus to / from memory */
1686 if (!(p & SR_IO))
1687 NCR5380_write(OUTPUT_DATA_REG, *d);
1688 else
1689 *d = NCR5380_read(CURRENT_SCSI_DATA_REG);
1690
1691 ++d;
1692
1693 /*
1694 * The SCSI standard suggests that in MSGOUT phase, the initiator
1695 * should drop ATN on the last byte of the message phase
1696 * after REQ has been asserted for the handshake but before
1697 * the initiator raises ACK.
1698 */
1699
1700 if (!(p & SR_IO)) {
1701 if (!((p & SR_MSG) && c > 1)) {
1702 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1703 ICR_ASSERT_DATA);
1704 NCR5380_dprint(NDEBUG_PIO, instance);
1705 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1706 ICR_ASSERT_DATA | ICR_ASSERT_ACK);
1707 } else {
1708 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1709 ICR_ASSERT_DATA | ICR_ASSERT_ATN);
1710 NCR5380_dprint(NDEBUG_PIO, instance);
1711 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1712 ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
1713 }
1714 } else {
1715 NCR5380_dprint(NDEBUG_PIO, instance);
1716 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
1717 }
1718
1719 while (NCR5380_read(STATUS_REG) & SR_REQ);
1720
1721 dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);
1722
1723/*
1724 * We have several special cases to consider during REQ/ACK handshaking :
1725 * 1. We were in MSGOUT phase, and we are on the last byte of the
1726 * message. ATN must be dropped as ACK is dropped.
1727 *
1728 * 2. We are in a MSGIN phase, and we are on the last byte of the
1729 * message. We must exit with ACK asserted, so that the calling
1730 * code may raise ATN before dropping ACK to reject the message.
1731 *
1732 * 3. ACK and ATN are clear and the target may proceed as normal.
1733 */
1734 if (!(p == PHASE_MSGIN && c == 1)) {
1735 if (p == PHASE_MSGOUT && c > 1)
1736 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
1737 else
1738 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1739 }
1740 } while (--c);
1741
1742 dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);
1743
1744 *count = c;
1745 *data = d;
1746 tmp = NCR5380_read(STATUS_REG);
1747 /* The phase read from the bus is valid if either REQ is (already)
1748 * asserted or if ACK hasn't been released yet. The latter is the case if
1749 * we're in MSGIN and all wanted bytes have been received. */
1750 if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0))
1751 *phase = tmp & PHASE_MASK;
1752 else
1753 *phase = PHASE_UNKNOWN;
1754
1755 if (!c || (*phase == p))
1756 return 0;
1757 else
1758 return -1;
1759}
1760
1761/*
 * Function : do_abort (struct Scsi_Host *host)
 *
 * Purpose : abort the currently established nexus. Should only be
 * called from a routine which can afford to busy-wait, as the bus
 * is polled for each handshake.
1766 *
1767 * Returns : 0 on success, -1 on failure.
1768 */
1769
1770static int do_abort (struct Scsi_Host *host)
1771{
1772 unsigned char tmp, *msgptr, phase;
1773 int len;
1774
1775 /* Request message out phase */
1776 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
1777
1778 /*
1779 * Wait for the target to indicate a valid phase by asserting
1780 * REQ. Once this happens, we'll have either a MSGOUT phase
1781 * and can immediately send the ABORT message, or we'll have some
1782 * other phase and will have to source/sink data.
1783 *
1784 * We really don't care what value was on the bus or what value
1785 * the target sees, so we just handshake.
1786 */
1787
1788 while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
1789
1790 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
1791
1792 if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
1793 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
1794 ICR_ASSERT_ACK);
1795 while (NCR5380_read(STATUS_REG) & SR_REQ);
1796 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
1797 }
1798
1799 tmp = ABORT;
1800 msgptr = &tmp;
1801 len = 1;
1802 phase = PHASE_MSGOUT;
1803 NCR5380_transfer_pio (host, &phase, &len, &msgptr);
1804
1805 /*
1806 * If we got here, and the command completed successfully,
1807 * we're about to go into bus free state.
1808 */
1809
1810 return len ? -1 : 0;
1811}
1812
1813#if defined(REAL_DMA)
1814/*
1815 * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
1816 * unsigned char *phase, int *count, unsigned char **data)
1817 *
1818 * Purpose : transfers data in given phase using either real
1819 * or pseudo DMA.
1820 *
1821 * Inputs : instance - instance of driver, *phase - pointer to
1822 * what phase is expected, *count - pointer to number of
1823 * bytes to transfer, **data - pointer to data pointer.
1824 *
 * Returns : -1 when a different phase is entered without transferring the
 * maximum number of bytes, 0 if all bytes are transferred or exit
 * is in the same phase.
1828 *
1829 * Also, *phase, *count, *data are modified in place.
1830 *
1831 */
1832
1833
1834static int NCR5380_transfer_dma( struct Scsi_Host *instance,
1835 unsigned char *phase, int *count,
1836 unsigned char **data)
1837{
1838 SETUP_HOSTDATA(instance);
1839 register int c = *count;
1840 register unsigned char p = *phase;
1841 unsigned long flags;
1842
1843 /* sanity check */
1844 if(!sun3_dma_setup_done) {
1845 printk("scsi%d: transfer_dma without setup!\n", HOSTNO);
1846 BUG();
1847 }
1848 hostdata->dma_len = c;
1849
1850 dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
1851 HOSTNO, (p & SR_IO) ? "reading" : "writing",
1852 c, (p & SR_IO) ? "to" : "from", *data);
1853
1854 /* netbsd turns off ints here, why not be safe and do it too */
1855 local_irq_save(flags);
1856
1857 /* send start chain */
1858 sun3scsi_dma_start(c, *data);
1859
1860 if (p & SR_IO) {
1861 NCR5380_write(TARGET_COMMAND_REG, 1);
1862 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1863 NCR5380_write(INITIATOR_COMMAND_REG, 0);
1864 NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
1865 NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
1866 } else {
1867 NCR5380_write(TARGET_COMMAND_REG, 0);
1868 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1869 NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
1870 NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
1871 NCR5380_write(START_DMA_SEND_REG, 0);
1872 }
1873
1874#ifdef SUN3_SCSI_VME
1875 dregs->csr |= CSR_DMA_ENABLE;
1876#endif
1877
1878 local_irq_restore(flags);
1879
1880 sun3_dma_active = 1;
1881 return 0;
1882}
1883#endif /* defined(REAL_DMA) */
1884
1885/*
1886 * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
1887 *
1888 * Purpose : run through the various SCSI phases and do as the target
1889 * directs us to. Operates on the currently connected command,
1890 * instance->connected.
1891 *
1892 * Inputs : instance, instance for which we are doing commands
1893 *
1894 * Side effects : SCSI things happen, the disconnected queue will be
1895 * modified if a command disconnects, *instance->connected will
1896 * change.
1897 *
1898 * XXX Note : we need to watch for bus free or a reset condition here
1899 * to recover from an unexpected bus free condition.
1900 */
1901
1902static void NCR5380_information_transfer (struct Scsi_Host *instance)
1903{
1904 SETUP_HOSTDATA(instance);
1905 unsigned long flags;
1906 unsigned char msgout = NOP;
1907 int sink = 0;
1908 int len;
1909#if defined(REAL_DMA)
1910 int transfersize;
1911#endif
1912 unsigned char *data;
1913 unsigned char phase, tmp, extended_msg[10], old_phase=0xff;
1914 struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected;
1915
1916#ifdef SUN3_SCSI_VME
1917 dregs->csr |= CSR_INTR;
1918#endif
1919
1920 while (1) {
1921 tmp = NCR5380_read(STATUS_REG);
1922 /* We only have a valid SCSI phase when REQ is asserted */
1923 if (tmp & SR_REQ) {
1924 phase = (tmp & PHASE_MASK);
1925 if (phase != old_phase) {
1926 old_phase = phase;
1927 NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
1928 }
1929
1930 if(phase == PHASE_CMDOUT) {
1931 void *d;
1932 unsigned long count;
1933
1934 if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
1935 count = cmd->SCp.buffer->length;
1936 d = SGADDR(cmd->SCp.buffer);
1937 } else {
1938 count = cmd->SCp.this_residual;
1939 d = cmd->SCp.ptr;
1940 }
1941#ifdef REAL_DMA
1942 /* this command setup for dma yet? */
1943 if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done
1944 != cmd))
1945 {
1946 if (cmd->request->cmd_type == REQ_TYPE_FS) {
1947 sun3scsi_dma_setup(d, count,
1948 rq_data_dir(cmd->request));
1949 sun3_dma_setup_done = cmd;
1950 }
1951 }
1952#endif
1953#ifdef SUN3_SCSI_VME
1954 dregs->csr |= CSR_INTR;
1955#endif
1956 }
1957
1958
1959 if (sink && (phase != PHASE_MSGOUT)) {
1960 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
1961
1962 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
1963 ICR_ASSERT_ACK);
1964 while (NCR5380_read(STATUS_REG) & SR_REQ);
1965 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1966 ICR_ASSERT_ATN);
1967 sink = 0;
1968 continue;
1969 }
1970
1971 switch (phase) {
1972 case PHASE_DATAOUT:
1973#if (NDEBUG & NDEBUG_NO_DATAOUT)
1974 printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT "
1975 "aborted\n", HOSTNO);
1976 sink = 1;
1977 do_abort(instance);
1978 cmd->result = DID_ERROR << 16;
1979 cmd->scsi_done(cmd);
1980 return;
1981#endif
1982 case PHASE_DATAIN:
1983 /*
1984 * If there is no room left in the current buffer in the
1985 * scatter-gather list, move onto the next one.
1986 */
1987 if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
1988 ++cmd->SCp.buffer;
1989 --cmd->SCp.buffers_residual;
1990 cmd->SCp.this_residual = cmd->SCp.buffer->length;
1991 cmd->SCp.ptr = SGADDR(cmd->SCp.buffer);
1992 dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
1993 HOSTNO, cmd->SCp.this_residual,
1994 cmd->SCp.buffers_residual);
1995 }
1996
1997 /*
1998 * The preferred transfer method is going to be
1999 * PSEUDO-DMA for systems that are strictly PIO,
2000 * since we can let the hardware do the handshaking.
2001 *
2002 * For this to work, we need to know the transfersize
2003 * ahead of time, since the pseudo-DMA code will sit
2004 * in an unconditional loop.
2005 */
2006
/* ++roman: I suggest this should be
 *	#if defined(REAL_DMA)
 * instead of leaving REAL_DMA out.
 */
2011
2012#if defined(REAL_DMA)
2013// if (!cmd->device->borken &&
2014 if((transfersize =
2015 NCR5380_dma_xfer_len(instance,cmd,phase)) > SUN3_DMA_MINSIZE) {
2016 len = transfersize;
2017 cmd->SCp.phase = phase;
2018
2019 if (NCR5380_transfer_dma(instance, &phase,
2020 &len, (unsigned char **) &cmd->SCp.ptr)) {
2021 /*
				 * If the watchdog timer fires, all future
				 * accesses to this device will use
				 * polled I/O. */
2025 printk(KERN_NOTICE "scsi%d: switching target %d "
2026 "lun %llu to slow handshake\n", HOSTNO,
2027 cmd->device->id, cmd->device->lun);
2028 cmd->device->borken = 1;
2029 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
2030 ICR_ASSERT_ATN);
2031 sink = 1;
2032 do_abort(instance);
2033 cmd->result = DID_ERROR << 16;
2034 cmd->scsi_done(cmd);
2035 /* XXX - need to source or sink data here, as appropriate */
2036 } else {
2037#ifdef REAL_DMA
2038 /* ++roman: When using real DMA,
2039 * information_transfer() should return after
2040 * starting DMA since it has nothing more to
2041 * do.
2042 */
2043 return;
2044#else
2045 cmd->SCp.this_residual -= transfersize - len;
2046#endif
2047 }
2048 } else
2049#endif /* defined(REAL_DMA) */
2050 NCR5380_transfer_pio(instance, &phase,
2051 (int *) &cmd->SCp.this_residual, (unsigned char **)
2052 &cmd->SCp.ptr);
2053#ifdef REAL_DMA
2054 /* if we had intended to dma that command clear it */
2055 if(sun3_dma_setup_done == cmd)
2056 sun3_dma_setup_done = NULL;
2057#endif
2058
2059 break;
2060 case PHASE_MSGIN:
2061 len = 1;
2062 data = &tmp;
2063 NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */
2064 NCR5380_transfer_pio(instance, &phase, &len, &data);
2065 cmd->SCp.Message = tmp;
2066
2067 switch (tmp) {
2068 /*
 2069 * Linking lets us reduce the time required to get the
 2070 * next command out to the device; hopefully this means
 2071 * we don't waste another revolution due to the delays
 2072 * required by ARBITRATION and another SELECTION.
2073 *
2074 * In the current implementation proposal, low level drivers
2075 * merely have to start the next command, pointed to by
2076 * next_link, done() is called as with unlinked commands.
2077 */
2078#ifdef LINKED
2079 case LINKED_CMD_COMPLETE:
2080 case LINKED_FLG_CMD_COMPLETE:
2081 /* Accept message by clearing ACK */
2082 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2083
2084 dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command "
2085 "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
2086
2087 /* Enable reselect interrupts */
2088 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2089 /*
2090 * Sanity check : A linked command should only terminate
2091 * with one of these messages if there are more linked
2092 * commands available.
2093 */
2094
2095 if (!cmd->next_link) {
2096 printk(KERN_NOTICE "scsi%d: target %d lun %llu "
2097 "linked command complete, no next_link\n",
2098 HOSTNO, cmd->device->id, cmd->device->lun);
2099 sink = 1;
2100 do_abort (instance);
2101 return;
2102 }
2103
2104 initialize_SCp(cmd->next_link);
2105 /* The next command is still part of this process; copy it
2106 * and don't free it! */
2107 cmd->next_link->tag = cmd->tag;
2108 cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
2109 dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request "
2110 "done, calling scsi_done().\n",
2111 HOSTNO, cmd->device->id, cmd->device->lun);
2112#ifdef NCR5380_STATS
2113 collect_stats(hostdata, cmd);
2114#endif
2115 cmd->scsi_done(cmd);
2116 cmd = hostdata->connected;
2117 break;
2118#endif /* def LINKED */
2119 case ABORT:
2120 case COMMAND_COMPLETE:
2121 /* Accept message by clearing ACK */
2122 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2123 hostdata->connected = NULL;
2124 dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu "
2125 "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
2126#ifdef SUPPORT_TAGS
2127 cmd_free_tag( cmd );
2128 if (status_byte(cmd->SCp.Status) == QUEUE_FULL) {
 2129 /* Turn a QUEUE FULL status into BUSY; I think the
 2130 * mid level cannot handle QUEUE FULL :-( (the
 2131 * command is retried after BUSY). Also update our
 2132 * queue size to the number of commands currently
 2133 * issued.
 2134 */
2135 /* ++Andreas: the mid level code knows about
2136 QUEUE_FULL now. */
2137 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
2138 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned "
2139 "QUEUE_FULL after %d commands\n",
2140 HOSTNO, cmd->device->id, cmd->device->lun,
2141 ta->nr_allocated);
 2142 if (ta->queue_size > ta->nr_allocated)
 2143 ta->queue_size = ta->nr_allocated;
2144 }
2145#else
2146 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2147#endif
2148 /* Enable reselect interrupts */
2149 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2150
2151 /*
2152 * I'm not sure what the correct thing to do here is :
2153 *
2154 * If the command that just executed is NOT a request
2155 * sense, the obvious thing to do is to set the result
2156 * code to the values of the stored parameters.
2157 *
 2158 * If it was a REQUEST SENSE command, we need some way to
 2159 * differentiate between the failure code of the original
 2160 * and the failure code of the REQUEST SENSE - the obvious
 2161 * case is success, where we fall through and leave the
 2162 * result code unchanged.
 2163 *
 2164 * The non-obvious case is where the REQUEST SENSE failed.
2165 */
2166
2167 if (cmd->cmnd[0] != REQUEST_SENSE)
2168 cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
2169 else if (status_byte(cmd->SCp.Status) != GOOD)
2170 cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
2171
2172#ifdef AUTOSENSE
2173 if ((cmd->cmnd[0] == REQUEST_SENSE) &&
2174 hostdata->ses.cmd_len) {
2175 scsi_eh_restore_cmnd(cmd, &hostdata->ses);
 2176 hostdata->ses.cmd_len = 0;
2177 }
2178
2179 if ((cmd->cmnd[0] != REQUEST_SENSE) &&
2180 (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
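 /*
 * scsi_eh_prep_cmnd() saves the current command state in
 * hostdata->ses and rewrites cmd into a REQUEST SENSE; the
 * scsi_eh_restore_cmnd() call above undoes this once the
 * sense data has been fetched.
 */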
2181 scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
2182 dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n",
2183 HOSTNO);
2184 /* this is initialized from initialize_SCp
2185 cmd->SCp.buffer = NULL;
2186 cmd->SCp.buffers_residual = 0;
2187 */
2188
2189 local_irq_save(flags);
2190 LIST(cmd,hostdata->issue_queue);
2191 SET_NEXT(cmd, hostdata->issue_queue);
2192 hostdata->issue_queue = (struct scsi_cmnd *) cmd;
2193 local_irq_restore(flags);
2194 dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
2195 "issue queue\n", H_NO(cmd));
2196 } else
2197#endif /* def AUTOSENSE */
2198 {
2199#ifdef NCR5380_STATS
2200 collect_stats(hostdata, cmd);
2201#endif
2202 cmd->scsi_done(cmd);
2203 }
2204
2205 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2206 /*
2207 * Restore phase bits to 0 so an interrupted selection,
2208 * arbitration can resume.
2209 */
2210 NCR5380_write(TARGET_COMMAND_REG, 0);
2211
2212 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
2213 barrier();
2214
2215 return;
2216 case MESSAGE_REJECT:
2217 /* Accept message by clearing ACK */
2218 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2219 /* Enable reselect interrupts */
2220 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2221 switch (hostdata->last_message) {
2222 case HEAD_OF_QUEUE_TAG:
2223 case ORDERED_QUEUE_TAG:
2224 case SIMPLE_QUEUE_TAG:
2225 /* The target obviously doesn't support tagged
2226 * queuing, even though it announced this ability in
2227 * its INQUIRY data ?!? (maybe only this LUN?) Ok,
2228 * clear 'tagged_supported' and lock the LUN, since
2229 * the command is treated as untagged further on.
2230 */
2231 cmd->device->tagged_supported = 0;
2232 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
2233 cmd->tag = TAG_NONE;
2234 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected "
2235 "QUEUE_TAG message; tagged queuing "
2236 "disabled\n",
2237 HOSTNO, cmd->device->id, cmd->device->lun);
2238 break;
2239 }
2240 break;
2241 case DISCONNECT:
2242 /* Accept message by clearing ACK */
2243 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2244 local_irq_save(flags);
2245 cmd->device->disconnect = 1;
2246 LIST(cmd,hostdata->disconnected_queue);
2247 SET_NEXT(cmd, hostdata->disconnected_queue);
2248 hostdata->connected = NULL;
2249 hostdata->disconnected_queue = cmd;
2250 local_irq_restore(flags);
2251 dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was "
2252 "moved from connected to the "
2253 "disconnected_queue\n", HOSTNO,
2254 cmd->device->id, cmd->device->lun);
2255 /*
2256 * Restore phase bits to 0 so an interrupted selection,
2257 * arbitration can resume.
2258 */
2259 NCR5380_write(TARGET_COMMAND_REG, 0);
2260
2261 /* Enable reselect interrupts */
2262 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2263 /* Wait for bus free to avoid nasty timeouts */
2264 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
2265 barrier();
2266#ifdef SUN3_SCSI_VME
2267 dregs->csr |= CSR_DMA_ENABLE;
2268#endif
2269 return;
 2270 /*
 2271 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
 2272 * operation (in violation of the SCSI spec), so we can safely
 2273 * ignore SAVE/RESTORE POINTERS messages.
 2274 *
 2275 * Unfortunately, some disks violate the SCSI spec and
 2276 * don't issue the required SAVE_POINTERS message before
 2277 * disconnecting, and we have to break the spec ourselves to
 2278 * remain compatible.
 2279 */
2280 case SAVE_POINTERS:
2281 case RESTORE_POINTERS:
2282 /* Accept message by clearing ACK */
2283 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2284 /* Enable reselect interrupts */
2285 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2286 break;
2287 case EXTENDED_MESSAGE:
2288/*
2289 * Extended messages are sent in the following format :
2290 * Byte
2291 * 0 EXTENDED_MESSAGE == 1
2292 * 1 length (includes one byte for code, doesn't
2293 * include first two bytes)
2294 * 2 code
2295 * 3..length+1 arguments
2296 *
2297 * Start the extended message buffer with the EXTENDED_MESSAGE
2298 * byte, since spi_print_msg() wants the whole thing.
2299 */
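/*
 * For example (standard SCSI-2 layout, shown here only for
 * illustration), a synchronous data transfer request arrives as the
 * five bytes 0x01 0x03 0x01 <period> <offset>: EXTENDED_MESSAGE, a
 * length of 3, the EXTENDED_SDTR code, then two argument bytes.
 */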
2300 extended_msg[0] = EXTENDED_MESSAGE;
2301 /* Accept first byte by clearing ACK */
2302 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2303
2304 dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);
2305
2306 len = 2;
2307 data = extended_msg + 1;
2308 phase = PHASE_MSGIN;
2309 NCR5380_transfer_pio(instance, &phase, &len, &data);
2310 dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
2311 (int)extended_msg[1], (int)extended_msg[2]);
2312
2313 if (!len && extended_msg[1] <=
2314 (sizeof (extended_msg) - 1)) {
2315 /* Accept third byte by clearing ACK */
2316 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2317 len = extended_msg[1] - 1;
2318 data = extended_msg + 3;
2319 phase = PHASE_MSGIN;
2320
2321 NCR5380_transfer_pio(instance, &phase, &len, &data);
2322 dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
2323 HOSTNO, len);
2324
2325 switch (extended_msg[2]) {
2326 case EXTENDED_SDTR:
2327 case EXTENDED_WDTR:
2328 case EXTENDED_MODIFY_DATA_POINTER:
2329 case EXTENDED_EXTENDED_IDENTIFY:
2330 tmp = 0;
2331 }
2332 } else if (len) {
2333 printk(KERN_NOTICE "scsi%d: error receiving "
2334 "extended message\n", HOSTNO);
2335 tmp = 0;
2336 } else {
2337 printk(KERN_NOTICE "scsi%d: extended message "
2338 "code %02x length %d is too long\n",
2339 HOSTNO, extended_msg[2], extended_msg[1]);
2340 tmp = 0;
2341 }
2342 /* Fall through to reject message */
2343
2344 /*
2345 * If we get something weird that we aren't expecting,
2346 * reject it.
2347 */
2348 default:
2349 if (!tmp) {
2350 printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO);
2351 spi_print_msg(extended_msg);
2352 printk("\n");
2353 } else if (tmp != EXTENDED_MESSAGE)
2354 printk(KERN_DEBUG "scsi%d: rejecting unknown "
2355 "message %02x from target %d, lun %llu\n",
2356 HOSTNO, tmp, cmd->device->id, cmd->device->lun);
2357 else
2358 printk(KERN_DEBUG "scsi%d: rejecting unknown "
2359 "extended message "
2360 "code %02x, length %d from target %d, lun %llu\n",
 2361 HOSTNO, extended_msg[2], extended_msg[1],
2362 cmd->device->id, cmd->device->lun);
2363
2364
2365 msgout = MESSAGE_REJECT;
2366 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
2367 ICR_ASSERT_ATN);
2368 break;
2369 } /* switch (tmp) */
2370 break;
2371 case PHASE_MSGOUT:
2372 len = 1;
2373 data = &msgout;
2374 hostdata->last_message = msgout;
2375 NCR5380_transfer_pio(instance, &phase, &len, &data);
2376 if (msgout == ABORT) {
2377#ifdef SUPPORT_TAGS
2378 cmd_free_tag( cmd );
2379#else
2380 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2381#endif
2382 hostdata->connected = NULL;
2383 cmd->result = DID_ERROR << 16;
2384#ifdef NCR5380_STATS
2385 collect_stats(hostdata, cmd);
2386#endif
2387 cmd->scsi_done(cmd);
2388 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2389 return;
2390 }
2391 msgout = NOP;
2392 break;
2393 case PHASE_CMDOUT:
2394 len = cmd->cmd_len;
2395 data = cmd->cmnd;
2396 /*
2397 * XXX for performance reasons, on machines with a
2398 * PSEUDO-DMA architecture we should probably
2399 * use the dma transfer function.
2400 */
2401 NCR5380_transfer_pio(instance, &phase, &len,
2402 &data);
2403 break;
2404 case PHASE_STATIN:
2405 len = 1;
2406 data = &tmp;
2407 NCR5380_transfer_pio(instance, &phase, &len, &data);
2408 cmd->SCp.Status = tmp;
2409 break;
2410 default:
2411 printk("scsi%d: unknown phase\n", HOSTNO);
2412 NCR5380_dprint(NDEBUG_ANY, instance);
2413 } /* switch(phase) */
 2414 } /* if (tmp & SR_REQ) */
2415 } /* while (1) */
2416}
2417
2418/*
2419 * Function : void NCR5380_reselect (struct Scsi_Host *instance)
2420 *
2421 * Purpose : does reselection, initializing the instance->connected
2422 * field to point to the struct scsi_cmnd for which the I_T_L or I_T_L_Q
 2423 * nexus has been reestablished.
2424 *
2425 * Inputs : instance - this instance of the NCR5380.
2426 *
2427 */
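/*
 * (Here an I_T_L nexus is an Initiator/Target/LUN connection; an
 * I_T_L_Q nexus additionally carries a queue tag.)
 */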
2428
2429/* it might eventually prove necessary to do a dma setup on
2430 reselection, but it doesn't seem to be needed now -- sam */
2431
2432static void NCR5380_reselect (struct Scsi_Host *instance)
2433{
2434 SETUP_HOSTDATA(instance);
2435 unsigned char target_mask;
2436 unsigned char lun;
2437#ifdef SUPPORT_TAGS
2438 unsigned char tag;
2439#endif
2440 unsigned char msg[3];
2441 struct scsi_cmnd *tmp = NULL, *prev;
2442/* unsigned long flags; */
2443
2444 /*
2445 * Disable arbitration, etc. since the host adapter obviously
2446 * lost, and tell an interrupted NCR5380_select() to restart.
2447 */
2448
2449 NCR5380_write(MODE_REG, MR_BASE);
2450 hostdata->restart_select = 1;
2451
2452 target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
2453
2454 dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);
2455
2456 /*
2457 * At this point, we have detected that our SCSI ID is on the bus,
2458 * SEL is true and BSY was false for at least one bus settle delay
2459 * (400 ns).
2460 *
2461 * We must assert BSY ourselves, until the target drops the SEL
2462 * signal.
2463 */
2464
2465 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
2466
2467 while (NCR5380_read(STATUS_REG) & SR_SEL);
2468 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2469
2470 /*
2471 * Wait for target to go into MSGIN.
2472 */
2473
2474 while (!(NCR5380_read(STATUS_REG) & SR_REQ));
2475
2476#if 1
2477 /* acknowledge toggle to MSGIN */
2478 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN));
2479
2480 /* peek at the byte without really hitting the bus */
2481 msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG);
2482#endif
2483
2484 if (!(msg[0] & 0x80)) {
2485 printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO);
2486 spi_print_msg(msg);
2487 do_abort(instance);
2488 return;
2489 }
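 /*
 * An IDENTIFY message has bit 7 set and carries the LUN in its low
 * three bits (e.g. 0x82 reselects LUN 2), hence the 0x80 test above
 * and the 0x07 mask below.
 */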
2490 lun = (msg[0] & 0x07);
2491
2492 /*
2493 * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
2494 * just reestablished, and remove it from the disconnected queue.
2495 */
2496
2497 for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL;
2498 tmp; prev = tmp, tmp = NEXT(tmp) ) {
2499 if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun)
2500#ifdef SUPPORT_TAGS
2501 && (tag == tmp->tag)
2502#endif
2503 ) {
2504 if (prev) {
2505 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
2506 SET_NEXT(prev, NEXT(tmp));
2507 } else {
2508 REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp));
2509 hostdata->disconnected_queue = NEXT(tmp);
2510 }
2511 SET_NEXT(tmp, NULL);
2512 break;
2513 }
2514 }
2515
2516 if (!tmp) {
2517 printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d "
2518#ifdef SUPPORT_TAGS
2519 "tag %d "
2520#endif
2521 "not in disconnected_queue.\n",
2522 HOSTNO, target_mask, lun
2523#ifdef SUPPORT_TAGS
2524 , tag
2525#endif
2526 );
2527 /*
2528 * Since we have an established nexus that we can't do anything
2529 * with, we must abort it.
2530 */
2531 do_abort(instance);
2532 return;
2533 }
2534#if 1
2535 /* engage dma setup for the command we just saw */
2536 {
2537 void *d;
2538 unsigned long count;
2539
2540 if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
2541 count = tmp->SCp.buffer->length;
2542 d = SGADDR(tmp->SCp.buffer);
2543 } else {
2544 count = tmp->SCp.this_residual;
2545 d = tmp->SCp.ptr;
2546 }
2547#ifdef REAL_DMA
2548 /* setup this command for dma if not already */
2549 if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done != tmp))
2550 {
2551 sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request));
2552 sun3_dma_setup_done = tmp;
2553 }
2554#endif
2555 }
2556#endif
2557
2558 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
2559 /* Accept message by clearing ACK */
2560 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2561
2562#ifdef SUPPORT_TAGS
2563 /* If the phase is still MSGIN, the target wants to send some more
2564 * messages. In case it supports tagged queuing, this is probably a
2565 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
2566 */
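 /* A SIMPLE_QUEUE_TAG message is two bytes on the wire, 0x20 followed
 * by the tag value, which is why two further bytes are read into
 * msg[1] and msg[2] below.
 */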
2567 tag = TAG_NONE;
2568 if (phase == PHASE_MSGIN && setup_use_tagged_queuing) {
2569 /* Accept previous IDENTIFY message by clearing ACK */
2570 NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
2571 len = 2;
2572 data = msg+1;
2573 if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
2574 msg[1] == SIMPLE_QUEUE_TAG)
2575 tag = msg[2];
2576 dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
2577 "reselection\n", HOSTNO, target_mask, lun, tag);
2578 }
2579#endif
2580
2581 hostdata->connected = tmp;
2582 dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n",
2583 HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
2584}
2585
2586
2587/*
2588 * Function : int NCR5380_abort(struct scsi_cmnd *cmd)
2589 *
2590 * Purpose : abort a command
2591 *
 2592 * Inputs : cmd - the struct scsi_cmnd to abort. The host byte of
 2593 * the result field is set to DID_ABORT if the abort
 2594 * succeeds.
 2595 *
 2596 * Returns : SUCCESS on success, FAILED on failure.
2597 *
2598 * XXX - there is no way to abort the command that is currently
2599 * connected, you have to wait for it to complete. If this is
2600 * a problem, we could implement longjmp() / setjmp(), setjmp()
2601 * called where the loop started in NCR5380_main().
2602 */
2603
2604static int NCR5380_abort(struct scsi_cmnd *cmd)
2605{
2606 struct Scsi_Host *instance = cmd->device->host;
2607 SETUP_HOSTDATA(instance);
2608 struct scsi_cmnd *tmp, **prev;
2609 unsigned long flags;
2610
2611 scmd_printk(KERN_NOTICE, cmd, "aborting command\n");
2612
2613 NCR5380_print_status (instance);
2614
2615 local_irq_save(flags);
2616
2617 dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
2618 NCR5380_read(BUS_AND_STATUS_REG),
2619 NCR5380_read(STATUS_REG));
2620
2621#if 1
2622/*
2623 * Case 1 : If the command is the currently executing command,
2624 * we'll set the aborted flag and return control so that
 2625 * the information transfer routine can exit cleanly.
2626 */
2627
2628 if (hostdata->connected == cmd) {
2629
2630 dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
2631/*
2632 * We should perform BSY checking, and make sure we haven't slipped
2633 * into BUS FREE.
2634 */
2635
2636/* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */
2637/*
2638 * Since we can't change phases until we've completed the current
2639 * handshake, we have to source or sink a byte of data if the current
2640 * phase is not MSGOUT.
2641 */
2642
2643/*
 2644 * Return control to the executing NCR driver so we can clear the
2645 * aborted flag and get back into our main loop.
2646 */
2647
2648 if (do_abort(instance) == 0) {
2649 hostdata->aborted = 1;
2650 hostdata->connected = NULL;
2651 cmd->result = DID_ABORT << 16;
2652#ifdef SUPPORT_TAGS
2653 cmd_free_tag( cmd );
2654#else
2655 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2656#endif
2657 local_irq_restore(flags);
2658 cmd->scsi_done(cmd);
2659 return SUCCESS;
2660 } else {
2661/* local_irq_restore(flags); */
2662 printk("scsi%d: abort of connected command failed!\n", HOSTNO);
2663 return FAILED;
2664 }
2665 }
2666#endif
2667
2668/*
2669 * Case 2 : If the command hasn't been issued yet, we simply remove it
2670 * from the issue queue.
2671 */
2672 for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue),
2673 tmp = (struct scsi_cmnd *) hostdata->issue_queue;
2674 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp))
2675 if (cmd == tmp) {
2676 REMOVE(5, *prev, tmp, NEXT(tmp));
2677 (*prev) = NEXT(tmp);
2678 SET_NEXT(tmp, NULL);
2679 tmp->result = DID_ABORT << 16;
2680 local_irq_restore(flags);
2681 dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
2682 HOSTNO);
2683 /* Tagged queuing note: no tag to free here, hasn't been assigned
2684 * yet... */
2685 tmp->scsi_done(tmp);
2686 return SUCCESS;
2687 }
2688
2689/*
2690 * Case 3 : If any commands are connected, we're going to fail the abort
2691 * and let the high level SCSI driver retry at a later time or
2692 * issue a reset.
2693 *
2694 * Timeouts, and therefore aborted commands, will be highly unlikely
 2695 * and handling them cleanly in this situation would make the common
 2696 * no-reset case less efficient, and would pollute our code. So,
2697 * we fail.
2698 */
2699
2700 if (hostdata->connected) {
2701 local_irq_restore(flags);
2702 dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
2703 return FAILED;
2704 }
2705
2706/*
2707 * Case 4: If the command is currently disconnected from the bus, and
2708 * there are no connected commands, we reconnect the I_T_L or
2709 * I_T_L_Q nexus associated with it, go into message out, and send
2710 * an abort message.
2711 *
2712 * This case is especially ugly. In order to reestablish the nexus, we
2713 * need to call NCR5380_select(). The easiest way to implement this
2714 * function was to abort if the bus was busy, and let the interrupt
2715 * handler triggered on the SEL for reselect take care of lost arbitrations
2716 * where necessary, meaning interrupts need to be enabled.
2717 *
2718 * When interrupts are enabled, the queues may change - so we
2719 * can't remove it from the disconnected queue before selecting it
 2720 * because that could cause a failure in matching the nexus if that
2721 * device reselected.
2722 *
2723 * Since the queues may change, we can't use the pointers from when we
2724 * first locate it.
2725 *
2726 * So, we must first locate the command, and if NCR5380_select()
2727 * succeeds, then issue the abort, relocate the command and remove
2728 * it from the disconnected queue.
2729 */
2730
2731 for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp;
2732 tmp = NEXT(tmp))
2733 if (cmd == tmp) {
2734 local_irq_restore(flags);
2735 dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
2736
2737 if (NCR5380_select (instance, cmd, (int) cmd->tag))
2738 return FAILED;
2739
2740 dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
2741
2742 do_abort (instance);
2743
2744 local_irq_save(flags);
2745 for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue),
2746 tmp = (struct scsi_cmnd *) hostdata->disconnected_queue;
2747 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp) )
2748 if (cmd == tmp) {
2749 REMOVE(5, *prev, tmp, NEXT(tmp));
2750 *prev = NEXT(tmp);
2751 SET_NEXT(tmp, NULL);
2752 tmp->result = DID_ABORT << 16;
2753 /* We must unlock the tag/LUN immediately here, since the
2754 * target goes to BUS FREE and doesn't send us another
2755 * message (COMMAND_COMPLETE or the like)
2756 */
2757#ifdef SUPPORT_TAGS
2758 cmd_free_tag( tmp );
2759#else
2760 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2761#endif
2762 local_irq_restore(flags);
2763 tmp->scsi_done(tmp);
2764 return SUCCESS;
2765 }
2766 }
2767
2768/*
2769 * Case 5 : If we reached this point, the command was not found in any of
2770 * the queues.
2771 *
2772 * We probably reached this point because of an unlikely race condition
 2773 * between the command completing successfully and the abort code,
2774 * so we won't panic, but we will notify the user in case something really
2775 * broke.
2776 */
2777
2778 local_irq_restore(flags);
 2779 printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before it could be aborted\n", HOSTNO);
2780
2781 return FAILED;
2782}
2783
2784
2785/*
2786 * Function : int NCR5380_bus_reset(struct scsi_cmnd *cmd)
2787 *
2788 * Purpose : reset the SCSI bus.
2789 *
 2790 * Returns : SUCCESS or FAILED
2791 *
2792 */
2793
2794static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
2795{
2796 SETUP_HOSTDATA(cmd->device->host);
2797 int i;
2798 unsigned long flags;
2799#if defined(RESET_RUN_DONE)
2800 struct scsi_cmnd *connected, *disconnected_queue;
2801#endif
2802
2803
2804 NCR5380_print_status (cmd->device->host);
2805
2806 /* get in phase */
2807 NCR5380_write( TARGET_COMMAND_REG,
2808 PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));
2809 /* assert RST */
2810 NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST );
2811 udelay (40);
2812 /* reset NCR registers */
2813 NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
2814 NCR5380_write( MODE_REG, MR_BASE );
2815 NCR5380_write( TARGET_COMMAND_REG, 0 );
2816 NCR5380_write( SELECT_ENABLE_REG, 0 );
 2817 /* ++roman: reset interrupt condition! otherwise interrupts don't get
 2818 * through anymore ... */
2819 (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG );
2820
2821 /* MSch 20140115 - looking at the generic NCR5380 driver, all of this
2822 * should go.
2823 * Catch-22: if we don't clear all queues, the SCSI driver lock will
2824 * not be released by atari_scsi_reset()!
2825 */
2826
2827#if defined(RESET_RUN_DONE)
2828 /* XXX Should now be done by midlevel code, but it's broken XXX */
2829 /* XXX see below XXX */
2830
2831 /* MSch: old-style reset: actually abort all command processing here */
2832
2833 /* After the reset, there are no more connected or disconnected commands
2834 * and no busy units; to avoid problems with re-inserting the commands
2835 * into the issue_queue (via scsi_done()), the aborted commands are
2836 * remembered in local variables first.
2837 */
2838 local_irq_save(flags);
2839 connected = (struct scsi_cmnd *)hostdata->connected;
2840 hostdata->connected = NULL;
2841 disconnected_queue = (struct scsi_cmnd *)hostdata->disconnected_queue;
2842 hostdata->disconnected_queue = NULL;
2843#ifdef SUPPORT_TAGS
2844 free_all_tags();
2845#endif
2846 for( i = 0; i < 8; ++i )
2847 hostdata->busy[i] = 0;
2848#ifdef REAL_DMA
2849 hostdata->dma_len = 0;
2850#endif
2851 local_irq_restore(flags);
2852
2853 /* In order to tell the mid-level code which commands were aborted,
2854 * set the command status to DID_RESET and call scsi_done() !!!
2855 * This ultimately aborts processing of these commands in the mid-level.
2856 */
2857
2858 if ((cmd = connected)) {
2859 dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
2860 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
2861 cmd->scsi_done( cmd );
2862 }
2863
2864 for (i = 0; (cmd = disconnected_queue); ++i) {
2865 disconnected_queue = NEXT(cmd);
2866 SET_NEXT(cmd, NULL);
2867 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
2868 cmd->scsi_done( cmd );
2869 }
2870 if (i > 0)
2871 dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);
2872
2873
2874 /* since all commands have been explicitly terminated, we need to tell
2875 * the midlevel code that the reset was SUCCESSFUL, and there is no
2876 * need to 'wake up' the commands by a request_sense
2877 */
2878 return SUCCESS;
2879#else /* defined(RESET_RUN_DONE) */
2880
2881 /* MSch: new-style reset handling: let the mid-level do what it can */
2882
2883 /* ++guenther: MID-LEVEL IS STILL BROKEN.
2884 * Mid-level is supposed to requeue all commands that were active on the
2885 * various low-level queues. In fact it does this, but that's not enough
2886 * because all these commands are subject to timeout. And if a timeout
2887 * happens for any removed command, *_abort() is called but all queues
2888 * are now empty. Abort then gives up the falcon lock, which is fatal,
2889 * since the mid-level will queue more commands and must have the lock
2890 * (it's all happening inside timer interrupt handler!!).
2891 * Even worse, abort will return NOT_RUNNING for all those commands not
2892 * on any queue, so they won't be retried ...
2893 *
 2894 * Conclusion: either scsi.c disables timeout for all reset commands
2895 * immediately, or we lose! As of linux-2.0.20 it doesn't.
2896 */
2897
2898 /* After the reset, there are no more connected or disconnected commands
2899 * and no busy units; so clear the low-level status here to avoid
2900 * conflicts when the mid-level code tries to wake up the affected
2901 * commands!
2902 */
2903
2904 if (hostdata->issue_queue)
2905 dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
2906 if (hostdata->connected)
2907 dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
2908 if (hostdata->disconnected_queue)
2909 dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
2910
2911 local_irq_save(flags);
2912 hostdata->issue_queue = NULL;
2913 hostdata->connected = NULL;
2914 hostdata->disconnected_queue = NULL;
2915#ifdef SUPPORT_TAGS
2916 free_all_tags();
2917#endif
2918 for( i = 0; i < 8; ++i )
2919 hostdata->busy[i] = 0;
2920#ifdef REAL_DMA
2921 hostdata->dma_len = 0;
2922#endif
2923 local_irq_restore(flags);
2924
 2925 /* we did not completely reset all commands, so a wakeup is required */
2926 return SUCCESS;
2927#endif /* defined(RESET_RUN_DONE) */
2928}
2929
2930/* Local Variables: */
2931/* tab-width: 8 */
2932/* End: */
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 9707b7494a89..2a906d1d34ba 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -20,89 +20,58 @@
20 * Generic Generic NCR5380 driver 20 * Generic Generic NCR5380 driver
21 * 21 *
22 * Copyright 1995, Russell King 22 * Copyright 1995, Russell King
23 *
24 * ALPHA RELEASE 1.
25 *
26 * For more information, please consult
27 *
28 * NCR 5380 Family
29 * SCSI Protocol Controller
30 * Databook
31 *
32 * NCR Microelectronics
33 * 1635 Aeroplaza Drive
34 * Colorado Springs, CO 80916
35 * 1+ (719) 578-3400
36 * 1+ (800) 334-5454
37 */ 23 */
38 24
39
40/*
41 * This is from mac_scsi.h, but hey, maybe this is useful for Sun3 too! :)
42 *
43 * Options :
44 *
45 * PARITY - enable parity checking. Not supported.
46 *
47 * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
48 *
49 * USLEEP - enable support for devices that don't disconnect. Untested.
50 */
51
52#define AUTOSENSE
53
54#include <linux/types.h> 25#include <linux/types.h>
55#include <linux/stddef.h>
56#include <linux/ctype.h>
57#include <linux/delay.h> 26#include <linux/delay.h>
58
59#include <linux/module.h> 27#include <linux/module.h>
60#include <linux/signal.h>
61#include <linux/ioport.h> 28#include <linux/ioport.h>
62#include <linux/init.h> 29#include <linux/init.h>
63#include <linux/blkdev.h> 30#include <linux/blkdev.h>
31#include <linux/platform_device.h>
64 32
65#include <asm/io.h> 33#include <asm/io.h>
66
67#include <asm/sun3ints.h>
68#include <asm/dvma.h> 34#include <asm/dvma.h>
69#include <asm/idprom.h>
70#include <asm/machines.h>
71 35
72/* dma on! */
73#define REAL_DMA
74
75#include "scsi.h"
76#include <scsi/scsi_host.h> 36#include <scsi/scsi_host.h>
77#include "sun3_scsi.h" 37#include "sun3_scsi.h"
78#include "NCR5380.h"
79 38
80extern int sun3_map_test(unsigned long, char *); 39/* Definitions for the core NCR5380 driver. */
81 40
82#define USE_WRAPPER 41#define REAL_DMA
83/*#define RESET_BOOT */ 42/* #define SUPPORT_TAGS */
84#define DRIVER_SETUP 43/* minimum number of bytes to do dma on */
44#define DMA_MIN_SIZE 129
85 45
86/* 46/* #define MAX_TAGS 32 */
87 * BUG can be used to trigger a strange code-size related hang on 2.1 kernels
88 */
89#ifdef BUG
90#undef RESET_BOOT
91#undef DRIVER_SETUP
92#endif
93 47
94/* #define SUPPORT_TAGS */ 48#define NCR5380_implementation_fields /* none */
95 49
96#ifdef SUN3_SCSI_VME 50#define NCR5380_read(reg) sun3scsi_read(reg)
97#define ENABLE_IRQ() 51#define NCR5380_write(reg, value) sun3scsi_write(reg, value)
98#else 52
99#define ENABLE_IRQ() enable_irq( IRQ_SUN3_SCSI ); 53#define NCR5380_queue_command sun3scsi_queue_command
100#endif 54#define NCR5380_bus_reset sun3scsi_bus_reset
55#define NCR5380_abort sun3scsi_abort
56#define NCR5380_show_info sun3scsi_show_info
57#define NCR5380_info sun3scsi_info
101 58
59#define NCR5380_dma_read_setup(instance, data, count) \
60 sun3scsi_dma_setup(data, count, 0)
61#define NCR5380_dma_write_setup(instance, data, count) \
62 sun3scsi_dma_setup(data, count, 1)
63#define NCR5380_dma_residual(instance) \
64 sun3scsi_dma_residual(instance)
65#define NCR5380_dma_xfer_len(instance, cmd, phase) \
66 sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
102 67
103static irqreturn_t scsi_sun3_intr(int irq, void *dummy); 68#define NCR5380_acquire_dma_irq(instance) (1)
104static inline unsigned char sun3scsi_read(int reg); 69#define NCR5380_release_dma_irq(instance)
105static inline void sun3scsi_write(int reg, int value); 70
71#include "NCR5380.h"
72
73
74extern int sun3_map_test(unsigned long, char *);
106 75
107static int setup_can_queue = -1; 76static int setup_can_queue = -1;
108module_param(setup_can_queue, int, 0); 77module_param(setup_can_queue, int, 0);
@@ -117,9 +86,7 @@ module_param(setup_use_tagged_queuing, int, 0);
117static int setup_hostid = -1; 86static int setup_hostid = -1;
118module_param(setup_hostid, int, 0); 87module_param(setup_hostid, int, 0);
119 88
120static struct scsi_cmnd *sun3_dma_setup_done = NULL; 89/* #define RESET_BOOT */
121
122#define RESET_RUN_DONE
123 90
124#define AFTER_RESET_DELAY (HZ/2) 91#define AFTER_RESET_DELAY (HZ/2)
125 92
@@ -129,18 +96,15 @@ static struct scsi_cmnd *sun3_dma_setup_done = NULL;
129/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ 96/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */
130#define SUN3_DVMA_BUFSIZE 0xe000 97#define SUN3_DVMA_BUFSIZE 0xe000
131 98
132/* minimum number of bytes to do dma on */ 99static struct scsi_cmnd *sun3_dma_setup_done;
133#define SUN3_DMA_MINSIZE 128 100static unsigned char *sun3_scsi_regp;
134
135static volatile unsigned char *sun3_scsi_regp;
136static volatile struct sun3_dma_regs *dregs; 101static volatile struct sun3_dma_regs *dregs;
137#ifndef SUN3_SCSI_VME 102static struct sun3_udc_regs *udc_regs;
138static struct sun3_udc_regs *udc_regs = NULL;
139#endif
140static unsigned char *sun3_dma_orig_addr = NULL; 103static unsigned char *sun3_dma_orig_addr = NULL;
141static unsigned long sun3_dma_orig_count = 0; 104static unsigned long sun3_dma_orig_count = 0;
142static int sun3_dma_active = 0; 105static int sun3_dma_active = 0;
143static unsigned long last_residual = 0; 106static unsigned long last_residual = 0;
107static struct Scsi_Host *default_instance;
144 108
145/* 109/*
146 * NCR 5380 register access functions 110 * NCR 5380 register access functions
@@ -148,12 +112,12 @@ static unsigned long last_residual = 0;
148 112
149static inline unsigned char sun3scsi_read(int reg) 113static inline unsigned char sun3scsi_read(int reg)
150{ 114{
151 return( sun3_scsi_regp[reg] ); 115 return in_8(sun3_scsi_regp + reg);
152} 116}
153 117
154static inline void sun3scsi_write(int reg, int value) 118static inline void sun3scsi_write(int reg, int value)
155{ 119{
156 sun3_scsi_regp[reg] = value; 120 out_8(sun3_scsi_regp + reg, value);
157} 121}
158 122
159#ifndef SUN3_SCSI_VME 123#ifndef SUN3_SCSI_VME
@@ -180,213 +144,10 @@ static inline void sun3_udc_write(unsigned short val, unsigned char reg)
180} 144}
181#endif 145#endif
182 146
183/*
184 * XXX: status debug
185 */
186static struct Scsi_Host *default_instance;
187
188/*
189 * Function : int sun3scsi_detect(struct scsi_host_template * tpnt)
190 *
191 * Purpose : initializes mac NCR5380 driver based on the
192 * command line / compile time port and irq definitions.
193 *
194 * Inputs : tpnt - template for this SCSI adapter.
195 *
196 * Returns : 1 if a host adapter was found, 0 if not.
197 *
198 */
199
200static int __init sun3scsi_detect(struct scsi_host_template *tpnt)
201{
202 unsigned long ioaddr, irq;
203 static int called = 0;
204 struct Scsi_Host *instance;
205#ifdef SUN3_SCSI_VME
206 int i;
207 unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI,
208 IOBASE_SUN3_VMESCSI + 0x4000,
209 0 };
210 unsigned long vecs[3] = { SUN3_VEC_VMESCSI0,
211 SUN3_VEC_VMESCSI1,
212 0 };
213#endif
214
215 /* check that this machine has an onboard 5380 */
216 switch(idprom->id_machtype) {
217#ifdef SUN3_SCSI_VME
218 case SM_SUN3|SM_3_160:
219 case SM_SUN3|SM_3_260:
220 break;
221#else
222 case SM_SUN3|SM_3_50:
223 case SM_SUN3|SM_3_60:
224 break;
225#endif
226
227 default:
228 return 0;
229 }
230
231 if(called)
232 return 0;
233
234#ifdef SUN3_SCSI_VME
235 tpnt->proc_name = "Sun3 5380 VME SCSI";
236#else
237 tpnt->proc_name = "Sun3 5380 SCSI";
238#endif
239
240 /* setup variables */
241 tpnt->can_queue =
242 (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE;
243 tpnt->cmd_per_lun =
244 (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN;
245 tpnt->sg_tablesize =
246 (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE;
247
248 if (setup_hostid >= 0)
249 tpnt->this_id = setup_hostid;
250 else {
251 /* use 7 as default */
252 tpnt->this_id = 7;
253 }
254
255#ifdef SUN3_SCSI_VME
256 ioaddr = 0;
257 for (i = 0; addrs[i] != 0; i++) {
258 unsigned char x;
259
260 ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE,
261 SUN3_PAGE_TYPE_VME16);
262 irq = vecs[i];
263 sun3_scsi_regp = (unsigned char *)ioaddr;
264
265 dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8);
266
267 if (sun3_map_test((unsigned long)dregs, &x)) {
268 unsigned short oldcsr;
269
270 oldcsr = dregs->csr;
271 dregs->csr = 0;
272 udelay(SUN3_DMA_DELAY);
273 if (dregs->csr == 0x1400)
274 break;
275
276 dregs->csr = oldcsr;
277 }
278
279 iounmap((void *)ioaddr);
280 ioaddr = 0;
281 }
282
283 if (!ioaddr)
284 return 0;
285#else
286 irq = IRQ_SUN3_SCSI;
287 ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE);
288 sun3_scsi_regp = (unsigned char *)ioaddr;
289
290 dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8);
291
292 if((udc_regs = dvma_malloc(sizeof(struct sun3_udc_regs)))
293 == NULL) {
294 printk("SUN3 Scsi couldn't allocate DVMA memory!\n");
295 return 0;
296 }
297#endif
298#ifdef SUPPORT_TAGS
299 if (setup_use_tagged_queuing < 0)
300 setup_use_tagged_queuing = USE_TAGGED_QUEUING;
301#endif
302
303 instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
304 if(instance == NULL)
305 return 0;
306
307 default_instance = instance;
308
309 instance->io_port = (unsigned long) ioaddr;
310 instance->irq = irq;
311
312 NCR5380_init(instance, 0);
313
314 instance->n_io_port = 32;
315
316 ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
317
318 if (request_irq(instance->irq, scsi_sun3_intr,
319 0, "Sun3SCSI-5380", instance)) {
320#ifndef REAL_DMA
321 printk("scsi%d: IRQ%d not free, interrupts disabled\n",
322 instance->host_no, instance->irq);
323 instance->irq = SCSI_IRQ_NONE;
324#else
325 printk("scsi%d: IRQ%d not free, bailing out\n",
326 instance->host_no, instance->irq);
327 return 0;
328#endif
329 }
330
331 pr_info("scsi%d: %s at port %lX irq", instance->host_no,
332 tpnt->proc_name, instance->io_port);
333 if (instance->irq == SCSI_IRQ_NONE)
334 printk ("s disabled");
335 else
336 printk (" %d", instance->irq);
337 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
338 instance->can_queue, instance->cmd_per_lun,
339 SUN3SCSI_PUBLIC_RELEASE);
340 printk("\nscsi%d:", instance->host_no);
341 NCR5380_print_options(instance);
342 printk("\n");
343
344 dregs->csr = 0;
345 udelay(SUN3_DMA_DELAY);
346 dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;
347 udelay(SUN3_DMA_DELAY);
348 dregs->fifo_count = 0;
349#ifdef SUN3_SCSI_VME
350 dregs->fifo_count_hi = 0;
351 dregs->dma_addr_hi = 0;
352 dregs->dma_addr_lo = 0;
353 dregs->dma_count_hi = 0;
354 dregs->dma_count_lo = 0;
355
356 dregs->ivect = VME_DATA24 | (instance->irq & 0xff);
357#endif
358
359 called = 1;
360
361#ifdef RESET_BOOT
362 sun3_scsi_reset_boot(instance);
363#endif
364
365 return 1;
366}
367
368int sun3scsi_release (struct Scsi_Host *shpnt)
369{
370 if (shpnt->irq != SCSI_IRQ_NONE)
371 free_irq(shpnt->irq, shpnt);
372
373 iounmap((void *)sun3_scsi_regp);
374
375 NCR5380_exit(shpnt);
376 return 0;
377}
378
379#ifdef RESET_BOOT 147#ifdef RESET_BOOT
380/*
381 * Our 'bus reset on boot' function
382 */
383
384static void sun3_scsi_reset_boot(struct Scsi_Host *instance) 148static void sun3_scsi_reset_boot(struct Scsi_Host *instance)
385{ 149{
386 unsigned long end; 150 unsigned long end;
387
388 NCR5380_local_declare();
389 NCR5380_setup(instance);
390 151
391 /* 152 /*
392 * Do a SCSI reset to clean up the bus during initialization. No 153 * Do a SCSI reset to clean up the bus during initialization. No
@@ -422,11 +183,6 @@ static void sun3_scsi_reset_boot(struct Scsi_Host *instance)
422} 183}
423#endif 184#endif
424 185
425static const char *sun3scsi_info(struct Scsi_Host *spnt)
426{
427 return "";
428}
429
430// safe bits for the CSR 186// safe bits for the CSR
431#define CSR_GOOD 0x060f 187#define CSR_GOOD 0x060f
432 188
@@ -468,7 +224,6 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dummy)
468void sun3_sun3_debug (void) 224void sun3_sun3_debug (void)
469{ 225{
470 unsigned long flags; 226 unsigned long flags;
471 NCR5380_local_declare();
472 227
473 if (default_instance) { 228 if (default_instance) {
474 local_irq_save(flags); 229 local_irq_save(flags);
@@ -732,25 +487,200 @@ static int sun3scsi_dma_finish(int write_flag)
732 487
733} 488}
734 489
735#include "sun3_NCR5380.c" 490#include "atari_NCR5380.c"
736 491
737static struct scsi_host_template driver_template = { 492#ifdef SUN3_SCSI_VME
493#define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI"
494#define DRV_MODULE_NAME "sun3_scsi_vme"
495#else
496#define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI"
497#define DRV_MODULE_NAME "sun3_scsi"
498#endif
499
500#define PFX DRV_MODULE_NAME ": "
501
502static struct scsi_host_template sun3_scsi_template = {
503 .module = THIS_MODULE,
504 .proc_name = DRV_MODULE_NAME,
738 .show_info = sun3scsi_show_info, 505 .show_info = sun3scsi_show_info,
739 .name = SUN3_SCSI_NAME, 506 .name = SUN3_SCSI_NAME,
740 .detect = sun3scsi_detect,
741 .release = sun3scsi_release,
742 .info = sun3scsi_info, 507 .info = sun3scsi_info,
743 .queuecommand = sun3scsi_queue_command, 508 .queuecommand = sun3scsi_queue_command,
744 .eh_abort_handler = sun3scsi_abort, 509 .eh_abort_handler = sun3scsi_abort,
745 .eh_bus_reset_handler = sun3scsi_bus_reset, 510 .eh_bus_reset_handler = sun3scsi_bus_reset,
746 .can_queue = CAN_QUEUE, 511 .can_queue = 16,
747 .this_id = 7, 512 .this_id = 7,
748 .sg_tablesize = SG_TABLESIZE, 513 .sg_tablesize = SG_NONE,
749 .cmd_per_lun = CMD_PER_LUN, 514 .cmd_per_lun = 2,
750 .use_clustering = DISABLE_CLUSTERING 515 .use_clustering = DISABLE_CLUSTERING
751}; 516};
752 517
518static int __init sun3_scsi_probe(struct platform_device *pdev)
519{
520 struct Scsi_Host *instance;
521 int error;
522 struct resource *irq, *mem;
523 unsigned char *ioaddr;
524 int host_flags = 0;
525#ifdef SUN3_SCSI_VME
526 int i;
527#endif
528
529 if (setup_can_queue > 0)
530 sun3_scsi_template.can_queue = setup_can_queue;
531 if (setup_cmd_per_lun > 0)
532 sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
533 if (setup_sg_tablesize >= 0)
534 sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
535 if (setup_hostid >= 0)
536 sun3_scsi_template.this_id = setup_hostid & 7;
537
538#ifdef SUN3_SCSI_VME
539 ioaddr = NULL;
540 for (i = 0; i < 2; i++) {
541 unsigned char x;
542
543 irq = platform_get_resource(pdev, IORESOURCE_IRQ, i);
544 mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
545 if (!irq || !mem)
546 break;
547
548 ioaddr = sun3_ioremap(mem->start, resource_size(mem),
549 SUN3_PAGE_TYPE_VME16);
550 dregs = (struct sun3_dma_regs *)(ioaddr + 8);
551
552 if (sun3_map_test((unsigned long)dregs, &x)) {
553 unsigned short oldcsr;
554
555 oldcsr = dregs->csr;
556 dregs->csr = 0;
557 udelay(SUN3_DMA_DELAY);
558 if (dregs->csr == 0x1400)
559 break;
560
561 dregs->csr = oldcsr;
562 }
563
564 iounmap(ioaddr);
565 ioaddr = NULL;
566 }
567 if (!ioaddr)
568 return -ENODEV;
569#else
570 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
571 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
572 if (!irq || !mem)
573 return -ENODEV;
574
575 ioaddr = ioremap(mem->start, resource_size(mem));
576 dregs = (struct sun3_dma_regs *)(ioaddr + 8);
577
578 udc_regs = dvma_malloc(sizeof(struct sun3_udc_regs));
579 if (!udc_regs) {
580 pr_err(PFX "couldn't allocate DVMA memory!\n");
581 iounmap(ioaddr);
582 return -ENOMEM;
583 }
584#endif
585
586 sun3_scsi_regp = ioaddr;
587
588 instance = scsi_host_alloc(&sun3_scsi_template,
589 sizeof(struct NCR5380_hostdata));
590 if (!instance) {
591 error = -ENOMEM;
592 goto fail_alloc;
593 }
594 default_instance = instance;
595
596 instance->io_port = (unsigned long)ioaddr;
597 instance->irq = irq->start;
598
599#ifdef SUPPORT_TAGS
600 host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
601#endif
602
603 NCR5380_init(instance, host_flags);
604
605 error = request_irq(instance->irq, scsi_sun3_intr, 0,
606 "NCR5380", instance);
607 if (error) {
608#ifdef REAL_DMA
609 pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n",
610 instance->host_no, instance->irq);
611 goto fail_irq;
612#else
613 pr_warn(PFX "scsi%d: IRQ %d not free, interrupts disabled\n",
614 instance->host_no, instance->irq);
615 instance->irq = NO_IRQ;
616#endif
617 }
618
619 dregs->csr = 0;
620 udelay(SUN3_DMA_DELAY);
621 dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;
622 udelay(SUN3_DMA_DELAY);
623 dregs->fifo_count = 0;
624#ifdef SUN3_SCSI_VME
625 dregs->fifo_count_hi = 0;
626 dregs->dma_addr_hi = 0;
627 dregs->dma_addr_lo = 0;
628 dregs->dma_count_hi = 0;
629 dregs->dma_count_lo = 0;
630
631 dregs->ivect = VME_DATA24 | (instance->irq & 0xff);
632#endif
633
634#ifdef RESET_BOOT
635 sun3_scsi_reset_boot(instance);
636#endif
637
638 error = scsi_add_host(instance, NULL);
639 if (error)
640 goto fail_host;
641
642 platform_set_drvdata(pdev, instance);
643
644 scsi_scan_host(instance);
645 return 0;
646
647fail_host:
648 if (instance->irq != NO_IRQ)
649 free_irq(instance->irq, instance);
650fail_irq:
651 NCR5380_exit(instance);
652 scsi_host_put(instance);
653fail_alloc:
654 if (udc_regs)
655 dvma_free(udc_regs);
656 iounmap(sun3_scsi_regp);
657 return error;
658}
659
660static int __exit sun3_scsi_remove(struct platform_device *pdev)
661{
662 struct Scsi_Host *instance = platform_get_drvdata(pdev);
663
664 scsi_remove_host(instance);
665 if (instance->irq != NO_IRQ)
666 free_irq(instance->irq, instance);
667 NCR5380_exit(instance);
668 scsi_host_put(instance);
669 if (udc_regs)
670 dvma_free(udc_regs);
671 iounmap(sun3_scsi_regp);
672 return 0;
673}
674
675static struct platform_driver sun3_scsi_driver = {
676 .remove = __exit_p(sun3_scsi_remove),
677 .driver = {
678 .name = DRV_MODULE_NAME,
679 .owner = THIS_MODULE,
680 },
681};
753 682
754#include "scsi_module.c" 683module_platform_driver_probe(sun3_scsi_driver, sun3_scsi_probe);
755 684
685MODULE_ALIAS("platform:" DRV_MODULE_NAME);
756MODULE_LICENSE("GPL"); 686MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h
index e96a37cf06ac..d22745fae328 100644
--- a/drivers/scsi/sun3_scsi.h
+++ b/drivers/scsi/sun3_scsi.h
@@ -13,95 +13,11 @@
13 * (Unix and Linux consulting and custom programming) 13 * (Unix and Linux consulting and custom programming)
14 * drew@colorado.edu 14 * drew@colorado.edu
15 * +1 (303) 440-4894 15 * +1 (303) 440-4894
16 *
17 * ALPHA RELEASE 1.
18 *
19 * For more information, please consult
20 *
21 * NCR 5380 Family
22 * SCSI Protocol Controller
23 * Databook
24 *
25 * NCR Microelectronics
26 * 1635 Aeroplaza Drive
27 * Colorado Springs, CO 80916
28 * 1+ (719) 578-3400
29 * 1+ (800) 334-5454
30 */ 16 */
31 17
32#ifndef SUN3_SCSI_H 18#ifndef SUN3_SCSI_H
33#define SUN3_SCSI_H 19#define SUN3_SCSI_H
34 20
35#define SUN3SCSI_PUBLIC_RELEASE 1
36
37/*
38 * Int: level 2 autovector
39 * IO: type 1, base 0x00140000, 5 bits phys space: A<4..0>
40 */
41#define IRQ_SUN3_SCSI 2
42#define IOBASE_SUN3_SCSI 0x00140000
43
44#define IOBASE_SUN3_VMESCSI 0xff200000
45
46static int sun3scsi_abort(struct scsi_cmnd *);
47static int sun3scsi_detect (struct scsi_host_template *);
48static const char *sun3scsi_info (struct Scsi_Host *);
49static int sun3scsi_bus_reset(struct scsi_cmnd *);
50static int sun3scsi_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
51static int sun3scsi_release (struct Scsi_Host *);
52
53#ifndef CMD_PER_LUN
54#define CMD_PER_LUN 2
55#endif
56
57#ifndef CAN_QUEUE
58#define CAN_QUEUE 16
59#endif
60
61#ifndef SG_TABLESIZE
62#define SG_TABLESIZE SG_NONE
63#endif
64
65#ifndef MAX_TAGS
66#define MAX_TAGS 32
67#endif
68
69#ifndef USE_TAGGED_QUEUING
70#define USE_TAGGED_QUEUING 1
71#endif
72
73#include <scsi/scsicam.h>
74
75#ifdef SUN3_SCSI_VME
76#define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI"
77#else
78#define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI"
79#endif
80
81#define NCR5380_implementation_fields \
82 int port, ctrl
83
84#define NCR5380_local_declare() \
85 struct Scsi_Host *_instance
86
87#define NCR5380_setup(instance) \
88 _instance = instance
89
90#define NCR5380_read(reg) sun3scsi_read(reg)
91#define NCR5380_write(reg, value) sun3scsi_write(reg, value)
92
93#define NCR5380_intr sun3scsi_intr
94#define NCR5380_queue_command sun3scsi_queue_command
95#define NCR5380_bus_reset sun3scsi_bus_reset
96#define NCR5380_abort sun3scsi_abort
97#define NCR5380_show_info sun3scsi_show_info
98#define NCR5380_dma_xfer_len(i, cmd, phase) \
99 sun3scsi_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1)
100
101#define NCR5380_dma_write_setup(instance, data, count) sun3scsi_dma_setup(data, count, 1)
102#define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0)
103#define NCR5380_dma_residual sun3scsi_dma_residual
104
105/* additional registers - mainly DMA control regs */ 21/* additional registers - mainly DMA control regs */
106/* these start at regbase + 8 -- directly after the NCR regs */ 22/* these start at regbase + 8 -- directly after the NCR regs */
107struct sun3_dma_regs { 23struct sun3_dma_regs {
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 3557b385251a..5d00e514ff28 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -820,7 +820,7 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
820 if (reqtags > SYM_CONF_MAX_TAG) 820 if (reqtags > SYM_CONF_MAX_TAG)
821 reqtags = SYM_CONF_MAX_TAG; 821 reqtags = SYM_CONF_MAX_TAG;
822 depth_to_use = reqtags ? reqtags : 1; 822 depth_to_use = reqtags ? reqtags : 1;
823 scsi_adjust_queue_depth(sdev, depth_to_use); 823 scsi_change_queue_depth(sdev, depth_to_use);
824 lp->s.scdev_depth = depth_to_use; 824 lp->s.scdev_depth = depth_to_use;
825 sym_tune_dev_queuing(tp, sdev->lun, reqtags); 825 sym_tune_dev_queuing(tp, sdev->lun, reqtags);
826 826
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index 8cc80931df14..87828acbf7c6 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -1,4 +1,3 @@
1#define AUTOSENSE
2#define PSEUDO_DMA 1#define PSEUDO_DMA
3 2
4/* 3/*
@@ -12,8 +11,6 @@
12 * drew@colorado.edu 11 * drew@colorado.edu
13 * +1 (303) 440-4894 12 * +1 (303) 440-4894
14 * 13 *
15 * DISTRIBUTION RELEASE 3.
16 *
17 * For more information, please consult 14 * For more information, please consult
18 * 15 *
19 * Trantor Systems, Ltd. 16 * Trantor Systems, Ltd.
@@ -24,40 +21,9 @@
24 * 5415 Randall Place 21 * 5415 Randall Place
25 * Fremont, CA 94538 22 * Fremont, CA 94538
26 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910 23 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
27 *
28 * and
29 *
30 * NCR 5380 Family
31 * SCSI Protocol Controller
32 * Databook
33 *
34 * NCR Microelectronics
35 * 1635 Aeroplaza Drive
36 * Colorado Springs, CO 80916
37 * 1+ (719) 578-3400
38 * 1+ (800) 334-5454
39 */ 24 */
40 25
41/* 26/*
42 * Options :
43 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
44 * for commands that return with a CHECK CONDITION status.
45 *
46 * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
47 * increase compared to polled I/O.
48 *
49 * PARITY - enable parity checking. Not supported.
50 *
51 * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
52 *
53 *
54 * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
55 * only really want to use this if you're having a problem with
56 * dropped characters during high speed communications, and even
57 * then, you're going to be better off twiddling with transfersize.
58 *
59 * USLEEP - enable support for devices that don't disconnect. Untested.
60 *
61 * The card is detected and initialized in one of several ways : 27 * The card is detected and initialized in one of several ways :
62 * 1. Autoprobe (default) - since the board is memory mapped, 28 * 1. Autoprobe (default) - since the board is memory mapped,
63 * a BIOS signature is scanned for to locate the registers. 29 * a BIOS signature is scanned for to locate the registers.
@@ -111,7 +77,6 @@
111#include <linux/module.h> 77#include <linux/module.h>
112#include <linux/delay.h> 78#include <linux/delay.h>
113 79
114#include "scsi.h"
115#include <scsi/scsi_host.h> 80#include <scsi/scsi_host.h>
116#include "t128.h" 81#include "t128.h"
117#define AUTOPROBE_IRQ 82#define AUTOPROBE_IRQ
@@ -148,6 +113,7 @@ static struct signature {
148 113
149#define NO_SIGNATURES ARRAY_SIZE(signatures) 114#define NO_SIGNATURES ARRAY_SIZE(signatures)
150 115
116#ifndef MODULE
151/* 117/*
152 * Function : t128_setup(char *str, int *ints) 118 * Function : t128_setup(char *str, int *ints)
153 * 119 *
@@ -158,9 +124,13 @@ static struct signature {
158 * 124 *
159 */ 125 */
160 126
161void __init t128_setup(char *str, int *ints){ 127static int __init t128_setup(char *str)
128{
162 static int commandline_current = 0; 129 static int commandline_current = 0;
163 int i; 130 int i;
131 int ints[10];
132
133 get_options(str, ARRAY_SIZE(ints), ints);
164 if (ints[0] != 2) 134 if (ints[0] != 2)
165 printk("t128_setup : usage t128=address,irq\n"); 135 printk("t128_setup : usage t128=address,irq\n");
166 else 136 else
@@ -174,8 +144,12 @@ void __init t128_setup(char *str, int *ints){
174 } 144 }
175 ++commandline_current; 145 ++commandline_current;
176 } 146 }
147 return 1;
177} 148}
178 149
150__setup("t128=", t128_setup);
151#endif
152
179/* 153/*
180 * Function : int t128_detect(struct scsi_host_template * tpnt) 154 * Function : int t128_detect(struct scsi_host_template * tpnt)
181 * 155 *
@@ -189,17 +163,14 @@ void __init t128_setup(char *str, int *ints){
189 * 163 *
190 */ 164 */
191 165
192int __init t128_detect(struct scsi_host_template * tpnt){ 166static int __init t128_detect(struct scsi_host_template *tpnt)
167{
193 static int current_override = 0, current_base = 0; 168 static int current_override = 0, current_base = 0;
194 struct Scsi_Host *instance; 169 struct Scsi_Host *instance;
195 unsigned long base; 170 unsigned long base;
196 void __iomem *p; 171 void __iomem *p;
197 int sig, count; 172 int sig, count;
198 173
199 tpnt->proc_name = "t128";
200 tpnt->show_info = t128_show_info;
201 tpnt->write_info = t128_write_info;
202
203 for (count = 0; current_override < NO_OVERRIDES; ++current_override) { 174 for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
204 base = 0; 175 base = 0;
205 p = NULL; 176 p = NULL;
@@ -254,15 +225,19 @@ found:
254 else 225 else
255 instance->irq = NCR5380_probe_irq(instance, T128_IRQS); 226 instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
256 227
257 if (instance->irq != SCSI_IRQ_NONE) 228 /* Compatibility with documented NCR5380 kernel parameters */
229 if (instance->irq == 255)
230 instance->irq = NO_IRQ;
231
232 if (instance->irq != NO_IRQ)
258 if (request_irq(instance->irq, t128_intr, 0, "t128", 233 if (request_irq(instance->irq, t128_intr, 0, "t128",
259 instance)) { 234 instance)) {
260 printk("scsi%d : IRQ%d not free, interrupts disabled\n", 235 printk("scsi%d : IRQ%d not free, interrupts disabled\n",
261 instance->host_no, instance->irq); 236 instance->host_no, instance->irq);
262 instance->irq = SCSI_IRQ_NONE; 237 instance->irq = NO_IRQ;
263 } 238 }
264 239
265 if (instance->irq == SCSI_IRQ_NONE) { 240 if (instance->irq == NO_IRQ) {
266 printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); 241 printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
267 printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); 242 printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
268 } 243 }
@@ -271,16 +246,6 @@ found:
271 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); 246 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
272#endif 247#endif
273 248
274 printk("scsi%d : at 0x%08lx", instance->host_no, instance->base);
275 if (instance->irq == SCSI_IRQ_NONE)
276 printk (" interrupts disabled");
277 else
278 printk (" irq %d", instance->irq);
279 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
280 CAN_QUEUE, CMD_PER_LUN, T128_PUBLIC_RELEASE);
281 NCR5380_print_options(instance);
282 printk("\n");
283
284 ++current_override; 249 ++current_override;
285 ++count; 250 ++count;
286 } 251 }
@@ -291,7 +256,7 @@ static int t128_release(struct Scsi_Host *shost)
291{ 256{
292 NCR5380_local_declare(); 257 NCR5380_local_declare();
293 NCR5380_setup(shost); 258 NCR5380_setup(shost);
294 if (shost->irq) 259 if (shost->irq != NO_IRQ)
295 free_irq(shost->irq, shost); 260 free_irq(shost->irq, shost);
296 NCR5380_exit(shost); 261 NCR5380_exit(shost);
297 if (shost->io_port && shost->n_io_port) 262 if (shost->io_port && shost->n_io_port)
@@ -321,8 +286,8 @@ static int t128_release(struct Scsi_Host *shost)
321 * and matching the H_C_S coordinates to what DOS uses. 286 * and matching the H_C_S coordinates to what DOS uses.
322 */ 287 */
323 288
324int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev, 289static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev,
325 sector_t capacity, int * ip) 290 sector_t capacity, int *ip)
326{ 291{
327 ip[0] = 64; 292 ip[0] = 64;
328 ip[1] = 32; 293 ip[1] = 32;
@@ -430,6 +395,10 @@ static struct scsi_host_template driver_template = {
430 .name = "Trantor T128/T128F/T228", 395 .name = "Trantor T128/T128F/T228",
431 .detect = t128_detect, 396 .detect = t128_detect,
432 .release = t128_release, 397 .release = t128_release,
398 .proc_name = "t128",
399 .show_info = t128_show_info,
400 .write_info = t128_write_info,
401 .info = t128_info,
433 .queuecommand = t128_queue_command, 402 .queuecommand = t128_queue_command,
434 .eh_abort_handler = t128_abort, 403 .eh_abort_handler = t128_abort,
435 .eh_bus_reset_handler = t128_bus_reset, 404 .eh_bus_reset_handler = t128_bus_reset,
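With the hunk above, the proc/info hooks become static template data instead of assignments performed at probe time in t128_detect(). For reference, a minimal host template of this era looks roughly like this (field values are illustrative and abbreviated, not the full t128 template):

    static struct scsi_host_template example_template = {
            .proc_name      = "t128",       /* names /proc/scsi/t128/ */
            .name           = "Trantor T128/T128F/T228",
            .show_info      = t128_show_info,
            .write_info     = t128_write_info,
            .info           = t128_info,
            .queuecommand   = t128_queue_command,
            .can_queue      = 32,           /* illustrative */
            .this_id        = 7,
            .cmd_per_lun    = 2,
    };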
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index fd68cecc62af..2c7371454dfd 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -8,8 +8,6 @@
8 * drew@colorado.edu 8 * drew@colorado.edu
9 * +1 (303) 440-4894 9 * +1 (303) 440-4894
10 * 10 *
11 * DISTRIBUTION RELEASE 3.
12 *
13 * For more information, please consult 11 * For more information, please consult
14 * 12 *
15 * Trantor Systems, Ltd. 13 * Trantor Systems, Ltd.
@@ -20,25 +18,11 @@
20 * 5415 Randall Place 18 * 5415 Randall Place
21 * Fremont, CA 94538 19 * Fremont, CA 94538
22 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910 20 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
23 *
24 * and
25 *
26 * NCR 5380 Family
27 * SCSI Protocol Controller
28 * Databook
29 *
30 * NCR Microelectronics
31 * 1635 Aeroplaza Drive
32 * Colorado Springs, CO 80916
33 * 1+ (719) 578-3400
34 * 1+ (800) 334-5454
35 */ 21 */
36 22
37#ifndef T128_H 23#ifndef T128_H
38#define T128_H 24#define T128_H
39 25
40#define T128_PUBLIC_RELEASE 3
41
42#define TDEBUG 0 26#define TDEBUG 0
43#define TDEBUG_INIT 0x1 27#define TDEBUG_INIT 0x1
44#define TDEBUG_TRANSFER 0x2 28#define TDEBUG_TRANSFER 0x2
@@ -88,12 +72,6 @@
88#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */ 72#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */
89 73
90#ifndef ASM 74#ifndef ASM
91static int t128_abort(struct scsi_cmnd *);
92static int t128_biosparam(struct scsi_device *, struct block_device *,
93 sector_t, int*);
94static int t128_detect(struct scsi_host_template *);
95static int t128_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
96static int t128_bus_reset(struct scsi_cmnd *);
97 75
98#ifndef CMD_PER_LUN 76#ifndef CMD_PER_LUN
99#define CMD_PER_LUN 2 77#define CMD_PER_LUN 2
@@ -134,6 +112,7 @@ static int t128_bus_reset(struct scsi_cmnd *);
134#define NCR5380_queue_command t128_queue_command 112#define NCR5380_queue_command t128_queue_command
135#define NCR5380_abort t128_abort 113#define NCR5380_abort t128_abort
136#define NCR5380_bus_reset t128_bus_reset 114#define NCR5380_bus_reset t128_bus_reset
115#define NCR5380_info t128_info
137#define NCR5380_show_info t128_show_info 116#define NCR5380_show_info t128_show_info
138#define NCR5380_write_info t128_write_info 117#define NCR5380_write_info t128_write_info
139 118
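The new NCR5380_info line follows the usual NCR5380 convention: each board header defines NCR5380_* names before the shared core is included, so the core's generic entry points compile into board-specific symbols. Schematically (a simplified sketch, not the literal core source):

    /* board header (t128.h), before the core is included: */
    #define NCR5380_info t128_info

    /* shared core (NCR5380.c): the generic definition now builds as t128_info */
    static const char *NCR5380_info(struct Scsi_Host *instance)
    {
            /* common implementation used by every NCR5380 board */
    }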
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
deleted file mode 100644
index 844c9a048c00..000000000000
--- a/drivers/scsi/tmscsim.c
+++ /dev/null
@@ -1,2626 +0,0 @@
1/************************************************************************
2 * FILE NAME : TMSCSIM.C *
3 * BY : C.L. Huang, ching@tekram.com.tw *
4 * Description: Device Driver for Tekram DC-390(T) PCI SCSI *
5 * Bus Master Host Adapter *
6 * (C)Copyright 1995-1996 Tekram Technology Co., Ltd. *
7 ************************************************************************
8 * (C) Copyright: put under GNU GPL in 10/96 *
9 * (see Documentation/scsi/tmscsim.txt) *
10 ************************************************************************
11 * $Id: tmscsim.c,v 2.60.2.30 2000/12/20 01:07:12 garloff Exp $ *
12 * Enhancements and bugfixes by *
13 * Kurt Garloff <kurt@garloff.de> <garloff@suse.de> *
14 ************************************************************************
15 * HISTORY: *
16 * *
17 * REV# DATE NAME DESCRIPTION *
18 * 1.00 96/04/24 CLH First release *
19 * 1.01 96/06/12 CLH Fixed bug of Media Change for Removable *
20 * Device, scan all LUN. Support Pre2.0.10 *
21 * 1.02 96/06/18 CLH Fixed bug of Command timeout ... *
22 * 1.03 96/09/25 KG Added tmscsim_proc_info() *
23 * 1.04 96/10/11 CLH Updating for support KV 2.0.x *
24 * 1.05 96/10/18 KG Fixed bug in DC390_abort(null ptr deref)*
25 * 1.06 96/10/25 KG Fixed module support *
26 * 1.07 96/11/09 KG Fixed tmscsim_proc_info() *
27 * 1.08 96/11/18 KG Fixed null ptr in DC390_Disconnect() *
28 * 1.09 96/11/30 KG Added register the allocated IO space *
29 * 1.10 96/12/05 CLH Modified tmscsim_proc_info(), and reset *
30 * pending interrupt in DC390_detect() *
31 * 1.11 97/02/05 KG/CLH Fixed problem with partitions greater *
32 * than 1GB *
33 * 1.12 98/02/15 MJ Rewritten PCI probing *
34 * 1.13 98/04/08 KG Support for non DC390, __initfunc decls,*
35 * changed max devs from 10 to 16 *
36 * 1.14a 98/05/05 KG Dynamic DCB allocation, add-single-dev *
37 * for LUNs if LUN_SCAN (BIOS) not set *
38 * runtime config using /proc interface *
39 * 1.14b 98/05/06 KG eliminated cli (); sti (); spinlocks *
40 * 1.14c 98/05/07 KG 2.0.x compatibility *
41 * 1.20a 98/05/07 KG changed names of funcs to be consistent *
42 * DC390_ (entry points), dc390_ (internal)*
43 * reworked locking *
44 * 1.20b 98/05/12 KG bugs: version, kfree, _ctmp *
45 * debug output *
46 * 1.20c 98/05/12 KG bugs: kfree, parsing, EEpromDefaults *
47 * 1.20d 98/05/14 KG bugs: list linkage, clear flag after *
48 * reset on startup, code cleanup *
49 * 1.20e 98/05/15 KG spinlock comments, name space cleanup *
50 * pLastDCB now part of ACB structure *
51 * added stats, timeout for 2.1, TagQ bug *
52 * RESET and INQUIRY interface commands *
53 * 1.20f 98/05/18 KG spinlocks fixes, max_lun fix, free DCBs *
54 * for missing LUNs, pending int *
55 * 1.20g 98/05/19 KG Clean up: Avoid short *
56 * 1.20h 98/05/21 KG Remove AdaptSCSIID, max_lun ... *
57 * 1.20i 98/05/21 KG Aiiie: Bug with TagQMask *
58 * 1.20j 98/05/24 KG Handle STAT_BUSY, handle pACB->pLinkDCB *
59 * == 0 in remove_dev and DoingSRB_Done *
60 * 1.20k 98/05/25 KG DMA_INT (experimental) *
61 * 1.20l 98/05/27 KG remove DMA_INT; DMA_IDLE cmds added; *
62 * 1.20m 98/06/10 KG glitch configurable; made some global *
63 * vars part of ACB; use DC390_readX *
64 * 1.20n 98/06/11 KG startup params *
65 * 1.20o 98/06/15 KG added TagMaxNum to boot/module params *
66 * Device Nr -> Idx, TagMaxNum power of 2 *
67 * 1.20p 98/06/17 KG Docu updates. Reset depends on settings *
68 * pci_set_master added; 2.0.xx: pcibios_* *
69 * used instead of MechNum things ... *
70 * 1.20q 98/06/23 KG Changed defaults. Added debug code for *
71 * removable media and fixed it. TagMaxNum *
72 * fixed for DC390. Locking: ACB, DRV for *
73 * better IRQ sharing. Spelling: Queueing *
74 * Parsing and glitch_cfg changes. Display *
75 * real SyncSpeed value. Made DisConn *
76 * functional (!) *
77 * 1.20r 98/06/30 KG Debug macros, allow disabling DsCn, set *
78 * BIT4 in CtrlR4, EN_PAGE_INT, 2.0 module *
79 * param -1 fixed. *
80 * 1.20s 98/08/20 KG Debug info on abort(), try to check PCI,*
81 * phys_to_bus instead of phys_to_virt, *
82 * fixed sel. process, fixed locking, *
83 * added MODULE_XXX infos, changed IRQ *
84 * request flags, disable DMA_INT *
85 * 1.20t 98/09/07 KG TagQ report fixed; Write Erase DMA Stat;*
86 * initfunc -> __init; better abort; *
87 * Timeout for XFER_DONE & BLAST_COMPLETE; *
88 * Allow up to 33 commands being processed *
89 * 2.0a 98/10/14 KG Max Cmnds back to 17. DMA_Stat clearing *
90 * all flags. Clear within while() loops *
91 * in DataIn_0/Out_0. Null ptr in dumpinfo *
92 * for pSRB==0. Better locking during init.*
93 * bios_param() now respects part. table. *
94 * 2.0b 98/10/24 KG Docu fixes. Timeout Msg in DMA Blast. *
95 * Disallow illegal idx in INQUIRY/REMOVE *
96 * 2.0c 98/11/19 KG Cleaned up detect/init for SMP boxes, *
97 * Write Erase DMA (1.20t) caused problems *
98 * 2.0d 98/12/25 KG Christmas release ;-) Message handling *
99 * completely reworked. Handle target ini- *
100 * tiated SDTR correctly. *
101 * 2.0d1 99/01/25 KG Try to handle RESTORE_PTR *
102 * 2.0d2 99/02/08 KG Check for failure of kmalloc, correct *
103 * inclusion of scsicam.h, DelayReset *
104 * 2.0d3 99/05/31 KG DRIVER_OK -> DID_OK, DID_NO_CONNECT, *
105 * detect Target mode and warn. *
106 * pcmd->result handling cleaned up. *
107 * 2.0d4 99/06/01 KG Cleaned selection process. Found bug *
108 * which prevented more than 16 tags. Now: *
109 * 24. SDTR cleanup. Cleaner multi-LUN *
110 * handling. Don't modify ControlRegs/FIFO *
111 * when connected. *
112 * 2.0d5 99/06/01 KG Clear DevID, Fix INQUIRY after cfg chg. *
113 * 2.0d6 99/06/02 KG Added ADD special command to allow cfg. *
114 * before detection. Reset SYNC_NEGO_DONE *
115 * after a bus reset. *
116 * 2.0d7 99/06/03 KG Fixed bugs wrt add,remove commands *
117 * 2.0d8 99/06/04 KG Removed copying of cmnd into CmdBlock. *
118 * Fixed Oops in _release(). *
119 * 2.0d9 99/06/06 KG Also tag queue INQUIRY, T_U_R, ... *
120 * Allow arb. no. of Tagged Cmnds. Max 32 *
121 * 2.0d10 99/06/20 KG TagMaxNo changes now honoured! Queueing *
122 * clarified (renamed ..) TagMask handling *
123 * cleaned. *
124 * 2.0d11 99/06/28 KG cmd->result now identical to 2.0d2 *
125 * 2.0d12 99/07/04 KG Changed order of processing in IRQ *
126 * 2.0d13 99/07/05 KG Don't update DCB fields if removed *
127 * 2.0d14 99/07/05 KG remove_dev: Move kfree() to the end *
128 * 2.0d15 99/07/12 KG use_new_eh_code: 0, ULONG -> UINT where *
129 * appropriate *
130 * 2.0d16 99/07/13 KG Reenable StartSCSI interrupt, Retry msg *
131 * 2.0d17 99/07/15 KG Remove debug msg. Disable recfg. when *
132 * there are queued cmnds *
133 * 2.0d18 99/07/18 KG Selection timeout: Don't requeue *
134 * 2.0d19 99/07/18 KG Abort: Only call scsi_done if dequeued *
135 * 2.0d20 99/07/19 KG Rst_Detect: DoingSRB_Done *
136 * 2.0d21 99/08/15 KG dev_id for request/free_irq, cmnd[0] for*
137 * RETRY, SRBdone does DID_ABORT for the *
138 * cmd passed by DC390_reset() *
139 * 2.0d22 99/08/25 KG dev_id fixed. can_queue: 42 *
140 * 2.0d23 99/08/25 KG Removed some debugging code. dev_id *
141 * now is set to pACB. Use u8,u16,u32. *
142 * 2.0d24 99/11/14 KG Unreg. I/O if failed IRQ alloc. Call *
143 * done () w/ DID_BAD_TARGET in case of *
144 * missing DCB. We are old EH!! *
145 * 2.0d25 00/01/15 KG 2.3.3x compat from Andreas Schultz *
146 * set unique_id. Disable RETRY message. *
147 * 2.0d26 00/01/29 KG Go to new EH. *
148 * 2.0d27 00/01/31 KG ... but maintain 2.0 compat. *
149 * and fix DCB freeing *
150 * 2.0d28 00/02/14 KG Queue statistics fixed, dump special cmd*
151 * Waiting_Timer for failed StartSCSI *
152 * New EH: Don't return cmnds to ML on RST *
153 * Use old EH (don't have new EH fns yet) *
154 * Reset: Unlock, but refuse to queue *
155 * 2.3 __setup function *
156 * 2.0e 00/05/22 KG Return residual for 2.3 *
157 * 2.0e1 00/05/25 KG Compile fixes for 2.3.99 *
158 * 2.0e2 00/05/27 KG Jeff Garzik's pci_enable_device() *
159 * 2.0e3 00/09/29 KG Some 2.4 changes. Don't try Sync Nego *
160 * before INQUIRY has reported ability. *
161 * Recognise INQUIRY as scanning command. *
162 * 2.0e4 00/10/13 KG Allow compilation into 2.4 kernel *
163 * 2.0e5 00/11/17 KG Store Inq.flags in DCB *
164 * 2.0e6 00/11/22 KG 2.4 init function (Thx to O.Schumann) *
165 * 2.4 PCI device table (Thx to A.Richter) *
166 * 2.0e7 00/11/28 KG Allow overriding of BIOS settings *
167 * 2.0f 00/12/20 KG Handle failed INQUIRYs during scan *
168 * 2.1a 03/11/29 GL, KG Initial fixing for 2.6. Convert to *
169 * use the current PCI-mapping API, update *
170 * command-queuing. *
171 * 2.1b 04/04/13 GL Fix for 64-bit platforms *
172 * 2.1b1 04/01/31 GL (applied 05.04) Remove internal *
173 * command-queuing. *
174 * 2.1b2 04/02/01 CH (applied 05.04) Fix error-handling *
175 * 2.1c 04/05/23 GL Update to use the new pci_driver API, *
176 * some scsi EH updates, more cleanup. *
177 * 2.1d 04/05/27 GL Moved setting of scan_devices to *
178 * slave_alloc/_configure/_destroy, as *
179 * suggested by CH. *
180 ***********************************************************************/
181
182/* DEBUG options */
183//#define DC390_DEBUG0
184//#define DC390_DEBUG1
185//#define DC390_DCBDEBUG
186//#define DC390_PARSEDEBUG
187//#define DC390_REMOVABLEDEBUG
188//#define DC390_LOCKDEBUG
189
190//#define NOP do{}while(0)
191#define C_NOP
192
193/* Debug definitions */
194#ifdef DC390_DEBUG0
195# define DEBUG0(x) x
196#else
197# define DEBUG0(x) C_NOP
198#endif
199#ifdef DC390_DEBUG1
200# define DEBUG1(x) x
201#else
202# define DEBUG1(x) C_NOP
203#endif
204#ifdef DC390_DCBDEBUG
205# define DCBDEBUG(x) x
206#else
207# define DCBDEBUG(x) C_NOP
208#endif
209#ifdef DC390_PARSEDEBUG
210# define PARSEDEBUG(x) x
211#else
212# define PARSEDEBUG(x) C_NOP
213#endif
214#ifdef DC390_REMOVABLEDEBUG
215# define REMOVABLEDEBUG(x) x
216#else
217# define REMOVABLEDEBUG(x) C_NOP
218#endif
219#define DCBDEBUG1(x) C_NOP
220
221#include <linux/module.h>
222#include <linux/delay.h>
223#include <linux/signal.h>
224#include <linux/errno.h>
225#include <linux/kernel.h>
226#include <linux/ioport.h>
227#include <linux/pci.h>
228#include <linux/proc_fs.h>
229#include <linux/string.h>
230#include <linux/mm.h>
231#include <linux/blkdev.h>
232#include <linux/timer.h>
233#include <linux/interrupt.h>
234#include <linux/init.h>
235#include <linux/spinlock.h>
236#include <linux/slab.h>
237#include <asm/io.h>
238
239#include <scsi/scsi.h>
240#include <scsi/scsi_cmnd.h>
241#include <scsi/scsi_device.h>
242#include <scsi/scsi_host.h>
243#include <scsi/scsicam.h>
244#include <scsi/scsi_tcq.h>
245
246#define DC390_BANNER "Tekram DC390/AM53C974"
247#define DC390_VERSION "2.1d 2004-05-27"
248
249#define PCI_DEVICE_ID_AMD53C974 PCI_DEVICE_ID_AMD_SCSI
250
251#include "tmscsim.h"
252
253
254static void dc390_DataOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
255static void dc390_DataIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
256static void dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
257static void dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
258static void dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
259static void dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
260static void dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
261static void dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
262static void dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
263static void dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
264static void dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
265static void dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
266static void dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
267static void dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
268
269static void dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB );
270static void dc390_Disconnect( struct dc390_acb* pACB );
271static void dc390_Reselect( struct dc390_acb* pACB );
272static void dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB );
273static void dc390_ScsiRstDetect( struct dc390_acb* pACB );
274static void dc390_EnableMsgOut_Abort(struct dc390_acb*, struct dc390_srb*);
275static void dc390_dumpinfo(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB);
276static void dc390_ResetDevParam(struct dc390_acb* pACB);
277
278static u32 dc390_laststatus = 0;
279static u8 dc390_adapterCnt = 0;
280
281static int disable_clustering;
282module_param(disable_clustering, int, S_IRUGO);
283MODULE_PARM_DESC(disable_clustering, "If you experience problems with your devices, try setting to 1");
284
285/* Startup values, to be overridden on the command line */
286static int tmscsim[] = {-2, -2, -2, -2, -2, -2};
287
288module_param_array(tmscsim, int, NULL, 0);
289MODULE_PARM_DESC(tmscsim, "Host SCSI ID, Speed (0=10MHz), Device Flags, Adapter Flags, Max Tags (log2(tags)-1), DelayReset (s)");
290MODULE_AUTHOR("C.L. Huang / Kurt Garloff");
291MODULE_DESCRIPTION("SCSI host adapter driver for Tekram DC390 and other AMD53C974A based PCI SCSI adapters");
292MODULE_LICENSE("GPL");
293MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");
294
295static void *dc390_phase0[]={
296 dc390_DataOut_0,
297 dc390_DataIn_0,
298 dc390_Command_0,
299 dc390_Status_0,
300 dc390_Nop_0,
301 dc390_Nop_0,
302 dc390_MsgOut_0,
303 dc390_MsgIn_0,
304 dc390_Nop_1
305 };
306
307static void *dc390_phase1[]={
308 dc390_DataOutPhase,
309 dc390_DataInPhase,
310 dc390_CommandPhase,
311 dc390_StatusPhase,
312 dc390_Nop_0,
313 dc390_Nop_0,
314 dc390_MsgOutPhase,
315 dc390_MsgInPhase,
316 dc390_Nop_1
317 };
318
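These tables are indexed with the 3-bit SCSI phase in DC390_Interrupt() below and cast from void * at each call site. A type-safe equivalent would name the handler type once, e.g. (a sketch, not the original code):

    typedef void (*dc390_phase_fn)(struct dc390_acb *, struct dc390_srb *, u8 *);

    static const dc390_phase_fn dc390_phase0_typed[] = {
            dc390_DataOut_0, dc390_DataIn_0, dc390_Command_0, dc390_Status_0,
            dc390_Nop_0, dc390_Nop_0, dc390_MsgOut_0, dc390_MsgIn_0, dc390_Nop_1,
    };

    /* ...after which no cast is needed:
     * dc390_phase0_typed[phase](pACB, pSRB, &sstatus); */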
319#ifdef DC390_DEBUG1
320static char* dc390_p0_str[] = {
321 "dc390_DataOut_0",
322 "dc390_DataIn_0",
323 "dc390_Command_0",
324 "dc390_Status_0",
325 "dc390_Nop_0",
326 "dc390_Nop_0",
327 "dc390_MsgOut_0",
328 "dc390_MsgIn_0",
329 "dc390_Nop_1"
330 };
331
332static char* dc390_p1_str[] = {
333 "dc390_DataOutPhase",
334 "dc390_DataInPhase",
335 "dc390_CommandPhase",
336 "dc390_StatusPhase",
337 "dc390_Nop_0",
338 "dc390_Nop_0",
339 "dc390_MsgOutPhase",
340 "dc390_MsgInPhase",
341 "dc390_Nop_1"
342 };
343#endif
344
345static u8 dc390_eepromBuf[MAX_ADAPTER_NUM][EE_LEN];
346static u8 dc390_clock_period1[] = {4, 5, 6, 7, 8, 10, 13, 20};
347static u8 dc390_clock_speed[] = {100,80,67,57,50, 40, 31, 20};
348
349/***********************************************************************
350 * Functions for the management of the internal structures
351 * (DCBs, SRBs, Queueing)
352 *
353 **********************************************************************/
354static inline void dc390_start_segment(struct dc390_srb* pSRB)
355{
356 struct scatterlist *psgl = pSRB->pSegmentList;
357
358 /* start new sg segment */
359 pSRB->SGBusAddr = sg_dma_address(psgl);
360 pSRB->SGToBeXferLen = sg_dma_len(psgl);
361}
362
363static inline unsigned long dc390_advance_segment(struct dc390_srb* pSRB, u32 residue)
364{
365 unsigned long xfer = pSRB->SGToBeXferLen - residue;
366
367 /* advance past the bytes just transferred */
368 pSRB->SGBusAddr += xfer;
369 pSRB->TotalXferredLen += xfer;
370 pSRB->SGToBeXferLen = residue;
371
372 return xfer;
373}
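In other words, dc390_advance_segment() turns a hardware residue count into forward progress: bytes moved equal the segment's expected length minus what the chip reports as still outstanding. A worked example with assumed numbers:

    /* Assume a 4096-byte segment and the chip reporting 1024 bytes left:
     *   xfer = 4096 - 1024 = 3072 bytes actually transferred
     *   SGBusAddr       += 3072   (bus address advances past them)
     *   TotalXferredLen += 3072
     *   SGToBeXferLen    = 1024   (remainder of this segment)
     */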
374
375static struct dc390_dcb __inline__ *dc390_findDCB ( struct dc390_acb* pACB, u8 id, u8 lun)
376{
377 struct dc390_dcb* pDCB = pACB->pLinkDCB; if (!pDCB) return NULL;
378 while (pDCB->TargetID != id || pDCB->TargetLUN != lun)
379 {
380 pDCB = pDCB->pNextDCB;
381 if (pDCB == pACB->pLinkDCB)
382 return NULL;
383 }
384 DCBDEBUG1( printk (KERN_DEBUG "DCB %p (%02x,%02x) found.\n", \
385 pDCB, pDCB->TargetID, pDCB->TargetLUN));
386 return pDCB;
387}
388
389/* Insert SRB on top of free list */
390static __inline__ void dc390_Free_insert (struct dc390_acb* pACB, struct dc390_srb* pSRB)
391{
392 DEBUG0(printk ("DC390: Free SRB %p\n", pSRB));
393 pSRB->pNextSRB = pACB->pFreeSRB;
394 pACB->pFreeSRB = pSRB;
395}
396
397static __inline__ void dc390_Going_append (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
398{
399 pDCB->GoingSRBCnt++;
400 DEBUG0(printk("DC390: Append SRB %p to Going\n", pSRB));
401 /* Append to the list of Going commands */
402 if( pDCB->pGoingSRB )
403 pDCB->pGoingLast->pNextSRB = pSRB;
404 else
405 pDCB->pGoingSRB = pSRB;
406
407 pDCB->pGoingLast = pSRB;
408 /* No next one in sent list */
409 pSRB->pNextSRB = NULL;
410}
411
412static __inline__ void dc390_Going_remove (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
413{
414 DEBUG0(printk("DC390: Remove SRB %p from Going\n", pSRB));
415 if (pSRB == pDCB->pGoingSRB)
416 pDCB->pGoingSRB = pSRB->pNextSRB;
417 else
418 {
419 struct dc390_srb* psrb = pDCB->pGoingSRB;
420 while (psrb && psrb->pNextSRB != pSRB)
421 psrb = psrb->pNextSRB;
422 if (!psrb)
423 { printk (KERN_ERR "DC390: Remove non-ex. SRB %p from Going!\n", pSRB); return; }
424 psrb->pNextSRB = pSRB->pNextSRB;
425 if (pSRB == pDCB->pGoingLast)
426 pDCB->pGoingLast = psrb;
427 }
428 pDCB->GoingSRBCnt--;
429}
430
431static struct scatterlist* dc390_sg_build_single(struct scatterlist *sg, void *addr, unsigned int length)
432{
433 sg_init_one(sg, addr, length);
434 return sg;
435}
436
437/* Create pci mapping */
438static int dc390_pci_map (struct dc390_srb* pSRB)
439{
440 int error = 0;
441 struct scsi_cmnd *pcmd = pSRB->pcmd;
442 struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev;
443 dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp));
444
445 /* Map sense buffer */
446 if (pSRB->SRBFlag & AUTO_REQSENSE) {
447 pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
448 pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1,
449 DMA_FROM_DEVICE);
450 cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList);
451
452 /* TODO: error handling */
453 if (pSRB->SGcount != 1)
454 error = 1;
455 DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __func__, pcmd->sense_buffer, cmdp->saved_dma_handle));
456 /* Map SG list */
457 } else if (scsi_sg_count(pcmd)) {
458 int nseg;
459
460 nseg = scsi_dma_map(pcmd);
461
462 pSRB->pSegmentList = scsi_sglist(pcmd);
463 pSRB->SGcount = nseg;
464
465 /* TODO: error handling */
466 if (nseg < 0)
467 error = 1;
468 DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
469 __func__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
470 /* Map single segment */
471 } else
472 pSRB->SGcount = 0;
473
474 return error;
475}
476
477/* Remove pci mapping */
478static void dc390_pci_unmap (struct dc390_srb* pSRB)
479{
480 struct scsi_cmnd *pcmd = pSRB->pcmd;
481 struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev;
482 DEBUG1(dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp)));
483
484 if (pSRB->SRBFlag) {
485 pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
486 DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __func__, cmdp->saved_dma_handle));
487 } else {
488 scsi_dma_unmap(pcmd);
489 DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
490 __func__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
491 }
492}
493
494static void __inline__
495dc390_freetag (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
496{
497 if (pSRB->TagNumber != SCSI_NO_TAG) {
498 pDCB->TagMask &= ~(1 << pSRB->TagNumber); /* free tag mask */
499 pSRB->TagNumber = SCSI_NO_TAG;
500 }
501}
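pDCB->TagMask tracks in-flight tag numbers as a bitmask: dc390_StartSCSI() below sets the bit for the tag it sends (taken from the block layer via scmd->request->tag), and dc390_freetag() clears it. The allocation side of such a scheme, sketched by hand for illustration only:

    /* Find and claim the lowest free tag in a 32-bit mask (sketch). */
    u8 tag;

    for (tag = 0; tag < 32; tag++)
            if (!(pDCB->TagMask & (1 << tag)))
                    break;
    if (tag < 32) {
            pDCB->TagMask |= 1 << tag;      /* mark tag in flight */
            pSRB->TagNumber = tag;
    }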
502
503
504static int
505dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB )
506{
507 struct scsi_cmnd *scmd = pSRB->pcmd;
508 struct scsi_device *sdev = scmd->device;
509 u8 cmd, disc_allowed, try_sync_nego;
510
511 pSRB->ScsiPhase = SCSI_NOP0;
512
513 if (pACB->Connected)
514 {
515 // Should not happen normally
516 printk (KERN_WARNING "DC390: Can't select when connected! (%08x,%02x)\n",
517 pSRB->SRBState, pSRB->SRBFlag);
518 pSRB->SRBState = SRB_READY;
519 pACB->SelConn++;
520 return 1;
521 }
522 if (time_before (jiffies, pACB->last_reset))
523 {
524 DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n"));
525 return 1;
526 }
527 /* KG: Moved pci mapping here */
528 dc390_pci_map(pSRB);
529 /* TODO: error handling */
530 DC390_write8 (Scsi_Dest_ID, pDCB->TargetID);
531 DC390_write8 (Sync_Period, pDCB->SyncPeriod);
532 DC390_write8 (Sync_Offset, pDCB->SyncOffset);
533 DC390_write8 (CtrlReg1, pDCB->CtrlR1);
534 DC390_write8 (CtrlReg3, pDCB->CtrlR3);
535 DC390_write8 (CtrlReg4, pDCB->CtrlR4);
536 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); /* Flush FIFO */
537 DEBUG1(printk (KERN_INFO "DC390: Start SCSI command: %02x (Sync:%02x)\n",\
538 scmd->cmnd[0], pDCB->SyncMode));
539
540 /* Don't disconnect on AUTO_REQSENSE, because it might be a
541 * Contingent Allegiance Condition (6.6), where no tags should be used.
542 * All others have to be allowed to disconnect to prevent an Incorrect
543 * Initiator Connection (6.8.2/6.5.2) */
544 /* Changed KG, 99/06/06 */
545 if (! (pSRB->SRBFlag & AUTO_REQSENSE))
546 disc_allowed = pDCB->DevMode & EN_DISCONNECT_;
547 else
548 disc_allowed = 0;
549
550 if ((pDCB->SyncMode & SYNC_ENABLE) && pDCB->TargetLUN == 0 && sdev->sdtr &&
551 (((scmd->cmnd[0] == REQUEST_SENSE || (pSRB->SRBFlag & AUTO_REQSENSE)) &&
552 !(pDCB->SyncMode & SYNC_NEGO_DONE)) || scmd->cmnd[0] == INQUIRY))
553 try_sync_nego = 1;
554 else
555 try_sync_nego = 0;
556
557 pSRB->MsgCnt = 0;
558 cmd = SEL_W_ATN;
559 DC390_write8 (ScsiFifo, IDENTIFY(disc_allowed, pDCB->TargetLUN));
560 /* Change 99/05/31: Don't use tags when not disconnecting (BUSY) */
561 if ((pDCB->SyncMode & EN_TAG_QUEUEING) && disc_allowed && (scmd->flags & SCMD_TAGGED)) {
562 DC390_write8(ScsiFifo, MSG_SIMPLE_TAG);
563 pDCB->TagMask |= 1 << scmd->request->tag;
564 pSRB->TagNumber = scmd->request->tag;
565 DC390_write8(ScsiFifo, scmd->request->tag);
566 DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for SRB %p, block tag %02x\n", pSRB, scmd->request->tag));
567 cmd = SEL_W_ATN3;
568 } else {
569 /* No TagQ */
570//no_tag:
571 DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for SRB %p, No TagQ\n", disc_allowed ? "" : "o", pSRB));
572 }
573
574 pSRB->SRBState = SRB_START_;
575
576 if (try_sync_nego)
577 {
578 u8 Sync_Off = pDCB->SyncOffset;
579 DEBUG0(printk (KERN_INFO "DC390: NEW Sync Nego code triggered (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN));
580 pSRB->MsgOutBuf[0] = EXTENDED_MESSAGE;
581 pSRB->MsgOutBuf[1] = 3;
582 pSRB->MsgOutBuf[2] = EXTENDED_SDTR;
583 pSRB->MsgOutBuf[3] = pDCB->NegoPeriod;
584 if (!(Sync_Off & 0x0f)) Sync_Off = SYNC_NEGO_OFFSET;
585 pSRB->MsgOutBuf[4] = Sync_Off;
586 pSRB->MsgCnt = 5;
587 //pSRB->SRBState = SRB_MSGOUT_;
588 pSRB->SRBState |= DO_SYNC_NEGO;
589 cmd = SEL_W_ATN_STOP;
590 }
591
592 /* Command is written in CommandPhase, if SEL_W_ATN_STOP ... */
593 if (cmd != SEL_W_ATN_STOP)
594 {
595 if( pSRB->SRBFlag & AUTO_REQSENSE )
596 {
597 DC390_write8 (ScsiFifo, REQUEST_SENSE);
598 DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);
599 DC390_write8 (ScsiFifo, 0);
600 DC390_write8 (ScsiFifo, 0);
601 DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);
602 DC390_write8 (ScsiFifo, 0);
603 DEBUG1(printk (KERN_DEBUG "DC390: AutoReqSense !\n"));
604 }
605 else /* write cmnd to bus */
606 {
607 u8 *ptr; u8 i;
608 ptr = (u8 *)scmd->cmnd;
609 for (i = 0; i < scmd->cmd_len; i++)
610 DC390_write8 (ScsiFifo, *(ptr++));
611 }
612 }
613 DEBUG0(if (pACB->pActiveDCB) \
614 printk (KERN_WARNING "DC390: ActiveDCB != 0\n"));
615 DEBUG0(if (pDCB->pActiveSRB) \
616 printk (KERN_WARNING "DC390: ActiveSRB != 0\n"));
617 //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
618 if (DC390_read8 (Scsi_Status) & INTERRUPT)
619 {
620 dc390_freetag (pDCB, pSRB);
621 DEBUG0(printk ("DC390: Interrupt during Start SCSI (target %02i-%02i)\n",
622 scmd->device->id, (u8)scmd->device->lun));
623 pSRB->SRBState = SRB_READY;
624 //DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
625 pACB->SelLost++;
626 return 1;
627 }
628 DC390_write8 (ScsiCmd, cmd);
629 pACB->pActiveDCB = pDCB;
630 pDCB->pActiveSRB = pSRB;
631 pACB->Connected = 1;
632 pSRB->ScsiPhase = SCSI_NOP1;
633 return 0;
634}
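The first FIFO byte written during selection above is the SCSI IDENTIFY message; the kernel's IDENTIFY(can_disconnect, lun) macro packs bit 7 (always set), bit 6 (disconnect privilege) and the LUN into one byte:

    /* IDENTIFY(disc_allowed, lun) expands to roughly: */
    u8 identify = 0x80 | (disc_allowed ? 0x40 : 0) | (lun & 0x07);
    /* e.g. disconnect allowed, LUN 2: 0x80 | 0x40 | 0x02 == 0xC2 */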
635
636
637static void __inline__
638dc390_InvalidCmd(struct dc390_acb* pACB)
639{
640 if (pACB->pActiveDCB->pActiveSRB->SRBState & (SRB_START_ | SRB_MSGOUT))
641 DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
642}
643
644
645static irqreturn_t __inline__
646DC390_Interrupt(void *dev_id)
647{
648 struct dc390_acb *pACB = dev_id;
649 struct dc390_dcb *pDCB;
650 struct dc390_srb *pSRB;
651 u8 sstatus=0;
652 u8 phase;
653 void (*stateV)( struct dc390_acb*, struct dc390_srb*, u8 *);
654 u8 istate, istatus;
655
656 sstatus = DC390_read8 (Scsi_Status);
657 if( !(sstatus & INTERRUPT) )
658 return IRQ_NONE;
659
660 DEBUG1(printk (KERN_DEBUG "sstatus=%02x,", sstatus));
661
662 //DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT);
663 //dstatus = DC390_read8 (DMA_Status);
664 //DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);
665
666 spin_lock_irq(pACB->pScsiHost->host_lock);
667
668 istate = DC390_read8 (Intern_State);
669 istatus = DC390_read8 (INT_Status); /* This clears Scsi_Status, Intern_State and INT_Status ! */
670
671 DEBUG1(printk (KERN_INFO "Istatus(Res,Inv,Dis,Serv,Succ,ReS,SelA,Sel)=%02x,",istatus));
672 dc390_laststatus &= ~0x00ffffff;
673 dc390_laststatus |= /* dstatus<<24 | */ sstatus<<16 | istate<<8 | istatus;
674
675 if (sstatus & ILLEGAL_OP_ERR)
676 {
677 printk ("DC390: Illegal Operation detected (%08x)!\n", dc390_laststatus);
678 dc390_dumpinfo (pACB, pACB->pActiveDCB, pACB->pActiveDCB->pActiveSRB);
679 }
680
681 else if (istatus & INVALID_CMD)
682 {
683 printk ("DC390: Invalid Command detected (%08x)!\n", dc390_laststatus);
684 dc390_InvalidCmd( pACB );
685 goto unlock;
686 }
687
688 if (istatus & SCSI_RESET)
689 {
690 dc390_ScsiRstDetect( pACB );
691 goto unlock;
692 }
693
694 if (istatus & DISCONNECTED)
695 {
696 dc390_Disconnect( pACB );
697 goto unlock;
698 }
699
700 if (istatus & RESELECTED)
701 {
702 dc390_Reselect( pACB );
703 goto unlock;
704 }
705
706 else if (istatus & (SELECTED | SEL_ATTENTION))
707 {
708 printk (KERN_ERR "DC390: Target mode not supported!\n");
709 goto unlock;
710 }
711
712 if (istatus & (SUCCESSFUL_OP|SERVICE_REQUEST) )
713 {
714 pDCB = pACB->pActiveDCB;
715 if (!pDCB)
716 {
717 printk (KERN_ERR "DC390: Suc. op/ Serv. req: pActiveDCB = 0!\n");
718 goto unlock;
719 }
720 pSRB = pDCB->pActiveSRB;
721 if( pDCB->DCBFlag & ABORT_DEV_ )
722 dc390_EnableMsgOut_Abort (pACB, pSRB);
723
724 phase = pSRB->ScsiPhase;
725 DEBUG1(printk (KERN_INFO "DC390: [%i]%s(0) (%02x)\n", phase, dc390_p0_str[phase], sstatus));
726 stateV = (void *) dc390_phase0[phase];
727 ( *stateV )( pACB, pSRB, &sstatus );
728
729 pSRB->ScsiPhase = sstatus & 7;
730 phase = (u8) sstatus & 7;
731 DEBUG1(printk (KERN_INFO "DC390: [%i]%s(1) (%02x)\n", phase, dc390_p1_str[phase], sstatus));
732 stateV = (void *) dc390_phase1[phase];
733 ( *stateV )( pACB, pSRB, &sstatus );
734 }
735
736 unlock:
737 spin_unlock_irq(pACB->pScsiHost->host_lock);
738 return IRQ_HANDLED;
739}
740
741static irqreturn_t do_DC390_Interrupt(int irq, void *dev_id)
742{
743 irqreturn_t ret;
744 DEBUG1(printk (KERN_INFO "DC390: Irq (%i) caught: ", irq));
745 /* Locking is done in DC390_Interrupt */
746 ret = DC390_Interrupt(dev_id);
747 DEBUG1(printk (".. IRQ returned\n"));
748 return ret;
749}
750
751static void
752dc390_DataOut_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
753{
754 u8 sstatus;
755 u32 ResidCnt;
756 u8 dstate = 0;
757
758 sstatus = *psstatus;
759
760 if( !(pSRB->SRBState & SRB_XFERPAD) )
761 {
762 if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR) )
763 pSRB->SRBStatus |= PARITY_ERROR;
764
765 if( sstatus & COUNT_2_ZERO )
766 {
767 unsigned long timeout = jiffies + HZ;
768
769 /* Function called from the ISR with the host_lock held and interrupts disabled */
770 if (pSRB->SGToBeXferLen)
771 while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) {
772 spin_unlock_irq(pACB->pScsiHost->host_lock);
773 udelay(50);
774 spin_lock_irq(pACB->pScsiHost->host_lock);
775 }
776 if (!time_before(jiffies, timeout))
777 printk (KERN_CRIT "DC390: Deadlock in DataOut_0: DMA aborted unfinished: %06x bytes remain!!\n",
778 DC390_read32 (DMA_Wk_ByteCntr));
779 dc390_laststatus &= ~0xff000000;
780 dc390_laststatus |= dstate << 24;
781 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
782 pSRB->SGIndex++;
783 if( pSRB->SGIndex < pSRB->SGcount )
784 {
785 pSRB->pSegmentList++;
786
787 dc390_start_segment(pSRB);
788 }
789 else
790 pSRB->SGToBeXferLen = 0;
791 }
792 else
793 {
794 ResidCnt = ((u32) DC390_read8 (Current_Fifo) & 0x1f) +
795 (((u32) DC390_read8 (CtcReg_High) << 16) |
796 ((u32) DC390_read8 (CtcReg_Mid) << 8) |
797 (u32) DC390_read8 (CtcReg_Low));
798
799 dc390_advance_segment(pSRB, ResidCnt);
800 }
801 }
802 if ((*psstatus & 7) != SCSI_DATA_OUT)
803 {
804 DC390_write8 (DMA_Cmd, WRITE_DIRECTION+DMA_IDLE_CMD);
805 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
806 }
807}
808
809static void
810dc390_DataIn_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
811{
812 u8 sstatus, residual, bval;
813 u32 ResidCnt, i;
814 unsigned long xferCnt;
815
816 sstatus = *psstatus;
817
818 if( !(pSRB->SRBState & SRB_XFERPAD) )
819 {
820 if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR))
821 pSRB->SRBStatus |= PARITY_ERROR;
822
823 if( sstatus & COUNT_2_ZERO )
824 {
825 int dstate = 0;
826 unsigned long timeout = jiffies + HZ;
827
828 /* Function called from the ISR with the host_lock held and interrupts disabled */
829 if (pSRB->SGToBeXferLen)
830 while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) {
831 spin_unlock_irq(pACB->pScsiHost->host_lock);
832 udelay(50);
833 spin_lock_irq(pACB->pScsiHost->host_lock);
834 }
835 if (!time_before(jiffies, timeout)) {
836 printk (KERN_CRIT "DC390: Deadlock in DataIn_0: DMA aborted unfinished: %06x bytes remain!!\n",
837 DC390_read32 (DMA_Wk_ByteCntr));
838 printk (KERN_CRIT "DC390: DataIn_0: DMA State: %i\n", dstate);
839 }
840 dc390_laststatus &= ~0xff000000;
841 dc390_laststatus |= dstate << 24;
842 DEBUG1(ResidCnt = ((unsigned long) DC390_read8 (CtcReg_High) << 16) \
843 + ((unsigned long) DC390_read8 (CtcReg_Mid) << 8) \
844 + ((unsigned long) DC390_read8 (CtcReg_Low)));
845 DEBUG1(printk (KERN_DEBUG "Count_2_Zero (ResidCnt=%u,ToBeXfer=%lu),", ResidCnt, pSRB->SGToBeXferLen));
846
847 DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
848
849 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
850 pSRB->SGIndex++;
851 if( pSRB->SGIndex < pSRB->SGcount )
852 {
853 pSRB->pSegmentList++;
854
855 dc390_start_segment(pSRB);
856 }
857 else
858 pSRB->SGToBeXferLen = 0;
859 }
860 else /* phase changed */
861 {
862 residual = 0;
863 bval = DC390_read8 (Current_Fifo);
864 while( bval & 0x1f )
865 {
866 DEBUG1(printk (KERN_DEBUG "Check for residuals,"));
867 if( (bval & 0x1f) == 1 )
868 {
869 for(i=0; i < 0x100; i++)
870 {
871 bval = DC390_read8 (Current_Fifo);
872 if( !(bval & 0x1f) )
873 goto din_1;
874 else if( i == 0x0ff )
875 {
876 residual = 1; /* ;1 residual byte */
877 goto din_1;
878 }
879 }
880 }
881 else
882 bval = DC390_read8 (Current_Fifo);
883 }
884din_1:
885 DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_BLAST_CMD);
886 for (i = 0xa000; i; i--)
887 {
888 bval = DC390_read8 (DMA_Status);
889 if (bval & BLAST_COMPLETE)
890 break;
891 }
892 /* It seems a DMA Blast abort isn't that bad ... */
893 if (!i) printk (KERN_ERR "DC390: DMA Blast aborted unfinished!\n");
894 //DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
895 dc390_laststatus &= ~0xff000000;
896 dc390_laststatus |= bval << 24;
897
898 DEBUG1(printk (KERN_DEBUG "Blast: Read %i times DMA_Status %02x", 0xa000-i, bval));
899 ResidCnt = (((u32) DC390_read8 (CtcReg_High) << 16) |
900 ((u32) DC390_read8 (CtcReg_Mid) << 8)) |
901 (u32) DC390_read8 (CtcReg_Low);
902
903 xferCnt = dc390_advance_segment(pSRB, ResidCnt);
904
905 if (residual) {
906 size_t count = 1;
907 size_t offset = pSRB->SGBusAddr - sg_dma_address(pSRB->pSegmentList);
908 unsigned long flags;
909 u8 *ptr;
910
911 bval = DC390_read8 (ScsiFifo); /* get one residual byte */
912
913 local_irq_save(flags);
914 ptr = scsi_kmap_atomic_sg(pSRB->pSegmentList, pSRB->SGcount, &offset, &count);
915 if (likely(ptr)) {
916 *(ptr + offset) = bval;
917 scsi_kunmap_atomic_sg(ptr);
918 }
919 local_irq_restore(flags);
920 WARN_ON(!ptr);
921
922 /* 1 more byte read */
923 xferCnt += dc390_advance_segment(pSRB, pSRB->SGToBeXferLen - 1);
924 }
925 DEBUG1(printk (KERN_DEBUG "Xfered: %lu, Total: %lu, Remaining: %lu\n", xferCnt,\
926 pSRB->TotalXferredLen, pSRB->SGToBeXferLen));
927 }
928 }
929 if ((*psstatus & 7) != SCSI_DATA_IN)
930 {
931 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
932 DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
933 }
934}
935
936static void
937dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
938{
939}
940
941static void
942dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
943{
944
945 pSRB->TargetStatus = DC390_read8 (ScsiFifo);
946 //udelay (1);
947 pSRB->EndMessage = DC390_read8 (ScsiFifo); /* get message */
948
949 *psstatus = SCSI_NOP0;
950 pSRB->SRBState = SRB_COMPLETED;
951 DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD);
952}
953
954static void
955dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
956{
957 if( pSRB->SRBState & (SRB_UNEXPECT_RESEL+SRB_ABORT_SENT) )
958 *psstatus = SCSI_NOP0;
959 //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
960}
961
962
963static void __inline__
964dc390_reprog (struct dc390_acb* pACB, struct dc390_dcb* pDCB)
965{
966 DC390_write8 (Sync_Period, pDCB->SyncPeriod);
967 DC390_write8 (Sync_Offset, pDCB->SyncOffset);
968 DC390_write8 (CtrlReg3, pDCB->CtrlR3);
969 DC390_write8 (CtrlReg4, pDCB->CtrlR4);
970 dc390_SetXferRate (pACB, pDCB);
971}
972
973
974#ifdef DC390_DEBUG0
975static void
976dc390_printMsg (u8 *MsgBuf, u8 len)
977{
978 int i;
979 printk (" %02x", MsgBuf[0]);
980 for (i = 1; i < len; i++)
981 printk (" %02x", MsgBuf[i]);
982 printk ("\n");
983}
984#endif
985
986#define DC390_ENABLE_MSGOUT DC390_write8 (ScsiCmd, SET_ATN_CMD)
987
988/* reject_msg */
989static void __inline__
990dc390_MsgIn_reject (struct dc390_acb* pACB, struct dc390_srb* pSRB)
991{
992 pSRB->MsgOutBuf[0] = MESSAGE_REJECT;
993 pSRB->MsgCnt = 1;
994 DC390_ENABLE_MSGOUT;
995 DEBUG0 (printk (KERN_INFO "DC390: Reject message\n"));
996}
997
998/* abort command */
999static void
1000dc390_EnableMsgOut_Abort ( struct dc390_acb* pACB, struct dc390_srb* pSRB )
1001{
1002 pSRB->MsgOutBuf[0] = ABORT;
1003 pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT;
1004 pSRB->pSRBDCB->DCBFlag &= ~ABORT_DEV_;
1005}
1006
1007static struct dc390_srb*
1008dc390_MsgIn_QTag (struct dc390_acb* pACB, struct dc390_dcb* pDCB, s8 tag)
1009{
1010 struct dc390_srb* pSRB = pDCB->pGoingSRB;
1011
1012 if (pSRB)
1013 {
1014 struct scsi_cmnd *scmd = scsi_find_tag(pSRB->pcmd->device, tag);
1015 pSRB = (struct dc390_srb *)scmd->host_scribble;
1016
1017 if (pDCB->DCBFlag & ABORT_DEV_)
1018 {
1019 pSRB->SRBState = SRB_ABORT_SENT;
1020 dc390_EnableMsgOut_Abort( pACB, pSRB );
1021 }
1022
1023 if (!(pSRB->SRBState & SRB_DISCONNECT))
1024 goto mingx0;
1025
1026 pDCB->pActiveSRB = pSRB;
1027 pSRB->SRBState = SRB_DATA_XFER;
1028 }
1029 else
1030 {
1031 mingx0:
1032 pSRB = pACB->pTmpSRB;
1033 pSRB->SRBState = SRB_UNEXPECT_RESEL;
1034 pDCB->pActiveSRB = pSRB;
1035 pSRB->MsgOutBuf[0] = ABORT_TAG;
1036 pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT;
1037 }
1038 return pSRB;
1039}
1040
1041
1042/* set async transfer mode */
1043static void
1044dc390_MsgIn_set_async (struct dc390_acb* pACB, struct dc390_srb* pSRB)
1045{
1046 struct dc390_dcb* pDCB = pSRB->pSRBDCB;
1047 if (!(pSRB->SRBState & DO_SYNC_NEGO))
1048 printk (KERN_INFO "DC390: Target %i initiates Non-Sync?\n", pDCB->TargetID);
1049 pSRB->SRBState &= ~DO_SYNC_NEGO;
1050 pDCB->SyncMode &= ~(SYNC_ENABLE+SYNC_NEGO_DONE);
1051 pDCB->SyncPeriod = 0;
1052 pDCB->SyncOffset = 0;
1053 //pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */
1054 pDCB->CtrlR3 = FAST_CLK; /* fast clock / normal scsi */
1055 pDCB->CtrlR4 &= 0x3f;
1056 pDCB->CtrlR4 |= pACB->glitch_cfg; /* glitch eater */
1057 dc390_reprog (pACB, pDCB);
1058}
1059
1060/* set sync transfer mode */
1061static void
1062dc390_MsgIn_set_sync (struct dc390_acb* pACB, struct dc390_srb* pSRB)
1063{
1064 u8 bval;
1065 u16 wval, wval1;
1066 struct dc390_dcb* pDCB = pSRB->pSRBDCB;
1067 u8 oldsyncperiod = pDCB->SyncPeriod;
1068 u8 oldsyncoffset = pDCB->SyncOffset;
1069
1070 if (!(pSRB->SRBState & DO_SYNC_NEGO))
1071 {
1072 printk (KERN_INFO "DC390: Target %i initiates Sync: %ins %i ... answer ...\n",
1073 pDCB->TargetID, pSRB->MsgInBuf[3]<<2, pSRB->MsgInBuf[4]);
1074
1075 /* reject */
1076 //dc390_MsgIn_reject (pACB, pSRB);
1077 //return dc390_MsgIn_set_async (pACB, pSRB);
1078
1079 /* Reply with corrected SDTR Message */
1080 if (pSRB->MsgInBuf[4] > 15)
1081 {
1082 printk (KERN_INFO "DC390: Lower Sync Offset to 15\n");
1083 pSRB->MsgInBuf[4] = 15;
1084 }
1085 if (pSRB->MsgInBuf[3] < pDCB->NegoPeriod)
1086 {
1087 printk (KERN_INFO "DC390: Set sync nego period to %ins\n", pDCB->NegoPeriod << 2);
1088 pSRB->MsgInBuf[3] = pDCB->NegoPeriod;
1089 }
1090 memcpy (pSRB->MsgOutBuf, pSRB->MsgInBuf, 5);
1091 pSRB->MsgCnt = 5;
1092 DC390_ENABLE_MSGOUT;
1093 }
1094
1095 pSRB->SRBState &= ~DO_SYNC_NEGO;
1096 pDCB->SyncMode |= SYNC_ENABLE+SYNC_NEGO_DONE;
1097 pDCB->SyncOffset &= 0x0f0;
1098 pDCB->SyncOffset |= pSRB->MsgInBuf[4];
1099 pDCB->NegoPeriod = pSRB->MsgInBuf[3];
1100
1101 wval = (u16) pSRB->MsgInBuf[3];
1102 wval = wval << 2; wval -= 3; wval1 = wval / 25; /* compute speed */
1103 if( (wval1 * 25) != wval) wval1++;
1104 bval = FAST_CLK+FAST_SCSI; /* fast clock / fast scsi */
1105
1106 pDCB->CtrlR4 &= 0x3f; /* Glitch eater: 12ns less than normal */
1107 if (pACB->glitch_cfg != NS_TO_GLITCH(0))
1108 pDCB->CtrlR4 |= NS_TO_GLITCH(((GLITCH_TO_NS(pACB->glitch_cfg)) - 1));
1109 else
1110 pDCB->CtrlR4 |= NS_TO_GLITCH(0);
1111 if (wval1 < 4) pDCB->CtrlR4 |= NS_TO_GLITCH(0); /* Ultra */
1112
1113 if (wval1 >= 8)
1114 {
1115 wval1--; /* Timing computation differs by 1 from FAST_SCSI */
1116 bval = FAST_CLK; /* fast clock / normal scsi */
1117 pDCB->CtrlR4 |= pACB->glitch_cfg; /* glitch eater */
1118 }
1119
1120 pDCB->CtrlR3 = bval;
1121 pDCB->SyncPeriod = (u8)wval1;
1122
1123 if ((oldsyncperiod != wval1 || oldsyncoffset != pDCB->SyncOffset) && pDCB->TargetLUN == 0)
1124 {
1125 if (! (bval & FAST_SCSI)) wval1++;
1126 printk (KERN_INFO "DC390: Target %i: Sync transfer %i.%1i MHz, Offset %i\n", pDCB->TargetID,
1127 40/wval1, ((40%wval1)*10+wval1/2)/wval1, pDCB->SyncOffset & 0x0f);
1128 }
1129
1130 dc390_reprog (pACB, pDCB);
1131}
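The period computation above converts the SDTR period factor (units of 4 ns) into cycles of the chip's 25 ns (40 MHz) clock, rounding up. A worked example:

    /* Target answers with period factor 50:
     *   wval  = 50 << 2  = 200 ns requested period
     *   wval -= 3        = 197
     *   wval1 = 197 / 25 = 7 remainder 22  ->  rounded up to 8
     * 8 cycles x 25 ns = 200 ns period, i.e. 5 MHz, matching the
     * printk above (40 / 8 = 5 MHz).
     */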
1132
1133
1134/* handle RESTORE_PTR */
1135/* This doesn't look very healthy... to-be-fixed */
1136static void
1137dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
1138{
1139 struct scsi_cmnd *pcmd = pSRB->pcmd;
1140 struct scatterlist *psgl;
1141 pSRB->TotalXferredLen = 0;
1142 pSRB->SGIndex = 0;
1143 if (scsi_sg_count(pcmd)) {
1144 size_t saved;
1145 pSRB->pSegmentList = scsi_sglist(pcmd);
1146 psgl = pSRB->pSegmentList;
1147 //dc390_pci_sync(pSRB);
1148
1149 while (pSRB->TotalXferredLen + (unsigned long) sg_dma_len(psgl) < pSRB->Saved_Ptr)
1150 {
1151 pSRB->TotalXferredLen += (unsigned long) sg_dma_len(psgl);
1152 pSRB->SGIndex++;
1153 if( pSRB->SGIndex < pSRB->SGcount )
1154 {
1155 pSRB->pSegmentList++;
1156
1157 dc390_start_segment(pSRB);
1158 }
1159 else
1160 pSRB->SGToBeXferLen = 0;
1161 }
1162
1163 saved = pSRB->Saved_Ptr - pSRB->TotalXferredLen;
1164 pSRB->SGToBeXferLen -= saved;
1165 pSRB->SGBusAddr += saved;
1166 printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n",
1167 pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr);
1168
1169 } else {
1170 pSRB->SGcount = 0;
1171 printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n");
1172 }
1173
1174 pSRB->TotalXferredLen = pSRB->Saved_Ptr;
1175}
1176
1177
1178/* According to the docs, the AM53C974 reads the message and
1179 * generates a Successful Operation IRQ before asserting ACK for
1180 * the last byte (how does it know whether it's the last ?) */
1181/* The old code handled it in another way, indicating that on
1182 * every message byte an IRQ is generated and every byte has to
1183 * be manually ACKed. Hmmm ? (KG, 98/11/28) */
1184/* The old implementation was correct. Sigh! */
1185
1186/* Check if the message is complete */
1187static u8 __inline__
1188dc390_MsgIn_complete (u8 *msgbuf, u32 len)
1189{
1190 if (*msgbuf == EXTENDED_MESSAGE)
1191 {
1192 if (len < 2) return 0;
1193 if (len < msgbuf[1] + 2) return 0;
1194 }
1195 else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) // two byte messages
1196 if (len < 2) return 0;
1197 return 1;
1198}
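dc390_MsgIn_0() below calls this test after every byte: extended messages carry their own length in byte 1, codes 0x20-0x2f are two-byte messages, and everything else is a single byte. For an incoming SDTR (01 03 01 32 0f) the walk is:

    /* len 1:   msgbuf[0] == EXTENDED_MESSAGE, len < 2      -> incomplete
     * len 2:   length byte known (3), len < 3 + 2          -> incomplete
     * len 3,4:                        len < 3 + 2          -> incomplete
     * len 5:                          len == msgbuf[1] + 2 -> complete
     */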
1199
1200
1201
1202/* read and eval received messages */
1203static void
1204dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
1205{
1206 struct dc390_dcb* pDCB = pACB->pActiveDCB;
1207
1208 /* Read the msg */
1209
1210 pSRB->MsgInBuf[pACB->MsgLen++] = DC390_read8 (ScsiFifo);
1211 //pSRB->SRBState = 0;
1212
1213 /* Msg complete ? */
1214 if (dc390_MsgIn_complete (pSRB->MsgInBuf, pACB->MsgLen))
1215 {
1216 DEBUG0 (printk (KERN_INFO "DC390: MsgIn:"); dc390_printMsg (pSRB->MsgInBuf, pACB->MsgLen));
1217 /* Now eval the msg */
1218 switch (pSRB->MsgInBuf[0])
1219 {
1220 case DISCONNECT:
1221 pSRB->SRBState = SRB_DISCONNECT; break;
1222
1223 case SIMPLE_QUEUE_TAG:
1224 case HEAD_OF_QUEUE_TAG:
1225 case ORDERED_QUEUE_TAG:
1226 pSRB = dc390_MsgIn_QTag (pACB, pDCB, pSRB->MsgInBuf[1]);
1227 break;
1228
1229 case MESSAGE_REJECT:
1230 DC390_write8 (ScsiCmd, RESET_ATN_CMD);
1231 pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */
1232 if( pSRB->SRBState & DO_SYNC_NEGO)
1233 dc390_MsgIn_set_async (pACB, pSRB);
1234 break;
1235
1236 case EXTENDED_MESSAGE:
1237 /* reject every extended msg but SDTR */
1238 if (pSRB->MsgInBuf[1] != 3 || pSRB->MsgInBuf[2] != EXTENDED_SDTR)
1239 dc390_MsgIn_reject (pACB, pSRB);
1240 else
1241 {
1242 if (pSRB->MsgInBuf[3] == 0 || pSRB->MsgInBuf[4] == 0)
1243 dc390_MsgIn_set_async (pACB, pSRB);
1244 else
1245 dc390_MsgIn_set_sync (pACB, pSRB);
1246 }
1247
1248 // nothing has to be done
1249 case COMMAND_COMPLETE: break;
1250
1251 // SAVE POINTER may be ignored as we have the struct dc390_srb* associated with the
1252 // scsi command. Thanks, Gerard, for pointing it out.
1253 case SAVE_POINTERS:
1254 pSRB->Saved_Ptr = pSRB->TotalXferredLen;
1255 break;
1256 // The device might want to restart transfer with a RESTORE
1257 case RESTORE_POINTERS:
1258 DEBUG0(printk ("DC390: RESTORE POINTER message received ... try to handle\n"));
1259 dc390_restore_ptr (pACB, pSRB);
1260 break;
1261
1262 // reject unknown messages
1263 default: dc390_MsgIn_reject (pACB, pSRB);
1264 }
1265
1266 /* Clear counter and MsgIn state */
1267 pSRB->SRBState &= ~SRB_MSGIN;
1268 pACB->MsgLen = 0;
1269 }
1270
1271 *psstatus = SCSI_NOP0;
1272 DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD);
1273 //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
1274}
1275
1276
1277static void
1278dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
1279{
1280 unsigned long lval;
1281 struct dc390_dcb* pDCB = pACB->pActiveDCB;
1282
1283 if (pSRB == pACB->pTmpSRB)
1284 {
1285 if (pDCB)
1286 printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (%02i-%i)\n", pDCB->TargetID, pDCB->TargetLUN);
1287 else
1288 printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (DCB 0!)\n");
1289
1290 /* Try to recover - some broken disks react badly to tagged INQUIRY */
1291 if (pDCB && pACB->scan_devices && pDCB->GoingSRBCnt == 1) {
1292 pSRB = pDCB->pGoingSRB;
1293 pDCB->pActiveSRB = pSRB;
1294 } else {
1295 pSRB->pSRBDCB = pDCB;
1296 dc390_EnableMsgOut_Abort(pACB, pSRB);
1297 if (pDCB)
1298 pDCB->DCBFlag |= ABORT_DEV_;
1299 return;
1300 }
1301 }
1302
1303 if( pSRB->SGIndex < pSRB->SGcount )
1304 {
1305 DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
1306 if( !pSRB->SGToBeXferLen )
1307 {
1308 dc390_start_segment(pSRB);
1309
1310 DEBUG1(printk (KERN_DEBUG " DC390: Next SG segment."));
1311 }
1312 lval = pSRB->SGToBeXferLen;
1313 DEBUG1(printk (KERN_DEBUG " DC390: Start transfer: %li bytes (address %08lx)\n", lval, pSRB->SGBusAddr));
1314 DC390_write8 (CtcReg_Low, (u8) lval);
1315 lval >>= 8;
1316 DC390_write8 (CtcReg_Mid, (u8) lval);
1317 lval >>= 8;
1318 DC390_write8 (CtcReg_High, (u8) lval);
1319
1320 DC390_write32 (DMA_XferCnt, pSRB->SGToBeXferLen);
1321 DC390_write32 (DMA_XferAddr, pSRB->SGBusAddr);
1322
1323 //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
1324 pSRB->SRBState = SRB_DATA_XFER;
1325
1326 DC390_write8 (ScsiCmd, DMA_COMMAND+INFO_XFER_CMD);
1327
1328 DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
1329 //DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT));
1330 //DEBUG1(printk (KERN_DEBUG "DC390: DMA_Status: %02x\n", DC390_read8 (DMA_Status)));
1331 //DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT));
1332 }
1333 else /* xfer pad */
1334 {
1335 if( pSRB->SGcount )
1336 {
1337 pSRB->AdaptStatus = H_OVER_UNDER_RUN;
1338 pSRB->SRBStatus |= OVER_RUN;
1339 DEBUG0(printk (KERN_WARNING " DC390: Overrun -"));
1340 }
1341 DEBUG0(printk (KERN_WARNING " Clear transfer pad \n"));
1342 DC390_write8 (CtcReg_Low, 0);
1343 DC390_write8 (CtcReg_Mid, 0);
1344 DC390_write8 (CtcReg_High, 0);
1345
1346 pSRB->SRBState |= SRB_XFERPAD;
1347 DC390_write8 (ScsiCmd, DMA_COMMAND+XFER_PAD_BYTE);
1348/*
1349 DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
1350 DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
1351*/
1352 }
1353}
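The transfer counter programmed above is a 24-bit quantity split across three 8-bit registers, low byte first. The same split written out in one place (illustrative):

    u32 len  = pSRB->SGToBeXferLen;     /* must fit in 24 bits */
    u8  low  = len         & 0xff;      /* -> CtcReg_Low  */
    u8  mid  = (len >> 8)  & 0xff;      /* -> CtcReg_Mid  */
    u8  high = (len >> 16) & 0xff;      /* -> CtcReg_High */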
1354
1355
1356static void
1357dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
1358{
1359 dc390_DataIO_Comm (pACB, pSRB, WRITE_DIRECTION);
1360}
1361
1362static void
1363dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
1364{
1365 dc390_DataIO_Comm (pACB, pSRB, READ_DIRECTION);
1366}
1367
1368static void
1369dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
1370{
1371 struct dc390_dcb* pDCB;
1372 u8 i, cnt;
1373 u8 *ptr;
1374
1375 DC390_write8 (ScsiCmd, RESET_ATN_CMD);
1376 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
1377 if( !(pSRB->SRBFlag & AUTO_REQSENSE) )
1378 {
1379 cnt = (u8) pSRB->pcmd->cmd_len;
1380 ptr = (u8 *) pSRB->pcmd->cmnd;
1381 for(i=0; i < cnt; i++)
1382 DC390_write8 (ScsiFifo, *(ptr++));
1383 }
1384 else
1385 {
1386 DC390_write8 (ScsiFifo, REQUEST_SENSE);
1387 pDCB = pACB->pActiveDCB;
1388 DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);
1389 DC390_write8 (ScsiFifo, 0);
1390 DC390_write8 (ScsiFifo, 0);
1391 DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);
1392 DC390_write8 (ScsiFifo, 0);
1393 DEBUG0(printk(KERN_DEBUG "DC390: AutoReqSense (CmndPhase)!\n"));
1394 }
1395 pSRB->SRBState = SRB_COMMAND;
1396 DC390_write8 (ScsiCmd, INFO_XFER_CMD);
1397}
1398
1399static void
1400dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
1401{
1402 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
1403 pSRB->SRBState = SRB_STATUS;
1404 DC390_write8 (ScsiCmd, INITIATOR_CMD_CMPLTE);
1405 //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
1406}
1407
1408static void
1409dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
1410{
1411 u8 bval, i, cnt;
1412 u8 *ptr;
1413 struct dc390_dcb* pDCB;
1414
1415 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
1416 pDCB = pACB->pActiveDCB;
1417 if( !(pSRB->SRBState & SRB_MSGOUT) )
1418 {
1419 cnt = pSRB->MsgCnt;
1420 if( cnt )
1421 {
1422 ptr = (u8 *) pSRB->MsgOutBuf;
1423 for(i=0; i < cnt; i++)
1424 DC390_write8 (ScsiFifo, *(ptr++));
1425 pSRB->MsgCnt = 0;
1426 if( (pDCB->DCBFlag & ABORT_DEV_) &&
1427 (pSRB->MsgOutBuf[0] == ABORT) )
1428 pSRB->SRBState = SRB_ABORT_SENT;
1429 }
1430 else
1431 {
1432 bval = ABORT; /* ??? MSG_NOP */
1433 if( (pSRB->pcmd->cmnd[0] == INQUIRY ) ||
1434 (pSRB->pcmd->cmnd[0] == REQUEST_SENSE) ||
1435 (pSRB->SRBFlag & AUTO_REQSENSE) )
1436 {
1437 if( pDCB->SyncMode & SYNC_ENABLE )
1438 goto mop1;
1439 }
1440 DC390_write8 (ScsiFifo, bval);
1441 }
1442 DC390_write8 (ScsiCmd, INFO_XFER_CMD);
1443 }
1444 else
1445 {
1446mop1:
1447 printk (KERN_ERR "DC390: OLD Sync Nego code triggered! (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN);
1448 DC390_write8 (ScsiFifo, EXTENDED_MESSAGE);
1449 DC390_write8 (ScsiFifo, 3); /* ;length of extended msg */
1450 DC390_write8 (ScsiFifo, EXTENDED_SDTR); /* ; sync nego */
1451 DC390_write8 (ScsiFifo, pDCB->NegoPeriod);
1452 if (pDCB->SyncOffset & 0x0f)
1453 DC390_write8 (ScsiFifo, pDCB->SyncOffset);
1454 else
1455 DC390_write8 (ScsiFifo, SYNC_NEGO_OFFSET);
1456 pSRB->SRBState |= DO_SYNC_NEGO;
1457 DC390_write8 (ScsiCmd, INFO_XFER_CMD);
1458 }
1459}
1460
1461static void
1462dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
1463{
1464 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
1465 if( !(pSRB->SRBState & SRB_MSGIN) )
1466 {
1467 pSRB->SRBState &= ~SRB_DISCONNECT;
1468 pSRB->SRBState |= SRB_MSGIN;
1469 }
1470 DC390_write8 (ScsiCmd, INFO_XFER_CMD);
1471 //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
1472}
1473
1474static void
1475dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
1476{
1477}
1478
1479static void
1480dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
1481{
1482}
1483
1484
1485static void
1486dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB )
1487{
1488 u8 bval, i, cnt;
1489 struct dc390_dcb* ptr;
1490
1491 if( !(pDCB->TargetLUN) )
1492 {
1493 if( !pACB->scan_devices )
1494 {
1495 ptr = pACB->pLinkDCB;
1496 cnt = pACB->DCBCnt;
1497 bval = pDCB->TargetID;
1498 for(i=0; i<cnt; i++)
1499 {
1500 if( ptr->TargetID == bval )
1501 {
1502 ptr->SyncPeriod = pDCB->SyncPeriod;
1503 ptr->SyncOffset = pDCB->SyncOffset;
1504 ptr->CtrlR3 = pDCB->CtrlR3;
1505 ptr->CtrlR4 = pDCB->CtrlR4;
1506 ptr->SyncMode = pDCB->SyncMode;
1507 }
1508 ptr = ptr->pNextDCB;
1509 }
1510 }
1511 }
1512 return;
1513}
1514
1515
1516static void
1517dc390_Disconnect( struct dc390_acb* pACB )
1518{
1519 struct dc390_dcb *pDCB;
1520 struct dc390_srb *pSRB, *psrb;
1521 u8 i, cnt;
1522
1523 DEBUG0(printk(KERN_INFO "DISC,"));
1524
1525 if (!pACB->Connected) printk(KERN_ERR "DC390: Disconnect not-connected bus?\n");
1526 pACB->Connected = 0;
1527 pDCB = pACB->pActiveDCB;
1528 if (!pDCB)
1529 {
1530 DEBUG0(printk(KERN_ERR "ACB:%p->ActiveDCB:%p IOPort:%04x IRQ:%02x !\n",\
1531 pACB, pDCB, pACB->IOPortBase, pACB->IRQLevel));
1532 mdelay(400);
1533 DC390_read8 (INT_Status); /* Reset Pending INT */
1534 DC390_write8 (ScsiCmd, EN_SEL_RESEL);
1535 return;
1536 }
1537 DC390_write8 (ScsiCmd, EN_SEL_RESEL);
1538 pSRB = pDCB->pActiveSRB;
1539 pACB->pActiveDCB = NULL;
1540 pSRB->ScsiPhase = SCSI_NOP0;
1541 if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
1542 pSRB->SRBState = 0;
1543 else if( pSRB->SRBState & SRB_ABORT_SENT )
1544 {
1545 pDCB->TagMask = 0;
1546 pDCB->DCBFlag = 0;
1547 cnt = pDCB->GoingSRBCnt;
1548 pDCB->GoingSRBCnt = 0;
1549 pSRB = pDCB->pGoingSRB;
1550 for( i=0; i < cnt; i++)
1551 {
1552 psrb = pSRB->pNextSRB;
1553 dc390_Free_insert (pACB, pSRB);
1554 pSRB = psrb;
1555 }
1556 pDCB->pGoingSRB = NULL;
1557 }
1558 else
1559 {
1560 if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) ||
1561 !(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) )
1562 { /* Selection time out */
1563 pSRB->AdaptStatus = H_SEL_TIMEOUT;
1564 pSRB->TargetStatus = 0;
1565 goto disc1;
1566 }
1567 else if (!(pSRB->SRBState & SRB_DISCONNECT) && (pSRB->SRBState & SRB_COMPLETED))
1568 {
1569disc1:
1570 dc390_freetag (pDCB, pSRB);
1571 pDCB->pActiveSRB = NULL;
1572 pSRB->SRBState = SRB_FREE;
1573 dc390_SRBdone( pACB, pDCB, pSRB);
1574 }
1575 }
1576 pACB->MsgLen = 0;
1577}
1578
1579
1580static void
1581dc390_Reselect( struct dc390_acb* pACB )
1582{
1583 struct dc390_dcb* pDCB;
1584 struct dc390_srb* pSRB;
1585 u8 id, lun;
1586
1587 DEBUG0(printk(KERN_INFO "RSEL,"));
1588 pACB->Connected = 1;
1589 pDCB = pACB->pActiveDCB;
1590 if( pDCB )
1591 { /* Arbitration lost but Reselection won */
1592 DEBUG0(printk ("DC390: (ActiveDCB != 0: Arb. lost but resel. won)!\n"));
1593 pSRB = pDCB->pActiveSRB;
1594 if( !( pACB->scan_devices ) )
1595 {
1596 struct scsi_cmnd *pcmd = pSRB->pcmd;
1597 scsi_set_resid(pcmd, scsi_bufflen(pcmd));
1598 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
1599 dc390_Going_remove(pDCB, pSRB);
1600 dc390_Free_insert(pACB, pSRB);
1601 pcmd->scsi_done (pcmd);
1602 DEBUG0(printk(KERN_DEBUG"DC390: Return SRB %p to free\n", pSRB));
1603 }
1604 }
1605 /* Get ID */
1606 lun = DC390_read8 (ScsiFifo);
1607 DEBUG0(printk ("Dev %02x,", lun));
1608 if (!(lun & (1 << pACB->pScsiHost->this_id)))
1609 printk (KERN_ERR "DC390: Reselection must select host adapter: %02x!\n", lun);
1610 else
1611 lun ^= 1 << pACB->pScsiHost->this_id; /* Mask AdapterID */
1612 id = 0; while (lun >>= 1) id++;
1613 /* Get LUN */
1614 lun = DC390_read8 (ScsiFifo);
1615 if (!(lun & IDENTIFY_BASE)) printk (KERN_ERR "DC390: Resel: Expect identify message!\n");
1616 lun &= 7;
1617 DEBUG0(printk ("(%02i-%i),", id, lun));
1618 pDCB = dc390_findDCB (pACB, id, lun);
1619 if (!pDCB)
1620 {
1621 printk (KERN_ERR "DC390: Reselect from non existing device (%02i-%i)\n",
1622 id, lun);
1623 return;
1624 }
1625 pACB->pActiveDCB = pDCB;
1626 /* TagQ: We expect a message soon, so never mind the exact SRB */
1627 if( pDCB->SyncMode & EN_TAG_QUEUEING )
1628 {
1629 pSRB = pACB->pTmpSRB;
1630 pDCB->pActiveSRB = pSRB;
1631 }
1632 else
1633 {
1634 pSRB = pDCB->pActiveSRB;
1635 if( !pSRB || !(pSRB->SRBState & SRB_DISCONNECT) )
1636 {
1637 pSRB= pACB->pTmpSRB;
1638 pSRB->SRBState = SRB_UNEXPECT_RESEL;
1639 printk (KERN_ERR "DC390: Reselect without outstanding cmnd (%02i-%i)\n",
1640 id, lun);
1641 pDCB->pActiveSRB = pSRB;
1642 dc390_EnableMsgOut_Abort ( pACB, pSRB );
1643 }
1644 else
1645 {
1646 if( pDCB->DCBFlag & ABORT_DEV_ )
1647 {
1648 pSRB->SRBState = SRB_ABORT_SENT;
1649 printk (KERN_INFO "DC390: Reselect: Abort (%02i-%i)\n",
1650 id, lun);
1651 dc390_EnableMsgOut_Abort( pACB, pSRB );
1652 }
1653 else
1654 pSRB->SRBState = SRB_DATA_XFER;
1655 }
1656 }
1657
1658 DEBUG1(printk (KERN_DEBUG "Resel SRB(%p): TagNum (%02x)\n", pSRB, pSRB->TagNumber));
1659 pSRB->ScsiPhase = SCSI_NOP0;
1660 DC390_write8 (Scsi_Dest_ID, pDCB->TargetID);
1661 DC390_write8 (Sync_Period, pDCB->SyncPeriod);
1662 DC390_write8 (Sync_Offset, pDCB->SyncOffset);
1663 DC390_write8 (CtrlReg1, pDCB->CtrlR1);
1664 DC390_write8 (CtrlReg3, pDCB->CtrlR3);
1665 DC390_write8 (CtrlReg4, pDCB->CtrlR4); /* ; Glitch eater */
1666 DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD); /* ;to release the /ACK signal */
1667}
1668
1669static int __inline__
1670dc390_RequestSense(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
1671{
1672 struct scsi_cmnd *pcmd;
1673
1674 pcmd = pSRB->pcmd;
1675
1676 REMOVABLEDEBUG(printk(KERN_INFO "DC390: RequestSense(Cmd %02x, Id %02x, LUN %02x)\n",\
1677 pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN));
1678
1679 pSRB->SRBFlag |= AUTO_REQSENSE;
1680 pSRB->SavedTotXLen = pSRB->TotalXferredLen;
1681 pSRB->AdaptStatus = 0;
1682 pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */
1683
1684    /* We are called from SRBdone; the original PCI mapping has already
1685     * been removed, and a new one is set up by StartSCSI */
1686 pSRB->SGIndex = 0;
1687
1688 pSRB->TotalXferredLen = 0;
1689 pSRB->SGToBeXferLen = 0;
1690 return dc390_StartSCSI(pACB, pDCB, pSRB);
1691}
1692
1693
1694static void
1695dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB )
1696{
1697 u8 status;
1698 struct scsi_cmnd *pcmd;
1699
1700 pcmd = pSRB->pcmd;
1701 /* KG: Moved pci_unmap here */
1702 dc390_pci_unmap(pSRB);
1703
1704 status = pSRB->TargetStatus;
1705
1706 DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p\n", status, pcmd->result, pSRB));
1707 if(pSRB->SRBFlag & AUTO_REQSENSE)
1708 { /* Last command was a Request Sense */
1709 pSRB->SRBFlag &= ~AUTO_REQSENSE;
1710 pSRB->AdaptStatus = 0;
1711 pSRB->TargetStatus = SAM_STAT_CHECK_CONDITION;
1712
1713 //pcmd->result = MK_RES(DRIVER_SENSE,DID_OK,0,status);
1714 if (status == SAM_STAT_CHECK_CONDITION)
1715 pcmd->result = MK_RES_LNX(0, DID_BAD_TARGET, 0, /*CHECK_CONDITION*/0);
1716 else /* Retry */
1717 {
1718 if( pSRB->pcmd->cmnd[0] == TEST_UNIT_READY /* || pSRB->pcmd->cmnd[0] == START_STOP */)
1719 {
1720 /* Don't retry on TEST_UNIT_READY */
1721 pcmd->result = MK_RES_LNX(DRIVER_SENSE, DID_OK, 0, SAM_STAT_CHECK_CONDITION);
1722 REMOVABLEDEBUG(printk(KERN_INFO "Cmd=%02x, Result=%08x, XferL=%08x\n",pSRB->pcmd->cmnd[0],\
1723 (u32) pcmd->result, (u32) pSRB->TotalXferredLen));
1724 } else {
1725 SET_RES_DRV(pcmd->result, DRIVER_SENSE);
1726 //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8);
1727 DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun));
1728 pSRB->TotalXferredLen = 0;
1729 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
1730 }
1731 }
1732 goto cmd_done;
1733 }
1734 if( status )
1735 {
1736 if (status == SAM_STAT_CHECK_CONDITION)
1737 {
1738 if (dc390_RequestSense(pACB, pDCB, pSRB)) {
1739 SET_RES_DID(pcmd->result, DID_ERROR);
1740 goto cmd_done;
1741 }
1742 return;
1743 }
1744 else if (status == SAM_STAT_TASK_SET_FULL)
1745 {
1746 scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1);
1747 DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun));
1748 pSRB->TotalXferredLen = 0;
1749 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
1750 }
1751 else if (status == SAM_STAT_BUSY &&
1752 (pcmd->cmnd[0] == TEST_UNIT_READY || pcmd->cmnd[0] == INQUIRY) &&
1753 pACB->scan_devices)
1754 {
1755 pSRB->AdaptStatus = 0;
1756 pSRB->TargetStatus = status;
1757 pcmd->result = MK_RES(0,0,pSRB->EndMessage,/*status*/0);
1758 }
1759 else
1760 { /* Another error */
1761 pSRB->TotalXferredLen = 0;
1762 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
1763 goto cmd_done;
1764 }
1765 }
1766 else
1767 { /* Target status == 0 */
1768 status = pSRB->AdaptStatus;
1769 if (status == H_OVER_UNDER_RUN)
1770 {
1771 pSRB->TargetStatus = 0;
1772 SET_RES_DID(pcmd->result,DID_OK);
1773 SET_RES_MSG(pcmd->result,pSRB->EndMessage);
1774 }
1775 else if (status == H_SEL_TIMEOUT)
1776 {
1777 pcmd->result = MK_RES(0, DID_NO_CONNECT, 0, 0);
1778 /* Devices are removed below ... */
1779 }
1780 else if( pSRB->SRBStatus & PARITY_ERROR)
1781 {
1782 //pcmd->result = MK_RES(0,DID_PARITY,pSRB->EndMessage,0);
1783 SET_RES_DID(pcmd->result,DID_PARITY);
1784 SET_RES_MSG(pcmd->result,pSRB->EndMessage);
1785 }
1786 else /* No error */
1787 {
1788 pSRB->AdaptStatus = 0;
1789 pSRB->TargetStatus = 0;
1790 SET_RES_DID(pcmd->result,DID_OK);
1791 }
1792 }
1793
1794cmd_done:
1795 scsi_set_resid(pcmd, scsi_bufflen(pcmd) - pSRB->TotalXferredLen);
1796
1797 dc390_Going_remove (pDCB, pSRB);
1798 /* Add to free list */
1799 dc390_Free_insert (pACB, pSRB);
1800
1801 DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done\n"));
1802 pcmd->scsi_done (pcmd);
1803
1804 return;
1805}
1806
1807
1808/* Remove all SRBs from Going list and inform midlevel */
1809static void
1810dc390_DoingSRB_Done(struct dc390_acb* pACB, struct scsi_cmnd *cmd)
1811{
1812 struct dc390_dcb *pDCB, *pdcb;
1813 struct dc390_srb *psrb, *psrb2;
1814 int i;
1815 struct scsi_cmnd *pcmd;
1816
1817 pDCB = pACB->pLinkDCB;
1818 pdcb = pDCB;
1819 if (! pdcb) return;
1820 do
1821 {
1822 psrb = pdcb->pGoingSRB;
1823 for (i = 0; i < pdcb->GoingSRBCnt; i++)
1824 {
1825 psrb2 = psrb->pNextSRB;
1826 pcmd = psrb->pcmd;
1827 dc390_Free_insert (pACB, psrb);
1828 psrb = psrb2;
1829 }
1830 pdcb->GoingSRBCnt = 0;
1831 pdcb->pGoingSRB = NULL;
1832 pdcb->TagMask = 0;
1833 pdcb = pdcb->pNextDCB;
1834 } while( pdcb != pDCB );
1835}
1836
1837
1838static void
1839dc390_ResetSCSIBus( struct dc390_acb* pACB )
1840{
1841 //DC390_write8 (ScsiCmd, RST_DEVICE_CMD);
1842 //udelay (250);
1843 //DC390_write8 (ScsiCmd, NOP_CMD);
1844
1845 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
1846 DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
1847 DC390_write8 (ScsiCmd, RST_SCSI_BUS_CMD);
1848 pACB->Connected = 0;
1849
1850 return;
1851}
1852
1853static void
1854dc390_ScsiRstDetect( struct dc390_acb* pACB )
1855{
1856 printk ("DC390: Rst_Detect: laststat = %08x\n", dc390_laststatus);
1857 //DEBUG0(printk(KERN_INFO "RST_DETECT,"));
1858
1859 DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
1860 /* Unlock before ? */
1861    /* delay 1 ms */
1862 udelay (1000);
1863 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
1864 pACB->last_reset = jiffies + 5*HZ/2
1865 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
1866 pACB->Connected = 0;
1867
1868 if( pACB->ACBFlag & RESET_DEV )
1869 pACB->ACBFlag |= RESET_DONE;
1870 else
1871    { /* Reset was issued by somebody else */
1872 pACB->ACBFlag |= RESET_DETECT;
1873
1874 dc390_ResetDevParam( pACB );
1875 dc390_DoingSRB_Done( pACB, NULL);
1876 //dc390_RecoverSRB( pACB );
1877 pACB->pActiveDCB = NULL;
1878 pACB->ACBFlag = 0;
1879 }
1880 return;
1881}
1882
1883static int DC390_queuecommand_lck(struct scsi_cmnd *cmd,
1884 void (*done)(struct scsi_cmnd *))
1885{
1886 struct scsi_device *sdev = cmd->device;
1887 struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata;
1888 struct dc390_dcb *dcb = sdev->hostdata;
1889 struct dc390_srb *srb;
1890
1891 if (sdev->queue_depth <= dcb->GoingSRBCnt)
1892 goto device_busy;
1893 if (acb->pActiveDCB)
1894 goto host_busy;
1895 if (acb->ACBFlag & (RESET_DETECT|RESET_DONE|RESET_DEV))
1896 goto host_busy;
1897
1898 srb = acb->pFreeSRB;
1899 if (unlikely(srb == NULL))
1900 goto host_busy;
1901
1902 cmd->scsi_done = done;
1903 cmd->result = 0;
1904 acb->Cmds++;
1905
1906 acb->pFreeSRB = srb->pNextSRB;
1907 srb->pNextSRB = NULL;
1908
1909 srb->pSRBDCB = dcb;
1910 srb->pcmd = cmd;
1911 cmd->host_scribble = (char *)srb;
1912
1913 srb->SGIndex = 0;
1914 srb->AdaptStatus = 0;
1915 srb->TargetStatus = 0;
1916 srb->MsgCnt = 0;
1917
1918 srb->SRBStatus = 0;
1919 srb->SRBFlag = 0;
1920 srb->SRBState = 0;
1921 srb->TotalXferredLen = 0;
1922 srb->SGBusAddr = 0;
1923 srb->SGToBeXferLen = 0;
1924 srb->ScsiPhase = 0;
1925 srb->EndMessage = 0;
1926 srb->TagNumber = SCSI_NO_TAG;
1927
1928 if (dc390_StartSCSI(acb, dcb, srb)) {
1929 dc390_Free_insert(acb, srb);
1930 goto host_busy;
1931 }
1932
1933 dc390_Going_append(dcb, srb);
1934
1935 return 0;
1936
1937 host_busy:
1938 return SCSI_MLQUEUE_HOST_BUSY;
1939
1940 device_busy:
1941 return SCSI_MLQUEUE_DEVICE_BUSY;
1942}
1943
1944static DEF_SCSI_QCMD(DC390_queuecommand)
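/*
 * For context: DEF_SCSI_QCMD (from <scsi/scsi_host.h>) generates the entry
 * point the midlayer actually calls, taking the host lock around the _lck
 * variant above. It expands to roughly:
 *
 *	static int DC390_queuecommand(struct Scsi_Host *shost,
 *				      struct scsi_cmnd *cmd)
 *	{
 *		unsigned long irq_flags;
 *		int rc;
 *
 *		spin_lock_irqsave(shost->host_lock, irq_flags);
 *		rc = DC390_queuecommand_lck(cmd, cmd->scsi_done);
 *		spin_unlock_irqrestore(shost->host_lock, irq_flags);
 *		return rc;
 *	}
 */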
1945
1946static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
1947{
1948 struct pci_dev *pdev;
1949 u16 pstat;
1950
1951 if (!pDCB) pDCB = pACB->pActiveDCB;
1952 if (!pSRB && pDCB) pSRB = pDCB->pActiveSRB;
1953
1954 if (pSRB)
1955 {
1956 printk ("DC390: SRB: Xferred %08lx, Remain %08lx, State %08x, Phase %02x\n",
1957 pSRB->TotalXferredLen, pSRB->SGToBeXferLen, pSRB->SRBState,
1958 pSRB->ScsiPhase);
1959    printk ("DC390: AdapterStatus: %02x, SRB Status %02x\n", pSRB->AdaptStatus, pSRB->SRBStatus);
1960 }
1961 printk ("DC390: Status of last IRQ (DMA/SC/Int/IRQ): %08x\n", dc390_laststatus);
1962 printk ("DC390: Register dump: SCSI block:\n");
1963 printk ("DC390: XferCnt Cmd Stat IntS IRQS FFIS Ctl1 Ctl2 Ctl3 Ctl4\n");
1964 printk ("DC390: %06x %02x %02x %02x",
1965 DC390_read8(CtcReg_Low) + (DC390_read8(CtcReg_Mid) << 8) + (DC390_read8(CtcReg_High) << 16),
1966 DC390_read8(ScsiCmd), DC390_read8(Scsi_Status), DC390_read8(Intern_State));
1967 printk (" %02x %02x %02x %02x %02x %02x\n",
1968 DC390_read8(INT_Status), DC390_read8(Current_Fifo), DC390_read8(CtrlReg1),
1969 DC390_read8(CtrlReg2), DC390_read8(CtrlReg3), DC390_read8(CtrlReg4));
1970 DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT);
1971 if (DC390_read8(Current_Fifo) & 0x1f)
1972 {
1973 printk ("DC390: FIFO:");
1974 while (DC390_read8(Current_Fifo) & 0x1f) printk (" %02x", DC390_read8(ScsiFifo));
1975 printk ("\n");
1976 }
1977 printk ("DC390: Register dump: DMA engine:\n");
1978 printk ("DC390: Cmd STrCnt SBusA WrkBC WrkAC Stat SBusCtrl\n");
1979 printk ("DC390: %02x %08x %08x %08x %08x %02x %08x\n",
1980 DC390_read8(DMA_Cmd), DC390_read32(DMA_XferCnt), DC390_read32(DMA_XferAddr),
1981 DC390_read32(DMA_Wk_ByteCntr), DC390_read32(DMA_Wk_AddrCntr),
1982 DC390_read8(DMA_Status), DC390_read32(DMA_ScsiBusCtrl));
1983 DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);
1984
1985 pdev = pACB->pdev;
1986 pci_read_config_word(pdev, PCI_STATUS, &pstat);
1987 printk ("DC390: Register dump: PCI Status: %04x\n", pstat);
1988 printk ("DC390: In case of driver trouble read Documentation/scsi/tmscsim.txt\n");
1989}
1990
1991
1992static int DC390_abort(struct scsi_cmnd *cmd)
1993{
1994 struct dc390_acb *pACB = (struct dc390_acb*) cmd->device->host->hostdata;
1995 struct dc390_dcb *pDCB = (struct dc390_dcb*) cmd->device->hostdata;
1996
1997 scmd_printk(KERN_WARNING, cmd, "DC390: Abort command\n");
1998
1999 /* abort() is too stupid for already sent commands at the moment.
2000 * If it's called we are in trouble anyway, so let's dump some info
2001 * into the syslog at least. (KG, 98/08/20,99/06/20) */
2002 dc390_dumpinfo(pACB, pDCB, NULL);
2003
2004 pDCB->DCBFlag |= ABORT_DEV_;
2005 printk(KERN_INFO "DC390: Aborted.\n");
2006
2007 return FAILED;
2008}
2009
2010
2011static void dc390_ResetDevParam( struct dc390_acb* pACB )
2012{
2013 struct dc390_dcb *pDCB, *pdcb;
2014
2015 pDCB = pACB->pLinkDCB;
2016 if (! pDCB) return;
2017 pdcb = pDCB;
2018 do
2019 {
2020 pDCB->SyncMode &= ~SYNC_NEGO_DONE;
2021 pDCB->SyncPeriod = 0;
2022 pDCB->SyncOffset = 0;
2023 pDCB->TagMask = 0;
2024 pDCB->CtrlR3 = FAST_CLK;
2025 pDCB->CtrlR4 &= NEGATE_REQACKDATA | CTRL4_RESERVED | NEGATE_REQACK;
2026 pDCB->CtrlR4 |= pACB->glitch_cfg;
2027 pDCB = pDCB->pNextDCB;
2028 }
2029 while( pdcb != pDCB );
2030 pACB->ACBFlag &= ~(RESET_DEV | RESET_DONE | RESET_DETECT);
2031
2032}
2033
2034static int DC390_bus_reset (struct scsi_cmnd *cmd)
2035{
2036 struct dc390_acb* pACB = (struct dc390_acb*) cmd->device->host->hostdata;
2037 u8 bval;
2038
2039 spin_lock_irq(cmd->device->host->host_lock);
2040
2041 bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST;
2042 DC390_write8(CtrlReg1, bval); /* disable IRQ on bus reset */
2043
2044 pACB->ACBFlag |= RESET_DEV;
2045 dc390_ResetSCSIBus(pACB);
2046
2047 dc390_ResetDevParam(pACB);
2048 mdelay(1);
2049 pACB->last_reset = jiffies + 3*HZ/2
2050 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
2051
2052 DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
2053 DC390_read8(INT_Status); /* Reset Pending INT */
2054
2055 dc390_DoingSRB_Done(pACB, cmd);
2056
2057 pACB->pActiveDCB = NULL;
2058 pACB->ACBFlag = 0;
2059
2060 bval = DC390_read8(CtrlReg1) & ~DIS_INT_ON_SCSI_RST;
2061 DC390_write8(CtrlReg1, bval); /* re-enable interrupt */
2062
2063 spin_unlock_irq(cmd->device->host->host_lock);
2064
2065 return SUCCESS;
2066}
2067
2068/**
2069 * dc390_slave_alloc - Called by the scsi mid layer to tell us about a new
2070 * scsi device that we need to deal with.
2071 *
2072 * @scsi_device: The new scsi device that we need to handle.
2073 */
2074static int dc390_slave_alloc(struct scsi_device *scsi_device)
2075{
2076 struct dc390_acb *pACB = (struct dc390_acb*) scsi_device->host->hostdata;
2077 struct dc390_dcb *pDCB, *pDCB2 = NULL;
2078 uint id = scsi_device->id;
2079 uint lun = scsi_device->lun;
2080
2081 pDCB = kzalloc(sizeof(struct dc390_dcb), GFP_KERNEL);
2082 if (!pDCB)
2083 return -ENOMEM;
2084
2085 if (!pACB->DCBCnt++) {
2086 pACB->pLinkDCB = pDCB;
2087 pACB->pDCBRunRobin = pDCB;
2088 } else {
2089 pACB->pLastDCB->pNextDCB = pDCB;
2090 }
2091
2092 pDCB->pNextDCB = pACB->pLinkDCB;
2093 pACB->pLastDCB = pDCB;
2094
2095 pDCB->pDCBACB = pACB;
2096 pDCB->TargetID = id;
2097 pDCB->TargetLUN = lun;
2098
2099 /*
2100     * Some values apply to all LUNs of a target: copy them.
2101     * A cleaner design would keep a separate structure per SCSI ID.
2102 */
2103 if (lun && (pDCB2 = dc390_findDCB(pACB, id, 0))) {
2104 pDCB->DevMode = pDCB2->DevMode;
2105 pDCB->SyncMode = pDCB2->SyncMode & SYNC_NEGO_DONE;
2106 pDCB->SyncPeriod = pDCB2->SyncPeriod;
2107 pDCB->SyncOffset = pDCB2->SyncOffset;
2108 pDCB->NegoPeriod = pDCB2->NegoPeriod;
2109
2110 pDCB->CtrlR3 = pDCB2->CtrlR3;
2111 pDCB->CtrlR4 = pDCB2->CtrlR4;
2112 } else {
2113 u8 index = pACB->AdapterIndex;
2114 PEEprom prom = (PEEprom) &dc390_eepromBuf[index][id << 2];
2115
2116 pDCB->DevMode = prom->EE_MODE1;
2117 pDCB->NegoPeriod =
2118 (dc390_clock_period1[prom->EE_SPEED] * 25) >> 2;
2119 pDCB->CtrlR3 = FAST_CLK;
2120 pDCB->CtrlR4 = pACB->glitch_cfg | CTRL4_RESERVED;
2121 if (dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION)
2122 pDCB->CtrlR4 |= NEGATE_REQACKDATA | NEGATE_REQACK;
2123 }
2124
2125 if (pDCB->DevMode & SYNC_NEGO_)
2126 pDCB->SyncMode |= SYNC_ENABLE;
2127 else {
2128 pDCB->SyncMode = 0;
2129 pDCB->SyncOffset &= ~0x0f;
2130 }
2131
2132 pDCB->CtrlR1 = pACB->pScsiHost->this_id;
2133 if (pDCB->DevMode & PARITY_CHK_)
2134 pDCB->CtrlR1 |= PARITY_ERR_REPO;
2135
2136 pACB->scan_devices = 1;
2137 scsi_device->hostdata = pDCB;
2138 return 0;
2139}
2140
2141/**
2142 * dc390_slave_destroy - Called by the scsi mid layer to tell us about a
2143 * device that is going away.
2144 *
2145 * @scsi_device: The scsi device that we need to remove.
2146 */
2147static void dc390_slave_destroy(struct scsi_device *scsi_device)
2148{
2149 struct dc390_acb* pACB = (struct dc390_acb*) scsi_device->host->hostdata;
2150 struct dc390_dcb* pDCB = (struct dc390_dcb*) scsi_device->hostdata;
2151 struct dc390_dcb* pPrevDCB = pACB->pLinkDCB;
2152
2153 pACB->scan_devices = 0;
2154
2155 BUG_ON(pDCB->GoingSRBCnt > 1);
2156
2157 if (pDCB == pACB->pLinkDCB) {
2158 if (pACB->pLastDCB == pDCB) {
2159 pDCB->pNextDCB = NULL;
2160 pACB->pLastDCB = NULL;
2161 }
2162 pACB->pLinkDCB = pDCB->pNextDCB;
2163 } else {
2164 while (pPrevDCB->pNextDCB != pDCB)
2165 pPrevDCB = pPrevDCB->pNextDCB;
2166 pPrevDCB->pNextDCB = pDCB->pNextDCB;
2167 if (pDCB == pACB->pLastDCB)
2168 pACB->pLastDCB = pPrevDCB;
2169 }
2170
2171 if (pDCB == pACB->pActiveDCB)
2172 pACB->pActiveDCB = NULL;
2173 if (pDCB == pACB->pLinkDCB)
2174 pACB->pLinkDCB = pDCB->pNextDCB;
2175 if (pDCB == pACB->pDCBRunRobin)
2176 pACB->pDCBRunRobin = pDCB->pNextDCB;
2177 kfree(pDCB);
2178
2179 pACB->DCBCnt--;
2180}
2181
2182static int dc390_slave_configure(struct scsi_device *sdev)
2183{
2184 struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata;
2185 struct dc390_dcb *dcb = (struct dc390_dcb *)sdev->hostdata;
2186
2187 acb->scan_devices = 0;
2188
2189 /*
2190     * XXX: Note that while this driver used to call scsi_activate_tcq,
2191 * it never actually set a tag type, so emulate the old behavior.
2192 */
2193 scsi_set_tag_type(sdev, 0);
2194
2195 if (sdev->tagged_supported && (dcb->DevMode & TAG_QUEUEING_)) {
2196 dcb->SyncMode |= EN_TAG_QUEUEING;
2197 scsi_adjust_queue_depth(sdev, acb->TagMaxNum);
2198 }
2199
2200 return 0;
2201}
2202
2203static struct scsi_host_template driver_template = {
2204 .module = THIS_MODULE,
2205 .proc_name = "tmscsim",
2206 .name = DC390_BANNER " V" DC390_VERSION,
2207 .slave_alloc = dc390_slave_alloc,
2208 .slave_configure = dc390_slave_configure,
2209 .slave_destroy = dc390_slave_destroy,
2210 .queuecommand = DC390_queuecommand,
2211 .eh_abort_handler = DC390_abort,
2212 .eh_bus_reset_handler = DC390_bus_reset,
2213 .can_queue = 1,
2214 .this_id = 7,
2215 .sg_tablesize = SG_ALL,
2216 .cmd_per_lun = 1,
2217 .use_clustering = ENABLE_CLUSTERING,
2218 .max_sectors = 0x4000, /* 8MiB = 16 * 1024 * 512 */
2219 .use_blk_tags = 1,
2220};
2221
2222/***********************************************************************
2223 * Functions for access to DC390 EEPROM
2224 * and some to emulate it
2225 *
2226 **********************************************************************/
2227
2228static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
2229{
2230 u8 carryFlag = 1, j = 0x80, bval;
2231 int i;
2232
2233 for (i = 0; i < 9; i++) {
2234 if (carryFlag) {
2235 pci_write_config_byte(pdev, 0x80, 0x40);
2236 bval = 0xc0;
2237 } else
2238 bval = 0x80;
2239
2240 udelay(160);
2241 pci_write_config_byte(pdev, 0x80, bval);
2242 udelay(160);
2243 pci_write_config_byte(pdev, 0x80, 0);
2244 udelay(160);
2245
2246 carryFlag = (cmd & j) ? 1 : 0;
2247 j >>= 1;
2248 }
2249}
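/*
 * The loop above bit-bangs a 9-bit EEPROM command, MSB first, through the
 * chip's PCI config-space interface (register 0x80): one start bit
 * (carryFlag is preset to 1) followed by the 8-bit command byte -- for a
 * 93C46 wired for 16-bit words that is the READ opcode plus a 6-bit word
 * address.
 */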
2250
2251static u16 dc390_eeprom_get_data(struct pci_dev *pdev)
2252{
2253 int i;
2254 u16 wval = 0;
2255 u8 bval;
2256
2257 for (i = 0; i < 16; i++) {
2258 wval <<= 1;
2259
2260 pci_write_config_byte(pdev, 0x80, 0x80);
2261 udelay(160);
2262 pci_write_config_byte(pdev, 0x80, 0x40);
2263 udelay(160);
2264 pci_read_config_byte(pdev, 0x00, &bval);
2265
2266 if (bval == 0x22)
2267 wval |= 1;
2268 }
2269
2270 return wval;
2271}
2272
2273static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
2274{
2275 u8 cmd = EEPROM_READ, i;
2276
2277 for (i = 0; i < 0x40; i++) {
2278 pci_write_config_byte(pdev, 0xc0, 0);
2279 udelay(160);
2280
2281 dc390_eeprom_prepare_read(pdev, cmd++);
2282 *ptr++ = dc390_eeprom_get_data(pdev);
2283
2284 pci_write_config_byte(pdev, 0x80, 0);
2285 pci_write_config_byte(pdev, 0x80, 0);
2286 udelay(160);
2287 }
2288}
2289
2290/* Override EEprom values with explicitly set values */
2291static void dc390_eeprom_override(u8 index)
2292{
2293 u8 *ptr = (u8 *) dc390_eepromBuf[index], id;
2294
2295 /* Adapter Settings */
2296 if (tmscsim[0] != -2)
2297 ptr[EE_ADAPT_SCSI_ID] = (u8)tmscsim[0]; /* Adapter ID */
2298 if (tmscsim[3] != -2)
2299 ptr[EE_MODE2] = (u8)tmscsim[3];
2300 if (tmscsim[5] != -2)
2301 ptr[EE_DELAY] = tmscsim[5]; /* Reset delay */
2302 if (tmscsim[4] != -2)
2303 ptr[EE_TAG_CMD_NUM] = (u8)tmscsim[4]; /* Tagged Cmds */
2304
2305 /* Device Settings */
2306 for (id = 0; id < MAX_SCSI_ID; id++) {
2307 if (tmscsim[2] != -2)
2308 ptr[id << 2] = (u8)tmscsim[2]; /* EE_MODE1 */
2309 if (tmscsim[1] != -2)
2310 ptr[(id << 2) + 1] = (u8)tmscsim[1]; /* EE_Speed */
2311 }
2312}
2313
2314static int tmscsim_def[] = {
2315 7,
2316 0 /* 10MHz */,
2317 PARITY_CHK_ | SEND_START_ | EN_DISCONNECT_ | SYNC_NEGO_ | TAG_QUEUEING_,
2318 MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION | LUN_CHECK,
2319 3 /* 16 Tags per LUN */,
	1 /* 1 s delay after Reset */,
2321};
2322
2323/* Copy defaults over set values where missing */
2324static void dc390_fill_with_defaults (void)
2325{
2326 int i;
2327
2328 for (i = 0; i < 6; i++) {
2329 if (tmscsim[i] < 0 || tmscsim[i] > 255)
2330 tmscsim[i] = tmscsim_def[i];
2331 }
2332
2333 /* Sanity checks */
2334 if (tmscsim[0] > 7)
2335 tmscsim[0] = 7;
2336 if (tmscsim[1] > 7)
2337 tmscsim[1] = 4;
2338 if (tmscsim[4] > 5)
2339 tmscsim[4] = 4;
2340 if (tmscsim[5] > 180)
2341 tmscsim[5] = 180;
2342}
2343
2344static void dc390_check_eeprom(struct pci_dev *pdev, u8 index)
2345{
2346 u8 interpd[] = {1, 3, 5, 10, 16, 30, 60, 120};
2347 u8 EEbuf[128];
2348 u16 *ptr = (u16 *)EEbuf, wval = 0;
2349 int i;
2350
2351 dc390_read_eeprom(pdev, ptr);
2352 memcpy(dc390_eepromBuf[index], EEbuf, EE_ADAPT_SCSI_ID);
2353 memcpy(&dc390_eepromBuf[index][EE_ADAPT_SCSI_ID],
2354 &EEbuf[REAL_EE_ADAPT_SCSI_ID], EE_LEN - EE_ADAPT_SCSI_ID);
2355
2356 dc390_eepromBuf[index][EE_DELAY] = interpd[dc390_eepromBuf[index][EE_DELAY]];
2357
2358 for (i = 0; i < 0x40; i++, ptr++)
2359 wval += *ptr;
2360
2361 /* no Tekram EEprom found */
2362 if (wval != 0x1234) {
2363 int speed;
2364
2365 printk(KERN_INFO "DC390_init: No EEPROM found! Trying default settings ...\n");
2366
2367 /*
2368 * XXX(hch): bogus, because we might have tekram and
2369 * non-tekram hbas in a single machine.
2370 */
2371 dc390_fill_with_defaults();
2372
2373 speed = dc390_clock_speed[tmscsim[1]];
2374 printk(KERN_INFO "DC390: Used defaults: AdaptID=%i, SpeedIdx=%i (%i.%i MHz), "
2375 "DevMode=0x%02x, AdaptMode=0x%02x, TaggedCmnds=%i (%i), DelayReset=%is\n",
2376 tmscsim[0], tmscsim[1], speed / 10, speed % 10,
2377 (u8)tmscsim[2], (u8)tmscsim[3], tmscsim[4], 2 << (tmscsim[4]), tmscsim[5]);
2378 }
2379}
2380
2381static void dc390_init_hw(struct dc390_acb *pACB, u8 index)
2382{
2383 struct Scsi_Host *shost = pACB->pScsiHost;
2384 u8 dstate;
2385
2386 /* Disable SCSI bus reset interrupt */
2387 DC390_write8(CtrlReg1, DIS_INT_ON_SCSI_RST | shost->this_id);
2388
2389 if (pACB->Gmode2 & RST_SCSI_BUS) {
2390 dc390_ResetSCSIBus(pACB);
2391 udelay(1000);
2392 pACB->last_reset = jiffies + HZ/2 +
2393 HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
2394 }
2395
2396 pACB->ACBFlag = 0;
2397
2398 /* Reset Pending INT */
2399 DC390_read8(INT_Status);
2400
2401 /* 250ms selection timeout */
2402 DC390_write8(Scsi_TimeOut, SEL_TIMEOUT);
2403
2404 /* Conversion factor = 0 , 40MHz clock */
2405 DC390_write8(Clk_Factor, CLK_FREQ_40MHZ);
2406
2407 /* NOP cmd - clear command register */
2408 DC390_write8(ScsiCmd, NOP_CMD);
2409
2410 /* Enable Feature and SCSI-2 */
2411 DC390_write8(CtrlReg2, EN_FEATURE+EN_SCSI2_CMD);
2412
2413 /* Fast clock */
2414 DC390_write8(CtrlReg3, FAST_CLK);
2415
2416 /* Negation */
2417    DC390_write8(CtrlReg4, pACB->glitch_cfg | /* glitch eater */
2418            ((dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION) ?
2419             NEGATE_REQACKDATA : 0));
2420
2421 /* Clear Transfer Count High: ID */
2422 DC390_write8(CtcReg_High, 0);
2423 DC390_write8(DMA_Cmd, DMA_IDLE_CMD);
2424 DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
2425 DC390_write32(DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);
2426
2427 dstate = DC390_read8(DMA_Status);
2428 DC390_write8(DMA_Status, dstate);
2429}
2430
2431static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2432{
2433 struct dc390_acb *pACB;
2434 struct Scsi_Host *shost;
2435 unsigned long io_port;
2436 int error = -ENODEV, i;
2437
2438 if (pci_enable_device(pdev))
2439 goto out;
2440
2441 pci_set_master(pdev);
2442
2443 error = -ENOMEM;
2444 if (disable_clustering)
2445 driver_template.use_clustering = DISABLE_CLUSTERING;
2446 shost = scsi_host_alloc(&driver_template, sizeof(struct dc390_acb));
2447 if (!shost)
2448 goto out_disable_device;
2449
2450 pACB = (struct dc390_acb *)shost->hostdata;
2451 memset(pACB, 0, sizeof(struct dc390_acb));
2452
2453 dc390_check_eeprom(pdev, dc390_adapterCnt);
2454 dc390_eeprom_override(dc390_adapterCnt);
2455
2456 io_port = pci_resource_start(pdev, 0);
2457
2458 shost->this_id = dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID];
2459 shost->io_port = io_port;
2460 shost->n_io_port = 0x80;
2461 shost->irq = pdev->irq;
2462 shost->base = io_port;
2463 shost->unique_id = io_port;
2464
2465 pACB->last_reset = jiffies;
2466 pACB->pScsiHost = shost;
2467 pACB->IOPortBase = (u16) io_port;
2468 pACB->IRQLevel = pdev->irq;
2469
2470 shost->max_id = 8;
2471
2472 if (shost->max_id - 1 ==
2473 dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID])
2474 shost->max_id--;
2475
2476 if (dc390_eepromBuf[dc390_adapterCnt][EE_MODE2] & LUN_CHECK)
2477 shost->max_lun = 8;
2478 else
2479 shost->max_lun = 1;
2480
2481 pACB->pFreeSRB = pACB->SRB_array;
2482 pACB->SRBCount = MAX_SRB_CNT;
2483 pACB->AdapterIndex = dc390_adapterCnt;
2484 pACB->TagMaxNum =
2485 2 << dc390_eepromBuf[dc390_adapterCnt][EE_TAG_CMD_NUM];
2486 pACB->Gmode2 = dc390_eepromBuf[dc390_adapterCnt][EE_MODE2];
2487
2488 for (i = 0; i < pACB->SRBCount-1; i++)
2489 pACB->SRB_array[i].pNextSRB = &pACB->SRB_array[i+1];
2490 pACB->SRB_array[pACB->SRBCount-1].pNextSRB = NULL;
2491 pACB->pTmpSRB = &pACB->TmpSRB;
2492
2493 pACB->sel_timeout = SEL_TIMEOUT;
2494 pACB->glitch_cfg = EATER_25NS;
2495 pACB->pdev = pdev;
2496
2497 if (!request_region(io_port, shost->n_io_port, "tmscsim")) {
2498 printk(KERN_ERR "DC390: register IO ports error!\n");
2499 goto out_host_put;
2500 }
2501
2502 /* Reset Pending INT */
2503 DC390_read8_(INT_Status, io_port);
2504
2505 if (request_irq(pdev->irq, do_DC390_Interrupt, IRQF_SHARED,
2506 "tmscsim", pACB)) {
2507 printk(KERN_ERR "DC390: register IRQ error!\n");
2508 goto out_release_region;
2509 }
2510
2511 dc390_init_hw(pACB, dc390_adapterCnt);
2512
2513 dc390_adapterCnt++;
2514
2515 pci_set_drvdata(pdev, shost);
2516
2517 error = scsi_add_host(shost, &pdev->dev);
2518 if (error)
2519 goto out_free_irq;
2520 scsi_scan_host(shost);
2521 return 0;
2522
2523 out_free_irq:
2524 free_irq(pdev->irq, pACB);
2525 out_release_region:
2526 release_region(io_port, shost->n_io_port);
2527 out_host_put:
2528 scsi_host_put(shost);
2529 out_disable_device:
2530 pci_disable_device(pdev);
2531 out:
2532 return error;
2533}
2534
2535/**
2536 * dc390_remove_one - Called to remove a single instance of the adapter.
2537 *
2538 * @dev: The PCI device to remove.
2539 */
2540static void dc390_remove_one(struct pci_dev *dev)
2541{
2542 struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
2543 unsigned long iflags;
2544 struct dc390_acb* pACB = (struct dc390_acb*) scsi_host->hostdata;
2545 u8 bval;
2546
2547 scsi_remove_host(scsi_host);
2548
2549 spin_lock_irqsave(scsi_host->host_lock, iflags);
2550 pACB->ACBFlag = RESET_DEV;
2551 bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST;
2552 DC390_write8 (CtrlReg1, bval); /* disable interrupt */
2553 if (pACB->Gmode2 & RST_SCSI_BUS)
2554 dc390_ResetSCSIBus(pACB);
2555 spin_unlock_irqrestore(scsi_host->host_lock, iflags);
2556
2557 free_irq(scsi_host->irq, pACB);
2558 release_region(scsi_host->io_port, scsi_host->n_io_port);
2559
2560 pci_disable_device(dev);
2561 scsi_host_put(scsi_host);
2562}
2563
2564static struct pci_device_id tmscsim_pci_tbl[] = {
2565 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD53C974,
2566 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
2567 { }
2568};
2569MODULE_DEVICE_TABLE(pci, tmscsim_pci_tbl);
2570
2571static struct pci_driver dc390_driver = {
2572 .name = "tmscsim",
2573 .id_table = tmscsim_pci_tbl,
2574 .probe = dc390_probe_one,
2575 .remove = dc390_remove_one,
2576};
2577
2578static int __init dc390_module_init(void)
2579{
2580 if (!disable_clustering) {
2581 printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
2582 printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n");
2583 }
2584
2585 if (tmscsim[0] == -1 || tmscsim[0] > 15) {
2586 tmscsim[0] = 7;
2587 tmscsim[1] = 4;
2588 tmscsim[2] = PARITY_CHK_ | TAG_QUEUEING_;
2589 tmscsim[3] = MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION;
2590 tmscsim[4] = 2;
2591 tmscsim[5] = 10;
2592 printk (KERN_INFO "DC390: Using safe settings.\n");
2593 }
2594
2595 return pci_register_driver(&dc390_driver);
2596}
2597
2598static void __exit dc390_module_exit(void)
2599{
2600 pci_unregister_driver(&dc390_driver);
2601}
2602
2603module_init(dc390_module_init);
2604module_exit(dc390_module_exit);
2605
2606#ifndef MODULE
2607static int __init dc390_setup (char *str)
2608{
2609 int ints[8],i, im;
2610
2611 get_options(str, ARRAY_SIZE(ints), ints);
2612 im = ints[0];
2613
2614 if (im > 6) {
2615 printk (KERN_NOTICE "DC390: ignore extra params!\n");
2616 im = 6;
2617 }
2618
2619 for (i = 0; i < im; i++)
2620 tmscsim[i] = ints[i+1];
2621 /* dc390_checkparams (); */
2622 return 1;
2623}
2624
2625__setup("tmscsim=", dc390_setup);
2626#endif
diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h
deleted file mode 100644
index 3d1bb4ad1826..000000000000
--- a/drivers/scsi/tmscsim.h
+++ /dev/null
@@ -1,551 +0,0 @@
1/***********************************************************************
2;* File Name : TMSCSIM.H *
3;* TEKRAM DC-390(T) PCI SCSI Bus Master Host Adapter *
4;* Device Driver *
5;***********************************************************************/
6/* $Id: tmscsim.h,v 2.15.2.3 2000/11/17 20:52:27 garloff Exp $ */
7
8#ifndef _TMSCSIM_H
9#define _TMSCSIM_H
10
11#include <linux/types.h>
12
13#define SCSI_IRQ_NONE 255
14
15#define MAX_ADAPTER_NUM 4
16#define MAX_SG_LIST_BUF 16 /* Not used */
17#define MAX_SCSI_ID 8
18#define MAX_SRB_CNT 50 /* Max number of started commands */
19
20#define SEL_TIMEOUT 153 /* 250 ms selection timeout (@ 40 MHz) */
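/* Presumably per the Am53C974 selection timeout formula
 * reg * 8192 * CCF / clock: 153 * 8192 * 8 / 40 MHz ~= 250 ms */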
21
22/*
23;-----------------------------------------------------------------------
24; SCSI Request Block
25;-----------------------------------------------------------------------
26*/
27struct dc390_srb
28{
29//u8 CmdBlock[12];
30
31struct dc390_srb *pNextSRB;
32struct dc390_dcb *pSRBDCB;
33struct scsi_cmnd *pcmd;
34struct scatterlist *pSegmentList;
35
36struct scatterlist Segmentx; /* make a one entry of S/G list table */
37
38unsigned long SGBusAddr; /*;a segment starting address as seen by AM53C974A
39 in CPU endianness. We're only getting 32-bit bus
40 addresses by default */
41unsigned long SGToBeXferLen; /*; to be xfer length */
42unsigned long TotalXferredLen;
43unsigned long SavedTotXLen;
44unsigned long Saved_Ptr;
45u32 SRBState;
46
47u8 SRBStatus;
48u8 SRBFlag; /*; b0-AutoReqSense,b6-Read,b7-write */
49 /*; b4-settimeout,b5-Residual valid */
50u8 AdaptStatus;
51u8 TargetStatus;
52
53u8 ScsiPhase;
54s8 TagNumber;
55u8 SGIndex;
56u8 SGcount;
57
58u8 MsgCnt;
59u8 EndMessage;
60
61u8 MsgInBuf[6];
62u8 MsgOutBuf[6];
63
64//u8 IORBFlag; /*;81h-Reset, 2-retry */
65};
66
67
68/*
69;-----------------------------------------------------------------------
70; Device Control Block
71;-----------------------------------------------------------------------
72*/
73struct dc390_dcb
74{
75struct dc390_dcb *pNextDCB;
76struct dc390_acb *pDCBACB;
77
78/* Queued SRBs */
79struct dc390_srb *pGoingSRB;
80struct dc390_srb *pGoingLast;
81struct dc390_srb *pActiveSRB;
82u8 GoingSRBCnt;
83
84u32 TagMask;
85
86u8 TargetID; /*; SCSI Target ID (SCSI Only) */
87u8 TargetLUN; /*; SCSI Log. Unit (SCSI Only) */
88u8 DevMode;
89u8 DCBFlag;
90
91u8 CtrlR1;
92u8 CtrlR3;
93u8 CtrlR4;
94
95u8 SyncMode; /*; 0:async mode */
96u8 NegoPeriod; /*;for nego. */
97u8 SyncPeriod; /*;for reg. */
98u8 SyncOffset; /*;for reg. and nego.(low nibble) */
99};
100
101
102/*
103;-----------------------------------------------------------------------
104; Adapter Control Block
105;-----------------------------------------------------------------------
106*/
107struct dc390_acb
108{
109struct Scsi_Host *pScsiHost;
110u16 IOPortBase;
111u8 IRQLevel;
112u8 status;
113
114u8 SRBCount;
115u8 AdapterIndex; /*; nth Adapter this driver */
116u8 DCBCnt;
117
118u8 TagMaxNum;
119u8 ACBFlag;
120u8 Gmode2;
121u8 scan_devices;
122
123struct dc390_dcb *pLinkDCB;
124struct dc390_dcb *pLastDCB;
125struct dc390_dcb *pDCBRunRobin;
126
127struct dc390_dcb *pActiveDCB;
128struct dc390_srb *pFreeSRB;
129struct dc390_srb *pTmpSRB;
130
131u8 msgin123[4];
132u8 Connected;
133u8 pad;
134
135#if defined(USE_SPINLOCKS) && USE_SPINLOCKS > 1 && (defined(CONFIG_SMP) || DEBUG_SPINLOCKS > 0)
136spinlock_t lock;
137#endif
138u8 sel_timeout;
139u8 glitch_cfg;
140
141u8 MsgLen;
142u8 Ignore_IRQ; /* Not used */
143
144struct pci_dev *pdev;
145
146unsigned long last_reset;
147unsigned long Cmds;
148u32 SelLost;
149u32 SelConn;
150u32 CmdInQ;
151u32 CmdOutOfSRB;
152
153struct dc390_srb TmpSRB;
154struct dc390_srb SRB_array[MAX_SRB_CNT]; /* 50 SRBs */
155};
156
157
158/*;-----------------------------------------------------------------------*/
159
160
161#define BIT31 0x80000000
162#define BIT30 0x40000000
163#define BIT29 0x20000000
164#define BIT28 0x10000000
165#define BIT27 0x08000000
166#define BIT26 0x04000000
167#define BIT25 0x02000000
168#define BIT24 0x01000000
169#define BIT23 0x00800000
170#define BIT22 0x00400000
171#define BIT21 0x00200000
172#define BIT20 0x00100000
173#define BIT19 0x00080000
174#define BIT18 0x00040000
175#define BIT17 0x00020000
176#define BIT16 0x00010000
177#define BIT15 0x00008000
178#define BIT14 0x00004000
179#define BIT13 0x00002000
180#define BIT12 0x00001000
181#define BIT11 0x00000800
182#define BIT10 0x00000400
183#define BIT9 0x00000200
184#define BIT8 0x00000100
185#define BIT7 0x00000080
186#define BIT6 0x00000040
187#define BIT5 0x00000020
188#define BIT4 0x00000010
189#define BIT3 0x00000008
190#define BIT2 0x00000004
191#define BIT1 0x00000002
192#define BIT0 0x00000001
193
194/*;---UnitCtrlFlag */
195#define UNIT_ALLOCATED BIT0
196#define UNIT_INFO_CHANGED BIT1
197#define FORMATING_MEDIA BIT2
198#define UNIT_RETRY BIT3
199
200/*;---UnitFlags */
201#define DASD_SUPPORT BIT0
202#define SCSI_SUPPORT BIT1
203#define ASPI_SUPPORT BIT2
204
205/*;----SRBState machine definition */
206#define SRB_FREE 0
207#define SRB_WAIT BIT0
208#define SRB_READY BIT1
209#define SRB_MSGOUT BIT2 /*;arbitration+msg_out 1st byte*/
210#define SRB_MSGIN BIT3
211#define SRB_MSGIN_MULTI BIT4
212#define SRB_COMMAND BIT5
213#define SRB_START_ BIT6 /*;arbitration+msg_out+command_out*/
214#define SRB_DISCONNECT BIT7
215#define SRB_DATA_XFER BIT8
216#define SRB_XFERPAD BIT9
217#define SRB_STATUS BIT10
218#define SRB_COMPLETED BIT11
219#define SRB_ABORT_SENT BIT12
220#define DO_SYNC_NEGO BIT13
221#define SRB_UNEXPECT_RESEL BIT14
222
223/*;---SRBstatus */
224#define SRB_OK BIT0
225#define ABORTION BIT1
226#define OVER_RUN BIT2
227#define UNDER_RUN BIT3
228#define PARITY_ERROR BIT4
229#define SRB_ERROR BIT5
230
231/*;---ACBFlag */
232#define RESET_DEV BIT0
233#define RESET_DETECT BIT1
234#define RESET_DONE BIT2
235
236/*;---DCBFlag */
237#define ABORT_DEV_ BIT0
238
239/*;---SRBFlag */
240#define DATAOUT BIT7
241#define DATAIN BIT6
242#define RESIDUAL_VALID BIT5
243#define ENABLE_TIMER BIT4
244#define RESET_DEV0 BIT2
245#define ABORT_DEV BIT1
246#define AUTO_REQSENSE BIT0
247
248/*;---Adapter status */
249#define H_STATUS_GOOD 0
250#define H_SEL_TIMEOUT 0x11
251#define H_OVER_UNDER_RUN 0x12
252#define H_UNEXP_BUS_FREE 0x13
253#define H_TARGET_PHASE_F 0x14
254#define H_INVALID_CCB_OP 0x16
255#define H_LINK_CCB_BAD 0x17
256#define H_BAD_TARGET_DIR 0x18
257#define H_DUPLICATE_CCB 0x19
258#define H_BAD_CCB_OR_SG 0x1A
259#define H_ABORT 0x0FF
260
261/* cmd->result */
262#define RES_TARGET 0x000000FF /* Target State */
263#define RES_TARGET_LNX STATUS_MASK /* Only official ... */
264#define RES_ENDMSG 0x0000FF00 /* End Message */
265#define RES_DID 0x00FF0000 /* DID_ codes */
266#define RES_DRV 0xFF000000 /* DRIVER_ codes */
267
268#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
269#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
270
271#define SET_RES_TARGET(who, tgt) do { who &= ~RES_TARGET; who |= (int)(tgt); } while (0)
272#define SET_RES_TARGET_LNX(who, tgt) do { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; } while (0)
273#define SET_RES_MSG(who, msg) do { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; } while (0)
274#define SET_RES_DID(who, did) do { who &= ~RES_DID; who |= (int)(did) << 16; } while (0)
275#define SET_RES_DRV(who, drv) do { who &= ~RES_DRV; who |= (int)(drv) << 24; } while (0)
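/*
 * Usage sketch: MK_RES*() builds the 32-bit cmd->result word (driver byte,
 * host byte, message byte, target status); the SET_RES_*() helpers patch a
 * single field afterwards, e.g.
 *
 *	pcmd->result = MK_RES(0, DID_PARITY, pSRB->EndMessage, 0);
 *	SET_RES_DID(pcmd->result, DID_SOFT_ERROR);	(rewrites the host byte)
 *
 * MK_RES and MK_RES_LNX expand identically; only SET_RES_TARGET_LNX applies
 * the extra <<1 of the official STATUS_MASK encoding.
 */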
276
277/*;---Sync_Mode */
278#define SYNC_DISABLE 0
279#define SYNC_ENABLE BIT0
280#define SYNC_NEGO_DONE BIT1
281#define WIDE_ENABLE BIT2 /* Not used ;-) */
282#define WIDE_NEGO_DONE BIT3 /* Not used ;-) */
283#define EN_TAG_QUEUEING BIT4
284#define EN_ATN_STOP BIT5
285
286#define SYNC_NEGO_OFFSET 15
287
288/*;---SCSI bus phase*/
289#define SCSI_DATA_OUT 0
290#define SCSI_DATA_IN 1
291#define SCSI_COMMAND 2
292#define SCSI_STATUS_ 3
293#define SCSI_NOP0 4
294#define SCSI_NOP1 5
295#define SCSI_MSG_OUT 6
296#define SCSI_MSG_IN 7
297
298/*;----SCSI MSG BYTE*/ /* see scsi/scsi.h */ /* One is missing ! */
299#define ABORT_TAG 0x0d
300
301/*
302 * SCSI query queue
303 */
304typedef struct {
305 dma_addr_t saved_dma_handle;
306} dc390_cmd_scp_t;
307
308/*
309;==========================================================
310; EEPROM byte offset
311;==========================================================
312*/
313typedef struct _EEprom
314{
315u8 EE_MODE1;
316u8 EE_SPEED;
317u8 xx1;
318u8 xx2;
319} EEprom, *PEEprom;
320
321#define REAL_EE_ADAPT_SCSI_ID 64
322#define REAL_EE_MODE2 65
323#define REAL_EE_DELAY 66
324#define REAL_EE_TAG_CMD_NUM 67
325
326#define EE_ADAPT_SCSI_ID 32
327#define EE_MODE2 33
328#define EE_DELAY 34
329#define EE_TAG_CMD_NUM 35
330
331#define EE_LEN 40
332
333/*; EE_MODE1 bits definition*/
334#define PARITY_CHK_ BIT0
335#define SYNC_NEGO_ BIT1
336#define EN_DISCONNECT_ BIT2
337#define SEND_START_ BIT3
338#define TAG_QUEUEING_ BIT4
339
340/*; EE_MODE2 bits definition*/
341#define MORE2_DRV BIT0
342#define GREATER_1G BIT1
343#define RST_SCSI_BUS BIT2
344#define ACTIVE_NEGATION BIT3
345#define NO_SEEK BIT4
346#define LUN_CHECK BIT5
347
348#define ENABLE_CE 1
349#define DISABLE_CE 0
350#define EEPROM_READ 0x80
351
352/*
353;==========================================================
354; AMD 53C974 Registers bit Definition
355;==========================================================
356*/
357/*
358;====================
359; SCSI Register
360;====================
361*/
362
363/*; Command Reg.(+0CH) (rw) */
364#define DMA_COMMAND BIT7
365#define NOP_CMD 0
366#define CLEAR_FIFO_CMD 1
367#define RST_DEVICE_CMD 2
368#define RST_SCSI_BUS_CMD 3
369
370#define INFO_XFER_CMD 0x10
371#define INITIATOR_CMD_CMPLTE 0x11
372#define MSG_ACCEPTED_CMD 0x12
373#define XFER_PAD_BYTE 0x18
374#define SET_ATN_CMD 0x1A
375#define RESET_ATN_CMD 0x1B
376
377#define SEL_WO_ATN 0x41 /* currently not used */
378#define SEL_W_ATN 0x42
379#define SEL_W_ATN_STOP 0x43
380#define SEL_W_ATN3 0x46
381#define EN_SEL_RESEL 0x44
382#define DIS_SEL_RESEL 0x45 /* currently not used */
383#define RESEL 0x40 /* " */
384#define RESEL_ATN3 0x47 /* " */
385
386#define DATA_XFER_CMD INFO_XFER_CMD
387
388
389/*; SCSI Status Reg.(+10H) (r) */
390#define INTERRUPT BIT7
391#define ILLEGAL_OP_ERR BIT6
392#define PARITY_ERR BIT5
393#define COUNT_2_ZERO BIT4
394#define GROUP_CODE_VALID BIT3
395#define SCSI_PHASE_MASK (BIT2+BIT1+BIT0)
396/* BIT2: MSG phase; BIT1: C/D phase; BIT0: I/O phase */
397
398/*; Interrupt Status Reg.(+14H) (r) */
399#define SCSI_RESET BIT7
400#define INVALID_CMD BIT6
401#define DISCONNECTED BIT5
402#define SERVICE_REQUEST BIT4
403#define SUCCESSFUL_OP BIT3
404#define RESELECTED BIT2
405#define SEL_ATTENTION BIT1
406#define SELECTED BIT0
407
408/*; Internal State Reg.(+18H) (r) */
409#define SYNC_OFFSET_FLAG BIT3
410#define INTRN_STATE_MASK (BIT2+BIT1+BIT0)
411/* 0x04: Sel. successful (w/o stop), 0x01: Sel. successful (w/ stop) */
412
413/*; Clock Factor Reg.(+24H) (w) */
414#define CLK_FREQ_40MHZ 0
415#define CLK_FREQ_35MHZ (BIT2+BIT1+BIT0)
416#define CLK_FREQ_30MHZ (BIT2+BIT1)
417#define CLK_FREQ_25MHZ (BIT2+BIT0)
418#define CLK_FREQ_20MHZ BIT2
419#define CLK_FREQ_15MHZ (BIT1+BIT0)
420#define CLK_FREQ_10MHZ BIT1
421
422/*; Control Reg. 1(+20H) (rw) */
423#define EXTENDED_TIMING BIT7
424#define DIS_INT_ON_SCSI_RST BIT6
425#define PARITY_ERR_REPO BIT4
426#define SCSI_ID_ON_BUS (BIT2+BIT1+BIT0) /* host adapter ID */
427
428/*; Control Reg. 2(+2CH) (rw) */
429#define EN_FEATURE BIT6
430#define EN_SCSI2_CMD BIT3
431
432/*; Control Reg. 3(+30H) (rw) */
433#define ID_MSG_CHECK BIT7
434#define EN_QTAG_MSG BIT6
435#define EN_GRP2_CMD BIT5
436#define FAST_SCSI BIT4 /* ;10MB/SEC */
437#define FAST_CLK BIT3 /* ;25 - 40 MHZ */
438
439/*; Control Reg. 4(+34H) (rw) */
440#define EATER_12NS 0
441#define EATER_25NS BIT7
442#define EATER_35NS BIT6
443#define EATER_0NS (BIT7+BIT6)
444#define REDUCED_POWER BIT5
445#define CTRL4_RESERVED BIT4 /* must be 1 acc. to AM53C974.c */
446#define NEGATE_REQACKDATA BIT2
447#define NEGATE_REQACK BIT3
448
449#define GLITCH_TO_NS(x) (((~x>>6 & 2) >> 1) | ((x>>6 & 1) << 1 ^ (x>>6 & 2)))
450#define NS_TO_GLITCH(y) (((~y<<7) | ~((y<<6) ^ ((y<<5 & 1<<6) | ~0x40))) & 0xc0)
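/* Glitch eater encoding in CtrlReg4 bits 7:6, per the EATER_* values above:
 * 00 = 12 ns, 10 = 25 ns, 01 = 35 ns, 11 = 0 ns. GLITCH_TO_NS maps that
 * code to a small index ordered by delay (0 ns -> 0, 12 -> 1, 25 -> 2,
 * 35 -> 3); NS_TO_GLITCH is the intended inverse. */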
451
452/*
453;====================
454; DMA Register
455;====================
456*/
457/*; DMA Command Reg.(+40H) (rw) */
458#define READ_DIRECTION BIT7
459#define WRITE_DIRECTION 0
460#define EN_DMA_INT BIT6
461#define EN_PAGE_INT BIT5 /* page transfer interrupt enable */
462#define MAP_TO_MDL BIT4
463#define DIAGNOSTIC BIT2
464#define DMA_IDLE_CMD 0
465#define DMA_BLAST_CMD BIT0
466#define DMA_ABORT_CMD BIT1
467#define DMA_START_CMD (BIT1+BIT0)
468
469/*; DMA Status Reg.(+54H) (r) */
470#define PCI_MS_ABORT BIT6
471#define BLAST_COMPLETE BIT5
472#define SCSI_INTERRUPT BIT4
473#define DMA_XFER_DONE BIT3
474#define DMA_XFER_ABORT BIT2
475#define DMA_XFER_ERROR BIT1
476#define POWER_DOWN BIT0
477
478/*; DMA SCSI Bus and Ctrl.(+70H) */
479#define EN_INT_ON_PCI_ABORT BIT25
480#define WRT_ERASE_DMA_STAT BIT24
481#define PW_DOWN_CTRL BIT21
482#define SCSI_BUSY BIT20
483#define SCLK BIT19
484#define SCAM BIT18
485#define SCSI_LINES 0x0003ffff
486
487/*
488;==========================================================
489; SCSI Chip register address offset
490;==========================================================
491;Registers are rw unless declared otherwise
492*/
493#define CtcReg_Low 0x00 /* r curr. transfer count */
494#define CtcReg_Mid 0x04 /* r */
495#define CtcReg_High 0x38 /* r */
496#define ScsiFifo 0x08
497#define ScsiCmd 0x0C
498#define Scsi_Status 0x10 /* r */
499#define INT_Status 0x14 /* r */
500#define Sync_Period 0x18 /* w */
501#define Sync_Offset 0x1C /* w */
502#define Clk_Factor 0x24 /* w */
503#define CtrlReg1 0x20
504#define CtrlReg2 0x2C
505#define CtrlReg3 0x30
506#define CtrlReg4 0x34
507#define DMA_Cmd 0x40
508#define DMA_XferCnt 0x44 /* rw starting transfer count (32 bit) */
509#define DMA_XferAddr 0x48 /* rw starting physical address (32 bit) */
510#define DMA_Wk_ByteCntr 0x4C /* r working byte counter */
511#define DMA_Wk_AddrCntr 0x50 /* r working address counter */
512#define DMA_Status 0x54 /* r */
513#define DMA_MDL_Addr 0x58 /* rw starting MDL address */
514#define DMA_Wk_MDL_Cntr 0x5C /* r working MDL counter */
515#define DMA_ScsiBusCtrl 0x70 /* rw SCSI Bus, PCI/DMA Ctrl */
516
517#define StcReg_Low CtcReg_Low /* w start transfer count */
518#define StcReg_Mid CtcReg_Mid /* w */
519#define StcReg_High CtcReg_High /* w */
520#define Scsi_Dest_ID Scsi_Status /* w */
521#define Scsi_TimeOut INT_Status /* w */
522#define Intern_State Sync_Period /* r */
523#define Current_Fifo Sync_Offset /* r Curr. FIFO / int. state */
524
525
526#define DC390_read8(address) \
527 (inb (pACB->IOPortBase + (address)))
528
529#define DC390_read8_(address, base) \
530 (inb ((u16)(base) + (address)))
531
532#define DC390_read16(address) \
533 (inw (pACB->IOPortBase + (address)))
534
535#define DC390_read32(address) \
536 (inl (pACB->IOPortBase + (address)))
537
538#define DC390_write8(address,value) \
539 outb ((value), pACB->IOPortBase + (address))
540
541#define DC390_write8_(address,value,base) \
542 outb ((value), (u16)(base) + (address))
543
544#define DC390_write16(address,value) \
545 outw ((value), pACB->IOPortBase + (address))
546
547#define DC390_write32(address,value) \
548 outl ((value), pACB->IOPortBase + (address))
549
550
551#endif /* _TMSCSIM_H */
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index aa0f4035afaf..14eb50b95a1e 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -696,25 +696,25 @@ static int u14_34f_slave_configure(struct scsi_device *dev) {
696 if (TLDEV(dev->type) && dev->tagged_supported) 696 if (TLDEV(dev->type) && dev->tagged_supported)
697 697
698 if (tag_mode == TAG_SIMPLE) { 698 if (tag_mode == TAG_SIMPLE) {
699 scsi_adjust_queue_depth(dev, tqd); 699 scsi_change_queue_depth(dev, tqd);
700 tag_suffix = ", simple tags"; 700 tag_suffix = ", simple tags";
701 } 701 }
702 else if (tag_mode == TAG_ORDERED) { 702 else if (tag_mode == TAG_ORDERED) {
703 scsi_adjust_queue_depth(dev, tqd); 703 scsi_change_queue_depth(dev, tqd);
704 tag_suffix = ", ordered tags"; 704 tag_suffix = ", ordered tags";
705 } 705 }
706 else { 706 else {
707 scsi_adjust_queue_depth(dev, tqd); 707 scsi_change_queue_depth(dev, tqd);
708 tag_suffix = ", no tags"; 708 tag_suffix = ", no tags";
709 } 709 }
710 710
711 else if (TLDEV(dev->type) && linked_comm) { 711 else if (TLDEV(dev->type) && linked_comm) {
712 scsi_adjust_queue_depth(dev, tqd); 712 scsi_change_queue_depth(dev, tqd);
713 tag_suffix = ", untagged"; 713 tag_suffix = ", untagged";
714 } 714 }
715 715
716 else { 716 else {
717 scsi_adjust_queue_depth(dev, utqd); 717 scsi_change_queue_depth(dev, utqd);
718 tag_suffix = ""; 718 tag_suffix = "";
719 } 719 }
720 720
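The queue-depth hunks in this series are all the same mechanical conversion: scsi_adjust_queue_depth() and the reason-code switch are replaced by scsi_change_queue_depth(), which takes only the new depth and returns the depth actually set (queue-full tracking moves behind the host template's track_queue_depth flag). A driver-side .change_queue_depth hook thus reduces to a clamp plus the core call — a minimal sketch, with my_host and its max_qdepth field as hypothetical stand-ins:

	static int my_change_queue_depth(struct scsi_device *sdev, int qdepth)
	{
		struct my_host *h = shost_priv(sdev->host);	/* hypothetical priv data */

		return scsi_change_queue_depth(sdev, min(qdepth, h->max_qdepth));
	}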
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 820fc7d96084..2e4614b9dddf 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2713,7 +2713,7 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
2713 2713
2714 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", 2714 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
2715 __func__, lun_qdepth); 2715 __func__, lun_qdepth);
2716 scsi_adjust_queue_depth(sdev, lun_qdepth); 2716 scsi_change_queue_depth(sdev, lun_qdepth);
2717} 2717}
2718 2718
2719/* 2719/*
@@ -2805,32 +2805,16 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
2805 * ufshcd_change_queue_depth - change queue depth 2805 * ufshcd_change_queue_depth - change queue depth
2806 * @sdev: pointer to SCSI device 2806 * @sdev: pointer to SCSI device
2807 * @depth: required depth to set 2807 * @depth: required depth to set
2808 * @reason: reason for changing the depth
2809 * 2808 *
2810 * Change queue depth according to the reason and make sure 2809 * Change queue depth and make sure the max. limits are not crossed.
2811 * the max. limits are not crossed.
2812 */ 2810 */
2813static int ufshcd_change_queue_depth(struct scsi_device *sdev, 2811static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
2814 int depth, int reason)
2815{ 2812{
2816 struct ufs_hba *hba = shost_priv(sdev->host); 2813 struct ufs_hba *hba = shost_priv(sdev->host);
2817 2814
2818 if (depth > hba->nutrs) 2815 if (depth > hba->nutrs)
2819 depth = hba->nutrs; 2816 depth = hba->nutrs;
2820 2817 return scsi_change_queue_depth(sdev, depth);
2821 switch (reason) {
2822 case SCSI_QDEPTH_DEFAULT:
2823 case SCSI_QDEPTH_RAMP_UP:
2824 scsi_adjust_queue_depth(sdev, depth);
2825 break;
2826 case SCSI_QDEPTH_QFULL:
2827 scsi_track_queue_full(sdev, depth);
2828 break;
2829 default:
2830 return -EOPNOTSUPP;
2831 }
2832
2833 return depth;
2834} 2818}
2835 2819
2836/** 2820/**
@@ -4235,6 +4219,7 @@ static struct scsi_host_template ufshcd_driver_template = {
4235 .can_queue = UFSHCD_CAN_QUEUE, 4219 .can_queue = UFSHCD_CAN_QUEUE,
4236 .max_host_blocked = 1, 4220 .max_host_blocked = 1,
4237 .use_blk_tags = 1, 4221 .use_blk_tags = 1,
4222 .track_queue_depth = 1,
4238}; 4223};
4239 4224
4240static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, 4225static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 355afbc7fde1..22e70126425b 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -561,6 +561,15 @@ static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
561 return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc); 561 return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
562} 562}
563 563
564static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
565 struct scsi_cmnd *sc)
566{
567 u32 tag = blk_mq_unique_tag(sc->request);
568 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
569
570 return &vscsi->req_vqs[hwq];
571}
572
564static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, 573static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
565 struct virtio_scsi_target_state *tgt) 574 struct virtio_scsi_target_state *tgt)
566{ 575{
@@ -604,7 +613,12 @@ static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
604 struct virtio_scsi *vscsi = shost_priv(sh); 613 struct virtio_scsi *vscsi = shost_priv(sh);
605 struct virtio_scsi_target_state *tgt = 614 struct virtio_scsi_target_state *tgt =
606 scsi_target(sc->device)->hostdata; 615 scsi_target(sc->device)->hostdata;
607 struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt); 616 struct virtio_scsi_vq *req_vq;
617
618 if (shost_use_blk_mq(sh))
619 req_vq = virtscsi_pick_vq_mq(vscsi, sc);
620 else
621 req_vq = virtscsi_pick_vq(vscsi, tgt);
608 622
609 return virtscsi_queuecommand(vscsi, req_vq, sc); 623 return virtscsi_queuecommand(vscsi, req_vq, sc);
610} 624}
@@ -668,28 +682,13 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
668 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth 682 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
669 * @sdev: Virtscsi target whose queue depth to change 683 * @sdev: Virtscsi target whose queue depth to change
670 * @qdepth: New queue depth 684 * @qdepth: New queue depth
671 * @reason: Reason for the queue depth change.
672 */ 685 */
673static int virtscsi_change_queue_depth(struct scsi_device *sdev, 686static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
674 int qdepth,
675 int reason)
676{ 687{
677 struct Scsi_Host *shost = sdev->host; 688 struct Scsi_Host *shost = sdev->host;
678 int max_depth = shost->cmd_per_lun; 689 int max_depth = shost->cmd_per_lun;
679 690
680 switch (reason) { 691 return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
681 case SCSI_QDEPTH_QFULL: /* Drop qdepth in response to BUSY state */
682 scsi_track_queue_full(sdev, qdepth);
683 break;
684 case SCSI_QDEPTH_RAMP_UP: /* Raise qdepth after BUSY state resolved */
685 case SCSI_QDEPTH_DEFAULT: /* Manual change via sysfs */
686 scsi_adjust_queue_depth(sdev, min(max_depth, qdepth));
687 break;
688 default:
689 return -EOPNOTSUPP;
690 }
691
692 return sdev->queue_depth;
693} 692}
694 693
695static int virtscsi_abort(struct scsi_cmnd *sc) 694static int virtscsi_abort(struct scsi_cmnd *sc)
@@ -756,6 +755,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
756 .use_clustering = ENABLE_CLUSTERING, 755 .use_clustering = ENABLE_CLUSTERING,
757 .target_alloc = virtscsi_target_alloc, 756 .target_alloc = virtscsi_target_alloc,
758 .target_destroy = virtscsi_target_destroy, 757 .target_destroy = virtscsi_target_destroy,
758 .track_queue_depth = 1,
759}; 759};
760 760
761static struct scsi_host_template virtscsi_host_template_multi = { 761static struct scsi_host_template virtscsi_host_template_multi = {
@@ -774,6 +774,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
774 .use_clustering = ENABLE_CLUSTERING, 774 .use_clustering = ENABLE_CLUSTERING,
775 .target_alloc = virtscsi_target_alloc, 775 .target_alloc = virtscsi_target_alloc,
776 .target_destroy = virtscsi_target_destroy, 776 .target_destroy = virtscsi_target_destroy,
777 .track_queue_depth = 1,
777}; 778};
778 779
779#define virtscsi_config_get(vdev, fld) \ 780#define virtscsi_config_get(vdev, fld) \
@@ -981,6 +982,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
981 shost->max_id = num_targets; 982 shost->max_id = num_targets;
982 shost->max_channel = 0; 983 shost->max_channel = 0;
983 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; 984 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
985 shost->nr_hw_queues = num_queues;
984 986
985 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { 987 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
986 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | 988 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c3b4f8b3a3a5..0f133c1817de 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -504,33 +504,11 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
504 } 504 }
505} 505}
506 506
507static int pvscsi_change_queue_depth(struct scsi_device *sdev, 507static int pvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
508 int qdepth,
509 int reason)
510{ 508{
511 int max_depth;
512 struct Scsi_Host *shost = sdev->host;
513
514 if (reason != SCSI_QDEPTH_DEFAULT)
515 /*
516 * We support only changing default.
517 */
518 return -EOPNOTSUPP;
519
520 max_depth = shost->can_queue;
521 if (!sdev->tagged_supported) 509 if (!sdev->tagged_supported)
522 max_depth = 1; 510 qdepth = 1;
523 if (qdepth > max_depth) 511 return scsi_change_queue_depth(sdev, qdepth);
524 qdepth = max_depth;
525 scsi_adjust_queue_depth(sdev, qdepth);
526
527 if (sdev->inquiry_len > 7)
528 sdev_printk(KERN_INFO, sdev,
529 "qdepth(%d), tagged(%d), simple(%d), scsi_level(%d), cmd_que(%d)\n",
530 sdev->queue_depth, sdev->tagged_supported,
531 sdev->simple_tags,
532 sdev->scsi_level, (sdev->inquiry[7] & 2) >> 1);
533 return sdev->queue_depth;
534} 512}
535 513
536/* 514/*
@@ -723,10 +701,6 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
723 memcpy(e->cdb, cmd->cmnd, e->cdbLen); 701 memcpy(e->cdb, cmd->cmnd, e->cdbLen);
724 702
725 e->tag = SIMPLE_QUEUE_TAG; 703 e->tag = SIMPLE_QUEUE_TAG;
726 if (sdev->tagged_supported &&
727 (cmd->tag == HEAD_OF_QUEUE_TAG ||
728 cmd->tag == ORDERED_QUEUE_TAG))
729 e->tag = cmd->tag;
730 704
731 if (cmd->sc_data_direction == DMA_FROM_DEVICE) 705 if (cmd->sc_data_direction == DMA_FROM_DEVICE)
732 e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST; 706 e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 32674236fec7..f94d73611ab4 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1653,7 +1653,6 @@ static struct scsi_host_template driver_template = {
1653 .can_queue = WD7000_Q, 1653 .can_queue = WD7000_Q,
1654 .this_id = 7, 1654 .this_id = 7,
1655 .sg_tablesize = WD7000_SG, 1655 .sg_tablesize = WD7000_SG,
1656 .cmd_per_lun = 1,
1657 .unchecked_isa_dma = 1, 1656 .unchecked_isa_dma = 1,
1658 .use_clustering = ENABLE_CLUSTERING, 1657 .use_clustering = ENABLE_CLUSTERING,
1659}; 1658};
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
new file mode 100644
index 000000000000..7702664d7ed3
--- /dev/null
+++ b/drivers/scsi/wd719x.c
@@ -0,0 +1,996 @@
1/*
2 * Driver for Western Digital WD7193, WD7197 and WD7296 SCSI cards
3 * Copyright 2013 Ondrej Zary
4 *
5 * Original driver by
6 * Aaron Dewell <dewell@woods.net>
7 * Gaerti <Juergen.Gaertner@mbox.si.uni-hannover.de>
8 *
9 * HW documentation available in book:
10 *
11 * SPIDER Command Protocol
12 * by Chandru M. Sippy
13 * SCSI Storage Products (MCP)
14 * Western Digital Corporation
15 * 09-15-95
16 *
17 * http://web.archive.org/web/20070717175254/http://sun1.rrzn.uni-hannover.de/gaertner.juergen/wd719x/Linux/Docu/Spider/
18 */
19
20/*
21 * Driver workflow:
22 * 1. The SCSI command is transformed into an SCB (Spider Control Block)
23 *    by the queuecommand function.
24 * 2. The address of the SCB is kept in a list so that it can be found
25 *    again if something goes wrong.
26 * 3. The address of the SCB is written to the controller, which loads
27 *    the SCB via BM-DMA and processes it.
28 * 4. When it finishes, the controller raises an interrupt and sets status registers.
29 *
30 * Known flaws:
31 *  - abort/reset handling is incomplete
32 *
33 * ToDo:
34 *  - tagged queueing
35 */
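/*
 * These steps map onto the functions below: wd719x_queuecommand()
 * covers steps 1-3 (build the SCB, add it to wd->active_scbs, write
 * its bus address to the AMR and issue WD719X_CMD_PROCESS_SCB);
 * wd719x_interrupt() handles step 4 by matching the SCB_out pointer
 * returned by the card against the active list, after which
 * wd719x_interrupt_SCB() translates the SUE status byte into a SCSI
 * result code.
 */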
36
37#include <linux/interrupt.h>
38#include <linux/module.h>
39#include <linux/delay.h>
40#include <linux/pci.h>
41#include <linux/firmware.h>
42#include <linux/eeprom_93cx6.h>
43#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_device.h>
45#include <scsi/scsi_host.h>
46#include "wd719x.h"
47
48/* low-level register access */
49static inline u8 wd719x_readb(struct wd719x *wd, u8 reg)
50{
51 return ioread8(wd->base + reg);
52}
53
54static inline u32 wd719x_readl(struct wd719x *wd, u8 reg)
55{
56 return ioread32(wd->base + reg);
57}
58
59static inline void wd719x_writeb(struct wd719x *wd, u8 reg, u8 val)
60{
61 iowrite8(val, wd->base + reg);
62}
63
64static inline void wd719x_writew(struct wd719x *wd, u8 reg, u16 val)
65{
66 iowrite16(val, wd->base + reg);
67}
68
69static inline void wd719x_writel(struct wd719x *wd, u8 reg, u32 val)
70{
71 iowrite32(val, wd->base + reg);
72}
73
74/* wait until the command register is ready */
75static inline int wd719x_wait_ready(struct wd719x *wd)
76{
77 int i = 0;
78
79 do {
80 if (wd719x_readb(wd, WD719X_AMR_COMMAND) == WD719X_CMD_READY)
81 return 0;
82 udelay(1);
83 } while (i++ < WD719X_WAIT_FOR_CMD_READY);
84
85 dev_err(&wd->pdev->dev, "command register is not ready: 0x%02x\n",
86 wd719x_readb(wd, WD719X_AMR_COMMAND));
87
88 return -ETIMEDOUT;
89}
90
91/* poll interrupt status register until command finishes */
92static inline int wd719x_wait_done(struct wd719x *wd, int timeout)
93{
94 u8 status;
95
96 while (timeout > 0) {
97 status = wd719x_readb(wd, WD719X_AMR_INT_STATUS);
98 if (status)
99 break;
100 timeout--;
101 udelay(1);
102 }
103
104 if (timeout <= 0) {
105 dev_err(&wd->pdev->dev, "direct command timed out\n");
106 return -ETIMEDOUT;
107 }
108
109 if (status != WD719X_INT_NOERRORS) {
110 dev_err(&wd->pdev->dev, "direct command failed, status 0x%02x, SUE 0x%02x\n",
111 status, wd719x_readb(wd, WD719X_AMR_SCB_ERROR));
112 return -EIO;
113 }
114
115 return 0;
116}
117
118static int wd719x_direct_cmd(struct wd719x *wd, u8 opcode, u8 dev, u8 lun,
119 u8 tag, dma_addr_t data, int timeout)
120{
121 int ret = 0;
122
123 /* clear interrupt status register (allow command register to clear) */
124 wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
125
126 /* Wait for the Command register to become free */
127 if (wd719x_wait_ready(wd))
128 return -ETIMEDOUT;
129
130 /* make sure we get NO interrupts */
131 dev |= WD719X_DISABLE_INT;
132 wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, dev);
133 wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_2, lun);
134 wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_3, tag);
135 if (data)
136 wd719x_writel(wd, WD719X_AMR_SCB_IN, data);
137
138 /* clear interrupt status register again */
139 wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
140
141 /* Now, write the command */
142 wd719x_writeb(wd, WD719X_AMR_COMMAND, opcode);
143
144 if (timeout) /* wait for the command to complete */
145 ret = wd719x_wait_done(wd, timeout);
146
147 /* clear interrupt status register (clean up) */
148 if (opcode != WD719X_CMD_READ_FIRMVER)
149 wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
150
151 return ret;
152}
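/*
 * For reference, typical direct-command invocations as used during
 * initialization below:
 *
 *	wd719x_direct_cmd(wd, WD719X_CMD_BUSRESET, 0, 0, 0, 0,
 *			  WD719X_WAIT_FOR_SCSI_RESET);
 *	wd719x_direct_cmd(wd, WD719X_CMD_SET_PARAM, 0,
 *			  sizeof(struct wd719x_host_param), 0,
 *			  wd->params_phys, WD719X_WAIT_FOR_RISC);
 *
 * A zero timeout (as in wd719x_abort) issues the command without
 * polling for completion.
 */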
153
154static void wd719x_destroy(struct wd719x *wd)
155{
156 struct wd719x_scb *scb;
157
158 /* stop the RISC */
159 if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0,
160 WD719X_WAIT_FOR_RISC))
161 dev_warn(&wd->pdev->dev, "RISC sleep command failed\n");
162 /* disable RISC */
163 wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0);
164
165 /* free all SCBs */
166 list_for_each_entry(scb, &wd->active_scbs, list)
167 pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
168 scb->phys);
169 list_for_each_entry(scb, &wd->free_scbs, list)
170 pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
171 scb->phys);
172 /* free internal buffers */
173 pci_free_consistent(wd->pdev, wd->fw_size, wd->fw_virt, wd->fw_phys);
174 wd->fw_virt = NULL;
175 pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
176 wd->hash_phys);
177 wd->hash_virt = NULL;
178 pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param),
179 wd->params, wd->params_phys);
180 wd->params = NULL;
181 free_irq(wd->pdev->irq, wd);
182}
183
184/* finish a SCSI command, mark SCB (if any) as free, unmap buffers */
185static void wd719x_finish_cmd(struct scsi_cmnd *cmd, int result)
186{
187 struct wd719x *wd = shost_priv(cmd->device->host);
188 struct wd719x_scb *scb = (struct wd719x_scb *) cmd->host_scribble;
189
190 if (scb) {
191 list_move(&scb->list, &wd->free_scbs);
192 dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
193 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
194 scsi_dma_unmap(cmd);
195 }
196 cmd->result = result << 16;
197 cmd->scsi_done(cmd);
198}
199
200/* Build a SCB and send it to the card */
201static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
202{
203 int i, count_sg;
204 unsigned long flags;
205 struct wd719x_scb *scb;
206 struct wd719x *wd = shost_priv(sh);
207 dma_addr_t phys;
208
209 cmd->host_scribble = NULL;
210
211 /* get a free SCB - either from existing ones or allocate a new one */
212 spin_lock_irqsave(wd->sh->host_lock, flags);
213 scb = list_first_entry_or_null(&wd->free_scbs, struct wd719x_scb, list);
214 if (scb) {
215 list_del(&scb->list);
216 phys = scb->phys;
217 } else {
218 spin_unlock_irqrestore(wd->sh->host_lock, flags);
219 scb = pci_alloc_consistent(wd->pdev, sizeof(struct wd719x_scb),
220 &phys);
221 spin_lock_irqsave(wd->sh->host_lock, flags);
222 if (!scb) {
223 dev_err(&wd->pdev->dev, "unable to allocate SCB\n");
224 wd719x_finish_cmd(cmd, DID_ERROR);
225 spin_unlock_irqrestore(wd->sh->host_lock, flags);
226 return 0;
227 }
228 }
229 memset(scb, 0, sizeof(struct wd719x_scb));
230 list_add(&scb->list, &wd->active_scbs);
231
232 scb->phys = phys;
233 scb->cmd = cmd;
234 cmd->host_scribble = (char *) scb;
235
236 scb->CDB_tag = 0; /* Tagged queueing not supported yet */
237 scb->devid = cmd->device->id;
238 scb->lun = cmd->device->lun;
239
240 /* copy the command */
241 memcpy(scb->CDB, cmd->cmnd, cmd->cmd_len);
242
243 /* map sense buffer */
244 scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE;
245 cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
246 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
247 scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle);
248
249 /* request autosense */
250 scb->SCB_options |= WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE;
251
252 /* check direction */
253 if (cmd->sc_data_direction == DMA_TO_DEVICE)
254 scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION
255 | WD719X_SCB_FLAGS_PCI_TO_SCSI;
256 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
257 scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION;
258
259	/* Scatter/gather */
260 count_sg = scsi_dma_map(cmd);
261 if (count_sg < 0) {
262 wd719x_finish_cmd(cmd, DID_ERROR);
263 spin_unlock_irqrestore(wd->sh->host_lock, flags);
264 return 0;
265 }
266 BUG_ON(count_sg > WD719X_SG);
267
268 if (count_sg) {
269 struct scatterlist *sg;
270
271 scb->data_length = cpu_to_le32(count_sg *
272 sizeof(struct wd719x_sglist));
273 scb->data_p = cpu_to_le32(scb->phys +
274 offsetof(struct wd719x_scb, sg_list));
275
276 scsi_for_each_sg(cmd, sg, count_sg, i) {
277 scb->sg_list[i].ptr = cpu_to_le32(sg_dma_address(sg));
278 scb->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
279 }
280 scb->SCB_options |= WD719X_SCB_FLAGS_DO_SCATTER_GATHER;
281 } else { /* zero length */
282 scb->data_length = 0;
283 scb->data_p = 0;
284 }
285
286 /* check if the Command register is free */
287 if (wd719x_readb(wd, WD719X_AMR_COMMAND) != WD719X_CMD_READY) {
288 spin_unlock_irqrestore(wd->sh->host_lock, flags);
289 return SCSI_MLQUEUE_HOST_BUSY;
290 }
291
292 /* write pointer to the AMR */
293 wd719x_writel(wd, WD719X_AMR_SCB_IN, scb->phys);
294 /* send SCB opcode */
295 wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_PROCESS_SCB);
296
297 spin_unlock_irqrestore(wd->sh->host_lock, flags);
298
299 return 0;
300}
301
302static int wd719x_chip_init(struct wd719x *wd)
303{
304 int i, ret;
305 u32 risc_init[3];
306 const struct firmware *fw_wcs, *fw_risc;
307 const char fwname_wcs[] = "wd719x-wcs.bin";
308 const char fwname_risc[] = "wd719x-risc.bin";
309
310 memset(wd->hash_virt, 0, WD719X_HASH_TABLE_SIZE);
311
312 /* WCS (sequencer) firmware */
313 ret = request_firmware(&fw_wcs, fwname_wcs, &wd->pdev->dev);
314 if (ret) {
315 dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n",
316 fwname_wcs, ret);
317 return ret;
318 }
319 /* RISC firmware */
320 ret = request_firmware(&fw_risc, fwname_risc, &wd->pdev->dev);
321 if (ret) {
322 dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n",
323 fwname_risc, ret);
324 release_firmware(fw_wcs);
325 return ret;
326 }
327 wd->fw_size = ALIGN(fw_wcs->size, 4) + fw_risc->size;
328
329 if (!wd->fw_virt)
330 wd->fw_virt = pci_alloc_consistent(wd->pdev, wd->fw_size,
331 &wd->fw_phys);
332 if (!wd->fw_virt) {
333 ret = -ENOMEM;
334 goto wd719x_init_end;
335 }
336
337 /* make a fresh copy of WCS and RISC code */
338 memcpy(wd->fw_virt, fw_wcs->data, fw_wcs->size);
339 memcpy(wd->fw_virt + ALIGN(fw_wcs->size, 4), fw_risc->data,
340 fw_risc->size);
341
342 /* Reset the Spider Chip and adapter itself */
343 wd719x_writeb(wd, WD719X_PCI_PORT_RESET, WD719X_PCI_RESET);
344 udelay(WD719X_WAIT_FOR_RISC);
345 /* Clear PIO mode bits set by BIOS */
346 wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, 0);
347 /* ensure RISC is not running */
348 wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0);
349 /* ensure command port is ready */
350 wd719x_writeb(wd, WD719X_AMR_COMMAND, 0);
351 if (wd719x_wait_ready(wd)) {
352 ret = -ETIMEDOUT;
353 goto wd719x_init_end;
354 }
355
356 /* Transfer the first 2K words of RISC code to kick start the uP */
357 risc_init[0] = wd->fw_phys; /* WCS FW */
358 risc_init[1] = wd->fw_phys + ALIGN(fw_wcs->size, 4); /* RISC FW */
359 risc_init[2] = wd->hash_phys; /* hash table */
360
361 /* clear DMA status */
362 wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3STATUS, 0);
363
364 /* address to read firmware from */
365 wd719x_writel(wd, WD719X_PCI_EXTERNAL_ADDR, risc_init[1]);
366 /* base address to write firmware to (on card) */
367 wd719x_writew(wd, WD719X_PCI_INTERNAL_ADDR, WD719X_PRAM_BASE_ADDR);
368 /* size: first 2K words */
369 wd719x_writew(wd, WD719X_PCI_DMA_TRANSFER_SIZE, 2048 * 2);
370 /* start DMA */
371 wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3CMD, WD719X_START_CHANNEL2_3DMA);
372
373 /* wait for DMA to complete */
374 i = WD719X_WAIT_FOR_RISC;
375 while (i-- > 0) {
376 u8 status = wd719x_readb(wd, WD719X_PCI_CHANNEL2_3STATUS);
377 if (status == WD719X_START_CHANNEL2_3DONE)
378 break;
379 if (status == WD719X_START_CHANNEL2_3ABORT) {
380 dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA aborted\n");
381 ret = -EIO;
382 goto wd719x_init_end;
383 }
384 udelay(1);
385 }
386	if (i < 0) { /* the loop leaves i == -1 only on timeout */
387 dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA timeout\n");
388 ret = -ETIMEDOUT;
389 goto wd719x_init_end;
390 }
391
392 /* firmware is loaded, now initialize and wake up the RISC */
393 /* write RISC initialization long words to Spider */
394 wd719x_writel(wd, WD719X_AMR_SCB_IN, risc_init[0]);
395 wd719x_writel(wd, WD719X_AMR_SCB_IN + 4, risc_init[1]);
396 wd719x_writel(wd, WD719X_AMR_SCB_IN + 8, risc_init[2]);
397
398 /* disable interrupts during initialization of RISC */
399 wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, WD719X_DISABLE_INT);
400
401	/* issue INITIALIZE RISC command */
402 wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_INIT_RISC);
403 /* enable advanced mode (wake up RISC) */
404 wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, WD719X_ENABLE_ADVANCE_MODE);
405 udelay(WD719X_WAIT_FOR_RISC);
406
407 ret = wd719x_wait_done(wd, WD719X_WAIT_FOR_RISC);
408 /* clear interrupt status register */
409 wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
410 if (ret) {
411 dev_warn(&wd->pdev->dev, "Unable to initialize RISC\n");
412 goto wd719x_init_end;
413 }
414 /* RISC is up and running */
415
416 /* Read FW version from RISC */
417 ret = wd719x_direct_cmd(wd, WD719X_CMD_READ_FIRMVER, 0, 0, 0, 0,
418 WD719X_WAIT_FOR_RISC);
419 if (ret) {
420 dev_warn(&wd->pdev->dev, "Unable to read firmware version\n");
421 goto wd719x_init_end;
422 }
423 dev_info(&wd->pdev->dev, "RISC initialized with firmware version %.2x.%.2x\n",
424 wd719x_readb(wd, WD719X_AMR_SCB_OUT + 1),
425 wd719x_readb(wd, WD719X_AMR_SCB_OUT));
426
427 /* RESET SCSI bus */
428 ret = wd719x_direct_cmd(wd, WD719X_CMD_BUSRESET, 0, 0, 0, 0,
429 WD719X_WAIT_FOR_SCSI_RESET);
430 if (ret) {
431 dev_warn(&wd->pdev->dev, "SCSI bus reset failed\n");
432 goto wd719x_init_end;
433 }
434
435 /* use HostParameter structure to set Spider's Host Parameter Block */
436 ret = wd719x_direct_cmd(wd, WD719X_CMD_SET_PARAM, 0,
437 sizeof(struct wd719x_host_param), 0,
438 wd->params_phys, WD719X_WAIT_FOR_RISC);
439 if (ret) {
440 dev_warn(&wd->pdev->dev, "Failed to set HOST PARAMETERS\n");
441 goto wd719x_init_end;
442 }
443
444 /* initiate SCAM (does nothing if disabled in BIOS) */
445 /* bug?: we should pass a mask of static IDs which we don't have */
446 ret = wd719x_direct_cmd(wd, WD719X_CMD_INIT_SCAM, 0, 0, 0, 0,
447 WD719X_WAIT_FOR_SCSI_RESET);
448 if (ret) {
449 dev_warn(&wd->pdev->dev, "SCAM initialization failed\n");
450 goto wd719x_init_end;
451 }
452
453 /* clear AMR_BIOS_SHARE_INT register */
454 wd719x_writeb(wd, WD719X_AMR_BIOS_SHARE_INT, 0);
455
456wd719x_init_end:
457 release_firmware(fw_wcs);
458 release_firmware(fw_risc);
459
460 return ret;
461}
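/*
 * To recap the bring-up sequence above: reset the chip, DMA the first
 * 2K words of RISC code into PRAM, hand the RISC the addresses of the
 * WCS firmware, the RISC firmware and the hash table, wake it with
 * WD719X_CMD_INIT_RISC plus advanced mode, then reset the SCSI bus,
 * download the host parameter block read from the EEPROM and initiate
 * SCAM.
 */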
462
463static int wd719x_abort(struct scsi_cmnd *cmd)
464{
465 int action, result;
466 unsigned long flags;
467 struct wd719x_scb *scb = (struct wd719x_scb *)cmd->host_scribble;
468 struct wd719x *wd = shost_priv(cmd->device->host);
469
470 dev_info(&wd->pdev->dev, "abort command, tag: %x\n", cmd->tag);
471
472 action = /*cmd->tag ? WD719X_CMD_ABORT_TAG : */WD719X_CMD_ABORT;
473
474 spin_lock_irqsave(wd->sh->host_lock, flags);
475 result = wd719x_direct_cmd(wd, action, cmd->device->id,
476 cmd->device->lun, cmd->tag, scb->phys, 0);
477 spin_unlock_irqrestore(wd->sh->host_lock, flags);
478 if (result)
479 return FAILED;
480
481 return SUCCESS;
482}
483
484static int wd719x_reset(struct scsi_cmnd *cmd, u8 opcode, u8 device)
485{
486 int result;
487 unsigned long flags;
488 struct wd719x *wd = shost_priv(cmd->device->host);
489
490 dev_info(&wd->pdev->dev, "%s reset requested\n",
491 (opcode == WD719X_CMD_BUSRESET) ? "bus" : "device");
492
493 spin_lock_irqsave(wd->sh->host_lock, flags);
494 result = wd719x_direct_cmd(wd, opcode, device, 0, 0, 0,
495 WD719X_WAIT_FOR_SCSI_RESET);
496 spin_unlock_irqrestore(wd->sh->host_lock, flags);
497 if (result)
498 return FAILED;
499
500 return SUCCESS;
501}
502
503static int wd719x_dev_reset(struct scsi_cmnd *cmd)
504{
505 return wd719x_reset(cmd, WD719X_CMD_RESET, cmd->device->id);
506}
507
508static int wd719x_bus_reset(struct scsi_cmnd *cmd)
509{
510 return wd719x_reset(cmd, WD719X_CMD_BUSRESET, 0);
511}
512
513static int wd719x_host_reset(struct scsi_cmnd *cmd)
514{
515 struct wd719x *wd = shost_priv(cmd->device->host);
516 struct wd719x_scb *scb, *tmp;
517 unsigned long flags;
518 int result;
519
520 dev_info(&wd->pdev->dev, "host reset requested\n");
521	/* Try to reinit the RISC (this can sleep, so do it unlocked) */
522	if (wd719x_chip_init(wd) == 0)
523		result = SUCCESS;
524	else
525		result = FAILED;
526	spin_lock_irqsave(wd->sh->host_lock, flags);
527
528 /* flush all SCBs */
529 list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) {
530 struct scsi_cmnd *tmp_cmd = scb->cmd;
531 wd719x_finish_cmd(tmp_cmd, result);
532 }
533 spin_unlock_irqrestore(wd->sh->host_lock, flags);
534
535 return result;
536}
537
538static int wd719x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
539 sector_t capacity, int geom[])
540{
541 if (capacity >= 0x200000) {
542 geom[0] = 255; /* heads */
543 geom[1] = 63; /* sectors */
544 } else {
545 geom[0] = 64; /* heads */
546 geom[1] = 32; /* sectors */
547 }
548 geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */
549
550 return 0;
551}
552
553/* process a SCB-completion interrupt */
554static inline void wd719x_interrupt_SCB(struct wd719x *wd,
555 union wd719x_regs regs,
556 struct wd719x_scb *scb)
557{
558 struct scsi_cmnd *cmd;
559 int result;
560
561 /* now have to find result from card */
562 switch (regs.bytes.SUE) {
563 case WD719X_SUE_NOERRORS:
564 result = DID_OK;
565 break;
566 case WD719X_SUE_REJECTED:
567 dev_err(&wd->pdev->dev, "command rejected\n");
568 result = DID_ERROR;
569 break;
570 case WD719X_SUE_SCBQFULL:
571 dev_err(&wd->pdev->dev, "SCB queue is full\n");
572 result = DID_ERROR;
573 break;
574 case WD719X_SUE_TERM:
575 dev_dbg(&wd->pdev->dev, "SCB terminated by direct command\n");
576 result = DID_ABORT; /* or DID_RESET? */
577 break;
578 case WD719X_SUE_CHAN1ABORT:
579 case WD719X_SUE_CHAN23ABORT:
580 result = DID_ABORT;
581 dev_err(&wd->pdev->dev, "DMA abort\n");
582 break;
583 case WD719X_SUE_CHAN1PAR:
584 case WD719X_SUE_CHAN23PAR:
585 result = DID_PARITY;
586 dev_err(&wd->pdev->dev, "DMA parity error\n");
587 break;
588 case WD719X_SUE_TIMEOUT:
589 result = DID_TIME_OUT;
590 dev_dbg(&wd->pdev->dev, "selection timeout\n");
591 break;
592 case WD719X_SUE_RESET:
593		dev_dbg(&wd->pdev->dev, "bus reset occurred\n");
594 result = DID_RESET;
595 break;
596 case WD719X_SUE_BUSERROR:
597 dev_dbg(&wd->pdev->dev, "SCSI bus error\n");
598 result = DID_ERROR;
599 break;
600 case WD719X_SUE_WRONGWAY:
601 dev_err(&wd->pdev->dev, "wrong data transfer direction\n");
602 result = DID_ERROR;
603 break;
604 case WD719X_SUE_BADPHASE:
605 dev_err(&wd->pdev->dev, "invalid SCSI phase\n");
606 result = DID_ERROR;
607 break;
608 case WD719X_SUE_TOOLONG:
609 dev_err(&wd->pdev->dev, "record too long\n");
610 result = DID_ERROR;
611 break;
612 case WD719X_SUE_BUSFREE:
613 dev_err(&wd->pdev->dev, "unexpected bus free\n");
614		result = DID_NO_CONNECT; /* or DID_ERROR? */
615 break;
616 case WD719X_SUE_ARSDONE:
617 dev_dbg(&wd->pdev->dev, "auto request sense\n");
618 if (regs.bytes.SCSI == 0)
619 result = DID_OK;
620 else
621 result = DID_PARITY;
622 break;
623 case WD719X_SUE_IGNORED:
624 dev_err(&wd->pdev->dev, "target id %d ignored command\n",
625 scb->cmd->device->id);
626 result = DID_NO_CONNECT;
627 break;
628 case WD719X_SUE_WRONGTAGS:
629 dev_err(&wd->pdev->dev, "reversed tags\n");
630 result = DID_ERROR;
631 break;
632 case WD719X_SUE_BADTAGS:
633 dev_err(&wd->pdev->dev, "tag type not supported by target\n");
634 result = DID_ERROR;
635 break;
636 case WD719X_SUE_NOSCAMID:
637 dev_err(&wd->pdev->dev, "no SCAM soft ID available\n");
638 result = DID_ERROR;
639 break;
640 default:
641 dev_warn(&wd->pdev->dev, "unknown SUE error code: 0x%x\n",
642 regs.bytes.SUE);
643 result = DID_ERROR;
644 break;
645 }
646 cmd = scb->cmd;
647
648 wd719x_finish_cmd(cmd, result);
649}
650
651static irqreturn_t wd719x_interrupt(int irq, void *dev_id)
652{
653 struct wd719x *wd = dev_id;
654 union wd719x_regs regs;
655 unsigned long flags;
656 u32 SCB_out;
657
658 spin_lock_irqsave(wd->sh->host_lock, flags);
659 /* read SCB pointer back from card */
660 SCB_out = wd719x_readl(wd, WD719X_AMR_SCB_OUT);
661 /* read all status info at once */
662 regs.all = cpu_to_le32(wd719x_readl(wd, WD719X_AMR_OP_CODE));
663
664 switch (regs.bytes.INT) {
665 case WD719X_INT_NONE:
666 spin_unlock_irqrestore(wd->sh->host_lock, flags);
667 return IRQ_NONE;
668 case WD719X_INT_LINKNOSTATUS:
669 dev_err(&wd->pdev->dev, "linked command completed with no status\n");
670 break;
671 case WD719X_INT_BADINT:
672 dev_err(&wd->pdev->dev, "unsolicited interrupt\n");
673 break;
674 case WD719X_INT_NOERRORS:
675 case WD719X_INT_LINKNOERRORS:
676 case WD719X_INT_ERRORSLOGGED:
677 case WD719X_INT_SPIDERFAILED:
678 /* was the cmd completed a direct or SCB command? */
679 if (regs.bytes.OPC == WD719X_CMD_PROCESS_SCB) {
680 struct wd719x_scb *scb;
681 list_for_each_entry(scb, &wd->active_scbs, list)
682 if (SCB_out == scb->phys)
683 break;
684 if (SCB_out == scb->phys)
685 wd719x_interrupt_SCB(wd, regs, scb);
686 else
687 dev_err(&wd->pdev->dev, "card returned invalid SCB pointer\n");
688 } else
689 dev_warn(&wd->pdev->dev, "direct command 0x%x completed\n",
690 regs.bytes.OPC);
691 break;
692 case WD719X_INT_PIOREADY:
693 dev_err(&wd->pdev->dev, "card indicates PIO data ready but we never use PIO\n");
694 /* interrupt will not be cleared until all data is read */
695 break;
696 default:
697 dev_err(&wd->pdev->dev, "unknown interrupt reason: %d\n",
698 regs.bytes.INT);
699
700 }
701 /* clear interrupt so another can happen */
702 wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
703 spin_unlock_irqrestore(wd->sh->host_lock, flags);
704
705 return IRQ_HANDLED;
706}
707
708static void wd719x_eeprom_reg_read(struct eeprom_93cx6 *eeprom)
709{
710 struct wd719x *wd = eeprom->data;
711 u8 reg = wd719x_readb(wd, WD719X_PCI_GPIO_DATA);
712
713 eeprom->reg_data_out = reg & WD719X_EE_DO;
714}
715
716static void wd719x_eeprom_reg_write(struct eeprom_93cx6 *eeprom)
717{
718 struct wd719x *wd = eeprom->data;
719 u8 reg = 0;
720
721 if (eeprom->reg_data_in)
722 reg |= WD719X_EE_DI;
723 if (eeprom->reg_data_clock)
724 reg |= WD719X_EE_CLK;
725 if (eeprom->reg_chip_select)
726 reg |= WD719X_EE_CS;
727
728 wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, reg);
729}
730
731/* read config from EEPROM so it can be downloaded by the RISC on (re-)init */
732static void wd719x_read_eeprom(struct wd719x *wd)
733{
734 struct eeprom_93cx6 eeprom;
735 u8 gpio;
736 struct wd719x_eeprom_header header;
737
738 eeprom.data = wd;
739 eeprom.register_read = wd719x_eeprom_reg_read;
740 eeprom.register_write = wd719x_eeprom_reg_write;
741 eeprom.width = PCI_EEPROM_WIDTH_93C46;
742
743 /* set all outputs to low */
744 wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, 0);
745 /* configure GPIO pins */
746 gpio = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL);
747 /* GPIO outputs */
748 gpio &= (~(WD719X_EE_CLK | WD719X_EE_DI | WD719X_EE_CS));
749 /* GPIO input */
750 gpio |= WD719X_EE_DO;
751 wd719x_writeb(wd, WD719X_PCI_GPIO_CONTROL, gpio);
752
753 /* read EEPROM header */
754 eeprom_93cx6_multireadb(&eeprom, 0, (u8 *)&header, sizeof(header));
755
756 if (header.sig1 == 'W' && header.sig2 == 'D')
757 eeprom_93cx6_multireadb(&eeprom, header.cfg_offset,
758 (u8 *)wd->params,
759 sizeof(struct wd719x_host_param));
760 else { /* default EEPROM values */
761 dev_warn(&wd->pdev->dev, "EEPROM signature is invalid (0x%02x 0x%02x), using default values\n",
762 header.sig1, header.sig2);
763 wd->params->ch_1_th = 0x10; /* 16 DWs = 64 B */
764 wd->params->scsi_conf = 0x4c; /* 48ma, spue, parity check */
765 wd->params->own_scsi_id = 0x07; /* ID 7, SCAM disabled */
766 wd->params->sel_timeout = 0x4d; /* 250 ms */
767 wd->params->sleep_timer = 0x01;
768 wd->params->cdb_size = cpu_to_le16(0x5555); /* all 6 B */
769 wd->params->scsi_pad = 0x1b;
770 if (wd->type == WD719X_TYPE_7193) /* narrow card - disable */
771 wd->params->wide = cpu_to_le32(0x00000000);
772 else /* initiate & respond to WIDE messages */
773 wd->params->wide = cpu_to_le32(0xffffffff);
774 wd->params->sync = cpu_to_le32(0xffffffff);
775 wd->params->soft_mask = 0x00; /* all disabled */
776 wd->params->unsol_mask = 0x00; /* all disabled */
777 }
778 /* disable TAGGED messages */
779 wd->params->tag_en = cpu_to_le16(0x0000);
780}
781
782/* Read card type from GPIO bits 1 and 3 */
783static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd)
784{
785 u8 card = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL);
786
787 card |= WD719X_GPIO_ID_BITS;
788 wd719x_writeb(wd, WD719X_PCI_GPIO_CONTROL, card);
789 card = wd719x_readb(wd, WD719X_PCI_GPIO_DATA) & WD719X_GPIO_ID_BITS;
790 switch (card) {
791 case 0x08:
792 return WD719X_TYPE_7193;
793 case 0x02:
794 return WD719X_TYPE_7197;
795 case 0x00:
796 return WD719X_TYPE_7296;
797 default:
798 dev_warn(&wd->pdev->dev, "unknown card type 0x%x\n", card);
799 return WD719X_TYPE_UNKNOWN;
800 }
801}
802
803static int wd719x_board_found(struct Scsi_Host *sh)
804{
805 struct wd719x *wd = shost_priv(sh);
806 char *card_types[] = { "Unknown card", "WD7193", "WD7197", "WD7296" };
807 int ret;
808
809 INIT_LIST_HEAD(&wd->active_scbs);
810 INIT_LIST_HEAD(&wd->free_scbs);
811
812 sh->base = pci_resource_start(wd->pdev, 0);
813
814 wd->type = wd719x_detect_type(wd);
815
816 wd->sh = sh;
817 sh->irq = wd->pdev->irq;
818 wd->fw_virt = NULL;
819
820 /* memory area for host (EEPROM) parameters */
821 wd->params = pci_alloc_consistent(wd->pdev,
822 sizeof(struct wd719x_host_param),
823 &wd->params_phys);
824 if (!wd->params) {
825 dev_warn(&wd->pdev->dev, "unable to allocate parameter buffer\n");
826 return -ENOMEM;
827 }
828
829 /* memory area for the RISC for hash table of outstanding requests */
830 wd->hash_virt = pci_alloc_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE,
831 &wd->hash_phys);
832 if (!wd->hash_virt) {
833 dev_warn(&wd->pdev->dev, "unable to allocate hash buffer\n");
834 ret = -ENOMEM;
835 goto fail_free_params;
836 }
837
838 ret = request_irq(wd->pdev->irq, wd719x_interrupt, IRQF_SHARED,
839 "wd719x", wd);
840 if (ret) {
841 dev_warn(&wd->pdev->dev, "unable to assign IRQ %d\n",
842 wd->pdev->irq);
843 goto fail_free_hash;
844 }
845
846 /* read parameters from EEPROM */
847 wd719x_read_eeprom(wd);
848
849 ret = wd719x_chip_init(wd);
850 if (ret)
851 goto fail_free_irq;
852
853 sh->this_id = wd->params->own_scsi_id & WD719X_EE_SCSI_ID_MASK;
854
855 dev_info(&wd->pdev->dev, "%s at I/O 0x%lx, IRQ %u, SCSI ID %d\n",
856 card_types[wd->type], sh->base, sh->irq, sh->this_id);
857
858 return 0;
859
860fail_free_irq:
861 free_irq(wd->pdev->irq, wd);
862fail_free_hash:
863 pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
864 wd->hash_phys);
865fail_free_params:
866 pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param),
867 wd->params, wd->params_phys);
868
869 return ret;
870}
871
872static struct scsi_host_template wd719x_template = {
873 .name = "Western Digital 719x",
874 .queuecommand = wd719x_queuecommand,
875 .eh_abort_handler = wd719x_abort,
876 .eh_device_reset_handler = wd719x_dev_reset,
877 .eh_bus_reset_handler = wd719x_bus_reset,
878 .eh_host_reset_handler = wd719x_host_reset,
879 .bios_param = wd719x_biosparam,
880 .proc_name = "wd719x",
881 .can_queue = 255,
882 .this_id = 7,
883 .sg_tablesize = WD719X_SG,
884 .cmd_per_lun = WD719X_CMD_PER_LUN,
885 .use_clustering = ENABLE_CLUSTERING,
886};
887
888static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d)
889{
890 int err;
891 struct Scsi_Host *sh;
892 struct wd719x *wd;
893
894 err = pci_enable_device(pdev);
895 if (err)
896 goto fail;
897
898 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
899 dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n");
900 goto disable_device;
901 }
902
903 err = pci_request_regions(pdev, "wd719x");
904 if (err)
905 goto disable_device;
906 pci_set_master(pdev);
907
908 err = -ENODEV;
909 if (pci_resource_len(pdev, 0) == 0)
910 goto release_region;
911
912 err = -ENOMEM;
913 sh = scsi_host_alloc(&wd719x_template, sizeof(struct wd719x));
914 if (!sh)
915 goto release_region;
916
917 wd = shost_priv(sh);
918 wd->base = pci_iomap(pdev, 0, 0);
919 if (!wd->base)
920 goto free_host;
921 wd->pdev = pdev;
922
923 err = wd719x_board_found(sh);
924 if (err)
925 goto unmap;
926
927 err = scsi_add_host(sh, &wd->pdev->dev);
928 if (err)
929 goto destroy;
930
931 scsi_scan_host(sh);
932
933 pci_set_drvdata(pdev, sh);
934 return 0;
935
936destroy:
937 wd719x_destroy(wd);
938unmap:
939 pci_iounmap(pdev, wd->base);
940free_host:
941 scsi_host_put(sh);
942release_region:
943 pci_release_regions(pdev);
944disable_device:
945 pci_disable_device(pdev);
946fail:
947 return err;
948}
949
950
951static void wd719x_pci_remove(struct pci_dev *pdev)
952{
953 struct Scsi_Host *sh = pci_get_drvdata(pdev);
954 struct wd719x *wd = shost_priv(sh);
955
956 scsi_remove_host(sh);
957 wd719x_destroy(wd);
958 pci_iounmap(pdev, wd->base);
959 pci_release_regions(pdev);
960 pci_disable_device(pdev);
961
962 scsi_host_put(sh);
963}
964
965static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = {
966 { PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
967 {}
968};
969
970MODULE_DEVICE_TABLE(pci, wd719x_pci_table);
971
972static struct pci_driver wd719x_pci_driver = {
973 .name = "wd719x",
974 .id_table = wd719x_pci_table,
975 .probe = wd719x_pci_probe,
976 .remove = wd719x_pci_remove,
977};
978
979static int __init wd719x_init(void)
980{
981 return pci_register_driver(&wd719x_pci_driver);
982}
983
984static void __exit wd719x_exit(void)
985{
986 pci_unregister_driver(&wd719x_pci_driver);
987}
988
989module_init(wd719x_init);
990module_exit(wd719x_exit);
991
992MODULE_DESCRIPTION("Western Digital WD7193/7197/7296 SCSI driver");
993MODULE_AUTHOR("Ondrej Zary, Aaron Dewell, Juergen Gaertner");
994MODULE_LICENSE("GPL");
995MODULE_FIRMWARE("wd719x-wcs.bin");
996MODULE_FIRMWARE("wd719x-risc.bin");
diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h
new file mode 100644
index 000000000000..185e30e4eb93
--- /dev/null
+++ b/drivers/scsi/wd719x.h
@@ -0,0 +1,249 @@
1#ifndef _WD719X_H_
2#define _WD719X_H_
3
4#define WD719X_SG 255 /* Scatter/gather size */
5#define WD719X_CMD_PER_LUN 1 /* We should be able to do linked commands, but
6 * this is 1 for now to be safe. */
7
8struct wd719x_sglist {
9 __le32 ptr;
10 __le32 length;
11} __packed;
12
13enum wd719x_card_type {
14 WD719X_TYPE_UNKNOWN = 0,
15 WD719X_TYPE_7193,
16 WD719X_TYPE_7197,
17 WD719X_TYPE_7296,
18};
19
20union wd719x_regs {
21 __le32 all; /* All Status at once */
22 struct {
23 u8 OPC; /* Opcode register */
24 u8 SCSI; /* SCSI Errors */
25 u8 SUE; /* Spider unique Errors */
26 u8 INT; /* Interrupt Status */
27 } bytes;
28};
29
30/* Spider Command Block (SCB) */
31struct wd719x_scb {
32 __le32 Int_SCB; /* 00-03 Internal SCB link pointer (must be cleared) */
33 u8 SCB_opcode; /* 04 SCB Command opcode */
34 u8 CDB_tag; /* 05 SCSI Tag byte for CDB queues (0 if untagged) */
35 u8 lun; /* 06 SCSI LUN */
36 u8 devid; /* 07 SCSI Device ID */
37	u8 CDB[16];	/* 08-23 SCSI CDB (16 bytes as defined by ANSI spec) */
38 __le32 data_p; /* 24-27 Data transfer address (or SG list address) */
39 __le32 data_length; /* 28-31 Data transfer Length (or SG list length) */
40 __le32 CDB_link; /* 32-35 SCSI CDB Link Ptr */
41 __le32 sense_buf; /* 36-39 Auto request sense buffer address */
42 u8 sense_buf_length;/* 40 Auto request sense transfer length */
43 u8 reserved; /* 41 reserved */
44 u8 SCB_options; /* 42 SCB-options */
45 u8 SCB_tag_msg; /* 43 Tagged messages options */
46 /* Not filled in by host */
47 __le32 req_ptr; /* 44-47 Ptr to Host Request returned on interrupt */
48 u8 host_opcode; /* 48 Host Command Opcode (same as AMR_00) */
49 u8 scsi_stat; /* 49 SCSI Status returned */
50 u8 ret_error; /* 50 SPIDER Unique Error Code returned (SUE) */
51	u8 int_stat;	/* 51 Message byte / Interrupt Status byte returned */
52 __le32 transferred; /* 52-55 Bytes Transferred */
53 u8 last_trans[3]; /* 56-58 Bytes Transferred in last session */
54 u8 length; /* 59 SCSI Messages Length (1-8) */
55 u8 sync_offset; /* 60 Synchronous offset */
56 u8 sync_rate; /* 61 Synchronous rate */
57 u8 flags[2]; /* 62-63 SCB specific flags (local to each thread) */
58 /* everything below is for driver use (not used by card) */
59 dma_addr_t phys; /* bus address of the SCB */
60 struct scsi_cmnd *cmd; /* a copy of the pointer we were passed */
61 struct list_head list;
62 struct wd719x_sglist sg_list[WD719X_SG] __aligned(8); /* SG list */
63} __packed;
64
65struct wd719x {
66 struct Scsi_Host *sh; /* pointer to host structure */
67 struct pci_dev *pdev;
68 void __iomem *base;
69 enum wd719x_card_type type; /* type of card */
70 void *fw_virt; /* firmware buffer CPU address */
71 dma_addr_t fw_phys; /* firmware buffer bus address */
72 size_t fw_size; /* firmware buffer size */
73 struct wd719x_host_param *params; /* host parameters (EEPROM) */
74 dma_addr_t params_phys; /* host parameters bus address */
75 void *hash_virt; /* hash table CPU address */
76 dma_addr_t hash_phys; /* hash table bus address */
77 struct list_head active_scbs;
78 struct list_head free_scbs;
79};
80
81/* timeout delays in microsecs */
82#define WD719X_WAIT_FOR_CMD_READY 500
83#define WD719X_WAIT_FOR_RISC 2000
84#define WD719X_WAIT_FOR_SCSI_RESET 3000000
85
86/* All commands except 0x00 generate an interrupt */
87#define WD719X_CMD_READY 0x00 /* Command register ready (or noop) */
88#define WD719X_CMD_INIT_RISC 0x01 /* Initialize RISC */
89/* 0x02 is reserved */
90#define WD719X_CMD_BUSRESET 0x03 /* Assert SCSI bus reset */
91#define WD719X_CMD_READ_FIRMVER 0x04 /* Read the Firmware Revision */
92#define WD719X_CMD_ECHO_BYTES 0x05 /* Echo command bytes (DW) */
93/* 0x06 is reserved */
94/* 0x07 is reserved */
95#define WD719X_CMD_GET_PARAM 0x08 /* Get programmable parameters */
96#define WD719X_CMD_SET_PARAM 0x09 /* Set programmable parameters */
97#define WD719X_CMD_SLEEP 0x0a /* Put SPIDER to sleep */
98#define WD719X_CMD_READ_INIT 0x0b /* Read initialization parameters */
99#define WD719X_CMD_RESTORE_INIT 0x0c /* Restore initialization parameters */
100/* 0x0d is reserved */
101/* 0x0e is reserved */
102/* 0x0f is reserved */
103#define WD719X_CMD_ABORT_TAG 0x10 /* Send Abort tag message to target */
104#define WD719X_CMD_ABORT 0x11 /* Send Abort message to target */
105#define WD719X_CMD_RESET 0x12 /* Send Reset message to target */
106#define WD719X_CMD_INIT_SCAM 0x13 /* Initiate SCAM */
107#define WD719X_CMD_GET_SYNC 0x14 /* Get synchronous rates */
108#define WD719X_CMD_SET_SYNC 0x15 /* Set synchronous rates */
109#define WD719X_CMD_GET_WIDTH 0x16 /* Get SCSI bus width */
110#define WD719X_CMD_SET_WIDTH 0x17 /* Set SCSI bus width */
111#define WD719X_CMD_GET_TAGS 0x18 /* Get tag flags */
112#define WD719X_CMD_SET_TAGS 0x19 /* Set tag flags */
113#define WD719X_CMD_GET_PARAM2 0x1a /* Get programmable params (format 2) */
114#define WD719X_CMD_SET_PARAM2 0x1b /* Set programmable params (format 2) */
115/* Commands with request pointers (mailbox) */
116#define WD719X_CMD_PROCESS_SCB 0x80 /* Process SCSI Control Block (SCB) */
117/* No interrupt generated on acceptance of SCB pointer */
118
119/* interrupt status defines */
120#define WD719X_INT_NONE 0x00 /* No interrupt pending */
121#define WD719X_INT_NOERRORS 0x01 /* Command completed with no errors */
122#define WD719X_INT_LINKNOERRORS 0x02 /* link cmd completed with no errors */
123#define WD719X_INT_LINKNOSTATUS 0x03 /* link cmd completed with no flag set */
124#define WD719X_INT_ERRORSLOGGED 0x04 /* cmd completed with errors logged */
125#define WD719X_INT_SPIDERFAILED 0x05 /* cmd failed without valid SCSI status */
126#define WD719X_INT_BADINT 0x80 /* unsolicited interrupt */
127#define WD719X_INT_PIOREADY 0xf0 /* data ready for PIO output */
128
129/* Spider Unique Error Codes (SUE) */
130#define WD719X_SUE_NOERRORS 0x00 /* No errors detected by SPIDER */
131#define WD719X_SUE_REJECTED 0x01 /* Command Rejected (bad opcode/param) */
132#define WD719X_SUE_SCBQFULL 0x02 /* SCB queue full */
133/* 0x03 is reserved */
134#define WD719X_SUE_TERM		0x04	/* Host terminated SCB via primitive cmd */
135#define WD719X_SUE_CHAN1PAR 0x05 /* PCI Channel 1 parity error occurred */
136#define WD719X_SUE_CHAN1ABORT 0x06 /* PCI Channel 1 system abort occurred */
137#define WD719X_SUE_CHAN23PAR 0x07 /* PCI Channel 2/3 parity error occurred */
138#define WD719X_SUE_CHAN23ABORT 0x08 /* PCI Channel 2/3 system abort occurred */
139#define WD719X_SUE_TIMEOUT 0x10 /* Selection/reselection timeout */
140#define WD719X_SUE_RESET 0x11 /* SCSI bus reset occurred */
141#define WD719X_SUE_BUSERROR 0x12 /* SCSI bus error */
142#define WD719X_SUE_WRONGWAY 0x13 /* Wrong data transfer dir set by target */
143#define WD719X_SUE_BADPHASE 0x14 /* SCSI phase illegal or unexpected */
144#define WD719X_SUE_TOOLONG 0x15 /* target requested too much data */
145#define WD719X_SUE_BUSFREE 0x16 /* Unexpected SCSI bus free */
146#define WD719X_SUE_ARSDONE 0x17 /* Auto request sense executed */
147#define WD719X_SUE_IGNORED 0x18 /* SCSI message was ignored by target */
148#define WD719X_SUE_WRONGTAGS 0x19 /* Tagged SCB & tags off (or vice versa) */
149#define WD719X_SUE_BADTAGS 0x1a /* Wrong tag message type for target */
150#define WD719X_SUE_NOSCAMID 0x1b /* No SCAM soft ID available */
151
152/* code sizes */
153#define WD719X_HASH_TABLE_SIZE 4096
154
155/* Advanced Mode Registers */
156/* Regs 0x00..0x1f are for Advanced Mode of the card (RISC is running). */
157#define WD719X_AMR_COMMAND 0x00
158#define WD719X_AMR_CMD_PARAM 0x01
159#define WD719X_AMR_CMD_PARAM_2 0x02
160#define WD719X_AMR_CMD_PARAM_3 0x03
161#define WD719X_AMR_SCB_IN 0x04
162
163#define WD719X_AMR_BIOS_SHARE_INT 0x0f
164
165#define WD719X_AMR_SCB_OUT 0x18
166#define WD719X_AMR_OP_CODE 0x1c
167#define WD719X_AMR_SCSI_STATUS 0x1d
168#define WD719X_AMR_SCB_ERROR 0x1e
169#define WD719X_AMR_INT_STATUS 0x1f
170
171#define WD719X_DISABLE_INT 0x80
172
173/* SCB flags */
174#define WD719X_SCB_FLAGS_CHECK_DIRECTION 0x01
175#define WD719X_SCB_FLAGS_PCI_TO_SCSI 0x02
176#define WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE 0x10
177#define WD719X_SCB_FLAGS_DO_SCATTER_GATHER 0x20
178#define WD719X_SCB_FLAGS_NO_DISCONNECT 0x40
179
180/* PCI Registers used for reset, initial code download */
181/* Regs 0x20..0x3f are for Normal (DOS) mode (RISC is asleep). */
182#define WD719X_PCI_GPIO_CONTROL 0x3C
183#define WD719X_PCI_GPIO_DATA 0x3D
184#define WD719X_PCI_PORT_RESET 0x3E
185#define WD719X_PCI_MODE_SELECT 0x3F
186
187#define WD719X_PCI_EXTERNAL_ADDR 0x60
188#define WD719X_PCI_INTERNAL_ADDR 0x64
189#define WD719X_PCI_DMA_TRANSFER_SIZE 0x66
190#define WD719X_PCI_CHANNEL2_3CMD 0x68
191#define WD719X_PCI_CHANNEL2_3STATUS 0x69
192
193#define WD719X_GPIO_ID_BITS 0x0a
194#define WD719X_PRAM_BASE_ADDR 0x00
195
196/* codes written to or read from the card */
197#define WD719X_PCI_RESET 0x01
198#define WD719X_ENABLE_ADVANCE_MODE 0x01
199
200#define WD719X_START_CHANNEL2_3DMA 0x17
201#define WD719X_START_CHANNEL2_3DONE 0x01
202#define WD719X_START_CHANNEL2_3ABORT 0x20
203
204/* 33C296 GPIO bits for EEPROM pins */
205#define WD719X_EE_DI (1 << 1)
206#define WD719X_EE_CS (1 << 2)
207#define WD719X_EE_CLK (1 << 3)
208#define WD719X_EE_DO (1 << 4)
209
210/* EEPROM contents */
211struct wd719x_eeprom_header {
212 u8 sig1;
213 u8 sig2;
214 u8 version;
215 u8 checksum;
216 u8 cfg_offset;
217 u8 cfg_size;
218 u8 setup_offset;
219 u8 setup_size;
220} __packed;
221
222#define WD719X_EE_SIG1 0
223#define WD719X_EE_SIG2 1
224#define WD719X_EE_VERSION 2
225#define WD719X_EE_CHECKSUM 3
226#define WD719X_EE_CFG_OFFSET 4
227#define WD719X_EE_CFG_SIZE 5
228#define WD719X_EE_SETUP_OFFSET 6
229#define WD719X_EE_SETUP_SIZE 7
230
231#define WD719X_EE_SCSI_ID_MASK 0xf
232
233/* SPIDER Host Parameters Block (=EEPROM configuration block) */
234struct wd719x_host_param {
235 u8 ch_1_th; /* FIFO threshold */
236 u8 scsi_conf; /* SCSI configuration */
237 u8 own_scsi_id; /* controller SCSI ID */
238	u8 sel_timeout;	/* selection timeout */
239	u8 sleep_timer;	/* sleep timer */
240 __le16 cdb_size;/* CDB size groups */
241 __le16 tag_en; /* Tag msg enables (ID 0-15) */
242 u8 scsi_pad; /* SCSI pad control */
243 __le32 wide; /* WIDE msg options (ID 0-15) */
244 __le32 sync; /* SYNC msg options (ID 0-15) */
245 u8 soft_mask; /* soft error mask */
246 u8 unsol_mask; /* unsolicited error mask */
247} __packed;
248
249#endif /* _WD719X_H_ */
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 0ed96644ec94..4d1b7224a7f2 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -110,31 +110,6 @@ static struct device_driver tcm_loop_driverfs = {
110 */ 110 */
111struct device *tcm_loop_primary; 111struct device *tcm_loop_primary;
112 112
113/*
114 * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
115 * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
116 */
117static int tcm_loop_change_queue_depth(
118 struct scsi_device *sdev,
119 int depth,
120 int reason)
121{
122 switch (reason) {
123 case SCSI_QDEPTH_DEFAULT:
124 scsi_adjust_queue_depth(sdev, depth);
125 break;
126 case SCSI_QDEPTH_QFULL:
127 scsi_track_queue_full(sdev, depth);
128 break;
129 case SCSI_QDEPTH_RAMP_UP:
130 scsi_adjust_queue_depth(sdev, depth);
131 break;
132 default:
133 return -EOPNOTSUPP;
134 }
135 return sdev->queue_depth;
136}
137
138static void tcm_loop_submission_work(struct work_struct *work) 113static void tcm_loop_submission_work(struct work_struct *work)
139{ 114{
140 struct tcm_loop_cmd *tl_cmd = 115 struct tcm_loop_cmd *tl_cmd =
@@ -409,7 +384,7 @@ static struct scsi_host_template tcm_loop_driver_template = {
409 .proc_name = "tcm_loopback", 384 .proc_name = "tcm_loopback",
410 .name = "TCM_Loopback", 385 .name = "TCM_Loopback",
411 .queuecommand = tcm_loop_queuecommand, 386 .queuecommand = tcm_loop_queuecommand,
412 .change_queue_depth = tcm_loop_change_queue_depth, 387 .change_queue_depth = scsi_change_queue_depth,
413 .change_queue_type = scsi_change_queue_type, 388 .change_queue_type = scsi_change_queue_type,
414 .eh_abort_handler = tcm_loop_abort_task, 389 .eh_abort_handler = tcm_loop_abort_task,
415 .eh_device_reset_handler = tcm_loop_device_reset, 390 .eh_device_reset_handler = tcm_loop_device_reset,
@@ -423,6 +398,7 @@ static struct scsi_host_template tcm_loop_driver_template = {
423 .slave_alloc = tcm_loop_slave_alloc, 398 .slave_alloc = tcm_loop_slave_alloc,
424 .module = THIS_MODULE, 399 .module = THIS_MODULE,
425 .use_blk_tags = 1, 400 .use_blk_tags = 1,
401 .track_queue_depth = 1,
426}; 402};
427 403
428static int tcm_loop_driver_probe(struct device *dev) 404static int tcm_loop_driver_probe(struct device *dev)
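The loopback conversion above is representative of the end state for drivers
that only ever adjusted the default depth: the private callback goes away,
.change_queue_depth points straight at the generic helper, and the new
track_queue_depth flag opts into automatic QUEUE_FULL handling. Reduced to
just the affected fields, a converted template looks like this (mydrv and
mydrv_queuecommand are hypothetical placeholders):

	#include <scsi/scsi_host.h>

	static struct scsi_host_template mydrv_template = {
		.name			= "mydrv",
		.queuecommand		= mydrv_queuecommand,
		.change_queue_depth	= scsi_change_queue_depth,
		.use_blk_tags		= 1,
		.track_queue_depth	= 1,
	};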
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 33f211b56a42..4047edfb64e1 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -799,7 +799,7 @@ static int uas_slave_configure(struct scsi_device *sdev)
799 if (devinfo->flags & US_FL_NO_REPORT_OPCODES) 799 if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
800 sdev->no_report_opcodes = 1; 800 sdev->no_report_opcodes = 1;
801 801
802 scsi_adjust_queue_depth(sdev, devinfo->qdepth - 2); 802 scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
803 return 0; 803 return 0;
804} 804}
805 805
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index e50f98b0297a..eb0b1988050a 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -75,6 +75,10 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
75 const u8 word, u16 *data); 75 const u8 word, u16 *data);
76extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, 76extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
77 const u8 word, __le16 *data, const u16 words); 77 const u8 word, __le16 *data, const u16 words);
78extern void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom,
79 const u8 byte, u8 *data);
80extern void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom,
81 const u8 byte, u8 *data, const u16 bytes);
78 82
79extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); 83extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
80 84
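The byte-granularity readers are new with this series, added for wd719x,
which reads its EEPROM contents byte-wise rather than word-wise. Usage only
requires wiring up the GPIO bit-bang callbacks first; condensed from
wd719x_read_eeprom() earlier in this patch (mydrv_read_header is a
hypothetical wrapper):

	static void mydrv_read_header(struct wd719x *wd)
	{
		struct eeprom_93cx6 eeprom = {
			.data		= wd,
			.register_read	= wd719x_eeprom_reg_read,
			.register_write	= wd719x_eeprom_reg_write,
			.width		= PCI_EEPROM_WIDTH_93C46,
		};
		struct wd719x_eeprom_header header;

		/* read the 8-byte header starting at byte offset 0 */
		eeprom_93cx6_multireadb(&eeprom, 0, (u8 *)&header,
					sizeof(header));
	}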
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bd5fefeaf548..bfbc817c34ee 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1191,9 +1191,9 @@ extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
1191extern int ata_scsi_slave_config(struct scsi_device *sdev); 1191extern int ata_scsi_slave_config(struct scsi_device *sdev);
1192extern void ata_scsi_slave_destroy(struct scsi_device *sdev); 1192extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
1193extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, 1193extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
1194 int queue_depth, int reason); 1194 int queue_depth);
1195extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, 1195extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
1196 int queue_depth, int reason); 1196 int queue_depth);
1197extern struct ata_device *ata_dev_pair(struct ata_device *adev); 1197extern struct ata_device *ata_dev_pair(struct ata_device *adev);
1198extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); 1198extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
1199extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); 1199extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 2e0cf568a9c1..93d14daf0994 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -1105,7 +1105,6 @@ int fc_eh_abort(struct scsi_cmnd *);
1105int fc_eh_device_reset(struct scsi_cmnd *); 1105int fc_eh_device_reset(struct scsi_cmnd *);
1106int fc_eh_host_reset(struct scsi_cmnd *); 1106int fc_eh_host_reset(struct scsi_cmnd *);
1107int fc_slave_alloc(struct scsi_device *); 1107int fc_slave_alloc(struct scsi_device *);
1108int fc_change_queue_depth(struct scsi_device *, int qdepth, int reason);
1109 1108
1110/* 1109/*
1111 * ELS/CT interface 1110 * ELS/CT interface
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 728c9ad9feb0..4d1c46aac331 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -378,8 +378,6 @@ struct iscsi_host {
378/* 378/*
379 * scsi host template 379 * scsi host template
380 */ 380 */
381extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth,
382 int reason);
383extern int iscsi_eh_abort(struct scsi_cmnd *sc); 381extern int iscsi_eh_abort(struct scsi_cmnd *sc);
384extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); 382extern int iscsi_eh_recover_target(struct scsi_cmnd *sc);
385extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); 383extern int iscsi_eh_session_reset(struct scsi_cmnd *sc);
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index ef7872c20da9..832dcc9f86ec 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -365,12 +365,6 @@ struct asd_sas_phy {
365struct scsi_core { 365struct scsi_core {
366 struct Scsi_Host *shost; 366 struct Scsi_Host *shost;
367 367
368 struct mutex task_queue_flush;
369 spinlock_t task_queue_lock;
370 struct list_head task_queue;
371 int task_queue_size;
372
373 struct task_struct *queue_thread;
374}; 368};
375 369
376struct sas_ha_event { 370struct sas_ha_event {
@@ -422,9 +416,6 @@ struct sas_ha_struct {
422 struct asd_sas_port **sas_port; /* array of valid pointers, must be set */ 416 struct asd_sas_port **sas_port; /* array of valid pointers, must be set */
423 int num_phys; /* must be set, gt 0, static */ 417 int num_phys; /* must be set, gt 0, static */
424 418
425 /* The class calls this to send a task for execution. */
426 int lldd_max_execute_num;
427 int lldd_queue_size;
428 int strict_wide_ports; /* both sas_addr and attached_sas_addr must match 419 int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
429 * their siblings when forming wide ports */ 420 * their siblings when forming wide ports */
430 421
@@ -612,7 +603,6 @@ struct sas_ssp_task {
612 603
613struct sas_task { 604struct sas_task {
614 struct domain_device *dev; 605 struct domain_device *dev;
615 struct list_head list;
616 606
617 spinlock_t task_state_lock; 607 spinlock_t task_state_lock;
618 unsigned task_state_flags; 608 unsigned task_state_flags;
@@ -665,8 +655,7 @@ struct sas_domain_function_template {
665 int (*lldd_dev_found)(struct domain_device *); 655 int (*lldd_dev_found)(struct domain_device *);
666 void (*lldd_dev_gone)(struct domain_device *); 656 void (*lldd_dev_gone)(struct domain_device *);
667 657
668 int (*lldd_execute_task)(struct sas_task *, int num, 658 int (*lldd_execute_task)(struct sas_task *, gfp_t gfp_flags);
669 gfp_t gfp_flags);
670 659
671 /* Task Management Functions. Must be called from process context. */ 660 /* Task Management Functions. Must be called from process context. */
672 int (*lldd_abort_task)(struct sas_task *); 661 int (*lldd_abort_task)(struct sas_task *);
@@ -700,12 +689,10 @@ extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
700int sas_set_phy_speed(struct sas_phy *phy, 689int sas_set_phy_speed(struct sas_phy *phy,
701 struct sas_phy_linkrates *rates); 690 struct sas_phy_linkrates *rates);
702int sas_phy_reset(struct sas_phy *phy, int hard_reset); 691int sas_phy_reset(struct sas_phy *phy, int hard_reset);
703int sas_queue_up(struct sas_task *task);
704extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *); 692extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *);
705extern int sas_target_alloc(struct scsi_target *); 693extern int sas_target_alloc(struct scsi_target *);
706extern int sas_slave_configure(struct scsi_device *); 694extern int sas_slave_configure(struct scsi_device *);
707extern int sas_change_queue_depth(struct scsi_device *, int new_depth, 695extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
708 int reason);
709extern int sas_change_queue_type(struct scsi_device *, int qt); 696extern int sas_change_queue_type(struct scsi_device *, int qt);
710extern int sas_bios_param(struct scsi_device *, 697extern int sas_bios_param(struct scsi_device *,
711 struct block_device *, 698 struct block_device *,
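With the libsas-internal execution queue removed, lldd_execute_task() is
called for exactly one task at a time, so the num batching argument and the
per-task list linkage go away. A converted LLDD hook shrinks to a sketch
like this (the mydrv_* names are hypothetical placeholders):

	static int mydrv_execute_task(struct sas_task *task, gfp_t gfp_flags)
	{
		/* exactly one task per call now; no more batching */
		return mydrv_queue_task(task, gfp_flags);
	}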
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 0aeaa003c3c1..6364e23454dd 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -380,7 +380,7 @@ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
380#define __shost_for_each_device(sdev, shost) \ 380#define __shost_for_each_device(sdev, shost) \
381 list_for_each_entry((sdev), &((shost)->__devices), siblings) 381 list_for_each_entry((sdev), &((shost)->__devices), siblings)
382 382
383extern void scsi_adjust_queue_depth(struct scsi_device *, int); 383extern int scsi_change_queue_depth(struct scsi_device *, int);
384extern int scsi_track_queue_full(struct scsi_device *, int); 384extern int scsi_track_queue_full(struct scsi_device *, int);
385 385
386extern int scsi_set_medium_removal(struct scsi_device *, char); 386extern int scsi_set_medium_removal(struct scsi_device *, char);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 61a81bf77e28..c8a462ef9a4e 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -46,12 +46,6 @@ struct blk_queue_tags;
46#define DISABLE_CLUSTERING 0 46#define DISABLE_CLUSTERING 0
47#define ENABLE_CLUSTERING 1 47#define ENABLE_CLUSTERING 1
48 48
49enum {
50 SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */
51 SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */
52 SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshold event */
53};
54
55struct scsi_host_template { 49struct scsi_host_template {
56 struct module *module; 50 struct module *module;
57 const char *name; 51 const char *name;
@@ -195,7 +189,7 @@ struct scsi_host_template {
195 * Things currently recommended to be handled at this time include: 189 * Things currently recommended to be handled at this time include:
196 * 190 *
197 * 1. Setting the device queue depth. Proper setting of this is 191 * 1. Setting the device queue depth. Proper setting of this is
198 * described in the comments for scsi_adjust_queue_depth. 192 * described in the comments for scsi_change_queue_depth.
199 * 2. Determining if the device supports the various synchronous 193 * 2. Determining if the device supports the various synchronous
200 * negotiation protocols. The device struct will already have 194 * negotiation protocols. The device struct will already have
201 * responded to INQUIRY and the results of the standard items 195 * responded to INQUIRY and the results of the standard items
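The comment now points at the renamed helper; the uas hunk above shows the
usual slave_configure()-time call, i.e. picking a depth once the device is
known. Schematically (mydrv_slave_configure and MYDRV_MAX_DEPTH are
hypothetical):

	static int mydrv_slave_configure(struct scsi_device *sdev)
	{
		/* MYDRV_MAX_DEPTH: a hypothetical per-device limit */
		scsi_change_queue_depth(sdev, MYDRV_MAX_DEPTH);
		return 0;
	}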
@@ -281,7 +275,7 @@ struct scsi_host_template {
281 * 275 *
282 * Status: OPTIONAL 276 * Status: OPTIONAL
283 */ 277 */
284 int (* change_queue_depth)(struct scsi_device *, int, int); 278 int (* change_queue_depth)(struct scsi_device *, int);
285 279
286 /* 280 /*
287 * Fill in this function to allow the changing of tag types 281 * Fill in this function to allow the changing of tag types
@@ -427,6 +421,11 @@ struct scsi_host_template {
427 unsigned use_blk_tags:1; 421 unsigned use_blk_tags:1;
428 422
429 /* 423 /*
424 * Track QUEUE_FULL events and reduce queue depth on demand.
425 */
426 unsigned track_queue_depth:1;
427
428 /*
430 * This specifies the mode that a LLD supports. 429 * This specifies the mode that a LLD supports.
431 */ 430 */
432 unsigned supported_mode:2; 431 unsigned supported_mode:2;