aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-07 20:30:44 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-07 20:30:44 -0500
commitc8b6de16d9434405e5832b8772e4f986ddd5118e (patch)
tree03d5d92be22e83778e3cf1367f8b6847eb953eb6 /drivers
parenta6a852e93705121e2b90bd41ad50e85a508699aa (diff)
parent8e31e607ea050c0df1483d8b6cdd5b1395c03cbe (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (48 commits) [SCSI] aacraid: do not set valid bit in sense information [SCSI] ses: add new Enclosure ULD [SCSI] enclosure: add support for enclosure services [SCSI] sr: fix test unit ready responses [SCSI] u14-34f: fix data direction bug [SCSI] aacraid: pci_set_dma_max_seg_size opened up for late model controllers [SCSI] fix BUG when sum(scatterlist) > bufflen [SCSI] arcmsr: updates (1.20.00.15) [SCSI] advansys: make 3 functions static [SCSI] Small cleanups for scsi_host.h [SCSI] dc395x: fix uninitialized var warning [SCSI] NCR53C9x: remove driver [SCSI] remove m68k NCR53C9x based drivers [SCSI] dec_esp: Remove driver [SCSI] kernel-doc: fix scsi docbook [SCSI] update my email address [SCSI] add protocol definitions [SCSI] sd: handle bad lba in sense information [SCSI] qla2xxx: Update version number to 8.02.00-k8. [SCSI] qla2xxx: Correct issue where incorrect init-fw mailbox command was used on non-NPIV capable ISPs. ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/misc/Kconfig9
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/enclosure.c484
-rw-r--r--drivers/scsi/Kconfig93
-rw-r--r--drivers/scsi/Makefile12
-rw-r--r--drivers/scsi/NCR53C9x.c3654
-rw-r--r--drivers/scsi/NCR53C9x.h668
-rw-r--r--drivers/scsi/aacraid/aachba.c81
-rw-r--r--drivers/scsi/aacraid/commctrl.c26
-rw-r--r--drivers/scsi/aacraid/linit.c28
-rw-r--r--drivers/scsi/advansys.c6
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h4
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c87
-rw-r--r--drivers/scsi/arm/acornscsi.c14
-rw-r--r--drivers/scsi/arm/scsi.h87
-rw-r--r--drivers/scsi/blz1230.c353
-rw-r--r--drivers/scsi/blz2060.c306
-rw-r--r--drivers/scsi/cyberstorm.c377
-rw-r--r--drivers/scsi/cyberstormII.c314
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/dec_esp.c687
-rw-r--r--drivers/scsi/fastlane.c421
-rw-r--r--drivers/scsi/iscsi_tcp.c57
-rw-r--r--drivers/scsi/libiscsi.c137
-rw-r--r--drivers/scsi/mac_esp.c751
-rw-r--r--drivers/scsi/mca_53c9x.c520
-rw-r--r--drivers/scsi/oktagon_esp.c606
-rw-r--r--drivers/scsi/oktagon_io.S194
-rw-r--r--drivers/scsi/ps3rom.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c24
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c87
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c404
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c1
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c75
-rw-r--r--drivers/scsi/scsi.c5
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c238
-rw-r--r--drivers/scsi/sd.c34
-rw-r--r--drivers/scsi/ses.c689
-rw-r--r--drivers/scsi/sr.c49
-rw-r--r--drivers/scsi/sr.h1
-rw-r--r--drivers/scsi/sr_ioctl.c3
-rw-r--r--drivers/scsi/sun3x_esp.c546
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/scsi/u14-34f.c2
52 files changed, 2231 insertions, 9988 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 78cd33861766..7b5220ca7d7f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -285,4 +285,13 @@ config INTEL_MENLOW
285 285
286 If unsure, say N. 286 If unsure, say N.
287 287
288config ENCLOSURE_SERVICES
289 tristate "Enclosure Services"
290 default n
291 help
292 Provides support for intelligent enclosures (bays which
293 contain storage devices). You also need either a host
294 driver (SCSI/ATA) which supports enclosures
295 or a SCSI enclosure device (SES) to use these services.
296
288endif # MISC_DEVICES 297endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 1f41654aae4d..7f13549cc87e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
20obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o 20obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
21obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o 21obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
22obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o 22obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
23obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
new file mode 100644
index 000000000000..6fcb0e96adf4
--- /dev/null
+++ b/drivers/misc/enclosure.c
@@ -0,0 +1,484 @@
1/*
2 * Enclosure Services
3 *
4 * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
5 *
6**-----------------------------------------------------------------------------
7**
8** This program is free software; you can redistribute it and/or
9** modify it under the terms of the GNU General Public License
10** version 2 as published by the Free Software Foundation.
11**
12** This program is distributed in the hope that it will be useful,
13** but WITHOUT ANY WARRANTY; without even the implied warranty of
14** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15** GNU General Public License for more details.
16**
17** You should have received a copy of the GNU General Public License
18** along with this program; if not, write to the Free Software
19** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20**
21**-----------------------------------------------------------------------------
22*/
23#include <linux/device.h>
24#include <linux/enclosure.h>
25#include <linux/err.h>
26#include <linux/list.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/mutex.h>
30
31static LIST_HEAD(container_list);
32static DEFINE_MUTEX(container_list_lock);
33static struct class enclosure_class;
34static struct class enclosure_component_class;
35
36/**
37 * enclosure_find - find an enclosure given a device
38 * @dev: the device to find for
39 *
40 * Looks through the list of registered enclosures to see
41 * if it can find a match for a device. Returns NULL if no
42 * enclosure is found. Obtains a reference to the enclosure class
43 * device which must be released with class_device_put().
44 */
45struct enclosure_device *enclosure_find(struct device *dev)
46{
47 struct enclosure_device *edev = NULL;
48
49 mutex_lock(&container_list_lock);
50 list_for_each_entry(edev, &container_list, node) {
51 if (edev->cdev.dev == dev) {
52 class_device_get(&edev->cdev);
53 mutex_unlock(&container_list_lock);
54 return edev;
55 }
56 }
57 mutex_unlock(&container_list_lock);
58
59 return NULL;
60}
61EXPORT_SYMBOL_GPL(enclosure_find);
62
63/**
64 * enclosure_for_each_device - calls a function for each enclosure
65 * @fn: the function to call
66 * @data: the data to pass to each call
67 *
68 * Loops over all the enclosures calling the function.
69 *
70 * Note, this function uses a mutex which will be held across calls to
71 * @fn, so it must have non atomic context, and @fn may (although it
72 * should not) sleep or otherwise cause the mutex to be held for
73 * indefinite periods
74 */
75int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
76 void *data)
77{
78 int error = 0;
79 struct enclosure_device *edev;
80
81 mutex_lock(&container_list_lock);
82 list_for_each_entry(edev, &container_list, node) {
83 error = fn(edev, data);
84 if (error)
85 break;
86 }
87 mutex_unlock(&container_list_lock);
88
89 return error;
90}
91EXPORT_SYMBOL_GPL(enclosure_for_each_device);
92
93/**
94 * enclosure_register - register device as an enclosure
95 *
96 * @dev: device containing the enclosure
97 * @components: number of components in the enclosure
98 *
99 * This sets up the device for being an enclosure. Note that @dev does
100 * not have to be a dedicated enclosure device. It may be some other type
101 * of device that additionally responds to enclosure services
102 */
103struct enclosure_device *
104enclosure_register(struct device *dev, const char *name, int components,
105 struct enclosure_component_callbacks *cb)
106{
107 struct enclosure_device *edev =
108 kzalloc(sizeof(struct enclosure_device) +
109 sizeof(struct enclosure_component)*components,
110 GFP_KERNEL);
111 int err, i;
112
113 BUG_ON(!cb);
114
115 if (!edev)
116 return ERR_PTR(-ENOMEM);
117
118 edev->components = components;
119
120 edev->cdev.class = &enclosure_class;
121 edev->cdev.dev = get_device(dev);
122 edev->cb = cb;
123 snprintf(edev->cdev.class_id, BUS_ID_SIZE, "%s", name);
124 err = class_device_register(&edev->cdev);
125 if (err)
126 goto err;
127
128 for (i = 0; i < components; i++)
129 edev->component[i].number = -1;
130
131 mutex_lock(&container_list_lock);
132 list_add_tail(&edev->node, &container_list);
133 mutex_unlock(&container_list_lock);
134
135 return edev;
136
137 err:
138 put_device(edev->cdev.dev);
139 kfree(edev);
140 return ERR_PTR(err);
141}
142EXPORT_SYMBOL_GPL(enclosure_register);
143
144static struct enclosure_component_callbacks enclosure_null_callbacks;
145
146/**
147 * enclosure_unregister - remove an enclosure
148 *
149 * @edev: the registered enclosure to remove;
150 */
151void enclosure_unregister(struct enclosure_device *edev)
152{
153 int i;
154
155 mutex_lock(&container_list_lock);
156 list_del(&edev->node);
157 mutex_unlock(&container_list_lock);
158
159 for (i = 0; i < edev->components; i++)
160 if (edev->component[i].number != -1)
161 class_device_unregister(&edev->component[i].cdev);
162
163 /* prevent any callbacks into service user */
164 edev->cb = &enclosure_null_callbacks;
165 class_device_unregister(&edev->cdev);
166}
167EXPORT_SYMBOL_GPL(enclosure_unregister);
168
169static void enclosure_release(struct class_device *cdev)
170{
171 struct enclosure_device *edev = to_enclosure_device(cdev);
172
173 put_device(cdev->dev);
174 kfree(edev);
175}
176
177static void enclosure_component_release(struct class_device *cdev)
178{
179 if (cdev->dev)
180 put_device(cdev->dev);
181 class_device_put(cdev->parent);
182}
183
184/**
185 * enclosure_component_register - add a particular component to an enclosure
186 * @edev: the enclosure to add the component
187 * @num: the device number
188 * @type: the type of component being added
189 * @name: an optional name to appear in sysfs (leave NULL if none)
190 *
191 * Registers the component. The name is optional for enclosures that
192 * give their components a unique name. If not, leave the field NULL
193 * and a name will be assigned.
194 *
195 * Returns a pointer to the enclosure component or an error.
196 */
197struct enclosure_component *
198enclosure_component_register(struct enclosure_device *edev,
199 unsigned int number,
200 enum enclosure_component_type type,
201 const char *name)
202{
203 struct enclosure_component *ecomp;
204 struct class_device *cdev;
205 int err;
206
207 if (number >= edev->components)
208 return ERR_PTR(-EINVAL);
209
210 ecomp = &edev->component[number];
211
212 if (ecomp->number != -1)
213 return ERR_PTR(-EINVAL);
214
215 ecomp->type = type;
216 ecomp->number = number;
217 cdev = &ecomp->cdev;
218 cdev->parent = class_device_get(&edev->cdev);
219 cdev->class = &enclosure_component_class;
220 if (name)
221 snprintf(cdev->class_id, BUS_ID_SIZE, "%s", name);
222 else
223 snprintf(cdev->class_id, BUS_ID_SIZE, "%u", number);
224
225 err = class_device_register(cdev);
226 if (err)
227 ERR_PTR(err);
228
229 return ecomp;
230}
231EXPORT_SYMBOL_GPL(enclosure_component_register);
232
233/**
234 * enclosure_add_device - add a device as being part of an enclosure
235 * @edev: the enclosure device being added to.
236 * @num: the number of the component
237 * @dev: the device being added
238 *
239 * Declares a real device to reside in slot (or identifier) @num of an
240 * enclosure. This will cause the relevant sysfs links to appear.
241 * This function may also be used to change a device associated with
242 * an enclosure without having to call enclosure_remove_device() in
243 * between.
244 *
245 * Returns zero on success or an error.
246 */
247int enclosure_add_device(struct enclosure_device *edev, int component,
248 struct device *dev)
249{
250 struct class_device *cdev;
251
252 if (!edev || component >= edev->components)
253 return -EINVAL;
254
255 cdev = &edev->component[component].cdev;
256
257 class_device_del(cdev);
258 if (cdev->dev)
259 put_device(cdev->dev);
260 cdev->dev = get_device(dev);
261 return class_device_add(cdev);
262}
263EXPORT_SYMBOL_GPL(enclosure_add_device);
264
265/**
266 * enclosure_remove_device - remove a device from an enclosure
267 * @edev: the enclosure device
268 * @num: the number of the component to remove
269 *
270 * Returns zero on success or an error.
271 *
272 */
273int enclosure_remove_device(struct enclosure_device *edev, int component)
274{
275 struct class_device *cdev;
276
277 if (!edev || component >= edev->components)
278 return -EINVAL;
279
280 cdev = &edev->component[component].cdev;
281
282 class_device_del(cdev);
283 if (cdev->dev)
284 put_device(cdev->dev);
285 cdev->dev = NULL;
286 return class_device_add(cdev);
287}
288EXPORT_SYMBOL_GPL(enclosure_remove_device);
289
290/*
291 * sysfs pieces below
292 */
293
294static ssize_t enclosure_show_components(struct class_device *cdev, char *buf)
295{
296 struct enclosure_device *edev = to_enclosure_device(cdev);
297
298 return snprintf(buf, 40, "%d\n", edev->components);
299}
300
301static struct class_device_attribute enclosure_attrs[] = {
302 __ATTR(components, S_IRUGO, enclosure_show_components, NULL),
303 __ATTR_NULL
304};
305
306static struct class enclosure_class = {
307 .name = "enclosure",
308 .owner = THIS_MODULE,
309 .release = enclosure_release,
310 .class_dev_attrs = enclosure_attrs,
311};
312
313static const char *const enclosure_status [] = {
314 [ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported",
315 [ENCLOSURE_STATUS_OK] = "OK",
316 [ENCLOSURE_STATUS_CRITICAL] = "critical",
317 [ENCLOSURE_STATUS_NON_CRITICAL] = "non-critical",
318 [ENCLOSURE_STATUS_UNRECOVERABLE] = "unrecoverable",
319 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
320 [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
321 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
322};
323
324static const char *const enclosure_type [] = {
325 [ENCLOSURE_COMPONENT_DEVICE] = "device",
326 [ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device",
327};
328
329static ssize_t get_component_fault(struct class_device *cdev, char *buf)
330{
331 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
332 struct enclosure_component *ecomp = to_enclosure_component(cdev);
333
334 if (edev->cb->get_fault)
335 edev->cb->get_fault(edev, ecomp);
336 return snprintf(buf, 40, "%d\n", ecomp->fault);
337}
338
339static ssize_t set_component_fault(struct class_device *cdev, const char *buf,
340 size_t count)
341{
342 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
343 struct enclosure_component *ecomp = to_enclosure_component(cdev);
344 int val = simple_strtoul(buf, NULL, 0);
345
346 if (edev->cb->set_fault)
347 edev->cb->set_fault(edev, ecomp, val);
348 return count;
349}
350
351static ssize_t get_component_status(struct class_device *cdev, char *buf)
352{
353 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
354 struct enclosure_component *ecomp = to_enclosure_component(cdev);
355
356 if (edev->cb->get_status)
357 edev->cb->get_status(edev, ecomp);
358 return snprintf(buf, 40, "%s\n", enclosure_status[ecomp->status]);
359}
360
361static ssize_t set_component_status(struct class_device *cdev, const char *buf,
362 size_t count)
363{
364 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
365 struct enclosure_component *ecomp = to_enclosure_component(cdev);
366 int i;
367
368 for (i = 0; enclosure_status[i]; i++) {
369 if (strncmp(buf, enclosure_status[i],
370 strlen(enclosure_status[i])) == 0 &&
371 (buf[strlen(enclosure_status[i])] == '\n' ||
372 buf[strlen(enclosure_status[i])] == '\0'))
373 break;
374 }
375
376 if (enclosure_status[i] && edev->cb->set_status) {
377 edev->cb->set_status(edev, ecomp, i);
378 return count;
379 } else
380 return -EINVAL;
381}
382
383static ssize_t get_component_active(struct class_device *cdev, char *buf)
384{
385 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
386 struct enclosure_component *ecomp = to_enclosure_component(cdev);
387
388 if (edev->cb->get_active)
389 edev->cb->get_active(edev, ecomp);
390 return snprintf(buf, 40, "%d\n", ecomp->active);
391}
392
393static ssize_t set_component_active(struct class_device *cdev, const char *buf,
394 size_t count)
395{
396 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
397 struct enclosure_component *ecomp = to_enclosure_component(cdev);
398 int val = simple_strtoul(buf, NULL, 0);
399
400 if (edev->cb->set_active)
401 edev->cb->set_active(edev, ecomp, val);
402 return count;
403}
404
405static ssize_t get_component_locate(struct class_device *cdev, char *buf)
406{
407 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
408 struct enclosure_component *ecomp = to_enclosure_component(cdev);
409
410 if (edev->cb->get_locate)
411 edev->cb->get_locate(edev, ecomp);
412 return snprintf(buf, 40, "%d\n", ecomp->locate);
413}
414
415static ssize_t set_component_locate(struct class_device *cdev, const char *buf,
416 size_t count)
417{
418 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
419 struct enclosure_component *ecomp = to_enclosure_component(cdev);
420 int val = simple_strtoul(buf, NULL, 0);
421
422 if (edev->cb->set_locate)
423 edev->cb->set_locate(edev, ecomp, val);
424 return count;
425}
426
427static ssize_t get_component_type(struct class_device *cdev, char *buf)
428{
429 struct enclosure_component *ecomp = to_enclosure_component(cdev);
430
431 return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]);
432}
433
434
435static struct class_device_attribute enclosure_component_attrs[] = {
436 __ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
437 set_component_fault),
438 __ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
439 set_component_status),
440 __ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
441 set_component_active),
442 __ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
443 set_component_locate),
444 __ATTR(type, S_IRUGO, get_component_type, NULL),
445 __ATTR_NULL
446};
447
448static struct class enclosure_component_class = {
449 .name = "enclosure_component",
450 .owner = THIS_MODULE,
451 .class_dev_attrs = enclosure_component_attrs,
452 .release = enclosure_component_release,
453};
454
455static int __init enclosure_init(void)
456{
457 int err;
458
459 err = class_register(&enclosure_class);
460 if (err)
461 return err;
462 err = class_register(&enclosure_component_class);
463 if (err)
464 goto err_out;
465
466 return 0;
467 err_out:
468 class_unregister(&enclosure_class);
469
470 return err;
471}
472
473static void __exit enclosure_exit(void)
474{
475 class_unregister(&enclosure_component_class);
476 class_unregister(&enclosure_class);
477}
478
479module_init(enclosure_init);
480module_exit(enclosure_exit);
481
482MODULE_AUTHOR("James Bottomley");
483MODULE_DESCRIPTION("Enclosure Services");
484MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 14fc7f39e83e..a5f0aaaf0dd4 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -179,7 +179,15 @@ config CHR_DEV_SCH
179 say M here and read <file:Documentation/kbuild/modules.txt> and 179 say M here and read <file:Documentation/kbuild/modules.txt> and
180 <file:Documentation/scsi/scsi.txt>. The module will be called ch.o. 180 <file:Documentation/scsi/scsi.txt>. The module will be called ch.o.
181 If unsure, say N. 181 If unsure, say N.
182 182
183config SCSI_ENCLOSURE
184 tristate "SCSI Enclosure Support"
185 depends on SCSI && ENCLOSURE_SERVICES
186 help
187 Enclosures are devices sitting on or in SCSI backplanes that
188 manage devices. If you have a disk cage, the chances are that
189 it has an enclosure device. Selecting this option will just allow
190 certain enclosure conditions to be reported and is not required.
183 191
184comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs" 192comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
185 depends on SCSI 193 depends on SCSI
@@ -350,17 +358,6 @@ config SGIWD93_SCSI
350 If you have a Western Digital WD93 SCSI controller on 358 If you have a Western Digital WD93 SCSI controller on
351 an SGI MIPS system, say Y. Otherwise, say N. 359 an SGI MIPS system, say Y. Otherwise, say N.
352 360
353config SCSI_DECNCR
354 tristate "DEC NCR53C94 Scsi Driver"
355 depends on MACH_DECSTATION && SCSI && TC
356 help
357 Say Y here to support the NCR53C94 SCSI controller chips on IOASIC
358 based TURBOchannel DECstations and TURBOchannel PMAZ-A cards.
359
360config SCSI_DECSII
361 tristate "DEC SII Scsi Driver"
362 depends on MACH_DECSTATION && SCSI && 32BIT
363
364config BLK_DEV_3W_XXXX_RAID 361config BLK_DEV_3W_XXXX_RAID
365 tristate "3ware 5/6/7/8xxx ATA-RAID support" 362 tristate "3ware 5/6/7/8xxx ATA-RAID support"
366 depends on PCI && SCSI 363 depends on PCI && SCSI
@@ -1263,17 +1260,6 @@ config SCSI_NCR53C8XX_NO_DISCONNECT
1263 not allow targets to disconnect is not reasonable if there is more 1260 not allow targets to disconnect is not reasonable if there is more
1264 than 1 device on a SCSI bus. The normal answer therefore is N. 1261 than 1 device on a SCSI bus. The normal answer therefore is N.
1265 1262
1266config SCSI_MCA_53C9X
1267 tristate "NCR MCA 53C9x SCSI support"
1268 depends on MCA_LEGACY && SCSI && BROKEN_ON_SMP
1269 help
1270 Some MicroChannel machines, notably the NCR 35xx line, use a SCSI
1271 controller based on the NCR 53C94. This driver will allow use of
1272 the controller on the 3550, and very possibly others.
1273
1274 To compile this driver as a module, choose M here: the
1275 module will be called mca_53c9x.
1276
1277config SCSI_PAS16 1263config SCSI_PAS16
1278 tristate "PAS16 SCSI support" 1264 tristate "PAS16 SCSI support"
1279 depends on ISA && SCSI 1265 depends on ISA && SCSI
@@ -1600,45 +1586,6 @@ config GVP11_SCSI
1600 To compile this driver as a module, choose M here: the 1586 To compile this driver as a module, choose M here: the
1601 module will be called gvp11. 1587 module will be called gvp11.
1602 1588
1603config CYBERSTORM_SCSI
1604 tristate "CyberStorm SCSI support"
1605 depends on ZORRO && SCSI
1606 help
1607 If you have an Amiga with an original (MkI) Phase5 Cyberstorm
1608 accelerator board and the optional Cyberstorm SCSI controller,
1609 answer Y. Otherwise, say N.
1610
1611config CYBERSTORMII_SCSI
1612 tristate "CyberStorm Mk II SCSI support"
1613 depends on ZORRO && SCSI
1614 help
1615 If you have an Amiga with a Phase5 Cyberstorm MkII accelerator board
1616 and the optional Cyberstorm SCSI controller, say Y. Otherwise,
1617 answer N.
1618
1619config BLZ2060_SCSI
1620 tristate "Blizzard 2060 SCSI support"
1621 depends on ZORRO && SCSI
1622 help
1623 If you have an Amiga with a Phase5 Blizzard 2060 accelerator board
1624 and want to use the onboard SCSI controller, say Y. Otherwise,
1625 answer N.
1626
1627config BLZ1230_SCSI
1628 tristate "Blizzard 1230IV/1260 SCSI support"
1629 depends on ZORRO && SCSI
1630 help
1631 If you have an Amiga 1200 with a Phase5 Blizzard 1230IV or Blizzard
1632 1260 accelerator, and the optional SCSI module, say Y. Otherwise,
1633 say N.
1634
1635config FASTLANE_SCSI
1636 tristate "Fastlane SCSI support"
1637 depends on ZORRO && SCSI
1638 help
1639 If you have the Phase5 Fastlane Z3 SCSI controller, or plan to use
1640 one in the near future, say Y to this question. Otherwise, say N.
1641
1642config SCSI_A4000T 1589config SCSI_A4000T
1643 tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)" 1590 tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)"
1644 depends on AMIGA && SCSI && EXPERIMENTAL 1591 depends on AMIGA && SCSI && EXPERIMENTAL
@@ -1666,15 +1613,6 @@ config SCSI_ZORRO7XX
1666 accelerator card for the Amiga 1200, 1613 accelerator card for the Amiga 1200,
1667 - the SCSI controller on the GVP Turbo 040/060 accelerator. 1614 - the SCSI controller on the GVP Turbo 040/060 accelerator.
1668 1615
1669config OKTAGON_SCSI
1670 tristate "BSC Oktagon SCSI support (EXPERIMENTAL)"
1671 depends on ZORRO && SCSI && EXPERIMENTAL
1672 help
1673 If you have the BSC Oktagon SCSI disk controller for the Amiga, say
1674 Y to this question. If you're in doubt about whether you have one,
1675 see the picture at
1676 <http://amiga.resource.cx/exp/search.pl?product=oktagon>.
1677
1678config ATARI_SCSI 1616config ATARI_SCSI
1679 tristate "Atari native SCSI support" 1617 tristate "Atari native SCSI support"
1680 depends on ATARI && SCSI 1618 depends on ATARI && SCSI
@@ -1727,18 +1665,6 @@ config MAC_SCSI
1727 SCSI-HOWTO, available from 1665 SCSI-HOWTO, available from
1728 <http://www.tldp.org/docs.html#howto>. 1666 <http://www.tldp.org/docs.html#howto>.
1729 1667
1730config SCSI_MAC_ESP
1731 tristate "Macintosh NCR53c9[46] SCSI"
1732 depends on MAC && SCSI
1733 help
1734 This is the NCR 53c9x SCSI controller found on most of the 68040
1735 based Macintoshes. If you have one of these say Y and read the
1736 SCSI-HOWTO, available from
1737 <http://www.tldp.org/docs.html#howto>.
1738
1739 To compile this driver as a module, choose M here: the
1740 module will be called mac_esp.
1741
1742config MVME147_SCSI 1668config MVME147_SCSI
1743 bool "WD33C93 SCSI driver for MVME147" 1669 bool "WD33C93 SCSI driver for MVME147"
1744 depends on MVME147 && SCSI=y 1670 depends on MVME147 && SCSI=y
@@ -1779,6 +1705,7 @@ config SUN3_SCSI
1779config SUN3X_ESP 1705config SUN3X_ESP
1780 bool "Sun3x ESP SCSI" 1706 bool "Sun3x ESP SCSI"
1781 depends on SUN3X && SCSI=y 1707 depends on SUN3X && SCSI=y
1708 select SCSI_SPI_ATTRS
1782 help 1709 help
1783 The ESP was an on-board SCSI controller used on Sun 3/80 1710 The ESP was an on-board SCSI controller used on Sun 3/80
1784 machines. Say Y here to compile in support for it. 1711 machines. Say Y here to compile in support for it.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 93e1428d03fc..925c26b4fff9 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -44,15 +44,8 @@ obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
44obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o 44obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
45obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o 45obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o
46obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o 46obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o
47obj-$(CONFIG_CYBERSTORM_SCSI) += NCR53C9x.o cyberstorm.o
48obj-$(CONFIG_CYBERSTORMII_SCSI) += NCR53C9x.o cyberstormII.o
49obj-$(CONFIG_BLZ2060_SCSI) += NCR53C9x.o blz2060.o
50obj-$(CONFIG_BLZ1230_SCSI) += NCR53C9x.o blz1230.o
51obj-$(CONFIG_FASTLANE_SCSI) += NCR53C9x.o fastlane.o
52obj-$(CONFIG_OKTAGON_SCSI) += NCR53C9x.o oktagon_esp_mod.o
53obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o 47obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
54obj-$(CONFIG_MAC_SCSI) += mac_scsi.o 48obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
55obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o
56obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o 49obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
57obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o 50obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
58obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o 51obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
@@ -95,7 +88,6 @@ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
95obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o 88obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
96obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o 89obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
97obj-$(CONFIG_SCSI_7000FASST) += wd7000.o 90obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
98obj-$(CONFIG_SCSI_MCA_53C9X) += NCR53C9x.o mca_53c9x.o
99obj-$(CONFIG_SCSI_IBMMCA) += ibmmca.o 91obj-$(CONFIG_SCSI_IBMMCA) += ibmmca.o
100obj-$(CONFIG_SCSI_EATA) += eata.o 92obj-$(CONFIG_SCSI_EATA) += eata.o
101obj-$(CONFIG_SCSI_DC395x) += dc395x.o 93obj-$(CONFIG_SCSI_DC395x) += dc395x.o
@@ -112,13 +104,12 @@ obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
112obj-$(CONFIG_BLK_DEV_IDESCSI) += ide-scsi.o 104obj-$(CONFIG_BLK_DEV_IDESCSI) += ide-scsi.o
113obj-$(CONFIG_SCSI_MESH) += mesh.o 105obj-$(CONFIG_SCSI_MESH) += mesh.o
114obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o 106obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
115obj-$(CONFIG_SCSI_DECNCR) += NCR53C9x.o dec_esp.o
116obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o 107obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
117obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o 108obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
118obj-$(CONFIG_SCSI_PPA) += ppa.o 109obj-$(CONFIG_SCSI_PPA) += ppa.o
119obj-$(CONFIG_SCSI_IMM) += imm.o 110obj-$(CONFIG_SCSI_IMM) += imm.o
120obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o 111obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
121obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o 112obj-$(CONFIG_SUN3X_ESP) += esp_scsi.o sun3x_esp.o
122obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o 113obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
123obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o 114obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
124obj-$(CONFIG_SCSI_NSP32) += nsp32.o 115obj-$(CONFIG_SCSI_NSP32) += nsp32.o
@@ -138,6 +129,7 @@ obj-$(CONFIG_BLK_DEV_SD) += sd_mod.o
138obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o 129obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o
139obj-$(CONFIG_CHR_DEV_SG) += sg.o 130obj-$(CONFIG_CHR_DEV_SG) += sg.o
140obj-$(CONFIG_CHR_DEV_SCH) += ch.o 131obj-$(CONFIG_CHR_DEV_SCH) += ch.o
132obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o
141 133
142# This goes last, so that "real" scsi devices probe earlier 134# This goes last, so that "real" scsi devices probe earlier
143obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o 135obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
deleted file mode 100644
index 5b0efc903918..000000000000
--- a/drivers/scsi/NCR53C9x.c
+++ /dev/null
@@ -1,3654 +0,0 @@
1/* NCR53C9x.c: Generic SCSI driver code for NCR53C9x chips.
2 *
3 * Originally esp.c : EnhancedScsiProcessor Sun SCSI driver code.
4 *
5 * Copyright (C) 1995, 1998 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * Most DMA dependencies put in driver specific files by
8 * Jesper Skov (jskov@cygnus.co.uk)
9 *
10 * Set up to use esp_read/esp_write (preprocessor macros in NCR53c9x.h) by
11 * Tymm Twillman (tymm@coe.missouri.edu)
12 */
13
14/* TODO:
15 *
16 * 1) Maybe disable parity checking in config register one for SCSI1
17 * targets. (Gilmore says parity error on the SBus can lock up
18 * old sun4c's)
19 * 2) Add support for DMA2 pipelining.
20 * 3) Add tagged queueing.
21 * 4) Maybe change use of "esp" to something more "NCR"'ish.
22 */
23
24#include <linux/module.h>
25
26#include <linux/kernel.h>
27#include <linux/delay.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/slab.h>
31#include <linux/blkdev.h>
32#include <linux/interrupt.h>
33#include <linux/proc_fs.h>
34#include <linux/stat.h>
35#include <linux/init.h>
36
37#include "scsi.h"
38#include <scsi/scsi_host.h>
39#include "NCR53C9x.h"
40
41#include <asm/system.h>
42#include <asm/ptrace.h>
43#include <asm/pgtable.h>
44#include <asm/io.h>
45#include <asm/irq.h>
46
/* Command phase enumeration.
 *
 * Every active command carries one of these values in SCp.phase (see
 * esp_advance_phase()); phase_string() turns them into text for the
 * logs.  The three #define masks below select the group a phase
 * belongs to: selection, normal bus phases, or special/error states.
 */
enum {
	not_issued    = 0x00,  /* Still in the issue_SC queue.          */

	/* Various forms of selecting a target. */
#define in_slct_mask    0x10
	in_slct_norm  = 0x10,  /* ESP is arbitrating, normal selection  */
	in_slct_stop  = 0x11,  /* ESP will select, then stop with IRQ   */
	in_slct_msg   = 0x12,  /* select, then send a message           */
	in_slct_tag   = 0x13,  /* select and send tagged queue msg      */
	in_slct_sneg  = 0x14,  /* select and acquire sync capabilities  */

	/* Any post selection activity. */
#define in_phases_mask  0x20
	in_datain     = 0x20,  /* Data is transferring from the bus     */
	in_dataout    = 0x21,  /* Data is transferring to the bus       */
	in_data_done  = 0x22,  /* Last DMA data operation done (maybe)  */
	in_msgin      = 0x23,  /* Eating message from target            */
	in_msgincont  = 0x24,  /* Eating more msg bytes from target     */
	in_msgindone  = 0x25,  /* Decide what to do with what we got    */
	in_msgout     = 0x26,  /* Sending message to target             */
	in_msgoutdone = 0x27,  /* Done sending msg out                  */
	in_cmdbegin   = 0x28,  /* Sending cmd after abnormal selection  */
	in_cmdend     = 0x29,  /* Done sending slow cmd                 */
	in_status     = 0x2a,  /* Was in status phase, finishing cmd    */
	in_freeing    = 0x2b,  /* freeing the bus for cmd cmplt or disc */
	in_the_dark   = 0x2c,  /* Don't know what bus phase we are in   */

	/* Special states, ie. not normal bus transitions... */
#define in_spec_mask    0x80
	in_abortone   = 0x80,  /* Aborting one command currently        */
	in_abortall   = 0x81,  /* Blowing away all commands we have     */
	in_resetdev   = 0x82,  /* SCSI target reset in progress         */
	in_resetbus   = 0x83,  /* SCSI bus reset in progress            */
	in_tgterror   = 0x84,  /* Target did something stupid           */
};
83
/* Action codes returned by the interrupt state-machine helpers.
 * Zero (do_never) has special meaning, see skipahead[12].
 */
enum {
	/* Zero has special meaning, see skipahead[12]. */
/*0*/	do_never,

/*1*/	do_phase_determine,
/*2*/	do_reset_bus,
/*3*/	do_reset_complete,
/*4*/	do_work_bus,
/*5*/	do_intr_end
};
94
/* The master ring of all esp hosts we are managing in this driver. */
static struct NCR_ESP *espchain;
/* Counters maintained by esp_allocate()/esp_deallocate() and friends:
 * chips probed, structures in use, chips actively running.  Exported
 * so board-specific front-end drivers can reference them.
 */
int nesps = 0, esps_in_use = 0, esps_running = 0;
EXPORT_SYMBOL(nesps);
EXPORT_SYMBOL(esps_running);

/* Interrupt entry point (defined later in this file). */
irqreturn_t esp_intr(int irq, void *dev_id);
102
/* Debugging routines */

/* Mapping from ESP command byte (sans DMA bit) to a human-readable
 * mnemonic; consumed by esp_print_cmd() below.
 */
static struct esp_cmdstrings {
	unchar cmdchar;
	char *text;
} esp_cmd_strings[] = {
	/* Miscellaneous */
	{ ESP_CMD_NULL,   "ESP_NOP", },
	{ ESP_CMD_FLUSH,  "FIFO_FLUSH", },
	{ ESP_CMD_RC,     "RSTESP", },
	{ ESP_CMD_RS,     "RSTSCSI", },
	/* Disconnected State Group */
	{ ESP_CMD_RSEL,   "RESLCTSEQ", },
	{ ESP_CMD_SEL,    "SLCTNATN", },
	{ ESP_CMD_SELA,   "SLCTATN", },
	{ ESP_CMD_SELAS,  "SLCTATNSTOP", },
	{ ESP_CMD_ESEL,   "ENSLCTRESEL", },
	{ ESP_CMD_DSEL,   "DISSELRESEL", },
	{ ESP_CMD_SA3,    "SLCTATN3", },
	{ ESP_CMD_RSEL3,  "RESLCTSEQ", },
	/* Target State Group */
	{ ESP_CMD_SMSG,   "SNDMSG", },
	{ ESP_CMD_SSTAT,  "SNDSTATUS", },
	{ ESP_CMD_SDATA,  "SNDDATA", },
	{ ESP_CMD_DSEQ,   "DISCSEQ", },
	{ ESP_CMD_TSEQ,   "TERMSEQ", },
	{ ESP_CMD_TCCSEQ, "TRGTCMDCOMPSEQ", },
	{ ESP_CMD_DCNCT,  "DISC", },
	{ ESP_CMD_RMSG,   "RCVMSG", },
	{ ESP_CMD_RCMD,   "RCVCMD", },
	{ ESP_CMD_RDATA,  "RCVDATA", },
	{ ESP_CMD_RCSEQ,  "RCVCMDSEQ", },
	/* Initiator State Group */
	{ ESP_CMD_TI,     "TRANSINFO", },
	{ ESP_CMD_ICCSEQ, "INICMDSEQCOMP", },
	{ ESP_CMD_MOK,    "MSGACCEPTED", },
	{ ESP_CMD_TPAD,   "TPAD", },
	{ ESP_CMD_SATN,   "SATN", },
	{ ESP_CMD_RATN,   "RATN", },
};
/* Number of entries in esp_cmd_strings[]. */
#define NUM_ESP_COMMANDS  ((sizeof(esp_cmd_strings)) / (sizeof(struct esp_cmdstrings)))
143
144/* Print textual representation of an ESP command */
145static inline void esp_print_cmd(unchar espcmd)
146{
147 unchar dma_bit = espcmd & ESP_CMD_DMA;
148 int i;
149
150 espcmd &= ~dma_bit;
151 for(i=0; i<NUM_ESP_COMMANDS; i++)
152 if(esp_cmd_strings[i].cmdchar == espcmd)
153 break;
154 if(i==NUM_ESP_COMMANDS)
155 printk("ESP_Unknown");
156 else
157 printk("%s%s", esp_cmd_strings[i].text,
158 ((dma_bit) ? "+DMA" : ""));
159}
160
161/* Print the status register's value */
162static inline void esp_print_statreg(unchar statreg)
163{
164 unchar phase;
165
166 printk("STATUS<");
167 phase = statreg & ESP_STAT_PMASK;
168 printk("%s,", (phase == ESP_DOP ? "DATA-OUT" :
169 (phase == ESP_DIP ? "DATA-IN" :
170 (phase == ESP_CMDP ? "COMMAND" :
171 (phase == ESP_STATP ? "STATUS" :
172 (phase == ESP_MOP ? "MSG-OUT" :
173 (phase == ESP_MIP ? "MSG_IN" :
174 "unknown")))))));
175 if(statreg & ESP_STAT_TDONE)
176 printk("TRANS_DONE,");
177 if(statreg & ESP_STAT_TCNT)
178 printk("TCOUNT_ZERO,");
179 if(statreg & ESP_STAT_PERR)
180 printk("P_ERROR,");
181 if(statreg & ESP_STAT_SPAM)
182 printk("SPAM,");
183 if(statreg & ESP_STAT_INTR)
184 printk("IRQ,");
185 printk(">");
186}
187
188/* Print the interrupt register's value */
189static inline void esp_print_ireg(unchar intreg)
190{
191 printk("INTREG< ");
192 if(intreg & ESP_INTR_S)
193 printk("SLCT_NATN ");
194 if(intreg & ESP_INTR_SATN)
195 printk("SLCT_ATN ");
196 if(intreg & ESP_INTR_RSEL)
197 printk("RSLCT ");
198 if(intreg & ESP_INTR_FDONE)
199 printk("FDONE ");
200 if(intreg & ESP_INTR_BSERV)
201 printk("BSERV ");
202 if(intreg & ESP_INTR_DC)
203 printk("DISCNCT ");
204 if(intreg & ESP_INTR_IC)
205 printk("ILL_CMD ");
206 if(intreg & ESP_INTR_SR)
207 printk("SCSI_BUS_RESET ");
208 printk(">");
209}
210
211/* Print the sequence step registers contents */
212static inline void esp_print_seqreg(unchar stepreg)
213{
214 stepreg &= ESP_STEP_VBITS;
215 printk("STEP<%s>",
216 (stepreg == ESP_STEP_ASEL ? "SLCT_ARB_CMPLT" :
217 (stepreg == ESP_STEP_SID ? "1BYTE_MSG_SENT" :
218 (stepreg == ESP_STEP_NCMD ? "NOT_IN_CMD_PHASE" :
219 (stepreg == ESP_STEP_PPC ? "CMD_BYTES_LOST" :
220 (stepreg == ESP_STEP_FINI4 ? "CMD_SENT_OK" :
221 "UNKNOWN"))))));
222}
223
224static char *phase_string(int phase)
225{
226 switch(phase) {
227 case not_issued:
228 return "UNISSUED";
229 case in_slct_norm:
230 return "SLCTNORM";
231 case in_slct_stop:
232 return "SLCTSTOP";
233 case in_slct_msg:
234 return "SLCTMSG";
235 case in_slct_tag:
236 return "SLCTTAG";
237 case in_slct_sneg:
238 return "SLCTSNEG";
239 case in_datain:
240 return "DATAIN";
241 case in_dataout:
242 return "DATAOUT";
243 case in_data_done:
244 return "DATADONE";
245 case in_msgin:
246 return "MSGIN";
247 case in_msgincont:
248 return "MSGINCONT";
249 case in_msgindone:
250 return "MSGINDONE";
251 case in_msgout:
252 return "MSGOUT";
253 case in_msgoutdone:
254 return "MSGOUTDONE";
255 case in_cmdbegin:
256 return "CMDBEGIN";
257 case in_cmdend:
258 return "CMDEND";
259 case in_status:
260 return "STATUS";
261 case in_freeing:
262 return "FREEING";
263 case in_the_dark:
264 return "CLUELESS";
265 case in_abortone:
266 return "ABORTONE";
267 case in_abortall:
268 return "ABORTALL";
269 case in_resetdev:
270 return "RESETDEV";
271 case in_resetbus:
272 return "RESETBUS";
273 case in_tgterror:
274 return "TGTERROR";
275 default:
276 return "UNKNOWN";
277 };
278}
279
#ifdef DEBUG_STATE_MACHINE
/* Record the old phase in SCp.sent_command and enter the new one,
 * logging the transition.
 */
static inline void esp_advance_phase(Scsi_Cmnd *s, int newphase)
{
	ESPLOG(("<%s>", phase_string(newphase)));
	s->SCp.sent_command = s->SCp.phase;
	s->SCp.phase = newphase;
}
#else
/* Non-debug variant of the above.  Wrapped in do { } while (0) so the
 * two statements stay together when the macro is used as the body of
 * an unbraced if/else; the original form expanded to two separate
 * statements and would silently execute the second one
 * unconditionally in that context.
 */
#define esp_advance_phase(__s, __newphase) \
	do { \
		(__s)->SCp.sent_command = (__s)->SCp.phase; \
		(__s)->SCp.phase = (__newphase); \
	} while (0)
#endif
292
#ifdef DEBUG_ESP_CMDS
/* Write a command to the chip's command register, first logging it in
 * a 32-entry ring buffer (espcmdlog) so recent chip activity can be
 * dumped when something goes wrong.
 */
static inline void esp_cmd(struct NCR_ESP *esp, struct ESP_regs *eregs,
			   unchar cmd)
{
	esp->espcmdlog[esp->espcmdent] = cmd;
	esp->espcmdent = (esp->espcmdent + 1) & 31;	/* wrap at 32 entries */
	esp_write(eregs->esp_cmnd, cmd);
}
#else
/* Non-debug build: write the command register directly, no logging. */
#define esp_cmd(__esp, __eregs, __cmd) esp_write((__eregs)->esp_cmnd, (__cmd))
#endif
304
305/* How we use the various Linux SCSI data structures for operation.
306 *
307 * struct scsi_cmnd:
308 *
 * We keep track of the synchronous capabilities of a target
310 * in the device member, using sync_min_period and
311 * sync_max_offset. These are the values we directly write
312 * into the ESP registers while running a command. If offset
313 * is zero the ESP will use asynchronous transfers.
314 * If the borken flag is set we assume we shouldn't even bother
315 * trying to negotiate for synchronous transfer as this target
316 * is really stupid. If we notice the target is dropping the
317 * bus, and we have been allowing it to disconnect, we clear
318 * the disconnect flag.
319 */
320
321/* Manipulation of the ESP command queues. Thanks to the aha152x driver
322 * and its author, Juergen E. Fischer, for the methods used here.
323 * Note that these are per-ESP queues, not global queues like
324 * the aha152x driver uses.
325 */
326static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
327{
328 Scsi_Cmnd *end;
329
330 new_SC->host_scribble = (unsigned char *) NULL;
331 if(!*SC)
332 *SC = new_SC;
333 else {
334 for(end=*SC;end->host_scribble;end=(Scsi_Cmnd *)end->host_scribble)
335 ;
336 end->host_scribble = (unsigned char *) new_SC;
337 }
338}
339
/* Push new_SC onto the front of the queue rooted at *SC (LIFO insert). */
static inline void prepend_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
{
	new_SC->host_scribble = (unsigned char *) *SC;
	*SC = new_SC;
}
345
346static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd **SC)
347{
348 Scsi_Cmnd *ptr;
349
350 ptr = *SC;
351 if(ptr)
352 *SC = (Scsi_Cmnd *) (*SC)->host_scribble;
353 return ptr;
354}
355
356static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, int target, int lun)
357{
358 Scsi_Cmnd *ptr, *prev;
359
360 for(ptr = *SC, prev = NULL;
361 ptr && ((ptr->device->id != target) || (ptr->device->lun != lun));
362 prev = ptr, ptr = (Scsi_Cmnd *) ptr->host_scribble)
363 ;
364 if(ptr) {
365 if(prev)
366 prev->host_scribble=ptr->host_scribble;
367 else
368 *SC=(Scsi_Cmnd *)ptr->host_scribble;
369 }
370 return ptr;
371}
372
373/* Resetting various pieces of the ESP scsi driver chipset */
374
375/* Reset the ESP chip, _not_ the SCSI bus. */
376static void esp_reset_esp(struct NCR_ESP *esp, struct ESP_regs *eregs)
377{
378 int family_code, version, i;
379 volatile int trash;
380
381 /* Now reset the ESP chip */
382 esp_cmd(esp, eregs, ESP_CMD_RC);
383 esp_cmd(esp, eregs, ESP_CMD_NULL | ESP_CMD_DMA);
384 if(esp->erev == fast)
385 esp_write(eregs->esp_cfg2, ESP_CONFIG2_FENAB);
386 esp_cmd(esp, eregs, ESP_CMD_NULL | ESP_CMD_DMA);
387
388 /* This is the only point at which it is reliable to read
389 * the ID-code for a fast ESP chip variant.
390 */
391 esp->max_period = ((35 * esp->ccycle) / 1000);
392 if(esp->erev == fast) {
393 char *erev2string[] = {
394 "Emulex FAS236",
395 "Emulex FPESP100A",
396 "fast",
397 "QLogic FAS366",
398 "Emulex FAS216",
399 "Symbios Logic 53CF9x-2",
400 "unknown!"
401 };
402
403 version = esp_read(eregs->esp_uid);
404 family_code = (version & 0xf8) >> 3;
405 if(family_code == 0x02) {
406 if ((version & 7) == 2)
407 esp->erev = fas216;
408 else
409 esp->erev = fas236;
410 } else if(family_code == 0x0a)
411 esp->erev = fas366; /* Version is usually '5'. */
412 else if(family_code == 0x00) {
413 if ((version & 7) == 2)
414 esp->erev = fas100a; /* NCR53C9X */
415 else
416 esp->erev = espunknown;
417 } else if(family_code == 0x14) {
418 if ((version & 7) == 2)
419 esp->erev = fsc;
420 else
421 esp->erev = espunknown;
422 } else if(family_code == 0x00) {
423 if ((version & 7) == 2)
424 esp->erev = fas100a; /* NCR53C9X */
425 else
426 esp->erev = espunknown;
427 } else
428 esp->erev = espunknown;
429 ESPLOG(("esp%d: FAST chip is %s (family=%d, version=%d)\n",
430 esp->esp_id, erev2string[esp->erev - fas236],
431 family_code, (version & 7)));
432
433 esp->min_period = ((4 * esp->ccycle) / 1000);
434 } else {
435 esp->min_period = ((5 * esp->ccycle) / 1000);
436 }
437
438 /* Reload the configuration registers */
439 esp_write(eregs->esp_cfact, esp->cfact);
440 esp->prev_stp = 0;
441 esp_write(eregs->esp_stp, 0);
442 esp->prev_soff = 0;
443 esp_write(eregs->esp_soff, 0);
444 esp_write(eregs->esp_timeo, esp->neg_defp);
445 esp->max_period = (esp->max_period + 3)>>2;
446 esp->min_period = (esp->min_period + 3)>>2;
447
448 esp_write(eregs->esp_cfg1, esp->config1);
449 switch(esp->erev) {
450 case esp100:
451 /* nothing to do */
452 break;
453 case esp100a:
454 esp_write(eregs->esp_cfg2, esp->config2);
455 break;
456 case esp236:
457 /* Slow 236 */
458 esp_write(eregs->esp_cfg2, esp->config2);
459 esp->prev_cfg3 = esp->config3[0];
460 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
461 break;
462 case fas366:
463 panic("esp: FAS366 support not present, please notify "
464 "jongk@cs.utwente.nl");
465 break;
466 case fas216:
467 case fas236:
468 case fsc:
469 /* Fast ESP variants */
470 esp_write(eregs->esp_cfg2, esp->config2);
471 for(i=0; i<8; i++)
472 esp->config3[i] |= ESP_CONFIG3_FCLK;
473 esp->prev_cfg3 = esp->config3[0];
474 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
475 if(esp->diff)
476 esp->radelay = 0;
477 else
478 esp->radelay = 16;
479 /* Different timeout constant for these chips */
480 esp->neg_defp =
481 FSC_NEG_DEFP(esp->cfreq,
482 (esp->cfact == ESP_CCF_F0 ?
483 ESP_CCF_F7 + 1 : esp->cfact));
484 esp_write(eregs->esp_timeo, esp->neg_defp);
485 /* Enable Active Negotiation if possible */
486 if((esp->erev == fsc) && !esp->diff)
487 esp_write(eregs->esp_cfg4, ESP_CONFIG4_EAN);
488 break;
489 case fas100a:
490 /* Fast 100a */
491 esp_write(eregs->esp_cfg2, esp->config2);
492 for(i=0; i<8; i++)
493 esp->config3[i] |= ESP_CONFIG3_FCLOCK;
494 esp->prev_cfg3 = esp->config3[0];
495 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
496 esp->radelay = 32;
497 break;
498 default:
499 panic("esp: what could it be... I wonder...");
500 break;
501 };
502
503 /* Eat any bitrot in the chip */
504 trash = esp_read(eregs->esp_intrpt);
505 udelay(100);
506}
507
/* This places the ESP into a known state at boot time:
 * reset the board-specific DMA engine (if a hook is provided), reset
 * the chip itself, then reset the SCSI bus with the chip's
 * reset-interrupt reporting temporarily disabled so the bus reset
 * does not generate a spurious IRQ.
 */
void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs)
{
	volatile unchar trash;

	/* Reset the DMA */
	if(esp->dma_reset)
		esp->dma_reset(esp);

	/* Reset the ESP */
	esp_reset_esp(esp, eregs);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	esp_write(eregs->esp_cfg1, (esp_read(eregs->esp_cfg1) | ESP_CONFIG1_SRRDISAB));
	esp_cmd(esp, eregs, ESP_CMD_RS);
	udelay(400);	/* let the bus reset settle */
	esp_write(eregs->esp_cfg1, esp->config1);

	/* Eat any bitrot in the chip and we are done... */
	trash = esp_read(eregs->esp_intrpt);
}
EXPORT_SYMBOL(esp_bootup_reset);
530
/* Allocate structure and insert basic data such as SCSI chip frequency
 * data and a pointer to the device.
 *
 * Allocates a Scsi_Host with an NCR_ESP in its hostdata (via
 * scsi_host_alloc for hotplug callers, the legacy scsi_register
 * otherwise), assigns the next esp_id, and links the new chip onto
 * the tail of the global espchain list.  Panics on allocation
 * failure -- callers never see NULL.
 */
struct NCR_ESP* esp_allocate(struct scsi_host_template *tpnt, void *esp_dev,
			     int hotplug)
{
	struct NCR_ESP *esp, *elink;
	struct Scsi_Host *esp_host;

	if (hotplug)
		esp_host = scsi_host_alloc(tpnt, sizeof(struct NCR_ESP));
	else
		esp_host = scsi_register(tpnt, sizeof(struct NCR_ESP));
	if(!esp_host)
		panic("Cannot register ESP SCSI host");
	esp = (struct NCR_ESP *) esp_host->hostdata;
	if(!esp)
		panic("No esp in hostdata");
	esp->ehost = esp_host;
	esp->edev = esp_dev;
	esp->esp_id = nesps++;

	/* Set bitshift value (only used on Amiga with multiple ESPs) */
	esp->shift = 2;

	/* Put into the chain of esp chips detected */
	if(espchain) {
		elink = espchain;
		while(elink->next) elink = elink->next;
		elink->next = esp;
	} else {
		espchain = esp;
	}
	esp->next = NULL;

	return esp;
}
568
569void esp_deallocate(struct NCR_ESP *esp)
570{
571 struct NCR_ESP *elink;
572
573 if(espchain == esp) {
574 espchain = NULL;
575 } else {
576 for(elink = espchain; elink && (elink->next != esp); elink = elink->next);
577 if(elink)
578 elink->next = esp->next;
579 }
580 nesps--;
581}
582
/* Complete initialization of ESP structure and device
 * Caller must have initialized appropriate parts of the ESP structure
 * between the call to esp_allocate and this function.
 *
 * Derives the clock conversion factor and timeout values from the
 * board's crystal frequency, fills in the Scsi_Host fields, probes
 * the chip revision by poking the config registers, clears all the
 * driver state machines, and finally runs esp_bootup_reset().
 */
void esp_initialize(struct NCR_ESP *esp)
{
	struct ESP_regs *eregs = esp->eregs;
	unsigned int fmhz;
	unchar ccf;
	int i;

	/* Check out the clock properties of the chip. */

	/* This is getting messy but it has to be done
	 * correctly or else you get weird behavior all
	 * over the place. We are trying to basically
	 * figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input
	 *    crystal clock frequency going into the
	 *    ESP on this machine. Any operation whose
	 *    timing is longer than 400ns depends on this
	 *    value being correct. For example, you'll
	 *    get blips for arbitration/selection during
	 *    high load or with multiple targets if this
	 *    is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate
	 *    for the bus and try to select a target
	 *    forever if you let it. This value tells
	 *    the ESP when it has taken too long to
	 *    negotiate and that it should interrupt
	 *    the CPU so we can see what happened.
	 *    The value is computed as follows (from
	 *    NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) * (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    You usually want the time out period to be
	 *    around 250ms, I think we'll set it a little
	 *    bit higher to account for fully loaded SCSI
	 *    bus's and slow devices that don't respond so
	 *    quickly to selection attempts. (yeah, I know
	 *    this is out of spec. but there is a lot of
	 *    buggy pieces of firmware out there so bite me)
	 *
	 * c) Empirical constants for synchronous offset
	 *    and transfer period register values
	 *
	 *    This entails the smallest and largest sync
	 *    period we could ever handle on this ESP.
	 */

	fmhz = esp->cfreq;

	/* ccf = ceil(fmhz / 5MHz); 0 or >8 means the reported
	 * frequency is bogus and we fall back to 20MHz below.
	 */
	if(fmhz <= (5000000))
		ccf = 0;
	else
		ccf = (((5000000 - 1) + (fmhz))/(5000000));
	if(!ccf || ccf > 8) {
		/* If we can't find anything reasonable,
		 * just assume 20MHZ. This is the clock
		 * frequency of the older sun4c's where I've
		 * been unable to find the clock-frequency
		 * PROM property. All other machines provide
		 * useful values it seems.
		 */
		ccf = ESP_CCF_F4;
		fmhz = (20000000);
	}
	if(ccf==(ESP_CCF_F7+1))
		esp->cfact = ESP_CCF_F0;
	else if(ccf == ESP_CCF_NEVER)
		esp->cfact = ESP_CCF_F2;
	else
		esp->cfact = ccf;
	esp->cfreq = fmhz;
	esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;

	printk("SCSI ID %d Clk %dMHz CCF=%d TOut %d ",
	       esp->scsi_id, (esp->cfreq / 1000000),
	       ccf, (int) esp->neg_defp);

	/* Fill in ehost data */
	esp->ehost->base = (unsigned long)eregs;
	esp->ehost->this_id = esp->scsi_id;
	esp->ehost->irq = esp->irq;

	/* SCSI id mask */
	esp->scsi_id_mask = (1 << esp->scsi_id);

	/* Probe the revision of this esp: write cfg2 and see which
	 * bits stick, then try cfg3 -- older chips lack these regs.
	 */
	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
	esp_write(eregs->esp_cfg2, esp->config2);
	if((esp_read(eregs->esp_cfg2) & ~(ESP_CONFIG2_MAGIC)) !=
	   (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
		printk("NCR53C90(esp100)\n");
		esp->erev = esp100;
	} else {
		esp->config2 = 0;
		esp_write(eregs->esp_cfg2, 0);
		esp_write(eregs->esp_cfg3, 5);
		if(esp_read(eregs->esp_cfg3) != 5) {
			printk("NCR53C90A(esp100a)\n");
			esp->erev = esp100a;
		} else {
			int target;

			for(target=0; target<8; target++)
				esp->config3[target] = 0;
			esp->prev_cfg3 = 0;
			esp_write(eregs->esp_cfg3, 0);
			if(ccf > ESP_CCF_F5) {
				printk("NCR53C9XF(espfast)\n");
				esp->erev = fast;
				esp->sync_defp = SYNC_DEFP_FAST;
			} else {
				printk("NCR53C9x(esp236)\n");
				esp->erev = esp236;
			}
		}
	}

	/* Initialize the command queues */
	esp->current_SC = NULL;
	esp->disconnected_SC = NULL;
	esp->issue_SC = NULL;

	/* Clear the state machines. */
	esp->targets_present = 0;
	esp->resetting_bus = 0;
	esp->snip = 0;

	init_waitqueue_head(&esp->reset_queue);

	esp->fas_premature_intr_workaround = 0;
	for(i = 0; i < 32; i++)
		esp->espcmdlog[i] = 0;
	esp->espcmdent = 0;
	for(i = 0; i < 16; i++) {
		esp->cur_msgout[i] = 0;
		esp->cur_msgin[i] = 0;
	}
	esp->prevmsgout = esp->prevmsgin = 0;
	esp->msgout_len = esp->msgin_len = 0;

	/* Clear the one behind caches to hold unmatchable values. */
	esp->prev_soff = esp->prev_stp = esp->prev_cfg3 = 0xff;

	/* Reset the thing before we try anything... */
	esp_bootup_reset(esp, eregs);

	esps_in_use++;
}
747
748/* The info function will return whatever useful
749 * information the developer sees fit. If not provided, then
750 * the name field will be used instead.
751 */
752const char *esp_info(struct Scsi_Host *host)
753{
754 struct NCR_ESP *esp;
755
756 esp = (struct NCR_ESP *) host->hostdata;
757 switch(esp->erev) {
758 case esp100:
759 return "ESP100 (NCR53C90)";
760 case esp100a:
761 return "ESP100A (NCR53C90A)";
762 case esp236:
763 return "ESP236 (NCR53C9x)";
764 case fas216:
765 return "Emulex FAS216";
766 case fas236:
767 return "Emulex FAS236";
768 case fas366:
769 return "QLogic FAS366";
770 case fas100a:
771 return "FPESP100A";
772 case fsc:
773 return "Symbios Logic 53CF9x-2";
774 default:
775 panic("Bogon ESP revision");
776 };
777}
778EXPORT_SYMBOL(esp_info);
779
/* From Wolfgang Stanglmeier's NCR scsi driver. */
/* Sliding-window buffer descriptor used by copy_mem_info()/copy_info()
 * to produce the [offset, offset+length) slice of the /proc output.
 */
struct info_str
{
	char *buffer;	/* destination buffer supplied by the proc layer */
	int length;	/* capacity of buffer */
	int offset;	/* first byte of generated output the caller wants */
	int pos;	/* bytes of output generated so far */
};
788
/* Copy len bytes of generated output into info->buffer, honoring the
 * window described by info->offset/info->length: bytes entirely before
 * the requested offset are counted but not copied, and output is
 * truncated once the buffer window is full.
 */
static void copy_mem_info(struct info_str *info, char *data, int len)
{
	/* Clamp so we never write past the buffer's capacity. */
	if (info->pos + len > info->length)
		len = info->length - info->pos;

	/* Chunk lies entirely before the requested window: just count it. */
	if (info->pos + len < info->offset) {
		info->pos += len;
		return;
	}
	/* Chunk straddles the window start: skip the leading part. */
	if (info->pos < info->offset) {
	        data += (info->offset - info->pos);
	        len  -= (info->offset - info->pos);
	}

	if (len > 0) {
		memcpy(info->buffer + info->pos, data, len);
		info->pos += len;
	}
}
808
/* printf-style front end for copy_mem_info(): format into a small
 * stack buffer and append the result to the proc output window.
 * Returns the number of bytes appended.
 *
 * Fix vs. original: use vsnprintf instead of vsprintf.  The original
 * wrote into the fixed 81-byte buffer unbounded, so any formatted
 * line of 81+ characters smashed the stack.  Output longer than the
 * buffer is now truncated.
 */
static int copy_info(struct info_str *info, char *fmt, ...)
{
	va_list args;
	char buf[81];
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len < 0)
		len = 0;			/* format error: nothing to copy */
	else if (len >= (int) sizeof(buf))
		len = sizeof(buf) - 1;		/* output was truncated */

	copy_mem_info(info, buf, len);
	return len;
}
822
/* Generate the /proc/scsi report for one ESP host into ptr, honoring
 * the (offset, len) window.  Lists the chip model, the bitmap of live
 * targets, and per-target sync/disconnect settings.  Returns the
 * number of bytes available past the requested offset.
 */
static int esp_host_info(struct NCR_ESP *esp, char *ptr, off_t offset, int len)
{
	struct scsi_device *sdev;
	struct info_str info;
	int i;

	info.buffer	= ptr;
	info.length	= len;
	info.offset	= offset;
	info.pos	= 0;

	copy_info(&info, "ESP Host Adapter:\n");
	copy_info(&info, "\tESP Model\t\t");
	switch(esp->erev) {
	case esp100:
		copy_info(&info, "ESP100 (NCR53C90)\n");
		break;
	case esp100a:
		copy_info(&info, "ESP100A (NCR53C90A)\n");
		break;
	case esp236:
		copy_info(&info, "ESP236 (NCR53C9x)\n");
		break;
	case fas216:
		copy_info(&info, "Emulex FAS216\n");
		break;
	case fas236:
		copy_info(&info, "Emulex FAS236\n");
		break;
	case fas100a:
		copy_info(&info, "FPESP100A\n");
		break;
	case fast:
		copy_info(&info, "Generic FAST\n");
		break;
	case fas366:
		copy_info(&info, "QLogic FAS366\n");
		break;
	case fsc:
		copy_info(&info, "Symbios Logic 53C9x-2\n");
		break;
	case espunknown:
	default:
		copy_info(&info, "Unknown!\n");
		break;
	};
	copy_info(&info, "\tLive Targets\t\t[ ");
	/* NOTE(review): loop covers ids 0..14 only -- target 15 would
	 * never be listed; confirm whether that is intentional.
	 */
	for(i = 0; i < 15; i++) {
		if(esp->targets_present & (1 << i))
			copy_info(&info, "%d ", i);
	}
	copy_info(&info, "]\n\n");
	
	/* Now describe the state of each existing target. */
	copy_info(&info, "Target #\tconfig3\t\tSync Capabilities\tDisconnect\n");

	shost_for_each_device(sdev, esp->ehost) {
		struct esp_device *esp_dev = sdev->hostdata;
		uint id = sdev->id;

		if (!(esp->targets_present & (1 << id)))
			continue;

		copy_info(&info, "%d\t\t", id);
		copy_info(&info, "%08lx\t", esp->config3[id]);
		copy_info(&info, "[%02lx,%02lx]\t\t\t",
			esp_dev->sync_max_offset,
			esp_dev->sync_min_period);
		copy_info(&info, "%s\n", esp_dev->disconnect ? "yes" : "no");
	}

	/* Bytes usable beyond the requested offset, or 0 if the window
	 * starts past everything we generated.
	 */
	return info.pos > info.offset? info.pos - info.offset : 0;
}
896
/* ESP proc filesystem code.
 * scsi_host_template proc_info hook: reads delegate to
 * esp_host_info(); writes (inout != 0) are not supported.
 */
int esp_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, int length,
		  int inout)
{
	struct NCR_ESP *esp = (struct NCR_ESP *)shost->hostdata;

	if(inout)
		return -EINVAL; /* not yet */
	if(start)
		*start = buffer;
	return esp_host_info(esp, buffer, offset, length);
}
EXPORT_SYMBOL(esp_proc_info);
910
/* Set up SCp DMA bookkeeping for a command: either a single flat
 * buffer (use_sg == 0) or the first entry of a scatterlist.  Board
 * code may override address translation via the dma_mmu_get_scsi_*
 * hooks; otherwise a plain virt_to_phys() is used.
 */
static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	if(sp->use_sg == 0) {
		/* Single contiguous buffer. */
		sp->SCp.this_residual = sp->request_bufflen;
		sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
		sp->SCp.buffers_residual = 0;
		if (esp->dma_mmu_get_scsi_one)
			esp->dma_mmu_get_scsi_one(esp, sp);
		else
			sp->SCp.ptr =
				(char *) virt_to_phys(sp->request_buffer);
	} else {
		/* Scatter-gather: start with the first segment. */
		sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
		sp->SCp.buffers_residual = sp->use_sg - 1;
		sp->SCp.this_residual = sp->SCp.buffer->length;
		if (esp->dma_mmu_get_scsi_sgl)
			esp->dma_mmu_get_scsi_sgl(esp, sp);
		else
			sp->SCp.ptr =
				(char *) virt_to_phys(sg_virt(sp->SCp.buffer));
	}
}
933
934static void esp_release_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
935{
936 if(sp->use_sg == 0) {
937 if (esp->dma_mmu_release_scsi_one)
938 esp->dma_mmu_release_scsi_one(esp, sp);
939 } else {
940 if (esp->dma_mmu_release_scsi_sgl)
941 esp->dma_mmu_release_scsi_sgl(esp, sp);
942 }
943}
944
945static void esp_restore_pointers(struct NCR_ESP *esp, Scsi_Cmnd *sp)
946{
947 struct esp_pointers *ep = &esp->data_pointers[scmd_id(sp)];
948
949 sp->SCp.ptr = ep->saved_ptr;
950 sp->SCp.buffer = ep->saved_buffer;
951 sp->SCp.this_residual = ep->saved_this_residual;
952 sp->SCp.buffers_residual = ep->saved_buffers_residual;
953}
954
955static void esp_save_pointers(struct NCR_ESP *esp, Scsi_Cmnd *sp)
956{
957 struct esp_pointers *ep = &esp->data_pointers[scmd_id(sp)];
958
959 ep->saved_ptr = sp->SCp.ptr;
960 ep->saved_buffer = sp->SCp.buffer;
961 ep->saved_this_residual = sp->SCp.this_residual;
962 ep->saved_buffers_residual = sp->SCp.buffers_residual;
963}
964
965/* Some rules:
966 *
967 * 1) Never ever panic while something is live on the bus.
968 * If there is to be any chance of syncing the disks this
969 * rule is to be obeyed.
970 *
971 * 2) Any target that causes a foul condition will no longer
972 * have synchronous transfers done to it, no questions
973 * asked.
974 *
975 * 3) Keep register accesses to a minimum. Think about some
976 * day when we have Xbus machines this is running on and
977 * the ESP chip is on the other end of the machine on a
978 * different board from the cpu where this is running.
979 */
980
981/* Fire off a command. We assume the bus is free and that the only
982 * case where we could see an interrupt is where we have disconnected
983 * commands active and they are trying to reselect us.
984 */
985static inline void esp_check_cmd(struct NCR_ESP *esp, Scsi_Cmnd *sp)
986{
987 switch(sp->cmd_len) {
988 case 6:
989 case 10:
990 case 12:
991 esp->esp_slowcmd = 0;
992 break;
993
994 default:
995 esp->esp_slowcmd = 1;
996 esp->esp_scmdleft = sp->cmd_len;
997 esp->esp_scmdp = &sp->cmnd[0];
998 break;
999 };
1000}
1001
/* Build an extended SDTR (synchronous data transfer request) message
 * in cur_msgout: EXTENDED_MESSAGE header, body length 3, SDTR code,
 * then the proposed transfer period and offset.  msgout_len covers
 * the full 5 bytes.
 */
static inline void build_sync_nego_msg(struct NCR_ESP *esp, int period, int offset)
{
	esp->cur_msgout[0] = EXTENDED_MESSAGE;
	esp->cur_msgout[1] = 3;
	esp->cur_msgout[2] = EXTENDED_SDTR;
	esp->cur_msgout[3] = period;
	esp->cur_msgout[4] = offset;
	esp->msgout_len = 5;
}
1011
1012static void esp_exec_cmd(struct NCR_ESP *esp)
1013{
1014 struct ESP_regs *eregs = esp->eregs;
1015 struct esp_device *esp_dev;
1016 Scsi_Cmnd *SCptr;
1017 struct scsi_device *SDptr;
1018 volatile unchar *cmdp = esp->esp_command;
1019 unsigned char the_esp_command;
1020 int lun, target;
1021 int i;
1022
1023 /* Hold off if we have disconnected commands and
1024 * an IRQ is showing...
1025 */
1026 if(esp->disconnected_SC && esp->dma_irq_p(esp))
1027 return;
1028
1029 /* Grab first member of the issue queue. */
1030 SCptr = esp->current_SC = remove_first_SC(&esp->issue_SC);
1031
1032 /* Safe to panic here because current_SC is null. */
1033 if(!SCptr)
1034 panic("esp: esp_exec_cmd and issue queue is NULL");
1035
1036 SDptr = SCptr->device;
1037 esp_dev = SDptr->hostdata;
1038 lun = SCptr->device->lun;
1039 target = SCptr->device->id;
1040
1041 esp->snip = 0;
1042 esp->msgout_len = 0;
1043
1044 /* Send it out whole, or piece by piece? The ESP
1045 * only knows how to automatically send out 6, 10,
1046 * and 12 byte commands. I used to think that the
1047 * Linux SCSI code would never throw anything other
1048 * than that to us, but then again there is the
1049 * SCSI generic driver which can send us anything.
1050 */
1051 esp_check_cmd(esp, SCptr);
1052
1053 /* If arbitration/selection is successful, the ESP will leave
1054 * ATN asserted, causing the target to go into message out
1055 * phase. The ESP will feed the target the identify and then
1056 * the target can only legally go to one of command,
1057 * datain/out, status, or message in phase, or stay in message
1058 * out phase (should we be trying to send a sync negotiation
1059 * message after the identify). It is not allowed to drop
1060 * BSY, but some buggy targets do and we check for this
1061 * condition in the selection complete code. Most of the time
1062 * we'll make the command bytes available to the ESP and it
1063 * will not interrupt us until it finishes command phase, we
1064 * cannot do this for command sizes the ESP does not
1065 * understand and in this case we'll get interrupted right
1066 * when the target goes into command phase.
1067 *
1068 * It is absolutely _illegal_ in the presence of SCSI-2 devices
1069 * to use the ESP select w/o ATN command. When SCSI-2 devices are
1070 * present on the bus we _must_ always go straight to message out
1071 * phase with an identify message for the target. Being that
1072 * selection attempts in SCSI-1 w/o ATN was an option, doing SCSI-2
1073 * selections should not confuse SCSI-1 we hope.
1074 */
1075
1076 if(esp_dev->sync) {
1077 /* this targets sync is known */
1078#ifdef CONFIG_SCSI_MAC_ESP
1079do_sync_known:
1080#endif
1081 if(esp_dev->disconnect)
1082 *cmdp++ = IDENTIFY(1, lun);
1083 else
1084 *cmdp++ = IDENTIFY(0, lun);
1085
1086 if(esp->esp_slowcmd) {
1087 the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA);
1088 esp_advance_phase(SCptr, in_slct_stop);
1089 } else {
1090 the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
1091 esp_advance_phase(SCptr, in_slct_norm);
1092 }
1093 } else if(!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) {
1094 /* After the bootup SCSI code sends both the
1095 * TEST_UNIT_READY and INQUIRY commands we want
1096 * to at least attempt allowing the device to
1097 * disconnect.
1098 */
1099 ESPMISC(("esp: Selecting device for first time. target=%d "
1100 "lun=%d\n", target, SCptr->device->lun));
1101 if(!SDptr->borken && !esp_dev->disconnect)
1102 esp_dev->disconnect = 1;
1103
1104 *cmdp++ = IDENTIFY(0, lun);
1105 esp->prevmsgout = NOP;
1106 esp_advance_phase(SCptr, in_slct_norm);
1107 the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
1108
1109 /* Take no chances... */
1110 esp_dev->sync_max_offset = 0;
1111 esp_dev->sync_min_period = 0;
1112 } else {
1113 int toshiba_cdrom_hwbug_wkaround = 0;
1114
1115#ifdef CONFIG_SCSI_MAC_ESP
1116 /* Never allow synchronous transfers (disconnect OK) on
1117 * Macintosh. Well, maybe later when we figured out how to
1118 * do DMA on the machines that support it ...
1119 */
1120 esp_dev->disconnect = 1;
1121 esp_dev->sync_max_offset = 0;
1122 esp_dev->sync_min_period = 0;
1123 esp_dev->sync = 1;
1124 esp->snip = 0;
1125 goto do_sync_known;
1126#endif
1127 /* We've talked to this guy before,
1128 * but never negotiated. Let's try
1129 * sync negotiation.
1130 */
1131 if(!SDptr->borken) {
1132 if((SDptr->type == TYPE_ROM) &&
1133 (!strncmp(SDptr->vendor, "TOSHIBA", 7))) {
1134 /* Nice try sucker... */
1135 ESPMISC(("esp%d: Disabling sync for buggy "
1136 "Toshiba CDROM.\n", esp->esp_id));
1137 toshiba_cdrom_hwbug_wkaround = 1;
1138 build_sync_nego_msg(esp, 0, 0);
1139 } else {
1140 build_sync_nego_msg(esp, esp->sync_defp, 15);
1141 }
1142 } else {
1143 build_sync_nego_msg(esp, 0, 0);
1144 }
1145 esp_dev->sync = 1;
1146 esp->snip = 1;
1147
1148 /* A fix for broken SCSI1 targets, when they disconnect
1149 * they lock up the bus and confuse ESP. So disallow
1150 * disconnects for SCSI1 targets for now until we
1151 * find a better fix.
1152 *
1153 * Addendum: This is funny, I figured out what was going
1154 * on. The blotzed SCSI1 target would disconnect,
1155 * one of the other SCSI2 targets or both would be
1156 * disconnected as well. The SCSI1 target would
1157 * stay disconnected long enough that we start
1158 * up a command on one of the SCSI2 targets. As
1159 * the ESP is arbitrating for the bus the SCSI1
1160 * target begins to arbitrate as well to reselect
1161 * the ESP. The SCSI1 target refuses to drop it's
1162 * ID bit on the data bus even though the ESP is
1163 * at ID 7 and is the obvious winner for any
1164 * arbitration. The ESP is a poor sport and refuses
1165 * to lose arbitration, it will continue indefinitely
1166 * trying to arbitrate for the bus and can only be
1167 * stopped via a chip reset or SCSI bus reset.
1168 * Therefore _no_ disconnects for SCSI1 targets
1169 * thank you very much. ;-)
1170 */
1171 if(((SDptr->scsi_level < 3) && (SDptr->type != TYPE_TAPE)) ||
1172 toshiba_cdrom_hwbug_wkaround || SDptr->borken) {
1173 ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d "
1174 "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun));
1175 esp_dev->disconnect = 0;
1176 *cmdp++ = IDENTIFY(0, lun);
1177 } else {
1178 *cmdp++ = IDENTIFY(1, lun);
1179 }
1180
1181 /* ESP fifo is only so big...
1182 * Make this look like a slow command.
1183 */
1184 esp->esp_slowcmd = 1;
1185 esp->esp_scmdleft = SCptr->cmd_len;
1186 esp->esp_scmdp = &SCptr->cmnd[0];
1187
1188 the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA);
1189 esp_advance_phase(SCptr, in_slct_msg);
1190 }
1191
1192 if(!esp->esp_slowcmd)
1193 for(i = 0; i < SCptr->cmd_len; i++)
1194 *cmdp++ = SCptr->cmnd[i];
1195
1196 esp_write(eregs->esp_busid, (target & 7));
1197 if (esp->prev_soff != esp_dev->sync_max_offset ||
1198 esp->prev_stp != esp_dev->sync_min_period ||
1199 (esp->erev > esp100a &&
1200 esp->prev_cfg3 != esp->config3[target])) {
1201 esp->prev_soff = esp_dev->sync_max_offset;
1202 esp_write(eregs->esp_soff, esp->prev_soff);
1203 esp->prev_stp = esp_dev->sync_min_period;
1204 esp_write(eregs->esp_stp, esp->prev_stp);
1205 if(esp->erev > esp100a) {
1206 esp->prev_cfg3 = esp->config3[target];
1207 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
1208 }
1209 }
1210 i = (cmdp - esp->esp_command);
1211
1212 /* Set up the DMA and ESP counters */
1213 if(esp->do_pio_cmds){
1214 int j = 0;
1215
1216 /*
1217 * XXX MSch:
1218 *
1219 * It seems this is required, at least to clean up
1220 * after failed commands when using PIO mode ...
1221 */
1222 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
1223
1224 for(;j<i;j++)
1225 esp_write(eregs->esp_fdata, esp->esp_command[j]);
1226 the_esp_command &= ~ESP_CMD_DMA;
1227
1228 /* Tell ESP to "go". */
1229 esp_cmd(esp, eregs, the_esp_command);
1230 } else {
1231 /* Set up the ESP counters */
1232 esp_write(eregs->esp_tclow, i);
1233 esp_write(eregs->esp_tcmed, 0);
1234 esp->dma_init_write(esp, esp->esp_command_dvma, i);
1235
1236 /* Tell ESP to "go". */
1237 esp_cmd(esp, eregs, the_esp_command);
1238 }
1239}
1240
1241/* Queue a SCSI command delivered from the mid-level Linux SCSI code. */
1242int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
1243{
1244 struct NCR_ESP *esp;
1245
1246 /* Set up func ptr and initial driver cmd-phase. */
1247 SCpnt->scsi_done = done;
1248 SCpnt->SCp.phase = not_issued;
1249
1250 esp = (struct NCR_ESP *) SCpnt->device->host->hostdata;
1251
1252 if(esp->dma_led_on)
1253 esp->dma_led_on(esp);
1254
1255 /* We use the scratch area. */
1256 ESPQUEUE(("esp_queue: target=%d lun=%d ", SCpnt->device->id, SCpnt->lun));
1257 ESPDISC(("N<%02x,%02x>", SCpnt->device->id, SCpnt->lun));
1258
1259 esp_get_dmabufs(esp, SCpnt);
1260 esp_save_pointers(esp, SCpnt); /* FIXME for tag queueing */
1261
1262 SCpnt->SCp.Status = CHECK_CONDITION;
1263 SCpnt->SCp.Message = 0xff;
1264 SCpnt->SCp.sent_command = 0;
1265
1266 /* Place into our queue. */
1267 if(SCpnt->cmnd[0] == REQUEST_SENSE) {
1268 ESPQUEUE(("RQSENSE\n"));
1269 prepend_SC(&esp->issue_SC, SCpnt);
1270 } else {
1271 ESPQUEUE(("\n"));
1272 append_SC(&esp->issue_SC, SCpnt);
1273 }
1274
1275 /* Run it now if we can. */
1276 if(!esp->current_SC && !esp->resetting_bus)
1277 esp_exec_cmd(esp);
1278
1279 return 0;
1280}
1281
/* Dump driver state.
 *
 * Log a one-line summary for a single command: target, lun, the
 * previously completed driver phase and the current phase.
 */
static void esp_dump_cmd(Scsi_Cmnd *SCptr)
{
	ESPLOG(("[tgt<%02x> lun<%02x> "
		"pphase<%s> cphase<%s>]",
		SCptr->device->id, SCptr->device->lun,
		phase_string(SCptr->SCp.sent_command),
		phase_string(SCptr->SCp.phase)));
}
1291
1292static void esp_dump_state(struct NCR_ESP *esp,
1293 struct ESP_regs *eregs)
1294{
1295 Scsi_Cmnd *SCptr = esp->current_SC;
1296#ifdef DEBUG_ESP_CMDS
1297 int i;
1298#endif
1299
1300 ESPLOG(("esp%d: dumping state\n", esp->esp_id));
1301
1302 /* Print DMA status */
1303 esp->dma_dump_state(esp);
1304
1305 ESPLOG(("esp%d: SW [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
1306 esp->esp_id, esp->sreg, esp->seqreg, esp->ireg));
1307 ESPLOG(("esp%d: HW reread [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
1308 esp->esp_id, esp_read(eregs->esp_status), esp_read(eregs->esp_sstep),
1309 esp_read(eregs->esp_intrpt)));
1310#ifdef DEBUG_ESP_CMDS
1311 printk("esp%d: last ESP cmds [", esp->esp_id);
1312 i = (esp->espcmdent - 1) & 31;
1313 printk("<");
1314 esp_print_cmd(esp->espcmdlog[i]);
1315 printk(">");
1316 i = (i - 1) & 31;
1317 printk("<");
1318 esp_print_cmd(esp->espcmdlog[i]);
1319 printk(">");
1320 i = (i - 1) & 31;
1321 printk("<");
1322 esp_print_cmd(esp->espcmdlog[i]);
1323 printk(">");
1324 i = (i - 1) & 31;
1325 printk("<");
1326 esp_print_cmd(esp->espcmdlog[i]);
1327 printk(">");
1328 printk("]\n");
1329#endif /* (DEBUG_ESP_CMDS) */
1330
1331 if(SCptr) {
1332 ESPLOG(("esp%d: current command ", esp->esp_id));
1333 esp_dump_cmd(SCptr);
1334 }
1335 ESPLOG(("\n"));
1336 SCptr = esp->disconnected_SC;
1337 ESPLOG(("esp%d: disconnected ", esp->esp_id));
1338 while(SCptr) {
1339 esp_dump_cmd(SCptr);
1340 SCptr = (Scsi_Cmnd *) SCptr->host_scribble;
1341 }
1342 ESPLOG(("\n"));
1343}
1344
/* Abort a command. The host_lock is acquired by caller.
 *
 * Strategy depends on where the command currently is:
 *  - on the bus right now: assert ATN and queue an ABORT message out,
 *  - still in the issue queue: unlink it and complete with DID_ABORT,
 *  - disconnected: give up (FAILED) and let the midlayer escalate.
 */
int esp_abort(Scsi_Cmnd *SCptr)
{
	struct NCR_ESP *esp = (struct NCR_ESP *) SCptr->device->host->hostdata;
	struct ESP_regs *eregs = esp->eregs;
	int don;	/* dma_ports_p() result: re-enable DMA ints on exit when set */

	ESPLOG(("esp%d: Aborting command\n", esp->esp_id));
	esp_dump_state(esp, eregs);

	/* Wheee, if this is the current command on the bus, the
	 * best we can do is assert ATN and wait for msgout phase.
	 * This should even fix a hung SCSI bus when we lose state
	 * in the driver and timeout because the eventual phase change
	 * will cause the ESP to (eventually) give an interrupt.
	 */
	if(esp->current_SC == SCptr) {
		esp->cur_msgout[0] = ABORT;
		esp->msgout_len = 1;
		esp->msgout_ctr = 0;
		esp_cmd(esp, eregs, ESP_CMD_SATN);
		return SUCCESS;
	}

	/* If it is still in the issue queue then we can safely
	 * call the completion routine and report abort success.
	 * Quiesce DMA interrupts first so the queue cannot change
	 * underneath us while we walk it.
	 */
	don = esp->dma_ports_p(esp);
	if(don) {
		esp->dma_ints_off(esp);
		synchronize_irq(esp->irq);
	}
	if(esp->issue_SC) {
		Scsi_Cmnd **prev, *this;
		/* Singly-linked list threaded through host_scribble;
		 * 'prev' tracks the link to patch when unlinking 'this'.
		 */
		for(prev = (&esp->issue_SC), this = esp->issue_SC;
		    this;
		    prev = (Scsi_Cmnd **) &(this->host_scribble),
		    this = (Scsi_Cmnd *) this->host_scribble) {
			if(this == SCptr) {
				*prev = (Scsi_Cmnd *) this->host_scribble;
				this->host_scribble = NULL;
				esp_release_dmabufs(esp, this);
				this->result = DID_ABORT << 16;
				this->scsi_done(this);
				if(don)
					esp->dma_ints_on(esp);
				return SUCCESS;
			}
		}
	}

	/* Yuck, the command to abort is disconnected, it is not
	 * worth trying to abort it now if something else is live
	 * on the bus at this time. So, we let the SCSI code wait
	 * a little bit and try again later.
	 */
	if(esp->current_SC) {
		if(don)
			esp->dma_ints_on(esp);
		return FAILED;
	}

	/* It's disconnected, we have to reconnect to re-establish
	 * the nexus and tell the device to abort. However, we really
	 * cannot 'reconnect' per se. Don't try to be fancy, just
	 * indicate failure, which causes our caller to reset the whole
	 * bus.
	 */

	if(don)
		esp->dma_ints_on(esp);
	return FAILED;
}
1418
1419/* We've sent ESP_CMD_RS to the ESP, the interrupt had just
1420 * arrived indicating the end of the SCSI bus reset. Our job
1421 * is to clean out the command queues and begin re-execution
1422 * of SCSI commands once more.
1423 */
1424static int esp_finish_reset(struct NCR_ESP *esp,
1425 struct ESP_regs *eregs)
1426{
1427 Scsi_Cmnd *sp = esp->current_SC;
1428
1429 /* Clean up currently executing command, if any. */
1430 if (sp != NULL) {
1431 esp_release_dmabufs(esp, sp);
1432 sp->result = (DID_RESET << 16);
1433 sp->scsi_done(sp);
1434 esp->current_SC = NULL;
1435 }
1436
1437 /* Clean up disconnected queue, they have been invalidated
1438 * by the bus reset.
1439 */
1440 if (esp->disconnected_SC) {
1441 while((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) {
1442 esp_release_dmabufs(esp, sp);
1443 sp->result = (DID_RESET << 16);
1444 sp->scsi_done(sp);
1445 }
1446 }
1447
1448 /* SCSI bus reset is complete. */
1449 esp->resetting_bus = 0;
1450 wake_up(&esp->reset_queue);
1451
1452 /* Ok, now it is safe to get commands going once more. */
1453 if(esp->issue_SC)
1454 esp_exec_cmd(esp);
1455
1456 return do_intr_end;
1457}
1458
/* Initiate a SCSI bus reset: flag the bus as resetting (which blocks
 * new command issue in esp_queue) and tell the chip to drive reset.
 * esp_finish_reset() completes the job when the reset-complete
 * interrupt arrives.
 */
static int esp_do_resetbus(struct NCR_ESP *esp,
			   struct ESP_regs *eregs)
{
	ESPLOG(("esp%d: Resetting scsi bus\n", esp->esp_id));
	esp->resetting_bus = 1;
	esp_cmd(esp, eregs, ESP_CMD_RS);

	return do_intr_end;
}
1468
/* Reset ESP chip, reset hanging bus, then kill active and
 * disconnected commands for targets without soft reset.
 *
 * NOTE(review): despite the old "host_lock is acquired by caller"
 * note, this function takes ehost->host_lock itself and then sleeps
 * in wait_event(), so the caller must NOT hold the host lock --
 * confirm against the midlayer's eh_bus_reset_handler calling
 * convention.
 */
int esp_reset(Scsi_Cmnd *SCptr)
{
	struct NCR_ESP *esp = (struct NCR_ESP *) SCptr->device->host->hostdata;

	/* Start the bus reset under the lock ... */
	spin_lock_irq(esp->ehost->host_lock);
	(void) esp_do_resetbus(esp, esp->eregs);
	spin_unlock_irq(esp->ehost->host_lock);

	/* ... then wait for esp_finish_reset() to signal completion. */
	wait_event(esp->reset_queue, (esp->resetting_bus == 0));

	return SUCCESS;
}
1486
1487/* Internal ESP done function. */
1488static void esp_done(struct NCR_ESP *esp, int error)
1489{
1490 Scsi_Cmnd *done_SC;
1491
1492 if(esp->current_SC) {
1493 done_SC = esp->current_SC;
1494 esp->current_SC = NULL;
1495 esp_release_dmabufs(esp, done_SC);
1496 done_SC->result = error;
1497 done_SC->scsi_done(done_SC);
1498
1499 /* Bus is free, issue any commands in the queue. */
1500 if(esp->issue_SC && !esp->current_SC)
1501 esp_exec_cmd(esp);
1502 } else {
1503 /* Panic is safe as current_SC is null so we may still
1504 * be able to accept more commands to sync disk buffers.
1505 */
1506 ESPLOG(("panicing\n"));
1507 panic("esp: done() called with NULL esp->current_SC");
1508 }
1509}
1510
1511/* Wheee, ESP interrupt engine. */
1512
1513/* Forward declarations. */
1514static int esp_do_phase_determine(struct NCR_ESP *esp,
1515 struct ESP_regs *eregs);
1516static int esp_do_data_finale(struct NCR_ESP *esp, struct ESP_regs *eregs);
1517static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs);
1518static int esp_do_status(struct NCR_ESP *esp, struct ESP_regs *eregs);
1519static int esp_do_msgin(struct NCR_ESP *esp, struct ESP_regs *eregs);
1520static int esp_do_msgindone(struct NCR_ESP *esp, struct ESP_regs *eregs);
1521static int esp_do_msgout(struct NCR_ESP *esp, struct ESP_regs *eregs);
1522static int esp_do_cmdbegin(struct NCR_ESP *esp, struct ESP_regs *eregs);
1523
1524#define sreg_datainp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DIP)
1525#define sreg_dataoutp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DOP)
1526
1527/* We try to avoid some interrupts by jumping ahead and see if the ESP
1528 * has gotten far enough yet. Hence the following.
1529 */
1530static inline int skipahead1(struct NCR_ESP *esp, struct ESP_regs *eregs,
1531 Scsi_Cmnd *scp, int prev_phase, int new_phase)
1532{
1533 if(scp->SCp.sent_command != prev_phase)
1534 return 0;
1535
1536 if(esp->dma_irq_p(esp)) {
1537 /* Yes, we are able to save an interrupt. */
1538 esp->sreg = (esp_read(eregs->esp_status) & ~(ESP_STAT_INTR));
1539 esp->ireg = esp_read(eregs->esp_intrpt);
1540 if(!(esp->ireg & ESP_INTR_SR))
1541 return 0;
1542 else
1543 return do_reset_complete;
1544 }
1545 /* Ho hum, target is taking forever... */
1546 scp->SCp.sent_command = new_phase; /* so we don't recurse... */
1547 return do_intr_end;
1548}
1549
1550static inline int skipahead2(struct NCR_ESP *esp,
1551 struct ESP_regs *eregs,
1552 Scsi_Cmnd *scp, int prev_phase1, int prev_phase2,
1553 int new_phase)
1554{
1555 if(scp->SCp.sent_command != prev_phase1 &&
1556 scp->SCp.sent_command != prev_phase2)
1557 return 0;
1558 if(esp->dma_irq_p(esp)) {
1559 /* Yes, we are able to save an interrupt. */
1560 esp->sreg = (esp_read(eregs->esp_status) & ~(ESP_STAT_INTR));
1561 esp->ireg = esp_read(eregs->esp_intrpt);
1562 if(!(esp->ireg & ESP_INTR_SR))
1563 return 0;
1564 else
1565 return do_reset_complete;
1566 }
1567 /* Ho hum, target is taking forever... */
1568 scp->SCp.sent_command = new_phase; /* so we don't recurse... */
1569 return do_intr_end;
1570}
1571
/* Misc. esp helper macros. */

/* Program the 16-bit ESP transfer counter (low/med bytes).  Wrapped in
 * do { } while (0) so both register writes stay together if the macro
 * is ever used as the body of an if/else.
 */
#define esp_setcount(__eregs, __cnt) \
	do { \
		esp_write((__eregs)->esp_tclow, ((__cnt) & 0xff)); \
		esp_write((__eregs)->esp_tcmed, (((__cnt) >> 8) & 0xff)); \
	} while (0)

/* Read back the 16-bit transfer counter. */
#define esp_getcount(__eregs) \
	((esp_read((__eregs)->esp_tclow)&0xff) | \
	 ((esp_read((__eregs)->esp_tcmed)&0xff) << 8))

/* Number of bytes currently in the chip FIFO. */
#define fcount(__esp, __eregs) \
	(esp_read((__eregs)->esp_fflags) & ESP_FF_FBYTES)

#define fnzero(__esp, __eregs) \
	(esp_read((__eregs)->esp_fflags) & ESP_FF_ONOTZERO)

/* XXX speculative nops unnecessary when continuing amidst a data phase
 * XXX even on esp100!!! another case of flooding the bus with I/O reg
 * XXX writes...
 */
/* Issue a NOP to the chip, needed only on the original esp100.  The
 * do { } while (0) wrapper prevents the bare if() from capturing an
 * else at the expansion site.
 */
#define esp_maybe_nop(__esp, __eregs) \
	do { \
		if ((__esp)->erev == esp100) \
			esp_cmd((__esp), (__eregs), ESP_CMD_NULL); \
	} while (0)

#define sreg_to_dataphase(__sreg) \
	((((__sreg) & ESP_STAT_PMASK) == ESP_DOP) ? in_dataout : in_datain)
1597
/* The ESP100 when in synchronous data phase, can mistake a long final
 * REQ pulse from the target as an extra byte, it places whatever is on
 * the data lines into the fifo. For now, we will assume when this
 * happens that the target is a bit quirky and we don't want to
 * be talking synchronously to it anyways. Regardless, we need to
 * tell the ESP to eat the extraneous byte so that we can proceed
 * to the next phase.
 *
 * Returns 1 when the bug was detected (the caller then treats the
 * transfer as bogus data), 0 otherwise.
 */
static inline int esp100_sync_hwbug(struct NCR_ESP *esp, struct ESP_regs *eregs,
				    Scsi_Cmnd *sp, int fifocnt)
{
	/* Do not touch this piece of code. */
	if((!(esp->erev == esp100)) ||
	   (!(sreg_datainp((esp->sreg = esp_read(eregs->esp_status))) && !fifocnt) &&
	    !(sreg_dataoutp(esp->sreg) && !fnzero(esp, eregs)))) {
		/* Not an esp100, or no symptom detected; just leave the
		 * FIFO clean after a data-out phase.
		 */
		if(sp->SCp.phase == in_dataout)
			esp_cmd(esp, eregs, ESP_CMD_FLUSH);
		return 0;
	} else {
		/* Async mode for this guy. */
		build_sync_nego_msg(esp, 0, 0);

		/* Ack the bogus byte, but set ATN first. */
		esp_cmd(esp, eregs, ESP_CMD_SATN);
		esp_cmd(esp, eregs, ESP_CMD_MOK);
		return 1;
	}
}
1626
1627/* This closes the window during a selection with a reselect pending, because
1628 * we use DMA for the selection process the FIFO should hold the correct
1629 * contents if we get reselected during this process. So we just need to
1630 * ack the possible illegal cmd interrupt pending on the esp100.
1631 */
1632static inline int esp100_reconnect_hwbug(struct NCR_ESP *esp,
1633 struct ESP_regs *eregs)
1634{
1635 volatile unchar junk;
1636
1637 if(esp->erev != esp100)
1638 return 0;
1639 junk = esp_read(eregs->esp_intrpt);
1640
1641 if(junk & ESP_INTR_SR)
1642 return 1;
1643 return 0;
1644}
1645
1646/* This verifies the BUSID bits during a reselection so that we know which
1647 * target is talking to us.
1648 */
1649static inline int reconnect_target(struct NCR_ESP *esp, struct ESP_regs *eregs)
1650{
1651 int it, me = esp->scsi_id_mask, targ = 0;
1652
1653 if(2 != fcount(esp, eregs))
1654 return -1;
1655 it = esp_read(eregs->esp_fdata);
1656 if(!(it & me))
1657 return -1;
1658 it &= ~me;
1659 if(it & (it - 1))
1660 return -1;
1661 while(!(it & 1))
1662 targ++, it >>= 1;
1663 return targ;
1664}
1665
/* This verifies the identify from the target so that we know which lun is
 * being reconnected.
 *
 * Returns the lun (0-7), or -1 when the byte in the FIFO is not a
 * valid IDENTIFY message.
 */
static inline int reconnect_lun(struct NCR_ESP *esp, struct ESP_regs *eregs)
{
	int lun;

	/* We must still be in message-in phase to read the identify. */
	if((esp->sreg & ESP_STAT_PMASK) != ESP_MIP)
		return -1;
	lun = esp_read(eregs->esp_fdata);

	/* Yes, you read this correctly. We report lun of zero
	 * if we see parity error. ESP reports parity error for
	 * the lun byte, and this is the only way to hope to recover
	 * because the target is connected.
	 */
	if(esp->sreg & ESP_STAT_PERR)
		return 0;

	/* Check for illegal bits being set in the lun.
	 * Bit 7 (0x80) is the mandatory IDENTIFY flag; bit 6 (0x40) is
	 * the disconnect-privilege bit, which per SCSI-2 only an
	 * initiator may send, so it must be clear here.
	 */
	if((lun & 0x40) || !(lun & 0x80))
		return -1;

	return lun & 7;
}
1691
1692/* This puts the driver in a state where it can revitalize a command that
1693 * is being continued due to reselection.
1694 */
1695static inline void esp_connect(struct NCR_ESP *esp, struct ESP_regs *eregs,
1696 Scsi_Cmnd *sp)
1697{
1698 struct scsi_device *dp = sp->device;
1699 struct esp_device *esp_dev = dp->hostdata;
1700
1701 if(esp->prev_soff != esp_dev->sync_max_offset ||
1702 esp->prev_stp != esp_dev->sync_min_period ||
1703 (esp->erev > esp100a &&
1704 esp->prev_cfg3 != esp->config3[scmd_id(sp)])) {
1705 esp->prev_soff = esp_dev->sync_max_offset;
1706 esp_write(eregs->esp_soff, esp->prev_soff);
1707 esp->prev_stp = esp_dev->sync_min_period;
1708 esp_write(eregs->esp_stp, esp->prev_stp);
1709 if(esp->erev > esp100a) {
1710 esp->prev_cfg3 = esp->config3[scmd_id(sp)];
1711 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
1712 }
1713 }
1714 esp->current_SC = sp;
1715}
1716
1717/* This will place the current working command back into the issue queue
1718 * if we are to receive a reselection amidst a selection attempt.
1719 */
1720static inline void esp_reconnect(struct NCR_ESP *esp, Scsi_Cmnd *sp)
1721{
1722 if(!esp->disconnected_SC)
1723 ESPLOG(("esp%d: Weird, being reselected but disconnected "
1724 "command queue is empty.\n", esp->esp_id));
1725 esp->snip = 0;
1726 esp->current_SC = NULL;
1727 sp->SCp.phase = not_issued;
1728 append_SC(&esp->issue_SC, sp);
1729}
1730
/* Begin message in phase: flush stale FIFO contents, then start a
 * transfer-information command to pull the first message byte from the
 * target.  The byte itself is consumed later via esp_do_msgindone().
 */
static int esp_do_msgin(struct NCR_ESP *esp, struct ESP_regs *eregs)
{
	esp_cmd(esp, eregs, ESP_CMD_FLUSH);
	esp_maybe_nop(esp, eregs);	/* esp100 wants a NOP here */
	esp_cmd(esp, eregs, ESP_CMD_TI);
	/* Expect a single message byte to start with. */
	esp->msgin_len = 1;
	esp->msgin_ctr = 0;
	esp_advance_phase(esp->current_SC, in_msgindone);
	return do_work_bus;
}
1742
1743static inline void advance_sg(struct NCR_ESP *esp, Scsi_Cmnd *sp)
1744{
1745 ++sp->SCp.buffer;
1746 --sp->SCp.buffers_residual;
1747 sp->SCp.this_residual = sp->SCp.buffer->length;
1748 if (esp->dma_advance_sg)
1749 esp->dma_advance_sg (sp);
1750 else
1751 sp->SCp.ptr = (char *) virt_to_phys(sg_virt(sp->SCp.buffer));
1752
1753}
1754
1755/* Please note that the way I've coded these routines is that I _always_
1756 * check for a disconnect during any and all information transfer
1757 * phases. The SCSI standard states that the target _can_ cause a BUS
1758 * FREE condition by dropping all MSG/CD/IO/BSY signals. Also note
1759 * that during information transfer phases the target controls every
1760 * change in phase, the only thing the initiator can do is "ask" for
1761 * a message out phase by driving ATN true. The target can, and sometimes
1762 * will, completely ignore this request so we cannot assume anything when
1763 * we try to force a message out phase to abort/reset a target. Most of
1764 * the time the target will eventually be nice and go to message out, so
1765 * we may have to hold on to our state about what we want to tell the target
1766 * for some period of time.
1767 */
1768
1769/* I think I have things working here correctly. Even partial transfers
1770 * within a buffer or sub-buffer should not upset us at all no matter
1771 * how bad the target and/or ESP fucks things up.
1772 */
/* Perform a data phase (in or out): by DMA when the backend can do it,
 * otherwise by polled PIO on the chip FIFO.
 */
static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
{
	Scsi_Cmnd *SCptr = esp->current_SC;
	int thisphase, hmuch;

	ESPDATA(("esp_do_data: "));
	esp_maybe_nop(esp, eregs);
	/* Direction (data-in vs data-out) comes from the latched status. */
	thisphase = sreg_to_dataphase(esp->sreg);
	esp_advance_phase(SCptr, thisphase);
	ESPDATA(("newphase<%s> ", (thisphase == in_datain) ? "DATAIN" : "DATAOUT"));
	/* The DMA backend reports how much it can move; 0 selects PIO. */
	hmuch = esp->dma_can_transfer(esp, SCptr);

	/*
	 * XXX MSch: cater for PIO transfer here; PIO used if hmuch == 0
	 */
	if (hmuch) { /* DMA */
		/*
		 * DMA
		 */
		ESPDATA(("hmuch<%d> ", hmuch));
		esp->current_transfer_size = hmuch;
		/* The FAS premature-interrupt workaround overprograms the
		 * counter by 0x40; esp_do_data_finale() subtracts it again.
		 */
		esp_setcount(eregs, (esp->fas_premature_intr_workaround ?
			(hmuch + 0x40) : hmuch));
		esp->dma_setup(esp, (__u32)((unsigned long)SCptr->SCp.ptr),
			       hmuch, (thisphase == in_datain));
		ESPDATA(("DMA|TI --> do_intr_end\n"));
		esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
		return do_intr_end;
		/*
		 * end DMA
		 */
	} else {
		/*
		 * PIO
		 */
		int oldphase, i = 0; /* or where we left off last time ?? esp->current_data ?? */
		int fifocnt = 0;
		unsigned char *p = phys_to_virt((unsigned long)SCptr->SCp.ptr);

		oldphase = esp_read(eregs->esp_status) & ESP_STAT_PMASK;

		/*
		 * polled transfer; ugly, can we make this happen in a DRQ
		 * interrupt handler ??
		 * requires keeping track of state information in host or
		 * command struct!
		 * Problem: I've never seen a DRQ happen on Mac, not even
		 * with ESP_CMD_DMA ...
		 */

		/* figure out how much needs to be transferred */
		hmuch = SCptr->SCp.this_residual;
		ESPDATA(("hmuch<%d> pio ", hmuch));
		esp->current_transfer_size = hmuch;

		/* tell the ESP ... */
		esp_setcount(eregs, hmuch);

		/* Polled loop: feed/drain the 8-byte chip FIFO until the
		 * byte count is exhausted, the target changes phase or
		 * disconnects, or the FIFO stalls.
		 */
		while (hmuch) {
			int j, fifo_stuck = 0, newphase;
			unsigned long timeout;
#if 0
			unsigned long flags;
#endif
#if 0
			if ( i % 10 )
				ESPDATA(("\r"));
			else
				ESPDATA(( /*"\n"*/ "\r"));
#endif
#if 0
			local_irq_save(flags);
#endif
			if(thisphase == in_datain) {
				/* 'go' ... */
				esp_cmd(esp, eregs, ESP_CMD_TI);

				/* wait for data */
				timeout = 1000000;
				while (!((esp->sreg=esp_read(eregs->esp_status)) & ESP_STAT_INTR) && --timeout)
					udelay(2);
				if (timeout == 0)
					printk("DRQ datain timeout! \n");

				newphase = esp->sreg & ESP_STAT_PMASK;

				/* see how much we got ... */
				fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);

				if (!fifocnt)
					fifo_stuck++;
				else
					fifo_stuck = 0;

				ESPDATA(("\rgot %d st %x ph %x", fifocnt, esp->sreg, newphase));

				/* read fifo */
				for(j=0;j<fifocnt;j++)
					p[i++] = esp_read(eregs->esp_fdata);

				ESPDATA(("(%d) ", i));

				/* how many to go ?? */
				hmuch -= fifocnt;

				/* break if status phase !! */
				if(newphase == ESP_STATP) {
					/* clear int. */
					esp->ireg = esp_read(eregs->esp_intrpt);
					break;
				}
			} else {
#define MAX_FIFO 8
				/* how much will fit ? */
				int this_count = MAX_FIFO - fifocnt;
				if (this_count > hmuch)
					this_count = hmuch;

				/* fill fifo */
				for(j=0;j<this_count;j++)
					esp_write(eregs->esp_fdata, p[i++]);

				/* how many left if this goes out ?? */
				hmuch -= this_count;

				/* 'go' ... */
				esp_cmd(esp, eregs, ESP_CMD_TI);

				/* wait for 'got it' */
				timeout = 1000000;
				while (!((esp->sreg=esp_read(eregs->esp_status)) & ESP_STAT_INTR) && --timeout)
					udelay(2);
				if (timeout == 0)
					printk("DRQ dataout timeout! \n");

				newphase = esp->sreg & ESP_STAT_PMASK;

				/* need to check how much was sent ?? */
				fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);

				ESPDATA(("\rsent %d st %x ph %x", this_count - fifocnt, esp->sreg, newphase));

				ESPDATA(("(%d) ", i));

				/* break if status phase !! */
				if(newphase == ESP_STATP) {
					/* clear int. */
					esp->ireg = esp_read(eregs->esp_intrpt);
					break;
				}

			}

			/* clear int. */
			esp->ireg = esp_read(eregs->esp_intrpt);

			ESPDATA(("ir %x ... ", esp->ireg));

			if (hmuch == 0)
				ESPDATA(("done! \n"));

#if 0
			local_irq_restore(flags);
#endif

			/* check new bus phase */
			if (newphase != oldphase && i < esp->current_transfer_size) {
				/* something happened; disconnect ?? */
				ESPDATA(("phase change, dropped out with %d done ... ", i));
				break;
			}

			/* check int. status */
			if (esp->ireg & ESP_INTR_DC) {
				/* disconnect */
				ESPDATA(("disconnect; %d transferred ... ", i));
				break;
			} else if (esp->ireg & ESP_INTR_FDONE) {
				/* function done */
				ESPDATA(("function done; %d transferred ... ", i));
				break;
			}

			/* XXX fixme: bail out on stall */
			if (fifo_stuck > 10) {
				/* we're stuck */
				ESPDATA(("fifo stall; %d transferred ... ", i));
				break;
			}
		}

		ESPDATA(("\n"));
		/* check successful completion ?? */

		/* Bytes still sitting in the FIFO on data-out never made
		 * it to the target; count them as not transferred.
		 */
		if (thisphase == in_dataout)
			hmuch += fifocnt; /* stuck?? adjust data pointer ...*/

		/* tell do_data_finale how much was transferred */
		esp->current_transfer_size -= hmuch;

		/* still not completely sure on this one ... */
		return /*do_intr_end*/ do_work_bus /*do_phase_determine*/ ;

		/*
		 * end PIO
		 */
	}
	return do_intr_end;
}
1983
1984/* See how successful the data transfer was. */
1985static int esp_do_data_finale(struct NCR_ESP *esp,
1986 struct ESP_regs *eregs)
1987{
1988 Scsi_Cmnd *SCptr = esp->current_SC;
1989 struct esp_device *esp_dev = SCptr->device->hostdata;
1990 int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0;
1991
1992 if(esp->dma_led_off)
1993 esp->dma_led_off(esp);
1994
1995 ESPDATA(("esp_do_data_finale: "));
1996
1997 if(SCptr->SCp.phase == in_datain) {
1998 if(esp->sreg & ESP_STAT_PERR) {
1999 /* Yuck, parity error. The ESP asserts ATN
2000 * so that we can go to message out phase
2001 * immediately and inform the target that
2002 * something bad happened.
2003 */
2004 ESPLOG(("esp%d: data bad parity detected.\n",
2005 esp->esp_id));
2006 esp->cur_msgout[0] = INITIATOR_ERROR;
2007 esp->msgout_len = 1;
2008 }
2009 if(esp->dma_drain)
2010 esp->dma_drain(esp);
2011 }
2012 if(esp->dma_invalidate)
2013 esp->dma_invalidate(esp);
2014
2015 /* This could happen for the above parity error case. */
2016 if(!(esp->ireg == ESP_INTR_BSERV)) {
2017 /* Please go to msgout phase, please please please... */
2018 ESPLOG(("esp%d: !BSERV after data, probably to msgout\n",
2019 esp->esp_id));
2020 return esp_do_phase_determine(esp, eregs);
2021 }
2022
2023 /* Check for partial transfers and other horrible events. */
2024 fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);
2025 ecount = esp_getcount(eregs);
2026 if(esp->fas_premature_intr_workaround)
2027 ecount -= 0x40;
2028 bytes_sent = esp->current_transfer_size;
2029
2030 ESPDATA(("trans_sz=%d, ", bytes_sent));
2031 if(!(esp->sreg & ESP_STAT_TCNT))
2032 bytes_sent -= ecount;
2033 if(SCptr->SCp.phase == in_dataout)
2034 bytes_sent -= fifocnt;
2035
2036 ESPDATA(("bytes_sent=%d (ecount=%d, fifocnt=%d), ", bytes_sent,
2037 ecount, fifocnt));
2038
2039 /* If we were in synchronous mode, check for peculiarities. */
2040 if(esp_dev->sync_max_offset)
2041 bogus_data = esp100_sync_hwbug(esp, eregs, SCptr, fifocnt);
2042 else
2043 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
2044
2045 /* Until we are sure of what has happened, we are certainly
2046 * in the dark.
2047 */
2048 esp_advance_phase(SCptr, in_the_dark);
2049
2050 /* Check for premature interrupt condition. Can happen on FAS2x6
2051 * chips. QLogic recommends a workaround by overprogramming the
2052 * transfer counters, but this makes doing scatter-gather impossible.
2053 * Until there is a way to disable scatter-gather for a single target,
2054 * and not only for the entire host adapter as it is now, the workaround
2055 * is way to expensive performance wise.
2056 * Instead, it turns out that when this happens the target has disconnected
2057 * already but it doesn't show in the interrupt register. Compensate for
2058 * that here to try and avoid a SCSI bus reset.
2059 */
2060 if(!esp->fas_premature_intr_workaround && (fifocnt == 1) &&
2061 sreg_dataoutp(esp->sreg)) {
2062 ESPLOG(("esp%d: Premature interrupt, enabling workaround\n",
2063 esp->esp_id));
2064#if 0
2065 /* Disable scatter-gather operations, they are not possible
2066 * when using this workaround.
2067 */
2068 esp->ehost->sg_tablesize = 0;
2069 esp->ehost->use_clustering = ENABLE_CLUSTERING;
2070 esp->fas_premature_intr_workaround = 1;
2071 bytes_sent = 0;
2072 if(SCptr->use_sg) {
2073 ESPLOG(("esp%d: Aborting scatter-gather operation\n",
2074 esp->esp_id));
2075 esp->cur_msgout[0] = ABORT;
2076 esp->msgout_len = 1;
2077 esp->msgout_ctr = 0;
2078 esp_cmd(esp, eregs, ESP_CMD_SATN);
2079 esp_setcount(eregs, 0xffff);
2080 esp_cmd(esp, eregs, ESP_CMD_NULL);
2081 esp_cmd(esp, eregs, ESP_CMD_TPAD | ESP_CMD_DMA);
2082 return do_intr_end;
2083 }
2084#else
2085 /* Just set the disconnected bit. That's what appears to
2086 * happen anyway. The state machine will pick it up when
2087 * we return.
2088 */
2089 esp->ireg |= ESP_INTR_DC;
2090#endif
2091 }
2092
2093 if(bytes_sent < 0) {
2094 /* I've seen this happen due to lost state in this
2095 * driver. No idea why it happened, but allowing
2096 * this value to be negative caused things to
2097 * lock up. This allows greater chance of recovery.
2098 * In fact every time I've seen this, it has been
2099 * a driver bug without question.
2100 */
2101 ESPLOG(("esp%d: yieee, bytes_sent < 0!\n", esp->esp_id));
2102 ESPLOG(("esp%d: csz=%d fifocount=%d ecount=%d\n",
2103 esp->esp_id,
2104 esp->current_transfer_size, fifocnt, ecount));
2105 ESPLOG(("esp%d: use_sg=%d ptr=%p this_residual=%d\n",
2106 esp->esp_id,
2107 SCptr->use_sg, SCptr->SCp.ptr, SCptr->SCp.this_residual));
2108 ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id,
2109 SCptr->device->id));
2110 SCptr->device->borken = 1;
2111 esp_dev->sync = 0;
2112 bytes_sent = 0;
2113 }
2114
2115 /* Update the state of our transfer. */
2116 SCptr->SCp.ptr += bytes_sent;
2117 SCptr->SCp.this_residual -= bytes_sent;
2118 if(SCptr->SCp.this_residual < 0) {
2119 /* shit */
2120 ESPLOG(("esp%d: Data transfer overrun.\n", esp->esp_id));
2121 SCptr->SCp.this_residual = 0;
2122 }
2123
2124 /* Maybe continue. */
2125 if(!bogus_data) {
2126 ESPDATA(("!bogus_data, "));
2127 /* NO MATTER WHAT, we advance the scatterlist,
2128 * if the target should decide to disconnect
2129 * in between scatter chunks (which is common)
2130 * we could die horribly! I used to have the sg
2131 * advance occur only if we are going back into
2132 * (or are staying in) a data phase, you can
2133 * imagine the hell I went through trying to
2134 * figure this out.
2135 */
2136 if(!SCptr->SCp.this_residual && SCptr->SCp.buffers_residual)
2137 advance_sg(esp, SCptr);
2138#ifdef DEBUG_ESP_DATA
2139 if(sreg_datainp(esp->sreg) || sreg_dataoutp(esp->sreg)) {
2140 ESPDATA(("to more data\n"));
2141 } else {
2142 ESPDATA(("to new phase\n"));
2143 }
2144#endif
2145 return esp_do_phase_determine(esp, eregs);
2146 }
2147 /* Bogus data, just wait for next interrupt. */
2148 ESPLOG(("esp%d: bogus_data during end of data phase\n",
2149 esp->esp_id));
2150 return do_intr_end;
2151}
2152
2153/* We received a non-good status return at the end of
2154 * running a SCSI command. This is used to decide if
2155 * we should clear our synchronous transfer state for
2156 * such a device when that happens.
2157 *
2158 * The idea is that when spinning up a disk or rewinding
2159 * a tape, we don't want to go into a loop re-negotiating
2160 * synchronous capabilities over and over.
2161 */
2162static int esp_should_clear_sync(Scsi_Cmnd *sp)
2163{
2164 unchar cmd = sp->cmnd[0];
2165
2166 /* These cases are for spinning up a disk and
2167 * waiting for that spinup to complete.
2168 */
2169 if(cmd == START_STOP)
2170 return 0;
2171
2172 if(cmd == TEST_UNIT_READY)
2173 return 0;
2174
2175 /* One more special case for SCSI tape drives,
2176 * this is what is used to probe the device for
2177 * completion of a rewind or tape load operation.
2178 */
2179 if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE)
2180 return 0;
2181
2182 return 1;
2183}
2184
2185/* Either a command is completing or a target is dropping off the bus
2186 * to continue the command in the background so we can do other work.
2187 */
static int esp_do_freebus(struct NCR_ESP *esp, struct ESP_regs *eregs)
{
	Scsi_Cmnd *SCptr = esp->current_SC;
	int rval;

	rval = skipahead2(esp, eregs, SCptr, in_status, in_msgindone, in_freeing);
	if(rval)
		return rval;

	/* We only get here legitimately on a disconnect interrupt. */
	if(esp->ireg != ESP_INTR_DC) {
		ESPLOG(("esp%d: Target will not disconnect\n", esp->esp_id));
		return do_reset_bus; /* target will not drop BSY... */
	}
	esp->msgout_len = 0;
	esp->prevmsgout = NOP;
	if(esp->prevmsgin == COMMAND_COMPLETE) {
		struct esp_device *esp_dev = SCptr->device->hostdata;
		/* Normal end of nexus. */
		if(esp->disconnected_SC)
			/* Re-enable reselection so disconnected commands
			 * can reconnect while the bus is free.
			 */
			esp_cmd(esp, eregs, ESP_CMD_ESEL);

		if(SCptr->SCp.Status != GOOD &&
		   SCptr->SCp.Status != CONDITION_GOOD &&
		   ((1<<scmd_id(SCptr)) & esp->targets_present) &&
		   esp_dev->sync && esp_dev->sync_max_offset) {
			/* SCSI standard says that the synchronous capabilities
			 * should be renegotiated at this point. Most likely
			 * we are about to request sense from this target
			 * in which case we want to avoid using sync
			 * transfers until we are sure of the current target
			 * state.
			 */
			ESPMISC(("esp: Status <%d> for target %d lun %d\n",
				 SCptr->SCp.Status, SCptr->device->id, SCptr->device->lun));

			/* But don't do this when spinning up a disk at
			 * boot time while we poll for completion as it
			 * fills up the console with messages. Also, tapes
			 * can report not ready many times right after
			 * loading up a tape.
			 */
			if(esp_should_clear_sync(SCptr) != 0)
				esp_dev->sync = 0;
		}
		ESPDISC(("F<%02x,%02x>", SCptr->device->id, SCptr->device->lun));
		/* Pack status (byte 0), message (byte 1) and host code
		 * (byte 2) into the mid-layer result word.
		 */
		esp_done(esp, ((SCptr->SCp.Status & 0xff) |
			       ((SCptr->SCp.Message & 0xff)<<8) |
			       (DID_OK << 16)));
	} else if(esp->prevmsgin == DISCONNECT) {
		/* Normal disconnect: park the command on the disconnected
		 * queue and kick off the next issued command, if any.
		 */
		esp_cmd(esp, eregs, ESP_CMD_ESEL);
		ESPDISC(("D<%02x,%02x>", SCptr->device->id, SCptr->device->lun));
		append_SC(&esp->disconnected_SC, SCptr);
		esp->current_SC = NULL;
		if(esp->issue_SC)
			esp_exec_cmd(esp);
	} else {
		/* Driver bug, we do not expect a disconnect here
		 * and should not have advanced the state engine
		 * to in_freeing.
		 */
		ESPLOG(("esp%d: last msg not disc and not cmd cmplt.\n",
			esp->esp_id));
		return do_reset_bus;
	}
	return do_intr_end;
}
2255
2256/* When a reselect occurs, and we cannot find the command to
2257 * reconnect to in our queues, we do this.
2258 */
2259static int esp_bad_reconnect(struct NCR_ESP *esp)
2260{
2261 Scsi_Cmnd *sp;
2262
2263 ESPLOG(("esp%d: Eieeee, reconnecting unknown command!\n",
2264 esp->esp_id));
2265 ESPLOG(("QUEUE DUMP\n"));
2266 sp = esp->issue_SC;
2267 ESPLOG(("esp%d: issue_SC[", esp->esp_id));
2268 while(sp) {
2269 ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
2270 sp = (Scsi_Cmnd *) sp->host_scribble;
2271 }
2272 ESPLOG(("]\n"));
2273 sp = esp->current_SC;
2274 ESPLOG(("esp%d: current_SC[", esp->esp_id));
2275 while(sp) {
2276 ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
2277 sp = (Scsi_Cmnd *) sp->host_scribble;
2278 }
2279 ESPLOG(("]\n"));
2280 sp = esp->disconnected_SC;
2281 ESPLOG(("esp%d: disconnected_SC[", esp->esp_id));
2282 while(sp) {
2283 ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
2284 sp = (Scsi_Cmnd *) sp->host_scribble;
2285 }
2286 ESPLOG(("]\n"));
2287 return do_reset_bus;
2288}
2289
/* Do the needy when a target tries to reconnect to us. */
static int esp_do_reconnect(struct NCR_ESP *esp,
			    struct ESP_regs *eregs)
{
	int lun, target;
	Scsi_Cmnd *SCptr;

	/* Check for all bogus conditions first. */
	target = reconnect_target(esp, eregs);
	if(target < 0) {
		ESPDISC(("bad bus bits\n"));
		return do_reset_bus;
	}
	lun = reconnect_lun(esp, eregs);
	if(lun < 0) {
		ESPDISC(("target=%2x, bad identify msg\n", target));
		return do_reset_bus;
	}

	/* Things look ok... */
	ESPDISC(("R<%02x,%02x>", target, lun));

	/* Clear the fifo before talking to the target again; the
	 * ESP100 needs an extra workaround here.
	 */
	esp_cmd(esp, eregs, ESP_CMD_FLUSH);
	if(esp100_reconnect_hwbug(esp, eregs))
		return do_reset_bus;
	esp_cmd(esp, eregs, ESP_CMD_NULL);

	/* Find the disconnected command this target/lun belongs to;
	 * a reconnect we cannot match is a fatal driver/bus error.
	 */
	SCptr = remove_SC(&esp->disconnected_SC, (unchar) target, (unchar) lun);
	if(!SCptr)
		return esp_bad_reconnect(esp);

	esp_connect(esp, eregs, SCptr);
	esp_cmd(esp, eregs, ESP_CMD_MOK);

	/* Reconnect implies a restore pointers operation. */
	esp_restore_pointers(esp, SCptr);

	esp->snip = 0;
	esp_advance_phase(SCptr, in_the_dark);
	return do_intr_end;
}
2331
2332/* End of NEXUS (hopefully), pick up status + message byte then leave if
2333 * all goes well.
2334 */
static int esp_do_status(struct NCR_ESP *esp, struct ESP_regs *eregs)
{
	Scsi_Cmnd *SCptr = esp->current_SC;
	int intr, rval;

	rval = skipahead1(esp, eregs, SCptr, in_the_dark, in_status);
	if(rval)
		return rval;

	intr = esp->ireg;
	ESPSTAT(("esp_do_status: "));
	if(intr != ESP_INTR_DC) {
		int message_out = 0; /* for parity problems */

		/* Ack the message. */
		ESPSTAT(("ack msg, "));
		esp_cmd(esp, eregs, ESP_CMD_MOK);

		/* Let the DMA backend hand us the status/message bytes
		 * it collected into esp_command[], if it works that way.
		 */
		if(esp->dma_poll)
			esp->dma_poll(esp, (unsigned char *) esp->esp_command);

		ESPSTAT(("got something, "));
		/* ESP chimes in with one of
		 *
		 * 1) function done interrupt:
		 *	both status and message in bytes
		 *	are available
		 *
		 * 2) bus service interrupt:
		 *	only status byte was acquired
		 *
		 * 3) Anything else:
		 *	can't happen, but we test for it
		 *	anyways
		 *
		 * ALSO: If bad parity was detected on either
		 *       the status _or_ the message byte then
		 *       the ESP has asserted ATN on the bus
		 *       and we must therefore wait for the
		 *       next phase change.
		 */
		if(intr & ESP_INTR_FDONE) {
			/* We got it all, hallejulia. */
			ESPSTAT(("got both, "));
			SCptr->SCp.Status = esp->esp_command[0];
			SCptr->SCp.Message = esp->esp_command[1];
			esp->prevmsgin = SCptr->SCp.Message;
			esp->cur_msgin[0] = SCptr->SCp.Message;
			if(esp->sreg & ESP_STAT_PERR) {
				/* There was bad parity for the
				 * message byte, the status byte
				 * was ok.
				 */
				message_out = MSG_PARITY_ERROR;
			}
		} else if(intr == ESP_INTR_BSERV) {
			/* Only got status byte. */
			ESPLOG(("esp%d: got status only, ", esp->esp_id));
			if(!(esp->sreg & ESP_STAT_PERR)) {
				SCptr->SCp.Status = esp->esp_command[0];
				SCptr->SCp.Message = 0xff;
			} else {
				/* The status byte had bad parity.
				 * we leave the scsi_pointer Status
				 * field alone as we set it to a default
				 * of CHECK_CONDITION in esp_queue.
				 */
				message_out = INITIATOR_ERROR;
			}
		} else {
			/* This shouldn't happen ever. */
			ESPSTAT(("got bolixed\n"));
			esp_advance_phase(SCptr, in_the_dark);
			return esp_do_phase_determine(esp, eregs);
		}

		if(!message_out) {
			ESPSTAT(("status=%2x msg=%2x, ", SCptr->SCp.Status,
				 SCptr->SCp.Message));
			if(SCptr->SCp.Message == COMMAND_COMPLETE) {
				ESPSTAT(("and was COMMAND_COMPLETE\n"));
				esp_advance_phase(SCptr, in_freeing);
				return esp_do_freebus(esp, eregs);
			} else {
				/* Unexpected message; fall back to reading
				 * it through the message-in machinery.
				 */
				ESPLOG(("esp%d: and _not_ COMMAND_COMPLETE\n",
					esp->esp_id));
				esp->msgin_len = esp->msgin_ctr = 1;
				esp_advance_phase(SCptr, in_msgindone);
				return esp_do_msgindone(esp, eregs);
			}
		} else {
			/* With luck we'll be able to let the target
			 * know that bad parity happened, it will know
			 * which byte caused the problems and send it
			 * again. For the case where the status byte
			 * receives bad parity, I do not believe most
			 * targets recover very well. We'll see.
			 */
			ESPLOG(("esp%d: bad parity somewhere mout=%2x\n",
				esp->esp_id, message_out));
			esp->cur_msgout[0] = message_out;
			esp->msgout_len = esp->msgout_ctr = 1;
			esp_advance_phase(SCptr, in_the_dark);
			return esp_do_phase_determine(esp, eregs);
		}
	} else {
		/* If we disconnect now, all hell breaks loose. */
		ESPLOG(("esp%d: whoops, disconnect\n", esp->esp_id));
		esp_advance_phase(SCptr, in_the_dark);
		return esp_do_phase_determine(esp, eregs);
	}
}
2447
/* Kick off the "initiator command complete sequence" to pull the
 * status and message-in bytes off the bus, by PIO or DMA depending
 * on the board, then fall into esp_do_status() to interpret them.
 */
static int esp_enter_status(struct NCR_ESP *esp,
			    struct ESP_regs *eregs)
{
	unchar thecmd = ESP_CMD_ICCSEQ;

	esp_cmd(esp, eregs, ESP_CMD_FLUSH);

	if(esp->do_pio_cmds) {
		/* PIO: busy-wait for each of the two bytes to land in
		 * the fifo. NOTE(review): these polls use esp->eregs
		 * while the reads use the eregs argument — presumably
		 * the same registers; confirm against callers.
		 */
		esp_advance_phase(esp->current_SC, in_status);
		esp_cmd(esp, eregs, thecmd);
		while(!(esp_read(esp->eregs->esp_status) & ESP_STAT_INTR));
		esp->esp_command[0] = esp_read(eregs->esp_fdata);
		while(!(esp_read(esp->eregs->esp_status) & ESP_STAT_INTR));
		esp->esp_command[1] = esp_read(eregs->esp_fdata);
	} else {
		/* DMA: program a 2-byte transfer count and let the DMA
		 * backend deposit status + message into esp_command[].
		 */
		esp->esp_command[0] = esp->esp_command[1] = 0xff;
		esp_write(eregs->esp_tclow, 2);
		esp_write(eregs->esp_tcmed, 0);
		esp->dma_init_read(esp, esp->esp_command_dvma, 2);
		thecmd |= ESP_CMD_DMA;
		esp_cmd(esp, eregs, thecmd);
		esp_advance_phase(esp->current_SC, in_status);
	}

	return esp_do_status(esp, eregs);
}
2474
2475static int esp_disconnect_amidst_phases(struct NCR_ESP *esp,
2476 struct ESP_regs *eregs)
2477{
2478 Scsi_Cmnd *sp = esp->current_SC;
2479 struct esp_device *esp_dev = sp->device->hostdata;
2480
2481 /* This means real problems if we see this
2482 * here. Unless we were actually trying
2483 * to force the device to abort/reset.
2484 */
2485 ESPLOG(("esp%d: Disconnect amidst phases, ", esp->esp_id));
2486 ESPLOG(("pphase<%s> cphase<%s>, ",
2487 phase_string(sp->SCp.phase),
2488 phase_string(sp->SCp.sent_command)));
2489
2490 if(esp->disconnected_SC)
2491 esp_cmd(esp, eregs, ESP_CMD_ESEL);
2492
2493 switch(esp->cur_msgout[0]) {
2494 default:
2495 /* We didn't expect this to happen at all. */
2496 ESPLOG(("device is bolixed\n"));
2497 esp_advance_phase(sp, in_tgterror);
2498 esp_done(esp, (DID_ERROR << 16));
2499 break;
2500
2501 case BUS_DEVICE_RESET:
2502 ESPLOG(("device reset successful\n"));
2503 esp_dev->sync_max_offset = 0;
2504 esp_dev->sync_min_period = 0;
2505 esp_dev->sync = 0;
2506 esp_advance_phase(sp, in_resetdev);
2507 esp_done(esp, (DID_RESET << 16));
2508 break;
2509
2510 case ABORT:
2511 ESPLOG(("device abort successful\n"));
2512 esp_advance_phase(sp, in_abortone);
2513 esp_done(esp, (DID_ABORT << 16));
2514 break;
2515
2516 };
2517 return do_intr_end;
2518}
2519
2520static int esp_enter_msgout(struct NCR_ESP *esp,
2521 struct ESP_regs *eregs)
2522{
2523 esp_advance_phase(esp->current_SC, in_msgout);
2524 return esp_do_msgout(esp, eregs);
2525}
2526
2527static int esp_enter_msgin(struct NCR_ESP *esp,
2528 struct ESP_regs *eregs)
2529{
2530 esp_advance_phase(esp->current_SC, in_msgin);
2531 return esp_do_msgin(esp, eregs);
2532}
2533
2534static int esp_enter_cmd(struct NCR_ESP *esp,
2535 struct ESP_regs *eregs)
2536{
2537 esp_advance_phase(esp->current_SC, in_cmdbegin);
2538 return esp_do_cmdbegin(esp, eregs);
2539}
2540
2541static int esp_enter_badphase(struct NCR_ESP *esp,
2542 struct ESP_regs *eregs)
2543{
2544 ESPLOG(("esp%d: Bizarre bus phase %2x.\n", esp->esp_id,
2545 esp->sreg & ESP_STAT_PMASK));
2546 return do_reset_bus;
2547}
2548
/* Handler signature shared by all phase handlers. */
typedef int (*espfunc_t)(struct NCR_ESP *,
			 struct ESP_regs *);

/* Phase dispatch table, indexed by the bus phase bits
 * (esp->sreg & ESP_STAT_PMASK) in esp_do_phase_determine().
 * Entry order must match the chip's phase encoding.
 */
static espfunc_t phase_vector[] = {
	esp_do_data,		/* ESP_DOP   */
	esp_do_data,		/* ESP_DIP   */
	esp_enter_cmd,		/* ESP_CMDP  */
	esp_enter_status,	/* ESP_STATP */
	esp_enter_badphase,	/* ESP_STAT_PMSG */
	esp_enter_badphase,	/* ESP_STAT_PMSG | ESP_STAT_PIO */
	esp_enter_msgout,	/* ESP_MOP   */
	esp_enter_msgin,	/* ESP_MIP   */
};
2562
2563/* The target has control of the bus and we have to see where it has
2564 * taken us.
2565 */
2566static int esp_do_phase_determine(struct NCR_ESP *esp,
2567 struct ESP_regs *eregs)
2568{
2569 if ((esp->ireg & ESP_INTR_DC) != 0)
2570 return esp_disconnect_amidst_phases(esp, eregs);
2571 return phase_vector[esp->sreg & ESP_STAT_PMASK](esp, eregs);
2572}
2573
2574/* First interrupt after exec'ing a cmd comes here. */
static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs)
{
	Scsi_Cmnd *SCptr = esp->current_SC;
	struct esp_device *esp_dev = SCptr->device->hostdata;
	int cmd_bytes_sent, fcnt;

	/* Ask the DMA backend how much of the command went out, then
	 * tear down its state for this transfer.
	 */
	fcnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);
	cmd_bytes_sent = esp->dma_bytes_sent(esp, fcnt);
	if(esp->dma_invalidate)
		esp->dma_invalidate(esp);

	/* Let's check to see if a reselect happened
	 * while we we're trying to select. This must
	 * be checked first.
	 */
	if(esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		esp_reconnect(esp, SCptr);
		return esp_do_reconnect(esp, eregs);
	}

	/* Looks like things worked, we should see a bus service &
	 * a function complete interrupt at this point. Note we
	 * are doing a direct comparison because we don't want to
	 * be fooled into thinking selection was successful if
	 * ESP_INTR_DC is set, see below.
	 */
	if(esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* target speaks... */
		esp->targets_present |= (1<<scmd_id(SCptr));

		/* What if the target ignores the sdtr? */
		if(esp->snip)
			esp_dev->sync = 1;

		/* See how far, if at all, we got in getting
		 * the information out to the target.
		 */
		switch(esp->seqreg) {
		default:

		case ESP_STEP_ASEL:
			/* Arbitration won, target selected, but
			 * we are in some phase which is not command
			 * phase nor is it message out phase.
			 *
			 * XXX We've confused the target, obviously.
			 * XXX So clear it's state, but we also end
			 * XXX up clearing everyone elses. That isn't
			 * XXX so nice. I'd like to just reset this
			 * XXX target, but if I cannot even get it's
			 * XXX attention and finish selection to talk
			 * XXX to it, there is not much more I can do.
			 * XXX If we have a loaded bus we're going to
			 * XXX spend the next second or so renegotiating
			 * XXX for synchronous transfers.
			 */
			ESPLOG(("esp%d: STEP_ASEL for tgt %d\n",
				esp->esp_id, SCptr->device->id));
			/* fall through */

		case ESP_STEP_SID:
			/* Arbitration won, target selected, went
			 * to message out phase, sent one message
			 * byte, then we stopped. ATN is asserted
			 * on the SCSI bus and the target is still
			 * there hanging on. This is a legal
			 * sequence step if we gave the ESP a select
			 * and stop command.
			 *
			 * XXX See above, I could set the borken flag
			 * XXX in the device struct and retry the
			 * XXX command. But would that help for
			 * XXX tagged capable targets?
			 */
			/* fall through */

		case ESP_STEP_NCMD:
			/* Arbitration won, target selected, maybe
			 * sent the one message byte in message out
			 * phase, but we did not go to command phase
			 * in the end. Actually, we could have sent
			 * only some of the message bytes if we tried
			 * to send out the entire identify and tag
			 * message using ESP_CMD_SA3.
			 */
			cmd_bytes_sent = 0;
			break;

		case ESP_STEP_PPC:
			/* No, not the powerPC pinhead. Arbitration
			 * won, all message bytes sent if we went to
			 * message out phase, went to command phase
			 * but only part of the command was sent.
			 *
			 * XXX I've seen this, but usually in conjunction
			 * XXX with a gross error which appears to have
			 * XXX occurred between the time I told the
			 * XXX ESP to arbitrate and when I got the
			 * XXX interrupt. Could I have misloaded the
			 * XXX command bytes into the fifo? Actually,
			 * XXX I most likely missed a phase, and therefore
			 * XXX went into never never land and didn't even
			 * XXX know it. That was the old driver though.
			 * XXX What is even more peculiar is that the ESP
			 * XXX showed the proper function complete and
			 * XXX bus service bits in the interrupt register.
			 */
			/* fall through */

		case ESP_STEP_FINI4:
		case ESP_STEP_FINI5:
		case ESP_STEP_FINI6:
		case ESP_STEP_FINI7:
			/* Account for the identify message */
			if(SCptr->SCp.phase == in_slct_norm)
				cmd_bytes_sent -= 1;
		};
		esp_cmd(esp, eregs, ESP_CMD_NULL);

		/* Be careful, we could really get fucked during synchronous
		 * data transfers if we try to flush the fifo now.
		 */
		if(!fcnt && /* Fifo is empty and... */
		   /* either we are not doing synchronous transfers or... */
		   (!esp_dev->sync_max_offset ||
		    /* We are not going into data in phase. */
		    ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
			esp_cmd(esp, eregs, ESP_CMD_FLUSH); /* flush is safe */

		/* See how far we got if this is not a slow command. */
		if(!esp->esp_slowcmd) {
			if(cmd_bytes_sent < 0)
				cmd_bytes_sent = 0;
			if(cmd_bytes_sent != SCptr->cmd_len) {
				/* Crapola, mark it as a slowcmd
				 * so that we have some chance of
				 * keeping the command alive with
				 * good luck.
				 *
				 * XXX Actually, if we didn't send it all
				 * XXX this means either we didn't set things
				 * XXX up properly (driver bug) or the target
				 * XXX or the ESP detected parity on one of
				 * XXX the command bytes. This makes much
				 * XXX more sense, and therefore this code
				 * XXX should be changed to send out a
				 * XXX parity error message or if the status
				 * XXX register shows no parity error then
				 * XXX just expect the target to bring the
				 * XXX bus into message in phase so that it
				 * XXX can send us the parity error message.
				 * XXX SCSI sucks...
				 */
				esp->esp_slowcmd = 1;
				esp->esp_scmdp = &(SCptr->cmnd[cmd_bytes_sent]);
				esp->esp_scmdleft = (SCptr->cmd_len - cmd_bytes_sent);
			}
		}

		/* Now figure out where we went. */
		esp_advance_phase(SCptr, in_the_dark);
		return esp_do_phase_determine(esp, eregs);
	}

	/* Did the target even make it? */
	if(esp->ireg == ESP_INTR_DC) {
		/* wheee... nobody there or they didn't like
		 * what we told it to do, clean up.
		 */

		/* If anyone is off the bus, but working on
		 * a command in the background for us, tell
		 * the ESP to listen for them.
		 */
		if(esp->disconnected_SC)
			esp_cmd(esp, eregs, ESP_CMD_ESEL);

		/* A known-present target dropping off mid sync
		 * negotiation means it choked on the SDTR; retry
		 * the command without negotiating.
		 */
		if(((1<<SCptr->device->id) & esp->targets_present) &&
		   esp->seqreg && esp->cur_msgout[0] == EXTENDED_MESSAGE &&
		   (SCptr->SCp.phase == in_slct_msg ||
		    SCptr->SCp.phase == in_slct_stop)) {
			/* shit */
			esp->snip = 0;
			ESPLOG(("esp%d: Failed synchronous negotiation for target %d "
				"lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun));
			esp_dev->sync_max_offset = 0;
			esp_dev->sync_min_period = 0;
			esp_dev->sync = 1; /* so we don't negotiate again */

			/* Run the command again, this time though we
			 * won't try to negotiate for synchronous transfers.
			 *
			 * XXX I'd like to do something like send an
			 * XXX INITIATOR_ERROR or ABORT message to the
			 * XXX target to tell it, "Sorry I confused you,
			 * XXX please come back and I will be nicer next
			 * XXX time". But that requires having the target
			 * XXX on the bus, and it has dropped BSY on us.
			 */
			esp->current_SC = NULL;
			esp_advance_phase(SCptr, not_issued);
			prepend_SC(&esp->issue_SC, SCptr);
			esp_exec_cmd(esp);
			return do_intr_end;
		}

		/* Ok, this is normal, this is what we see during boot
		 * or whenever when we are scanning the bus for targets.
		 * But first make sure that is really what is happening.
		 */
		if(((1<<SCptr->device->id) & esp->targets_present)) {
			ESPLOG(("esp%d: Warning, live target %d not responding to "
				"selection.\n", esp->esp_id, SCptr->device->id));

			/* This _CAN_ happen. The SCSI standard states that
			 * the target is to _not_ respond to selection if
			 * _it_ detects bad parity on the bus for any reason.
			 * Therefore, we assume that if we've talked successfully
			 * to this target before, bad parity is the problem.
			 */
			esp_done(esp, (DID_PARITY << 16));
		} else {
			/* Else, there really isn't anyone there. */
			ESPMISC(("esp: selection failure, maybe nobody there?\n"));
			ESPMISC(("esp: target %d lun %d\n",
				 SCptr->device->id, SCptr->device->lun));
			esp_done(esp, (DID_BAD_TARGET << 16));
		}
		return do_intr_end;
	}

	/* Totally unexpected interrupt status: dump old and freshly
	 * re-read chip registers, then reset the bus.
	 */
	ESPLOG(("esp%d: Selection failure.\n", esp->esp_id));
	printk("esp%d: Currently -- ", esp->esp_id);
	esp_print_ireg(esp->ireg);
	printk(" ");
	esp_print_statreg(esp->sreg);
	printk(" ");
	esp_print_seqreg(esp->seqreg);
	printk("\n");
	printk("esp%d: New -- ", esp->esp_id);
	esp->sreg = esp_read(eregs->esp_status);
	esp->seqreg = esp_read(eregs->esp_sstep);
	esp->ireg = esp_read(eregs->esp_intrpt);
	esp_print_ireg(esp->ireg);
	printk(" ");
	esp_print_statreg(esp->sreg);
	printk(" ");
	esp_print_seqreg(esp->seqreg);
	printk("\n");
	ESPLOG(("esp%d: resetting bus\n", esp->esp_id));
	return do_reset_bus; /* ugh... */
}
2825
2826/* Continue reading bytes for msgin phase. */
2827static int esp_do_msgincont(struct NCR_ESP *esp, struct ESP_regs *eregs)
2828{
2829 if(esp->ireg & ESP_INTR_BSERV) {
2830 /* in the right phase too? */
2831 if((esp->sreg & ESP_STAT_PMASK) == ESP_MIP) {
2832 /* phew... */
2833 esp_cmd(esp, eregs, ESP_CMD_TI);
2834 esp_advance_phase(esp->current_SC, in_msgindone);
2835 return do_intr_end;
2836 }
2837
2838 /* We changed phase but ESP shows bus service,
2839 * in this case it is most likely that we, the
2840 * hacker who has been up for 20hrs straight
2841 * staring at the screen, drowned in coffee
2842 * smelling like retched cigarette ashes
2843 * have miscoded something..... so, try to
2844 * recover as best we can.
2845 */
2846 ESPLOG(("esp%d: message in mis-carriage.\n", esp->esp_id));
2847 }
2848 esp_advance_phase(esp->current_SC, in_the_dark);
2849 return do_phase_determine;
2850}
2851
/* Interpret a one-byte message from the target. Returns 0 when the
 * message is consumed, a message byte (e.g. MESSAGE_REJECT, ABORT)
 * when we need to respond, and may extend msgin_len for two-byte or
 * extended messages.
 */
static int check_singlebyte_msg(struct NCR_ESP *esp,
				struct ESP_regs *eregs)
{
	esp->prevmsgin = esp->cur_msgin[0];
	if(esp->cur_msgin[0] & 0x80) {
		/* wheee... */
		ESPLOG(("esp%d: target sends identify amidst phases\n",
			esp->esp_id));
		esp_advance_phase(esp->current_SC, in_the_dark);
		return 0;
	} else if(((esp->cur_msgin[0] & 0xf0) == 0x20) ||
		  (esp->cur_msgin[0] == EXTENDED_MESSAGE)) {
		/* Two-byte or extended message: need at least one
		 * more byte before we can interpret it.
		 */
		esp->msgin_len = 2;
		esp_advance_phase(esp->current_SC, in_msgincont);
		return 0;
	}
	esp_advance_phase(esp->current_SC, in_the_dark);
	switch(esp->cur_msgin[0]) {
	default:
		/* We don't want to hear about it. */
		ESPLOG(("esp%d: msg %02x which we don't know about\n", esp->esp_id,
			esp->cur_msgin[0]));
		return MESSAGE_REJECT;

	case NOP:
		ESPLOG(("esp%d: target %d sends a nop\n", esp->esp_id,
			esp->current_SC->device->id));
		return 0;

	case RESTORE_POINTERS:
		/* In this case we might also have to backup the
		 * "slow command" pointer. It is rare to get such
		 * a save/restore pointer sequence so early in the
		 * bus transition sequences, but cover it.
		 */
		if(esp->esp_slowcmd) {
			esp->esp_scmdleft = esp->current_SC->cmd_len;
			esp->esp_scmdp = &esp->current_SC->cmnd[0];
		}
		esp_restore_pointers(esp, esp->current_SC);
		return 0;

	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->current_SC);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT:
		/* Freeing the bus, let it go. */
		esp->current_SC->SCp.phase = in_freeing;
		return 0;

	case MESSAGE_REJECT:
		ESPMISC(("msg reject, "));
		if(esp->prevmsgout == EXTENDED_MESSAGE) {
			struct esp_device *esp_dev = esp->current_SC->device->hostdata;

			/* Doesn't look like this target can
			 * do synchronous or WIDE transfers.
			 */
			ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n"));
			esp_dev->sync = 1;
			esp_dev->wide = 1;
			esp_dev->sync_min_period = 0;
			esp_dev->sync_max_offset = 0;
			return 0;
		} else {
			ESPMISC(("not sync nego, sending ABORT\n"));
			return ABORT;
		}
	};
}
2924
2925/* Target negotiates for synchronous transfers before we do, this
2926 * is legal although very strange. What is even funnier is that
2927 * the SCSI2 standard specifically recommends against targets doing
2928 * this because so many initiators cannot cope with this occurring.
2929 */
2930static int target_with_ants_in_pants(struct NCR_ESP *esp,
2931 Scsi_Cmnd *SCptr,
2932 struct esp_device *esp_dev)
2933{
2934 if(esp_dev->sync || SCptr->device->borken) {
2935 /* sorry, no can do */
2936 ESPSDTR(("forcing to async, "));
2937 build_sync_nego_msg(esp, 0, 0);
2938 esp_dev->sync = 1;
2939 esp->snip = 1;
2940 ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id));
2941 esp_advance_phase(SCptr, in_the_dark);
2942 return EXTENDED_MESSAGE;
2943 }
2944
2945 /* Ok, we'll check them out... */
2946 return 0;
2947}
2948
2949static void sync_report(struct NCR_ESP *esp)
2950{
2951 int msg3, msg4;
2952 char *type;
2953
2954 msg3 = esp->cur_msgin[3];
2955 msg4 = esp->cur_msgin[4];
2956 if(msg4) {
2957 int hz = 1000000000 / (msg3 * 4);
2958 int integer = hz / 1000000;
2959 int fraction = (hz - (integer * 1000000)) / 10000;
2960 if((msg3 * 4) < 200) {
2961 type = "FAST";
2962 } else {
2963 type = "synchronous";
2964 }
2965
2966 /* Do not transform this back into one big printk
2967 * again, it triggers a bug in our sparc64-gcc272
2968 * sibling call optimization. -DaveM
2969 */
2970 ESPLOG((KERN_INFO "esp%d: target %d ",
2971 esp->esp_id, esp->current_SC->device->id));
2972 ESPLOG(("[period %dns offset %d %d.%02dMHz ",
2973 (int) msg3 * 4, (int) msg4,
2974 integer, fraction));
2975 ESPLOG(("%s SCSI%s]\n", type,
2976 (((msg3 * 4) < 200) ? "-II" : "")));
2977 } else {
2978 ESPLOG((KERN_INFO "esp%d: target %d asynchronous\n",
2979 esp->esp_id, esp->current_SC->device->id));
2980 }
2981}
2982
2983static int check_multibyte_msg(struct NCR_ESP *esp,
2984 struct ESP_regs *eregs)
2985{
2986 Scsi_Cmnd *SCptr = esp->current_SC;
2987 struct esp_device *esp_dev = SCptr->device->hostdata;
2988 unchar regval = 0;
2989 int message_out = 0;
2990
2991 ESPSDTR(("chk multibyte msg: "));
2992 if(esp->cur_msgin[2] == EXTENDED_SDTR) {
2993 int period = esp->cur_msgin[3];
2994 int offset = esp->cur_msgin[4];
2995
2996 ESPSDTR(("is sync nego response, "));
2997 if(!esp->snip) {
2998 int rval;
2999
3000 /* Target negotiates first! */
3001 ESPSDTR(("target jumps the gun, "));
3002 message_out = EXTENDED_MESSAGE; /* we must respond */
3003 rval = target_with_ants_in_pants(esp, SCptr, esp_dev);
3004 if(rval)
3005 return rval;
3006 }
3007
3008 ESPSDTR(("examining sdtr, "));
3009
3010 /* Offset cannot be larger than ESP fifo size. */
3011 if(offset > 15) {
3012 ESPSDTR(("offset too big %2x, ", offset));
3013 offset = 15;
3014 ESPSDTR(("sending back new offset\n"));
3015 build_sync_nego_msg(esp, period, offset);
3016 return EXTENDED_MESSAGE;
3017 }
3018
3019 if(offset && period > esp->max_period) {
3020 /* Yeee, async for this slow device. */
3021 ESPSDTR(("period too long %2x, ", period));
3022 build_sync_nego_msg(esp, 0, 0);
3023 ESPSDTR(("hoping for msgout\n"));
3024 esp_advance_phase(esp->current_SC, in_the_dark);
3025 return EXTENDED_MESSAGE;
3026 } else if (offset && period < esp->min_period) {
3027 ESPSDTR(("period too short %2x, ", period));
3028 period = esp->min_period;
3029 if(esp->erev > esp236)
3030 regval = 4;
3031 else
3032 regval = 5;
3033 } else if(offset) {
3034 int tmp;
3035
3036 ESPSDTR(("period is ok, "));
3037 tmp = esp->ccycle / 1000;
3038 regval = (((period << 2) + tmp - 1) / tmp);
3039 if(regval && (esp->erev > esp236)) {
3040 if(period >= 50)
3041 regval--;
3042 }
3043 }
3044
3045 if(offset) {
3046 unchar bit;
3047
3048 esp_dev->sync_min_period = (regval & 0x1f);
3049 esp_dev->sync_max_offset = (offset | esp->radelay);
3050 if(esp->erev > esp236) {
3051 if(esp->erev == fas100a)
3052 bit = ESP_CONFIG3_FAST;
3053 else
3054 bit = ESP_CONFIG3_FSCSI;
3055 if(period < 50)
3056 esp->config3[SCptr->device->id] |= bit;
3057 else
3058 esp->config3[SCptr->device->id] &= ~bit;
3059 esp->prev_cfg3 = esp->config3[SCptr->device->id];
3060 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
3061 }
3062 esp->prev_soff = esp_dev->sync_min_period;
3063 esp_write(eregs->esp_soff, esp->prev_soff);
3064 esp->prev_stp = esp_dev->sync_max_offset;
3065 esp_write(eregs->esp_stp, esp->prev_stp);
3066
3067 ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n",
3068 esp_dev->sync_max_offset,
3069 esp_dev->sync_min_period,
3070 esp->config3[scmd_id(SCptr)]));
3071
3072 esp->snip = 0;
3073 } else if(esp_dev->sync_max_offset) {
3074 unchar bit;
3075
3076 /* back to async mode */
3077 ESPSDTR(("unaccaptable sync nego, forcing async\n"));
3078 esp_dev->sync_max_offset = 0;
3079 esp_dev->sync_min_period = 0;
3080 esp->prev_soff = 0;
3081 esp_write(eregs->esp_soff, 0);
3082 esp->prev_stp = 0;
3083 esp_write(eregs->esp_stp, 0);
3084 if(esp->erev > esp236) {
3085 if(esp->erev == fas100a)
3086 bit = ESP_CONFIG3_FAST;
3087 else
3088 bit = ESP_CONFIG3_FSCSI;
3089 esp->config3[SCptr->device->id] &= ~bit;
3090 esp->prev_cfg3 = esp->config3[SCptr->device->id];
3091 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
3092 }
3093 }
3094
3095 sync_report(esp);
3096
3097 ESPSDTR(("chk multibyte msg: sync is known, "));
3098 esp_dev->sync = 1;
3099
3100 if(message_out) {
3101 ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n",
3102 esp->esp_id));
3103 build_sync_nego_msg(esp, period, offset);
3104 esp_advance_phase(SCptr, in_the_dark);
3105 return EXTENDED_MESSAGE;
3106 }
3107
3108 ESPSDTR(("returning zero\n"));
3109 esp_advance_phase(SCptr, in_the_dark); /* ...or else! */
3110 return 0;
3111 } else if(esp->cur_msgin[2] == EXTENDED_WDTR) {
3112 ESPLOG(("esp%d: AIEEE wide msg received\n", esp->esp_id));
3113 message_out = MESSAGE_REJECT;
3114 } else if(esp->cur_msgin[2] == EXTENDED_MODIFY_DATA_POINTER) {
3115 ESPLOG(("esp%d: rejecting modify data ptr msg\n", esp->esp_id));
3116 message_out = MESSAGE_REJECT;
3117 }
3118 esp_advance_phase(SCptr, in_the_dark);
3119 return message_out;
3120}
3121
/* One (part of a) message-in byte transfer has finished.  Pull the
 * byte out of the chip FIFO, accumulate it into esp->cur_msgin[], and
 * once the whole message has arrived hand it to the single-byte /
 * multi-byte message checkers.  Returns one of the do_* state codes
 * for the tier-one state machine.
 */
static int esp_do_msgindone(struct NCR_ESP *esp, struct ESP_regs *eregs)
{
	Scsi_Cmnd *SCptr = esp->current_SC;
	int message_out = 0, it = 0, rval;

	rval = skipahead1(esp, eregs, SCptr, in_msgin, in_msgindone);
	if(rval)
		return rval;
	if(SCptr->SCp.sent_command != in_status) {
		if(!(esp->ireg & ESP_INTR_DC)) {
			if(esp->msgin_len && (esp->sreg & ESP_STAT_PERR)) {
				/* Parity error on the incoming byte: tell the
				 * target and throw the FIFO contents away.
				 */
				message_out = MSG_PARITY_ERROR;
				esp_cmd(esp, eregs, ESP_CMD_FLUSH);
			} else if((it = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES))!=1) {
				/* We certainly dropped the ball somewhere. */
				message_out = INITIATOR_ERROR;
				esp_cmd(esp, eregs, ESP_CMD_FLUSH);
			} else if(!esp->msgin_len) {
				/* First byte of a new message: consume it and
				 * keep the msgin machinery going.
				 */
				it = esp_read(eregs->esp_fdata);
				esp_advance_phase(SCptr, in_msgincont);
			} else {
				/* it is ok and we want it */
				it = esp->cur_msgin[esp->msgin_ctr] =
					esp_read(eregs->esp_fdata);
				esp->msgin_ctr++;
			}
		} else {
			/* Target disconnected mid message-in; let the bus
			 * worker sort it out.
			 */
			esp_advance_phase(SCptr, in_the_dark);
			return do_work_bus;
		}
	} else {
		it = esp->cur_msgin[0];
	}
	if(!message_out && esp->msgin_len) {
		if(esp->msgin_ctr < esp->msgin_len) {
			/* More bytes of this message still to come. */
			esp_advance_phase(SCptr, in_msgincont);
		} else if(esp->msgin_len == 1) {
			message_out = check_singlebyte_msg(esp, eregs);
		} else if(esp->msgin_len == 2) {
			/* Second byte of an extended message is its length;
			 * grow msgin_len accordingly (bounded by our 16-byte
			 * cur_msgin buffer, hence the >= 15 reject).
			 */
			if(esp->cur_msgin[0] == EXTENDED_MESSAGE) {
				if((it+2) >= 15) {
					message_out = MESSAGE_REJECT;
				} else {
					esp->msgin_len = (it + 2);
					esp_advance_phase(SCptr, in_msgincont);
				}
			} else {
				message_out = MESSAGE_REJECT; /* foo on you */
			}
		} else {
			message_out = check_multibyte_msg(esp, eregs);
		}
	}
	if(message_out < 0) {
		/* Negative values encode do_* state codes from the checkers. */
		return -message_out;
	} else if(message_out) {
		/* Queue a reply message and raise ATN so we get msgout phase. */
		if(((message_out != 1) &&
		    ((message_out < 0x20) || (message_out & 0x80))))
			esp->msgout_len = 1;
		esp->cur_msgout[0] = message_out;
		esp_cmd(esp, eregs, ESP_CMD_SATN);
		esp_advance_phase(SCptr, in_the_dark);
		esp->msgin_len = 0;
	}
	/* Re-sample status; ack the message byte only if the bus is still
	 * in message-in phase (PMSG|PCD).
	 */
	esp->sreg = esp_read(eregs->esp_status);
	esp->sreg &= ~(ESP_STAT_INTR);
	if((esp->sreg & (ESP_STAT_PMSG|ESP_STAT_PCD)) == (ESP_STAT_PMSG|ESP_STAT_PCD))
		esp_cmd(esp, eregs, ESP_CMD_MOK);
	if((SCptr->SCp.sent_command == in_msgindone) &&
	   (SCptr->SCp.phase == in_freeing))
		return esp_do_freebus(esp, eregs);
	return do_intr_end;
}
3195
3196static int esp_do_cmdbegin(struct NCR_ESP *esp, struct ESP_regs *eregs)
3197{
3198 unsigned char tmp;
3199 Scsi_Cmnd *SCptr = esp->current_SC;
3200
3201 esp_advance_phase(SCptr, in_cmdend);
3202 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
3203 tmp = *esp->esp_scmdp++;
3204 esp->esp_scmdleft--;
3205 esp_write(eregs->esp_fdata, tmp);
3206 esp_cmd(esp, eregs, ESP_CMD_TI);
3207 return do_intr_end;
3208}
3209
3210static int esp_do_cmddone(struct NCR_ESP *esp, struct ESP_regs *eregs)
3211{
3212 esp_cmd(esp, eregs, ESP_CMD_NULL);
3213 if(esp->ireg & ESP_INTR_BSERV) {
3214 esp_advance_phase(esp->current_SC, in_the_dark);
3215 return esp_do_phase_determine(esp, eregs);
3216 }
3217 ESPLOG(("esp%d: in do_cmddone() but didn't get BSERV interrupt.\n",
3218 esp->esp_id));
3219 return do_reset_bus;
3220}
3221
3222static int esp_do_msgout(struct NCR_ESP *esp, struct ESP_regs *eregs)
3223{
3224 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
3225 switch(esp->msgout_len) {
3226 case 1:
3227 esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
3228 esp_cmd(esp, eregs, ESP_CMD_TI);
3229 break;
3230
3231 case 2:
3232 if(esp->do_pio_cmds){
3233 esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
3234 esp_write(eregs->esp_fdata, esp->cur_msgout[1]);
3235 esp_cmd(esp, eregs, ESP_CMD_TI);
3236 } else {
3237 esp->esp_command[0] = esp->cur_msgout[0];
3238 esp->esp_command[1] = esp->cur_msgout[1];
3239 esp->dma_setup(esp, esp->esp_command_dvma, 2, 0);
3240 esp_setcount(eregs, 2);
3241 esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
3242 }
3243 break;
3244
3245 case 4:
3246 esp->snip = 1;
3247 if(esp->do_pio_cmds){
3248 esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
3249 esp_write(eregs->esp_fdata, esp->cur_msgout[1]);
3250 esp_write(eregs->esp_fdata, esp->cur_msgout[2]);
3251 esp_write(eregs->esp_fdata, esp->cur_msgout[3]);
3252 esp_cmd(esp, eregs, ESP_CMD_TI);
3253 } else {
3254 esp->esp_command[0] = esp->cur_msgout[0];
3255 esp->esp_command[1] = esp->cur_msgout[1];
3256 esp->esp_command[2] = esp->cur_msgout[2];
3257 esp->esp_command[3] = esp->cur_msgout[3];
3258 esp->dma_setup(esp, esp->esp_command_dvma, 4, 0);
3259 esp_setcount(eregs, 4);
3260 esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
3261 }
3262 break;
3263
3264 case 5:
3265 esp->snip = 1;
3266 if(esp->do_pio_cmds){
3267 esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
3268 esp_write(eregs->esp_fdata, esp->cur_msgout[1]);
3269 esp_write(eregs->esp_fdata, esp->cur_msgout[2]);
3270 esp_write(eregs->esp_fdata, esp->cur_msgout[3]);
3271 esp_write(eregs->esp_fdata, esp->cur_msgout[4]);
3272 esp_cmd(esp, eregs, ESP_CMD_TI);
3273 } else {
3274 esp->esp_command[0] = esp->cur_msgout[0];
3275 esp->esp_command[1] = esp->cur_msgout[1];
3276 esp->esp_command[2] = esp->cur_msgout[2];
3277 esp->esp_command[3] = esp->cur_msgout[3];
3278 esp->esp_command[4] = esp->cur_msgout[4];
3279 esp->dma_setup(esp, esp->esp_command_dvma, 5, 0);
3280 esp_setcount(eregs, 5);
3281 esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
3282 }
3283 break;
3284
3285 default:
3286 /* whoops */
3287 ESPMISC(("bogus msgout sending NOP\n"));
3288 esp->cur_msgout[0] = NOP;
3289 esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
3290 esp->msgout_len = 1;
3291 esp_cmd(esp, eregs, ESP_CMD_TI);
3292 break;
3293 }
3294 esp_advance_phase(esp->current_SC, in_msgoutdone);
3295 return do_intr_end;
3296}
3297
/* The message-out transfer started by esp_do_msgout() has completed.
 * Deal with parity trouble, optionally drain the FIFO, note whether
 * the message we just sent was an SDTR (so snip stays set), then fall
 * back into phase determination.
 */
static int esp_do_msgoutdone(struct NCR_ESP *esp, 
			     struct ESP_regs *eregs)
{
	if((esp->msgout_len > 1) && esp->dma_barrier)
		esp->dma_barrier(esp);

	if(!(esp->ireg & ESP_INTR_DC)) {
		esp_cmd(esp, eregs, ESP_CMD_NULL);
		switch(esp->sreg & ESP_STAT_PMASK) {
		case ESP_MOP:
			/* whoops, parity error */
			ESPLOG(("esp%d: still in msgout, parity error assumed\n",
				esp->esp_id));
			if(esp->msgout_len > 1)
				esp_cmd(esp, eregs, ESP_CMD_SATN);
			esp_advance_phase(esp->current_SC, in_msgout);
			return do_work_bus;

		case ESP_DIP:
			break;

		default:
			/* Drain stale FIFO bytes, but only when this device
			 * is running async (sync_max_offset == 0).
			 */
			if(!fcount(esp, eregs) &&
			   !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset))
				esp_cmd(esp, eregs, ESP_CMD_FLUSH);
			break;

		};
	}

	/* If we sent out a synchronous negotiation message, update
	 * our state.
	 */
	if(esp->cur_msgout[2] == EXTENDED_MESSAGE &&
	   esp->cur_msgout[4] == EXTENDED_SDTR) {
		esp->snip = 1; /* anal retentiveness... */
	}

	esp->prevmsgout = esp->cur_msgout[0];
	esp->msgout_len = 0;
	esp_advance_phase(esp->current_SC, in_the_dark);
	return esp_do_phase_determine(esp, eregs);
}
3341
3342static int esp_bus_unexpected(struct NCR_ESP *esp, struct ESP_regs *eregs)
3343{
3344 ESPLOG(("esp%d: command in weird state %2x\n",
3345 esp->esp_id, esp->current_SC->SCp.phase));
3346 return do_reset_bus;
3347}
3348
/* Second-level dispatch table: esp_work_bus() indexes this with the
 * low nibble of the command's SCp.phase once the high nibble matches
 * in_phases_mask.  Slots that should never be reached are wired to
 * esp_bus_unexpected, which resets the bus.
 */
static espfunc_t bus_vector[] = {
	esp_do_data_finale,	/* 0x0 */
	esp_do_data_finale,	/* 0x1 */
	esp_bus_unexpected,	/* 0x2 */
	esp_do_msgin,		/* 0x3 */
	esp_do_msgincont,	/* 0x4 */
	esp_do_msgindone,	/* 0x5 */
	esp_do_msgout,		/* 0x6 */
	esp_do_msgoutdone,	/* 0x7 */
	esp_do_cmdbegin,	/* 0x8 */
	esp_do_cmddone,		/* 0x9 */
	esp_do_status,		/* 0xa */
	esp_do_freebus,		/* 0xb */
	esp_do_phase_determine,	/* 0xc */
	esp_bus_unexpected,	/* 0xd */
	esp_bus_unexpected,	/* 0xe */
	esp_bus_unexpected,	/* 0xf */
};
3367
3368/* This is the second tier in our dual-level SCSI state machine. */
3369static int esp_work_bus(struct NCR_ESP *esp, struct ESP_regs *eregs)
3370{
3371 Scsi_Cmnd *SCptr = esp->current_SC;
3372 unsigned int phase;
3373
3374 ESPBUS(("esp_work_bus: "));
3375 if(!SCptr) {
3376 ESPBUS(("reconnect\n"));
3377 return esp_do_reconnect(esp, eregs);
3378 }
3379 phase = SCptr->SCp.phase;
3380 if ((phase & 0xf0) == in_phases_mask)
3381 return bus_vector[(phase & 0x0f)](esp, eregs);
3382 else if((phase & 0xf0) == in_slct_mask)
3383 return esp_select_complete(esp, eregs);
3384 else
3385 return esp_bus_unexpected(esp, eregs);
3386}
3387
/* Tier-one dispatch table for esp_handle()'s state-machine loop:
 * indexed by the do_* codes in [do_phase_determine, do_intr_end).
 * Slot 0 is never dispatched (the loop guards the index range).
 */
static espfunc_t isvc_vector[] = {
	NULL,
	esp_do_phase_determine,
	esp_do_resetbus,
	esp_finish_reset,
	esp_work_bus
};
3395
/* Main interrupt handler for an esp adapter.  Latches the chip's
 * status, sequence-step and interrupt registers, classifies the
 * interrupt, then runs the tier-one state machine (isvc_vector) until
 * it settles at do_intr_end.  Called from esp_intr() with the host
 * lock held and the board's DMA interrupts masked.
 */
void esp_handle(struct NCR_ESP *esp)
{
	struct ESP_regs *eregs;
	Scsi_Cmnd *SCptr;
	int what_next = do_intr_end;
	eregs = esp->eregs;
	SCptr = esp->current_SC;

	if(esp->dma_irq_entry)
		esp->dma_irq_entry(esp);

	/* Check for errors. */
	esp->sreg = esp_read(eregs->esp_status);
	esp->sreg &= (~ESP_STAT_INTR);
	esp->seqreg = (esp_read(eregs->esp_sstep) & ESP_STEP_VBITS);
	esp->ireg = esp_read(eregs->esp_intrpt); /* Unlatch intr and stat regs */
	ESPIRQ(("handle_irq: [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
		esp->sreg, esp->seqreg, esp->ireg));
	if(esp->sreg & (ESP_STAT_SPAM)) {
		/* Gross error, could be due to one of:
		 *
		 * - top of fifo overwritten, could be because
		 *   we tried to do a synchronous transfer with
		 *   an offset greater than ESP fifo size
		 *
		 * - top of command register overwritten
		 *
		 * - DMA setup to go in one direction, SCSI
		 *   bus points in the other, whoops
		 *
		 * - weird phase change during asynchronous
		 *   data phase while we are initiator
		 */
		ESPLOG(("esp%d: Gross error sreg=%2x\n", esp->esp_id, esp->sreg));

		/* If a command is live on the bus we cannot safely
		 * reset the bus, so we'll just let the pieces fall
		 * where they may.  Here we are hoping that the
		 * target will be able to cleanly go away soon
		 * so we can safely reset things.
		 */
		if(!SCptr) {
			ESPLOG(("esp%d: No current cmd during gross error, "
				"resetting bus\n", esp->esp_id));
			what_next = do_reset_bus;
			goto state_machine;
		}
	}

	/* No current cmd is only valid at this point when there are
	 * commands off the bus or we are trying a reset.
	 */
	if(!SCptr && !esp->disconnected_SC && !(esp->ireg & ESP_INTR_SR)) {
		/* Panic is safe, since current_SC is null. */
		ESPLOG(("esp%d: no command in esp_handle()\n", esp->esp_id));
		panic("esp_handle: current_SC == penguin within interrupt!");
	}

	if(esp->ireg & (ESP_INTR_IC)) {
		/* Illegal command fed to ESP.  Outside of obvious
		 * software bugs that could cause this, there is
		 * a condition with ESP100 where we can confuse the
		 * ESP into an erroneous illegal command interrupt
		 * because it does not scrape the FIFO properly
		 * for reselection.  See esp100_reconnect_hwbug()
		 * to see how we try very hard to avoid this.
		 */
		ESPLOG(("esp%d: invalid command\n", esp->esp_id));

		esp_dump_state(esp, eregs);

		if(SCptr) {
			/* Devices with very buggy firmware can drop BSY
			 * during a scatter list interrupt when using sync
			 * mode transfers.  We continue the transfer as
			 * expected, the target drops the bus, the ESP
			 * gets confused, and we get a illegal command
			 * interrupt because the bus is in the disconnected
			 * state now and ESP_CMD_TI is only allowed when
			 * a nexus is alive on the bus.
			 */
			ESPLOG(("esp%d: Forcing async and disabling disconnect for "
				"target %d\n", esp->esp_id, SCptr->device->id));
			SCptr->device->borken = 1; /* foo on you */
		}

		what_next = do_reset_bus;
	} else if(!(esp->ireg & ~(ESP_INTR_FDONE | ESP_INTR_BSERV | ESP_INTR_DC))) {
		/* "Normal" interrupt: only function-done / bus-service /
		 * disconnect bits are set.  Route by the command's phase.
		 */
		int phase;

		if(SCptr) {
			phase = SCptr->SCp.phase;
			if(phase & in_phases_mask) {
				what_next = esp_work_bus(esp, eregs);
			} else if(phase & in_slct_mask) {
				what_next = esp_select_complete(esp, eregs);
			} else {
				ESPLOG(("esp%d: interrupt for no good reason...\n",
					esp->esp_id));
				what_next = do_intr_end;
			}
		} else {
			ESPLOG(("esp%d: BSERV or FDONE or DC while SCptr==NULL\n",
				esp->esp_id));
			what_next = do_reset_bus;
		}
	} else if(esp->ireg & ESP_INTR_SR) {
		ESPLOG(("esp%d: SCSI bus reset interrupt\n", esp->esp_id));
		what_next = do_reset_complete;
	} else if(esp->ireg & (ESP_INTR_S | ESP_INTR_SATN)) {
		/* We only act as initiator; being selected is fatal. */
		ESPLOG(("esp%d: AIEEE we have been selected by another initiator!\n",
			esp->esp_id));
		what_next = do_reset_bus;
	} else if(esp->ireg & ESP_INTR_RSEL) {
		if(!SCptr) {
			/* This is ok. */
			what_next = esp_do_reconnect(esp, eregs);
		} else if(SCptr->SCp.phase & in_slct_mask) {
			/* Only selection code knows how to clean
			 * up properly.
			 */
			ESPDISC(("Reselected during selection attempt\n"));
			what_next = esp_select_complete(esp, eregs);
		} else {
			ESPLOG(("esp%d: Reselected while bus is busy\n",
				esp->esp_id));
			what_next = do_reset_bus;
		}
	}

	/* This is tier-one in our dual level SCSI state machine. */
state_machine:
	while(what_next != do_intr_end) {
		if (what_next >= do_phase_determine &&
		    what_next < do_intr_end)
			what_next = isvc_vector[what_next](esp, eregs);
		else {
			/* state is completely lost ;-( */
			ESPLOG(("esp%d: interrupt engine loses state, resetting bus\n",
				esp->esp_id));
			what_next = do_reset_bus;
		}
	}
	if(esp->dma_irq_exit)
		esp->dma_irq_exit(esp);
}
EXPORT_SYMBOL(esp_handle);
3544
3545#ifndef CONFIG_SMP
3546irqreturn_t esp_intr(int irq, void *dev_id)
3547{
3548 struct NCR_ESP *esp;
3549 unsigned long flags;
3550 int again;
3551 struct Scsi_Host *dev = dev_id;
3552
3553 /* Handle all ESP interrupts showing at this IRQ level. */
3554 spin_lock_irqsave(dev->host_lock, flags);
3555repeat:
3556 again = 0;
3557 for_each_esp(esp) {
3558#ifndef __mips__
3559 if(((esp)->irq & 0xff) == irq) {
3560#endif
3561 if(esp->dma_irq_p(esp)) {
3562 again = 1;
3563
3564 esp->dma_ints_off(esp);
3565
3566 ESPIRQ(("I%d(", esp->esp_id));
3567 esp_handle(esp);
3568 ESPIRQ((")"));
3569
3570 esp->dma_ints_on(esp);
3571 }
3572#ifndef __mips__
3573 }
3574#endif
3575 }
3576 if(again)
3577 goto repeat;
3578 spin_unlock_irqrestore(dev->host_lock, flags);
3579 return IRQ_HANDLED;
3580}
3581#else
3582/* For SMP we only service one ESP on the list list at our IRQ level! */
3583irqreturn_t esp_intr(int irq, void *dev_id)
3584{
3585 struct NCR_ESP *esp;
3586 unsigned long flags;
3587 struct Scsi_Host *dev = dev_id;
3588
3589 /* Handle all ESP interrupts showing at this IRQ level. */
3590 spin_lock_irqsave(dev->host_lock, flags);
3591 for_each_esp(esp) {
3592 if(((esp)->irq & 0xf) == irq) {
3593 if(esp->dma_irq_p(esp)) {
3594 esp->dma_ints_off(esp);
3595
3596 ESPIRQ(("I[%d:%d](",
3597 smp_processor_id(), esp->esp_id));
3598 esp_handle(esp);
3599 ESPIRQ((")"));
3600
3601 esp->dma_ints_on(esp);
3602 goto out;
3603 }
3604 }
3605 }
3606out:
3607 spin_unlock_irqrestore(dev->host_lock, flags);
3608 return IRQ_HANDLED;
3609}
3610#endif
3611
3612int esp_slave_alloc(struct scsi_device *SDptr)
3613{
3614 struct esp_device *esp_dev =
3615 kzalloc(sizeof(struct esp_device), GFP_ATOMIC);
3616
3617 if (!esp_dev)
3618 return -ENOMEM;
3619 SDptr->hostdata = esp_dev;
3620 return 0;
3621}
3622
3623void esp_slave_destroy(struct scsi_device *SDptr)
3624{
3625 struct NCR_ESP *esp = (struct NCR_ESP *) SDptr->host->hostdata;
3626
3627 esp->targets_present &= ~(1 << sdev_id(SDptr));
3628 kfree(SDptr->hostdata);
3629 SDptr->hostdata = NULL;
3630}
3631
#ifdef MODULE
/* Module entry/exit are no-ops; per-board front-end drivers do the
 * real registration (presumably -- their code is not in this file).
 */
int init_module(void) { return 0; }
void cleanup_module(void) {}
/* Drop the global ESP accounting counters when a host goes away. */
void esp_release(void)
{
	esps_in_use--;
	esps_running = esps_in_use;
}
EXPORT_SYMBOL(esp_release);
#endif
3642
3643EXPORT_SYMBOL(esp_abort);
3644EXPORT_SYMBOL(esp_allocate);
3645EXPORT_SYMBOL(esp_deallocate);
3646EXPORT_SYMBOL(esp_initialize);
3647EXPORT_SYMBOL(esp_intr);
3648EXPORT_SYMBOL(esp_queue);
3649EXPORT_SYMBOL(esp_reset);
3650EXPORT_SYMBOL(esp_slave_alloc);
3651EXPORT_SYMBOL(esp_slave_destroy);
3652EXPORT_SYMBOL(esps_in_use);
3653
3654MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/NCR53C9x.h b/drivers/scsi/NCR53C9x.h
deleted file mode 100644
index 00a0ba040dba..000000000000
--- a/drivers/scsi/NCR53C9x.h
+++ /dev/null
@@ -1,668 +0,0 @@
1/* NCR53C9x.c: Defines and structures for the NCR53C9x generic driver.
2 *
3 * Originally esp.h: Defines and structures for the Sparc ESP
4 * (Enhanced SCSI Processor) driver under Linux.
5 *
6 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
7 *
8 * Generalization by Jesper Skov (jskov@cygnus.co.uk)
9 *
10 * More generalization (for i386 stuff) by Tymm Twillman (tymm@computer.org)
11 */
12
13#ifndef NCR53C9X_H
14#define NCR53C9X_H
15
16#include <linux/interrupt.h>
17
18/* djweis for mac driver */
19#if defined(CONFIG_MAC)
20#define PAD_SIZE 15
21#else
22#define PAD_SIZE 3
23#endif
24
25/* Handle multiple hostadapters on Amiga
26 * generally PAD_SIZE = 3
27 * but there is one exception: Oktagon (PAD_SIZE = 1) */
28#if defined(CONFIG_OKTAGON_SCSI) || defined(CONFIG_OKTAGON_SCSI_MODULE)
29#undef PAD_SIZE
30#if defined(CONFIG_BLZ1230_SCSI) || defined(CONFIG_BLZ1230_SCSI_MODULE) || \
31 defined(CONFIG_BLZ2060_SCSI) || defined(CONFIG_BLZ2060_SCSI_MODULE) || \
32 defined(CONFIG_CYBERSTORM_SCSI) || defined(CONFIG_CYBERSTORM_SCSI_MODULE) || \
33 defined(CONFIG_CYBERSTORMII_SCSI) || defined(CONFIG_CYBERSTORMII_SCSI_MODULE) || \
34 defined(CONFIG_FASTLANE_SCSI) || defined(CONFIG_FASTLANE_SCSI_MODULE)
35#define MULTIPLE_PAD_SIZES
36#else
37#define PAD_SIZE 1
38#endif
39#endif
40
41/* Macros for debugging messages */
42
43#define DEBUG_ESP
44/* #define DEBUG_ESP_DATA */
45/* #define DEBUG_ESP_QUEUE */
46/* #define DEBUG_ESP_DISCONNECT */
47/* #define DEBUG_ESP_STATUS */
48/* #define DEBUG_ESP_PHASES */
49/* #define DEBUG_ESP_WORKBUS */
50/* #define DEBUG_STATE_MACHINE */
51/* #define DEBUG_ESP_CMDS */
52/* #define DEBUG_ESP_IRQS */
53/* #define DEBUG_SDTR */
54/* #define DEBUG_ESP_SG */
55
56/* Use the following to sprinkle debugging messages in a way which
57 * suits you if combinations of the above become too verbose when
58 * trying to track down a specific problem.
59 */
60/* #define DEBUG_ESP_MISC */
61
62#if defined(DEBUG_ESP)
63#define ESPLOG(foo) printk foo
64#else
65#define ESPLOG(foo)
66#endif /* (DEBUG_ESP) */
67
68#if defined(DEBUG_ESP_DATA)
69#define ESPDATA(foo) printk foo
70#else
71#define ESPDATA(foo)
72#endif
73
74#if defined(DEBUG_ESP_QUEUE)
75#define ESPQUEUE(foo) printk foo
76#else
77#define ESPQUEUE(foo)
78#endif
79
80#if defined(DEBUG_ESP_DISCONNECT)
81#define ESPDISC(foo) printk foo
82#else
83#define ESPDISC(foo)
84#endif
85
86#if defined(DEBUG_ESP_STATUS)
87#define ESPSTAT(foo) printk foo
88#else
89#define ESPSTAT(foo)
90#endif
91
92#if defined(DEBUG_ESP_PHASES)
93#define ESPPHASE(foo) printk foo
94#else
95#define ESPPHASE(foo)
96#endif
97
98#if defined(DEBUG_ESP_WORKBUS)
99#define ESPBUS(foo) printk foo
100#else
101#define ESPBUS(foo)
102#endif
103
104#if defined(DEBUG_ESP_IRQS)
105#define ESPIRQ(foo) printk foo
106#else
107#define ESPIRQ(foo)
108#endif
109
110#if defined(DEBUG_SDTR)
111#define ESPSDTR(foo) printk foo
112#else
113#define ESPSDTR(foo)
114#endif
115
116#if defined(DEBUG_ESP_MISC)
117#define ESPMISC(foo) printk foo
118#else
119#define ESPMISC(foo)
120#endif
121
122/*
123 * padding for register structure
124 */
125#ifdef CONFIG_JAZZ_ESP
126#define EREGS_PAD(n)
127#else
128#ifndef MULTIPLE_PAD_SIZES
129#define EREGS_PAD(n) unchar n[PAD_SIZE];
130#endif
131#endif
132
133/* The ESP SCSI controllers have their register sets in three
134 * "classes":
135 *
136 * 1) Registers which are both read and write.
137 * 2) Registers which are read only.
138 * 3) Registers which are write only.
139 *
140 * Yet, they all live within the same IO space.
141 */
142
143#if !defined(__i386__) && !defined(__x86_64__)
144
145#ifndef MULTIPLE_PAD_SIZES
146
147#ifdef CONFIG_CPU_HAS_WB
148#include <asm/wbflush.h>
149#define esp_write(__reg, __val) do{(__reg) = (__val); wbflush();} while(0)
150#else
151#define esp_write(__reg, __val) ((__reg) = (__val))
152#endif
153#define esp_read(__reg) (__reg)
154
155struct ESP_regs {
156 /* Access Description Offset */
157 volatile unchar esp_tclow; /* rw Low bits of the transfer count 0x00 */
158 EREGS_PAD(tlpad1);
159 volatile unchar esp_tcmed; /* rw Mid bits of the transfer count 0x04 */
160 EREGS_PAD(fdpad);
161 volatile unchar esp_fdata; /* rw FIFO data bits 0x08 */
162 EREGS_PAD(cbpad);
163 volatile unchar esp_cmnd; /* rw SCSI command bits 0x0c */
164 EREGS_PAD(stpad);
165 volatile unchar esp_status; /* ro ESP status register 0x10 */
166#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
167 EREGS_PAD(irqpd);
168 volatile unchar esp_intrpt; /* ro Kind of interrupt 0x14 */
169#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
170 EREGS_PAD(sspad);
171 volatile unchar esp_sstep; /* ro Sequence step register 0x18 */
172#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
173 EREGS_PAD(ffpad);
174 volatile unchar esp_fflags; /* ro Bits of current FIFO info 0x1c */
175#define esp_soff esp_fflags /* wo Sync offset 0x1c */
176 EREGS_PAD(cf1pd);
177 volatile unchar esp_cfg1; /* rw First configuration register 0x20 */
178 EREGS_PAD(cfpad);
179 volatile unchar esp_cfact; /* wo Clock conversion factor 0x24 */
180 EREGS_PAD(ctpad);
181 volatile unchar esp_ctest; /* wo Chip test register 0x28 */
182 EREGS_PAD(cf2pd);
183 volatile unchar esp_cfg2; /* rw Second configuration register 0x2c */
184 EREGS_PAD(cf3pd);
185
186 /* The following is only found on the 53C9X series SCSI chips */
187 volatile unchar esp_cfg3; /* rw Third configuration register 0x30 */
188 EREGS_PAD(cf4pd);
189 volatile unchar esp_cfg4; /* rw Fourth configuration register 0x34 */
190 EREGS_PAD(thpd);
191 /* The following is found on all chips except the NCR53C90 (ESP100) */
192 volatile unchar esp_tchi; /* rw High bits of transfer count 0x38 */
193#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
194 EREGS_PAD(fgpad);
195 volatile unchar esp_fgrnd; /* rw Data base for fifo 0x3c */
196};
197
198#else /* MULTIPLE_PAD_SIZES */
199
200#define esp_write(__reg, __val) (*(__reg) = (__val))
201#define esp_read(__reg) (*(__reg))
202
203struct ESP_regs {
204 unsigned char io_addr[64]; /* dummy */
205 /* Access Description Offset */
206#define esp_tclow io_addr /* rw Low bits of the transfer count 0x00 */
207#define esp_tcmed io_addr + (1<<(esp->shift)) /* rw Mid bits of the transfer count 0x04 */
208#define esp_fdata io_addr + (2<<(esp->shift)) /* rw FIFO data bits 0x08 */
209#define esp_cmnd io_addr + (3<<(esp->shift)) /* rw SCSI command bits 0x0c */
210#define esp_status io_addr + (4<<(esp->shift)) /* ro ESP status register 0x10 */
211#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
212#define esp_intrpt io_addr + (5<<(esp->shift)) /* ro Kind of interrupt 0x14 */
213#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
214#define esp_sstep io_addr + (6<<(esp->shift)) /* ro Sequence step register 0x18 */
215#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
216#define esp_fflags io_addr + (7<<(esp->shift)) /* ro Bits of current FIFO info 0x1c */
217#define esp_soff esp_fflags /* wo Sync offset 0x1c */
218#define esp_cfg1 io_addr + (8<<(esp->shift)) /* rw First configuration register 0x20 */
219#define esp_cfact io_addr + (9<<(esp->shift)) /* wo Clock conversion factor 0x24 */
220#define esp_ctest io_addr + (10<<(esp->shift)) /* wo Chip test register 0x28 */
221#define esp_cfg2 io_addr + (11<<(esp->shift)) /* rw Second configuration register 0x2c */
222
223 /* The following is only found on the 53C9X series SCSI chips */
224#define esp_cfg3 io_addr + (12<<(esp->shift)) /* rw Third configuration register 0x30 */
225#define esp_cfg4 io_addr + (13<<(esp->shift)) /* rw Fourth configuration register 0x34 */
226
227 /* The following is found on all chips except the NCR53C90 (ESP100) */
228#define esp_tchi io_addr + (14<<(esp->shift)) /* rw High bits of transfer count 0x38 */
229#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
230#define esp_fgrnd io_addr + (15<<(esp->shift)) /* rw Data base for fifo 0x3c */
231};
232
233#endif
234
235#else /* !defined(__i386__) && !defined(__x86_64__) */
236
237#define esp_write(__reg, __val) outb((__val), (__reg))
238#define esp_read(__reg) inb((__reg))
239
240struct ESP_regs {
241 unsigned int io_addr;
242 /* Access Description Offset */
243#define esp_tclow io_addr /* rw Low bits of the transfer count 0x00 */
244#define esp_tcmed io_addr + 1 /* rw Mid bits of the transfer count 0x04 */
245#define esp_fdata io_addr + 2 /* rw FIFO data bits 0x08 */
246#define esp_cmnd io_addr + 3 /* rw SCSI command bits 0x0c */
247#define esp_status io_addr + 4 /* ro ESP status register 0x10 */
248#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
249#define esp_intrpt io_addr + 5 /* ro Kind of interrupt 0x14 */
250#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
251#define esp_sstep io_addr + 6 /* ro Sequence step register 0x18 */
252#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
253#define esp_fflags io_addr + 7 /* ro Bits of current FIFO info 0x1c */
254#define esp_soff esp_fflags /* wo Sync offset 0x1c */
255#define esp_cfg1 io_addr + 8 /* rw First configuration register 0x20 */
256#define esp_cfact io_addr + 9 /* wo Clock conversion factor 0x24 */
257#define esp_ctest io_addr + 10 /* wo Chip test register 0x28 */
258#define esp_cfg2 io_addr + 11 /* rw Second configuration register 0x2c */
259
260 /* The following is only found on the 53C9X series SCSI chips */
261#define esp_cfg3 io_addr + 12 /* rw Third configuration register 0x30 */
262#define esp_cfg4 io_addr + 13 /* rw Fourth configuration register 0x34 */
263
264 /* The following is found on all chips except the NCR53C90 (ESP100) */
265#define esp_tchi io_addr + 14 /* rw High bits of transfer count 0x38 */
266#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
267#define esp_fgrnd io_addr + 15 /* rw Data base for fifo 0x3c */
268};
269
270#endif /* !defined(__i386__) && !defined(__x86_64__) */
271
/* Various revisions of the ESP board.  The driver compares these
 * numerically (e.g. "erev > esp236" gates the fast-SCSI config3
 * handling in NCR53C9x.c), so the ordering of the values matters --
 * do not reorder.
 */
enum esp_rev {
	esp100 = 0x00, /* NCR53C90 - very broken */
	esp100a = 0x01, /* NCR53C90A */
	esp236 = 0x02,
	fas236 = 0x03,
	fas100a = 0x04,
	fast = 0x05,
	fas366 = 0x06,
	fas216 = 0x07,
	fsc = 0x08, /* SYM53C94-2 */
	espunknown = 0x09
};
285
/* We allocate one of these for each scsi device and attach it to
 * SDptr->hostdata for use in the driver (see esp_slave_alloc()).
 */
struct esp_device {
	unsigned char sync_min_period;	/* negotiated sync period value (low 5 bits of regval) */
	unsigned char sync_max_offset;	/* negotiated sync offset, OR'd with esp->radelay; 0 = async */
	unsigned sync:1;		/* set once sync negotiation has concluded */
	unsigned wide:1;		/* wide transfers -- usage not visible in this chunk; verify */
	unsigned disconnect:1;		/* disconnect allowed -- NOTE(review): usage not visible here */
};
296
297/* We get one of these for each ESP probed. */
298struct NCR_ESP {
299 struct NCR_ESP *next; /* Next ESP on probed or NULL */
300 struct ESP_regs *eregs; /* All esp registers */
301 int dma; /* Who I do transfers with. */
302 void *dregs; /* And his registers. */
303 struct Scsi_Host *ehost; /* Backpointer to SCSI Host */
304
305 void *edev; /* Pointer to controller base/SBus */
306 int esp_id; /* Unique per-ESP ID number */
307
308 /* ESP Configuration Registers */
309 unsigned char config1; /* Copy of the 1st config register */
310 unsigned char config2; /* Copy of the 2nd config register */
311 unsigned char config3[16]; /* Copy of the 3rd config register */
312
313 /* The current command we are sending to the ESP chip. This esp_command
314 * ptr needs to be mapped in DVMA area so we can send commands and read
315 * from the ESP fifo without burning precious CPU cycles. Programmed I/O
316 * sucks when we have the DVMA to do it for us. The ESP is stupid and will
317 * only send out 6, 10, and 12 byte SCSI commands, others we need to send
318 * one byte at a time. esp_slowcmd being set says that we are doing one
319 * of the command types ESP doesn't understand, esp_scmdp keeps track of
320 * which byte we are sending, esp_scmdleft says how many bytes to go.
321 */
322 volatile unchar *esp_command; /* Location of command (CPU view) */
323 __u32 esp_command_dvma; /* Location of command (DVMA view) */
324 unsigned char esp_clen; /* Length of this command */
325 unsigned char esp_slowcmd;
326 unsigned char *esp_scmdp;
327 unsigned char esp_scmdleft;
328
329 /* The following are used to determine the cause of an IRQ. Upon every
330 * IRQ entry we synchronize these with the hardware registers.
331 */
332 unchar ireg; /* Copy of ESP interrupt register */
333 unchar sreg; /* Same for ESP status register */
334 unchar seqreg; /* The ESP sequence register */
335
336 /* The following is set when a premature interrupt condition is detected
337 * in some FAS revisions.
338 */
339 unchar fas_premature_intr_workaround;
340
341 /* To save register writes to the ESP, which can be expensive, we
342 * keep track of the previous value that various registers had for
343 * the last target we connected to. If they are the same for the
344 * current target, we skip the register writes as they are not needed.
345 */
346 unchar prev_soff, prev_stp, prev_cfg3;
347
348 /* For each target we keep track of save/restore data
349 * pointer information. This needs to be updated majorly
350 * when we add support for tagged queueing. -DaveM
351 */
352 struct esp_pointers {
353 char *saved_ptr;
354 struct scatterlist *saved_buffer;
355 int saved_this_residual;
356 int saved_buffers_residual;
357 } data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/;
358
359 /* Clock periods, frequencies, synchronization, etc. */
360 unsigned int cfreq; /* Clock frequency in HZ */
361 unsigned int cfact; /* Clock conversion factor */
362 unsigned int ccycle; /* One ESP clock cycle */
363 unsigned int ctick; /* One ESP clock time */
364 unsigned int radelay; /* FAST chip req/ack delay */
365 unsigned int neg_defp; /* Default negotiation period */
366 unsigned int sync_defp; /* Default sync transfer period */
367 unsigned int max_period; /* longest our period can be */
368 unsigned int min_period; /* shortest period we can withstand */
369 /* For slow to medium speed input clock rates we shoot for 5mb/s,
370 * but for high input clock rates we try to do 10mb/s although I
371 * don't think a transfer can even run that fast with an ESP even
372 * with DMA2 scatter gather pipelining.
373 */
374#define SYNC_DEFP_SLOW 0x32 /* 5mb/s */
375#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
376
377 unsigned int snip; /* Sync. negotiation in progress */
378 unsigned int wnip; /* WIDE negotiation in progress */
379 unsigned int targets_present; /* targets spoken to before */
380
381 int current_transfer_size; /* Set at beginning of data dma */
382
383 unchar espcmdlog[32]; /* Log of current esp cmds sent. */
384 unchar espcmdent; /* Current entry in esp cmd log. */
385
386 /* Misc. info about this ESP */
387 enum esp_rev erev; /* ESP revision */
388 int irq; /* IRQ for this ESP */
389 int scsi_id; /* Who am I as initiator? */
390 int scsi_id_mask; /* Bitmask of 'me'. */
391 int diff; /* Differential SCSI bus? */
392 int slot; /* Slot the adapter occupies */
393
394 /* Our command queues, only one cmd lives in the current_SC queue. */
395 Scsi_Cmnd *issue_SC; /* Commands to be issued */
396 Scsi_Cmnd *current_SC; /* Who is currently working the bus */
397 Scsi_Cmnd *disconnected_SC; /* Commands disconnected from the bus */
398
399 /* Message goo */
400 unchar cur_msgout[16];
401 unchar cur_msgin[16];
402 unchar prevmsgout, prevmsgin;
403 unchar msgout_len, msgin_len;
404 unchar msgout_ctr, msgin_ctr;
405
406 /* States that we cannot keep in the per cmd structure because they
407 * cannot be assosciated with any specific command.
408 */
409 unchar resetting_bus;
410 wait_queue_head_t reset_queue;
411
412 unchar do_pio_cmds; /* Do command transfer with pio */
413
414 /* How much bits do we have to shift the registers */
415 unsigned char shift;
416
417 /* Functions handling DMA
418 */
419 /* Required functions */
420 int (*dma_bytes_sent)(struct NCR_ESP *, int);
421 int (*dma_can_transfer)(struct NCR_ESP *, Scsi_Cmnd *);
422 void (*dma_dump_state)(struct NCR_ESP *);
423 void (*dma_init_read)(struct NCR_ESP *, __u32, int);
424 void (*dma_init_write)(struct NCR_ESP *, __u32, int);
425 void (*dma_ints_off)(struct NCR_ESP *);
426 void (*dma_ints_on)(struct NCR_ESP *);
427 int (*dma_irq_p)(struct NCR_ESP *);
428 int (*dma_ports_p)(struct NCR_ESP *);
429 void (*dma_setup)(struct NCR_ESP *, __u32, int, int);
430
431 /* Optional functions (i.e. may be initialized to 0) */
432 void (*dma_barrier)(struct NCR_ESP *);
433 void (*dma_drain)(struct NCR_ESP *);
434 void (*dma_invalidate)(struct NCR_ESP *);
435 void (*dma_irq_entry)(struct NCR_ESP *);
436 void (*dma_irq_exit)(struct NCR_ESP *);
437 void (*dma_led_off)(struct NCR_ESP *);
438 void (*dma_led_on)(struct NCR_ESP *);
439 void (*dma_poll)(struct NCR_ESP *, unsigned char *);
440 void (*dma_reset)(struct NCR_ESP *);
441
442 /* Optional virtual DMA functions */
443 void (*dma_mmu_get_scsi_one)(struct NCR_ESP *, Scsi_Cmnd *);
444 void (*dma_mmu_get_scsi_sgl)(struct NCR_ESP *, Scsi_Cmnd *);
445 void (*dma_mmu_release_scsi_one)(struct NCR_ESP *, Scsi_Cmnd *);
446 void (*dma_mmu_release_scsi_sgl)(struct NCR_ESP *, Scsi_Cmnd *);
447 void (*dma_advance_sg)(Scsi_Cmnd *);
448};
449
450/* Bitfield meanings for the above registers. */
451
452/* ESP config reg 1, read-write, found on all ESP chips */
453#define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */
454#define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */
455#define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */
456#define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */
457#define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */
458#define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */
459
460/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236+fsc chips */
461#define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236,fsc) */
462#define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236,fsc) */
463#define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */
464#define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tmode only) */
465#define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */
466#define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */
467#define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236,fsc) */
468#define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216,fsc) */
469#define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */
470#define ESP_CONFIG2_RFB 0x80 /* Reserve FIFO byte (fsc) */
471#define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */
472
473/* ESP config register 3 read-write, found only esp236+fas236+fas100a+fsc chips */
474#define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/fas366) */
475#define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236/fsc) */
476#define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a) */
477#define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236/fsc) */
478#define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a) */
479#define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236/fsc) */
480#define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a) */
481#define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236/fsc) */
482#define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a) */
483#define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236/fsc) */
484#define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236/fsc) */
485#define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236/fsc) */
486#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236/fsc) */
487
488/* ESP config register 4 read-write, found only on fsc chips */
489#define ESP_CONFIG4_BBTE 0x01 /* Back-to-Back transfer enable */
490#define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode */
491#define ESP_CONFIG4_EAN 0x04 /* Enable Active Negotiation */
492
493/* ESP command register read-write */
494/* Group 1 commands: These may be sent at any point in time to the ESP
495 * chip. None of them can generate interrupts 'cept
496 * the "SCSI bus reset" command if you have not disabled
497 * SCSI reset interrupts in the config1 ESP register.
498 */
499#define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */
500#define ESP_CMD_FLUSH 0x01 /* FIFO Flush */
501#define ESP_CMD_RC 0x02 /* Chip reset */
502#define ESP_CMD_RS 0x03 /* SCSI bus reset */
503
504/* Group 2 commands: ESP must be an initiator and connected to a target
505 * for these commands to work.
506 */
507#define ESP_CMD_TI 0x10 /* Transfer Information */
508#define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */
509#define ESP_CMD_MOK 0x12 /* Message okie-dokie */
510#define ESP_CMD_TPAD 0x18 /* Transfer Pad */
511#define ESP_CMD_SATN 0x1a /* Set ATN */
512#define ESP_CMD_RATN 0x1b /* De-assert ATN */
513
514/* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected
515 * to a target as the initiator for these commands to work.
516 */
517#define ESP_CMD_SMSG 0x20 /* Send message */
518#define ESP_CMD_SSTAT 0x21 /* Send status */
519#define ESP_CMD_SDATA 0x22 /* Send data */
520#define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */
521#define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */
522#define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */
523#define ESP_CMD_DCNCT 0x27 /* Disconnect */
524#define ESP_CMD_RMSG 0x28 /* Receive Message */
525#define ESP_CMD_RCMD 0x29 /* Receive Command */
526#define ESP_CMD_RDATA 0x2a /* Receive Data */
527#define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */
528
529/* Group 4 commands: The ESP must be in the disconnected state and must
530 * not be connected to any targets as initiator for
531 * these commands to work.
532 */
533#define ESP_CMD_RSEL 0x40 /* Reselect */
534#define ESP_CMD_SEL 0x41 /* Select w/o ATN */
535#define ESP_CMD_SELA 0x42 /* Select w/ATN */
536#define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */
537#define ESP_CMD_ESEL 0x44 /* Enable selection */
538#define ESP_CMD_DSEL 0x45 /* Disable selections */
539#define ESP_CMD_SA3 0x46 /* Select w/ATN3 */
540#define ESP_CMD_RSEL3 0x47 /* Reselect3 */
541
542/* This bit enables the ESP's DMA */
543#define ESP_CMD_DMA 0x80 /* Do DMA? */
544
545/* ESP status register read-only */
546#define ESP_STAT_PIO 0x01 /* IO phase bit */
547#define ESP_STAT_PCD 0x02 /* CD phase bit */
548#define ESP_STAT_PMSG 0x04 /* MSG phase bit */
549#define ESP_STAT_PMASK 0x07 /* Mask of phase bits */
550#define ESP_STAT_TDONE 0x08 /* Transfer Completed */
551#define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */
552#define ESP_STAT_PERR 0x20 /* Parity error */
553#define ESP_STAT_SPAM 0x40 /* Real bad error */
554/* This indicates the 'interrupt pending' condition, it is a reserved
555 * bit on old revs of the ESP (ESP100, ESP100A, FAS100A).
556 */
557#define ESP_STAT_INTR 0x80 /* Interrupt */
558
559/* The status register can be masked with ESP_STAT_PMASK and compared
560 * with the following values to determine the current phase the ESP
561 * (at least thinks it) is in. For our purposes we also add our own
562 * software 'done' bit for our phase management engine.
563 */
564#define ESP_DOP (0) /* Data Out */
565#define ESP_DIP (ESP_STAT_PIO) /* Data In */
566#define ESP_CMDP (ESP_STAT_PCD) /* Command */
567#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */
568#define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */
569#define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
570
571/* ESP interrupt register read-only */
572#define ESP_INTR_S 0x01 /* Select w/o ATN */
573#define ESP_INTR_SATN 0x02 /* Select w/ATN */
574#define ESP_INTR_RSEL 0x04 /* Reselected */
575#define ESP_INTR_FDONE 0x08 /* Function done */
576#define ESP_INTR_BSERV 0x10 /* Bus service */
577#define ESP_INTR_DC 0x20 /* Disconnect */
578#define ESP_INTR_IC 0x40 /* Illegal command given */
579#define ESP_INTR_SR 0x80 /* SCSI bus reset detected */
580
581/* Interrupt status macros */
582#define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR))
583#define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC))
584#define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN))
585#define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S))
586#define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \
587 (ESP_SELECT_WITHOUT_ATN_IRQ(esp)))
588#define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL))
589
590/* ESP sequence step register read-only */
591#define ESP_STEP_VBITS 0x07 /* Valid bits */
592#define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */
593#define ESP_STEP_SID 0x01 /* One msg byte sent */
594#define ESP_STEP_NCMD 0x02 /* Was not in command phase */
595#define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd
596 * bytes to be lost
597 */
598#define ESP_STEP_FINI4 0x04 /* Command was sent ok */
599
600/* Ho hum, some ESP's set the step register to this as well... */
601#define ESP_STEP_FINI5 0x05
602#define ESP_STEP_FINI6 0x06
603#define ESP_STEP_FINI7 0x07
604#define ESP_STEP_SOM 0x08 /* Synchronous Offset Max */
605
606/* ESP chip-test register read-write */
607#define ESP_TEST_TARG 0x01 /* Target test mode */
608#define ESP_TEST_INI 0x02 /* Initiator test mode */
609#define ESP_TEST_TS 0x04 /* Tristate test mode */
610
611/* ESP unique ID register read-only, found on fas236+fas100a+fsc only */
612#define ESP_UID_F100A 0x00 /* FAS100A */
613#define ESP_UID_F236 0x02 /* FAS236 */
614#define ESP_UID_FSC 0xa2 /* NCR53CF9x-2 */
615#define ESP_UID_REV 0x07 /* ESP revision */
616#define ESP_UID_FAM 0xf8 /* ESP family */
617
618/* ESP fifo flags register read-only */
619/* Note that the following implies a 16 byte FIFO on the ESP. */
620#define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */
621#define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100,fsc) */
622#define ESP_FF_SSTEP 0xe0 /* Sequence step */
623
624/* ESP clock conversion factor register write-only */
625#define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */
626#define ESP_CCF_NEVER 0x01 /* Set it to this and die */
627#define ESP_CCF_F2 0x02 /* 10MHz */
628#define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */
629#define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */
630#define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */
631#define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */
632#define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */
633
634#define ESP_BUS_TIMEOUT 275 /* In milli-seconds */
635#define ESP_TIMEO_CONST 8192
636#define FSC_TIMEO_CONST 7668
637#define ESP_NEG_DEFP(mhz, cfact) \
638 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
639#define FSC_NEG_DEFP(mhz, cfact) \
640 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (7668 * (cfact)))
641#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))
642#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
643
644
645/* UGLY, UGLY, UGLY! */
646extern int nesps, esps_in_use, esps_running;
647
648/* For our interrupt engine. */
649#define for_each_esp(esp) \
650 for((esp) = espchain; (esp); (esp) = (esp)->next)
651
652
653/* External functions */
654extern void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs);
655extern struct NCR_ESP *esp_allocate(struct scsi_host_template *, void *, int);
656extern void esp_deallocate(struct NCR_ESP *);
657extern void esp_release(void);
658extern void esp_initialize(struct NCR_ESP *);
659extern irqreturn_t esp_intr(int, void *);
660extern const char *esp_info(struct Scsi_Host *);
661extern int esp_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
662extern int esp_abort(Scsi_Cmnd *);
663extern int esp_reset(Scsi_Cmnd *);
664extern int esp_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, int length,
665 int inout);
666extern int esp_slave_alloc(struct scsi_device *);
667extern void esp_slave_destroy(struct scsi_device *);
668#endif /* !(NCR53C9X_H) */
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index d7235f42cf5f..bfd0e64964ac 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -859,44 +859,31 @@ static int setinqserial(struct aac_dev *dev, void *data, int cid)
859 le32_to_cpu(dev->adapter_info.serial[0]), cid); 859 le32_to_cpu(dev->adapter_info.serial[0]), cid);
860} 860}
861 861
862static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code, 862static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
863 u8 a_sense_code, u8 incorrect_length, 863 u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
864 u8 bit_pointer, u16 field_pointer,
865 u32 residue)
866{ 864{
867 sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */ 865 u8 *sense_buf = (u8 *)sense_data;
866 /* Sense data valid, err code 70h */
867 sense_buf[0] = 0x70; /* No info field */
868 sense_buf[1] = 0; /* Segment number, always zero */ 868 sense_buf[1] = 0; /* Segment number, always zero */
869 869
870 if (incorrect_length) { 870 sense_buf[2] = sense_key; /* Sense key */
871 sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
872 sense_buf[3] = BYTE3(residue);
873 sense_buf[4] = BYTE2(residue);
874 sense_buf[5] = BYTE1(residue);
875 sense_buf[6] = BYTE0(residue);
876 } else
877 sense_buf[2] = sense_key; /* Sense key */
878
879 if (sense_key == ILLEGAL_REQUEST)
880 sense_buf[7] = 10; /* Additional sense length */
881 else
882 sense_buf[7] = 6; /* Additional sense length */
883 871
884 sense_buf[12] = sense_code; /* Additional sense code */ 872 sense_buf[12] = sense_code; /* Additional sense code */
885 sense_buf[13] = a_sense_code; /* Additional sense code qualifier */ 873 sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
874
886 if (sense_key == ILLEGAL_REQUEST) { 875 if (sense_key == ILLEGAL_REQUEST) {
887 sense_buf[15] = 0; 876 sense_buf[7] = 10; /* Additional sense length */
888 877
889 if (sense_code == SENCODE_INVALID_PARAM_FIELD) 878 sense_buf[15] = bit_pointer;
890 sense_buf[15] = 0x80;/* Std sense key specific field */
891 /* Illegal parameter is in the parameter block */ 879 /* Illegal parameter is in the parameter block */
892
893 if (sense_code == SENCODE_INVALID_CDB_FIELD) 880 if (sense_code == SENCODE_INVALID_CDB_FIELD)
894 sense_buf[15] = 0xc0;/* Std sense key specific field */ 881 sense_buf[15] |= 0xc0;/* Std sense key specific field */
895 /* Illegal parameter is in the CDB block */ 882 /* Illegal parameter is in the CDB block */
896 sense_buf[15] |= bit_pointer;
897 sense_buf[16] = field_pointer >> 8; /* MSB */ 883 sense_buf[16] = field_pointer >> 8; /* MSB */
898 sense_buf[17] = field_pointer; /* LSB */ 884 sense_buf[17] = field_pointer; /* LSB */
899 } 885 } else
886 sense_buf[7] = 6; /* Additional sense length */
900} 887}
901 888
902static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba) 889static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
@@ -906,11 +893,9 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
906 dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); 893 dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
907 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 894 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
908 SAM_STAT_CHECK_CONDITION; 895 SAM_STAT_CHECK_CONDITION;
909 set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 896 set_sense(&dev->fsa_dev[cid].sense_data,
910 HARDWARE_ERROR, 897 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
911 SENCODE_INTERNAL_TARGET_FAILURE, 898 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
912 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
913 0, 0);
914 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 899 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
915 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 900 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
916 SCSI_SENSE_BUFFERSIZE)); 901 SCSI_SENSE_BUFFERSIZE));
@@ -1520,11 +1505,9 @@ static void io_callback(void *context, struct fib * fibptr)
1520 le32_to_cpu(readreply->status)); 1505 le32_to_cpu(readreply->status));
1521#endif 1506#endif
1522 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 1507 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1523 set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 1508 set_sense(&dev->fsa_dev[cid].sense_data,
1524 HARDWARE_ERROR, 1509 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
1525 SENCODE_INTERNAL_TARGET_FAILURE, 1510 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
1526 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1527 0, 0);
1528 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 1511 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1529 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 1512 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1530 SCSI_SENSE_BUFFERSIZE)); 1513 SCSI_SENSE_BUFFERSIZE));
@@ -1733,11 +1716,9 @@ static void synchronize_callback(void *context, struct fib *fibptr)
1733 le32_to_cpu(synchronizereply->status)); 1716 le32_to_cpu(synchronizereply->status));
1734 cmd->result = DID_OK << 16 | 1717 cmd->result = DID_OK << 16 |
1735 COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 1718 COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1736 set_sense((u8 *)&dev->fsa_dev[cid].sense_data, 1719 set_sense(&dev->fsa_dev[cid].sense_data,
1737 HARDWARE_ERROR, 1720 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
1738 SENCODE_INTERNAL_TARGET_FAILURE, 1721 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
1739 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1740 0, 0);
1741 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 1722 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1742 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 1723 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1743 SCSI_SENSE_BUFFERSIZE)); 1724 SCSI_SENSE_BUFFERSIZE));
@@ -1945,10 +1926,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1945 { 1926 {
1946 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0])); 1927 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
1947 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 1928 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1948 set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 1929 set_sense(&dev->fsa_dev[cid].sense_data,
1949 ILLEGAL_REQUEST, 1930 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
1950 SENCODE_INVALID_COMMAND, 1931 ASENCODE_INVALID_COMMAND, 0, 0);
1951 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
1952 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 1932 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1953 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 1933 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1954 SCSI_SENSE_BUFFERSIZE)); 1934 SCSI_SENSE_BUFFERSIZE));
@@ -1995,10 +1975,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1995 scsicmd->result = DID_OK << 16 | 1975 scsicmd->result = DID_OK << 16 |
1996 COMMAND_COMPLETE << 8 | 1976 COMMAND_COMPLETE << 8 |
1997 SAM_STAT_CHECK_CONDITION; 1977 SAM_STAT_CHECK_CONDITION;
1998 set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 1978 set_sense(&dev->fsa_dev[cid].sense_data,
1999 ILLEGAL_REQUEST, 1979 ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
2000 SENCODE_INVALID_CDB_FIELD, 1980 ASENCODE_NO_SENSE, 7, 2);
2001 ASENCODE_NO_SENSE, 0, 7, 2, 0);
2002 memcpy(scsicmd->sense_buffer, 1981 memcpy(scsicmd->sense_buffer,
2003 &dev->fsa_dev[cid].sense_data, 1982 &dev->fsa_dev[cid].sense_data,
2004 min_t(size_t, 1983 min_t(size_t,
@@ -2254,9 +2233,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2254 */ 2233 */
2255 dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0])); 2234 dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
2256 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 2235 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2257 set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 2236 set_sense(&dev->fsa_dev[cid].sense_data,
2258 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, 2237 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
2259 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0); 2238 ASENCODE_INVALID_COMMAND, 0, 0);
2260 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 2239 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2261 min_t(size_t, 2240 min_t(size_t,
2262 sizeof(dev->fsa_dev[cid].sense_data), 2241 sizeof(dev->fsa_dev[cid].sense_data),
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index f8afa358b6b6..abef05146d75 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -243,6 +243,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
243 * Search the list of AdapterFibContext addresses on the adapter 243 * Search the list of AdapterFibContext addresses on the adapter
244 * to be sure this is a valid address 244 * to be sure this is a valid address
245 */ 245 */
246 spin_lock_irqsave(&dev->fib_lock, flags);
246 entry = dev->fib_list.next; 247 entry = dev->fib_list.next;
247 fibctx = NULL; 248 fibctx = NULL;
248 249
@@ -251,24 +252,25 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
251 /* 252 /*
252 * Extract the AdapterFibContext from the Input parameters. 253 * Extract the AdapterFibContext from the Input parameters.
253 */ 254 */
254 if (fibctx->unique == f.fibctx) { /* We found a winner */ 255 if (fibctx->unique == f.fibctx) { /* We found a winner */
255 break; 256 break;
256 } 257 }
257 entry = entry->next; 258 entry = entry->next;
258 fibctx = NULL; 259 fibctx = NULL;
259 } 260 }
260 if (!fibctx) { 261 if (!fibctx) {
262 spin_unlock_irqrestore(&dev->fib_lock, flags);
261 dprintk ((KERN_INFO "Fib Context not found\n")); 263 dprintk ((KERN_INFO "Fib Context not found\n"));
262 return -EINVAL; 264 return -EINVAL;
263 } 265 }
264 266
265 if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || 267 if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
266 (fibctx->size != sizeof(struct aac_fib_context))) { 268 (fibctx->size != sizeof(struct aac_fib_context))) {
269 spin_unlock_irqrestore(&dev->fib_lock, flags);
267 dprintk ((KERN_INFO "Fib Context corrupt?\n")); 270 dprintk ((KERN_INFO "Fib Context corrupt?\n"));
268 return -EINVAL; 271 return -EINVAL;
269 } 272 }
270 status = 0; 273 status = 0;
271 spin_lock_irqsave(&dev->fib_lock, flags);
272 /* 274 /*
273 * If there are no fibs to send back, then either wait or return 275 * If there are no fibs to send back, then either wait or return
274 * -EAGAIN 276 * -EAGAIN
@@ -414,8 +416,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
414 * @arg: ioctl arguments 416 * @arg: ioctl arguments
415 * 417 *
416 * This routine returns the driver version. 418 * This routine returns the driver version.
417 * Under Linux, there have been no version incompatibilities, so this is 419 * Under Linux, there have been no version incompatibilities, so this is
418 * simple! 420 * simple!
419 */ 421 */
420 422
421static int check_revision(struct aac_dev *dev, void __user *arg) 423static int check_revision(struct aac_dev *dev, void __user *arg)
@@ -463,7 +465,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
463 u32 data_dir; 465 u32 data_dir;
464 void __user *sg_user[32]; 466 void __user *sg_user[32];
465 void *sg_list[32]; 467 void *sg_list[32];
466 u32 sg_indx = 0; 468 u32 sg_indx = 0;
467 u32 byte_count = 0; 469 u32 byte_count = 0;
468 u32 actual_fibsize64, actual_fibsize = 0; 470 u32 actual_fibsize64, actual_fibsize = 0;
469 int i; 471 int i;
@@ -517,11 +519,11 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
517 // Fix up srb for endian and force some values 519 // Fix up srb for endian and force some values
518 520
519 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this 521 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
520 srbcmd->channel = cpu_to_le32(user_srbcmd->channel); 522 srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
521 srbcmd->id = cpu_to_le32(user_srbcmd->id); 523 srbcmd->id = cpu_to_le32(user_srbcmd->id);
522 srbcmd->lun = cpu_to_le32(user_srbcmd->lun); 524 srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
523 srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout); 525 srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
524 srbcmd->flags = cpu_to_le32(flags); 526 srbcmd->flags = cpu_to_le32(flags);
525 srbcmd->retry_limit = 0; // Obsolete parameter 527 srbcmd->retry_limit = 0; // Obsolete parameter
526 srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size); 528 srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
527 memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb)); 529 memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
@@ -786,9 +788,9 @@ static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
786 pci_info.bus = dev->pdev->bus->number; 788 pci_info.bus = dev->pdev->bus->number;
787 pci_info.slot = PCI_SLOT(dev->pdev->devfn); 789 pci_info.slot = PCI_SLOT(dev->pdev->devfn);
788 790
789 if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) { 791 if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
790 dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n")); 792 dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
791 return -EFAULT; 793 return -EFAULT;
792 } 794 }
793 return 0; 795 return 0;
794} 796}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index fb0886140dd7..e80d2a0c46af 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1130,31 +1130,29 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1130 if (error < 0) 1130 if (error < 0)
1131 goto out_deinit; 1131 goto out_deinit;
1132 1132
1133 if (!(aac->adapter_info.options & AAC_OPT_NEW_COMM)) {
1134 error = pci_set_dma_max_seg_size(pdev, 65536);
1135 if (error)
1136 goto out_deinit;
1137 }
1138
1139 /* 1133 /*
1140 * Lets override negotiations and drop the maximum SG limit to 34 1134 * Lets override negotiations and drop the maximum SG limit to 34
1141 */ 1135 */
1142 if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) && 1136 if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
1143 (aac->scsi_host_ptr->sg_tablesize > 34)) { 1137 (shost->sg_tablesize > 34)) {
1144 aac->scsi_host_ptr->sg_tablesize = 34; 1138 shost->sg_tablesize = 34;
1145 aac->scsi_host_ptr->max_sectors 1139 shost->max_sectors = (shost->sg_tablesize * 8) + 112;
1146 = (aac->scsi_host_ptr->sg_tablesize * 8) + 112;
1147 } 1140 }
1148 1141
1149 if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) && 1142 if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
1150 (aac->scsi_host_ptr->sg_tablesize > 17)) { 1143 (shost->sg_tablesize > 17)) {
1151 aac->scsi_host_ptr->sg_tablesize = 17; 1144 shost->sg_tablesize = 17;
1152 aac->scsi_host_ptr->max_sectors 1145 shost->max_sectors = (shost->sg_tablesize * 8) + 112;
1153 = (aac->scsi_host_ptr->sg_tablesize * 8) + 112;
1154 } 1146 }
1155 1147
1148 error = pci_set_dma_max_seg_size(pdev,
1149 (aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
1150 (shost->max_sectors << 9) : 65536);
1151 if (error)
1152 goto out_deinit;
1153
1156 /* 1154 /*
1157 * Firware printf works only with older firmware. 1155 * Firmware printf works only with older firmware.
1158 */ 1156 */
1159 if (aac_drivers[index].quirks & AAC_QUIRK_34SG) 1157 if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
1160 aac->printf_enabled = 1; 1158 aac->printf_enabled = 1;
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 374ed025dc5a..ccef891d642f 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -12261,7 +12261,7 @@ static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
12261/* 12261/*
12262 * Write the EEPROM from 'cfg_buf'. 12262 * Write the EEPROM from 'cfg_buf'.
12263 */ 12263 */
12264void __devinit 12264static void __devinit
12265AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf) 12265AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
12266{ 12266{
12267 ushort *wbuf; 12267 ushort *wbuf;
@@ -12328,7 +12328,7 @@ AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
12328/* 12328/*
12329 * Write the EEPROM from 'cfg_buf'. 12329 * Write the EEPROM from 'cfg_buf'.
12330 */ 12330 */
12331void __devinit 12331static void __devinit
12332AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf) 12332AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
12333{ 12333{
12334 ushort *wbuf; 12334 ushort *wbuf;
@@ -12395,7 +12395,7 @@ AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
12395/* 12395/*
12396 * Write the EEPROM from 'cfg_buf'. 12396 * Write the EEPROM from 'cfg_buf'.
12397 */ 12397 */
12398void __devinit 12398static void __devinit
12399AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf) 12399AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
12400{ 12400{
12401 ushort *wbuf; 12401 ushort *wbuf;
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index a67e29f83ae5..57786502e3ec 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,7 +48,7 @@ struct class_device_attribute;
48/*The limit of outstanding scsi command that firmware can handle*/ 48/*The limit of outstanding scsi command that firmware can handle*/
49#define ARCMSR_MAX_OUTSTANDING_CMD 256 49#define ARCMSR_MAX_OUTSTANDING_CMD 256
50#define ARCMSR_MAX_FREECCB_NUM 320 50#define ARCMSR_MAX_FREECCB_NUM 320
51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/08/30" 51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24"
52#define ARCMSR_SCSI_INITIATOR_ID 255 52#define ARCMSR_SCSI_INITIATOR_ID 255
53#define ARCMSR_MAX_XFER_SECTORS 512 53#define ARCMSR_MAX_XFER_SECTORS 512
54#define ARCMSR_MAX_XFER_SECTORS_B 4096 54#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -248,6 +248,7 @@ struct FIRMWARE_INFO
248#define ARCMSR_MESSAGE_START_BGRB 0x00060008 248#define ARCMSR_MESSAGE_START_BGRB 0x00060008
249#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008 249#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
250#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008 250#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
251#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
251/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */ 252/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
252#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000 253#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
253/* ioctl transfer */ 254/* ioctl transfer */
@@ -256,6 +257,7 @@ struct FIRMWARE_INFO
256#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002 257#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
257#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004 258#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
258#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008 259#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
260#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
259 261
260/* data tunnel buffer between user space program and its firmware */ 262/* data tunnel buffer between user space program and its firmware */
261/* user space data to iop 128bytes */ 263/* user space data to iop 128bytes */
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f4a202e8df26..4f9ff32cfed0 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -315,9 +315,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
315 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F)); 315 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
316 } 316 }
317 317
318 reg = (struct MessageUnit_B *)(dma_coherent +
319 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
320
321 dma_addr = dma_coherent_handle; 318 dma_addr = dma_coherent_handle;
322 ccb_tmp = (struct CommandControlBlock *)dma_coherent; 319 ccb_tmp = (struct CommandControlBlock *)dma_coherent;
323 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 320 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
@@ -371,8 +368,8 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
371 368
372out: 369out:
373 dma_free_coherent(&acb->pdev->dev, 370 dma_free_coherent(&acb->pdev->dev,
374 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20, 371 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
375 acb->dma_coherent, acb->dma_coherent_handle); 372 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
376 return -ENOMEM; 373 return -ENOMEM;
377} 374}
378 375
@@ -509,6 +506,7 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
509 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 506 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
510 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN 507 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
511 , reg->iop2drv_doorbell_reg); 508 , reg->iop2drv_doorbell_reg);
509 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
512 return 0x00; 510 return 0x00;
513 } 511 }
514 msleep(10); 512 msleep(10);
@@ -748,6 +746,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t fla
748 , ccb->startdone 746 , ccb->startdone
749 , atomic_read(&acb->ccboutstandingcount)); 747 , atomic_read(&acb->ccboutstandingcount));
750 } 748 }
749 else
751 arcmsr_report_ccb_state(acb, ccb, flag_ccb); 750 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
752} 751}
753 752
@@ -886,7 +885,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
886 } 885 }
887} 886}
888 887
889static void arcmsr_build_ccb(struct AdapterControlBlock *acb, 888static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
890 struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd) 889 struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
891{ 890{
892 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; 891 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
@@ -906,6 +905,8 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
906 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); 905 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
907 906
908 nseg = scsi_dma_map(pcmd); 907 nseg = scsi_dma_map(pcmd);
908 if (nseg > ARCMSR_MAX_SG_ENTRIES)
909 return FAILED;
909 BUG_ON(nseg < 0); 910 BUG_ON(nseg < 0);
910 911
911 if (nseg) { 912 if (nseg) {
@@ -946,6 +947,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
946 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; 947 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
947 ccb->ccb_flags |= CCB_FLAG_WRITE; 948 ccb->ccb_flags |= CCB_FLAG_WRITE;
948 } 949 }
950 return SUCCESS;
949} 951}
950 952
951static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) 953static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
@@ -1036,18 +1038,22 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1036 switch (acb->adapter_type) { 1038 switch (acb->adapter_type) {
1037 case ACB_ADAPTER_TYPE_A: { 1039 case ACB_ADAPTER_TYPE_A: {
1038 iounmap(acb->pmuA); 1040 iounmap(acb->pmuA);
1041 dma_free_coherent(&acb->pdev->dev,
1042 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
1043 acb->dma_coherent,
1044 acb->dma_coherent_handle);
1039 break; 1045 break;
1040 } 1046 }
1041 case ACB_ADAPTER_TYPE_B: { 1047 case ACB_ADAPTER_TYPE_B: {
1042 struct MessageUnit_B *reg = acb->pmuB; 1048 struct MessageUnit_B *reg = acb->pmuB;
1043 iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); 1049 iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
1044 iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); 1050 iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
1051 dma_free_coherent(&acb->pdev->dev,
1052 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
1053 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
1045 } 1054 }
1046 } 1055 }
1047 dma_free_coherent(&acb->pdev->dev, 1056
1048 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
1049 acb->dma_coherent,
1050 acb->dma_coherent_handle);
1051} 1057}
1052 1058
1053void arcmsr_iop_message_read(struct AdapterControlBlock *acb) 1059void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
@@ -1273,7 +1279,9 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1273 return 1; 1279 return 1;
1274 1280
1275 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); 1281 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
1276 1282 /*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/
1283 readl(reg->iop2drv_doorbell_reg);
1284 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
1277 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { 1285 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1278 arcmsr_iop2drv_data_wrote_handle(acb); 1286 arcmsr_iop2drv_data_wrote_handle(acb);
1279 } 1287 }
@@ -1380,12 +1388,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1380 1388
1381 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1389 case ARCMSR_MESSAGE_READ_RQBUFFER: {
1382 unsigned long *ver_addr; 1390 unsigned long *ver_addr;
1383 dma_addr_t buf_handle;
1384 uint8_t *pQbuffer, *ptmpQbuffer; 1391 uint8_t *pQbuffer, *ptmpQbuffer;
1385 int32_t allxfer_len = 0; 1392 int32_t allxfer_len = 0;
1393 void *tmp;
1386 1394
1387 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle); 1395 tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
1388 if (!ver_addr) { 1396 ver_addr = (unsigned long *)tmp;
1397 if (!tmp) {
1389 retvalue = ARCMSR_MESSAGE_FAIL; 1398 retvalue = ARCMSR_MESSAGE_FAIL;
1390 goto message_out; 1399 goto message_out;
1391 } 1400 }
@@ -1421,18 +1430,19 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1421 memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len); 1430 memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len);
1422 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1431 pcmdmessagefld->cmdmessage.Length = allxfer_len;
1423 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1432 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1424 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle); 1433 kfree(tmp);
1425 } 1434 }
1426 break; 1435 break;
1427 1436
1428 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1437 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1429 unsigned long *ver_addr; 1438 unsigned long *ver_addr;
1430 dma_addr_t buf_handle;
1431 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1439 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1432 uint8_t *pQbuffer, *ptmpuserbuffer; 1440 uint8_t *pQbuffer, *ptmpuserbuffer;
1441 void *tmp;
1433 1442
1434 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle); 1443 tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
1435 if (!ver_addr) { 1444 ver_addr = (unsigned long *)tmp;
1445 if (!tmp) {
1436 retvalue = ARCMSR_MESSAGE_FAIL; 1446 retvalue = ARCMSR_MESSAGE_FAIL;
1437 goto message_out; 1447 goto message_out;
1438 } 1448 }
@@ -1482,7 +1492,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1482 retvalue = ARCMSR_MESSAGE_FAIL; 1492 retvalue = ARCMSR_MESSAGE_FAIL;
1483 } 1493 }
1484 } 1494 }
1485 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle); 1495 kfree(tmp);
1486 } 1496 }
1487 break; 1497 break;
1488 1498
@@ -1682,8 +1692,11 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1682 ccb = arcmsr_get_freeccb(acb); 1692 ccb = arcmsr_get_freeccb(acb);
1683 if (!ccb) 1693 if (!ccb)
1684 return SCSI_MLQUEUE_HOST_BUSY; 1694 return SCSI_MLQUEUE_HOST_BUSY;
1685 1695 if ( arcmsr_build_ccb( acb, ccb, cmd ) == FAILED ) {
1686 arcmsr_build_ccb(acb, ccb, cmd); 1696 cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
1697 cmd->scsi_done(cmd);
1698 return 0;
1699 }
1687 arcmsr_post_ccb(acb, ccb); 1700 arcmsr_post_ccb(acb, ccb);
1688 return 0; 1701 return 0;
1689} 1702}
@@ -1844,7 +1857,7 @@ static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
1844 } 1857 }
1845} 1858}
1846 1859
1847static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \ 1860static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
1848 struct CommandControlBlock *poll_ccb) 1861 struct CommandControlBlock *poll_ccb)
1849{ 1862{
1850 struct MessageUnit_B *reg = acb->pmuB; 1863 struct MessageUnit_B *reg = acb->pmuB;
@@ -1878,7 +1891,7 @@ static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
1878 (acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/ 1891 (acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
1879 poll_ccb_done = (ccb == poll_ccb) ? 1:0; 1892 poll_ccb_done = (ccb == poll_ccb) ? 1:0;
1880 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { 1893 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
1881 if (ccb->startdone == ARCMSR_CCB_ABORTED) { 1894 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
1882 printk(KERN_NOTICE "arcmsr%d: \ 1895 printk(KERN_NOTICE "arcmsr%d: \
1883 scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n" 1896 scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n"
1884 ,acb->host->host_no 1897 ,acb->host->host_no
@@ -1901,7 +1914,7 @@ static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
1901 } /*drain reply FIFO*/ 1914 } /*drain reply FIFO*/
1902} 1915}
1903 1916
1904static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, \ 1917static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1905 struct CommandControlBlock *poll_ccb) 1918 struct CommandControlBlock *poll_ccb)
1906{ 1919{
1907 switch (acb->adapter_type) { 1920 switch (acb->adapter_type) {
@@ -2026,6 +2039,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
2026 do { 2039 do {
2027 firmware_state = readl(reg->iop2drv_doorbell_reg); 2040 firmware_state = readl(reg->iop2drv_doorbell_reg);
2028 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); 2041 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
2042 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
2029 } 2043 }
2030 break; 2044 break;
2031 } 2045 }
@@ -2090,19 +2104,39 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
2090 } 2104 }
2091} 2105}
2092 2106
2107static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
2108{
2109 switch (acb->adapter_type) {
2110 case ACB_ADAPTER_TYPE_A:
2111 return;
2112 case ACB_ADAPTER_TYPE_B:
2113 {
2114 struct MessageUnit_B *reg = acb->pmuB;
2115 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg);
2116 if(arcmsr_hbb_wait_msgint_ready(acb)) {
2117 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
2118 return;
2119 }
2120 }
2121 break;
2122 }
2123 return;
2124}
2125
2093static void arcmsr_iop_init(struct AdapterControlBlock *acb) 2126static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2094{ 2127{
2095 uint32_t intmask_org; 2128 uint32_t intmask_org;
2096 2129
2097 arcmsr_wait_firmware_ready(acb);
2098 arcmsr_iop_confirm(acb);
2099 /* disable all outbound interrupt */ 2130 /* disable all outbound interrupt */
2100 intmask_org = arcmsr_disable_outbound_ints(acb); 2131 intmask_org = arcmsr_disable_outbound_ints(acb);
2132 arcmsr_wait_firmware_ready(acb);
2133 arcmsr_iop_confirm(acb);
2101 arcmsr_get_firmware_spec(acb); 2134 arcmsr_get_firmware_spec(acb);
2102 /*start background rebuild*/ 2135 /*start background rebuild*/
2103 arcmsr_start_adapter_bgrb(acb); 2136 arcmsr_start_adapter_bgrb(acb);
2104 /* empty doorbell Qbuffer if door bell ringed */ 2137 /* empty doorbell Qbuffer if door bell ringed */
2105 arcmsr_clear_doorbell_queue_buffer(acb); 2138 arcmsr_clear_doorbell_queue_buffer(acb);
2139 arcmsr_enable_eoi_mode(acb);
2106 /* enable outbound Post Queue,outbound doorbell Interrupt */ 2140 /* enable outbound Post Queue,outbound doorbell Interrupt */
2107 arcmsr_enable_outbound_ints(acb, intmask_org); 2141 arcmsr_enable_outbound_ints(acb, intmask_org);
2108 acb->acb_flags |= ACB_F_IOP_INITED; 2142 acb->acb_flags |= ACB_F_IOP_INITED;
@@ -2275,6 +2309,7 @@ static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
2275 arcmsr_start_adapter_bgrb(acb); 2309 arcmsr_start_adapter_bgrb(acb);
2276 /* empty doorbell Qbuffer if door bell ringed */ 2310 /* empty doorbell Qbuffer if door bell ringed */
2277 arcmsr_clear_doorbell_queue_buffer(acb); 2311 arcmsr_clear_doorbell_queue_buffer(acb);
2312 arcmsr_enable_eoi_mode(acb);
2278 /* enable outbound Post Queue,outbound doorbell Interrupt */ 2313 /* enable outbound Post Queue,outbound doorbell Interrupt */
2279 arcmsr_enable_outbound_ints(acb, intmask_org); 2314 arcmsr_enable_outbound_ints(acb, intmask_org);
2280 acb->acb_flags |= ACB_F_IOP_INITED; 2315 acb->acb_flags |= ACB_F_IOP_INITED;
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index eceacf6d49ea..3bedf2466bd1 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -1790,7 +1790,7 @@ int acornscsi_starttransfer(AS_Host *host)
1790 return 0; 1790 return 0;
1791 } 1791 }
1792 1792
1793 residual = host->SCpnt->request_bufflen - host->scsi.SCp.scsi_xferred; 1793 residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred;
1794 1794
1795 sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); 1795 sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
1796 sbic_arm_writenext(host->scsi.io_port, residual >> 16); 1796 sbic_arm_writenext(host->scsi.io_port, residual >> 16);
@@ -2270,7 +2270,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2270 case 0x4b: /* -> PHASE_STATUSIN */ 2270 case 0x4b: /* -> PHASE_STATUSIN */
2271 case 0x8b: /* -> PHASE_STATUSIN */ 2271 case 0x8b: /* -> PHASE_STATUSIN */
2272 /* DATA IN -> STATUS */ 2272 /* DATA IN -> STATUS */
2273 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2273 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2274 acornscsi_sbic_xfcount(host); 2274 acornscsi_sbic_xfcount(host);
2275 acornscsi_dma_stop(host); 2275 acornscsi_dma_stop(host);
2276 acornscsi_readstatusbyte(host); 2276 acornscsi_readstatusbyte(host);
@@ -2281,7 +2281,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2281 case 0x4e: /* -> PHASE_MSGOUT */ 2281 case 0x4e: /* -> PHASE_MSGOUT */
2282 case 0x8e: /* -> PHASE_MSGOUT */ 2282 case 0x8e: /* -> PHASE_MSGOUT */
2283 /* DATA IN -> MESSAGE OUT */ 2283 /* DATA IN -> MESSAGE OUT */
2284 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2284 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2285 acornscsi_sbic_xfcount(host); 2285 acornscsi_sbic_xfcount(host);
2286 acornscsi_dma_stop(host); 2286 acornscsi_dma_stop(host);
2287 acornscsi_sendmessage(host); 2287 acornscsi_sendmessage(host);
@@ -2291,7 +2291,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2291 case 0x4f: /* message in */ 2291 case 0x4f: /* message in */
2292 case 0x8f: /* message in */ 2292 case 0x8f: /* message in */
2293 /* DATA IN -> MESSAGE IN */ 2293 /* DATA IN -> MESSAGE IN */
2294 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2294 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2295 acornscsi_sbic_xfcount(host); 2295 acornscsi_sbic_xfcount(host);
2296 acornscsi_dma_stop(host); 2296 acornscsi_dma_stop(host);
2297 acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ 2297 acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */
@@ -2319,7 +2319,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2319 case 0x4b: /* -> PHASE_STATUSIN */ 2319 case 0x4b: /* -> PHASE_STATUSIN */
2320 case 0x8b: /* -> PHASE_STATUSIN */ 2320 case 0x8b: /* -> PHASE_STATUSIN */
2321 /* DATA OUT -> STATUS */ 2321 /* DATA OUT -> STATUS */
2322 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2322 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2323 acornscsi_sbic_xfcount(host); 2323 acornscsi_sbic_xfcount(host);
2324 acornscsi_dma_stop(host); 2324 acornscsi_dma_stop(host);
2325 acornscsi_dma_adjust(host); 2325 acornscsi_dma_adjust(host);
@@ -2331,7 +2331,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2331 case 0x4e: /* -> PHASE_MSGOUT */ 2331 case 0x4e: /* -> PHASE_MSGOUT */
2332 case 0x8e: /* -> PHASE_MSGOUT */ 2332 case 0x8e: /* -> PHASE_MSGOUT */
2333 /* DATA OUT -> MESSAGE OUT */ 2333 /* DATA OUT -> MESSAGE OUT */
2334 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2334 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2335 acornscsi_sbic_xfcount(host); 2335 acornscsi_sbic_xfcount(host);
2336 acornscsi_dma_stop(host); 2336 acornscsi_dma_stop(host);
2337 acornscsi_dma_adjust(host); 2337 acornscsi_dma_adjust(host);
@@ -2342,7 +2342,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2342 case 0x4f: /* message in */ 2342 case 0x4f: /* message in */
2343 case 0x8f: /* message in */ 2343 case 0x8f: /* message in */
2344 /* DATA OUT -> MESSAGE IN */ 2344 /* DATA OUT -> MESSAGE IN */
2345 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2345 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2346 acornscsi_sbic_xfcount(host); 2346 acornscsi_sbic_xfcount(host);
2347 acornscsi_dma_stop(host); 2347 acornscsi_dma_stop(host);
2348 acornscsi_dma_adjust(host); 2348 acornscsi_dma_adjust(host);
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
index bb6550e31926..138a521ba1a8 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/scsi.h
@@ -18,17 +18,32 @@
18 * The scatter-gather list handling. This contains all 18 * The scatter-gather list handling. This contains all
19 * the yucky stuff that needs to be fixed properly. 19 * the yucky stuff that needs to be fixed properly.
20 */ 20 */
21
22/*
23 * copy_SCp_to_sg() Assumes contiguous allocation at @sg of at-most @max
24 * entries of uninitialized memory. SCp is from scsi-ml and has a valid
25 * (possibly chained) sg-list
26 */
21static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max) 27static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max)
22{ 28{
23 int bufs = SCp->buffers_residual; 29 int bufs = SCp->buffers_residual;
24 30
31 /* FIXME: It should be easy for drivers to loop on copy_SCp_to_sg().
32 * and to remove this BUG_ON. Use min() in-its-place
33 */
25 BUG_ON(bufs + 1 > max); 34 BUG_ON(bufs + 1 > max);
26 35
27 sg_set_buf(sg, SCp->ptr, SCp->this_residual); 36 sg_set_buf(sg, SCp->ptr, SCp->this_residual);
28 37
29 if (bufs) 38 if (bufs) {
30 memcpy(sg + 1, SCp->buffer + 1, 39 struct scatterlist *src_sg;
31 sizeof(struct scatterlist) * bufs); 40 unsigned i;
41
42 for_each_sg(sg_next(SCp->buffer), src_sg, bufs, i)
43 *(++sg) = *src_sg;
44 sg_mark_end(sg);
45 }
46
32 return bufs + 1; 47 return bufs + 1;
33} 48}
34 49
@@ -36,7 +51,7 @@ static inline int next_SCp(struct scsi_pointer *SCp)
36{ 51{
37 int ret = SCp->buffers_residual; 52 int ret = SCp->buffers_residual;
38 if (ret) { 53 if (ret) {
39 SCp->buffer++; 54 SCp->buffer = sg_next(SCp->buffer);
40 SCp->buffers_residual--; 55 SCp->buffers_residual--;
41 SCp->ptr = sg_virt(SCp->buffer); 56 SCp->ptr = sg_virt(SCp->buffer);
42 SCp->this_residual = SCp->buffer->length; 57 SCp->this_residual = SCp->buffer->length;
@@ -68,46 +83,46 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt)
68{ 83{
69 memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer)); 84 memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));
70 85
71 if (SCpnt->use_sg) { 86 if (scsi_bufflen(SCpnt)) {
72 unsigned long len = 0; 87 unsigned long len = 0;
73 int buf;
74 88
75 SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer; 89 SCpnt->SCp.buffer = scsi_sglist(SCpnt);
76 SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1; 90 SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
77 SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer); 91 SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer);
78 SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; 92 SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
79 SCpnt->SCp.phase = SCpnt->request_bufflen; 93 SCpnt->SCp.phase = scsi_bufflen(SCpnt);
80 94
81#ifdef BELT_AND_BRACES 95#ifdef BELT_AND_BRACES
82 /* 96 { /*
83 * Calculate correct buffer length. Some commands 97 * Calculate correct buffer length. Some commands
84 * come in with the wrong request_bufflen. 98 * come in with the wrong scsi_bufflen.
85 */ 99 */
86 for (buf = 0; buf <= SCpnt->SCp.buffers_residual; buf++) 100 struct scatterlist *sg;
87 len += SCpnt->SCp.buffer[buf].length; 101 unsigned i, sg_count = scsi_sg_count(SCpnt);
88 102
89 if (SCpnt->request_bufflen != len) 103 scsi_for_each_sg(SCpnt, sg, sg_count, i)
90 printk(KERN_WARNING "scsi%d.%c: bad request buffer " 104 len += sg->length;
91 "length %d, should be %ld\n", SCpnt->device->host->host_no, 105
92 '0' + SCpnt->device->id, SCpnt->request_bufflen, len); 106 if (scsi_bufflen(SCpnt) != len) {
93 SCpnt->request_bufflen = len; 107 printk(KERN_WARNING
108 "scsi%d.%c: bad request buffer "
109 "length %d, should be %ld\n",
110 SCpnt->device->host->host_no,
111 '0' + SCpnt->device->id,
112 scsi_bufflen(SCpnt), len);
113 /*
114 * FIXME: Totaly naive fixup. We should abort
115 * with error
116 */
117 SCpnt->SCp.phase =
118 min_t(unsigned long, len,
119 scsi_bufflen(SCpnt));
120 }
121 }
94#endif 122#endif
95 } else { 123 } else {
96 SCpnt->SCp.ptr = (unsigned char *)SCpnt->request_buffer;
97 SCpnt->SCp.this_residual = SCpnt->request_bufflen;
98 SCpnt->SCp.phase = SCpnt->request_bufflen;
99 }
100
101 /*
102 * If the upper SCSI layers pass a buffer, but zero length,
103 * we aren't interested in the buffer pointer.
104 */
105 if (SCpnt->SCp.this_residual == 0 && SCpnt->SCp.ptr) {
106#if 0 //def BELT_AND_BRACES
107 printk(KERN_WARNING "scsi%d.%c: zero length buffer passed for "
108 "command ", SCpnt->host->host_no, '0' + SCpnt->target);
109 __scsi_print_command(SCpnt->cmnd);
110#endif
111 SCpnt->SCp.ptr = NULL; 124 SCpnt->SCp.ptr = NULL;
125 SCpnt->SCp.this_residual = 0;
126 SCpnt->SCp.phase = 0;
112 } 127 }
113} 128}
diff --git a/drivers/scsi/blz1230.c b/drivers/scsi/blz1230.c
deleted file mode 100644
index 23f7c24ab809..000000000000
--- a/drivers/scsi/blz1230.c
+++ /dev/null
@@ -1,353 +0,0 @@
1/* blz1230.c: Driver for Blizzard 1230 SCSI IV Controller.
2 *
3 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
4 *
5 * This driver is based on the CyberStorm driver, hence the occasional
6 * reference to CyberStorm.
7 */
8
9/* TODO:
10 *
11 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
12 * to the caches and the Sparc MMU mapping.
13 * 2) Make as few routines required outside the generic driver. A lot of the
14 * routines in this file used to be inline!
15 */
16
17#include <linux/module.h>
18
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/delay.h>
22#include <linux/types.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25#include <linux/blkdev.h>
26#include <linux/proc_fs.h>
27#include <linux/stat.h>
28#include <linux/interrupt.h>
29
30#include "scsi.h"
31#include <scsi/scsi_host.h>
32#include "NCR53C9x.h"
33
34#include <linux/zorro.h>
35#include <asm/irq.h>
36#include <asm/amigaints.h>
37#include <asm/amigahw.h>
38
39#include <asm/pgtable.h>
40
41#define MKIV 1
42
43/* The controller registers can be found in the Z2 config area at these
44 * offsets:
45 */
46#define BLZ1230_ESP_ADDR 0x8000
47#define BLZ1230_DMA_ADDR 0x10000
48#define BLZ1230II_ESP_ADDR 0x10000
49#define BLZ1230II_DMA_ADDR 0x10021
50
51
52/* The Blizzard 1230 DMA interface
53 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
54 * Only two things can be programmed in the Blizzard DMA:
55 * 1) The data direction is controlled by the status of bit 31 (1 = write)
56 * 2) The source/dest address (word aligned, shifted one right) in bits 30-0
57 *
58 * Program DMA by first latching the highest byte of the address/direction
59 * (i.e. bits 31-24 of the long word constructed as described in steps 1+2
60 * above). Then write each byte of the address/direction (starting with the
61 * top byte, working down) to the DMA address register.
62 *
63 * Figure out interrupt status by reading the ESP status byte.
64 */
65struct blz1230_dma_registers {
66 volatile unsigned char dma_addr; /* DMA address [0x0000] */
67 unsigned char dmapad2[0x7fff];
68 volatile unsigned char dma_latch; /* DMA latch [0x8000] */
69};
70
71struct blz1230II_dma_registers {
72 volatile unsigned char dma_addr; /* DMA address [0x0000] */
73 unsigned char dmapad2[0xf];
74 volatile unsigned char dma_latch; /* DMA latch [0x0010] */
75};
76
77#define BLZ1230_DMA_WRITE 0x80000000
78
79static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
80static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
81static void dma_dump_state(struct NCR_ESP *esp);
82static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
83static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
84static void dma_ints_off(struct NCR_ESP *esp);
85static void dma_ints_on(struct NCR_ESP *esp);
86static int dma_irq_p(struct NCR_ESP *esp);
87static int dma_ports_p(struct NCR_ESP *esp);
88static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
89
90static volatile unsigned char cmd_buffer[16];
91 /* This is where all commands are put
92 * before they are transferred to the ESP chip
93 * via PIO.
94 */
95
96/***************************************************************** Detection */
97int __init blz1230_esp_detect(struct scsi_host_template *tpnt)
98{
99 struct NCR_ESP *esp;
100 struct zorro_dev *z = NULL;
101 unsigned long address;
102 struct ESP_regs *eregs;
103 unsigned long board;
104
105#if MKIV
106#define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_IV_1260
107#define REAL_BLZ1230_ESP_ADDR BLZ1230_ESP_ADDR
108#define REAL_BLZ1230_DMA_ADDR BLZ1230_DMA_ADDR
109#else
110#define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060
111#define REAL_BLZ1230_ESP_ADDR BLZ1230II_ESP_ADDR
112#define REAL_BLZ1230_DMA_ADDR BLZ1230II_DMA_ADDR
113#endif
114
115 if ((z = zorro_find_device(REAL_BLZ1230_ID, z))) {
116 board = z->resource.start;
117 if (request_mem_region(board+REAL_BLZ1230_ESP_ADDR,
118 sizeof(struct ESP_regs), "NCR53C9x")) {
119 /* Do some magic to figure out if the blizzard is
120 * equipped with a SCSI controller
121 */
122 address = ZTWO_VADDR(board);
123 eregs = (struct ESP_regs *)(address + REAL_BLZ1230_ESP_ADDR);
124 esp = esp_allocate(tpnt, (void *)board + REAL_BLZ1230_ESP_ADDR,
125 0);
126
127 esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
128 udelay(5);
129 if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7))
130 goto err_out;
131
132 /* Do command transfer with programmed I/O */
133 esp->do_pio_cmds = 1;
134
135 /* Required functions */
136 esp->dma_bytes_sent = &dma_bytes_sent;
137 esp->dma_can_transfer = &dma_can_transfer;
138 esp->dma_dump_state = &dma_dump_state;
139 esp->dma_init_read = &dma_init_read;
140 esp->dma_init_write = &dma_init_write;
141 esp->dma_ints_off = &dma_ints_off;
142 esp->dma_ints_on = &dma_ints_on;
143 esp->dma_irq_p = &dma_irq_p;
144 esp->dma_ports_p = &dma_ports_p;
145 esp->dma_setup = &dma_setup;
146
147 /* Optional functions */
148 esp->dma_barrier = 0;
149 esp->dma_drain = 0;
150 esp->dma_invalidate = 0;
151 esp->dma_irq_entry = 0;
152 esp->dma_irq_exit = 0;
153 esp->dma_led_on = 0;
154 esp->dma_led_off = 0;
155 esp->dma_poll = 0;
156 esp->dma_reset = 0;
157
158 /* SCSI chip speed */
159 esp->cfreq = 40000000;
160
161 /* The DMA registers on the Blizzard are mapped
162 * relative to the device (i.e. in the same Zorro
163 * I/O block).
164 */
165 esp->dregs = (void *)(address + REAL_BLZ1230_DMA_ADDR);
166
167 /* ESP register base */
168 esp->eregs = eregs;
169
170 /* Set the command buffer */
171 esp->esp_command = cmd_buffer;
172 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
173
174 esp->irq = IRQ_AMIGA_PORTS;
175 esp->slot = board+REAL_BLZ1230_ESP_ADDR;
176 if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
177 "Blizzard 1230 SCSI IV", esp->ehost))
178 goto err_out;
179
180 /* Figure out our scsi ID on the bus */
181 esp->scsi_id = 7;
182
183 /* We don't have a differential SCSI-bus. */
184 esp->diff = 0;
185
186 esp_initialize(esp);
187
188 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
189 esps_running = esps_in_use;
190 return esps_in_use;
191 }
192 }
193 return 0;
194
195 err_out:
196 scsi_unregister(esp->ehost);
197 esp_deallocate(esp);
198 release_mem_region(board+REAL_BLZ1230_ESP_ADDR,
199 sizeof(struct ESP_regs));
200 return 0;
201}
202
203/************************************************************* DMA Functions */
204static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
205{
206 /* Since the Blizzard DMA is fully dedicated to the ESP chip,
207 * the number of bytes sent (to the ESP chip) equals the number
208 * of bytes in the FIFO - there is no buffering in the DMA controller.
209 * XXXX Do I read this right? It is from host to ESP, right?
210 */
211 return fifo_count;
212}
213
214static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
215{
216 /* I don't think there's any limit on the Blizzard DMA. So we use what
217 * the ESP chip can handle (24 bit).
218 */
219 unsigned long sz = sp->SCp.this_residual;
220 if(sz > 0x1000000)
221 sz = 0x1000000;
222 return sz;
223}
224
225static void dma_dump_state(struct NCR_ESP *esp)
226{
227 ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
228 amiga_custom.intreqr, amiga_custom.intenar));
229}
230
231void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
232{
233#if MKIV
234 struct blz1230_dma_registers *dregs =
235 (struct blz1230_dma_registers *) (esp->dregs);
236#else
237 struct blz1230II_dma_registers *dregs =
238 (struct blz1230II_dma_registers *) (esp->dregs);
239#endif
240
241 cache_clear(addr, length);
242
243 addr >>= 1;
244 addr &= ~(BLZ1230_DMA_WRITE);
245
246 /* First set latch */
247 dregs->dma_latch = (addr >> 24) & 0xff;
248
249 /* Then pump the address to the DMA address register */
250#if MKIV
251 dregs->dma_addr = (addr >> 24) & 0xff;
252#endif
253 dregs->dma_addr = (addr >> 16) & 0xff;
254 dregs->dma_addr = (addr >> 8) & 0xff;
255 dregs->dma_addr = (addr ) & 0xff;
256}
257
258void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
259{
260#if MKIV
261 struct blz1230_dma_registers *dregs =
262 (struct blz1230_dma_registers *) (esp->dregs);
263#else
264 struct blz1230II_dma_registers *dregs =
265 (struct blz1230II_dma_registers *) (esp->dregs);
266#endif
267
268 cache_push(addr, length);
269
270 addr >>= 1;
271 addr |= BLZ1230_DMA_WRITE;
272
273 /* First set latch */
274 dregs->dma_latch = (addr >> 24) & 0xff;
275
276 /* Then pump the address to the DMA address register */
277#if MKIV
278 dregs->dma_addr = (addr >> 24) & 0xff;
279#endif
280 dregs->dma_addr = (addr >> 16) & 0xff;
281 dregs->dma_addr = (addr >> 8) & 0xff;
282 dregs->dma_addr = (addr ) & 0xff;
283}
284
285static void dma_ints_off(struct NCR_ESP *esp)
286{
287 disable_irq(esp->irq);
288}
289
290static void dma_ints_on(struct NCR_ESP *esp)
291{
292 enable_irq(esp->irq);
293}
294
295static int dma_irq_p(struct NCR_ESP *esp)
296{
297 return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
298}
299
300static int dma_ports_p(struct NCR_ESP *esp)
301{
302 return ((amiga_custom.intenar) & IF_PORTS);
303}
304
305static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
306{
307 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
308 * so when (write) is true, it actually means READ!
309 */
310 if(write){
311 dma_init_read(esp, addr, count);
312 } else {
313 dma_init_write(esp, addr, count);
314 }
315}
316
317#define HOSTS_C
318
319int blz1230_esp_release(struct Scsi_Host *instance)
320{
321#ifdef MODULE
322 unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
323 esp_deallocate((struct NCR_ESP *)instance->hostdata);
324 esp_release();
325 release_mem_region(address, sizeof(struct ESP_regs));
326 free_irq(IRQ_AMIGA_PORTS, esp_intr);
327#endif
328 return 1;
329}
330
331
332static struct scsi_host_template driver_template = {
333 .proc_name = "esp-blz1230",
334 .proc_info = esp_proc_info,
335 .name = "Blizzard1230 SCSI IV",
336 .detect = blz1230_esp_detect,
337 .slave_alloc = esp_slave_alloc,
338 .slave_destroy = esp_slave_destroy,
339 .release = blz1230_esp_release,
340 .queuecommand = esp_queue,
341 .eh_abort_handler = esp_abort,
342 .eh_bus_reset_handler = esp_reset,
343 .can_queue = 7,
344 .this_id = 7,
345 .sg_tablesize = SG_ALL,
346 .cmd_per_lun = 1,
347 .use_clustering = ENABLE_CLUSTERING
348};
349
350
351#include "scsi_module.c"
352
353MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/blz2060.c b/drivers/scsi/blz2060.c
deleted file mode 100644
index b6203ec00961..000000000000
--- a/drivers/scsi/blz2060.c
+++ /dev/null
@@ -1,306 +0,0 @@
1/* blz2060.c: Driver for Blizzard 2060 SCSI Controller.
2 *
3 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
4 *
5 * This driver is based on the CyberStorm driver, hence the occasional
6 * reference to CyberStorm.
7 */
8
9/* TODO:
10 *
11 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
12 * to the caches and the Sparc MMU mapping.
13 * 2) Make as few routines required outside the generic driver. A lot of the
14 * routines in this file used to be inline!
15 */
16
17#include <linux/module.h>
18
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/delay.h>
22#include <linux/types.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25#include <linux/blkdev.h>
26#include <linux/proc_fs.h>
27#include <linux/stat.h>
28#include <linux/interrupt.h>
29
30#include "scsi.h"
31#include <scsi/scsi_host.h>
32#include "NCR53C9x.h"
33
34#include <linux/zorro.h>
35#include <asm/irq.h>
36#include <asm/amigaints.h>
37#include <asm/amigahw.h>
38
39#include <asm/pgtable.h>
40
41/* The controller registers can be found in the Z2 config area at these
42 * offsets:
43 */
44#define BLZ2060_ESP_ADDR 0x1ff00
45#define BLZ2060_DMA_ADDR 0x1ffe0
46
47
48/* The Blizzard 2060 DMA interface
49 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
50 * Only two things can be programmed in the Blizzard DMA:
51 * 1) The data direction is controlled by the status of bit 31 (1 = write)
52 * 2) The source/dest address (word aligned, shifted one right) in bits 30-0
53 *
54 * Figure out interrupt status by reading the ESP status byte.
55 */
/* Blizzard 2060 DMA register layout.  One byte per address register,
 * padded out to the Zorro access stride; layout offsets are fixed by
 * the hardware and must not change.  The transfer direction lives in
 * bit 31 of the programmed address (see BLZ2060_DMA_WRITE).
 */
struct blz2060_dma_registers {
	volatile unsigned char dma_led_ctrl;	/* DMA led control   [0x000] */
	unsigned char dmapad1[0x0f];
	volatile unsigned char dma_addr0;	/* DMA address (MSB) [0x010] */
	unsigned char dmapad2[0x03];
	volatile unsigned char dma_addr1;	/* DMA address       [0x014] */
	unsigned char dmapad3[0x03];
	volatile unsigned char dma_addr2;	/* DMA address       [0x018] */
	unsigned char dmapad4[0x03];
	volatile unsigned char dma_addr3;	/* DMA address (LSB) [0x01c] */
};
67
68#define BLZ2060_DMA_WRITE 0x80000000
69
70/* DMA control bits */
71#define BLZ2060_DMA_LED 0x02 /* HD led control 1 = off */
72
73static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
74static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
75static void dma_dump_state(struct NCR_ESP *esp);
76static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
77static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
78static void dma_ints_off(struct NCR_ESP *esp);
79static void dma_ints_on(struct NCR_ESP *esp);
80static int dma_irq_p(struct NCR_ESP *esp);
81static void dma_led_off(struct NCR_ESP *esp);
82static void dma_led_on(struct NCR_ESP *esp);
83static int dma_ports_p(struct NCR_ESP *esp);
84static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
85
86static volatile unsigned char cmd_buffer[16];
87 /* This is where all commands are put
88 * before they are transferred to the ESP chip
89 * via PIO.
90 */
91
92/***************************************************************** Detection */
93int __init blz2060_esp_detect(struct scsi_host_template *tpnt)
94{
95 struct NCR_ESP *esp;
96 struct zorro_dev *z = NULL;
97 unsigned long address;
98
99 if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_2060, z))) {
100 unsigned long board = z->resource.start;
101 if (request_mem_region(board+BLZ2060_ESP_ADDR,
102 sizeof(struct ESP_regs), "NCR53C9x")) {
103 esp = esp_allocate(tpnt, (void *)board + BLZ2060_ESP_ADDR, 0);
104
105 /* Do command transfer with programmed I/O */
106 esp->do_pio_cmds = 1;
107
108 /* Required functions */
109 esp->dma_bytes_sent = &dma_bytes_sent;
110 esp->dma_can_transfer = &dma_can_transfer;
111 esp->dma_dump_state = &dma_dump_state;
112 esp->dma_init_read = &dma_init_read;
113 esp->dma_init_write = &dma_init_write;
114 esp->dma_ints_off = &dma_ints_off;
115 esp->dma_ints_on = &dma_ints_on;
116 esp->dma_irq_p = &dma_irq_p;
117 esp->dma_ports_p = &dma_ports_p;
118 esp->dma_setup = &dma_setup;
119
120 /* Optional functions */
121 esp->dma_barrier = 0;
122 esp->dma_drain = 0;
123 esp->dma_invalidate = 0;
124 esp->dma_irq_entry = 0;
125 esp->dma_irq_exit = 0;
126 esp->dma_led_on = &dma_led_on;
127 esp->dma_led_off = &dma_led_off;
128 esp->dma_poll = 0;
129 esp->dma_reset = 0;
130
131 /* SCSI chip speed */
132 esp->cfreq = 40000000;
133
134 /* The DMA registers on the Blizzard are mapped
135 * relative to the device (i.e. in the same Zorro
136 * I/O block).
137 */
138 address = (unsigned long)ZTWO_VADDR(board);
139 esp->dregs = (void *)(address + BLZ2060_DMA_ADDR);
140
141 /* ESP register base */
142 esp->eregs = (struct ESP_regs *)(address + BLZ2060_ESP_ADDR);
143
144 /* Set the command buffer */
145 esp->esp_command = cmd_buffer;
146 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
147
148 esp->irq = IRQ_AMIGA_PORTS;
149 request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
150 "Blizzard 2060 SCSI", esp->ehost);
151
152 /* Figure out our scsi ID on the bus */
153 esp->scsi_id = 7;
154
155 /* We don't have a differential SCSI-bus. */
156 esp->diff = 0;
157
158 esp_initialize(esp);
159
160 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
161 esps_running = esps_in_use;
162 return esps_in_use;
163 }
164 }
165 return 0;
166}
167
168/************************************************************* DMA Functions */
169static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
170{
171 /* Since the Blizzard DMA is fully dedicated to the ESP chip,
172 * the number of bytes sent (to the ESP chip) equals the number
173 * of bytes in the FIFO - there is no buffering in the DMA controller.
174 * XXXX Do I read this right? It is from host to ESP, right?
175 */
176 return fifo_count;
177}
178
179static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
180{
181 /* I don't think there's any limit on the Blizzard DMA. So we use what
182 * the ESP chip can handle (24 bit).
183 */
184 unsigned long sz = sp->SCp.this_residual;
185 if(sz > 0x1000000)
186 sz = 0x1000000;
187 return sz;
188}
189
190static void dma_dump_state(struct NCR_ESP *esp)
191{
192 ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
193 amiga_custom.intreqr, amiga_custom.intenar));
194}
195
196static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
197{
198 struct blz2060_dma_registers *dregs =
199 (struct blz2060_dma_registers *) (esp->dregs);
200
201 cache_clear(addr, length);
202
203 addr >>= 1;
204 addr &= ~(BLZ2060_DMA_WRITE);
205 dregs->dma_addr3 = (addr ) & 0xff;
206 dregs->dma_addr2 = (addr >> 8) & 0xff;
207 dregs->dma_addr1 = (addr >> 16) & 0xff;
208 dregs->dma_addr0 = (addr >> 24) & 0xff;
209}
210
211static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
212{
213 struct blz2060_dma_registers *dregs =
214 (struct blz2060_dma_registers *) (esp->dregs);
215
216 cache_push(addr, length);
217
218 addr >>= 1;
219 addr |= BLZ2060_DMA_WRITE;
220 dregs->dma_addr3 = (addr ) & 0xff;
221 dregs->dma_addr2 = (addr >> 8) & 0xff;
222 dregs->dma_addr1 = (addr >> 16) & 0xff;
223 dregs->dma_addr0 = (addr >> 24) & 0xff;
224}
225
226static void dma_ints_off(struct NCR_ESP *esp)
227{
228 disable_irq(esp->irq);
229}
230
231static void dma_ints_on(struct NCR_ESP *esp)
232{
233 enable_irq(esp->irq);
234}
235
236static int dma_irq_p(struct NCR_ESP *esp)
237{
238 return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
239}
240
241static void dma_led_off(struct NCR_ESP *esp)
242{
243 ((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl =
244 BLZ2060_DMA_LED;
245}
246
247static void dma_led_on(struct NCR_ESP *esp)
248{
249 ((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl = 0;
250}
251
252static int dma_ports_p(struct NCR_ESP *esp)
253{
254 return ((amiga_custom.intenar) & IF_PORTS);
255}
256
257static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
258{
259 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
260 * so when (write) is true, it actually means READ!
261 */
262 if(write){
263 dma_init_read(esp, addr, count);
264 } else {
265 dma_init_write(esp, addr, count);
266 }
267}
268
269#define HOSTS_C
270
/* Detach the Blizzard 2060 host on module unload: release the generic
 * ESP state, the ESP register region and the shared PORTS interrupt.
 * Always returns 1 (old-style release convention).
 */
int blz2060_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
	struct NCR_ESP *esp = (struct NCR_ESP *)instance->hostdata;
	unsigned long address = (unsigned long)esp->edev;

	esp_deallocate(esp);
	esp_release();
	release_mem_region(address, sizeof(struct ESP_regs));
	free_irq(IRQ_AMIGA_PORTS, esp_intr);
#endif
	return 1;
}
283
284
285static struct scsi_host_template driver_template = {
286 .proc_name = "esp-blz2060",
287 .proc_info = esp_proc_info,
288 .name = "Blizzard2060 SCSI",
289 .detect = blz2060_esp_detect,
290 .slave_alloc = esp_slave_alloc,
291 .slave_destroy = esp_slave_destroy,
292 .release = blz2060_esp_release,
293 .queuecommand = esp_queue,
294 .eh_abort_handler = esp_abort,
295 .eh_bus_reset_handler = esp_reset,
296 .can_queue = 7,
297 .this_id = 7,
298 .sg_tablesize = SG_ALL,
299 .cmd_per_lun = 1,
300 .use_clustering = ENABLE_CLUSTERING
301};
302
303
304#include "scsi_module.c"
305
306MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/cyberstorm.c b/drivers/scsi/cyberstorm.c
deleted file mode 100644
index c6b98a42e89d..000000000000
--- a/drivers/scsi/cyberstorm.c
+++ /dev/null
@@ -1,377 +0,0 @@
1/* cyberstorm.c: Driver for CyberStorm SCSI Controller.
2 *
3 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
4 *
5 * The CyberStorm SCSI driver is based on David S. Miller's ESP driver
6 * for the Sparc computers.
7 *
8 * This work was made possible by Phase5 who willingly (and most generously)
9 * supported me with hardware and all the information I needed.
10 */
11
12/* TODO:
13 *
14 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
15 * to the caches and the Sparc MMU mapping.
16 * 2) Make as few routines required outside the generic driver. A lot of the
17 * routines in this file used to be inline!
18 */
19
20#include <linux/module.h>
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/delay.h>
25#include <linux/types.h>
26#include <linux/string.h>
27#include <linux/slab.h>
28#include <linux/blkdev.h>
29#include <linux/proc_fs.h>
30#include <linux/stat.h>
31#include <linux/interrupt.h>
32
33#include "scsi.h"
34#include <scsi/scsi_host.h>
35#include "NCR53C9x.h"
36
37#include <linux/zorro.h>
38#include <asm/irq.h>
39#include <asm/amigaints.h>
40#include <asm/amigahw.h>
41
42#include <asm/pgtable.h>
43
44/* The controller registers can be found in the Z2 config area at these
45 * offsets:
46 */
47#define CYBER_ESP_ADDR 0xf400
48#define CYBER_DMA_ADDR 0xf800
49
50
51/* The CyberStorm DMA interface */
/* CyberStorm DMA register layout.  The address registers are one byte
 * wide with padding to the hardware stride; the last register is the
 * DMA condition register when read and the control register when
 * written (hence the ctrl_reg alias).  Offsets are hardware-fixed.
 */
struct cyber_dma_registers {
	volatile unsigned char dma_addr0;	/* DMA address (MSB) [0x000] */
	unsigned char dmapad1[1];
	volatile unsigned char dma_addr1;	/* DMA address       [0x002] */
	unsigned char dmapad2[1];
	volatile unsigned char dma_addr2;	/* DMA address       [0x004] */
	unsigned char dmapad3[1];
	volatile unsigned char dma_addr3;	/* DMA address (LSB) [0x006] */
	unsigned char dmapad4[0x3fb];
	volatile unsigned char cond_reg;	/* DMA cond    (ro)  [0x402] */
#define ctrl_reg cond_reg			/* DMA control (wo)  [0x402] */
};
64
65/* DMA control bits */
66#define CYBER_DMA_LED 0x80 /* HD led control 1 = on */
67#define CYBER_DMA_WRITE 0x40 /* DMA direction. 1 = write */
68#define CYBER_DMA_Z3 0x20 /* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */
69
70/* DMA status bits */
71#define CYBER_DMA_HNDL_INTR 0x80 /* DMA IRQ pending? */
72
73/* The bits below appears to be Phase5 Debug bits only; they were not
74 * described by Phase5 so using them may seem a bit stupid...
75 */
76#define CYBER_HOST_ID 0x02 /* If set, host ID should be 7, otherwise
77 * it should be 6.
78 */
79#define CYBER_SLOW_CABLE 0x08 /* If *not* set, assume SLOW_CABLE */
80
81static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
82static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
83static void dma_dump_state(struct NCR_ESP *esp);
84static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
85static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
86static void dma_ints_off(struct NCR_ESP *esp);
87static void dma_ints_on(struct NCR_ESP *esp);
88static int dma_irq_p(struct NCR_ESP *esp);
89static void dma_led_off(struct NCR_ESP *esp);
90static void dma_led_on(struct NCR_ESP *esp);
91static int dma_ports_p(struct NCR_ESP *esp);
92static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
93
94static unsigned char ctrl_data = 0; /* Keep backup of the stuff written
95 * to ctrl_reg. Always write a copy
96 * to this register when writing to
97 * the hardware register!
98 */
99
100static volatile unsigned char cmd_buffer[16];
101 /* This is where all commands are put
102 * before they are transferred to the ESP chip
103 * via PIO.
104 */
105
106/***************************************************************** Detection */
107int __init cyber_esp_detect(struct scsi_host_template *tpnt)
108{
109 struct NCR_ESP *esp;
110 struct zorro_dev *z = NULL;
111 unsigned long address;
112
113 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
114 unsigned long board = z->resource.start;
115 if ((z->id == ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM ||
116 z->id == ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060) &&
117 request_mem_region(board+CYBER_ESP_ADDR,
118 sizeof(struct ESP_regs), "NCR53C9x")) {
119 /* Figure out if this is a CyberStorm or really a
120 * Fastlane/Blizzard Mk II by looking at the board size.
121 * CyberStorm maps 64kB
122 * (ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM does anyway)
123 */
124 if(z->resource.end-board != 0xffff) {
125 release_mem_region(board+CYBER_ESP_ADDR,
126 sizeof(struct ESP_regs));
127 return 0;
128 }
129 esp = esp_allocate(tpnt, (void *)board + CYBER_ESP_ADDR, 0);
130
131 /* Do command transfer with programmed I/O */
132 esp->do_pio_cmds = 1;
133
134 /* Required functions */
135 esp->dma_bytes_sent = &dma_bytes_sent;
136 esp->dma_can_transfer = &dma_can_transfer;
137 esp->dma_dump_state = &dma_dump_state;
138 esp->dma_init_read = &dma_init_read;
139 esp->dma_init_write = &dma_init_write;
140 esp->dma_ints_off = &dma_ints_off;
141 esp->dma_ints_on = &dma_ints_on;
142 esp->dma_irq_p = &dma_irq_p;
143 esp->dma_ports_p = &dma_ports_p;
144 esp->dma_setup = &dma_setup;
145
146 /* Optional functions */
147 esp->dma_barrier = 0;
148 esp->dma_drain = 0;
149 esp->dma_invalidate = 0;
150 esp->dma_irq_entry = 0;
151 esp->dma_irq_exit = 0;
152 esp->dma_led_on = &dma_led_on;
153 esp->dma_led_off = &dma_led_off;
154 esp->dma_poll = 0;
155 esp->dma_reset = 0;
156
157 /* SCSI chip speed */
158 esp->cfreq = 40000000;
159
160 /* The DMA registers on the CyberStorm are mapped
161 * relative to the device (i.e. in the same Zorro
162 * I/O block).
163 */
164 address = (unsigned long)ZTWO_VADDR(board);
165 esp->dregs = (void *)(address + CYBER_DMA_ADDR);
166
167 /* ESP register base */
168 esp->eregs = (struct ESP_regs *)(address + CYBER_ESP_ADDR);
169
170 /* Set the command buffer */
171 esp->esp_command = cmd_buffer;
172 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
173
174 esp->irq = IRQ_AMIGA_PORTS;
175 request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
176 "CyberStorm SCSI", esp->ehost);
177 /* Figure out our scsi ID on the bus */
178 /* The DMA cond flag contains a hardcoded jumper bit
179 * which can be used to select host number 6 or 7.
180 * However, even though it may change, we use a hardcoded
181 * value of 7.
182 */
183 esp->scsi_id = 7;
184
185 /* We don't have a differential SCSI-bus. */
186 esp->diff = 0;
187
188 esp_initialize(esp);
189
190 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
191 esps_running = esps_in_use;
192 return esps_in_use;
193 }
194 }
195 return 0;
196}
197
198/************************************************************* DMA Functions */
199static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
200{
201 /* Since the CyberStorm DMA is fully dedicated to the ESP chip,
202 * the number of bytes sent (to the ESP chip) equals the number
203 * of bytes in the FIFO - there is no buffering in the DMA controller.
204 * XXXX Do I read this right? It is from host to ESP, right?
205 */
206 return fifo_count;
207}
208
209static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
210{
211 /* I don't think there's any limit on the CyberDMA. So we use what
212 * the ESP chip can handle (24 bit).
213 */
214 unsigned long sz = sp->SCp.this_residual;
215 if(sz > 0x1000000)
216 sz = 0x1000000;
217 return sz;
218}
219
220static void dma_dump_state(struct NCR_ESP *esp)
221{
222 ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
223 esp->esp_id, ((struct cyber_dma_registers *)
224 (esp->dregs))->cond_reg));
225 ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
226 amiga_custom.intreqr, amiga_custom.intenar));
227}
228
229static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
230{
231 struct cyber_dma_registers *dregs =
232 (struct cyber_dma_registers *) esp->dregs;
233
234 cache_clear(addr, length);
235
236 addr &= ~(1);
237 dregs->dma_addr0 = (addr >> 24) & 0xff;
238 dregs->dma_addr1 = (addr >> 16) & 0xff;
239 dregs->dma_addr2 = (addr >> 8) & 0xff;
240 dregs->dma_addr3 = (addr ) & 0xff;
241 ctrl_data &= ~(CYBER_DMA_WRITE);
242
243 /* Check if physical address is outside Z2 space and of
244 * block length/block aligned in memory. If this is the
245 * case, enable 32 bit transfer. In all other cases, fall back
246 * to 16 bit transfer.
247 * Obviously 32 bit transfer should be enabled if the DMA address
248 * and length are 32 bit aligned. However, this leads to some
249 * strange behavior. Even 64 bit aligned addr/length fails.
250 * Until I've found a reason for this, 32 bit transfer is only
251 * used for full-block transfers (1kB).
252 * -jskov
253 */
254#if 0
255 if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) &&
256 (addr < 0xff0000)))
257 ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
258 else
259 ctrl_data |= CYBER_DMA_Z3; /* CHIP/Z3, do 32 bit DMA */
260#else
261 ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
262#endif
263 dregs->ctrl_reg = ctrl_data;
264}
265
266static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
267{
268 struct cyber_dma_registers *dregs =
269 (struct cyber_dma_registers *) esp->dregs;
270
271 cache_push(addr, length);
272
273 addr |= 1;
274 dregs->dma_addr0 = (addr >> 24) & 0xff;
275 dregs->dma_addr1 = (addr >> 16) & 0xff;
276 dregs->dma_addr2 = (addr >> 8) & 0xff;
277 dregs->dma_addr3 = (addr ) & 0xff;
278 ctrl_data |= CYBER_DMA_WRITE;
279
280 /* See comment above */
281#if 0
282 if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) &&
283 (addr < 0xff0000)))
284 ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
285 else
286 ctrl_data |= CYBER_DMA_Z3; /* CHIP/Z3, do 32 bit DMA */
287#else
288 ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
289#endif
290 dregs->ctrl_reg = ctrl_data;
291}
292
293static void dma_ints_off(struct NCR_ESP *esp)
294{
295 disable_irq(esp->irq);
296}
297
298static void dma_ints_on(struct NCR_ESP *esp)
299{
300 enable_irq(esp->irq);
301}
302
303static int dma_irq_p(struct NCR_ESP *esp)
304{
305 /* It's important to check the DMA IRQ bit in the correct way! */
306 return ((esp_read(esp->eregs->esp_status) & ESP_STAT_INTR) &&
307 ((((struct cyber_dma_registers *)(esp->dregs))->cond_reg) &
308 CYBER_DMA_HNDL_INTR));
309}
310
311static void dma_led_off(struct NCR_ESP *esp)
312{
313 ctrl_data &= ~CYBER_DMA_LED;
314 ((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
315}
316
317static void dma_led_on(struct NCR_ESP *esp)
318{
319 ctrl_data |= CYBER_DMA_LED;
320 ((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
321}
322
323static int dma_ports_p(struct NCR_ESP *esp)
324{
325 return ((amiga_custom.intenar) & IF_PORTS);
326}
327
328static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
329{
330 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
331 * so when (write) is true, it actually means READ!
332 */
333 if(write){
334 dma_init_read(esp, addr, count);
335 } else {
336 dma_init_write(esp, addr, count);
337 }
338}
339
340#define HOSTS_C
341
/* Detach the CyberStorm host on module unload: release the generic ESP
 * state, the ESP register region and the shared PORTS interrupt.
 * Always returns 1 (old-style release convention).
 */
int cyber_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
	struct NCR_ESP *esp = (struct NCR_ESP *)instance->hostdata;
	unsigned long address = (unsigned long)esp->edev;

	esp_deallocate(esp);
	esp_release();
	release_mem_region(address, sizeof(struct ESP_regs));
	free_irq(IRQ_AMIGA_PORTS, esp_intr);
#endif
	return 1;
}
354
355
356static struct scsi_host_template driver_template = {
357 .proc_name = "esp-cyberstorm",
358 .proc_info = esp_proc_info,
359 .name = "CyberStorm SCSI",
360 .detect = cyber_esp_detect,
361 .slave_alloc = esp_slave_alloc,
362 .slave_destroy = esp_slave_destroy,
363 .release = cyber_esp_release,
364 .queuecommand = esp_queue,
365 .eh_abort_handler = esp_abort,
366 .eh_bus_reset_handler = esp_reset,
367 .can_queue = 7,
368 .this_id = 7,
369 .sg_tablesize = SG_ALL,
370 .cmd_per_lun = 1,
371 .use_clustering = ENABLE_CLUSTERING
372};
373
374
375#include "scsi_module.c"
376
377MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/cyberstormII.c b/drivers/scsi/cyberstormII.c
deleted file mode 100644
index e336e853e66f..000000000000
--- a/drivers/scsi/cyberstormII.c
+++ /dev/null
@@ -1,314 +0,0 @@
1/* cyberstormII.c: Driver for CyberStorm SCSI Mk II
2 *
3 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
4 *
5 * This driver is based on cyberstorm.c
6 */
7
8/* TODO:
9 *
10 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
11 * to the caches and the Sparc MMU mapping.
12 * 2) Make as few routines required outside the generic driver. A lot of the
13 * routines in this file used to be inline!
14 */
15
16#include <linux/module.h>
17
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/delay.h>
21#include <linux/types.h>
22#include <linux/string.h>
23#include <linux/slab.h>
24#include <linux/blkdev.h>
25#include <linux/proc_fs.h>
26#include <linux/stat.h>
27#include <linux/interrupt.h>
28
29#include "scsi.h"
30#include <scsi/scsi_host.h>
31#include "NCR53C9x.h"
32
33#include <linux/zorro.h>
34#include <asm/irq.h>
35#include <asm/amigaints.h>
36#include <asm/amigahw.h>
37
38#include <asm/pgtable.h>
39
40/* The controller registers can be found in the Z2 config area at these
41 * offsets:
42 */
43#define CYBERII_ESP_ADDR 0x1ff03
44#define CYBERII_DMA_ADDR 0x1ff43
45
46
47/* The CyberStorm II DMA interface */
/* CyberStorm Mk II DMA register layout.  The first register is the DMA
 * condition register when read and the control register when written
 * (hence the ctrl_reg alias); the address registers are one byte wide
 * with hardware-fixed padding.  Offsets must not change.
 */
struct cyberII_dma_registers {
	volatile unsigned char cond_reg;	/* DMA cond    (ro)  [0x000] */
#define ctrl_reg cond_reg			/* DMA control (wo)  [0x000] */
	unsigned char dmapad4[0x3f];
	volatile unsigned char dma_addr0;	/* DMA address (MSB) [0x040] */
	unsigned char dmapad1[3];
	volatile unsigned char dma_addr1;	/* DMA address       [0x044] */
	unsigned char dmapad2[3];
	volatile unsigned char dma_addr2;	/* DMA address       [0x048] */
	unsigned char dmapad3[3];
	volatile unsigned char dma_addr3;	/* DMA address (LSB) [0x04c] */
};
60
61/* DMA control bits */
62#define CYBERII_DMA_LED 0x02 /* HD led control 1 = on */
63
64static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
65static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
66static void dma_dump_state(struct NCR_ESP *esp);
67static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
68static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
69static void dma_ints_off(struct NCR_ESP *esp);
70static void dma_ints_on(struct NCR_ESP *esp);
71static int dma_irq_p(struct NCR_ESP *esp);
72static void dma_led_off(struct NCR_ESP *esp);
73static void dma_led_on(struct NCR_ESP *esp);
74static int dma_ports_p(struct NCR_ESP *esp);
75static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
76
77static volatile unsigned char cmd_buffer[16];
78 /* This is where all commands are put
79 * before they are transferred to the ESP chip
80 * via PIO.
81 */
82
83/***************************************************************** Detection */
84int __init cyberII_esp_detect(struct scsi_host_template *tpnt)
85{
86 struct NCR_ESP *esp;
87 struct zorro_dev *z = NULL;
88 unsigned long address;
89 struct ESP_regs *eregs;
90
91 if ((z = zorro_find_device(ZORRO_PROD_PHASE5_CYBERSTORM_MK_II, z))) {
92 unsigned long board = z->resource.start;
93 if (request_mem_region(board+CYBERII_ESP_ADDR,
94 sizeof(struct ESP_regs), "NCR53C9x")) {
95 /* Do some magic to figure out if the CyberStorm Mk II
96 * is equipped with a SCSI controller
97 */
98 address = (unsigned long)ZTWO_VADDR(board);
99 eregs = (struct ESP_regs *)(address + CYBERII_ESP_ADDR);
100
101 esp = esp_allocate(tpnt, (void *)board + CYBERII_ESP_ADDR, 0);
102
103 esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
104 udelay(5);
105 if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7)) {
106 esp_deallocate(esp);
107 scsi_unregister(esp->ehost);
108 release_mem_region(board+CYBERII_ESP_ADDR,
109 sizeof(struct ESP_regs));
110 return 0; /* Bail out if address did not hold data */
111 }
112
113 /* Do command transfer with programmed I/O */
114 esp->do_pio_cmds = 1;
115
116 /* Required functions */
117 esp->dma_bytes_sent = &dma_bytes_sent;
118 esp->dma_can_transfer = &dma_can_transfer;
119 esp->dma_dump_state = &dma_dump_state;
120 esp->dma_init_read = &dma_init_read;
121 esp->dma_init_write = &dma_init_write;
122 esp->dma_ints_off = &dma_ints_off;
123 esp->dma_ints_on = &dma_ints_on;
124 esp->dma_irq_p = &dma_irq_p;
125 esp->dma_ports_p = &dma_ports_p;
126 esp->dma_setup = &dma_setup;
127
128 /* Optional functions */
129 esp->dma_barrier = 0;
130 esp->dma_drain = 0;
131 esp->dma_invalidate = 0;
132 esp->dma_irq_entry = 0;
133 esp->dma_irq_exit = 0;
134 esp->dma_led_on = &dma_led_on;
135 esp->dma_led_off = &dma_led_off;
136 esp->dma_poll = 0;
137 esp->dma_reset = 0;
138
139 /* SCSI chip speed */
140 esp->cfreq = 40000000;
141
142 /* The DMA registers on the CyberStorm are mapped
143 * relative to the device (i.e. in the same Zorro
144 * I/O block).
145 */
146 esp->dregs = (void *)(address + CYBERII_DMA_ADDR);
147
148 /* ESP register base */
149 esp->eregs = eregs;
150
151 /* Set the command buffer */
152 esp->esp_command = cmd_buffer;
153 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
154
155 esp->irq = IRQ_AMIGA_PORTS;
156 request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
157 "CyberStorm SCSI Mk II", esp->ehost);
158
159 /* Figure out our scsi ID on the bus */
160 esp->scsi_id = 7;
161
162 /* We don't have a differential SCSI-bus. */
163 esp->diff = 0;
164
165 esp_initialize(esp);
166
167 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
168 esps_running = esps_in_use;
169 return esps_in_use;
170 }
171 }
172 return 0;
173}
174
175/************************************************************* DMA Functions */
176static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
177{
178 /* Since the CyberStorm DMA is fully dedicated to the ESP chip,
179 * the number of bytes sent (to the ESP chip) equals the number
180 * of bytes in the FIFO - there is no buffering in the DMA controller.
181 * XXXX Do I read this right? It is from host to ESP, right?
182 */
183 return fifo_count;
184}
185
186static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
187{
188 /* I don't think there's any limit on the CyberDMA. So we use what
189 * the ESP chip can handle (24 bit).
190 */
191 unsigned long sz = sp->SCp.this_residual;
192 if(sz > 0x1000000)
193 sz = 0x1000000;
194 return sz;
195}
196
197static void dma_dump_state(struct NCR_ESP *esp)
198{
199 ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
200 esp->esp_id, ((struct cyberII_dma_registers *)
201 (esp->dregs))->cond_reg));
202 ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
203 amiga_custom.intreqr, amiga_custom.intenar));
204}
205
206static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
207{
208 struct cyberII_dma_registers *dregs =
209 (struct cyberII_dma_registers *) esp->dregs;
210
211 cache_clear(addr, length);
212
213 addr &= ~(1);
214 dregs->dma_addr0 = (addr >> 24) & 0xff;
215 dregs->dma_addr1 = (addr >> 16) & 0xff;
216 dregs->dma_addr2 = (addr >> 8) & 0xff;
217 dregs->dma_addr3 = (addr ) & 0xff;
218}
219
220static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
221{
222 struct cyberII_dma_registers *dregs =
223 (struct cyberII_dma_registers *) esp->dregs;
224
225 cache_push(addr, length);
226
227 addr |= 1;
228 dregs->dma_addr0 = (addr >> 24) & 0xff;
229 dregs->dma_addr1 = (addr >> 16) & 0xff;
230 dregs->dma_addr2 = (addr >> 8) & 0xff;
231 dregs->dma_addr3 = (addr ) & 0xff;
232}
233
234static void dma_ints_off(struct NCR_ESP *esp)
235{
236 disable_irq(esp->irq);
237}
238
239static void dma_ints_on(struct NCR_ESP *esp)
240{
241 enable_irq(esp->irq);
242}
243
244static int dma_irq_p(struct NCR_ESP *esp)
245{
246 /* It's important to check the DMA IRQ bit in the correct way! */
247 return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
248}
249
250static void dma_led_off(struct NCR_ESP *esp)
251{
252 ((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg &= ~CYBERII_DMA_LED;
253}
254
255static void dma_led_on(struct NCR_ESP *esp)
256{
257 ((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg |= CYBERII_DMA_LED;
258}
259
260static int dma_ports_p(struct NCR_ESP *esp)
261{
262 return ((amiga_custom.intenar) & IF_PORTS);
263}
264
265static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
266{
267 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
268 * so when (write) is true, it actually means READ!
269 */
270 if(write){
271 dma_init_read(esp, addr, count);
272 } else {
273 dma_init_write(esp, addr, count);
274 }
275}
276
277#define HOSTS_C
278
/* Detach the CyberStorm Mk II host on module unload: release the
 * generic ESP state, the ESP register region and the shared PORTS
 * interrupt.  Always returns 1 (old-style release convention).
 */
int cyberII_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
	struct NCR_ESP *esp = (struct NCR_ESP *)instance->hostdata;
	unsigned long address = (unsigned long)esp->edev;

	esp_deallocate(esp);
	esp_release();
	release_mem_region(address, sizeof(struct ESP_regs));
	free_irq(IRQ_AMIGA_PORTS, esp_intr);
#endif
	return 1;
}
291
292
293static struct scsi_host_template driver_template = {
294 .proc_name = "esp-cyberstormII",
295 .proc_info = esp_proc_info,
296 .name = "CyberStorm Mk II SCSI",
297 .detect = cyberII_esp_detect,
298 .slave_alloc = esp_slave_alloc,
299 .slave_destroy = esp_slave_destroy,
300 .release = cyberII_esp_release,
301 .queuecommand = esp_queue,
302 .eh_abort_handler = esp_abort,
303 .eh_bus_reset_handler = esp_reset,
304 .can_queue = 7,
305 .this_id = 7,
306 .sg_tablesize = SG_ALL,
307 .cmd_per_lun = 1,
308 .use_clustering = ENABLE_CLUSTERING
309};
310
311
312#include "scsi_module.c"
313
314MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 22ef3716e786..e351db6c0077 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4267,7 +4267,7 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
4267 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN; 4267 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
4268 int srb_idx = 0; 4268 int srb_idx = 0;
4269 unsigned i = 0; 4269 unsigned i = 0;
4270 struct SGentry *ptr; 4270 struct SGentry *uninitialized_var(ptr);
4271 4271
4272 for (i = 0; i < DC395x_MAX_SRB_CNT; i++) 4272 for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
4273 acb->srb_array[i].segment_x = NULL; 4273 acb->srb_array[i].segment_x = NULL;
diff --git a/drivers/scsi/dec_esp.c b/drivers/scsi/dec_esp.c
deleted file mode 100644
index d42ad663ffee..000000000000
--- a/drivers/scsi/dec_esp.c
+++ /dev/null
@@ -1,687 +0,0 @@
1/*
2 * dec_esp.c: Driver for SCSI chips on IOASIC based TURBOchannel DECstations
3 * and TURBOchannel PMAZ-A cards
4 *
5 * TURBOchannel changes by Harald Koerfgen
6 * PMAZ-A support by David Airlie
7 *
8 * based on jazz_esp.c:
9 * Copyright (C) 1997 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
10 *
11 * jazz_esp is based on David S. Miller's ESP driver and cyber_esp
12 *
13 * 20000819 - Small PMAZ-AA fixes by Florian Lohoff <flo@rfc822.org>
14 * Be warned the PMAZ-AA works currently as a single card.
15 * Dont try to put multiple cards in one machine - They are
16 * both detected but it may crash under high load garbling your
17 * data.
18 * 20001005 - Initialization fixes for 2.4.0-test9
19 * Florian Lohoff <flo@rfc822.org>
20 *
21 * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
22 */
23
24#include <linux/kernel.h>
25#include <linux/delay.h>
26#include <linux/types.h>
27#include <linux/string.h>
28#include <linux/slab.h>
29#include <linux/blkdev.h>
30#include <linux/proc_fs.h>
31#include <linux/spinlock.h>
32#include <linux/stat.h>
33#include <linux/tc.h>
34
35#include <asm/dma.h>
36#include <asm/irq.h>
37#include <asm/pgtable.h>
38#include <asm/system.h>
39
40#include <asm/dec/interrupts.h>
41#include <asm/dec/ioasic.h>
42#include <asm/dec/ioasic_addrs.h>
43#include <asm/dec/ioasic_ints.h>
44#include <asm/dec/machtype.h>
45#include <asm/dec/system.h>
46
47#define DEC_SCSI_SREG 0
48#define DEC_SCSI_DMAREG 0x40000
49#define DEC_SCSI_SRAM 0x80000
50#define DEC_SCSI_DIAG 0xC0000
51
52#include "scsi.h"
53#include <scsi/scsi_host.h>
54#include "NCR53C9x.h"
55
56static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
57static void dma_drain(struct NCR_ESP *esp);
58static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
59static void dma_dump_state(struct NCR_ESP *esp);
60static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
61static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
62static void dma_ints_off(struct NCR_ESP *esp);
63static void dma_ints_on(struct NCR_ESP *esp);
64static int dma_irq_p(struct NCR_ESP *esp);
65static int dma_ports_p(struct NCR_ESP *esp);
66static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
67static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp);
68static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp);
69static void dma_advance_sg(struct scsi_cmnd * sp);
70
71static void pmaz_dma_drain(struct NCR_ESP *esp);
72static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
73static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
74static void pmaz_dma_ints_off(struct NCR_ESP *esp);
75static void pmaz_dma_ints_on(struct NCR_ESP *esp);
76static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
77static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp);
78
79#define TC_ESP_RAM_SIZE 0x20000
80#define ESP_TGT_DMA_SIZE ((TC_ESP_RAM_SIZE/7) & ~(sizeof(int)-1))
81#define ESP_NCMD 7
82
83#define TC_ESP_DMAR_MASK 0x1ffff
84#define TC_ESP_DMAR_WRITE 0x80000000
85#define TC_ESP_DMA_ADDR(x) ((unsigned)(x) & TC_ESP_DMAR_MASK)
86
87u32 esp_virt_buffer;
88int scsi_current_length;
89
90volatile unsigned char cmd_buffer[16];
91volatile unsigned char pmaz_cmd_buffer[16];
92 /* This is where all commands are put
93 * before they are trasfered to the ESP chip
94 * via PIO.
95 */
96
97static irqreturn_t scsi_dma_merr_int(int, void *);
98static irqreturn_t scsi_dma_err_int(int, void *);
99static irqreturn_t scsi_dma_int(int, void *);
100
101static struct scsi_host_template dec_esp_template = {
102 .module = THIS_MODULE,
103 .name = "NCR53C94",
104 .info = esp_info,
105 .queuecommand = esp_queue,
106 .eh_abort_handler = esp_abort,
107 .eh_bus_reset_handler = esp_reset,
108 .slave_alloc = esp_slave_alloc,
109 .slave_destroy = esp_slave_destroy,
110 .proc_info = esp_proc_info,
111 .proc_name = "dec_esp",
112 .can_queue = 7,
113 .sg_tablesize = SG_ALL,
114 .cmd_per_lun = 1,
115 .use_clustering = DISABLE_CLUSTERING,
116};
117
118static struct NCR_ESP *dec_esp_platform;
119
120/***************************************************************** Detection */
121static int dec_esp_platform_probe(void)
122{
123 struct NCR_ESP *esp;
124 int err = 0;
125
126 if (IOASIC) {
127 esp = esp_allocate(&dec_esp_template, NULL, 1);
128
129 /* Do command transfer with programmed I/O */
130 esp->do_pio_cmds = 1;
131
132 /* Required functions */
133 esp->dma_bytes_sent = &dma_bytes_sent;
134 esp->dma_can_transfer = &dma_can_transfer;
135 esp->dma_dump_state = &dma_dump_state;
136 esp->dma_init_read = &dma_init_read;
137 esp->dma_init_write = &dma_init_write;
138 esp->dma_ints_off = &dma_ints_off;
139 esp->dma_ints_on = &dma_ints_on;
140 esp->dma_irq_p = &dma_irq_p;
141 esp->dma_ports_p = &dma_ports_p;
142 esp->dma_setup = &dma_setup;
143
144 /* Optional functions */
145 esp->dma_barrier = 0;
146 esp->dma_drain = &dma_drain;
147 esp->dma_invalidate = 0;
148 esp->dma_irq_entry = 0;
149 esp->dma_irq_exit = 0;
150 esp->dma_poll = 0;
151 esp->dma_reset = 0;
152 esp->dma_led_off = 0;
153 esp->dma_led_on = 0;
154
155 /* virtual DMA functions */
156 esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
157 esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
158 esp->dma_mmu_release_scsi_one = 0;
159 esp->dma_mmu_release_scsi_sgl = 0;
160 esp->dma_advance_sg = &dma_advance_sg;
161
162
163 /* SCSI chip speed */
164 esp->cfreq = 25000000;
165
166 esp->dregs = 0;
167
168 /* ESP register base */
169 esp->eregs = (void *)CKSEG1ADDR(dec_kn_slot_base +
170 IOASIC_SCSI);
171
172 /* Set the command buffer */
173 esp->esp_command = (volatile unsigned char *) cmd_buffer;
174
175 /* get virtual dma address for command buffer */
176 esp->esp_command_dvma = virt_to_phys(cmd_buffer);
177
178 esp->irq = dec_interrupt[DEC_IRQ_ASC];
179
180 esp->scsi_id = 7;
181
182 /* Check for differential SCSI-bus */
183 esp->diff = 0;
184
185 err = request_irq(esp->irq, esp_intr, IRQF_DISABLED,
186 "ncr53c94", esp->ehost);
187 if (err)
188 goto err_alloc;
189 err = request_irq(dec_interrupt[DEC_IRQ_ASC_MERR],
190 scsi_dma_merr_int, IRQF_DISABLED,
191 "ncr53c94 error", esp->ehost);
192 if (err)
193 goto err_irq;
194 err = request_irq(dec_interrupt[DEC_IRQ_ASC_ERR],
195 scsi_dma_err_int, IRQF_DISABLED,
196 "ncr53c94 overrun", esp->ehost);
197 if (err)
198 goto err_irq_merr;
199 err = request_irq(dec_interrupt[DEC_IRQ_ASC_DMA], scsi_dma_int,
200 IRQF_DISABLED, "ncr53c94 dma", esp->ehost);
201 if (err)
202 goto err_irq_err;
203
204 esp_initialize(esp);
205
206 err = scsi_add_host(esp->ehost, NULL);
207 if (err) {
208 printk(KERN_ERR "ESP: Unable to register adapter\n");
209 goto err_irq_dma;
210 }
211
212 scsi_scan_host(esp->ehost);
213
214 dec_esp_platform = esp;
215 }
216
217 return 0;
218
219err_irq_dma:
220 free_irq(dec_interrupt[DEC_IRQ_ASC_DMA], esp->ehost);
221err_irq_err:
222 free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], esp->ehost);
223err_irq_merr:
224 free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], esp->ehost);
225err_irq:
226 free_irq(esp->irq, esp->ehost);
227err_alloc:
228 esp_deallocate(esp);
229 scsi_host_put(esp->ehost);
230 return err;
231}
232
233static int __init dec_esp_probe(struct device *dev)
234{
235 struct NCR_ESP *esp;
236 resource_size_t start, len;
237 int err;
238
239 esp = esp_allocate(&dec_esp_template, NULL, 1);
240
241 dev_set_drvdata(dev, esp);
242
243 start = to_tc_dev(dev)->resource.start;
244 len = to_tc_dev(dev)->resource.end - start + 1;
245
246 if (!request_mem_region(start, len, dev->bus_id)) {
247 printk(KERN_ERR "%s: Unable to reserve MMIO resource\n",
248 dev->bus_id);
249 err = -EBUSY;
250 goto err_alloc;
251 }
252
253 /* Store base addr into esp struct. */
254 esp->slot = start;
255
256 esp->dregs = 0;
257 esp->eregs = (void *)CKSEG1ADDR(start + DEC_SCSI_SREG);
258 esp->do_pio_cmds = 1;
259
260 /* Set the command buffer. */
261 esp->esp_command = (volatile unsigned char *)pmaz_cmd_buffer;
262
263 /* Get virtual dma address for command buffer. */
264 esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer);
265
266 esp->cfreq = tc_get_speed(to_tc_dev(dev)->bus);
267
268 esp->irq = to_tc_dev(dev)->interrupt;
269
270 /* Required functions. */
271 esp->dma_bytes_sent = &dma_bytes_sent;
272 esp->dma_can_transfer = &dma_can_transfer;
273 esp->dma_dump_state = &dma_dump_state;
274 esp->dma_init_read = &pmaz_dma_init_read;
275 esp->dma_init_write = &pmaz_dma_init_write;
276 esp->dma_ints_off = &pmaz_dma_ints_off;
277 esp->dma_ints_on = &pmaz_dma_ints_on;
278 esp->dma_irq_p = &dma_irq_p;
279 esp->dma_ports_p = &dma_ports_p;
280 esp->dma_setup = &pmaz_dma_setup;
281
282 /* Optional functions. */
283 esp->dma_barrier = 0;
284 esp->dma_drain = &pmaz_dma_drain;
285 esp->dma_invalidate = 0;
286 esp->dma_irq_entry = 0;
287 esp->dma_irq_exit = 0;
288 esp->dma_poll = 0;
289 esp->dma_reset = 0;
290 esp->dma_led_off = 0;
291 esp->dma_led_on = 0;
292
293 esp->dma_mmu_get_scsi_one = pmaz_dma_mmu_get_scsi_one;
294 esp->dma_mmu_get_scsi_sgl = 0;
295 esp->dma_mmu_release_scsi_one = 0;
296 esp->dma_mmu_release_scsi_sgl = 0;
297 esp->dma_advance_sg = 0;
298
299 err = request_irq(esp->irq, esp_intr, IRQF_DISABLED, "PMAZ_AA",
300 esp->ehost);
301 if (err) {
302 printk(KERN_ERR "%s: Unable to get IRQ %d\n",
303 dev->bus_id, esp->irq);
304 goto err_resource;
305 }
306
307 esp->scsi_id = 7;
308 esp->diff = 0;
309 esp_initialize(esp);
310
311 err = scsi_add_host(esp->ehost, dev);
312 if (err) {
313 printk(KERN_ERR "%s: Unable to register adapter\n",
314 dev->bus_id);
315 goto err_irq;
316 }
317
318 scsi_scan_host(esp->ehost);
319
320 return 0;
321
322err_irq:
323 free_irq(esp->irq, esp->ehost);
324
325err_resource:
326 release_mem_region(start, len);
327
328err_alloc:
329 esp_deallocate(esp);
330 scsi_host_put(esp->ehost);
331 return err;
332}
333
334static void __exit dec_esp_platform_remove(void)
335{
336 struct NCR_ESP *esp = dec_esp_platform;
337
338 free_irq(esp->irq, esp->ehost);
339 esp_deallocate(esp);
340 scsi_host_put(esp->ehost);
341 dec_esp_platform = NULL;
342}
343
344static void __exit dec_esp_remove(struct device *dev)
345{
346 struct NCR_ESP *esp = dev_get_drvdata(dev);
347
348 free_irq(esp->irq, esp->ehost);
349 esp_deallocate(esp);
350 scsi_host_put(esp->ehost);
351}
352
353
354/************************************************************* DMA Functions */
355static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id)
356{
357 printk("Got unexpected SCSI DMA Interrupt! < ");
358 printk("SCSI_DMA_MEMRDERR ");
359 printk(">\n");
360
361 return IRQ_HANDLED;
362}
363
364static irqreturn_t scsi_dma_err_int(int irq, void *dev_id)
365{
366 /* empty */
367
368 return IRQ_HANDLED;
369}
370
371static irqreturn_t scsi_dma_int(int irq, void *dev_id)
372{
373 u32 scsi_next_ptr;
374
375 scsi_next_ptr = ioasic_read(IO_REG_SCSI_DMA_P);
376
377 /* next page */
378 scsi_next_ptr = (((scsi_next_ptr >> 3) + PAGE_SIZE) & PAGE_MASK) << 3;
379 ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
380 fast_iob();
381
382 return IRQ_HANDLED;
383}
384
385static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
386{
387 return fifo_count;
388}
389
390static void dma_drain(struct NCR_ESP *esp)
391{
392 u32 nw, data0, data1, scsi_data_ptr;
393 u16 *p;
394
395 nw = ioasic_read(IO_REG_SCSI_SCR);
396
397 /*
398 * Is there something in the dma buffers left?
399 */
400 if (nw) {
401 scsi_data_ptr = ioasic_read(IO_REG_SCSI_DMA_P) >> 3;
402 p = phys_to_virt(scsi_data_ptr);
403 switch (nw) {
404 case 1:
405 data0 = ioasic_read(IO_REG_SCSI_SDR0);
406 p[0] = data0 & 0xffff;
407 break;
408 case 2:
409 data0 = ioasic_read(IO_REG_SCSI_SDR0);
410 p[0] = data0 & 0xffff;
411 p[1] = (data0 >> 16) & 0xffff;
412 break;
413 case 3:
414 data0 = ioasic_read(IO_REG_SCSI_SDR0);
415 data1 = ioasic_read(IO_REG_SCSI_SDR1);
416 p[0] = data0 & 0xffff;
417 p[1] = (data0 >> 16) & 0xffff;
418 p[2] = data1 & 0xffff;
419 break;
420 default:
421 printk("Strange: %d words in dma buffer left\n", nw);
422 break;
423 }
424 }
425}
426
427static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd * sp)
428{
429 return sp->SCp.this_residual;
430}
431
432static void dma_dump_state(struct NCR_ESP *esp)
433{
434}
435
436static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
437{
438 u32 scsi_next_ptr, ioasic_ssr;
439 unsigned long flags;
440
441 if (vaddress & 3)
442 panic("dec_esp.c: unable to handle partial word transfers, yet...");
443
444 dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);
445
446 spin_lock_irqsave(&ioasic_ssr_lock, flags);
447
448 fast_mb();
449 ioasic_ssr = ioasic_read(IO_REG_SSR);
450
451 ioasic_ssr &= ~IO_SSR_SCSI_DMA_EN;
452 ioasic_write(IO_REG_SSR, ioasic_ssr);
453
454 fast_wmb();
455 ioasic_write(IO_REG_SCSI_SCR, 0);
456 ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);
457
458 /* prepare for next page */
459 scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
460 ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
461
462 ioasic_ssr |= (IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
463 fast_wmb();
464 ioasic_write(IO_REG_SSR, ioasic_ssr);
465
466 fast_iob();
467 spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
468}
469
470static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
471{
472 u32 scsi_next_ptr, ioasic_ssr;
473 unsigned long flags;
474
475 if (vaddress & 3)
476 panic("dec_esp.c: unable to handle partial word transfers, yet...");
477
478 dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);
479
480 spin_lock_irqsave(&ioasic_ssr_lock, flags);
481
482 fast_mb();
483 ioasic_ssr = ioasic_read(IO_REG_SSR);
484
485 ioasic_ssr &= ~(IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
486 ioasic_write(IO_REG_SSR, ioasic_ssr);
487
488 fast_wmb();
489 ioasic_write(IO_REG_SCSI_SCR, 0);
490 ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);
491
492 /* prepare for next page */
493 scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
494 ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
495
496 ioasic_ssr |= IO_SSR_SCSI_DMA_EN;
497 fast_wmb();
498 ioasic_write(IO_REG_SSR, ioasic_ssr);
499
500 fast_iob();
501 spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
502}
503
504static void dma_ints_off(struct NCR_ESP *esp)
505{
506 disable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
507}
508
509static void dma_ints_on(struct NCR_ESP *esp)
510{
511 enable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
512}
513
514static int dma_irq_p(struct NCR_ESP *esp)
515{
516 return (esp->eregs->esp_status & ESP_STAT_INTR);
517}
518
519static int dma_ports_p(struct NCR_ESP *esp)
520{
521 /*
522 * FIXME: what's this good for?
523 */
524 return 1;
525}
526
527static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
528{
529 /*
530 * DMA_ST_WRITE means "move data from device to memory"
531 * so when (write) is true, it actually means READ!
532 */
533 if (write)
534 dma_init_read(esp, addr, count);
535 else
536 dma_init_write(esp, addr, count);
537}
538
539static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp)
540{
541 sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
542}
543
544static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp)
545{
546 int sz = sp->SCp.buffers_residual;
547 struct scatterlist *sg = sp->SCp.buffer;
548
549 while (sz >= 0) {
550 sg[sz].dma_address = page_to_phys(sg[sz].page) + sg[sz].offset;
551 sz--;
552 }
553 sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
554}
555
556static void dma_advance_sg(struct scsi_cmnd * sp)
557{
558 sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
559}
560
561static void pmaz_dma_drain(struct NCR_ESP *esp)
562{
563 memcpy(phys_to_virt(esp_virt_buffer),
564 (void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM +
565 ESP_TGT_DMA_SIZE),
566 scsi_current_length);
567}
568
569static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
570{
571 volatile u32 *dmareg =
572 (volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);
573
574 if (length > ESP_TGT_DMA_SIZE)
575 length = ESP_TGT_DMA_SIZE;
576
577 *dmareg = TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);
578
579 iob();
580
581 esp_virt_buffer = vaddress;
582 scsi_current_length = length;
583}
584
585static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
586{
587 volatile u32 *dmareg =
588 (volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);
589
590 memcpy((void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM +
591 ESP_TGT_DMA_SIZE),
592 phys_to_virt(vaddress), length);
593
594 wmb();
595 *dmareg = TC_ESP_DMAR_WRITE | TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);
596
597 iob();
598}
599
600static void pmaz_dma_ints_off(struct NCR_ESP *esp)
601{
602}
603
604static void pmaz_dma_ints_on(struct NCR_ESP *esp)
605{
606}
607
608static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
609{
610 /*
611 * DMA_ST_WRITE means "move data from device to memory"
612 * so when (write) is true, it actually means READ!
613 */
614 if (write)
615 pmaz_dma_init_read(esp, addr, count);
616 else
617 pmaz_dma_init_write(esp, addr, count);
618}
619
620static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp)
621{
622 sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
623}
624
625
626#ifdef CONFIG_TC
627static int __init dec_esp_tc_probe(struct device *dev);
628static int __exit dec_esp_tc_remove(struct device *dev);
629
630static const struct tc_device_id dec_esp_tc_table[] = {
631 { "DEC ", "PMAZ-AA " },
632 { }
633};
634MODULE_DEVICE_TABLE(tc, dec_esp_tc_table);
635
636static struct tc_driver dec_esp_tc_driver = {
637 .id_table = dec_esp_tc_table,
638 .driver = {
639 .name = "dec_esp",
640 .bus = &tc_bus_type,
641 .probe = dec_esp_tc_probe,
642 .remove = __exit_p(dec_esp_tc_remove),
643 },
644};
645
646static int __init dec_esp_tc_probe(struct device *dev)
647{
648 int status = dec_esp_probe(dev);
649 if (!status)
650 get_device(dev);
651 return status;
652}
653
654static int __exit dec_esp_tc_remove(struct device *dev)
655{
656 put_device(dev);
657 dec_esp_remove(dev);
658 return 0;
659}
660#endif
661
662static int __init dec_esp_init(void)
663{
664 int status;
665
666 status = tc_register_driver(&dec_esp_tc_driver);
667 if (!status)
668 dec_esp_platform_probe();
669
670 if (nesps) {
671 pr_info("ESP: Total of %d ESP hosts found, "
672 "%d actually in use.\n", nesps, esps_in_use);
673 esps_running = esps_in_use;
674 }
675
676 return status;
677}
678
679static void __exit dec_esp_exit(void)
680{
681 dec_esp_platform_remove();
682 tc_unregister_driver(&dec_esp_tc_driver);
683}
684
685
686module_init(dec_esp_init);
687module_exit(dec_esp_exit);
diff --git a/drivers/scsi/fastlane.c b/drivers/scsi/fastlane.c
deleted file mode 100644
index 4266a2139b5f..000000000000
--- a/drivers/scsi/fastlane.c
+++ /dev/null
@@ -1,421 +0,0 @@
1/* fastlane.c: Driver for Phase5's Fastlane SCSI Controller.
2 *
3 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
4 *
5 * This driver is based on the CyberStorm driver, hence the occasional
6 * reference to CyberStorm.
7 *
8 * Betatesting & crucial adjustments by
9 * Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
10 *
11 */
12
13/* TODO:
14 *
15 * o According to the doc from laire, it is required to reset the DMA when
16 * the transfer is done. ATM we reset DMA just before every new
17 * dma_init_(read|write).
18 *
19 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
20 * to the caches and the Sparc MMU mapping.
21 * 2) Make as few routines required outside the generic driver. A lot of the
22 * routines in this file used to be inline!
23 */
24
25#include <linux/module.h>
26
27#include <linux/init.h>
28#include <linux/kernel.h>
29#include <linux/delay.h>
30#include <linux/types.h>
31#include <linux/string.h>
32#include <linux/slab.h>
33#include <linux/blkdev.h>
34#include <linux/proc_fs.h>
35#include <linux/stat.h>
36#include <linux/interrupt.h>
37
38#include "scsi.h"
39#include <scsi/scsi_host.h>
40#include "NCR53C9x.h"
41
42#include <linux/zorro.h>
43#include <asm/irq.h>
44
45#include <asm/amigaints.h>
46#include <asm/amigahw.h>
47
48#include <asm/pgtable.h>
49
50/* Such day has just come... */
51#if 0
52/* Let this defined unless you really need to enable DMA IRQ one day */
53#define NODMAIRQ
54#endif
55
56/* The controller registers can be found in the Z2 config area at these
57 * offsets:
58 */
59#define FASTLANE_ESP_ADDR 0x1000001
60#define FASTLANE_DMA_ADDR 0x1000041
61
62
63/* The Fastlane DMA interface */
64struct fastlane_dma_registers {
65 volatile unsigned char cond_reg; /* DMA status (ro) [0x0000] */
66#define ctrl_reg cond_reg /* DMA control (wo) [0x0000] */
67 unsigned char dmapad1[0x3f];
68 volatile unsigned char clear_strobe; /* DMA clear (wo) [0x0040] */
69};
70
71
72/* DMA status bits */
73#define FASTLANE_DMA_MINT 0x80
74#define FASTLANE_DMA_IACT 0x40
75#define FASTLANE_DMA_CREQ 0x20
76
77/* DMA control bits */
78#define FASTLANE_DMA_FCODE 0xa0
79#define FASTLANE_DMA_MASK 0xf3
80#define FASTLANE_DMA_LED 0x10 /* HD led control 1 = on */
81#define FASTLANE_DMA_WRITE 0x08 /* 1 = write */
82#define FASTLANE_DMA_ENABLE 0x04 /* Enable DMA */
83#define FASTLANE_DMA_EDI 0x02 /* Enable DMA IRQ ? */
84#define FASTLANE_DMA_ESI 0x01 /* Enable SCSI IRQ */
85
86static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
87static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
88static void dma_dump_state(struct NCR_ESP *esp);
89static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
90static void dma_init_write(struct NCR_ESP *esp, __u32 vaddr, int length);
91static void dma_ints_off(struct NCR_ESP *esp);
92static void dma_ints_on(struct NCR_ESP *esp);
93static int dma_irq_p(struct NCR_ESP *esp);
94static void dma_irq_exit(struct NCR_ESP *esp);
95static void dma_led_off(struct NCR_ESP *esp);
96static void dma_led_on(struct NCR_ESP *esp);
97static int dma_ports_p(struct NCR_ESP *esp);
98static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
99
100static unsigned char ctrl_data = 0; /* Keep backup of the stuff written
101 * to ctrl_reg. Always write a copy
102 * to this register when writing to
103 * the hardware register!
104 */
105
106static volatile unsigned char cmd_buffer[16];
107 /* This is where all commands are put
108 * before they are transferred to the ESP chip
109 * via PIO.
110 */
111
112static inline void dma_clear(struct NCR_ESP *esp)
113{
114 struct fastlane_dma_registers *dregs =
115 (struct fastlane_dma_registers *) (esp->dregs);
116 unsigned long *t;
117
118 ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
119 dregs->ctrl_reg = ctrl_data;
120
121 t = (unsigned long *)(esp->edev);
122
123 dregs->clear_strobe = 0;
124 *t = 0 ;
125}
126
127/***************************************************************** Detection */
128int __init fastlane_esp_detect(struct scsi_host_template *tpnt)
129{
130 struct NCR_ESP *esp;
131 struct zorro_dev *z = NULL;
132 unsigned long address;
133
134 if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060, z))) {
135 unsigned long board = z->resource.start;
136 if (request_mem_region(board+FASTLANE_ESP_ADDR,
137 sizeof(struct ESP_regs), "NCR53C9x")) {
138 /* Check if this is really a fastlane controller. The problem
139 * is that also the cyberstorm and blizzard controllers use
140 * this ID value. Fortunately only Fastlane maps in Z3 space
141 */
142 if (board < 0x1000000) {
143 goto err_release;
144 }
145 esp = esp_allocate(tpnt, (void *)board + FASTLANE_ESP_ADDR, 0);
146
147 /* Do command transfer with programmed I/O */
148 esp->do_pio_cmds = 1;
149
150 /* Required functions */
151 esp->dma_bytes_sent = &dma_bytes_sent;
152 esp->dma_can_transfer = &dma_can_transfer;
153 esp->dma_dump_state = &dma_dump_state;
154 esp->dma_init_read = &dma_init_read;
155 esp->dma_init_write = &dma_init_write;
156 esp->dma_ints_off = &dma_ints_off;
157 esp->dma_ints_on = &dma_ints_on;
158 esp->dma_irq_p = &dma_irq_p;
159 esp->dma_ports_p = &dma_ports_p;
160 esp->dma_setup = &dma_setup;
161
162 /* Optional functions */
163 esp->dma_barrier = 0;
164 esp->dma_drain = 0;
165 esp->dma_invalidate = 0;
166 esp->dma_irq_entry = 0;
167 esp->dma_irq_exit = &dma_irq_exit;
168 esp->dma_led_on = &dma_led_on;
169 esp->dma_led_off = &dma_led_off;
170 esp->dma_poll = 0;
171 esp->dma_reset = 0;
172
173 /* Initialize the portBits (enable IRQs) */
174 ctrl_data = (FASTLANE_DMA_FCODE |
175#ifndef NODMAIRQ
176 FASTLANE_DMA_EDI |
177#endif
178 FASTLANE_DMA_ESI);
179
180
181 /* SCSI chip clock */
182 esp->cfreq = 40000000;
183
184
185 /* Map the physical address space into virtual kernel space */
186 address = (unsigned long)
187 z_ioremap(board, z->resource.end-board+1);
188
189 if(!address){
190 printk("Could not remap Fastlane controller memory!");
191 goto err_unregister;
192 }
193
194
195 /* The DMA registers on the Fastlane are mapped
196 * relative to the device (i.e. in the same Zorro
197 * I/O block).
198 */
199 esp->dregs = (void *)(address + FASTLANE_DMA_ADDR);
200
201 /* ESP register base */
202 esp->eregs = (struct ESP_regs *)(address + FASTLANE_ESP_ADDR);
203
204 /* Board base */
205 esp->edev = (void *) address;
206
207 /* Set the command buffer */
208 esp->esp_command = cmd_buffer;
209 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
210
211 esp->irq = IRQ_AMIGA_PORTS;
212 esp->slot = board+FASTLANE_ESP_ADDR;
213 if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
214 "Fastlane SCSI", esp->ehost)) {
215 printk(KERN_WARNING "Fastlane: Could not get IRQ%d, aborting.\n", IRQ_AMIGA_PORTS);
216 goto err_unmap;
217 }
218
219 /* Controller ID */
220 esp->scsi_id = 7;
221
222 /* We don't have a differential SCSI-bus. */
223 esp->diff = 0;
224
225 dma_clear(esp);
226 esp_initialize(esp);
227
228 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
229 esps_running = esps_in_use;
230 return esps_in_use;
231 }
232 }
233 return 0;
234
235 err_unmap:
236 z_iounmap((void *)address);
237 err_unregister:
238 scsi_unregister (esp->ehost);
239 err_release:
240 release_mem_region(z->resource.start+FASTLANE_ESP_ADDR,
241 sizeof(struct ESP_regs));
242 return 0;
243}
244
245
246/************************************************************* DMA Functions */
247static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
248{
249 /* Since the Fastlane DMA is fully dedicated to the ESP chip,
250 * the number of bytes sent (to the ESP chip) equals the number
251 * of bytes in the FIFO - there is no buffering in the DMA controller.
252 * XXXX Do I read this right? It is from host to ESP, right?
253 */
254 return fifo_count;
255}
256
257static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
258{
259 unsigned long sz = sp->SCp.this_residual;
260 if(sz > 0xfffc)
261 sz = 0xfffc;
262 return sz;
263}
264
265static void dma_dump_state(struct NCR_ESP *esp)
266{
267 ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
268 esp->esp_id, ((struct fastlane_dma_registers *)
269 (esp->dregs))->cond_reg));
270 ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
271 amiga_custom.intreqr, amiga_custom.intenar));
272}
273
274static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
275{
276 struct fastlane_dma_registers *dregs =
277 (struct fastlane_dma_registers *) (esp->dregs);
278 unsigned long *t;
279
280 cache_clear(addr, length);
281
282 dma_clear(esp);
283
284 t = (unsigned long *)((addr & 0x00ffffff) + esp->edev);
285
286 dregs->clear_strobe = 0;
287 *t = addr;
288
289 ctrl_data = (ctrl_data & FASTLANE_DMA_MASK) | FASTLANE_DMA_ENABLE;
290 dregs->ctrl_reg = ctrl_data;
291}
292
293static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
294{
295 struct fastlane_dma_registers *dregs =
296 (struct fastlane_dma_registers *) (esp->dregs);
297 unsigned long *t;
298
299 cache_push(addr, length);
300
301 dma_clear(esp);
302
303 t = (unsigned long *)((addr & 0x00ffffff) + (esp->edev));
304
305 dregs->clear_strobe = 0;
306 *t = addr;
307
308 ctrl_data = ((ctrl_data & FASTLANE_DMA_MASK) |
309 FASTLANE_DMA_ENABLE |
310 FASTLANE_DMA_WRITE);
311 dregs->ctrl_reg = ctrl_data;
312}
313
314
315static void dma_ints_off(struct NCR_ESP *esp)
316{
317 disable_irq(esp->irq);
318}
319
320static void dma_ints_on(struct NCR_ESP *esp)
321{
322 enable_irq(esp->irq);
323}
324
325static void dma_irq_exit(struct NCR_ESP *esp)
326{
327 struct fastlane_dma_registers *dregs =
328 (struct fastlane_dma_registers *) (esp->dregs);
329
330 dregs->ctrl_reg = ctrl_data & ~(FASTLANE_DMA_EDI|FASTLANE_DMA_ESI);
331#ifdef __mc68000__
332 nop();
333#endif
334 dregs->ctrl_reg = ctrl_data;
335}
336
337static int dma_irq_p(struct NCR_ESP *esp)
338{
339 struct fastlane_dma_registers *dregs =
340 (struct fastlane_dma_registers *) (esp->dregs);
341 unsigned char dma_status;
342
343 dma_status = dregs->cond_reg;
344
345 if(dma_status & FASTLANE_DMA_IACT)
346 return 0; /* not our IRQ */
347
348 /* Return non-zero if ESP requested IRQ */
349 return (
350#ifndef NODMAIRQ
351 (dma_status & FASTLANE_DMA_CREQ) &&
352#endif
353 (!(dma_status & FASTLANE_DMA_MINT)) &&
354 (esp_read(((struct ESP_regs *) (esp->eregs))->esp_status) & ESP_STAT_INTR));
355}
356
357static void dma_led_off(struct NCR_ESP *esp)
358{
359 ctrl_data &= ~FASTLANE_DMA_LED;
360 ((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
361}
362
363static void dma_led_on(struct NCR_ESP *esp)
364{
365 ctrl_data |= FASTLANE_DMA_LED;
366 ((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
367}
368
369static int dma_ports_p(struct NCR_ESP *esp)
370{
371 return ((amiga_custom.intenar) & IF_PORTS);
372}
373
374static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
375{
376 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
377 * so when (write) is true, it actually means READ!
378 */
379 if(write){
380 dma_init_read(esp, addr, count);
381 } else {
382 dma_init_write(esp, addr, count);
383 }
384}
385
386#define HOSTS_C
387
388int fastlane_esp_release(struct Scsi_Host *instance)
389{
390#ifdef MODULE
391 unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
392 esp_deallocate((struct NCR_ESP *)instance->hostdata);
393 esp_release();
394 release_mem_region(address, sizeof(struct ESP_regs));
395 free_irq(IRQ_AMIGA_PORTS, esp_intr);
396#endif
397 return 1;
398}
399
400
401static struct scsi_host_template driver_template = {
402 .proc_name = "esp-fastlane",
403 .proc_info = esp_proc_info,
404 .name = "Fastlane SCSI",
405 .detect = fastlane_esp_detect,
406 .slave_alloc = esp_slave_alloc,
407 .slave_destroy = esp_slave_destroy,
408 .release = fastlane_esp_release,
409 .queuecommand = esp_queue,
410 .eh_abort_handler = esp_abort,
411 .eh_bus_reset_handler = esp_reset,
412 .can_queue = 7,
413 .this_id = 7,
414 .sg_tablesize = SG_ALL,
415 .cmd_per_lun = 1,
416 .use_clustering = ENABLE_CLUSTERING
417};
418
419#include "scsi_module.c"
420
421MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b6f99dfbb038..8a178674cb18 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -629,8 +629,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
629 int rc; 629 int rc;
630 630
631 if (tcp_conn->in.datalen) { 631 if (tcp_conn->in.datalen) {
632 printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n", 632 iscsi_conn_printk(KERN_ERR, conn,
633 tcp_conn->in.datalen); 633 "invalid R2t with datalen %d\n",
634 tcp_conn->in.datalen);
634 return ISCSI_ERR_DATALEN; 635 return ISCSI_ERR_DATALEN;
635 } 636 }
636 637
@@ -644,8 +645,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
644 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); 645 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
645 646
646 if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) { 647 if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
647 printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in " 648 iscsi_conn_printk(KERN_INFO, conn,
648 "recovery...\n", ctask->itt); 649 "dropping R2T itt %d in recovery.\n",
650 ctask->itt);
649 return 0; 651 return 0;
650 } 652 }
651 653
@@ -655,7 +657,8 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
655 r2t->exp_statsn = rhdr->statsn; 657 r2t->exp_statsn = rhdr->statsn;
656 r2t->data_length = be32_to_cpu(rhdr->data_length); 658 r2t->data_length = be32_to_cpu(rhdr->data_length);
657 if (r2t->data_length == 0) { 659 if (r2t->data_length == 0) {
658 printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n"); 660 iscsi_conn_printk(KERN_ERR, conn,
661 "invalid R2T with zero data len\n");
659 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 662 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
660 sizeof(void*)); 663 sizeof(void*));
661 return ISCSI_ERR_DATALEN; 664 return ISCSI_ERR_DATALEN;
@@ -668,9 +671,10 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
668 671
669 r2t->data_offset = be32_to_cpu(rhdr->data_offset); 672 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
670 if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) { 673 if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
671 printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at " 674 iscsi_conn_printk(KERN_ERR, conn,
672 "offset %u and total length %d\n", r2t->data_length, 675 "invalid R2T with data len %u at offset %u "
673 r2t->data_offset, scsi_bufflen(ctask->sc)); 676 "and total length %d\n", r2t->data_length,
677 r2t->data_offset, scsi_bufflen(ctask->sc));
674 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 678 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
675 sizeof(void*)); 679 sizeof(void*));
676 return ISCSI_ERR_DATALEN; 680 return ISCSI_ERR_DATALEN;
@@ -736,8 +740,9 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
736 /* verify PDU length */ 740 /* verify PDU length */
737 tcp_conn->in.datalen = ntoh24(hdr->dlength); 741 tcp_conn->in.datalen = ntoh24(hdr->dlength);
738 if (tcp_conn->in.datalen > conn->max_recv_dlength) { 742 if (tcp_conn->in.datalen > conn->max_recv_dlength) {
739 printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n", 743 iscsi_conn_printk(KERN_ERR, conn,
740 tcp_conn->in.datalen, conn->max_recv_dlength); 744 "iscsi_tcp: datalen %d > %d\n",
745 tcp_conn->in.datalen, conn->max_recv_dlength);
741 return ISCSI_ERR_DATALEN; 746 return ISCSI_ERR_DATALEN;
742 } 747 }
743 748
@@ -819,10 +824,12 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
819 * For now we fail until we find a vendor that needs it 824 * For now we fail until we find a vendor that needs it
820 */ 825 */
821 if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) { 826 if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
822 printk(KERN_ERR "iscsi_tcp: received buffer of len %u " 827 iscsi_conn_printk(KERN_ERR, conn,
823 "but conn buffer is only %u (opcode %0x)\n", 828 "iscsi_tcp: received buffer of "
824 tcp_conn->in.datalen, 829 "len %u but conn buffer is only %u "
825 ISCSI_DEF_MAX_RECV_SEG_LEN, opcode); 830 "(opcode %0x)\n",
831 tcp_conn->in.datalen,
832 ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
826 rc = ISCSI_ERR_PROTO; 833 rc = ISCSI_ERR_PROTO;
827 break; 834 break;
828 } 835 }
@@ -1496,30 +1503,25 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1496 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1503 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1497 CRYPTO_ALG_ASYNC); 1504 CRYPTO_ALG_ASYNC);
1498 tcp_conn->tx_hash.flags = 0; 1505 tcp_conn->tx_hash.flags = 0;
1499 if (IS_ERR(tcp_conn->tx_hash.tfm)) { 1506 if (IS_ERR(tcp_conn->tx_hash.tfm))
1500 printk(KERN_ERR "Could not create connection due to crc32c "
1501 "loading error %ld. Make sure the crc32c module is "
1502 "built as a module or into the kernel\n",
1503 PTR_ERR(tcp_conn->tx_hash.tfm));
1504 goto free_tcp_conn; 1507 goto free_tcp_conn;
1505 }
1506 1508
1507 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1509 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1508 CRYPTO_ALG_ASYNC); 1510 CRYPTO_ALG_ASYNC);
1509 tcp_conn->rx_hash.flags = 0; 1511 tcp_conn->rx_hash.flags = 0;
1510 if (IS_ERR(tcp_conn->rx_hash.tfm)) { 1512 if (IS_ERR(tcp_conn->rx_hash.tfm))
1511 printk(KERN_ERR "Could not create connection due to crc32c "
1512 "loading error %ld. Make sure the crc32c module is "
1513 "built as a module or into the kernel\n",
1514 PTR_ERR(tcp_conn->rx_hash.tfm));
1515 goto free_tx_tfm; 1513 goto free_tx_tfm;
1516 }
1517 1514
1518 return cls_conn; 1515 return cls_conn;
1519 1516
1520free_tx_tfm: 1517free_tx_tfm:
1521 crypto_free_hash(tcp_conn->tx_hash.tfm); 1518 crypto_free_hash(tcp_conn->tx_hash.tfm);
1522free_tcp_conn: 1519free_tcp_conn:
1520 iscsi_conn_printk(KERN_ERR, conn,
1521 "Could not create connection due to crc32c "
1522 "loading error. Make sure the crc32c "
1523 "module is built as a module or into the "
1524 "kernel\n");
1523 kfree(tcp_conn); 1525 kfree(tcp_conn);
1524tcp_conn_alloc_fail: 1526tcp_conn_alloc_fail:
1525 iscsi_conn_teardown(cls_conn); 1527 iscsi_conn_teardown(cls_conn);
@@ -1627,7 +1629,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1627 /* lookup for existing socket */ 1629 /* lookup for existing socket */
1628 sock = sockfd_lookup((int)transport_eph, &err); 1630 sock = sockfd_lookup((int)transport_eph, &err);
1629 if (!sock) { 1631 if (!sock) {
1630 printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err); 1632 iscsi_conn_printk(KERN_ERR, conn,
1633 "sockfd_lookup failed %d\n", err);
1631 return -EEXIST; 1634 return -EEXIST;
1632 } 1635 }
1633 /* 1636 /*
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 553168ae44f1..59f8445eab0d 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -160,7 +160,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
160 hdr->opcode = ISCSI_OP_SCSI_CMD; 160 hdr->opcode = ISCSI_OP_SCSI_CMD;
161 hdr->flags = ISCSI_ATTR_SIMPLE; 161 hdr->flags = ISCSI_ATTR_SIMPLE;
162 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 162 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
163 hdr->itt = build_itt(ctask->itt, conn->id, session->age); 163 hdr->itt = build_itt(ctask->itt, session->age);
164 hdr->data_length = cpu_to_be32(scsi_bufflen(sc)); 164 hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
165 hdr->cmdsn = cpu_to_be32(session->cmdsn); 165 hdr->cmdsn = cpu_to_be32(session->cmdsn);
166 session->cmdsn++; 166 session->cmdsn++;
@@ -416,8 +416,9 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
416 416
417 if (datalen < 2) { 417 if (datalen < 2) {
418invalid_datalen: 418invalid_datalen:
419 printk(KERN_ERR "iscsi: Got CHECK_CONDITION but " 419 iscsi_conn_printk(KERN_ERR, conn,
420 "invalid data buffer size of %d\n", datalen); 420 "Got CHECK_CONDITION but invalid data "
421 "buffer size of %d\n", datalen);
421 sc->result = DID_BAD_TARGET << 16; 422 sc->result = DID_BAD_TARGET << 16;
422 goto out; 423 goto out;
423 } 424 }
@@ -494,7 +495,7 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
494 495
495 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); 496 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
496 if (!mtask) { 497 if (!mtask) {
497 printk(KERN_ERR "Could not send nopout\n"); 498 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
498 return; 499 return;
499 } 500 }
500 501
@@ -522,9 +523,10 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
522 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { 523 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
523 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); 524 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
524 itt = get_itt(rejected_pdu.itt); 525 itt = get_itt(rejected_pdu.itt);
525 printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected " 526 iscsi_conn_printk(KERN_ERR, conn,
526 "due to DataDigest error.\n", itt, 527 "itt 0x%x had pdu (op 0x%x) rejected "
527 rejected_pdu.opcode); 528 "due to DataDigest error.\n", itt,
529 rejected_pdu.opcode);
528 } 530 }
529 } 531 }
530 return 0; 532 return 0;
@@ -541,8 +543,8 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
541 * queuecommand or send generic. session lock must be held and verify 543 * queuecommand or send generic. session lock must be held and verify
542 * itt must have been called. 544 * itt must have been called.
543 */ 545 */
544int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 546static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
545 char *data, int datalen) 547 char *data, int datalen)
546{ 548{
547 struct iscsi_session *session = conn->session; 549 struct iscsi_session *session = conn->session;
548 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0; 550 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
@@ -672,7 +674,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
672 674
673 return rc; 675 return rc;
674} 676}
675EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
676 677
677int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 678int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
678 char *data, int datalen) 679 char *data, int datalen)
@@ -697,18 +698,13 @@ int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
697 if (hdr->itt != RESERVED_ITT) { 698 if (hdr->itt != RESERVED_ITT) {
698 if (((__force u32)hdr->itt & ISCSI_AGE_MASK) != 699 if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
699 (session->age << ISCSI_AGE_SHIFT)) { 700 (session->age << ISCSI_AGE_SHIFT)) {
700 printk(KERN_ERR "iscsi: received itt %x expected " 701 iscsi_conn_printk(KERN_ERR, conn,
701 "session age (%x)\n", (__force u32)hdr->itt, 702 "received itt %x expected session "
702 session->age & ISCSI_AGE_MASK); 703 "age (%x)\n", (__force u32)hdr->itt,
704 session->age & ISCSI_AGE_MASK);
703 return ISCSI_ERR_BAD_ITT; 705 return ISCSI_ERR_BAD_ITT;
704 } 706 }
705 707
706 if (((__force u32)hdr->itt & ISCSI_CID_MASK) !=
707 (conn->id << ISCSI_CID_SHIFT)) {
708 printk(KERN_ERR "iscsi: received itt %x, expected "
709 "CID (%x)\n", (__force u32)hdr->itt, conn->id);
710 return ISCSI_ERR_BAD_ITT;
711 }
712 itt = get_itt(hdr->itt); 708 itt = get_itt(hdr->itt);
713 } else 709 } else
714 itt = ~0U; 710 itt = ~0U;
@@ -717,16 +713,17 @@ int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
717 ctask = session->cmds[itt]; 713 ctask = session->cmds[itt];
718 714
719 if (!ctask->sc) { 715 if (!ctask->sc) {
720 printk(KERN_INFO "iscsi: dropping ctask with " 716 iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
721 "itt 0x%x\n", ctask->itt); 717 "with itt 0x%x\n", ctask->itt);
722 /* force drop */ 718 /* force drop */
723 return ISCSI_ERR_NO_SCSI_CMD; 719 return ISCSI_ERR_NO_SCSI_CMD;
724 } 720 }
725 721
726 if (ctask->sc->SCp.phase != session->age) { 722 if (ctask->sc->SCp.phase != session->age) {
727 printk(KERN_ERR "iscsi: ctask's session age %d, " 723 iscsi_conn_printk(KERN_ERR, conn,
728 "expected %d\n", ctask->sc->SCp.phase, 724 "iscsi: ctask's session age %d, "
729 session->age); 725 "expected %d\n", ctask->sc->SCp.phase,
726 session->age);
730 return ISCSI_ERR_SESSION_FAILED; 727 return ISCSI_ERR_SESSION_FAILED;
731 } 728 }
732 } 729 }
@@ -771,7 +768,7 @@ static void iscsi_prep_mtask(struct iscsi_conn *conn,
771 */ 768 */
772 nop->cmdsn = cpu_to_be32(session->cmdsn); 769 nop->cmdsn = cpu_to_be32(session->cmdsn);
773 if (hdr->itt != RESERVED_ITT) { 770 if (hdr->itt != RESERVED_ITT) {
774 hdr->itt = build_itt(mtask->itt, conn->id, session->age); 771 hdr->itt = build_itt(mtask->itt, session->age);
775 /* 772 /*
776 * TODO: We always use immediate, so we never hit this. 773 * TODO: We always use immediate, so we never hit this.
777 * If we start to send tmfs or nops as non-immediate then 774 * If we start to send tmfs or nops as non-immediate then
@@ -997,6 +994,7 @@ enum {
997 FAILURE_SESSION_IN_RECOVERY, 994 FAILURE_SESSION_IN_RECOVERY,
998 FAILURE_SESSION_RECOVERY_TIMEOUT, 995 FAILURE_SESSION_RECOVERY_TIMEOUT,
999 FAILURE_SESSION_LOGGING_OUT, 996 FAILURE_SESSION_LOGGING_OUT,
997 FAILURE_SESSION_NOT_READY,
1000}; 998};
1001 999
1002int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) 1000int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
@@ -1017,6 +1015,12 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1017 session = iscsi_hostdata(host->hostdata); 1015 session = iscsi_hostdata(host->hostdata);
1018 spin_lock(&session->lock); 1016 spin_lock(&session->lock);
1019 1017
1018 reason = iscsi_session_chkready(session_to_cls(session));
1019 if (reason) {
1020 sc->result = reason;
1021 goto fault;
1022 }
1023
1020 /* 1024 /*
1021 * ISCSI_STATE_FAILED is a temp. state. The recovery 1025 * ISCSI_STATE_FAILED is a temp. state. The recovery
1022 * code will decide what is best to do with command queued 1026 * code will decide what is best to do with command queued
@@ -1033,18 +1037,23 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1033 switch (session->state) { 1037 switch (session->state) {
1034 case ISCSI_STATE_IN_RECOVERY: 1038 case ISCSI_STATE_IN_RECOVERY:
1035 reason = FAILURE_SESSION_IN_RECOVERY; 1039 reason = FAILURE_SESSION_IN_RECOVERY;
1036 goto reject; 1040 sc->result = DID_IMM_RETRY << 16;
1041 break;
1037 case ISCSI_STATE_LOGGING_OUT: 1042 case ISCSI_STATE_LOGGING_OUT:
1038 reason = FAILURE_SESSION_LOGGING_OUT; 1043 reason = FAILURE_SESSION_LOGGING_OUT;
1039 goto reject; 1044 sc->result = DID_IMM_RETRY << 16;
1045 break;
1040 case ISCSI_STATE_RECOVERY_FAILED: 1046 case ISCSI_STATE_RECOVERY_FAILED:
1041 reason = FAILURE_SESSION_RECOVERY_TIMEOUT; 1047 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1048 sc->result = DID_NO_CONNECT << 16;
1042 break; 1049 break;
1043 case ISCSI_STATE_TERMINATE: 1050 case ISCSI_STATE_TERMINATE:
1044 reason = FAILURE_SESSION_TERMINATE; 1051 reason = FAILURE_SESSION_TERMINATE;
1052 sc->result = DID_NO_CONNECT << 16;
1045 break; 1053 break;
1046 default: 1054 default:
1047 reason = FAILURE_SESSION_FREED; 1055 reason = FAILURE_SESSION_FREED;
1056 sc->result = DID_NO_CONNECT << 16;
1048 } 1057 }
1049 goto fault; 1058 goto fault;
1050 } 1059 }
@@ -1052,6 +1061,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1052 conn = session->leadconn; 1061 conn = session->leadconn;
1053 if (!conn) { 1062 if (!conn) {
1054 reason = FAILURE_SESSION_FREED; 1063 reason = FAILURE_SESSION_FREED;
1064 sc->result = DID_NO_CONNECT << 16;
1055 goto fault; 1065 goto fault;
1056 } 1066 }
1057 1067
@@ -1091,9 +1101,7 @@ reject:
1091 1101
1092fault: 1102fault:
1093 spin_unlock(&session->lock); 1103 spin_unlock(&session->lock);
1094 printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n", 1104 debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
1095 sc->cmnd[0], reason);
1096 sc->result = (DID_NO_CONNECT << 16);
1097 scsi_set_resid(sc, scsi_bufflen(sc)); 1105 scsi_set_resid(sc, scsi_bufflen(sc));
1098 sc->scsi_done(sc); 1106 sc->scsi_done(sc);
1099 spin_lock(host->host_lock); 1107 spin_lock(host->host_lock);
@@ -1160,7 +1168,8 @@ failed:
1160 mutex_lock(&session->eh_mutex); 1168 mutex_lock(&session->eh_mutex);
1161 spin_lock_bh(&session->lock); 1169 spin_lock_bh(&session->lock);
1162 if (session->state == ISCSI_STATE_LOGGED_IN) 1170 if (session->state == ISCSI_STATE_LOGGED_IN)
1163 printk(KERN_INFO "iscsi: host reset succeeded\n"); 1171 iscsi_session_printk(KERN_INFO, session,
1172 "host reset succeeded\n");
1164 else 1173 else
1165 goto failed; 1174 goto failed;
1166 spin_unlock_bh(&session->lock); 1175 spin_unlock_bh(&session->lock);
@@ -1239,7 +1248,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1239 * Fail commands. session lock held and recv side suspended and xmit 1248 * Fail commands. session lock held and recv side suspended and xmit
1240 * thread flushed 1249 * thread flushed
1241 */ 1250 */
1242static void fail_all_commands(struct iscsi_conn *conn, unsigned lun) 1251static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1252 int error)
1243{ 1253{
1244 struct iscsi_cmd_task *ctask, *tmp; 1254 struct iscsi_cmd_task *ctask, *tmp;
1245 1255
@@ -1251,7 +1261,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
1251 if (lun == ctask->sc->device->lun || lun == -1) { 1261 if (lun == ctask->sc->device->lun || lun == -1) {
1252 debug_scsi("failing pending sc %p itt 0x%x\n", 1262 debug_scsi("failing pending sc %p itt 0x%x\n",
1253 ctask->sc, ctask->itt); 1263 ctask->sc, ctask->itt);
1254 fail_command(conn, ctask, DID_BUS_BUSY << 16); 1264 fail_command(conn, ctask, error << 16);
1255 } 1265 }
1256 } 1266 }
1257 1267
@@ -1259,7 +1269,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
1259 if (lun == ctask->sc->device->lun || lun == -1) { 1269 if (lun == ctask->sc->device->lun || lun == -1) {
1260 debug_scsi("failing requeued sc %p itt 0x%x\n", 1270 debug_scsi("failing requeued sc %p itt 0x%x\n",
1261 ctask->sc, ctask->itt); 1271 ctask->sc, ctask->itt);
1262 fail_command(conn, ctask, DID_BUS_BUSY << 16); 1272 fail_command(conn, ctask, error << 16);
1263 } 1273 }
1264 } 1274 }
1265 1275
@@ -1357,10 +1367,10 @@ static void iscsi_check_transport_timeouts(unsigned long data)
1357 last_recv = conn->last_recv; 1367 last_recv = conn->last_recv;
1358 if (time_before_eq(last_recv + timeout + (conn->ping_timeout * HZ), 1368 if (time_before_eq(last_recv + timeout + (conn->ping_timeout * HZ),
1359 jiffies)) { 1369 jiffies)) {
1360 printk(KERN_ERR "ping timeout of %d secs expired, " 1370 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
1361 "last rx %lu, last ping %lu, now %lu\n", 1371 "expired, last rx %lu, last ping %lu, "
1362 conn->ping_timeout, last_recv, 1372 "now %lu\n", conn->ping_timeout, last_recv,
1363 conn->last_ping, jiffies); 1373 conn->last_ping, jiffies);
1364 spin_unlock(&session->lock); 1374 spin_unlock(&session->lock);
1365 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1375 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1366 return; 1376 return;
@@ -1373,14 +1383,11 @@ static void iscsi_check_transport_timeouts(unsigned long data)
1373 iscsi_send_nopout(conn, NULL); 1383 iscsi_send_nopout(conn, NULL);
1374 } 1384 }
1375 next_timeout = last_recv + timeout + (conn->ping_timeout * HZ); 1385 next_timeout = last_recv + timeout + (conn->ping_timeout * HZ);
1376 } else { 1386 } else
1377 next_timeout = last_recv + timeout; 1387 next_timeout = last_recv + timeout;
1378 }
1379 1388
1380 if (next_timeout) { 1389 debug_scsi("Setting next tmo %lu\n", next_timeout);
1381 debug_scsi("Setting next tmo %lu\n", next_timeout); 1390 mod_timer(&conn->transport_timer, next_timeout);
1382 mod_timer(&conn->transport_timer, next_timeout);
1383 }
1384done: 1391done:
1385 spin_unlock(&session->lock); 1392 spin_unlock(&session->lock);
1386} 1393}
@@ -1573,7 +1580,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1573 /* need to grab the recv lock then session lock */ 1580 /* need to grab the recv lock then session lock */
1574 write_lock_bh(conn->recv_lock); 1581 write_lock_bh(conn->recv_lock);
1575 spin_lock(&session->lock); 1582 spin_lock(&session->lock);
1576 fail_all_commands(conn, sc->device->lun); 1583 fail_all_commands(conn, sc->device->lun, DID_ERROR);
1577 conn->tmf_state = TMF_INITIAL; 1584 conn->tmf_state = TMF_INITIAL;
1578 spin_unlock(&session->lock); 1585 spin_unlock(&session->lock);
1579 write_unlock_bh(conn->recv_lock); 1586 write_unlock_bh(conn->recv_lock);
@@ -1944,9 +1951,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1944 } 1951 }
1945 spin_unlock_irqrestore(session->host->host_lock, flags); 1952 spin_unlock_irqrestore(session->host->host_lock, flags);
1946 msleep_interruptible(500); 1953 msleep_interruptible(500);
1947 printk(KERN_INFO "iscsi: scsi conn_destroy(): host_busy %d " 1954 iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
1948 "host_failed %d\n", session->host->host_busy, 1955 "host_busy %d host_failed %d\n",
1949 session->host->host_failed); 1956 session->host->host_busy,
1957 session->host->host_failed);
1950 /* 1958 /*
1951 * force eh_abort() to unblock 1959 * force eh_abort() to unblock
1952 */ 1960 */
@@ -1975,27 +1983,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
1975 struct iscsi_session *session = conn->session; 1983 struct iscsi_session *session = conn->session;
1976 1984
1977 if (!session) { 1985 if (!session) {
1978 printk(KERN_ERR "iscsi: can't start unbound connection\n"); 1986 iscsi_conn_printk(KERN_ERR, conn,
1987 "can't start unbound connection\n");
1979 return -EPERM; 1988 return -EPERM;
1980 } 1989 }
1981 1990
1982 if ((session->imm_data_en || !session->initial_r2t_en) && 1991 if ((session->imm_data_en || !session->initial_r2t_en) &&
1983 session->first_burst > session->max_burst) { 1992 session->first_burst > session->max_burst) {
1984 printk("iscsi: invalid burst lengths: " 1993 iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
1985 "first_burst %d max_burst %d\n", 1994 "first_burst %d max_burst %d\n",
1986 session->first_burst, session->max_burst); 1995 session->first_burst, session->max_burst);
1987 return -EINVAL; 1996 return -EINVAL;
1988 } 1997 }
1989 1998
1990 if (conn->ping_timeout && !conn->recv_timeout) { 1999 if (conn->ping_timeout && !conn->recv_timeout) {
1991 printk(KERN_ERR "iscsi: invalid recv timeout of zero " 2000 iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
1992 "Using 5 seconds\n."); 2001 "zero. Using 5 seconds\n.");
1993 conn->recv_timeout = 5; 2002 conn->recv_timeout = 5;
1994 } 2003 }
1995 2004
1996 if (conn->recv_timeout && !conn->ping_timeout) { 2005 if (conn->recv_timeout && !conn->ping_timeout) {
1997 printk(KERN_ERR "iscsi: invalid ping timeout of zero " 2006 iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
1998 "Using 5 seconds.\n"); 2007 "zero. Using 5 seconds.\n");
1999 conn->ping_timeout = 5; 2008 conn->ping_timeout = 5;
2000 } 2009 }
2001 2010
@@ -2019,11 +2028,9 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2019 conn->stop_stage = 0; 2028 conn->stop_stage = 0;
2020 conn->tmf_state = TMF_INITIAL; 2029 conn->tmf_state = TMF_INITIAL;
2021 session->age++; 2030 session->age++;
2022 spin_unlock_bh(&session->lock); 2031 if (session->age == 16)
2023 2032 session->age = 0;
2024 iscsi_unblock_session(session_to_cls(session)); 2033 break;
2025 wake_up(&conn->ehwait);
2026 return 0;
2027 case STOP_CONN_TERM: 2034 case STOP_CONN_TERM:
2028 conn->stop_stage = 0; 2035 conn->stop_stage = 0;
2029 break; 2036 break;
@@ -2032,6 +2039,8 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2032 } 2039 }
2033 spin_unlock_bh(&session->lock); 2040 spin_unlock_bh(&session->lock);
2034 2041
2042 iscsi_unblock_session(session_to_cls(session));
2043 wake_up(&conn->ehwait);
2035 return 0; 2044 return 0;
2036} 2045}
2037EXPORT_SYMBOL_GPL(iscsi_conn_start); 2046EXPORT_SYMBOL_GPL(iscsi_conn_start);
@@ -2123,7 +2132,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2123 * flush queues. 2132 * flush queues.
2124 */ 2133 */
2125 spin_lock_bh(&session->lock); 2134 spin_lock_bh(&session->lock);
2126 fail_all_commands(conn, -1); 2135 fail_all_commands(conn, -1,
2136 STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
2127 flush_control_queues(session, conn); 2137 flush_control_queues(session, conn);
2128 spin_unlock_bh(&session->lock); 2138 spin_unlock_bh(&session->lock);
2129 mutex_unlock(&session->eh_mutex); 2139 mutex_unlock(&session->eh_mutex);
@@ -2140,7 +2150,8 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
2140 iscsi_start_session_recovery(session, conn, flag); 2150 iscsi_start_session_recovery(session, conn, flag);
2141 break; 2151 break;
2142 default: 2152 default:
2143 printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag); 2153 iscsi_conn_printk(KERN_ERR, conn,
2154 "invalid stop flag %d\n", flag);
2144 } 2155 }
2145} 2156}
2146EXPORT_SYMBOL_GPL(iscsi_conn_stop); 2157EXPORT_SYMBOL_GPL(iscsi_conn_stop);
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
deleted file mode 100644
index bcb49021b7e2..000000000000
--- a/drivers/scsi/mac_esp.c
+++ /dev/null
@@ -1,751 +0,0 @@
1/*
2 * 68k mac 53c9[46] scsi driver
3 *
4 * copyright (c) 1998, David Weis weisd3458@uni.edu
5 *
6 * debugging on Quadra 800 and 660AV Michael Schmitz, Dave Kilzer 7/98
7 *
8 * based loosely on cyber_esp.c
9 */
10
11/* these are unused for now */
12#define myreadl(addr) (*(volatile unsigned int *) (addr))
13#define mywritel(b, addr) ((*(volatile unsigned int *) (addr)) = (b))
14
15
16#include <linux/kernel.h>
17#include <linux/delay.h>
18#include <linux/types.h>
19#include <linux/ctype.h>
20#include <linux/string.h>
21#include <linux/slab.h>
22#include <linux/blkdev.h>
23#include <linux/proc_fs.h>
24#include <linux/stat.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27
28#include "scsi.h"
29#include <scsi/scsi_host.h>
30#include "NCR53C9x.h"
31
32#include <asm/io.h>
33
34#include <asm/setup.h>
35#include <asm/irq.h>
36#include <asm/macints.h>
37#include <asm/machw.h>
38#include <asm/mac_via.h>
39
40#include <asm/pgtable.h>
41
42#include <asm/macintosh.h>
43
44/* #define DEBUG_MAC_ESP */
45
46extern void esp_handle(struct NCR_ESP *esp);
47extern void mac_esp_intr(int irq, void *dev_id);
48
49static int dma_bytes_sent(struct NCR_ESP * esp, int fifo_count);
50static int dma_can_transfer(struct NCR_ESP * esp, Scsi_Cmnd *sp);
51static void dma_dump_state(struct NCR_ESP * esp);
52static void dma_init_read(struct NCR_ESP * esp, char * vaddress, int length);
53static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length);
54static void dma_ints_off(struct NCR_ESP * esp);
55static void dma_ints_on(struct NCR_ESP * esp);
56static int dma_irq_p(struct NCR_ESP * esp);
57static int dma_irq_p_quick(struct NCR_ESP * esp);
58static void dma_led_off(struct NCR_ESP * esp);
59static void dma_led_on(struct NCR_ESP *esp);
60static int dma_ports_p(struct NCR_ESP *esp);
61static void dma_setup(struct NCR_ESP * esp, __u32 addr, int count, int write);
62static void dma_setup_quick(struct NCR_ESP * esp, __u32 addr, int count, int write);
63
64static int esp_dafb_dma_irq_p(struct NCR_ESP * espdev);
65static int esp_iosb_dma_irq_p(struct NCR_ESP * espdev);
66
67static volatile unsigned char cmd_buffer[16];
68 /* This is where all commands are put
69 * before they are transferred to the ESP chip
70 * via PIO.
71 */
72
73static int esp_initialized = 0;
74
75static int setup_num_esps = -1;
76static int setup_disconnect = -1;
77static int setup_nosync = -1;
78static int setup_can_queue = -1;
79static int setup_cmd_per_lun = -1;
80static int setup_sg_tablesize = -1;
81#ifdef SUPPORT_TAGS
82static int setup_use_tagged_queuing = -1;
83#endif
84static int setup_hostid = -1;
85
86/*
87 * Experimental ESP inthandler; check macints.c to make sure dev_id is
88 * set up properly!
89 */
90
91void mac_esp_intr(int irq, void *dev_id)
92{
93 struct NCR_ESP *esp = (struct NCR_ESP *) dev_id;
94 int irq_p = 0;
95
96 /* Handle the one ESP interrupt showing at this IRQ level. */
97 if(((esp)->irq & 0xff) == irq) {
98 /*
99 * Debug ..
100 */
101 irq_p = esp->dma_irq_p(esp);
102 printk("mac_esp: irq_p %x current %p disconnected %p\n",
103 irq_p, esp->current_SC, esp->disconnected_SC);
104
105 /*
106 * Mac: if we're here, it's an ESP interrupt for sure!
107 */
108 if((esp->current_SC || esp->disconnected_SC)) {
109 esp->dma_ints_off(esp);
110
111 ESPIRQ(("I%d(", esp->esp_id));
112 esp_handle(esp);
113 ESPIRQ((")"));
114
115 esp->dma_ints_on(esp);
116 }
117 }
118}
119
120/*
121 * Debug hooks; use for playing with the interrupt flag testing and interrupt
122 * acknowledge on the various machines
123 */
124
125void scsi_esp_polled(int irq, void *dev_id)
126{
127 if (esp_initialized == 0)
128 return;
129
130 mac_esp_intr(irq, dev_id);
131}
132
133void fake_intr(int irq, void *dev_id)
134{
135#ifdef DEBUG_MAC_ESP
136 printk("mac_esp: got irq\n");
137#endif
138
139 mac_esp_intr(irq, dev_id);
140}
141
142irqreturn_t fake_drq(int irq, void *dev_id)
143{
144 printk("mac_esp: got drq\n");
145 return IRQ_HANDLED;
146}
147
148#define DRIVER_SETUP
149
150/*
151 * Function : mac_esp_setup(char *str)
152 *
153 * Purpose : booter command line initialization of the overrides array,
154 *
155 * Inputs : str - parameters, separated by commas.
156 *
157 * Currently unused in the new driver; need to add settable parameters to the
158 * detect function.
159 *
160 */
161
162static int __init mac_esp_setup(char *str) {
163#ifdef DRIVER_SETUP
164 /* Format of mac53c9x parameter is:
165 * mac53c9x=<num_esps>,<disconnect>,<nosync>,<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
166 * Negative values mean don't change.
167 */
168
169 char *this_opt;
170 long opt;
171
172 this_opt = strsep (&str, ",");
173 if(this_opt) {
174 opt = simple_strtol( this_opt, NULL, 0 );
175
176 if (opt >= 0 && opt <= 2)
177 setup_num_esps = opt;
178 else if (opt > 2)
179 printk( "mac_esp_setup: invalid number of hosts %ld !\n", opt );
180
181 this_opt = strsep (&str, ",");
182 }
183 if(this_opt) {
184 opt = simple_strtol( this_opt, NULL, 0 );
185
186 if (opt > 0)
187 setup_disconnect = opt;
188
189 this_opt = strsep (&str, ",");
190 }
191 if(this_opt) {
192 opt = simple_strtol( this_opt, NULL, 0 );
193
194 if (opt >= 0)
195 setup_nosync = opt;
196
197 this_opt = strsep (&str, ",");
198 }
199 if(this_opt) {
200 opt = simple_strtol( this_opt, NULL, 0 );
201
202 if (opt > 0)
203 setup_can_queue = opt;
204
205 this_opt = strsep (&str, ",");
206 }
207 if(this_opt) {
208 opt = simple_strtol( this_opt, NULL, 0 );
209
210 if (opt > 0)
211 setup_cmd_per_lun = opt;
212
213 this_opt = strsep (&str, ",");
214 }
215 if(this_opt) {
216 opt = simple_strtol( this_opt, NULL, 0 );
217
218 if (opt >= 0) {
219 setup_sg_tablesize = opt;
220 /* Must be <= SG_ALL (255) */
221 if (setup_sg_tablesize > SG_ALL)
222 setup_sg_tablesize = SG_ALL;
223 }
224
225 this_opt = strsep (&str, ",");
226 }
227 if(this_opt) {
228 opt = simple_strtol( this_opt, NULL, 0 );
229
230 /* Must be between 0 and 7 */
231 if (opt >= 0 && opt <= 7)
232 setup_hostid = opt;
233 else if (opt > 7)
234 printk( "mac_esp_setup: invalid host ID %ld !\n", opt);
235
236 this_opt = strsep (&str, ",");
237 }
238#ifdef SUPPORT_TAGS
239 if(this_opt) {
240 opt = simple_strtol( this_opt, NULL, 0 );
241 if (opt >= 0)
242 setup_use_tagged_queuing = !!opt;
243 }
244#endif
245#endif
246 return 1;
247}
248
249__setup("mac53c9x=", mac_esp_setup);
250
251
252/*
253 * ESP address 'detection'
254 */
255
256unsigned long get_base(int chip_num)
257{
258 /*
259 * using the chip_num and mac model, figure out where the
260 * chips are mapped
261 */
262
263 unsigned long io_base = 0x50f00000;
264 unsigned int second_offset = 0x402;
265 unsigned long scsi_loc = 0;
266
267 switch (macintosh_config->scsi_type) {
268
269 /* 950, 900, 700 */
270 case MAC_SCSI_QUADRA2:
271 scsi_loc = io_base + 0xf000 + ((chip_num == 0) ? 0 : second_offset);
272 break;
273
274 /* av's */
275 case MAC_SCSI_QUADRA3:
276 scsi_loc = io_base + 0x18000 + ((chip_num == 0) ? 0 : second_offset);
277 break;
278
279 /* most quadra/centris models are like this */
280 case MAC_SCSI_QUADRA:
281 scsi_loc = io_base + 0x10000;
282 break;
283
284 default:
285 printk("mac_esp: get_base: hit default!\n");
286 scsi_loc = io_base + 0x10000;
287 break;
288
289 } /* switch */
290
291 printk("mac_esp: io base at 0x%lx\n", scsi_loc);
292
293 return scsi_loc;
294}
295
/*
 * Model dependent ESP setup
 *
 * Probe for the 53C96 chip(s) present on this Mac model, apply any
 * "mac53c9x=" boot-option overrides to the host template, and register
 * one NCR_ESP instance per chip via esp_initialize().  Returns the
 * number of controllers found, or -ENODEV if detection already ran.
 */
int mac_esp_detect(struct scsi_host_template * tpnt)
{
	int quick = 0;		/* set when 5380-style handshake glue is present */
	int chipnum, chipspresent = 0;
#if 0
	unsigned long timeout;
#endif

	/* only probe once; re-running would register the chips twice */
	if (esp_initialized > 0)
		return -ENODEV;

	/* what do we have in this machine... */
	if (MACHW_PRESENT(MAC_SCSI_96)) {
		chipspresent ++;
	}

	if (MACHW_PRESENT(MAC_SCSI_96_2)) {
		chipspresent ++;
	}

	/* number of ESPs present ? (boot option can lower, never raise it) */
	if (setup_num_esps >= 0) {
		if (chipspresent >= setup_num_esps)
			chipspresent = setup_num_esps;
		else
			printk("mac_esp_detect: num_hosts detected %d setup %d \n",
			       chipspresent, setup_num_esps);
	}

	/* TODO: add disconnect / nosync flags */

	/* setup variables: boot options win, otherwise conservative defaults */
	tpnt->can_queue =
		(setup_can_queue > 0) ? setup_can_queue : 7;
	tpnt->cmd_per_lun =
		(setup_cmd_per_lun > 0) ? setup_cmd_per_lun : 1;
	tpnt->sg_tablesize =
		(setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_ALL;

	if (setup_hostid >= 0)
		tpnt->this_id = setup_hostid;
	else {
		/* use 7 as default */
		tpnt->this_id = 7;
	}

#ifdef SUPPORT_TAGS
	if (setup_use_tagged_queuing < 0)
		setup_use_tagged_queuing = DEFAULT_USE_TAGGED_QUEUING;
#endif

	for (chipnum = 0; chipnum < chipspresent; chipnum ++) {
		struct NCR_ESP * esp;

		esp = esp_allocate(tpnt, NULL, 0);
		esp->eregs = (struct ESP_regs *) get_base(chipnum);

		/* default interrupt-pending hook; overridden below for IOSB */
		esp->dma_irq_p = &esp_dafb_dma_irq_p;
		if (chipnum == 0) {

			if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) {
				/* most machines except those below :-) */
				quick = 1;
				esp->dma_irq_p = &esp_iosb_dma_irq_p;
			} else if (macintosh_config->scsi_type == MAC_SCSI_QUADRA3) {
				/* mostly av's */
				quick = 0;
			} else {
				/* q950, 900, 700 */
				quick = 1;
				/* NOTE(review): 0xf9800024 looks like the DMA
				 * control register for chip 0 and 0x1d1 its
				 * enable value -- confirm against hw docs */
				out_be32(0xf9800024, 0x1d1);
				esp->dregs = (void *) 0xf9800024;
			}

		} else { /* chipnum */

			quick = 1;
			out_be32(0xf9800028, 0x1d1);
			esp->dregs = (void *) 0xf9800028;

		} /* chipnum == 0 */

		/* use pio for command bytes; pio for message/data: TBI */
		esp->do_pio_cmds = 1;

		/* Set the command buffer (virtual address doubles as "dvma") */
		esp->esp_command = (volatile unsigned char*) cmd_buffer;
		esp->esp_command_dvma = (__u32) cmd_buffer;

		/* various functions; read/write init left NULL (PIO only) */
		esp->dma_bytes_sent = &dma_bytes_sent;
		esp->dma_can_transfer = &dma_can_transfer;
		esp->dma_dump_state = &dma_dump_state;
		esp->dma_init_read = NULL;
		esp->dma_init_write = NULL;
		esp->dma_ints_off = &dma_ints_off;
		esp->dma_ints_on = &dma_ints_on;

		esp->dma_ports_p = &dma_ports_p;


		/* Optional functions */
		esp->dma_barrier = NULL;
		esp->dma_drain = NULL;
		esp->dma_invalidate = NULL;
		esp->dma_irq_entry = NULL;
		esp->dma_irq_exit = NULL;
		esp->dma_led_on = NULL;
		esp->dma_led_off = NULL;
		esp->dma_poll = NULL;
		esp->dma_reset = NULL;

		/* SCSI chip speed */
		/* below esp->cfreq = 40000000; */


		if (quick) {
			/* 'quick' means there's handshake glue logic like in the 5380 case */
			esp->dma_setup = &dma_setup_quick;
		} else {
			esp->dma_setup = &dma_setup;
		}

		if (chipnum == 0) {

			esp->irq = IRQ_MAC_SCSI;

			/* NOTE(review): request_irq() return value is ignored;
			 * a failure would leave a dead host registered */
			request_irq(IRQ_MAC_SCSI, esp_intr, 0, "Mac ESP SCSI", esp->ehost);
#if 0	/* conflicts with IOP ADB */
			request_irq(IRQ_MAC_SCSIDRQ, fake_drq, 0, "Mac ESP DRQ", esp->ehost);
#endif

			if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) {
				esp->cfreq = 16500000;
			} else {
				esp->cfreq = 25000000;
			}


		} else { /* chipnum == 1 */

			esp->irq = IRQ_MAC_SCSIDRQ;
#if 0	/* conflicts with IOP ADB */
			request_irq(IRQ_MAC_SCSIDRQ, esp_intr, 0, "Mac ESP SCSI 2", esp->ehost);
#endif

			esp->cfreq = 25000000;

		}

		if (quick) {
			printk("esp: using quick version\n");
		}

		printk("esp: addr at 0x%p\n", esp->eregs);

		esp->scsi_id = 7;
		esp->diff = 0;	/* single-ended bus */

		esp_initialize(esp);

	} /* for chipnum */

	if (chipspresent)
		printk("\nmac_esp: %d esp controllers found\n", chipspresent);

	esp_initialized = chipspresent;

	return chipspresent;
}
470
471static int mac_esp_release(struct Scsi_Host *shost)
472{
473 if (shost->irq)
474 free_irq(shost->irq, NULL);
475 if (shost->io_port && shost->n_io_port)
476 release_region(shost->io_port, shost->n_io_port);
477 scsi_unregister(shost);
478 return 0;
479}
480
481/*
482 * I've been wondering what this is supposed to do, for some time. Talking
483 * to Allen Briggs: These machines have an extra register someplace where the
484 * DRQ pin of the ESP can be monitored. That isn't useful for determining
485 * anything else (such as reselect interrupt or other magic) though.
 * Maybe the semantics should be changed like
487 * if (esp->current_SC)
488 * ... check DRQ flag ...
489 * else
490 * ... disconnected, check pending VIA interrupt ...
491 *
492 * There's a problem with using the dabf flag or mac_irq_pending() here: both
493 * seem to return 1 even though no interrupt is currently pending, resulting
494 * in esp_exec_cmd() holding off the next command, and possibly infinite loops
495 * in esp_intr().
496 * Short term fix: just use esp_status & ESP_STAT_INTR here, as long as we
497 * use simple PIO. The DRQ status will be important when implementing pseudo
498 * DMA mode (set up ESP transfer count, return, do a batch of bytes in PIO or
499 * 'hardware handshake' mode upon DRQ).
500 * If you plan on changing this (i.e. to save the esp_status register access in
501 * favor of a VIA register access or a shadow register for the IFR), make sure
502 * to try a debug version of this first to monitor what registers would be a good
503 * indicator of the ESP interrupt.
504 */
505
506static int esp_dafb_dma_irq_p(struct NCR_ESP * esp)
507{
508 unsigned int ret;
509 int sreg = esp_read(esp->eregs->esp_status);
510
511#ifdef DEBUG_MAC_ESP
512 printk("mac_esp: esp_dafb_dma_irq_p dafb %d irq %d\n",
513 readl(esp->dregs), mac_irq_pending(IRQ_MAC_SCSI));
514#endif
515
516 sreg &= ESP_STAT_INTR;
517
518 /*
519 * maybe working; this is essentially what's used for iosb_dma_irq_p
520 */
521 if (sreg)
522 return 1;
523 else
524 return 0;
525
526 /*
527 * didn't work ...
528 */
529#if 0
530 if (esp->current_SC)
531 ret = readl(esp->dregs) & 0x200;
532 else if (esp->disconnected_SC)
533 ret = 1; /* sreg ?? */
534 else
535 ret = mac_irq_pending(IRQ_MAC_SCSI);
536
537 return(ret);
538#endif
539
540}
541
542/*
543 * See above: testing mac_irq_pending always returned 8 (SCSI IRQ) regardless
544 * of the actual ESP status.
545 */
546
547static int esp_iosb_dma_irq_p(struct NCR_ESP * esp)
548{
549 int ret = mac_irq_pending(IRQ_MAC_SCSI) || mac_irq_pending(IRQ_MAC_SCSIDRQ);
550 int sreg = esp_read(esp->eregs->esp_status);
551
552#ifdef DEBUG_MAC_ESP
553 printk("mac_esp: dma_irq_p drq %d irq %d sreg %x curr %p disc %p\n",
554 mac_irq_pending(IRQ_MAC_SCSIDRQ), mac_irq_pending(IRQ_MAC_SCSI),
555 sreg, esp->current_SC, esp->disconnected_SC);
556#endif
557
558 sreg &= ESP_STAT_INTR;
559
560 if (sreg)
561 return (sreg);
562 else
563 return 0;
564}
565
566/*
567 * This seems to be OK for PIO at least ... usually 0 after PIO.
568 */
569
/*
 * Report how many bytes actually went out.  With PIO the FIFO count the
 * core hands us is already the answer, so pass it straight back.
 */
static int dma_bytes_sent(struct NCR_ESP * esp, int fifo_count)
{
	int sent = fifo_count;

#ifdef DEBUG_MAC_ESP
	printk("mac_esp: dma bytes sent = %x\n", sent);
#endif

	return sent;
}
579
580/*
581 * dma_can_transfer is used to switch between DMA and PIO, if DMA (pseudo)
582 * is ever implemented. Returning 0 here will use PIO.
583 */
584
585static int dma_can_transfer(struct NCR_ESP * esp, Scsi_Cmnd * sp)
586{
587 unsigned long sz = sp->SCp.this_residual;
588#if 0 /* no DMA yet; make conditional */
589 if (sz > 0x10000000) {
590 sz = 0x10000000;
591 }
592 printk("mac_esp: dma can transfer = 0lx%x\n", sz);
593#else
594
595#ifdef DEBUG_MAC_ESP
596 printk("mac_esp: pio to transfer = %ld\n", sz);
597#endif
598
599 sz = 0;
600#endif
601 return sz;
602}
603
604/*
605 * Not yet ...
606 */
607
/* Debug hook: currently only traces the call; the real register dump of
 * the DMA glue is stubbed out below. */
static void dma_dump_state(struct NCR_ESP * esp)
{
#ifdef DEBUG_MAC_ESP
	printk("mac_esp: dma_dump_state: called\n");
#endif
#if 0
	ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
		esp->esp_id, ((struct mac_dma_registers *)
		(esp->dregs))->cond_reg));
#endif
}
619
620/*
621 * DMA setup: should be used to set up the ESP transfer count for pseudo
622 * DMA transfers; need a DRQ transfer function to do the actual transfer
623 */
624
/* Pseudo-DMA read setup is not implemented; this stub only logs.  It is
 * also not hooked up: mac_esp_detect() sets esp->dma_init_read = NULL. */
static void dma_init_read(struct NCR_ESP * esp, char * vaddress, int length)
{
	printk("mac_esp: dma_init_read\n");
}
629
630
/* Pseudo-DMA write setup is not implemented; this stub only logs.  It is
 * also not hooked up: mac_esp_detect() sets esp->dma_init_write = NULL. */
static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length)
{
	printk("mac_esp: dma_init_write\n");
}
635
636
/* Mask this host's interrupt line. */
static void dma_ints_off(struct NCR_ESP * esp)
{
	disable_irq(esp->irq);
}
641
642
/* Re-enable this host's interrupt line. */
static void dma_ints_on(struct NCR_ESP * esp)
{
	enable_irq(esp->irq);
}
647
648/*
649 * generic dma_irq_p(), unused
650 */
651
652static int dma_irq_p(struct NCR_ESP * esp)
653{
654 int i = esp_read(esp->eregs->esp_status);
655
656#ifdef DEBUG_MAC_ESP
657 printk("mac_esp: dma_irq_p status %d\n", i);
658#endif
659
660 return (i & ESP_STAT_INTR);
661}
662
663static int dma_irq_p_quick(struct NCR_ESP * esp)
664{
665 /*
666 * Copied from iosb_dma_irq_p()
667 */
668 int ret = mac_irq_pending(IRQ_MAC_SCSI) || mac_irq_pending(IRQ_MAC_SCSIDRQ);
669 int sreg = esp_read(esp->eregs->esp_status);
670
671#ifdef DEBUG_MAC_ESP
672 printk("mac_esp: dma_irq_p drq %d irq %d sreg %x curr %p disc %p\n",
673 mac_irq_pending(IRQ_MAC_SCSIDRQ), mac_irq_pending(IRQ_MAC_SCSI),
674 sreg, esp->current_SC, esp->disconnected_SC);
675#endif
676
677 sreg &= ESP_STAT_INTR;
678
679 if (sreg)
680 return (sreg);
681 else
682 return 0;
683
684}
685
/* Activity LED control is not implemented on the Mac; debug trace only. */
static void dma_led_off(struct NCR_ESP * esp)
{
#ifdef DEBUG_MAC_ESP
	printk("mac_esp: dma_led_off: called\n");
#endif
}
692
693
/* Activity LED control is not implemented on the Mac; debug trace only. */
static void dma_led_on(struct NCR_ESP * esp)
{
#ifdef DEBUG_MAC_ESP
	printk("mac_esp: dma_led_on: called\n");
#endif
}
700
701
/* No DMA port state to report here (PIO only); other NCR53C9x ports use
 * this hook to report whether their DMA/IRQ gating is active. */
static int dma_ports_p(struct NCR_ESP * esp)
{
	return 0;
}
706
707
/* Dispatch a transfer to the (stub) init routines.
 * NOTE(review): 'write' selects dma_init_read() here, the opposite of the
 * mca port below -- presumably 'write' means "write to memory" (device
 * read) in this driver; confirm before enabling real (pseudo) DMA. */
static void dma_setup(struct NCR_ESP * esp, __u32 addr, int count, int write)
{

#ifdef DEBUG_MAC_ESP
	printk("mac_esp: dma_setup\n");
#endif

	if (write) {
		dma_init_read(esp, (char *) addr, count);
	} else {
		dma_init_write(esp, (char *) addr, count);
	}
}
721
722
/* With handshake glue logic ('quick' machines) no DMA setup is needed;
 * this stub only traces. */
static void dma_setup_quick(struct NCR_ESP * esp, __u32 addr, int count, int write)
{
#ifdef DEBUG_MAC_ESP
	printk("mac_esp: dma_setup_quick\n");
#endif
}
729
/* Legacy (detect/release style) host template.  The queue limits below
 * are defaults only; mac_esp_detect() may overwrite them from the
 * "mac53c9x=" boot options.  esp_* entry points come from the shared
 * NCR53C9x core. */
static struct scsi_host_template driver_template = {
	.proc_name		= "mac_esp",
	.name			= "Mac 53C9x SCSI",
	.detect			= mac_esp_detect,
	.slave_alloc		= esp_slave_alloc,
	.slave_destroy		= esp_slave_destroy,
	.release		= mac_esp_release,
	.info			= esp_info,
	.queuecommand		= esp_queue,
	.eh_abort_handler	= esp_abort,
	.eh_bus_reset_handler	= esp_reset,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.use_clustering		= DISABLE_CLUSTERING
};
747
748
749#include "scsi_module.c"
750
751MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/mca_53c9x.c b/drivers/scsi/mca_53c9x.c
deleted file mode 100644
index d693d0f21395..000000000000
--- a/drivers/scsi/mca_53c9x.c
+++ /dev/null
@@ -1,520 +0,0 @@
1/* mca_53c9x.c: Driver for the SCSI adapter found on NCR 35xx
2 * (and maybe some other) Microchannel machines
3 *
4 * Code taken mostly from Cyberstorm SCSI drivers
5 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
6 *
7 * Hacked to work with the NCR MCA stuff by Tymm Twillman (tymm@computer.org)
8 *
9 * The CyberStorm SCSI driver (and this driver) is based on David S. Miller's
10 * ESP driver * for the Sparc computers.
11 *
12 * Special thanks to Ken Stewart at Symbios (LSI) for helping with info on
13 * the 86C01. I was on the brink of going ga-ga...
14 *
15 * Also thanks to Jesper Skov for helping me with info on how the Amiga
16 * does things...
17 */
18
19/*
20 * This is currently only set up to use one 53c9x card at a time; it could be
21 * changed fairly easily to detect/use more than one, but I'm not too sure how
22 * many cards that use the 53c9x on MCA systems there are (if, in fact, there
23 * are cards that use them, other than the one built into some NCR systems)...
24 * If anyone requests this, I'll throw it in, otherwise it's not worth the
25 * effort.
26 */
27
28/*
29 * Info on the 86C01 MCA interface chip at the bottom, if you care enough to
30 * look.
31 */
32
33#include <linux/delay.h>
34#include <linux/interrupt.h>
35#include <linux/kernel.h>
36#include <linux/mca.h>
37#include <linux/types.h>
38#include <linux/string.h>
39#include <linux/slab.h>
40#include <linux/blkdev.h>
41#include <linux/proc_fs.h>
42#include <linux/stat.h>
43#include <linux/mca-legacy.h>
44
45#include "scsi.h"
46#include <scsi/scsi_host.h>
47#include "NCR53C9x.h"
48
49#include <asm/dma.h>
50#include <asm/irq.h>
51#include <asm/mca_dma.h>
52#include <asm/pgtable.h>
53
54/*
55 * From ibmmca.c (IBM scsi controller card driver) -- used for turning PS2 disk
56 * activity LED on and off
57 */
58
59#define PS2_SYS_CTR 0x92
60
61/* Ports the ncr's 53c94 can be put at; indexed by pos register value */
62
63#define MCA_53C9X_IO_PORTS { \
64 0x0000, 0x0240, 0x0340, 0x0400, \
65 0x0420, 0x3240, 0x8240, 0xA240, \
66 }
67
68/*
69 * Supposedly there were some cards put together with the 'c9x and 86c01. If
70 * they have different ID's from the ones on the 3500 series machines,
71 * you can add them here and hopefully things will work out.
72 */
73
74#define MCA_53C9X_IDS { \
75 0x7F4C, \
76 0x0000, \
77 }
78
79static int dma_bytes_sent(struct NCR_ESP *, int);
80static int dma_can_transfer(struct NCR_ESP *, Scsi_Cmnd *);
81static void dma_dump_state(struct NCR_ESP *);
82static void dma_init_read(struct NCR_ESP *, __u32, int);
83static void dma_init_write(struct NCR_ESP *, __u32, int);
84static void dma_ints_off(struct NCR_ESP *);
85static void dma_ints_on(struct NCR_ESP *);
86static int dma_irq_p(struct NCR_ESP *);
87static int dma_ports_p(struct NCR_ESP *);
88static void dma_setup(struct NCR_ESP *, __u32, int, int);
89static void dma_led_on(struct NCR_ESP *);
90static void dma_led_off(struct NCR_ESP *);
91
/* This is where all commands are put before they are transferred to the
 * 53c9x via PIO.
 */
95
96static volatile unsigned char cmd_buffer[16];
97
98/*
99 * We keep the structure that is used to access the registers on the 53c9x
100 * here.
101 */
102
103static struct ESP_regs eregs;
104
/***************************************************************** Detection */
/*
 * Probe the MCA bus for a 53c9x adapter behind an NCR 86C01 interface
 * chip, decode its POS registers (IO base, IRQ, DMA channel, SCSI ID),
 * claim the resources and register a single NCR_ESP host.  Only the
 * first matching adapter is used.  Returns esps_in_use.
 */
static int mca_esp_detect(struct scsi_host_template *tpnt)
{
	struct NCR_ESP *esp;
	static int io_port_by_pos[] = MCA_53C9X_IO_PORTS;
	int mca_53c9x_ids[] = MCA_53C9X_IDS;
	int *id_to_check = mca_53c9x_ids;
	int slot;
	int pos[3];
	unsigned int tmp_io_addr;
	unsigned char tmp_byte;


	if (!MCA_bus)
		return 0;

	while (*id_to_check) {
		if ((slot = mca_find_adapter(*id_to_check, 0)) !=
		    MCA_NOTFOUND)
		{
			esp = esp_allocate(tpnt, NULL, 0);

			/* POS registers 2..4 hold the card configuration */
			pos[0] = mca_read_stored_pos(slot, 2);
			pos[1] = mca_read_stored_pos(slot, 3);
			pos[2] = mca_read_stored_pos(slot, 4);

			esp->eregs = &eregs;

			/*
			 * IO port base is given in the first (non-ID) pos
			 * register, like so:
			 *
			 *	Bits 3  2  1	   IO base
			 *	----------------------------
			 *	     0  0  0	   <disabled>
			 *	     0  0  1	   0x0240
			 *	     0  1  0	   0x0340
			 *	     0  1  1	   0x0400
			 *	     1  0  0	   0x0420
			 *	     1  0  1	   0x3240
			 *	     1  1  0	   0x8240
			 *	     1  1  1	   0xA240
			 */

			tmp_io_addr =
				io_port_by_pos[(pos[0] & 0x0E) >> 1];

			/* the 53c9x registers sit 0x10 above the 86C01 base */
			esp->eregs->io_addr = tmp_io_addr + 0x10;

			if (esp->eregs->io_addr == 0x0000) {
				printk("Adapter is disabled.\n");
				break;
			}

			/*
			 * IRQ is specified in bits 4 and 5:
			 *
			 *	Bits  4  5	  IRQ
			 *	-----------------------
			 *	      0  0	   3
			 *	      0  1	   5
			 *	      1  0	   7
			 *	      1  1	   9
			 */

			esp->irq = ((pos[0] & 0x30) >> 3) + 3;

			/*
			 * DMA channel is in the low 3 bits of the second
			 * POS register
			 */

			esp->dma = pos[1] & 7;
			esp->slot = slot;

			if (request_irq(esp->irq, esp_intr, 0,
					"NCR 53c9x SCSI", esp->ehost))
			{
				printk("Unable to request IRQ %d.\n", esp->irq);
				esp_deallocate(esp);
				scsi_unregister(esp->ehost);
				return 0;
			}

			if (request_dma(esp->dma, "NCR 53c9x SCSI")) {
				printk("Unable to request DMA channel %d.\n",
				       esp->dma);
				/* NOTE(review): free_irq() is handed esp_intr
				 * as dev_id, but request_irq() above registered
				 * esp->ehost -- these should match. */
				free_irq(esp->irq, esp_intr);
				esp_deallocate(esp);
				scsi_unregister(esp->ehost);
				return 0;
			}

			request_region(tmp_io_addr, 32, "NCR 53c9x SCSI");

			/*
			 * 86C01 handles DMA, IO mode, from address
			 * (base + 0x0a)
			 */

			mca_disable_dma(esp->dma);
			mca_set_dma_io(esp->dma, tmp_io_addr + 0x0a);
			mca_enable_dma(esp->dma);

			/* Tell the 86C01 to give us interrupts (mode reg bit 6) */

			tmp_byte = inb(tmp_io_addr + 0x02) | 0x40;
			outb(tmp_byte, tmp_io_addr + 0x02);

			/*
			 * Scsi ID -- general purpose register, hi
			 * 2 bits; add 4 to this number to get the
			 * ID
			 */

			esp->scsi_id = ((pos[2] & 0xC0) >> 6) + 4;

			/* Do command transfer with programmed I/O */

			esp->do_pio_cmds = 1;

			/* Required functions */

			esp->dma_bytes_sent = &dma_bytes_sent;
			esp->dma_can_transfer = &dma_can_transfer;
			esp->dma_dump_state = &dma_dump_state;
			esp->dma_init_read = &dma_init_read;
			esp->dma_init_write = &dma_init_write;
			esp->dma_ints_off = &dma_ints_off;
			esp->dma_ints_on = &dma_ints_on;
			esp->dma_irq_p = &dma_irq_p;
			esp->dma_ports_p = &dma_ports_p;
			esp->dma_setup = &dma_setup;

			/* Optional functions */

			esp->dma_barrier = NULL;
			esp->dma_drain = NULL;
			esp->dma_invalidate = NULL;
			esp->dma_irq_entry = NULL;
			esp->dma_irq_exit = NULL;
			esp->dma_led_on = dma_led_on;
			esp->dma_led_off = dma_led_off;
			esp->dma_poll = NULL;
			esp->dma_reset = NULL;

			/* Set the command buffer */

			esp->esp_command = (volatile unsigned char*)
					     cmd_buffer;
			esp->esp_command_dvma = isa_virt_to_bus(cmd_buffer);

			/* SCSI chip speed */

			esp->cfreq = 25000000;

			/* Differential SCSI? I think not. */

			esp->diff = 0;

			esp_initialize(esp);

			printk(" Adapter found in slot %2d: io port 0x%x "
			  "irq %d dma channel %d\n", slot + 1, tmp_io_addr,
			   esp->irq, esp->dma);

			mca_set_adapter_name(slot, "NCR 53C9X SCSI Adapter");
			mca_mark_as_used(slot);

			break;
		}

		id_to_check++;
	}

	return esps_in_use;
}
282
283
284/******************************************************************* Release */
285
286static int mca_esp_release(struct Scsi_Host *host)
287{
288 struct NCR_ESP *esp = (struct NCR_ESP *)host->hostdata;
289 unsigned char tmp_byte;
290
291 esp_deallocate(esp);
292 /*
293 * Tell the 86C01 to stop sending interrupts
294 */
295
296 tmp_byte = inb(esp->eregs->io_addr - 0x0E);
297 tmp_byte &= ~0x40;
298 outb(tmp_byte, esp->eregs->io_addr - 0x0E);
299
300 free_irq(esp->irq, esp_intr);
301 free_dma(esp->dma);
302
303 mca_mark_as_unused(esp->slot);
304
305 return 0;
306}
307
308/************************************************************* DMA Functions */
/* Report bytes actually transferred.  Ask the 53c9x. It knows: the FIFO
 * count handed in by the core is already the answer. */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
	int sent = fifo_count;

	return sent;
}
315
316static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
317{
318 /*
319 * The MCA dma channels can only do up to 128K bytes at a time.
320 * (16 bit mode)
321 */
322
323 unsigned long sz = sp->SCp.this_residual;
324 if(sz > 0x20000)
325 sz = 0x20000;
326 return sz;
327}
328
/* Debug hook: log the MCA DMA channel and its remaining byte count. */
static void dma_dump_state(struct NCR_ESP *esp)
{
	/*
	 * Doesn't quite match up to the other drivers, but we do what we
	 * can.
	 */

	ESPLOG(("esp%d: dma channel <%d>\n", esp->esp_id, esp->dma));
	ESPLOG(("bytes left to dma: %d\n", mca_get_dma_residue(esp->dma)));
}
339
/* Program the MCA DMA controller for a "read" transfer of 'length' bytes
 * at physical address 'addr' (16-bit IO-mode; MCA_DMA_MODE_WRITE clear,
 * the mirror image of dma_init_write() below). */
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
	unsigned long flags;


	/* legacy global IRQ disable while the channel is reprogrammed */
	save_flags(flags);
	cli();

	mca_disable_dma(esp->dma);
	mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_16 |
			 MCA_DMA_MODE_IO);
	mca_set_dma_addr(esp->dma, addr);
	/* count is in 16-bit words, hence /2 -- assumes 'length' is even
	 * (the original "!!!" flags this) */
	mca_set_dma_count(esp->dma, length / 2); /* !!! */
	mca_enable_dma(esp->dma);

	restore_flags(flags);
}
357
/* Program the MCA DMA controller for a "write" transfer of 'length' bytes
 * at physical address 'addr' (16-bit IO-mode with MCA_DMA_MODE_WRITE set). */
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
	unsigned long flags;


	/* legacy global IRQ disable while the channel is reprogrammed */
	save_flags(flags);
	cli();

	mca_disable_dma(esp->dma);
	mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_WRITE |
			 MCA_DMA_MODE_16 | MCA_DMA_MODE_IO);
	mca_set_dma_addr(esp->dma, addr);
	/* count is in 16-bit words -- assumes even 'length' (the "!!!") */
	mca_set_dma_count(esp->dma, length / 2); /* !!! */
	mca_enable_dma(esp->dma);

	restore_flags(flags);
}
375
376static void dma_ints_off(struct NCR_ESP *esp)
377{
378 /*
379 * Tell the 'C01 to shut up. All interrupts are routed through it.
380 */
381
382 outb(inb(esp->eregs->io_addr - 0x0E) & ~0x40,
383 esp->eregs->io_addr - 0x0E);
384}
385
386static void dma_ints_on(struct NCR_ESP *esp)
387{
388 /*
389 * Ok. You can speak again.
390 */
391
392 outb(inb(esp->eregs->io_addr - 0x0E) | 0x40,
393 esp->eregs->io_addr - 0x0E);
394}
395
396static int dma_irq_p(struct NCR_ESP *esp)
397{
398 /*
399 * DaveM says that this should return a "yes" if there is an interrupt
400 * or a DMA error occurred. I copied the Amiga driver's semantics,
401 * though, because it seems to work and we can't really tell if
402 * a DMA error happened. This gives the "yes" if the scsi chip
403 * is sending an interrupt and no DMA activity is taking place
404 */
405
406 return (!(inb(esp->eregs->io_addr - 0x04) & 1) &&
407 !(inb(esp->eregs->io_addr - 0x04) & 2) );
408}
409
410static int dma_ports_p(struct NCR_ESP *esp)
411{
412 /*
413 * Check to see if interrupts are enabled on the 'C01 (in case abort
414 * is entered multiple times, so we only do the abort once)
415 */
416
417 return (inb(esp->eregs->io_addr - 0x0E) & 0x40) ? 1:0;
418}
419
420static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
421{
422 if(write){
423 dma_init_write(esp, addr, count);
424 } else {
425 dma_init_read(esp, addr, count);
426 }
427}
428
429/*
430 * These will not play nicely with other disk controllers that try to use the
431 * disk active LED... but what can you do? Don't answer that.
432 *
433 * Stolen shamelessly from ibmmca.c -- IBM Microchannel SCSI adapter driver
434 *
435 */
436
/* Turn the PS/2 disk-activity LED on (top two bits of the system
 * control port -- see the ibmmca.c note above). */
static void dma_led_on(struct NCR_ESP *esp)
{
	outb(inb(PS2_SYS_CTR) | 0xc0, PS2_SYS_CTR);
}
441
/* Turn the PS/2 disk-activity LED back off (clear the same two bits). */
static void dma_led_off(struct NCR_ESP *esp)
{
	outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR);
}
446
/* Legacy (detect/release style) host template for the MCA 53c9x: PIO
 * command transfer, ISA-style DMA for data (unchecked_isa_dma).  The
 * esp_* entry points come from the shared NCR53C9x core. */
static struct scsi_host_template driver_template = {
	.proc_name		= "mca_53c9x",
	.name			= "NCR 53c9x SCSI",
	.detect			= mca_esp_detect,
	.slave_alloc		= esp_slave_alloc,
	.slave_destroy		= esp_slave_destroy,
	.release		= mca_esp_release,
	.queuecommand		= esp_queue,
	.eh_abort_handler	= esp_abort,
	.eh_bus_reset_handler	= esp_reset,
	.can_queue		= 7,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.unchecked_isa_dma	= 1,
	.use_clustering		= DISABLE_CLUSTERING
};
463
464
465#include "scsi_module.c"
466
467/*
 * OK, here's the goods I promised. The NCR 86C01 is an MCA interface chip
 * that handles enabling/disabling IRQ, DMA interfacing, IO port selection
 * and other fun stuff. It takes up 16 addresses, and the chip it is
 * connected to gets the following 16. Registers are as follows:
472 *
473 * Offsets 0-1 : Card ID
474 *
475 * Offset 2 : Mode enable register --
476 * Bit 7 : Data Word width (1 = 16, 0 = 8)
477 * Bit 6 : IRQ enable (1 = enabled)
478 * Bits 5,4 : IRQ select
479 * 0 0 : IRQ 3
480 * 0 1 : IRQ 5
481 * 1 0 : IRQ 7
482 * 1 1 : IRQ 9
483 * Bits 3-1 : Base Address
484 * 0 0 0 : <disabled>
485 * 0 0 1 : 0x0240
486 * 0 1 0 : 0x0340
487 * 0 1 1 : 0x0400
488 * 1 0 0 : 0x0420
489 * 1 0 1 : 0x3240
490 * 1 1 0 : 0x8240
491 * 1 1 1 : 0xA240
492 * Bit 0 : Card enable (1 = enabled)
493 *
494 * Offset 3 : DMA control register --
495 * Bit 7 : DMA enable (1 = enabled)
 *     Bits 6,5  : Preempt Count Select (transfers to complete after
 *                 'C01 has been preempted on MCA bus)
498 * 0 0 : 0
499 * 0 1 : 1
500 * 1 0 : 3
501 * 1 1 : 7
502 * (all these wacky numbers; I'm sure there's a reason somewhere)
503 * Bit 4 : Fairness enable (1 = fair bus priority)
504 * Bits 3-0 : Arbitration level (0-15 consecutive)
505 *
506 * Offset 4 : General purpose register
507 * Bits 7-3 : User definable (here, 7,6 are SCSI ID)
508 * Bits 2-0 : reserved
509 *
510 * Offset 10 : DMA decode register (used for IO based DMA; also can do
511 * PIO through this port)
512 *
513 * Offset 12 : Status
514 * Bits 7-2 : reserved
515 * Bit 1 : DMA pending (1 = pending)
516 * Bit 0 : IRQ pending (0 = pending)
517 *
518 * Exciting, huh?
519 *
520 */
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
deleted file mode 100644
index 8e5eadbd5c51..000000000000
--- a/drivers/scsi/oktagon_esp.c
+++ /dev/null
@@ -1,606 +0,0 @@
1/*
2 * Oktagon_esp.c -- Driver for bsc Oktagon
3 *
4 * Written by Carsten Pluntke 1998
5 *
6 * Based on cyber_esp.c
7 */
8
9
10#if defined(CONFIG_AMIGA) || defined(CONFIG_APUS)
11#define USE_BOTTOM_HALF
12#endif
13
14#include <linux/module.h>
15
16#include <linux/kernel.h>
17#include <linux/delay.h>
18#include <linux/types.h>
19#include <linux/string.h>
20#include <linux/slab.h>
21#include <linux/blkdev.h>
22#include <linux/proc_fs.h>
23#include <linux/stat.h>
24#include <linux/reboot.h>
25#include <asm/system.h>
26#include <asm/ptrace.h>
27#include <asm/pgtable.h>
28
29
30#include "scsi.h"
31#include <scsi/scsi_host.h>
32#include "NCR53C9x.h"
33
34#include <linux/zorro.h>
35#include <asm/irq.h>
36#include <asm/amigaints.h>
37#include <asm/amigahw.h>
38
39#ifdef USE_BOTTOM_HALF
40#include <linux/workqueue.h>
41#include <linux/interrupt.h>
42#endif
43
44/* The controller registers can be found in the Z2 config area at these
45 * offsets:
46 */
47#define OKTAGON_ESP_ADDR 0x03000
48#define OKTAGON_DMA_ADDR 0x01000
49
50
51static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
52static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
53static void dma_dump_state(struct NCR_ESP *esp);
54static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
55static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
56static void dma_ints_off(struct NCR_ESP *esp);
57static void dma_ints_on(struct NCR_ESP *esp);
58static int dma_irq_p(struct NCR_ESP *esp);
59static void dma_led_off(struct NCR_ESP *esp);
60static void dma_led_on(struct NCR_ESP *esp);
61static int dma_ports_p(struct NCR_ESP *esp);
62static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
63
64static void dma_irq_exit(struct NCR_ESP *esp);
65static void dma_invalidate(struct NCR_ESP *esp);
66
67static void dma_mmu_get_scsi_one(struct NCR_ESP *,Scsi_Cmnd *);
68static void dma_mmu_get_scsi_sgl(struct NCR_ESP *,Scsi_Cmnd *);
69static void dma_mmu_release_scsi_one(struct NCR_ESP *,Scsi_Cmnd *);
70static void dma_mmu_release_scsi_sgl(struct NCR_ESP *,Scsi_Cmnd *);
71static void dma_advance_sg(Scsi_Cmnd *);
72static int oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x);
73
74#ifdef USE_BOTTOM_HALF
75static void dma_commit(struct work_struct *unused);
76
77long oktag_to_io(long *paddr, long *addr, long len);
78long oktag_from_io(long *addr, long *paddr, long len);
79
80static DECLARE_WORK(tq_fake_dma, dma_commit);
81
82#define DMA_MAXTRANSFER 0x8000
83
84#else
85
86/*
87 * No bottom half. Use transfer directly from IRQ. Find a narrow path
88 * between too much IRQ overhead and clogging the IRQ for too long.
89 */
90
91#define DMA_MAXTRANSFER 0x1000
92
93#endif
94
95static struct notifier_block oktagon_notifier = {
96 oktagon_notify_reboot,
97 NULL,
98 0
99};
100
101static long *paddress;
102static long *address;
103static long len;
104static long dma_on;
105static int direction;
106static struct NCR_ESP *current_esp;
107
108
109static volatile unsigned char cmd_buffer[16];
                                /* This is where all commands are put
                                 * before they are transferred to the ESP chip
                                 * via PIO.
                                 */
114
/***************************************************************** Detection */
/*
 * Probe Zorro space for a bsc Oktagon 2008 controller, verify the ESP
 * chip answers at the expected address, wire up the NCR_ESP callbacks
 * (fake DMA done by CPU copy, optionally deferred to a work item) and
 * register the host.  Returns esps_in_use on success, 0 otherwise.
 */
int oktagon_esp_detect(struct scsi_host_template *tpnt)
{
	struct NCR_ESP *esp;
	struct zorro_dev *z = NULL;
	unsigned long address;
	struct ESP_regs *eregs;

	while ((z = zorro_find_device(ZORRO_PROD_BSC_OKTAGON_2008, z))) {
		unsigned long board = z->resource.start;
		if (request_mem_region(board+OKTAGON_ESP_ADDR,
				       sizeof(struct ESP_regs), "NCR53C9x")) {
			/*
			 * It is a SCSI controller.
			 * Hardwire Host adapter to SCSI ID 7
			 */

			address = (unsigned long)ZTWO_VADDR(board);
			eregs = (struct ESP_regs *)(address + OKTAGON_ESP_ADDR);

			/* This line was 5 lines lower */
			esp = esp_allocate(tpnt, (void *)board + OKTAGON_ESP_ADDR, 0);

			/* we have to shift the registers only one bit for oktagon */
			esp->shift = 1;

			/* write a config pattern and read it back to probe
			 * that real ESP registers live at this address */
			esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
			udelay(5);
			if (esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7))
				return 0; /* Bail out if address did not hold data */

			/* Do command transfer with programmed I/O */
			esp->do_pio_cmds = 1;

			/* Required functions */
			esp->dma_bytes_sent = &dma_bytes_sent;
			esp->dma_can_transfer = &dma_can_transfer;
			esp->dma_dump_state = &dma_dump_state;
			esp->dma_init_read = &dma_init_read;
			esp->dma_init_write = &dma_init_write;
			esp->dma_ints_off = &dma_ints_off;
			esp->dma_ints_on = &dma_ints_on;
			esp->dma_irq_p = &dma_irq_p;
			esp->dma_ports_p = &dma_ports_p;
			esp->dma_setup = &dma_setup;

			/* Optional functions */
			esp->dma_barrier = 0;
			esp->dma_drain = 0;
			esp->dma_invalidate = &dma_invalidate;
			esp->dma_irq_entry = 0;
			esp->dma_irq_exit = &dma_irq_exit;
			esp->dma_led_on = &dma_led_on;
			esp->dma_led_off = &dma_led_off;
			esp->dma_poll = 0;
			esp->dma_reset = 0;

			/* MMU hooks for the CPU-copy fake DMA */
			esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
			esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
			esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one;
			esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl;
			esp->dma_advance_sg = &dma_advance_sg;

			/* SCSI chip speed */
			/* Looking at the quartz of the SCSI board... */
			esp->cfreq = 25000000;

			/* The DMA registers on the CyberStorm are mapped
			 * relative to the device (i.e. in the same Zorro
			 * I/O block).
			 */
			esp->dregs = (void *)(address + OKTAGON_DMA_ADDR);

			paddress = (long *) esp->dregs;

			/* ESP register base */
			esp->eregs = eregs;

			/* Set the command buffer */
			esp->esp_command = (volatile unsigned char*) cmd_buffer;

			/* Yes, the virtual address. See below. */
			esp->esp_command_dvma = (__u32) cmd_buffer;

			esp->irq = IRQ_AMIGA_PORTS;
			request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
				    "BSC Oktagon SCSI", esp->ehost);

			/* Figure out our scsi ID on the bus */
			esp->scsi_id = 7;

			/* We don't have a differential SCSI-bus. */
			esp->diff = 0;

			esp_initialize(esp);

			printk("ESP_Oktagon Driver 1.1"
#ifdef USE_BOTTOM_HALF
			       " [BOTTOM_HALF]"
#else
			       " [IRQ]"
#endif
			       " registered.\n");
			printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps,esps_in_use);
			esps_running = esps_in_use;
			current_esp = esp;
			register_reboot_notifier(&oktagon_notifier);
			return esps_in_use;
		}
	}
	return 0;
}
227
228
229/*
230 * On certain configurations the SCSI equipment gets confused on reboot,
231 * so we have to reset it then.
232 */
233
234static int
235oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
236{
237 struct NCR_ESP *esp;
238
239 if((code == SYS_DOWN || code == SYS_HALT) && (esp = current_esp))
240 {
241 esp_bootup_reset(esp,esp->eregs);
242 udelay(500); /* Settle time. Maybe unnecessary. */
243 }
244 return NOTIFY_DONE;
245}
246
247
248
249#ifdef USE_BOTTOM_HALF
250
251
252/*
253 * The bsc Oktagon controller has no real DMA, so we have to do the 'DMA
254 * transfer' in the interrupt (Yikes!) or use a bottom half to not to clutter
255 * IRQ's for longer-than-good.
256 *
257 * FIXME
258 * BIG PROBLEM: 'len' is usually the buffer length, not the expected length
259 * of the data. So DMA may finish prematurely, further reads lead to
260 * 'machine check' on APUS systems (don't know about m68k systems, AmigaOS
261 * deliberately ignores the bus faults) and a normal copy-loop can't
262 * be exited prematurely just at the right moment by the dma_invalidate IRQ.
263 * So do it the hard way, write an own copier in assembler and
264 * catch the exception.
265 * -- Carsten
266 */
267
268
269static void dma_commit(struct work_struct *unused)
270{
271 long wait,len2,pos;
272 struct NCR_ESP *esp;
273
274 ESPDATA(("Transfer: %ld bytes, Address 0x%08lX, Direction: %d\n",
275 len,(long) address,direction));
276 dma_ints_off(current_esp);
277
278 pos = 0;
279 wait = 1;
280 if(direction) /* write? (memory to device) */
281 {
282 while(len > 0)
283 {
284 len2 = oktag_to_io(paddress, address+pos, len);
285 if(!len2)
286 {
287 if(wait > 1000)
288 {
289 printk("Expedited DMA exit (writing) %ld\n",len);
290 break;
291 }
292 mdelay(wait);
293 wait *= 2;
294 }
295 pos += len2;
296 len -= len2*sizeof(long);
297 }
298 } else {
299 while(len > 0)
300 {
301 len2 = oktag_from_io(address+pos, paddress, len);
302 if(!len2)
303 {
304 if(wait > 1000)
305 {
306 printk("Expedited DMA exit (reading) %ld\n",len);
307 break;
308 }
309 mdelay(wait);
310 wait *= 2;
311 }
312 pos += len2;
313 len -= len2*sizeof(long);
314 }
315 }
316
317 /* to make esp->shift work */
318 esp=current_esp;
319
320#if 0
321 len2 = (esp_read(current_esp->eregs->esp_tclow) & 0xff) |
322 ((esp_read(current_esp->eregs->esp_tcmed) & 0xff) << 8);
323
324 /*
325 * Uh uh. If you see this, len and transfer count registers were out of
326 * sync. That means really serious trouble.
327 */
328
329 if(len2)
330 printk("Eeeek!! Transfer count still %ld!\n",len2);
331#endif
332
333 /*
334 * Normally we just need to exit and wait for the interrupt to come.
335 * But at least one device (my Microtek ScanMaker 630) regularly mis-
336 * calculates the bytes it should send which is really ugly because
337 * it locks up the SCSI bus if not accounted for.
338 */
339
340 if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
341 {
342 long len = 100;
343 long trash[10];
344
345 /*
346 * Interrupt bit was not set. Either the device is just plain lazy
347 * so we give it a 10 ms chance or...
348 */
349 while(len-- && (!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR)))
350 udelay(100);
351
352
353 if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
354 {
355 /*
356 * So we think that the transfer count is out of sync. Since we
357 * have all we want we are happy and can ditch the trash.
358 */
359
360 len = DMA_MAXTRANSFER;
361
362 while(len-- && (!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR)))
363 oktag_from_io(trash,paddress,2);
364
365 if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
366 {
367 /*
368 * Things really have gone wrong. If we leave the system in that
369 * state, the SCSI bus is locked forever. I hope that this will
370 * turn the system in a more or less running state.
371 */
372 printk("Device is bolixed, trying bus reset...\n");
373 esp_bootup_reset(current_esp,current_esp->eregs);
374 }
375 }
376 }
377
378 ESPDATA(("Transfer_finale: do_data_finale should come\n"));
379
380 len = 0;
381 dma_on = 0;
382 dma_ints_on(current_esp);
383}
384
385#endif
386
387/************************************************************* DMA Functions */
388static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
389{
390 /* Since the CyberStorm DMA is fully dedicated to the ESP chip,
391 * the number of bytes sent (to the ESP chip) equals the number
392 * of bytes in the FIFO - there is no buffering in the DMA controller.
393 * XXXX Do I read this right? It is from host to ESP, right?
394 */
395 return fifo_count;
396}
397
398static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
399{
400 unsigned long sz = sp->SCp.this_residual;
401 if(sz > DMA_MAXTRANSFER)
402 sz = DMA_MAXTRANSFER;
403 return sz;
404}
405
406static void dma_dump_state(struct NCR_ESP *esp)
407{
408}
409
410/*
411 * What the f$@& is this?
412 *
413 * Some SCSI devices (like my Microtek ScanMaker 630 scanner) want to transfer
414 * more data than requested. How much? Dunno. So ditch the bogus data into
415 * the sink, hoping the device will advance to the next phase sooner or later.
416 *
417 * -- Carsten
418 */
419
420static long oktag_eva_buffer[16]; /* The data sink */
421
422static void oktag_check_dma(void)
423{
424 struct NCR_ESP *esp;
425
426 esp=current_esp;
427 if(!len)
428 {
429 address = oktag_eva_buffer;
430 len = 2;
431 /* esp_do_data sets them to zero like len */
432 esp_write(current_esp->eregs->esp_tclow,2);
433 esp_write(current_esp->eregs->esp_tcmed,0);
434 }
435}
436
437static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length)
438{
439 /* Zorro is noncached, everything else done using processor. */
440 /* cache_clear(addr, length); */
441
442 if(dma_on)
443 panic("dma_init_read while dma process is initialized/running!\n");
444 direction = 0;
445 address = (long *) vaddress;
446 current_esp = esp;
447 len = length;
448 oktag_check_dma();
449 dma_on = 1;
450}
451
452static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length)
453{
454 /* cache_push(addr, length); */
455
456 if(dma_on)
457 panic("dma_init_write while dma process is initialized/running!\n");
458 direction = 1;
459 address = (long *) vaddress;
460 current_esp = esp;
461 len = length;
462 oktag_check_dma();
463 dma_on = 1;
464}
465
466static void dma_ints_off(struct NCR_ESP *esp)
467{
468 disable_irq(esp->irq);
469}
470
471static void dma_ints_on(struct NCR_ESP *esp)
472{
473 enable_irq(esp->irq);
474}
475
476static int dma_irq_p(struct NCR_ESP *esp)
477{
478 /* It's important to check the DMA IRQ bit in the correct way! */
479 return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
480}
481
482static void dma_led_off(struct NCR_ESP *esp)
483{
484}
485
486static void dma_led_on(struct NCR_ESP *esp)
487{
488}
489
490static int dma_ports_p(struct NCR_ESP *esp)
491{
492 return ((amiga_custom.intenar) & IF_PORTS);
493}
494
495static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
496{
497 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
498 * so when (write) is true, it actually means READ!
499 */
500 if(write){
501 dma_init_read(esp, addr, count);
502 } else {
503 dma_init_write(esp, addr, count);
504 }
505}
506
507/*
508 * IRQ entry when DMA transfer is ready to be started
509 */
510
511static void dma_irq_exit(struct NCR_ESP *esp)
512{
513#ifdef USE_BOTTOM_HALF
514 if(dma_on)
515 {
516 schedule_work(&tq_fake_dma);
517 }
518#else
519 while(len && !dma_irq_p(esp))
520 {
521 if(direction)
522 *paddress = *address++;
523 else
524 *address++ = *paddress;
525 len -= (sizeof(long));
526 }
527 len = 0;
528 dma_on = 0;
529#endif
530}
531
532/*
533 * IRQ entry when DMA has just finished
534 */
535
536static void dma_invalidate(struct NCR_ESP *esp)
537{
538}
539
540/*
541 * Since the processor does the data transfer we have to use the custom
542 * mmu interface to pass the virtual address, not the physical.
543 */
544
545void dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
546{
547 sp->SCp.ptr =
548 sp->request_buffer;
549}
550
551void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
552{
553 sp->SCp.ptr = sg_virt(sp->SCp.buffer);
554}
555
556void dma_mmu_release_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
557{
558}
559
560void dma_mmu_release_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
561{
562}
563
564void dma_advance_sg(Scsi_Cmnd *sp)
565{
566 sp->SCp.ptr = sg_virt(sp->SCp.buffer);
567}
568
569
570#define HOSTS_C
571
572int oktagon_esp_release(struct Scsi_Host *instance)
573{
574#ifdef MODULE
575 unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
576 esp_release();
577 release_mem_region(address, sizeof(struct ESP_regs));
578 free_irq(IRQ_AMIGA_PORTS, esp_intr);
579 unregister_reboot_notifier(&oktagon_notifier);
580#endif
581 return 1;
582}
583
584
585static struct scsi_host_template driver_template = {
586 .proc_name = "esp-oktagon",
587 .proc_info = &esp_proc_info,
588 .name = "BSC Oktagon SCSI",
589 .detect = oktagon_esp_detect,
590 .slave_alloc = esp_slave_alloc,
591 .slave_destroy = esp_slave_destroy,
592 .release = oktagon_esp_release,
593 .queuecommand = esp_queue,
594 .eh_abort_handler = esp_abort,
595 .eh_bus_reset_handler = esp_reset,
596 .can_queue = 7,
597 .this_id = 7,
598 .sg_tablesize = SG_ALL,
599 .cmd_per_lun = 1,
600 .use_clustering = ENABLE_CLUSTERING
601};
602
603
604#include "scsi_module.c"
605
606MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/oktagon_io.S b/drivers/scsi/oktagon_io.S
deleted file mode 100644
index 8a7340b02707..000000000000
--- a/drivers/scsi/oktagon_io.S
+++ /dev/null
@@ -1,194 +0,0 @@
1/* -*- mode: asm -*-
2 * Due to problems while transferring data I've put these routines as assembly
3 * code.
4 * Since I'm no PPC assembler guru, the code is just the assembler version of
5
6int oktag_to_io(long *paddr,long *addr,long len)
7{
8 long *addr2 = addr;
9 for(len=(len+sizeof(long)-1)/sizeof(long);len--;)
10 *paddr = *addr2++;
11 return addr2 - addr;
12}
13
14int oktag_from_io(long *addr,long *paddr,long len)
15{
16 long *addr2 = addr;
17 for(len=(len+sizeof(long)-1)/sizeof(long);len--;)
18 *addr2++ = *paddr;
19 return addr2 - addr;
20}
21
22 * assembled using gcc -O2 -S, with two exception catch points where data
23 * is moved to/from the IO register.
24 */
25
26
27#ifdef CONFIG_APUS
28
29 .file "oktagon_io.c"
30
31gcc2_compiled.:
32/*
33 .section ".text"
34*/
35 .align 2
36 .globl oktag_to_io
37 .type oktag_to_io,@function
38oktag_to_io:
39 addi 5,5,3
40 srwi 5,5,2
41 cmpwi 1,5,0
42 mr 9,3
43 mr 3,4
44 addi 5,5,-1
45 bc 12,6,.L3
46.L5:
47 cmpwi 1,5,0
48 lwz 0,0(3)
49 addi 3,3,4
50 addi 5,5,-1
51exp1: stw 0,0(9)
52 bc 4,6,.L5
53.L3:
54ret1: subf 3,4,3
55 srawi 3,3,2
56 blr
57.Lfe1:
58 .size oktag_to_io,.Lfe1-oktag_to_io
59 .align 2
60 .globl oktag_from_io
61 .type oktag_from_io,@function
62oktag_from_io:
63 addi 5,5,3
64 srwi 5,5,2
65 cmpwi 1,5,0
66 mr 9,3
67 addi 5,5,-1
68 bc 12,6,.L9
69.L11:
70 cmpwi 1,5,0
71exp2: lwz 0,0(4)
72 addi 5,5,-1
73 stw 0,0(3)
74 addi 3,3,4
75 bc 4,6,.L11
76.L9:
77ret2: subf 3,9,3
78 srawi 3,3,2
79 blr
80.Lfe2:
81 .size oktag_from_io,.Lfe2-oktag_from_io
82 .ident "GCC: (GNU) egcs-2.90.29 980515 (egcs-1.0.3 release)"
83
84/*
85 * Exception table.
86 * Second longword shows where to jump when an exception at the addr the first
87 * longword is pointing to is caught.
88 */
89
90.section __ex_table,"a"
91 .align 2
92oktagon_except:
93 .long exp1,ret1
94 .long exp2,ret2
95
96#else
97
98/*
99The code which follows is for 680x0 based assembler and is meant for
100Linux/m68k. It was created by cross compiling the code using the
101instructions given above. I then added the four labels used in the
102exception handler table at the bottom of this file.
103- Kevin <kcozens@interlog.com>
104*/
105
106#ifdef CONFIG_AMIGA
107
108 .file "oktagon_io.c"
109 .version "01.01"
110gcc2_compiled.:
111.text
112 .align 2
113.globl oktag_to_io
114 .type oktag_to_io,@function
115oktag_to_io:
116 link.w %a6,#0
117 move.l %d2,-(%sp)
118 move.l 8(%a6),%a1
119 move.l 12(%a6),%d1
120 move.l %d1,%a0
121 move.l 16(%a6),%d0
122 addq.l #3,%d0
123 lsr.l #2,%d0
124 subq.l #1,%d0
125 moveq.l #-1,%d2
126 cmp.l %d0,%d2
127 jbeq .L3
128.L5:
129exp1:
130 move.l (%a0)+,(%a1)
131 dbra %d0,.L5
132 clr.w %d0
133 subq.l #1,%d0
134 jbcc .L5
135.L3:
136ret1:
137 move.l %a0,%d0
138 sub.l %d1,%d0
139 asr.l #2,%d0
140 move.l -4(%a6),%d2
141 unlk %a6
142 rts
143
144.Lfe1:
145 .size oktag_to_io,.Lfe1-oktag_to_io
146 .align 2
147.globl oktag_from_io
148 .type oktag_from_io,@function
149oktag_from_io:
150 link.w %a6,#0
151 move.l %d2,-(%sp)
152 move.l 8(%a6),%d1
153 move.l 12(%a6),%a1
154 move.l %d1,%a0
155 move.l 16(%a6),%d0
156 addq.l #3,%d0
157 lsr.l #2,%d0
158 subq.l #1,%d0
159 moveq.l #-1,%d2
160 cmp.l %d0,%d2
161 jbeq .L9
162.L11:
163exp2:
164 move.l (%a1),(%a0)+
165 dbra %d0,.L11
166 clr.w %d0
167 subq.l #1,%d0
168 jbcc .L11
169.L9:
170ret2:
171 move.l %a0,%d0
172 sub.l %d1,%d0
173 asr.l #2,%d0
174 move.l -4(%a6),%d2
175 unlk %a6
176 rts
177.Lfe2:
178 .size oktag_from_io,.Lfe2-oktag_from_io
179 .ident "GCC: (GNU) 2.7.2.1"
180
181/*
182 * Exception table.
183 * Second longword shows where to jump when an exception at the addr the first
184 * longword is pointing to is caught.
185 */
186
187.section __ex_table,"a"
188 .align 2
189oktagon_except:
190 .long exp1,ret1
191 .long exp2,ret2
192
193#endif
194#endif
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 17b4a7c4618c..0cd614a0fa73 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -35,7 +35,7 @@
35 35
36#define BOUNCE_SIZE (64*1024) 36#define BOUNCE_SIZE (64*1024)
37 37
38#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE / CD_FRAMESIZE) 38#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE >> 9)
39 39
40 40
41struct ps3rom_private { 41struct ps3rom_private {
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index adf97320574b..4894dc886b62 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -428,6 +428,19 @@ qla2x00_sysfs_read_sfp(struct kobject *kobj,
428 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2) 428 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
429 return 0; 429 return 0;
430 430
431 if (ha->sfp_data)
432 goto do_read;
433
434 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
435 &ha->sfp_data_dma);
436 if (!ha->sfp_data) {
437 qla_printk(KERN_WARNING, ha,
438 "Unable to allocate memory for SFP read-data.\n");
439 return 0;
440 }
441
442do_read:
443 memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
431 addr = 0xa0; 444 addr = 0xa0;
432 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE; 445 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
433 iter++, offset += SFP_BLOCK_SIZE) { 446 iter++, offset += SFP_BLOCK_SIZE) {
@@ -835,7 +848,7 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost)
835static void 848static void
836qla2x00_get_host_speed(struct Scsi_Host *shost) 849qla2x00_get_host_speed(struct Scsi_Host *shost)
837{ 850{
838 scsi_qla_host_t *ha = shost_priv(shost); 851 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
839 uint32_t speed = 0; 852 uint32_t speed = 0;
840 853
841 switch (ha->link_data_rate) { 854 switch (ha->link_data_rate) {
@@ -848,6 +861,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
848 case PORT_SPEED_4GB: 861 case PORT_SPEED_4GB:
849 speed = 4; 862 speed = 4;
850 break; 863 break;
864 case PORT_SPEED_8GB:
865 speed = 8;
866 break;
851 } 867 }
852 fc_host_speed(shost) = speed; 868 fc_host_speed(shost) = speed;
853} 869}
@@ -855,7 +871,7 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
855static void 871static void
856qla2x00_get_host_port_type(struct Scsi_Host *shost) 872qla2x00_get_host_port_type(struct Scsi_Host *shost)
857{ 873{
858 scsi_qla_host_t *ha = shost_priv(shost); 874 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
859 uint32_t port_type = FC_PORTTYPE_UNKNOWN; 875 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
860 876
861 switch (ha->current_topology) { 877 switch (ha->current_topology) {
@@ -965,7 +981,7 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
965static struct fc_host_statistics * 981static struct fc_host_statistics *
966qla2x00_get_fc_host_stats(struct Scsi_Host *shost) 982qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
967{ 983{
968 scsi_qla_host_t *ha = shost_priv(shost); 984 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
969 int rval; 985 int rval;
970 struct link_statistics *stats; 986 struct link_statistics *stats;
971 dma_addr_t stats_dma; 987 dma_addr_t stats_dma;
@@ -1049,7 +1065,7 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1049static void 1065static void
1050qla2x00_get_host_port_state(struct Scsi_Host *shost) 1066qla2x00_get_host_port_state(struct Scsi_Host *shost)
1051{ 1067{
1052 scsi_qla_host_t *ha = shost_priv(shost); 1068 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
1053 1069
1054 if (!ha->flags.online) 1070 if (!ha->flags.online)
1055 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 1071 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b72c7f170854..3750319f4968 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2041,8 +2041,6 @@ typedef struct vport_params {
2041#define VP_RET_CODE_NO_MEM 5 2041#define VP_RET_CODE_NO_MEM 5
2042#define VP_RET_CODE_NOT_FOUND 6 2042#define VP_RET_CODE_NOT_FOUND 6
2043 2043
2044#define to_qla_parent(x) (((x)->parent) ? (x)->parent : (x))
2045
2046/* 2044/*
2047 * ISP operations 2045 * ISP operations
2048 */ 2046 */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ba35fc26ce6b..193f688ec3d7 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -66,6 +66,7 @@ extern int ql2xqfullrampup;
66extern int num_hosts; 66extern int num_hosts;
67 67
68extern int qla2x00_loop_reset(scsi_qla_host_t *); 68extern int qla2x00_loop_reset(scsi_qla_host_t *);
69extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
69 70
70/* 71/*
71 * Global Functions in qla_mid.c source file. 72 * Global Functions in qla_mid.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d0633ca894be..d5c7853e7eba 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -925,6 +925,16 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
925{ 925{
926 int rval; 926 int rval;
927 uint32_t srisc_address = 0; 927 uint32_t srisc_address = 0;
928 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
929 unsigned long flags;
930
931 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
932 /* Disable SRAM, Instruction RAM and GP RAM parity. */
933 spin_lock_irqsave(&ha->hardware_lock, flags);
934 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
935 RD_REG_WORD(&reg->hccr);
936 spin_unlock_irqrestore(&ha->hardware_lock, flags);
937 }
928 938
929 /* Load firmware sequences */ 939 /* Load firmware sequences */
930 rval = ha->isp_ops->load_risc(ha, &srisc_address); 940 rval = ha->isp_ops->load_risc(ha, &srisc_address);
@@ -968,6 +978,19 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
968 } 978 }
969 } 979 }
970 980
981 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
982 /* Enable proper parity. */
983 spin_lock_irqsave(&ha->hardware_lock, flags);
984 if (IS_QLA2300(ha))
985 /* SRAM parity */
986 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
987 else
988 /* SRAM, Instruction RAM and GP RAM parity */
989 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
990 RD_REG_WORD(&reg->hccr);
991 spin_unlock_irqrestore(&ha->hardware_lock, flags);
992 }
993
971 if (rval) { 994 if (rval) {
972 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 995 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
973 ha->host_no)); 996 ha->host_no));
@@ -3213,9 +3236,6 @@ int
3213qla2x00_abort_isp(scsi_qla_host_t *ha) 3236qla2x00_abort_isp(scsi_qla_host_t *ha)
3214{ 3237{
3215 int rval; 3238 int rval;
3216 unsigned long flags = 0;
3217 uint16_t cnt;
3218 srb_t *sp;
3219 uint8_t status = 0; 3239 uint8_t status = 0;
3220 3240
3221 if (ha->flags.online) { 3241 if (ha->flags.online) {
@@ -3236,19 +3256,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3236 LOOP_DOWN_TIME); 3256 LOOP_DOWN_TIME);
3237 } 3257 }
3238 3258
3239 spin_lock_irqsave(&ha->hardware_lock, flags);
3240 /* Requeue all commands in outstanding command list. */ 3259 /* Requeue all commands in outstanding command list. */
3241 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 3260 qla2x00_abort_all_cmds(ha, DID_RESET << 16);
3242 sp = ha->outstanding_cmds[cnt];
3243 if (sp) {
3244 ha->outstanding_cmds[cnt] = NULL;
3245 sp->flags = 0;
3246 sp->cmd->result = DID_RESET << 16;
3247 sp->cmd->host_scribble = (unsigned char *)NULL;
3248 qla2x00_sp_compl(ha, sp);
3249 }
3250 }
3251 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3252 3261
3253 ha->isp_ops->get_flash_version(ha, ha->request_ring); 3262 ha->isp_ops->get_flash_version(ha, ha->request_ring);
3254 3263
@@ -3273,6 +3282,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3273 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3282 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
3274 3283
3275 if (ha->eft) { 3284 if (ha->eft) {
3285 memset(ha->eft, 0, EFT_SIZE);
3276 rval = qla2x00_enable_eft_trace(ha, 3286 rval = qla2x00_enable_eft_trace(ha,
3277 ha->eft_dma, EFT_NUM_BUFFERS); 3287 ha->eft_dma, EFT_NUM_BUFFERS);
3278 if (rval) { 3288 if (rval) {
@@ -3357,60 +3367,15 @@ static int
3357qla2x00_restart_isp(scsi_qla_host_t *ha) 3367qla2x00_restart_isp(scsi_qla_host_t *ha)
3358{ 3368{
3359 uint8_t status = 0; 3369 uint8_t status = 0;
3360 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3361 unsigned long flags = 0;
3362 uint32_t wait_time; 3370 uint32_t wait_time;
3363 3371
3364 /* If firmware needs to be loaded */ 3372 /* If firmware needs to be loaded */
3365 if (qla2x00_isp_firmware(ha)) { 3373 if (qla2x00_isp_firmware(ha)) {
3366 ha->flags.online = 0; 3374 ha->flags.online = 0;
3367 if (!(status = ha->isp_ops->chip_diag(ha))) { 3375 if (!(status = ha->isp_ops->chip_diag(ha)))
3368 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3369 status = qla2x00_setup_chip(ha);
3370 goto done;
3371 }
3372
3373 spin_lock_irqsave(&ha->hardware_lock, flags);
3374
3375 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha) &&
3376 !IS_QLA25XX(ha)) {
3377 /*
3378 * Disable SRAM, Instruction RAM and GP RAM
3379 * parity.
3380 */
3381 WRT_REG_WORD(&reg->hccr,
3382 (HCCR_ENABLE_PARITY + 0x0));
3383 RD_REG_WORD(&reg->hccr);
3384 }
3385
3386 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3387
3388 status = qla2x00_setup_chip(ha); 3376 status = qla2x00_setup_chip(ha);
3389
3390 spin_lock_irqsave(&ha->hardware_lock, flags);
3391
3392 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha) &&
3393 !IS_QLA25XX(ha)) {
3394 /* Enable proper parity */
3395 if (IS_QLA2300(ha))
3396 /* SRAM parity */
3397 WRT_REG_WORD(&reg->hccr,
3398 (HCCR_ENABLE_PARITY + 0x1));
3399 else
3400 /*
3401 * SRAM, Instruction RAM and GP RAM
3402 * parity.
3403 */
3404 WRT_REG_WORD(&reg->hccr,
3405 (HCCR_ENABLE_PARITY + 0x7));
3406 RD_REG_WORD(&reg->hccr);
3407 }
3408
3409 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3410 }
3411 } 3377 }
3412 3378
3413 done:
3414 if (!status && !(status = qla2x00_init_rings(ha))) { 3379 if (!status && !(status = qla2x00_init_rings(ha))) {
3415 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3380 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
3416 if (!(status = qla2x00_fw_ready(ha))) { 3381 if (!(status = qla2x00_fw_ready(ha))) {
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 8e3b04464cff..5d1a3f7c408f 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -119,6 +119,13 @@ static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *ha)
119 qla2x00_get_firmware_state(ha, &fw_state); 119 qla2x00_get_firmware_state(ha, &fw_state);
120} 120}
121 121
122static __inline__ scsi_qla_host_t * to_qla_parent(scsi_qla_host_t *);
123static __inline__ scsi_qla_host_t *
124to_qla_parent(scsi_qla_host_t *ha)
125{
126 return ha->parent ? ha->parent : ha;
127}
128
122/** 129/**
123 * qla2x00_issue_marker() - Issue a Marker IOCB if necessary. 130 * qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
124 * @ha: HA context 131 * @ha: HA context
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 642a0c3f09c6..14e6f22944b7 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1815,6 +1815,8 @@ int
1815qla2x00_request_irqs(scsi_qla_host_t *ha) 1815qla2x00_request_irqs(scsi_qla_host_t *ha)
1816{ 1816{
1817 int ret; 1817 int ret;
1818 device_reg_t __iomem *reg = ha->iobase;
1819 unsigned long flags;
1818 1820
1819 /* If possible, enable MSI-X. */ 1821 /* If possible, enable MSI-X. */
1820 if (!IS_QLA2432(ha) && !IS_QLA2532(ha)) 1822 if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
@@ -1846,7 +1848,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1846 DEBUG2(qla_printk(KERN_INFO, ha, 1848 DEBUG2(qla_printk(KERN_INFO, ha,
1847 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, 1849 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1848 ha->fw_attributes)); 1850 ha->fw_attributes));
1849 return ret; 1851 goto clear_risc_ints;
1850 } 1852 }
1851 qla_printk(KERN_WARNING, ha, 1853 qla_printk(KERN_WARNING, ha,
1852 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); 1854 "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
@@ -1864,15 +1866,30 @@ skip_msi:
1864 1866
1865 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 1867 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1866 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); 1868 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
1867 if (!ret) { 1869 if (ret) {
1868 ha->flags.inta_enabled = 1;
1869 ha->host->irq = ha->pdev->irq;
1870 } else {
1871 qla_printk(KERN_WARNING, ha, 1870 qla_printk(KERN_WARNING, ha,
1872 "Failed to reserve interrupt %d already in use.\n", 1871 "Failed to reserve interrupt %d already in use.\n",
1873 ha->pdev->irq); 1872 ha->pdev->irq);
1873 goto fail;
1874 }
1875 ha->flags.inta_enabled = 1;
1876 ha->host->irq = ha->pdev->irq;
1877clear_risc_ints:
1878
1879 ha->isp_ops->disable_intrs(ha);
1880 spin_lock_irqsave(&ha->hardware_lock, flags);
1881 if (IS_FWI2_CAPABLE(ha)) {
1882 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1883 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1884 } else {
1885 WRT_REG_WORD(&reg->isp.semaphore, 0);
1886 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
1887 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1874 } 1888 }
1889 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1890 ha->isp_ops->enable_intrs(ha);
1875 1891
1892fail:
1876 return ret; 1893 return ret;
1877} 1894}
1878 1895
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 0c10c0b0fb73..99d29fff836d 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -980,7 +980,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
980 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 980 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
981 ha->host_no)); 981 ha->host_no));
982 982
983 if (ha->fw_attributes & BIT_2) 983 if (ha->flags.npiv_supported)
984 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 984 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
985 else 985 else
986 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; 986 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8f69caf83272..3c1b43356adb 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -204,10 +204,8 @@ static int qla2x00_do_dpc(void *data);
204 204
205static void qla2x00_rst_aen(scsi_qla_host_t *); 205static void qla2x00_rst_aen(scsi_qla_host_t *);
206 206
207static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *); 207static int qla2x00_mem_alloc(scsi_qla_host_t *);
208static void qla2x00_mem_free(scsi_qla_host_t *ha); 208static void qla2x00_mem_free(scsi_qla_host_t *ha);
209static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
210static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
211static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 209static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
212 210
213/* -------------------------------------------------------------------------- */ 211/* -------------------------------------------------------------------------- */
@@ -1117,6 +1115,27 @@ qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
1117 return ha->isp_ops->abort_target(reset_fcport); 1115 return ha->isp_ops->abort_target(reset_fcport);
1118} 1116}
1119 1117
1118void
1119qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res)
1120{
1121 int cnt;
1122 unsigned long flags;
1123 srb_t *sp;
1124
1125 spin_lock_irqsave(&ha->hardware_lock, flags);
1126 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1127 sp = ha->outstanding_cmds[cnt];
1128 if (sp) {
1129 ha->outstanding_cmds[cnt] = NULL;
1130 sp->flags = 0;
1131 sp->cmd->result = res;
1132 sp->cmd->host_scribble = (unsigned char *)NULL;
1133 qla2x00_sp_compl(ha, sp);
1134 }
1135 }
1136 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1137}
1138
1120static int 1139static int
1121qla2xxx_slave_alloc(struct scsi_device *sdev) 1140qla2xxx_slave_alloc(struct scsi_device *sdev)
1122{ 1141{
@@ -1557,10 +1576,8 @@ static int __devinit
1557qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 1576qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1558{ 1577{
1559 int ret = -ENODEV; 1578 int ret = -ENODEV;
1560 device_reg_t __iomem *reg;
1561 struct Scsi_Host *host; 1579 struct Scsi_Host *host;
1562 scsi_qla_host_t *ha; 1580 scsi_qla_host_t *ha;
1563 unsigned long flags = 0;
1564 char pci_info[30]; 1581 char pci_info[30];
1565 char fw_str[30]; 1582 char fw_str[30];
1566 struct scsi_host_template *sht; 1583 struct scsi_host_template *sht;
@@ -1608,6 +1625,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1608 ha->parent = NULL; 1625 ha->parent = NULL;
1609 ha->bars = bars; 1626 ha->bars = bars;
1610 ha->mem_only = mem_only; 1627 ha->mem_only = mem_only;
1628 spin_lock_init(&ha->hardware_lock);
1611 1629
1612 /* Set ISP-type information. */ 1630 /* Set ISP-type information. */
1613 qla2x00_set_isp_flags(ha); 1631 qla2x00_set_isp_flags(ha);
@@ -1621,8 +1639,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1621 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 1639 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
1622 ha->iobase); 1640 ha->iobase);
1623 1641
1624 spin_lock_init(&ha->hardware_lock);
1625
1626 ha->prev_topology = 0; 1642 ha->prev_topology = 0;
1627 ha->init_cb_size = sizeof(init_cb_t); 1643 ha->init_cb_size = sizeof(init_cb_t);
1628 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx; 1644 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
@@ -1751,34 +1767,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1751 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1767 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
1752 ha->host_no, ha)); 1768 ha->host_no, ha));
1753 1769
1754 ha->isp_ops->disable_intrs(ha);
1755
1756 spin_lock_irqsave(&ha->hardware_lock, flags);
1757 reg = ha->iobase;
1758 if (IS_FWI2_CAPABLE(ha)) {
1759 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1760 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1761 } else {
1762 WRT_REG_WORD(&reg->isp.semaphore, 0);
1763 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
1764 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1765
1766 /* Enable proper parity */
1767 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1768 if (IS_QLA2300(ha))
1769 /* SRAM parity */
1770 WRT_REG_WORD(&reg->isp.hccr,
1771 (HCCR_ENABLE_PARITY + 0x1));
1772 else
1773 /* SRAM, Instruction RAM and GP RAM parity */
1774 WRT_REG_WORD(&reg->isp.hccr,
1775 (HCCR_ENABLE_PARITY + 0x7));
1776 }
1777 }
1778 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1779
1780 ha->isp_ops->enable_intrs(ha);
1781
1782 pci_set_drvdata(pdev, ha); 1770 pci_set_drvdata(pdev, ha);
1783 1771
1784 ha->flags.init_done = 1; 1772 ha->flags.init_done = 1;
@@ -1848,10 +1836,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
1848static void 1836static void
1849qla2x00_free_device(scsi_qla_host_t *ha) 1837qla2x00_free_device(scsi_qla_host_t *ha)
1850{ 1838{
1839 qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16);
1840
1851 /* Disable timer */ 1841 /* Disable timer */
1852 if (ha->timer_active) 1842 if (ha->timer_active)
1853 qla2x00_stop_timer(ha); 1843 qla2x00_stop_timer(ha);
1854 1844
1845 ha->flags.online = 0;
1846
1855 /* Kill the kernel thread for this host */ 1847 /* Kill the kernel thread for this host */
1856 if (ha->dpc_thread) { 1848 if (ha->dpc_thread) {
1857 struct task_struct *t = ha->dpc_thread; 1849 struct task_struct *t = ha->dpc_thread;
@@ -1870,8 +1862,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1870 if (ha->eft) 1862 if (ha->eft)
1871 qla2x00_disable_eft_trace(ha); 1863 qla2x00_disable_eft_trace(ha);
1872 1864
1873 ha->flags.online = 0;
1874
1875 /* Stop currently executing firmware. */ 1865 /* Stop currently executing firmware. */
1876 qla2x00_try_to_stop_firmware(ha); 1866 qla2x00_try_to_stop_firmware(ha);
1877 1867
@@ -2010,196 +2000,109 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
2010* 2000*
2011* Returns: 2001* Returns:
2012* 0 = success. 2002* 0 = success.
2013* 1 = failure. 2003* !0 = failure.
2014*/ 2004*/
2015static uint8_t 2005static int
2016qla2x00_mem_alloc(scsi_qla_host_t *ha) 2006qla2x00_mem_alloc(scsi_qla_host_t *ha)
2017{ 2007{
2018 char name[16]; 2008 char name[16];
2019 uint8_t status = 1;
2020 int retry= 10;
2021
2022 do {
2023 /*
2024 * This will loop only once if everything goes well, else some
2025 * number of retries will be performed to get around a kernel
2026 * bug where available mem is not allocated until after a
2027 * little delay and a retry.
2028 */
2029 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
2030 (ha->request_q_length + 1) * sizeof(request_t),
2031 &ha->request_dma, GFP_KERNEL);
2032 if (ha->request_ring == NULL) {
2033 qla_printk(KERN_WARNING, ha,
2034 "Memory Allocation failed - request_ring\n");
2035
2036 qla2x00_mem_free(ha);
2037 msleep(100);
2038
2039 continue;
2040 }
2041
2042 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
2043 (ha->response_q_length + 1) * sizeof(response_t),
2044 &ha->response_dma, GFP_KERNEL);
2045 if (ha->response_ring == NULL) {
2046 qla_printk(KERN_WARNING, ha,
2047 "Memory Allocation failed - response_ring\n");
2048
2049 qla2x00_mem_free(ha);
2050 msleep(100);
2051
2052 continue;
2053 }
2054
2055 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
2056 &ha->gid_list_dma, GFP_KERNEL);
2057 if (ha->gid_list == NULL) {
2058 qla_printk(KERN_WARNING, ha,
2059 "Memory Allocation failed - gid_list\n");
2060
2061 qla2x00_mem_free(ha);
2062 msleep(100);
2063
2064 continue;
2065 }
2066
2067 /* get consistent memory allocated for init control block */
2068 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev,
2069 ha->init_cb_size, &ha->init_cb_dma, GFP_KERNEL);
2070 if (ha->init_cb == NULL) {
2071 qla_printk(KERN_WARNING, ha,
2072 "Memory Allocation failed - init_cb\n");
2073
2074 qla2x00_mem_free(ha);
2075 msleep(100);
2076
2077 continue;
2078 }
2079 memset(ha->init_cb, 0, ha->init_cb_size);
2080
2081 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
2082 ha->host_no);
2083 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2084 DMA_POOL_SIZE, 8, 0);
2085 if (ha->s_dma_pool == NULL) {
2086 qla_printk(KERN_WARNING, ha,
2087 "Memory Allocation failed - s_dma_pool\n");
2088
2089 qla2x00_mem_free(ha);
2090 msleep(100);
2091
2092 continue;
2093 }
2094
2095 if (qla2x00_allocate_sp_pool(ha)) {
2096 qla_printk(KERN_WARNING, ha,
2097 "Memory Allocation failed - "
2098 "qla2x00_allocate_sp_pool()\n");
2099
2100 qla2x00_mem_free(ha);
2101 msleep(100);
2102
2103 continue;
2104 }
2105
2106 /* Allocate memory for SNS commands */
2107 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2108 /* Get consistent memory allocated for SNS commands */
2109 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2110 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma,
2111 GFP_KERNEL);
2112 if (ha->sns_cmd == NULL) {
2113 /* error */
2114 qla_printk(KERN_WARNING, ha,
2115 "Memory Allocation failed - sns_cmd\n");
2116
2117 qla2x00_mem_free(ha);
2118 msleep(100);
2119
2120 continue;
2121 }
2122 memset(ha->sns_cmd, 0, sizeof(struct sns_cmd_pkt));
2123 } else {
2124 /* Get consistent memory allocated for MS IOCB */
2125 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2126 &ha->ms_iocb_dma);
2127 if (ha->ms_iocb == NULL) {
2128 /* error */
2129 qla_printk(KERN_WARNING, ha,
2130 "Memory Allocation failed - ms_iocb\n");
2131
2132 qla2x00_mem_free(ha);
2133 msleep(100);
2134
2135 continue;
2136 }
2137 memset(ha->ms_iocb, 0, sizeof(ms_iocb_entry_t));
2138
2139 /*
2140 * Get consistent memory allocated for CT SNS
2141 * commands
2142 */
2143 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2144 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma,
2145 GFP_KERNEL);
2146 if (ha->ct_sns == NULL) {
2147 /* error */
2148 qla_printk(KERN_WARNING, ha,
2149 "Memory Allocation failed - ct_sns\n");
2150 2009
2151 qla2x00_mem_free(ha); 2010 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
2152 msleep(100); 2011 (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma,
2012 GFP_KERNEL);
2013 if (!ha->request_ring)
2014 goto fail;
2015
2016 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
2017 (ha->response_q_length + 1) * sizeof(response_t),
2018 &ha->response_dma, GFP_KERNEL);
2019 if (!ha->response_ring)
2020 goto fail_free_request_ring;
2021
2022 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
2023 &ha->gid_list_dma, GFP_KERNEL);
2024 if (!ha->gid_list)
2025 goto fail_free_response_ring;
2026
2027 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
2028 &ha->init_cb_dma, GFP_KERNEL);
2029 if (!ha->init_cb)
2030 goto fail_free_gid_list;
2031
2032 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
2033 ha->host_no);
2034 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2035 DMA_POOL_SIZE, 8, 0);
2036 if (!ha->s_dma_pool)
2037 goto fail_free_init_cb;
2153 2038
2154 continue; 2039 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2155 } 2040 if (!ha->srb_mempool)
2156 memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt)); 2041 goto fail_free_s_dma_pool;
2157 2042
2158 if (IS_FWI2_CAPABLE(ha)) { 2043 /* Get memory for cached NVRAM */
2159 /* 2044 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2160 * Get consistent memory allocated for SFP 2045 if (!ha->nvram)
2161 * block. 2046 goto fail_free_srb_mempool;
2162 */ 2047
2163 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, 2048 /* Allocate memory for SNS commands */
2164 GFP_KERNEL, &ha->sfp_data_dma); 2049 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2165 if (ha->sfp_data == NULL) { 2050 /* Get consistent memory allocated for SNS commands */
2166 qla_printk(KERN_WARNING, ha, 2051 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2167 "Memory Allocation failed - " 2052 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2168 "sfp_data\n"); 2053 if (!ha->sns_cmd)
2169 2054 goto fail_free_nvram;
2170 qla2x00_mem_free(ha); 2055 } else {
2171 msleep(100); 2056 /* Get consistent memory allocated for MS IOCB */
2172 2057 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2173 continue; 2058 &ha->ms_iocb_dma);
2174 } 2059 if (!ha->ms_iocb)
2175 memset(ha->sfp_data, 0, SFP_BLOCK_SIZE); 2060 goto fail_free_nvram;
2176 }
2177 }
2178
2179 /* Get memory for cached NVRAM */
2180 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2181 if (ha->nvram == NULL) {
2182 /* error */
2183 qla_printk(KERN_WARNING, ha,
2184 "Memory Allocation failed - nvram cache\n");
2185
2186 qla2x00_mem_free(ha);
2187 msleep(100);
2188
2189 continue;
2190 }
2191
2192 /* Done all allocations without any error. */
2193 status = 0;
2194
2195 } while (retry-- && status != 0);
2196 2061
2197 if (status) { 2062 /* Get consistent memory allocated for CT SNS commands */
2198 printk(KERN_WARNING 2063 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2199 "%s(): **** FAILED ****\n", __func__); 2064 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2065 if (!ha->ct_sns)
2066 goto fail_free_ms_iocb;
2200 } 2067 }
2201 2068
2202 return(status); 2069 return 0;
2070
2071fail_free_ms_iocb:
2072 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2073 ha->ms_iocb = NULL;
2074 ha->ms_iocb_dma = 0;
2075fail_free_nvram:
2076 kfree(ha->nvram);
2077 ha->nvram = NULL;
2078fail_free_srb_mempool:
2079 mempool_destroy(ha->srb_mempool);
2080 ha->srb_mempool = NULL;
2081fail_free_s_dma_pool:
2082 dma_pool_destroy(ha->s_dma_pool);
2083 ha->s_dma_pool = NULL;
2084fail_free_init_cb:
2085 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2086 ha->init_cb_dma);
2087 ha->init_cb = NULL;
2088 ha->init_cb_dma = 0;
2089fail_free_gid_list:
2090 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2091 ha->gid_list_dma);
2092 ha->gid_list = NULL;
2093 ha->gid_list_dma = 0;
2094fail_free_response_ring:
2095 dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) *
2096 sizeof(response_t), ha->response_ring, ha->response_dma);
2097 ha->response_ring = NULL;
2098 ha->response_dma = 0;
2099fail_free_request_ring:
2100 dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) *
2101 sizeof(request_t), ha->request_ring, ha->request_dma);
2102 ha->request_ring = NULL;
2103 ha->request_dma = 0;
2104fail:
2105 return -ENOMEM;
2203} 2106}
2204 2107
2205/* 2108/*
@@ -2215,14 +2118,8 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2215 struct list_head *fcpl, *fcptemp; 2118 struct list_head *fcpl, *fcptemp;
2216 fc_port_t *fcport; 2119 fc_port_t *fcport;
2217 2120
2218 if (ha == NULL) { 2121 if (ha->srb_mempool)
2219 /* error */ 2122 mempool_destroy(ha->srb_mempool);
2220 DEBUG2(printk("%s(): ERROR invalid ha pointer.\n", __func__));
2221 return;
2222 }
2223
2224 /* free sp pool */
2225 qla2x00_free_sp_pool(ha);
2226 2123
2227 if (ha->fce) 2124 if (ha->fce)
2228 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 2125 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
@@ -2270,6 +2167,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2270 (ha->request_q_length + 1) * sizeof(request_t), 2167 (ha->request_q_length + 1) * sizeof(request_t),
2271 ha->request_ring, ha->request_dma); 2168 ha->request_ring, ha->request_dma);
2272 2169
2170 ha->srb_mempool = NULL;
2273 ha->eft = NULL; 2171 ha->eft = NULL;
2274 ha->eft_dma = 0; 2172 ha->eft_dma = 0;
2275 ha->sns_cmd = NULL; 2173 ha->sns_cmd = NULL;
@@ -2308,44 +2206,6 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2308 kfree(ha->nvram); 2206 kfree(ha->nvram);
2309} 2207}
2310 2208
2311/*
2312 * qla2x00_allocate_sp_pool
2313 * This routine is called during initialization to allocate
2314 * memory for local srb_t.
2315 *
2316 * Input:
2317 * ha = adapter block pointer.
2318 *
2319 * Context:
2320 * Kernel context.
2321 */
2322static int
2323qla2x00_allocate_sp_pool(scsi_qla_host_t *ha)
2324{
2325 int rval;
2326
2327 rval = QLA_SUCCESS;
2328 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2329 if (ha->srb_mempool == NULL) {
2330 qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n");
2331 rval = QLA_FUNCTION_FAILED;
2332 }
2333 return (rval);
2334}
2335
2336/*
2337 * This routine frees all adapter allocated memory.
2338 *
2339 */
2340static void
2341qla2x00_free_sp_pool( scsi_qla_host_t *ha)
2342{
2343 if (ha->srb_mempool) {
2344 mempool_destroy(ha->srb_mempool);
2345 ha->srb_mempool = NULL;
2346 }
2347}
2348
2349/************************************************************************** 2209/**************************************************************************
2350* qla2x00_do_dpc 2210* qla2x00_do_dpc
2351* This kernel thread is a task that is schedule by the interrupt handler 2211* This kernel thread is a task that is schedule by the interrupt handler
@@ -2367,6 +2227,9 @@ qla2x00_do_dpc(void *data)
2367 fc_port_t *fcport; 2227 fc_port_t *fcport;
2368 uint8_t status; 2228 uint8_t status;
2369 uint16_t next_loopid; 2229 uint16_t next_loopid;
2230 struct scsi_qla_host *vha;
2231 int i;
2232
2370 2233
2371 ha = (scsi_qla_host_t *)data; 2234 ha = (scsi_qla_host_t *)data;
2372 2235
@@ -2409,6 +2272,18 @@ qla2x00_do_dpc(void *data)
2409 } 2272 }
2410 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 2273 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
2411 } 2274 }
2275
2276 for_each_mapped_vp_idx(ha, i) {
2277 list_for_each_entry(vha, &ha->vp_list,
2278 vp_list) {
2279 if (i == vha->vp_idx) {
2280 set_bit(ISP_ABORT_NEEDED,
2281 &vha->dpc_flags);
2282 break;
2283 }
2284 }
2285 }
2286
2412 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 2287 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
2413 ha->host_no)); 2288 ha->host_no));
2414 } 2289 }
@@ -3029,3 +2904,4 @@ MODULE_FIRMWARE(FW_FILE_ISP22XX);
3029MODULE_FIRMWARE(FW_FILE_ISP2300); 2904MODULE_FIRMWARE(FW_FILE_ISP2300);
3030MODULE_FIRMWARE(FW_FILE_ISP2322); 2905MODULE_FIRMWARE(FW_FILE_ISP2322);
3031MODULE_FIRMWARE(FW_FILE_ISP24XX); 2906MODULE_FIRMWARE(FW_FILE_ISP24XX);
2907MODULE_FIRMWARE(FW_FILE_ISP25XX);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index b68fb73613ed..26822c8807ee 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -893,6 +893,8 @@ qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
893 } 893 }
894} 894}
895 895
896#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r))
897
896void 898void
897qla2x00_beacon_blink(struct scsi_qla_host *ha) 899qla2x00_beacon_blink(struct scsi_qla_host *ha)
898{ 900{
@@ -902,15 +904,12 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
902 unsigned long flags; 904 unsigned long flags;
903 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 905 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
904 906
905 if (ha->pio_address)
906 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
907
908 spin_lock_irqsave(&ha->hardware_lock, flags); 907 spin_lock_irqsave(&ha->hardware_lock, flags);
909 908
910 /* Save the Original GPIOE. */ 909 /* Save the Original GPIOE. */
911 if (ha->pio_address) { 910 if (ha->pio_address) {
912 gpio_enable = RD_REG_WORD_PIO(&reg->gpioe); 911 gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
913 gpio_data = RD_REG_WORD_PIO(&reg->gpiod); 912 gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
914 } else { 913 } else {
915 gpio_enable = RD_REG_WORD(&reg->gpioe); 914 gpio_enable = RD_REG_WORD(&reg->gpioe);
916 gpio_data = RD_REG_WORD(&reg->gpiod); 915 gpio_data = RD_REG_WORD(&reg->gpiod);
@@ -920,7 +919,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
920 gpio_enable |= GPIO_LED_MASK; 919 gpio_enable |= GPIO_LED_MASK;
921 920
922 if (ha->pio_address) { 921 if (ha->pio_address) {
923 WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable); 922 WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
924 } else { 923 } else {
925 WRT_REG_WORD(&reg->gpioe, gpio_enable); 924 WRT_REG_WORD(&reg->gpioe, gpio_enable);
926 RD_REG_WORD(&reg->gpioe); 925 RD_REG_WORD(&reg->gpioe);
@@ -936,7 +935,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
936 935
937 /* Set the modified gpio_data values */ 936 /* Set the modified gpio_data values */
938 if (ha->pio_address) { 937 if (ha->pio_address) {
939 WRT_REG_WORD_PIO(&reg->gpiod, gpio_data); 938 WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
940 } else { 939 } else {
941 WRT_REG_WORD(&reg->gpiod, gpio_data); 940 WRT_REG_WORD(&reg->gpiod, gpio_data);
942 RD_REG_WORD(&reg->gpiod); 941 RD_REG_WORD(&reg->gpiod);
@@ -962,14 +961,11 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
962 return QLA_FUNCTION_FAILED; 961 return QLA_FUNCTION_FAILED;
963 } 962 }
964 963
965 if (ha->pio_address)
966 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
967
968 /* Turn off LEDs. */ 964 /* Turn off LEDs. */
969 spin_lock_irqsave(&ha->hardware_lock, flags); 965 spin_lock_irqsave(&ha->hardware_lock, flags);
970 if (ha->pio_address) { 966 if (ha->pio_address) {
971 gpio_enable = RD_REG_WORD_PIO(&reg->gpioe); 967 gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
972 gpio_data = RD_REG_WORD_PIO(&reg->gpiod); 968 gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
973 } else { 969 } else {
974 gpio_enable = RD_REG_WORD(&reg->gpioe); 970 gpio_enable = RD_REG_WORD(&reg->gpioe);
975 gpio_data = RD_REG_WORD(&reg->gpiod); 971 gpio_data = RD_REG_WORD(&reg->gpiod);
@@ -978,7 +974,7 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
978 974
979 /* Set the modified gpio_enable values. */ 975 /* Set the modified gpio_enable values. */
980 if (ha->pio_address) { 976 if (ha->pio_address) {
981 WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable); 977 WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
982 } else { 978 } else {
983 WRT_REG_WORD(&reg->gpioe, gpio_enable); 979 WRT_REG_WORD(&reg->gpioe, gpio_enable);
984 RD_REG_WORD(&reg->gpioe); 980 RD_REG_WORD(&reg->gpioe);
@@ -987,7 +983,7 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
987 /* Clear out previously set LED colour. */ 983 /* Clear out previously set LED colour. */
988 gpio_data &= ~GPIO_LED_MASK; 984 gpio_data &= ~GPIO_LED_MASK;
989 if (ha->pio_address) { 985 if (ha->pio_address) {
990 WRT_REG_WORD_PIO(&reg->gpiod, gpio_data); 986 WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
991 } else { 987 } else {
992 WRT_REG_WORD(&reg->gpiod, gpio_data); 988 WRT_REG_WORD(&reg->gpiod, gpio_data);
993 RD_REG_WORD(&reg->gpiod); 989 RD_REG_WORD(&reg->gpiod);
@@ -1244,13 +1240,12 @@ qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
1244 if (ha->pio_address) { 1240 if (ha->pio_address) {
1245 uint16_t data2; 1241 uint16_t data2;
1246 1242
1247 reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 1243 WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
1248 WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
1249 do { 1244 do {
1250 data = RD_REG_WORD_PIO(&reg->flash_data); 1245 data = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
1251 barrier(); 1246 barrier();
1252 cpu_relax(); 1247 cpu_relax();
1253 data2 = RD_REG_WORD_PIO(&reg->flash_data); 1248 data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
1254 } while (data != data2); 1249 } while (data != data2);
1255 } else { 1250 } else {
1256 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1251 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
@@ -1304,9 +1299,8 @@ qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
1304 1299
1305 /* Always perform IO mapped accesses to the FLASH registers. */ 1300 /* Always perform IO mapped accesses to the FLASH registers. */
1306 if (ha->pio_address) { 1301 if (ha->pio_address) {
1307 reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 1302 WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
1308 WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr); 1303 WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
1309 WRT_REG_WORD_PIO(&reg->flash_data, (uint16_t)data);
1310 } else { 1304 } else {
1311 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1305 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1312 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1306 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 2c2f6b4697c7..c5742cc15abb 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.00-k7" 10#define QLA2XXX_VERSION "8.02.00-k8"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 49925f92555e..10b3b9a620f3 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1306,6 +1306,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
1306 atomic_set(&ddb_entry->relogin_timer, 0); 1306 atomic_set(&ddb_entry->relogin_timer, 0);
1307 clear_bit(DF_RELOGIN, &ddb_entry->flags); 1307 clear_bit(DF_RELOGIN, &ddb_entry->flags);
1308 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); 1308 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
1309 iscsi_unblock_session(ddb_entry->sess);
1309 iscsi_session_event(ddb_entry->sess, 1310 iscsi_session_event(ddb_entry->sess,
1310 ISCSI_KEVENT_CREATE_SESSION); 1311 ISCSI_KEVENT_CREATE_SESSION);
1311 /* 1312 /*
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2e2b9fedffcc..c3c59d763037 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -63,8 +63,6 @@ static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
63 enum iscsi_param param, char *buf); 63 enum iscsi_param param, char *buf);
64static int qla4xxx_host_get_param(struct Scsi_Host *shost, 64static int qla4xxx_host_get_param(struct Scsi_Host *shost,
65 enum iscsi_host_param param, char *buf); 65 enum iscsi_host_param param, char *buf);
66static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag);
67static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
68static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); 66static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
69 67
70/* 68/*
@@ -91,6 +89,8 @@ static struct scsi_host_template qla4xxx_driver_template = {
91 .slave_alloc = qla4xxx_slave_alloc, 89 .slave_alloc = qla4xxx_slave_alloc,
92 .slave_destroy = qla4xxx_slave_destroy, 90 .slave_destroy = qla4xxx_slave_destroy,
93 91
92 .scan_finished = iscsi_scan_finished,
93
94 .this_id = -1, 94 .this_id = -1,
95 .cmd_per_lun = 3, 95 .cmd_per_lun = 3,
96 .use_clustering = ENABLE_CLUSTERING, 96 .use_clustering = ENABLE_CLUSTERING,
@@ -116,8 +116,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
116 .get_conn_param = qla4xxx_conn_get_param, 116 .get_conn_param = qla4xxx_conn_get_param,
117 .get_session_param = qla4xxx_sess_get_param, 117 .get_session_param = qla4xxx_sess_get_param,
118 .get_host_param = qla4xxx_host_get_param, 118 .get_host_param = qla4xxx_host_get_param,
119 .start_conn = qla4xxx_conn_start,
120 .stop_conn = qla4xxx_conn_stop,
121 .session_recovery_timedout = qla4xxx_recovery_timedout, 119 .session_recovery_timedout = qla4xxx_recovery_timedout,
122}; 120};
123 121
@@ -128,48 +126,19 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
128 struct ddb_entry *ddb_entry = session->dd_data; 126 struct ddb_entry *ddb_entry = session->dd_data;
129 struct scsi_qla_host *ha = ddb_entry->ha; 127 struct scsi_qla_host *ha = ddb_entry->ha;
130 128
131 DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count of (%d) " 129 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
132 "secs exhausted, marking device DEAD.\n", ha->host_no, 130 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
133 __func__, ddb_entry->fw_ddb_index,
134 ha->port_down_retry_count));
135
136 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
137
138 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc flags = "
139 "0x%lx\n", ha->host_no, __func__, ha->dpc_flags));
140 queue_work(ha->dpc_thread, &ha->dpc_work);
141}
142
143static int qla4xxx_conn_start(struct iscsi_cls_conn *conn)
144{
145 struct iscsi_cls_session *session;
146 struct ddb_entry *ddb_entry;
147
148 session = iscsi_dev_to_session(conn->dev.parent);
149 ddb_entry = session->dd_data;
150
151 DEBUG2(printk("scsi%ld: %s: index [%d] starting conn\n",
152 ddb_entry->ha->host_no, __func__,
153 ddb_entry->fw_ddb_index));
154 iscsi_unblock_session(session);
155 return 0;
156}
157
158static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
159{
160 struct iscsi_cls_session *session;
161 struct ddb_entry *ddb_entry;
162 131
163 session = iscsi_dev_to_session(conn->dev.parent); 132 DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count "
164 ddb_entry = session->dd_data; 133 "of (%d) secs exhausted, marking device DEAD.\n",
134 ha->host_no, __func__, ddb_entry->fw_ddb_index,
135 ha->port_down_retry_count));
165 136
166 DEBUG2(printk("scsi%ld: %s: index [%d] stopping conn\n", 137 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc "
167 ddb_entry->ha->host_no, __func__, 138 "flags = 0x%lx\n",
168 ddb_entry->fw_ddb_index)); 139 ha->host_no, __func__, ha->dpc_flags));
169 if (flag == STOP_CONN_RECOVER) 140 queue_work(ha->dpc_thread, &ha->dpc_work);
170 iscsi_block_session(session); 141 }
171 else
172 printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
173} 142}
174 143
175static int qla4xxx_host_get_param(struct Scsi_Host *shost, 144static int qla4xxx_host_get_param(struct Scsi_Host *shost,
@@ -308,6 +277,9 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
308 DEBUG2(printk(KERN_ERR "Could not add connection.\n")); 277 DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
309 return -ENOMEM; 278 return -ENOMEM;
310 } 279 }
280
281 /* finally ready to go */
282 iscsi_unblock_session(ddb_entry->sess);
311 return 0; 283 return 0;
312} 284}
313 285
@@ -364,6 +336,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
364 DEBUG3(printk("scsi%d:%d:%d: index [%d] marked MISSING\n", 336 DEBUG3(printk("scsi%d:%d:%d: index [%d] marked MISSING\n",
365 ha->host_no, ddb_entry->bus, ddb_entry->target, 337 ha->host_no, ddb_entry->bus, ddb_entry->target,
366 ddb_entry->fw_ddb_index)); 338 ddb_entry->fw_ddb_index));
339 iscsi_block_session(ddb_entry->sess);
367 iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED); 340 iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
368} 341}
369 342
@@ -430,9 +403,21 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
430{ 403{
431 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 404 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
432 struct ddb_entry *ddb_entry = cmd->device->hostdata; 405 struct ddb_entry *ddb_entry = cmd->device->hostdata;
406 struct iscsi_cls_session *sess = ddb_entry->sess;
433 struct srb *srb; 407 struct srb *srb;
434 int rval; 408 int rval;
435 409
410 if (!sess) {
411 cmd->result = DID_IMM_RETRY << 16;
412 goto qc_fail_command;
413 }
414
415 rval = iscsi_session_chkready(sess);
416 if (rval) {
417 cmd->result = rval;
418 goto qc_fail_command;
419 }
420
436 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { 421 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
437 if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) { 422 if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) {
438 cmd->result = DID_NO_CONNECT << 16; 423 cmd->result = DID_NO_CONNECT << 16;
@@ -1323,7 +1308,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1323 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 1308 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
1324 ha->host_no, ha->firmware_version[0], ha->firmware_version[1], 1309 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
1325 ha->patch_number, ha->build_number); 1310 ha->patch_number, ha->build_number);
1326 1311 scsi_scan_host(host);
1327 return 0; 1312 return 0;
1328 1313
1329remove_host: 1314remove_host:
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b35d19472caa..fecba05b4e77 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -969,9 +969,10 @@ void starget_for_each_device(struct scsi_target *starget, void *data,
969EXPORT_SYMBOL(starget_for_each_device); 969EXPORT_SYMBOL(starget_for_each_device);
970 970
971/** 971/**
972 * __starget_for_each_device - helper to walk all devices of a target 972 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
973 * (UNLOCKED)
974 * @starget: target whose devices we want to iterate over. 973 * @starget: target whose devices we want to iterate over.
974 * @data: parameter for callback @fn()
975 * @fn: callback function that is invoked for each device
975 * 976 *
976 * This traverses over each device of @starget. It does _not_ 977 * This traverses over each device of @starget. It does _not_
977 * take a reference on the scsi_device, so the whole loop must be 978 * take a reference on the scsi_device, so the whole loop must be
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f243fc30c908..135c1d054701 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -301,7 +301,6 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
301 page = sg_page(sg); 301 page = sg_page(sg);
302 off = sg->offset; 302 off = sg->offset;
303 len = sg->length; 303 len = sg->length;
304 data_len += len;
305 304
306 while (len > 0 && data_len > 0) { 305 while (len > 0 && data_len > 0) {
307 /* 306 /*
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0d7b4e79415c..fac7534f3ec4 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,10 +30,10 @@
30#include <scsi/scsi_transport_iscsi.h> 30#include <scsi/scsi_transport_iscsi.h>
31#include <scsi/iscsi_if.h> 31#include <scsi/iscsi_if.h>
32 32
33#define ISCSI_SESSION_ATTRS 18 33#define ISCSI_SESSION_ATTRS 19
34#define ISCSI_CONN_ATTRS 11 34#define ISCSI_CONN_ATTRS 13
35#define ISCSI_HOST_ATTRS 4 35#define ISCSI_HOST_ATTRS 4
36#define ISCSI_TRANSPORT_VERSION "2.0-867" 36#define ISCSI_TRANSPORT_VERSION "2.0-868"
37 37
38struct iscsi_internal { 38struct iscsi_internal {
39 int daemon_pid; 39 int daemon_pid;
@@ -127,12 +127,13 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
127 memset(ihost, 0, sizeof(*ihost)); 127 memset(ihost, 0, sizeof(*ihost));
128 INIT_LIST_HEAD(&ihost->sessions); 128 INIT_LIST_HEAD(&ihost->sessions);
129 mutex_init(&ihost->mutex); 129 mutex_init(&ihost->mutex);
130 atomic_set(&ihost->nr_scans, 0);
130 131
131 snprintf(ihost->unbind_workq_name, KOBJ_NAME_LEN, "iscsi_unbind_%d", 132 snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
132 shost->host_no); 133 shost->host_no);
133 ihost->unbind_workq = create_singlethread_workqueue( 134 ihost->scan_workq = create_singlethread_workqueue(
134 ihost->unbind_workq_name); 135 ihost->scan_workq_name);
135 if (!ihost->unbind_workq) 136 if (!ihost->scan_workq)
136 return -ENOMEM; 137 return -ENOMEM;
137 return 0; 138 return 0;
138} 139}
@@ -143,7 +144,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
143 struct Scsi_Host *shost = dev_to_shost(dev); 144 struct Scsi_Host *shost = dev_to_shost(dev);
144 struct iscsi_host *ihost = shost->shost_data; 145 struct iscsi_host *ihost = shost->shost_data;
145 146
146 destroy_workqueue(ihost->unbind_workq); 147 destroy_workqueue(ihost->scan_workq);
147 return 0; 148 return 0;
148} 149}
149 150
@@ -221,6 +222,54 @@ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
221 * The following functions can be used by LLDs that allocate 222 * The following functions can be used by LLDs that allocate
222 * their own scsi_hosts or by software iscsi LLDs 223 * their own scsi_hosts or by software iscsi LLDs
223 */ 224 */
225static struct {
226 int value;
227 char *name;
228} iscsi_session_state_names[] = {
229 { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" },
230 { ISCSI_SESSION_FAILED, "FAILED" },
231 { ISCSI_SESSION_FREE, "FREE" },
232};
233
234const char *iscsi_session_state_name(int state)
235{
236 int i;
237 char *name = NULL;
238
239 for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) {
240 if (iscsi_session_state_names[i].value == state) {
241 name = iscsi_session_state_names[i].name;
242 break;
243 }
244 }
245 return name;
246}
247
248int iscsi_session_chkready(struct iscsi_cls_session *session)
249{
250 unsigned long flags;
251 int err;
252
253 spin_lock_irqsave(&session->lock, flags);
254 switch (session->state) {
255 case ISCSI_SESSION_LOGGED_IN:
256 err = 0;
257 break;
258 case ISCSI_SESSION_FAILED:
259 err = DID_IMM_RETRY << 16;
260 break;
261 case ISCSI_SESSION_FREE:
262 err = DID_NO_CONNECT << 16;
263 break;
264 default:
265 err = DID_NO_CONNECT << 16;
266 break;
267 }
268 spin_unlock_irqrestore(&session->lock, flags);
269 return err;
270}
271EXPORT_SYMBOL_GPL(iscsi_session_chkready);
272
224static void iscsi_session_release(struct device *dev) 273static void iscsi_session_release(struct device *dev)
225{ 274{
226 struct iscsi_cls_session *session = iscsi_dev_to_session(dev); 275 struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
@@ -236,6 +285,25 @@ static int iscsi_is_session_dev(const struct device *dev)
236 return dev->release == iscsi_session_release; 285 return dev->release == iscsi_session_release;
237} 286}
238 287
288/**
289 * iscsi_scan_finished - helper to report when running scans are done
290 * @shost: scsi host
291 * @time: scan run time
292 *
293 * This function can be used by drives like qla4xxx to report to the scsi
294 * layer when the scans it kicked off at module load time are done.
295 */
296int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
297{
298 struct iscsi_host *ihost = shost->shost_data;
299 /*
300 * qla4xxx will have kicked off some session unblocks before calling
301 * scsi_scan_host, so just wait for them to complete.
302 */
303 return !atomic_read(&ihost->nr_scans);
304}
305EXPORT_SYMBOL_GPL(iscsi_scan_finished);
306
239static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, 307static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
240 uint id, uint lun) 308 uint id, uint lun)
241{ 309{
@@ -254,14 +322,50 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
254 return 0; 322 return 0;
255} 323}
256 324
325static void iscsi_scan_session(struct work_struct *work)
326{
327 struct iscsi_cls_session *session =
328 container_of(work, struct iscsi_cls_session, scan_work);
329 struct Scsi_Host *shost = iscsi_session_to_shost(session);
330 struct iscsi_host *ihost = shost->shost_data;
331 unsigned long flags;
332
333 spin_lock_irqsave(&session->lock, flags);
334 if (session->state != ISCSI_SESSION_LOGGED_IN) {
335 spin_unlock_irqrestore(&session->lock, flags);
336 goto done;
337 }
338 spin_unlock_irqrestore(&session->lock, flags);
339
340 scsi_scan_target(&session->dev, 0, session->target_id,
341 SCAN_WILD_CARD, 1);
342done:
343 atomic_dec(&ihost->nr_scans);
344}
345
257static void session_recovery_timedout(struct work_struct *work) 346static void session_recovery_timedout(struct work_struct *work)
258{ 347{
259 struct iscsi_cls_session *session = 348 struct iscsi_cls_session *session =
260 container_of(work, struct iscsi_cls_session, 349 container_of(work, struct iscsi_cls_session,
261 recovery_work.work); 350 recovery_work.work);
351 unsigned long flags;
352
353 iscsi_cls_session_printk(KERN_INFO, session,
354 "session recovery timed out after %d secs\n",
355 session->recovery_tmo);
262 356
263 dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed " 357 spin_lock_irqsave(&session->lock, flags);
264 "out after %d secs\n", session->recovery_tmo); 358 switch (session->state) {
359 case ISCSI_SESSION_FAILED:
360 session->state = ISCSI_SESSION_FREE;
361 break;
362 case ISCSI_SESSION_LOGGED_IN:
363 case ISCSI_SESSION_FREE:
364 /* we raced with the unblock's flush */
365 spin_unlock_irqrestore(&session->lock, flags);
366 return;
367 }
368 spin_unlock_irqrestore(&session->lock, flags);
265 369
266 if (session->transport->session_recovery_timedout) 370 if (session->transport->session_recovery_timedout)
267 session->transport->session_recovery_timedout(session); 371 session->transport->session_recovery_timedout(session);
@@ -269,16 +373,44 @@ static void session_recovery_timedout(struct work_struct *work)
269 scsi_target_unblock(&session->dev); 373 scsi_target_unblock(&session->dev);
270} 374}
271 375
272void iscsi_unblock_session(struct iscsi_cls_session *session) 376void __iscsi_unblock_session(struct iscsi_cls_session *session)
273{ 377{
274 if (!cancel_delayed_work(&session->recovery_work)) 378 if (!cancel_delayed_work(&session->recovery_work))
275 flush_workqueue(iscsi_eh_timer_workq); 379 flush_workqueue(iscsi_eh_timer_workq);
276 scsi_target_unblock(&session->dev); 380 scsi_target_unblock(&session->dev);
277} 381}
382
383void iscsi_unblock_session(struct iscsi_cls_session *session)
384{
385 struct Scsi_Host *shost = iscsi_session_to_shost(session);
386 struct iscsi_host *ihost = shost->shost_data;
387 unsigned long flags;
388
389 spin_lock_irqsave(&session->lock, flags);
390 session->state = ISCSI_SESSION_LOGGED_IN;
391 spin_unlock_irqrestore(&session->lock, flags);
392
393 __iscsi_unblock_session(session);
394 /*
395 * Only do kernel scanning if the driver is properly hooked into
396 * the async scanning code (drivers like iscsi_tcp do login and
397 * scanning from userspace).
398 */
399 if (shost->hostt->scan_finished) {
400 if (queue_work(ihost->scan_workq, &session->scan_work))
401 atomic_inc(&ihost->nr_scans);
402 }
403}
278EXPORT_SYMBOL_GPL(iscsi_unblock_session); 404EXPORT_SYMBOL_GPL(iscsi_unblock_session);
279 405
280void iscsi_block_session(struct iscsi_cls_session *session) 406void iscsi_block_session(struct iscsi_cls_session *session)
281{ 407{
408 unsigned long flags;
409
410 spin_lock_irqsave(&session->lock, flags);
411 session->state = ISCSI_SESSION_FAILED;
412 spin_unlock_irqrestore(&session->lock, flags);
413
282 scsi_target_block(&session->dev); 414 scsi_target_block(&session->dev);
283 queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, 415 queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
284 session->recovery_tmo * HZ); 416 session->recovery_tmo * HZ);
@@ -311,7 +443,7 @@ static int iscsi_unbind_session(struct iscsi_cls_session *session)
311 struct Scsi_Host *shost = iscsi_session_to_shost(session); 443 struct Scsi_Host *shost = iscsi_session_to_shost(session);
312 struct iscsi_host *ihost = shost->shost_data; 444 struct iscsi_host *ihost = shost->shost_data;
313 445
314 return queue_work(ihost->unbind_workq, &session->unbind_work); 446 return queue_work(ihost->scan_workq, &session->unbind_work);
315} 447}
316 448
317struct iscsi_cls_session * 449struct iscsi_cls_session *
@@ -327,10 +459,13 @@ iscsi_alloc_session(struct Scsi_Host *shost,
327 459
328 session->transport = transport; 460 session->transport = transport;
329 session->recovery_tmo = 120; 461 session->recovery_tmo = 120;
462 session->state = ISCSI_SESSION_FREE;
330 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); 463 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
331 INIT_LIST_HEAD(&session->host_list); 464 INIT_LIST_HEAD(&session->host_list);
332 INIT_LIST_HEAD(&session->sess_list); 465 INIT_LIST_HEAD(&session->sess_list);
333 INIT_WORK(&session->unbind_work, __iscsi_unbind_session); 466 INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
467 INIT_WORK(&session->scan_work, iscsi_scan_session);
468 spin_lock_init(&session->lock);
334 469
335 /* this is released in the dev's release function */ 470 /* this is released in the dev's release function */
336 scsi_host_get(shost); 471 scsi_host_get(shost);
@@ -358,8 +493,8 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
358 session->sid); 493 session->sid);
359 err = device_add(&session->dev); 494 err = device_add(&session->dev);
360 if (err) { 495 if (err) {
361 dev_printk(KERN_ERR, &session->dev, "iscsi: could not " 496 iscsi_cls_session_printk(KERN_ERR, session,
362 "register session's dev\n"); 497 "could not register session's dev\n");
363 goto release_host; 498 goto release_host;
364 } 499 }
365 transport_register_device(&session->dev); 500 transport_register_device(&session->dev);
@@ -444,22 +579,28 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
444 * If we are blocked let commands flow again. The lld or iscsi 579 * If we are blocked let commands flow again. The lld or iscsi
445 * layer should set up the queuecommand to fail commands. 580 * layer should set up the queuecommand to fail commands.
446 */ 581 */
447 iscsi_unblock_session(session); 582 spin_lock_irqsave(&session->lock, flags);
448 iscsi_unbind_session(session); 583 session->state = ISCSI_SESSION_FREE;
584 spin_unlock_irqrestore(&session->lock, flags);
585 __iscsi_unblock_session(session);
586 __iscsi_unbind_session(&session->unbind_work);
587
588 /* flush running scans */
589 flush_workqueue(ihost->scan_workq);
449 /* 590 /*
450 * If the session dropped while removing devices then we need to make 591 * If the session dropped while removing devices then we need to make
451 * sure it is not blocked 592 * sure it is not blocked
452 */ 593 */
453 if (!cancel_delayed_work(&session->recovery_work)) 594 if (!cancel_delayed_work(&session->recovery_work))
454 flush_workqueue(iscsi_eh_timer_workq); 595 flush_workqueue(iscsi_eh_timer_workq);
455 flush_workqueue(ihost->unbind_workq);
456 596
457 /* hw iscsi may not have removed all connections from session */ 597 /* hw iscsi may not have removed all connections from session */
458 err = device_for_each_child(&session->dev, NULL, 598 err = device_for_each_child(&session->dev, NULL,
459 iscsi_iter_destroy_conn_fn); 599 iscsi_iter_destroy_conn_fn);
460 if (err) 600 if (err)
461 dev_printk(KERN_ERR, &session->dev, "iscsi: Could not delete " 601 iscsi_cls_session_printk(KERN_ERR, session,
462 "all connections for session. Error %d.\n", err); 602 "Could not delete all connections "
603 "for session. Error %d.\n", err);
463 604
464 transport_unregister_device(&session->dev); 605 transport_unregister_device(&session->dev);
465 device_del(&session->dev); 606 device_del(&session->dev);
@@ -531,8 +672,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
531 conn->dev.release = iscsi_conn_release; 672 conn->dev.release = iscsi_conn_release;
532 err = device_register(&conn->dev); 673 err = device_register(&conn->dev);
533 if (err) { 674 if (err) {
534 dev_printk(KERN_ERR, &conn->dev, "iscsi: could not register " 675 iscsi_cls_session_printk(KERN_ERR, session, "could not "
535 "connection's dev\n"); 676 "register connection's dev\n");
536 goto release_parent_ref; 677 goto release_parent_ref;
537 } 678 }
538 transport_register_device(&conn->dev); 679 transport_register_device(&conn->dev);
@@ -639,8 +780,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
639 skb = alloc_skb(len, GFP_ATOMIC); 780 skb = alloc_skb(len, GFP_ATOMIC);
640 if (!skb) { 781 if (!skb) {
641 iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED); 782 iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
642 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver " 783 iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
643 "control PDU: OOM\n"); 784 "control PDU: OOM\n");
644 return -ENOMEM; 785 return -ENOMEM;
645 } 786 }
646 787
@@ -661,20 +802,27 @@ EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
661 802
662void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) 803void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
663{ 804{
805 struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
664 struct nlmsghdr *nlh; 806 struct nlmsghdr *nlh;
665 struct sk_buff *skb; 807 struct sk_buff *skb;
666 struct iscsi_uevent *ev; 808 struct iscsi_uevent *ev;
667 struct iscsi_internal *priv; 809 struct iscsi_internal *priv;
668 int len = NLMSG_SPACE(sizeof(*ev)); 810 int len = NLMSG_SPACE(sizeof(*ev));
811 unsigned long flags;
669 812
670 priv = iscsi_if_transport_lookup(conn->transport); 813 priv = iscsi_if_transport_lookup(conn->transport);
671 if (!priv) 814 if (!priv)
672 return; 815 return;
673 816
817 spin_lock_irqsave(&session->lock, flags);
818 if (session->state == ISCSI_SESSION_LOGGED_IN)
819 session->state = ISCSI_SESSION_FAILED;
820 spin_unlock_irqrestore(&session->lock, flags);
821
674 skb = alloc_skb(len, GFP_ATOMIC); 822 skb = alloc_skb(len, GFP_ATOMIC);
675 if (!skb) { 823 if (!skb) {
676 dev_printk(KERN_ERR, &conn->dev, "iscsi: gracefully ignored " 824 iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
677 "conn error (%d)\n", error); 825 "conn error (%d)\n", error);
678 return; 826 return;
679 } 827 }
680 828
@@ -688,8 +836,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
688 836
689 iscsi_broadcast_skb(skb, GFP_ATOMIC); 837 iscsi_broadcast_skb(skb, GFP_ATOMIC);
690 838
691 dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", 839 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
692 error); 840 error);
693} 841}
694EXPORT_SYMBOL_GPL(iscsi_conn_error); 842EXPORT_SYMBOL_GPL(iscsi_conn_error);
695 843
@@ -744,8 +892,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
744 892
745 skbstat = alloc_skb(len, GFP_ATOMIC); 893 skbstat = alloc_skb(len, GFP_ATOMIC);
746 if (!skbstat) { 894 if (!skbstat) {
747 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not " 895 iscsi_cls_conn_printk(KERN_ERR, conn, "can not "
748 "deliver stats: OOM\n"); 896 "deliver stats: OOM\n");
749 return -ENOMEM; 897 return -ENOMEM;
750 } 898 }
751 899
@@ -801,8 +949,9 @@ int iscsi_session_event(struct iscsi_cls_session *session,
801 949
802 skb = alloc_skb(len, GFP_KERNEL); 950 skb = alloc_skb(len, GFP_KERNEL);
803 if (!skb) { 951 if (!skb) {
804 dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace " 952 iscsi_cls_session_printk(KERN_ERR, session,
805 "of session event %u\n", event); 953 "Cannot notify userspace of session "
954 "event %u\n", event);
806 return -ENOMEM; 955 return -ENOMEM;
807 } 956 }
808 957
@@ -825,8 +974,8 @@ int iscsi_session_event(struct iscsi_cls_session *session,
825 ev->r.unbind_session.sid = session->sid; 974 ev->r.unbind_session.sid = session->sid;
826 break; 975 break;
827 default: 976 default:
828 dev_printk(KERN_ERR, &session->dev, "Invalid event %u.\n", 977 iscsi_cls_session_printk(KERN_ERR, session, "Invalid event "
829 event); 978 "%u.\n", event);
830 kfree_skb(skb); 979 kfree_skb(skb);
831 return -EINVAL; 980 return -EINVAL;
832 } 981 }
@@ -837,8 +986,10 @@ int iscsi_session_event(struct iscsi_cls_session *session,
837 */ 986 */
838 rc = iscsi_broadcast_skb(skb, GFP_KERNEL); 987 rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
839 if (rc < 0) 988 if (rc < 0)
840 dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace " 989 iscsi_cls_session_printk(KERN_ERR, session,
841 "of session event %u. Check iscsi daemon\n", event); 990 "Cannot notify userspace of session "
991 "event %u. Check iscsi daemon\n",
992 event);
842 return rc; 993 return rc;
843} 994}
844EXPORT_SYMBOL_GPL(iscsi_session_event); 995EXPORT_SYMBOL_GPL(iscsi_session_event);
@@ -871,16 +1022,15 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
871 1022
872 session = iscsi_session_lookup(ev->u.c_conn.sid); 1023 session = iscsi_session_lookup(ev->u.c_conn.sid);
873 if (!session) { 1024 if (!session) {
874 printk(KERN_ERR "iscsi: invalid session %d\n", 1025 printk(KERN_ERR "iscsi: invalid session %d.\n",
875 ev->u.c_conn.sid); 1026 ev->u.c_conn.sid);
876 return -EINVAL; 1027 return -EINVAL;
877 } 1028 }
878 1029
879 conn = transport->create_conn(session, ev->u.c_conn.cid); 1030 conn = transport->create_conn(session, ev->u.c_conn.cid);
880 if (!conn) { 1031 if (!conn) {
881 printk(KERN_ERR "iscsi: couldn't create a new " 1032 iscsi_cls_session_printk(KERN_ERR, session,
882 "connection for session %d\n", 1033 "couldn't create a new connection.");
883 session->sid);
884 return -ENOMEM; 1034 return -ENOMEM;
885 } 1035 }
886 1036
@@ -1246,6 +1396,15 @@ iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
1246iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); 1396iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
1247iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); 1397iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
1248 1398
1399static ssize_t
1400show_priv_session_state(struct class_device *cdev, char *buf)
1401{
1402 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
1403 return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
1404}
1405static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
1406 NULL);
1407
1249#define iscsi_priv_session_attr_show(field, format) \ 1408#define iscsi_priv_session_attr_show(field, format) \
1250static ssize_t \ 1409static ssize_t \
1251show_priv_session_##field(struct class_device *cdev, char *buf) \ 1410show_priv_session_##field(struct class_device *cdev, char *buf) \
@@ -1472,6 +1631,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
1472 SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO); 1631 SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
1473 SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO); 1632 SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
1474 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); 1633 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
1634 SETUP_PRIV_SESSION_RD_ATTR(state);
1475 1635
1476 BUG_ON(count > ISCSI_SESSION_ATTRS); 1636 BUG_ON(count > ISCSI_SESSION_ATTRS);
1477 priv->session_attrs[count] = NULL; 1637 priv->session_attrs[count] = NULL;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 51a5557f42dd..37df8bbe7f46 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -929,6 +929,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
929 unsigned int xfer_size = scsi_bufflen(SCpnt); 929 unsigned int xfer_size = scsi_bufflen(SCpnt);
930 unsigned int good_bytes = result ? 0 : xfer_size; 930 unsigned int good_bytes = result ? 0 : xfer_size;
931 u64 start_lba = SCpnt->request->sector; 931 u64 start_lba = SCpnt->request->sector;
932 u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
932 u64 bad_lba; 933 u64 bad_lba;
933 struct scsi_sense_hdr sshdr; 934 struct scsi_sense_hdr sshdr;
934 int sense_valid = 0; 935 int sense_valid = 0;
@@ -967,26 +968,23 @@ static int sd_done(struct scsi_cmnd *SCpnt)
967 goto out; 968 goto out;
968 if (xfer_size <= SCpnt->device->sector_size) 969 if (xfer_size <= SCpnt->device->sector_size)
969 goto out; 970 goto out;
970 switch (SCpnt->device->sector_size) { 971 if (SCpnt->device->sector_size < 512) {
971 case 256: 972 /* only legitimate sector_size here is 256 */
972 start_lba <<= 1; 973 start_lba <<= 1;
973 break; 974 end_lba <<= 1;
974 case 512: 975 } else {
975 break; 976 /* be careful ... don't want any overflows */
976 case 1024: 977 u64 factor = SCpnt->device->sector_size / 512;
977 start_lba >>= 1; 978 do_div(start_lba, factor);
978 break; 979 do_div(end_lba, factor);
979 case 2048:
980 start_lba >>= 2;
981 break;
982 case 4096:
983 start_lba >>= 3;
984 break;
985 default:
986 /* Print something here with limiting frequency. */
987 goto out;
988 break;
989 } 980 }
981
982 if (bad_lba < start_lba || bad_lba >= end_lba)
983 /* the bad lba was reported incorrectly, we have
984 * no idea where the error is
985 */
986 goto out;
987
990 /* This computation should always be done in terms of 988 /* This computation should always be done in terms of
991 * the resolution of the device's medium. 989 * the resolution of the device's medium.
992 */ 990 */
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
new file mode 100644
index 000000000000..2a6e4f472eaa
--- /dev/null
+++ b/drivers/scsi/ses.c
@@ -0,0 +1,689 @@
1/*
2 * SCSI Enclosure Services
3 *
4 * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
5 *
6**-----------------------------------------------------------------------------
7**
8** This program is free software; you can redistribute it and/or
9** modify it under the terms of the GNU General Public License
10** version 2 as published by the Free Software Foundation.
11**
12** This program is distributed in the hope that it will be useful,
13** but WITHOUT ANY WARRANTY; without even the implied warranty of
14** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15** GNU General Public License for more details.
16**
17** You should have received a copy of the GNU General Public License
18** along with this program; if not, write to the Free Software
19** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20**
21**-----------------------------------------------------------------------------
22*/
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/enclosure.h>
27
28#include <scsi/scsi.h>
29#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_dbg.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_driver.h>
33#include <scsi/scsi_host.h>
34
35struct ses_device {
36 char *page1;
37 char *page2;
38 char *page10;
39 short page1_len;
40 short page2_len;
41 short page10_len;
42};
43
44struct ses_component {
45 u64 addr;
46 unsigned char *desc;
47};
48
49static int ses_probe(struct device *dev)
50{
51 struct scsi_device *sdev = to_scsi_device(dev);
52 int err = -ENODEV;
53
54 if (sdev->type != TYPE_ENCLOSURE)
55 goto out;
56
57 err = 0;
58 sdev_printk(KERN_NOTICE, sdev, "Attached Enclosure device\n");
59
60 out:
61 return err;
62}
63
64#define SES_TIMEOUT 30
65#define SES_RETRIES 3
66
67static int ses_recv_diag(struct scsi_device *sdev, int page_code,
68 void *buf, int bufflen)
69{
70 char cmd[] = {
71 RECEIVE_DIAGNOSTIC,
72 1, /* Set PCV bit */
73 page_code,
74 bufflen >> 8,
75 bufflen & 0xff,
76 0
77 };
78
79 return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
80 NULL, SES_TIMEOUT, SES_RETRIES);
81}
82
83static int ses_send_diag(struct scsi_device *sdev, int page_code,
84 void *buf, int bufflen)
85{
86 u32 result;
87
88 char cmd[] = {
89 SEND_DIAGNOSTIC,
90 0x10, /* Set PF bit */
91 0,
92 bufflen >> 8,
93 bufflen & 0xff,
94 0
95 };
96
97 result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
98 NULL, SES_TIMEOUT, SES_RETRIES);
99 if (result)
100 sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
101 result);
102 return result;
103}
104
105static int ses_set_page2_descriptor(struct enclosure_device *edev,
106 struct enclosure_component *ecomp,
107 char *desc)
108{
109 int i, j, count = 0, descriptor = ecomp->number;
110 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
111 struct ses_device *ses_dev = edev->scratch;
112 char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
113 char *desc_ptr = ses_dev->page2 + 8;
114
115 /* Clear everything */
116 memset(desc_ptr, 0, ses_dev->page2_len - 8);
117 for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
118 for (j = 0; j < type_ptr[1]; j++) {
119 desc_ptr += 4;
120 if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
121 type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
122 continue;
123 if (count++ == descriptor) {
124 memcpy(desc_ptr, desc, 4);
125 /* set select */
126 desc_ptr[0] |= 0x80;
127 /* clear reserved, just in case */
128 desc_ptr[0] &= 0xf0;
129 }
130 }
131 }
132
133 return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
134}
135
136static char *ses_get_page2_descriptor(struct enclosure_device *edev,
137 struct enclosure_component *ecomp)
138{
139 int i, j, count = 0, descriptor = ecomp->number;
140 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
141 struct ses_device *ses_dev = edev->scratch;
142 char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
143 char *desc_ptr = ses_dev->page2 + 8;
144
145 ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
146
147 for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
148 for (j = 0; j < type_ptr[1]; j++) {
149 desc_ptr += 4;
150 if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
151 type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
152 continue;
153 if (count++ == descriptor)
154 return desc_ptr;
155 }
156 }
157 return NULL;
158}
159
160static void ses_get_fault(struct enclosure_device *edev,
161 struct enclosure_component *ecomp)
162{
163 char *desc;
164
165 desc = ses_get_page2_descriptor(edev, ecomp);
166 ecomp->fault = (desc[3] & 0x60) >> 4;
167}
168
169static int ses_set_fault(struct enclosure_device *edev,
170 struct enclosure_component *ecomp,
171 enum enclosure_component_setting val)
172{
173 char desc[4] = {0 };
174
175 switch (val) {
176 case ENCLOSURE_SETTING_DISABLED:
177 /* zero is disabled */
178 break;
179 case ENCLOSURE_SETTING_ENABLED:
180 desc[2] = 0x02;
181 break;
182 default:
183 /* SES doesn't do the SGPIO blink settings */
184 return -EINVAL;
185 }
186
187 return ses_set_page2_descriptor(edev, ecomp, desc);
188}
189
190static void ses_get_status(struct enclosure_device *edev,
191 struct enclosure_component *ecomp)
192{
193 char *desc;
194
195 desc = ses_get_page2_descriptor(edev, ecomp);
196 ecomp->status = (desc[0] & 0x0f);
197}
198
199static void ses_get_locate(struct enclosure_device *edev,
200 struct enclosure_component *ecomp)
201{
202 char *desc;
203
204 desc = ses_get_page2_descriptor(edev, ecomp);
205 ecomp->locate = (desc[2] & 0x02) ? 1 : 0;
206}
207
208static int ses_set_locate(struct enclosure_device *edev,
209 struct enclosure_component *ecomp,
210 enum enclosure_component_setting val)
211{
212 char desc[4] = {0 };
213
214 switch (val) {
215 case ENCLOSURE_SETTING_DISABLED:
216 /* zero is disabled */
217 break;
218 case ENCLOSURE_SETTING_ENABLED:
219 desc[2] = 0x02;
220 break;
221 default:
222 /* SES doesn't do the SGPIO blink settings */
223 return -EINVAL;
224 }
225 return ses_set_page2_descriptor(edev, ecomp, desc);
226}
227
228static int ses_set_active(struct enclosure_device *edev,
229 struct enclosure_component *ecomp,
230 enum enclosure_component_setting val)
231{
232 char desc[4] = {0 };
233
234 switch (val) {
235 case ENCLOSURE_SETTING_DISABLED:
236 /* zero is disabled */
237 ecomp->active = 0;
238 break;
239 case ENCLOSURE_SETTING_ENABLED:
240 desc[2] = 0x80;
241 ecomp->active = 1;
242 break;
243 default:
244 /* SES doesn't do the SGPIO blink settings */
245 return -EINVAL;
246 }
247 return ses_set_page2_descriptor(edev, ecomp, desc);
248}
249
250static struct enclosure_component_callbacks ses_enclosure_callbacks = {
251 .get_fault = ses_get_fault,
252 .set_fault = ses_set_fault,
253 .get_status = ses_get_status,
254 .get_locate = ses_get_locate,
255 .set_locate = ses_set_locate,
256 .set_active = ses_set_active,
257};
258
259struct ses_host_edev {
260 struct Scsi_Host *shost;
261 struct enclosure_device *edev;
262};
263
264int ses_match_host(struct enclosure_device *edev, void *data)
265{
266 struct ses_host_edev *sed = data;
267 struct scsi_device *sdev;
268
269 if (!scsi_is_sdev_device(edev->cdev.dev))
270 return 0;
271
272 sdev = to_scsi_device(edev->cdev.dev);
273
274 if (sdev->host != sed->shost)
275 return 0;
276
277 sed->edev = edev;
278 return 1;
279}
280
281static void ses_process_descriptor(struct enclosure_component *ecomp,
282 unsigned char *desc)
283{
284 int eip = desc[0] & 0x10;
285 int invalid = desc[0] & 0x80;
286 enum scsi_protocol proto = desc[0] & 0x0f;
287 u64 addr = 0;
288 struct ses_component *scomp = ecomp->scratch;
289 unsigned char *d;
290
291 scomp->desc = desc;
292
293 if (invalid)
294 return;
295
296 switch (proto) {
297 case SCSI_PROTOCOL_SAS:
298 if (eip)
299 d = desc + 8;
300 else
301 d = desc + 4;
302 /* only take the phy0 addr */
303 addr = (u64)d[12] << 56 |
304 (u64)d[13] << 48 |
305 (u64)d[14] << 40 |
306 (u64)d[15] << 32 |
307 (u64)d[16] << 24 |
308 (u64)d[17] << 16 |
309 (u64)d[18] << 8 |
310 (u64)d[19];
311 break;
312 default:
313 /* FIXME: Need to add more protocols than just SAS */
314 break;
315 }
316 scomp->addr = addr;
317}
318
319struct efd {
320 u64 addr;
321 struct device *dev;
322};
323
324static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
325 void *data)
326{
327 struct efd *efd = data;
328 int i;
329 struct ses_component *scomp;
330
331 if (!edev->component[0].scratch)
332 return 0;
333
334 for (i = 0; i < edev->components; i++) {
335 scomp = edev->component[i].scratch;
336 if (scomp->addr != efd->addr)
337 continue;
338
339 enclosure_add_device(edev, i, efd->dev);
340 return 1;
341 }
342 return 0;
343}
344
345#define VPD_INQUIRY_SIZE 512
346
347static void ses_match_to_enclosure(struct enclosure_device *edev,
348 struct scsi_device *sdev)
349{
350 unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL);
351 unsigned char *desc;
352 int len;
353 struct efd efd = {
354 .addr = 0,
355 };
356 unsigned char cmd[] = {
357 INQUIRY,
358 1,
359 0x83,
360 VPD_INQUIRY_SIZE >> 8,
361 VPD_INQUIRY_SIZE & 0xff,
362 0
363 };
364
365 if (!buf)
366 return;
367
368 if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
369 VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
370 goto free;
371
372 len = (buf[2] << 8) + buf[3];
373 desc = buf + 4;
374 while (desc < buf + len) {
375 enum scsi_protocol proto = desc[0] >> 4;
376 u8 code_set = desc[0] & 0x0f;
377 u8 piv = desc[1] & 0x80;
378 u8 assoc = (desc[1] & 0x30) >> 4;
379 u8 type = desc[1] & 0x0f;
380 u8 len = desc[3];
381
382 if (piv && code_set == 1 && assoc == 1 && code_set == 1
383 && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8)
384 efd.addr = (u64)desc[4] << 56 |
385 (u64)desc[5] << 48 |
386 (u64)desc[6] << 40 |
387 (u64)desc[7] << 32 |
388 (u64)desc[8] << 24 |
389 (u64)desc[9] << 16 |
390 (u64)desc[10] << 8 |
391 (u64)desc[11];
392
393 desc += len + 4;
394 }
395 if (!efd.addr)
396 goto free;
397
398 efd.dev = &sdev->sdev_gendev;
399
400 enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
401 free:
402 kfree(buf);
403}
404
405#define INIT_ALLOC_SIZE 32
406
407static int ses_intf_add(struct class_device *cdev,
408 struct class_interface *intf)
409{
410 struct scsi_device *sdev = to_scsi_device(cdev->dev);
411 struct scsi_device *tmp_sdev;
412 unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr,
413 *addl_desc_ptr;
414 struct ses_device *ses_dev;
415 u32 result;
416 int i, j, types, len, components = 0;
417 int err = -ENOMEM;
418 struct enclosure_device *edev;
419 struct ses_component *scomp;
420
421 if (!scsi_device_enclosure(sdev)) {
422 /* not an enclosure, but might be in one */
423 edev = enclosure_find(&sdev->host->shost_gendev);
424 if (edev) {
425 ses_match_to_enclosure(edev, sdev);
426 class_device_put(&edev->cdev);
427 }
428 return -ENODEV;
429 }
430
431 /* TYPE_ENCLOSURE prints a message in probe */
432 if (sdev->type != TYPE_ENCLOSURE)
433 sdev_printk(KERN_NOTICE, sdev, "Embedded Enclosure Device\n");
434
435 ses_dev = kzalloc(sizeof(*ses_dev), GFP_KERNEL);
436 hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
437 if (!hdr_buf || !ses_dev)
438 goto err_init_free;
439
440 result = ses_recv_diag(sdev, 1, hdr_buf, INIT_ALLOC_SIZE);
441 if (result)
442 goto recv_failed;
443
444 if (hdr_buf[1] != 0) {
445 /* FIXME: need subenclosure support; I've just never
446 * seen a device with subenclosures and it makes the
447 * traversal routines more complex */
448 sdev_printk(KERN_ERR, sdev,
449 "FIXME driver has no support for subenclosures (%d)\n",
450 buf[1]);
451 goto err_free;
452 }
453
454 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
455 buf = kzalloc(len, GFP_KERNEL);
456 if (!buf)
457 goto err_free;
458
459 ses_dev->page1 = buf;
460 ses_dev->page1_len = len;
461
462 result = ses_recv_diag(sdev, 1, buf, len);
463 if (result)
464 goto recv_failed;
465
466 types = buf[10];
467 len = buf[11];
468
469 type_ptr = buf + 12 + len;
470
471 for (i = 0; i < types; i++, type_ptr += 4) {
472 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
473 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
474 components += type_ptr[1];
475 }
476
477 result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE);
478 if (result)
479 goto recv_failed;
480
481 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
482 buf = kzalloc(len, GFP_KERNEL);
483 if (!buf)
484 goto err_free;
485
486 /* make sure getting page 2 actually works */
487 result = ses_recv_diag(sdev, 2, buf, len);
488 if (result)
489 goto recv_failed;
490 ses_dev->page2 = buf;
491 ses_dev->page2_len = len;
492
493 /* The additional information page --- allows us
494 * to match up the devices */
495 result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE);
496 if (result)
497 goto no_page10;
498
499 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
500 buf = kzalloc(len, GFP_KERNEL);
501 if (!buf)
502 goto err_free;
503
504 result = ses_recv_diag(sdev, 10, buf, len);
505 if (result)
506 goto recv_failed;
507 ses_dev->page10 = buf;
508 ses_dev->page10_len = len;
509
510 no_page10:
511 scomp = kmalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
512 if (!scomp)
513 goto err_free;
514
515 edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id,
516 components, &ses_enclosure_callbacks);
517 if (IS_ERR(edev)) {
518 err = PTR_ERR(edev);
519 goto err_free;
520 }
521
522 edev->scratch = ses_dev;
523 for (i = 0; i < components; i++)
524 edev->component[i].scratch = scomp++;
525
526 /* Page 7 for the descriptors is optional */
527 buf = NULL;
528 result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
529 if (result)
530 goto simple_populate;
531
532 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
533 /* add 1 for trailing '\0' we'll use */
534 buf = kzalloc(len + 1, GFP_KERNEL);
535 result = ses_recv_diag(sdev, 7, buf, len);
536 if (result) {
537 simple_populate:
538 kfree(buf);
539 buf = NULL;
540 desc_ptr = NULL;
541 addl_desc_ptr = NULL;
542 } else {
543 desc_ptr = buf + 8;
544 len = (desc_ptr[2] << 8) + desc_ptr[3];
545 /* skip past overall descriptor */
546 desc_ptr += len + 4;
547 addl_desc_ptr = ses_dev->page10 + 8;
548 }
549 type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
550 components = 0;
551 for (i = 0; i < types; i++, type_ptr += 4) {
552 for (j = 0; j < type_ptr[1]; j++) {
553 char *name = NULL;
554 struct enclosure_component *ecomp;
555
556 if (desc_ptr) {
557 len = (desc_ptr[2] << 8) + desc_ptr[3];
558 desc_ptr += 4;
559 /* Add trailing zero - pushes into
560 * reserved space */
561 desc_ptr[len] = '\0';
562 name = desc_ptr;
563 }
564 if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
565 type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
566 continue;
567 ecomp = enclosure_component_register(edev,
568 components++,
569 type_ptr[0],
570 name);
571 if (desc_ptr) {
572 desc_ptr += len;
573 if (!IS_ERR(ecomp))
574 ses_process_descriptor(ecomp,
575 addl_desc_ptr);
576
577 if (addl_desc_ptr)
578 addl_desc_ptr += addl_desc_ptr[1] + 2;
579 }
580 }
581 }
582 kfree(buf);
583 kfree(hdr_buf);
584
585 /* see if there are any devices matching before
586 * we found the enclosure */
587 shost_for_each_device(tmp_sdev, sdev->host) {
588 if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
589 continue;
590 ses_match_to_enclosure(edev, tmp_sdev);
591 }
592
593 return 0;
594
595 recv_failed:
596 sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n",
597 result);
598 err = -ENODEV;
599 err_free:
600 kfree(buf);
601 kfree(ses_dev->page10);
602 kfree(ses_dev->page2);
603 kfree(ses_dev->page1);
604 err_init_free:
605 kfree(ses_dev);
606 kfree(hdr_buf);
607 sdev_printk(KERN_ERR, sdev, "Failed to bind enclosure %d\n", err);
608 return err;
609}
610
/*
 * ses_remove - scsi_driver remove callback.
 * @dev: the generic device being removed
 *
 * Nothing to do here: all per-enclosure teardown happens in
 * ses_intf_remove() via the class interface.  Always returns 0.
 */
static int ses_remove(struct device *dev)
{
	return 0;
}
615
616static void ses_intf_remove(struct class_device *cdev,
617 struct class_interface *intf)
618{
619 struct scsi_device *sdev = to_scsi_device(cdev->dev);
620 struct enclosure_device *edev;
621 struct ses_device *ses_dev;
622
623 if (!scsi_device_enclosure(sdev))
624 return;
625
626 edev = enclosure_find(cdev->dev);
627 if (!edev)
628 return;
629
630 ses_dev = edev->scratch;
631 edev->scratch = NULL;
632
633 kfree(ses_dev->page1);
634 kfree(ses_dev->page2);
635 kfree(ses_dev);
636
637 kfree(edev->component[0].scratch);
638
639 class_device_put(&edev->cdev);
640 enclosure_unregister(edev);
641}
642
/* Class interface hooks: ses_intf_add/ses_intf_remove run for every
 * scsi_device added to or removed from the SCSI device class. */
static struct class_interface ses_interface = {
	.add	= ses_intf_add,
	.remove	= ses_intf_remove,
};
647
/* Upper-level SCSI driver template for enclosure devices.
 * NOTE(review): ses_probe is defined earlier in this file (outside this
 * excerpt); ses_remove above is a no-op. */
static struct scsi_driver ses_template = {
	.owner			= THIS_MODULE,
	.gendrv = {
		.name		= "ses",
		.probe		= ses_probe,
		.remove		= ses_remove,
	},
};
656
657static int __init ses_init(void)
658{
659 int err;
660
661 err = scsi_register_interface(&ses_interface);
662 if (err)
663 return err;
664
665 err = scsi_register_driver(&ses_template.gendrv);
666 if (err)
667 goto out_unreg;
668
669 return 0;
670
671 out_unreg:
672 scsi_unregister_interface(&ses_interface);
673 return err;
674}
675
/*
 * ses_exit - module teardown; mirrors ses_init() in reverse order
 * (driver first, then the class interface).
 */
static void __exit ses_exit(void)
{
	scsi_unregister_driver(&ses_template.gendrv);
	scsi_unregister_interface(&ses_interface);
}
681
module_init(ses_init);
module_exit(ses_exit);

/* Autoload this module when a SCSI device of type enclosure appears. */
MODULE_ALIAS_SCSI_DEVICE(TYPE_ENCLOSURE);

MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("SCSI Enclosure Services (ses) driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 50ba49250203..208565bdbe8e 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -163,6 +163,29 @@ static void scsi_cd_put(struct scsi_cd *cd)
163 mutex_unlock(&sr_ref_mutex); 163 mutex_unlock(&sr_ref_mutex);
164} 164}
165 165
166/* identical to scsi_test_unit_ready except that it doesn't
167 * eat the NOT_READY returns for removable media */
168int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
169{
170 int retries = MAX_RETRIES;
171 int the_result;
172 u8 cmd[] = {TEST_UNIT_READY, 0, 0, 0, 0, 0 };
173
174 /* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION
175 * conditions are gone, or a timeout happens
176 */
177 do {
178 the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL,
179 0, sshdr, SR_TIMEOUT,
180 retries--);
181
182 } while (retries > 0 &&
183 (!scsi_status_is_good(the_result) ||
184 (scsi_sense_valid(sshdr) &&
185 sshdr->sense_key == UNIT_ATTENTION)));
186 return the_result;
187}
188
166/* 189/*
167 * This function checks to see if the media has been changed in the 190 * This function checks to see if the media has been changed in the
168 * CDROM drive. It is possible that we have already sensed a change, 191 * CDROM drive. It is possible that we have already sensed a change,
@@ -185,8 +208,7 @@ static int sr_media_change(struct cdrom_device_info *cdi, int slot)
185 } 208 }
186 209
187 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 210 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
188 retval = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, 211 retval = sr_test_unit_ready(cd->device, sshdr);
189 sshdr);
190 if (retval || (scsi_sense_valid(sshdr) && 212 if (retval || (scsi_sense_valid(sshdr) &&
191 /* 0x3a is medium not present */ 213 /* 0x3a is medium not present */
192 sshdr->asc == 0x3a)) { 214 sshdr->asc == 0x3a)) {
@@ -733,10 +755,8 @@ static void get_capabilities(struct scsi_cd *cd)
733{ 755{
734 unsigned char *buffer; 756 unsigned char *buffer;
735 struct scsi_mode_data data; 757 struct scsi_mode_data data;
736 unsigned char cmd[MAX_COMMAND_SIZE];
737 struct scsi_sense_hdr sshdr; 758 struct scsi_sense_hdr sshdr;
738 unsigned int the_result; 759 int rc, n;
739 int retries, rc, n;
740 760
741 static const char *loadmech[] = 761 static const char *loadmech[] =
742 { 762 {
@@ -758,23 +778,8 @@ static void get_capabilities(struct scsi_cd *cd)
758 return; 778 return;
759 } 779 }
760 780
761 /* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION 781 /* eat unit attentions */
762 * conditions are gone, or a timeout happens 782 sr_test_unit_ready(cd->device, &sshdr);
763 */
764 retries = 0;
765 do {
766 memset((void *)cmd, 0, MAX_COMMAND_SIZE);
767 cmd[0] = TEST_UNIT_READY;
768
769 the_result = scsi_execute_req (cd->device, cmd, DMA_NONE, NULL,
770 0, &sshdr, SR_TIMEOUT,
771 MAX_RETRIES);
772
773 retries++;
774 } while (retries < 5 &&
775 (!scsi_status_is_good(the_result) ||
776 (scsi_sense_valid(&sshdr) &&
777 sshdr.sense_key == UNIT_ATTENTION)));
778 783
779 /* ask for mode page 0x2a */ 784 /* ask for mode page 0x2a */
780 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, 785 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 81fbc0b78a52..1e144dfdbd4b 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -61,6 +61,7 @@ int sr_select_speed(struct cdrom_device_info *cdi, int speed);
61int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *); 61int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
62 62
63int sr_is_xa(Scsi_CD *); 63int sr_is_xa(Scsi_CD *);
64int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr);
64 65
65/* sr_vendor.c */ 66/* sr_vendor.c */
66void sr_vendor_init(Scsi_CD *); 67void sr_vendor_init(Scsi_CD *);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index d5cebff1d646..ae87d08df588 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -306,8 +306,7 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
306 /* we have no changer support */ 306 /* we have no changer support */
307 return -EINVAL; 307 return -EINVAL;
308 } 308 }
309 if (0 == scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, 309 if (0 == sr_test_unit_ready(cd->device, &sshdr))
310 &sshdr))
311 return CDS_DISC_OK; 310 return CDS_DISC_OK;
312 311
313 if (!cdrom_get_media_event(cdi, &med)) { 312 if (!cdrom_get_media_event(cdi, &med)) {
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 1bc41907a038..06152c7fa689 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -1,392 +1,316 @@
1/* sun3x_esp.c: EnhancedScsiProcessor Sun3x SCSI driver code. 1/* sun3x_esp.c: ESP front-end for Sun3x systems.
2 * 2 *
3 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de) 3 * Copyright (C) 2007,2008 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
4 *
5 * Based on David S. Miller's esp driver
6 */ 4 */
7 5
8#include <linux/kernel.h> 6#include <linux/kernel.h>
9#include <linux/types.h> 7#include <linux/types.h>
10#include <linux/string.h>
11#include <linux/slab.h>
12#include <linux/blkdev.h>
13#include <linux/proc_fs.h>
14#include <linux/stat.h>
15#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/platform_device.h>
12#include <linux/dma-mapping.h>
16#include <linux/interrupt.h> 13#include <linux/interrupt.h>
17 14
18#include "scsi.h"
19#include <scsi/scsi_host.h>
20#include "NCR53C9x.h"
21
22#include <asm/sun3x.h> 15#include <asm/sun3x.h>
16#include <asm/io.h>
17#include <asm/dma.h>
23#include <asm/dvma.h> 18#include <asm/dvma.h>
24#include <asm/irq.h>
25
26static void dma_barrier(struct NCR_ESP *esp);
27static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
28static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
29static void dma_drain(struct NCR_ESP *esp);
30static void dma_invalidate(struct NCR_ESP *esp);
31static void dma_dump_state(struct NCR_ESP *esp);
32static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
33static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
34static void dma_ints_off(struct NCR_ESP *esp);
35static void dma_ints_on(struct NCR_ESP *esp);
36static int dma_irq_p(struct NCR_ESP *esp);
37static void dma_poll(struct NCR_ESP *esp, unsigned char *vaddr);
38static int dma_ports_p(struct NCR_ESP *esp);
39static void dma_reset(struct NCR_ESP *esp);
40static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
41static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp);
42static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp);
43static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp);
44static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp);
45static void dma_advance_sg (Scsi_Cmnd *sp);
46
47/* Detecting ESP chips on the machine. This is the simple and easy
48 * version.
49 */
50int sun3x_esp_detect(struct scsi_host_template *tpnt)
51{
52 struct NCR_ESP *esp;
53 struct ConfigDev *esp_dev;
54
55 esp_dev = 0;
56 esp = esp_allocate(tpnt, esp_dev, 0);
57
58 /* Do command transfer with DMA */
59 esp->do_pio_cmds = 0;
60
61 /* Required functions */
62 esp->dma_bytes_sent = &dma_bytes_sent;
63 esp->dma_can_transfer = &dma_can_transfer;
64 esp->dma_dump_state = &dma_dump_state;
65 esp->dma_init_read = &dma_init_read;
66 esp->dma_init_write = &dma_init_write;
67 esp->dma_ints_off = &dma_ints_off;
68 esp->dma_ints_on = &dma_ints_on;
69 esp->dma_irq_p = &dma_irq_p;
70 esp->dma_ports_p = &dma_ports_p;
71 esp->dma_setup = &dma_setup;
72
73 /* Optional functions */
74 esp->dma_barrier = &dma_barrier;
75 esp->dma_invalidate = &dma_invalidate;
76 esp->dma_drain = &dma_drain;
77 esp->dma_irq_entry = 0;
78 esp->dma_irq_exit = 0;
79 esp->dma_led_on = 0;
80 esp->dma_led_off = 0;
81 esp->dma_poll = &dma_poll;
82 esp->dma_reset = &dma_reset;
83
84 /* virtual DMA functions */
85 esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
86 esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
87 esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one;
88 esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl;
89 esp->dma_advance_sg = &dma_advance_sg;
90
91 /* SCSI chip speed */
92 esp->cfreq = 20000000;
93 esp->eregs = (struct ESP_regs *)(SUN3X_ESP_BASE);
94 esp->dregs = (void *)SUN3X_ESP_DMA;
95 19
96 esp->esp_command = (volatile unsigned char *)dvma_malloc(DVMA_PAGE_SIZE); 20/* DMA controller reg offsets */
97 esp->esp_command_dvma = dvma_vtob((unsigned long)esp->esp_command); 21#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
98 22#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
99 esp->irq = 2; 23#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
100 if (request_irq(esp->irq, esp_intr, IRQF_DISABLED, 24#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
101 "SUN3X SCSI", esp->ehost)) {
102 esp_deallocate(esp);
103 return 0;
104 }
105 25
106 esp->scsi_id = 7; 26#include <scsi/scsi_host.h>
107 esp->diff = 0;
108 27
109 esp_initialize(esp); 28#include "esp_scsi.h"
110 29
111 /* for reasons beyond my knowledge (and which should likely be fixed) 30#define DRV_MODULE_NAME "sun3x_esp"
112 sync mode doesn't work on a 3/80 at 5mhz. but it does at 4. */ 31#define PFX DRV_MODULE_NAME ": "
113 esp->sync_defp = 0x3f; 32#define DRV_VERSION "1.000"
33#define DRV_MODULE_RELDATE "Nov 1, 2007"
114 34
115 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, 35/*
116 esps_in_use); 36 * m68k always assumes readl/writel operate on little endian
117 esps_running = esps_in_use; 37 * mmio space; this is wrong at least for Sun3x, so we
118 return esps_in_use; 38 * need to workaround this until a proper way is found
39 */
40#if 0
41#define dma_read32(REG) \
42 readl(esp->dma_regs + (REG))
43#define dma_write32(VAL, REG) \
44 writel((VAL), esp->dma_regs + (REG))
45#else
46#define dma_read32(REG) \
47 *(volatile u32 *)(esp->dma_regs + (REG))
48#define dma_write32(VAL, REG) \
49 do { *(volatile u32 *)(esp->dma_regs + (REG)) = (VAL); } while (0)
50#endif
51
52static void sun3x_esp_write8(struct esp *esp, u8 val, unsigned long reg)
53{
54 writeb(val, esp->regs + (reg * 4UL));
119} 55}
120 56
121static void dma_do_drain(struct NCR_ESP *esp) 57static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
122{ 58{
123 struct sparc_dma_registers *dregs = 59 return readb(esp->regs + (reg * 4UL));
124 (struct sparc_dma_registers *) esp->dregs;
125
126 int count = 500000;
127
128 while((dregs->cond_reg & DMA_PEND_READ) && (--count > 0))
129 udelay(1);
130
131 if(!count) {
132 printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
133 }
134
135 dregs->cond_reg |= DMA_FIFO_STDRAIN;
136
137 count = 500000;
138
139 while((dregs->cond_reg & DMA_FIFO_ISDRAIN) && (--count > 0))
140 udelay(1);
141
142 if(!count) {
143 printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
144 }
145
146} 60}
147 61
148static void dma_barrier(struct NCR_ESP *esp) 62static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
63 size_t sz, int dir)
149{ 64{
150 struct sparc_dma_registers *dregs = 65 return dma_map_single(esp->dev, buf, sz, dir);
151 (struct sparc_dma_registers *) esp->dregs;
152 int count = 500000;
153
154 while((dregs->cond_reg & DMA_PEND_READ) && (--count > 0))
155 udelay(1);
156
157 if(!count) {
158 printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
159 }
160
161 dregs->cond_reg &= ~(DMA_ENABLE);
162} 66}
163 67
164/* This uses various DMA csr fields and the fifo flags count value to 68static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
165 * determine how many bytes were successfully sent/received by the ESP. 69 int num_sg, int dir)
166 */
167static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
168{ 70{
169 struct sparc_dma_registers *dregs = 71 return dma_map_sg(esp->dev, sg, num_sg, dir);
170 (struct sparc_dma_registers *) esp->dregs;
171
172 int rval = dregs->st_addr - esp->esp_command_dvma;
173
174 return rval - fifo_count;
175} 72}
176 73
177static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp) 74static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
75 size_t sz, int dir)
178{ 76{
179 return sp->SCp.this_residual; 77 dma_unmap_single(esp->dev, addr, sz, dir);
180} 78}
181 79
182static void dma_drain(struct NCR_ESP *esp) 80static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
81 int num_sg, int dir)
183{ 82{
184 struct sparc_dma_registers *dregs = 83 dma_unmap_sg(esp->dev, sg, num_sg, dir);
185 (struct sparc_dma_registers *) esp->dregs;
186 int count = 500000;
187
188 if(dregs->cond_reg & DMA_FIFO_ISDRAIN) {
189 dregs->cond_reg |= DMA_FIFO_STDRAIN;
190 while((dregs->cond_reg & DMA_FIFO_ISDRAIN) && (--count > 0))
191 udelay(1);
192 if(!count) {
193 printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
194 }
195
196 }
197} 84}
198 85
199static void dma_invalidate(struct NCR_ESP *esp) 86static int sun3x_esp_irq_pending(struct esp *esp)
200{ 87{
201 struct sparc_dma_registers *dregs = 88 if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
202 (struct sparc_dma_registers *) esp->dregs; 89 return 1;
203 90 return 0;
204 __u32 tmp; 91}
205 int count = 500000;
206
207 while(((tmp = dregs->cond_reg) & DMA_PEND_READ) && (--count > 0))
208 udelay(1);
209 92
210 if(!count) { 93static void sun3x_esp_reset_dma(struct esp *esp)
211 printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg); 94{
212 } 95 u32 val;
213 96
214 dregs->cond_reg = tmp | DMA_FIFO_INV; 97 val = dma_read32(DMA_CSR);
215 dregs->cond_reg &= ~DMA_FIFO_INV; 98 dma_write32(val | DMA_RST_SCSI, DMA_CSR);
99 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
216 100
101 /* Enable interrupts. */
102 val = dma_read32(DMA_CSR);
103 dma_write32(val | DMA_INT_ENAB, DMA_CSR);
217} 104}
218 105
219static void dma_dump_state(struct NCR_ESP *esp) 106static void sun3x_esp_dma_drain(struct esp *esp)
220{ 107{
221 struct sparc_dma_registers *dregs = 108 u32 csr;
222 (struct sparc_dma_registers *) esp->dregs; 109 int lim;
223 110
224 ESPLOG(("esp%d: dma -- cond_reg<%08lx> addr<%08lx>\n", 111 csr = dma_read32(DMA_CSR);
225 esp->esp_id, dregs->cond_reg, dregs->st_addr)); 112 if (!(csr & DMA_FIFO_ISDRAIN))
226} 113 return;
227 114
228static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length) 115 dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
229{
230 struct sparc_dma_registers *dregs =
231 (struct sparc_dma_registers *) esp->dregs;
232 116
233 dregs->st_addr = vaddress; 117 lim = 1000;
234 dregs->cond_reg |= (DMA_ST_WRITE | DMA_ENABLE); 118 while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
119 if (--lim == 0) {
120 printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
121 esp->host->unique_id);
122 break;
123 }
124 udelay(1);
125 }
235} 126}
236 127
237static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length) 128static void sun3x_esp_dma_invalidate(struct esp *esp)
238{ 129{
239 struct sparc_dma_registers *dregs = 130 u32 val;
240 (struct sparc_dma_registers *) esp->dregs; 131 int lim;
241 132
242 /* Set up the DMA counters */ 133 lim = 1000;
134 while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
135 if (--lim == 0) {
136 printk(KERN_ALERT PFX "esp%d: DMA will not "
137 "invalidate!\n", esp->host->unique_id);
138 break;
139 }
140 udelay(1);
141 }
243 142
244 dregs->st_addr = vaddress; 143 val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
245 dregs->cond_reg = ((dregs->cond_reg & ~(DMA_ST_WRITE)) | DMA_ENABLE); 144 val |= DMA_FIFO_INV;
145 dma_write32(val, DMA_CSR);
146 val &= ~DMA_FIFO_INV;
147 dma_write32(val, DMA_CSR);
246} 148}
247 149
248static void dma_ints_off(struct NCR_ESP *esp) 150static void sun3x_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
151 u32 dma_count, int write, u8 cmd)
249{ 152{
250 DMA_INTSOFF((struct sparc_dma_registers *) esp->dregs); 153 u32 csr;
154
155 BUG_ON(!(cmd & ESP_CMD_DMA));
156
157 sun3x_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
158 sun3x_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
159 csr = dma_read32(DMA_CSR);
160 csr |= DMA_ENABLE;
161 if (write)
162 csr |= DMA_ST_WRITE;
163 else
164 csr &= ~DMA_ST_WRITE;
165 dma_write32(csr, DMA_CSR);
166 dma_write32(addr, DMA_ADDR);
167
168 scsi_esp_cmd(esp, cmd);
251} 169}
252 170
253static void dma_ints_on(struct NCR_ESP *esp) 171static int sun3x_esp_dma_error(struct esp *esp)
254{ 172{
255 DMA_INTSON((struct sparc_dma_registers *) esp->dregs); 173 u32 csr = dma_read32(DMA_CSR);
256}
257 174
258static int dma_irq_p(struct NCR_ESP *esp) 175 if (csr & DMA_HNDL_ERROR)
259{ 176 return 1;
260 return DMA_IRQ_P((struct sparc_dma_registers *) esp->dregs); 177
178 return 0;
261} 179}
262 180
263static void dma_poll(struct NCR_ESP *esp, unsigned char *vaddr) 181static const struct esp_driver_ops sun3x_esp_ops = {
182 .esp_write8 = sun3x_esp_write8,
183 .esp_read8 = sun3x_esp_read8,
184 .map_single = sun3x_esp_map_single,
185 .map_sg = sun3x_esp_map_sg,
186 .unmap_single = sun3x_esp_unmap_single,
187 .unmap_sg = sun3x_esp_unmap_sg,
188 .irq_pending = sun3x_esp_irq_pending,
189 .reset_dma = sun3x_esp_reset_dma,
190 .dma_drain = sun3x_esp_dma_drain,
191 .dma_invalidate = sun3x_esp_dma_invalidate,
192 .send_dma_cmd = sun3x_esp_send_dma_cmd,
193 .dma_error = sun3x_esp_dma_error,
194};
195
196static int __devinit esp_sun3x_probe(struct platform_device *dev)
264{ 197{
265 int count = 50; 198 struct scsi_host_template *tpnt = &scsi_esp_template;
266 dma_do_drain(esp); 199 struct Scsi_Host *host;
200 struct esp *esp;
201 struct resource *res;
202 int err = -ENOMEM;
267 203
268 /* Wait till the first bits settle. */ 204 host = scsi_host_alloc(tpnt, sizeof(struct esp));
269 while((*(volatile unsigned char *)vaddr == 0xff) && (--count > 0)) 205 if (!host)
270 udelay(1); 206 goto fail;
271 207
272 if(!count) { 208 host->max_id = 8;
273// printk("%s:%d timeout expire (data %02x)\n", __FILE__, __LINE__, 209 esp = shost_priv(host);
274// esp_read(esp->eregs->esp_fdata));
275 //mach_halt();
276 vaddr[0] = esp_read(esp->eregs->esp_fdata);
277 vaddr[1] = esp_read(esp->eregs->esp_fdata);
278 }
279 210
280} 211 esp->host = host;
212 esp->dev = dev;
213 esp->ops = &sun3x_esp_ops;
281 214
282static int dma_ports_p(struct NCR_ESP *esp) 215 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
283{ 216 if (!res && !res->start)
284 return (((struct sparc_dma_registers *) esp->dregs)->cond_reg 217 goto fail_unlink;
285 & DMA_INT_ENAB);
286}
287 218
288/* Resetting various pieces of the ESP scsi driver chipset/buses. */ 219 esp->regs = ioremap_nocache(res->start, 0x20);
289static void dma_reset(struct NCR_ESP *esp) 220 if (!esp->regs)
290{ 221 goto fail_unmap_regs;
291 struct sparc_dma_registers *dregs =
292 (struct sparc_dma_registers *)esp->dregs;
293 222
294 /* Punt the DVMA into a known state. */ 223 res = platform_get_resource(dev, IORESOURCE_MEM, 1);
295 dregs->cond_reg |= DMA_RST_SCSI; 224 if (!res && !res->start)
296 dregs->cond_reg &= ~(DMA_RST_SCSI); 225 goto fail_unmap_regs;
297 DMA_INTSON(dregs);
298}
299 226
300static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 227 esp->dma_regs = ioremap_nocache(res->start, 0x10);
301{
302 struct sparc_dma_registers *dregs =
303 (struct sparc_dma_registers *) esp->dregs;
304 unsigned long nreg = dregs->cond_reg;
305 228
306// printk("dma_setup %c addr %08x cnt %08x\n", 229 esp->command_block = dma_alloc_coherent(esp->dev, 16,
307// write ? 'W' : 'R', addr, count); 230 &esp->command_block_dma,
231 GFP_KERNEL);
232 if (!esp->command_block)
233 goto fail_unmap_regs_dma;
308 234
309 dma_do_drain(esp); 235 host->irq = platform_get_irq(dev, 0);
236 err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
237 "SUN3X ESP", esp);
238 if (err < 0)
239 goto fail_unmap_command_block;
310 240
311 if(write) 241 esp->scsi_id = 7;
312 nreg |= DMA_ST_WRITE; 242 esp->host->this_id = esp->scsi_id;
313 else { 243 esp->scsi_id_mask = (1 << esp->scsi_id);
314 nreg &= ~(DMA_ST_WRITE); 244 esp->cfreq = 20000000;
315 }
316
317 nreg |= DMA_ENABLE;
318 dregs->cond_reg = nreg;
319 dregs->st_addr = addr;
320}
321 245
322static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp) 246 dev_set_drvdata(&dev->dev, esp);
323{ 247
324 sp->SCp.have_data_in = dvma_map((unsigned long)sp->SCp.buffer, 248 err = scsi_esp_register(esp, &dev->dev);
325 sp->SCp.this_residual); 249 if (err)
326 sp->SCp.ptr = (char *)((unsigned long)sp->SCp.have_data_in); 250 goto fail_free_irq;
251
252 return 0;
253
254fail_free_irq:
255 free_irq(host->irq, esp);
256fail_unmap_command_block:
257 dma_free_coherent(esp->dev, 16,
258 esp->command_block,
259 esp->command_block_dma);
260fail_unmap_regs_dma:
261 iounmap(esp->dma_regs);
262fail_unmap_regs:
263 iounmap(esp->regs);
264fail_unlink:
265 scsi_host_put(host);
266fail:
267 return err;
327} 268}
328 269
329static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) 270static int __devexit esp_sun3x_remove(struct platform_device *dev)
330{ 271{
331 int sz = sp->SCp.buffers_residual; 272 struct esp *esp = dev_get_drvdata(&dev->dev);
332 struct scatterlist *sg = sp->SCp.buffer; 273 unsigned int irq = esp->host->irq;
333 274 u32 val;
334 while (sz >= 0) {
335 sg[sz].dma_address = dvma_map((unsigned long)sg_virt(&sg[sz]),
336 sg[sz].length);
337 sz--;
338 }
339 sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dma_address);
340}
341 275
342static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp) 276 scsi_esp_unregister(esp);
343{
344 dvma_unmap((char *)sp->SCp.have_data_in);
345}
346 277
347static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) 278 /* Disable interrupts. */
348{ 279 val = dma_read32(DMA_CSR);
349 int sz = sp->use_sg - 1; 280 dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
350 struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
351
352 while(sz >= 0) {
353 dvma_unmap((char *)sg[sz].dma_address);
354 sz--;
355 }
356}
357 281
358static void dma_advance_sg (Scsi_Cmnd *sp) 282 free_irq(irq, esp);
359{ 283 dma_free_coherent(esp->dev, 16,
360 sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dma_address); 284 esp->command_block,
361} 285 esp->command_block_dma);
362 286
363static int sun3x_esp_release(struct Scsi_Host *instance) 287 scsi_host_put(esp->host);
364{
365 /* this code does not support being compiled as a module */
366 return 1;
367 288
289 return 0;
368} 290}
369 291
370static struct scsi_host_template driver_template = { 292static struct platform_driver esp_sun3x_driver = {
371 .proc_name = "sun3x_esp", 293 .probe = esp_sun3x_probe,
372 .proc_info = &esp_proc_info, 294 .remove = __devexit_p(esp_sun3x_remove),
373 .name = "Sun ESP 100/100a/200", 295 .driver = {
374 .detect = sun3x_esp_detect, 296 .name = "sun3x_esp",
375 .release = sun3x_esp_release, 297 },
376 .slave_alloc = esp_slave_alloc,
377 .slave_destroy = esp_slave_destroy,
378 .info = esp_info,
379 .queuecommand = esp_queue,
380 .eh_abort_handler = esp_abort,
381 .eh_bus_reset_handler = esp_reset,
382 .can_queue = 7,
383 .this_id = 7,
384 .sg_tablesize = SG_ALL,
385 .cmd_per_lun = 1,
386 .use_clustering = DISABLE_CLUSTERING,
387}; 298};
388 299
300static int __init sun3x_esp_init(void)
301{
302 return platform_driver_register(&esp_sun3x_driver);
303}
389 304
390#include "scsi_module.c" 305static void __exit sun3x_esp_exit(void)
306{
307 platform_driver_unregister(&esp_sun3x_driver);
308}
391 309
310MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
311MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
392MODULE_LICENSE("GPL"); 312MODULE_LICENSE("GPL");
313MODULE_VERSION(DRV_VERSION);
314
315module_init(sun3x_esp_init);
316module_exit(sun3x_esp_exit);
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 254bdaeb35ff..35142b5341b5 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3842,7 +3842,7 @@ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
3842 if (cp->startp == cp->phys.head.lastp || 3842 if (cp->startp == cp->phys.head.lastp ||
3843 sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), 3843 sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
3844 &dp_ofs) < 0) { 3844 &dp_ofs) < 0) {
3845 return cp->data_len; 3845 return cp->data_len - cp->odd_byte_adjustment;
3846 } 3846 }
3847 3847
3848 /* 3848 /*
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 662c00451be4..58d7eee4fe81 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1216,7 +1216,7 @@ static void scsi_to_dev_dir(unsigned int i, unsigned int j) {
1216 cpp->xdir = DTD_IN; 1216 cpp->xdir = DTD_IN;
1217 return; 1217 return;
1218 } 1218 }
1219 else if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) { 1219 else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
1220 cpp->xdir = DTD_OUT; 1220 cpp->xdir = DTD_OUT;
1221 return; 1221 return;
1222 } 1222 }