author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/scsi/sym53c8xx_2
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/scsi/sym53c8xx_2')
-rw-r--r-- drivers/scsi/sym53c8xx_2/Makefile      |    4
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym53c8xx.h   |  217
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_defs.h    |  792
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_fw.c      |  568
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_fw.h      |  211
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_fw1.h     | 1838
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_fw2.h     | 1927
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_glue.c    | 2196
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_glue.h    |  300
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_hipd.c    | 5865
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_hipd.h    | 1304
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_malloc.c  |  382
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_misc.h    |  192
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_nvram.c   |  771
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_nvram.h   |  214
15 files changed, 16781 insertions, 0 deletions
diff --git a/drivers/scsi/sym53c8xx_2/Makefile b/drivers/scsi/sym53c8xx_2/Makefile
new file mode 100644
index 000000000000..873e8ced8252
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/Makefile
@@ -0,0 +1,4 @@
1# Makefile for the NCR/SYMBIOS/LSI 53C8XX PCI SCSI controllers driver.
2
3sym53c8xx-objs := sym_fw.o sym_glue.o sym_hipd.o sym_malloc.o sym_nvram.o
4obj-$(CONFIG_SCSI_SYM53C8XX_2) := sym53c8xx.o
diff --git a/drivers/scsi/sym53c8xx_2/sym53c8xx.h b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
new file mode 100644
index 000000000000..481103769729
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
@@ -0,0 +1,217 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40#ifndef SYM53C8XX_H
41#define SYM53C8XX_H
42
43#include <linux/config.h>
44
45/*
46 * DMA addressing mode.
47 *
48 * 0 : 32 bit addressing for all chips.
49 * 1 : 40 bit addressing when supported by chip.
50 * 2 : 64 bit addressing when supported by chip,
51 * limited to 16 segments of 4 GB -> 64 GB max.
52 */
53#define SYM_CONF_DMA_ADDRESSING_MODE CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE
54
55/*
56 * NVRAM support.
57 */
58#if 1
59#define SYM_CONF_NVRAM_SUPPORT (1)
60#endif
61
62/*
63 * These options are not tunable from 'make config'
64 */
65#if 1
66#define SYM_LINUX_PROC_INFO_SUPPORT
67#define SYM_LINUX_USER_COMMAND_SUPPORT
68#define SYM_LINUX_USER_INFO_SUPPORT
69#define SYM_LINUX_DEBUG_CONTROL_SUPPORT
70#endif
71
72/*
73 * Also handle old NCR chips if set to a non-zero value.
74 */
75#define SYM_CONF_GENERIC_SUPPORT (1)
76
77/*
78 * Allow tags from 2 to 256, default 8
79 */
80#ifndef CONFIG_SCSI_SYM53C8XX_MAX_TAGS
81#define CONFIG_SCSI_SYM53C8XX_MAX_TAGS (8)
82#endif
83
84#if CONFIG_SCSI_SYM53C8XX_MAX_TAGS < 2
85#define SYM_CONF_MAX_TAG (2)
86#elif CONFIG_SCSI_SYM53C8XX_MAX_TAGS > 256
87#define SYM_CONF_MAX_TAG (256)
88#else
89#define SYM_CONF_MAX_TAG CONFIG_SCSI_SYM53C8XX_MAX_TAGS
90#endif
91
92#ifndef CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS
93#define CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS SYM_CONF_MAX_TAG
94#endif
95
96/*
97 * Anyway, we configure the driver for at least 64 tags per LUN. :)
98 */
99#if SYM_CONF_MAX_TAG <= 64
100#define SYM_CONF_MAX_TAG_ORDER (6)
101#elif SYM_CONF_MAX_TAG <= 128
102#define SYM_CONF_MAX_TAG_ORDER (7)
103#else
104#define SYM_CONF_MAX_TAG_ORDER (8)
105#endif
106
107/*
108 * Max number of SG entries.
109 */
110#define SYM_CONF_MAX_SG (96)
111
112/*
113 * Driver setup structure.
114 *
115 * This structure is initialized from linux config options.
116 * It can be overridden at boot-up by the boot command line.
117 */
118struct sym_driver_setup {
119 u_short max_tag;
120 u_char burst_order;
121 u_char scsi_led;
122 u_char scsi_diff;
123 u_char irq_mode;
124 u_char scsi_bus_check;
125 u_char host_id;
126
127 u_char verbose;
128 u_char settle_delay;
129 u_char use_nvram;
130 u_long excludes[8];
131 char tag_ctrl[100];
132};
133
134#define SYM_SETUP_MAX_TAG sym_driver_setup.max_tag
135#define SYM_SETUP_BURST_ORDER sym_driver_setup.burst_order
136#define SYM_SETUP_SCSI_LED sym_driver_setup.scsi_led
137#define SYM_SETUP_SCSI_DIFF sym_driver_setup.scsi_diff
138#define SYM_SETUP_IRQ_MODE sym_driver_setup.irq_mode
139#define SYM_SETUP_SCSI_BUS_CHECK sym_driver_setup.scsi_bus_check
140#define SYM_SETUP_HOST_ID sym_driver_setup.host_id
141#define boot_verbose sym_driver_setup.verbose
142
143/*
144 * Initial setup.
145 *
146 * Can be overridden at startup by a command line.
147 */
148#define SYM_LINUX_DRIVER_SETUP { \
149 .max_tag = CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS, \
150 .burst_order = 7, \
151 .scsi_led = 1, \
152 .scsi_diff = 1, \
153 .irq_mode = 0, \
154 .scsi_bus_check = 1, \
155 .host_id = 7, \
156 .verbose = 0, \
157 .settle_delay = 3, \
158 .use_nvram = 1, \
159}
160
161extern struct sym_driver_setup sym_driver_setup;
162extern unsigned int sym_debug_flags;
163#define DEBUG_FLAGS sym_debug_flags
164
165/*
166 * Max number of targets.
167 * Maximum is 16 and you are advised not to change this value.
168 */
169#ifndef SYM_CONF_MAX_TARGET
170#define SYM_CONF_MAX_TARGET (16)
171#endif
172
173/*
174 * Max number of logical units.
175 * SPI-2 allows up to 64 logical units, but in real life, targets
176 * that implement more than 7 logical units are pretty rare.
177 * Anyway, the cost of accepting up to 64 logical units is low in
178 * this driver, so going with the maximum is acceptable.
179 */
180#ifndef SYM_CONF_MAX_LUN
181#define SYM_CONF_MAX_LUN (64)
182#endif
183
184/*
185 * Max number of IO control blocks queued to the controller.
186 * Each entry needs 8 bytes and the queues are allocated contiguously.
187 * Since we do not want to allocate more than a page, the theoretical
188 * maximum is PAGE_SIZE/8. For safety, we announce a bit less to the
189 * access method. :)
190 * When not supplied, as is suggested, the driver computes a
191 * good value for this parameter.
192 */
193/* #define SYM_CONF_MAX_START (PAGE_SIZE/8 - 16) */
194
195/*
196 * Support for Immediate Arbitration.
197 * Not advised.
198 */
199/* #define SYM_CONF_IARB_SUPPORT */
200
201/*
202 * Only relevant if IARB support configured.
203 * - Max number of successive settings of IARB hints.
204 * - Set IARB on arbitration lost.
205 */
206#define SYM_CONF_IARB_MAX 3
207#define SYM_CONF_SET_IARB_ON_ARB_LOST 1
208
209/*
210 * Returning wrong residuals may cause problems.
211 * When zero, this define tells the driver to
212 * always return 0 as the transfer residual.
213 * Btw, all my residual tests have succeeded.
214 */
215#define SYM_SETUP_RESIDUAL_SUPPORT 1
216
217#endif /* SYM53C8XX_H */
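The tag-limit macros in this header clamp the kernel config value into the 2..256 range and derive a power-of-two order used to size the per-LUN tag table. As a quick illustration of how that preprocessor logic resolves, here is a small standalone sketch (not part of the diff) that hard-codes a hypothetical CONFIG_SCSI_SYM53C8XX_MAX_TAGS of 64 and prints the resulting values.

#include <stdio.h>

#define CONFIG_SCSI_SYM53C8XX_MAX_TAGS 64	/* hypothetical .config value */

/* Clamp the configured tag count into the 2..256 range, as the header does */
#if CONFIG_SCSI_SYM53C8XX_MAX_TAGS < 2
#define SYM_CONF_MAX_TAG	(2)
#elif CONFIG_SCSI_SYM53C8XX_MAX_TAGS > 256
#define SYM_CONF_MAX_TAG	(256)
#else
#define SYM_CONF_MAX_TAG	CONFIG_SCSI_SYM53C8XX_MAX_TAGS
#endif

/* Derive the power-of-two order used to size the per-LUN tag table */
#if SYM_CONF_MAX_TAG <= 64
#define SYM_CONF_MAX_TAG_ORDER	(6)
#elif SYM_CONF_MAX_TAG <= 128
#define SYM_CONF_MAX_TAG_ORDER	(7)
#else
#define SYM_CONF_MAX_TAG_ORDER	(8)
#endif

int main(void)
{
	/* With 64 configured tags the table order is 6 (1 << 6 == 64 slots) */
	printf("max_tag=%d order=%d slots=%d\n",
	       SYM_CONF_MAX_TAG, SYM_CONF_MAX_TAG_ORDER,
	       1 << SYM_CONF_MAX_TAG_ORDER);
	return 0;
}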
diff --git a/drivers/scsi/sym53c8xx_2/sym_defs.h b/drivers/scsi/sym53c8xx_2/sym_defs.h
new file mode 100644
index 000000000000..15bb89195c09
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_defs.h
@@ -0,0 +1,792 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40#ifndef SYM_DEFS_H
41#define SYM_DEFS_H
42
43#define SYM_VERSION "2.2.0"
44#define SYM_DRIVER_NAME "sym-" SYM_VERSION
45
46/*
47 * SYM53C8XX device features descriptor.
48 */
49struct sym_chip {
50 u_short device_id;
51 u_short revision_id;
52 char *name;
53 u_char burst_max; /* log-base-2 of max burst */
54 u_char offset_max;
55 u_char nr_divisor;
56 u_char lp_probe_bit;
57 u_int features;
58#define FE_LED0 (1<<0)
59#define FE_WIDE (1<<1) /* Wide data transfers */
60#define FE_ULTRA (1<<2) /* Ultra speed 20Mtrans/sec */
61#define FE_ULTRA2 (1<<3) /* Ultra 2 - 40 Mtrans/sec */
62#define FE_DBLR (1<<4) /* Clock doubler present */
63#define FE_QUAD (1<<5) /* Clock quadrupler present */
64#define FE_ERL (1<<6) /* Enable read line */
65#define FE_CLSE (1<<7) /* Cache line size enable */
66#define FE_WRIE (1<<8) /* Write & Invalidate enable */
67#define FE_ERMP (1<<9) /* Enable read multiple */
68#define FE_BOF (1<<10) /* Burst opcode fetch */
69#define FE_DFS (1<<11) /* DMA fifo size */
70#define FE_PFEN (1<<12) /* Prefetch enable */
71#define FE_LDSTR (1<<13) /* Load/Store supported */
72#define FE_RAM (1<<14) /* On chip RAM present */
73#define FE_VARCLK (1<<15) /* Clock frequency may vary */
74#define FE_RAM8K (1<<16) /* On chip RAM sized 8Kb */
75#define FE_64BIT (1<<17) /* 64-bit PCI BUS interface */
76#define FE_IO256 (1<<18) /* Requires full 256 bytes in PCI space */
77#define FE_NOPM (1<<19) /* Scripts handles phase mismatch */
78#define FE_LEDC (1<<20) /* Hardware control of LED */
79#define FE_ULTRA3 (1<<21) /* Ultra 3 - 80 Mtrans/sec DT */
80#define FE_66MHZ (1<<22) /* 66MHz PCI support */
81#define FE_CRC (1<<23) /* CRC support */
82#define FE_DIFF (1<<24) /* SCSI HVD support */
83#define FE_DFBC (1<<25) /* Have DFBC register */
84#define FE_LCKFRQ (1<<26) /* Have LCKFRQ */
85#define FE_C10 (1<<27) /* Various C10 core (mis)features */
86#define FE_U3EN (1<<28) /* U3EN bit usable */
87#define FE_DAC (1<<29) /* Support PCI DAC (64 bit addressing) */
88#define FE_ISTAT1 (1<<30) /* Have ISTAT1, MBOX0, MBOX1 registers */
89
90#define FE_CACHE_SET (FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP)
91#define FE_CACHE0_SET (FE_CACHE_SET & ~FE_ERL)
92};
93
94/*
95 * SYM53C8XX IO register data structure.
96 */
97struct sym_reg {
98/*00*/ u8 nc_scntl0; /* full arb., ena parity, par->ATN */
99
100/*01*/ u8 nc_scntl1; /* no reset */
101 #define ISCON 0x10 /* connected to scsi */
102 #define CRST 0x08 /* force reset */
103 #define IARB 0x02 /* immediate arbitration */
104
105/*02*/ u8 nc_scntl2; /* no disconnect expected */
106 #define SDU 0x80 /* cmd: disconnect will raise error */
107 #define CHM 0x40 /* sta: chained mode */
108 #define WSS 0x08 /* sta: wide scsi send [W]*/
109 #define WSR 0x01 /* sta: wide scsi received [W]*/
110
111/*03*/ u8 nc_scntl3; /* cnf system clock dependent */
112 #define EWS 0x08 /* cmd: enable wide scsi [W]*/
113 #define ULTRA 0x80 /* cmd: ULTRA enable */
114 /* bits 0-2, 7 rsvd for C1010 */
115
116/*04*/ u8 nc_scid; /* cnf host adapter scsi address */
117 #define RRE 0x40 /* r/w:e enable response to resel. */
118 #define SRE 0x20 /* r/w:e enable response to select */
119
120/*05*/ u8 nc_sxfer; /* ### Sync speed and count */
121 /* bits 6-7 rsvd for C1010 */
122
123/*06*/ u8 nc_sdid; /* ### Destination-ID */
124
125/*07*/ u8 nc_gpreg; /* ??? IO-Pins */
126
127/*08*/ u8 nc_sfbr; /* ### First byte received */
128
129/*09*/ u8 nc_socl;
130 #define CREQ 0x80 /* r/w: SCSI-REQ */
131 #define CACK 0x40 /* r/w: SCSI-ACK */
132 #define CBSY 0x20 /* r/w: SCSI-BSY */
133 #define CSEL 0x10 /* r/w: SCSI-SEL */
134 #define CATN 0x08 /* r/w: SCSI-ATN */
135 #define CMSG 0x04 /* r/w: SCSI-MSG */
136 #define CC_D 0x02 /* r/w: SCSI-C_D */
137 #define CI_O 0x01 /* r/w: SCSI-I_O */
138
139/*0a*/ u8 nc_ssid;
140
141/*0b*/ u8 nc_sbcl;
142
143/*0c*/ u8 nc_dstat;
144 #define DFE 0x80 /* sta: dma fifo empty */
145 #define MDPE 0x40 /* int: master data parity error */
146 #define BF 0x20 /* int: script: bus fault */
147 #define ABRT 0x10 /* int: script: command aborted */
148 #define SSI 0x08 /* int: script: single step */
149 #define SIR 0x04 /* int: script: interrupt instruct. */
150 #define IID 0x01 /* int: script: illegal instruct. */
151
152/*0d*/ u8 nc_sstat0;
153 #define ILF 0x80 /* sta: data in SIDL register lsb */
154 #define ORF 0x40 /* sta: data in SODR register lsb */
155 #define OLF 0x20 /* sta: data in SODL register lsb */
156 #define AIP 0x10 /* sta: arbitration in progress */
157 #define LOA 0x08 /* sta: arbitration lost */
158 #define WOA 0x04 /* sta: arbitration won */
159 #define IRST 0x02 /* sta: scsi reset signal */
160 #define SDP 0x01 /* sta: scsi parity signal */
161
162/*0e*/ u8 nc_sstat1;
163 #define FF3210 0xf0 /* sta: bytes in the scsi fifo */
164
165/*0f*/ u8 nc_sstat2;
166 #define ILF1 0x80 /* sta: data in SIDL register msb[W]*/
167 #define ORF1 0x40 /* sta: data in SODR register msb[W]*/
168 #define OLF1 0x20 /* sta: data in SODL register msb[W]*/
169 #define DM 0x04 /* sta: DIFFSENS mismatch (895/6 only) */
170 #define LDSC 0x02 /* sta: disconnect & reconnect */
171
172/*10*/ u8 nc_dsa; /* --> Base page */
173/*11*/ u8 nc_dsa1;
174/*12*/ u8 nc_dsa2;
175/*13*/ u8 nc_dsa3;
176
177/*14*/ u8 nc_istat; /* --> Main Command and status */
178 #define CABRT 0x80 /* cmd: abort current operation */
179 #define SRST 0x40 /* mod: reset chip */
180 #define SIGP 0x20 /* r/w: message from host to script */
181 #define SEM 0x10 /* r/w: message between host + script */
182 #define CON 0x08 /* sta: connected to scsi */
183 #define INTF 0x04 /* sta: int on the fly (reset by wr)*/
184 #define SIP 0x02 /* sta: scsi-interrupt */
185 #define DIP 0x01 /* sta: host/script interrupt */
186
187/*15*/ u8 nc_istat1; /* 896 only */
188 #define FLSH 0x04 /* sta: chip is flushing */
189 #define SCRUN 0x02 /* sta: scripts are running */
190 #define SIRQD 0x01 /* r/w: disable INT pin */
191
192/*16*/ u8 nc_mbox0; /* 896 only */
193/*17*/ u8 nc_mbox1; /* 896 only */
194
195/*18*/ u8 nc_ctest0;
196/*19*/ u8 nc_ctest1;
197
198/*1a*/ u8 nc_ctest2;
199 #define CSIGP 0x40
200 /* bits 0-2,7 rsvd for C1010 */
201
202/*1b*/ u8 nc_ctest3;
203 #define FLF 0x08 /* cmd: flush dma fifo */
204 #define CLF 0x04 /* cmd: clear dma fifo */
205 #define FM 0x02 /* mod: fetch pin mode */
206 #define WRIE 0x01 /* mod: write and invalidate enable */
207 /* bits 4-7 rsvd for C1010 */
208
209/*1c*/ u32 nc_temp; /* ### Temporary stack */
210
211/*20*/ u8 nc_dfifo;
212/*21*/ u8 nc_ctest4;
213 #define BDIS 0x80 /* mod: burst disable */
214 #define MPEE 0x08 /* mod: master parity error enable */
215
216/*22*/ u8 nc_ctest5;
217 #define DFS 0x20 /* mod: dma fifo size */
218 /* bits 0-1, 3-7 rsvd for C1010 */
219
220/*23*/ u8 nc_ctest6;
221
222/*24*/ u32 nc_dbc; /* ### Byte count and command */
223/*28*/ u32 nc_dnad; /* ### Next command register */
224/*2c*/ u32 nc_dsp; /* --> Script Pointer */
225/*30*/ u32 nc_dsps; /* --> Script pointer save/opcode#2 */
226
227/*34*/ u8 nc_scratcha; /* Temporary register a */
228/*35*/ u8 nc_scratcha1;
229/*36*/ u8 nc_scratcha2;
230/*37*/ u8 nc_scratcha3;
231
232/*38*/ u8 nc_dmode;
233 #define BL_2 0x80 /* mod: burst length shift value +2 */
234 #define BL_1 0x40 /* mod: burst length shift value +1 */
235 #define ERL 0x08 /* mod: enable read line */
236 #define ERMP 0x04 /* mod: enable read multiple */
237 #define BOF 0x02 /* mod: burst op code fetch */
238
239/*39*/ u8 nc_dien;
240/*3a*/ u8 nc_sbr;
241
242/*3b*/ u8 nc_dcntl; /* --> Script execution control */
243 #define CLSE 0x80 /* mod: cache line size enable */
244 #define PFF 0x40 /* cmd: pre-fetch flush */
245 #define PFEN 0x20 /* mod: pre-fetch enable */
246 #define SSM 0x10 /* mod: single step mode */
247 #define IRQM 0x08 /* mod: irq mode (1 = totem pole !) */
248 #define STD 0x04 /* cmd: start dma mode */
249 #define IRQD 0x02 /* mod: irq disable */
250 #define NOCOM 0x01 /* cmd: protect sfbr while reselect */
251 /* bits 0-1 rsvd for C1010 */
252
253/*3c*/ u32 nc_adder;
254
255/*40*/ u16 nc_sien; /* -->: interrupt enable */
256/*42*/ u16 nc_sist; /* <--: interrupt status */
257 #define SBMC 0x1000/* sta: SCSI Bus Mode Change (895/6 only) */
258 #define STO 0x0400/* sta: timeout (select) */
259 #define GEN 0x0200/* sta: timeout (general) */
260 #define HTH 0x0100/* sta: timeout (handshake) */
261 #define MA 0x80 /* sta: phase mismatch */
262 #define CMP 0x40 /* sta: arbitration complete */
263 #define SEL 0x20 /* sta: selected by another device */
264 #define RSL 0x10 /* sta: reselected by another device*/
265 #define SGE 0x08 /* sta: gross error (over/underflow)*/
266 #define UDC 0x04 /* sta: unexpected disconnect */
267 #define RST 0x02 /* sta: scsi bus reset detected */
268 #define PAR 0x01 /* sta: scsi parity error */
269
270/*44*/ u8 nc_slpar;
271/*45*/ u8 nc_swide;
272/*46*/ u8 nc_macntl;
273/*47*/ u8 nc_gpcntl;
274/*48*/ u8 nc_stime0; /* cmd: timeout for select&handshake*/
275/*49*/ u8 nc_stime1; /* cmd: timeout user defined */
276/*4a*/ u16 nc_respid; /* sta: Reselect-IDs */
277
278/*4c*/ u8 nc_stest0;
279
280/*4d*/ u8 nc_stest1;
281 #define SCLK 0x80 /* Use the PCI clock as SCSI clock */
282 #define DBLEN 0x08 /* clock doubler running */
283 #define DBLSEL 0x04 /* clock doubler selected */
284
285
286/*4e*/ u8 nc_stest2;
287 #define ROF 0x40 /* reset scsi offset (after gross error!) */
288 #define EXT 0x02 /* extended filtering */
289
290/*4f*/ u8 nc_stest3;
291 #define TE 0x80 /* c: tolerAnt enable */
292 #define HSC 0x20 /* c: Halt SCSI Clock */
293 #define CSF 0x02 /* c: clear scsi fifo */
294
295/*50*/ u16 nc_sidl; /* Lowlevel: latched from scsi data */
296/*52*/ u8 nc_stest4;
297 #define SMODE 0xc0 /* SCSI bus mode (895/6 only) */
298 #define SMODE_HVD 0x40 /* High Voltage Differential */
299 #define SMODE_SE 0x80 /* Single Ended */
300 #define SMODE_LVD 0xc0 /* Low Voltage Differential */
301 #define LCKFRQ 0x20 /* Frequency Lock (895/6 only) */
302 /* bits 0-5 rsvd for C1010 */
303
304/*53*/ u8 nc_53_;
305/*54*/ u16 nc_sodl; /* Lowlevel: data out to scsi data */
306/*56*/ u8 nc_ccntl0; /* Chip Control 0 (896) */
307 #define ENPMJ 0x80 /* Enable Phase Mismatch Jump */
308 #define PMJCTL 0x40 /* Phase Mismatch Jump Control */
309 #define ENNDJ 0x20 /* Enable Non Data PM Jump */
310 #define DISFC 0x10 /* Disable Auto FIFO Clear */
311 #define DILS 0x02 /* Disable Internal Load/Store */
312 #define DPR 0x01 /* Disable Pipe Req */
313
314/*57*/ u8 nc_ccntl1; /* Chip Control 1 (896) */
315 #define ZMOD 0x80 /* High Impedance Mode */
316 #define DDAC 0x08 /* Disable Dual Address Cycle */
317 #define XTIMOD 0x04 /* 64-bit Table Ind. Indexing Mode */
318 #define EXTIBMV 0x02 /* Enable 64-bit Table Ind. BMOV */
319 #define EXDBMV 0x01 /* Enable 64-bit Direct BMOV */
320
321/*58*/ u16 nc_sbdl; /* Lowlevel: data from scsi data */
322/*5a*/ u16 nc_5a_;
323
324/*5c*/ u8 nc_scr0; /* Working register B */
325/*5d*/ u8 nc_scr1;
326/*5e*/ u8 nc_scr2;
327/*5f*/ u8 nc_scr3;
328
329/*60*/ u8 nc_scrx[64]; /* Working register C-R */
330/*a0*/ u32 nc_mmrs; /* Memory Move Read Selector */
331/*a4*/ u32 nc_mmws; /* Memory Move Write Selector */
332/*a8*/ u32 nc_sfs; /* Script Fetch Selector */
333/*ac*/ u32 nc_drs; /* DSA Relative Selector */
334/*b0*/ u32 nc_sbms; /* Static Block Move Selector */
335/*b4*/ u32 nc_dbms; /* Dynamic Block Move Selector */
336/*b8*/ u32 nc_dnad64; /* DMA Next Address 64 */
337/*bc*/ u16 nc_scntl4; /* C1010 only */
338 #define U3EN 0x80 /* Enable Ultra 3 */
339 #define AIPCKEN 0x40 /* AIP checking enable */
340 /* Also enable AIP generation on C10-33*/
341 #define XCLKH_DT 0x08 /* Extra clock of data hold on DT edge */
342 #define XCLKH_ST 0x04 /* Extra clock of data hold on ST edge */
343 #define XCLKS_DT 0x02 /* Extra clock of data set on DT edge */
344 #define XCLKS_ST 0x01 /* Extra clock of data set on ST edge */
345/*be*/ u8 nc_aipcntl0; /* AIP Control 0 C1010 only */
346/*bf*/ u8 nc_aipcntl1; /* AIP Control 1 C1010 only */
347 #define DISAIP 0x08 /* Disable AIP generation C10-66 only */
348/*c0*/ u32 nc_pmjad1; /* Phase Mismatch Jump Address 1 */
349/*c4*/ u32 nc_pmjad2; /* Phase Mismatch Jump Address 2 */
350/*c8*/ u8 nc_rbc; /* Remaining Byte Count */
351/*c9*/ u8 nc_rbc1;
352/*ca*/ u8 nc_rbc2;
353/*cb*/ u8 nc_rbc3;
354
355/*cc*/ u8 nc_ua; /* Updated Address */
356/*cd*/ u8 nc_ua1;
357/*ce*/ u8 nc_ua2;
358/*cf*/ u8 nc_ua3;
359/*d0*/ u32 nc_esa; /* Entry Storage Address */
360/*d4*/ u8 nc_ia; /* Instruction Address */
361/*d5*/ u8 nc_ia1;
362/*d6*/ u8 nc_ia2;
363/*d7*/ u8 nc_ia3;
364/*d8*/ u32 nc_sbc; /* SCSI Byte Count (3 bytes only) */
365/*dc*/ u32 nc_csbc; /* Cumulative SCSI Byte Count */
366 /* Following for C1010 only */
367/*e0*/ u16 nc_crcpad; /* CRC Value */
368/*e2*/ u8 nc_crccntl0; /* CRC control register */
369 #define SNDCRC 0x10 /* Send CRC Request */
370/*e3*/ u8 nc_crccntl1; /* CRC control register */
371/*e4*/ u32 nc_crcdata; /* CRC data register */
372/*e8*/ u32 nc_e8_;
373/*ec*/ u32 nc_ec_;
374/*f0*/ u16 nc_dfbc; /* DMA FIFO byte count */
375};
376
377/*-----------------------------------------------------------
378 *
379 * Utility macros for the script.
380 *
381 *-----------------------------------------------------------
382 */
383
384#define REGJ(p,r) (offsetof(struct sym_reg, p ## r))
385#define REG(r) REGJ (nc_, r)
386
387/*-----------------------------------------------------------
388 *
389 * SCSI phases
390 *
391 *-----------------------------------------------------------
392 */
393
394#define SCR_DATA_OUT 0x00000000
395#define SCR_DATA_IN 0x01000000
396#define SCR_COMMAND 0x02000000
397#define SCR_STATUS 0x03000000
398#define SCR_DT_DATA_OUT 0x04000000
399#define SCR_DT_DATA_IN 0x05000000
400#define SCR_MSG_OUT 0x06000000
401#define SCR_MSG_IN 0x07000000
402/* DT phases are illegal for non Ultra3 mode */
403#define SCR_ILG_OUT 0x04000000
404#define SCR_ILG_IN 0x05000000
405
406/*-----------------------------------------------------------
407 *
408 * Data transfer via SCSI.
409 *
410 *-----------------------------------------------------------
411 *
412 * MOVE_ABS (LEN)
413 * <<start address>>
414 *
415 * MOVE_IND (LEN)
416 * <<dnad_offset>>
417 *
418 * MOVE_TBL
419 * <<dnad_offset>>
420 *
421 *-----------------------------------------------------------
422 */
423
424#define OPC_MOVE 0x08000000
425
426#define SCR_MOVE_ABS(l) ((0x00000000 | OPC_MOVE) | (l))
427/* #define SCR_MOVE_IND(l) ((0x20000000 | OPC_MOVE) | (l)) */
428#define SCR_MOVE_TBL (0x10000000 | OPC_MOVE)
429
430#define SCR_CHMOV_ABS(l) ((0x00000000) | (l))
431/* #define SCR_CHMOV_IND(l) ((0x20000000) | (l)) */
432#define SCR_CHMOV_TBL (0x10000000)
433
434#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
435/* We steal the `indirect addressing' flag for target mode MOVE in scripts */
436
437#define OPC_TCHMOVE 0x08000000
438
439#define SCR_TCHMOVE_ABS(l) ((0x20000000 | OPC_TCHMOVE) | (l))
440#define SCR_TCHMOVE_TBL (0x30000000 | OPC_TCHMOVE)
441
442#define SCR_TMOV_ABS(l) ((0x20000000) | (l))
443#define SCR_TMOV_TBL (0x30000000)
444#endif
445
446struct sym_tblmove {
447 u32 size;
448 u32 addr;
449};
450
451/*-----------------------------------------------------------
452 *
453 * Selection
454 *
455 *-----------------------------------------------------------
456 *
457 * SEL_ABS | SCR_ID (0..15) [ | REL_JMP]
458 * <<alternate_address>>
459 *
460 * SEL_TBL | << dnad_offset>> [ | REL_JMP]
461 * <<alternate_address>>
462 *
463 *-----------------------------------------------------------
464 */
465
466#define SCR_SEL_ABS 0x40000000
467#define SCR_SEL_ABS_ATN 0x41000000
468#define SCR_SEL_TBL 0x42000000
469#define SCR_SEL_TBL_ATN 0x43000000
470
471#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
472#define SCR_RESEL_ABS 0x40000000
473#define SCR_RESEL_ABS_ATN 0x41000000
474#define SCR_RESEL_TBL 0x42000000
475#define SCR_RESEL_TBL_ATN 0x43000000
476#endif
477
478struct sym_tblsel {
479 u_char sel_scntl4; /* C1010 only */
480 u_char sel_sxfer;
481 u_char sel_id;
482 u_char sel_scntl3;
483};
484
485#define SCR_JMP_REL 0x04000000
486#define SCR_ID(id) (((u32)(id)) << 16)
487
488/*-----------------------------------------------------------
489 *
490 * Waiting for Disconnect or Reselect
491 *
492 *-----------------------------------------------------------
493 *
494 * WAIT_DISC
495 * dummy: <<alternate_address>>
496 *
497 * WAIT_RESEL
498 * <<alternate_address>>
499 *
500 *-----------------------------------------------------------
501 */
502
503#define SCR_WAIT_DISC 0x48000000
504#define SCR_WAIT_RESEL 0x50000000
505
506#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
507#define SCR_DISCONNECT 0x48000000
508#endif
509
510/*-----------------------------------------------------------
511 *
512 * Bit Set / Reset
513 *
514 *-----------------------------------------------------------
515 *
516 * SET (flags {|.. })
517 *
518 * CLR (flags {|.. })
519 *
520 *-----------------------------------------------------------
521 */
522
523#define SCR_SET(f) (0x58000000 | (f))
524#define SCR_CLR(f) (0x60000000 | (f))
525
526#define SCR_CARRY 0x00000400
527#define SCR_TRG 0x00000200
528#define SCR_ACK 0x00000040
529#define SCR_ATN 0x00000008
530
531
532/*-----------------------------------------------------------
533 *
534 * Memory to memory move
535 *
536 *-----------------------------------------------------------
537 *
538 * COPY (bytecount)
539 * << source_address >>
540 * << destination_address >>
541 *
542 * SCR_COPY sets the NO FLUSH option by default.
543 * SCR_COPY_F does not set this option.
544 *
545 * For chips which do not support this option,
546 * sym_fw_bind_script() will remove this bit.
547 *
548 *-----------------------------------------------------------
549 */
550
551#define SCR_NO_FLUSH 0x01000000
552
553#define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n))
554#define SCR_COPY_F(n) (0xc0000000 | (n))
555
556/*-----------------------------------------------------------
557 *
558 * Register move and binary operations
559 *
560 *-----------------------------------------------------------
561 *
562 * SFBR_REG (reg, op, data) reg = SFBR op data
563 * << 0 >>
564 *
565 * REG_SFBR (reg, op, data) SFBR = reg op data
566 * << 0 >>
567 *
568 * REG_REG (reg, op, data) reg = reg op data
569 * << 0 >>
570 *
571 *-----------------------------------------------------------
572 *
573 * On 825A, 875, 895 and 896 chips the content
574 * of the SFBR register can be used as data (SCR_SFBR_DATA).
575 * The 896 has additional IO registers starting at
576 * offset 0x80. Bit 7 of register offset is stored in
577 * bit 7 of the SCRIPTS instruction first DWORD.
578 *
579 *-----------------------------------------------------------
580 */
581
582#define SCR_REG_OFS(ofs) ((((ofs) & 0x7f) << 16ul) + ((ofs) & 0x80))
583
584#define SCR_SFBR_REG(reg,op,data) \
585 (0x68000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
586
587#define SCR_REG_SFBR(reg,op,data) \
588 (0x70000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
589
590#define SCR_REG_REG(reg,op,data) \
591 (0x78000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
592
593
594#define SCR_LOAD 0x00000000
595#define SCR_SHL 0x01000000
596#define SCR_OR 0x02000000
597#define SCR_XOR 0x03000000
598#define SCR_AND 0x04000000
599#define SCR_SHR 0x05000000
600#define SCR_ADD 0x06000000
601#define SCR_ADDC 0x07000000
602
603#define SCR_SFBR_DATA (0x00800000>>8ul) /* Use SFBR as data */
604
605/*-----------------------------------------------------------
606 *
607 * FROM_REG (reg) SFBR = reg
608 * << 0 >>
609 *
610 * TO_REG (reg) reg = SFBR
611 * << 0 >>
612 *
613 * LOAD_REG (reg, data) reg = <data>
614 * << 0 >>
615 *
616 * LOAD_SFBR(data) SFBR = <data>
617 * << 0 >>
618 *
619 *-----------------------------------------------------------
620 */
621
622#define SCR_FROM_REG(reg) \
623 SCR_REG_SFBR(reg,SCR_OR,0)
624
625#define SCR_TO_REG(reg) \
626 SCR_SFBR_REG(reg,SCR_OR,0)
627
628#define SCR_LOAD_REG(reg,data) \
629 SCR_REG_REG(reg,SCR_LOAD,data)
630
631#define SCR_LOAD_SFBR(data) \
632 (SCR_REG_SFBR (gpreg, SCR_LOAD, data))
633
634/*-----------------------------------------------------------
635 *
636 * LOAD from memory to register.
637 * STORE from register to memory.
638 *
639 * Only supported by 810A, 860, 825A, 875, 895 and 896.
640 *
641 *-----------------------------------------------------------
642 *
643 * LOAD_ABS (LEN)
644 * <<start address>>
645 *
646 * LOAD_REL (LEN) (DSA relative)
647 * <<dsa_offset>>
648 *
649 *-----------------------------------------------------------
650 */
651
652#define SCR_REG_OFS2(ofs) (((ofs) & 0xff) << 16ul)
653#define SCR_NO_FLUSH2 0x02000000
654#define SCR_DSA_REL2 0x10000000
655
656#define SCR_LOAD_R(reg, how, n) \
657 (0xe1000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
658
659#define SCR_STORE_R(reg, how, n) \
660 (0xe0000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
661
662#define SCR_LOAD_ABS(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2, n)
663#define SCR_LOAD_REL(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2, n)
664#define SCR_LOAD_ABS_F(reg, n) SCR_LOAD_R(reg, 0, n)
665#define SCR_LOAD_REL_F(reg, n) SCR_LOAD_R(reg, SCR_DSA_REL2, n)
666
667#define SCR_STORE_ABS(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2, n)
668#define SCR_STORE_REL(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2,n)
669#define SCR_STORE_ABS_F(reg, n) SCR_STORE_R(reg, 0, n)
670#define SCR_STORE_REL_F(reg, n) SCR_STORE_R(reg, SCR_DSA_REL2, n)
671
672
673/*-----------------------------------------------------------
674 *
675 * Control transfer (JUMP / CALL / RETURN / INT)
676 *
677 *-----------------------------------------------------------
678 *
679 * JUMP [ | IFTRUE/IFFALSE ( ... ) ]
680 * <<address>>
681 *
682 * JUMPR [ | IFTRUE/IFFALSE ( ... ) ]
683 * <<distance>>
684 *
685 * CALL [ | IFTRUE/IFFALSE ( ... ) ]
686 * <<address>>
687 *
688 * CALLR [ | IFTRUE/IFFALSE ( ... ) ]
689 * <<distance>>
690 *
691 * RETURN [ | IFTRUE/IFFALSE ( ... ) ]
692 * <<dummy>>
693 *
694 * INT [ | IFTRUE/IFFALSE ( ... ) ]
695 * <<ident>>
696 *
697 * INT_FLY [ | IFTRUE/IFFALSE ( ... ) ]
698 * <<ident>>
699 *
700 * Conditions:
701 * WHEN (phase)
702 * IF (phase)
703 * CARRYSET
704 * DATA (data, mask)
705 *
706 *-----------------------------------------------------------
707 */
708
709#define SCR_NO_OP 0x80000000
710#define SCR_JUMP 0x80080000
711#define SCR_JUMP64 0x80480000
712#define SCR_JUMPR 0x80880000
713#define SCR_CALL 0x88080000
714#define SCR_CALLR 0x88880000
715#define SCR_RETURN 0x90080000
716#define SCR_INT 0x98080000
717#define SCR_INT_FLY 0x98180000
718
719#define IFFALSE(arg) (0x00080000 | (arg))
720#define IFTRUE(arg) (0x00000000 | (arg))
721
722#define WHEN(phase) (0x00030000 | (phase))
723#define IF(phase) (0x00020000 | (phase))
724
725#define DATA(D) (0x00040000 | ((D) & 0xff))
726#define MASK(D,M) (0x00040000 | (((M ^ 0xff) & 0xff) << 8ul)|((D) & 0xff))
727
728#define CARRYSET (0x00200000)
729
730/*-----------------------------------------------------------
731 *
732 * SCSI constants.
733 *
734 *-----------------------------------------------------------
735 */
736
737/*
738 * Messages
739 */
740
741#define M_COMPLETE COMMAND_COMPLETE
742#define M_EXTENDED EXTENDED_MESSAGE
743#define M_SAVE_DP SAVE_POINTERS
744#define M_RESTORE_DP RESTORE_POINTERS
745#define M_DISCONNECT DISCONNECT
746#define M_ID_ERROR INITIATOR_ERROR
747#define M_ABORT ABORT_TASK_SET
748#define M_REJECT MESSAGE_REJECT
749#define M_NOOP NOP
750#define M_PARITY MSG_PARITY_ERROR
751#define M_LCOMPLETE LINKED_CMD_COMPLETE
752#define M_FCOMPLETE LINKED_FLG_CMD_COMPLETE
753#define M_RESET TARGET_RESET
754#define M_ABORT_TAG ABORT_TASK
755#define M_CLEAR_QUEUE CLEAR_TASK_SET
756#define M_INIT_REC INITIATE_RECOVERY
757#define M_REL_REC RELEASE_RECOVERY
758#define M_TERMINATE (0x11)
759#define M_SIMPLE_TAG SIMPLE_QUEUE_TAG
760#define M_HEAD_TAG HEAD_OF_QUEUE_TAG
761#define M_ORDERED_TAG ORDERED_QUEUE_TAG
762#define M_IGN_RESIDUE IGNORE_WIDE_RESIDUE
763
764#define M_X_MODIFY_DP EXTENDED_MODIFY_DATA_POINTER
765#define M_X_SYNC_REQ EXTENDED_SDTR
766#define M_X_WIDE_REQ EXTENDED_WDTR
767#define M_X_PPR_REQ EXTENDED_PPR
768
769/*
770 * PPR protocol options
771 */
772#define PPR_OPT_IU (0x01)
773#define PPR_OPT_DT (0x02)
774#define PPR_OPT_QAS (0x04)
775#define PPR_OPT_MASK (0x07)
776
777/*
778 * Status
779 */
780
781#define S_GOOD SAM_STAT_GOOD
782#define S_CHECK_COND SAM_STAT_CHECK_CONDITION
783#define S_COND_MET SAM_STAT_CONDITION_MET
784#define S_BUSY SAM_STAT_BUSY
785#define S_INT SAM_STAT_INTERMEDIATE
786#define S_INT_COND_MET SAM_STAT_INTERMEDIATE_CONDITION_MET
787#define S_CONFLICT SAM_STAT_RESERVATION_CONFLICT
788#define S_TERMINATED SAM_STAT_COMMAND_TERMINATED
789#define S_QUEUE_FULL SAM_STAT_TASK_SET_FULL
790#define S_ILLEGAL (0xff)
791
792#endif /* defined SYM_DEFS_H */
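The SCR_* opcode and condition macros above are combined to build SCRIPTS instructions, each of which is two 32-bit words: the opcode and its argument. A minimal sketch, assuming only the macro values reproduced from this header, of a conditional jump taken when the SCSI bus settles in MSG IN phase; the target address used here is a made-up placeholder.

#include <stdio.h>

typedef unsigned int u32;

/* Values copied from sym_defs.h above */
#define SCR_MSG_IN	0x07000000
#define SCR_JUMP	0x80080000
#define IFTRUE(arg)	(0x00000000 | (arg))
#define WHEN(phase)	(0x00030000 | (phase))

int main(void)
{
	/* "JUMP <address> WHEN MSG_IN": two words, opcode then argument */
	u32 insn[2] = {
		SCR_JUMP | IFTRUE(WHEN(SCR_MSG_IN)),
		0x000c4180u,	/* hypothetical absolute script address */
	};

	printf("opcode=0x%08x arg=0x%08x\n", insn[0], insn[1]);
	return 0;
}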
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c
new file mode 100644
index 000000000000..fd36cf9858cb
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.c
@@ -0,0 +1,568 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40#ifdef __FreeBSD__
41#include <dev/sym/sym_glue.h>
42#else
43#include "sym_glue.h"
44#endif
45
46/*
47 * Macros used for all firmwares.
48 */
49#define SYM_GEN_A(s, label) ((short) offsetof(s, label)),
50#define SYM_GEN_B(s, label) ((short) offsetof(s, label)),
51#define SYM_GEN_Z(s, label) ((short) offsetof(s, label)),
52#define PADDR_A(label) SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label)
53#define PADDR_B(label) SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label)
54
55
56#if SYM_CONF_GENERIC_SUPPORT
57/*
58 * Allocate firmware #1 script area.
59 */
60#define SYM_FWA_SCR sym_fw1a_scr
61#define SYM_FWB_SCR sym_fw1b_scr
62#define SYM_FWZ_SCR sym_fw1z_scr
63#ifdef __FreeBSD__
64#include <dev/sym/sym_fw1.h>
65#else
66#include "sym_fw1.h"
67#endif
68static struct sym_fwa_ofs sym_fw1a_ofs = {
69 SYM_GEN_FW_A(struct SYM_FWA_SCR)
70};
71static struct sym_fwb_ofs sym_fw1b_ofs = {
72 SYM_GEN_FW_B(struct SYM_FWB_SCR)
73#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
74 SYM_GEN_B(struct SYM_FWB_SCR, data_io)
75#endif
76};
77static struct sym_fwz_ofs sym_fw1z_ofs = {
78 SYM_GEN_FW_Z(struct SYM_FWZ_SCR)
79};
80#undef SYM_FWA_SCR
81#undef SYM_FWB_SCR
82#undef SYM_FWZ_SCR
83#endif /* SYM_CONF_GENERIC_SUPPORT */
84
85/*
86 * Allocate firmware #2 script area.
87 */
88#define SYM_FWA_SCR sym_fw2a_scr
89#define SYM_FWB_SCR sym_fw2b_scr
90#define SYM_FWZ_SCR sym_fw2z_scr
91#ifdef __FreeBSD__
92#include <dev/sym/sym_fw2.h>
93#else
94#include "sym_fw2.h"
95#endif
96static struct sym_fwa_ofs sym_fw2a_ofs = {
97 SYM_GEN_FW_A(struct SYM_FWA_SCR)
98};
99static struct sym_fwb_ofs sym_fw2b_ofs = {
100 SYM_GEN_FW_B(struct SYM_FWB_SCR)
101#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
102 SYM_GEN_B(struct SYM_FWB_SCR, data_io)
103#endif
104 SYM_GEN_B(struct SYM_FWB_SCR, start64)
105 SYM_GEN_B(struct SYM_FWB_SCR, pm_handle)
106};
107static struct sym_fwz_ofs sym_fw2z_ofs = {
108 SYM_GEN_FW_Z(struct SYM_FWZ_SCR)
109};
110#undef SYM_FWA_SCR
111#undef SYM_FWB_SCR
112#undef SYM_FWZ_SCR
113
114#undef SYM_GEN_A
115#undef SYM_GEN_B
116#undef SYM_GEN_Z
117#undef PADDR_A
118#undef PADDR_B
119
120#if SYM_CONF_GENERIC_SUPPORT
121/*
122 * Patch routine for firmware #1.
123 */
124static void
125sym_fw1_patch(struct sym_hcb *np)
126{
127 struct sym_fw1a_scr *scripta0;
128 struct sym_fw1b_scr *scriptb0;
129
130 scripta0 = (struct sym_fw1a_scr *) np->scripta0;
131 scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
132
133 /*
134 * Remove LED support if not needed.
135 */
136 if (!(np->features & FE_LED0)) {
137 scripta0->idle[0] = cpu_to_scr(SCR_NO_OP);
138 scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP);
139 scripta0->start[0] = cpu_to_scr(SCR_NO_OP);
140 }
141
142#ifdef SYM_CONF_IARB_SUPPORT
143 /*
144 * If user does not want to use IMMEDIATE ARBITRATION
145 * when we are reselected while attempting to arbitrate,
146 * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
147 */
148 if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
149 scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
150#endif
151 /*
152 * Patch some data in SCRIPTS.
153 * - start and done queue initial bus address.
154 * - target bus address table bus address.
155 */
156 scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba);
157 scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba);
158 scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba);
159}
160#endif /* SYM_CONF_GENERIC_SUPPORT */
161
162/*
163 * Patch routine for firmware #2.
164 */
165static void
166sym_fw2_patch(struct sym_hcb *np)
167{
168 struct sym_fw2a_scr *scripta0;
169 struct sym_fw2b_scr *scriptb0;
170
171 scripta0 = (struct sym_fw2a_scr *) np->scripta0;
172 scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
173
174 /*
175 * Remove LED support if not needed.
176 */
177 if (!(np->features & FE_LED0)) {
178 scripta0->idle[0] = cpu_to_scr(SCR_NO_OP);
179 scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP);
180 scripta0->start[0] = cpu_to_scr(SCR_NO_OP);
181 }
182
183#if SYM_CONF_DMA_ADDRESSING_MODE == 2
184 /*
185 * Remove useless 64 bit DMA specific SCRIPTS,
186 * when this feature is not available.
187 */
188 if (!np->use_dac) {
189 scripta0->is_dmap_dirty[0] = cpu_to_scr(SCR_NO_OP);
190 scripta0->is_dmap_dirty[1] = 0;
191 scripta0->is_dmap_dirty[2] = cpu_to_scr(SCR_NO_OP);
192 scripta0->is_dmap_dirty[3] = 0;
193 }
194#endif
195
196#ifdef SYM_CONF_IARB_SUPPORT
197 /*
198 * If user does not want to use IMMEDIATE ARBITRATION
199 * when we are reselected while attempting to arbitrate,
200 * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
201 */
202 if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
203 scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
204#endif
205 /*
206 * Patch some variables in SCRIPTS.
207 * - start and done queue initial bus address.
208 * - target bus address table bus address.
209 */
210 scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba);
211 scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba);
212 scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba);
213
214 /*
215 * Remove the load of SCNTL4 on reselection if not a C10.
216 */
217 if (!(np->features & FE_C10)) {
218 scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP);
219 scripta0->resel_scntl4[1] = cpu_to_scr(0);
220 }
221
222 /*
223 * Remove a couple of work-arounds specific to C1010 if
224 * they are not desirable. See `sym_fw2.h' for more details.
225 */
226 if (!(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66 &&
227 np->revision_id < 0x1 &&
228 np->pciclk_khz < 60000)) {
229 scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP);
230 scripta0->datao_phase[1] = cpu_to_scr(0);
231 }
232 if (!(np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 &&
233 /* np->revision_id < 0xff */ 1)) {
234 scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP);
235 scripta0->sel_done[1] = cpu_to_scr(0);
236 }
237
238 /*
239 * Patch some other variables in SCRIPTS.
240 * These ones are loaded by the SCRIPTS processor.
241 */
242 scriptb0->pm0_data_addr[0] =
243 cpu_to_scr(np->scripta_ba +
244 offsetof(struct sym_fw2a_scr, pm0_data));
245 scriptb0->pm1_data_addr[0] =
246 cpu_to_scr(np->scripta_ba +
247 offsetof(struct sym_fw2a_scr, pm1_data));
248}
249
250/*
251 * Fill the data area in scripts.
252 * To be done for all firmwares.
253 */
254static void
255sym_fw_fill_data (u32 *in, u32 *out)
256{
257 int i;
258
259 for (i = 0; i < SYM_CONF_MAX_SG; i++) {
260 *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN;
261 *in++ = offsetof (struct sym_dsb, data[i]);
262 *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT;
263 *out++ = offsetof (struct sym_dsb, data[i]);
264 }
265}
266
267/*
268 * Setup useful script bus addresses.
269 * To be done for all firmwares.
270 */
271static void
272sym_fw_setup_bus_addresses(struct sym_hcb *np, struct sym_fw *fw)
273{
274 u32 *pa;
275 u_short *po;
276 int i;
277
278 /*
279 * Build the bus address table for script A
280 * from the script A offset table.
281 */
282 po = (u_short *) fw->a_ofs;
283 pa = (u32 *) &np->fwa_bas;
284 for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++)
285 pa[i] = np->scripta_ba + po[i];
286
287 /*
288 * Same for script B.
289 */
290 po = (u_short *) fw->b_ofs;
291 pa = (u32 *) &np->fwb_bas;
292 for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++)
293 pa[i] = np->scriptb_ba + po[i];
294
295 /*
296 * Same for script Z.
297 */
298 po = (u_short *) fw->z_ofs;
299 pa = (u32 *) &np->fwz_bas;
300 for (i = 0 ; i < sizeof(np->fwz_bas)/sizeof(u32) ; i++)
301 pa[i] = np->scriptz_ba + po[i];
302}
303
304#if SYM_CONF_GENERIC_SUPPORT
305/*
306 * Setup routine for firmware #1.
307 */
308static void
309sym_fw1_setup(struct sym_hcb *np, struct sym_fw *fw)
310{
311 struct sym_fw1a_scr *scripta0;
312 struct sym_fw1b_scr *scriptb0;
313
314 scripta0 = (struct sym_fw1a_scr *) np->scripta0;
315 scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
316
317 /*
318 * Fill variable parts in scripts.
319 */
320 sym_fw_fill_data(scripta0->data_in, scripta0->data_out);
321
322 /*
323 * Setup bus addresses used by the C code.
324 */
325 sym_fw_setup_bus_addresses(np, fw);
326}
327#endif /* SYM_CONF_GENERIC_SUPPORT */
328
329/*
330 * Setup routine for firmware #2.
331 */
332static void
333sym_fw2_setup(struct sym_hcb *np, struct sym_fw *fw)
334{
335 struct sym_fw2a_scr *scripta0;
336 struct sym_fw2b_scr *scriptb0;
337
338 scripta0 = (struct sym_fw2a_scr *) np->scripta0;
339 scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
340
341 /*
342 * Fill variable parts in scripts.
343 */
344 sym_fw_fill_data(scripta0->data_in, scripta0->data_out);
345
346 /*
347 * Setup bus addresses used by the C code.
348 */
349 sym_fw_setup_bus_addresses(np, fw);
350}
351
352/*
353 * Allocate firmware descriptors.
354 */
355#if SYM_CONF_GENERIC_SUPPORT
356static struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic");
357#endif /* SYM_CONF_GENERIC_SUPPORT */
358static struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based");
359
360/*
361 * Find the most appropriate firmware for a chip.
362 */
363struct sym_fw *
364sym_find_firmware(struct sym_chip *chip)
365{
366 if (chip->features & FE_LDSTR)
367 return &sym_fw2;
368#if SYM_CONF_GENERIC_SUPPORT
369 else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC)))
370 return &sym_fw1;
371#endif
372 else
373 return NULL;
374}
375
376/*
377 * Bind a script to physical addresses.
378 */
379void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
380{
381 u32 opcode, new, old, tmp1, tmp2;
382 u32 *end, *cur;
383 int relocs;
384
385 cur = start;
386 end = start + len/4;
387
388 while (cur < end) {
389
390 opcode = *cur;
391
392 /*
393 * If we forget to change the length
394 * in scripts, a field will be
395 * padded with 0. This is an illegal
396 * command.
397 */
398 if (opcode == 0) {
399 printf ("%s: ERROR0 IN SCRIPT at %d.\n",
400 sym_name(np), (int) (cur-start));
401 ++cur;
402 continue;
403 };
404
405 /*
406 * We use the bogus value 0xf00ff00f ;-)
407 * to reserve data area in SCRIPTS.
408 */
409 if (opcode == SCR_DATA_ZERO) {
410 *cur++ = 0;
411 continue;
412 }
413
414 if (DEBUG_FLAGS & DEBUG_SCRIPT)
415 printf ("%d: <%x>\n", (int) (cur-start),
416 (unsigned)opcode);
417
418 /*
419 * We don't have to decode ALL commands
420 */
421 switch (opcode >> 28) {
422 case 0xf:
423 /*
424 * LOAD / STORE DSA relative, don't relocate.
425 */
426 relocs = 0;
427 break;
428 case 0xe:
429 /*
430 * LOAD / STORE absolute.
431 */
432 relocs = 1;
433 break;
434 case 0xc:
435 /*
436 * COPY has TWO arguments.
437 */
438 relocs = 2;
439 tmp1 = cur[1];
440 tmp2 = cur[2];
441 if ((tmp1 ^ tmp2) & 3) {
442 printf ("%s: ERROR1 IN SCRIPT at %d.\n",
443 sym_name(np), (int) (cur-start));
444 }
445 /*
446 * If PREFETCH feature not enabled, remove
447 * the NO FLUSH bit if present.
448 */
449 if ((opcode & SCR_NO_FLUSH) &&
450 !(np->features & FE_PFEN)) {
451 opcode = (opcode & ~SCR_NO_FLUSH);
452 }
453 break;
454 case 0x0:
455 /*
456 * MOVE/CHMOV (absolute address)
457 */
458 if (!(np->features & FE_WIDE))
459 opcode = (opcode | OPC_MOVE);
460 relocs = 1;
461 break;
462 case 0x1:
463 /*
464 * MOVE/CHMOV (table indirect)
465 */
466 if (!(np->features & FE_WIDE))
467 opcode = (opcode | OPC_MOVE);
468 relocs = 0;
469 break;
470#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
471 case 0x2:
472 /*
473 * MOVE/CHMOV in target role (absolute address)
474 */
475 opcode &= ~0x20000000;
476 if (!(np->features & FE_WIDE))
477 opcode = (opcode & ~OPC_TCHMOVE);
478 relocs = 1;
479 break;
480 case 0x3:
481 /*
482 * MOVE/CHMOV in target role (table indirect)
483 */
484 opcode &= ~0x20000000;
485 if (!(np->features & FE_WIDE))
486 opcode = (opcode & ~OPC_TCHMOVE);
487 relocs = 0;
488 break;
489#endif
490 case 0x8:
491 /*
492 * JUMP / CALL
493 * don't relocate if relative :-)
494 */
495 if (opcode & 0x00800000)
496 relocs = 0;
497 else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/
498 relocs = 2;
499 else
500 relocs = 1;
501 break;
502 case 0x4:
503 case 0x5:
504 case 0x6:
505 case 0x7:
506 relocs = 1;
507 break;
508 default:
509 relocs = 0;
510 break;
511 };
512
513 /*
514 * Scriptify:) the opcode.
515 */
516 *cur++ = cpu_to_scr(opcode);
517
518 /*
519 * If no relocation, assume 1 argument
520 * and just scriptize:) it.
521 */
522 if (!relocs) {
523 *cur = cpu_to_scr(*cur);
524 ++cur;
525 continue;
526 }
527
528 /*
529 * Otherwise perform all needed relocations.
530 */
531 while (relocs--) {
532 old = *cur;
533
534 switch (old & RELOC_MASK) {
535 case RELOC_REGISTER:
536 new = (old & ~RELOC_MASK) + np->mmio_ba;
537 break;
538 case RELOC_LABEL_A:
539 new = (old & ~RELOC_MASK) + np->scripta_ba;
540 break;
541 case RELOC_LABEL_B:
542 new = (old & ~RELOC_MASK) + np->scriptb_ba;
543 break;
544 case RELOC_SOFTC:
545 new = (old & ~RELOC_MASK) + np->hcb_ba;
546 break;
547 case 0:
548 /*
549 * Don't relocate a 0 address.
550 * They are mostly used for patched or
551 * self-modifying script areas.
552 */
553 if (old == 0) {
554 new = old;
555 break;
556 }
557 /* fall through */
558 default:
559 new = 0;
560 panic("sym_fw_bind_script: "
561 "weird relocation %x\n", old);
562 break;
563 }
564
565 *cur++ = cpu_to_scr(new);
566 }
567 };
568}
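The relocation loop at the end of sym_fw_bind_script() turns the RELOC_* tags stored in the script templates into real bus addresses. Below is a small standalone sketch (not part of the diff) of just that step, using the RELOC_LABEL_A case with a made-up script A bus address.

#include <stdio.h>

typedef unsigned int u32;

/* Relocation tags as defined in sym_fw.h */
#define RELOC_LABEL_A	0x50000000
#define RELOC_MASK	0xf0000000

int main(void)
{
	u32 scripta_ba = 0x000c4000;		/* hypothetical bus address of script A */
	u32 old = RELOC_LABEL_A | 0x120;	/* template word: offset 0x120 into script A */
	u32 new = (old & ~RELOC_MASK) + scripta_ba;

	/* Binding strips the tag and adds the script's bus address */
	printf("0x%08x -> 0x%08x\n", old, new);
	return 0;
}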
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.h b/drivers/scsi/sym53c8xx_2/sym_fw.h
new file mode 100644
index 000000000000..43f6810a4045
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.h
@@ -0,0 +1,211 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40#ifndef SYM_FW_H
41#define SYM_FW_H
42/*
43 * Macro used to generate interfaces for script A.
44 */
45#define SYM_GEN_FW_A(s) \
46 SYM_GEN_A(s, start) SYM_GEN_A(s, getjob_begin) \
47 SYM_GEN_A(s, getjob_end) \
48 SYM_GEN_A(s, select) SYM_GEN_A(s, wf_sel_done) \
49 SYM_GEN_A(s, send_ident) \
50 SYM_GEN_A(s, dispatch) SYM_GEN_A(s, init) \
51 SYM_GEN_A(s, clrack) SYM_GEN_A(s, complete_error) \
52 SYM_GEN_A(s, done) SYM_GEN_A(s, done_end) \
53 SYM_GEN_A(s, idle) SYM_GEN_A(s, ungetjob) \
54 SYM_GEN_A(s, reselect) \
55 SYM_GEN_A(s, resel_tag) SYM_GEN_A(s, resel_dsa) \
56 SYM_GEN_A(s, resel_no_tag) \
57 SYM_GEN_A(s, data_in) SYM_GEN_A(s, data_in2) \
58 SYM_GEN_A(s, data_out) SYM_GEN_A(s, data_out2) \
59 SYM_GEN_A(s, pm0_data) SYM_GEN_A(s, pm1_data)
60
61/*
62 * Macro used to generate interfaces for script B.
63 */
64#define SYM_GEN_FW_B(s) \
65 SYM_GEN_B(s, no_data) \
66 SYM_GEN_B(s, sel_for_abort) SYM_GEN_B(s, sel_for_abort_1) \
67 SYM_GEN_B(s, msg_bad) SYM_GEN_B(s, msg_weird) \
68 SYM_GEN_B(s, wdtr_resp) SYM_GEN_B(s, send_wdtr) \
69 SYM_GEN_B(s, sdtr_resp) SYM_GEN_B(s, send_sdtr) \
70 SYM_GEN_B(s, ppr_resp) SYM_GEN_B(s, send_ppr) \
71 SYM_GEN_B(s, nego_bad_phase) \
72 SYM_GEN_B(s, ident_break) SYM_GEN_B(s, ident_break_atn) \
73 SYM_GEN_B(s, sdata_in) SYM_GEN_B(s, resel_bad_lun) \
74 SYM_GEN_B(s, bad_i_t_l) SYM_GEN_B(s, bad_i_t_l_q) \
75 SYM_GEN_B(s, wsr_ma_helper)
76
77/*
78 * Macro used to generate interfaces for script Z.
79 */
80#define SYM_GEN_FW_Z(s) \
81 SYM_GEN_Z(s, snooptest) SYM_GEN_Z(s, snoopend)
82
83/*
84 * Generates structure interface that contains
85 * offsets within script A, B and Z.
86 */
87#define SYM_GEN_A(s, label) s label;
88#define SYM_GEN_B(s, label) s label;
89#define SYM_GEN_Z(s, label) s label;
90struct sym_fwa_ofs {
91 SYM_GEN_FW_A(u_short)
92};
93struct sym_fwb_ofs {
94 SYM_GEN_FW_B(u_short)
95#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
96 SYM_GEN_B(u_short, data_io)
97#endif
98 SYM_GEN_B(u_short, start64)
99 SYM_GEN_B(u_short, pm_handle)
100};
101struct sym_fwz_ofs {
102 SYM_GEN_FW_Z(u_short)
103};
104
105/*
106 * Generates structure interface that contains
107 * bus addresses within script A, B and Z.
108 */
109struct sym_fwa_ba {
110 SYM_GEN_FW_A(u32)
111};
112struct sym_fwb_ba {
113 SYM_GEN_FW_B(u32)
114#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
115 SYM_GEN_B(u32, data_io)
116#endif
117 SYM_GEN_B(u32, start64);
118 SYM_GEN_B(u32, pm_handle);
119};
120struct sym_fwz_ba {
121 SYM_GEN_FW_Z(u32)
122};
123#undef SYM_GEN_A
124#undef SYM_GEN_B
125#undef SYM_GEN_Z
126
127/*
128 * Let cc know about the name of the controller data structure.
129 * We need this for function prototype declarations just below.
130 */
131struct sym_hcb;
132
133/*
134 * Generic structure that defines a firmware.
135 */
136struct sym_fw {
137 char *name; /* Name we want to print out */
138 u32 *a_base; /* Pointer to script A template */
139 int a_size; /* Size of script A */
140 struct sym_fwa_ofs
141 *a_ofs; /* Useful offsets in script A */
142 u32 *b_base; /* Pointer to script B template */
143 int b_size; /* Size of script B */
144 struct sym_fwb_ofs
145 *b_ofs; /* Useful offsets in script B */
146 u32 *z_base; /* Pointer to script Z template */
147 int z_size; /* Size of script Z */
148 struct sym_fwz_ofs
149 *z_ofs; /* Useful offsets in script Z */
150 /* Setup and patch methods for this firmware */
151 void (*setup)(struct sym_hcb *, struct sym_fw *);
152 void (*patch)(struct sym_hcb *);
153};
154
155/*
156 * Macro used to declare a firmware.
157 */
158#define SYM_FW_ENTRY(fw, name) \
159{ \
160 name, \
161 (u32 *) &fw##a_scr, sizeof(fw##a_scr), &fw##a_ofs, \
162 (u32 *) &fw##b_scr, sizeof(fw##b_scr), &fw##b_ofs, \
163 (u32 *) &fw##z_scr, sizeof(fw##z_scr), &fw##z_ofs, \
164 fw##_setup, fw##_patch \
165}
166
167/*
168 * Macros used from the C code to get useful
169 * SCRIPTS bus addresses.
170 */
171#define SCRIPTA_BA(np, label) (np->fwa_bas.label)
172#define SCRIPTB_BA(np, label) (np->fwb_bas.label)
173#define SCRIPTZ_BA(np, label) (np->fwz_bas.label)
174
175/*
176 * Macros used by scripts definitions.
177 *
178 * HADDR_1 generates a reference to a field of the controller data.
179 * HADDR_2 generates a reference to a field of the controller data
180 * with offset.
181 * RADDR_1 generates a reference to a script processor register.
182 * RADDR_2 generates a reference to a script processor register
183 * with offset.
184 * PADDR_A generates a reference to another part of script A.
185 * PADDR_B generates a reference to another part of script B.
186 *
187 * SYM_GEN_PADDR_A and SYM_GEN_PADDR_B are used to define respectively
188 * the PADDR_A and PADDR_B macros for each firmware by setting argument
189 * `s' to the name of the corresponding structure.
190 *
191 * SCR_DATA_ZERO is used to allocate a DWORD of data in scripts areas.
192 */
193
194#define RELOC_SOFTC 0x40000000
195#define RELOC_LABEL_A 0x50000000
196#define RELOC_REGISTER 0x60000000
197#define RELOC_LABEL_B 0x80000000
198#define RELOC_MASK 0xf0000000
199
200#define HADDR_1(label) (RELOC_SOFTC | offsetof(struct sym_hcb, label))
201#define HADDR_2(label,ofs) (RELOC_SOFTC | \
202 (offsetof(struct sym_hcb, label)+(ofs)))
203#define RADDR_1(label) (RELOC_REGISTER | REG(label))
204#define RADDR_2(label,ofs) (RELOC_REGISTER | ((REG(label))+(ofs)))
205
206#define SYM_GEN_PADDR_A(s, label) (RELOC_LABEL_A | offsetof(s, label))
207#define SYM_GEN_PADDR_B(s, label) (RELOC_LABEL_B | offsetof(s, label))
208
209#define SCR_DATA_ZERO 0xf00ff00f
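These RELOC_* tokens only exist in the script templates; when a firmware is bound to a controller, the load/patch code examines the top nibble of each operand and rewrites it to a real bus address. A simplified sketch of that relocation step (names are illustrative, and the real routine also has to decode instruction lengths first):

	/* Sketch only: relocate one already-decoded SCRIPTS operand. */
	static u32 sym_demo_reloc(u32 old, u32 hcb_ba, u32 scripta_ba,
				  u32 scriptb_ba, u32 mmio_ba)
	{
		switch (old & RELOC_MASK) {
		case RELOC_SOFTC:	/* offset into struct sym_hcb */
			return (old & ~RELOC_MASK) + hcb_ba;
		case RELOC_LABEL_A:	/* offset into script A       */
			return (old & ~RELOC_MASK) + scripta_ba;
		case RELOC_LABEL_B:	/* offset into script B       */
			return (old & ~RELOC_MASK) + scriptb_ba;
		case RELOC_REGISTER:	/* chip register, MMIO based  */
			return (old & ~RELOC_MASK) + mmio_ba;
		default:		/* plain data, left untouched */
			return old;
		}
	}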
210
211#endif /* SYM_FW_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw1.h b/drivers/scsi/sym53c8xx_2/sym_fw1.h
new file mode 100644
index 000000000000..cdd92d82f4b2
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw1.h
@@ -0,0 +1,1838 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40/*
41 * Scripts for SYMBIOS-Processor
42 *
43 * We have to know the offsets of all labels before we reach
44 * them (for forward jumps). Therefore we declare a struct
45 * here. If you make changes inside the script,
46 *
47 * DON'T FORGET TO CHANGE THE LENGTHS HERE!
48 */
49
50/*
51 * Script fragments which are loaded into the on-chip RAM
52 * of 825A, 875, 876, 895, 895A, 896 and 1010 chips.
53 * Must not exceed 4K bytes.
54 */
55struct SYM_FWA_SCR {
56 u32 start [ 11];
57 u32 getjob_begin [ 4];
58 u32 _sms_a10 [ 5];
59 u32 getjob_end [ 4];
60 u32 _sms_a20 [ 4];
61#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
62 u32 select [ 8];
63#else
64 u32 select [ 6];
65#endif
66 u32 _sms_a30 [ 5];
67 u32 wf_sel_done [ 2];
68 u32 send_ident [ 2];
69#ifdef SYM_CONF_IARB_SUPPORT
70 u32 select2 [ 8];
71#else
72 u32 select2 [ 2];
73#endif
74 u32 command [ 2];
75 u32 dispatch [ 28];
76 u32 sel_no_cmd [ 10];
77 u32 init [ 6];
78 u32 clrack [ 4];
79 u32 datai_done [ 11];
80 u32 datai_done_wsr [ 20];
81 u32 datao_done [ 11];
82 u32 datao_done_wss [ 6];
83 u32 datai_phase [ 5];
84 u32 datao_phase [ 5];
85 u32 msg_in [ 2];
86 u32 msg_in2 [ 10];
87#ifdef SYM_CONF_IARB_SUPPORT
88 u32 status [ 14];
89#else
90 u32 status [ 10];
91#endif
92 u32 complete [ 6];
93 u32 complete2 [ 8];
94 u32 _sms_a40 [ 12];
95 u32 done [ 5];
96 u32 _sms_a50 [ 5];
97 u32 _sms_a60 [ 2];
98 u32 done_end [ 4];
99 u32 complete_error [ 5];
100 u32 save_dp [ 11];
101 u32 restore_dp [ 7];
102 u32 disconnect [ 11];
103 u32 disconnect2 [ 5];
104 u32 _sms_a65 [ 3];
105#ifdef SYM_CONF_IARB_SUPPORT
106 u32 idle [ 4];
107#else
108 u32 idle [ 2];
109#endif
110#ifdef SYM_CONF_IARB_SUPPORT
111 u32 ungetjob [ 7];
112#else
113 u32 ungetjob [ 5];
114#endif
115#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
116 u32 reselect [ 4];
117#else
118 u32 reselect [ 2];
119#endif
120 u32 reselected [ 19];
121 u32 _sms_a70 [ 6];
122 u32 _sms_a80 [ 4];
123 u32 reselected1 [ 25];
124 u32 _sms_a90 [ 4];
125 u32 resel_lun0 [ 7];
126 u32 _sms_a100 [ 4];
127 u32 resel_tag [ 8];
128#if SYM_CONF_MAX_TASK*4 > 512
129 u32 _sms_a110 [ 23];
130#elif SYM_CONF_MAX_TASK*4 > 256
131 u32 _sms_a110 [ 17];
132#else
133 u32 _sms_a110 [ 13];
134#endif
135 u32 _sms_a120 [ 2];
136 u32 resel_go [ 4];
137 u32 _sms_a130 [ 7];
138 u32 resel_dsa [ 2];
139 u32 resel_dsa1 [ 4];
140 u32 _sms_a140 [ 7];
141 u32 resel_no_tag [ 4];
142 u32 _sms_a145 [ 7];
143 u32 data_in [SYM_CONF_MAX_SG * 2];
144 u32 data_in2 [ 4];
145 u32 data_out [SYM_CONF_MAX_SG * 2];
146 u32 data_out2 [ 4];
147 u32 pm0_data [ 12];
148 u32 pm0_data_out [ 6];
149 u32 pm0_data_end [ 7];
150 u32 pm_data_end [ 4];
151 u32 _sms_a150 [ 4];
152 u32 pm1_data [ 12];
153 u32 pm1_data_out [ 6];
154 u32 pm1_data_end [ 9];
155};
156
157/*
158 * Script fragments which stay in main memory for all chips
159 * except for chips that support 8K on-chip RAM.
160 */
161struct SYM_FWB_SCR {
162 u32 no_data [ 2];
163#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
164 u32 sel_for_abort [ 18];
165#else
166 u32 sel_for_abort [ 16];
167#endif
168 u32 sel_for_abort_1 [ 2];
169 u32 msg_in_etc [ 12];
170 u32 msg_received [ 5];
171 u32 msg_weird_seen [ 5];
172 u32 msg_extended [ 17];
173 u32 _sms_b10 [ 4];
174 u32 msg_bad [ 6];
175 u32 msg_weird [ 4];
176 u32 msg_weird1 [ 8];
177 u32 wdtr_resp [ 6];
178 u32 send_wdtr [ 4];
179 u32 sdtr_resp [ 6];
180 u32 send_sdtr [ 4];
181 u32 ppr_resp [ 6];
182 u32 send_ppr [ 4];
183 u32 nego_bad_phase [ 4];
184 u32 msg_out [ 4];
185 u32 msg_out_done [ 4];
186 u32 data_ovrun [ 3];
187 u32 data_ovrun1 [ 22];
188 u32 data_ovrun2 [ 8];
189 u32 abort_resel [ 16];
190 u32 resend_ident [ 4];
191 u32 ident_break [ 4];
192 u32 ident_break_atn [ 4];
193 u32 sdata_in [ 6];
194 u32 resel_bad_lun [ 4];
195 u32 bad_i_t_l [ 4];
196 u32 bad_i_t_l_q [ 4];
197 u32 bad_status [ 7];
198 u32 wsr_ma_helper [ 4];
199
200#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
201 /* Unknown direction handling */
202 u32 data_io [ 2];
203 u32 data_io_com [ 8];
204 u32 data_io_out [ 7];
205#endif
206 /* Data area */
207 u32 zero [ 1];
208 u32 scratch [ 1];
209 u32 scratch1 [ 1];
210 u32 prev_done [ 1];
211 u32 done_pos [ 1];
212 u32 nextjob [ 1];
213 u32 startpos [ 1];
214 u32 targtbl [ 1];
215};
216
217/*
218 * Script fragments used at initialisation.
219 * These only run from main memory.
220 */
221struct SYM_FWZ_SCR {
222 u32 snooptest [ 9];
223 u32 snoopend [ 2];
224};
225
226static struct SYM_FWA_SCR SYM_FWA_SCR = {
227/*--------------------------< START >----------------------------*/ {
228 /*
229 * Switch the LED on.
230 * Will be patched with a NO_OP if LED
231 * not needed or not desired.
232 */
233 SCR_REG_REG (gpreg, SCR_AND, 0xfe),
234 0,
235 /*
236 * Clear SIGP.
237 */
238 SCR_FROM_REG (ctest2),
239 0,
240 /*
241 * Stop here if the C code wants to perform
242 * some error recovery procedure manually.
243 * (Indicate this by setting SEM in ISTAT)
244 */
245 SCR_FROM_REG (istat),
246 0,
247 /*
248 * Report to the C code the next position in
249 * the start queue the SCRIPTS will schedule.
250 * The C code must not change SCRATCHA.
251 */
252 SCR_COPY (4),
253 PADDR_B (startpos),
254 RADDR_1 (scratcha),
255 SCR_INT ^ IFTRUE (MASK (SEM, SEM)),
256 SIR_SCRIPT_STOPPED,
257 /*
258 * Start the next job.
259 *
260 * @DSA = start point for this job.
261 * SCRATCHA = address of this job in the start queue.
262 *
263 * We will restore startpos with SCRATCHA if we fail the
264 * arbitration or if it is the idle job.
265 *
266 * The below GETJOB_BEGIN to GETJOB_END section of SCRIPTS
267 * is a critical path. If it is partially executed, it then
268 * may happen that the job address is not yet in the DSA
269 * and the next queue position points to the next JOB.
270 */
271}/*-------------------------< GETJOB_BEGIN >---------------------*/,{
272 /*
273 * Copy to a fixed location both the next STARTPOS
274 * and the current JOB address, using self modifying
275 * SCRIPTS.
276 */
277 SCR_COPY (4),
278 RADDR_1 (scratcha),
279 PADDR_A (_sms_a10),
280 SCR_COPY (8),
281}/*-------------------------< _SMS_A10 >-------------------------*/,{
282 0,
283 PADDR_B (nextjob),
284 /*
285 * Move the start address to TEMP using self-
286 * modifying SCRIPTS and jump indirectly to
287 * that address.
288 */
289 SCR_COPY (4),
290 PADDR_B (nextjob),
291 RADDR_1 (dsa),
292}/*-------------------------< GETJOB_END >-----------------------*/,{
293 SCR_COPY (4),
294 RADDR_1 (dsa),
295 PADDR_A (_sms_a20),
296 SCR_COPY (4),
297}/*-------------------------< _SMS_A20 >-------------------------*/,{
298 0,
299 RADDR_1 (temp),
300 SCR_RETURN,
301 0,
302}/*-------------------------< SELECT >---------------------------*/,{
303 /*
304 * DSA contains the address of a scheduled
305 * data structure.
306 *
307 * SCRATCHA contains the address of the start queue
308 * entry which points to the next job.
309 *
310 * Set Initiator mode.
311 *
312 * (Target mode is left as an exercise for the reader)
313 */
314#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
315 SCR_CLR (SCR_TRG),
316 0,
317#endif
318 /*
319 * And try to select this target.
320 */
321 SCR_SEL_TBL_ATN ^ offsetof (struct sym_dsb, select),
322 PADDR_A (ungetjob),
323 /*
324 * Now there are 4 possibilities:
325 *
326 * (1) The chip loses arbitration.
327 * This is ok, because it will try again,
328 * when the bus becomes idle.
329 * (But beware of the timeout function!)
330 *
331 * (2) The chip is reselected.
332 * Then the script processor takes the jump
333 * to the RESELECT label.
334 *
335 * (3) The chip wins arbitration.
336 * Then it will execute SCRIPTS instructions until
337 * the next instruction that checks SCSI phase.
338 * Then it will stop and wait for selection to be
339 * complete or selection time-out to occur.
340 *
341 * After having won arbitration, the SCRIPTS
342 * processor is able to execute instructions while
343 * the SCSI core is performing SCSI selection.
344 */
345
346 /*
347 * Copy the CCB header to a fixed location
348 * in the HCB using self-modifying SCRIPTS.
349 */
350 SCR_COPY (4),
351 RADDR_1 (dsa),
352 PADDR_A (_sms_a30),
353 SCR_COPY (sizeof(struct sym_ccbh)),
354}/*-------------------------< _SMS_A30 >-------------------------*/,{
355 0,
356 HADDR_1 (ccb_head),
357 /*
358 * Initialize the status register
359 */
360 SCR_COPY (4),
361 HADDR_1 (ccb_head.status),
362 RADDR_1 (scr0),
363}/*-------------------------< WF_SEL_DONE >----------------------*/,{
364 SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
365 SIR_SEL_ATN_NO_MSG_OUT,
366}/*-------------------------< SEND_IDENT >-----------------------*/,{
367 /*
368 * Selection complete.
369 * Send the IDENTIFY and possibly the TAG message
370 * and negotiation message if present.
371 */
372 SCR_MOVE_TBL ^ SCR_MSG_OUT,
373 offsetof (struct sym_dsb, smsg),
374}/*-------------------------< SELECT2 >--------------------------*/,{
375#ifdef SYM_CONF_IARB_SUPPORT
376 /*
377 * Set IMMEDIATE ARBITRATION if we have been given
378 * a hint to do so. (Some job to do after this one).
379 */
380 SCR_FROM_REG (HF_REG),
381 0,
382 SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)),
383 8,
384 SCR_REG_REG (scntl1, SCR_OR, IARB),
385 0,
386#endif
387 /*
388 * Anticipate the COMMAND phase.
389 * This is the PHASE we expect at this point.
390 */
391 SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)),
392 PADDR_A (sel_no_cmd),
393}/*-------------------------< COMMAND >--------------------------*/,{
394 /*
395 * ... and send the command
396 */
397 SCR_MOVE_TBL ^ SCR_COMMAND,
398 offsetof (struct sym_dsb, cmd),
399}/*-------------------------< DISPATCH >-------------------------*/,{
400 /*
401 * MSG_IN is the only phase that shall be
402 * entered at least once for each (re)selection.
403 * So we test it first.
404 */
405 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
406 PADDR_A (msg_in),
407 SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)),
408 PADDR_A (datao_phase),
409 SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)),
410 PADDR_A (datai_phase),
411 SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
412 PADDR_A (status),
413 SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
414 PADDR_A (command),
415 SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
416 PADDR_B (msg_out),
417 /*
418 * Discard as many illegal phases as
419 * required and tell the C code about it.
420 */
421 SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)),
422 16,
423 SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
424 HADDR_1 (scratch),
425 SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)),
426 -16,
427 SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)),
428 16,
429 SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
430 HADDR_1 (scratch),
431 SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)),
432 -16,
433 SCR_INT,
434 SIR_BAD_PHASE,
435 SCR_JUMP,
436 PADDR_A (dispatch),
437}/*-------------------------< SEL_NO_CMD >-----------------------*/,{
438 /*
439 * The target does not switch to command
440 * phase after IDENTIFY has been sent.
441 *
442 * If it stays in MSG OUT phase, send it
443 * the IDENTIFY again.
444 */
445 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
446 PADDR_B (resend_ident),
447 /*
448 * If the target does not switch to MSG IN phase
449 * and we sent a negotiation, assert the
450 * failure immediately.
451 */
452 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
453 PADDR_A (dispatch),
454 SCR_FROM_REG (HS_REG),
455 0,
456 SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
457 SIR_NEGO_FAILED,
458 /*
459 * Jump to dispatcher.
460 */
461 SCR_JUMP,
462 PADDR_A (dispatch),
463}/*-------------------------< INIT >-----------------------------*/,{
464 /*
465 * Wait for the SCSI RESET signal to be
466 * inactive before restarting operations,
467 * since the chip may hang on SEL_ATN
468 * if SCSI RESET is active.
469 */
470 SCR_FROM_REG (sstat0),
471 0,
472 SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)),
473 -16,
474 SCR_JUMP,
475 PADDR_A (start),
476}/*-------------------------< CLRACK >---------------------------*/,{
477 /*
478 * Terminate possible pending message phase.
479 */
480 SCR_CLR (SCR_ACK),
481 0,
482 SCR_JUMP,
483 PADDR_A (dispatch),
484}/*-------------------------< DATAI_DONE >-----------------------*/,{
485 /*
486 * Save current pointer to LASTP.
487 */
488 SCR_COPY (4),
489 RADDR_1 (temp),
490 HADDR_1 (ccb_head.lastp),
491 /*
492 * If the SWIDE is not full, jump to dispatcher.
493 * We anticipate a STATUS phase.
494 */
495 SCR_FROM_REG (scntl2),
496 0,
497 SCR_JUMP ^ IFTRUE (MASK (WSR, WSR)),
498 PADDR_A (datai_done_wsr),
499 SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
500 PADDR_A (status),
501 SCR_JUMP,
502 PADDR_A (dispatch),
503}/*-------------------------< DATAI_DONE_WSR >-------------------*/,{
504 /*
505 * The SWIDE is full.
506 * Clear this condition.
507 */
508 SCR_REG_REG (scntl2, SCR_OR, WSR),
509 0,
510 /*
511 * We are expecting an IGNORE RESIDUE message
512 * from the device, otherwise we are in a data
513 * overrun condition. Check against MSG_IN phase.
514 */
515 SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
516 SIR_SWIDE_OVERRUN,
517 SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
518 PADDR_A (dispatch),
519 /*
520 * We are in MSG_IN phase,
521 * Read the first byte of the message.
522 * If it is not an IGNORE RESIDUE message,
523 * signal overrun and jump to message
524 * processing.
525 */
526 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
527 HADDR_1 (msgin[0]),
528 SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)),
529 SIR_SWIDE_OVERRUN,
530 SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)),
531 PADDR_A (msg_in2),
532 /*
533 * We got the message we expected.
534 * Read the 2nd byte, and jump to dispatcher.
535 */
536 SCR_CLR (SCR_ACK),
537 0,
538 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
539 HADDR_1 (msgin[1]),
540 SCR_CLR (SCR_ACK),
541 0,
542 SCR_JUMP,
543 PADDR_A (dispatch),
544}/*-------------------------< DATAO_DONE >-----------------------*/,{
545 /*
546 * Save current pointer to LASTP.
547 */
548 SCR_COPY (4),
549 RADDR_1 (temp),
550 HADDR_1 (ccb_head.lastp),
551 /*
552 * If the SODL is not full jump to dispatcher.
553 * We anticipate a STATUS phase.
554 */
555 SCR_FROM_REG (scntl2),
556 0,
557 SCR_JUMP ^ IFTRUE (MASK (WSS, WSS)),
558 PADDR_A (datao_done_wss),
559 SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
560 PADDR_A (status),
561 SCR_JUMP,
562 PADDR_A (dispatch),
563}/*-------------------------< DATAO_DONE_WSS >-------------------*/,{
564 /*
565 * The SODL is full, clear this condition.
566 */
567 SCR_REG_REG (scntl2, SCR_OR, WSS),
568 0,
569 /*
570 * And signal a DATA UNDERRUN condition
571 * to the C code.
572 */
573 SCR_INT,
574 SIR_SODL_UNDERRUN,
575 SCR_JUMP,
576 PADDR_A (dispatch),
577}/*-------------------------< DATAI_PHASE >----------------------*/,{
578 /*
579 * Jump to current pointer.
580 */
581 SCR_COPY (4),
582 HADDR_1 (ccb_head.lastp),
583 RADDR_1 (temp),
584 SCR_RETURN,
585 0,
586}/*-------------------------< DATAO_PHASE >----------------------*/,{
587 /*
588 * Jump to current pointer.
589 */
590 SCR_COPY (4),
591 HADDR_1 (ccb_head.lastp),
592 RADDR_1 (temp),
593 SCR_RETURN,
594 0,
595}/*-------------------------< MSG_IN >---------------------------*/,{
596 /*
597 * Get the first byte of the message.
598 *
599 * The script processor doesn't negate the
600 * ACK signal after this transfer.
601 */
602 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
603 HADDR_1 (msgin[0]),
604}/*-------------------------< MSG_IN2 >--------------------------*/,{
605 /*
606 * Check first against 1 byte messages
607 * that we handle from SCRIPTS.
608 */
609 SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
610 PADDR_A (complete),
611 SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
612 PADDR_A (disconnect),
613 SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
614 PADDR_A (save_dp),
615 SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
616 PADDR_A (restore_dp),
617 /*
618 * We handle all other messages from the
619 * C code, so no need to waste on-chip RAM
620 * for those.
621 */
622 SCR_JUMP,
623 PADDR_B (msg_in_etc),
624}/*-------------------------< STATUS >---------------------------*/,{
625 /*
626 * get the status
627 */
628 SCR_MOVE_ABS (1) ^ SCR_STATUS,
629 HADDR_1 (scratch),
630#ifdef SYM_CONF_IARB_SUPPORT
631 /*
632 * If STATUS is not GOOD, clear IMMEDIATE ARBITRATION,
633 * since we may have to tamper with the start queue from
634 * the C code.
635 */
636 SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)),
637 8,
638 SCR_REG_REG (scntl1, SCR_AND, ~IARB),
639 0,
640#endif
641 /*
642 * save status to scsi_status.
643 * mark as complete.
644 */
645 SCR_TO_REG (SS_REG),
646 0,
647 SCR_LOAD_REG (HS_REG, HS_COMPLETE),
648 0,
649 /*
650 * Anticipate the MESSAGE PHASE for
651 * the TASK COMPLETE message.
652 */
653 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
654 PADDR_A (msg_in),
655 SCR_JUMP,
656 PADDR_A (dispatch),
657}/*-------------------------< COMPLETE >-------------------------*/,{
658 /*
659 * Complete message.
660 *
661 * When we terminate the cycle by clearing ACK,
662 * the target may disconnect immediately.
663 *
664 * We don't want to be told of an "unexpected disconnect",
665 * so we disable this feature.
666 */
667 SCR_REG_REG (scntl2, SCR_AND, 0x7f),
668 0,
669 /*
670 * Terminate cycle ...
671 */
672 SCR_CLR (SCR_ACK|SCR_ATN),
673 0,
674 /*
675 * ... and wait for the disconnect.
676 */
677 SCR_WAIT_DISC,
678 0,
679}/*-------------------------< COMPLETE2 >------------------------*/,{
680 /*
681 * Save host status.
682 */
683 SCR_COPY (4),
684 RADDR_1 (scr0),
685 HADDR_1 (ccb_head.status),
686 /*
687 * Move back the CCB header using self-modifying
688 * SCRIPTS.
689 */
690 SCR_COPY (4),
691 RADDR_1 (dsa),
692 PADDR_A (_sms_a40),
693 SCR_COPY (sizeof(struct sym_ccbh)),
694 HADDR_1 (ccb_head),
695}/*-------------------------< _SMS_A40 >-------------------------*/,{
696 0,
697 /*
698 * Some bridges may reorder DMA writes to memory.
699 * We do not want the CPU to deal with completions
700 * without all the posted writes having been flushed
701 * to memory. This DUMMY READ should flush posted
702 * buffers prior to the CPU having to deal with
703 * completions.
704 */
705 SCR_COPY (4), /* DUMMY READ */
706 HADDR_1 (ccb_head.status),
707 RADDR_1 (scr0),
708 /*
709 * If command resulted in not GOOD status,
710 * call the C code if needed.
711 */
712 SCR_FROM_REG (SS_REG),
713 0,
714 SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
715 PADDR_B (bad_status),
716 /*
717 * If we performed an auto-sense, call
718 * the C code to synchronize task aborts
719 * with UNIT ATTENTION conditions.
720 */
721 SCR_FROM_REG (HF_REG),
722 0,
723 SCR_JUMP ^ IFFALSE (MASK (0 ,(HF_SENSE|HF_EXT_ERR))),
724 PADDR_A (complete_error),
725}/*-------------------------< DONE >-----------------------------*/,{
726 /*
727 * Copy the DSA to the DONE QUEUE and
728 * signal completion to the host.
729 * If we are interrupted between DONE
730 * and DONE_END, we must reset, otherwise
731 * the completed CCB may be lost.
732 */
733 SCR_COPY (4),
734 PADDR_B (done_pos),
735 PADDR_A (_sms_a50),
736 SCR_COPY (4),
737 RADDR_1 (dsa),
738}/*-------------------------< _SMS_A50 >-------------------------*/,{
739 0,
740 SCR_COPY (4),
741 PADDR_B (done_pos),
742 PADDR_A (_sms_a60),
743 /*
744 * The instruction below reads the DONE QUEUE next
745 * free position from memory.
746 * In addition it ensures that all PCI posted writes
747 * are flushed and so the DSA value of the done
748 * CCB is visible to the CPU before INTFLY is raised.
749 */
750 SCR_COPY (8),
751}/*-------------------------< _SMS_A60 >-------------------------*/,{
752 0,
753 PADDR_B (prev_done),
754}/*-------------------------< DONE_END >-------------------------*/,{
755 SCR_INT_FLY,
756 0,
757 SCR_JUMP,
758 PADDR_A (start),
759}/*-------------------------< COMPLETE_ERROR >-------------------*/,{
760 SCR_COPY (4),
761 PADDR_B (startpos),
762 RADDR_1 (scratcha),
763 SCR_INT,
764 SIR_COMPLETE_ERROR,
765}/*-------------------------< SAVE_DP >--------------------------*/,{
766 /*
767 * Clear ACK immediately.
768 * No need to delay it.
769 */
770 SCR_CLR (SCR_ACK),
771 0,
772 /*
773 * Keep track that we received a SAVE DP, so
774 * we will switch to the other PM context
775 * on the next PM since the DP may point
776 * to the current PM context.
777 */
778 SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
779 0,
780 /*
781 * SAVE_DP message:
782 * Copy LASTP to SAVEP.
783 */
784 SCR_COPY (4),
785 HADDR_1 (ccb_head.lastp),
786 HADDR_1 (ccb_head.savep),
787 /*
788 * Anticipate the MESSAGE PHASE for
789 * the DISCONNECT message.
790 */
791 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
792 PADDR_A (msg_in),
793 SCR_JUMP,
794 PADDR_A (dispatch),
795}/*-------------------------< RESTORE_DP >-----------------------*/,{
796 /*
797 * Clear ACK immediately.
798 * No need to delay it.
799 */
800 SCR_CLR (SCR_ACK),
801 0,
802 /*
803 * Copy SAVEP to LASTP.
804 */
805 SCR_COPY (4),
806 HADDR_1 (ccb_head.savep),
807 HADDR_1 (ccb_head.lastp),
808 SCR_JUMP,
809 PADDR_A (dispatch),
810}/*-------------------------< DISCONNECT >-----------------------*/,{
811 /*
812 * DISCONNECTing ...
813 *
814 * disable the "unexpected disconnect" feature,
815 * and remove the ACK signal.
816 */
817 SCR_REG_REG (scntl2, SCR_AND, 0x7f),
818 0,
819 SCR_CLR (SCR_ACK|SCR_ATN),
820 0,
821 /*
822 * Wait for the disconnect.
823 */
824 SCR_WAIT_DISC,
825 0,
826 /*
827 * Status is: DISCONNECTED.
828 */
829 SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
830 0,
831 /*
832 * Save host status.
833 */
834 SCR_COPY (4),
835 RADDR_1 (scr0),
836 HADDR_1 (ccb_head.status),
837}/*-------------------------< DISCONNECT2 >----------------------*/,{
838 /*
839 * Move back the CCB header using self-modifying
840 * SCRIPTS.
841 */
842 SCR_COPY (4),
843 RADDR_1 (dsa),
844 PADDR_A (_sms_a65),
845 SCR_COPY (sizeof(struct sym_ccbh)),
846 HADDR_1 (ccb_head),
847}/*-------------------------< _SMS_A65 >-------------------------*/,{
848 0,
849 SCR_JUMP,
850 PADDR_A (start),
851}/*-------------------------< IDLE >-----------------------------*/,{
852 /*
853 * Nothing to do?
854 * Switch the LED off and wait for reselect.
855 * Will be patched with a NO_OP if LED
856 * not needed or not desired.
857 */
858 SCR_REG_REG (gpreg, SCR_OR, 0x01),
859 0,
860#ifdef SYM_CONF_IARB_SUPPORT
861 SCR_JUMPR,
862 8,
863#endif
864}/*-------------------------< UNGETJOB >-------------------------*/,{
865#ifdef SYM_CONF_IARB_SUPPORT
866 /*
867 * Set IMMEDIATE ARBITRATION for the next time.
868 * This will give us a better chance to win arbitration
869 * for the job we just wanted to do.
870 */
871 SCR_REG_REG (scntl1, SCR_OR, IARB),
872 0,
873#endif
874 /*
875 * We are not able to restart the SCRIPTS if we are
876 * interrupted and these instructions haven't all
877 * been executed. BTW, this is very unlikely to
878 * happen, but we check that from the C code.
879 */
880 SCR_LOAD_REG (dsa, 0xff),
881 0,
882 SCR_COPY (4),
883 RADDR_1 (scratcha),
884 PADDR_B (startpos),
885}/*-------------------------< RESELECT >-------------------------*/,{
886#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
887 /*
888 * Make sure we are in initiator mode.
889 */
890 SCR_CLR (SCR_TRG),
891 0,
892#endif
893 /*
894 * Sleep waiting for a reselection.
895 */
896 SCR_WAIT_RESEL,
897 PADDR_A(start),
898}/*-------------------------< RESELECTED >-----------------------*/,{
899 /*
900 * Switch the LED on.
901 * Will be patched with a NO_OP if LED
902 * not needed or not desired.
903 */
904 SCR_REG_REG (gpreg, SCR_AND, 0xfe),
905 0,
906 /*
907 * load the target id into the sdid
908 */
909 SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
910 0,
911 SCR_TO_REG (sdid),
912 0,
913 /*
914 * Load the target control block address
915 */
916 SCR_COPY (4),
917 PADDR_B (targtbl),
918 RADDR_1 (dsa),
919 SCR_SFBR_REG (dsa, SCR_SHL, 0),
920 0,
921 SCR_REG_REG (dsa, SCR_SHL, 0),
922 0,
923 SCR_REG_REG (dsa, SCR_AND, 0x3c),
924 0,
925 SCR_COPY (4),
926 RADDR_1 (dsa),
927 PADDR_A (_sms_a70),
928 SCR_COPY (4),
929}/*-------------------------< _SMS_A70 >-------------------------*/,{
930 0,
931 RADDR_1 (dsa),
932 /*
933 * Copy the TCB header to a fixed place in
934 * the HCB.
935 */
936 SCR_COPY (4),
937 RADDR_1 (dsa),
938 PADDR_A (_sms_a80),
939 SCR_COPY (sizeof(struct sym_tcbh)),
940}/*-------------------------< _SMS_A80 >-------------------------*/,{
941 0,
942 HADDR_1 (tcb_head),
943 /*
944 * We expect MESSAGE IN phase.
945 * If not, get help from the C code.
946 */
947 SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
948 SIR_RESEL_NO_MSG_IN,
949}/*-------------------------< RESELECTED1 >----------------------*/,{
950 /*
951 * Load the synchronous transfer registers.
952 */
953 SCR_COPY (1),
954 HADDR_1 (tcb_head.wval),
955 RADDR_1 (scntl3),
956 SCR_COPY (1),
957 HADDR_1 (tcb_head.sval),
958 RADDR_1 (sxfer),
959 /*
960 * Get the IDENTIFY message.
961 */
962 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
963 HADDR_1 (msgin),
964 /*
965 * If IDENTIFY LUN #0, use a faster path
966 * to find the LCB structure.
967 */
968 SCR_JUMP ^ IFTRUE (MASK (0x80, 0xbf)),
969 PADDR_A (resel_lun0),
970 /*
971 * If the message isn't an IDENTIFY,
972 * tell the C code about it.
973 */
974 SCR_INT ^ IFFALSE (MASK (0x80, 0x80)),
975 SIR_RESEL_NO_IDENTIFY,
976 /*
977 * It is an IDENTIFY message,
978 * Load the LUN control block address.
979 */
980 SCR_COPY (4),
981 HADDR_1 (tcb_head.luntbl_sa),
982 RADDR_1 (dsa),
983 SCR_SFBR_REG (dsa, SCR_SHL, 0),
984 0,
985 SCR_REG_REG (dsa, SCR_SHL, 0),
986 0,
987 SCR_REG_REG (dsa, SCR_AND, 0xfc),
988 0,
989 SCR_COPY (4),
990 RADDR_1 (dsa),
991 PADDR_A (_sms_a90),
992 SCR_COPY (4),
993}/*-------------------------< _SMS_A90 >-------------------------*/,{
994 0,
995 RADDR_1 (dsa),
996 SCR_JUMPR,
997 12,
998}/*-------------------------< RESEL_LUN0 >-----------------------*/,{
999 /*
1000 * LUN 0 special case (but usual one :))
1001 */
1002 SCR_COPY (4),
1003 HADDR_1 (tcb_head.lun0_sa),
1004 RADDR_1 (dsa),
1005 /*
1006 * Jump indirectly to the reselect action for this LUN.
1007 * (lcb.head.resel_sa assumed at offset zero of lcb).
1008 */
1009 SCR_COPY (4),
1010 RADDR_1 (dsa),
1011 PADDR_A (_sms_a100),
1012 SCR_COPY (4),
1013}/*-------------------------< _SMS_A100 >------------------------*/,{
1014 0,
1015 RADDR_1 (temp),
1016 SCR_RETURN,
1017 0,
1018 /* In normal situations, we jump to RESEL_TAG or RESEL_NO_TAG */
1019}/*-------------------------< RESEL_TAG >------------------------*/,{
1020 /*
1021 * ACK the IDENTIFY previously received.
1022 */
1023 SCR_CLR (SCR_ACK),
1024 0,
1025 /*
1026 * It shall be a tagged command.
1027 * Read SIMPLE+TAG.
1028 * The C code will deal with errors.
1029 * Aggressive optimization, isn't it? :)
1030 */
1031 SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
1032 HADDR_1 (msgin),
1033 /*
1034 * Copy the LCB header to a fixed place in
1035 * the HCB using self-modifying SCRIPTS.
1036 */
1037 SCR_COPY (4),
1038 RADDR_1 (dsa),
1039 PADDR_A (_sms_a110),
1040 SCR_COPY (sizeof(struct sym_lcbh)),
1041}/*-------------------------< _SMS_A110 >------------------------*/,{
1042 0,
1043 HADDR_1 (lcb_head),
1044 /*
1045 * Load the pointer to the tagged task
1046 * table for this LUN.
1047 */
1048 SCR_COPY (4),
1049 HADDR_1 (lcb_head.itlq_tbl_sa),
1050 RADDR_1 (dsa),
1051 /*
1052 * The SIDL still contains the TAG value.
1053 * Aggressive optimization, isn't it? :):)
1054 */
1055 SCR_REG_SFBR (sidl, SCR_SHL, 0),
1056 0,
1057#if SYM_CONF_MAX_TASK*4 > 512
1058 SCR_JUMPR ^ IFFALSE (CARRYSET),
1059 8,
1060 SCR_REG_REG (dsa1, SCR_OR, 2),
1061 0,
1062 SCR_REG_REG (sfbr, SCR_SHL, 0),
1063 0,
1064 SCR_JUMPR ^ IFFALSE (CARRYSET),
1065 8,
1066 SCR_REG_REG (dsa1, SCR_OR, 1),
1067 0,
1068#elif SYM_CONF_MAX_TASK*4 > 256
1069 SCR_JUMPR ^ IFFALSE (CARRYSET),
1070 8,
1071 SCR_REG_REG (dsa1, SCR_OR, 1),
1072 0,
1073#endif
1074 /*
1075 * Retrieve the DSA of this task.
1076 * JUMP indirectly to the restart point of the CCB.
1077 */
1078 SCR_SFBR_REG (dsa, SCR_AND, 0xfc),
1079 0,
1080 SCR_COPY (4),
1081 RADDR_1 (dsa),
1082 PADDR_A (_sms_a120),
1083 SCR_COPY (4),
1084}/*-------------------------< _SMS_A120 >------------------------*/,{
1085 0,
1086 RADDR_1 (dsa),
1087}/*-------------------------< RESEL_GO >-------------------------*/,{
1088 SCR_COPY (4),
1089 RADDR_1 (dsa),
1090 PADDR_A (_sms_a130),
1091 /*
1092 * Move 'ccb.phys.head.go' action to
1093 * scratch/scratch1. So scratch1 will
1094 * contain the 'restart' field of the
1095 * 'go' structure.
1096 */
1097 SCR_COPY (8),
1098}/*-------------------------< _SMS_A130 >------------------------*/,{
1099 0,
1100 PADDR_B (scratch),
1101 SCR_COPY (4),
1102 PADDR_B (scratch1), /* phys.head.go.restart */
1103 RADDR_1 (temp),
1104 SCR_RETURN,
1105 0,
1106 /* In normal situations we branch to RESEL_DSA */
1107}/*-------------------------< RESEL_DSA >------------------------*/,{
1108 /*
1109 * ACK the IDENTIFY or TAG previously received.
1110 */
1111 SCR_CLR (SCR_ACK),
1112 0,
1113}/*-------------------------< RESEL_DSA1 >-----------------------*/,{
1114 /*
1115 * Copy the CCB header to a fixed location
1116 * in the HCB using self-modifying SCRIPTS.
1117 */
1118 SCR_COPY (4),
1119 RADDR_1 (dsa),
1120 PADDR_A (_sms_a140),
1121 SCR_COPY (sizeof(struct sym_ccbh)),
1122}/*-------------------------< _SMS_A140 >------------------------*/,{
1123 0,
1124 HADDR_1 (ccb_head),
1125 /*
1126 * Initialize the status register
1127 */
1128 SCR_COPY (4),
1129 HADDR_1 (ccb_head.status),
1130 RADDR_1 (scr0),
1131 /*
1132 * Jump to dispatcher.
1133 */
1134 SCR_JUMP,
1135 PADDR_A (dispatch),
1136}/*-------------------------< RESEL_NO_TAG >---------------------*/,{
1137 /*
1138 * Copy the LCB header to a fixed place in
1139 * the HCB using self-modifying SCRIPTS.
1140 */
1141 SCR_COPY (4),
1142 RADDR_1 (dsa),
1143 PADDR_A (_sms_a145),
1144 SCR_COPY (sizeof(struct sym_lcbh)),
1145}/*-------------------------< _SMS_A145 >------------------------*/,{
1146 0,
1147 HADDR_1 (lcb_head),
1148 /*
1149 * Load the DSA with the unique ITL task.
1150 */
1151 SCR_COPY (4),
1152 HADDR_1 (lcb_head.itl_task_sa),
1153 RADDR_1 (dsa),
1154 SCR_JUMP,
1155 PADDR_A (resel_go),
1156}/*-------------------------< DATA_IN >--------------------------*/,{
1157/*
1158 * Because the size depends on the
1159 * #define SYM_CONF_MAX_SG parameter,
1160 * it is filled in at runtime (see the fill-loop sketch after this structure).
1161 *
1162 * ##===========< i=0; i<SYM_CONF_MAX_SG >=========
1163 * || SCR_CHMOV_TBL ^ SCR_DATA_IN,
1164 * || offsetof (struct sym_dsb, data[ i]),
1165 * ##==========================================
1166 */
11670
1168}/*-------------------------< DATA_IN2 >-------------------------*/,{
1169 SCR_CALL,
1170 PADDR_A (datai_done),
1171 SCR_JUMP,
1172 PADDR_B (data_ovrun),
1173}/*-------------------------< DATA_OUT >-------------------------*/,{
1174/*
1175 * Because the size depends on the
1176 * #define SYM_CONF_MAX_SG parameter,
1177 * it is filled in at runtime.
1178 *
1179 * ##===========< i=0; i<SYM_CONF_MAX_SG >=========
1180 * || SCR_CHMOV_TBL ^ SCR_DATA_OUT,
1181 * || offsetof (struct sym_dsb, data[ i]),
1182 * ##==========================================
1183 */
11840
1185}/*-------------------------< DATA_OUT2 >------------------------*/,{
1186 SCR_CALL,
1187 PADDR_A (datao_done),
1188 SCR_JUMP,
1189 PADDR_B (data_ovrun),
1190}/*-------------------------< PM0_DATA >-------------------------*/,{
1191 /*
1192 * Read our host flags to SFBR, so we will be able
1193 * to check against the data direction we expect.
1194 */
1195 SCR_FROM_REG (HF_REG),
1196 0,
1197 /*
1198 * Check against actual DATA PHASE.
1199 */
1200 SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
1201 PADDR_A (pm0_data_out),
1202 /*
1203 * Actual phase is DATA IN.
1204 * Check against expected direction.
1205 */
1206 SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
1207 PADDR_B (data_ovrun),
1208 /*
1209 * Keep track that we are moving data from the
1210 * PM0 DATA mini-script.
1211 */
1212 SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
1213 0,
1214 /*
1215 * Move the data to memory.
1216 */
1217 SCR_CHMOV_TBL ^ SCR_DATA_IN,
1218 offsetof (struct sym_ccb, phys.pm0.sg),
1219 SCR_JUMP,
1220 PADDR_A (pm0_data_end),
1221}/*-------------------------< PM0_DATA_OUT >---------------------*/,{
1222 /*
1223 * Actual phase is DATA OUT.
1224 * Check against expected direction.
1225 */
1226 SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
1227 PADDR_B (data_ovrun),
1228 /*
1229 * Keep track that we are moving data from the
1230 * PM0 DATA mini-script.
1231 */
1232 SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
1233 0,
1234 /*
1235 * Move the data from memory.
1236 */
1237 SCR_CHMOV_TBL ^ SCR_DATA_OUT,
1238 offsetof (struct sym_ccb, phys.pm0.sg),
1239}/*-------------------------< PM0_DATA_END >---------------------*/,{
1240 /*
1241 * Clear the flag that indicated we were moving
1242 * data from the PM0 DATA mini-script.
1243 */
1244 SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)),
1245 0,
1246 /*
1247 * Return to the previous DATA script which
1248 * is guaranteed by design (if no bug) to be
1249 * the main DATA script for this transfer.
1250 */
1251 SCR_COPY (4),
1252 RADDR_1 (dsa),
1253 RADDR_1 (scratcha),
1254 SCR_REG_REG (scratcha, SCR_ADD, offsetof (struct sym_ccb,phys.pm0.ret)),
1255 0,
1256}/*-------------------------< PM_DATA_END >----------------------*/,{
1257 SCR_COPY (4),
1258 RADDR_1 (scratcha),
1259 PADDR_A (_sms_a150),
1260 SCR_COPY (4),
1261}/*-------------------------< _SMS_A150 >------------------------*/,{
1262 0,
1263 RADDR_1 (temp),
1264 SCR_RETURN,
1265 0,
1266}/*-------------------------< PM1_DATA >-------------------------*/,{
1267 /*
1268 * Read our host flags to SFBR, so we will be able
1269 * to check against the data direction we expect.
1270 */
1271 SCR_FROM_REG (HF_REG),
1272 0,
1273 /*
1274 * Check against actual DATA PHASE.
1275 */
1276 SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
1277 PADDR_A (pm1_data_out),
1278 /*
1279 * Actual phase is DATA IN.
1280 * Check against expected direction.
1281 */
1282 SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
1283 PADDR_B (data_ovrun),
1284 /*
1285 * Keep track that we are moving data from the
1286 * PM1 DATA mini-script.
1287 */
1288 SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
1289 0,
1290 /*
1291 * Move the data to memory.
1292 */
1293 SCR_CHMOV_TBL ^ SCR_DATA_IN,
1294 offsetof (struct sym_ccb, phys.pm1.sg),
1295 SCR_JUMP,
1296 PADDR_A (pm1_data_end),
1297}/*-------------------------< PM1_DATA_OUT >---------------------*/,{
1298 /*
1299 * Actual phase is DATA OUT.
1300 * Check against expected direction.
1301 */
1302 SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
1303 PADDR_B (data_ovrun),
1304 /*
1305 * Keep track that we are moving data from the
1306 * PM1 DATA mini-script.
1307 */
1308 SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
1309 0,
1310 /*
1311 * Move the data from memory.
1312 */
1313 SCR_CHMOV_TBL ^ SCR_DATA_OUT,
1314 offsetof (struct sym_ccb, phys.pm1.sg),
1315}/*-------------------------< PM1_DATA_END >---------------------*/,{
1316 /*
1317 * Clear the flag that indicated we were moving
1318 * data from the PM1 DATA mini-script.
1319 */
1320 SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)),
1321 0,
1322 /*
1323 * Return to the previous DATA script which
1324 * is guaranteed by design (if no bug) to be
1325 * the main DATA script for this transfer.
1326 */
1327 SCR_COPY (4),
1328 RADDR_1 (dsa),
1329 RADDR_1 (scratcha),
1330 SCR_REG_REG (scratcha, SCR_ADD, offsetof (struct sym_ccb,phys.pm1.ret)),
1331 0,
1332 SCR_JUMP,
1333 PADDR_A (pm_data_end),
1334}/*--------------------------<>----------------------------------*/
1335};
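As the DATA_IN / DATA_OUT comments above indicate, those two areas are not written out by hand; the driver fills them at setup time with SYM_CONF_MAX_SG table-indirect CHMOV entries per direction. A sketch of such a fill loop, under the assumption that a helper of roughly this shape lives in sym_fw.c (the function name here is illustrative):

	/* Sketch: emit SYM_CONF_MAX_SG CHMOV table entries for each direction. */
	static void sym_demo_fill_data(u32 *in, u32 *out)
	{
		int i;

		for (i = 0; i < SYM_CONF_MAX_SG; i++) {
			*in++  = SCR_CHMOV_TBL ^ SCR_DATA_IN;
			*in++  = offsetof(struct sym_dsb, data[i]);
			*out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT;
			*out++ = offsetof(struct sym_dsb, data[i]);
		}
	}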
1336
1337static struct SYM_FWB_SCR SYM_FWB_SCR = {
1338/*-------------------------< NO_DATA >--------------------------*/ {
1339 SCR_JUMP,
1340 PADDR_B (data_ovrun),
1341}/*-------------------------< SEL_FOR_ABORT >--------------------*/,{
1342 /*
1343 * We are jumped here by the C code if we have
1344 * some target to reset or some disconnected
1345 * job to abort. Since error recovery is a serious
1346 * business, we will really reset the SCSI BUS in
1347 * case a SCSI interrupt occurs in this path.
1348 */
1349
1350#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
1351 /*
1352 * Set initiator mode.
1353 */
1354 SCR_CLR (SCR_TRG),
1355 0,
1356#endif
1357 /*
1358 * And try to select this target.
1359 */
1360 SCR_SEL_TBL_ATN ^ offsetof (struct sym_hcb, abrt_sel),
1361 PADDR_A (reselect),
1362 /*
1363 * Wait for the selection to complete or
1364 * the selection to time out.
1365 */
1366 SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
1367 -8,
1368 /*
1369 * Call the C code.
1370 */
1371 SCR_INT,
1372 SIR_TARGET_SELECTED,
1373 /*
1374 * The C code should let us continue here.
1375 * Send the 'kiss of death' message.
1376 * We expect an immediate disconnect once
1377 * the target has eaten the message.
1378 */
1379 SCR_REG_REG (scntl2, SCR_AND, 0x7f),
1380 0,
1381 SCR_MOVE_TBL ^ SCR_MSG_OUT,
1382 offsetof (struct sym_hcb, abrt_tbl),
1383 SCR_CLR (SCR_ACK|SCR_ATN),
1384 0,
1385 SCR_WAIT_DISC,
1386 0,
1387 /*
1388 * Tell the C code that we are done.
1389 */
1390 SCR_INT,
1391 SIR_ABORT_SENT,
1392}/*-------------------------< SEL_FOR_ABORT_1 >------------------*/,{
1393 /*
1394 * Jump at scheduler.
1395 */
1396 SCR_JUMP,
1397 PADDR_A (start),
1398}/*-------------------------< MSG_IN_ETC >-----------------------*/,{
1399 /*
1400 * If it is an EXTENDED (variable size) message,
1401 * handle it.
1402 */
1403 SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
1404 PADDR_B (msg_extended),
1405 /*
1406 * Let the C code handle any other
1407 * 1 byte message.
1408 */
1409 SCR_JUMP ^ IFTRUE (MASK (0x00, 0xf0)),
1410 PADDR_B (msg_received),
1411 SCR_JUMP ^ IFTRUE (MASK (0x10, 0xf0)),
1412 PADDR_B (msg_received),
1413 /*
1414 * We do not handle 2-byte messages from SCRIPTS,
1415 * so let the C code deal with them too.
1416 */
1417 SCR_JUMP ^ IFFALSE (MASK (0x20, 0xf0)),
1418 PADDR_B (msg_weird_seen),
1419 SCR_CLR (SCR_ACK),
1420 0,
1421 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
1422 HADDR_1 (msgin[1]),
1423}/*-------------------------< MSG_RECEIVED >---------------------*/,{
1424 SCR_COPY (4), /* DUMMY READ */
1425 HADDR_1 (scratch),
1426 RADDR_1 (scratcha),
1427 SCR_INT,
1428 SIR_MSG_RECEIVED,
1429}/*-------------------------< MSG_WEIRD_SEEN >-------------------*/,{
1430 SCR_COPY (4), /* DUMMY READ */
1431 HADDR_1 (scratch),
1432 RADDR_1 (scratcha),
1433 SCR_INT,
1434 SIR_MSG_WEIRD,
1435}/*-------------------------< MSG_EXTENDED >---------------------*/,{
1436 /*
1437 * Clear ACK and get the next byte
1438 * assumed to be the message length.
1439 */
1440 SCR_CLR (SCR_ACK),
1441 0,
1442 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
1443 HADDR_1 (msgin[1]),
1444 /*
1445 * Try to catch some unlikely situations, such as a
1446 * zero length or a length that is too large.
1447 */
1448 SCR_JUMP ^ IFTRUE (DATA (0)),
1449 PADDR_B (msg_weird_seen),
1450 SCR_TO_REG (scratcha),
1451 0,
1452 SCR_REG_REG (sfbr, SCR_ADD, (256-8)),
1453 0,
1454 SCR_JUMP ^ IFTRUE (CARRYSET),
1455 PADDR_B (msg_weird_seen),
1456 /*
1457 * We do not handle extended messages from SCRIPTS.
1458 * Read the amount of data corresponding to the
1459 * message length and call the C code.
1460 */
1461 SCR_COPY (1),
1462 RADDR_1 (scratcha),
1463 PADDR_B (_sms_b10),
1464 SCR_CLR (SCR_ACK),
1465 0,
1466}/*-------------------------< _SMS_B10 >-------------------------*/,{
1467 SCR_MOVE_ABS (0) ^ SCR_MSG_IN,
1468 HADDR_1 (msgin[2]),
1469 SCR_JUMP,
1470 PADDR_B (msg_received),
1471}/*-------------------------< MSG_BAD >--------------------------*/,{
1472 /*
1473 * unimplemented message - reject it.
1474 */
1475 SCR_INT,
1476 SIR_REJECT_TO_SEND,
1477 SCR_SET (SCR_ATN),
1478 0,
1479 SCR_JUMP,
1480 PADDR_A (clrack),
1481}/*-------------------------< MSG_WEIRD >------------------------*/,{
1482 /*
1483 * Weird message received;
1484 * ignore all MSG IN phases and reject it.
1485 */
1486 SCR_INT,
1487 SIR_REJECT_TO_SEND,
1488 SCR_SET (SCR_ATN),
1489 0,
1490}/*-------------------------< MSG_WEIRD1 >-----------------------*/,{
1491 SCR_CLR (SCR_ACK),
1492 0,
1493 SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
1494 PADDR_A (dispatch),
1495 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
1496 HADDR_1 (scratch),
1497 SCR_JUMP,
1498 PADDR_B (msg_weird1),
1499}/*-------------------------< WDTR_RESP >------------------------*/,{
1500 /*
1501 * let the target fetch our answer.
1502 */
1503 SCR_SET (SCR_ATN),
1504 0,
1505 SCR_CLR (SCR_ACK),
1506 0,
1507 SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
1508 PADDR_B (nego_bad_phase),
1509}/*-------------------------< SEND_WDTR >------------------------*/,{
1510 /*
1511 * Send the M_X_WIDE_REQ
1512 */
1513 SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
1514 HADDR_1 (msgout),
1515 SCR_JUMP,
1516 PADDR_B (msg_out_done),
1517}/*-------------------------< SDTR_RESP >------------------------*/,{
1518 /*
1519 * let the target fetch our answer.
1520 */
1521 SCR_SET (SCR_ATN),
1522 0,
1523 SCR_CLR (SCR_ACK),
1524 0,
1525 SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
1526 PADDR_B (nego_bad_phase),
1527}/*-------------------------< SEND_SDTR >------------------------*/,{
1528 /*
1529 * Send the M_X_SYNC_REQ
1530 */
1531 SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
1532 HADDR_1 (msgout),
1533 SCR_JUMP,
1534 PADDR_B (msg_out_done),
1535}/*-------------------------< PPR_RESP >-------------------------*/,{
1536 /*
1537 * let the target fetch our answer.
1538 */
1539 SCR_SET (SCR_ATN),
1540 0,
1541 SCR_CLR (SCR_ACK),
1542 0,
1543 SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
1544 PADDR_B (nego_bad_phase),
1545}/*-------------------------< SEND_PPR >-------------------------*/,{
1546 /*
1547 * Send the M_X_PPR_REQ
1548 */
1549 SCR_MOVE_ABS (8) ^ SCR_MSG_OUT,
1550 HADDR_1 (msgout),
1551 SCR_JUMP,
1552 PADDR_B (msg_out_done),
1553}/*-------------------------< NEGO_BAD_PHASE >-------------------*/,{
1554 SCR_INT,
1555 SIR_NEGO_PROTO,
1556 SCR_JUMP,
1557 PADDR_A (dispatch),
1558}/*-------------------------< MSG_OUT >--------------------------*/,{
1559 /*
1560 * The target requests a message.
1561 * We do not send messages that may
1562 * require the device to go to bus free.
1563 */
1564 SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
1565 HADDR_1 (msgout),
1566 /*
1567 * ... wait for the next phase
1568 * if it's a message out, send it again, ...
1569 */
1570 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
1571 PADDR_B (msg_out),
1572}/*-------------------------< MSG_OUT_DONE >---------------------*/,{
1573 /*
1574 * Let the C code be aware of the
1575 * sent message and clear the message.
1576 */
1577 SCR_INT,
1578 SIR_MSG_OUT_DONE,
1579 /*
1580 * ... and process the next phase
1581 */
1582 SCR_JUMP,
1583 PADDR_A (dispatch),
1584}/*-------------------------< DATA_OVRUN >-----------------------*/,{
1585 /*
1586 * Zero scratcha, which will count the
1587 * extra bytes.
1588 */
1589 SCR_COPY (4),
1590 PADDR_B (zero),
1591 RADDR_1 (scratcha),
1592}/*-------------------------< DATA_OVRUN1 >----------------------*/,{
1593 /*
1594 * The target may want to transfer too much data.
1595 *
1596 * If phase is DATA OUT write 1 byte and count it.
1597 */
1598 SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
1599 16,
1600 SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
1601 HADDR_1 (scratch),
1602 SCR_JUMP,
1603 PADDR_B (data_ovrun2),
1604 /*
1605 * If WSR is set, clear this condition, and
1606 * count this byte.
1607 */
1608 SCR_FROM_REG (scntl2),
1609 0,
1610 SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
1611 16,
1612 SCR_REG_REG (scntl2, SCR_OR, WSR),
1613 0,
1614 SCR_JUMP,
1615 PADDR_B (data_ovrun2),
1616 /*
1617 * Finally check against DATA IN phase.
1618 * If the phase is not DATA IN, signal the data
1619 * overrun to the C code and jump to the dispatcher.
1620 * Otherwise read 1 byte and count it.
1621 */
1622 SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)),
1623 16,
1624 SCR_INT,
1625 SIR_DATA_OVERRUN,
1626 SCR_JUMP,
1627 PADDR_A (dispatch),
1628 SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
1629 HADDR_1 (scratch),
1630}/*-------------------------< DATA_OVRUN2 >----------------------*/,{
1631 /*
1632 * Count this byte.
1633 * This will allow us to return a negative
1634 * residual to the user.
1635 */
1636 SCR_REG_REG (scratcha, SCR_ADD, 0x01),
1637 0,
1638 SCR_REG_REG (scratcha1, SCR_ADDC, 0),
1639 0,
1640 SCR_REG_REG (scratcha2, SCR_ADDC, 0),
1641 0,
1642 /*
1643 * .. and repeat as required.
1644 */
1645 SCR_JUMP,
1646 PADDR_B (data_ovrun1),
1647}/*-------------------------< ABORT_RESEL >----------------------*/,{
1648 SCR_SET (SCR_ATN),
1649 0,
1650 SCR_CLR (SCR_ACK),
1651 0,
1652 /*
1653 * Send the abort/abort tag/reset message.
1654 * We expect an immediate disconnect.
1655 */
1656 SCR_REG_REG (scntl2, SCR_AND, 0x7f),
1657 0,
1658 SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
1659 HADDR_1 (msgout),
1660 SCR_CLR (SCR_ACK|SCR_ATN),
1661 0,
1662 SCR_WAIT_DISC,
1663 0,
1664 SCR_INT,
1665 SIR_RESEL_ABORTED,
1666 SCR_JUMP,
1667 PADDR_A (start),
1668}/*-------------------------< RESEND_IDENT >---------------------*/,{
1669 /*
1670 * The target stays in MSG OUT phase after having acked
1671 * Identify [+ Tag [+ Extended message ]]. Targets shall
1672 * behave this way on parity error.
1673 * We must send it all the messages again.
1674 */
1675 SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */
1676 0, /* 1st ACK = 90 ns. Hope the chip isn't too fast */
1677 SCR_JUMP,
1678 PADDR_A (send_ident),
1679}/*-------------------------< IDENT_BREAK >----------------------*/,{
1680 SCR_CLR (SCR_ATN),
1681 0,
1682 SCR_JUMP,
1683 PADDR_A (select2),
1684}/*-------------------------< IDENT_BREAK_ATN >------------------*/,{
1685 SCR_SET (SCR_ATN),
1686 0,
1687 SCR_JUMP,
1688 PADDR_A (select2),
1689}/*-------------------------< SDATA_IN >-------------------------*/,{
1690 SCR_CHMOV_TBL ^ SCR_DATA_IN,
1691 offsetof (struct sym_dsb, sense),
1692 SCR_CALL,
1693 PADDR_A (datai_done),
1694 SCR_JUMP,
1695 PADDR_B (data_ovrun),
1696}/*-------------------------< RESEL_BAD_LUN >--------------------*/,{
1697 /*
1698 * The message is an IDENTIFY, but the LUN is unknown.
1699 * Signal the problem to the C code so it can log the event.
1700 * Send an M_ABORT to clear all pending tasks.
1701 */
1702 SCR_INT,
1703 SIR_RESEL_BAD_LUN,
1704 SCR_JUMP,
1705 PADDR_B (abort_resel),
1706}/*-------------------------< BAD_I_T_L >------------------------*/,{
1707 /*
1708 * We do not have a task for that I_T_L.
1709 * Signal the problem to the C code so it can log the event.
1710 * Send an M_ABORT message.
1711 */
1712 SCR_INT,
1713 SIR_RESEL_BAD_I_T_L,
1714 SCR_JUMP,
1715 PADDR_B (abort_resel),
1716}/*-------------------------< BAD_I_T_L_Q >----------------------*/,{
1717 /*
1718 * We do not have a task that matches the tag.
1719 * Signal the problem to the C code so it can log the event.
1720 * Send an M_ABORTTAG message.
1721 */
1722 SCR_INT,
1723 SIR_RESEL_BAD_I_T_L_Q,
1724 SCR_JUMP,
1725 PADDR_B (abort_resel),
1726}/*-------------------------< BAD_STATUS >-----------------------*/,{
1727 /*
1728 * Anything different from INTERMEDIATE
1729 * CONDITION MET should be a bad SCSI status,
1730 * given that GOOD status has already been tested.
1731 * Call the C code.
1732 */
1733 SCR_COPY (4),
1734 PADDR_B (startpos),
1735 RADDR_1 (scratcha),
1736 SCR_INT ^ IFFALSE (DATA (S_COND_MET)),
1737 SIR_BAD_SCSI_STATUS,
1738 SCR_RETURN,
1739 0,
1740}/*-------------------------< WSR_MA_HELPER >--------------------*/,{
1741 /*
1742 * Helper for the C code when WSR bit is set.
1743 * Perform the move of the residual byte.
1744 */
1745 SCR_CHMOV_TBL ^ SCR_DATA_IN,
1746 offsetof (struct sym_ccb, phys.wresid),
1747 SCR_JUMP,
1748 PADDR_A (dispatch),
1749
1750#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
1751}/*-------------------------< DATA_IO >--------------------------*/,{
1752 /*
1753 * We jump here if the data direction was unknown at the
1754 * time we had to queue the command to the scripts processor.
1755 * Pointers had been set as follows in this situation:
1756 * savep --> DATA_IO
1757 * lastp --> start pointer when DATA_IN
1758 * wlastp --> start pointer when DATA_OUT
1759 * This script sets savep and lastp according to the
1760 * direction chosen by the target.
1761 */
1762 SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
1763 PADDR_B (data_io_out),
1764}/*-------------------------< DATA_IO_COM >----------------------*/,{
1765 /*
1766 * Direction is DATA IN.
1767 */
1768 SCR_COPY (4),
1769 HADDR_1 (ccb_head.lastp),
1770 HADDR_1 (ccb_head.savep),
1771 /*
1772 * Jump to the SCRIPTS according to actual direction.
1773 */
1774 SCR_COPY (4),
1775 HADDR_1 (ccb_head.savep),
1776 RADDR_1 (temp),
1777 SCR_RETURN,
1778 0,
1779}/*-------------------------< DATA_IO_OUT >----------------------*/,{
1780 /*
1781 * Direction is DATA OUT.
1782 */
1783 SCR_REG_REG (HF_REG, SCR_AND, (~HF_DATA_IN)),
1784 0,
1785 SCR_COPY (4),
1786 HADDR_1 (ccb_head.wlastp),
1787 HADDR_1 (ccb_head.lastp),
1788 SCR_JUMP,
1789 PADDR_B(data_io_com),
1790#endif /* SYM_OPT_HANDLE_DIR_UNKNOWN */
1791
1792}/*-------------------------< ZERO >-----------------------------*/,{
1793 SCR_DATA_ZERO,
1794}/*-------------------------< SCRATCH >--------------------------*/,{
1795 SCR_DATA_ZERO, /* MUST BE BEFORE SCRATCH1 */
1796}/*-------------------------< SCRATCH1 >-------------------------*/,{
1797 SCR_DATA_ZERO,
1798}/*-------------------------< PREV_DONE >------------------------*/,{
1799 SCR_DATA_ZERO, /* MUST BE BEFORE DONE_POS ! */
1800}/*-------------------------< DONE_POS >-------------------------*/,{
1801 SCR_DATA_ZERO,
1802}/*-------------------------< NEXTJOB >--------------------------*/,{
1803 SCR_DATA_ZERO, /* MUST BE BEFORE STARTPOS ! */
1804}/*-------------------------< STARTPOS >-------------------------*/,{
1805 SCR_DATA_ZERO,
1806}/*-------------------------< TARGTBL >--------------------------*/,{
1807 SCR_DATA_ZERO,
1808}/*--------------------------<>----------------------------------*/
1809};
1810
1811static struct SYM_FWZ_SCR SYM_FWZ_SCR = {
1812 /*-------------------------< SNOOPTEST >------------------------*/{
1813 /*
1814 * Read the variable.
1815 */
1816 SCR_COPY (4),
1817 HADDR_1 (scratch),
1818 RADDR_1 (scratcha),
1819 /*
1820 * Write the variable.
1821 */
1822 SCR_COPY (4),
1823 RADDR_1 (temp),
1824 HADDR_1 (scratch),
1825 /*
1826 * Read back the variable.
1827 */
1828 SCR_COPY (4),
1829 HADDR_1 (scratch),
1830 RADDR_1 (temp),
1831}/*-------------------------< SNOOPEND >-------------------------*/,{
1832 /*
1833 * And stop.
1834 */
1835 SCR_INT,
1836 99,
1837}/*--------------------------<>----------------------------------*/
1838};
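The SNOOPTEST fragment above has the chip read 'scratch' into SCRATCHA, copy TEMP into 'scratch', read it back into TEMP, and then stop with interrupt code 99. A hedged sketch of how the C side could use this to detect CPU/chip cache-coherency problems; the function, field types and bookkeeping shown here are illustrative, not the driver's actual test routine:

	/* Sketch: run the SNOOPTEST script with known values, then compare. */
	static int sym_demo_snooptest(struct sym_hcb *np, u32 host_wr, u32 sym_wr)
	{
		int err = 0;

		np->scratch = host_wr;	/* CPU writes a value the chip will read */
		/* ... set TEMP to sym_wr, start SCRIPTS at SCRIPTZ_BA(np, snooptest)
		 * and wait for the SCR_INT (99) that terminates the fragment ...    */
		if (np->scratch != sym_wr)	/* chip's write did not reach the CPU */
			err |= 1;
		/* SCRATCHA should now hold host_wr and TEMP should hold sym_wr;
		 * anything else indicates stale cached data on one side.           */
		return err;
	}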
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw2.h b/drivers/scsi/sym53c8xx_2/sym_fw2.h
new file mode 100644
index 000000000000..7ea7151f5d1d
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw2.h
@@ -0,0 +1,1927 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40/*
41 * Scripts for SYMBIOS-Processor
42 *
43 * We have to know the offsets of all labels before we reach
44 * them (for forward jumps). Therefore we declare a struct
45 * here. If you make changes inside the script,
46 *
47 * DON'T FORGET TO CHANGE THE LENGTHS HERE!
48 */
49
50/*
51 * Script fragments which are loaded into the on-chip RAM
52 * of 825A, 875, 876, 895, 895A, 896 and 1010 chips.
53 * Must not exceed 4K bytes.
54 */
55struct SYM_FWA_SCR {
56 u32 start [ 14];
57 u32 getjob_begin [ 4];
58 u32 getjob_end [ 4];
59#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
60 u32 select [ 6];
61#else
62 u32 select [ 4];
63#endif
64#if SYM_CONF_DMA_ADDRESSING_MODE == 2
65 u32 is_dmap_dirty [ 4];
66#endif
67 u32 wf_sel_done [ 2];
68 u32 sel_done [ 2];
69 u32 send_ident [ 2];
70#ifdef SYM_CONF_IARB_SUPPORT
71 u32 select2 [ 8];
72#else
73 u32 select2 [ 2];
74#endif
75 u32 command [ 2];
76 u32 dispatch [ 28];
77 u32 sel_no_cmd [ 10];
78 u32 init [ 6];
79 u32 clrack [ 4];
80 u32 datai_done [ 10];
81 u32 datai_done_wsr [ 20];
82 u32 datao_done [ 10];
83 u32 datao_done_wss [ 6];
84 u32 datai_phase [ 4];
85 u32 datao_phase [ 6];
86 u32 msg_in [ 2];
87 u32 msg_in2 [ 10];
88#ifdef SYM_CONF_IARB_SUPPORT
89 u32 status [ 14];
90#else
91 u32 status [ 10];
92#endif
93 u32 complete [ 6];
94 u32 complete2 [ 12];
95 u32 done [ 14];
96 u32 done_end [ 2];
97 u32 complete_error [ 4];
98 u32 save_dp [ 12];
99 u32 restore_dp [ 8];
100 u32 disconnect [ 12];
101#ifdef SYM_CONF_IARB_SUPPORT
102 u32 idle [ 4];
103#else
104 u32 idle [ 2];
105#endif
106#ifdef SYM_CONF_IARB_SUPPORT
107 u32 ungetjob [ 6];
108#else
109 u32 ungetjob [ 4];
110#endif
111#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
112 u32 reselect [ 4];
113#else
114 u32 reselect [ 2];
115#endif
116 u32 reselected [ 22];
117 u32 resel_scntl4 [ 20];
118 u32 resel_lun0 [ 6];
119#if SYM_CONF_MAX_TASK*4 > 512
120 u32 resel_tag [ 26];
121#elif SYM_CONF_MAX_TASK*4 > 256
122 u32 resel_tag [ 20];
123#else
124 u32 resel_tag [ 16];
125#endif
126 u32 resel_dsa [ 2];
127 u32 resel_dsa1 [ 4];
128 u32 resel_no_tag [ 6];
129 u32 data_in [SYM_CONF_MAX_SG * 2];
130 u32 data_in2 [ 4];
131 u32 data_out [SYM_CONF_MAX_SG * 2];
132 u32 data_out2 [ 4];
133 u32 pm0_data [ 12];
134 u32 pm0_data_out [ 6];
135 u32 pm0_data_end [ 6];
136 u32 pm1_data [ 12];
137 u32 pm1_data_out [ 6];
138 u32 pm1_data_end [ 6];
139};
140
141/*
142 * Script fragments which stay in main memory for all chips
143 * except for chips that support 8K on-chip RAM.
144 */
145struct SYM_FWB_SCR {
146 u32 start64 [ 2];
147 u32 no_data [ 2];
148#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
149 u32 sel_for_abort [ 18];
150#else
151 u32 sel_for_abort [ 16];
152#endif
153 u32 sel_for_abort_1 [ 2];
154 u32 msg_in_etc [ 12];
155 u32 msg_received [ 4];
156 u32 msg_weird_seen [ 4];
157 u32 msg_extended [ 20];
158 u32 msg_bad [ 6];
159 u32 msg_weird [ 4];
160 u32 msg_weird1 [ 8];
161
162 u32 wdtr_resp [ 6];
163 u32 send_wdtr [ 4];
164 u32 sdtr_resp [ 6];
165 u32 send_sdtr [ 4];
166 u32 ppr_resp [ 6];
167 u32 send_ppr [ 4];
168 u32 nego_bad_phase [ 4];
169 u32 msg_out [ 4];
170 u32 msg_out_done [ 4];
171 u32 data_ovrun [ 2];
172 u32 data_ovrun1 [ 22];
173 u32 data_ovrun2 [ 8];
174 u32 abort_resel [ 16];
175 u32 resend_ident [ 4];
176 u32 ident_break [ 4];
177 u32 ident_break_atn [ 4];
178 u32 sdata_in [ 6];
179 u32 resel_bad_lun [ 4];
180 u32 bad_i_t_l [ 4];
181 u32 bad_i_t_l_q [ 4];
182 u32 bad_status [ 6];
183 u32 pm_handle [ 20];
184 u32 pm_handle1 [ 4];
185 u32 pm_save [ 4];
186 u32 pm0_save [ 12];
187 u32 pm_save_end [ 4];
188 u32 pm1_save [ 14];
189
190 /* WSR handling */
191 u32 pm_wsr_handle [ 38];
192 u32 wsr_ma_helper [ 4];
193
194#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
195 /* Unknown direction handling */
196 u32 data_io [ 2];
197 u32 data_io_in [ 2];
198 u32 data_io_com [ 6];
199 u32 data_io_out [ 8];
200#endif
201 /* Data area */
202 u32 zero [ 1];
203 u32 scratch [ 1];
204 u32 pm0_data_addr [ 1];
205 u32 pm1_data_addr [ 1];
206 u32 done_pos [ 1];
207 u32 startpos [ 1];
208 u32 targtbl [ 1];
209};
210
211/*
212 * Script fragments used at initialisations.
213 * Only runs out of main memory.
214 */
215struct SYM_FWZ_SCR {
216 u32 snooptest [ 6];
217 u32 snoopend [ 2];
218};
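
Each label in these structs is a fixed-size array of u32 opcodes, so a label's position inside the downloaded firmware is simply its byte offset within the struct; this is what makes forward jumps resolvable before the labels are reached. A rough sketch of the idea follows; the driver's real PADDR_A/PADDR_B macros are defined elsewhere and may differ, and the function and parameter names here are only illustrative.

static inline u32 sym_label_ba_sketch(u32 fragment_ba, size_t label_offset)
{
	/* bus address of a label = base bus address of the loaded fragment
	 * plus the label's byte offset inside the struct */
	return fragment_ba + (u32)label_offset;
}

/* e.g.: sym_label_ba_sketch(scripta_ba, offsetof(struct SYM_FWA_SCR, dispatch)) */
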
219
220static struct SYM_FWA_SCR SYM_FWA_SCR = {
221/*--------------------------< START >----------------------------*/ {
222 /*
223 * Switch the LED on.
224 * Will be patched with a NO_OP if LED
225 * not needed or not desired.
226 */
227 SCR_REG_REG (gpreg, SCR_AND, 0xfe),
228 0,
229 /*
230 * Clear SIGP.
231 */
232 SCR_FROM_REG (ctest2),
233 0,
234 /*
235 * Stop here if the C code wants to perform
236 * some error recovery procedure manually.
237 * (Indicate this by setting SEM in ISTAT)
238 */
239 SCR_FROM_REG (istat),
240 0,
241 /*
242 * Report to the C code the next position in
243 * the start queue the SCRIPTS will schedule.
244 * The C code must not change SCRATCHA.
245 */
246 SCR_LOAD_ABS (scratcha, 4),
247 PADDR_B (startpos),
248 SCR_INT ^ IFTRUE (MASK (SEM, SEM)),
249 SIR_SCRIPT_STOPPED,
250 /*
251 * Start the next job.
252 *
253 * @DSA = start point for this job.
254 * SCRATCHA = address of this job in the start queue.
255 *
256	 * We will restore startpos with SCRATCHA if we fail
257	 * arbitration or if it is the idle job.
258 *
259	 * The GETJOB_BEGIN to GETJOB_END section of SCRIPTS below
260	 * is a critical path. If it is partially executed, it may
261	 * then happen that the job address is not yet in the DSA
262	 * and the next queue position points to the next JOB.
263 */
264 SCR_LOAD_ABS (dsa, 4),
265 PADDR_B (startpos),
266 SCR_LOAD_REL (temp, 4),
267 4,
268}/*-------------------------< GETJOB_BEGIN >---------------------*/,{
269 SCR_STORE_ABS (temp, 4),
270 PADDR_B (startpos),
271 SCR_LOAD_REL (dsa, 4),
272 0,
273}/*-------------------------< GETJOB_END >-----------------------*/,{
274 SCR_LOAD_REL (temp, 4),
275 0,
276 SCR_RETURN,
277 0,
278}/*-------------------------< SELECT >---------------------------*/,{
279 /*
280 * DSA contains the address of a scheduled
281 * data structure.
282 *
283 * SCRATCHA contains the address of the start queue
284 * entry which points to the next job.
285 *
286 * Set Initiator mode.
287 *
288 * (Target mode is left as an exercise for the reader)
289 */
290#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
291 SCR_CLR (SCR_TRG),
292 0,
293#endif
294 /*
295 * And try to select this target.
296 */
297 SCR_SEL_TBL_ATN ^ offsetof (struct sym_dsb, select),
298 PADDR_A (ungetjob),
299 /*
300 * Now there are 4 possibilities:
301 *
302 * (1) The chip loses arbitration.
303 * This is ok, because it will try again,
304 * when the bus becomes idle.
305 * (But beware of the timeout function!)
306 *
307 * (2) The chip is reselected.
308 * Then the script processor takes the jump
309 * to the RESELECT label.
310 *
311 * (3) The chip wins arbitration.
312	 *	Then it will execute SCRIPTS instructions until
313	 *	the next instruction that checks the SCSI phase.
314	 *	Then it will stop and wait for the selection to be
315	 *	complete or a selection time-out to occur.
316 *
317 * After having won arbitration, the SCRIPTS
318 * processor is able to execute instructions while
319 * the SCSI core is performing SCSI selection.
320 */
321 /*
322 * Initialize the status registers
323 */
324 SCR_LOAD_REL (scr0, 4),
325 offsetof (struct sym_ccb, phys.head.status),
326 /*
327	 * We may need help from the CPU if the DMA segment
328	 * registers aren't up-to-date for this IO.
329	 * Patched with a NOOP for chips that do not
330	 * support DAC addressing.
331 */
332#if SYM_CONF_DMA_ADDRESSING_MODE == 2
333}/*-------------------------< IS_DMAP_DIRTY >--------------------*/,{
334 SCR_FROM_REG (HX_REG),
335 0,
336 SCR_INT ^ IFTRUE (MASK (HX_DMAP_DIRTY, HX_DMAP_DIRTY)),
337 SIR_DMAP_DIRTY,
338#endif
339}/*-------------------------< WF_SEL_DONE >----------------------*/,{
340 SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
341 SIR_SEL_ATN_NO_MSG_OUT,
342}/*-------------------------< SEL_DONE >-------------------------*/,{
343 /*
344 * C1010-33 errata work-around.
345 * Due to a race, the SCSI core may not have
346 * loaded SCNTL3 on SEL_TBL instruction.
347 * We reload it once phase is stable.
348 * Patched with a NOOP for other chips.
349 */
350 SCR_LOAD_REL (scntl3, 1),
351 offsetof(struct sym_dsb, select.sel_scntl3),
352}/*-------------------------< SEND_IDENT >-----------------------*/,{
353 /*
354 * Selection complete.
355 * Send the IDENTIFY and possibly the TAG message
356 * and negotiation message if present.
357 */
358 SCR_MOVE_TBL ^ SCR_MSG_OUT,
359 offsetof (struct sym_dsb, smsg),
360}/*-------------------------< SELECT2 >--------------------------*/,{
361#ifdef SYM_CONF_IARB_SUPPORT
362 /*
363 * Set IMMEDIATE ARBITRATION if we have been given
364 * a hint to do so. (Some job to do after this one).
365 */
366 SCR_FROM_REG (HF_REG),
367 0,
368 SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)),
369 8,
370 SCR_REG_REG (scntl1, SCR_OR, IARB),
371 0,
372#endif
373 /*
374 * Anticipate the COMMAND phase.
375 * This is the PHASE we expect at this point.
376 */
377 SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)),
378 PADDR_A (sel_no_cmd),
379}/*-------------------------< COMMAND >--------------------------*/,{
380 /*
381 * ... and send the command
382 */
383 SCR_MOVE_TBL ^ SCR_COMMAND,
384 offsetof (struct sym_dsb, cmd),
385}/*-------------------------< DISPATCH >-------------------------*/,{
386 /*
387 * MSG_IN is the only phase that shall be
388 * entered at least once for each (re)selection.
389 * So we test it first.
390 */
391 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
392 PADDR_A (msg_in),
393 SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)),
394 PADDR_A (datao_phase),
395 SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)),
396 PADDR_A (datai_phase),
397 SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
398 PADDR_A (status),
399 SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
400 PADDR_A (command),
401 SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
402 PADDR_B (msg_out),
403 /*
404 * Discard as many illegal phases as
405	 * required and tell the C code about it.
406 */
407 SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)),
408 16,
409 SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
410 HADDR_1 (scratch),
411 SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)),
412 -16,
413 SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)),
414 16,
415 SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
416 HADDR_1 (scratch),
417 SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)),
418 -16,
419 SCR_INT,
420 SIR_BAD_PHASE,
421 SCR_JUMP,
422 PADDR_A (dispatch),
423}/*-------------------------< SEL_NO_CMD >-----------------------*/,{
424 /*
425 * The target does not switch to command
426 * phase after IDENTIFY has been sent.
427 *
428 * If it stays in MSG OUT phase send it
429 * the IDENTIFY again.
430 */
431 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
432 PADDR_B (resend_ident),
433 /*
434 * If target does not switch to MSG IN phase
435 * and we sent a negotiation, assert the
436 * failure immediately.
437 */
438 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
439 PADDR_A (dispatch),
440 SCR_FROM_REG (HS_REG),
441 0,
442 SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
443 SIR_NEGO_FAILED,
444 /*
445 * Jump to dispatcher.
446 */
447 SCR_JUMP,
448 PADDR_A (dispatch),
449}/*-------------------------< INIT >-----------------------------*/,{
450 /*
451 * Wait for the SCSI RESET signal to be
452 * inactive before restarting operations,
453 * since the chip may hang on SEL_ATN
454 * if SCSI RESET is active.
455 */
456 SCR_FROM_REG (sstat0),
457 0,
458 SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)),
459 -16,
460 SCR_JUMP,
461 PADDR_A (start),
462}/*-------------------------< CLRACK >---------------------------*/,{
463 /*
464 * Terminate possible pending message phase.
465 */
466 SCR_CLR (SCR_ACK),
467 0,
468 SCR_JUMP,
469 PADDR_A (dispatch),
470}/*-------------------------< DATAI_DONE >-----------------------*/,{
471 /*
472 * Save current pointer to LASTP.
473 */
474 SCR_STORE_REL (temp, 4),
475 offsetof (struct sym_ccb, phys.head.lastp),
476 /*
477 * If the SWIDE is not full, jump to dispatcher.
478 * We anticipate a STATUS phase.
479 */
480 SCR_FROM_REG (scntl2),
481 0,
482 SCR_JUMP ^ IFTRUE (MASK (WSR, WSR)),
483 PADDR_A (datai_done_wsr),
484 SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
485 PADDR_A (status),
486 SCR_JUMP,
487 PADDR_A (dispatch),
488}/*-------------------------< DATAI_DONE_WSR >-------------------*/,{
489 /*
490 * The SWIDE is full.
491 * Clear this condition.
492 */
493 SCR_REG_REG (scntl2, SCR_OR, WSR),
494 0,
495 /*
496 * We are expecting an IGNORE RESIDUE message
497	 * from the device, otherwise we are in a data
498	 * overrun condition. Check against the MSG_IN phase.
499 */
500 SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
501 SIR_SWIDE_OVERRUN,
502 SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
503 PADDR_A (dispatch),
504 /*
505 * We are in MSG_IN phase,
506 * Read the first byte of the message.
507 * If it is not an IGNORE RESIDUE message,
508 * signal overrun and jump to message
509 * processing.
510 */
511 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
512 HADDR_1 (msgin[0]),
513 SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)),
514 SIR_SWIDE_OVERRUN,
515 SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)),
516 PADDR_A (msg_in2),
517 /*
518 * We got the message we expected.
519 * Read the 2nd byte, and jump to dispatcher.
520 */
521 SCR_CLR (SCR_ACK),
522 0,
523 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
524 HADDR_1 (msgin[1]),
525 SCR_CLR (SCR_ACK),
526 0,
527 SCR_JUMP,
528 PADDR_A (dispatch),
529}/*-------------------------< DATAO_DONE >-----------------------*/,{
530 /*
531 * Save current pointer to LASTP.
532 */
533 SCR_STORE_REL (temp, 4),
534 offsetof (struct sym_ccb, phys.head.lastp),
535 /*
536 * If the SODL is not full jump to dispatcher.
537 * We anticipate a STATUS phase.
538 */
539 SCR_FROM_REG (scntl2),
540 0,
541 SCR_JUMP ^ IFTRUE (MASK (WSS, WSS)),
542 PADDR_A (datao_done_wss),
543 SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
544 PADDR_A (status),
545 SCR_JUMP,
546 PADDR_A (dispatch),
547}/*-------------------------< DATAO_DONE_WSS >-------------------*/,{
548 /*
549 * The SODL is full, clear this condition.
550 */
551 SCR_REG_REG (scntl2, SCR_OR, WSS),
552 0,
553 /*
554 * And signal a DATA UNDERRUN condition
555 * to the C code.
556 */
557 SCR_INT,
558 SIR_SODL_UNDERRUN,
559 SCR_JUMP,
560 PADDR_A (dispatch),
561}/*-------------------------< DATAI_PHASE >----------------------*/,{
562 /*
563 * Jump to current pointer.
564 */
565 SCR_LOAD_REL (temp, 4),
566 offsetof (struct sym_ccb, phys.head.lastp),
567 SCR_RETURN,
568 0,
569}/*-------------------------< DATAO_PHASE >----------------------*/,{
570 /*
571 * C1010-66 errata work-around.
572 * Extra clocks of data hold must be inserted
573 * in DATA OUT phase on 33 MHz PCI BUS.
574 * Patched with a NOOP for other chips.
575 */
576 SCR_REG_REG (scntl4, SCR_OR, (XCLKH_DT|XCLKH_ST)),
577 0,
578 /*
579 * Jump to current pointer.
580 */
581 SCR_LOAD_REL (temp, 4),
582 offsetof (struct sym_ccb, phys.head.lastp),
583 SCR_RETURN,
584 0,
585}/*-------------------------< MSG_IN >---------------------------*/,{
586 /*
587 * Get the first byte of the message.
588 *
589 * The script processor doesn't negate the
590 * ACK signal after this transfer.
591 */
592 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
593 HADDR_1 (msgin[0]),
594}/*-------------------------< MSG_IN2 >--------------------------*/,{
595 /*
596 * Check first against 1 byte messages
597 * that we handle from SCRIPTS.
598 */
599 SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
600 PADDR_A (complete),
601 SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
602 PADDR_A (disconnect),
603 SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
604 PADDR_A (save_dp),
605 SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
606 PADDR_A (restore_dp),
607 /*
608 * We handle all other messages from the
609 * C code, so no need to waste on-chip RAM
610 * for those ones.
611 */
612 SCR_JUMP,
613 PADDR_B (msg_in_etc),
614}/*-------------------------< STATUS >---------------------------*/,{
615 /*
616 * get the status
617 */
618 SCR_MOVE_ABS (1) ^ SCR_STATUS,
619 HADDR_1 (scratch),
620#ifdef SYM_CONF_IARB_SUPPORT
621 /*
622 * If STATUS is not GOOD, clear IMMEDIATE ARBITRATION,
623	 * since we may have to tamper with the start queue from
624 * the C code.
625 */
626 SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)),
627 8,
628 SCR_REG_REG (scntl1, SCR_AND, ~IARB),
629 0,
630#endif
631 /*
632 * save status to scsi_status.
633 * mark as complete.
634 */
635 SCR_TO_REG (SS_REG),
636 0,
637 SCR_LOAD_REG (HS_REG, HS_COMPLETE),
638 0,
639 /*
640 * Anticipate the MESSAGE PHASE for
641 * the TASK COMPLETE message.
642 */
643 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
644 PADDR_A (msg_in),
645 SCR_JUMP,
646 PADDR_A (dispatch),
647}/*-------------------------< COMPLETE >-------------------------*/,{
648 /*
649 * Complete message.
650 *
651 * When we terminate the cycle by clearing ACK,
652 * the target may disconnect immediately.
653 *
654 * We don't want to be told of an "unexpected disconnect",
655 * so we disable this feature.
656 */
657 SCR_REG_REG (scntl2, SCR_AND, 0x7f),
658 0,
659 /*
660 * Terminate cycle ...
661 */
662 SCR_CLR (SCR_ACK|SCR_ATN),
663 0,
664 /*
665 * ... and wait for the disconnect.
666 */
667 SCR_WAIT_DISC,
668 0,
669}/*-------------------------< COMPLETE2 >------------------------*/,{
670 /*
671 * Save host status.
672 */
673 SCR_STORE_REL (scr0, 4),
674 offsetof (struct sym_ccb, phys.head.status),
675 /*
676 * Some bridges may reorder DMA writes to memory.
677	 * We do not want the CPU to deal with completions
678	 * without all the posted writes having been flushed
679 * to memory. This DUMMY READ should flush posted
680 * buffers prior to the CPU having to deal with
681 * completions.
682 */
683 SCR_LOAD_REL (scr0, 4), /* DUMMY READ */
684 offsetof (struct sym_ccb, phys.head.status),
685
686 /*
687 * If command resulted in not GOOD status,
688 * call the C code if needed.
689 */
690 SCR_FROM_REG (SS_REG),
691 0,
692 SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
693 PADDR_B (bad_status),
694 /*
695 * If we performed an auto-sense, call
696	 * the C code to synchronize task aborts
697 * with UNIT ATTENTION conditions.
698 */
699 SCR_FROM_REG (HF_REG),
700 0,
701 SCR_JUMP ^ IFFALSE (MASK (0 ,(HF_SENSE|HF_EXT_ERR))),
702 PADDR_A (complete_error),
703}/*-------------------------< DONE >-----------------------------*/,{
704 /*
705 * Copy the DSA to the DONE QUEUE and
706 * signal completion to the host.
707 * If we are interrupted between DONE
708 * and DONE_END, we must reset, otherwise
709 * the completed CCB may be lost.
710 */
711 SCR_STORE_ABS (dsa, 4),
712 PADDR_B (scratch),
713 SCR_LOAD_ABS (dsa, 4),
714 PADDR_B (done_pos),
715 SCR_LOAD_ABS (scratcha, 4),
716 PADDR_B (scratch),
717 SCR_STORE_REL (scratcha, 4),
718 0,
719 /*
720 * The instruction below reads the DONE QUEUE next
721 * free position from memory.
722 * In addition it ensures that all PCI posted writes
723 * are flushed and so the DSA value of the done
724 * CCB is visible by the CPU before INTFLY is raised.
725 */
726 SCR_LOAD_REL (scratcha, 4),
727 4,
728 SCR_INT_FLY,
729 0,
730 SCR_STORE_ABS (scratcha, 4),
731 PADDR_B (done_pos),
732}/*-------------------------< DONE_END >-------------------------*/,{
733 SCR_JUMP,
734 PADDR_A (start),
735}/*-------------------------< COMPLETE_ERROR >-------------------*/,{
736 SCR_LOAD_ABS (scratcha, 4),
737 PADDR_B (startpos),
738 SCR_INT,
739 SIR_COMPLETE_ERROR,
740}/*-------------------------< SAVE_DP >--------------------------*/,{
741 /*
742 * Clear ACK immediately.
743 * No need to delay it.
744 */
745 SCR_CLR (SCR_ACK),
746 0,
747 /*
748	 * Keep track that we received a SAVE DP, so
749 * we will switch to the other PM context
750 * on the next PM since the DP may point
751 * to the current PM context.
752 */
753 SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
754 0,
755 /*
756 * SAVE_DP message:
757 * Copy LASTP to SAVEP.
758 */
759 SCR_LOAD_REL (scratcha, 4),
760 offsetof (struct sym_ccb, phys.head.lastp),
761 SCR_STORE_REL (scratcha, 4),
762 offsetof (struct sym_ccb, phys.head.savep),
763 /*
764 * Anticipate the MESSAGE PHASE for
765 * the DISCONNECT message.
766 */
767 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
768 PADDR_A (msg_in),
769 SCR_JUMP,
770 PADDR_A (dispatch),
771}/*-------------------------< RESTORE_DP >-----------------------*/,{
772 /*
773 * Clear ACK immediately.
774 * No need to delay it.
775 */
776 SCR_CLR (SCR_ACK),
777 0,
778 /*
779 * Copy SAVEP to LASTP.
780 */
781 SCR_LOAD_REL (scratcha, 4),
782 offsetof (struct sym_ccb, phys.head.savep),
783 SCR_STORE_REL (scratcha, 4),
784 offsetof (struct sym_ccb, phys.head.lastp),
785 SCR_JUMP,
786 PADDR_A (dispatch),
787}/*-------------------------< DISCONNECT >-----------------------*/,{
788 /*
789 * DISCONNECTing ...
790 *
791 * disable the "unexpected disconnect" feature,
792 * and remove the ACK signal.
793 */
794 SCR_REG_REG (scntl2, SCR_AND, 0x7f),
795 0,
796 SCR_CLR (SCR_ACK|SCR_ATN),
797 0,
798 /*
799 * Wait for the disconnect.
800 */
801 SCR_WAIT_DISC,
802 0,
803 /*
804 * Status is: DISCONNECTED.
805 */
806 SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
807 0,
808 /*
809 * Save host status.
810 */
811 SCR_STORE_REL (scr0, 4),
812 offsetof (struct sym_ccb, phys.head.status),
813 SCR_JUMP,
814 PADDR_A (start),
815}/*-------------------------< IDLE >-----------------------------*/,{
816 /*
817 * Nothing to do?
818 * Switch the LED off and wait for reselect.
819 * Will be patched with a NO_OP if LED
820 * not needed or not desired.
821 */
822 SCR_REG_REG (gpreg, SCR_OR, 0x01),
823 0,
824#ifdef SYM_CONF_IARB_SUPPORT
825 SCR_JUMPR,
826 8,
827#endif
828}/*-------------------------< UNGETJOB >-------------------------*/,{
829#ifdef SYM_CONF_IARB_SUPPORT
830 /*
831 * Set IMMEDIATE ARBITRATION, for the next time.
832 * This will give us better chance to win arbitration
833 * for the job we just wanted to do.
834 */
835 SCR_REG_REG (scntl1, SCR_OR, IARB),
836 0,
837#endif
838 /*
839	 * interrupted and these instructions haven't all
840	 * been executed. BTW, this is very unlikely to
841 * all executed. BTW, this is very unlikely to
842 * happen, but we check that from the C code.
843 */
844 SCR_LOAD_REG (dsa, 0xff),
845 0,
846 SCR_STORE_ABS (scratcha, 4),
847 PADDR_B (startpos),
848}/*-------------------------< RESELECT >-------------------------*/,{
849#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
850 /*
851 * Make sure we are in initiator mode.
852 */
853 SCR_CLR (SCR_TRG),
854 0,
855#endif
856 /*
857 * Sleep waiting for a reselection.
858 */
859 SCR_WAIT_RESEL,
860 PADDR_A(start),
861}/*-------------------------< RESELECTED >-----------------------*/,{
862 /*
863 * Switch the LED on.
864 * Will be patched with a NO_OP if LED
865 * not needed or not desired.
866 */
867 SCR_REG_REG (gpreg, SCR_AND, 0xfe),
868 0,
869 /*
870 * load the target id into the sdid
871 */
872 SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
873 0,
874 SCR_TO_REG (sdid),
875 0,
876 /*
877 * Load the target control block address
878 */
879 SCR_LOAD_ABS (dsa, 4),
880 PADDR_B (targtbl),
881 SCR_SFBR_REG (dsa, SCR_SHL, 0),
882 0,
883 SCR_REG_REG (dsa, SCR_SHL, 0),
884 0,
885 SCR_REG_REG (dsa, SCR_AND, 0x3c),
886 0,
887 SCR_LOAD_REL (dsa, 4),
888 0,
889 /*
890 * We expect MESSAGE IN phase.
891 * If not, get help from the C code.
892 */
893 SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
894 SIR_RESEL_NO_MSG_IN,
895 /*
896 * Load the legacy synchronous transfer registers.
897 */
898 SCR_LOAD_REL (scntl3, 1),
899 offsetof(struct sym_tcb, head.wval),
900 SCR_LOAD_REL (sxfer, 1),
901 offsetof(struct sym_tcb, head.sval),
902}/*-------------------------< RESEL_SCNTL4 >---------------------*/,{
903 /*
904 * The C1010 uses a new synchronous timing scheme.
905 * Will be patched with a NO_OP if not a C1010.
906 */
907 SCR_LOAD_REL (scntl4, 1),
908 offsetof(struct sym_tcb, head.uval),
909 /*
910 * Get the IDENTIFY message.
911 */
912 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
913 HADDR_1 (msgin),
914 /*
915 * If IDENTIFY LUN #0, use a faster path
916 * to find the LCB structure.
917 */
918 SCR_JUMP ^ IFTRUE (MASK (0x80, 0xbf)),
919 PADDR_A (resel_lun0),
920 /*
921	 * If the message isn't an IDENTIFY,
922	 * tell the C code about it.
923 */
924 SCR_INT ^ IFFALSE (MASK (0x80, 0x80)),
925 SIR_RESEL_NO_IDENTIFY,
926 /*
927 * It is an IDENTIFY message,
928 * Load the LUN control block address.
929 */
930 SCR_LOAD_REL (dsa, 4),
931 offsetof(struct sym_tcb, head.luntbl_sa),
932 SCR_SFBR_REG (dsa, SCR_SHL, 0),
933 0,
934 SCR_REG_REG (dsa, SCR_SHL, 0),
935 0,
936 SCR_REG_REG (dsa, SCR_AND, 0xfc),
937 0,
938 SCR_LOAD_REL (dsa, 4),
939 0,
940 SCR_JUMPR,
941 8,
942}/*-------------------------< RESEL_LUN0 >-----------------------*/,{
943 /*
944 * LUN 0 special case (but usual one :))
945 */
946 SCR_LOAD_REL (dsa, 4),
947 offsetof(struct sym_tcb, head.lun0_sa),
948 /*
949 * Jump indirectly to the reselect action for this LUN.
950 */
951 SCR_LOAD_REL (temp, 4),
952 offsetof(struct sym_lcb, head.resel_sa),
953 SCR_RETURN,
954 0,
955 /* In normal situations, we jump to RESEL_TAG or RESEL_NO_TAG */
956}/*-------------------------< RESEL_TAG >------------------------*/,{
957 /*
958 * ACK the IDENTIFY previously received.
959 */
960 SCR_CLR (SCR_ACK),
961 0,
962 /*
963 * It shall be a tagged command.
964 * Read SIMPLE+TAG.
965 * The C code will deal with errors.
966	 * Aggressive optimization, isn't it? :)
967 */
968 SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
969 HADDR_1 (msgin),
970 /*
971 * Load the pointer to the tagged task
972 * table for this LUN.
973 */
974 SCR_LOAD_REL (dsa, 4),
975 offsetof(struct sym_lcb, head.itlq_tbl_sa),
976 /*
977 * The SIDL still contains the TAG value.
978	 * Aggressive optimization, isn't it? :):)
979 */
980 SCR_REG_SFBR (sidl, SCR_SHL, 0),
981 0,
982#if SYM_CONF_MAX_TASK*4 > 512
983 SCR_JUMPR ^ IFFALSE (CARRYSET),
984 8,
985 SCR_REG_REG (dsa1, SCR_OR, 2),
986 0,
987 SCR_REG_REG (sfbr, SCR_SHL, 0),
988 0,
989 SCR_JUMPR ^ IFFALSE (CARRYSET),
990 8,
991 SCR_REG_REG (dsa1, SCR_OR, 1),
992 0,
993#elif SYM_CONF_MAX_TASK*4 > 256
994 SCR_JUMPR ^ IFFALSE (CARRYSET),
995 8,
996 SCR_REG_REG (dsa1, SCR_OR, 1),
997 0,
998#endif
999 /*
1000 * Retrieve the DSA of this task.
1001 * JUMP indirectly to the restart point of the CCB.
1002 */
1003 SCR_SFBR_REG (dsa, SCR_AND, 0xfc),
1004 0,
1005 SCR_LOAD_REL (dsa, 4),
1006 0,
1007 SCR_LOAD_REL (temp, 4),
1008 offsetof(struct sym_ccb, phys.head.go.restart),
1009 SCR_RETURN,
1010 0,
1011 /* In normal situations we branch to RESEL_DSA */
1012}/*-------------------------< RESEL_DSA >------------------------*/,{
1013 /*
1014 * ACK the IDENTIFY or TAG previously received.
1015 */
1016 SCR_CLR (SCR_ACK),
1017 0,
1018}/*-------------------------< RESEL_DSA1 >-----------------------*/,{
1019 /*
1020 * Initialize the status registers
1021 */
1022 SCR_LOAD_REL (scr0, 4),
1023 offsetof (struct sym_ccb, phys.head.status),
1024 /*
1025 * Jump to dispatcher.
1026 */
1027 SCR_JUMP,
1028 PADDR_A (dispatch),
1029}/*-------------------------< RESEL_NO_TAG >---------------------*/,{
1030 /*
1031 * Load the DSA with the unique ITL task.
1032 */
1033 SCR_LOAD_REL (dsa, 4),
1034 offsetof(struct sym_lcb, head.itl_task_sa),
1035 /*
1036 * JUMP indirectly to the restart point of the CCB.
1037 */
1038 SCR_LOAD_REL (temp, 4),
1039 offsetof(struct sym_ccb, phys.head.go.restart),
1040 SCR_RETURN,
1041 0,
1042 /* In normal situations we branch to RESEL_DSA */
1043}/*-------------------------< DATA_IN >--------------------------*/,{
1044/*
1045 * Because the size depends on the
1046 * #define SYM_CONF_MAX_SG parameter,
1047 * it is filled in at runtime.
1048 *
1049 * ##===========< i=0; i<SYM_CONF_MAX_SG >=========
1050 * || SCR_CHMOV_TBL ^ SCR_DATA_IN,
1051 * || offsetof (struct sym_dsb, data[ i]),
1052 * ##==========================================
1053 */
10540
1055}/*-------------------------< DATA_IN2 >-------------------------*/,{
1056 SCR_CALL,
1057 PADDR_A (datai_done),
1058 SCR_JUMP,
1059 PADDR_B (data_ovrun),
1060}/*-------------------------< DATA_OUT >-------------------------*/,{
1061/*
1062 * Because the size depends on the
1063 * #define SYM_CONF_MAX_SG parameter,
1064 * it is filled in at runtime.
1065 *
1066 * ##===========< i=0; i<SYM_CONF_MAX_SG >=========
1067 * || SCR_CHMOV_TBL ^ SCR_DATA_OUT,
1068 * || offsetof (struct sym_dsb, data[ i]),
1069 * ##==========================================
1070 */
10710
1072}/*-------------------------< DATA_OUT2 >------------------------*/,{
1073 SCR_CALL,
1074 PADDR_A (datao_done),
1075 SCR_JUMP,
1076 PADDR_B (data_ovrun),
1077}/*-------------------------< PM0_DATA >-------------------------*/,{
1078 /*
1079 * Read our host flags to SFBR, so we will be able
1080 * to check against the data direction we expect.
1081 */
1082 SCR_FROM_REG (HF_REG),
1083 0,
1084 /*
1085 * Check against actual DATA PHASE.
1086 */
1087 SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
1088 PADDR_A (pm0_data_out),
1089 /*
1090 * Actual phase is DATA IN.
1091 * Check against expected direction.
1092 */
1093 SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
1094 PADDR_B (data_ovrun),
1095 /*
1096	 * Keep track that we are moving data from the
1097 * PM0 DATA mini-script.
1098 */
1099 SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
1100 0,
1101 /*
1102 * Move the data to memory.
1103 */
1104 SCR_CHMOV_TBL ^ SCR_DATA_IN,
1105 offsetof (struct sym_ccb, phys.pm0.sg),
1106 SCR_JUMP,
1107 PADDR_A (pm0_data_end),
1108}/*-------------------------< PM0_DATA_OUT >---------------------*/,{
1109 /*
1110 * Actual phase is DATA OUT.
1111 * Check against expected direction.
1112 */
1113 SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
1114 PADDR_B (data_ovrun),
1115 /*
1116	 * Keep track that we are moving data from the
1117 * PM0 DATA mini-script.
1118 */
1119 SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
1120 0,
1121 /*
1122 * Move the data from memory.
1123 */
1124 SCR_CHMOV_TBL ^ SCR_DATA_OUT,
1125 offsetof (struct sym_ccb, phys.pm0.sg),
1126}/*-------------------------< PM0_DATA_END >---------------------*/,{
1127 /*
1128	 * Clear the flag that told us we were moving
1129 * data from the PM0 DATA mini-script.
1130 */
1131 SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)),
1132 0,
1133 /*
1134 * Return to the previous DATA script which
1135 * is guaranteed by design (if no bug) to be
1136 * the main DATA script for this transfer.
1137 */
1138 SCR_LOAD_REL (temp, 4),
1139 offsetof (struct sym_ccb, phys.pm0.ret),
1140 SCR_RETURN,
1141 0,
1142}/*-------------------------< PM1_DATA >-------------------------*/,{
1143 /*
1144 * Read our host flags to SFBR, so we will be able
1145 * to check against the data direction we expect.
1146 */
1147 SCR_FROM_REG (HF_REG),
1148 0,
1149 /*
1150 * Check against actual DATA PHASE.
1151 */
1152 SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
1153 PADDR_A (pm1_data_out),
1154 /*
1155 * Actual phase is DATA IN.
1156 * Check against expected direction.
1157 */
1158 SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
1159 PADDR_B (data_ovrun),
1160 /*
1161	 * Keep track that we are moving data from the
1162 * PM1 DATA mini-script.
1163 */
1164 SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
1165 0,
1166 /*
1167 * Move the data to memory.
1168 */
1169 SCR_CHMOV_TBL ^ SCR_DATA_IN,
1170 offsetof (struct sym_ccb, phys.pm1.sg),
1171 SCR_JUMP,
1172 PADDR_A (pm1_data_end),
1173}/*-------------------------< PM1_DATA_OUT >---------------------*/,{
1174 /*
1175 * Actual phase is DATA OUT.
1176 * Check against expected direction.
1177 */
1178 SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
1179 PADDR_B (data_ovrun),
1180 /*
1181	 * Keep track that we are moving data from the
1182 * PM1 DATA mini-script.
1183 */
1184 SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
1185 0,
1186 /*
1187 * Move the data from memory.
1188 */
1189 SCR_CHMOV_TBL ^ SCR_DATA_OUT,
1190 offsetof (struct sym_ccb, phys.pm1.sg),
1191}/*-------------------------< PM1_DATA_END >---------------------*/,{
1192 /*
1193	 * Clear the flag that told us we were moving
1194 * data from the PM1 DATA mini-script.
1195 */
1196 SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)),
1197 0,
1198 /*
1199 * Return to the previous DATA script which
1200 * is guaranteed by design (if no bug) to be
1201 * the main DATA script for this transfer.
1202 */
1203 SCR_LOAD_REL (temp, 4),
1204 offsetof (struct sym_ccb, phys.pm1.ret),
1205 SCR_RETURN,
1206 0,
1207}/*-------------------------<>-----------------------------------*/
1208};
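
As the comments at the DATA_IN and DATA_OUT labels above explain, those two regions are only placeholders here and are filled in at run time with one table-indirect CHMOV per scatter/gather slot. A minimal sketch of such a fill loop is shown below; the function name is illustrative and the driver's actual helper (in sym_fw.c) may differ in detail.

static void sym_fill_data_sketch(u32 *in, u32 *out)
{
	int i;

	for (i = 0; i < SYM_CONF_MAX_SG; i++) {
		*in++  = SCR_CHMOV_TBL ^ SCR_DATA_IN;	/* one move per SG slot */
		*in++  = offsetof(struct sym_dsb, data[i]);
		*out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT;
		*out++ = offsetof(struct sym_dsb, data[i]);
	}
}
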
1209
1210static struct SYM_FWB_SCR SYM_FWB_SCR = {
1211/*--------------------------< START64 >--------------------------*/ {
1212 /*
1213 * SCRIPT entry point for the 895A, 896 and 1010.
1214 * For now, there is no specific stuff for those
1215 * chips at this point, but this may come.
1216 */
1217 SCR_JUMP,
1218 PADDR_A (init),
1219}/*-------------------------< NO_DATA >--------------------------*/,{
1220 SCR_JUMP,
1221 PADDR_B (data_ovrun),
1222}/*-------------------------< SEL_FOR_ABORT >--------------------*/,{
1223 /*
1224	 * The C code jumps here if we have some target
1225	 * to reset or some disconnected job to abort.
1226	 * Since error recovery is serious business, we
1227	 * will really reset the SCSI BUS in case a SCSI
1228	 * interrupt occurs in this path.
1229 */
1230#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
1231 /*
1232 * Set initiator mode.
1233 */
1234 SCR_CLR (SCR_TRG),
1235 0,
1236#endif
1237 /*
1238 * And try to select this target.
1239 */
1240 SCR_SEL_TBL_ATN ^ offsetof (struct sym_hcb, abrt_sel),
1241 PADDR_A (reselect),
1242 /*
1243 * Wait for the selection to complete or
1244 * the selection to time out.
1245 */
1246 SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
1247 -8,
1248 /*
1249 * Call the C code.
1250 */
1251 SCR_INT,
1252 SIR_TARGET_SELECTED,
1253 /*
1254 * The C code should let us continue here.
1255 * Send the 'kiss of death' message.
1256 * We expect an immediate disconnect once
1257 * the target has eaten the message.
1258 */
1259 SCR_REG_REG (scntl2, SCR_AND, 0x7f),
1260 0,
1261 SCR_MOVE_TBL ^ SCR_MSG_OUT,
1262 offsetof (struct sym_hcb, abrt_tbl),
1263 SCR_CLR (SCR_ACK|SCR_ATN),
1264 0,
1265 SCR_WAIT_DISC,
1266 0,
1267 /*
1268 * Tell the C code that we are done.
1269 */
1270 SCR_INT,
1271 SIR_ABORT_SENT,
1272}/*-------------------------< SEL_FOR_ABORT_1 >------------------*/,{
1273 /*
1274 * Jump at scheduler.
1275 */
1276 SCR_JUMP,
1277 PADDR_A (start),
1278}/*-------------------------< MSG_IN_ETC >-----------------------*/,{
1279 /*
1280 * If it is an EXTENDED (variable size message)
1281 * Handle it.
1282 */
1283 SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
1284 PADDR_B (msg_extended),
1285 /*
1286 * Let the C code handle any other
1287 * 1 byte message.
1288 */
1289 SCR_JUMP ^ IFTRUE (MASK (0x00, 0xf0)),
1290 PADDR_B (msg_received),
1291 SCR_JUMP ^ IFTRUE (MASK (0x10, 0xf0)),
1292 PADDR_B (msg_received),
1293 /*
1294	 * We do not handle 2-byte messages from SCRIPTS.
1295	 * So, let the C code deal with these too.
1296 */
1297 SCR_JUMP ^ IFFALSE (MASK (0x20, 0xf0)),
1298 PADDR_B (msg_weird_seen),
1299 SCR_CLR (SCR_ACK),
1300 0,
1301 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
1302 HADDR_1 (msgin[1]),
1303}/*-------------------------< MSG_RECEIVED >---------------------*/,{
1304 SCR_LOAD_REL (scratcha, 4), /* DUMMY READ */
1305 0,
1306 SCR_INT,
1307 SIR_MSG_RECEIVED,
1308}/*-------------------------< MSG_WEIRD_SEEN >-------------------*/,{
1309 SCR_LOAD_REL (scratcha, 4), /* DUMMY READ */
1310 0,
1311 SCR_INT,
1312 SIR_MSG_WEIRD,
1313}/*-------------------------< MSG_EXTENDED >---------------------*/,{
1314 /*
1315 * Clear ACK and get the next byte
1316 * assumed to be the message length.
1317 */
1318 SCR_CLR (SCR_ACK),
1319 0,
1320 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
1321 HADDR_1 (msgin[1]),
1322 /*
1323	 * Try to catch some unlikely situations, such as a
1324	 * zero length or a length that is too large.
1325 */
1326 SCR_JUMP ^ IFTRUE (DATA (0)),
1327 PADDR_B (msg_weird_seen),
1328 SCR_TO_REG (scratcha),
1329 0,
1330 SCR_REG_REG (sfbr, SCR_ADD, (256-8)),
1331 0,
1332 SCR_JUMP ^ IFTRUE (CARRYSET),
1333 PADDR_B (msg_weird_seen),
1334 /*
1335	 * We do not handle extended messages from SCRIPTS.
1336	 * Read the amount of data corresponding to the
1337 * message length and call the C code.
1338 */
1339 SCR_STORE_REL (scratcha, 1),
1340 offsetof (struct sym_dsb, smsg_ext.size),
1341 SCR_CLR (SCR_ACK),
1342 0,
1343 SCR_MOVE_TBL ^ SCR_MSG_IN,
1344 offsetof (struct sym_dsb, smsg_ext),
1345 SCR_JUMP,
1346 PADDR_B (msg_received),
1347}/*-------------------------< MSG_BAD >--------------------------*/,{
1348 /*
1349 * unimplemented message - reject it.
1350 */
1351 SCR_INT,
1352 SIR_REJECT_TO_SEND,
1353 SCR_SET (SCR_ATN),
1354 0,
1355 SCR_JUMP,
1356 PADDR_A (clrack),
1357}/*-------------------------< MSG_WEIRD >------------------------*/,{
1358 /*
1359 * weird message received
1360 * ignore all MSG IN phases and reject it.
1361 */
1362 SCR_INT,
1363 SIR_REJECT_TO_SEND,
1364 SCR_SET (SCR_ATN),
1365 0,
1366}/*-------------------------< MSG_WEIRD1 >-----------------------*/,{
1367 SCR_CLR (SCR_ACK),
1368 0,
1369 SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
1370 PADDR_A (dispatch),
1371 SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
1372 HADDR_1 (scratch),
1373 SCR_JUMP,
1374 PADDR_B (msg_weird1),
1375}/*-------------------------< WDTR_RESP >------------------------*/,{
1376 /*
1377 * let the target fetch our answer.
1378 */
1379 SCR_SET (SCR_ATN),
1380 0,
1381 SCR_CLR (SCR_ACK),
1382 0,
1383 SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
1384 PADDR_B (nego_bad_phase),
1385}/*-------------------------< SEND_WDTR >------------------------*/,{
1386 /*
1387 * Send the M_X_WIDE_REQ
1388 */
1389 SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
1390 HADDR_1 (msgout),
1391 SCR_JUMP,
1392 PADDR_B (msg_out_done),
1393}/*-------------------------< SDTR_RESP >------------------------*/,{
1394 /*
1395 * let the target fetch our answer.
1396 */
1397 SCR_SET (SCR_ATN),
1398 0,
1399 SCR_CLR (SCR_ACK),
1400 0,
1401 SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
1402 PADDR_B (nego_bad_phase),
1403}/*-------------------------< SEND_SDTR >------------------------*/,{
1404 /*
1405 * Send the M_X_SYNC_REQ
1406 */
1407 SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
1408 HADDR_1 (msgout),
1409 SCR_JUMP,
1410 PADDR_B (msg_out_done),
1411}/*-------------------------< PPR_RESP >-------------------------*/,{
1412 /*
1413 * let the target fetch our answer.
1414 */
1415 SCR_SET (SCR_ATN),
1416 0,
1417 SCR_CLR (SCR_ACK),
1418 0,
1419 SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
1420 PADDR_B (nego_bad_phase),
1421}/*-------------------------< SEND_PPR >-------------------------*/,{
1422 /*
1423 * Send the M_X_PPR_REQ
1424 */
1425 SCR_MOVE_ABS (8) ^ SCR_MSG_OUT,
1426 HADDR_1 (msgout),
1427 SCR_JUMP,
1428 PADDR_B (msg_out_done),
1429}/*-------------------------< NEGO_BAD_PHASE >-------------------*/,{
1430 SCR_INT,
1431 SIR_NEGO_PROTO,
1432 SCR_JUMP,
1433 PADDR_A (dispatch),
1434}/*-------------------------< MSG_OUT >--------------------------*/,{
1435 /*
1436 * The target requests a message.
1437	 * We do not send messages that may
1438 * require the device to go to bus free.
1439 */
1440 SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
1441 HADDR_1 (msgout),
1442 /*
1443 * ... wait for the next phase
1444 * if it's a message out, send it again, ...
1445 */
1446 SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
1447 PADDR_B (msg_out),
1448}/*-------------------------< MSG_OUT_DONE >---------------------*/,{
1449 /*
1450 * Let the C code be aware of the
1451 * sent message and clear the message.
1452 */
1453 SCR_INT,
1454 SIR_MSG_OUT_DONE,
1455 /*
1456 * ... and process the next phase
1457 */
1458 SCR_JUMP,
1459 PADDR_A (dispatch),
1460}/*-------------------------< DATA_OVRUN >-----------------------*/,{
1461 /*
1462 * Use scratcha to count the extra bytes.
1463 */
1464 SCR_LOAD_ABS (scratcha, 4),
1465 PADDR_B (zero),
1466}/*-------------------------< DATA_OVRUN1 >----------------------*/,{
1467 /*
1468 * The target may want to transfer too much data.
1469 *
1470 * If phase is DATA OUT write 1 byte and count it.
1471 */
1472 SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
1473 16,
1474 SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
1475 HADDR_1 (scratch),
1476 SCR_JUMP,
1477 PADDR_B (data_ovrun2),
1478 /*
1479 * If WSR is set, clear this condition, and
1480 * count this byte.
1481 */
1482 SCR_FROM_REG (scntl2),
1483 0,
1484 SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
1485 16,
1486 SCR_REG_REG (scntl2, SCR_OR, WSR),
1487 0,
1488 SCR_JUMP,
1489 PADDR_B (data_ovrun2),
1490 /*
1491	 * Finally, check against the DATA IN phase.
1492	 * If the phase is not DATA IN, signal a data
1493	 * overrun to the C code and jump to the dispatcher.
1494	 * Otherwise read 1 byte and count it.
1495 */
1496 SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)),
1497 16,
1498 SCR_INT,
1499 SIR_DATA_OVERRUN,
1500 SCR_JUMP,
1501 PADDR_A (dispatch),
1502 SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
1503 HADDR_1 (scratch),
1504}/*-------------------------< DATA_OVRUN2 >----------------------*/,{
1505 /*
1506 * Count this byte.
1507	 * This allows us to return a negative
1508	 * residual to the user.
1509 */
1510 SCR_REG_REG (scratcha, SCR_ADD, 0x01),
1511 0,
1512 SCR_REG_REG (scratcha1, SCR_ADDC, 0),
1513 0,
1514 SCR_REG_REG (scratcha2, SCR_ADDC, 0),
1515 0,
1516 /*
1517 * .. and repeat as required.
1518 */
1519 SCR_JUMP,
1520 PADDR_B (data_ovrun1),
1521}/*-------------------------< ABORT_RESEL >----------------------*/,{
1522 SCR_SET (SCR_ATN),
1523 0,
1524 SCR_CLR (SCR_ACK),
1525 0,
1526 /*
1527	 * send the abort / abort tag / reset message
1528 * we expect an immediate disconnect
1529 */
1530 SCR_REG_REG (scntl2, SCR_AND, 0x7f),
1531 0,
1532 SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
1533 HADDR_1 (msgout),
1534 SCR_CLR (SCR_ACK|SCR_ATN),
1535 0,
1536 SCR_WAIT_DISC,
1537 0,
1538 SCR_INT,
1539 SIR_RESEL_ABORTED,
1540 SCR_JUMP,
1541 PADDR_A (start),
1542}/*-------------------------< RESEND_IDENT >---------------------*/,{
1543 /*
1544 * The target stays in MSG OUT phase after having acked
1545 * Identify [+ Tag [+ Extended message ]]. Targets shall
1546 * behave this way on parity error.
1547	 * We must send it all the messages again.
1548 */
1549 SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */
1550	0,			/* 1st ACK = 90 ns. Hope the chip isn't too fast */
1551 SCR_JUMP,
1552 PADDR_A (send_ident),
1553}/*-------------------------< IDENT_BREAK >----------------------*/,{
1554 SCR_CLR (SCR_ATN),
1555 0,
1556 SCR_JUMP,
1557 PADDR_A (select2),
1558}/*-------------------------< IDENT_BREAK_ATN >------------------*/,{
1559 SCR_SET (SCR_ATN),
1560 0,
1561 SCR_JUMP,
1562 PADDR_A (select2),
1563}/*-------------------------< SDATA_IN >-------------------------*/,{
1564 SCR_CHMOV_TBL ^ SCR_DATA_IN,
1565 offsetof (struct sym_dsb, sense),
1566 SCR_CALL,
1567 PADDR_A (datai_done),
1568 SCR_JUMP,
1569 PADDR_B (data_ovrun),
1570}/*-------------------------< RESEL_BAD_LUN >--------------------*/,{
1571 /*
1572 * Message is an IDENTIFY, but lun is unknown.
1573 * Signal problem to C code for logging the event.
1574 * Send a M_ABORT to clear all pending tasks.
1575 */
1576 SCR_INT,
1577 SIR_RESEL_BAD_LUN,
1578 SCR_JUMP,
1579 PADDR_B (abort_resel),
1580}/*-------------------------< BAD_I_T_L >------------------------*/,{
1581 /*
1582	 * We do not have a task for that I_T_L.
1583 * Signal problem to C code for logging the event.
1584 * Send a M_ABORT message.
1585 */
1586 SCR_INT,
1587 SIR_RESEL_BAD_I_T_L,
1588 SCR_JUMP,
1589 PADDR_B (abort_resel),
1590}/*-------------------------< BAD_I_T_L_Q >----------------------*/,{
1591 /*
1592	 * We do not have a task that matches the tag.
1593 * Signal problem to C code for logging the event.
1594 * Send a M_ABORTTAG message.
1595 */
1596 SCR_INT,
1597 SIR_RESEL_BAD_I_T_L_Q,
1598 SCR_JUMP,
1599 PADDR_B (abort_resel),
1600}/*-------------------------< BAD_STATUS >-----------------------*/,{
1601 /*
1602 * Anything different from INTERMEDIATE
1603 * CONDITION MET should be a bad SCSI status,
1604 * given that GOOD status has already been tested.
1605 * Call the C code.
1606 */
1607 SCR_LOAD_ABS (scratcha, 4),
1608 PADDR_B (startpos),
1609 SCR_INT ^ IFFALSE (DATA (S_COND_MET)),
1610 SIR_BAD_SCSI_STATUS,
1611 SCR_RETURN,
1612 0,
1613}/*-------------------------< PM_HANDLE >------------------------*/,{
1614 /*
1615 * Phase mismatch handling.
1616 *
1617 * Since we have to deal with 2 SCSI data pointers
1618 * (current and saved), we need at least 2 contexts.
1619 * Each context (pm0 and pm1) has a saved area, a
1620 * SAVE mini-script and a DATA phase mini-script.
1621 */
1622 /*
1623 * Get the PM handling flags.
1624 */
1625 SCR_FROM_REG (HF_REG),
1626 0,
1627 /*
1628	 * If no flags are set (the 1st PM, for example), avoid
1629	 * all the heavy flag testing below.
1630 * This makes the normal case a bit faster.
1631 */
1632 SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED))),
1633 PADDR_B (pm_handle1),
1634 /*
1635 * If we received a SAVE DP, switch to the
1636 * other PM context since the savep may point
1637 * to the current PM context.
1638 */
1639 SCR_JUMPR ^ IFFALSE (MASK (HF_DP_SAVED, HF_DP_SAVED)),
1640 8,
1641 SCR_REG_REG (sfbr, SCR_XOR, HF_ACT_PM),
1642 0,
1643 /*
1644	 * If we have been interrupted in a PM DATA mini-script,
1645 * we take the return address from the corresponding
1646 * saved area.
1647	 * This ensures the return address always points to the
1648 * main DATA script for this transfer.
1649 */
1650 SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1))),
1651 PADDR_B (pm_handle1),
1652 SCR_JUMPR ^ IFFALSE (MASK (HF_IN_PM0, HF_IN_PM0)),
1653 16,
1654 SCR_LOAD_REL (ia, 4),
1655 offsetof(struct sym_ccb, phys.pm0.ret),
1656 SCR_JUMP,
1657 PADDR_B (pm_save),
1658 SCR_LOAD_REL (ia, 4),
1659 offsetof(struct sym_ccb, phys.pm1.ret),
1660 SCR_JUMP,
1661 PADDR_B (pm_save),
1662}/*-------------------------< PM_HANDLE1 >-----------------------*/,{
1663 /*
1664 * Normal case.
1665 * Update the return address so that it
1666 * will point after the interrupted MOVE.
1667 */
1668 SCR_REG_REG (ia, SCR_ADD, 8),
1669 0,
1670 SCR_REG_REG (ia1, SCR_ADDC, 0),
1671 0,
1672}/*-------------------------< PM_SAVE >--------------------------*/,{
1673 /*
1674 * Clear all the flags that told us if we were
1675 * interrupted in a PM DATA mini-script and/or
1676 * we received a SAVE DP.
1677 */
1678 SCR_SFBR_REG (HF_REG, SCR_AND, (~(HF_IN_PM0|HF_IN_PM1|HF_DP_SAVED))),
1679 0,
1680 /*
1681 * Choose the current PM context.
1682 */
1683 SCR_JUMP ^ IFTRUE (MASK (HF_ACT_PM, HF_ACT_PM)),
1684 PADDR_B (pm1_save),
1685}/*-------------------------< PM0_SAVE >-------------------------*/,{
1686 SCR_STORE_REL (ia, 4),
1687 offsetof(struct sym_ccb, phys.pm0.ret),
1688 /*
1689	 * If the WSR bit is set, UA and/or RBC may
1690	 * have to be changed, depending on whether the device
1691	 * wants to ignore this residue or not.
1692 */
1693 SCR_FROM_REG (scntl2),
1694 0,
1695 SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
1696 PADDR_B (pm_wsr_handle),
1697 /*
1698 * Save the remaining byte count, the updated
1699 * address and the return address.
1700 */
1701 SCR_STORE_REL (rbc, 4),
1702 offsetof(struct sym_ccb, phys.pm0.sg.size),
1703 SCR_STORE_REL (ua, 4),
1704 offsetof(struct sym_ccb, phys.pm0.sg.addr),
1705 /*
1706 * Set the current pointer at the PM0 DATA mini-script.
1707 */
1708 SCR_LOAD_ABS (ia, 4),
1709 PADDR_B (pm0_data_addr),
1710}/*-------------------------< PM_SAVE_END >----------------------*/,{
1711 SCR_STORE_REL (ia, 4),
1712 offsetof(struct sym_ccb, phys.head.lastp),
1713 SCR_JUMP,
1714 PADDR_A (dispatch),
1715}/*-------------------------< PM1_SAVE >-------------------------*/,{
1716 SCR_STORE_REL (ia, 4),
1717 offsetof(struct sym_ccb, phys.pm1.ret),
1718 /*
1719	 * If the WSR bit is set, UA and/or RBC may
1720	 * have to be changed, depending on whether the device
1721	 * wants to ignore this residue or not.
1722 */
1723 SCR_FROM_REG (scntl2),
1724 0,
1725 SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
1726 PADDR_B (pm_wsr_handle),
1727 /*
1728 * Save the remaining byte count, the updated
1729 * address and the return address.
1730 */
1731 SCR_STORE_REL (rbc, 4),
1732 offsetof(struct sym_ccb, phys.pm1.sg.size),
1733 SCR_STORE_REL (ua, 4),
1734 offsetof(struct sym_ccb, phys.pm1.sg.addr),
1735 /*
1736 * Set the current pointer at the PM1 DATA mini-script.
1737 */
1738 SCR_LOAD_ABS (ia, 4),
1739 PADDR_B (pm1_data_addr),
1740 SCR_JUMP,
1741 PADDR_B (pm_save_end),
1742}/*-------------------------< PM_WSR_HANDLE >--------------------*/,{
1743 /*
1744 * Phase mismatch handling from SCRIPT with WSR set.
1745 * Such a condition can occur if the chip wants to
1746 * execute a CHMOV(size > 1) when the WSR bit is
1747 * set and the target changes PHASE.
1748 *
1749 * We must move the residual byte to memory.
1750 *
1751 * UA contains bit 0..31 of the address to
1752 * move the residual byte.
1753 * Move it to the table indirect.
1754 */
1755 SCR_STORE_REL (ua, 4),
1756 offsetof (struct sym_ccb, phys.wresid.addr),
1757 /*
1758 * Increment UA (move address to next position).
1759 */
1760 SCR_REG_REG (ua, SCR_ADD, 1),
1761 0,
1762 SCR_REG_REG (ua1, SCR_ADDC, 0),
1763 0,
1764 SCR_REG_REG (ua2, SCR_ADDC, 0),
1765 0,
1766 SCR_REG_REG (ua3, SCR_ADDC, 0),
1767 0,
1768 /*
1769 * Compute SCRATCHA as:
1770 * - size to transfer = 1 byte.
1771 * - bit 24..31 = high address bit [32...39].
1772 */
1773 SCR_LOAD_ABS (scratcha, 4),
1774 PADDR_B (zero),
1775 SCR_REG_REG (scratcha, SCR_OR, 1),
1776 0,
1777 SCR_FROM_REG (rbc3),
1778 0,
1779 SCR_TO_REG (scratcha3),
1780 0,
1781 /*
1782 * Move this value to the table indirect.
1783 */
1784 SCR_STORE_REL (scratcha, 4),
1785 offsetof (struct sym_ccb, phys.wresid.size),
1786 /*
1787 * Wait for a valid phase.
1788 * While testing with bogus QUANTUM drives, the C1010
1789 * sometimes raised a spurious phase mismatch with
1790 * WSR and the CHMOV(1) triggered another PM.
1791	 * Waiting explicitly for the PHASE seemed to avoid
1792 * the nested phase mismatch. Btw, this didn't happen
1793 * using my IBM drives.
1794 */
1795 SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_IN)),
1796 0,
1797 /*
1798 * Perform the move of the residual byte.
1799 */
1800 SCR_CHMOV_TBL ^ SCR_DATA_IN,
1801 offsetof (struct sym_ccb, phys.wresid),
1802 /*
1803 * We can now handle the phase mismatch with UA fixed.
1804 * RBC[0..23]=0 is a special case that does not require
1805 * a PM context. The C code also checks against this.
1806 */
1807 SCR_FROM_REG (rbc),
1808 0,
1809 SCR_RETURN ^ IFFALSE (DATA (0)),
1810 0,
1811 SCR_FROM_REG (rbc1),
1812 0,
1813 SCR_RETURN ^ IFFALSE (DATA (0)),
1814 0,
1815 SCR_FROM_REG (rbc2),
1816 0,
1817 SCR_RETURN ^ IFFALSE (DATA (0)),
1818 0,
1819 /*
1820 * RBC[0..23]=0.
1821	 * Not only do we not need a PM context, but this would
1822 * lead to a bogus CHMOV(0). This condition means that
1823 * the residual was the last byte to move from this CHMOV.
1824 * So, we just have to move the current data script pointer
1825 * (i.e. TEMP) to the SCRIPTS address following the
1826 * interrupted CHMOV and jump to dispatcher.
1827 * IA contains the data pointer to save.
1828 */
1829 SCR_JUMP,
1830 PADDR_B (pm_save_end),
1831}/*-------------------------< WSR_MA_HELPER >--------------------*/,{
1832 /*
1833 * Helper for the C code when WSR bit is set.
1834 * Perform the move of the residual byte.
1835 */
1836 SCR_CHMOV_TBL ^ SCR_DATA_IN,
1837 offsetof (struct sym_ccb, phys.wresid),
1838 SCR_JUMP,
1839 PADDR_A (dispatch),
1840
1841#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
1842}/*-------------------------< DATA_IO >--------------------------*/,{
1843 /*
1844 * We jump here if the data direction was unknown at the
1845 * time we had to queue the command to the scripts processor.
1846	 * Pointers had been set as follows in this situation:
1847 * savep --> DATA_IO
1848 * lastp --> start pointer when DATA_IN
1849 * wlastp --> start pointer when DATA_OUT
1850 * This script sets savep and lastp according to the
1851 * direction chosen by the target.
1852 */
1853 SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
1854 PADDR_B (data_io_out),
1855}/*-------------------------< DATA_IO_IN >-----------------------*/,{
1856 /*
1857 * Direction is DATA IN.
1858 */
1859 SCR_LOAD_REL (scratcha, 4),
1860 offsetof (struct sym_ccb, phys.head.lastp),
1861}/*-------------------------< DATA_IO_COM >----------------------*/,{
1862 SCR_STORE_REL (scratcha, 4),
1863 offsetof (struct sym_ccb, phys.head.savep),
1864
1865 /*
1866 * Jump to the SCRIPTS according to actual direction.
1867 */
1868 SCR_LOAD_REL (temp, 4),
1869 offsetof (struct sym_ccb, phys.head.savep),
1870 SCR_RETURN,
1871 0,
1872}/*-------------------------< DATA_IO_OUT >----------------------*/,{
1873 /*
1874 * Direction is DATA OUT.
1875 */
1876 SCR_REG_REG (HF_REG, SCR_AND, (~HF_DATA_IN)),
1877 0,
1878 SCR_LOAD_REL (scratcha, 4),
1879 offsetof (struct sym_ccb, phys.head.wlastp),
1880 SCR_STORE_REL (scratcha, 4),
1881 offsetof (struct sym_ccb, phys.head.lastp),
1882 SCR_JUMP,
1883 PADDR_B(data_io_com),
1884#endif /* SYM_OPT_HANDLE_DIR_UNKNOWN */
1885
1886}/*-------------------------< ZERO >-----------------------------*/,{
1887 SCR_DATA_ZERO,
1888}/*-------------------------< SCRATCH >--------------------------*/,{
1889 SCR_DATA_ZERO,
1890}/*-------------------------< PM0_DATA_ADDR >--------------------*/,{
1891 SCR_DATA_ZERO,
1892}/*-------------------------< PM1_DATA_ADDR >--------------------*/,{
1893 SCR_DATA_ZERO,
1894}/*-------------------------< DONE_POS >-------------------------*/,{
1895 SCR_DATA_ZERO,
1896}/*-------------------------< STARTPOS >-------------------------*/,{
1897 SCR_DATA_ZERO,
1898}/*-------------------------< TARGTBL >--------------------------*/,{
1899 SCR_DATA_ZERO,
1900}/*-------------------------<>-----------------------------------*/
1901};
1902
1903static struct SYM_FWZ_SCR SYM_FWZ_SCR = {
1904 /*-------------------------< SNOOPTEST >------------------------*/{
1905 /*
1906 * Read the variable from memory.
1907 */
1908 SCR_LOAD_REL (scratcha, 4),
1909 offsetof(struct sym_hcb, scratch),
1910 /*
1911 * Write the variable to memory.
1912 */
1913 SCR_STORE_REL (temp, 4),
1914 offsetof(struct sym_hcb, scratch),
1915 /*
1916 * Read back the variable from memory.
1917 */
1918 SCR_LOAD_REL (temp, 4),
1919 offsetof(struct sym_hcb, scratch),
1920}/*-------------------------< SNOOPEND >-------------------------*/,{
1921 /*
1922 * And stop.
1923 */
1924 SCR_INT,
1925 99,
1926}/*-------------------------<>-----------------------------------*/
1927};
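
The SNOOPTEST fragment above only pushes one word through the chip; deciding whether the result proves cache-coherent DMA is left to the C code. The sketch below captures the idea under illustrative names (the driver's own check, in sym_hipd.c, may compare more state): the value the chip read must match what the host wrote, and the value the host reads back must match what the chip stored.

static int sym_snoop_ok_sketch(u32 host_wr, u32 chip_rd, u32 chip_wr, u32 host_rd)
{
	/* chip saw the host's write, and the host saw the chip's write */
	return (chip_rd == host_wr) && (host_rd == chip_wr);
}
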
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
new file mode 100644
index 000000000000..a1dff6d437bc
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -0,0 +1,2196 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx>
7 *
8 * This driver is derived from the Linux sym53c8xx driver.
9 * Copyright (C) 1998-2000 Gerard Roudier
10 *
11 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
12 * a port of the FreeBSD ncr driver to Linux-1.2.13.
13 *
14 * The original ncr driver has been written for 386bsd and FreeBSD by
15 * Wolfgang Stanglmeier <wolf@cologne.de>
16 * Stefan Esser <se@mi.Uni-Koeln.de>
17 * Copyright (C) 1994 Wolfgang Stanglmeier
18 *
19 * Other major contributions:
20 *
21 * NVRAM detection and reading.
22 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
23 *
24 *-----------------------------------------------------------------------------
25 *
26 * This program is free software; you can redistribute it and/or modify
27 * it under the terms of the GNU General Public License as published by
28 * the Free Software Foundation; either version 2 of the License, or
29 * (at your option) any later version.
30 *
31 * This program is distributed in the hope that it will be useful,
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34 * GNU General Public License for more details.
35 *
36 * You should have received a copy of the GNU General Public License
37 * along with this program; if not, write to the Free Software
38 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
39 */
40#include <linux/ctype.h>
41#include <linux/init.h>
42#include <linux/interrupt.h>
43#include <linux/module.h>
44#include <linux/moduleparam.h>
45#include <linux/spinlock.h>
46#include <scsi/scsi.h>
47#include <scsi/scsi_tcq.h>
48#include <scsi/scsi_device.h>
49#include <scsi/scsi_transport.h>
50
51#include "sym_glue.h"
52#include "sym_nvram.h"
53
54#define NAME53C "sym53c"
55#define NAME53C8XX "sym53c8xx"
56
57/* SPARC just has to be different ... */
58#ifdef __sparc__
59#define IRQ_FMT "%s"
60#define IRQ_PRM(x) __irq_itoa(x)
61#else
62#define IRQ_FMT "%d"
63#define IRQ_PRM(x) (x)
64#endif
65
66struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP;
67unsigned int sym_debug_flags = 0;
68
69static char *excl_string;
70static char *safe_string;
71module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0);
72module_param_string(tag_ctrl, sym_driver_setup.tag_ctrl, 100, 0);
73module_param_named(burst, sym_driver_setup.burst_order, byte, 0);
74module_param_named(led, sym_driver_setup.scsi_led, byte, 0);
75module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0);
76module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0);
77module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0);
78module_param_named(hostid, sym_driver_setup.host_id, byte, 0);
79module_param_named(verb, sym_driver_setup.verbose, byte, 0);
80module_param_named(debug, sym_debug_flags, uint, 0);
81module_param_named(settle, sym_driver_setup.settle_delay, byte, 0);
82module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0);
83module_param_named(excl, excl_string, charp, 0);
84module_param_named(safe, safe_string, charp, 0);
85
86MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default");
87MODULE_PARM_DESC(tag_ctrl, "More detailed control over tags per LUN");
88MODULE_PARM_DESC(burst, "Maximum burst. 0 to disable, 255 to read from registers");
89MODULE_PARM_DESC(led, "Set to 1 to enable LED support");
90MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3");
91MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole");
92MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error");
93MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters");
94MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive");
95MODULE_PARM_DESC(debug, "Set bits to enable debugging");
96MODULE_PARM_DESC(settle, "Settle delay in seconds. Default 3");
97MODULE_PARM_DESC(nvram, "Option currently not used");
98MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached");
99MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\"");
100
101MODULE_LICENSE("GPL");
102MODULE_VERSION(SYM_VERSION);
103MODULE_AUTHOR("Matthew Wilcox <matthew@wil.cx>");
104MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters");
105
106static void sym2_setup_params(void)
107{
108 char *p = excl_string;
109 int xi = 0;
110
111 while (p && (xi < 8)) {
112 char *next_p;
113 int val = (int) simple_strtoul(p, &next_p, 0);
114 sym_driver_setup.excludes[xi++] = val;
115 p = next_p;
116 }
117
118 if (safe_string) {
119 if (*safe_string == 'y') {
120 sym_driver_setup.max_tag = 0;
121 sym_driver_setup.burst_order = 0;
122 sym_driver_setup.scsi_led = 0;
123 sym_driver_setup.scsi_diff = 1;
124 sym_driver_setup.irq_mode = 0;
125 sym_driver_setup.scsi_bus_check = 2;
126 sym_driver_setup.host_id = 7;
127 sym_driver_setup.verbose = 2;
128 sym_driver_setup.settle_delay = 10;
129 sym_driver_setup.use_nvram = 1;
130 } else if (*safe_string != 'n') {
131			printk(KERN_WARNING NAME53C8XX ": Ignoring parameter %s"
132				" passed to safe option\n", safe_string);
133 }
134 }
135}
136
137/*
138 * We used to try to deal with 64-bit BARs here, but don't any more.
139 * There are many parts of this driver which would need to be modified
140 * to handle a 64-bit base address, including scripts. I'm uncomfortable
141 * with making those changes when I have no way of testing it, so I'm
142 * just going to disable it.
143 *
144 * Note that some machines (eg HP rx8620 and Superdome) have bus addresses
145 * below 4GB and physical addresses above 4GB. These will continue to work.
146 */
147static int __devinit
148pci_get_base_address(struct pci_dev *pdev, int index, unsigned long *basep)
149{
150 u32 tmp;
151 unsigned long base;
152#define PCI_BAR_OFFSET(index) (PCI_BASE_ADDRESS_0 + (index<<2))
153
154 pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp);
155 base = tmp;
156 if ((tmp & 0x7) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
157 pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp);
158 if (tmp > 0)
159 dev_err(&pdev->dev,
160 "BAR %d is 64-bit, disabling\n", index - 1);
161 base = 0;
162 }
163
164 if ((base & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
165 base &= PCI_BASE_ADDRESS_IO_MASK;
166 } else {
167 base &= PCI_BASE_ADDRESS_MEM_MASK;
168 }
169
170 *basep = base;
171 return index;
172#undef PCI_BAR_OFFSET
173}
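/*
 * Illustrative usage sketch (mirrors sym_init_device() later in this file):
 * the return value is the index of the next BAR, so a 64-bit BAR consumes
 * two configuration dwords:
 *
 *	unsigned long mmio_base, ram_base;
 *	int i;
 *
 *	i = pci_get_base_address(pdev, 1, &mmio_base);
 *	pci_get_base_address(pdev, i, &ram_base);
 */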
174
175static struct scsi_transport_template *sym2_transport_template = NULL;
176
177/*
178 * Used by the eh thread to wait for command completion.
179 * It is allocated on the eh thread stack.
180 */
181struct sym_eh_wait {
182 struct completion done;
183 struct timer_list timer;
184 void (*old_done)(struct scsi_cmnd *);
185 int to_do;
186 int timed_out;
187};
188
189/*
190 * Driver private area in the SCSI command structure.
191 */
192struct sym_ucmd { /* Override the SCSI pointer structure */
193 dma_addr_t data_mapping;
194 u_char data_mapped;
195 struct sym_eh_wait *eh_wait;
196};
197
198#define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp))
199#define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)
200
201static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
202{
203 int dma_dir = cmd->sc_data_direction;
204
205 switch(SYM_UCMD_PTR(cmd)->data_mapped) {
206 case 2:
207 pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
208 break;
209 case 1:
210 pci_unmap_single(pdev, SYM_UCMD_PTR(cmd)->data_mapping,
211 cmd->request_bufflen, dma_dir);
212 break;
213 }
214 SYM_UCMD_PTR(cmd)->data_mapped = 0;
215}
216
217static dma_addr_t __map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
218{
219 dma_addr_t mapping;
220 int dma_dir = cmd->sc_data_direction;
221
222 mapping = pci_map_single(pdev, cmd->request_buffer,
223 cmd->request_bufflen, dma_dir);
224 if (mapping) {
225 SYM_UCMD_PTR(cmd)->data_mapped = 1;
226 SYM_UCMD_PTR(cmd)->data_mapping = mapping;
227 }
228
229 return mapping;
230}
231
232static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
233{
234 int use_sg;
235 int dma_dir = cmd->sc_data_direction;
236
237 use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
238 if (use_sg > 0) {
239 SYM_UCMD_PTR(cmd)->data_mapped = 2;
240 SYM_UCMD_PTR(cmd)->data_mapping = use_sg;
241 }
242
243 return use_sg;
244}
245
246#define unmap_scsi_data(np, cmd) \
247 __unmap_scsi_data(np->s.device, cmd)
248#define map_scsi_single_data(np, cmd) \
249 __map_scsi_single_data(np->s.device, cmd)
250#define map_scsi_sg_data(np, cmd) \
251 __map_scsi_sg_data(np->s.device, cmd)
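/*
 * Note (added for clarity): data_mapped in struct sym_ucmd records which
 * mapping was set up (0 = nothing, 1 = single buffer, 2 = scatter/gather
 * list), so that __unmap_scsi_data() can undo exactly what was done.
 */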
252/*
253 * Complete a pending CAM CCB.
254 */
255void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd)
256{
257 unmap_scsi_data(np, cmd);
258 cmd->scsi_done(cmd);
259}
260
261static void sym_xpt_done2(struct sym_hcb *np, struct scsi_cmnd *cmd, int cam_status)
262{
263 sym_set_cam_status(cmd, cam_status);
264 sym_xpt_done(np, cmd);
265}
266
267
268/*
269 * Tell the SCSI layer about a BUS RESET.
270 */
271void sym_xpt_async_bus_reset(struct sym_hcb *np)
272{
273 printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np));
274 np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ;
275 np->s.settle_time_valid = 1;
276 if (sym_verbose >= 2)
277 printf_info("%s: command processing suspended for %d seconds\n",
278 sym_name(np), sym_driver_setup.settle_delay);
279}
280
281/*
282 * Tell the SCSI layer about a BUS DEVICE RESET message sent.
283 */
284void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target)
285{
286 printf_notice("%s: TARGET %d has been reset.\n", sym_name(np), target);
287}
288
289/*
290 * Choose the most appropriate CAM status if
291 * the IO encountered an extended error.
292 */
293static int sym_xerr_cam_status(int cam_status, int x_status)
294{
295 if (x_status) {
296 if (x_status & XE_PARITY_ERR)
297 cam_status = DID_PARITY;
298 else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
299 cam_status = DID_ERROR;
300 else if (x_status & XE_BAD_PHASE)
301 cam_status = DID_ERROR;
302 else
303 cam_status = DID_ERROR;
304 }
305 return cam_status;
306}
307
308/*
309 * Build CAM result for a failed or auto-sensed IO.
310 */
311void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
312{
313 struct scsi_cmnd *cmd = cp->cmd;
314 u_int cam_status, scsi_status, drv_status;
315
316 drv_status = 0;
317 cam_status = DID_OK;
318 scsi_status = cp->ssss_status;
319
320 if (cp->host_flags & HF_SENSE) {
321 scsi_status = cp->sv_scsi_status;
322 resid = cp->sv_resid;
323 if (sym_verbose && cp->sv_xerr_status)
324 sym_print_xerr(cmd, cp->sv_xerr_status);
325 if (cp->host_status == HS_COMPLETE &&
326 cp->ssss_status == S_GOOD &&
327 cp->xerr_status == 0) {
328 cam_status = sym_xerr_cam_status(DID_OK,
329 cp->sv_xerr_status);
330 drv_status = DRIVER_SENSE;
331 /*
332 * Bounce back the sense data to user.
333 */
334 memset(&cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
335 memcpy(cmd->sense_buffer, cp->sns_bbuf,
336 min(sizeof(cmd->sense_buffer),
337 (size_t)SYM_SNS_BBUF_LEN));
338#if 0
339 /*
340 * If the device reports a UNIT ATTENTION condition
341 * due to a RESET condition, we should consider all
342 * disconnect CCBs for this unit as aborted.
343 */
344 if (1) {
345 u_char *p;
346 p = (u_char *) cmd->sense_data;
347 if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
348 sym_clear_tasks(np, DID_ABORT,
349 cp->target,cp->lun, -1);
350 }
351#endif
352 } else {
353 /*
354 * Error return from our internal request sense. This
355 * is bad: we must clear the contingent allegiance
356 * condition otherwise the device will always return
357 * BUSY. Use a big stick.
358 */
359 sym_reset_scsi_target(np, cmd->device->id);
360 cam_status = DID_ERROR;
361 }
362 } else if (cp->host_status == HS_COMPLETE) /* Bad SCSI status */
363 cam_status = DID_OK;
364 else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */
365 cam_status = DID_NO_CONNECT;
366 else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/
367 cam_status = DID_ERROR;
368 else { /* Extended error */
369 if (sym_verbose) {
370 sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n",
371 cp->host_status, cp->ssss_status,
372 cp->xerr_status);
373 }
374 /*
375 * Set the most appropriate value for CAM status.
376 */
377 cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
378 }
379 cmd->resid = resid;
380 cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
381}
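/*
 * Note (added for clarity): the packing above follows the usual Linux SCSI
 * convention for cmd->result, i.e. driver status in bits 31-24, host/CAM
 * status in bits 23-16 and the SCSI status byte in the low bits.
 */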
382
383
384/*
385 * Build the scatter/gather array for an I/O.
386 */
387
388static int sym_scatter_no_sglist(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
389{
390 struct sym_tblmove *data = &cp->phys.data[SYM_CONF_MAX_SG-1];
391 int segment;
392
393 cp->data_len = cmd->request_bufflen;
394
395 if (cmd->request_bufflen) {
396 dma_addr_t baddr = map_scsi_single_data(np, cmd);
397 if (baddr) {
398 sym_build_sge(np, data, baddr, cmd->request_bufflen);
399 segment = 1;
400 } else {
401 segment = -2;
402 }
403 } else {
404 segment = 0;
405 }
406
407 return segment;
408}
409
410static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
411{
412 int segment;
413 int use_sg = (int) cmd->use_sg;
414
415 cp->data_len = 0;
416
417 if (!use_sg)
418 segment = sym_scatter_no_sglist(np, cp, cmd);
419 else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
420 struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
421 struct sym_tblmove *data;
422
423 if (use_sg > SYM_CONF_MAX_SG) {
424 unmap_scsi_data(np, cmd);
425 return -1;
426 }
427
428 data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
429
430 for (segment = 0; segment < use_sg; segment++) {
431 dma_addr_t baddr = sg_dma_address(&scatter[segment]);
432 unsigned int len = sg_dma_len(&scatter[segment]);
433
434 sym_build_sge(np, &data[segment], baddr, len);
435 cp->data_len += len;
436 }
437 } else {
438 segment = -2;
439 }
440
441 return segment;
442}
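/*
 * Note (added for clarity): sym_scatter() returns the number of segments
 * mapped (>= 0), -1 when the request needs more than SYM_CONF_MAX_SG
 * segments, or -2 when DMA mapping failed.  sym_setup_data_and_start()
 * below turns -1 into CAM_REQ_TOO_BIG and -2 into CAM_RESRC_UNAVAIL.
 */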
443
444/*
445 * Queue a SCSI command.
446 */
447static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd)
448{
449 struct scsi_device *sdev = cmd->device;
450 struct sym_tcb *tp;
451 struct sym_lcb *lp;
452 struct sym_ccb *cp;
453 int order;
454
455 /*
456	 * Minimal checks, so that we will not
457 * go outside our tables.
458 */
459 if (sdev->id == np->myaddr ||
460 sdev->id >= SYM_CONF_MAX_TARGET ||
461 sdev->lun >= SYM_CONF_MAX_LUN) {
462 sym_xpt_done2(np, cmd, CAM_DEV_NOT_THERE);
463 return 0;
464 }
465
466 /*
467 * Retrieve the target descriptor.
468 */
469 tp = &np->target[sdev->id];
470
471 /*
472	 * Complete the 1st INQUIRY command with an error
473	 * condition if the device is flagged NOSCAN
474	 * at BOOT in the NVRAM. This may speed up
475	 * the boot and maintain coherency with BIOS
476	 * device numbering. Clearing the flag allows
477	 * the user to rescan skipped devices later.
478	 * We also return an error for devices not flagged
479	 * for SCAN LUNS in the NVRAM, since some mono-LUN
480	 * devices behave badly when asked for a non-zero
481	 * LUN. Btw, this is an absolute hack. :-)
482 */
483 if (cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 0x0) {
484 if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) ||
485 ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) &&
486 sdev->lun != 0)) {
487 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
488 sym_xpt_done2(np, cmd, CAM_DEV_NOT_THERE);
489 return 0;
490 }
491 }
492
493 /*
494 * Select tagged/untagged.
495 */
496 lp = sym_lp(tp, sdev->lun);
497 order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0;
498
499 /*
500 * Queue the SCSI IO.
501 */
502 cp = sym_get_ccb(np, cmd, order);
503 if (!cp)
504 return 1; /* Means resource shortage */
505 sym_queue_scsiio(np, cmd, cp);
506 return 0;
507}
508
509/*
510 * Setup buffers and pointers that address the CDB.
511 */
512static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
513{
514 u32 cmd_ba;
515 int cmd_len;
516
517 /*
518 * CDB is 16 bytes max.
519 */
520 if (cmd->cmd_len > sizeof(cp->cdb_buf)) {
521 sym_set_cam_status(cp->cmd, CAM_REQ_INVALID);
522 return -1;
523 }
524
525 memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len);
526 cmd_ba = CCB_BA (cp, cdb_buf[0]);
527 cmd_len = cmd->cmd_len;
528
529 cp->phys.cmd.addr = cpu_to_scr(cmd_ba);
530 cp->phys.cmd.size = cpu_to_scr(cmd_len);
531
532 return 0;
533}
534
535/*
536 * Setup pointers that address the data and start the I/O.
537 */
538int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
539{
540 int dir;
541 struct sym_tcb *tp = &np->target[cp->target];
542 struct sym_lcb *lp = sym_lp(tp, cp->lun);
543
544 /*
545 * Build the CDB.
546 */
547 if (sym_setup_cdb(np, cmd, cp))
548 goto out_abort;
549
550 /*
551 * No direction means no data.
552 */
553 dir = cmd->sc_data_direction;
554 if (dir != DMA_NONE) {
555 cp->segments = sym_scatter(np, cp, cmd);
556 if (cp->segments < 0) {
557 if (cp->segments == -2)
558 sym_set_cam_status(cmd, CAM_RESRC_UNAVAIL);
559 else
560 sym_set_cam_status(cmd, CAM_REQ_TOO_BIG);
561 goto out_abort;
562 }
563 } else {
564 cp->data_len = 0;
565 cp->segments = 0;
566 }
567
568 /*
569 * Set data pointers.
570 */
571 sym_setup_data_pointers(np, cp, dir);
572
573 /*
574	 * When changed to `#if 1', the code below makes the driver
575 * panic on the first attempt to write to a SCSI device.
576 * It is the first test we want to do after a driver
577 * change that does not seem obviously safe. :)
578 */
579#if 0
580 switch (cp->cdb_buf[0]) {
581 case 0x0A: case 0x2A: case 0xAA:
582 panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
583 break;
584 default:
585 break;
586 }
587#endif
588
589 /*
590 * activate this job.
591 */
592 if (lp)
593 sym_start_next_ccbs(np, lp, 2);
594 else
595 sym_put_start_queue(np, cp);
596 return 0;
597
598out_abort:
599 sym_free_ccb(np, cp);
600 sym_xpt_done(np, cmd);
601 return 0;
602}
603
604
605/*
606 * timer daemon.
607 *
608 * Misused to keep the driver running when
609 * interrupts are not configured correctly.
610 */
611static void sym_timer(struct sym_hcb *np)
612{
613 unsigned long thistime = jiffies;
614
615 /*
616 * Restart the timer.
617 */
618 np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL;
619 add_timer(&np->s.timer);
620
621 /*
622 * If we are resetting the ncr, wait for settle_time before
623 * clearing it. Then command processing will be resumed.
624 */
625 if (np->s.settle_time_valid) {
626 if (time_before_eq(np->s.settle_time, thistime)) {
627 if (sym_verbose >= 2 )
628 printk("%s: command processing resumed\n",
629 sym_name(np));
630 np->s.settle_time_valid = 0;
631 }
632 return;
633 }
634
635 /*
636 * Nothing to do for now, but that may come.
637 */
638 if (np->s.lasttime + 4*HZ < thistime) {
639 np->s.lasttime = thistime;
640 }
641
642#ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS
643 /*
644 * Some way-broken PCI bridges may lead to
645 * completions being lost when the clearing
646 * of the INTFLY flag by the CPU occurs
647 * concurrently with the chip raising this flag.
648	 * If this ever happens, lost completions will
649 * be reaped here.
650 */
651 sym_wakeup_done(np);
652#endif
653}
654
655
656/*
657 * PCI BUS error handler.
658 */
659void sym_log_bus_error(struct sym_hcb *np)
660{
661 u_short pci_sts;
662 pci_read_config_word(np->s.device, PCI_STATUS, &pci_sts);
663 if (pci_sts & 0xf900) {
664 pci_write_config_word(np->s.device, PCI_STATUS, pci_sts);
665 printf("%s: PCI STATUS = 0x%04x\n",
666 sym_name(np), pci_sts & 0xf900);
667 }
668}
669
670/*
671 * queuecommand method. Entered with the host adapter lock held and
672 * interrupts disabled.
673 */
674static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
675 void (*done)(struct scsi_cmnd *))
676{
677 struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
678 struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd);
679 int sts = 0;
680
681 cmd->scsi_done = done;
682 memset(ucp, 0, sizeof(*ucp));
683
684 /*
685 * Shorten our settle_time if needed for
686 * this command not to time out.
687 */
688 if (np->s.settle_time_valid && cmd->timeout_per_command) {
689 unsigned long tlimit = jiffies + cmd->timeout_per_command;
690 tlimit -= SYM_CONF_TIMER_INTERVAL*2;
691 if (time_after(np->s.settle_time, tlimit)) {
692 np->s.settle_time = tlimit;
693 }
694 }
695
696 if (np->s.settle_time_valid)
697 return SCSI_MLQUEUE_HOST_BUSY;
698
699 sts = sym_queue_command(np, cmd);
700 if (sts)
701 return SCSI_MLQUEUE_HOST_BUSY;
702 return 0;
703}
704
705/*
706 * Linux entry point of the interrupt handler.
707 */
708static irqreturn_t sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs)
709{
710 unsigned long flags;
711 struct sym_hcb *np = (struct sym_hcb *)dev_id;
712
713 if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("[");
714
715 spin_lock_irqsave(np->s.host->host_lock, flags);
716 sym_interrupt(np);
717 spin_unlock_irqrestore(np->s.host->host_lock, flags);
718
719 if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n");
720
721 return IRQ_HANDLED;
722}
723
724/*
725 * Linux entry point of the timer handler
726 */
727static void sym53c8xx_timer(unsigned long npref)
728{
729 struct sym_hcb *np = (struct sym_hcb *)npref;
730 unsigned long flags;
731
732 spin_lock_irqsave(np->s.host->host_lock, flags);
733 sym_timer(np);
734 spin_unlock_irqrestore(np->s.host->host_lock, flags);
735}
736
737
738/*
739 * What the eh thread wants us to perform.
740 */
741#define SYM_EH_ABORT 0
742#define SYM_EH_DEVICE_RESET 1
743#define SYM_EH_BUS_RESET 2
744#define SYM_EH_HOST_RESET 3
745
746/*
747 * What we will do regarding the involved SCSI command.
748 */
749#define SYM_EH_DO_IGNORE 0
750#define SYM_EH_DO_COMPLETE 1
751#define SYM_EH_DO_WAIT 2
752
753/*
754 * Our general completion handler.
755 */
756static void __sym_eh_done(struct scsi_cmnd *cmd, int timed_out)
757{
758 struct sym_eh_wait *ep = SYM_UCMD_PTR(cmd)->eh_wait;
759 if (!ep)
760 return;
761
762 /* Try to avoid a race here (not 100% safe) */
763 if (!timed_out) {
764 ep->timed_out = 0;
765 if (ep->to_do == SYM_EH_DO_WAIT && !del_timer(&ep->timer))
766 return;
767 }
768
769 /* Revert everything */
770 SYM_UCMD_PTR(cmd)->eh_wait = NULL;
771 cmd->scsi_done = ep->old_done;
772
773 /* Wake up the eh thread if it wants to sleep */
774 if (ep->to_do == SYM_EH_DO_WAIT)
775 complete(&ep->done);
776}
777
778/*
779 * scsi_done() alias when error recovery is in progress.
780 */
781static void sym_eh_done(struct scsi_cmnd *cmd) { __sym_eh_done(cmd, 0); }
782
783/*
784 * Some timeout handler to avoid waiting too long.
785 */
786static void sym_eh_timeout(u_long p) { __sym_eh_done((struct scsi_cmnd *)p, 1); }
787
788/*
789 * Generic method for our eh processing.
790 * The 'op' argument tells what we have to do.
791 */
792static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
793{
794 struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
795 SYM_QUEHEAD *qp;
796 int to_do = SYM_EH_DO_IGNORE;
797 int sts = -1;
798 struct sym_eh_wait eh, *ep = &eh;
799
800 dev_warn(&cmd->device->sdev_gendev, "%s operation started.\n", opname);
801
802#if 0
803	/* This one should be the result of some race, so we ignore it */
804 if (cmd->serial_number != cmd->serial_number_at_timeout)
805 goto prepare;
806#endif
807
808 /* This one is queued in some place -> to wait for completion */
809 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
810 struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
811 if (cp->cmd == cmd) {
812 to_do = SYM_EH_DO_WAIT;
813 goto prepare;
814 }
815 }
816
817prepare:
818 /* Prepare stuff to either ignore, complete or wait for completion */
819 switch(to_do) {
820 default:
821 case SYM_EH_DO_IGNORE:
822 break;
823 case SYM_EH_DO_WAIT:
824 init_completion(&ep->done);
825 /* fall through */
826 case SYM_EH_DO_COMPLETE:
827 ep->old_done = cmd->scsi_done;
828 cmd->scsi_done = sym_eh_done;
829 SYM_UCMD_PTR(cmd)->eh_wait = ep;
830 }
831
832	/* Try to carry out the operation we have been asked for */
833 sts = -1;
834 switch(op) {
835 case SYM_EH_ABORT:
836 sts = sym_abort_scsiio(np, cmd, 1);
837 break;
838 case SYM_EH_DEVICE_RESET:
839 sts = sym_reset_scsi_target(np, cmd->device->id);
840 break;
841 case SYM_EH_BUS_RESET:
842 sym_reset_scsi_bus(np, 1);
843 sts = 0;
844 break;
845 case SYM_EH_HOST_RESET:
846 sym_reset_scsi_bus(np, 0);
847 sym_start_up (np, 1);
848 sts = 0;
849 break;
850 default:
851 break;
852 }
853
854 /* On error, restore everything and cross fingers :) */
855 if (sts) {
856 SYM_UCMD_PTR(cmd)->eh_wait = NULL;
857 cmd->scsi_done = ep->old_done;
858 to_do = SYM_EH_DO_IGNORE;
859 }
860
861 ep->to_do = to_do;
862 /* Complete the command with locks held as required by the driver */
863 if (to_do == SYM_EH_DO_COMPLETE)
864 sym_xpt_done2(np, cmd, CAM_REQ_ABORTED);
865
866 /* Wait for completion with locks released, as required by kernel */
867 if (to_do == SYM_EH_DO_WAIT) {
868 init_timer(&ep->timer);
869 ep->timer.expires = jiffies + (5*HZ);
870 ep->timer.function = sym_eh_timeout;
871 ep->timer.data = (u_long)cmd;
872 ep->timed_out = 1; /* Be pessimistic for once :) */
873 add_timer(&ep->timer);
874 spin_unlock_irq(np->s.host->host_lock);
875 wait_for_completion(&ep->done);
876 spin_lock_irq(np->s.host->host_lock);
877 if (ep->timed_out)
878 sts = -2;
879 }
880 dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname,
881 sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed");
882 return sts ? SCSI_FAILED : SCSI_SUCCESS;
883}
884
885
886/*
887 * Error handlers called from the eh thread (one thread per HBA).
888 */
889static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
890{
891 return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
892}
893
894static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
895{
896 return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
897}
898
899static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd)
900{
901 return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
902}
903
904static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd)
905{
906 return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
907}
908
909/*
910 * Tune device queuing depth, according to various limits.
911 */
912static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
913{
914 struct sym_lcb *lp = sym_lp(tp, lun);
915 u_short oldtags;
916
917 if (!lp)
918 return;
919
920 oldtags = lp->s.reqtags;
921
922 if (reqtags > lp->s.scdev_depth)
923 reqtags = lp->s.scdev_depth;
924
925 lp->started_limit = reqtags ? reqtags : 2;
926 lp->started_max = 1;
927 lp->s.reqtags = reqtags;
928
929 if (reqtags != oldtags) {
930 dev_info(&tp->sdev->sdev_target->dev,
931 "tagged command queuing %s, command queue depth %d.\n",
932 lp->s.reqtags ? "enabled" : "disabled",
933 lp->started_limit);
934 }
935}
936
937/*
938 * Linux select queue depths function
939 */
940#define DEF_DEPTH (sym_driver_setup.max_tag)
941#define ALL_TARGETS -2
942#define NO_TARGET -1
943#define ALL_LUNS -2
944#define NO_LUN -1
945
946static int device_queue_depth(struct sym_hcb *np, int target, int lun)
947{
948 int c, h, t, u, v;
949 char *p = sym_driver_setup.tag_ctrl;
950 char *ep;
951
952 h = -1;
953 t = NO_TARGET;
954 u = NO_LUN;
955 while ((c = *p++) != 0) {
956 v = simple_strtoul(p, &ep, 0);
957 switch(c) {
958 case '/':
959 ++h;
960 t = ALL_TARGETS;
961 u = ALL_LUNS;
962 break;
963 case 't':
964 if (t != target)
965 t = (target == v) ? v : NO_TARGET;
966 u = ALL_LUNS;
967 break;
968 case 'u':
969 if (u != lun)
970 u = (lun == v) ? v : NO_LUN;
971 break;
972 case 'q':
973 if (h == np->s.unit &&
974 (t == ALL_TARGETS || t == target) &&
975 (u == ALL_LUNS || u == lun))
976 return v;
977 break;
978 case '-':
979 t = ALL_TARGETS;
980 u = ALL_LUNS;
981 break;
982 default:
983 break;
984 }
985 p = ep;
986 }
987 return DEF_DEPTH;
988}
989
990static int sym53c8xx_slave_alloc(struct scsi_device *device)
991{
992 struct sym_hcb *np = sym_get_hcb(device->host);
993 struct sym_tcb *tp = &np->target[device->id];
994 if (!tp->sdev)
995 tp->sdev = device;
996
997 return 0;
998}
999
1000static void sym53c8xx_slave_destroy(struct scsi_device *device)
1001{
1002 struct sym_hcb *np = sym_get_hcb(device->host);
1003 struct sym_tcb *tp = &np->target[device->id];
1004 if (tp->sdev == device)
1005 tp->sdev = NULL;
1006}
1007
1008/*
1009 * Linux entry point for device queue sizing.
1010 */
1011static int sym53c8xx_slave_configure(struct scsi_device *device)
1012{
1013 struct sym_hcb *np = sym_get_hcb(device->host);
1014 struct sym_tcb *tp = &np->target[device->id];
1015 struct sym_lcb *lp;
1016 int reqtags, depth_to_use;
1017
1018 /*
1019	 * Allocate the LCB if it does not exist yet.
1020	 * If that fails, we may well be in the sh*t. :)
1021 */
1022 lp = sym_alloc_lcb(np, device->id, device->lun);
1023 if (!lp)
1024 return -ENOMEM;
1025
1026 /*
1027 * Get user flags.
1028 */
1029 lp->curr_flags = lp->user_flags;
1030
1031 /*
1032	 * Select queue depth from driver setup.
1033	 * Do not use more than configured by the user.
1034	 * Use at least 2.
1035	 * Do not use more than our maximum.
1036 */
1037 reqtags = device_queue_depth(np, device->id, device->lun);
1038 if (reqtags > tp->usrtags)
1039 reqtags = tp->usrtags;
1040 if (!device->tagged_supported)
1041 reqtags = 0;
1042#if 1 /* Avoid to locally queue commands for no good reasons */
1043 if (reqtags > SYM_CONF_MAX_TAG)
1044 reqtags = SYM_CONF_MAX_TAG;
1045 depth_to_use = (reqtags ? reqtags : 2);
1046#else
1047 depth_to_use = (reqtags ? SYM_CONF_MAX_TAG : 2);
1048#endif
1049 scsi_adjust_queue_depth(device,
1050 (device->tagged_supported ?
1051 MSG_SIMPLE_TAG : 0),
1052 depth_to_use);
1053 lp->s.scdev_depth = depth_to_use;
1054 sym_tune_dev_queuing(tp, device->lun, reqtags);
1055
1056 if (!spi_initial_dv(device->sdev_target))
1057 spi_dv_device(device);
1058
1059 return 0;
1060}
1061
1062/*
1063 * Linux entry point for info() function
1064 */
1065static const char *sym53c8xx_info (struct Scsi_Host *host)
1066{
1067 return SYM_DRIVER_NAME;
1068}
1069
1070
1071#ifdef SYM_LINUX_PROC_INFO_SUPPORT
1072/*
1073 * Proc file system stuff
1074 *
1075 * A read operation returns adapter information.
1076 * A write operation is a control command.
1077 * The string is parsed in the driver code and the command is passed
1078 * to the sym_usercmd() function.
1079 */
1080
1081#ifdef SYM_LINUX_USER_COMMAND_SUPPORT
1082
1083struct sym_usrcmd {
1084 u_long target;
1085 u_long lun;
1086 u_long data;
1087 u_long cmd;
1088};
1089
1090#define UC_SETSYNC 10
1091#define UC_SETTAGS 11
1092#define UC_SETDEBUG 12
1093#define UC_SETWIDE 14
1094#define UC_SETFLAG 15
1095#define UC_SETVERBOSE 17
1096#define UC_RESETDEV 18
1097#define UC_CLEARDEV 19
1098
1099static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
1100{
1101 struct sym_tcb *tp;
1102 int t, l;
1103
1104 switch (uc->cmd) {
1105 case 0: return;
1106
1107#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
1108 case UC_SETDEBUG:
1109 sym_debug_flags = uc->data;
1110 break;
1111#endif
1112 case UC_SETVERBOSE:
1113 np->verbose = uc->data;
1114 break;
1115 default:
1116 /*
1117 * We assume that other commands apply to targets.
1118		 * This should always be the case, and avoids repeating
1119		 * the four lines below six times.
1120 */
1121 for (t = 0; t < SYM_CONF_MAX_TARGET; t++) {
1122 if (!((uc->target >> t) & 1))
1123 continue;
1124 tp = &np->target[t];
1125
1126 switch (uc->cmd) {
1127
1128 case UC_SETSYNC:
1129 if (!uc->data || uc->data >= 255) {
1130 tp->tgoal.iu = tp->tgoal.dt =
1131 tp->tgoal.qas = 0;
1132 tp->tgoal.offset = 0;
1133 } else if (uc->data <= 9 && np->minsync_dt) {
1134 if (uc->data < np->minsync_dt)
1135 uc->data = np->minsync_dt;
1136 tp->tgoal.iu = tp->tgoal.dt =
1137 tp->tgoal.qas = 1;
1138 tp->tgoal.width = 1;
1139 tp->tgoal.period = uc->data;
1140 tp->tgoal.offset = np->maxoffs_dt;
1141 } else {
1142 if (uc->data < np->minsync)
1143 uc->data = np->minsync;
1144 tp->tgoal.iu = tp->tgoal.dt =
1145 tp->tgoal.qas = 0;
1146 tp->tgoal.period = uc->data;
1147 tp->tgoal.offset = np->maxoffs;
1148 }
1149 tp->tgoal.check_nego = 1;
1150 break;
1151 case UC_SETWIDE:
1152 tp->tgoal.width = uc->data ? 1 : 0;
1153 tp->tgoal.check_nego = 1;
1154 break;
1155 case UC_SETTAGS:
1156 for (l = 0; l < SYM_CONF_MAX_LUN; l++)
1157 sym_tune_dev_queuing(tp, l, uc->data);
1158 break;
1159 case UC_RESETDEV:
1160 tp->to_reset = 1;
1161 np->istat_sem = SEM;
1162 OUTB(np, nc_istat, SIGP|SEM);
1163 break;
1164 case UC_CLEARDEV:
1165 for (l = 0; l < SYM_CONF_MAX_LUN; l++) {
1166 struct sym_lcb *lp = sym_lp(tp, l);
1167 if (lp) lp->to_clear = 1;
1168 }
1169 np->istat_sem = SEM;
1170 OUTB(np, nc_istat, SIGP|SEM);
1171 break;
1172 case UC_SETFLAG:
1173 tp->usrflags = uc->data;
1174 break;
1175 }
1176 }
1177 break;
1178 }
1179}
1180
1181static int skip_spaces(char *ptr, int len)
1182{
1183 int cnt, c;
1184
1185 for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--);
1186
1187 return (len - cnt);
1188}
1189
1190static int get_int_arg(char *ptr, int len, u_long *pv)
1191{
1192 char *end;
1193
1194 *pv = simple_strtoul(ptr, &end, 10);
1195 return (end - ptr);
1196}
1197
1198static int is_keyword(char *ptr, int len, char *verb)
1199{
1200 int verb_len = strlen(verb);
1201
1202 if (len >= verb_len && !memcmp(verb, ptr, verb_len))
1203 return verb_len;
1204 else
1205 return 0;
1206}
1207
1208#define SKIP_SPACES(ptr, len) \
1209 if ((arg_len = skip_spaces(ptr, len)) < 1) \
1210 return -EINVAL; \
1211 ptr += arg_len; len -= arg_len;
1212
1213#define GET_INT_ARG(ptr, len, v) \
1214 if (!(arg_len = get_int_arg(ptr, len, &(v)))) \
1215 return -EINVAL; \
1216 ptr += arg_len; len -= arg_len;
1217
1218
1219/*
1220 * Parse a control command
1221 */
1222
1223static int sym_user_command(struct sym_hcb *np, char *buffer, int length)
1224{
1225 char *ptr = buffer;
1226 int len = length;
1227 struct sym_usrcmd cmd, *uc = &cmd;
1228 int arg_len;
1229 u_long target;
1230
1231 memset(uc, 0, sizeof(*uc));
1232
1233 if (len > 0 && ptr[len-1] == '\n')
1234 --len;
1235
1236 if ((arg_len = is_keyword(ptr, len, "setsync")) != 0)
1237 uc->cmd = UC_SETSYNC;
1238 else if ((arg_len = is_keyword(ptr, len, "settags")) != 0)
1239 uc->cmd = UC_SETTAGS;
1240 else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
1241 uc->cmd = UC_SETVERBOSE;
1242 else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0)
1243 uc->cmd = UC_SETWIDE;
1244#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
1245 else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
1246 uc->cmd = UC_SETDEBUG;
1247#endif
1248 else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0)
1249 uc->cmd = UC_SETFLAG;
1250 else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
1251 uc->cmd = UC_RESETDEV;
1252 else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
1253 uc->cmd = UC_CLEARDEV;
1254 else
1255 arg_len = 0;
1256
1257#ifdef DEBUG_PROC_INFO
1258printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
1259#endif
1260
1261 if (!arg_len)
1262 return -EINVAL;
1263 ptr += arg_len; len -= arg_len;
1264
1265 switch(uc->cmd) {
1266 case UC_SETSYNC:
1267 case UC_SETTAGS:
1268 case UC_SETWIDE:
1269 case UC_SETFLAG:
1270 case UC_RESETDEV:
1271 case UC_CLEARDEV:
1272 SKIP_SPACES(ptr, len);
1273 if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
1274 ptr += arg_len; len -= arg_len;
1275 uc->target = ~0;
1276 } else {
1277 GET_INT_ARG(ptr, len, target);
1278 uc->target = (1<<target);
1279#ifdef DEBUG_PROC_INFO
1280printk("sym_user_command: target=%ld\n", target);
1281#endif
1282 }
1283 break;
1284 }
1285
1286 switch(uc->cmd) {
1287 case UC_SETVERBOSE:
1288 case UC_SETSYNC:
1289 case UC_SETTAGS:
1290 case UC_SETWIDE:
1291 SKIP_SPACES(ptr, len);
1292 GET_INT_ARG(ptr, len, uc->data);
1293#ifdef DEBUG_PROC_INFO
1294printk("sym_user_command: data=%ld\n", uc->data);
1295#endif
1296 break;
1297#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
1298 case UC_SETDEBUG:
1299 while (len > 0) {
1300 SKIP_SPACES(ptr, len);
1301 if ((arg_len = is_keyword(ptr, len, "alloc")))
1302 uc->data |= DEBUG_ALLOC;
1303 else if ((arg_len = is_keyword(ptr, len, "phase")))
1304 uc->data |= DEBUG_PHASE;
1305 else if ((arg_len = is_keyword(ptr, len, "queue")))
1306 uc->data |= DEBUG_QUEUE;
1307 else if ((arg_len = is_keyword(ptr, len, "result")))
1308 uc->data |= DEBUG_RESULT;
1309 else if ((arg_len = is_keyword(ptr, len, "scatter")))
1310 uc->data |= DEBUG_SCATTER;
1311 else if ((arg_len = is_keyword(ptr, len, "script")))
1312 uc->data |= DEBUG_SCRIPT;
1313 else if ((arg_len = is_keyword(ptr, len, "tiny")))
1314 uc->data |= DEBUG_TINY;
1315 else if ((arg_len = is_keyword(ptr, len, "timing")))
1316 uc->data |= DEBUG_TIMING;
1317 else if ((arg_len = is_keyword(ptr, len, "nego")))
1318 uc->data |= DEBUG_NEGO;
1319 else if ((arg_len = is_keyword(ptr, len, "tags")))
1320 uc->data |= DEBUG_TAGS;
1321 else if ((arg_len = is_keyword(ptr, len, "pointer")))
1322 uc->data |= DEBUG_POINTER;
1323 else
1324 return -EINVAL;
1325 ptr += arg_len; len -= arg_len;
1326 }
1327#ifdef DEBUG_PROC_INFO
1328printk("sym_user_command: data=%ld\n", uc->data);
1329#endif
1330 break;
1331#endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */
1332 case UC_SETFLAG:
1333 while (len > 0) {
1334 SKIP_SPACES(ptr, len);
1335 if ((arg_len = is_keyword(ptr, len, "no_disc")))
1336 uc->data &= ~SYM_DISC_ENABLED;
1337 else
1338 return -EINVAL;
1339 ptr += arg_len; len -= arg_len;
1340 }
1341 break;
1342 default:
1343 break;
1344 }
1345
1346 if (len)
1347 return -EINVAL;
1348 else {
1349 unsigned long flags;
1350
1351 spin_lock_irqsave(np->s.host->host_lock, flags);
1352 sym_exec_user_command (np, uc);
1353 spin_unlock_irqrestore(np->s.host->host_lock, flags);
1354 }
1355 return length;
1356}
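/*
 * Illustrative examples (not part of the original source): control commands
 * accepted by the parser above look like "settags 2 16" (16 tags on target
 * 2), "setwide all 1" or "resetdev 3", written to the driver's proc file
 * (typically /proc/scsi/sym53c8xx/<host>); the proc path is an assumption.
 */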
1357
1358#endif /* SYM_LINUX_USER_COMMAND_SUPPORT */
1359
1360
1361#ifdef SYM_LINUX_USER_INFO_SUPPORT
1362/*
1363 * Information reported through the proc file system.
1364 */
1365struct info_str {
1366 char *buffer;
1367 int length;
1368 int offset;
1369 int pos;
1370};
1371
1372static void copy_mem_info(struct info_str *info, char *data, int len)
1373{
1374 if (info->pos + len > info->length)
1375 len = info->length - info->pos;
1376
1377 if (info->pos + len < info->offset) {
1378 info->pos += len;
1379 return;
1380 }
1381 if (info->pos < info->offset) {
1382 data += (info->offset - info->pos);
1383 len -= (info->offset - info->pos);
1384 }
1385
1386 if (len > 0) {
1387 memcpy(info->buffer + info->pos, data, len);
1388 info->pos += len;
1389 }
1390}
1391
1392static int copy_info(struct info_str *info, char *fmt, ...)
1393{
1394 va_list args;
1395 char buf[81];
1396 int len;
1397
1398 va_start(args, fmt);
1399 len = vsprintf(buf, fmt, args);
1400 va_end(args);
1401
1402 copy_mem_info(info, buf, len);
1403 return len;
1404}
1405
1406/*
1407 * Copy formatted information into the input buffer.
1408 */
1409static int sym_host_info(struct sym_hcb *np, char *ptr, off_t offset, int len)
1410{
1411 struct info_str info;
1412
1413 info.buffer = ptr;
1414 info.length = len;
1415 info.offset = offset;
1416 info.pos = 0;
1417
1418 copy_info(&info, "Chip " NAME53C "%s, device id 0x%x, "
1419 "revision id 0x%x\n",
1420 np->s.chip_name, np->device_id, np->revision_id);
1421 copy_info(&info, "At PCI address %s, IRQ " IRQ_FMT "\n",
1422 pci_name(np->s.device), IRQ_PRM(np->s.irq));
1423 copy_info(&info, "Min. period factor %d, %s SCSI BUS%s\n",
1424 (int) (np->minsync_dt ? np->minsync_dt : np->minsync),
1425 np->maxwide ? "Wide" : "Narrow",
1426 np->minsync_dt ? ", DT capable" : "");
1427
1428 copy_info(&info, "Max. started commands %d, "
1429 "max. commands per LUN %d\n",
1430 SYM_CONF_MAX_START, SYM_CONF_MAX_TAG);
1431
1432 return info.pos > info.offset? info.pos - info.offset : 0;
1433}
1434#endif /* SYM_LINUX_USER_INFO_SUPPORT */
1435
1436/*
1437 * Entry point of the driver's SCSI proc fs support.
1438 * - func = 0 means read (returns adapter information)
1439 * - func = 1 means write (not yet merged from sym53c8xx)
1440 */
1441static int sym53c8xx_proc_info(struct Scsi_Host *host, char *buffer,
1442 char **start, off_t offset, int length, int func)
1443{
1444 struct sym_hcb *np = sym_get_hcb(host);
1445 int retv;
1446
1447 if (func) {
1448#ifdef SYM_LINUX_USER_COMMAND_SUPPORT
1449 retv = sym_user_command(np, buffer, length);
1450#else
1451 retv = -EINVAL;
1452#endif
1453 } else {
1454 if (start)
1455 *start = buffer;
1456#ifdef SYM_LINUX_USER_INFO_SUPPORT
1457 retv = sym_host_info(np, buffer, offset, length);
1458#else
1459 retv = -EINVAL;
1460#endif
1461 }
1462
1463 return retv;
1464}
1465#endif /* SYM_LINUX_PROC_INFO_SUPPORT */
1466
1467/*
1468 * Free controller resources.
1469 */
1470static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev)
1471{
1472 /*
1473 * Free O/S specific resources.
1474 */
1475 if (np->s.irq)
1476 free_irq(np->s.irq, np);
1477 if (np->s.ioaddr)
1478 pci_iounmap(pdev, np->s.ioaddr);
1479 if (np->s.ramaddr)
1480 pci_iounmap(pdev, np->s.ramaddr);
1481 /*
1482 * Free O/S independent resources.
1483 */
1484 sym_hcb_free(np);
1485
1486 sym_mfree_dma(np, sizeof(*np), "HCB");
1487}
1488
1489/*
1490 * Ask/tell the system about DMA addressing.
1491 */
1492static int sym_setup_bus_dma_mask(struct sym_hcb *np)
1493{
1494#if SYM_CONF_DMA_ADDRESSING_MODE > 0
1495#if SYM_CONF_DMA_ADDRESSING_MODE == 1
1496#define DMA_DAC_MASK 0x000000ffffffffffULL /* 40-bit */
1497#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
1498#define DMA_DAC_MASK DMA_64BIT_MASK
1499#endif
1500 if ((np->features & FE_DAC) &&
1501 !pci_set_dma_mask(np->s.device, DMA_DAC_MASK)) {
1502 np->use_dac = 1;
1503 return 0;
1504 }
1505#endif
1506
1507 if (!pci_set_dma_mask(np->s.device, DMA_32BIT_MASK))
1508 return 0;
1509
1510 printf_warning("%s: No suitable DMA available\n", sym_name(np));
1511 return -1;
1512}
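/*
 * Note (added for clarity): depending on SYM_CONF_DMA_ADDRESSING_MODE, the
 * routine above first tries a 40-bit (mode 1) or 64-bit (mode 2) DMA mask
 * for chips with the FE_DAC feature, then falls back to a 32-bit mask, and
 * only fails when neither can be set.
 */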
1513
1514/*
1515 * Host attach and initialisations.
1516 *
1517 * Allocate host data and ncb structure.
1518 * Remap MMIO region.
1519 * Do chip initialization.
1520 * If all is OK, install interrupt handling and
1521 * start the timer daemon.
1522 */
1523static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
1524 int unit, struct sym_device *dev)
1525{
1526 struct host_data *host_data;
1527 struct sym_hcb *np = NULL;
1528 struct Scsi_Host *instance = NULL;
1529 struct pci_dev *pdev = dev->pdev;
1530 unsigned long flags;
1531 struct sym_fw *fw;
1532
1533 printk(KERN_INFO
1534 "sym%d: <%s> rev 0x%x at pci %s irq " IRQ_FMT "\n",
1535 unit, dev->chip.name, dev->chip.revision_id,
1536 pci_name(pdev), IRQ_PRM(pdev->irq));
1537
1538 /*
1539 * Get the firmware for this chip.
1540 */
1541 fw = sym_find_firmware(&dev->chip);
1542 if (!fw)
1543 goto attach_failed;
1544
1545 /*
1546 * Allocate host_data structure
1547 */
1548 instance = scsi_host_alloc(tpnt, sizeof(*host_data));
1549 if (!instance)
1550 goto attach_failed;
1551 host_data = (struct host_data *) instance->hostdata;
1552
1553 /*
1554	 * Immediately allocate the host control block,
1555 * since we are only expecting to succeed. :)
1556 * We keep track in the HCB of all the resources that
1557 * are to be released on error.
1558 */
1559 np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB");
1560 if (!np)
1561 goto attach_failed;
1562 np->s.device = pdev;
1563 np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */
1564 host_data->ncb = np;
1565 np->s.host = instance;
1566
1567 pci_set_drvdata(pdev, np);
1568
1569 /*
1570 * Copy some useful infos to the HCB.
1571 */
1572 np->hcb_ba = vtobus(np);
1573 np->verbose = sym_driver_setup.verbose;
1574 np->s.device = pdev;
1575 np->s.unit = unit;
1576 np->device_id = dev->chip.device_id;
1577 np->revision_id = dev->chip.revision_id;
1578 np->features = dev->chip.features;
1579 np->clock_divn = dev->chip.nr_divisor;
1580 np->maxoffs = dev->chip.offset_max;
1581 np->maxburst = dev->chip.burst_max;
1582 np->myaddr = dev->host_id;
1583
1584 /*
1585 * Edit its name.
1586 */
1587 strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name));
1588 sprintf(np->s.inst_name, "sym%d", np->s.unit);
1589
1590 if (sym_setup_bus_dma_mask(np))
1591 goto attach_failed;
1592
1593 /*
1594 * Try to map the controller chip to
1595 * virtual and physical memory.
1596 */
1597 np->mmio_ba = (u32)dev->mmio_base;
1598 np->s.ioaddr = dev->s.ioaddr;
1599 np->s.ramaddr = dev->s.ramaddr;
1600 np->s.io_ws = (np->features & FE_IO256) ? 256 : 128;
1601
1602 /*
1603 * Map on-chip RAM if present and supported.
1604 */
1605 if (!(np->features & FE_RAM))
1606 dev->ram_base = 0;
1607 if (dev->ram_base) {
1608 np->ram_ba = (u32)dev->ram_base;
1609 np->ram_ws = (np->features & FE_RAM8K) ? 8192 : 4096;
1610 }
1611
1612 if (sym_hcb_attach(instance, fw, dev->nvram))
1613 goto attach_failed;
1614
1615 /*
1616 * Install the interrupt handler.
1617	 * If we synchronize the C code with SCRIPTS on interrupt,
1618 * we do not want to share the INTR line at all.
1619 */
1620 if (request_irq(pdev->irq, sym53c8xx_intr, SA_SHIRQ, NAME53C8XX, np)) {
1621 printf_err("%s: request irq %d failure\n",
1622 sym_name(np), pdev->irq);
1623 goto attach_failed;
1624 }
1625 np->s.irq = pdev->irq;
1626
1627 /*
1628 * After SCSI devices have been opened, we cannot
1629 * reset the bus safely, so we do it here.
1630 */
1631 spin_lock_irqsave(instance->host_lock, flags);
1632 if (sym_reset_scsi_bus(np, 0))
1633 goto reset_failed;
1634
1635 /*
1636 * Start the SCRIPTS.
1637 */
1638 sym_start_up (np, 1);
1639
1640 /*
1641 * Start the timer daemon
1642 */
1643 init_timer(&np->s.timer);
1644 np->s.timer.data = (unsigned long) np;
1645 np->s.timer.function = sym53c8xx_timer;
1646 np->s.lasttime=0;
1647 sym_timer (np);
1648
1649 /*
1650 * Fill Linux host instance structure
1651 * and return success.
1652 */
1653 instance->max_channel = 0;
1654 instance->this_id = np->myaddr;
1655 instance->max_id = np->maxwide ? 16 : 8;
1656 instance->max_lun = SYM_CONF_MAX_LUN;
1657 instance->unique_id = pci_resource_start(pdev, 0);
1658 instance->cmd_per_lun = SYM_CONF_MAX_TAG;
1659 instance->can_queue = (SYM_CONF_MAX_START-2);
1660 instance->sg_tablesize = SYM_CONF_MAX_SG;
1661 instance->max_cmd_len = 16;
1662 BUG_ON(sym2_transport_template == NULL);
1663 instance->transportt = sym2_transport_template;
1664
1665 spin_unlock_irqrestore(instance->host_lock, flags);
1666
1667 return instance;
1668
1669 reset_failed:
1670 printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, "
1671 "TERMINATION, DEVICE POWER etc.!\n", sym_name(np));
1672 spin_unlock_irqrestore(instance->host_lock, flags);
1673 attach_failed:
1674 if (!instance)
1675 return NULL;
1676 printf_info("%s: giving up ...\n", sym_name(np));
1677 if (np)
1678 sym_free_resources(np, pdev);
1679 scsi_host_put(instance);
1680
1681 return NULL;
1682 }
1683
1684
1685/*
1686 * Detect and try to read SYMBIOS and TEKRAM NVRAM.
1687 */
1688#if SYM_CONF_NVRAM_SUPPORT
1689static void __devinit sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
1690{
1691 devp->nvram = nvp;
1692 devp->device_id = devp->chip.device_id;
1693 nvp->type = 0;
1694
1695 sym_read_nvram(devp, nvp);
1696}
1697#else
1698static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
1699{
1700}
1701#endif /* SYM_CONF_NVRAM_SUPPORT */
1702
1703static int __devinit sym_check_supported(struct sym_device *device)
1704{
1705 struct sym_chip *chip;
1706 struct pci_dev *pdev = device->pdev;
1707 u_char revision;
1708 unsigned long io_port = pci_resource_start(pdev, 0);
1709 int i;
1710
1711 /*
1712	 * If the user excluded this chip, do not initialize it.
1713 * I hate this code so much. Must kill it.
1714 */
1715 if (io_port) {
1716 for (i = 0 ; i < 8 ; i++) {
1717 if (sym_driver_setup.excludes[i] == io_port)
1718 return -ENODEV;
1719 }
1720 }
1721
1722 /*
1723 * Check if the chip is supported. Then copy the chip description
1724 * to our device structure so we can make it match the actual device
1725 * and options.
1726 */
1727 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
1728 chip = sym_lookup_chip_table(pdev->device, revision);
1729 if (!chip) {
1730 dev_info(&pdev->dev, "device not supported\n");
1731 return -ENODEV;
1732 }
1733 memcpy(&device->chip, chip, sizeof(device->chip));
1734 device->chip.revision_id = revision;
1735
1736 return 0;
1737}
1738
1739/*
1740 * Ignore Symbios chips controlled by various RAID controllers.
1741 * These controllers set value 0x52414944 at RAM end - 16.
1742 */
1743static int __devinit sym_check_raid(struct sym_device *device)
1744{
1745 unsigned int ram_size, ram_val;
1746
1747 if (!device->s.ramaddr)
1748 return 0;
1749
1750 if (device->chip.features & FE_RAM8K)
1751 ram_size = 8192;
1752 else
1753 ram_size = 4096;
1754
1755 ram_val = readl(device->s.ramaddr + ram_size - 16);
1756 if (ram_val != 0x52414944)
1757 return 0;
1758
1759 dev_info(&device->pdev->dev,
1760 "not initializing, driven by RAID controller.\n");
1761 return -ENODEV;
1762}
1763
1764static int __devinit sym_set_workarounds(struct sym_device *device)
1765{
1766 struct sym_chip *chip = &device->chip;
1767 struct pci_dev *pdev = device->pdev;
1768 u_short status_reg;
1769
1770 /*
1771 * (ITEM 12 of a DEL about the 896 I haven't yet).
1772 * We must ensure the chip will use WRITE AND INVALIDATE.
1773 * The revision number limit is for now arbitrary.
1774 */
1775 if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && chip->revision_id < 0x4) {
1776 chip->features |= (FE_WRIE | FE_CLSE);
1777 }
1778
1779 /* If the chip can do Memory Write Invalidate, enable it */
1780 if (chip->features & FE_WRIE) {
1781 if (pci_set_mwi(pdev))
1782 return -ENODEV;
1783 }
1784
1785 /*
1786	 * Work around an errant bit in the 895A. The 66MHz
1787 * capable bit is set erroneously. Clear this bit.
1788 * (Item 1 DEL 533)
1789 *
1790 * Make sure Config space and Features agree.
1791 *
1792 * Recall: writes are not normal to status register -
1793 * write a 1 to clear and a 0 to leave unchanged.
1794 * Can only reset bits.
1795 */
1796 pci_read_config_word(pdev, PCI_STATUS, &status_reg);
1797 if (chip->features & FE_66MHZ) {
1798 if (!(status_reg & PCI_STATUS_66MHZ))
1799 chip->features &= ~FE_66MHZ;
1800 } else {
1801 if (status_reg & PCI_STATUS_66MHZ) {
1802 status_reg = PCI_STATUS_66MHZ;
1803 pci_write_config_word(pdev, PCI_STATUS, status_reg);
1804 pci_read_config_word(pdev, PCI_STATUS, &status_reg);
1805 }
1806 }
1807
1808 return 0;
1809}
1810
1811/*
1812 * Read and check the PCI configuration for any detected NCR
1813 * boards and save data for attaching after all boards have
1814 * been detected.
1815 */
1816static void __devinit
1817sym_init_device(struct pci_dev *pdev, struct sym_device *device)
1818{
1819 int i;
1820
1821 device->host_id = SYM_SETUP_HOST_ID;
1822 device->pdev = pdev;
1823
1824 i = pci_get_base_address(pdev, 1, &device->mmio_base);
1825 pci_get_base_address(pdev, i, &device->ram_base);
1826
1827#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED
1828 if (device->mmio_base)
1829 device->s.ioaddr = pci_iomap(pdev, 1,
1830 pci_resource_len(pdev, 1));
1831#endif
1832 if (!device->s.ioaddr)
1833 device->s.ioaddr = pci_iomap(pdev, 0,
1834 pci_resource_len(pdev, 0));
1835 if (device->ram_base)
1836 device->s.ramaddr = pci_iomap(pdev, i,
1837 pci_resource_len(pdev, i));
1838}
1839
1840/*
1841 * The NCR PQS and PDS cards are constructed as a DEC bridge
1842 * behind which sits a proprietary NCR memory controller and
1843 * either four or two 53c875s as separate devices. We can tell
1844 * if an 875 is part of a PQS/PDS or not since if it is, it will
1845 * be on the same bus as the memory controller. In its usual
1846 * mode of operation, the 875s are slaved to the memory
1847 * controller for all transfers. To operate with the Linux
1848 * driver, the memory controller is disabled and the 875s
1849 * freed to function independently. The only wrinkle is that
1850 * the preset SCSI ID (which may be zero) must be read in from
1851 * a special configuration space register of the 875.
1852 */
1853static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev)
1854{
1855 int slot;
1856 u8 tmp;
1857
1858 for (slot = 0; slot < 256; slot++) {
1859 struct pci_dev *memc = pci_get_slot(pdev->bus, slot);
1860
1861 if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) {
1862 pci_dev_put(memc);
1863 continue;
1864 }
1865
1866 /* bit 1: allow individual 875 configuration */
1867 pci_read_config_byte(memc, 0x44, &tmp);
1868 if ((tmp & 0x2) == 0) {
1869 tmp |= 0x2;
1870 pci_write_config_byte(memc, 0x44, tmp);
1871 }
1872
1873 /* bit 2: drive individual 875 interrupts to the bus */
1874 pci_read_config_byte(memc, 0x45, &tmp);
1875 if ((tmp & 0x4) == 0) {
1876 tmp |= 0x4;
1877 pci_write_config_byte(memc, 0x45, tmp);
1878 }
1879
1880 pci_dev_put(memc);
1881 break;
1882 }
1883
1884 pci_read_config_byte(pdev, 0x84, &tmp);
1885 sym_dev->host_id = tmp;
1886}
1887
1888/*
1889 * Called before unloading the module.
1890 * Detach the host.
1891 * We have to free resources and halt the NCR chip.
1892 */
1893static int sym_detach(struct sym_hcb *np, struct pci_dev *pdev)
1894{
1895 printk("%s: detaching ...\n", sym_name(np));
1896
1897 del_timer_sync(&np->s.timer);
1898
1899 /*
1900 * Reset NCR chip.
1901 * We should use sym_soft_reset(), but we don't want to do
1902 * so, since we may not be safe if interrupts occur.
1903 */
1904 printk("%s: resetting chip\n", sym_name(np));
1905 OUTB(np, nc_istat, SRST);
1906 udelay(10);
1907 OUTB(np, nc_istat, 0);
1908
1909 sym_free_resources(np, pdev);
1910
1911 return 1;
1912}
1913
1914/*
1915 * Driver host template.
1916 */
1917static struct scsi_host_template sym2_template = {
1918 .module = THIS_MODULE,
1919 .name = "sym53c8xx",
1920 .info = sym53c8xx_info,
1921 .queuecommand = sym53c8xx_queue_command,
1922 .slave_alloc = sym53c8xx_slave_alloc,
1923 .slave_configure = sym53c8xx_slave_configure,
1924 .slave_destroy = sym53c8xx_slave_destroy,
1925 .eh_abort_handler = sym53c8xx_eh_abort_handler,
1926 .eh_device_reset_handler = sym53c8xx_eh_device_reset_handler,
1927 .eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler,
1928 .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler,
1929 .this_id = 7,
1930 .use_clustering = DISABLE_CLUSTERING,
1931#ifdef SYM_LINUX_PROC_INFO_SUPPORT
1932 .proc_info = sym53c8xx_proc_info,
1933 .proc_name = NAME53C8XX,
1934#endif
1935};
1936
1937static int attach_count;
1938
1939static int __devinit sym2_probe(struct pci_dev *pdev,
1940 const struct pci_device_id *ent)
1941{
1942 struct sym_device sym_dev;
1943 struct sym_nvram nvram;
1944 struct Scsi_Host *instance;
1945
1946 memset(&sym_dev, 0, sizeof(sym_dev));
1947 memset(&nvram, 0, sizeof(nvram));
1948
1949 if (pci_enable_device(pdev))
1950 goto leave;
1951
1952 pci_set_master(pdev);
1953
1954 if (pci_request_regions(pdev, NAME53C8XX))
1955 goto disable;
1956
1957 sym_init_device(pdev, &sym_dev);
1958 if (sym_check_supported(&sym_dev))
1959 goto free;
1960
1961 if (sym_check_raid(&sym_dev))
1962 goto leave; /* Don't disable the device */
1963
1964 if (sym_set_workarounds(&sym_dev))
1965 goto free;
1966
1967 sym_config_pqs(pdev, &sym_dev);
1968
1969 sym_get_nvram(&sym_dev, &nvram);
1970
1971 instance = sym_attach(&sym2_template, attach_count, &sym_dev);
1972 if (!instance)
1973 goto free;
1974
1975 if (scsi_add_host(instance, &pdev->dev))
1976 goto detach;
1977 scsi_scan_host(instance);
1978
1979 attach_count++;
1980
1981 return 0;
1982
1983 detach:
1984 sym_detach(pci_get_drvdata(pdev), pdev);
1985 free:
1986 pci_release_regions(pdev);
1987 disable:
1988 pci_disable_device(pdev);
1989 leave:
1990 return -ENODEV;
1991}
1992
1993static void __devexit sym2_remove(struct pci_dev *pdev)
1994{
1995 struct sym_hcb *np = pci_get_drvdata(pdev);
1996 struct Scsi_Host *host = np->s.host;
1997
1998 scsi_remove_host(host);
1999 scsi_host_put(host);
2000
2001 sym_detach(np, pdev);
2002
2003 pci_release_regions(pdev);
2004 pci_disable_device(pdev);
2005
2006 attach_count--;
2007}
2008
2009static void sym2_get_signalling(struct Scsi_Host *shost)
2010{
2011 struct sym_hcb *np = sym_get_hcb(shost);
2012 enum spi_signal_type type;
2013
2014 switch (np->scsi_mode) {
2015 case SMODE_SE:
2016 type = SPI_SIGNAL_SE;
2017 break;
2018 case SMODE_LVD:
2019 type = SPI_SIGNAL_LVD;
2020 break;
2021 case SMODE_HVD:
2022 type = SPI_SIGNAL_HVD;
2023 break;
2024 default:
2025 type = SPI_SIGNAL_UNKNOWN;
2026 break;
2027 }
2028 spi_signalling(shost) = type;
2029}
2030
2031static void sym2_set_offset(struct scsi_target *starget, int offset)
2032{
2033 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2034 struct sym_hcb *np = sym_get_hcb(shost);
2035 struct sym_tcb *tp = &np->target[starget->id];
2036
2037 tp->tgoal.offset = offset;
2038 tp->tgoal.check_nego = 1;
2039}
2040
2041static void sym2_set_period(struct scsi_target *starget, int period)
2042{
2043 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2044 struct sym_hcb *np = sym_get_hcb(shost);
2045 struct sym_tcb *tp = &np->target[starget->id];
2046
2047 /* have to have DT for these transfers */
2048 if (period <= np->minsync)
2049 tp->tgoal.dt = 1;
2050
2051 tp->tgoal.period = period;
2052 tp->tgoal.check_nego = 1;
2053}
2054
2055static void sym2_set_width(struct scsi_target *starget, int width)
2056{
2057 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2058 struct sym_hcb *np = sym_get_hcb(shost);
2059 struct sym_tcb *tp = &np->target[starget->id];
2060
2061 /* It is illegal to have DT set on narrow transfers. If DT is
2062 * clear, we must also clear IU and QAS. */
2063 if (width == 0)
2064 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
2065
2066 tp->tgoal.width = width;
2067 tp->tgoal.check_nego = 1;
2068}
2069
2070static void sym2_set_dt(struct scsi_target *starget, int dt)
2071{
2072 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2073 struct sym_hcb *np = sym_get_hcb(shost);
2074 struct sym_tcb *tp = &np->target[starget->id];
2075
2076 /* We must clear QAS and IU if DT is clear */
2077 if (dt)
2078 tp->tgoal.dt = 1;
2079 else
2080 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
2081 tp->tgoal.check_nego = 1;
2082}
2083
2084static void sym2_set_iu(struct scsi_target *starget, int iu)
2085{
2086 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2087 struct sym_hcb *np = sym_get_hcb(shost);
2088 struct sym_tcb *tp = &np->target[starget->id];
2089
2090 if (iu)
2091 tp->tgoal.iu = tp->tgoal.dt = 1;
2092 else
2093 tp->tgoal.iu = 0;
2094 tp->tgoal.check_nego = 1;
2095}
2096
2097static void sym2_set_qas(struct scsi_target *starget, int qas)
2098{
2099 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2100 struct sym_hcb *np = sym_get_hcb(shost);
2101 struct sym_tcb *tp = &np->target[starget->id];
2102
2103 if (qas)
2104 tp->tgoal.dt = tp->tgoal.qas = 1;
2105 else
2106 tp->tgoal.qas = 0;
2107 tp->tgoal.check_nego = 1;
2108}
2109
2110
2111static struct spi_function_template sym2_transport_functions = {
2112 .set_offset = sym2_set_offset,
2113 .show_offset = 1,
2114 .set_period = sym2_set_period,
2115 .show_period = 1,
2116 .set_width = sym2_set_width,
2117 .show_width = 1,
2118 .set_dt = sym2_set_dt,
2119 .show_dt = 1,
2120 .set_iu = sym2_set_iu,
2121 .show_iu = 1,
2122 .set_qas = sym2_set_qas,
2123 .show_qas = 1,
2124 .get_signalling = sym2_get_signalling,
2125};
2126
2127static struct pci_device_id sym2_id_table[] __devinitdata = {
2128 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
2129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2130 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
2131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
2132 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825,
2133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2134 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815,
2135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2136 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP,
2137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
2138 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
2139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2140 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
2141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2142 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
2143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2144 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
2145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2146 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885,
2147 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2148 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875,
2149 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2150 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510,
2151 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
2152 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A,
2153 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2154 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A,
2155 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2156 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33,
2157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2158 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66,
2159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2160 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J,
2161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2162 { 0, }
2163};
2164
2165MODULE_DEVICE_TABLE(pci, sym2_id_table);
2166
2167static struct pci_driver sym2_driver = {
2168 .name = NAME53C8XX,
2169 .id_table = sym2_id_table,
2170 .probe = sym2_probe,
2171 .remove = __devexit_p(sym2_remove),
2172};
2173
2174static int __init sym2_init(void)
2175{
2176 int error;
2177
2178 sym2_setup_params();
2179 sym2_transport_template = spi_attach_transport(&sym2_transport_functions);
2180 if (!sym2_transport_template)
2181 return -ENODEV;
2182
2183 error = pci_register_driver(&sym2_driver);
2184 if (error)
2185 spi_release_transport(sym2_transport_template);
2186 return error;
2187}
2188
2189static void __exit sym2_exit(void)
2190{
2191 pci_unregister_driver(&sym2_driver);
2192 spi_release_transport(sym2_transport_template);
2193}
2194
2195module_init(sym2_init);
2196module_exit(sym2_exit);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
new file mode 100644
index 000000000000..e943f167fb51
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -0,0 +1,300 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40#ifndef SYM_GLUE_H
41#define SYM_GLUE_H
42
43#include <linux/config.h>
44#include <linux/delay.h>
45#include <linux/ioport.h>
46#include <linux/pci.h>
47#include <linux/string.h>
48#include <linux/timer.h>
49#include <linux/types.h>
50
51#include <asm/io.h>
52#ifdef __sparc__
53# include <asm/irq.h>
54#endif
55
56#include <scsi/scsi.h>
57#include <scsi/scsi_cmnd.h>
58#include <scsi/scsi_device.h>
59#include <scsi/scsi_transport_spi.h>
60#include <scsi/scsi_host.h>
61
62#include "sym53c8xx.h"
63#include "sym_defs.h"
64#include "sym_misc.h"
65
66/*
67 * Configuration addendum for Linux.
68 */
69#define SYM_CONF_TIMER_INTERVAL ((HZ+1)/2)
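/*
 * A worked example: with HZ = 100 this evaluates to (100+1)/2 = 50
 * jiffies, i.e. the driver timer ticks about twice per second.
 */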
70
71#define SYM_OPT_HANDLE_DIR_UNKNOWN
72#define SYM_OPT_HANDLE_DEVICE_QUEUEING
73#define SYM_OPT_LIMIT_COMMAND_REORDERING
74
75/*
76 * Print a message with severity.
77 */
78#define printf_emerg(args...) printk(KERN_EMERG args)
79#define printf_alert(args...) printk(KERN_ALERT args)
80#define printf_crit(args...) printk(KERN_CRIT args)
81#define printf_err(args...) printk(KERN_ERR args)
82#define printf_warning(args...) printk(KERN_WARNING args)
83#define printf_notice(args...) printk(KERN_NOTICE args)
84#define printf_info(args...) printk(KERN_INFO args)
85#define printf_debug(args...) printk(KERN_DEBUG args)
86#define printf(args...) printk(args)
87
88/*
89 * A 'read barrier' flushes any data that have been prefetched
90 * by the processor due to out of order execution. Such a barrier
91 * must notably be inserted prior to looking at data that have
92 * been DMAed, assuming that the program does memory READs in proper
93 * order and that the device ensured proper ordering of WRITEs.
94 *
95 * A 'write barrier' prevents any previous WRITEs from passing
96 * later WRITEs. Such barriers must be inserted each time another agent
97 * relies on ordering of WRITEs.
98 *
99 * Note that, due to posting of PCI memory writes, we also must
100 * insert dummy PCI read transactions when some ordering involving
101 * both directions over the PCI does matter. PCI transactions are
102 * fully ordered in each direction.
103 */
104
105#define MEMORY_READ_BARRIER() rmb()
106#define MEMORY_WRITE_BARRIER() wmb()
107
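/*
 * Illustrative sketch (not part of the driver build): the pattern the
 * note above describes, with made-up names.  A memory write that the
 * chip will DMA-read is ordered before the MMIO "kick", and a dummy
 * MMIO read flushes the posted PCI write when its completion matters.
 */
#if 0
static inline void example_kick_device(u32 *desc, u32 val, void __iomem *doorbell)
{
	*desc = val;			/* data the device will fetch by DMA */
	MEMORY_WRITE_BARRIER();		/* make it visible before the kick */
	writel(1, doorbell);		/* posted PCI memory write */
	(void) readl(doorbell);		/* dummy read flushes posted writes */
}
#endif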
108/*
109 * IO functions definition for big/little endian CPU support.
110 * IO function definitions for big/little endian CPU support.
111 * For now, PCI chips are only supported in little endian addressing mode.
112
113#ifdef __BIG_ENDIAN
114
115#define readw_l2b readw
116#define readl_l2b readl
117#define writew_b2l writew
118#define writel_b2l writel
119
120#else /* little endian */
121
122#define readw_raw readw
123#define readl_raw readl
124#define writew_raw writew
125#define writel_raw writel
126
127#endif /* endian */
128
129#ifdef SYM_CONF_CHIP_BIG_ENDIAN
130#error "Chips in BIG ENDIAN addressing mode are not (yet) supported"
131#endif
132
133/*
134 * If the CPU and the chip use same endian-ness addressing,
135 * no byte reordering is needed for script patching.
136 * Macro cpu_to_scr() is to be used for script patching.
137 * Macro scr_to_cpu() is to be used for getting a DWORD
138 * from the script.
139 */
140
141#define cpu_to_scr(dw) cpu_to_le32(dw)
142#define scr_to_cpu(dw) le32_to_cpu(dw)
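/*
 * Illustrative sketch (not part of the driver build): patching a
 * SCRIPTS DWORD with the macros above; 'script' and 'bus_addr' are
 * made-up names for the example.
 */
#if 0
static inline void example_patch_script(u32 *script, int idx, u32 bus_addr)
{
	script[idx] = cpu_to_scr(bus_addr);	/* CPU value -> script byte order */
	/* scr_to_cpu(script[idx]) now yields bus_addr again */
}
#endif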
143
144/*
145 * Remap some status field values.
146 */
147#define CAM_REQ_CMP DID_OK
148#define CAM_SEL_TIMEOUT DID_NO_CONNECT
149#define CAM_CMD_TIMEOUT DID_TIME_OUT
150#define CAM_REQ_ABORTED DID_ABORT
151#define CAM_UNCOR_PARITY DID_PARITY
152#define CAM_SCSI_BUS_RESET DID_RESET
153#define CAM_REQUEUE_REQ DID_SOFT_ERROR
154#define CAM_UNEXP_BUSFREE DID_ERROR
155#define CAM_SCSI_BUSY DID_BUS_BUSY
156
157#define CAM_DEV_NOT_THERE DID_NO_CONNECT
158#define CAM_REQ_INVALID DID_ERROR
159#define CAM_REQ_TOO_BIG DID_ERROR
160
161#define CAM_RESRC_UNAVAIL DID_ERROR
162
163/*
164 * Remap data direction values.
165 */
166#define CAM_DIR_NONE DMA_NONE
167#define CAM_DIR_IN DMA_FROM_DEVICE
168#define CAM_DIR_OUT DMA_TO_DEVICE
169#define CAM_DIR_UNKNOWN DMA_BIDIRECTIONAL
170
171/*
172 * These ones are used as return code from
173 * error recovery handlers under Linux.
174 */
175#define SCSI_SUCCESS SUCCESS
176#define SCSI_FAILED FAILED
177
178/*
179 * System specific target data structure.
180 * None for now, under Linux.
181 */
182/* #define SYM_HAVE_STCB */
183
184/*
185 * System specific lun data structure.
186 */
187#define SYM_HAVE_SLCB
188struct sym_slcb {
189 u_short reqtags; /* Number of tags requested by user */
190 u_short scdev_depth; /* Queue depth set in select_queue_depth() */
191};
192
193/*
194 * System specific command data structure.
195 * Not needed under Linux.
196 */
197/* struct sym_sccb */
198
199/*
200 * System specific host data structure.
201 */
202struct sym_shcb {
203 /*
204	 * Chip and controller identification.
205 */
206 int unit;
207 char inst_name[16];
208 char chip_name[8];
209 struct pci_dev *device;
210
211 struct Scsi_Host *host;
212
213 void __iomem * ioaddr; /* MMIO kernel io address */
214 void __iomem * ramaddr; /* RAM kernel io address */
215 u_short io_ws; /* IO window size */
216 int irq; /* IRQ number */
217
218 struct timer_list timer; /* Timer handler link header */
219 u_long lasttime;
220 u_long settle_time; /* Resetting the SCSI BUS */
221 u_char settle_time_valid;
222};
223
224/*
225 * Return the name of the controller.
226 */
227#define sym_name(np) (np)->s.inst_name
228
229struct sym_nvram;
230
231/*
232 * The IO macros require a struct called 's' and are abused in sym_nvram.c
233 */
234struct sym_device {
235 struct pci_dev *pdev;
236 unsigned long mmio_base;
237 unsigned long ram_base;
238 struct {
239 void __iomem *ioaddr;
240 void __iomem *ramaddr;
241 } s;
242 struct sym_chip chip;
243 struct sym_nvram *nvram;
244 u_short device_id;
245 u_char host_id;
246};
247
248/*
249 * Driver host data structure.
250 */
251struct host_data {
252 struct sym_hcb *ncb;
253};
254
255static inline struct sym_hcb * sym_get_hcb(struct Scsi_Host *host)
256{
257 return ((struct host_data *)host->hostdata)->ncb;
258}
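/*
 * Illustrative sketch (not part of the driver build), assuming the
 * usual scsi_host_alloc() private-data scheme: how an HCB would be
 * attached so that sym_get_hcb() can find it again.  'tpnt' and 'np'
 * are made-up names for the example.
 */
#if 0
static struct Scsi_Host *example_attach(struct scsi_host_template *tpnt,
					struct sym_hcb *np)
{
	struct Scsi_Host *shost = scsi_host_alloc(tpnt, sizeof(struct host_data));

	if (shost)
		((struct host_data *)shost->hostdata)->ncb = np;
	return shost;		/* sym_get_hcb(shost) now returns np */
}
#endif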
259
260#include "sym_fw.h"
261#include "sym_hipd.h"
262
263/*
264 * Set the status field of a CAM CCB.
265 */
266static __inline void
267sym_set_cam_status(struct scsi_cmnd *cmd, int status)
268{
269 cmd->result &= ~(0xff << 16);
270 cmd->result |= (status << 16);
271}
272
273/*
274 * Get the status field of a CAM CCB.
275 */
276static __inline int
277sym_get_cam_status(struct scsi_cmnd *cmd)
278{
279 return host_byte(cmd->result);
280}
281
282/*
283 * Build CAM result for a successful IO and for a failed IO.
284 */
285static __inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid)
286{
287 cmd->resid = resid;
288 cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f));
289}
290void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid);
291
292void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *ccb);
293#define sym_print_addr(cmd, arg...) dev_info(&cmd->device->sdev_gendev , ## arg)
294void sym_xpt_async_bus_reset(struct sym_hcb *np);
295void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target);
296int sym_setup_data_and_start (struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
297void sym_log_bus_error(struct sym_hcb *np);
298void sym_sniff_inquiry(struct sym_hcb *np, struct scsi_cmnd *cmd, int resid);
299
300#endif /* SYM_GLUE_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
new file mode 100644
index 000000000000..50a176b3888d
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -0,0 +1,5865 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx>
7 *
8 * This driver is derived from the Linux sym53c8xx driver.
9 * Copyright (C) 1998-2000 Gerard Roudier
10 *
11 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
12 * a port of the FreeBSD ncr driver to Linux-1.2.13.
13 *
14 * The original ncr driver has been written for 386bsd and FreeBSD by
15 * Wolfgang Stanglmeier <wolf@cologne.de>
16 * Stefan Esser <se@mi.Uni-Koeln.de>
17 * Copyright (C) 1994 Wolfgang Stanglmeier
18 *
19 * Other major contributions:
20 *
21 * NVRAM detection and reading.
22 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
23 *
24 *-----------------------------------------------------------------------------
25 *
26 * This program is free software; you can redistribute it and/or modify
27 * it under the terms of the GNU General Public License as published by
28 * the Free Software Foundation; either version 2 of the License, or
29 * (at your option) any later version.
30 *
31 * This program is distributed in the hope that it will be useful,
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34 * GNU General Public License for more details.
35 *
36 * You should have received a copy of the GNU General Public License
37 * along with this program; if not, write to the Free Software
38 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
39 */
40#include "sym_glue.h"
41#include "sym_nvram.h"
42
43#if 0
44#define SYM_DEBUG_GENERIC_SUPPORT
45#endif
46
47/*
48 * Needed function prototypes.
49 */
50static void sym_int_ma (struct sym_hcb *np);
51static void sym_int_sir (struct sym_hcb *np);
52static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np);
53static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa);
54static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln);
55static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp);
56static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp);
57static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp);
58
59/*
60 * Print a buffer in hexadecimal format with a ".\n" at end.
61 */
62static void sym_printl_hex(u_char *p, int n)
63{
64 while (n-- > 0)
65 printf (" %x", *p++);
66 printf (".\n");
67}
68
69/*
70 * Print out the content of a SCSI message.
71 */
72static int sym_show_msg (u_char * msg)
73{
74 u_char i;
75 printf ("%x",*msg);
76 if (*msg==M_EXTENDED) {
77 for (i=1;i<8;i++) {
78 if (i-1>msg[1]) break;
79 printf ("-%x",msg[i]);
80 }
81 return (i+1);
82 } else if ((*msg & 0xf0) == 0x20) {
83 printf ("-%x",msg[1]);
84 return (2);
85 }
86 return (1);
87}
88
89static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
90{
91 sym_print_addr(cp->cmd, "%s: ", label);
92
93 sym_show_msg(msg);
94 printf(".\n");
95}
96
97static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg)
98{
99 struct sym_tcb *tp = &np->target[target];
100 dev_info(&tp->sdev->sdev_target->dev, "%s: ", label);
101
102 sym_show_msg(msg);
103 printf(".\n");
104}
105
106/*
107 * Print something that tells about extended errors.
108 */
109void sym_print_xerr(struct scsi_cmnd *cmd, int x_status)
110{
111 if (x_status & XE_PARITY_ERR) {
112 sym_print_addr(cmd, "unrecovered SCSI parity error.\n");
113 }
114 if (x_status & XE_EXTRA_DATA) {
115 sym_print_addr(cmd, "extraneous data discarded.\n");
116 }
117 if (x_status & XE_BAD_PHASE) {
118 sym_print_addr(cmd, "illegal scsi phase (4/5).\n");
119 }
120 if (x_status & XE_SODL_UNRUN) {
121 sym_print_addr(cmd, "ODD transfer in DATA OUT phase.\n");
122 }
123 if (x_status & XE_SWIDE_OVRUN) {
124 sym_print_addr(cmd, "ODD transfer in DATA IN phase.\n");
125 }
126}
127
128/*
129 * Return a string for SCSI BUS mode.
130 */
131static char *sym_scsi_bus_mode(int mode)
132{
133 switch(mode) {
134 case SMODE_HVD: return "HVD";
135 case SMODE_SE: return "SE";
136 case SMODE_LVD: return "LVD";
137 }
138 return "??";
139}
140
141/*
142 * Soft reset the chip.
143 *
144 * Raising SRST when the chip is running may cause
145 * problems on dual function chips (see below).
146 * On the other hand, LVD devices need some delay
147 * to settle and report actual BUS mode in STEST4.
148 */
149static void sym_chip_reset (struct sym_hcb *np)
150{
151 OUTB(np, nc_istat, SRST);
152 udelay(10);
153 OUTB(np, nc_istat, 0);
154 udelay(2000); /* For BUS MODE to settle */
155}
156
157/*
158 * Really soft reset the chip.:)
159 *
160 * Some 896 and 876 chip revisions may hang-up if we set
161 * the SRST (soft reset) bit at the wrong time when SCRIPTS
162 * are running.
163 * So, we need to abort the current operation prior to
164 * soft resetting the chip.
165 */
166static void sym_soft_reset (struct sym_hcb *np)
167{
168 u_char istat = 0;
169 int i;
170
171 if (!(np->features & FE_ISTAT1) || !(INB(np, nc_istat1) & SCRUN))
172 goto do_chip_reset;
173
174 OUTB(np, nc_istat, CABRT);
175 for (i = 100000 ; i ; --i) {
176 istat = INB(np, nc_istat);
177 if (istat & SIP) {
178 INW(np, nc_sist);
179 }
180 else if (istat & DIP) {
181 if (INB(np, nc_dstat) & ABRT)
182 break;
183 }
184 udelay(5);
185 }
186 OUTB(np, nc_istat, 0);
187 if (!i)
188 printf("%s: unable to abort current chip operation, "
189 "ISTAT=0x%02x.\n", sym_name(np), istat);
190do_chip_reset:
191 sym_chip_reset(np);
192}
193
194/*
195 * Start reset process.
196 *
197 * The interrupt handler will reinitialize the chip.
198 */
199static void sym_start_reset(struct sym_hcb *np)
200{
201 sym_reset_scsi_bus(np, 1);
202}
203
204int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int)
205{
206 u32 term;
207 int retv = 0;
208
209 sym_soft_reset(np); /* Soft reset the chip */
210 if (enab_int)
211 OUTW(np, nc_sien, RST);
212 /*
213 * Enable Tolerant, reset IRQD if present and
214 * properly set IRQ mode, prior to resetting the bus.
215 */
216 OUTB(np, nc_stest3, TE);
217 OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM));
218 OUTB(np, nc_scntl1, CRST);
219 udelay(200);
220
221 if (!SYM_SETUP_SCSI_BUS_CHECK)
222 goto out;
223 /*
224 * Check for no terminators or SCSI bus shorts to ground.
225 * Read SCSI data bus, data parity bits and control signals.
226 * We are expecting RESET to be TRUE and other signals to be
227 * FALSE.
228 */
229 term = INB(np, nc_sstat0);
230 term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */
231 term |= ((INB(np, nc_sstat2) & 0x01) << 26) | /* sdp1 */
232 ((INW(np, nc_sbdl) & 0xff) << 9) | /* d7-0 */
233 ((INW(np, nc_sbdl) & 0xff00) << 10) | /* d15-8 */
234 INB(np, nc_sbcl); /* req ack bsy sel atn msg cd io */
235
236 if (!np->maxwide)
237 term &= 0x3ffff;
238
239 if (term != (2<<7)) {
240 printf("%s: suspicious SCSI data while resetting the BUS.\n",
241 sym_name(np));
242 printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
243 "0x%lx, expecting 0x%lx\n",
244 sym_name(np),
245 (np->features & FE_WIDE) ? "dp1,d15-8," : "",
246 (u_long)term, (u_long)(2<<7));
247 if (SYM_SETUP_SCSI_BUS_CHECK == 1)
248 retv = 1;
249 }
250out:
251 OUTB(np, nc_scntl1, 0);
252 return retv;
253}
254
255/*
256 * Select SCSI clock frequency
257 */
258static void sym_selectclock(struct sym_hcb *np, u_char scntl3)
259{
260 /*
261 * If multiplier not present or not selected, leave here.
262 */
263 if (np->multiplier <= 1) {
264 OUTB(np, nc_scntl3, scntl3);
265 return;
266 }
267
268 if (sym_verbose >= 2)
269 printf ("%s: enabling clock multiplier\n", sym_name(np));
270
271 OUTB(np, nc_stest1, DBLEN); /* Enable clock multiplier */
272 /*
273 * Wait for the LCKFRQ bit to be set if supported by the chip.
274 * Otherwise wait 50 micro-seconds (at least).
275 */
276 if (np->features & FE_LCKFRQ) {
277 int i = 20;
278 while (!(INB(np, nc_stest4) & LCKFRQ) && --i > 0)
279 udelay(20);
280 if (!i)
281 printf("%s: the chip cannot lock the frequency\n",
282 sym_name(np));
283 } else
284 udelay((50+10));
285 OUTB(np, nc_stest3, HSC); /* Halt the scsi clock */
286 OUTB(np, nc_scntl3, scntl3);
287 OUTB(np, nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
288 OUTB(np, nc_stest3, 0x00); /* Restart scsi clock */
289}
290
291
292/*
293 * Determine the chip's clock frequency.
294 *
295 * This is essential for the negotiation of the synchronous
296 * transfer rate.
297 *
298 * Note: we have to return the correct value.
299 * THERE IS NO SAFE DEFAULT VALUE.
300 *
301 * Most NCR/SYMBIOS boards are delivered with a 40 MHz clock.
302 * 53C860 and 53C875 rev. 1 support fast20 transfers but
303 * do not have a clock doubler and so are provided with an
304 * 80 MHz clock. All other fast20 boards incorporate a doubler
305 * and so should be delivered with a 40 MHz clock.
306 * The recent fast40 chips (895/896/895A/1010) use a 40 MHz base
307 * clock and provide a clock quadrupler (160 MHz).
308 */
309
310/*
311 * calculate SCSI clock frequency (in KHz)
312 */
313static unsigned getfreq (struct sym_hcb *np, int gen)
314{
315 unsigned int ms = 0;
316 unsigned int f;
317
318 /*
319 * Measure GEN timer delay in order
320 * to calculate SCSI clock frequency
321 *
322 * This code will never execute too
323 * many loop iterations (if DELAY is
324 * reasonably correct). It could get
325 * too low a delay (too high a freq.)
326 * if the CPU is slow executing the
327 * loop for some reason (an NMI, for
328	 * example). For this reason, when
329	 * multiple measurements are performed,
330	 * we trust the higher delay (i.e. the
331	 * lower frequency returned).
332 */
333 OUTW(np, nc_sien, 0); /* mask all scsi interrupts */
334 INW(np, nc_sist); /* clear pending scsi interrupt */
335 OUTB(np, nc_dien, 0); /* mask all dma interrupts */
336 INW(np, nc_sist); /* another one, just to be sure :) */
337 /*
338 * The C1010-33 core does not report GEN in SIST,
339 * if this interrupt is masked in SIEN.
340 * I don't know yet if the C1010-66 behaves the same way.
341 */
342 if (np->features & FE_C10) {
343 OUTW(np, nc_sien, GEN);
344 OUTB(np, nc_istat1, SIRQD);
345 }
346 OUTB(np, nc_scntl3, 4); /* set pre-scaler to divide by 3 */
347 OUTB(np, nc_stime1, 0); /* disable general purpose timer */
348 OUTB(np, nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */
349 while (!(INW(np, nc_sist) & GEN) && ms++ < 100000)
350 udelay(1000/4); /* count in 1/4 of ms */
351 OUTB(np, nc_stime1, 0); /* disable general purpose timer */
352 /*
353 * Undo C1010-33 specific settings.
354 */
355 if (np->features & FE_C10) {
356 OUTW(np, nc_sien, 0);
357 OUTB(np, nc_istat1, 0);
358 }
359 /*
360 * set prescaler to divide by whatever 0 means
361 * 0 ought to choose divide by 2, but appears
362 * to set divide by 3.5 mode in my 53c810 ...
363 */
364 OUTB(np, nc_scntl3, 0);
365
366 /*
367 * adjust for prescaler, and convert into KHz
368 */
369 f = ms ? ((1 << gen) * (4340*4)) / ms : 0;
370
371 /*
372 * The C1010-33 result is biased by a factor
373 * of 2/3 compared to earlier chips.
374 */
375 if (np->features & FE_C10)
376 f = (f * 2) / 3;
377
378 if (sym_verbose >= 2)
379 printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
380 sym_name(np), gen, ms/4, f);
381
382 return f;
383}
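/*
 * A worked example with illustrative numbers: for gen = 8 the nominal
 * GEN delay is (1 << 8) * 125us = 32 ms.  If the polling loop above
 * counts ms = 111 quarter-milliseconds (about 28 ms) before GEN fires,
 * the formula gives f = ((1 << 8) * 4340 * 4) / 111 ~= 40037 kHz,
 * i.e. a 40 MHz SCSI clock.
 */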
384
385static unsigned sym_getfreq (struct sym_hcb *np)
386{
387 u_int f1, f2;
388 int gen = 8;
389
390 getfreq (np, gen); /* throw away first result */
391 f1 = getfreq (np, gen);
392 f2 = getfreq (np, gen);
393 if (f1 > f2) f1 = f2; /* trust lower result */
394 return f1;
395}
396
397/*
398 * Get/probe chip SCSI clock frequency
399 */
400static void sym_getclock (struct sym_hcb *np, int mult)
401{
402 unsigned char scntl3 = np->sv_scntl3;
403 unsigned char stest1 = np->sv_stest1;
404 unsigned f1;
405
406 np->multiplier = 1;
407 f1 = 40000;
408 /*
409 * True with 875/895/896/895A with clock multiplier selected
410 */
411 if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
412 if (sym_verbose >= 2)
413 printf ("%s: clock multiplier found\n", sym_name(np));
414 np->multiplier = mult;
415 }
416
417 /*
418 * If multiplier not found or scntl3 not 7,5,3,
419 * reset chip and get frequency from general purpose timer.
420 * Otherwise trust scntl3 BIOS setting.
421 */
422 if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
423 OUTB(np, nc_stest1, 0); /* make sure doubler is OFF */
424 f1 = sym_getfreq (np);
425
426 if (sym_verbose)
427 printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);
428
429 if (f1 < 45000) f1 = 40000;
430 else if (f1 < 55000) f1 = 50000;
431 else f1 = 80000;
432
433 if (f1 < 80000 && mult > 1) {
434 if (sym_verbose >= 2)
435 printf ("%s: clock multiplier assumed\n",
436 sym_name(np));
437 np->multiplier = mult;
438 }
439 } else {
440 if ((scntl3 & 7) == 3) f1 = 40000;
441 else if ((scntl3 & 7) == 5) f1 = 80000;
442 else f1 = 160000;
443
444 f1 /= np->multiplier;
445 }
446
447 /*
448 * Compute controller synchronous parameters.
449 */
450 f1 *= np->multiplier;
451 np->clock_khz = f1;
452}
453
454/*
455 * Get/probe PCI clock frequency
456 */
457static int sym_getpciclock (struct sym_hcb *np)
458{
459 int f = 0;
460
461 /*
462 * For now, we only need to know about the actual
463 * PCI BUS clock frequency for C1010-66 chips.
464 */
465#if 1
466 if (np->features & FE_66MHZ) {
467#else
468 if (1) {
469#endif
470 OUTB(np, nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
471 f = sym_getfreq(np);
472 OUTB(np, nc_stest1, 0);
473 }
474 np->pciclk_khz = f;
475
476 return f;
477}
478
479/*
480 * SYMBIOS chip clock divisor table.
481 *
482 * Divisors are multiplied by 10,000,000 in order to make
483 * calculations simpler.
484 */
485#define _5M 5000000
486static u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
487
488/*
489 * Get clock factor and sync divisor for a given
490 * synchronous factor period.
491 */
492static int
493sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
494{
495 u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */
496 int div = np->clock_divn; /* Number of divisors supported */
497 u32 fak; /* Sync factor in sxfer */
498 u32 per; /* Period in tenths of ns */
499 u32 kpc; /* (per * clk) */
500 int ret;
501
502 /*
503 * Compute the synchronous period in tenths of nano-seconds
504 */
505 if (dt && sfac <= 9) per = 125;
506 else if (sfac <= 10) per = 250;
507 else if (sfac == 11) per = 303;
508 else if (sfac == 12) per = 500;
509 else per = 40 * sfac;
510 ret = per;
511
512 kpc = per * clk;
513 if (dt)
514 kpc <<= 1;
515
516 /*
517 * For earliest C10 revision 0, we cannot use extra
518 * clocks for the setting of the SCSI clocking.
519 * Note that this limits the lowest sync data transfer
520 * to 5 Mega-transfers per second and may result in
521 * using higher clock divisors.
522 */
523#if 1
524 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) {
525 /*
526 * Look for the lowest clock divisor that allows an
527 * output speed not faster than the period.
528 */
529 while (div > 0) {
530 --div;
531 if (kpc > (div_10M[div] << 2)) {
532 ++div;
533 break;
534 }
535 }
536 fak = 0; /* No extra clocks */
537 if (div == np->clock_divn) { /* Are we too fast ? */
538 ret = -1;
539 }
540 *divp = div;
541 *fakp = fak;
542 return ret;
543 }
544#endif
545
546 /*
547 * Look for the greatest clock divisor that allows an
548 * input speed faster than the period.
549 */
550 while (div-- > 0)
551 if (kpc >= (div_10M[div] << 2)) break;
552
553 /*
554 * Calculate the lowest clock factor that allows an output
555 * speed not faster than the period, and the max output speed.
556 * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT.
557 * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT.
558 */
559 if (dt) {
560 fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2;
561 /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */
562 } else {
563 fak = (kpc - 1) / div_10M[div] + 1 - 4;
564 /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */
565 }
566
567 /*
568 * Check against our hardware limits, or bugs :).
569 */
570 if (fak > 2) {
571 fak = 2;
572 ret = -1;
573 }
574
575 /*
576 * Compute and return sync parameters.
577 */
578 *divp = div;
579 *fakp = fak;
580
581 return ret;
582}
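/*
 * A worked example with illustrative numbers: an ST (non-DT) request
 * with sfac = 10 (per = 250, i.e. 25 ns) on an Ultra2 chip clocked at
 * 160000 kHz gives kpc = 250 * 160000 = 40,000,000.  The divisor scan
 * stops at div = 0 since div_10M[0] << 2 == 40,000,000 <= kpc, and
 * fak = (kpc - 1) / div_10M[0] + 1 - 4 = 3 + 1 - 4 = 0, i.e. a 25 ns
 * output period with no extra clocks.
 */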
583
584/*
585 * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64,
586 * or 128 transfers. All chips support bursts of at least
587 * 16 transfers. The 825A, 875 and 895 chips support bursts of up
588 * to 128 transfers and the 895A and 896 support bursts of up
589 * to 64 transfers. All other chips support bursts of up to
590 * 16 transfers.
591 *
592 * For PCI 32 bit data transfers each transfer is a DWORD.
593 * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
594 *
595 * We use log base 2 (burst length) as internal code, with
596 * value 0 meaning "burst disabled".
597 */
598
599/*
600 * Burst length from burst code.
601 */
602#define burst_length(bc) (!(bc) ? 0 : 1 << (bc))
603
604/*
605 * Burst code from io register bits.
606 */
607#define burst_code(dmode, ctest4, ctest5) \
608	((ctest4) & 0x80 ? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1)
609
610/*
611 * Set initial io register bits from burst code.
612 */
613static __inline void sym_init_burst(struct sym_hcb *np, u_char bc)
614{
615 np->rv_ctest4 &= ~0x80;
616 np->rv_dmode &= ~(0x3 << 6);
617 np->rv_ctest5 &= ~0x4;
618
619 if (!bc) {
620 np->rv_ctest4 |= 0x80;
621 }
622 else {
623 --bc;
624 np->rv_dmode |= ((bc & 0x3) << 6);
625 np->rv_ctest5 |= (bc & 0x4);
626 }
627}
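/*
 * A worked example: burst code bc = 5 means bursts of
 * burst_length(5) = 1 << 5 = 32 transfers.  sym_init_burst() stores
 * bc - 1 = 4 as DMODE bits 7:6 = 0 and CTEST5 bit 2 = 1, and
 * burst_code() recovers ((dmode & 0xc0) >> 6) + (ctest5 & 0x04) + 1 =
 * 0 + 4 + 1 = 5.  A code of 0 sets CTEST4 bit 7, disabling bursts.
 */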
628
629
630/*
631 * Print out the list of targets that have some flag disabled by user.
632 */
633static void sym_print_targets_flag(struct sym_hcb *np, int mask, char *msg)
634{
635 int cnt;
636 int i;
637
638 for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
639 if (i == np->myaddr)
640 continue;
641 if (np->target[i].usrflags & mask) {
642 if (!cnt++)
643 printf("%s: %s disabled for targets",
644 sym_name(np), msg);
645 printf(" %d", i);
646 }
647 }
648 if (cnt)
649 printf(".\n");
650}
651
652/*
653 * Save initial settings of some IO registers.
654 * Assumed to have been set by BIOS.
655 * We cannot reset the chip prior to reading the
656 * IO registers, since information will be lost.
657 * Since the SCRIPTS processor may be running, this
658 * is not safe on paper, but it seems to work quite
659 * well. :)
660 */
661static void sym_save_initial_setting (struct sym_hcb *np)
662{
663 np->sv_scntl0 = INB(np, nc_scntl0) & 0x0a;
664 np->sv_scntl3 = INB(np, nc_scntl3) & 0x07;
665 np->sv_dmode = INB(np, nc_dmode) & 0xce;
666 np->sv_dcntl = INB(np, nc_dcntl) & 0xa8;
667 np->sv_ctest3 = INB(np, nc_ctest3) & 0x01;
668 np->sv_ctest4 = INB(np, nc_ctest4) & 0x80;
669 np->sv_gpcntl = INB(np, nc_gpcntl);
670 np->sv_stest1 = INB(np, nc_stest1);
671 np->sv_stest2 = INB(np, nc_stest2) & 0x20;
672 np->sv_stest4 = INB(np, nc_stest4);
673 if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */
674 np->sv_scntl4 = INB(np, nc_scntl4);
675 np->sv_ctest5 = INB(np, nc_ctest5) & 0x04;
676 }
677 else
678 np->sv_ctest5 = INB(np, nc_ctest5) & 0x24;
679}
680
681/*
682 * Prepare io register values used by sym_start_up()
683 * according to selected and supported features.
684 */
685static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram)
686{
687 u_char burst_max;
688 u32 period;
689 int i;
690
691 /*
692 * Wide ?
693 */
694 np->maxwide = (np->features & FE_WIDE)? 1 : 0;
695
696 /*
697 * Guess the frequency of the chip's clock.
698 */
699 if (np->features & (FE_ULTRA3 | FE_ULTRA2))
700 np->clock_khz = 160000;
701 else if (np->features & FE_ULTRA)
702 np->clock_khz = 80000;
703 else
704 np->clock_khz = 40000;
705
706 /*
707 * Get the clock multiplier factor.
708 */
709 if (np->features & FE_QUAD)
710 np->multiplier = 4;
711 else if (np->features & FE_DBLR)
712 np->multiplier = 2;
713 else
714 np->multiplier = 1;
715
716	 * Measure the SCSI clock frequency for chips
717	 * on which it may vary from the assumed one.
718 * it may vary from assumed one.
719 */
720 if (np->features & FE_VARCLK)
721 sym_getclock(np, np->multiplier);
722
723 /*
724 * Divisor to be used for async (timer pre-scaler).
725 */
726 i = np->clock_divn - 1;
727 while (--i >= 0) {
728 if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) {
729 ++i;
730 break;
731 }
732 }
733 np->rv_scntl3 = i+1;
734
735 /*
736 * The C1010 uses hardwired divisors for async.
737	 * So, we just throw away the async. divisor. :-)
738 */
739 if (np->features & FE_C10)
740 np->rv_scntl3 = 0;
741
742 /*
743 * Minimum synchronous period factor supported by the chip.
744 * Btw, 'period' is in tenths of nanoseconds.
745 */
746 period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
747
748 if (period <= 250) np->minsync = 10;
749 else if (period <= 303) np->minsync = 11;
750 else if (period <= 500) np->minsync = 12;
751 else np->minsync = (period + 40 - 1) / 40;
752
753 /*
754 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
755 */
756 if (np->minsync < 25 &&
757 !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
758 np->minsync = 25;
759 else if (np->minsync < 12 &&
760 !(np->features & (FE_ULTRA2|FE_ULTRA3)))
761 np->minsync = 12;
762
763 /*
764 * Maximum synchronous period factor supported by the chip.
765 */
766 period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
767 np->maxsync = period > 2540 ? 254 : period / 10;
768
769 /*
770 * If chip is a C1010, guess the sync limits in DT mode.
771 */
772 if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) {
773 if (np->clock_khz == 160000) {
774 np->minsync_dt = 9;
775 np->maxsync_dt = 50;
776 np->maxoffs_dt = nvram->type ? 62 : 31;
777 }
778 }
779
780 /*
781 * 64 bit addressing (895A/896/1010) ?
782 */
783 if (np->features & FE_DAC) {
784#if SYM_CONF_DMA_ADDRESSING_MODE == 0
785 np->rv_ccntl1 |= (DDAC);
786#elif SYM_CONF_DMA_ADDRESSING_MODE == 1
787 if (!np->use_dac)
788 np->rv_ccntl1 |= (DDAC);
789 else
790 np->rv_ccntl1 |= (XTIMOD | EXTIBMV);
791#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
792 if (!np->use_dac)
793 np->rv_ccntl1 |= (DDAC);
794 else
795 np->rv_ccntl1 |= (0 | EXTIBMV);
796#endif
797 }
798
799 /*
800 * Phase mismatch handled by SCRIPTS (895A/896/1010) ?
801 */
802 if (np->features & FE_NOPM)
803 np->rv_ccntl0 |= (ENPMJ);
804
805 /*
806 * C1010-33 Errata: Part Number:609-039638 (rev. 1) is fixed.
807 * In dual channel mode, contention occurs if internal cycles
808 * are used. Disable internal cycles.
809 */
810 if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 &&
811 np->revision_id < 0x1)
812 np->rv_ccntl0 |= DILS;
813
814 /*
815 * Select burst length (dwords)
816 */
817 burst_max = SYM_SETUP_BURST_ORDER;
818 if (burst_max == 255)
819 burst_max = burst_code(np->sv_dmode, np->sv_ctest4,
820 np->sv_ctest5);
821 if (burst_max > 7)
822 burst_max = 7;
823 if (burst_max > np->maxburst)
824 burst_max = np->maxburst;
825
826 /*
827 * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
828 * This chip and the 860 Rev 1 may wrongly use PCI cache line
829 * based transactions on LOAD/STORE instructions. So we have
830 * to prevent these chips from using such PCI transactions in
831 * this driver. The generic ncr driver that does not use
832 * LOAD/STORE instructions does not need this work-around.
833 */
834 if ((np->device_id == PCI_DEVICE_ID_NCR_53C810 &&
835 np->revision_id >= 0x10 && np->revision_id <= 0x11) ||
836 (np->device_id == PCI_DEVICE_ID_NCR_53C860 &&
837 np->revision_id <= 0x1))
838 np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);
839
840 /*
841 * Select all supported special features.
842 * If we are using on-board RAM for scripts, prefetch (PFEN)
843 * does not help, but burst op fetch (BOF) does.
844 * Disabling PFEN makes sure BOF will be used.
845 */
846 if (np->features & FE_ERL)
847 np->rv_dmode |= ERL; /* Enable Read Line */
848 if (np->features & FE_BOF)
849 np->rv_dmode |= BOF; /* Burst Opcode Fetch */
850 if (np->features & FE_ERMP)
851 np->rv_dmode |= ERMP; /* Enable Read Multiple */
852#if 1
853 if ((np->features & FE_PFEN) && !np->ram_ba)
854#else
855 if (np->features & FE_PFEN)
856#endif
857 np->rv_dcntl |= PFEN; /* Prefetch Enable */
858 if (np->features & FE_CLSE)
859 np->rv_dcntl |= CLSE; /* Cache Line Size Enable */
860 if (np->features & FE_WRIE)
861 np->rv_ctest3 |= WRIE; /* Write and Invalidate */
862 if (np->features & FE_DFS)
863 np->rv_ctest5 |= DFS; /* Dma Fifo Size */
864
865 /*
866	 * Select some other settings.
867 */
868 np->rv_ctest4 |= MPEE; /* Master parity checking */
869 np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */
870
871 /*
872 * Get parity checking, host ID and verbose mode from NVRAM
873 */
874 np->myaddr = 255;
875 sym_nvram_setup_host(shost, np, nvram);
876
877 /*
878 * Get SCSI addr of host adapter (set by bios?).
879 */
880 if (np->myaddr == 255) {
881 np->myaddr = INB(np, nc_scid) & 0x07;
882 if (!np->myaddr)
883 np->myaddr = SYM_SETUP_HOST_ID;
884 }
885
886 /*
887 * Prepare initial io register bits for burst length
888 */
889 sym_init_burst(np, burst_max);
890
891 /*
892 * Set SCSI BUS mode.
893 * - LVD capable chips (895/895A/896/1010) report the
894 * current BUS mode through the STEST4 IO register.
895 * - For previous generation chips (825/825A/875),
896 * user has to tell us how to check against HVD,
897 * since a 100% safe algorithm is not possible.
898 */
899 np->scsi_mode = SMODE_SE;
900 if (np->features & (FE_ULTRA2|FE_ULTRA3))
901 np->scsi_mode = (np->sv_stest4 & SMODE);
902 else if (np->features & FE_DIFF) {
903 if (SYM_SETUP_SCSI_DIFF == 1) {
904 if (np->sv_scntl3) {
905 if (np->sv_stest2 & 0x20)
906 np->scsi_mode = SMODE_HVD;
907 }
908 else if (nvram->type == SYM_SYMBIOS_NVRAM) {
909 if (!(INB(np, nc_gpreg) & 0x08))
910 np->scsi_mode = SMODE_HVD;
911 }
912 }
913 else if (SYM_SETUP_SCSI_DIFF == 2)
914 np->scsi_mode = SMODE_HVD;
915 }
916 if (np->scsi_mode == SMODE_HVD)
917 np->rv_stest2 |= 0x20;
918
919 /*
920 * Set LED support from SCRIPTS.
921 * Ignore this feature for boards known to use a
922 * specific GPIO wiring and for the 895A, 896
923 * and 1010 that drive the LED directly.
924 */
925 if ((SYM_SETUP_SCSI_LED ||
926 (nvram->type == SYM_SYMBIOS_NVRAM ||
927 (nvram->type == SYM_TEKRAM_NVRAM &&
928 np->device_id == PCI_DEVICE_ID_NCR_53C895))) &&
929 !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
930 np->features |= FE_LED0;
931
932 /*
933 * Set irq mode.
934 */
935 switch(SYM_SETUP_IRQ_MODE & 3) {
936 case 2:
937 np->rv_dcntl |= IRQM;
938 break;
939 case 1:
940 np->rv_dcntl |= (np->sv_dcntl & IRQM);
941 break;
942 default:
943 break;
944 }
945
946 /*
947 * Configure targets according to driver setup.
948 * If NVRAM present get targets setup from NVRAM.
949 */
950 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
951 struct sym_tcb *tp = &np->target[i];
952
953 tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
954 tp->usrtags = SYM_SETUP_MAX_TAG;
955
956 sym_nvram_setup_target(np, i, nvram);
957
958 if (!tp->usrtags)
959 tp->usrflags &= ~SYM_TAGS_ENABLED;
960 }
961
962 /*
963 * Let user know about the settings.
964 */
965 printf("%s: %s, ID %d, Fast-%d, %s, %s\n", sym_name(np),
966 sym_nvram_type(nvram), np->myaddr,
967 (np->features & FE_ULTRA3) ? 80 :
968 (np->features & FE_ULTRA2) ? 40 :
969 (np->features & FE_ULTRA) ? 20 : 10,
970 sym_scsi_bus_mode(np->scsi_mode),
971 (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity");
972 /*
973 * Tell him more on demand.
974 */
975 if (sym_verbose) {
976 printf("%s: %s IRQ line driver%s\n",
977 sym_name(np),
978 np->rv_dcntl & IRQM ? "totem pole" : "open drain",
979 np->ram_ba ? ", using on-chip SRAM" : "");
980 printf("%s: using %s firmware.\n", sym_name(np), np->fw_name);
981 if (np->features & FE_NOPM)
982 printf("%s: handling phase mismatch from SCRIPTS.\n",
983 sym_name(np));
984 }
985 /*
986 * And still more.
987 */
988 if (sym_verbose >= 2) {
989 printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
990 "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
991 sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
992 np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
993
994 printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
995 "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
996 sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
997 np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
998 }
999 /*
1000 * Let user be aware of targets that have some disable flags set.
1001 */
1002 sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT");
1003 if (sym_verbose)
1004 sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED,
1005 "SCAN FOR LUNS");
1006
1007 return 0;
1008}
1009
1010/*
1011 * Test the pci bus snoop logic :-(
1012 *
1013 * Has to be called with interrupts disabled.
1014 */
1015#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED
1016static int sym_regtest (struct sym_hcb *np)
1017{
1018 register volatile u32 data;
1019 /*
1020 * chip registers may NOT be cached.
1021 * write 0xffffffff to a read only register area,
1022 * and try to read it back.
1023 */
1024 data = 0xffffffff;
1025 OUTL(np, nc_dstat, data);
1026 data = INL(np, nc_dstat);
1027#if 1
1028 if (data == 0xffffffff) {
1029#else
1030 if ((data & 0xe2f0fffd) != 0x02000080) {
1031#endif
1032 printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
1033 (unsigned) data);
1034 return (0x10);
1035 }
1036 return (0);
1037}
1038#endif
1039
1040static int sym_snooptest (struct sym_hcb *np)
1041{
1042 u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
1043 int i, err=0;
1044#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED
1045 err |= sym_regtest (np);
1046 if (err) return (err);
1047#endif
1048restart_test:
1049 /*
1050 * Enable Master Parity Checking as we intend
1051 * to enable it for normal operations.
1052 */
1053 OUTB(np, nc_ctest4, (np->rv_ctest4 & MPEE));
1054 /*
1055 * init
1056 */
1057 pc = SCRIPTZ_BA(np, snooptest);
1058 host_wr = 1;
1059 sym_wr = 2;
1060 /*
1061 * Set memory and register.
1062 */
1063 np->scratch = cpu_to_scr(host_wr);
1064 OUTL(np, nc_temp, sym_wr);
1065 /*
1066 * Start script (exchange values)
1067 */
1068 OUTL(np, nc_dsa, np->hcb_ba);
1069 OUTL_DSP(np, pc);
1070 /*
1071 * Wait 'til done (with timeout)
1072 */
1073 for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
1074 if (INB(np, nc_istat) & (INTF|SIP|DIP))
1075 break;
1076 if (i>=SYM_SNOOP_TIMEOUT) {
1077 printf ("CACHE TEST FAILED: timeout.\n");
1078 return (0x20);
1079 }
1080 /*
1081 * Check for fatal DMA errors.
1082 */
1083 dstat = INB(np, nc_dstat);
1084#if 1	/* Band aid for broken hardware that fails PCI parity */
1085 if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
1086 printf ("%s: PCI DATA PARITY ERROR DETECTED - "
1087 "DISABLING MASTER DATA PARITY CHECKING.\n",
1088 sym_name(np));
1089 np->rv_ctest4 &= ~MPEE;
1090 goto restart_test;
1091 }
1092#endif
1093 if (dstat & (MDPE|BF|IID)) {
1094 printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat);
1095 return (0x80);
1096 }
1097 /*
1098 * Save termination position.
1099 */
1100 pc = INL(np, nc_dsp);
1101 /*
1102 * Read memory and register.
1103 */
1104 host_rd = scr_to_cpu(np->scratch);
1105 sym_rd = INL(np, nc_scratcha);
1106 sym_bk = INL(np, nc_temp);
1107 /*
1108 * Check termination position.
1109 */
1110 if (pc != SCRIPTZ_BA(np, snoopend)+8) {
1111 printf ("CACHE TEST FAILED: script execution failed.\n");
1112 printf ("start=%08lx, pc=%08lx, end=%08lx\n",
1113 (u_long) SCRIPTZ_BA(np, snooptest), (u_long) pc,
1114 (u_long) SCRIPTZ_BA(np, snoopend) +8);
1115 return (0x40);
1116 }
1117 /*
1118 * Show results.
1119 */
1120 if (host_wr != sym_rd) {
1121 printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n",
1122 (int) host_wr, (int) sym_rd);
1123 err |= 1;
1124 }
1125 if (host_rd != sym_wr) {
1126 printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n",
1127 (int) sym_wr, (int) host_rd);
1128 err |= 2;
1129 }
1130 if (sym_bk != sym_wr) {
1131 printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n",
1132 (int) sym_wr, (int) sym_bk);
1133 err |= 4;
1134 }
1135
1136 return (err);
1137}
1138
1139/*
1140 * log message for real hard errors
1141 *
1142 * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sx/s3/s4) @ name (dsp:dbc).
1143 * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf.
1144 *
1145 * exception register:
1146 * ds: dstat
1147 * si: sist
1148 *
1149 * SCSI bus lines:
1150 * so: control lines as driven by chip.
1151 * si: control lines as seen by chip.
1152 * sd: scsi data lines as seen by chip.
1153 *
1154 * wide/fastmode:
1155 * sx: sxfer (see the manual)
1156 * s3: scntl3 (see the manual)
1157 * s4: scntl4 (see the manual)
1158 *
1159 * current script command:
1160 * dsp: script address (relative to start of script).
1161 * dbc: first word of script command.
1162 *
1163 * First 24 registers of the chip:
1164 * r0..rf
1165 */
1166static void sym_log_hard_error(struct sym_hcb *np, u_short sist, u_char dstat)
1167{
1168 u32 dsp;
1169 int script_ofs;
1170 int script_size;
1171 char *script_name;
1172 u_char *script_base;
1173 int i;
1174
1175 dsp = INL(np, nc_dsp);
1176
1177 if (dsp > np->scripta_ba &&
1178 dsp <= np->scripta_ba + np->scripta_sz) {
1179 script_ofs = dsp - np->scripta_ba;
1180 script_size = np->scripta_sz;
1181 script_base = (u_char *) np->scripta0;
1182 script_name = "scripta";
1183 }
1184 else if (np->scriptb_ba < dsp &&
1185 dsp <= np->scriptb_ba + np->scriptb_sz) {
1186 script_ofs = dsp - np->scriptb_ba;
1187 script_size = np->scriptb_sz;
1188 script_base = (u_char *) np->scriptb0;
1189 script_name = "scriptb";
1190 } else {
1191 script_ofs = dsp;
1192 script_size = 0;
1193 script_base = NULL;
1194 script_name = "mem";
1195 }
1196
1197 printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x/%x) @ (%s %x:%08x).\n",
1198 sym_name(np), (unsigned)INB(np, nc_sdid)&0x0f, dstat, sist,
1199 (unsigned)INB(np, nc_socl), (unsigned)INB(np, nc_sbcl),
1200 (unsigned)INB(np, nc_sbdl), (unsigned)INB(np, nc_sxfer),
1201 (unsigned)INB(np, nc_scntl3),
1202 (np->features & FE_C10) ? (unsigned)INB(np, nc_scntl4) : 0,
1203 script_name, script_ofs, (unsigned)INL(np, nc_dbc));
1204
1205 if (((script_ofs & 3) == 0) &&
1206 (unsigned)script_ofs < script_size) {
1207 printf ("%s: script cmd = %08x\n", sym_name(np),
1208 scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
1209 }
1210
1211 printf ("%s: regdump:", sym_name(np));
1212 for (i=0; i<24;i++)
1213 printf (" %02x", (unsigned)INB_OFF(np, i));
1214 printf (".\n");
1215
1216 /*
1217 * PCI BUS error.
1218 */
1219 if (dstat & (MDPE|BF))
1220 sym_log_bus_error(np);
1221}
1222
1223static struct sym_chip sym_dev_table[] = {
1224 {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, 64,
1225 FE_ERL}
1226 ,
1227#ifdef SYM_DEBUG_GENERIC_SUPPORT
1228 {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1,
1229 FE_BOF}
1230 ,
1231#else
1232 {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1,
1233 FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
1234 ,
1235#endif
1236 {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, 64,
1237 FE_BOF|FE_ERL}
1238 ,
1239 {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 6, 8, 4, 64,
1240 FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
1241 ,
1242 {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, 2,
1243 FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
1244 ,
1245 {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, 1,
1246 FE_ULTRA|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
1247 ,
1248 {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, 2,
1249 FE_WIDE|FE_ULTRA|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
1250 FE_RAM|FE_DIFF|FE_VARCLK}
1251 ,
1252 {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, 2,
1253 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
1254 FE_RAM|FE_DIFF|FE_VARCLK}
1255 ,
1256 {PCI_DEVICE_ID_NCR_53C875J, 0xff, "875J", 6, 16, 5, 2,
1257 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
1258 FE_RAM|FE_DIFF|FE_VARCLK}
1259 ,
1260 {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, 2,
1261 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
1262 FE_RAM|FE_DIFF|FE_VARCLK}
1263 ,
1264#ifdef SYM_DEBUG_GENERIC_SUPPORT
1265 {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
1266 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|
1267 FE_RAM|FE_LCKFRQ}
1268 ,
1269#else
1270 {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
1271 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
1272 FE_RAM|FE_LCKFRQ}
1273 ,
1274#endif
1275 {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, 4,
1276 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
1277 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
1278 ,
1279 {PCI_DEVICE_ID_LSI_53C895A, 0xff, "895a", 6, 31, 7, 4,
1280 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
1281 FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
1282 ,
1283 {PCI_DEVICE_ID_LSI_53C875A, 0xff, "875a", 6, 31, 7, 4,
1284 FE_WIDE|FE_ULTRA|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
1285 FE_RAM|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
1286 ,
1287 {PCI_DEVICE_ID_LSI_53C1010_33, 0x00, "1010-33", 6, 31, 7, 8,
1288 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
1289 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
1290 FE_C10}
1291 ,
1292 {PCI_DEVICE_ID_LSI_53C1010_33, 0xff, "1010-33", 6, 31, 7, 8,
1293 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
1294 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
1295 FE_C10|FE_U3EN}
1296 ,
1297 {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010-66", 6, 31, 7, 8,
1298 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
1299 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC|
1300 FE_C10|FE_U3EN}
1301 ,
1302 {PCI_DEVICE_ID_LSI_53C1510, 0xff, "1510d", 6, 31, 7, 4,
1303 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
1304 FE_RAM|FE_IO256|FE_LEDC}
1305};
1306
1307#define sym_num_devs \
1308 (sizeof(sym_dev_table) / sizeof(sym_dev_table[0]))
1309
1310/*
1311 * Look up the chip table.
1312 *
1313 * Return a pointer to the chip entry if found,
1314 * zero otherwise.
1315 */
1316struct sym_chip *
1317sym_lookup_chip_table (u_short device_id, u_char revision)
1318{
1319 struct sym_chip *chip;
1320 int i;
1321
1322 for (i = 0; i < sym_num_devs; i++) {
1323 chip = &sym_dev_table[i];
1324 if (device_id != chip->device_id)
1325 continue;
1326 if (revision > chip->revision_id)
1327 continue;
1328 return chip;
1329 }
1330
1331 return NULL;
1332}
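/*
 * A usage example based on the table above: entries are matched in
 * order against the device id and a revision ceiling.  A 53C875 with
 * revision 0x01 returns the first "875" entry (revision_id 0x01, no
 * FE_DBLR); revision 0x03 skips it and returns the second "875" entry
 * (revision_id 0xff, with FE_DBLR).
 */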
1333
1334#if SYM_CONF_DMA_ADDRESSING_MODE == 2
1335/*
1336 * Lookup the 64 bit DMA segments map.
1337 * This is only used if the direct mapping
1338 * has been unsuccessful.
1339 */
1340int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s)
1341{
1342 int i;
1343
1344 if (!np->use_dac)
1345 goto weird;
1346
1347 /* Look up existing mappings */
1348 for (i = SYM_DMAP_SIZE-1; i > 0; i--) {
1349 if (h == np->dmap_bah[i])
1350 return i;
1351 }
1352 /* If direct mapping is free, get it */
1353 if (!np->dmap_bah[s])
1354 goto new;
1355 /* Collision -> lookup free mappings */
1356 for (s = SYM_DMAP_SIZE-1; s > 0; s--) {
1357 if (!np->dmap_bah[s])
1358 goto new;
1359 }
1360weird:
1361 panic("sym: ran out of 64 bit DMA segment registers");
1362 return -1;
1363new:
1364 np->dmap_bah[s] = h;
1365 np->dmap_dirty = 1;
1366 return s;
1367}
1368
1369/*
1370 * Update IO registers scratch C..R so they will be
1371 * in sync. with queued CCB expectations.
1372 */
1373static void sym_update_dmap_regs(struct sym_hcb *np)
1374{
1375 int o, i;
1376
1377 if (!np->dmap_dirty)
1378 return;
1379 o = offsetof(struct sym_reg, nc_scrx[0]);
1380 for (i = 0; i < SYM_DMAP_SIZE; i++) {
1381 OUTL_OFF(np, o, np->dmap_bah[i]);
1382 o += 4;
1383 }
1384 np->dmap_dirty = 0;
1385}
1386#endif
1387
1388/* Enforce all the fiddly SPI rules and the chip limitations */
1389static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget,
1390 struct sym_trans *goal)
1391{
1392 if (!spi_support_wide(starget))
1393 goal->width = 0;
1394
1395 if (!spi_support_sync(starget)) {
1396 goal->iu = 0;
1397 goal->dt = 0;
1398 goal->qas = 0;
1399 goal->period = 0;
1400 goal->offset = 0;
1401 return;
1402 }
1403
1404 if (spi_support_dt(starget)) {
1405 if (spi_support_dt_only(starget))
1406 goal->dt = 1;
1407
1408 if (goal->offset == 0)
1409 goal->dt = 0;
1410 } else {
1411 goal->dt = 0;
1412 }
1413
1414 /* Some targets fail to properly negotiate DT in SE mode */
1415 if ((np->scsi_mode != SMODE_LVD) || !(np->features & FE_U3EN))
1416 goal->dt = 0;
1417
1418 if (goal->dt) {
1419 /* all DT transfers must be wide */
1420 goal->width = 1;
1421 if (goal->offset > np->maxoffs_dt)
1422 goal->offset = np->maxoffs_dt;
1423 if (goal->period < np->minsync_dt)
1424 goal->period = np->minsync_dt;
1425 if (goal->period > np->maxsync_dt)
1426 goal->period = np->maxsync_dt;
1427 } else {
1428 goal->iu = goal->qas = 0;
1429 if (goal->offset > np->maxoffs)
1430 goal->offset = np->maxoffs;
1431 if (goal->period < np->minsync)
1432 goal->period = np->minsync;
1433 if (goal->period > np->maxsync)
1434 goal->period = np->maxsync;
1435 }
1436}
1437
1438/*
1439 * Prepare the next negotiation message if needed.
1440 *
1441 * Fill in the part of message buffer that contains the
1442 * negotiation and the nego_status field of the CCB.
1443 * Returns the size of the message in bytes.
1444 */
1445static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr)
1446{
1447 struct sym_tcb *tp = &np->target[cp->target];
1448 struct scsi_target *starget = tp->sdev->sdev_target;
1449 struct sym_trans *goal = &tp->tgoal;
1450 int msglen = 0;
1451 int nego;
1452
1453 sym_check_goals(np, starget, goal);
1454
1455 /*
1456 * Many devices implement PPR in a buggy way, so only use it if we
1457 * really want to.
1458 */
1459 if (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)) {
1460 nego = NS_PPR;
1461 } else if (spi_width(starget) != goal->width) {
1462 nego = NS_WIDE;
1463 } else if (spi_period(starget) != goal->period ||
1464 spi_offset(starget) != goal->offset) {
1465 nego = NS_SYNC;
1466 } else {
1467 goal->check_nego = 0;
1468 nego = 0;
1469 }
1470
1471 switch (nego) {
1472 case NS_SYNC:
1473 msgptr[msglen++] = M_EXTENDED;
1474 msgptr[msglen++] = 3;
1475 msgptr[msglen++] = M_X_SYNC_REQ;
1476 msgptr[msglen++] = goal->period;
1477 msgptr[msglen++] = goal->offset;
1478 break;
1479 case NS_WIDE:
1480 msgptr[msglen++] = M_EXTENDED;
1481 msgptr[msglen++] = 2;
1482 msgptr[msglen++] = M_X_WIDE_REQ;
1483 msgptr[msglen++] = goal->width;
1484 break;
1485 case NS_PPR:
1486 msgptr[msglen++] = M_EXTENDED;
1487 msgptr[msglen++] = 6;
1488 msgptr[msglen++] = M_X_PPR_REQ;
1489 msgptr[msglen++] = goal->period;
1490 msgptr[msglen++] = 0;
1491 msgptr[msglen++] = goal->offset;
1492 msgptr[msglen++] = goal->width;
1493 msgptr[msglen++] = (goal->iu ? PPR_OPT_IU : 0) |
1494 (goal->dt ? PPR_OPT_DT : 0) |
1495 (goal->qas ? PPR_OPT_QAS : 0);
1496 break;
1497 }
1498
1499 cp->nego_status = nego;
1500
1501 if (nego) {
1502 tp->nego_cp = cp; /* Keep track a nego will be performed */
1503 if (DEBUG_FLAGS & DEBUG_NEGO) {
1504 sym_print_nego_msg(np, cp->target,
1505 nego == NS_SYNC ? "sync msgout" :
1506 nego == NS_WIDE ? "wide msgout" :
1507 "ppr msgout", msgptr);
1508 }
1509 }
1510
1511 return msglen;
1512}
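/*
 * A worked example: a DT goal with period factor 9 (12.5 ns), offset
 * 62 and wide transfers takes the NS_PPR branch above and builds the
 * 8-byte extended message
 * { M_EXTENDED, 6, M_X_PPR_REQ, 9, 0, 62, 1, PPR_OPT_DT }.
 */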
1513
1514/*
1515 * Insert a job into the start queue.
1516 */
1517void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
1518{
1519 u_short qidx;
1520
1521#ifdef SYM_CONF_IARB_SUPPORT
1522 /*
1523 * If the previously queued CCB is not yet done,
1524 * set the IARB hint. The SCRIPTS will go with IARB
1525 * for this job when starting the previous one.
1526 * We leave devices a chance to win arbitration by
1527 * not using more than 'iarb_max' consecutive
1528 * immediate arbitrations.
1529 */
1530 if (np->last_cp && np->iarb_count < np->iarb_max) {
1531 np->last_cp->host_flags |= HF_HINT_IARB;
1532 ++np->iarb_count;
1533 }
1534 else
1535 np->iarb_count = 0;
1536 np->last_cp = cp;
1537#endif
1538
1539#if SYM_CONF_DMA_ADDRESSING_MODE == 2
1540 /*
1541 * Make SCRIPTS aware of the 64 bit DMA
1542 * segment registers not being up-to-date.
1543 */
1544 if (np->dmap_dirty)
1545 cp->host_xflags |= HX_DMAP_DIRTY;
1546#endif
1547
1548 /*
1549 * Insert first the idle task and then our job.
1550 * The MBs should ensure proper ordering.
1551 */
1552 qidx = np->squeueput + 2;
1553 if (qidx >= MAX_QUEUE*2) qidx = 0;
1554
1555 np->squeue [qidx] = cpu_to_scr(np->idletask_ba);
1556 MEMORY_WRITE_BARRIER();
1557 np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);
1558
1559 np->squeueput = qidx;
1560
1561 if (DEBUG_FLAGS & DEBUG_QUEUE)
1562 printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput);
1563
1564 /*
1565 * Script processor may be waiting for reselect.
1566 * Wake it up.
1567 */
1568 MEMORY_WRITE_BARRIER();
1569 OUTB(np, nc_istat, SIGP|np->istat_sem);
1570}
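/*
 * A worked example: with squeueput = 6, the code above writes the idle
 * task address into slot 8, issues a write barrier, patches slot 6
 * with the new CCB bus address and advances squeueput to 8 (wrapping
 * at MAX_QUEUE*2).  The final SIGP write wakes the SCRIPTS processor
 * if it was waiting for a reselection.
 */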
1571
1572#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
1573/*
1574 * Start next ready-to-start CCBs.
1575 */
1576void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn)
1577{
1578 SYM_QUEHEAD *qp;
1579 struct sym_ccb *cp;
1580
1581 /*
1582 * Paranoia, as usual. :-)
1583 */
1584 assert(!lp->started_tags || !lp->started_no_tag);
1585
1586 /*
1587 * Try to start as many commands as asked by caller.
1588	 * Avoid having both tagged and untagged
1589 * commands queued to the device at the same time.
1590 */
1591 while (maxn--) {
1592 qp = sym_remque_head(&lp->waiting_ccbq);
1593 if (!qp)
1594 break;
1595 cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq);
1596 if (cp->tag != NO_TAG) {
1597 if (lp->started_no_tag ||
1598 lp->started_tags >= lp->started_max) {
1599 sym_insque_head(qp, &lp->waiting_ccbq);
1600 break;
1601 }
1602 lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba);
1603 lp->head.resel_sa =
1604 cpu_to_scr(SCRIPTA_BA(np, resel_tag));
1605 ++lp->started_tags;
1606 } else {
1607 if (lp->started_no_tag || lp->started_tags) {
1608 sym_insque_head(qp, &lp->waiting_ccbq);
1609 break;
1610 }
1611 lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
1612 lp->head.resel_sa =
1613 cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
1614 ++lp->started_no_tag;
1615 }
1616 cp->started = 1;
1617 sym_insque_tail(qp, &lp->started_ccbq);
1618 sym_put_start_queue(np, cp);
1619 }
1620}
1621#endif /* SYM_OPT_HANDLE_DEVICE_QUEUEING */
1622
1623/*
1624 * The chip may have completed jobs. Look at the DONE QUEUE.
1625 *
1626 * On paper, memory read barriers may be needed here to
1627 * prevent out of order LOADs by the CPU from having
1628 * prefetched stale data prior to DMA having occurred.
1629 */
1630static int sym_wakeup_done (struct sym_hcb *np)
1631{
1632 struct sym_ccb *cp;
1633 int i, n;
1634 u32 dsa;
1635
1636 n = 0;
1637 i = np->dqueueget;
1638
1639 /* MEMORY_READ_BARRIER(); */
1640 while (1) {
1641 dsa = scr_to_cpu(np->dqueue[i]);
1642 if (!dsa)
1643 break;
1644 np->dqueue[i] = 0;
1645 if ((i = i+2) >= MAX_QUEUE*2)
1646 i = 0;
1647
1648 cp = sym_ccb_from_dsa(np, dsa);
1649 if (cp) {
1650 MEMORY_READ_BARRIER();
1651 sym_complete_ok (np, cp);
1652 ++n;
1653 }
1654 else
1655 printf ("%s: bad DSA (%x) in done queue.\n",
1656 sym_name(np), (u_int) dsa);
1657 }
1658 np->dqueueget = i;
1659
1660 return n;
1661}
1662
1663/*
1664 * Complete all CCBs queued to the COMP queue.
1665 *
1666 * These CCBs are assumed:
1667 * - Not to be referenced either by devices or
1668 *    SCRIPTS-related queues and data.
1669 *  - To be completed with an error condition
1670 *    or to be requeued.
1671 *
1672 * The device queue freeze count is incremented
1673 * for each CCB that does not prevent this.
1674 * This function is called when all CCBs involved
1675 * in error handling/recovery have been reaped.
1676 */
1677static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status)
1678{
1679 SYM_QUEHEAD *qp;
1680 struct sym_ccb *cp;
1681
1682 while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) {
1683 struct scsi_cmnd *cmd;
1684 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
1685 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
1686 /* Leave quiet CCBs waiting for resources */
1687 if (cp->host_status == HS_WAIT)
1688 continue;
1689 cmd = cp->cmd;
1690 if (cam_status)
1691 sym_set_cam_status(cmd, cam_status);
1692#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
1693 if (sym_get_cam_status(cmd) == CAM_REQUEUE_REQ) {
1694 struct sym_tcb *tp = &np->target[cp->target];
1695 struct sym_lcb *lp = sym_lp(tp, cp->lun);
1696 if (lp) {
1697 sym_remque(&cp->link2_ccbq);
1698 sym_insque_tail(&cp->link2_ccbq,
1699 &lp->waiting_ccbq);
1700 if (cp->started) {
1701 if (cp->tag != NO_TAG)
1702 --lp->started_tags;
1703 else
1704 --lp->started_no_tag;
1705 }
1706 }
1707 cp->started = 0;
1708 continue;
1709 }
1710#endif
1711 sym_free_ccb(np, cp);
1712 sym_xpt_done(np, cmd);
1713 }
1714}
1715
1716/*
1717 * Complete all active CCBs with error.
1718 * Used on CHIP/SCSI RESET.
1719 */
1720static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status)
1721{
1722 /*
1723 * Move all active CCBs to the COMP queue
1724 * and flush this queue.
1725 */
1726 sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
1727 sym_que_init(&np->busy_ccbq);
1728 sym_flush_comp_queue(np, cam_status);
1729}
1730
1731/*
1732 * Start chip.
1733 *
1734 * 'reason' means:
1735 * 0: initialisation.
1736 * 1: SCSI BUS RESET delivered or received.
1737 * 2: SCSI BUS MODE changed.
1738 */
1739void sym_start_up (struct sym_hcb *np, int reason)
1740{
1741 int i;
1742 u32 phys;
1743
1744 /*
1745 * Reset chip if asked, otherwise just clear fifos.
1746 */
1747 if (reason == 1)
1748 sym_soft_reset(np);
1749 else {
1750 OUTB(np, nc_stest3, TE|CSF);
1751 OUTONB(np, nc_ctest3, CLF);
1752 }
1753
1754 /*
1755 * Clear Start Queue
1756 */
1757 phys = np->squeue_ba;
1758 for (i = 0; i < MAX_QUEUE*2; i += 2) {
1759 np->squeue[i] = cpu_to_scr(np->idletask_ba);
1760 np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
1761 }
1762 np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
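	/*
	 * Layout note (illustrative): each start queue entry is a pair
	 * of longwords { CCB DSA, bus address of the next entry }.
	 * The loop above parks the idle task in every DSA slot and
	 * chains entry i to entry i+1; the last link written just
	 * above wraps back to squeue_ba, so the queue forms a circular
	 * list of MAX_QUEUE entries that the SCRIPTS can follow.
	 */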
1763
1764 /*
1765 * Start at first entry.
1766 */
1767 np->squeueput = 0;
1768
1769 /*
1770 * Clear Done Queue
1771 */
1772 phys = np->dqueue_ba;
1773 for (i = 0; i < MAX_QUEUE*2; i += 2) {
1774 np->dqueue[i] = 0;
1775 np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
1776 }
1777 np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
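	/*
	 * The done queue mirrors this layout: the first longword of
	 * each entry (cleared above) receives the DSA of a completed
	 * CCB, which sym_wakeup_done() scans for, and the second
	 * longword again links to the next entry.
	 */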
1778
1779 /*
1780 * Start at first entry.
1781 */
1782 np->dqueueget = 0;
1783
1784 /*
1785 * Install patches in scripts.
1786 * This also makes the start and done queue pointers
1787 * used by the SCRIPTS point to the first entry.
1788 */
1789 np->fw_patch(np);
1790
1791 /*
1792 * Wakeup all pending jobs.
1793 */
1794 sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET);
1795
1796 /*
1797 * Init chip.
1798 */
1799 OUTB(np, nc_istat, 0x00); /* Remove Reset, abort */
1800 udelay(2000); /* The 895 needs time for the bus mode to settle */
1801
1802 OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0);
1803 /* full arb., ena parity, par->ATN */
1804 OUTB(np, nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
1805
1806 sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
1807
1808 OUTB(np, nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
1809 OUTW(np, nc_respid, 1ul<<np->myaddr); /* Id to respond to */
1810 OUTB(np, nc_istat , SIGP ); /* Signal Process */
1811 OUTB(np, nc_dmode , np->rv_dmode); /* Burst length, dma mode */
1812 OUTB(np, nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
1813
1814 OUTB(np, nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
1815 OUTB(np, nc_ctest3, np->rv_ctest3); /* Write and invalidate */
1816 OUTB(np, nc_ctest4, np->rv_ctest4); /* Master parity checking */
1817
1818 /* Extended Sreq/Sack filtering not supported on the C10 */
1819 if (np->features & FE_C10)
1820 OUTB(np, nc_stest2, np->rv_stest2);
1821 else
1822 OUTB(np, nc_stest2, EXT|np->rv_stest2);
1823
1824 OUTB(np, nc_stest3, TE); /* TolerANT enable */
1825 OUTB(np, nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */
1826
1827 /*
1828 * For now, disable AIP generation on C1010-66.
1829 */
1830 if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)
1831 OUTB(np, nc_aipcntl1, DISAIP);
1832
1833 /*
1834 * C1010 rev. 0 errata.
1835 * Errant SGE's when in narrow. Write bits 4 & 5 of
1836 * STEST1 register to disable SGE. We probably should do
1837 * that from SCRIPTS for each selection/reselection, but
1838 * I just don't want to. :)
1839 */
1840 if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 &&
1841 np->revision_id < 1)
1842 OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30);
1843
1844 /*
1845 * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
1846 * Disable overlapped arbitration for some dual function devices,
1847 * regardless of revision id (kind of post-chip-design feature. ;-))
1848 */
1849 if (np->device_id == PCI_DEVICE_ID_NCR_53C875)
1850 OUTB(np, nc_ctest0, (1<<5));
1851 else if (np->device_id == PCI_DEVICE_ID_NCR_53C896)
1852 np->rv_ccntl0 |= DPR;
1853
1854 /*
1855 * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
1856 * and/or hardware phase mismatch, since only such chips
1857 * seem to support those IO registers.
1858 */
1859 if (np->features & (FE_DAC|FE_NOPM)) {
1860 OUTB(np, nc_ccntl0, np->rv_ccntl0);
1861 OUTB(np, nc_ccntl1, np->rv_ccntl1);
1862 }
1863
1864#if SYM_CONF_DMA_ADDRESSING_MODE == 2
1865 /*
1866 * Set up scratch C and DRS IO registers to map the 32 bit
1867 * DMA address range our data structures are located in.
1868 */
1869 if (np->use_dac) {
1870 np->dmap_bah[0] = 0; /* ??? */
1871 OUTL(np, nc_scrx[0], np->dmap_bah[0]);
1872 OUTL(np, nc_drs, np->dmap_bah[0]);
1873 }
1874#endif
1875
1876 /*
1877 * If phase mismatch handled by scripts (895A/896/1010),
1878 * set PM jump addresses.
1879 */
1880 if (np->features & FE_NOPM) {
1881 OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle));
1882 OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle));
1883 }
1884
1885 /*
1886 * Enable GPIO0 pin for writing if LED support from SCRIPTS.
1887 * Also set GPIO5 and clear GPIO6 if hardware LED control.
1888 */
1889 if (np->features & FE_LED0)
1890 OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01);
1891 else if (np->features & FE_LEDC)
1892 OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20);
1893
1894 /*
1895 * enable ints
1896 */
1897 OUTW(np, nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
1898 OUTB(np, nc_dien , MDPE|BF|SSI|SIR|IID);
1899
1900 /*
1901 * For 895/6 enable SBMC interrupt and save current SCSI bus mode.
1902 * Try to eat the spurious SBMC interrupt that may occur when
1903 * we reset the chip but not the SCSI BUS (at initialization).
1904 */
1905 if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
1906 OUTONW(np, nc_sien, SBMC);
1907 if (reason == 0) {
1908 mdelay(100);
1909 INW(np, nc_sist);
1910 }
1911 np->scsi_mode = INB(np, nc_stest4) & SMODE;
1912 }
1913
1914 /*
1915 * Fill in target structure.
1916 * Reinitialize usrsync.
1917 * Reinitialize usrwide.
1918 * Prepare sync negotiation according to actual SCSI bus mode.
1919 */
1920 for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
1921 struct sym_tcb *tp = &np->target[i];
1922
1923 tp->to_reset = 0;
1924 tp->head.sval = 0;
1925 tp->head.wval = np->rv_scntl3;
1926 tp->head.uval = 0;
1927 }
1928
1929 /*
1930 * Download SCSI SCRIPTS to on-chip RAM if present,
1931 * and start script processor.
1932 * We do the download preferably from the CPU.
1933 * For platforms that may not support PCI memory mapping,
1934 * we use simple SCRIPTS that perform MEMORY MOVEs.
1935 */
1936 phys = SCRIPTA_BA(np, init);
1937 if (np->ram_ba) {
1938 if (sym_verbose >= 2)
1939 printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np));
1940 memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz);
1941 if (np->ram_ws == 8192) {
1942 memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz);
1943 phys = scr_to_cpu(np->scr_ram_seg);
1944 OUTL(np, nc_mmws, phys);
1945 OUTL(np, nc_mmrs, phys);
1946 OUTL(np, nc_sfs, phys);
1947 phys = SCRIPTB_BA(np, start64);
1948 }
1949 }
1950
1951 np->istat_sem = 0;
1952
1953 OUTL(np, nc_dsa, np->hcb_ba);
1954 OUTL_DSP(np, phys);
1955
1956 /*
1957 * Notify the XPT about the RESET condition.
1958 */
1959 if (reason != 0)
1960 sym_xpt_async_bus_reset(np);
1961}
1962
1963/*
1964 * Switch trans mode for current job and its target.
1965 */
1966static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs,
1967 u_char per, u_char wide, u_char div, u_char fak)
1968{
1969 SYM_QUEHEAD *qp;
1970 u_char sval, wval, uval;
1971 struct sym_tcb *tp = &np->target[target];
1972
1973 assert(target == (INB(np, nc_sdid) & 0x0f));
1974
1975 sval = tp->head.sval;
1976 wval = tp->head.wval;
1977 uval = tp->head.uval;
1978
1979#if 0
1980 printf("XXXX sval=%x wval=%x uval=%x (%x)\n",
1981 sval, wval, uval, np->rv_scntl3);
1982#endif
1983 /*
1984 * Set the offset.
1985 */
1986 if (!(np->features & FE_C10))
1987 sval = (sval & ~0x1f) | ofs;
1988 else
1989 sval = (sval & ~0x3f) | ofs;
1990
1991 /*
1992 * Set the sync divisor and extra clock factor.
1993 */
1994 if (ofs != 0) {
1995 wval = (wval & ~0x70) | ((div+1) << 4);
1996 if (!(np->features & FE_C10))
1997 sval = (sval & ~0xe0) | (fak << 5);
1998 else {
1999 uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
2000 if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT);
2001 if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT);
2002 }
2003 }
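	/*
	 * Worked example with assumed values: on a non-C10 chip,
	 * div = 2 and fak = 1 would set the divisor field of wval
	 * (bits 4-6) to 3 and the extra clock factor field of sval
	 * (bits 5-7) to 1, per the masking above.
	 */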
2004
2005 /*
2006 * Set the bus width.
2007 */
2008 wval = wval & ~EWS;
2009 if (wide != 0)
2010 wval |= EWS;
2011
2012 /*
2013 * Set misc. ultra enable bits.
2014 */
2015 if (np->features & FE_C10) {
2016 uval = uval & ~(U3EN|AIPCKEN);
2017 if (opts) {
2018 assert(np->features & FE_U3EN);
2019 uval |= U3EN;
2020 }
2021 } else {
2022 wval = wval & ~ULTRA;
2023 if (per <= 12) wval |= ULTRA;
2024 }
2025
2026 /*
2027 * Stop there if sync parameters are unchanged.
2028 */
2029 if (tp->head.sval == sval &&
2030 tp->head.wval == wval &&
2031 tp->head.uval == uval)
2032 return;
2033 tp->head.sval = sval;
2034 tp->head.wval = wval;
2035 tp->head.uval = uval;
2036
2037 /*
2038 * Disable extended Sreq/Sack filtering if per < 50.
2039 * Not supported on the C1010.
2040 */
2041 if (per < 50 && !(np->features & FE_C10))
2042 OUTOFFB(np, nc_stest2, EXT);
2043
2044 /*
2045 * set actual value and sync_status
2046 */
2047 OUTB(np, nc_sxfer, tp->head.sval);
2048 OUTB(np, nc_scntl3, tp->head.wval);
2049
2050 if (np->features & FE_C10) {
2051 OUTB(np, nc_scntl4, tp->head.uval);
2052 }
2053
2054 /*
2055 * patch ALL busy ccbs of this target.
2056 */
2057 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
2058 struct sym_ccb *cp;
2059 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
2060 if (cp->target != target)
2061 continue;
2062 cp->phys.select.sel_scntl3 = tp->head.wval;
2063 cp->phys.select.sel_sxfer = tp->head.sval;
2064 if (np->features & FE_C10) {
2065 cp->phys.select.sel_scntl4 = tp->head.uval;
2066 }
2067 }
2068}
2069
2070/*
2071 * We received a WDTR.
2072 * Let everything be aware of the changes.
2073 */
2074static void sym_setwide(struct sym_hcb *np, int target, u_char wide)
2075{
2076 struct sym_tcb *tp = &np->target[target];
2077 struct scsi_target *starget = tp->sdev->sdev_target;
2078
2079 if (spi_width(starget) == wide)
2080 return;
2081
2082 sym_settrans(np, target, 0, 0, 0, wide, 0, 0);
2083
2084 tp->tgoal.width = wide;
2085 spi_offset(starget) = 0;
2086 spi_period(starget) = 0;
2087 spi_width(starget) = wide;
2088 spi_iu(starget) = 0;
2089 spi_dt(starget) = 0;
2090 spi_qas(starget) = 0;
2091
2092 if (sym_verbose >= 3)
2093 spi_display_xfer_agreement(starget);
2094}
2095
2096/*
2097 * We received a SDTR.
2098 * Let everything be aware of the changes.
2099 */
2100static void
2101sym_setsync(struct sym_hcb *np, int target,
2102 u_char ofs, u_char per, u_char div, u_char fak)
2103{
2104 struct sym_tcb *tp = &np->target[target];
2105 struct scsi_target *starget = tp->sdev->sdev_target;
2106 u_char wide = (tp->head.wval & EWS) ? BUS_16_BIT : BUS_8_BIT;
2107
2108 sym_settrans(np, target, 0, ofs, per, wide, div, fak);
2109
2110 spi_period(starget) = per;
2111 spi_offset(starget) = ofs;
2112 spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0;
2113
2114 if (!tp->tgoal.dt && !tp->tgoal.iu && !tp->tgoal.qas) {
2115 tp->tgoal.period = per;
2116 tp->tgoal.offset = ofs;
2117 tp->tgoal.check_nego = 0;
2118 }
2119
2120 spi_display_xfer_agreement(starget);
2121}
2122
2123/*
2124 * We received a PPR.
2125 * Let everything be aware of the changes.
2126 */
2127static void
2128sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs,
2129 u_char per, u_char wide, u_char div, u_char fak)
2130{
2131 struct sym_tcb *tp = &np->target[target];
2132 struct scsi_target *starget = tp->sdev->sdev_target;
2133
2134 sym_settrans(np, target, opts, ofs, per, wide, div, fak);
2135
2136 spi_width(starget) = tp->tgoal.width = wide;
2137 spi_period(starget) = tp->tgoal.period = per;
2138 spi_offset(starget) = tp->tgoal.offset = ofs;
2139 spi_iu(starget) = tp->tgoal.iu = !!(opts & PPR_OPT_IU);
2140 spi_dt(starget) = tp->tgoal.dt = !!(opts & PPR_OPT_DT);
2141 spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS);
2142 tp->tgoal.check_nego = 0;
2143
2144 spi_display_xfer_agreement(starget);
2145}
2146
2147/*
2148 * generic recovery from scsi interrupt
2149 *
2150 * The doc says that when the chip gets an SCSI interrupt,
2151 * it tries to stop in an orderly fashion, by completing
2152 * an instruction fetch that had started or by flushing
2153 * the DMA fifo for a write to memory that was executing.
2154 * This, however, is not enough to know whether the instruction
2155 * that was just before the current DSP value has been
2156 * executed or not.
2157 *
2158 * There are some small SCRIPTS sections that deal with
2159 * the start queue and the done queue that may break any
2160 * assumption from the C code if we are interrupted
2161 * inside, so we reset if this happens. Btw, since these
2162 * SCRIPTS sections are executed while the SCRIPTS hasn't
2163 * started SCSI operations, it is very unlikely to happen.
2164 *
2165 * All the driver data structures are supposed to be
2166 * allocated from the same 4 GB memory window, so there
2167 * is a 1 to 1 relationship between DSA and driver data
2168 * structures. Since we are careful :) to invalidate the
2169 * DSA when we complete a command or when the SCRIPTS
2170 * pushes a DSA into a queue, we can trust it when it
2171 * points to a CCB.
2172 */
2173static void sym_recover_scsi_int (struct sym_hcb *np, u_char hsts)
2174{
2175 u32 dsp = INL(np, nc_dsp);
2176 u32 dsa = INL(np, nc_dsa);
2177 struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
2178
2179 /*
2180 * If we haven't been interrupted inside the SCRIPTS
2181 * critical paths, we can safely restart the SCRIPTS
2182 * and trust the DSA value if it matches a CCB.
2183 */
2184 if ((!(dsp > SCRIPTA_BA(np, getjob_begin) &&
2185 dsp < SCRIPTA_BA(np, getjob_end) + 1)) &&
2186 (!(dsp > SCRIPTA_BA(np, ungetjob) &&
2187 dsp < SCRIPTA_BA(np, reselect) + 1)) &&
2188 (!(dsp > SCRIPTB_BA(np, sel_for_abort) &&
2189 dsp < SCRIPTB_BA(np, sel_for_abort_1) + 1)) &&
2190 (!(dsp > SCRIPTA_BA(np, done) &&
2191 dsp < SCRIPTA_BA(np, done_end) + 1))) {
2192 OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
2193 OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */
2194 /*
2195 * If we have a CCB, let the SCRIPTS call us back for
2196 * the handling of the error with SCRATCHA filled with
2197 * STARTPOS. This way, we will be able to freeze the
2198 * device queue and requeue awaiting IOs.
2199 */
2200 if (cp) {
2201 cp->host_status = hsts;
2202 OUTL_DSP(np, SCRIPTA_BA(np, complete_error));
2203 }
2204 /*
2205 * Otherwise just restart the SCRIPTS.
2206 */
2207 else {
2208 OUTL(np, nc_dsa, 0xffffff);
2209 OUTL_DSP(np, SCRIPTA_BA(np, start));
2210 }
2211 }
2212 else
2213 goto reset_all;
2214
2215 return;
2216
2217reset_all:
2218 sym_start_reset(np);
2219}
2220
2221/*
2222 * chip exception handler for selection timeout
2223 */
2224static void sym_int_sto (struct sym_hcb *np)
2225{
2226 u32 dsp = INL(np, nc_dsp);
2227
2228 if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");
2229
2230 if (dsp == SCRIPTA_BA(np, wf_sel_done) + 8)
2231 sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
2232 else
2233 sym_start_reset(np);
2234}
2235
2236/*
2237 * chip exception handler for unexpected disconnect
2238 */
2239static void sym_int_udc (struct sym_hcb *np)
2240{
2241 printf ("%s: unexpected disconnect\n", sym_name(np));
2242 sym_recover_scsi_int(np, HS_UNEXPECTED);
2243}
2244
2245/*
2246 * chip exception handler for SCSI bus mode change
2247 *
2248 * spi2-r12 11.2.3 says a transceiver mode change must
2249 * generate a reset event and a device that detects a reset
2250 * event shall initiate a hard reset. It says also that a
2251 * device that detects a mode change shall set data transfer
2252 * mode to eight bit asynchronous, etc...
2253 * So, just reinitializing all except chip should be enough.
2254 */
2255static void sym_int_sbmc (struct sym_hcb *np)
2256{
2257 u_char scsi_mode = INB(np, nc_stest4) & SMODE;
2258
2259 /*
2260 * Notify user.
2261 */
2262 printf("%s: SCSI BUS mode change from %s to %s.\n", sym_name(np),
2263 sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));
2264
2265 /*
2266 * Should suspend command processing for a few seconds and
2267 * reinitialize all except the chip.
2268 */
2269 sym_start_up (np, 2);
2270}
2271
2272/*
2273 * chip exception handler for SCSI parity error.
2274 *
2275 * When the chip detects a SCSI parity error and is
2276 * currently executing a (CH)MOV instruction, it does
2277 * not interrupt immediately, but tries to finish the
2278 * transfer of the current scatter entry before
2279 * interrupting. The following situations may occur:
2280 *
2281 * - The complete scatter entry has been transferred
2282 * without the device having changed phase.
2283 * The chip will then interrupt with the DSP pointing
2284 * to the instruction that follows the MOV.
2285 *
2286 * - A phase mismatch occurs before the MOV finished
2287 * and phase errors are to be handled by the C code.
2288 * The chip will then interrupt with both PAR and MA
2289 * conditions set.
2290 *
2291 * - A phase mismatch occurs before the MOV finished and
2292 * phase errors are to be handled by SCRIPTS.
2293 * The chip will load the DSP with the phase mismatch
2294 * JUMP address and interrupt the host processor.
2295 */
2296static void sym_int_par (struct sym_hcb *np, u_short sist)
2297{
2298 u_char hsts = INB(np, HS_PRT);
2299 u32 dsp = INL(np, nc_dsp);
2300 u32 dbc = INL(np, nc_dbc);
2301 u32 dsa = INL(np, nc_dsa);
2302 u_char sbcl = INB(np, nc_sbcl);
2303 u_char cmd = dbc >> 24;
2304 int phase = cmd & 7;
2305 struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
2306
2307 printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
2308 sym_name(np), hsts, dbc, sbcl);
2309
2310 /*
2311 * Check that the chip is connected to the SCSI BUS.
2312 */
2313 if (!(INB(np, nc_scntl1) & ISCON)) {
2314 sym_recover_scsi_int(np, HS_UNEXPECTED);
2315 return;
2316 }
2317
2318 /*
2319 * If the nexus is not clearly identified, reset the bus.
2320 * We will try to do better later.
2321 */
2322 if (!cp)
2323 goto reset_all;
2324
2325 /*
2326 * Check instruction was a MOV, direction was INPUT and
2327 * ATN is asserted.
2328 */
2329 if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
2330 goto reset_all;
2331
2332 /*
2333 * Keep track of the parity error.
2334 */
2335 OUTONB(np, HF_PRT, HF_EXT_ERR);
2336 cp->xerr_status |= XE_PARITY_ERR;
2337
2338 /*
2339 * Prepare the message to send to the device.
2340 */
2341 np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;
2342
2343 /*
2344 * If the old phase was DATA IN phase, we have to deal with
2345 * the 3 situations described above.
2346 * For other input phases (MSG IN and STATUS), the device
2347 * must resend the whole thing that failed parity checking
2348 * or signal error. So, jumping to dispatcher should be OK.
2349 */
2350 if (phase == 1 || phase == 5) {
2351 /* Phase mismatch handled by SCRIPTS */
2352 if (dsp == SCRIPTB_BA(np, pm_handle))
2353 OUTL_DSP(np, dsp);
2354 /* Phase mismatch handled by the C code */
2355 else if (sist & MA)
2356 sym_int_ma (np);
2357 /* No phase mismatch occurred */
2358 else {
2359 sym_set_script_dp (np, cp, dsp);
2360 OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
2361 }
2362 }
2363 else if (phase == 7) /* We definitely cannot handle parity errors */
2364#if 1 /* in message-in phase due to the reselection */
2365 goto reset_all; /* path and various message anticipations. */
2366#else
2367 OUTL_DSP(np, SCRIPTA_BA(np, clrack));
2368#endif
2369 else
2370 OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
2371 return;
2372
2373reset_all:
2374 sym_start_reset(np);
2375 return;
2376}
2377
2378/*
2379 * chip exception handler for phase errors.
2380 *
2381 * We have to construct a new transfer descriptor,
2382 * to transfer the rest of the current block.
2383 */
2384static void sym_int_ma (struct sym_hcb *np)
2385{
2386 u32 dbc;
2387 u32 rest;
2388 u32 dsp;
2389 u32 dsa;
2390 u32 nxtdsp;
2391 u32 *vdsp;
2392 u32 oadr, olen;
2393 u32 *tblp;
2394 u32 newcmd;
2395 u_int delta;
2396 u_char cmd;
2397 u_char hflags, hflags0;
2398 struct sym_pmc *pm;
2399 struct sym_ccb *cp;
2400
2401 dsp = INL(np, nc_dsp);
2402 dbc = INL(np, nc_dbc);
2403 dsa = INL(np, nc_dsa);
2404
2405 cmd = dbc >> 24;
2406 rest = dbc & 0xffffff;
2407 delta = 0;
2408
2409 /*
2410 * locate matching cp if any.
2411 */
2412 cp = sym_ccb_from_dsa(np, dsa);
2413
2414 /*
2415 * Do not take into account the dma fifo and various buffers in
2416 * INPUT phase since the chip flushes everything before
2417 * raising the MA interrupt for interrupted INPUT phases.
2418 * For DATA IN phase, we will check for the SWIDE later.
2419 */
2420 if ((cmd & 7) != 1 && (cmd & 7) != 5) {
2421 u_char ss0, ss2;
2422
2423 if (np->features & FE_DFBC)
2424 delta = INW(np, nc_dfbc);
2425 else {
2426 u32 dfifo;
2427
2428 /*
2429 * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
2430 */
2431 dfifo = INL(np, nc_dfifo);
2432
2433 /*
2434 * Calculate remaining bytes in DMA fifo.
2435 * (CTEST5 = dfifo >> 16)
2436 */
2437 if (dfifo & (DFS << 16))
2438 delta = ((((dfifo >> 8) & 0x300) |
2439 (dfifo & 0xff)) - rest) & 0x3ff;
2440 else
2441 delta = ((dfifo & 0xff) - rest) & 0x7f;
2442 }
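		/*
		 * For illustration with made-up numbers: on the small
		 * fifo path, a fifo byte count of 0x24 with rest = 0x20
		 * gives delta = (0x24 - 0x20) & 0x7f = 4, i.e. 4 bytes
		 * still sitting in the DMA fifo.
		 */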
2443
2444 /*
2445 * The data in the dma fifo has not been transferred to
2446 * the target -> add the amount to the rest
2447 * and clear the data.
2448 * Check the sstat2 register in case of wide transfer.
2449 */
2450 rest += delta;
2451 ss0 = INB(np, nc_sstat0);
2452 if (ss0 & OLF) rest++;
2453 if (!(np->features & FE_C10))
2454 if (ss0 & ORF) rest++;
2455 if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
2456 ss2 = INB(np, nc_sstat2);
2457 if (ss2 & OLF1) rest++;
2458 if (!(np->features & FE_C10))
2459 if (ss2 & ORF1) rest++;
2460 }
2461
2462 /*
2463 * Clear fifos.
2464 */
2465 OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */
2466 OUTB(np, nc_stest3, TE|CSF); /* scsi fifo */
2467 }
2468
2469 /*
2470 * log the information
2471 */
2472 if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
2473 printf ("P%x%x RL=%d D=%d ", cmd&7, INB(np, nc_sbcl)&7,
2474 (unsigned) rest, (unsigned) delta);
2475
2476 /*
2477 * try to find the interrupted script command,
2478 * and the address at which to continue.
2479 */
2480 vdsp = NULL;
2481 nxtdsp = 0;
2482 if (dsp > np->scripta_ba &&
2483 dsp <= np->scripta_ba + np->scripta_sz) {
2484 vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
2485 nxtdsp = dsp;
2486 }
2487 else if (dsp > np->scriptb_ba &&
2488 dsp <= np->scriptb_ba + np->scriptb_sz) {
2489 vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
2490 nxtdsp = dsp;
2491 }
2492
2493 /*
2494 * log the information
2495 */
2496 if (DEBUG_FLAGS & DEBUG_PHASE) {
2497 printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
2498 cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
2499 }
2500
2501 if (!vdsp) {
2502 printf ("%s: interrupted SCRIPT address not found.\n",
2503 sym_name (np));
2504 goto reset_all;
2505 }
2506
2507 if (!cp) {
2508 printf ("%s: SCSI phase error fixup: CCB already dequeued.\n",
2509 sym_name (np));
2510 goto reset_all;
2511 }
2512
2513 /*
2514 * get old start address and old length.
2515 */
2516 oadr = scr_to_cpu(vdsp[1]);
2517
2518 if (cmd & 0x10) { /* Table indirect */
2519 tblp = (u32 *) ((char*) &cp->phys + oadr);
2520 olen = scr_to_cpu(tblp[0]);
2521 oadr = scr_to_cpu(tblp[1]);
2522 } else {
2523 tblp = (u32 *) 0;
2524 olen = scr_to_cpu(vdsp[0]) & 0xffffff;
2525 }
2526
2527 if (DEBUG_FLAGS & DEBUG_PHASE) {
2528 printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
2529 (unsigned) (scr_to_cpu(vdsp[0]) >> 24),
2530 tblp,
2531 (unsigned) olen,
2532 (unsigned) oadr);
2533 }
2534
2535 /*
2536 * check cmd against assumed interrupted script command.
2537 * In DT data phase, the MOVE instruction does not carry bit 4 of
2538 * the phase.
2539 */
2540 if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
2541 sym_print_addr(cp->cmd,
2542 "internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
2543 cmd, scr_to_cpu(vdsp[0]) >> 24);
2544
2545 goto reset_all;
2546 }
2547
2548 /*
2549 * if old phase not dataphase, leave here.
2550 */
2551 if (cmd & 2) {
2552 sym_print_addr(cp->cmd,
2553 "phase change %x-%x %d@%08x resid=%d.\n",
2554 cmd&7, INB(np, nc_sbcl)&7, (unsigned)olen,
2555 (unsigned)oadr, (unsigned)rest);
2556 goto unexpected_phase;
2557 }
2558
2559 /*
2560 * Choose the correct PM save area.
2561 *
2562 * Look at the PM_SAVE SCRIPT if you want to understand
2563 * this stuff. The equivalent code is implemented in
2564 * SCRIPTS for the 895A, 896 and 1010 that are able to
2565 * handle PM from the SCRIPTS processor.
2566 */
2567 hflags0 = INB(np, HF_PRT);
2568 hflags = hflags0;
2569
2570 if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
2571 if (hflags & HF_IN_PM0)
2572 nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
2573 else if (hflags & HF_IN_PM1)
2574 nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
2575
2576 if (hflags & HF_DP_SAVED)
2577 hflags ^= HF_ACT_PM;
2578 }
2579
2580 if (!(hflags & HF_ACT_PM)) {
2581 pm = &cp->phys.pm0;
2582 newcmd = SCRIPTA_BA(np, pm0_data);
2583 }
2584 else {
2585 pm = &cp->phys.pm1;
2586 newcmd = SCRIPTA_BA(np, pm1_data);
2587 }
2588
2589 hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
2590 if (hflags != hflags0)
2591 OUTB(np, HF_PRT, hflags);
2592
2593 /*
2594 * fillin the phase mismatch context
2595 */
2596 pm->sg.addr = cpu_to_scr(oadr + olen - rest);
2597 pm->sg.size = cpu_to_scr(rest);
2598 pm->ret = cpu_to_scr(nxtdsp);
2599
2600 /*
2601 * If we have a SWIDE,
2602 * - prepare the address to write the SWIDE from SCRIPTS,
2603 * - compute the SCRIPTS address to restart from,
2604 * - move current data pointer context by one byte.
2605 */
2606 nxtdsp = SCRIPTA_BA(np, dispatch);
2607 if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
2608 (INB(np, nc_scntl2) & WSR)) {
2609 u32 tmp;
2610
2611 /*
2612 * Set up the table indirect for the MOVE
2613 * of the residual byte and adjust the data
2614 * pointer context.
2615 */
2616 tmp = scr_to_cpu(pm->sg.addr);
2617 cp->phys.wresid.addr = cpu_to_scr(tmp);
2618 pm->sg.addr = cpu_to_scr(tmp + 1);
2619 tmp = scr_to_cpu(pm->sg.size);
2620 cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
2621 pm->sg.size = cpu_to_scr(tmp - 1);
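		/*
		 * Example with hypothetical values: if the PM context
		 * was { addr = 0x1000, size = 5 }, the wresid entry
		 * becomes { 0x1000, 1 } and the PM context is advanced
		 * to { 0x1001, 4 }.
		 */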
2622
2623 /*
2624 * If only the residual byte is to be moved,
2625 * no PM context is needed.
2626 */
2627 if ((tmp&0xffffff) == 1)
2628 newcmd = pm->ret;
2629
2630 /*
2631 * Prepare the address of SCRIPTS that will
2632 * move the residual byte to memory.
2633 */
2634 nxtdsp = SCRIPTB_BA(np, wsr_ma_helper);
2635 }
2636
2637 if (DEBUG_FLAGS & DEBUG_PHASE) {
2638 sym_print_addr(cp->cmd, "PM %x %x %x / %x %x %x.\n",
2639 hflags0, hflags, newcmd,
2640 (unsigned)scr_to_cpu(pm->sg.addr),
2641 (unsigned)scr_to_cpu(pm->sg.size),
2642 (unsigned)scr_to_cpu(pm->ret));
2643 }
2644
2645 /*
2646 * Restart the SCRIPTS processor.
2647 */
2648 sym_set_script_dp (np, cp, newcmd);
2649 OUTL_DSP(np, nxtdsp);
2650 return;
2651
2652 /*
2653 * Unexpected phase changes that occur when the current phase
2654 * is not a DATA IN or DATA OUT phase are due to error conditions.
2655 * Such an event may only happen when the SCRIPTS is using a
2656 * multibyte SCSI MOVE.
2657 *
2658 * Phase change Some possible cause
2659 *
2660 * COMMAND --> MSG IN SCSI parity error detected by target.
2661 * COMMAND --> STATUS Bad command or refused by target.
2662 * MSG OUT --> MSG IN Message rejected by target.
2663 * MSG OUT --> COMMAND Bogus target that discards extended
2664 * negotiation messages.
2665 *
2666 * The code below does not care about the new phase and so
2667 * trusts the target. Why annoy it?
2668 * If the interrupted phase is COMMAND phase, we restart at
2669 * dispatcher.
2670 * If a target does not get all the messages after selection,
2671 * the code assumes blindly that the target discards extended
2672 * messages and clears the negotiation status.
2673 * If the target does not want all our response to negotiation,
2674 * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
2675 * bloat for such a should_not_happen situation).
2676 * In all other situations, we reset the BUS.
2677 * Are these assumptions reasonable? (Wait and see ...)
2678 */
2679unexpected_phase:
2680 dsp -= 8;
2681 nxtdsp = 0;
2682
2683 switch (cmd & 7) {
2684 case 2: /* COMMAND phase */
2685 nxtdsp = SCRIPTA_BA(np, dispatch);
2686 break;
2687#if 0
2688 case 3: /* STATUS phase */
2689 nxtdsp = SCRIPTA_BA(np, dispatch);
2690 break;
2691#endif
2692 case 6: /* MSG OUT phase */
2693 /*
2694 * If the device may want to use untagged while we want
2695 * tagged, we prepare an IDENTIFY without disconnect privilege
2696 * granted, since we will not be able to handle a reselection.
2697 * Otherwise, we just don't care.
2698 */
2699 if (dsp == SCRIPTA_BA(np, send_ident)) {
2700 if (cp->tag != NO_TAG && olen - rest <= 3) {
2701 cp->host_status = HS_BUSY;
2702 np->msgout[0] = IDENTIFY(0, cp->lun);
2703 nxtdsp = SCRIPTB_BA(np, ident_break_atn);
2704 }
2705 else
2706 nxtdsp = SCRIPTB_BA(np, ident_break);
2707 }
2708 else if (dsp == SCRIPTB_BA(np, send_wdtr) ||
2709 dsp == SCRIPTB_BA(np, send_sdtr) ||
2710 dsp == SCRIPTB_BA(np, send_ppr)) {
2711 nxtdsp = SCRIPTB_BA(np, nego_bad_phase);
2712 if (dsp == SCRIPTB_BA(np, send_ppr)) {
2713 struct scsi_device *dev = cp->cmd->device;
2714 dev->ppr = 0;
2715 }
2716 }
2717 break;
2718#if 0
2719 case 7: /* MSG IN phase */
2720 nxtdsp = SCRIPTA_BA(np, clrack);
2721 break;
2722#endif
2723 }
2724
2725 if (nxtdsp) {
2726 OUTL_DSP(np, nxtdsp);
2727 return;
2728 }
2729
2730reset_all:
2731 sym_start_reset(np);
2732}
2733
2734/*
2735 * chip interrupt handler
2736 *
2737 * In normal situations, interrupt conditions occur one at
2738 * a time. But when something bad happens on the SCSI BUS,
2739 * the chip may raise several interrupt flags before
2740 * stopping and interrupting the CPU. The additional
2741 * interrupt flags are stacked in some extra registers
2742 * after the SIP and/or DIP flag has been raised in the
2743 * ISTAT. After the CPU has read the interrupt condition
2744 * flag from SIST or DSTAT, the chip unstacks the other
2745 * interrupt flags and sets the corresponding bits in
2746 * SIST or DSTAT. Since the chip starts stacking once the
2747 * SIP or DIP flag is set, there is a small window of time
2748 * where the stacking does not occur.
2749 *
2750 * Typically, multiple interrupt conditions may happen in
2751 * the following situations:
2752 *
2753 * - SCSI parity error + Phase mismatch (PAR|MA)
2754 * When a parity error is detected in input phase
2755 * and the device switches to msg-in phase inside a
2756 * block MOV.
2757 * - SCSI parity error + Unexpected disconnect (PAR|UDC)
2758 * When a stupid device does not want to handle the
2759 * recovery of an SCSI parity error.
2760 * - Some combinations of STO, PAR, UDC, ...
2761 * When using non compliant SCSI stuff, when the user is
2762 * doing non compliant hot tampering on the BUS, when
2763 * something really bad happens to a device, etc ...
2764 *
2765 * The heuristic suggested by SYMBIOS to handle
2766 * multiple interrupts is to try unstacking all
2767 * interrupt conditions and to handle them on some
2768 * priority based on error severity.
2769 * This will work when the unstacking has been
2770 * successful, but we cannot be 100 % sure of that,
2771 * since the CPU may have been faster to unstack than
2772 * the chip is able to stack. Hmmm ... But it seems that
2773 * such a situation is very unlikely to happen.
2774 *
2775 * If this happens, for example an STO caught by the CPU
2776 * and then a UDC happening before the CPU has restarted
2777 * the SCRIPTS, the driver may wrongly complete the
2778 * same command on UDC, since the SCRIPTS didn't restart
2779 * and the DSA still points to the same command.
2780 * We avoid this situation by setting the DSA to an
2781 * invalid value when the CCB is completed and before
2782 * restarting the SCRIPTS.
2783 *
2784 * Another issue is that we need some section of our
2785 * recovery procedures to be somehow uninterruptible but
2786 * the SCRIPTS processor does not provide such a
2787 * feature. For this reason, we handle recovery preferably
2788 * from the C code and check against some SCRIPTS critical
2789 * sections from the C code.
2790 *
2791 * Hopefully, the interrupt handling of the driver is now
2792 * able to resist weird BUS error conditions, but do not
2793 * ask me for any guarantee that it will never fail. :-)
2794 * Use at your own decision and risk.
2795 */
2796
2797void sym_interrupt (struct sym_hcb *np)
2798{
2799 u_char istat, istatc;
2800 u_char dstat;
2801 u_short sist;
2802
2803 /*
2804 * interrupt on the fly ?
2805 * (SCRIPTS may still be running)
2806 *
2807 * A `dummy read' is needed to ensure that the
2808 * clear of the INTF flag reaches the device
2809 * and that posted writes are flushed to memory
2810 * before the scanning of the DONE queue.
2811 * Note that SCRIPTS also does a (dummy) read to memory
2812 * prior to delivering the INTF interrupt condition.
2813 */
2814 istat = INB(np, nc_istat);
2815 if (istat & INTF) {
2816 OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem);
2817 istat = INB(np, nc_istat); /* DUMMY READ */
2818 if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
2819 sym_wakeup_done(np);
2820 }
2821
2822 if (!(istat & (SIP|DIP)))
2823 return;
2824
2825#if 0 /* We should never get this one */
2826 if (istat & CABRT)
2827 OUTB(np, nc_istat, CABRT);
2828#endif
2829
2830 /*
2831 * PAR and MA interrupts may occur at the same time,
2832 * and we need to know of both in order to handle
2833 * this situation properly. We try to unstack SCSI
2834 * interrupts for that reason. BTW, I dislike a LOT
2835 * such a loop inside the interrupt routine.
2836 * Even if DMA interrupt stacking is very unlikely to
2837 * happen, we also try unstacking these ones, since
2838 * this has no performance impact.
2839 */
2840 sist = 0;
2841 dstat = 0;
2842 istatc = istat;
2843 do {
2844 if (istatc & SIP)
2845 sist |= INW(np, nc_sist);
2846 if (istatc & DIP)
2847 dstat |= INB(np, nc_dstat);
2848 istatc = INB(np, nc_istat);
2849 istat |= istatc;
2850 } while (istatc & (SIP|DIP));
2851
2852 if (DEBUG_FLAGS & DEBUG_TINY)
2853 printf ("<%d|%x:%x|%x:%x>",
2854 (int)INB(np, nc_scr0),
2855 dstat,sist,
2856 (unsigned)INL(np, nc_dsp),
2857 (unsigned)INL(np, nc_dbc));
2858 /*
2859 * On paper, a memory read barrier may be needed here to
2860 * prevent out of order LOADs by the CPU from having
2861 * prefetched stale data prior to DMA having occurred.
2862 * And since we are paranoid ... :)
2863 */
2864 MEMORY_READ_BARRIER();
2865
2866 /*
2867 * First, interrupts we want to service cleanly.
2868 *
2869 * Phase mismatch (MA) is the most frequent interrupt
2870 * for chips earlier than the 896 and so we have to service
2871 * it as quickly as possible.
2872 * A SCSI parity error (PAR) may be combined with a phase
2873 * mismatch condition (MA).
2874 * Programmed interrupts (SIR) are used to call the C code
2875 * from SCRIPTS.
2876 * The single step interrupt (SSI) is not used in this
2877 * driver.
2878 */
2879 if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
2880 !(dstat & (MDPE|BF|ABRT|IID))) {
2881 if (sist & PAR) sym_int_par (np, sist);
2882 else if (sist & MA) sym_int_ma (np);
2883 else if (dstat & SIR) sym_int_sir (np);
2884 else if (dstat & SSI) OUTONB_STD();
2885 else goto unknown_int;
2886 return;
2887 }
2888
2889 /*
2890 * Now, interrupts that do not happen in normal
2891 * situations and that we may need to recover from.
2892 *
2893 * On SCSI RESET (RST), we reset everything.
2894 * On SCSI BUS MODE CHANGE (SBMC), we complete all
2895 * active CCBs with RESET status, prepare all devices
2896 * for negotiating again and restart the SCRIPTS.
2897 * On STO and UDC, we complete the CCB with the corres-
2898 * ponding status and restart the SCRIPTS.
2899 */
2900 if (sist & RST) {
2901 printf("%s: SCSI BUS reset detected.\n", sym_name(np));
2902 sym_start_up (np, 1);
2903 return;
2904 }
2905
2906 OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
2907 OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */
2908
2909 if (!(sist & (GEN|HTH|SGE)) &&
2910 !(dstat & (MDPE|BF|ABRT|IID))) {
2911 if (sist & SBMC) sym_int_sbmc (np);
2912 else if (sist & STO) sym_int_sto (np);
2913 else if (sist & UDC) sym_int_udc (np);
2914 else goto unknown_int;
2915 return;
2916 }
2917
2918 /*
2919 * Now, interrupts we are not able to recover from cleanly.
2920 *
2921 * Log message for hard errors.
2922 * Reset everything.
2923 */
2924
2925 sym_log_hard_error(np, sist, dstat);
2926
2927 if ((sist & (GEN|HTH|SGE)) ||
2928 (dstat & (MDPE|BF|ABRT|IID))) {
2929 sym_start_reset(np);
2930 return;
2931 }
2932
2933unknown_int:
2934 /*
2935 * We just miss the cause of the interrupt. :(
2936 * Print a message. The timeout will do the real work.
2937 */
2938 printf( "%s: unknown interrupt(s) ignored, "
2939 "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
2940 sym_name(np), istat, dstat, sist);
2941}
2942
2943/*
2944 * Dequeue from the START queue all CCBs that match
2945 * a given target/lun/task condition (-1 means all),
2946 * and move them from the BUSY queue to the COMP queue
2947 * with CAM_REQUEUE_REQ status condition.
2948 * This function is used during error handling/recovery.
2949 * It is called with SCRIPTS not running.
2950 */
2951static int
2952sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task)
2953{
2954 int j;
2955 struct sym_ccb *cp;
2956
2957 /*
2958 * Make sure the starting index is within range.
2959 */
2960 assert((i >= 0) && (i < 2*MAX_QUEUE));
2961
2962 /*
2963 * Walk until end of START queue and dequeue every job
2964 * that matches the target/lun/task condition.
2965 */
2966 j = i;
2967 while (i != np->squeueput) {
2968 cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
2969 assert(cp);
2970#ifdef SYM_CONF_IARB_SUPPORT
2971 /* Forget hints for IARB, they may no longer be relevant */
2972 cp->host_flags &= ~HF_HINT_IARB;
2973#endif
2974 if ((target == -1 || cp->target == target) &&
2975 (lun == -1 || cp->lun == lun) &&
2976 (task == -1 || cp->tag == task)) {
2977 sym_set_cam_status(cp->cmd, CAM_REQUEUE_REQ);
2978 sym_remque(&cp->link_ccbq);
2979 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
2980 }
2981 else {
2982 if (i != j)
2983 np->squeue[j] = np->squeue[i];
2984 if ((j += 2) >= MAX_QUEUE*2) j = 0;
2985 }
2986 if ((i += 2) >= MAX_QUEUE*2) i = 0;
2987 }
2988 if (i != j) /* Copy back the idle task if needed */
2989 np->squeue[j] = np->squeue[i];
2990 np->squeueput = j; /* Update our current start queue pointer */
2991
2992 return (i - j) / 2;
2993}
2994
2995/*
2996 * chip handler for bad SCSI status condition
2997 *
2998 * In case of bad SCSI status, we unqueue all the tasks
2999 * currently queued to the controller but not yet started
3000 * and then restart the SCRIPTS processor immediately.
3001 *
3002 * QUEUE FULL and BUSY conditions are handled the same way.
3003 * Basically all the not yet started tasks are requeued to
3004 * the device queue and the queue is frozen until a completion.
3005 *
3006 * For CHECK CONDITION and COMMAND TERMINATED status, we use
3007 * the CCB of the failed command to prepare a REQUEST SENSE
3008 * SCSI command and queue it to the controller queue.
3009 *
3010 * SCRATCHA is assumed to have been loaded with STARTPOS
3011 * before the SCRIPTS called the C code.
3012 */
3013static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb *cp)
3014{
3015 u32 startp;
3016 u_char s_status = cp->ssss_status;
3017 u_char h_flags = cp->host_flags;
3018 int msglen;
3019 int i;
3020
3021 /*
3022 * Compute the index of the next job to start from SCRIPTS.
3023 */
3024 i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
3025
3026 /*
3027 * The last CCB queued, used for the IARB hint, may
3028 * no longer be relevant. Forget it.
3029 */
3030#ifdef SYM_CONF_IARB_SUPPORT
3031 if (np->last_cp)
3032 np->last_cp = 0;
3033#endif
3034
3035 /*
3036 * Now deal with the SCSI status.
3037 */
3038 switch(s_status) {
3039 case S_BUSY:
3040 case S_QUEUE_FULL:
3041 if (sym_verbose >= 2) {
3042 sym_print_addr(cp->cmd, "%s\n",
3043 s_status == S_BUSY ? "BUSY" : "QUEUE FULL");
3044 }
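		/* fall through: BUSY and QUEUE FULL share the error completion below */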
3045 default: /* S_INT, S_INT_COND_MET, S_CONFLICT */
3046 sym_complete_error (np, cp);
3047 break;
3048 case S_TERMINATED:
3049 case S_CHECK_COND:
3050 /*
3051 * If we get an SCSI error when requesting sense, give up.
3052 */
3053 if (h_flags & HF_SENSE) {
3054 sym_complete_error (np, cp);
3055 break;
3056 }
3057
3058 /*
3059 * Dequeue all queued CCBs for that device not yet started,
3060 * and restart the SCRIPTS processor immediately.
3061 */
3062 sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
3063 OUTL_DSP(np, SCRIPTA_BA(np, start));
3064
3065 /*
3066 * Save some info of the actual IO.
3067 * Compute the data residual.
3068 */
3069 cp->sv_scsi_status = cp->ssss_status;
3070 cp->sv_xerr_status = cp->xerr_status;
3071 cp->sv_resid = sym_compute_residual(np, cp);
3072
3073 /*
3074 * Prepare all needed data structures for
3075 * requesting sense data.
3076 */
3077
3078 cp->scsi_smsg2[0] = IDENTIFY(0, cp->lun);
3079 msglen = 1;
3080
3081 /*
3082 * If we are currently using anything different from
3083 * async. 8 bit data transfers with that target,
3084 * start a negotiation, since the device may want
3085 * to report us a UNIT ATTENTION condition due to
3086 * a cause we currently ignore, and we do not want
3087 * to be stuck with WIDE and/or SYNC data transfer.
3088 *
3089 * cp->nego_status is filled by sym_prepare_nego().
3090 */
3091 cp->nego_status = 0;
3092 msglen += sym_prepare_nego(np, cp, &cp->scsi_smsg2[msglen]);
3093 /*
3094 * Message table indirect structure.
3095 */
3096 cp->phys.smsg.addr = cpu_to_scr(CCB_BA(cp, scsi_smsg2));
3097 cp->phys.smsg.size = cpu_to_scr(msglen);
3098
3099 /*
3100 * sense command
3101 */
3102 cp->phys.cmd.addr = cpu_to_scr(CCB_BA(cp, sensecmd));
3103 cp->phys.cmd.size = cpu_to_scr(6);
3104
3105 /*
3106 * patch requested size into sense command
3107 */
3108 cp->sensecmd[0] = REQUEST_SENSE;
3109 cp->sensecmd[1] = 0;
3110 if (cp->cmd->device->scsi_level <= SCSI_2 && cp->lun <= 7)
3111 cp->sensecmd[1] = cp->lun << 5;
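		/*
		 * Pre-SCSI-3 devices expect the LUN in bits 5-7 of CDB
		 * byte 1, hence the shift above for LUNs 0-7.
		 */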
3112 cp->sensecmd[4] = SYM_SNS_BBUF_LEN;
3113 cp->data_len = SYM_SNS_BBUF_LEN;
3114
3115 /*
3116 * sense data
3117 */
3118 memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN);
3119 cp->phys.sense.addr = cpu_to_scr(CCB_BA(cp, sns_bbuf));
3120 cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN);
3121
3122 /*
3123 * requeue the command.
3124 */
3125 startp = SCRIPTB_BA(np, sdata_in);
3126
3127 cp->phys.head.savep = cpu_to_scr(startp);
3128 cp->phys.head.lastp = cpu_to_scr(startp);
3129 cp->startp = cpu_to_scr(startp);
3130 cp->goalp = cpu_to_scr(startp + 16);
3131
3132 cp->host_xflags = 0;
3133 cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
3134 cp->ssss_status = S_ILLEGAL;
3135 cp->host_flags = (HF_SENSE|HF_DATA_IN);
3136 cp->xerr_status = 0;
3137 cp->extra_bytes = 0;
3138
3139 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select));
3140
3141 /*
3142 * Requeue the command.
3143 */
3144 sym_put_start_queue(np, cp);
3145
3146 /*
3147 * Give back to the upper layer everything we have dequeued.
3148 */
3149 sym_flush_comp_queue(np, 0);
3150 break;
3151 }
3152}
3153
3154/*
3155 * After a device has accepted some management message
3156 * as BUS DEVICE RESET, ABORT TASK, etc ..., or when
3157 * a device signals a UNIT ATTENTION condition, some
3158 * tasks are thrown away by the device. We are required
3159 * to reflect that on our tasks list since the device
3160 * will never complete these tasks.
3161 *
3162 * This function moves from the BUSY queue to the COMP
3163 * queue all disconnected CCBs for a given target that
3164 * match the following criteria:
3165 * - lun=-1 means any logical UNIT otherwise a given one.
3166 * - task=-1 means any task, otherwise a given one.
3167 */
3168int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task)
3169{
3170 SYM_QUEHEAD qtmp, *qp;
3171 int i = 0;
3172 struct sym_ccb *cp;
3173
3174 /*
3175 * Move the entire BUSY queue to our temporary queue.
3176 */
3177 sym_que_init(&qtmp);
3178 sym_que_splice(&np->busy_ccbq, &qtmp);
3179 sym_que_init(&np->busy_ccbq);
3180
3181 /*
3182 * Put all CCBs that match our criteria into
3183 * the COMP queue and put back other ones into
3184 * the BUSY queue.
3185 */
3186 while ((qp = sym_remque_head(&qtmp)) != 0) {
3187 struct scsi_cmnd *cmd;
3188 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
3189 cmd = cp->cmd;
3190 if (cp->host_status != HS_DISCONNECT ||
3191 cp->target != target ||
3192 (lun != -1 && cp->lun != lun) ||
3193 (task != -1 &&
3194 (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) {
3195 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
3196 continue;
3197 }
3198 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
3199
3200 /* Preserve the software timeout condition */
3201 if (sym_get_cam_status(cmd) != CAM_CMD_TIMEOUT)
3202 sym_set_cam_status(cmd, cam_status);
3203 ++i;
3204#if 0
3205printf("XXXX TASK @%p CLEARED\n", cp);
3206#endif
3207 }
3208 return i;
3209}
3210
3211/*
3212 * chip handler for TASKS recovery
3213 *
3214 * We cannot safely abort a command, while the SCRIPTS
3215 * processor is running, since we would just be racing
3216 * with it.
3217 *
3218 * As long as we have tasks to abort, we keep the SEM
3219 * bit set in the ISTAT. When this bit is set, the
3220 * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED)
3221 * each time it enters the scheduler.
3222 *
3223 * If we have to reset a target, clear tasks of a unit,
3224 * or to perform the abort of a disconnected job, we
3225 * restart the SCRIPTS for selecting the target. Once
3226 * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
3227 * If it loses arbitration, the SCRIPTS will interrupt again
3228 * the next time it enters its scheduler, and so on ...
3229 *
3230 * On SIR_TARGET_SELECTED, we scan for the most
3231 * appropriate thing to do:
3232 *
3233 * - If nothing, we just send a M_ABORT message to the
3234 * target to get rid of the useless SCSI bus ownership.
3235 * According to the specs, no tasks shall be affected.
3236 * - If the target is to be reset, we send it a M_RESET
3237 * message.
3238 * - If a logical UNIT is to be cleared, we send the
3239 * IDENTIFY(lun) + M_ABORT.
3240 * - If an untagged task is to be aborted, we send the
3241 * IDENTIFY(lun) + M_ABORT.
3242 * - If a tagged task is to be aborted, we send the
3243 * IDENTIFY(lun) + task attributes + M_ABORT_TAG.
3244 *
3245 * Once our 'kiss of death' :) message has been accepted
3246 * by the target, the SCRIPTS interrupts again
3247 * (SIR_ABORT_SENT). On this interrupt, we complete
3248 * all the CCBs that should have been aborted by the
3249 * target according to our message.
3250 */
3251static void sym_sir_task_recovery(struct sym_hcb *np, int num)
3252{
3253 SYM_QUEHEAD *qp;
3254 struct sym_ccb *cp;
3255 struct sym_tcb *tp = NULL; /* gcc isn't quite smart enough yet */
3256 struct scsi_target *starget;
3257 int target=-1, lun=-1, task;
3258 int i, k;
3259
3260 switch(num) {
3261 /*
3262 * The SCRIPTS processor stopped before starting
3263 * the next command in order to allow us to perform
3264 * some task recovery.
3265 */
3266 case SIR_SCRIPT_STOPPED:
3267 /*
3268 * Do we have any target to reset or unit to clear ?
3269 */
3270 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
3271 tp = &np->target[i];
3272 if (tp->to_reset ||
3273 (tp->lun0p && tp->lun0p->to_clear)) {
3274 target = i;
3275 break;
3276 }
3277 if (!tp->lunmp)
3278 continue;
3279 for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
3280 if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
3281 target = i;
3282 break;
3283 }
3284 }
3285 if (target != -1)
3286 break;
3287 }
3288
3289 /*
3290 * If not, walk the busy queue for any
3291 * disconnected CCB to be aborted.
3292 */
3293 if (target == -1) {
3294 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
3295 cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
3296 if (cp->host_status != HS_DISCONNECT)
3297 continue;
3298 if (cp->to_abort) {
3299 target = cp->target;
3300 break;
3301 }
3302 }
3303 }
3304
3305 /*
3306 * If some target is to be selected,
3307 * prepare and start the selection.
3308 */
3309 if (target != -1) {
3310 tp = &np->target[target];
3311 np->abrt_sel.sel_id = target;
3312 np->abrt_sel.sel_scntl3 = tp->head.wval;
3313 np->abrt_sel.sel_sxfer = tp->head.sval;
3314 OUTL(np, nc_dsa, np->hcb_ba);
3315 OUTL_DSP(np, SCRIPTB_BA(np, sel_for_abort));
3316 return;
3317 }
3318
3319 /*
3320 * Now look for a CCB to abort that hasn't started yet.
3321 * Btw, the SCRIPTS processor is still stopped, so
3322 * we are not racing with it.
3323 */
3324 i = 0;
3325 cp = NULL;
3326 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
3327 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
3328 if (cp->host_status != HS_BUSY &&
3329 cp->host_status != HS_NEGOTIATE)
3330 continue;
3331 if (!cp->to_abort)
3332 continue;
3333#ifdef SYM_CONF_IARB_SUPPORT
3334 /*
3335 * If we are using IMMEDIATE ARBITRATION, we do not
3336 * want to cancel the last queued CCB, since the
3337 * SCRIPTS may have anticipated the selection.
3338 */
3339 if (cp == np->last_cp) {
3340 cp->to_abort = 0;
3341 continue;
3342 }
3343#endif
3344 i = 1; /* Means we have found some */
3345 break;
3346 }
3347 if (!i) {
3348 /*
3349 * We are done, so we do not need
3350 * to synchronize with the SCRIPTS any longer.
3351 * Remove the SEM flag from the ISTAT.
3352 */
3353 np->istat_sem = 0;
3354 OUTB(np, nc_istat, SIGP);
3355 break;
3356 }
3357 /*
3358 * Compute index of next position in the start
3359 * queue the SCRIPTS intends to start and dequeue
3360 * all CCBs for that device that haven't been started.
3361 */
3362 i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
3363 i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
3364
3365 /*
3366 * Make sure at least our IO to abort has been dequeued.
3367 */
3368#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
3369 assert(i && sym_get_cam_status(cp->cmd) == CAM_REQUEUE_REQ);
3370#else
3371 sym_remque(&cp->link_ccbq);
3372 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
3373#endif
3374 /*
3375 * Keep track in cam status of the reason of the abort.
3376 */
3377 if (cp->to_abort == 2)
3378 sym_set_cam_status(cp->cmd, CAM_CMD_TIMEOUT);
3379 else
3380 sym_set_cam_status(cp->cmd, CAM_REQ_ABORTED);
3381
3382 /*
3383 * Complete with error everything that we have dequeued.
3384 */
3385 sym_flush_comp_queue(np, 0);
3386 break;
3387 /*
3388 * The SCRIPTS processor has selected a target
3389 * for which we may have some manual recovery to perform.
3390 */
3391 case SIR_TARGET_SELECTED:
3392 target = INB(np, nc_sdid) & 0xf;
3393 tp = &np->target[target];
3394
3395 np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));
3396
3397 /*
3398 * If the target is to be reset, prepare a
3399 * M_RESET message and clear the to_reset flag
3400 * since we do not expect this operation to fail.
3401 */
3402 if (tp->to_reset) {
3403 np->abrt_msg[0] = M_RESET;
3404 np->abrt_tbl.size = 1;
3405 tp->to_reset = 0;
3406 break;
3407 }
3408
3409 /*
3410 * Otherwise, look for some logical unit to be cleared.
3411 */
3412 if (tp->lun0p && tp->lun0p->to_clear)
3413 lun = 0;
3414 else if (tp->lunmp) {
3415 for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
3416 if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
3417 lun = k;
3418 break;
3419 }
3420 }
3421 }
3422
3423 /*
3424 * If a logical unit is to be cleared, prepare
3425 * an IDENTIFY(lun) + ABORT MESSAGE.
3426 */
3427 if (lun != -1) {
3428 struct sym_lcb *lp = sym_lp(tp, lun);
3429 lp->to_clear = 0; /* We don't expect to fail here */
3430 np->abrt_msg[0] = IDENTIFY(0, lun);
3431 np->abrt_msg[1] = M_ABORT;
3432 np->abrt_tbl.size = 2;
3433 break;
3434 }
3435
3436 /*
3437 * Otherwise, look for some disconnected job to
3438 * abort for this target.
3439 */
3440 i = 0;
3441 cp = NULL;
3442 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
3443 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
3444 if (cp->host_status != HS_DISCONNECT)
3445 continue;
3446 if (cp->target != target)
3447 continue;
3448 if (!cp->to_abort)
3449 continue;
3450 i = 1; /* Means we have some */
3451 break;
3452 }
3453
3454 /*
3455 * If we have none, probably since the device has
3456 * completed the command before we won abitration,
3457 * send a M_ABORT message without IDENTIFY.
3458 * According to the specs, the device must just
3459 * disconnect the BUS and not abort any task.
3460 */
3461 if (!i) {
3462 np->abrt_msg[0] = M_ABORT;
3463 np->abrt_tbl.size = 1;
3464 break;
3465 }
3466
3467 /*
3468 * We have some task to abort.
3469 * Set the IDENTIFY(lun)
3470 */
3471 np->abrt_msg[0] = IDENTIFY(0, cp->lun);
3472
3473 /*
3474 * If we want to abort an untagged command, we
3475 * will send an IDENTIFY + M_ABORT.
3476 * Otherwise (tagged command), we will send
3477 * an IDENTIFY + task attributes + ABORT TAG.
3478 */
3479 if (cp->tag == NO_TAG) {
3480 np->abrt_msg[1] = M_ABORT;
3481 np->abrt_tbl.size = 2;
3482 } else {
3483 np->abrt_msg[1] = cp->scsi_smsg[1];
3484 np->abrt_msg[2] = cp->scsi_smsg[2];
3485 np->abrt_msg[3] = M_ABORT_TAG;
3486 np->abrt_tbl.size = 4;
3487 }
3488 /*
3489 * Keep track of software timeout condition, since the
3490 * peripheral driver may not count retries on abort
3491 * conditions not due to timeout.
3492 */
3493 if (cp->to_abort == 2)
3494 sym_set_cam_status(cp->cmd, CAM_CMD_TIMEOUT);
3495 cp->to_abort = 0; /* We do not expect to fail here */
3496 break;
3497
3498 /*
3499 * The target has accepted our message and switched
3500 * to BUS FREE phase as we expected.
3501 */
3502 case SIR_ABORT_SENT:
3503 target = INB(np, nc_sdid) & 0xf;
3504 tp = &np->target[target];
3505 starget = tp->sdev->sdev_target;
3506
3507 /*
3508 * If we didn't abort anything, leave here.
3509 */
3510 if (np->abrt_msg[0] == M_ABORT)
3511 break;
3512
3513 /*
3514 * If we sent a M_RESET, then a hardware reset has
3515 * been performed by the target.
3516 * - Reset everything to async 8 bit
3517 * - Tell ourself to negotiate next time :-)
3518 * - Prepare to clear all disconnected CCBs for
3519 * this target from our task list (lun=task=-1)
3520 */
3521 lun = -1;
3522 task = -1;
3523 if (np->abrt_msg[0] == M_RESET) {
3524 tp->head.sval = 0;
3525 tp->head.wval = np->rv_scntl3;
3526 tp->head.uval = 0;
3527 spi_period(starget) = 0;
3528 spi_offset(starget) = 0;
3529 spi_width(starget) = 0;
3530 spi_iu(starget) = 0;
3531 spi_dt(starget) = 0;
3532 spi_qas(starget) = 0;
3533 tp->tgoal.check_nego = 1;
3534 }
3535
3536 /*
3537 * Otherwise, check for the LUN and TASK(s)
3538 * concerned by the cancellation.
3539 * If it is not ABORT_TAG then it is CLEAR_QUEUE
3540 * or an ABORT message :-)
3541 */
3542 else {
3543 lun = np->abrt_msg[0] & 0x3f;
3544 if (np->abrt_msg[1] == M_ABORT_TAG)
3545 task = np->abrt_msg[2];
3546 }
3547
3548 /*
3549 * Complete all the CCBs the device should have
3550 * aborted due to our 'kiss of death' message.
3551 */
3552 i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
3553 sym_dequeue_from_squeue(np, i, target, lun, -1);
3554 sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task);
3555 sym_flush_comp_queue(np, 0);
3556
3557 /*
3558 * If we sent a BDR, make the upper layer aware of that.
3559 */
3560 if (np->abrt_msg[0] == M_RESET)
3561 sym_xpt_async_sent_bdr(np, target);
3562 break;
3563 }
3564
3565 /*
3566 * Print to the log the message we intend to send.
3567 */
3568 if (num == SIR_TARGET_SELECTED) {
3569 dev_info(&tp->sdev->sdev_target->dev, "control msgout:");
3570 sym_printl_hex(np->abrt_msg, np->abrt_tbl.size);
3571 np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
3572 }
3573
3574 /*
3575 * Let the SCRIPTS processor continue.
3576 */
3577 OUTONB_STD();
3578}
3579
3580/*
3581 * Gerard's alchemy:) that deals with the data
3582 * pointer for both MDP and the residual calculation.
3583 *
3584 * I didn't want to bloat the code by more than 200
3585 * lines for the handling of both MDP and the residual.
3586 * This has been achieved by using a data pointer
3587 * representation consisting of an index into the data
3588 * array (dp_sg) and a negative offset (dp_ofs) that
3589 * have the following meaning:
3590 *
3591 * - dp_sg = SYM_CONF_MAX_SG
3592 * we are at the end of the data script.
3593 * - dp_sg < SYM_CONF_MAX_SG
3594 * dp_sg points to the next entry of the scatter array
3595 * we want to transfer.
3596 * - dp_ofs < 0
3597 * dp_ofs represents the residual number of bytes of the
3598 * previous scatter entry that we will send first.
3599 * - dp_ofs = 0
3600 * no residual to send first.
3601 *
3602 * The function sym_evaluate_dp() accepts an arbitrary
3603 * offset (basically from the MDP message) and returns
3604 * the corresponding values of dp_sg and dp_ofs.
3605 */
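/*
 * Worked example with assumed values: for a command with 3 scatter
 * entries of 512 bytes each and SYM_CONF_MAX_SG hypothetically 96,
 * the valid entries sit at indexes 93..95.  A data pointer located
 * 100 bytes into the middle entry is represented as dp_sg = 95 and
 * dp_ofs = -412: the remaining 412 bytes of entry 94 are sent first,
 * then transfer resumes at entry 95.
 */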
3606
3607static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int *ofs)
3608{
3609 u32 dp_scr;
3610 int dp_ofs, dp_sg, dp_sgmin;
3611 int tmp;
3612 struct sym_pmc *pm;
3613
3614 /*
3615 * Compute the resulting data pointer in terms of a script
3616 * address within some DATA script and a signed byte offset.
3617 */
3618 dp_scr = scr;
3619 dp_ofs = *ofs;
3620 if (dp_scr == SCRIPTA_BA(np, pm0_data))
3621 pm = &cp->phys.pm0;
3622 else if (dp_scr == SCRIPTA_BA(np, pm1_data))
3623 pm = &cp->phys.pm1;
3624 else
3625 pm = NULL;
3626
3627 if (pm) {
3628 dp_scr = scr_to_cpu(pm->ret);
3629 dp_ofs -= scr_to_cpu(pm->sg.size);
3630 }
3631
3632 /*
3633 * If we are auto-sensing, then we are done.
3634 */
3635 if (cp->host_flags & HF_SENSE) {
3636 *ofs = dp_ofs;
3637 return 0;
3638 }
3639
3640 /*
3641 * Deduce the index of the sg entry.
3642 * Keep track of the index of the first valid entry.
3643 * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
3644 * end of the data.
3645 */
3646 tmp = scr_to_cpu(sym_goalp(cp));
3647 dp_sg = SYM_CONF_MAX_SG;
3648 if (dp_scr != tmp)
3649 dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
3650 dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
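	/*
	 * (Presumably the (2*4) factor above is the size in bytes of
	 * one SCRIPTS data-move instruction, i.e. two 32-bit words, so
	 * the byte distance from the goal pointer converts into a count
	 * of scatter entries; sym_modify_dp() below performs the
	 * inverse computation when rebuilding a script address from
	 * dp_sg.)
	 */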
3651
3652 /*
3653 * Move to the sg entry the data pointer belongs to.
3654 *
3655 * If we are inside the data area, we expect result to be:
3656 *
3657 * Either,
3658 * dp_ofs = 0 and dp_sg is the index of the sg entry
3659 * the data pointer belongs to (or the end of the data)
3660 * Or,
3661 * dp_ofs < 0 and dp_sg is the index of the sg entry
3662 * the data pointer belongs to + 1.
3663 */
3664 if (dp_ofs < 0) {
3665 int n;
3666 while (dp_sg > dp_sgmin) {
3667 --dp_sg;
3668 tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
3669 n = dp_ofs + (tmp & 0xffffff);
3670 if (n > 0) {
3671 ++dp_sg;
3672 break;
3673 }
3674 dp_ofs = n;
3675 }
3676 }
3677 else if (dp_ofs > 0) {
3678 while (dp_sg < SYM_CONF_MAX_SG) {
3679 tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
3680 dp_ofs -= (tmp & 0xffffff);
3681 ++dp_sg;
3682 if (dp_ofs <= 0)
3683 break;
3684 }
3685 }
3686
3687 /*
3688 * Make sure the data pointer is inside the data area.
3689 * If not, return some error.
3690 */
3691 if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
3692 goto out_err;
3693 else if (dp_sg > SYM_CONF_MAX_SG ||
3694 (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
3695 goto out_err;
3696
3697 /*
3698 * Save the extreme pointer if needed.
3699 */
3700 if (dp_sg > cp->ext_sg ||
3701 (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
3702 cp->ext_sg = dp_sg;
3703 cp->ext_ofs = dp_ofs;
3704 }
3705
3706 /*
3707 * Return data.
3708 */
3709 *ofs = dp_ofs;
3710 return dp_sg;
3711
3712out_err:
3713 return -1;
3714}
3715
3716/*
3717 * chip handler for MODIFY DATA POINTER MESSAGE
3718 *
3719 * We also call this function on IGNORE WIDE RESIDUE
3720 * messages that do not match a SWIDE full condition.
3721 * Btw, we assume in that situation that such a message
3722 * is equivalent to a MODIFY DATA POINTER (offset=-1).
3723 */
3724
3725static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp, int ofs)
3726{
3727 int dp_ofs = ofs;
3728 u32 dp_scr = sym_get_script_dp (np, cp);
3729 u32 dp_ret;
3730 u32 tmp;
3731 u_char hflags;
3732 int dp_sg;
3733 struct sym_pmc *pm;
3734
3735 /*
3736 * Not supported for auto-sense.
3737 */
3738 if (cp->host_flags & HF_SENSE)
3739 goto out_reject;
3740
3741 /*
3742	 * Apply our alchemy:) (see comments in sym_evaluate_dp())
3743	 * to the resulting data pointer.
3744 */
3745 dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
3746 if (dp_sg < 0)
3747 goto out_reject;
3748
3749 /*
3750	 * And our alchemy:) lets us easily calculate the data
3751 * script address we want to return for the next data phase.
3752 */
3753 dp_ret = cpu_to_scr(sym_goalp(cp));
3754 dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);
3755
3756 /*
3757	 * If the offset into the scatter entry is zero, we do not need
3758 * a context for the new current data pointer.
3759 */
3760 if (dp_ofs == 0) {
3761 dp_scr = dp_ret;
3762 goto out_ok;
3763 }
3764
3765 /*
3766 * Get a context for the new current data pointer.
3767 */
3768 hflags = INB(np, HF_PRT);
3769
3770 if (hflags & HF_DP_SAVED)
3771 hflags ^= HF_ACT_PM;
3772
3773 if (!(hflags & HF_ACT_PM)) {
3774 pm = &cp->phys.pm0;
3775 dp_scr = SCRIPTA_BA(np, pm0_data);
3776 }
3777 else {
3778 pm = &cp->phys.pm1;
3779 dp_scr = SCRIPTA_BA(np, pm1_data);
3780 }
3781
3782 hflags &= ~(HF_DP_SAVED);
3783
3784 OUTB(np, HF_PRT, hflags);
3785
3786 /*
3787 * Set up the new current data pointer.
3788 * ofs < 0 there, and for the next data phase, we
3789 * want to transfer part of the data of the sg entry
3790 * corresponding to index dp_sg-1 prior to returning
3791 * to the main data script.
3792 */
3793 pm->ret = cpu_to_scr(dp_ret);
3794 tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
3795 tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
3796 pm->sg.addr = cpu_to_scr(tmp);
3797 pm->sg.size = cpu_to_scr(-dp_ofs);
3798
3799out_ok:
3800 sym_set_script_dp (np, cp, dp_scr);
3801 OUTL_DSP(np, SCRIPTA_BA(np, clrack));
3802 return;
3803
3804out_reject:
3805 OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
3806}
3807
3808
3809/*
3810 * chip calculation of the data residual.
3811 *
3812 * As I used to say, the requirement of data residual
3813 * in SCSI is broken, useless and cannot be achieved
3814 * without huge complexity.
3815 * But most OSes and even the official CAM require it.
3816 * When stupidity happens to be so widespread inside
3817 * a community, it gets hard to convince anyone otherwise.
3818 *
3819 * Anyway, I don't care, since I am not going to use
3820 * any software that treats this data residual as
3821 * relevant information. :)
3822 */
3823
3824int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
3825{
3826 int dp_sg, dp_sgmin, resid = 0;
3827 int dp_ofs = 0;
3828
3829 /*
3830 * Check for some data lost or just thrown away.
3831 * We are not required to be quite accurate in this
3832 * situation. Btw, if we are odd for output and the
3833 * device claims some more data, it may well happen
3834	 * that our residual is zero. :-)
3835 */
3836 if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
3837 if (cp->xerr_status & XE_EXTRA_DATA)
3838 resid -= cp->extra_bytes;
3839 if (cp->xerr_status & XE_SODL_UNRUN)
3840 ++resid;
3841 if (cp->xerr_status & XE_SWIDE_OVRUN)
3842 --resid;
3843 }
3844
3845 /*
3846 * If all data has been transferred,
3847 * there is no residual.
3848 */
3849 if (cp->phys.head.lastp == sym_goalp(cp))
3850 return resid;
3851
3852 /*
3853 * If no data transfer occurs, or if the data
3854 * pointer is weird, return full residual.
3855 */
3856 if (cp->startp == cp->phys.head.lastp ||
3857 sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
3858 &dp_ofs) < 0) {
3859 return cp->data_len;
3860 }
3861
3862 /*
3863 * If we were auto-sensing, then we are done.
3864 */
3865 if (cp->host_flags & HF_SENSE) {
3866 return -dp_ofs;
3867 }
3868
3869 /*
3870	 * We are now fully comfortable with the computation
3871 * of the data residual (2's complement).
3872 */
3873 dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
3874 resid = -cp->ext_ofs;
3875 for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
3876 u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
3877 resid += (tmp & 0xffffff);
3878 }
3879
3880 /*
3881 * Hopefully, the result is not too wrong.
3882 */
3883 return resid;
3884}
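/*
 * Illustration of the sum above (made-up numbers, and ignoring the
 * extra-byte adjustments at the top of the function): with
 * cp->ext_ofs == -512 and two untouched 4 KiB scatter entries left
 * from cp->ext_sg onward, the residual is 512 + 4096 + 4096 = 8704
 * bytes, i.e. the untransferred tail of the previous entry plus every
 * entry the data pointer never reached.
 */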
3885
3886/*
3887 * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
3888 *
3889 * When we try to negotiate, we append the negotiation message
3890 * to the identify and (maybe) simple tag message.
3891 * The host status field is set to HS_NEGOTIATE to mark this
3892 * situation.
3893 *
3894 * If the target doesn't answer this message immediately
3895 * (as required by the standard), the SIR_NEGO_FAILED interrupt
3896 * will be raised eventually.
3897 * The handler removes the HS_NEGOTIATE status, and sets the
3898 * negotiated value to the default (async / nowide).
3899 *
3900 * If we receive a matching answer immediately, we check it
3901 * for validity, and set the values.
3902 *
3903 * If we receive a Reject message immediately, we assume the
3904 * negotiation has failed, and fall back to standard values.
3905 *
3906 * If we receive a negotiation message while not in HS_NEGOTIATE
3907 * state, it's a target initiated negotiation. We prepare a
3908 * (hopefully) valid answer, set our parameters, and send back
3909 * this answer to the target.
3910 *
3911 * If the target doesn't fetch the answer (no message out phase),
3912 * we assume the negotiation has failed, and fall back to default
3913 * settings (SIR_NEGO_PROTO interrupt).
3914 *
3915 * When we set the values, we adjust them in all ccbs belonging
3916 * to this target, in the controller's register, and in the "phys"
3917 * field of the controller's struct sym_hcb.
3918 */
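/*
 * For illustration, a host-initiated SDTR exchange at the message
 * level looks roughly as follows (the byte layout matches the code
 * below; the actual period/offset values are made up):
 *
 *   msgout: M_EXTENDED, 3, M_X_SYNC_REQ, per, ofs   (after IDENTIFY)
 *   msgin : M_EXTENDED, 3, M_X_SYNC_REQ, per', ofs' (target's reply)
 *
 * With HS_NEGOTIATE set, the reply is checked against our limits and
 * either applied with sym_setsync() or rejected through the msg_bad
 * SCRIPTS entry, as implemented by the handlers that follow.
 */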
3919
3920/*
3921 * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
3922 */
3923static int
3924sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
3925{
3926 int target = cp->target;
3927 u_char chg, ofs, per, fak, div;
3928
3929 if (DEBUG_FLAGS & DEBUG_NEGO) {
3930 sym_print_nego_msg(np, target, "sync msgin", np->msgin);
3931 }
3932
3933 /*
3934 * Get requested values.
3935 */
3936 chg = 0;
3937 per = np->msgin[3];
3938 ofs = np->msgin[4];
3939
3940 /*
3941 * Check values against our limits.
3942 */
3943 if (ofs) {
3944 if (ofs > np->maxoffs)
3945 {chg = 1; ofs = np->maxoffs;}
3946 }
3947
3948 if (ofs) {
3949 if (per < np->minsync)
3950 {chg = 1; per = np->minsync;}
3951 }
3952
3953 /*
3954 * Get new chip synchronous parameters value.
3955 */
3956 div = fak = 0;
3957 if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
3958 goto reject_it;
3959
3960 if (DEBUG_FLAGS & DEBUG_NEGO) {
3961 sym_print_addr(cp->cmd,
3962 "sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
3963 ofs, per, div, fak, chg);
3964 }
3965
3966 /*
3967 * If it was an answer we want to change,
3968 * then it isn't acceptable. Reject it.
3969 */
3970 if (!req && chg)
3971 goto reject_it;
3972
3973 /*
3974 * Apply new values.
3975 */
3976 sym_setsync (np, target, ofs, per, div, fak);
3977
3978 /*
3979 * It was an answer. We are done.
3980 */
3981 if (!req)
3982 return 0;
3983
3984 /*
3985 * It was a request. Prepare an answer message.
3986 */
3987 np->msgout[0] = M_EXTENDED;
3988 np->msgout[1] = 3;
3989 np->msgout[2] = M_X_SYNC_REQ;
3990 np->msgout[3] = per;
3991 np->msgout[4] = ofs;
3992
3993 if (DEBUG_FLAGS & DEBUG_NEGO) {
3994 sym_print_nego_msg(np, target, "sync msgout", np->msgout);
3995 }
3996
3997 np->msgin [0] = M_NOOP;
3998
3999 return 0;
4000
4001reject_it:
4002 sym_setsync (np, target, 0, 0, 0, 0);
4003 return -1;
4004}
4005
4006static void sym_sync_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
4007{
4008 int req = 1;
4009 int result;
4010
4011 /*
4012 * Request or answer ?
4013 */
4014 if (INB(np, HS_PRT) == HS_NEGOTIATE) {
4015 OUTB(np, HS_PRT, HS_BUSY);
4016 if (cp->nego_status && cp->nego_status != NS_SYNC)
4017 goto reject_it;
4018 req = 0;
4019 }
4020
4021 /*
4022 * Check and apply new values.
4023 */
4024 result = sym_sync_nego_check(np, req, cp);
4025 if (result) /* Not acceptable, reject it */
4026 goto reject_it;
4027 if (req) { /* Was a request, send response. */
4028 cp->nego_status = NS_SYNC;
4029 OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
4030 }
4031 else /* Was a response, we are done. */
4032 OUTL_DSP(np, SCRIPTA_BA(np, clrack));
4033 return;
4034
4035reject_it:
4036 OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
4037}
4038
4039/*
4040 * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
4041 */
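/*
 * For reference, the PPR extended message handled here is eight bytes
 * long: M_EXTENDED, 6, M_X_PPR_REQ, period, reserved (0), offset,
 * width exponent, protocol options (masked with PPR_OPT_MASK).  This
 * is only a restatement of the msgin[]/msgout[] indices used below.
 */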
4042static int
4043sym_ppr_nego_check(struct sym_hcb *np, int req, int target)
4044{
4045 struct sym_tcb *tp = &np->target[target];
4046 unsigned char fak, div;
4047 int dt, chg = 0;
4048
4049 unsigned char per = np->msgin[3];
4050 unsigned char ofs = np->msgin[5];
4051 unsigned char wide = np->msgin[6];
4052 unsigned char opts = np->msgin[7] & PPR_OPT_MASK;
4053
4054 if (DEBUG_FLAGS & DEBUG_NEGO) {
4055 sym_print_nego_msg(np, target, "ppr msgin", np->msgin);
4056 }
4057
4058 /*
4059 * Check values against our limits.
4060 */
4061 if (wide > np->maxwide) {
4062 chg = 1;
4063 wide = np->maxwide;
4064 }
4065 if (!wide || !(np->features & FE_U3EN))
4066 opts = 0;
4067
4068 if (opts != (np->msgin[7] & PPR_OPT_MASK))
4069 chg = 1;
4070
4071 dt = opts & PPR_OPT_DT;
4072
4073 if (ofs) {
4074 unsigned char maxoffs = dt ? np->maxoffs_dt : np->maxoffs;
4075 if (ofs > maxoffs) {
4076 chg = 1;
4077 ofs = maxoffs;
4078 }
4079 }
4080
4081 if (ofs) {
4082 unsigned char minsync = dt ? np->minsync_dt : np->minsync;
4083 if (per < minsync) {
4084 chg = 1;
4085 per = minsync;
4086 }
4087 }
4088
4089 /*
4090 * Get new chip synchronous parameters value.
4091 */
4092 div = fak = 0;
4093 if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
4094 goto reject_it;
4095
4096 /*
4097 * If it was an answer we want to change,
4098 * then it isn't acceptable. Reject it.
4099 */
4100 if (!req && chg)
4101 goto reject_it;
4102
4103 /*
4104 * Apply new values.
4105 */
4106 sym_setpprot(np, target, opts, ofs, per, wide, div, fak);
4107
4108 /*
4109 * It was an answer. We are done.
4110 */
4111 if (!req)
4112 return 0;
4113
4114 /*
4115 * It was a request. Prepare an answer message.
4116 */
4117 np->msgout[0] = M_EXTENDED;
4118 np->msgout[1] = 6;
4119 np->msgout[2] = M_X_PPR_REQ;
4120 np->msgout[3] = per;
4121 np->msgout[4] = 0;
4122 np->msgout[5] = ofs;
4123 np->msgout[6] = wide;
4124 np->msgout[7] = opts;
4125
4126 if (DEBUG_FLAGS & DEBUG_NEGO) {
4127 sym_print_nego_msg(np, target, "ppr msgout", np->msgout);
4128 }
4129
4130 np->msgin [0] = M_NOOP;
4131
4132 return 0;
4133
4134reject_it:
4135 sym_setpprot (np, target, 0, 0, 0, 0, 0, 0);
4136 /*
4137 * If it is a device response that should result in
4138 * ST, we may want to try a legacy negotiation later.
4139 */
4140 if (!req && !opts) {
4141 tp->tgoal.period = per;
4142 tp->tgoal.offset = ofs;
4143 tp->tgoal.width = wide;
4144 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
4145 tp->tgoal.check_nego = 1;
4146 }
4147 return -1;
4148}
4149
4150static void sym_ppr_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
4151{
4152 int req = 1;
4153 int result;
4154
4155 /*
4156 * Request or answer ?
4157 */
4158 if (INB(np, HS_PRT) == HS_NEGOTIATE) {
4159 OUTB(np, HS_PRT, HS_BUSY);
4160 if (cp->nego_status && cp->nego_status != NS_PPR)
4161 goto reject_it;
4162 req = 0;
4163 }
4164
4165 /*
4166 * Check and apply new values.
4167 */
4168 result = sym_ppr_nego_check(np, req, cp->target);
4169 if (result) /* Not acceptable, reject it */
4170 goto reject_it;
4171 if (req) { /* Was a request, send response. */
4172 cp->nego_status = NS_PPR;
4173 OUTL_DSP(np, SCRIPTB_BA(np, ppr_resp));
4174 }
4175 else /* Was a response, we are done. */
4176 OUTL_DSP(np, SCRIPTA_BA(np, clrack));
4177 return;
4178
4179reject_it:
4180 OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
4181}
4182
4183/*
4184 * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
4185 */
4186static int
4187sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
4188{
4189 int target = cp->target;
4190 u_char chg, wide;
4191
4192 if (DEBUG_FLAGS & DEBUG_NEGO) {
4193 sym_print_nego_msg(np, target, "wide msgin", np->msgin);
4194 }
4195
4196 /*
4197 * Get requested values.
4198 */
4199 chg = 0;
4200 wide = np->msgin[3];
4201
4202 /*
4203 * Check values against our limits.
4204 */
4205 if (wide > np->maxwide) {
4206 chg = 1;
4207 wide = np->maxwide;
4208 }
4209
4210 if (DEBUG_FLAGS & DEBUG_NEGO) {
4211 sym_print_addr(cp->cmd, "wdtr: wide=%d chg=%d.\n",
4212 wide, chg);
4213 }
4214
4215 /*
4216 * If it was an answer we want to change,
4217 * then it isn't acceptable. Reject it.
4218 */
4219 if (!req && chg)
4220 goto reject_it;
4221
4222 /*
4223 * Apply new values.
4224 */
4225 sym_setwide (np, target, wide);
4226
4227 /*
4228 * It was an answer. We are done.
4229 */
4230 if (!req)
4231 return 0;
4232
4233 /*
4234 * It was a request. Prepare an answer message.
4235 */
4236 np->msgout[0] = M_EXTENDED;
4237 np->msgout[1] = 2;
4238 np->msgout[2] = M_X_WIDE_REQ;
4239 np->msgout[3] = wide;
4240
4241 np->msgin [0] = M_NOOP;
4242
4243 if (DEBUG_FLAGS & DEBUG_NEGO) {
4244 sym_print_nego_msg(np, target, "wide msgout", np->msgout);
4245 }
4246
4247 return 0;
4248
4249reject_it:
4250 return -1;
4251}
4252
4253static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
4254{
4255 int req = 1;
4256 int result;
4257
4258 /*
4259 * Request or answer ?
4260 */
4261 if (INB(np, HS_PRT) == HS_NEGOTIATE) {
4262 OUTB(np, HS_PRT, HS_BUSY);
4263 if (cp->nego_status && cp->nego_status != NS_WIDE)
4264 goto reject_it;
4265 req = 0;
4266 }
4267
4268 /*
4269 * Check and apply new values.
4270 */
4271 result = sym_wide_nego_check(np, req, cp);
4272 if (result) /* Not acceptable, reject it */
4273 goto reject_it;
4274 if (req) { /* Was a request, send response. */
4275 cp->nego_status = NS_WIDE;
4276 OUTL_DSP(np, SCRIPTB_BA(np, wdtr_resp));
4277 } else { /* Was a response. */
4278 /*
4279 * Negotiate for SYNC immediately after WIDE response.
4280 * This allows to negotiate for both WIDE and SYNC on
4281 * a single SCSI command (Suggested by Justin Gibbs).
4282 */
4283 if (tp->tgoal.offset) {
4284 np->msgout[0] = M_EXTENDED;
4285 np->msgout[1] = 3;
4286 np->msgout[2] = M_X_SYNC_REQ;
4287 np->msgout[3] = tp->tgoal.period;
4288 np->msgout[4] = tp->tgoal.offset;
4289
4290 if (DEBUG_FLAGS & DEBUG_NEGO) {
4291 sym_print_nego_msg(np, cp->target,
4292 "sync msgout", np->msgout);
4293 }
4294
4295 cp->nego_status = NS_SYNC;
4296 OUTB(np, HS_PRT, HS_NEGOTIATE);
4297 OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
4298 return;
4299 } else
4300 OUTL_DSP(np, SCRIPTA_BA(np, clrack));
4301 }
4302
4303 return;
4304
4305reject_it:
4306 OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
4307}
4308
4309/*
4310 * Reset DT, SYNC or WIDE to default settings.
4311 *
4312 * Called when a negotiation does not succeed either
4313 * on rejection or on protocol error.
4314 *
4315 * A target that understands a PPR message should never
4316 * reject it, and problems with it are very unlikely.
4317 * So, if a PPR does cause problems, we may just want to
4318 * try a legacy negotiation later.
4319 */
4320static void sym_nego_default(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
4321{
4322 switch (cp->nego_status) {
4323 case NS_PPR:
4324#if 0
4325 sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0);
4326#else
4327 if (tp->tgoal.period < np->minsync)
4328 tp->tgoal.period = np->minsync;
4329 if (tp->tgoal.offset > np->maxoffs)
4330 tp->tgoal.offset = np->maxoffs;
4331 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
4332 tp->tgoal.check_nego = 1;
4333#endif
4334 break;
4335 case NS_SYNC:
4336 sym_setsync (np, cp->target, 0, 0, 0, 0);
4337 break;
4338 case NS_WIDE:
4339 sym_setwide (np, cp->target, 0);
4340 break;
4341 }
4342 np->msgin [0] = M_NOOP;
4343 np->msgout[0] = M_NOOP;
4344 cp->nego_status = 0;
4345}
4346
4347/*
4348 * chip handler for MESSAGE REJECT received in response to
4349 * PPR, WIDE or SYNCHRONOUS negotiation.
4350 */
4351static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
4352{
4353 sym_nego_default(np, tp, cp);
4354 OUTB(np, HS_PRT, HS_BUSY);
4355}
4356
4357/*
4358 * chip exception handler for programmed interrupts.
4359 */
4360static void sym_int_sir (struct sym_hcb *np)
4361{
4362 u_char num = INB(np, nc_dsps);
4363 u32 dsa = INL(np, nc_dsa);
4364 struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
4365 u_char target = INB(np, nc_sdid) & 0x0f;
4366 struct sym_tcb *tp = &np->target[target];
4367 int tmp;
4368
4369 if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);
4370
4371 switch (num) {
4372#if SYM_CONF_DMA_ADDRESSING_MODE == 2
4373 /*
4374 * SCRIPTS tell us that we may have to update
4375 * 64 bit DMA segment registers.
4376 */
4377 case SIR_DMAP_DIRTY:
4378 sym_update_dmap_regs(np);
4379 goto out;
4380#endif
4381 /*
4382 * Command has been completed with error condition
4383 * or has been auto-sensed.
4384 */
4385 case SIR_COMPLETE_ERROR:
4386 sym_complete_error(np, cp);
4387 return;
4388 /*
4389 * The C code is currently trying to recover from something.
4390	 * Typically, the user wants to abort some command.
4391 */
4392 case SIR_SCRIPT_STOPPED:
4393 case SIR_TARGET_SELECTED:
4394 case SIR_ABORT_SENT:
4395 sym_sir_task_recovery(np, num);
4396 return;
4397 /*
4398 * The device didn't go to MSG OUT phase after having
4399	 * been selected with ATN. We do not want to handle
4400 * that.
4401 */
4402 case SIR_SEL_ATN_NO_MSG_OUT:
4403 printf ("%s:%d: No MSG OUT phase after selection with ATN.\n",
4404 sym_name (np), target);
4405 goto out_stuck;
4406 /*
4407 * The device didn't switch to MSG IN phase after
4408	 * having reselected the initiator.
4409 */
4410 case SIR_RESEL_NO_MSG_IN:
4411 printf ("%s:%d: No MSG IN phase after reselection.\n",
4412 sym_name (np), target);
4413 goto out_stuck;
4414 /*
4415 * After reselection, the device sent a message that wasn't
4416 * an IDENTIFY.
4417 */
4418 case SIR_RESEL_NO_IDENTIFY:
4419 printf ("%s:%d: No IDENTIFY after reselection.\n",
4420 sym_name (np), target);
4421 goto out_stuck;
4422 /*
4423	 * The device reselected a LUN we do not know about.
4424 */
4425 case SIR_RESEL_BAD_LUN:
4426 np->msgout[0] = M_RESET;
4427 goto out;
4428 /*
4429 * The device reselected for an untagged nexus and we
4430 * haven't any.
4431 */
4432 case SIR_RESEL_BAD_I_T_L:
4433 np->msgout[0] = M_ABORT;
4434 goto out;
4435 /*
4436	 * The device reselected for a tagged nexus that we do not
4437 * have.
4438 */
4439 case SIR_RESEL_BAD_I_T_L_Q:
4440 np->msgout[0] = M_ABORT_TAG;
4441 goto out;
4442 /*
4443 * The SCRIPTS let us know that the device has grabbed
4444 * our message and will abort the job.
4445 */
4446 case SIR_RESEL_ABORTED:
4447 np->lastmsg = np->msgout[0];
4448 np->msgout[0] = M_NOOP;
4449 printf ("%s:%d: message %x sent on bad reselection.\n",
4450 sym_name (np), target, np->lastmsg);
4451 goto out;
4452 /*
4453 * The SCRIPTS let us know that a message has been
4454 * successfully sent to the device.
4455 */
4456 case SIR_MSG_OUT_DONE:
4457 np->lastmsg = np->msgout[0];
4458 np->msgout[0] = M_NOOP;
4459		/* Should we really care about that? */
4460 if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
4461 if (cp) {
4462 cp->xerr_status &= ~XE_PARITY_ERR;
4463 if (!cp->xerr_status)
4464 OUTOFFB(np, HF_PRT, HF_EXT_ERR);
4465 }
4466 }
4467 goto out;
4468 /*
4469 * The device didn't send a GOOD SCSI status.
4470	 * We may have some work to do before allowing
4471	 * the SCRIPTS processor to continue.
4472 */
4473 case SIR_BAD_SCSI_STATUS:
4474 if (!cp)
4475 goto out;
4476 sym_sir_bad_scsi_status(np, num, cp);
4477 return;
4478 /*
4479 * We are asked by the SCRIPTS to prepare a
4480 * REJECT message.
4481 */
4482 case SIR_REJECT_TO_SEND:
4483 sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
4484 np->msgout[0] = M_REJECT;
4485 goto out;
4486 /*
4487 * We have been ODD at the end of a DATA IN
4488	 * transfer and the device didn't send an
4489 * IGNORE WIDE RESIDUE message.
4490 * It is a data overrun condition.
4491 */
4492 case SIR_SWIDE_OVERRUN:
4493 if (cp) {
4494 OUTONB(np, HF_PRT, HF_EXT_ERR);
4495 cp->xerr_status |= XE_SWIDE_OVRUN;
4496 }
4497 goto out;
4498 /*
4499 * We have been ODD at the end of a DATA OUT
4500 * transfer.
4501 * It is a data underrun condition.
4502 */
4503 case SIR_SODL_UNDERRUN:
4504 if (cp) {
4505 OUTONB(np, HF_PRT, HF_EXT_ERR);
4506 cp->xerr_status |= XE_SODL_UNRUN;
4507 }
4508 goto out;
4509 /*
4510	 * The device wants us to transfer more data than
4511 * expected or in the wrong direction.
4512 * The number of extra bytes is in scratcha.
4513 * It is a data overrun condition.
4514 */
4515 case SIR_DATA_OVERRUN:
4516 if (cp) {
4517 OUTONB(np, HF_PRT, HF_EXT_ERR);
4518 cp->xerr_status |= XE_EXTRA_DATA;
4519 cp->extra_bytes += INL(np, nc_scratcha);
4520 }
4521 goto out;
4522 /*
4523 * The device switched to an illegal phase (4/5).
4524 */
4525 case SIR_BAD_PHASE:
4526 if (cp) {
4527 OUTONB(np, HF_PRT, HF_EXT_ERR);
4528 cp->xerr_status |= XE_BAD_PHASE;
4529 }
4530 goto out;
4531 /*
4532 * We received a message.
4533 */
4534 case SIR_MSG_RECEIVED:
4535 if (!cp)
4536 goto out_stuck;
4537 switch (np->msgin [0]) {
4538 /*
4539 * We received an extended message.
4540 * We handle MODIFY DATA POINTER, SDTR, WDTR
4541 * and reject all other extended messages.
4542 */
4543 case M_EXTENDED:
4544 switch (np->msgin [2]) {
4545 case M_X_MODIFY_DP:
4546 if (DEBUG_FLAGS & DEBUG_POINTER)
4547 sym_print_msg(cp,"modify DP",np->msgin);
4548 tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
4549 (np->msgin[5]<<8) + (np->msgin[6]);
4550 sym_modify_dp(np, tp, cp, tmp);
4551 return;
4552 case M_X_SYNC_REQ:
4553 sym_sync_nego(np, tp, cp);
4554 return;
4555 case M_X_PPR_REQ:
4556 sym_ppr_nego(np, tp, cp);
4557 return;
4558 case M_X_WIDE_REQ:
4559 sym_wide_nego(np, tp, cp);
4560 return;
4561 default:
4562 goto out_reject;
4563 }
4564 break;
4565 /*
4566	 * We received a one- or two-byte message not handled by SCRIPTS.
4567 * We are only expecting MESSAGE REJECT and IGNORE WIDE
4568 * RESIDUE messages that haven't been anticipated by
4569 * SCRIPTS on SWIDE full condition. Unanticipated IGNORE
4570 * WIDE RESIDUE messages are aliased as MODIFY DP (-1).
4571 */
4572 case M_IGN_RESIDUE:
4573 if (DEBUG_FLAGS & DEBUG_POINTER)
4574 sym_print_msg(cp,"ign wide residue", np->msgin);
4575 if (cp->host_flags & HF_SENSE)
4576 OUTL_DSP(np, SCRIPTA_BA(np, clrack));
4577 else
4578 sym_modify_dp(np, tp, cp, -1);
4579 return;
4580 case M_REJECT:
4581 if (INB(np, HS_PRT) == HS_NEGOTIATE)
4582 sym_nego_rejected(np, tp, cp);
4583 else {
4584 sym_print_addr(cp->cmd,
4585 "M_REJECT received (%x:%x).\n",
4586 scr_to_cpu(np->lastmsg), np->msgout[0]);
4587 }
4588 goto out_clrack;
4589 break;
4590 default:
4591 goto out_reject;
4592 }
4593 break;
4594 /*
4595 * We received an unknown message.
4596 * Ignore all MSG IN phases and reject it.
4597 */
4598 case SIR_MSG_WEIRD:
4599 sym_print_msg(cp, "WEIRD message received", np->msgin);
4600 OUTL_DSP(np, SCRIPTB_BA(np, msg_weird));
4601 return;
4602 /*
4603 * Negotiation failed.
4604 * Target does not send us the reply.
4605 * Remove the HS_NEGOTIATE status.
4606 */
4607 case SIR_NEGO_FAILED:
4608 OUTB(np, HS_PRT, HS_BUSY);
4609 /*
4610 * Negotiation failed.
4611 * Target does not want answer message.
4612 */
4613 case SIR_NEGO_PROTO:
4614 sym_nego_default(np, tp, cp);
4615 goto out;
4616 }
4617
4618out:
4619 OUTONB_STD();
4620 return;
4621out_reject:
4622 OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
4623 return;
4624out_clrack:
4625 OUTL_DSP(np, SCRIPTA_BA(np, clrack));
4626 return;
4627out_stuck:
4628 return;
4629}
4630
4631/*
4632 * Acquire a control block
4633 */
4634struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order)
4635{
4636 u_char tn = cmd->device->id;
4637 u_char ln = cmd->device->lun;
4638 struct sym_tcb *tp = &np->target[tn];
4639 struct sym_lcb *lp = sym_lp(tp, ln);
4640 u_short tag = NO_TAG;
4641 SYM_QUEHEAD *qp;
4642 struct sym_ccb *cp = NULL;
4643
4644 /*
4645 * Look for a free CCB
4646 */
4647 if (sym_que_empty(&np->free_ccbq))
4648 sym_alloc_ccb(np);
4649 qp = sym_remque_head(&np->free_ccbq);
4650 if (!qp)
4651 goto out;
4652 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
4653
4654#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
4655 /*
4656 * If the LCB is not yet available and the LUN
4657 * has been probed ok, try to allocate the LCB.
4658 */
4659 if (!lp && sym_is_bit(tp->lun_map, ln)) {
4660 lp = sym_alloc_lcb(np, tn, ln);
4661 if (!lp)
4662 goto out_free;
4663 }
4664#endif
4665
4666 /*
4667 * If the LCB is not available here, then the
4668	 * logical unit is not yet discovered. For such
4669	 * units, only accept 1 SCSI IO per logical unit,
4670 * since we cannot allow disconnections.
4671 */
4672 if (!lp) {
4673 if (!sym_is_bit(tp->busy0_map, ln))
4674 sym_set_bit(tp->busy0_map, ln);
4675 else
4676 goto out_free;
4677 } else {
4678 /*
4679 * If we have been asked for a tagged command.
4680 */
4681 if (tag_order) {
4682 /*
4683 * Debugging purpose.
4684 */
4685#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
4686 assert(lp->busy_itl == 0);
4687#endif
4688 /*
4689			 * Allocate resources for tags if not done yet.
4690 */
4691 if (!lp->cb_tags) {
4692 sym_alloc_lcb_tags(np, tn, ln);
4693 if (!lp->cb_tags)
4694 goto out_free;
4695 }
4696 /*
4697 * Get a tag for this SCSI IO and set up
4698 * the CCB bus address for reselection,
4699 * and count it for this LUN.
4700 * Toggle reselect path to tagged.
4701 */
4702 if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
4703 tag = lp->cb_tags[lp->ia_tag];
4704 if (++lp->ia_tag == SYM_CONF_MAX_TASK)
4705 lp->ia_tag = 0;
4706 ++lp->busy_itlq;
4707#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
4708 lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
4709 lp->head.resel_sa =
4710 cpu_to_scr(SCRIPTA_BA(np, resel_tag));
4711#endif
4712#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
4713 cp->tags_si = lp->tags_si;
4714 ++lp->tags_sum[cp->tags_si];
4715 ++lp->tags_since;
4716#endif
4717 }
4718 else
4719 goto out_free;
4720 }
4721 /*
4722 * This command will not be tagged.
4723 * If we already have either a tagged or untagged
4724 * one, refuse to overlap this untagged one.
4725 */
4726 else {
4727 /*
4728 * Debugging purpose.
4729 */
4730#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
4731 assert(lp->busy_itl == 0 && lp->busy_itlq == 0);
4732#endif
4733 /*
4734 * Count this nexus for this LUN.
4735 * Set up the CCB bus address for reselection.
4736 * Toggle reselect path to untagged.
4737 */
4738 ++lp->busy_itl;
4739#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
4740 if (lp->busy_itl == 1) {
4741 lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
4742 lp->head.resel_sa =
4743 cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
4744 }
4745 else
4746 goto out_free;
4747#endif
4748 }
4749 }
4750 /*
4751 * Put the CCB into the busy queue.
4752 */
4753 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
4754#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
4755 if (lp) {
4756 sym_remque(&cp->link2_ccbq);
4757 sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq);
4758 }
4759
4760#endif
4761 /*
4762	 * Remember all the information needed to free this CCB.
4763 */
4764 cp->to_abort = 0;
4765 cp->tag = tag;
4766 cp->order = tag_order;
4767 cp->target = tn;
4768 cp->lun = ln;
4769
4770 if (DEBUG_FLAGS & DEBUG_TAGS) {
4771 sym_print_addr(cmd, "ccb @%p using tag %d.\n", cp, tag);
4772 }
4773
4774out:
4775 return cp;
4776out_free:
4777 sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
4778 return NULL;
4779}
4780
4781/*
4782 * Release one control block
4783 */
4784void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp)
4785{
4786 struct sym_tcb *tp = &np->target[cp->target];
4787 struct sym_lcb *lp = sym_lp(tp, cp->lun);
4788
4789 if (DEBUG_FLAGS & DEBUG_TAGS) {
4790 sym_print_addr(cp->cmd, "ccb @%p freeing tag %d.\n",
4791 cp, cp->tag);
4792 }
4793
4794 /*
4795 * If LCB available,
4796 */
4797 if (lp) {
4798 /*
4799		 * If tagged, release the tag and set the reselect path.
4800 */
4801 if (cp->tag != NO_TAG) {
4802#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
4803 --lp->tags_sum[cp->tags_si];
4804#endif
4805 /*
4806 * Free the tag value.
4807 */
4808 lp->cb_tags[lp->if_tag] = cp->tag;
4809 if (++lp->if_tag == SYM_CONF_MAX_TASK)
4810 lp->if_tag = 0;
4811 /*
4812 * Make the reselect path invalid,
4813 * and uncount this CCB.
4814 */
4815 lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
4816 --lp->busy_itlq;
4817 } else { /* Untagged */
4818 /*
4819 * Make the reselect path invalid,
4820 * and uncount this CCB.
4821 */
4822 lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
4823 --lp->busy_itl;
4824 }
4825 /*
4826 * If no JOB active, make the LUN reselect path invalid.
4827 */
4828 if (lp->busy_itlq == 0 && lp->busy_itl == 0)
4829 lp->head.resel_sa =
4830 cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
4831 }
4832 /*
4833 * Otherwise, we only accept 1 IO per LUN.
4834 * Clear the bit that keeps track of this IO.
4835 */
4836 else
4837 sym_clr_bit(tp->busy0_map, cp->lun);
4838
4839 /*
4840	 * We do not queue more than 1 ccb per target
4841 * with negotiation at any time. If this ccb was
4842 * used for negotiation, clear this info in the tcb.
4843 */
4844 if (cp == tp->nego_cp)
4845 tp->nego_cp = NULL;
4846
4847#ifdef SYM_CONF_IARB_SUPPORT
4848 /*
4849 * If we just complete the last queued CCB,
4850 * clear this info that is no longer relevant.
4851 */
4852 if (cp == np->last_cp)
4853 np->last_cp = 0;
4854#endif
4855
4856 /*
4857 * Make this CCB available.
4858 */
4859 cp->cmd = NULL;
4860 cp->host_status = HS_IDLE;
4861 sym_remque(&cp->link_ccbq);
4862 sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
4863
4864#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
4865 if (lp) {
4866 sym_remque(&cp->link2_ccbq);
4867 sym_insque_tail(&cp->link2_ccbq, &np->dummy_ccbq);
4868 if (cp->started) {
4869 if (cp->tag != NO_TAG)
4870 --lp->started_tags;
4871 else
4872 --lp->started_no_tag;
4873 }
4874 }
4875 cp->started = 0;
4876#endif
4877}
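/*
 * Note on the tag bookkeeping shared by sym_get_ccb() and
 * sym_free_ccb() above: lp->cb_tags[] behaves as a ring of free tag
 * numbers, ia_tag being the allocation index and if_tag the release
 * index, both wrapping at SYM_CONF_MAX_TASK.  As a purely hypothetical
 * snapshot, with SYM_CONF_MAX_TASK == 16, ia_tag == 7 and if_tag == 4,
 * three tags are in flight, namely the values that were read from
 * cb_tags[4..6] when they were handed out.
 */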
4878
4879/*
4880 * Allocate a CCB from memory and initialize its fixed part.
4881 */
4882static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np)
4883{
4884 struct sym_ccb *cp = NULL;
4885 int hcode;
4886
4887 /*
4888	 * Prevent allocating more CCBs than we can
4889 * queue to the controller.
4890 */
4891 if (np->actccbs >= SYM_CONF_MAX_START)
4892 return NULL;
4893
4894 /*
4895 * Allocate memory for this CCB.
4896 */
4897 cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
4898 if (!cp)
4899 goto out_free;
4900
4901 /*
4902 * Count it.
4903 */
4904 np->actccbs++;
4905
4906 /*
4907 * Compute the bus address of this ccb.
4908 */
4909 cp->ccb_ba = vtobus(cp);
4910
4911 /*
4912 * Insert this ccb into the hashed list.
4913 */
4914 hcode = CCB_HASH_CODE(cp->ccb_ba);
4915 cp->link_ccbh = np->ccbh[hcode];
4916 np->ccbh[hcode] = cp;
4917
4918 /*
4919	 * Initialize the start and restart actions.
4920 */
4921 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, idle));
4922 cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
4923
4924 /*
4925	 * Initialize some other fields.
4926 */
4927 cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));
4928
4929 /*
4930 * Chain into free ccb queue.
4931 */
4932 sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
4933
4934 /*
4935	 * Chain into optional lists.
4936 */
4937#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
4938 sym_insque_head(&cp->link2_ccbq, &np->dummy_ccbq);
4939#endif
4940 return cp;
4941out_free:
4942 if (cp)
4943 sym_mfree_dma(cp, sizeof(*cp), "CCB");
4944 return NULL;
4945}
4946
4947/*
4948 * Look up a CCB from a DSA value.
4949 */
4950static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa)
4951{
4952 int hcode;
4953 struct sym_ccb *cp;
4954
4955 hcode = CCB_HASH_CODE(dsa);
4956 cp = np->ccbh[hcode];
4957 while (cp) {
4958 if (cp->ccb_ba == dsa)
4959 break;
4960 cp = cp->link_ccbh;
4961 }
4962
4963 return cp;
4964}
4965
4966/*
4967 * Target control block initialisation.
4968 * Nothing important to do at the moment.
4969 */
4970static void sym_init_tcb (struct sym_hcb *np, u_char tn)
4971{
4972#if 0 /* Hmmm... this checking looks paranoid. */
4973 /*
4974 * Check some alignments required by the chip.
4975 */
4976 assert (((offsetof(struct sym_reg, nc_sxfer) ^
4977 offsetof(struct sym_tcb, head.sval)) &3) == 0);
4978 assert (((offsetof(struct sym_reg, nc_scntl3) ^
4979 offsetof(struct sym_tcb, head.wval)) &3) == 0);
4980#endif
4981}
4982
4983/*
4984 * Lun control block allocation and initialization.
4985 */
4986struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
4987{
4988 struct sym_tcb *tp = &np->target[tn];
4989 struct sym_lcb *lp = sym_lp(tp, ln);
4990
4991 /*
4992 * Already done, just return.
4993 */
4994 if (lp)
4995 return lp;
4996
4997 /*
4998	 * Do not allow LUN control block
4999	 * allocation for LUNs that have not been probed.
5000 */
5001 if (!sym_is_bit(tp->lun_map, ln))
5002 return NULL;
5003
5004 /*
5005	 * Initialize the target control block if not done yet.
5006 */
5007 sym_init_tcb (np, tn);
5008
5009 /*
5010 * Allocate the LCB bus address array.
5011 * Compute the bus address of this table.
5012 */
5013 if (ln && !tp->luntbl) {
5014 int i;
5015
5016 tp->luntbl = sym_calloc_dma(256, "LUNTBL");
5017 if (!tp->luntbl)
5018 goto fail;
5019 for (i = 0 ; i < 64 ; i++)
5020 tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
5021 tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
5022 }
5023
5024 /*
5025 * Allocate the table of pointers for LUN(s) > 0, if needed.
5026 */
5027 if (ln && !tp->lunmp) {
5028 tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
5029 GFP_KERNEL);
5030 if (!tp->lunmp)
5031 goto fail;
5032 }
5033
5034 /*
5035 * Allocate the lcb.
5036 * Make it available to the chip.
5037 */
5038 lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
5039 if (!lp)
5040 goto fail;
5041 if (ln) {
5042 tp->lunmp[ln] = lp;
5043 tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
5044 }
5045 else {
5046 tp->lun0p = lp;
5047 tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
5048 }
5049
5050 /*
5051 * Let the itl task point to error handling.
5052 */
5053 lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
5054
5055 /*
5056 * Set the reselect pattern to our default. :)
5057 */
5058 lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
5059
5060 /*
5061 * Set user capabilities.
5062 */
5063 lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
5064
5065#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
5066 /*
5067 * Initialize device queueing.
5068 */
5069 sym_que_init(&lp->waiting_ccbq);
5070 sym_que_init(&lp->started_ccbq);
5071 lp->started_max = SYM_CONF_MAX_TASK;
5072 lp->started_limit = SYM_CONF_MAX_TASK;
5073#endif
5074 /*
5075 * If we are busy, count the IO.
5076 */
5077 if (sym_is_bit(tp->busy0_map, ln)) {
5078 lp->busy_itl = 1;
5079 sym_clr_bit(tp->busy0_map, ln);
5080 }
5081fail:
5082 return lp;
5083}
5084
5085/*
5086 * Allocate LCB resources for tagged command queuing.
5087 */
5088static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln)
5089{
5090 struct sym_tcb *tp = &np->target[tn];
5091 struct sym_lcb *lp = sym_lp(tp, ln);
5092 int i;
5093
5094 /*
5095 * If LCB not available, try to allocate it.
5096 */
5097 if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
5098 goto fail;
5099
5100 /*
5101	 * Allocate the task table and the tag allocation
5102 * circular buffer. We want both or none.
5103 */
5104 lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
5105 if (!lp->itlq_tbl)
5106 goto fail;
5107 lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_KERNEL);
5108 if (!lp->cb_tags) {
5109 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
5110 lp->itlq_tbl = NULL;
5111 goto fail;
5112 }
5113
5114 /*
5115 * Initialize the task table with invalid entries.
5116 */
5117 for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
5118 lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);
5119
5120 /*
5121 * Fill up the tag buffer with tag numbers.
5122 */
5123 for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
5124 lp->cb_tags[i] = i;
5125
5126 /*
5127 * Make the task table available to SCRIPTS,
5128	 * and accept tagged commands now.
5129 */
5130 lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
5131
5132 return;
5133fail:
5134 return;
5135}
5136
5137/*
5138 * Queue a SCSI IO to the controller.
5139 */
5140int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
5141{
5142 struct scsi_device *sdev = cmd->device;
5143 struct sym_tcb *tp;
5144 struct sym_lcb *lp;
5145 u_char *msgptr;
5146 u_int msglen;
5147 int can_disconnect;
5148
5149 /*
5150 * Keep track of the IO in our CCB.
5151 */
5152 cp->cmd = cmd;
5153
5154 /*
5155 * Retrieve the target descriptor.
5156 */
5157 tp = &np->target[cp->target];
5158
5159 /*
5160 * Retrieve the lun descriptor.
5161 */
5162 lp = sym_lp(tp, sdev->lun);
5163
5164 can_disconnect = (cp->tag != NO_TAG) ||
5165 (lp && (lp->curr_flags & SYM_DISC_ENABLED));
5166
5167 msgptr = cp->scsi_smsg;
5168 msglen = 0;
5169 msgptr[msglen++] = IDENTIFY(can_disconnect, sdev->lun);
5170
5171 /*
5172 * Build the tag message if present.
5173 */
5174 if (cp->tag != NO_TAG) {
5175 u_char order = cp->order;
5176
5177 switch(order) {
5178 case M_ORDERED_TAG:
5179 break;
5180 case M_HEAD_TAG:
5181 break;
5182 default:
5183 order = M_SIMPLE_TAG;
5184 }
5185#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
5186 /*
5187 * Avoid too much reordering of SCSI commands.
5188 * The algorithm tries to prevent completion of any
5189 * tagged command from being delayed against more
5190 * than 3 times the max number of queued commands.
5191 */
5192 if (lp && lp->tags_since > 3*SYM_CONF_MAX_TAG) {
5193 lp->tags_si = !(lp->tags_si);
5194 if (lp->tags_sum[lp->tags_si]) {
5195 order = M_ORDERED_TAG;
5196 if ((DEBUG_FLAGS & DEBUG_TAGS)||sym_verbose>1) {
5197 sym_print_addr(cmd,
5198 "ordered tag forced.\n");
5199 }
5200 }
5201 lp->tags_since = 0;
5202 }
5203#endif
5204 msgptr[msglen++] = order;
5205
5206 /*
5207		 * For fewer than 128 tags, actual tags are numbered
5208		 * 1, 3, 5, ..., 2*MAXTAGS+1, since we may have to deal
5209		 * with devices that have problems with #TAG 0 or with
5210		 * too large #TAG numbers. For more tags (up to 256),
5211		 * we use our tag number directly.
5212 */
5213#if SYM_CONF_MAX_TASK > (512/4)
5214 msgptr[msglen++] = cp->tag;
5215#else
5216 msgptr[msglen++] = (cp->tag << 1) + 1;
5217#endif
5218 }
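	/*
	 * Example of the encoding above (illustrative values): with the
	 * small-tag scheme, internal tag 0 goes on the wire as 1, tag 1
	 * as 3 and tag 7 as 15, i.e. (tag << 1) + 1, which avoids tag 0
	 * and keeps the values odd; otherwise the tag is sent unchanged.
	 */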
5219
5220 /*
5221 * Build a negotiation message if needed.
5222 * (nego_status is filled by sym_prepare_nego())
5223 */
5224 cp->nego_status = 0;
5225 if (tp->tgoal.check_nego && !tp->nego_cp && lp) {
5226 msglen += sym_prepare_nego(np, cp, msgptr + msglen);
5227 }
5228
5229 /*
5230 * Startqueue
5231 */
5232 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select));
5233 cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa));
5234
5235 /*
5236 * select
5237 */
5238 cp->phys.select.sel_id = cp->target;
5239 cp->phys.select.sel_scntl3 = tp->head.wval;
5240 cp->phys.select.sel_sxfer = tp->head.sval;
5241 cp->phys.select.sel_scntl4 = tp->head.uval;
5242
5243 /*
5244 * message
5245 */
5246 cp->phys.smsg.addr = cpu_to_scr(CCB_BA(cp, scsi_smsg));
5247 cp->phys.smsg.size = cpu_to_scr(msglen);
5248
5249 /*
5250 * status
5251 */
5252 cp->host_xflags = 0;
5253 cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
5254 cp->ssss_status = S_ILLEGAL;
5255 cp->xerr_status = 0;
5256 cp->host_flags = 0;
5257 cp->extra_bytes = 0;
5258
5259 /*
5260 * extreme data pointer.
5261 * shall be positive, so -1 is lower than lowest.:)
5262 */
5263 cp->ext_sg = -1;
5264 cp->ext_ofs = 0;
5265
5266 /*
5267 * Build the CDB and DATA descriptor block
5268 * and start the IO.
5269 */
5270 return sym_setup_data_and_start(np, cmd, cp);
5271}
5272
5273/*
5274 * Reset a SCSI target (all LUNs of this target).
5275 */
5276int sym_reset_scsi_target(struct sym_hcb *np, int target)
5277{
5278 struct sym_tcb *tp;
5279
5280 if (target == np->myaddr || (u_int)target >= SYM_CONF_MAX_TARGET)
5281 return -1;
5282
5283 tp = &np->target[target];
5284 tp->to_reset = 1;
5285
5286 np->istat_sem = SEM;
5287 OUTB(np, nc_istat, SIGP|SEM);
5288
5289 return 0;
5290}
5291
5292/*
5293 * Abort a SCSI IO.
5294 */
5295static int sym_abort_ccb(struct sym_hcb *np, struct sym_ccb *cp, int timed_out)
5296{
5297 /*
5298 * Check that the IO is active.
5299 */
5300 if (!cp || !cp->host_status || cp->host_status == HS_WAIT)
5301 return -1;
5302
5303 /*
5304 * If a previous abort didn't succeed in time,
5305 * perform a BUS reset.
5306 */
5307 if (cp->to_abort) {
5308 sym_reset_scsi_bus(np, 1);
5309 return 0;
5310 }
5311
5312 /*
5313	 * Mark the CCB for abort and allow some time for it to complete.
5314 */
5315 cp->to_abort = timed_out ? 2 : 1;
5316
5317 /*
5318 * Tell the SCRIPTS processor to stop and synchronize with us.
5319 */
5320 np->istat_sem = SEM;
5321 OUTB(np, nc_istat, SIGP|SEM);
5322 return 0;
5323}
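/*
 * In both sym_reset_scsi_target() and sym_abort_ccb() above, writing
 * SIGP together with SEM to the ISTAT register (with istat_sem noted
 * in the host data) is how the C code asks the SCRIPTS processor to
 * stop and synchronize; the recovery work itself is then done by
 * sym_sir_task_recovery() earlier in this file, driven by the
 * to_reset/to_abort flags set here.
 */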
5324
5325int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, int timed_out)
5326{
5327 struct sym_ccb *cp;
5328 SYM_QUEHEAD *qp;
5329
5330 /*
5331 * Look up our CCB control block.
5332 */
5333 cp = NULL;
5334 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
5335 struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
5336 if (cp2->cmd == cmd) {
5337 cp = cp2;
5338 break;
5339 }
5340 }
5341
5342 return sym_abort_ccb(np, cp, timed_out);
5343}
5344
5345/*
5346 * Complete execution of a SCSI command with extended
5347 * error, SCSI status error, or having been auto-sensed.
5348 *
5349 * The SCRIPTS processor is not running there, so we
5350 * can safely access IO registers and remove JOBs from
5351 * the START queue.
5352 * SCRATCHA is assumed to have been loaded with STARTPOS
5353 * before the SCRIPTS called the C code.
5354 */
5355void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp)
5356{
5357 struct scsi_device *sdev;
5358 struct scsi_cmnd *cmd;
5359 struct sym_tcb *tp;
5360 struct sym_lcb *lp;
5361 int resid;
5362 int i;
5363
5364 /*
5365 * Paranoid check. :)
5366 */
5367 if (!cp || !cp->cmd)
5368 return;
5369
5370 cmd = cp->cmd;
5371 sdev = cmd->device;
5372 if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
5373 dev_info(&sdev->sdev_gendev, "CCB=%p STAT=%x/%x/%x\n", cp,
5374 cp->host_status, cp->ssss_status, cp->host_flags);
5375 }
5376
5377 /*
5378 * Get target and lun pointers.
5379 */
5380 tp = &np->target[cp->target];
5381 lp = sym_lp(tp, sdev->lun);
5382
5383 /*
5384 * Check for extended errors.
5385 */
5386 if (cp->xerr_status) {
5387 if (sym_verbose)
5388 sym_print_xerr(cmd, cp->xerr_status);
5389 if (cp->host_status == HS_COMPLETE)
5390 cp->host_status = HS_COMP_ERR;
5391 }
5392
5393 /*
5394 * Calculate the residual.
5395 */
5396 resid = sym_compute_residual(np, cp);
5397
5398 if (!SYM_SETUP_RESIDUAL_SUPPORT) {/* If user does not want residuals */
5399 resid = 0; /* throw them away. :) */
5400 cp->sv_resid = 0;
5401 }
5402#ifdef DEBUG_2_0_X
5403if (resid)
5404 printf("XXXX RESID= %d - 0x%x\n", resid, resid);
5405#endif
5406
5407 /*
5408 * Dequeue all queued CCBs for that device
5409 * not yet started by SCRIPTS.
5410 */
5411 i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
5412 i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1);
5413
5414 /*
5415 * Restart the SCRIPTS processor.
5416 */
5417 OUTL_DSP(np, SCRIPTA_BA(np, start));
5418
5419#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
5420 if (cp->host_status == HS_COMPLETE &&
5421 cp->ssss_status == S_QUEUE_FULL) {
5422 if (!lp || lp->started_tags - i < 2)
5423 goto weirdness;
5424 /*
5425 * Decrease queue depth as needed.
5426 */
5427 lp->started_max = lp->started_tags - i - 1;
5428 lp->num_sgood = 0;
5429
5430 if (sym_verbose >= 2) {
5431 sym_print_addr(cmd, " queue depth is now %d\n",
5432 lp->started_max);
5433 }
5434
5435 /*
5436 * Repair the CCB.
5437 */
5438 cp->host_status = HS_BUSY;
5439 cp->ssss_status = S_ILLEGAL;
5440
5441 /*
5442 * Let's requeue it to device.
5443 */
5444 sym_set_cam_status(cmd, CAM_REQUEUE_REQ);
5445 goto finish;
5446 }
5447weirdness:
5448#endif
5449 /*
5450 * Build result in CAM ccb.
5451 */
5452 sym_set_cam_result_error(np, cp, resid);
5453
5454#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
5455finish:
5456#endif
5457 /*
5458 * Add this one to the COMP queue.
5459 */
5460 sym_remque(&cp->link_ccbq);
5461 sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
5462
5463 /*
5464 * Complete all those commands with either error
5465 * or requeue condition.
5466 */
5467 sym_flush_comp_queue(np, 0);
5468
5469#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
5470 /*
5471	 * Do not start more than 1 command after an error.
5472 */
5473 if (lp)
5474 sym_start_next_ccbs(np, lp, 1);
5475#endif
5476}
5477
5478/*
5479 * Complete execution of a successful SCSI command.
5480 *
5481 * Only successful commands go to the DONE queue,
5482 * since we need to have the SCRIPTS processor
5483 * stopped on any error condition.
5484 * The SCRIPTS processor is running while we are
5485 * completing successful commands.
5486 */
5487void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp)
5488{
5489 struct sym_tcb *tp;
5490 struct sym_lcb *lp;
5491 struct scsi_cmnd *cmd;
5492 int resid;
5493
5494 /*
5495 * Paranoid check. :)
5496 */
5497 if (!cp || !cp->cmd)
5498 return;
5499 assert (cp->host_status == HS_COMPLETE);
5500
5501 /*
5502 * Get user command.
5503 */
5504 cmd = cp->cmd;
5505
5506 /*
5507 * Get target and lun pointers.
5508 */
5509 tp = &np->target[cp->target];
5510 lp = sym_lp(tp, cp->lun);
5511
5512 /*
5513 * Assume device discovered on first success.
5514 */
5515 if (!lp)
5516 sym_set_bit(tp->lun_map, cp->lun);
5517
5518 /*
5519	 * If all data have been transferred, given that no
5520	 * extended error occurred, there is no residual.
5521 */
5522 resid = 0;
5523 if (cp->phys.head.lastp != sym_goalp(cp))
5524 resid = sym_compute_residual(np, cp);
5525
5526 /*
5527 * Wrong transfer residuals may be worse than just always
5528 * returning zero. User can disable this feature in
5529 * sym53c8xx.h. Residual support is enabled by default.
5530 */
5531 if (!SYM_SETUP_RESIDUAL_SUPPORT)
5532 resid = 0;
5533#ifdef DEBUG_2_0_X
5534if (resid)
5535 printf("XXXX RESID= %d - 0x%x\n", resid, resid);
5536#endif
5537
5538 /*
5539 * Build result in CAM ccb.
5540 */
5541 sym_set_cam_result_ok(cp, cmd, resid);
5542
5543#ifdef SYM_OPT_SNIFF_INQUIRY
5544 /*
5545 * On standard INQUIRY response (EVPD and CmDt
5546 * not set), sniff out device capabilities.
5547 */
5548 if (cp->cdb_buf[0] == INQUIRY && !(cp->cdb_buf[1] & 0x3))
5549 sym_sniff_inquiry(np, cmd, resid);
5550#endif
5551
5552#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
5553 /*
5554	 * If the max number of started ccbs has been reduced,
5555	 * increase it again after 200 good statuses have been received.
5556 */
5557 if (lp && lp->started_max < lp->started_limit) {
5558 ++lp->num_sgood;
5559 if (lp->num_sgood >= 200) {
5560 lp->num_sgood = 0;
5561 ++lp->started_max;
5562 if (sym_verbose >= 2) {
5563 sym_print_addr(cmd, " queue depth is now %d\n",
5564 lp->started_max);
5565 }
5566 }
5567 }
5568#endif
5569
5570 /*
5571 * Free our CCB.
5572 */
5573 sym_free_ccb (np, cp);
5574
5575#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
5576 /*
5577	 * Requeue a couple of waiting scsi commands.
5578 */
5579 if (lp && !sym_que_empty(&lp->waiting_ccbq))
5580 sym_start_next_ccbs(np, lp, 2);
5581#endif
5582 /*
5583 * Complete the command.
5584 */
5585 sym_xpt_done(np, cmd);
5586}
5587
5588/*
5589 * Soft-attach the controller.
5590 */
5591int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram)
5592{
5593 struct sym_hcb *np = sym_get_hcb(shost);
5594 int i;
5595
5596 /*
5597 * Get some info about the firmware.
5598 */
5599 np->scripta_sz = fw->a_size;
5600 np->scriptb_sz = fw->b_size;
5601 np->scriptz_sz = fw->z_size;
5602 np->fw_setup = fw->setup;
5603 np->fw_patch = fw->patch;
5604 np->fw_name = fw->name;
5605
5606 /*
5607	 * Save the settings of some IO registers, so we will
5608 * be able to probe specific implementations.
5609 */
5610 sym_save_initial_setting (np);
5611
5612 /*
5613 * Reset the chip now, since it has been reported
5614 * that SCSI clock calibration may not work properly
5615 * if the chip is currently active.
5616 */
5617 sym_chip_reset(np);
5618
5619 /*
5620 * Prepare controller and devices settings, according
5621 * to chip features, user set-up and driver set-up.
5622 */
5623 sym_prepare_setting(shost, np, nvram);
5624
5625 /*
5626 * Check the PCI clock frequency.
5627 * Must be performed after prepare_setting since it destroys
5628 * STEST1 that is used to probe for the clock doubler.
5629 */
5630 i = sym_getpciclock(np);
5631 if (i > 37000 && !(np->features & FE_66MHZ))
5632 printf("%s: PCI BUS clock seems too high: %u KHz.\n",
5633 sym_name(np), i);
5634
5635 /*
5636 * Allocate the start queue.
5637 */
5638 np->squeue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE");
5639 if (!np->squeue)
5640 goto attach_failed;
5641 np->squeue_ba = vtobus(np->squeue);
5642
5643 /*
5644 * Allocate the done queue.
5645 */
5646 np->dqueue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE");
5647 if (!np->dqueue)
5648 goto attach_failed;
5649 np->dqueue_ba = vtobus(np->dqueue);
5650
5651 /*
5652 * Allocate the target bus address array.
5653 */
5654 np->targtbl = sym_calloc_dma(256, "TARGTBL");
5655 if (!np->targtbl)
5656 goto attach_failed;
5657 np->targtbl_ba = vtobus(np->targtbl);
5658
5659 /*
5660 * Allocate SCRIPTS areas.
5661 */
5662 np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0");
5663 np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0");
5664 np->scriptz0 = sym_calloc_dma(np->scriptz_sz, "SCRIPTZ0");
5665 if (!np->scripta0 || !np->scriptb0 || !np->scriptz0)
5666 goto attach_failed;
5667
5668 /*
5669 * Allocate the array of lists of CCBs hashed by DSA.
5670 */
5671 np->ccbh = kcalloc(sizeof(struct sym_ccb **), CCB_HASH_SIZE, GFP_KERNEL);
5672 if (!np->ccbh)
5673 goto attach_failed;
5674
5675 /*
5676	 * Initialize the CCB free and busy queues.
5677 */
5678 sym_que_init(&np->free_ccbq);
5679 sym_que_init(&np->busy_ccbq);
5680 sym_que_init(&np->comp_ccbq);
5681
5682 /*
5683 * Initialization for optional handling
5684 * of device queueing.
5685 */
5686#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
5687 sym_que_init(&np->dummy_ccbq);
5688#endif
5689 /*
5690	 * Allocate some CCBs. We need at least ONE.
5691 */
5692 if (!sym_alloc_ccb(np))
5693 goto attach_failed;
5694
5695 /*
5696 * Calculate BUS addresses where we are going
5697 * to load the SCRIPTS.
5698 */
5699 np->scripta_ba = vtobus(np->scripta0);
5700 np->scriptb_ba = vtobus(np->scriptb0);
5701 np->scriptz_ba = vtobus(np->scriptz0);
5702
5703 if (np->ram_ba) {
5704 np->scripta_ba = np->ram_ba;
5705 if (np->features & FE_RAM8K) {
5706 np->ram_ws = 8192;
5707 np->scriptb_ba = np->scripta_ba + 4096;
5708#if 0 /* May get useful for 64 BIT PCI addressing */
5709 np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32);
5710#endif
5711 }
5712 else
5713 np->ram_ws = 4096;
5714 }
5715
5716 /*
5717 * Copy scripts to controller instance.
5718 */
5719 memcpy(np->scripta0, fw->a_base, np->scripta_sz);
5720 memcpy(np->scriptb0, fw->b_base, np->scriptb_sz);
5721 memcpy(np->scriptz0, fw->z_base, np->scriptz_sz);
5722
5723 /*
5724 * Setup variable parts in scripts and compute
5725 * scripts bus addresses used from the C code.
5726 */
5727 np->fw_setup(np, fw);
5728
5729 /*
5730 * Bind SCRIPTS with physical addresses usable by the
5731 * SCRIPTS processor (as seen from the BUS = BUS addresses).
5732 */
5733 sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz);
5734 sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz);
5735 sym_fw_bind_script(np, (u32 *) np->scriptz0, np->scriptz_sz);
5736
5737#ifdef SYM_CONF_IARB_SUPPORT
5738 /*
5739 * If user wants IARB to be set when we win arbitration
5740 * and have other jobs, compute the max number of consecutive
5741 * settings of IARB hints before we leave devices a chance to
5742 * arbitrate for reselection.
5743 */
5744#ifdef SYM_SETUP_IARB_MAX
5745 np->iarb_max = SYM_SETUP_IARB_MAX;
5746#else
5747 np->iarb_max = 4;
5748#endif
5749#endif
5750
5751 /*
5752 * Prepare the idle and invalid task actions.
5753 */
5754 np->idletask.start = cpu_to_scr(SCRIPTA_BA(np, idle));
5755 np->idletask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
5756 np->idletask_ba = vtobus(&np->idletask);
5757
5758 np->notask.start = cpu_to_scr(SCRIPTA_BA(np, idle));
5759 np->notask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
5760 np->notask_ba = vtobus(&np->notask);
5761
5762 np->bad_itl.start = cpu_to_scr(SCRIPTA_BA(np, idle));
5763 np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
5764 np->bad_itl_ba = vtobus(&np->bad_itl);
5765
5766 np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA(np, idle));
5767 np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA(np,bad_i_t_l_q));
5768 np->bad_itlq_ba = vtobus(&np->bad_itlq);
5769
5770 /*
5771 * Allocate and prepare the lun JUMP table that is used
5772	 * for a target prior to the probing of devices (bad lun table).
5773 * A private table will be allocated for the target on the
5774 * first INQUIRY response received.
5775 */
5776 np->badluntbl = sym_calloc_dma(256, "BADLUNTBL");
5777 if (!np->badluntbl)
5778 goto attach_failed;
5779
5780 np->badlun_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
5781 for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */
5782 np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
5783
5784 /*
5785 * Prepare the bus address array that contains the bus
5786 * address of each target control block.
5787 * For now, assume all logical units are wrong. :)
5788 */
5789 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
5790 np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i]));
5791 np->target[i].head.luntbl_sa =
5792 cpu_to_scr(vtobus(np->badluntbl));
5793 np->target[i].head.lun0_sa =
5794 cpu_to_scr(vtobus(&np->badlun_sa));
5795 }
5796
5797 /*
5798 * Now check the cache handling of the pci chipset.
5799 */
5800 if (sym_snooptest (np)) {
5801 printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np));
5802 goto attach_failed;
5803 }
5804
5805 /*
5806 * Sigh! we are done.
5807 */
5808 return 0;
5809
5810attach_failed:
5811 return -ENXIO;
5812}
5813
5814/*
5815 * Free everything that has been allocated for this device.
5816 */
5817void sym_hcb_free(struct sym_hcb *np)
5818{
5819 SYM_QUEHEAD *qp;
5820 struct sym_ccb *cp;
5821 struct sym_tcb *tp;
5822 struct sym_lcb *lp;
5823 int target, lun;
5824
5825 if (np->scriptz0)
5826 sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0");
5827 if (np->scriptb0)
5828 sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0");
5829 if (np->scripta0)
5830 sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0");
5831 if (np->squeue)
5832 sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE");
5833 if (np->dqueue)
5834 sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");
5835
5836 if (np->actccbs) {
5837 while ((qp = sym_remque_head(&np->free_ccbq)) != 0) {
5838 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
5839 sym_mfree_dma(cp, sizeof(*cp), "CCB");
5840 }
5841 }
5842 kfree(np->ccbh);
5843
5844 if (np->badluntbl)
5845 sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");
5846
5847 for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
5848 tp = &np->target[target];
5849 for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) {
5850 lp = sym_lp(tp, lun);
5851 if (!lp)
5852 continue;
5853 if (lp->itlq_tbl)
5854 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4,
5855 "ITLQ_TBL");
5856 kfree(lp->cb_tags);
5857 sym_mfree_dma(lp, sizeof(*lp), "LCB");
5858 }
5859#if SYM_CONF_MAX_LUN > 1
5860 kfree(tp->lunmp);
5861#endif
5862 }
5863 if (np->targtbl)
5864 sym_mfree_dma(np->targtbl, 256, "TARGTBL");
5865}
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
new file mode 100644
index 000000000000..a95cbe4b8e39
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -0,0 +1,1304 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40#ifndef SYM_HIPD_H
41#define SYM_HIPD_H
42
43/*
44 * Generic driver options.
45 *
46 * They may be defined in platform specific headers, if they
47 * are useful.
48 *
49 * SYM_OPT_HANDLE_DIR_UNKNOWN
50 * When this option is set, the SCRIPTS used by the driver
51 * are able to handle SCSI transfers with direction not
52 * supplied by user.
53 * (set for Linux-2.0.X)
54 *
55 * SYM_OPT_HANDLE_DEVICE_QUEUEING
56 * When this option is set, the driver will use a queue per
57 * device and handle QUEUE FULL status requeuing internally.
58 *
59 * SYM_OPT_LIMIT_COMMAND_REORDERING
60 * When this option is set, the driver tries to limit tagged
61 * command reordering to some reasonable value.
62 * (set for Linux)
63 */
64#if 0
65#define SYM_OPT_HANDLE_DIR_UNKNOWN
66#define SYM_OPT_HANDLE_DEVICE_QUEUEING
67#define SYM_OPT_LIMIT_COMMAND_REORDERING
68#endif
69
70/*
71 * Active debugging tags and verbosity.
72 * Both DEBUG_FLAGS and sym_verbose can be redefined
73 * by the platform specific code to something else.
74 */
75#define DEBUG_ALLOC (0x0001)
76#define DEBUG_PHASE (0x0002)
77#define DEBUG_POLL (0x0004)
78#define DEBUG_QUEUE (0x0008)
79#define DEBUG_RESULT (0x0010)
80#define DEBUG_SCATTER (0x0020)
81#define DEBUG_SCRIPT (0x0040)
82#define DEBUG_TINY (0x0080)
83#define DEBUG_TIMING (0x0100)
84#define DEBUG_NEGO (0x0200)
85#define DEBUG_TAGS (0x0400)
86#define DEBUG_POINTER (0x0800)
87
88#ifndef DEBUG_FLAGS
89#define DEBUG_FLAGS (0x0000)
90#endif
91
92#ifndef sym_verbose
93#define sym_verbose (np->verbose)
94#endif
95
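/*
 * A minimal usage sketch (not part of the original file): the tags above
 * are meant to be tested against DEBUG_FLAGS before printing, in the same
 * way the allocator in sym_malloc.c does with DEBUG_ALLOC, e.g.:
 *
 *	if (DEBUG_FLAGS & DEBUG_QUEUE)
 *		printf("%s: queuing command, tag %d\n", sym_name(np), cp->tag);
 */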
96/*
97 * These should already have been defined.
98 */
99#ifndef assert
100#define assert(expression) { \
101 if (!(expression)) { \
102 (void)panic( \
103 "assertion \"%s\" failed: file \"%s\", line %d\n", \
104 #expression, \
105 __FILE__, __LINE__); \
106 } \
107}
108#endif
109
110/*
111 * Number of tasks per device we want to handle.
112 */
113#if SYM_CONF_MAX_TAG_ORDER > 8
114#error "more than 256 tags per logical unit not allowed."
115#endif
116#define SYM_CONF_MAX_TASK (1<<SYM_CONF_MAX_TAG_ORDER)
117
118/*
119 * Do not use more tasks than we can handle.
120 */
121#ifndef SYM_CONF_MAX_TAG
122#define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
123#endif
124#if SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
125#undef SYM_CONF_MAX_TAG
126#define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
127#endif
128
129/*
130 * This one means 'NO TAG for this job'
131 */
132#define NO_TAG (256)
133
134/*
135 * Number of SCSI targets.
136 */
137#if SYM_CONF_MAX_TARGET > 16
138#error "more than 16 targets not allowed."
139#endif
140
141/*
142 * Number of logical units per target.
143 */
144#if SYM_CONF_MAX_LUN > 64
145#error "more than 64 logical units per target not allowed."
146#endif
147
148/*
149 * Asynchronous pre-scaler (ns). Shall be 40 for
150 * the SCSI timings to be compliant.
151 */
152#define SYM_CONF_MIN_ASYNC (40)
153
154/*
155 * Shortest memory chunk is (1<<SYM_MEM_SHIFT), currently 16.
156 * Actual allocations happen in SYM_MEM_CLUSTER_SIZE sized chunks.
157 * (1 PAGE at a time is just fine).
158 */
159#define SYM_MEM_SHIFT 4
160#define SYM_MEM_CLUSTER_SIZE (1UL << SYM_MEM_CLUSTER_SHIFT)
161#define SYM_MEM_CLUSTER_MASK (SYM_MEM_CLUSTER_SIZE-1)
162
163/*
164 * Number of entries in the START and DONE queues.
165 *
166 * We limit to 1 PAGE in order to succeed allocation of
167 * these queues. Each entry is 8 bytes long (2 DWORDS).
168 */
169#ifdef SYM_CONF_MAX_START
170#define SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
171#else
172#define SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
173#define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
174#endif
175
176#if SYM_CONF_MAX_QUEUE > SYM_MEM_CLUSTER_SIZE/8
177#undef SYM_CONF_MAX_QUEUE
178#define SYM_CONF_MAX_QUEUE (SYM_MEM_CLUSTER_SIZE/8)
179#undef SYM_CONF_MAX_START
180#define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
181#endif
182
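/*
 * Worked example (assuming a 4 KB PAGE_SIZE and SYM_MEM_PAGE_ORDER 0):
 * SYM_MEM_CLUSTER_SIZE is 4096 bytes, so the cap above limits the queues
 * to 4096/8 = 512 entries, i.e. SYM_CONF_MAX_QUEUE = 512 and
 * SYM_CONF_MAX_START = 510.
 */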
183/*
184 * For this one, we want a short name :-)
185 */
186#define MAX_QUEUE SYM_CONF_MAX_QUEUE
187
188/*
189 * Common definitions for both bus space based and legacy IO methods.
190 */
191
192#define INB_OFF(np, o) ioread8(np->s.ioaddr + (o))
193#define INW_OFF(np, o) ioread16(np->s.ioaddr + (o))
194#define INL_OFF(np, o) ioread32(np->s.ioaddr + (o))
195
196#define OUTB_OFF(np, o, val) iowrite8((val), np->s.ioaddr + (o))
197#define OUTW_OFF(np, o, val) iowrite16((val), np->s.ioaddr + (o))
198#define OUTL_OFF(np, o, val) iowrite32((val), np->s.ioaddr + (o))
199
200#define INB(np, r) INB_OFF(np, offsetof(struct sym_reg, r))
201#define INW(np, r) INW_OFF(np, offsetof(struct sym_reg, r))
202#define INL(np, r) INL_OFF(np, offsetof(struct sym_reg, r))
203
204#define OUTB(np, r, v) OUTB_OFF(np, offsetof(struct sym_reg, r), (v))
205#define OUTW(np, r, v) OUTW_OFF(np, offsetof(struct sym_reg, r), (v))
206#define OUTL(np, r, v) OUTL_OFF(np, offsetof(struct sym_reg, r), (v))
207
208#define OUTONB(np, r, m) OUTB(np, r, INB(np, r) | (m))
209#define OUTOFFB(np, r, m) OUTB(np, r, INB(np, r) & ~(m))
210#define OUTONW(np, r, m) OUTW(np, r, INW(np, r) | (m))
211#define OUTOFFW(np, r, m) OUTW(np, r, INW(np, r) & ~(m))
212#define OUTONL(np, r, m) OUTL(np, r, INL(np, r) | (m))
213#define OUTOFFL(np, r, m) OUTL(np, r, INL(np, r) & ~(m))
214
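/*
 * Usage sketch (illustrative only; the register names are resolved
 * through struct sym_reg from sym_defs.h):
 */
static inline u_char example_read_istat(struct sym_hcb *np)
{
	return INB(np, nc_istat);	/* 8 bit read of the ISTAT register */
}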
215/*
216 * We normally want the chip to have a consistent view
217 * of driver internal data structures when we restart it.
218 * Thus these macros.
219 */
220#define OUTL_DSP(np, v) \
221 do { \
222 MEMORY_WRITE_BARRIER(); \
223 OUTL(np, nc_dsp, (v)); \
224 } while (0)
225
226#define OUTONB_STD() \
227 do { \
228 MEMORY_WRITE_BARRIER(); \
229 OUTONB(np, nc_dcntl, (STD|NOCOM)); \
230 } while (0)
231
232/*
233 * Command control block states.
234 */
235#define HS_IDLE (0)
236#define HS_BUSY (1)
237#define HS_NEGOTIATE (2) /* sync/wide data transfer*/
238#define HS_DISCONNECT (3) /* Disconnected by target */
239#define HS_WAIT (4) /* waiting for resource */
240
241#define HS_DONEMASK (0x80)
242#define HS_COMPLETE (4|HS_DONEMASK)
243#define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */
244#define HS_UNEXPECTED (6|HS_DONEMASK) /* Unexpected disconnect */
245#define HS_COMP_ERR (7|HS_DONEMASK) /* Completed with error */
246
247/*
248 * Software Interrupt Codes
249 */
250#define SIR_BAD_SCSI_STATUS (1)
251#define SIR_SEL_ATN_NO_MSG_OUT (2)
252#define SIR_MSG_RECEIVED (3)
253#define SIR_MSG_WEIRD (4)
254#define SIR_NEGO_FAILED (5)
255#define SIR_NEGO_PROTO (6)
256#define SIR_SCRIPT_STOPPED (7)
257#define SIR_REJECT_TO_SEND (8)
258#define SIR_SWIDE_OVERRUN (9)
259#define SIR_SODL_UNDERRUN (10)
260#define SIR_RESEL_NO_MSG_IN (11)
261#define SIR_RESEL_NO_IDENTIFY (12)
262#define SIR_RESEL_BAD_LUN (13)
263#define SIR_TARGET_SELECTED (14)
264#define SIR_RESEL_BAD_I_T_L (15)
265#define SIR_RESEL_BAD_I_T_L_Q (16)
266#define SIR_ABORT_SENT (17)
267#define SIR_RESEL_ABORTED (18)
268#define SIR_MSG_OUT_DONE (19)
269#define SIR_COMPLETE_ERROR (20)
270#define SIR_DATA_OVERRUN (21)
271#define SIR_BAD_PHASE (22)
272#if SYM_CONF_DMA_ADDRESSING_MODE == 2
273#define SIR_DMAP_DIRTY (23)
274#define SIR_MAX (23)
275#else
276#define SIR_MAX (22)
277#endif
278
279/*
280 * Extended error bit codes.
281 * xerr_status field of struct sym_ccb.
282 */
283#define XE_EXTRA_DATA (1) /* unexpected data phase */
284#define XE_BAD_PHASE (1<<1) /* illegal phase (4/5) */
285#define XE_PARITY_ERR (1<<2) /* unrecovered SCSI parity error */
286#define XE_SODL_UNRUN (1<<3) /* ODD transfer in DATA OUT phase */
287#define XE_SWIDE_OVRUN (1<<4) /* ODD transfer in DATA IN phase */
288
289/*
290 * Negotiation status.
291 * nego_status field of struct sym_ccb.
292 */
293#define NS_SYNC (1)
294#define NS_WIDE (2)
295#define NS_PPR (3)
296
297/*
298 * A CCB hashed table is used to retrieve CCB address
299 * from DSA value.
300 */
301#define CCB_HASH_SHIFT 8
302#define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT)
303#define CCB_HASH_MASK (CCB_HASH_SIZE-1)
304#if 1
305#define CCB_HASH_CODE(dsa) \
306 (((dsa) >> (_LGRU16_(sizeof(struct sym_ccb)))) & CCB_HASH_MASK)
307#else
308#define CCB_HASH_CODE(dsa) (((dsa) >> 9) & CCB_HASH_MASK)
309#endif
310
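/*
 * Sketch of the DSA -> CCB lookup this hash supports (patterned on the
 * lookup the driver performs in sym_hipd.c; np->ccbh and the link_ccbh
 * chain are declared further down in this file):
 */
static inline struct sym_ccb *example_ccb_from_dsa(struct sym_hcb *np, u32 dsa)
{
	struct sym_ccb *cp = np->ccbh[CCB_HASH_CODE(dsa)];

	while (cp && cp->ccb_ba != dsa)
		cp = cp->link_ccbh;
	return cp;
}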
311#if SYM_CONF_DMA_ADDRESSING_MODE == 2
312/*
313 * We may want to use segment registers for 64 bit DMA.
314 * 16 segments registers -> up to 64 GB addressable.
315 */
316#define SYM_DMAP_SHIFT (4)
317#define SYM_DMAP_SIZE (1u<<SYM_DMAP_SHIFT)
318#define SYM_DMAP_MASK (SYM_DMAP_SIZE-1)
319#endif
320
321/*
322 * Device flags.
323 */
324#define SYM_DISC_ENABLED (1)
325#define SYM_TAGS_ENABLED (1<<1)
326#define SYM_SCAN_BOOT_DISABLED (1<<2)
327#define SYM_SCAN_LUNS_DISABLED (1<<3)
328
329/*
330 * Host adapter miscellaneous flags.
331 */
332#define SYM_AVOID_BUS_RESET (1)
333
334/*
335 * Misc.
336 */
337#define SYM_SNOOP_TIMEOUT (10000000)
338#define BUS_8_BIT 0
339#define BUS_16_BIT 1
340
341/*
342 * Gather negotiable parameters value
343 */
344struct sym_trans {
345 u8 period;
346 u8 offset;
347 unsigned int width:1;
348 unsigned int iu:1;
349 unsigned int dt:1;
350 unsigned int qas:1;
351 unsigned int check_nego:1;
352};
353
354/*
355 * Global TCB HEADER.
356 *
357 * Due to lack of indirect addressing on earlier NCR chips,
358 * this substructure is copied from the TCB to a global
359 * address after selection.
360 * For SYMBIOS chips that support LOAD/STORE this copy is
361 * not needed and thus not performed.
362 */
363struct sym_tcbh {
364 /*
365 * Scripts bus addresses of LUN table accessed from scripts.
366 * LUN #0 is a special case, since multi-lun devices are rare,
367	 * and we want to speed up the general case and not waste
368 * resources.
369 */
370 u32 luntbl_sa; /* bus address of this table */
371 u32 lun0_sa; /* bus address of LCB #0 */
372 /*
373 * Actual SYNC/WIDE IO registers value for this target.
374 * 'sval', 'wval' and 'uval' are read from SCRIPTS and
375 * so have alignment constraints.
376 */
377/*0*/ u_char uval; /* -> SCNTL4 register */
378/*1*/ u_char sval; /* -> SXFER io register */
379/*2*/ u_char filler1;
380/*3*/ u_char wval; /* -> SCNTL3 io register */
381};
382
383/*
384 * Target Control Block
385 */
386struct sym_tcb {
387 /*
388 * TCB header.
389 * Assumed at offset 0.
390 */
391/*0*/ struct sym_tcbh head;
392
393 /*
394 * LUN table used by the SCRIPTS processor.
395 * An array of bus addresses is used on reselection.
396 */
397 u32 *luntbl; /* LCBs bus address table */
398
399 /*
400 * LUN table used by the C code.
401 */
402 struct sym_lcb *lun0p; /* LCB of LUN #0 (usual case) */
403#if SYM_CONF_MAX_LUN > 1
404 struct sym_lcb **lunmp; /* Other LCBs [1..MAX_LUN] */
405#endif
406
407 /*
408	 * Bitmap that tells about LUNs that have succeeded at least
409	 * one IO and are therefore assumed to be real devices.
410 * Avoid useless allocation of the LCB structure.
411 */
412 u32 lun_map[(SYM_CONF_MAX_LUN+31)/32];
413
414 /*
415	 * Bitmap that tells about LUNs that don't yet have an LCB
416 * allocated (not discovered or LCB allocation failed).
417 */
418 u32 busy0_map[(SYM_CONF_MAX_LUN+31)/32];
419
420#ifdef SYM_HAVE_STCB
421 /*
422 * O/S specific data structure.
423 */
424 struct sym_stcb s;
425#endif
426
427 /* Transfer goal */
428 struct sym_trans tgoal;
429
430 /*
431 * Keep track of the CCB used for the negotiation in order
432 * to ensure that only 1 negotiation is queued at a time.
433 */
434 struct sym_ccb * nego_cp; /* CCB used for the nego */
435
436 /*
437 * Set when we want to reset the device.
438 */
439 u_char to_reset;
440
441 /*
442 * Other user settable limits and options.
443 * These limits are read from the NVRAM if present.
444 */
445 u_char usrflags;
446 u_short usrtags;
447 struct scsi_device *sdev;
448};
449
450/*
451 * Global LCB HEADER.
452 *
453 * Due to lack of indirect addressing on earlier NCR chips,
454 * this substructure is copied from the LCB to a global
455 * address after selection.
456 * For SYMBIOS chips that support LOAD/STORE this copy is
457 * not needed and thus not performed.
458 */
459struct sym_lcbh {
460 /*
461 * SCRIPTS address jumped by SCRIPTS on reselection.
462 * For not probed logical units, this address points to
463 * SCRIPTS that deal with bad LU handling (must be at
464 * offset zero of the LCB for that reason).
465 */
466/*0*/ u32 resel_sa;
467
468 /*
469 * Task (bus address of a CCB) read from SCRIPTS that points
470 * to the unique ITL nexus allowed to be disconnected.
471 */
472 u32 itl_task_sa;
473
474 /*
475 * Task table bus address (read from SCRIPTS).
476 */
477 u32 itlq_tbl_sa;
478};
479
480/*
481 * Logical Unit Control Block
482 */
483struct sym_lcb {
484 /*
485 * TCB header.
486 * Assumed at offset 0.
487 */
488/*0*/ struct sym_lcbh head;
489
490 /*
491 * Task table read from SCRIPTS that contains pointers to
492 * ITLQ nexuses. The bus address read from SCRIPTS is
493 * inside the header.
494 */
495 u32 *itlq_tbl; /* Kernel virtual address */
496
497 /*
498 * Busy CCBs management.
499 */
500 u_short busy_itlq; /* Number of busy tagged CCBs */
501 u_short busy_itl; /* Number of busy untagged CCBs */
502
503 /*
504 * Circular tag allocation buffer.
505 */
506 u_short ia_tag; /* Tag allocation index */
507 u_short if_tag; /* Tag release index */
508 u_char *cb_tags; /* Circular tags buffer */
509
510 /*
511 * O/S specific data structure.
512 */
513#ifdef SYM_HAVE_SLCB
514 struct sym_slcb s;
515#endif
516
517#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
518 /*
519	 * Optionally the driver can handle device queueing,
520	 * and internally requeue commands to redo.
521 */
522 SYM_QUEHEAD waiting_ccbq;
523 SYM_QUEHEAD started_ccbq;
524 int num_sgood;
525 u_short started_tags;
526 u_short started_no_tag;
527 u_short started_max;
528 u_short started_limit;
529#endif
530
531#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
532 /*
533 * Optionally the driver can try to prevent SCSI
534 * IOs from being reordered too much.
535 */
536 u_char tags_si; /* Current index to tags sum */
537 u_short tags_sum[2]; /* Tags sum counters */
538 u_short tags_since; /* # of tags since last switch */
539#endif
540
541 /*
542 * Set when we want to clear all tasks.
543 */
544 u_char to_clear;
545
546 /*
547 * Capabilities.
548 */
549 u_char user_flags;
550 u_char curr_flags;
551};
552
553/*
554 * Action from SCRIPTS on a task.
555 * Is part of the CCB, but is also used separately to plug
556 * error handling action to perform from SCRIPTS.
557 */
558struct sym_actscr {
559 u32 start; /* Jumped by SCRIPTS after selection */
560	u32	restart;	/* Jumped by SCRIPTS on reselection */
561};
562
563/*
564 * Phase mismatch context.
565 *
566 * It is part of the CCB and is used as parameters for the
567 * DATA pointer. We need two contexts to handle correctly the
568 * SAVED DATA POINTER.
569 */
570struct sym_pmc {
571 struct sym_tblmove sg; /* Updated interrupted SG block */
572 u32 ret; /* SCRIPT return address */
573};
574
575/*
576 * LUN control block lookup.
577 * We use a direct pointer for LUN #0, and a table of
578 * pointers which is only allocated for devices that support
579 * LUN(s) > 0.
580 */
581#if SYM_CONF_MAX_LUN <= 1
582#define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : NULL
583#else
584#define sym_lp(tp, lun) \
585 (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : NULL
586#endif
587
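/*
 * Illustrative use of sym_lp() (a sketch; the target and lun values are
 * assumed to have been validated by the caller):
 */
static inline struct sym_lcb *example_find_lcb(struct sym_hcb *np, int target, int lun)
{
	struct sym_tcb *tp = &np->target[target];

	return sym_lp(tp, lun);	/* NULL if no LCB exists yet for this LUN */
}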
588/*
589 * Status bytes are used by the host and the script processor.
590 *
591 * The last four bytes (status[4]) are copied to the
592 * scratchb register (declared as scr0..scr3) just after the
593 * select/reselect, and copied back just after disconnecting.
594 * Inside the script the XX_REG are used.
595 */
596
597/*
598 * Last four bytes (script)
599 */
600#define HX_REG scr0
601#define HX_PRT nc_scr0
602#define HS_REG scr1
603#define HS_PRT nc_scr1
604#define SS_REG scr2
605#define SS_PRT nc_scr2
606#define HF_REG scr3
607#define HF_PRT nc_scr3
608
609/*
610 * Last four bytes (host)
611 */
612#define host_xflags phys.head.status[0]
613#define host_status phys.head.status[1]
614#define ssss_status phys.head.status[2]
615#define host_flags phys.head.status[3]
616
617/*
618 * Host flags
619 */
620#define HF_IN_PM0 1u
621#define HF_IN_PM1 (1u<<1)
622#define HF_ACT_PM (1u<<2)
623#define HF_DP_SAVED (1u<<3)
624#define HF_SENSE (1u<<4)
625#define HF_EXT_ERR (1u<<5)
626#define HF_DATA_IN (1u<<6)
627#ifdef SYM_CONF_IARB_SUPPORT
628#define HF_HINT_IARB (1u<<7)
629#endif
630
631/*
632 * More host flags
633 */
634#if SYM_CONF_DMA_ADDRESSING_MODE == 2
635#define HX_DMAP_DIRTY (1u<<7)
636#endif
637
638/*
639 * Global CCB HEADER.
640 *
641 * Due to lack of indirect addressing on earlier NCR chips,
642 * this substructure is copied from the ccb to a global
643 * address after selection (or reselection) and copied back
644 * before disconnect.
645 * For SYMBIOS chips that support LOAD/STORE this copy is
646 * not needed and thus not performed.
647 */
648
649struct sym_ccbh {
650 /*
651 * Start and restart SCRIPTS addresses (must be at 0).
652 */
653/*0*/ struct sym_actscr go;
654
655 /*
656	 * SCRIPTS jump address that deals with data pointers.
657 * 'savep' points to the position in the script responsible
658 * for the actual transfer of data.
659 * It's written on reception of a SAVE_DATA_POINTER message.
660 */
661 u32 savep; /* Jump address to saved data pointer */
662 u32 lastp; /* SCRIPTS address at end of data */
663#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
664 u32 wlastp;
665#endif
666
667 /*
668 * Status fields.
669 */
670 u8 status[4];
671};
672
673/*
674 * GET/SET the value of the data pointer used by SCRIPTS.
675 *
676 * We must distinguish between the LOAD/STORE-based SCRIPTS
677 * that use directly the header in the CCB, and the NCR-GENERIC
678 * SCRIPTS that use the copy of the header in the HCB.
679 */
680#if SYM_CONF_GENERIC_SUPPORT
681#define sym_set_script_dp(np, cp, dp) \
682 do { \
683 if (np->features & FE_LDSTR) \
684 cp->phys.head.lastp = cpu_to_scr(dp); \
685 else \
686 np->ccb_head.lastp = cpu_to_scr(dp); \
687 } while (0)
688#define sym_get_script_dp(np, cp) \
689 scr_to_cpu((np->features & FE_LDSTR) ? \
690 cp->phys.head.lastp : np->ccb_head.lastp)
691#else
692#define sym_set_script_dp(np, cp, dp) \
693 do { \
694 cp->phys.head.lastp = cpu_to_scr(dp); \
695 } while (0)
696
697#define sym_get_script_dp(np, cp) (cp->phys.head.lastp)
698#endif
699
700/*
701 * Data Structure Block
702 *
703 * During execution of a ccb by the script processor, the
704 * DSA (data structure address) register points to this
705 * substructure of the ccb.
706 */
707struct sym_dsb {
708 /*
709 * CCB header.
710 * Also assumed at offset 0 of the sym_ccb structure.
711 */
712/*0*/ struct sym_ccbh head;
713
714 /*
715 * Phase mismatch contexts.
716 * We need two to handle correctly the SAVED DATA POINTER.
717 * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic
718 * for address calculation from SCRIPTS.
719 */
720 struct sym_pmc pm0;
721 struct sym_pmc pm1;
722
723 /*
724 * Table data for Script
725 */
726 struct sym_tblsel select;
727 struct sym_tblmove smsg;
728 struct sym_tblmove smsg_ext;
729 struct sym_tblmove cmd;
730 struct sym_tblmove sense;
731 struct sym_tblmove wresid;
732 struct sym_tblmove data [SYM_CONF_MAX_SG];
733};
734
735/*
736 * Our Command Control Block
737 */
738struct sym_ccb {
739 /*
740 * This is the data structure which is pointed by the DSA
741 * register when it is executed by the script processor.
742 * It must be the first entry.
743 */
744 struct sym_dsb phys;
745
746 /*
747 * Pointer to CAM ccb and related stuff.
748 */
749 struct scsi_cmnd *cmd; /* CAM scsiio ccb */
750 u8 cdb_buf[16]; /* Copy of CDB */
751#define SYM_SNS_BBUF_LEN 32
752 u8 sns_bbuf[SYM_SNS_BBUF_LEN]; /* Bounce buffer for sense data */
753 int data_len; /* Total data length */
754 int segments; /* Number of SG segments */
755
756 u8 order; /* Tag type (if tagged command) */
757
758 /*
759	 * Miscellaneous status fields.
760 */
761 u_char nego_status; /* Negotiation status */
762 u_char xerr_status; /* Extended error flags */
763 u32 extra_bytes; /* Extraneous bytes transferred */
764
765 /*
766 * Message areas.
767 * We prepare a message to be sent after selection.
768 * We may use a second one if the command is rescheduled
769 * due to CHECK_CONDITION or COMMAND TERMINATED.
770 * Contents are IDENTIFY and SIMPLE_TAG.
771 * While negotiating sync or wide transfer,
772 * a SDTR or WDTR message is appended.
773 */
774 u_char scsi_smsg [12];
775 u_char scsi_smsg2[12];
776
777 /*
778 * Auto request sense related fields.
779 */
780 u_char sensecmd[6]; /* Request Sense command */
781 u_char sv_scsi_status; /* Saved SCSI status */
782 u_char sv_xerr_status; /* Saved extended status */
783 int sv_resid; /* Saved residual */
784
785 /*
786 * Other fields.
787 */
788 u32 ccb_ba; /* BUS address of this CCB */
789 u_short tag; /* Tag for this transfer */
790 /* NO_TAG means no tag */
791 u_char target;
792 u_char lun;
793 struct sym_ccb *link_ccbh; /* Host adapter CCB hash chain */
794 SYM_QUEHEAD link_ccbq; /* Link to free/busy CCB queue */
795 u32 startp; /* Initial data pointer */
796 u32 goalp; /* Expected last data pointer */
797#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
798 u32 wgoalp;
799#endif
800 int ext_sg; /* Extreme data pointer, used */
801 int ext_ofs; /* to calculate the residual. */
802#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
803 SYM_QUEHEAD link2_ccbq; /* Link for device queueing */
804 u_char started; /* CCB queued to the squeue */
805#endif
806 u_char to_abort; /* Want this IO to be aborted */
807#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
808 u_char tags_si; /* Lun tags sum index (0,1) */
809#endif
810};
811
812#define CCB_BA(cp,lbl) (cp->ccb_ba + offsetof(struct sym_ccb, lbl))
813
814#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
815#define sym_goalp(cp) ((cp->host_flags & HF_DATA_IN) ? cp->goalp : cp->wgoalp)
816#else
817#define sym_goalp(cp) (cp->goalp)
818#endif
819
820typedef struct device *m_pool_ident_t;
821
822/*
823 * Host Control Block
824 */
825struct sym_hcb {
826 /*
827 * Global headers.
828	 * Due to the poor addressing capabilities of earlier
829	 * chips (810, 815, 825), part of the data structures
830	 * (CCB, TCB and LCB) is copied into fixed areas.
831 */
832#if SYM_CONF_GENERIC_SUPPORT
833 struct sym_ccbh ccb_head;
834 struct sym_tcbh tcb_head;
835 struct sym_lcbh lcb_head;
836#endif
837 /*
838 * Idle task and invalid task actions and
839 * their bus addresses.
840 */
841 struct sym_actscr idletask, notask, bad_itl, bad_itlq;
842 u32 idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;
843
844 /*
845 * Dummy lun table to protect us against target
846 * returning bad lun number on reselection.
847 */
848 u32 *badluntbl; /* Table physical address */
849 u32 badlun_sa; /* SCRIPT handler BUS address */
850
851 /*
852 * Bus address of this host control block.
853 */
854 u32 hcb_ba;
855
856 /*
857 * Bit 32-63 of the on-chip RAM bus address in LE format.
858 * The START_RAM64 script loads the MMRS and MMWS from this
859 * field.
860 */
861 u32 scr_ram_seg;
862
863 /*
864 * Initial value of some IO register bits.
865 * These values are assumed to have been set by BIOS, and may
866 * be used to probe adapter implementation differences.
867 */
868 u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
869 sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
870 sv_stest1;
871
872 /*
873 * Actual initial value of IO register bits used by the
874 * driver. They are loaded at initialisation according to
875 * features that are to be enabled/disabled.
876 */
877 u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
878 rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;
879
880 /*
881 * Target data.
882 */
883 struct sym_tcb target[SYM_CONF_MAX_TARGET];
884
885 /*
886 * Target control block bus address array used by the SCRIPT
887 * on reselection.
888 */
889 u32 *targtbl;
890 u32 targtbl_ba;
891
892 /*
893 * DMA pool handle for this HBA.
894 */
895 m_pool_ident_t bus_dmat;
896
897 /*
898 * O/S specific data structure
899 */
900 struct sym_shcb s;
901
902 /*
903 * Physical bus addresses of the chip.
904 */
905 u32 mmio_ba; /* MMIO 32 bit BUS address */
906 int mmio_ws; /* MMIO Window size */
907
908 u32 ram_ba; /* RAM 32 bit BUS address */
909 int ram_ws; /* RAM window size */
910
911 /*
912 * SCRIPTS virtual and physical bus addresses.
913 * 'script' is loaded in the on-chip RAM if present.
914 * 'scripth' stays in main memory for all chips except the
915 * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM.
916 */
917 u_char *scripta0; /* Copy of scripts A, B, Z */
918 u_char *scriptb0;
919 u_char *scriptz0;
920 u32 scripta_ba; /* Actual scripts A, B, Z */
921 u32 scriptb_ba; /* 32 bit bus addresses. */
922 u32 scriptz_ba;
923 u_short scripta_sz; /* Actual size of script A, B, Z*/
924 u_short scriptb_sz;
925 u_short scriptz_sz;
926
927 /*
928 * Bus addresses, setup and patch methods for
929 * the selected firmware.
930 */
931 struct sym_fwa_ba fwa_bas; /* Useful SCRIPTA bus addresses */
932 struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */
933 struct sym_fwz_ba fwz_bas; /* Useful SCRIPTZ bus addresses */
934 void (*fw_setup)(struct sym_hcb *np, struct sym_fw *fw);
935 void (*fw_patch)(struct sym_hcb *np);
936 char *fw_name;
937
938 /*
939 * General controller parameters and configuration.
940 */
941 u_short device_id; /* PCI device id */
942 u_char revision_id; /* PCI device revision id */
943 u_int features; /* Chip features map */
944 u_char myaddr; /* SCSI id of the adapter */
945 u_char maxburst; /* log base 2 of dwords burst */
946 u_char maxwide; /* Maximum transfer width */
947 u_char minsync; /* Min sync period factor (ST) */
948 u_char maxsync; /* Max sync period factor (ST) */
949 u_char maxoffs; /* Max scsi offset (ST) */
950 u_char minsync_dt; /* Min sync period factor (DT) */
951 u_char maxsync_dt; /* Max sync period factor (DT) */
952 u_char maxoffs_dt; /* Max scsi offset (DT) */
953 u_char multiplier; /* Clock multiplier (1,2,4) */
954 u_char clock_divn; /* Number of clock divisors */
955 u32 clock_khz; /* SCSI clock frequency in KHz */
956 u32 pciclk_khz; /* Estimated PCI clock in KHz */
957 /*
958 * Start queue management.
959 * It is filled up by the host processor and accessed by the
960 * SCRIPTS processor in order to start SCSI commands.
961 */
962 volatile /* Prevent code optimizations */
963 u32 *squeue; /* Start queue virtual address */
964 u32 squeue_ba; /* Start queue BUS address */
965 u_short squeueput; /* Next free slot of the queue */
966 u_short actccbs; /* Number of allocated CCBs */
967
968 /*
969 * Command completion queue.
970 * It is the same size as the start queue to avoid overflow.
971 */
972 u_short dqueueget; /* Next position to scan */
973 volatile /* Prevent code optimizations */
974 u32 *dqueue; /* Completion (done) queue */
975 u32 dqueue_ba; /* Done queue BUS address */
976
977 /*
978 * Miscellaneous buffers accessed by the scripts-processor.
979 * They shall be DWORD aligned, because they may be read or
980 * written with a script command.
981 */
982 u_char msgout[8]; /* Buffer for MESSAGE OUT */
983 u_char msgin [8]; /* Buffer for MESSAGE IN */
984 u32 lastmsg; /* Last SCSI message sent */
985 u32 scratch; /* Scratch for SCSI receive */
986 /* Also used for cache test */
987 /*
988 * Miscellaneous configuration and status parameters.
989 */
990 u_char usrflags; /* Miscellaneous user flags */
991 u_char scsi_mode; /* Current SCSI BUS mode */
992 u_char verbose; /* Verbosity for this controller*/
993
994 /*
995 * CCB lists and queue.
996 */
997 struct sym_ccb **ccbh; /* CCBs hashed by DSA value */
998 /* CCB_HASH_SIZE lists of CCBs */
999 SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */
1000 SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */
1001
1002 /*
1003 * During error handling and/or recovery,
1004 * active CCBs that are to be completed with
1005 * error or requeued are moved from the busy_ccbq
1006 * to the comp_ccbq prior to completion.
1007 */
1008 SYM_QUEHEAD comp_ccbq;
1009
1010#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
1011 SYM_QUEHEAD dummy_ccbq;
1012#endif
1013
1014 /*
1015 * IMMEDIATE ARBITRATION (IARB) control.
1016 *
1017 * We keep track in 'last_cp' of the last CCB that has been
1018 * queued to the SCRIPTS processor and clear 'last_cp' when
1019 * this CCB completes. If last_cp is not zero at the moment
1020 * we queue a new CCB, we set a flag in 'last_cp' that is
1021 * used by the SCRIPTS as a hint for setting IARB.
1022	 * We do not set more than 'iarb_max' consecutive hints for
1023	 * IARB in order to give devices a chance to reselect.
1024 * By the way, any non zero value of 'iarb_max' is unfair. :)
1025 */
1026#ifdef SYM_CONF_IARB_SUPPORT
1027 u_short iarb_max; /* Max. # consecutive IARB hints*/
1028 u_short iarb_count; /* Actual # of these hints */
1029 struct sym_ccb * last_cp;
1030#endif
1031
1032 /*
1033 * Command abort handling.
1034 * We need to synchronize tightly with the SCRIPTS
1035 * processor in order to handle things correctly.
1036 */
1037 u_char abrt_msg[4]; /* Message to send buffer */
1038 struct sym_tblmove abrt_tbl; /* Table for the MOV of it */
1039 struct sym_tblsel abrt_sel; /* Sync params for selection */
1040 u_char istat_sem; /* Tells the chip to stop (SEM) */
1041
1042 /*
1043 * 64 bit DMA handling.
1044 */
1045#if SYM_CONF_DMA_ADDRESSING_MODE != 0
1046 u_char use_dac; /* Use PCI DAC cycles */
1047#if SYM_CONF_DMA_ADDRESSING_MODE == 2
1048 u_char dmap_dirty; /* Dma segments registers dirty */
1049 u32 dmap_bah[SYM_DMAP_SIZE];/* Segment registers map */
1050#endif
1051#endif
1052};
1053
1054#define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl))
1055
1056
1057/*
1058 * FIRMWARES (sym_fw.c)
1059 */
1060struct sym_fw * sym_find_firmware(struct sym_chip *chip);
1061void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len);
1062
1063/*
1064 * Driver methods called from O/S specific code.
1065 */
1066char *sym_driver_name(void);
1067void sym_print_xerr(struct scsi_cmnd *cmd, int x_status);
1068int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int);
1069struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision);
1070void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp);
1071#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
1072void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn);
1073#endif
1074void sym_start_up(struct sym_hcb *np, int reason);
1075void sym_interrupt(struct sym_hcb *np);
1076int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task);
1077struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
1078void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
1079struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1080int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
1081int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
1082int sym_reset_scsi_target(struct sym_hcb *np, int target);
1083void sym_hcb_free(struct sym_hcb *np);
1084int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram);
1085
1086/*
1087 * Build a scatter/gather entry.
1088 *
1089 * For 64 bit systems, we use the 8 upper bits of the size field
1090 * to provide bus address bits 32-39 to the SCRIPTS processor.
1091 * This allows the 895A, 896, 1010 to address up to 1 TB of memory.
1092 */
1093
1094#if SYM_CONF_DMA_ADDRESSING_MODE == 0
1095#define sym_build_sge(np, data, badd, len) \
1096do { \
1097 (data)->addr = cpu_to_scr(badd); \
1098 (data)->size = cpu_to_scr(len); \
1099} while (0)
1100#elif SYM_CONF_DMA_ADDRESSING_MODE == 1
1101#define sym_build_sge(np, data, badd, len) \
1102do { \
1103 (data)->addr = cpu_to_scr(badd); \
1104 (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len); \
1105} while (0)
1106#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
1107int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
1108static __inline void
1109sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)
1110{
1111 u32 h = (badd>>32);
1112 int s = (h&SYM_DMAP_MASK);
1113
1114 if (h != np->dmap_bah[s])
1115 goto bad;
1116good:
1117 (data)->addr = cpu_to_scr(badd);
1118 (data)->size = cpu_to_scr((s<<24) + len);
1119 return;
1120bad:
1121 s = sym_lookup_dmap(np, h, s);
1122 goto good;
1123}
1124#else
1125#error "Unsupported DMA addressing mode"
1126#endif
1127
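/*
 * Sketch of filling the CCB data table with sym_build_sge() (the segment
 * addresses and lengths here are placeholders supplied by the caller; the
 * real driver fills the table from its tail so that 'lastp' lands on the
 * last entry):
 */
static inline void example_fill_sg(struct sym_hcb *np, struct sym_ccb *cp,
				   dma_addr_t *badd, int *len, int nseg)
{
	int i;

	for (i = 0; i < nseg && i < SYM_CONF_MAX_SG; i++)
		sym_build_sge(np, &cp->phys.data[i], badd[i], len[i]);
	cp->segments = i;
}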
1128/*
1129 * Set up data pointers used by SCRIPTS.
1130 * Called from O/S specific code.
1131 */
1132static inline void sym_setup_data_pointers(struct sym_hcb *np,
1133 struct sym_ccb *cp, int dir)
1134{
1135 u32 lastp, goalp;
1136
1137 /*
1138 * No segments means no data.
1139 */
1140 if (!cp->segments)
1141 dir = CAM_DIR_NONE;
1142
1143 /*
1144 * Set the data pointer.
1145 */
1146 switch(dir) {
1147#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
1148 case CAM_DIR_UNKNOWN:
1149#endif
1150 case CAM_DIR_OUT:
1151 goalp = SCRIPTA_BA(np, data_out2) + 8;
1152 lastp = goalp - 8 - (cp->segments * (2*4));
1153#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
1154 cp->wgoalp = cpu_to_scr(goalp);
1155 if (dir != CAM_DIR_UNKNOWN)
1156 break;
1157 cp->phys.head.wlastp = cpu_to_scr(lastp);
1158 /* fall through */
1159#else
1160 break;
1161#endif
1162 case CAM_DIR_IN:
1163 cp->host_flags |= HF_DATA_IN;
1164 goalp = SCRIPTA_BA(np, data_in2) + 8;
1165 lastp = goalp - 8 - (cp->segments * (2*4));
1166 break;
1167 case CAM_DIR_NONE:
1168 default:
1169#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
1170 cp->host_flags |= HF_DATA_IN;
1171#endif
1172 lastp = goalp = SCRIPTB_BA(np, no_data);
1173 break;
1174 }
1175
1176 /*
1177 * Set all pointers values needed by SCRIPTS.
1178 */
1179 cp->phys.head.lastp = cpu_to_scr(lastp);
1180 cp->phys.head.savep = cpu_to_scr(lastp);
1181 cp->startp = cp->phys.head.savep;
1182 cp->goalp = cpu_to_scr(goalp);
1183
1184#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
1185 /*
1186 * If direction is unknown, start at data_io.
1187 */
1188 if (dir == CAM_DIR_UNKNOWN)
1189 cp->phys.head.savep = cpu_to_scr(SCRIPTB_BA(np, data_io));
1190#endif
1191}
1192
1193/*
1194 * MEMORY ALLOCATOR.
1195 */
1196
1197#define SYM_MEM_PAGE_ORDER 0 /* 1 PAGE maximum */
1198#define SYM_MEM_CLUSTER_SHIFT (PAGE_SHIFT+SYM_MEM_PAGE_ORDER)
1199#define SYM_MEM_FREE_UNUSED /* Free unused pages immediately */
1200
1201#define SYM_MEM_WARN 1 /* Warn on failed operations */
1202
1203#define sym_get_mem_cluster() \
1204 (void *) __get_free_pages(GFP_ATOMIC, SYM_MEM_PAGE_ORDER)
1205#define sym_free_mem_cluster(p) \
1206 free_pages((unsigned long)p, SYM_MEM_PAGE_ORDER)
1207
1208/*
1209 * Link between free memory chunks of a given size.
1210 */
1211typedef struct sym_m_link {
1212 struct sym_m_link *next;
1213} *m_link_p;
1214
1215/*
1216 * Virtual to bus physical translation for a given cluster.
1217 * Such a structure is only useful with DMA abstraction.
1218 */
1219typedef struct sym_m_vtob { /* Virtual to Bus address translation */
1220 struct sym_m_vtob *next;
1221 void *vaddr; /* Virtual address */
1222 dma_addr_t baddr; /* Bus physical address */
1223} *m_vtob_p;
1224
1225/* Hash this stuff a bit to speed up translations */
1226#define VTOB_HASH_SHIFT 5
1227#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
1228#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
1229#define VTOB_HASH_CODE(m) \
1230 ((((unsigned long)(m)) >> SYM_MEM_CLUSTER_SHIFT) & VTOB_HASH_MASK)
1231
1232/*
1233 * Memory pool of a given kind.
1234 * Ideally, we want to use:
1235 * 1) 1 pool for memory we do not need to involve in DMA.
1236 * 2) The same pool for controllers that require same DMA
1237 * constraints and features.
1238 * The OS specific m_pool_ident_t thing and the sym_m_pool_match()
1239 * method are expected to tell the driver about that.
1240 */
1241typedef struct sym_m_pool {
1242 m_pool_ident_t dev_dmat; /* Identifies the pool (see above) */
1243 void * (*get_mem_cluster)(struct sym_m_pool *);
1244#ifdef SYM_MEM_FREE_UNUSED
1245 void (*free_mem_cluster)(struct sym_m_pool *, void *);
1246#endif
1247#define M_GET_MEM_CLUSTER() mp->get_mem_cluster(mp)
1248#define M_FREE_MEM_CLUSTER(p) mp->free_mem_cluster(mp, p)
1249 int nump;
1250 m_vtob_p vtob[VTOB_HASH_SIZE];
1251 struct sym_m_pool *next;
1252 struct sym_m_link h[SYM_MEM_CLUSTER_SHIFT - SYM_MEM_SHIFT + 1];
1253} *m_pool_p;
1254
1255/*
1256 * Alloc, free and translate addresses to bus physical
1257 * for DMAable memory.
1258 */
1259void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name);
1260void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name);
1261dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m);
1262
1263/*
1264 * Verbs used by the driver code for DMAable memory handling.
1265 * The _uvptv_ macro avoids a nasty warning about pointer to volatile
1266 * being discarded.
1267 */
1268#define _uvptv_(p) ((void *)((u_long)(p)))
1269
1270#define _sym_calloc_dma(np, l, n) __sym_calloc_dma(np->bus_dmat, l, n)
1271#define _sym_mfree_dma(np, p, l, n) \
1272 __sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n)
1273#define sym_calloc_dma(l, n) _sym_calloc_dma(np, l, n)
1274#define sym_mfree_dma(p, l, n) _sym_mfree_dma(np, p, l, n)
1275#define vtobus(p) __vtobus(np->bus_dmat, _uvptv_(p))
1276
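/*
 * Typical pattern (a sketch mirroring the badluntbl allocation in
 * sym_hipd.c): allocate zeroed DMAable memory, translate it to a bus
 * address usable by SCRIPTS, and free it with the same size and name tag.
 */
static inline int example_dma_table(struct sym_hcb *np, u32 *ba)
{
	u32 *tbl = sym_calloc_dma(256, "EXAMPLE_TBL");

	if (!tbl)
		return -ENOMEM;
	*ba = cpu_to_scr(vtobus(tbl));	/* bus address as seen by SCRIPTS */
	sym_mfree_dma(tbl, 256, "EXAMPLE_TBL");
	return 0;
}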
1277/*
1278 * We have to provide the driver memory allocator with methods for
1279 * it to maintain virtual to bus physical address translations.
1280 */
1281
1282#define sym_m_pool_match(mp_id1, mp_id2) (mp_id1 == mp_id2)
1283
1284static __inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
1285{
1286 void *vaddr = NULL;
1287 dma_addr_t baddr = 0;
1288
1289 vaddr = dma_alloc_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, &baddr,
1290 GFP_ATOMIC);
1291 if (vaddr) {
1292 vbp->vaddr = vaddr;
1293 vbp->baddr = baddr;
1294 }
1295 return vaddr;
1296}
1297
1298static __inline void sym_m_free_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
1299{
1300 dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr,
1301 vbp->baddr);
1302}
1303
1304#endif /* SYM_HIPD_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_malloc.c b/drivers/scsi/sym53c8xx_2/sym_malloc.c
new file mode 100644
index 000000000000..a34d403ccc6c
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_malloc.c
@@ -0,0 +1,382 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40#ifdef __FreeBSD__
41#include <dev/sym/sym_glue.h>
42#else
43#include "sym_glue.h"
44#endif
45
46/*
47 * Simple power of two buddy-like generic allocator.
48 * Provides naturally aligned memory chunks.
49 *
50 * This simple code is not intended to be fast, but to
51 * provide power of 2 aligned memory allocations.
52 * Since the SCRIPTS processor only supplies 8 bit arithmetic,
53 * this allocator allows simple and fast address calculations
54 * from the SCRIPTS code. In addition, cache line alignment
55 * is guaranteed for power of 2 cache line size.
56 *
57 * This allocator has been developed for the Linux sym53c8xx
58 * driver, since this O/S does not provide naturally aligned
59 * allocations.
60 * It has the advantage of allowing the driver to use private
61 * pages of memory that will be useful if we ever need to deal
62 * with IO MMUs for PCI.
63 */
64static void *___sym_malloc(m_pool_p mp, int size)
65{
66 int i = 0;
67 int s = (1 << SYM_MEM_SHIFT);
68 int j;
69 void *a;
70 m_link_p h = mp->h;
71
72 if (size > SYM_MEM_CLUSTER_SIZE)
73 return NULL;
74
75 while (size > s) {
76 s <<= 1;
77 ++i;
78 }
79
80 j = i;
81 while (!h[j].next) {
82 if (s == SYM_MEM_CLUSTER_SIZE) {
83 h[j].next = (m_link_p) M_GET_MEM_CLUSTER();
84 if (h[j].next)
85 h[j].next->next = NULL;
86 break;
87 }
88 ++j;
89 s <<= 1;
90 }
91 a = h[j].next;
92 if (a) {
93 h[j].next = h[j].next->next;
94 while (j > i) {
95 j -= 1;
96 s >>= 1;
97 h[j].next = (m_link_p) (a+s);
98 h[j].next->next = NULL;
99 }
100 }
101#ifdef DEBUG
102 printf("___sym_malloc(%d) = %p\n", size, (void *) a);
103#endif
104 return a;
105}
106
107/*
108 * Counter-part of the generic allocator.
109 */
110static void ___sym_mfree(m_pool_p mp, void *ptr, int size)
111{
112 int i = 0;
113 int s = (1 << SYM_MEM_SHIFT);
114 m_link_p q;
115 unsigned long a, b;
116 m_link_p h = mp->h;
117
118#ifdef DEBUG
119 printf("___sym_mfree(%p, %d)\n", ptr, size);
120#endif
121
122 if (size > SYM_MEM_CLUSTER_SIZE)
123 return;
124
125 while (size > s) {
126 s <<= 1;
127 ++i;
128 }
129
130 a = (unsigned long)ptr;
131
132 while (1) {
133 if (s == SYM_MEM_CLUSTER_SIZE) {
134#ifdef SYM_MEM_FREE_UNUSED
135 M_FREE_MEM_CLUSTER((void *)a);
136#else
137 ((m_link_p) a)->next = h[i].next;
138 h[i].next = (m_link_p) a;
139#endif
140 break;
141 }
142 b = a ^ s;
143 q = &h[i];
144 while (q->next && q->next != (m_link_p) b) {
145 q = q->next;
146 }
147 if (!q->next) {
148 ((m_link_p) a)->next = h[i].next;
149 h[i].next = (m_link_p) a;
150 break;
151 }
152 q->next = q->next->next;
153 a = a & b;
154 s <<= 1;
155 ++i;
156 }
157}
158
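/*
 * Worked example of the buddy arithmetic above (illustrative only):
 * freeing a 32-byte chunk at cluster offset 0x40 computes its buddy at
 * b = 0x40 ^ 0x20 = 0x60; if that buddy is also on the free list, the two
 * coalesce into a 64-byte chunk at 0x40 & 0x60 = 0x40 and the loop
 * repeats one size class up.
 */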
159/*
160 * Verbose and zeroing allocator that wraps the generic allocator.
161 */
162static void *__sym_calloc2(m_pool_p mp, int size, char *name, int uflags)
163{
164 void *p;
165
166 p = ___sym_malloc(mp, size);
167
168 if (DEBUG_FLAGS & DEBUG_ALLOC) {
169 printf ("new %-10s[%4d] @%p.\n", name, size, p);
170 }
171
172 if (p)
173 memset(p, 0, size);
174 else if (uflags & SYM_MEM_WARN)
175 printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);
176 return p;
177}
178#define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, SYM_MEM_WARN)
179
180/*
181 * Its counter-part.
182 */
183static void __sym_mfree(m_pool_p mp, void *ptr, int size, char *name)
184{
185 if (DEBUG_FLAGS & DEBUG_ALLOC)
186 printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
187
188 ___sym_mfree(mp, ptr, size);
189}
190
191/*
192 * Default memory pool we do not need to involve in DMA.
193 *
194 * With DMA abstraction, we use functions (methods), to
195 * distinguish between non DMAable memory and DMAable memory.
196 */
197static void *___mp0_get_mem_cluster(m_pool_p mp)
198{
199 void *m = sym_get_mem_cluster();
200 if (m)
201 ++mp->nump;
202 return m;
203}
204
205#ifdef SYM_MEM_FREE_UNUSED
206static void ___mp0_free_mem_cluster(m_pool_p mp, void *m)
207{
208 sym_free_mem_cluster(m);
209 --mp->nump;
210}
211#else
212#define ___mp0_free_mem_cluster NULL
213#endif
214
215static struct sym_m_pool mp0 = {
216 NULL,
217 ___mp0_get_mem_cluster,
218 ___mp0_free_mem_cluster
219};
220
221/*
222 * Methods that maintain DMAable pools according to user allocations.
223 * New pools are created on the fly when a new pool id is provided.
224 * They are deleted on the fly when they get emptied.
225 */
226/* Get a memory cluster that matches the DMA constraints of a given pool */
227static void * ___get_dma_mem_cluster(m_pool_p mp)
228{
229 m_vtob_p vbp;
230 void *vaddr;
231
232 vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
233 if (!vbp)
234 goto out_err;
235
236 vaddr = sym_m_get_dma_mem_cluster(mp, vbp);
237 if (vaddr) {
238 int hc = VTOB_HASH_CODE(vaddr);
239 vbp->next = mp->vtob[hc];
240 mp->vtob[hc] = vbp;
241 ++mp->nump;
242 }
243 return vaddr;
244out_err:
245 return NULL;
246}
247
248#ifdef SYM_MEM_FREE_UNUSED
249/* Free a memory cluster and associated resources for DMA */
250static void ___free_dma_mem_cluster(m_pool_p mp, void *m)
251{
252 m_vtob_p *vbpp, vbp;
253 int hc = VTOB_HASH_CODE(m);
254
255 vbpp = &mp->vtob[hc];
256 while (*vbpp && (*vbpp)->vaddr != m)
257 vbpp = &(*vbpp)->next;
258 if (*vbpp) {
259 vbp = *vbpp;
260 *vbpp = (*vbpp)->next;
261 sym_m_free_dma_mem_cluster(mp, vbp);
262 __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
263 --mp->nump;
264 }
265}
266#endif
267
268/* Fetch the memory pool for a given pool id (i.e. DMA constraints) */
269static __inline m_pool_p ___get_dma_pool(m_pool_ident_t dev_dmat)
270{
271 m_pool_p mp;
272 for (mp = mp0.next;
273 mp && !sym_m_pool_match(mp->dev_dmat, dev_dmat);
274 mp = mp->next);
275 return mp;
276}
277
278/* Create a new memory DMAable pool (when fetch failed) */
279static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat)
280{
281 m_pool_p mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
282 if (mp) {
283 mp->dev_dmat = dev_dmat;
284 mp->get_mem_cluster = ___get_dma_mem_cluster;
285#ifdef SYM_MEM_FREE_UNUSED
286 mp->free_mem_cluster = ___free_dma_mem_cluster;
287#endif
288 mp->next = mp0.next;
289 mp0.next = mp;
290 return mp;
291 }
292 return NULL;
293}
294
295#ifdef SYM_MEM_FREE_UNUSED
296/* Destroy a DMAable memory pool (when got emptied) */
297static void ___del_dma_pool(m_pool_p p)
298{
299 m_pool_p *pp = &mp0.next;
300
301 while (*pp && *pp != p)
302 pp = &(*pp)->next;
303 if (*pp) {
304 *pp = (*pp)->next;
305 __sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
306 }
307}
308#endif
309
310/* This lock protects only the memory allocation/free. */
311static DEFINE_SPINLOCK(sym53c8xx_lock);
312
313/*
314 * Actual allocator for DMAable memory.
315 */
316void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name)
317{
318 unsigned long flags;
319 m_pool_p mp;
320 void *m = NULL;
321
322 spin_lock_irqsave(&sym53c8xx_lock, flags);
323 mp = ___get_dma_pool(dev_dmat);
324 if (!mp)
325 mp = ___cre_dma_pool(dev_dmat);
326 if (!mp)
327 goto out;
328 m = __sym_calloc(mp, size, name);
329#ifdef SYM_MEM_FREE_UNUSED
330 if (!mp->nump)
331 ___del_dma_pool(mp);
332#endif
333
334 out:
335 spin_unlock_irqrestore(&sym53c8xx_lock, flags);
336 return m;
337}
338
339void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name)
340{
341 unsigned long flags;
342 m_pool_p mp;
343
344 spin_lock_irqsave(&sym53c8xx_lock, flags);
345 mp = ___get_dma_pool(dev_dmat);
346 if (!mp)
347 goto out;
348 __sym_mfree(mp, m, size, name);
349#ifdef SYM_MEM_FREE_UNUSED
350 if (!mp->nump)
351 ___del_dma_pool(mp);
352#endif
353 out:
354 spin_unlock_irqrestore(&sym53c8xx_lock, flags);
355}
356
357/*
358 * Actual virtual to bus physical address translator
359 * for 32 bit addressable DMAable memory.
360 */
361dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m)
362{
363 unsigned long flags;
364 m_pool_p mp;
365 int hc = VTOB_HASH_CODE(m);
366 m_vtob_p vp = NULL;
367 void *a = (void *)((unsigned long)m & ~SYM_MEM_CLUSTER_MASK);
368 dma_addr_t b;
369
370 spin_lock_irqsave(&sym53c8xx_lock, flags);
371 mp = ___get_dma_pool(dev_dmat);
372 if (mp) {
373 vp = mp->vtob[hc];
374 while (vp && vp->vaddr != a)
375 vp = vp->next;
376 }
377 if (!vp)
378 panic("sym: VTOBUS FAILED!\n");
379 b = vp->baddr + (m - a);
380 spin_unlock_irqrestore(&sym53c8xx_lock, flags);
381 return b;
382}
diff --git a/drivers/scsi/sym53c8xx_2/sym_misc.h b/drivers/scsi/sym53c8xx_2/sym_misc.h
new file mode 100644
index 000000000000..0433d5d0caf3
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_misc.h
@@ -0,0 +1,192 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40#ifndef SYM_MISC_H
41#define SYM_MISC_H
42
43/*
44 * A la VMS/CAM-3 queue management.
45 */
46typedef struct sym_quehead {
47 struct sym_quehead *flink; /* Forward pointer */
48 struct sym_quehead *blink; /* Backward pointer */
49} SYM_QUEHEAD;
50
51#define sym_que_init(ptr) do { \
52 (ptr)->flink = (ptr); (ptr)->blink = (ptr); \
53} while (0)
54
55static __inline struct sym_quehead *sym_que_first(struct sym_quehead *head)
56{
57 return (head->flink == head) ? 0 : head->flink;
58}
59
60static __inline struct sym_quehead *sym_que_last(struct sym_quehead *head)
61{
62 return (head->blink == head) ? 0 : head->blink;
63}
64
65static __inline void __sym_que_add(struct sym_quehead * new,
66 struct sym_quehead * blink,
67 struct sym_quehead * flink)
68{
69 flink->blink = new;
70 new->flink = flink;
71 new->blink = blink;
72 blink->flink = new;
73}
74
75static __inline void __sym_que_del(struct sym_quehead * blink,
76 struct sym_quehead * flink)
77{
78 flink->blink = blink;
79 blink->flink = flink;
80}
81
82static __inline int sym_que_empty(struct sym_quehead *head)
83{
84 return head->flink == head;
85}
86
87static __inline void sym_que_splice(struct sym_quehead *list,
88 struct sym_quehead *head)
89{
90 struct sym_quehead *first = list->flink;
91
92 if (first != list) {
93 struct sym_quehead *last = list->blink;
94 struct sym_quehead *at = head->flink;
95
96 first->blink = head;
97 head->flink = first;
98
99 last->flink = at;
100 at->blink = last;
101 }
102}
103
104static __inline void sym_que_move(struct sym_quehead *orig,
105 struct sym_quehead *dest)
106{
107 struct sym_quehead *first, *last;
108
109 first = orig->flink;
110 if (first != orig) {
111 first->blink = dest;
112 dest->flink = first;
113 last = orig->blink;
114 last->flink = dest;
115 dest->blink = last;
116 orig->flink = orig;
117 orig->blink = orig;
118 } else {
119 dest->flink = dest;
120 dest->blink = dest;
121 }
122}
123
124#define sym_que_entry(ptr, type, member) \
125 ((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member)))
126
127
128#define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink)
129
130#define sym_remque(el) __sym_que_del((el)->blink, (el)->flink)
131
132#define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink)
133
134static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
135{
136 struct sym_quehead *elem = head->flink;
137
138 if (elem != head)
139 __sym_que_del(head, elem->flink);
140 else
141 elem = NULL;
142 return elem;
143}
144
145#define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head)
146
147static __inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head)
148{
149 struct sym_quehead *elem = head->blink;
150
151 if (elem != head)
152 __sym_que_del(elem->blink, head);
153 else
154 elem = 0;
155 return elem;
156}
157
158/*
159 * This one may be useful.
160 */
161#define FOR_EACH_QUEUED_ELEMENT(head, qp) \
162 for (qp = (head)->flink; qp != (head); qp = qp->flink)
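/*
 * Sketch of walking a queue with the macros above (patterned on the CCB
 * queues declared in sym_hipd.h; the sym_hcb/sym_ccb types are assumed to
 * be in scope where such code would live):
 */
static __inline int example_count_busy(struct sym_hcb *np, int target)
{
	SYM_QUEHEAD *qp;
	int n = 0;

	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);

		if (cp->target == target)
			n++;
	}
	return n;
}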
163/*
164 * FreeBSD does not offer our kind of queue in the CAM CCB.
165 * So, we have to cast.
166 */
167#define sym_qptr(p) ((struct sym_quehead *) (p))
168
169/*
170 * Simple bitmap operations.
171 */
172#define sym_set_bit(p, n) (((u32 *)(p))[(n)>>5] |= (1<<((n)&0x1f)))
173#define sym_clr_bit(p, n) (((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
174#define sym_is_bit(p, n) (((u32 *)(p))[(n)>>5] & (1<<((n)&0x1f)))
175
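/*
 * Usage sketch against the per-target lun_map declared in sym_hipd.h
 * (the tp pointer is assumed valid):
 */
static __inline void example_mark_lun_seen(struct sym_tcb *tp, int lun)
{
	if (!sym_is_bit(tp->lun_map, lun))
		sym_set_bit(tp->lun_map, lun);	/* LUN answered at least one IO */
}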
176/*
177 * The below round up/down macros are to be used with a constant
178 * as argument (sizeof(...) for example), for the compiler to
179 * optimize the whole thing.
180 */
181#define _U_(a,m) (a)<=(1<<m)?m:
182
183/*
184 * Round up logarithm to base 2 of a 16 bit constant.
185 */
186#define _LGRU16_(a) \
187( \
188 _U_(a, 0)_U_(a, 1)_U_(a, 2)_U_(a, 3)_U_(a, 4)_U_(a, 5)_U_(a, 6)_U_(a, 7) \
189 _U_(a, 8)_U_(a, 9)_U_(a,10)_U_(a,11)_U_(a,12)_U_(a,13)_U_(a,14)_U_(a,15) \
190 16)
191
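/*
 * Worked example: _LGRU16_(24) expands to 24<=1?0: 24<=2?1: ... 24<=32?5: ...
 * and evaluates to 5, the smallest m with (1<<m) >= 24. CCB_HASH_CODE()
 * in sym_hipd.h relies on this to shift the DSA right by the rounded-up
 * base-2 logarithm of sizeof(struct sym_ccb) at compile time.
 */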
192#endif /* SYM_MISC_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
new file mode 100644
index 000000000000..1b721e3ec520
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -0,0 +1,771 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40#include "sym_glue.h"
41#include "sym_nvram.h"
42
43#ifdef SYM_CONF_DEBUG_NVRAM
44static u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120};
45#endif
46
47/*
48 * Get host setup from NVRAM.
49 */
50void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram)
51{
52 /*
53 * Get parity checking, host ID, verbose mode
54 * and miscellaneous host flags from NVRAM.
55 */
56 switch (nvram->type) {
57 case SYM_SYMBIOS_NVRAM:
58 if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
59 np->rv_scntl0 &= ~0x0a;
60 np->myaddr = nvram->data.Symbios.host_id & 0x0f;
61 if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
62 np->verbose += 1;
63 if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO)
64 shost->reverse_ordering = 1;
65 if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET)
66 np->usrflags |= SYM_AVOID_BUS_RESET;
67 break;
68 case SYM_TEKRAM_NVRAM:
69 np->myaddr = nvram->data.Tekram.host_id & 0x0f;
70 break;
71#ifdef CONFIG_PARISC
72 case SYM_PARISC_PDC:
73 if (nvram->data.parisc.host_id != -1)
74 np->myaddr = nvram->data.parisc.host_id;
75 if (nvram->data.parisc.factor != -1)
76 np->minsync = nvram->data.parisc.factor;
77 if (nvram->data.parisc.width != -1)
78 np->maxwide = nvram->data.parisc.width;
79 switch (nvram->data.parisc.mode) {
80 case 0: np->scsi_mode = SMODE_SE; break;
81 case 1: np->scsi_mode = SMODE_HVD; break;
82 case 2: np->scsi_mode = SMODE_LVD; break;
83 default: break;
84 }
85#endif
86 default:
87 break;
88 }
89}
90
91/*
92 * Get target set-up from Symbios format NVRAM.
93 */
94static void
95sym_Symbios_setup_target(struct sym_hcb *np, int target, Symbios_nvram *nvram)
96{
97 struct sym_tcb *tp = &np->target[target];
98 Symbios_target *tn = &nvram->target[target];
99
100 tp->usrtags =
101 (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0;
102
103 if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
104 tp->usrflags &= ~SYM_DISC_ENABLED;
105 if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
106 tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
107 if (!(tn->flags & SYMBIOS_SCAN_LUNS))
108 tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
109}
110
111/*
112 * Get target set-up from Tekram format NVRAM.
113 */
114static void
115sym_Tekram_setup_target(struct sym_hcb *np, int target, Tekram_nvram *nvram)
116{
117 struct sym_tcb *tp = &np->target[target];
118 struct Tekram_target *tn = &nvram->target[target];
119
120 if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
121 tp->usrtags = 2 << nvram->max_tags_index;
122 }
123
124 if (tn->flags & TEKRAM_DISCONNECT_ENABLE)
125 tp->usrflags |= SYM_DISC_ENABLED;
126
127 /* If any device does not support parity, we will not use this option */
128 if (!(tn->flags & TEKRAM_PARITY_CHECK))
129 np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
130}
131
132/*
133 * Get target setup from NVRAM.
134 */
135void sym_nvram_setup_target(struct sym_hcb *np, int target, struct sym_nvram *nvp)
136{
137 switch (nvp->type) {
138 case SYM_SYMBIOS_NVRAM:
139 sym_Symbios_setup_target(np, target, &nvp->data.Symbios);
140 break;
141 case SYM_TEKRAM_NVRAM:
142 sym_Tekram_setup_target(np, target, &nvp->data.Tekram);
143 break;
144 default:
145 break;
146 }
147}
148
149#ifdef SYM_CONF_DEBUG_NVRAM
150/*
151 * Dump Symbios format NVRAM for debugging purpose.
152 */
153static void sym_display_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram)
154{
155 int i;
156
157 /* display Symbios nvram host data */
158 printf("%s: HOST ID=%d%s%s%s%s%s%s\n",
159 sym_name(np), nvram->host_id & 0x0f,
160 (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
161 (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
162 (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"",
163 (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"",
164 (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET" :"",
165 (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");
166
167 /* display Symbios nvram drive data */
168 for (i = 0 ; i < 15 ; i++) {
169 struct Symbios_target *tn = &nvram->target[i];
170 printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
171 sym_name(np), i,
172 (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
173 (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
174 (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
175 (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
176 tn->bus_width,
177 tn->sync_period / 4,
178 tn->timeout);
179 }
180}
181
182/*
183 * Dump TEKRAM format NVRAM for debugging purpose.
184 */
185static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram)
186{
187 int i, tags, boot_delay;
188 char *rem;
189
190 /* display Tekram nvram host data */
191 tags = 2 << nvram->max_tags_index;
192 boot_delay = 0;
193 if (nvram->boot_delay_index < 6)
194 boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
195 switch ((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
196 default:
197 case 0: rem = ""; break;
198 case 1: rem = " REMOVABLE=boot device"; break;
199 case 2: rem = " REMOVABLE=all"; break;
200 }
201
202 printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
203 sym_name(np), nvram->host_id & 0x0f,
204 (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
205 (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES":"",
206 (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
207 (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
208 (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
209 (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
210 (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
211 (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
212 rem, boot_delay, tags);
213
214 /* display Tekram nvram drive data */
215 for (i = 0; i <= 15; i++) {
216 int sync, j;
217 struct Tekram_target *tn = &nvram->target[i];
218 j = tn->sync_index & 0xf;
219 sync = Tekram_sync[j];
220 printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
221 sym_name(np), i,
222 (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
223 (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
224 (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
225 (tn->flags & TEKRAM_START_CMD) ? " START" : "",
226 (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
227 (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
228 sync);
229 }
230}
231#else
232static void sym_display_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram) { (void)np; (void)nvram; }
233static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram) { (void)np; (void)nvram; }
234#endif /* SYM_CONF_DEBUG_NVRAM */
235
236
237/*
238 * 24C16 EEPROM reading.
239 *
240 * GPIO0 - data in/data out
241 * GPIO1 - clock
242 * Symbios NVRAM wiring now also used by Tekram.
243 */
244
245#define SET_BIT 0
246#define CLR_BIT 1
247#define SET_CLK 2
248#define CLR_CLK 3
249
250/*
251 * Set/clear the data bit in GPIO0 or the clock bit in GPIO1
252 */
253static void S24C16_set_bit(struct sym_device *np, u_char write_bit, u_char *gpreg,
254 int bit_mode)
255{
256 udelay(5);
257 switch (bit_mode) {
258 case SET_BIT:
259 *gpreg |= write_bit;
260 break;
261 case CLR_BIT:
262 *gpreg &= 0xfe;
263 break;
264 case SET_CLK:
265 *gpreg |= 0x02;
266 break;
267 case CLR_CLK:
268 *gpreg &= 0xfd;
269 break;
270
271 }
272 OUTB(np, nc_gpreg, *gpreg);
273 udelay(5);
274}
275
276/*
277 * Send START condition to NVRAM to wake it up.
278 */
279static void S24C16_start(struct sym_device *np, u_char *gpreg)
280{
281 S24C16_set_bit(np, 1, gpreg, SET_BIT);
282 S24C16_set_bit(np, 0, gpreg, SET_CLK);
283 S24C16_set_bit(np, 0, gpreg, CLR_BIT);
284 S24C16_set_bit(np, 0, gpreg, CLR_CLK);
285}
286
287/*
288 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
289 */
290static void S24C16_stop(struct sym_device *np, u_char *gpreg)
291{
292 S24C16_set_bit(np, 0, gpreg, SET_CLK);
293 S24C16_set_bit(np, 1, gpreg, SET_BIT);
294}
295
296/*
297 * Read or write a bit to the NVRAM:
298 * reads when GPIO0 is an input, writes when GPIO0 is an output.
299 */
300static void S24C16_do_bit(struct sym_device *np, u_char *read_bit, u_char write_bit,
301 u_char *gpreg)
302{
303 S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
304 S24C16_set_bit(np, 0, gpreg, SET_CLK);
305 if (read_bit)
306 *read_bit = INB(np, nc_gpreg);
307 S24C16_set_bit(np, 0, gpreg, CLR_CLK);
308 S24C16_set_bit(np, 0, gpreg, CLR_BIT);
309}
310
311/*
312 * Output an ACK to the NVRAM after reading,
313 * change GPIO0 to an output for the ACK, then back to an input when done
314 */
315static void S24C16_write_ack(struct sym_device *np, u_char write_bit, u_char *gpreg,
316 u_char *gpcntl)
317{
318 OUTB(np, nc_gpcntl, *gpcntl & 0xfe);
319 S24C16_do_bit(np, NULL, write_bit, gpreg);
320 OUTB(np, nc_gpcntl, *gpcntl);
321}
322
323/*
324 * Input an ACK from NVRAM after writing,
325 * change GPIO0 to an input for the ACK, then back to an output when done
326 */
327static void S24C16_read_ack(struct sym_device *np, u_char *read_bit, u_char *gpreg,
328 u_char *gpcntl)
329{
330 OUTB(np, nc_gpcntl, *gpcntl | 0x01);
331 S24C16_do_bit(np, read_bit, 1, gpreg);
332 OUTB(np, nc_gpcntl, *gpcntl);
333}
334
335/*
336 * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
337 * GPIO0 must already be set as an output
338 */
339static void S24C16_write_byte(struct sym_device *np, u_char *ack_data, u_char write_data,
340 u_char *gpreg, u_char *gpcntl)
341{
342 int x;
343
344 for (x = 0; x < 8; x++)
345 S24C16_do_bit(np, NULL, (write_data >> (7 - x)) & 0x01, gpreg);
346
347 S24C16_read_ack(np, ack_data, gpreg, gpcntl);
348}
349
350/*
351 * READ a byte from the NVRAM and then send an ACK to say we have got it,
352 * GPIO0 must already be set as an input
353 */
354static void S24C16_read_byte(struct sym_device *np, u_char *read_data, u_char ack_data,
355 u_char *gpreg, u_char *gpcntl)
356{
357 int x;
358 u_char read_bit;
359
360 *read_data = 0;
361 for (x = 0; x < 8; x++) {
362 S24C16_do_bit(np, &read_bit, 1, gpreg);
363 *read_data |= ((read_bit & 0x01) << (7 - x));
364 }
365
366 S24C16_write_ack(np, ack_data, gpreg, gpcntl);
367}
368
369#if SYM_CONF_NVRAM_WRITE_SUPPORT
370/*
371 * Write 'len' bytes starting at 'offset'.
372 */
373static int sym_write_S24C16_nvram(struct sym_device *np, int offset,
374 u_char *data, int len)
375{
376 u_char gpcntl, gpreg;
377 u_char old_gpcntl, old_gpreg;
378 u_char ack_data;
379 int x, y;
380
381 /* save current state of GPCNTL and GPREG */
382 old_gpreg = INB(np, nc_gpreg);
383 old_gpcntl = INB(np, nc_gpcntl);
384 gpcntl = old_gpcntl & 0x1c;
385
386 /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 into a known state */
387 OUTB(np, nc_gpreg, old_gpreg);
388 OUTB(np, nc_gpcntl, gpcntl);
389
390 /* this is to set NVRAM into a known state with GPIO0/1 both low */
391 gpreg = old_gpreg;
392 S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
393 S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
394
395 /* now set NVRAM inactive with GPIO0/1 both high */
396 S24C16_stop(np, &gpreg);
397
398 /* NVRAM has to be written in segments of 16 bytes */
399 for (x = 0; x < len ; x += 16) {
400 do {
401 S24C16_start(np, &gpreg);
402 S24C16_write_byte(np, &ack_data,
403 0xa0 | (((offset+x) >> 7) & 0x0e),
404 &gpreg, &gpcntl);
405 } while (ack_data & 0x01);
406
407 S24C16_write_byte(np, &ack_data, (offset+x) & 0xff,
408 &gpreg, &gpcntl);
409
410 for (y = 0; y < 16; y++)
411 S24C16_write_byte(np, &ack_data, data[x+y],
412 &gpreg, &gpcntl);
413 S24C16_stop(np, &gpreg);
414 }
415
416 /* return GPIO0/1 to original states after having accessed NVRAM */
417 OUTB(np, nc_gpcntl, old_gpcntl);
418 OUTB(np, nc_gpreg, old_gpreg);
419
420 return 0;
421}
422#endif /* SYM_CONF_NVRAM_WRITE_SUPPORT */
423
424/*
425 * Read 'len' bytes starting at 'offset'.
426 */
427static int sym_read_S24C16_nvram(struct sym_device *np, int offset, u_char *data, int len)
428{
429 u_char gpcntl, gpreg;
430 u_char old_gpcntl, old_gpreg;
431 u_char ack_data;
432 int retv = 1;
433 int x;
434
435 /* save current state of GPCNTL and GPREG */
436 old_gpreg = INB(np, nc_gpreg);
437 old_gpcntl = INB(np, nc_gpcntl);
438 gpcntl = old_gpcntl & 0x1c;
439
440 /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 into a known state */
441 OUTB(np, nc_gpreg, old_gpreg);
442 OUTB(np, nc_gpcntl, gpcntl);
443
444 /* this is to set NVRAM into a known state with GPIO0/1 both low */
445 gpreg = old_gpreg;
446 S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
447 S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
448
449 /* now set NVRAM inactive with GPIO0/1 both high */
450 S24C16_stop(np, &gpreg);
451
452 /* activate NVRAM */
453 S24C16_start(np, &gpreg);
454
455 /* write device code and random address MSB */
456 S24C16_write_byte(np, &ack_data,
457 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
458 if (ack_data & 0x01)
459 goto out;
460
461 /* write random address LSB */
462 S24C16_write_byte(np, &ack_data,
463 offset & 0xff, &gpreg, &gpcntl);
464 if (ack_data & 0x01)
465 goto out;
466
467 /* regenerate START state to set up for reading */
468 S24C16_start(np, &gpreg);
469
470 /* rewrite device code and address MSB with read bit set (lsb = 0x01) */
471 S24C16_write_byte(np, &ack_data,
472 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
473 if (ack_data & 0x01)
474 goto out;
475
476 /* now set up GPIO0 for inputting data */
477 gpcntl |= 0x01;
478 OUTB(np, nc_gpcntl, gpcntl);
479
480 /* input all requested data - only part of total NVRAM */
481 for (x = 0; x < len; x++)
482 S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
483
484 /* finally put NVRAM back in inactive mode */
485 gpcntl &= 0xfe;
486 OUTB(np, nc_gpcntl, gpcntl);
487 S24C16_stop(np, &gpreg);
488 retv = 0;
489out:
490 /* return GPIO0/1 to original states after having accessed NVRAM */
491 OUTB(np, nc_gpcntl, old_gpcntl);
492 OUTB(np, nc_gpreg, old_gpreg);
493
494 return retv;
495}
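To make the select-byte arithmetic above concrete, here is a worked example (illustration only, with arithmetic added here): the 24C16 takes a 1010 device code, three block-select bits carrying address bits 10..8, and the R/W bit, while the low 8 address bits go out as a separate word-address byte.

static void example_s24c16_addressing(void)
{
	int offset = 0x100;	/* SYMBIOS_NVRAM_ADDRESS: start of the Symbios area */

	unsigned char sel_wr = 0xa0 | ((offset >> 7) & 0x0e);	/* == 0xa2: write phase, block 1 */
	unsigned char sel_rd = 0xa1 | ((offset >> 7) & 0x0e);	/* == 0xa3: read phase, same block */
	unsigned char word_addr = offset & 0xff;		/* == 0x00: offset within the block */

	(void)sel_wr; (void)sel_rd; (void)word_addr;
}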
496
497#undef SET_BIT
498#undef CLR_BIT
499#undef SET_CLK
500#undef CLR_CLK
501
502/*
503 * Try reading Symbios NVRAM.
504 * Return 0 if OK.
505 */
506static int sym_read_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram)
507{
508 static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
509 u_char *data = (u_char *) nvram;
510 int len = sizeof(*nvram);
511 u_short csum;
512 int x;
513
514 /* probe the 24c16 and read the SYMBIOS 24c16 area */
515 if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
516 return 1;
517
518 /* check valid NVRAM signature, verify byte count and checksum */
519 if (nvram->type != 0 ||
520 memcmp(nvram->trailer, Symbios_trailer, 6) ||
521 nvram->byte_count != len - 12)
522 return 1;
523
524 /* verify checksum */
525 for (x = 6, csum = 0; x < len - 6; x++)
526 csum += data[x];
527 if (csum != nvram->checksum)
528 return 1;
529
530 return 0;
531}
532
533/*
534 * 93C46 EEPROM reading.
535 *
536 * GPIO0 - data in
537 * GPIO1 - data out
538 * GPIO2 - clock
539 * GPIO4 - chip select
540 *
541 * Used by Tekram.
542 */
543
544/*
545 * Pulse clock bit in GPIO2
546 */
547static void T93C46_Clk(struct sym_device *np, u_char *gpreg)
548{
549 OUTB(np, nc_gpreg, *gpreg | 0x04);
550 udelay(2);
551 OUTB(np, nc_gpreg, *gpreg);
552}
553
554/*
555 * Read bit from NVRAM
556 */
557static void T93C46_Read_Bit(struct sym_device *np, u_char *read_bit, u_char *gpreg)
558{
559 udelay(2);
560 T93C46_Clk(np, gpreg);
561 *read_bit = INB(np, nc_gpreg);
562}
563
564/*
565 * Write data bit to GPIO1
566 */
567static void T93C46_Write_Bit(struct sym_device *np, u_char write_bit, u_char *gpreg)
568{
569 if (write_bit & 0x01)
570 *gpreg |= 0x02;
571 else
572 *gpreg &= 0xfd;
573
574 *gpreg |= 0x10;
575
576 OUTB(np, nc_gpreg, *gpreg);
577 udelay(2);
578
579 T93C46_Clk(np, gpreg);
580}
581
582/*
583 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
584 */
585static void T93C46_Stop(struct sym_device *np, u_char *gpreg)
586{
587 *gpreg &= 0xef;
588 OUTB(np, nc_gpreg, *gpreg);
589 udelay(2);
590
591 T93C46_Clk(np, gpreg);
592}
593
594/*
595 * Send read command and address to NVRAM
596 */
597static void T93C46_Send_Command(struct sym_device *np, u_short write_data,
598 u_char *read_bit, u_char *gpreg)
599{
600 int x;
601
602 /* send 9 bits, start bit (1), command (2), address (6) */
603 for (x = 0; x < 9; x++)
604 T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
605
606 *read_bit = INB(np, nc_gpreg);
607}
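As a worked example of the 9-bit value shifted out above (illustration only): the READ command built by T93C46_Read_Data below as 0x180 | addr packs a start bit, the two-bit READ opcode (10) and a 6-bit word address, sent MSB first.

static void example_t93c46_read_command(void)
{
	unsigned short cmd = 0x180 | 5;	/* 1 10 000101: start bit, READ opcode, word address 5 */
	(void)cmd;
}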
608
609/*
610 * READ 2 bytes from the NVRAM
611 */
612static void T93C46_Read_Word(struct sym_device *np,
613 unsigned short *nvram_data, unsigned char *gpreg)
614{
615 int x;
616 u_char read_bit;
617
618 *nvram_data = 0;
619 for (x = 0; x < 16; x++) {
620 T93C46_Read_Bit(np, &read_bit, gpreg);
621
622 if (read_bit & 0x01)
623 *nvram_data |= (0x01 << (15 - x));
624 else
625 *nvram_data &= ~(0x01 << (15 - x));
626 }
627}
628
629/*
630 * Read Tekram NVRAM data.
631 */
632static int T93C46_Read_Data(struct sym_device *np, unsigned short *data,
633 int len, unsigned char *gpreg)
634{
635 int x;
636
637 for (x = 0; x < len; x++) {
638 unsigned char read_bit;
639 /* output read command and address */
640 T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
641 if (read_bit & 0x01)
642 return 1; /* Bad */
643 T93C46_Read_Word(np, &data[x], gpreg);
644 T93C46_Stop(np, gpreg);
645 }
646
647 return 0;
648}
649
650/*
651 * Try reading 93C46 Tekram NVRAM.
652 */
653static int sym_read_T93C46_nvram(struct sym_device *np, Tekram_nvram *nvram)
654{
655 u_char gpcntl, gpreg;
656 u_char old_gpcntl, old_gpreg;
657 int retv = 1;
658
659 /* save current state of GPCNTL and GPREG */
660 old_gpreg = INB(np, nc_gpreg);
661 old_gpcntl = INB(np, nc_gpcntl);
662
663 /* set up GPREG & GPCNTL to set GPIO0/1/2/4 into a known state, 0 in,
664 1/2/4 out */
665 gpreg = old_gpreg & 0xe9;
666 OUTB(np, nc_gpreg, gpreg);
667 gpcntl = (old_gpcntl & 0xe9) | 0x09;
668 OUTB(np, nc_gpcntl, gpcntl);
669
670 /* input all of NVRAM, 64 words */
671 retv = T93C46_Read_Data(np, (u_short *) nvram,
672 sizeof(*nvram) / sizeof(short), &gpreg);
673
674 /* return GPIO0/1/2/4 to original states after having accessed NVRAM */
675 OUTB(np, nc_gpcntl, old_gpcntl);
676 OUTB(np, nc_gpreg, old_gpreg);
677
678 return retv;
679}
680
681/*
682 * Try reading Tekram NVRAM.
683 * Return 0 if OK.
684 */
685static int sym_read_Tekram_nvram (struct sym_device *np, Tekram_nvram *nvram)
686{
687 u_char *data = (u_char *) nvram;
688 int len = sizeof(*nvram);
689 u_short csum;
690 int x;
691
692 switch (np->device_id) {
693 case PCI_DEVICE_ID_NCR_53C885:
694 case PCI_DEVICE_ID_NCR_53C895:
695 case PCI_DEVICE_ID_NCR_53C896:
696 x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
697 data, len);
698 break;
699 case PCI_DEVICE_ID_NCR_53C875:
700 x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
701 data, len);
702 if (!x)
703 break;
704 default: /* other chips, or 53C875 fall-through when its 24C16 read failed */
705 x = sym_read_T93C46_nvram(np, nvram);
706 break;
707 }
708 if (x)
709 return 1;
710
711 /* verify checksum */
712 for (x = 0, csum = 0; x < len - 1; x += 2)
713 csum += data[x] + (data[x+1] << 8);
714 if (csum != 0x1234)
715 return 1;
716
717 return 0;
718}
719
720#ifdef CONFIG_PARISC
721/*
722 * Host firmware (PDC) keeps a table for altering SCSI capabilities.
723 * Many newer machines export one channel of the 53c896 chip as SE, 50-pin HD.
724 * Also used for Multi-initiator SCSI clusters to set the SCSI Initiator ID.
725 */
726static int sym_read_parisc_pdc(struct sym_device *np, struct pdc_initiator *pdc)
727{
728 struct hardware_path hwpath;
729 get_pci_node_path(np->pdev, &hwpath);
730 if (!pdc_get_initiator(&hwpath, pdc))
731 return 0;
732
733 return SYM_PARISC_PDC;
734}
735#else
736static int sym_read_parisc_pdc(struct sym_device *np, struct pdc_initiator *x)
737{
738 return 0;
739}
740#endif
741
742/*
743 * Try reading Symbios or Tekram NVRAM
744 */
745int sym_read_nvram(struct sym_device *np, struct sym_nvram *nvp)
746{
747 if (!sym_read_Symbios_nvram(np, &nvp->data.Symbios)) {
748 nvp->type = SYM_SYMBIOS_NVRAM;
749 sym_display_Symbios_nvram(np, &nvp->data.Symbios);
750 } else if (!sym_read_Tekram_nvram(np, &nvp->data.Tekram)) {
751 nvp->type = SYM_TEKRAM_NVRAM;
752 sym_display_Tekram_nvram(np, &nvp->data.Tekram);
753 } else {
754 nvp->type = sym_read_parisc_pdc(np, &nvp->data.parisc);
755 }
756 return nvp->type;
757}
758
759char *sym_nvram_type(struct sym_nvram *nvp)
760{
761 switch (nvp->type) {
762 case SYM_SYMBIOS_NVRAM:
763 return "Symbios NVRAM";
764 case SYM_TEKRAM_NVRAM:
765 return "Tekram NVRAM";
766 case SYM_PARISC_PDC:
767 return "PA-RISC Firmware";
768 default:
769 return "No NVRAM";
770 }
771}
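A minimal sketch of how a caller could drive this interface (not the actual glue code; the example_ name and loop bound are chosen here, and the declarations are assumed to come in via sym_glue.h): probe the EEPROM once per controller, then apply the host and per-target settings.

static void example_apply_nvram(struct Scsi_Host *shost, struct sym_hcb *np,
				struct sym_device *dev)
{
	struct sym_nvram nvram;
	int t;

	if (sym_read_nvram(dev, &nvram))	/* returns the detected type, 0 when nothing was found */
		printf("%s: %s detected\n", sym_name(dev), sym_nvram_type(&nvram));

	sym_nvram_setup_host(shost, np, &nvram);	/* safe for type 0: falls into the default case */
	for (t = 0; t < 16; t++)	/* both NVRAM layouts describe 16 targets */
		sym_nvram_setup_target(np, t, &nvram);
}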
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.h b/drivers/scsi/sym53c8xx_2/sym_nvram.h
new file mode 100644
index 000000000000..1538bede5277
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.h
@@ -0,0 +1,214 @@
1/*
2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3 * of PCI-SCSI IO processors.
4 *
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
6 *
7 * This driver is derived from the Linux sym53c8xx driver.
8 * Copyright (C) 1998-2000 Gerard Roudier
9 *
10 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11 * a port of the FreeBSD ncr driver to Linux-1.2.13.
12 *
13 * The original ncr driver has been written for 386bsd and FreeBSD by
14 * Wolfgang Stanglmeier <wolf@cologne.de>
15 * Stefan Esser <se@mi.Uni-Koeln.de>
16 * Copyright (C) 1994 Wolfgang Stanglmeier
17 *
18 * Other major contributions:
19 *
20 * NVRAM detection and reading.
21 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22 *
23 *-----------------------------------------------------------------------------
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */
39
40#ifndef SYM_NVRAM_H
41#define SYM_NVRAM_H
42
43#include "sym53c8xx.h"
44
45/*
46 * Symbios NVRAM data format
47 */
48#define SYMBIOS_NVRAM_SIZE 368
49#define SYMBIOS_NVRAM_ADDRESS 0x100
50
51struct Symbios_nvram {
52/* Header 6 bytes */
53 u_short type; /* 0x0000 */
54 u_short byte_count; /* excluding header/trailer */
55 u_short checksum;
56
57/* Controller set up 20 bytes */
58 u_char v_major; /* 0x00 */
59 u_char v_minor; /* 0x30 */
60 u32 boot_crc;
61 u_short flags;
62#define SYMBIOS_SCAM_ENABLE (1)
63#define SYMBIOS_PARITY_ENABLE (1<<1)
64#define SYMBIOS_VERBOSE_MSGS (1<<2)
65#define SYMBIOS_CHS_MAPPING (1<<3)
66#define SYMBIOS_NO_NVRAM (1<<3) /* ??? */
67 u_short flags1;
68#define SYMBIOS_SCAN_HI_LO (1)
69 u_short term_state;
70#define SYMBIOS_TERM_CANT_PROGRAM (0)
71#define SYMBIOS_TERM_ENABLED (1)
72#define SYMBIOS_TERM_DISABLED (2)
73 u_short rmvbl_flags;
74#define SYMBIOS_RMVBL_NO_SUPPORT (0)
75#define SYMBIOS_RMVBL_BOOT_DEVICE (1)
76#define SYMBIOS_RMVBL_MEDIA_INSTALLED (2)
77 u_char host_id;
78 u_char num_hba; /* 0x04 */
79 u_char num_devices; /* 0x10 */
80 u_char max_scam_devices; /* 0x04 */
81 u_char num_valid_scam_devices; /* 0x00 */
82 u_char flags2;
83#define SYMBIOS_AVOID_BUS_RESET (1<<2)
84
85/* Boot order 14 bytes * 4 */
86 struct Symbios_host{
87 u_short type; /* 4:8xx / 0:nok */
88 u_short device_id; /* PCI device id */
89 u_short vendor_id; /* PCI vendor id */
90 u_char bus_nr; /* PCI bus number */
91 u_char device_fn; /* PCI device/function number << 3*/
92 u_short word8;
93 u_short flags;
94#define SYMBIOS_INIT_SCAN_AT_BOOT (1)
95 u_short io_port; /* PCI io_port address */
96 } host[4];
97
98/* Targets 8 bytes * 16 */
99 struct Symbios_target {
100 u_char flags;
101#define SYMBIOS_DISCONNECT_ENABLE (1)
102#define SYMBIOS_SCAN_AT_BOOT_TIME (1<<1)
103#define SYMBIOS_SCAN_LUNS (1<<2)
104#define SYMBIOS_QUEUE_TAGS_ENABLED (1<<3)
105 u_char rsvd;
106 u_char bus_width; /* 0x08/0x10 */
107 u_char sync_offset;
108 u_short sync_period; /* 4*period factor */
109 u_short timeout;
110 } target[16];
111/* Scam table 8 bytes * 4 */
112 struct Symbios_scam {
113 u_short id;
114 u_short method;
115#define SYMBIOS_SCAM_DEFAULT_METHOD (0)
116#define SYMBIOS_SCAM_DONT_ASSIGN (1)
117#define SYMBIOS_SCAM_SET_SPECIFIC_ID (2)
118#define SYMBIOS_SCAM_USE_ORDER_GIVEN (3)
119 u_short status;
120#define SYMBIOS_SCAM_UNKNOWN (0)
121#define SYMBIOS_SCAM_DEVICE_NOT_FOUND (1)
122#define SYMBIOS_SCAM_ID_NOT_SET (2)
123#define SYMBIOS_SCAM_ID_VALID (3)
124 u_char target_id;
125 u_char rsvd;
126 } scam[4];
127
128 u_char spare_devices[15*8];
129 u_char trailer[6]; /* 0xfe 0xfe 0x00 0x00 0x00 0x00 */
130};
131typedef struct Symbios_nvram Symbios_nvram;
132typedef struct Symbios_host Symbios_host;
133typedef struct Symbios_target Symbios_target;
134typedef struct Symbios_scam Symbios_scam;
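The field sizes above add up to the 368-byte region read out of the EEPROM (6 + 20 + 4*14 + 16*8 + 4*8 + 120 + 6 = 368), so a compile-time check along these lines would hold, assuming the natural 2-byte packing the layout implies (a sketch, not part of the driver):

typedef char example_Symbios_nvram_size_check[
	(sizeof(Symbios_nvram) == SYMBIOS_NVRAM_SIZE) ? 1 : -1];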
135
136/*
137 * Tekram NVRAM data format.
138 */
139#define TEKRAM_NVRAM_SIZE 64
140#define TEKRAM_93C46_NVRAM_ADDRESS 0
141#define TEKRAM_24C16_NVRAM_ADDRESS 0x40
142
143struct Tekram_nvram {
144 struct Tekram_target {
145 u_char flags;
146#define TEKRAM_PARITY_CHECK (1)
147#define TEKRAM_SYNC_NEGO (1<<1)
148#define TEKRAM_DISCONNECT_ENABLE (1<<2)
149#define TEKRAM_START_CMD (1<<3)
150#define TEKRAM_TAGGED_COMMANDS (1<<4)
151#define TEKRAM_WIDE_NEGO (1<<5)
152 u_char sync_index;
153 u_short word2;
154 } target[16];
155 u_char host_id;
156 u_char flags;
157#define TEKRAM_MORE_THAN_2_DRIVES (1)
158#define TEKRAM_DRIVES_SUP_1GB (1<<1)
159#define TEKRAM_RESET_ON_POWER_ON (1<<2)
160#define TEKRAM_ACTIVE_NEGATION (1<<3)
161#define TEKRAM_IMMEDIATE_SEEK (1<<4)
162#define TEKRAM_SCAN_LUNS (1<<5)
163#define TEKRAM_REMOVABLE_FLAGS (3<<6) /* 0: disable; */
164 /* 1: boot device; 2:all */
165 u_char boot_delay_index;
166 u_char max_tags_index;
167 u_short flags1;
168#define TEKRAM_F2_F6_ENABLED (1)
169 u_short spare[29];
170};
171typedef struct Tekram_nvram Tekram_nvram;
172typedef struct Tekram_target Tekram_target;
173
174#ifndef CONFIG_PARISC
175struct pdc_initiator { int dummy; };
176#endif
177
178/*
179 * Union of supported NVRAM formats.
180 */
181struct sym_nvram {
182 int type;
183#define SYM_SYMBIOS_NVRAM (1)
184#define SYM_TEKRAM_NVRAM (2)
185#define SYM_PARISC_PDC (3)
186#if SYM_CONF_NVRAM_SUPPORT
187 union {
188 Symbios_nvram Symbios;
189 Tekram_nvram Tekram;
190 struct pdc_initiator parisc;
191 } data;
192#endif
193};
194
195#if SYM_CONF_NVRAM_SUPPORT
196void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram);
197void sym_nvram_setup_target (struct sym_hcb *np, int target, struct sym_nvram *nvp);
198int sym_read_nvram (struct sym_device *np, struct sym_nvram *nvp);
199char *sym_nvram_type(struct sym_nvram *nvp);
200#else
201static inline void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) { }
202static inline void sym_nvram_setup_target(struct sym_hcb *np, int target, struct sym_nvram *nvp) { }
203static inline int sym_read_nvram(struct sym_device *np, struct sym_nvram *nvp)
204{
205 nvp->type = 0;
206 return 0;
207}
208static inline char *sym_nvram_type(struct sym_nvram *nvp)
209{
210 return "No NVRAM";
211}
212#endif
213
214#endif /* SYM_NVRAM_H */