author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
---|---|---
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 | (patch)
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d | /drivers/scsi/sym53c8xx_2/sym_hipd.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/scsi/sym53c8xx_2/sym_hipd.c')
-rw-r--r-- | drivers/scsi/sym53c8xx_2/sym_hipd.c | 5865
1 file changed, 5865 insertions, 0 deletions
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
new file mode 100644
index 000000000000..50a176b3888d
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -0,0 +1,5865 @@
1 | /* | ||
2 | * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family | ||
3 | * of PCI-SCSI IO processors. | ||
4 | * | ||
5 | * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr> | ||
6 | * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx> | ||
7 | * | ||
8 | * This driver is derived from the Linux sym53c8xx driver. | ||
9 | * Copyright (C) 1998-2000 Gerard Roudier | ||
10 | * | ||
11 | * The sym53c8xx driver is derived from the ncr53c8xx driver that had been | ||
12 | * a port of the FreeBSD ncr driver to Linux-1.2.13. | ||
13 | * | ||
14 | * The original ncr driver has been written for 386bsd and FreeBSD by | ||
15 | * Wolfgang Stanglmeier <wolf@cologne.de> | ||
16 | * Stefan Esser <se@mi.Uni-Koeln.de> | ||
17 | * Copyright (C) 1994 Wolfgang Stanglmeier | ||
18 | * | ||
19 | * Other major contributions: | ||
20 | * | ||
21 | * NVRAM detection and reading. | ||
22 | * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk> | ||
23 | * | ||
24 | *----------------------------------------------------------------------------- | ||
25 | * | ||
26 | * This program is free software; you can redistribute it and/or modify | ||
27 | * it under the terms of the GNU General Public License as published by | ||
28 | * the Free Software Foundation; either version 2 of the License, or | ||
29 | * (at your option) any later version. | ||
30 | * | ||
31 | * This program is distributed in the hope that it will be useful, | ||
32 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
33 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
34 | * GNU General Public License for more details. | ||
35 | * | ||
36 | * You should have received a copy of the GNU General Public License | ||
37 | * along with this program; if not, write to the Free Software | ||
38 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
39 | */ | ||
40 | #include "sym_glue.h" | ||
41 | #include "sym_nvram.h" | ||
42 | |||
43 | #if 0 | ||
44 | #define SYM_DEBUG_GENERIC_SUPPORT | ||
45 | #endif | ||
46 | |||
47 | /* | ||
48 | * Needed function prototypes. | ||
49 | */ | ||
50 | static void sym_int_ma (struct sym_hcb *np); | ||
51 | static void sym_int_sir (struct sym_hcb *np); | ||
52 | static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np); | ||
53 | static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa); | ||
54 | static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln); | ||
55 | static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp); | ||
56 | static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp); | ||
57 | static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp); | ||
58 | |||
59 | /* | ||
60 | * Print a buffer in hexadecimal format with a ".\n" at end. | ||
61 | */ | ||
62 | static void sym_printl_hex(u_char *p, int n) | ||
63 | { | ||
64 | while (n-- > 0) | ||
65 | printf (" %x", *p++); | ||
66 | printf (".\n"); | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Print out the content of a SCSI message. | ||
71 | */ | ||
72 | static int sym_show_msg (u_char * msg) | ||
73 | { | ||
74 | u_char i; | ||
75 | printf ("%x",*msg); | ||
76 | if (*msg==M_EXTENDED) { | ||
77 | for (i=1;i<8;i++) { | ||
78 | if (i-1>msg[1]) break; | ||
79 | printf ("-%x",msg[i]); | ||
80 | } | ||
81 | return (i+1); | ||
82 | } else if ((*msg & 0xf0) == 0x20) { | ||
83 | printf ("-%x",msg[1]); | ||
84 | return (2); | ||
85 | } | ||
86 | return (1); | ||
87 | } | ||
88 | |||
89 | static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) | ||
90 | { | ||
91 | sym_print_addr(cp->cmd, "%s: ", label); | ||
92 | |||
93 | sym_show_msg(msg); | ||
94 | printf(".\n"); | ||
95 | } | ||
96 | |||
97 | static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg) | ||
98 | { | ||
99 | struct sym_tcb *tp = &np->target[target]; | ||
100 | dev_info(&tp->sdev->sdev_target->dev, "%s: ", label); | ||
101 | |||
102 | sym_show_msg(msg); | ||
103 | printf(".\n"); | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Print something that tells about extended errors. | ||
108 | */ | ||
109 | void sym_print_xerr(struct scsi_cmnd *cmd, int x_status) | ||
110 | { | ||
111 | if (x_status & XE_PARITY_ERR) { | ||
112 | sym_print_addr(cmd, "unrecovered SCSI parity error.\n"); | ||
113 | } | ||
114 | if (x_status & XE_EXTRA_DATA) { | ||
115 | sym_print_addr(cmd, "extraneous data discarded.\n"); | ||
116 | } | ||
117 | if (x_status & XE_BAD_PHASE) { | ||
118 | sym_print_addr(cmd, "illegal scsi phase (4/5).\n"); | ||
119 | } | ||
120 | if (x_status & XE_SODL_UNRUN) { | ||
121 | sym_print_addr(cmd, "ODD transfer in DATA OUT phase.\n"); | ||
122 | } | ||
123 | if (x_status & XE_SWIDE_OVRUN) { | ||
124 | sym_print_addr(cmd, "ODD transfer in DATA IN phase.\n"); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Return a string for SCSI BUS mode. | ||
130 | */ | ||
131 | static char *sym_scsi_bus_mode(int mode) | ||
132 | { | ||
133 | switch(mode) { | ||
134 | case SMODE_HVD: return "HVD"; | ||
135 | case SMODE_SE: return "SE"; | ||
136 | case SMODE_LVD: return "LVD"; | ||
137 | } | ||
138 | return "??"; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * Soft reset the chip. | ||
143 | * | ||
144 | * Raising SRST when the chip is running may cause | ||
145 | * problems on dual function chips (see below). | ||
146 | * On the other hand, LVD devices need some delay | ||
147 | * to settle and report actual BUS mode in STEST4. | ||
148 | */ | ||
149 | static void sym_chip_reset (struct sym_hcb *np) | ||
150 | { | ||
151 | OUTB(np, nc_istat, SRST); | ||
152 | udelay(10); | ||
153 | OUTB(np, nc_istat, 0); | ||
154 | udelay(2000); /* For BUS MODE to settle */ | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Really soft reset the chip. :-) | ||
159 | * | ||
160 | * Some 896 and 876 chip revisions may hang-up if we set | ||
161 | * the SRST (soft reset) bit at the wrong time when SCRIPTS | ||
162 | * are running. | ||
163 | * So, we need to abort the current operation prior to | ||
164 | * soft resetting the chip. | ||
165 | */ | ||
166 | static void sym_soft_reset (struct sym_hcb *np) | ||
167 | { | ||
168 | u_char istat = 0; | ||
169 | int i; | ||
170 | |||
171 | if (!(np->features & FE_ISTAT1) || !(INB(np, nc_istat1) & SCRUN)) | ||
172 | goto do_chip_reset; | ||
173 | |||
174 | OUTB(np, nc_istat, CABRT); | ||
175 | for (i = 100000 ; i ; --i) { | ||
176 | istat = INB(np, nc_istat); | ||
177 | if (istat & SIP) { | ||
178 | INW(np, nc_sist); | ||
179 | } | ||
180 | else if (istat & DIP) { | ||
181 | if (INB(np, nc_dstat) & ABRT) | ||
182 | break; | ||
183 | } | ||
184 | udelay(5); | ||
185 | } | ||
186 | OUTB(np, nc_istat, 0); | ||
187 | if (!i) | ||
188 | printf("%s: unable to abort current chip operation, " | ||
189 | "ISTAT=0x%02x.\n", sym_name(np), istat); | ||
190 | do_chip_reset: | ||
191 | sym_chip_reset(np); | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * Start reset process. | ||
196 | * | ||
197 | * The interrupt handler will reinitialize the chip. | ||
198 | */ | ||
199 | static void sym_start_reset(struct sym_hcb *np) | ||
200 | { | ||
201 | sym_reset_scsi_bus(np, 1); | ||
202 | } | ||
203 | |||
204 | int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int) | ||
205 | { | ||
206 | u32 term; | ||
207 | int retv = 0; | ||
208 | |||
209 | sym_soft_reset(np); /* Soft reset the chip */ | ||
210 | if (enab_int) | ||
211 | OUTW(np, nc_sien, RST); | ||
212 | /* | ||
213 | * Enable Tolerant, reset IRQD if present and | ||
214 | * properly set IRQ mode, prior to resetting the bus. | ||
215 | */ | ||
216 | OUTB(np, nc_stest3, TE); | ||
217 | OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM)); | ||
218 | OUTB(np, nc_scntl1, CRST); | ||
219 | udelay(200); | ||
220 | |||
221 | if (!SYM_SETUP_SCSI_BUS_CHECK) | ||
222 | goto out; | ||
223 | /* | ||
224 | * Check for no terminators or SCSI bus shorts to ground. | ||
225 | * Read SCSI data bus, data parity bits and control signals. | ||
226 | * We are expecting RESET to be TRUE and other signals to be | ||
227 | * FALSE. | ||
228 | */ | ||
229 | term = INB(np, nc_sstat0); | ||
230 | term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ | ||
231 | term |= ((INB(np, nc_sstat2) & 0x01) << 26) | /* sdp1 */ | ||
232 | ((INW(np, nc_sbdl) & 0xff) << 9) | /* d7-0 */ | ||
233 | ((INW(np, nc_sbdl) & 0xff00) << 10) | /* d15-8 */ | ||
234 | INB(np, nc_sbcl); /* req ack bsy sel atn msg cd io */ | ||
235 | |||
236 | if (!np->maxwide) | ||
237 | term &= 0x3ffff; | ||
238 | |||
239 | if (term != (2<<7)) { | ||
240 | printf("%s: suspicious SCSI data while resetting the BUS.\n", | ||
241 | sym_name(np)); | ||
242 | printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " | ||
243 | "0x%lx, expecting 0x%lx\n", | ||
244 | sym_name(np), | ||
245 | (np->features & FE_WIDE) ? "dp1,d15-8," : "", | ||
246 | (u_long)term, (u_long)(2<<7)); | ||
247 | if (SYM_SETUP_SCSI_BUS_CHECK == 1) | ||
248 | retv = 1; | ||
249 | } | ||
250 | out: | ||
251 | OUTB(np, nc_scntl1, 0); | ||
252 | return retv; | ||
253 | } | ||
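/*
 * Reading aid for the bus check above (layout derived from the shifts
 * in sym_reset_scsi_bus(); illustrative, not an official register map):
 *
 *     bits  0..7   SBCL  (req, ack, bsy, sel, atn, msg, c/d, i/o)
 *     bit   8      RST   (SSTAT0 bit 1)
 *     bits  9..16  SCSI data lines d7-0  (SBDL)
 *     bit  17      SDP0  (SSTAT0 bit 0)
 *     bits 18..25  SCSI data lines d15-8 (SBDL)
 *     bit  26      SDP1  (SSTAT2 bit 0)
 *
 * A properly terminated bus held in reset shows only RST asserted,
 * i.e. term == (2 << 7) == 0x100.  The 0x3ffff mask used for narrow
 * chips simply drops the d15-8 and SDP1 bits.
 */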
254 | |||
255 | /* | ||
256 | * Select SCSI clock frequency | ||
257 | */ | ||
258 | static void sym_selectclock(struct sym_hcb *np, u_char scntl3) | ||
259 | { | ||
260 | /* | ||
261 | * If multiplier not present or not selected, leave here. | ||
262 | */ | ||
263 | if (np->multiplier <= 1) { | ||
264 | OUTB(np, nc_scntl3, scntl3); | ||
265 | return; | ||
266 | } | ||
267 | |||
268 | if (sym_verbose >= 2) | ||
269 | printf ("%s: enabling clock multiplier\n", sym_name(np)); | ||
270 | |||
271 | OUTB(np, nc_stest1, DBLEN); /* Enable clock multiplier */ | ||
272 | /* | ||
273 | * Wait for the LCKFRQ bit to be set if supported by the chip. | ||
274 | * Otherwise wait 50 micro-seconds (at least). | ||
275 | */ | ||
276 | if (np->features & FE_LCKFRQ) { | ||
277 | int i = 20; | ||
278 | while (!(INB(np, nc_stest4) & LCKFRQ) && --i > 0) | ||
279 | udelay(20); | ||
280 | if (!i) | ||
281 | printf("%s: the chip cannot lock the frequency\n", | ||
282 | sym_name(np)); | ||
283 | } else | ||
284 | udelay((50+10)); | ||
285 | OUTB(np, nc_stest3, HSC); /* Halt the scsi clock */ | ||
286 | OUTB(np, nc_scntl3, scntl3); | ||
287 | OUTB(np, nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ | ||
288 | OUTB(np, nc_stest3, 0x00); /* Restart scsi clock */ | ||
289 | } | ||
290 | |||
291 | |||
292 | /* | ||
293 | * Determine the chip's clock frequency. | ||
294 | * | ||
295 | * This is essential for the negotiation of the synchronous | ||
296 | * transfer rate. | ||
297 | * | ||
298 | * Note: we have to return the correct value. | ||
299 | * THERE IS NO SAFE DEFAULT VALUE. | ||
300 | * | ||
301 | * Most NCR/SYMBIOS boards are delivered with a 40 MHz clock. | ||
302 | * 53C860 and 53C875 rev. 1 support fast20 transfers but | ||
303 | * do not have a clock doubler and so are provided with a | ||
304 | * 80 MHz clock. All other fast20 boards incorporate a doubler | ||
305 | * and so should be delivered with a 40 MHz clock. | ||
306 | * The recent fast40 chips (895/896/895A/1010) use a 40 MHz base | ||
307 | * clock and provide a clock quadrupler (160 MHz). | ||
308 | */ | ||
309 | |||
310 | /* | ||
311 | * calculate SCSI clock frequency (in KHz) | ||
312 | */ | ||
313 | static unsigned getfreq (struct sym_hcb *np, int gen) | ||
314 | { | ||
315 | unsigned int ms = 0; | ||
316 | unsigned int f; | ||
317 | |||
318 | /* | ||
319 | * Measure GEN timer delay in order | ||
320 | * to calculate SCSI clock frequency | ||
321 | * | ||
322 | * This code will never execute too | ||
323 | * many loop iterations (if DELAY is | ||
324 | * reasonably correct). It could get | ||
325 | * too low a delay (too high a freq.) | ||
326 | * if the CPU is slow executing the | ||
327 | * loop for some reason (an NMI, for | ||
328 | * example). For this reason, when | ||
329 | * multiple measurements are performed, | ||
330 | * we trust the higher delay | ||
331 | * (lower frequency returned). | ||
332 | */ | ||
333 | OUTW(np, nc_sien, 0); /* mask all scsi interrupts */ | ||
334 | INW(np, nc_sist); /* clear pending scsi interrupt */ | ||
335 | OUTB(np, nc_dien, 0); /* mask all dma interrupts */ | ||
336 | INW(np, nc_sist); /* another one, just to be sure :) */ | ||
337 | /* | ||
338 | * The C1010-33 core does not report GEN in SIST, | ||
339 | * if this interrupt is masked in SIEN. | ||
340 | * I don't know yet if the C1010-66 behaves the same way. | ||
341 | */ | ||
342 | if (np->features & FE_C10) { | ||
343 | OUTW(np, nc_sien, GEN); | ||
344 | OUTB(np, nc_istat1, SIRQD); | ||
345 | } | ||
346 | OUTB(np, nc_scntl3, 4); /* set pre-scaler to divide by 3 */ | ||
347 | OUTB(np, nc_stime1, 0); /* disable general purpose timer */ | ||
348 | OUTB(np, nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */ | ||
349 | while (!(INW(np, nc_sist) & GEN) && ms++ < 100000) | ||
350 | udelay(1000/4); /* count in 1/4 of ms */ | ||
351 | OUTB(np, nc_stime1, 0); /* disable general purpose timer */ | ||
352 | /* | ||
353 | * Undo C1010-33 specific settings. | ||
354 | */ | ||
355 | if (np->features & FE_C10) { | ||
356 | OUTW(np, nc_sien, 0); | ||
357 | OUTB(np, nc_istat1, 0); | ||
358 | } | ||
359 | /* | ||
360 | * set prescaler to divide by whatever 0 means | ||
361 | * 0 ought to choose divide by 2, but appears | ||
362 | * to set divide by 3.5 mode in my 53c810 ... | ||
363 | */ | ||
364 | OUTB(np, nc_scntl3, 0); | ||
365 | |||
366 | /* | ||
367 | * adjust for prescaler, and convert into KHz | ||
368 | */ | ||
369 | f = ms ? ((1 << gen) * (4340*4)) / ms : 0; | ||
370 | |||
371 | /* | ||
372 | * The C1010-33 result is biased by a factor | ||
373 | * of 2/3 compared to earlier chips. | ||
374 | */ | ||
375 | if (np->features & FE_C10) | ||
376 | f = (f * 2) / 3; | ||
377 | |||
378 | if (sym_verbose >= 2) | ||
379 | printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n", | ||
380 | sym_name(np), gen, ms/4, f); | ||
381 | |||
382 | return f; | ||
383 | } | ||
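/*
 * Worked example of the formula above, with illustrative numbers
 * rather than a real measurement: with gen = 8 the GEN timer expires
 * after a nominal (1 << 8) * 125us of SCSI clock time.  If the polling
 * loop counts ms = 111 quarter-milliseconds (about 28 ms) before the
 * interrupt is seen, then
 *
 *     f = ((1 << 8) * 4340 * 4) / 111 = 4444160 / 111 ~= 40037 kHz
 *
 * i.e. close to the 40 MHz clock most boards ship with; an 80 MHz part
 * would finish in roughly half the time (ms ~= 55).  A slow polling
 * loop can only make ms larger (frequency lower), which is why
 * sym_getfreq() below keeps the lower of two measurements.
 */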
384 | |||
385 | static unsigned sym_getfreq (struct sym_hcb *np) | ||
386 | { | ||
387 | u_int f1, f2; | ||
388 | int gen = 8; | ||
389 | |||
390 | getfreq (np, gen); /* throw away first result */ | ||
391 | f1 = getfreq (np, gen); | ||
392 | f2 = getfreq (np, gen); | ||
393 | if (f1 > f2) f1 = f2; /* trust lower result */ | ||
394 | return f1; | ||
395 | } | ||
396 | |||
397 | /* | ||
398 | * Get/probe chip SCSI clock frequency | ||
399 | */ | ||
400 | static void sym_getclock (struct sym_hcb *np, int mult) | ||
401 | { | ||
402 | unsigned char scntl3 = np->sv_scntl3; | ||
403 | unsigned char stest1 = np->sv_stest1; | ||
404 | unsigned f1; | ||
405 | |||
406 | np->multiplier = 1; | ||
407 | f1 = 40000; | ||
408 | /* | ||
409 | * True with 875/895/896/895A with clock multiplier selected | ||
410 | */ | ||
411 | if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) { | ||
412 | if (sym_verbose >= 2) | ||
413 | printf ("%s: clock multiplier found\n", sym_name(np)); | ||
414 | np->multiplier = mult; | ||
415 | } | ||
416 | |||
417 | /* | ||
418 | * If multiplier not found or scntl3 not 7,5,3, | ||
419 | * reset chip and get frequency from general purpose timer. | ||
420 | * Otherwise trust scntl3 BIOS setting. | ||
421 | */ | ||
422 | if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) { | ||
423 | OUTB(np, nc_stest1, 0); /* make sure doubler is OFF */ | ||
424 | f1 = sym_getfreq (np); | ||
425 | |||
426 | if (sym_verbose) | ||
427 | printf ("%s: chip clock is %uKHz\n", sym_name(np), f1); | ||
428 | |||
429 | if (f1 < 45000) f1 = 40000; | ||
430 | else if (f1 < 55000) f1 = 50000; | ||
431 | else f1 = 80000; | ||
432 | |||
433 | if (f1 < 80000 && mult > 1) { | ||
434 | if (sym_verbose >= 2) | ||
435 | printf ("%s: clock multiplier assumed\n", | ||
436 | sym_name(np)); | ||
437 | np->multiplier = mult; | ||
438 | } | ||
439 | } else { | ||
440 | if ((scntl3 & 7) == 3) f1 = 40000; | ||
441 | else if ((scntl3 & 7) == 5) f1 = 80000; | ||
442 | else f1 = 160000; | ||
443 | |||
444 | f1 /= np->multiplier; | ||
445 | } | ||
446 | |||
447 | /* | ||
448 | * Compute controller synchronous parameters. | ||
449 | */ | ||
450 | f1 *= np->multiplier; | ||
451 | np->clock_khz = f1; | ||
452 | } | ||
453 | |||
454 | /* | ||
455 | * Get/probe PCI clock frequency | ||
456 | */ | ||
457 | static int sym_getpciclock (struct sym_hcb *np) | ||
458 | { | ||
459 | int f = 0; | ||
460 | |||
461 | /* | ||
462 | * For now, we only need to know about the actual | ||
463 | * PCI BUS clock frequency for C1010-66 chips. | ||
464 | */ | ||
465 | #if 1 | ||
466 | if (np->features & FE_66MHZ) { | ||
467 | #else | ||
468 | if (1) { | ||
469 | #endif | ||
470 | OUTB(np, nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */ | ||
471 | f = sym_getfreq(np); | ||
472 | OUTB(np, nc_stest1, 0); | ||
473 | } | ||
474 | np->pciclk_khz = f; | ||
475 | |||
476 | return f; | ||
477 | } | ||
478 | |||
479 | /* | ||
480 | * SYMBIOS chip clock divisor table. | ||
481 | * | ||
482 | * Divisors are multiplied by 10,000,000 in order to make | ||
483 | * calculations simpler. | ||
484 | */ | ||
485 | #define _5M 5000000 | ||
486 | static u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; | ||
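/*
 * Spelled out, the table above encodes the clock divisors
 * 1, 1.5, 2, 3, 4, 6 and 8, i.e.
 *
 *     div_10M[] = { 10M, 15M, 20M, 30M, 40M, 60M, 80M }
 *
 * each scaled by 10,000,000 so the computations below stay in
 * integer arithmetic.
 */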
487 | |||
488 | /* | ||
489 | * Get clock factor and sync divisor for a given | ||
490 | * synchronous factor period. | ||
491 | */ | ||
492 | static int | ||
493 | sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fakp) | ||
494 | { | ||
495 | u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */ | ||
496 | int div = np->clock_divn; /* Number of divisors supported */ | ||
497 | u32 fak; /* Sync factor in sxfer */ | ||
498 | u32 per; /* Period in tenths of ns */ | ||
499 | u32 kpc; /* (per * clk) */ | ||
500 | int ret; | ||
501 | |||
502 | /* | ||
503 | * Compute the synchronous period in tenths of nano-seconds | ||
504 | */ | ||
505 | if (dt && sfac <= 9) per = 125; | ||
506 | else if (sfac <= 10) per = 250; | ||
507 | else if (sfac == 11) per = 303; | ||
508 | else if (sfac == 12) per = 500; | ||
509 | else per = 40 * sfac; | ||
510 | ret = per; | ||
511 | |||
512 | kpc = per * clk; | ||
513 | if (dt) | ||
514 | kpc <<= 1; | ||
515 | |||
516 | /* | ||
517 | * For earliest C10 revision 0, we cannot use extra | ||
518 | * clocks for the setting of the SCSI clocking. | ||
519 | * Note that this limits the lowest sync data transfer | ||
520 | * to 5 Mega-transfers per second and may result in | ||
521 | * using higher clock divisors. | ||
522 | */ | ||
523 | #if 1 | ||
524 | if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { | ||
525 | /* | ||
526 | * Look for the lowest clock divisor that allows an | ||
527 | * output speed not faster than the period. | ||
528 | */ | ||
529 | while (div > 0) { | ||
530 | --div; | ||
531 | if (kpc > (div_10M[div] << 2)) { | ||
532 | ++div; | ||
533 | break; | ||
534 | } | ||
535 | } | ||
536 | fak = 0; /* No extra clocks */ | ||
537 | if (div == np->clock_divn) { /* Are we too fast ? */ | ||
538 | ret = -1; | ||
539 | } | ||
540 | *divp = div; | ||
541 | *fakp = fak; | ||
542 | return ret; | ||
543 | } | ||
544 | #endif | ||
545 | |||
546 | /* | ||
547 | * Look for the greatest clock divisor that allows an | ||
548 | * input speed faster than the period. | ||
549 | */ | ||
550 | while (div-- > 0) | ||
551 | if (kpc >= (div_10M[div] << 2)) break; | ||
552 | |||
553 | /* | ||
554 | * Calculate the lowest clock factor that allows an output | ||
555 | * speed not faster than the period, and the max output speed. | ||
556 | * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. | ||
557 | * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. | ||
558 | */ | ||
559 | if (dt) { | ||
560 | fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; | ||
561 | /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ | ||
562 | } else { | ||
563 | fak = (kpc - 1) / div_10M[div] + 1 - 4; | ||
564 | /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ | ||
565 | } | ||
566 | |||
567 | /* | ||
568 | * Check against our hardware limits, or bugs :). | ||
569 | */ | ||
570 | if (fak > 2) { | ||
571 | fak = 2; | ||
572 | ret = -1; | ||
573 | } | ||
574 | |||
575 | /* | ||
576 | * Compute and return sync parameters. | ||
577 | */ | ||
578 | *divp = div; | ||
579 | *fakp = fak; | ||
580 | |||
581 | return ret; | ||
582 | } | ||
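/*
 * Worked example of the general (non-C10) path above, using
 * illustrative inputs: an Ultra2 chip clocked at 160000 kHz asked for
 * sfac = 10 in ST mode gives
 *
 *     per = 250 (tenths of ns, i.e. 25 ns)
 *     kpc = 250 * 160000 = 40000000
 *     divisor search stops at div = 0  (kpc >= div_10M[0] << 2)
 *     fak = (40000000 - 1) / 10000000 + 1 - 4 = 0
 *
 * so no clock division and no extra clocks: 4 clocks per transfer at
 * 160 MHz is a 25 ns period, exactly what factor 10 requests.
 */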
583 | |||
584 | /* | ||
585 | * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, | ||
586 | * 128 transfers. All chips support at least 16-transfer | ||
587 | * bursts. The 825A, 875 and 895 chips support bursts of up | ||
588 | * to 128 transfers and the 895A and 896 support bursts of up | ||
589 | * to 64 transfers. All other chips support up to 16-transfer | ||
590 | * bursts. | ||
591 | * | ||
592 | * For PCI 32 bit data transfers each transfer is a DWORD. | ||
593 | * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. | ||
594 | * | ||
595 | * We use log base 2 (burst length) as internal code, with | ||
596 | * value 0 meaning "burst disabled". | ||
597 | */ | ||
598 | |||
599 | /* | ||
600 | * Burst length from burst code. | ||
601 | */ | ||
602 | #define burst_length(bc) (!(bc))? 0 : 1 << (bc) | ||
603 | |||
604 | /* | ||
605 | * Burst code from io register bits. | ||
606 | */ | ||
607 | #define burst_code(dmode, ctest4, ctest5) \ | ||
608 | (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 | ||
609 | |||
610 | /* | ||
611 | * Set initial io register bits from burst code. | ||
612 | */ | ||
613 | static __inline void sym_init_burst(struct sym_hcb *np, u_char bc) | ||
614 | { | ||
615 | np->rv_ctest4 &= ~0x80; | ||
616 | np->rv_dmode &= ~(0x3 << 6); | ||
617 | np->rv_ctest5 &= ~0x4; | ||
618 | |||
619 | if (!bc) { | ||
620 | np->rv_ctest4 |= 0x80; | ||
621 | } | ||
622 | else { | ||
623 | --bc; | ||
624 | np->rv_dmode |= ((bc & 0x3) << 6); | ||
625 | np->rv_ctest5 |= (bc & 0x4); | ||
626 | } | ||
627 | } | ||
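/*
 * Round-trip example for the two macros and sym_init_burst() above
 * (illustrative values): asking for burst code 7 (128 transfers)
 * stores bc - 1 = 6, i.e. DMODE bits 7:6 = 10b and CTEST5 bit 2 set.
 * Decoding those registers with burst_code() gives 2 + 4 + 1 = 7
 * again, and burst_length(7) = 1 << 7 = 128.  Code 0 instead sets
 * CTEST4 bit 7, which burst_code() reports as "burst disabled".
 */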
628 | |||
629 | |||
630 | /* | ||
631 | * Print out the list of targets that have some flag disabled by user. | ||
632 | */ | ||
633 | static void sym_print_targets_flag(struct sym_hcb *np, int mask, char *msg) | ||
634 | { | ||
635 | int cnt; | ||
636 | int i; | ||
637 | |||
638 | for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { | ||
639 | if (i == np->myaddr) | ||
640 | continue; | ||
641 | if (np->target[i].usrflags & mask) { | ||
642 | if (!cnt++) | ||
643 | printf("%s: %s disabled for targets", | ||
644 | sym_name(np), msg); | ||
645 | printf(" %d", i); | ||
646 | } | ||
647 | } | ||
648 | if (cnt) | ||
649 | printf(".\n"); | ||
650 | } | ||
651 | |||
652 | /* | ||
653 | * Save initial settings of some IO registers. | ||
654 | * Assumed to have been set by BIOS. | ||
655 | * We cannot reset the chip prior to reading the | ||
656 | * IO registers, since information will be lost. | ||
657 | * Since the SCRIPTS processor may be running, this | ||
658 | * is not safe on paper, but it seems to work quite | ||
659 | * well. :) | ||
660 | */ | ||
661 | static void sym_save_initial_setting (struct sym_hcb *np) | ||
662 | { | ||
663 | np->sv_scntl0 = INB(np, nc_scntl0) & 0x0a; | ||
664 | np->sv_scntl3 = INB(np, nc_scntl3) & 0x07; | ||
665 | np->sv_dmode = INB(np, nc_dmode) & 0xce; | ||
666 | np->sv_dcntl = INB(np, nc_dcntl) & 0xa8; | ||
667 | np->sv_ctest3 = INB(np, nc_ctest3) & 0x01; | ||
668 | np->sv_ctest4 = INB(np, nc_ctest4) & 0x80; | ||
669 | np->sv_gpcntl = INB(np, nc_gpcntl); | ||
670 | np->sv_stest1 = INB(np, nc_stest1); | ||
671 | np->sv_stest2 = INB(np, nc_stest2) & 0x20; | ||
672 | np->sv_stest4 = INB(np, nc_stest4); | ||
673 | if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */ | ||
674 | np->sv_scntl4 = INB(np, nc_scntl4); | ||
675 | np->sv_ctest5 = INB(np, nc_ctest5) & 0x04; | ||
676 | } | ||
677 | else | ||
678 | np->sv_ctest5 = INB(np, nc_ctest5) & 0x24; | ||
679 | } | ||
680 | |||
681 | /* | ||
682 | * Prepare io register values used by sym_start_up() | ||
683 | * according to selected and supported features. | ||
684 | */ | ||
685 | static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) | ||
686 | { | ||
687 | u_char burst_max; | ||
688 | u32 period; | ||
689 | int i; | ||
690 | |||
691 | /* | ||
692 | * Wide ? | ||
693 | */ | ||
694 | np->maxwide = (np->features & FE_WIDE)? 1 : 0; | ||
695 | |||
696 | /* | ||
697 | * Guess the frequency of the chip's clock. | ||
698 | */ | ||
699 | if (np->features & (FE_ULTRA3 | FE_ULTRA2)) | ||
700 | np->clock_khz = 160000; | ||
701 | else if (np->features & FE_ULTRA) | ||
702 | np->clock_khz = 80000; | ||
703 | else | ||
704 | np->clock_khz = 40000; | ||
705 | |||
706 | /* | ||
707 | * Get the clock multiplier factor. | ||
708 | */ | ||
709 | if (np->features & FE_QUAD) | ||
710 | np->multiplier = 4; | ||
711 | else if (np->features & FE_DBLR) | ||
712 | np->multiplier = 2; | ||
713 | else | ||
714 | np->multiplier = 1; | ||
715 | |||
716 | /* | ||
717 | * Measure the SCSI clock frequency for chips | ||
718 | * where it may vary from the assumed one. | ||
719 | */ | ||
720 | if (np->features & FE_VARCLK) | ||
721 | sym_getclock(np, np->multiplier); | ||
722 | |||
723 | /* | ||
724 | * Divisor to be used for async (timer pre-scaler). | ||
725 | */ | ||
726 | i = np->clock_divn - 1; | ||
727 | while (--i >= 0) { | ||
728 | if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) { | ||
729 | ++i; | ||
730 | break; | ||
731 | } | ||
732 | } | ||
733 | np->rv_scntl3 = i+1; | ||
734 | |||
735 | /* | ||
736 | * The C1010 uses hardwired divisors for async. | ||
737 | * So, we just throw away the async divisor. :-) | ||
738 | */ | ||
739 | if (np->features & FE_C10) | ||
740 | np->rv_scntl3 = 0; | ||
741 | |||
742 | /* | ||
743 | * Minimum synchronous period factor supported by the chip. | ||
744 | * Btw, 'period' is in tenths of nanoseconds. | ||
745 | */ | ||
746 | period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; | ||
747 | |||
748 | if (period <= 250) np->minsync = 10; | ||
749 | else if (period <= 303) np->minsync = 11; | ||
750 | else if (period <= 500) np->minsync = 12; | ||
751 | else np->minsync = (period + 40 - 1) / 40; | ||
752 | |||
753 | /* | ||
754 | * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). | ||
755 | */ | ||
756 | if (np->minsync < 25 && | ||
757 | !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3))) | ||
758 | np->minsync = 25; | ||
759 | else if (np->minsync < 12 && | ||
760 | !(np->features & (FE_ULTRA2|FE_ULTRA3))) | ||
761 | np->minsync = 12; | ||
762 | |||
763 | /* | ||
764 | * Maximum synchronous period factor supported by the chip. | ||
765 | */ | ||
766 | period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); | ||
767 | np->maxsync = period > 2540 ? 254 : period / 10; | ||
768 | |||
769 | /* | ||
770 | * If chip is a C1010, guess the sync limits in DT mode. | ||
771 | */ | ||
772 | if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { | ||
773 | if (np->clock_khz == 160000) { | ||
774 | np->minsync_dt = 9; | ||
775 | np->maxsync_dt = 50; | ||
776 | np->maxoffs_dt = nvram->type ? 62 : 31; | ||
777 | } | ||
778 | } | ||
779 | |||
780 | /* | ||
781 | * 64 bit addressing (895A/896/1010) ? | ||
782 | */ | ||
783 | if (np->features & FE_DAC) { | ||
784 | #if SYM_CONF_DMA_ADDRESSING_MODE == 0 | ||
785 | np->rv_ccntl1 |= (DDAC); | ||
786 | #elif SYM_CONF_DMA_ADDRESSING_MODE == 1 | ||
787 | if (!np->use_dac) | ||
788 | np->rv_ccntl1 |= (DDAC); | ||
789 | else | ||
790 | np->rv_ccntl1 |= (XTIMOD | EXTIBMV); | ||
791 | #elif SYM_CONF_DMA_ADDRESSING_MODE == 2 | ||
792 | if (!np->use_dac) | ||
793 | np->rv_ccntl1 |= (DDAC); | ||
794 | else | ||
795 | np->rv_ccntl1 |= (0 | EXTIBMV); | ||
796 | #endif | ||
797 | } | ||
798 | |||
799 | /* | ||
800 | * Phase mismatch handled by SCRIPTS (895A/896/1010) ? | ||
801 | */ | ||
802 | if (np->features & FE_NOPM) | ||
803 | np->rv_ccntl0 |= (ENPMJ); | ||
804 | |||
805 | /* | ||
806 | * C1010-33 Errata: Part Number:609-039638 (rev. 1) is fixed. | ||
807 | * In dual channel mode, contention occurs if internal cycles | ||
808 | * are used. Disable internal cycles. | ||
809 | */ | ||
810 | if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 && | ||
811 | np->revision_id < 0x1) | ||
812 | np->rv_ccntl0 |= DILS; | ||
813 | |||
814 | /* | ||
815 | * Select burst length (dwords) | ||
816 | */ | ||
817 | burst_max = SYM_SETUP_BURST_ORDER; | ||
818 | if (burst_max == 255) | ||
819 | burst_max = burst_code(np->sv_dmode, np->sv_ctest4, | ||
820 | np->sv_ctest5); | ||
821 | if (burst_max > 7) | ||
822 | burst_max = 7; | ||
823 | if (burst_max > np->maxburst) | ||
824 | burst_max = np->maxburst; | ||
825 | |||
826 | /* | ||
827 | * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. | ||
828 | * This chip and the 860 Rev 1 may wrongly use PCI cache line | ||
829 | * based transactions on LOAD/STORE instructions. So we have | ||
830 | * to prevent these chips from using such PCI transactions in | ||
831 | * this driver. The generic ncr driver that does not use | ||
832 | * LOAD/STORE instructions does not need this work-around. | ||
833 | */ | ||
834 | if ((np->device_id == PCI_DEVICE_ID_NCR_53C810 && | ||
835 | np->revision_id >= 0x10 && np->revision_id <= 0x11) || | ||
836 | (np->device_id == PCI_DEVICE_ID_NCR_53C860 && | ||
837 | np->revision_id <= 0x1)) | ||
838 | np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); | ||
839 | |||
840 | /* | ||
841 | * Select all supported special features. | ||
842 | * If we are using on-board RAM for scripts, prefetch (PFEN) | ||
843 | * does not help, but burst op fetch (BOF) does. | ||
844 | * Disabling PFEN makes sure BOF will be used. | ||
845 | */ | ||
846 | if (np->features & FE_ERL) | ||
847 | np->rv_dmode |= ERL; /* Enable Read Line */ | ||
848 | if (np->features & FE_BOF) | ||
849 | np->rv_dmode |= BOF; /* Burst Opcode Fetch */ | ||
850 | if (np->features & FE_ERMP) | ||
851 | np->rv_dmode |= ERMP; /* Enable Read Multiple */ | ||
852 | #if 1 | ||
853 | if ((np->features & FE_PFEN) && !np->ram_ba) | ||
854 | #else | ||
855 | if (np->features & FE_PFEN) | ||
856 | #endif | ||
857 | np->rv_dcntl |= PFEN; /* Prefetch Enable */ | ||
858 | if (np->features & FE_CLSE) | ||
859 | np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ | ||
860 | if (np->features & FE_WRIE) | ||
861 | np->rv_ctest3 |= WRIE; /* Write and Invalidate */ | ||
862 | if (np->features & FE_DFS) | ||
863 | np->rv_ctest5 |= DFS; /* Dma Fifo Size */ | ||
864 | |||
865 | /* | ||
866 | * Select some other settings. | ||
867 | */ | ||
868 | np->rv_ctest4 |= MPEE; /* Master parity checking */ | ||
869 | np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ | ||
870 | |||
871 | /* | ||
872 | * Get parity checking, host ID and verbose mode from NVRAM | ||
873 | */ | ||
874 | np->myaddr = 255; | ||
875 | sym_nvram_setup_host(shost, np, nvram); | ||
876 | |||
877 | /* | ||
878 | * Get SCSI addr of host adapter (set by bios?). | ||
879 | */ | ||
880 | if (np->myaddr == 255) { | ||
881 | np->myaddr = INB(np, nc_scid) & 0x07; | ||
882 | if (!np->myaddr) | ||
883 | np->myaddr = SYM_SETUP_HOST_ID; | ||
884 | } | ||
885 | |||
886 | /* | ||
887 | * Prepare initial io register bits for burst length | ||
888 | */ | ||
889 | sym_init_burst(np, burst_max); | ||
890 | |||
891 | /* | ||
892 | * Set SCSI BUS mode. | ||
893 | * - LVD capable chips (895/895A/896/1010) report the | ||
894 | * current BUS mode through the STEST4 IO register. | ||
895 | * - For previous generation chips (825/825A/875), | ||
896 | * user has to tell us how to check against HVD, | ||
897 | * since a 100% safe algorithm is not possible. | ||
898 | */ | ||
899 | np->scsi_mode = SMODE_SE; | ||
900 | if (np->features & (FE_ULTRA2|FE_ULTRA3)) | ||
901 | np->scsi_mode = (np->sv_stest4 & SMODE); | ||
902 | else if (np->features & FE_DIFF) { | ||
903 | if (SYM_SETUP_SCSI_DIFF == 1) { | ||
904 | if (np->sv_scntl3) { | ||
905 | if (np->sv_stest2 & 0x20) | ||
906 | np->scsi_mode = SMODE_HVD; | ||
907 | } | ||
908 | else if (nvram->type == SYM_SYMBIOS_NVRAM) { | ||
909 | if (!(INB(np, nc_gpreg) & 0x08)) | ||
910 | np->scsi_mode = SMODE_HVD; | ||
911 | } | ||
912 | } | ||
913 | else if (SYM_SETUP_SCSI_DIFF == 2) | ||
914 | np->scsi_mode = SMODE_HVD; | ||
915 | } | ||
916 | if (np->scsi_mode == SMODE_HVD) | ||
917 | np->rv_stest2 |= 0x20; | ||
918 | |||
919 | /* | ||
920 | * Set LED support from SCRIPTS. | ||
921 | * Ignore this feature for boards known to use a | ||
922 | * specific GPIO wiring and for the 895A, 896 | ||
923 | * and 1010 that drive the LED directly. | ||
924 | */ | ||
925 | if ((SYM_SETUP_SCSI_LED || | ||
926 | (nvram->type == SYM_SYMBIOS_NVRAM || | ||
927 | (nvram->type == SYM_TEKRAM_NVRAM && | ||
928 | np->device_id == PCI_DEVICE_ID_NCR_53C895))) && | ||
929 | !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) | ||
930 | np->features |= FE_LED0; | ||
931 | |||
932 | /* | ||
933 | * Set irq mode. | ||
934 | */ | ||
935 | switch(SYM_SETUP_IRQ_MODE & 3) { | ||
936 | case 2: | ||
937 | np->rv_dcntl |= IRQM; | ||
938 | break; | ||
939 | case 1: | ||
940 | np->rv_dcntl |= (np->sv_dcntl & IRQM); | ||
941 | break; | ||
942 | default: | ||
943 | break; | ||
944 | } | ||
945 | |||
946 | /* | ||
947 | * Configure targets according to driver setup. | ||
948 | * If NVRAM present get targets setup from NVRAM. | ||
949 | */ | ||
950 | for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { | ||
951 | struct sym_tcb *tp = &np->target[i]; | ||
952 | |||
953 | tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); | ||
954 | tp->usrtags = SYM_SETUP_MAX_TAG; | ||
955 | |||
956 | sym_nvram_setup_target(np, i, nvram); | ||
957 | |||
958 | if (!tp->usrtags) | ||
959 | tp->usrflags &= ~SYM_TAGS_ENABLED; | ||
960 | } | ||
961 | |||
962 | /* | ||
963 | * Let user know about the settings. | ||
964 | */ | ||
965 | printf("%s: %s, ID %d, Fast-%d, %s, %s\n", sym_name(np), | ||
966 | sym_nvram_type(nvram), np->myaddr, | ||
967 | (np->features & FE_ULTRA3) ? 80 : | ||
968 | (np->features & FE_ULTRA2) ? 40 : | ||
969 | (np->features & FE_ULTRA) ? 20 : 10, | ||
970 | sym_scsi_bus_mode(np->scsi_mode), | ||
971 | (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity"); | ||
972 | /* | ||
973 | * Tell him more on demand. | ||
974 | */ | ||
975 | if (sym_verbose) { | ||
976 | printf("%s: %s IRQ line driver%s\n", | ||
977 | sym_name(np), | ||
978 | np->rv_dcntl & IRQM ? "totem pole" : "open drain", | ||
979 | np->ram_ba ? ", using on-chip SRAM" : ""); | ||
980 | printf("%s: using %s firmware.\n", sym_name(np), np->fw_name); | ||
981 | if (np->features & FE_NOPM) | ||
982 | printf("%s: handling phase mismatch from SCRIPTS.\n", | ||
983 | sym_name(np)); | ||
984 | } | ||
985 | /* | ||
986 | * And still more. | ||
987 | */ | ||
988 | if (sym_verbose >= 2) { | ||
989 | printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " | ||
990 | "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", | ||
991 | sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, | ||
992 | np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); | ||
993 | |||
994 | printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " | ||
995 | "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", | ||
996 | sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, | ||
997 | np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); | ||
998 | } | ||
999 | /* | ||
1000 | * Let user be aware of targets that have some disable flags set. | ||
1001 | */ | ||
1002 | sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT"); | ||
1003 | if (sym_verbose) | ||
1004 | sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED, | ||
1005 | "SCAN FOR LUNS"); | ||
1006 | |||
1007 | return 0; | ||
1008 | } | ||
1009 | |||
1010 | /* | ||
1011 | * Test the pci bus snoop logic :-( | ||
1012 | * | ||
1013 | * Has to be called with interrupts disabled. | ||
1014 | */ | ||
1015 | #ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED | ||
1016 | static int sym_regtest (struct sym_hcb *np) | ||
1017 | { | ||
1018 | register volatile u32 data; | ||
1019 | /* | ||
1020 | * chip registers may NOT be cached. | ||
1021 | * write 0xffffffff to a read only register area, | ||
1022 | * and try to read it back. | ||
1023 | */ | ||
1024 | data = 0xffffffff; | ||
1025 | OUTL(np, nc_dstat, data); | ||
1026 | data = INL(np, nc_dstat); | ||
1027 | #if 1 | ||
1028 | if (data == 0xffffffff) { | ||
1029 | #else | ||
1030 | if ((data & 0xe2f0fffd) != 0x02000080) { | ||
1031 | #endif | ||
1032 | printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", | ||
1033 | (unsigned) data); | ||
1034 | return (0x10); | ||
1035 | } | ||
1036 | return (0); | ||
1037 | } | ||
1038 | #endif | ||
1039 | |||
1040 | static int sym_snooptest (struct sym_hcb *np) | ||
1041 | { | ||
1042 | u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat; | ||
1043 | int i, err=0; | ||
1044 | #ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED | ||
1045 | err |= sym_regtest (np); | ||
1046 | if (err) return (err); | ||
1047 | #endif | ||
1048 | restart_test: | ||
1049 | /* | ||
1050 | * Enable Master Parity Checking as we intend | ||
1051 | * to enable it for normal operations. | ||
1052 | */ | ||
1053 | OUTB(np, nc_ctest4, (np->rv_ctest4 & MPEE)); | ||
1054 | /* | ||
1055 | * init | ||
1056 | */ | ||
1057 | pc = SCRIPTZ_BA(np, snooptest); | ||
1058 | host_wr = 1; | ||
1059 | sym_wr = 2; | ||
1060 | /* | ||
1061 | * Set memory and register. | ||
1062 | */ | ||
1063 | np->scratch = cpu_to_scr(host_wr); | ||
1064 | OUTL(np, nc_temp, sym_wr); | ||
1065 | /* | ||
1066 | * Start script (exchange values) | ||
1067 | */ | ||
1068 | OUTL(np, nc_dsa, np->hcb_ba); | ||
1069 | OUTL_DSP(np, pc); | ||
1070 | /* | ||
1071 | * Wait 'til done (with timeout) | ||
1072 | */ | ||
1073 | for (i=0; i<SYM_SNOOP_TIMEOUT; i++) | ||
1074 | if (INB(np, nc_istat) & (INTF|SIP|DIP)) | ||
1075 | break; | ||
1076 | if (i>=SYM_SNOOP_TIMEOUT) { | ||
1077 | printf ("CACHE TEST FAILED: timeout.\n"); | ||
1078 | return (0x20); | ||
1079 | } | ||
1080 | /* | ||
1081 | * Check for fatal DMA errors. | ||
1082 | */ | ||
1083 | dstat = INB(np, nc_dstat); | ||
1084 | #if 1 /* Band-aid for broken hardware that fails PCI parity */ | ||
1085 | if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) { | ||
1086 | printf ("%s: PCI DATA PARITY ERROR DETECTED - " | ||
1087 | "DISABLING MASTER DATA PARITY CHECKING.\n", | ||
1088 | sym_name(np)); | ||
1089 | np->rv_ctest4 &= ~MPEE; | ||
1090 | goto restart_test; | ||
1091 | } | ||
1092 | #endif | ||
1093 | if (dstat & (MDPE|BF|IID)) { | ||
1094 | printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat); | ||
1095 | return (0x80); | ||
1096 | } | ||
1097 | /* | ||
1098 | * Save termination position. | ||
1099 | */ | ||
1100 | pc = INL(np, nc_dsp); | ||
1101 | /* | ||
1102 | * Read memory and register. | ||
1103 | */ | ||
1104 | host_rd = scr_to_cpu(np->scratch); | ||
1105 | sym_rd = INL(np, nc_scratcha); | ||
1106 | sym_bk = INL(np, nc_temp); | ||
1107 | /* | ||
1108 | * Check termination position. | ||
1109 | */ | ||
1110 | if (pc != SCRIPTZ_BA(np, snoopend)+8) { | ||
1111 | printf ("CACHE TEST FAILED: script execution failed.\n"); | ||
1112 | printf ("start=%08lx, pc=%08lx, end=%08lx\n", | ||
1113 | (u_long) SCRIPTZ_BA(np, snooptest), (u_long) pc, | ||
1114 | (u_long) SCRIPTZ_BA(np, snoopend) +8); | ||
1115 | return (0x40); | ||
1116 | } | ||
1117 | /* | ||
1118 | * Show results. | ||
1119 | */ | ||
1120 | if (host_wr != sym_rd) { | ||
1121 | printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n", | ||
1122 | (int) host_wr, (int) sym_rd); | ||
1123 | err |= 1; | ||
1124 | } | ||
1125 | if (host_rd != sym_wr) { | ||
1126 | printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n", | ||
1127 | (int) sym_wr, (int) host_rd); | ||
1128 | err |= 2; | ||
1129 | } | ||
1130 | if (sym_bk != sym_wr) { | ||
1131 | printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n", | ||
1132 | (int) sym_wr, (int) sym_bk); | ||
1133 | err |= 4; | ||
1134 | } | ||
1135 | |||
1136 | return (err); | ||
1137 | } | ||
1138 | |||
1139 | /* | ||
1140 | * log message for real hard errors | ||
1141 | * | ||
1142 | * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sx/s3/s4) @ name (dsp:dbc). | ||
1143 | * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf. | ||
1144 | * | ||
1145 | * exception register: | ||
1146 | * ds: dstat | ||
1147 | * si: sist | ||
1148 | * | ||
1149 | * SCSI bus lines: | ||
1150 | * so: control lines as driven by chip. | ||
1151 | * si: control lines as seen by chip. | ||
1152 | * sd: scsi data lines as seen by chip. | ||
1153 | * | ||
1154 | * wide/fastmode: | ||
1155 | * sx: sxfer (see the manual) | ||
1156 | * s3: scntl3 (see the manual) | ||
1157 | * s4: scntl4 (see the manual) | ||
1158 | * | ||
1159 | * current script command: | ||
1160 | * dsp: script address (relative to start of script). | ||
1161 | * dbc: first word of script command. | ||
1162 | * | ||
1163 | * First 24 register of the chip: | ||
1164 | * r0..rf | ||
1165 | */ | ||
1166 | static void sym_log_hard_error(struct sym_hcb *np, u_short sist, u_char dstat) | ||
1167 | { | ||
1168 | u32 dsp; | ||
1169 | int script_ofs; | ||
1170 | int script_size; | ||
1171 | char *script_name; | ||
1172 | u_char *script_base; | ||
1173 | int i; | ||
1174 | |||
1175 | dsp = INL(np, nc_dsp); | ||
1176 | |||
1177 | if (dsp > np->scripta_ba && | ||
1178 | dsp <= np->scripta_ba + np->scripta_sz) { | ||
1179 | script_ofs = dsp - np->scripta_ba; | ||
1180 | script_size = np->scripta_sz; | ||
1181 | script_base = (u_char *) np->scripta0; | ||
1182 | script_name = "scripta"; | ||
1183 | } | ||
1184 | else if (np->scriptb_ba < dsp && | ||
1185 | dsp <= np->scriptb_ba + np->scriptb_sz) { | ||
1186 | script_ofs = dsp - np->scriptb_ba; | ||
1187 | script_size = np->scriptb_sz; | ||
1188 | script_base = (u_char *) np->scriptb0; | ||
1189 | script_name = "scriptb"; | ||
1190 | } else { | ||
1191 | script_ofs = dsp; | ||
1192 | script_size = 0; | ||
1193 | script_base = NULL; | ||
1194 | script_name = "mem"; | ||
1195 | } | ||
1196 | |||
1197 | printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x/%x) @ (%s %x:%08x).\n", | ||
1198 | sym_name(np), (unsigned)INB(np, nc_sdid)&0x0f, dstat, sist, | ||
1199 | (unsigned)INB(np, nc_socl), (unsigned)INB(np, nc_sbcl), | ||
1200 | (unsigned)INB(np, nc_sbdl), (unsigned)INB(np, nc_sxfer), | ||
1201 | (unsigned)INB(np, nc_scntl3), | ||
1202 | (np->features & FE_C10) ? (unsigned)INB(np, nc_scntl4) : 0, | ||
1203 | script_name, script_ofs, (unsigned)INL(np, nc_dbc)); | ||
1204 | |||
1205 | if (((script_ofs & 3) == 0) && | ||
1206 | (unsigned)script_ofs < script_size) { | ||
1207 | printf ("%s: script cmd = %08x\n", sym_name(np), | ||
1208 | scr_to_cpu((int) *(u32 *)(script_base + script_ofs))); | ||
1209 | } | ||
1210 | |||
1211 | printf ("%s: regdump:", sym_name(np)); | ||
1212 | for (i=0; i<24;i++) | ||
1213 | printf (" %02x", (unsigned)INB_OFF(np, i)); | ||
1214 | printf (".\n"); | ||
1215 | |||
1216 | /* | ||
1217 | * PCI BUS error. | ||
1218 | */ | ||
1219 | if (dstat & (MDPE|BF)) | ||
1220 | sym_log_bus_error(np); | ||
1221 | } | ||
1222 | |||
1223 | static struct sym_chip sym_dev_table[] = { | ||
1224 | {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, 64, | ||
1225 | FE_ERL} | ||
1226 | , | ||
1227 | #ifdef SYM_DEBUG_GENERIC_SUPPORT | ||
1228 | {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1, | ||
1229 | FE_BOF} | ||
1230 | , | ||
1231 | #else | ||
1232 | {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1, | ||
1233 | FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} | ||
1234 | , | ||
1235 | #endif | ||
1236 | {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, 64, | ||
1237 | FE_BOF|FE_ERL} | ||
1238 | , | ||
1239 | {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 6, 8, 4, 64, | ||
1240 | FE_WIDE|FE_BOF|FE_ERL|FE_DIFF} | ||
1241 | , | ||
1242 | {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, 2, | ||
1243 | FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} | ||
1244 | , | ||
1245 | {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, 1, | ||
1246 | FE_ULTRA|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} | ||
1247 | , | ||
1248 | {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, 2, | ||
1249 | FE_WIDE|FE_ULTRA|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| | ||
1250 | FE_RAM|FE_DIFF|FE_VARCLK} | ||
1251 | , | ||
1252 | {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, 2, | ||
1253 | FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| | ||
1254 | FE_RAM|FE_DIFF|FE_VARCLK} | ||
1255 | , | ||
1256 | {PCI_DEVICE_ID_NCR_53C875J, 0xff, "875J", 6, 16, 5, 2, | ||
1257 | FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| | ||
1258 | FE_RAM|FE_DIFF|FE_VARCLK} | ||
1259 | , | ||
1260 | {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, 2, | ||
1261 | FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| | ||
1262 | FE_RAM|FE_DIFF|FE_VARCLK} | ||
1263 | , | ||
1264 | #ifdef SYM_DEBUG_GENERIC_SUPPORT | ||
1265 | {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2, | ||
1266 | FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS| | ||
1267 | FE_RAM|FE_LCKFRQ} | ||
1268 | , | ||
1269 | #else | ||
1270 | {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2, | ||
1271 | FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| | ||
1272 | FE_RAM|FE_LCKFRQ} | ||
1273 | , | ||
1274 | #endif | ||
1275 | {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, 4, | ||
1276 | FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| | ||
1277 | FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} | ||
1278 | , | ||
1279 | {PCI_DEVICE_ID_LSI_53C895A, 0xff, "895a", 6, 31, 7, 4, | ||
1280 | FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| | ||
1281 | FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} | ||
1282 | , | ||
1283 | {PCI_DEVICE_ID_LSI_53C875A, 0xff, "875a", 6, 31, 7, 4, | ||
1284 | FE_WIDE|FE_ULTRA|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| | ||
1285 | FE_RAM|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} | ||
1286 | , | ||
1287 | {PCI_DEVICE_ID_LSI_53C1010_33, 0x00, "1010-33", 6, 31, 7, 8, | ||
1288 | FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| | ||
1289 | FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| | ||
1290 | FE_C10} | ||
1291 | , | ||
1292 | {PCI_DEVICE_ID_LSI_53C1010_33, 0xff, "1010-33", 6, 31, 7, 8, | ||
1293 | FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| | ||
1294 | FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| | ||
1295 | FE_C10|FE_U3EN} | ||
1296 | , | ||
1297 | {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010-66", 6, 31, 7, 8, | ||
1298 | FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| | ||
1299 | FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC| | ||
1300 | FE_C10|FE_U3EN} | ||
1301 | , | ||
1302 | {PCI_DEVICE_ID_LSI_53C1510, 0xff, "1510d", 6, 31, 7, 4, | ||
1303 | FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| | ||
1304 | FE_RAM|FE_IO256|FE_LEDC} | ||
1305 | }; | ||
1306 | |||
1307 | #define sym_num_devs \ | ||
1308 | (sizeof(sym_dev_table) / sizeof(sym_dev_table[0])) | ||
1309 | |||
1310 | /* | ||
1311 | * Look up the chip table. | ||
1312 | * | ||
1313 | * Return a pointer to the chip entry if found, | ||
1314 | * zero otherwise. | ||
1315 | */ | ||
1316 | struct sym_chip * | ||
1317 | sym_lookup_chip_table (u_short device_id, u_char revision) | ||
1318 | { | ||
1319 | struct sym_chip *chip; | ||
1320 | int i; | ||
1321 | |||
1322 | for (i = 0; i < sym_num_devs; i++) { | ||
1323 | chip = &sym_dev_table[i]; | ||
1324 | if (device_id != chip->device_id) | ||
1325 | continue; | ||
1326 | if (revision > chip->revision_id) | ||
1327 | continue; | ||
1328 | return chip; | ||
1329 | } | ||
1330 | |||
1331 | return NULL; | ||
1332 | } | ||
1333 | |||
1334 | #if SYM_CONF_DMA_ADDRESSING_MODE == 2 | ||
1335 | /* | ||
1336 | * Lookup the 64 bit DMA segments map. | ||
1337 | * This is only used if the direct mapping | ||
1338 | * has been unsuccessful. | ||
1339 | */ | ||
1340 | int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s) | ||
1341 | { | ||
1342 | int i; | ||
1343 | |||
1344 | if (!np->use_dac) | ||
1345 | goto weird; | ||
1346 | |||
1347 | /* Look up existing mappings */ | ||
1348 | for (i = SYM_DMAP_SIZE-1; i > 0; i--) { | ||
1349 | if (h == np->dmap_bah[i]) | ||
1350 | return i; | ||
1351 | } | ||
1352 | /* If direct mapping is free, get it */ | ||
1353 | if (!np->dmap_bah[s]) | ||
1354 | goto new; | ||
1355 | /* Collision -> lookup free mappings */ | ||
1356 | for (s = SYM_DMAP_SIZE-1; s > 0; s--) { | ||
1357 | if (!np->dmap_bah[s]) | ||
1358 | goto new; | ||
1359 | } | ||
1360 | weird: | ||
1361 | panic("sym: ran out of 64 bit DMA segment registers"); | ||
1362 | return -1; | ||
1363 | new: | ||
1364 | np->dmap_bah[s] = h; | ||
1365 | np->dmap_dirty = 1; | ||
1366 | return s; | ||
1367 | } | ||
1368 | |||
1369 | /* | ||
1370 | * Update IO registers scratch C..R so they will be | ||
1371 | * in sync. with queued CCB expectations. | ||
1372 | */ | ||
1373 | static void sym_update_dmap_regs(struct sym_hcb *np) | ||
1374 | { | ||
1375 | int o, i; | ||
1376 | |||
1377 | if (!np->dmap_dirty) | ||
1378 | return; | ||
1379 | o = offsetof(struct sym_reg, nc_scrx[0]); | ||
1380 | for (i = 0; i < SYM_DMAP_SIZE; i++) { | ||
1381 | OUTL_OFF(np, o, np->dmap_bah[i]); | ||
1382 | o += 4; | ||
1383 | } | ||
1384 | np->dmap_dirty = 0; | ||
1385 | } | ||
1386 | #endif | ||
1387 | |||
1388 | /* Enforce all the fiddly SPI rules and the chip limitations */ | ||
1389 | static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget, | ||
1390 | struct sym_trans *goal) | ||
1391 | { | ||
1392 | if (!spi_support_wide(starget)) | ||
1393 | goal->width = 0; | ||
1394 | |||
1395 | if (!spi_support_sync(starget)) { | ||
1396 | goal->iu = 0; | ||
1397 | goal->dt = 0; | ||
1398 | goal->qas = 0; | ||
1399 | goal->period = 0; | ||
1400 | goal->offset = 0; | ||
1401 | return; | ||
1402 | } | ||
1403 | |||
1404 | if (spi_support_dt(starget)) { | ||
1405 | if (spi_support_dt_only(starget)) | ||
1406 | goal->dt = 1; | ||
1407 | |||
1408 | if (goal->offset == 0) | ||
1409 | goal->dt = 0; | ||
1410 | } else { | ||
1411 | goal->dt = 0; | ||
1412 | } | ||
1413 | |||
1414 | /* Some targets fail to properly negotiate DT in SE mode */ | ||
1415 | if ((np->scsi_mode != SMODE_LVD) || !(np->features & FE_U3EN)) | ||
1416 | goal->dt = 0; | ||
1417 | |||
1418 | if (goal->dt) { | ||
1419 | /* all DT transfers must be wide */ | ||
1420 | goal->width = 1; | ||
1421 | if (goal->offset > np->maxoffs_dt) | ||
1422 | goal->offset = np->maxoffs_dt; | ||
1423 | if (goal->period < np->minsync_dt) | ||
1424 | goal->period = np->minsync_dt; | ||
1425 | if (goal->period > np->maxsync_dt) | ||
1426 | goal->period = np->maxsync_dt; | ||
1427 | } else { | ||
1428 | goal->iu = goal->qas = 0; | ||
1429 | if (goal->offset > np->maxoffs) | ||
1430 | goal->offset = np->maxoffs; | ||
1431 | if (goal->period < np->minsync) | ||
1432 | goal->period = np->minsync; | ||
1433 | if (goal->period > np->maxsync) | ||
1434 | goal->period = np->maxsync; | ||
1435 | } | ||
1436 | } | ||
1437 | |||
1438 | /* | ||
1439 | * Prepare the next negotiation message if needed. | ||
1440 | * | ||
1441 | * Fill in the part of message buffer that contains the | ||
1442 | * negotiation and the nego_status field of the CCB. | ||
1443 | * Returns the size of the message in bytes. | ||
1444 | */ | ||
1445 | static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr) | ||
1446 | { | ||
1447 | struct sym_tcb *tp = &np->target[cp->target]; | ||
1448 | struct scsi_target *starget = tp->sdev->sdev_target; | ||
1449 | struct sym_trans *goal = &tp->tgoal; | ||
1450 | int msglen = 0; | ||
1451 | int nego; | ||
1452 | |||
1453 | sym_check_goals(np, starget, goal); | ||
1454 | |||
1455 | /* | ||
1456 | * Many devices implement PPR in a buggy way, so only use it if we | ||
1457 | * really want to. | ||
1458 | */ | ||
1459 | if (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)) { | ||
1460 | nego = NS_PPR; | ||
1461 | } else if (spi_width(starget) != goal->width) { | ||
1462 | nego = NS_WIDE; | ||
1463 | } else if (spi_period(starget) != goal->period || | ||
1464 | spi_offset(starget) != goal->offset) { | ||
1465 | nego = NS_SYNC; | ||
1466 | } else { | ||
1467 | goal->check_nego = 0; | ||
1468 | nego = 0; | ||
1469 | } | ||
1470 | |||
1471 | switch (nego) { | ||
1472 | case NS_SYNC: | ||
1473 | msgptr[msglen++] = M_EXTENDED; | ||
1474 | msgptr[msglen++] = 3; | ||
1475 | msgptr[msglen++] = M_X_SYNC_REQ; | ||
1476 | msgptr[msglen++] = goal->period; | ||
1477 | msgptr[msglen++] = goal->offset; | ||
1478 | break; | ||
1479 | case NS_WIDE: | ||
1480 | msgptr[msglen++] = M_EXTENDED; | ||
1481 | msgptr[msglen++] = 2; | ||
1482 | msgptr[msglen++] = M_X_WIDE_REQ; | ||
1483 | msgptr[msglen++] = goal->width; | ||
1484 | break; | ||
1485 | case NS_PPR: | ||
1486 | msgptr[msglen++] = M_EXTENDED; | ||
1487 | msgptr[msglen++] = 6; | ||
1488 | msgptr[msglen++] = M_X_PPR_REQ; | ||
1489 | msgptr[msglen++] = goal->period; | ||
1490 | msgptr[msglen++] = 0; | ||
1491 | msgptr[msglen++] = goal->offset; | ||
1492 | msgptr[msglen++] = goal->width; | ||
1493 | msgptr[msglen++] = (goal->iu ? PPR_OPT_IU : 0) | | ||
1494 | (goal->dt ? PPR_OPT_DT : 0) | | ||
1495 | (goal->qas ? PPR_OPT_QAS : 0); | ||
1496 | break; | ||
1497 | } | ||
1498 | |||
1499 | cp->nego_status = nego; | ||
1500 | |||
1501 | if (nego) { | ||
1502 | tp->nego_cp = cp; /* Keep track a nego will be performed */ | ||
1503 | if (DEBUG_FLAGS & DEBUG_NEGO) { | ||
1504 | sym_print_nego_msg(np, cp->target, | ||
1505 | nego == NS_SYNC ? "sync msgout" : | ||
1506 | nego == NS_WIDE ? "wide msgout" : | ||
1507 | "ppr msgout", msgptr); | ||
1508 | } | ||
1509 | } | ||
1510 | |||
1511 | return msglen; | ||
1512 | } | ||
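/*
 * Illustrative message layout, assuming the usual SPI encodings for
 * these macros (M_EXTENDED = 0x01, M_X_PPR_REQ = 0x04, PPR_OPT_DT =
 * 0x02): a PPR request for Ultra160 - period factor 9 (12.5 ns),
 * offset 31, wide, DT clocking - is built by the NS_PPR case above as
 * the eight bytes
 *
 *     01 06 04 09 00 1f 01 02
 */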
1513 | |||
1514 | /* | ||
1515 | * Insert a job into the start queue. | ||
1516 | */ | ||
1517 | void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp) | ||
1518 | { | ||
1519 | u_short qidx; | ||
1520 | |||
1521 | #ifdef SYM_CONF_IARB_SUPPORT | ||
1522 | /* | ||
1523 | * If the previously queued CCB is not yet done, | ||
1524 | * set the IARB hint. The SCRIPTS will go with IARB | ||
1525 | * for this job when starting the previous one. | ||
1526 | * We leave devices a chance to win arbitration by | ||
1527 | * not using more than 'iarb_max' consecutive | ||
1528 | * immediate arbitrations. | ||
1529 | */ | ||
1530 | if (np->last_cp && np->iarb_count < np->iarb_max) { | ||
1531 | np->last_cp->host_flags |= HF_HINT_IARB; | ||
1532 | ++np->iarb_count; | ||
1533 | } | ||
1534 | else | ||
1535 | np->iarb_count = 0; | ||
1536 | np->last_cp = cp; | ||
1537 | #endif | ||
1538 | |||
1539 | #if SYM_CONF_DMA_ADDRESSING_MODE == 2 | ||
1540 | /* | ||
1541 | * Make SCRIPTS aware of the 64 bit DMA | ||
1542 | * segment registers not being up-to-date. | ||
1543 | */ | ||
1544 | if (np->dmap_dirty) | ||
1545 | cp->host_xflags |= HX_DMAP_DIRTY; | ||
1546 | #endif | ||
1547 | |||
1548 | /* | ||
1549 | * Insert first the idle task and then our job. | ||
1550 | * The MBs should ensure proper ordering. | ||
1551 | */ | ||
1552 | qidx = np->squeueput + 2; | ||
1553 | if (qidx >= MAX_QUEUE*2) qidx = 0; | ||
1554 | |||
1555 | np->squeue [qidx] = cpu_to_scr(np->idletask_ba); | ||
1556 | MEMORY_WRITE_BARRIER(); | ||
1557 | np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba); | ||
1558 | |||
1559 | np->squeueput = qidx; | ||
1560 | |||
1561 | if (DEBUG_FLAGS & DEBUG_QUEUE) | ||
1562 | printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput); | ||
1563 | |||
1564 | /* | ||
1565 | * Script processor may be waiting for reselect. | ||
1566 | * Wake it up. | ||
1567 | */ | ||
1568 | MEMORY_WRITE_BARRIER(); | ||
1569 | OUTB(np, nc_istat, SIGP|np->istat_sem); | ||
1570 | } | ||
1571 | |||
1572 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
1573 | /* | ||
1574 | * Start next ready-to-start CCBs. | ||
1575 | */ | ||
1576 | void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn) | ||
1577 | { | ||
1578 | SYM_QUEHEAD *qp; | ||
1579 | struct sym_ccb *cp; | ||
1580 | |||
1581 | /* | ||
1582 | * Paranoia, as usual. :-) | ||
1583 | */ | ||
1584 | assert(!lp->started_tags || !lp->started_no_tag); | ||
1585 | |||
1586 | /* | ||
1587 | * Try to start as many commands as asked by caller. | ||
1588 | * Prevent both tagged and untagged commands from | ||
1589 | * being queued to the device at the same time. | ||
1590 | */ | ||
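| /* | ||
| * Tagged commands are published in the per-tag itlq_tbl and | ||
| * reselection goes through resel_tag; a single untagged command | ||
| * is published in itl_task_sa and uses resel_no_tag. | ||
| */ | ||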
1591 | while (maxn--) { | ||
1592 | qp = sym_remque_head(&lp->waiting_ccbq); | ||
1593 | if (!qp) | ||
1594 | break; | ||
1595 | cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq); | ||
1596 | if (cp->tag != NO_TAG) { | ||
1597 | if (lp->started_no_tag || | ||
1598 | lp->started_tags >= lp->started_max) { | ||
1599 | sym_insque_head(qp, &lp->waiting_ccbq); | ||
1600 | break; | ||
1601 | } | ||
1602 | lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba); | ||
1603 | lp->head.resel_sa = | ||
1604 | cpu_to_scr(SCRIPTA_BA(np, resel_tag)); | ||
1605 | ++lp->started_tags; | ||
1606 | } else { | ||
1607 | if (lp->started_no_tag || lp->started_tags) { | ||
1608 | sym_insque_head(qp, &lp->waiting_ccbq); | ||
1609 | break; | ||
1610 | } | ||
1611 | lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); | ||
1612 | lp->head.resel_sa = | ||
1613 | cpu_to_scr(SCRIPTA_BA(np, resel_no_tag)); | ||
1614 | ++lp->started_no_tag; | ||
1615 | } | ||
1616 | cp->started = 1; | ||
1617 | sym_insque_tail(qp, &lp->started_ccbq); | ||
1618 | sym_put_start_queue(np, cp); | ||
1619 | } | ||
1620 | } | ||
1621 | #endif /* SYM_OPT_HANDLE_DEVICE_QUEUEING */ | ||
1622 | |||
1623 | /* | ||
1624 | * The chip may have completed jobs. Look at the DONE QUEUE. | ||
1625 | * | ||
1626 | * On paper, memory read barriers may be needed here to | ||
1627 | * prevent out of order LOADs by the CPU from having | ||
1628 | * prefetched stale data prior to DMA having occurred. | ||
1629 | */ | ||
1630 | static int sym_wakeup_done (struct sym_hcb *np) | ||
1631 | { | ||
1632 | struct sym_ccb *cp; | ||
1633 | int i, n; | ||
1634 | u32 dsa; | ||
1635 | |||
1636 | n = 0; | ||
1637 | i = np->dqueueget; | ||
1638 | |||
1639 | /* MEMORY_READ_BARRIER(); */ | ||
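| /* | ||
| * Scan the done queue from the get index. A zero DSA marks the | ||
| * first entry the chip has not completed yet; consumed entries | ||
| * are cleared so they can be reused. | ||
| */ | ||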
1640 | while (1) { | ||
1641 | dsa = scr_to_cpu(np->dqueue[i]); | ||
1642 | if (!dsa) | ||
1643 | break; | ||
1644 | np->dqueue[i] = 0; | ||
1645 | if ((i = i+2) >= MAX_QUEUE*2) | ||
1646 | i = 0; | ||
1647 | |||
1648 | cp = sym_ccb_from_dsa(np, dsa); | ||
1649 | if (cp) { | ||
1650 | MEMORY_READ_BARRIER(); | ||
1651 | sym_complete_ok (np, cp); | ||
1652 | ++n; | ||
1653 | } | ||
1654 | else | ||
1655 | printf ("%s: bad DSA (%x) in done queue.\n", | ||
1656 | sym_name(np), (u_int) dsa); | ||
1657 | } | ||
1658 | np->dqueueget = i; | ||
1659 | |||
1660 | return n; | ||
1661 | } | ||
1662 | |||
1663 | /* | ||
1664 | * Complete all CCBs queued to the COMP queue. | ||
1665 | * | ||
1666 | * These CCBs are assumed: | ||
1667 | * - Not to be referenced either by devices or | ||
1668 | * SCRIPTS-related queues and data. | ||
1669 | * - To have to be completed with an error condition | ||
1670 | * or requeued. | ||
1671 | * | ||
1672 | * The device queue freeze count is incremented | ||
1673 | * for each CCB that does not prevent this. | ||
1674 | * This function is called when all CCBs involved | ||
1675 | * in error handling/recovery have been reaped. | ||
1676 | */ | ||
1677 | static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status) | ||
1678 | { | ||
1679 | SYM_QUEHEAD *qp; | ||
1680 | struct sym_ccb *cp; | ||
1681 | |||
1682 | while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) { | ||
1683 | struct scsi_cmnd *cmd; | ||
1684 | cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); | ||
1685 | sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); | ||
1686 | /* Leave quiet CCBs waiting for resources */ | ||
1687 | if (cp->host_status == HS_WAIT) | ||
1688 | continue; | ||
1689 | cmd = cp->cmd; | ||
1690 | if (cam_status) | ||
1691 | sym_set_cam_status(cmd, cam_status); | ||
1692 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
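| /* | ||
| * Commands flagged for requeue go back to the per-LU waiting | ||
| * queue and the started counters are rolled back. | ||
| */ | ||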
1693 | if (sym_get_cam_status(cmd) == CAM_REQUEUE_REQ) { | ||
1694 | struct sym_tcb *tp = &np->target[cp->target]; | ||
1695 | struct sym_lcb *lp = sym_lp(tp, cp->lun); | ||
1696 | if (lp) { | ||
1697 | sym_remque(&cp->link2_ccbq); | ||
1698 | sym_insque_tail(&cp->link2_ccbq, | ||
1699 | &lp->waiting_ccbq); | ||
1700 | if (cp->started) { | ||
1701 | if (cp->tag != NO_TAG) | ||
1702 | --lp->started_tags; | ||
1703 | else | ||
1704 | --lp->started_no_tag; | ||
1705 | } | ||
1706 | } | ||
1707 | cp->started = 0; | ||
1708 | continue; | ||
1709 | } | ||
1710 | #endif | ||
1711 | sym_free_ccb(np, cp); | ||
1712 | sym_xpt_done(np, cmd); | ||
1713 | } | ||
1714 | } | ||
1715 | |||
1716 | /* | ||
1717 | * Complete all active CCBs with error. | ||
1718 | * Used on CHIP/SCSI RESET. | ||
1719 | */ | ||
1720 | static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status) | ||
1721 | { | ||
1722 | /* | ||
1723 | * Move all active CCBs to the COMP queue | ||
1724 | * and flush this queue. | ||
1725 | */ | ||
1726 | sym_que_splice(&np->busy_ccbq, &np->comp_ccbq); | ||
1727 | sym_que_init(&np->busy_ccbq); | ||
1728 | sym_flush_comp_queue(np, cam_status); | ||
1729 | } | ||
1730 | |||
1731 | /* | ||
1732 | * Start chip. | ||
1733 | * | ||
1734 | * 'reason' means: | ||
1735 | * 0: initialisation. | ||
1736 | * 1: SCSI BUS RESET delivered or received. | ||
1737 | * 2: SCSI BUS MODE changed. | ||
1738 | */ | ||
1739 | void sym_start_up (struct sym_hcb *np, int reason) | ||
1740 | { | ||
1741 | int i; | ||
1742 | u32 phys; | ||
1743 | |||
1744 | /* | ||
1745 | * Reset chip if asked, otherwise just clear fifos. | ||
1746 | */ | ||
1747 | if (reason == 1) | ||
1748 | sym_soft_reset(np); | ||
1749 | else { | ||
1750 | OUTB(np, nc_stest3, TE|CSF); | ||
1751 | OUTONB(np, nc_ctest3, CLF); | ||
1752 | } | ||
1753 | |||
1754 | /* | ||
1755 | * Clear Start Queue | ||
1756 | */ | ||
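| /* | ||
| * Even entries hold the job to start (the idle task for now), | ||
| * odd entries hold the bus address of the next pair; the last | ||
| * odd entry points back to the head, closing the ring. | ||
| */ | ||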
1757 | phys = np->squeue_ba; | ||
1758 | for (i = 0; i < MAX_QUEUE*2; i += 2) { | ||
1759 | np->squeue[i] = cpu_to_scr(np->idletask_ba); | ||
1760 | np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4); | ||
1761 | } | ||
1762 | np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys); | ||
1763 | |||
1764 | /* | ||
1765 | * Start at first entry. | ||
1766 | */ | ||
1767 | np->squeueput = 0; | ||
1768 | |||
1769 | /* | ||
1770 | * Clear Done Queue | ||
1771 | */ | ||
1772 | phys = np->dqueue_ba; | ||
1773 | for (i = 0; i < MAX_QUEUE*2; i += 2) { | ||
1774 | np->dqueue[i] = 0; | ||
1775 | np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4); | ||
1776 | } | ||
1777 | np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys); | ||
1778 | |||
1779 | /* | ||
1780 | * Start at first entry. | ||
1781 | */ | ||
1782 | np->dqueueget = 0; | ||
1783 | |||
1784 | /* | ||
1785 | * Install patches in scripts. | ||
1786 | * This also resets to their first positions the start | ||
1787 | * and done queue pointers used by SCRIPTS. | ||
1788 | */ | ||
1789 | np->fw_patch(np); | ||
1790 | |||
1791 | /* | ||
1792 | * Wakeup all pending jobs. | ||
1793 | */ | ||
1794 | sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET); | ||
1795 | |||
1796 | /* | ||
1797 | * Init chip. | ||
1798 | */ | ||
1799 | OUTB(np, nc_istat, 0x00); /* Remove Reset, abort */ | ||
1800 | udelay(2000); /* The 895 needs time for the bus mode to settle */ | ||
1801 | |||
1802 | OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0); | ||
1803 | /* full arb., ena parity, par->ATN */ | ||
1804 | OUTB(np, nc_scntl1, 0x00); /* odd parity, and remove CRST!! */ | ||
1805 | |||
1806 | sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */ | ||
1807 | |||
1808 | OUTB(np, nc_scid , RRE|np->myaddr); /* Adapter SCSI address */ | ||
1809 | OUTW(np, nc_respid, 1ul<<np->myaddr); /* Id to respond to */ | ||
1810 | OUTB(np, nc_istat , SIGP ); /* Signal Process */ | ||
1811 | OUTB(np, nc_dmode , np->rv_dmode); /* Burst length, dma mode */ | ||
1812 | OUTB(np, nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */ | ||
1813 | |||
1814 | OUTB(np, nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */ | ||
1815 | OUTB(np, nc_ctest3, np->rv_ctest3); /* Write and invalidate */ | ||
1816 | OUTB(np, nc_ctest4, np->rv_ctest4); /* Master parity checking */ | ||
1817 | |||
1818 | /* Extended Sreq/Sack filtering not supported on the C10 */ | ||
1819 | if (np->features & FE_C10) | ||
1820 | OUTB(np, nc_stest2, np->rv_stest2); | ||
1821 | else | ||
1822 | OUTB(np, nc_stest2, EXT|np->rv_stest2); | ||
1823 | |||
1824 | OUTB(np, nc_stest3, TE); /* TolerANT enable */ | ||
1825 | OUTB(np, nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */ | ||
1826 | |||
1827 | /* | ||
1828 | * For now, disable AIP generation on C1010-66. | ||
1829 | */ | ||
1830 | if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66) | ||
1831 | OUTB(np, nc_aipcntl1, DISAIP); | ||
1832 | |||
1833 | /* | ||
1834 | * C1010 rev. 0 errata. | ||
1835 | * Errant SGE's when in narrow. Write bits 4 & 5 of | ||
1836 | * STEST1 register to disable SGE. We probably should do | ||
1837 | * that from SCRIPTS for each selection/reselection, but | ||
1838 | * I just don't want to. :) | ||
1839 | */ | ||
1840 | if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 && | ||
1841 | np->revision_id < 1) | ||
1842 | OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30); | ||
1843 | |||
1844 | /* | ||
1845 | * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2. | ||
1846 | * Disable overlapped arbitration for some dual function devices, | ||
1847 | * regardless of revision id (kind of post-chip-design feature. ;-)) | ||
1848 | */ | ||
1849 | if (np->device_id == PCI_DEVICE_ID_NCR_53C875) | ||
1850 | OUTB(np, nc_ctest0, (1<<5)); | ||
1851 | else if (np->device_id == PCI_DEVICE_ID_NCR_53C896) | ||
1852 | np->rv_ccntl0 |= DPR; | ||
1853 | |||
1854 | /* | ||
1855 | * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing | ||
1856 | * and/or hardware phase mismatch, since only such chips | ||
1857 | * seem to support those IO registers. | ||
1858 | */ | ||
1859 | if (np->features & (FE_DAC|FE_NOPM)) { | ||
1860 | OUTB(np, nc_ccntl0, np->rv_ccntl0); | ||
1861 | OUTB(np, nc_ccntl1, np->rv_ccntl1); | ||
1862 | } | ||
1863 | |||
1864 | #if SYM_CONF_DMA_ADDRESSING_MODE == 2 | ||
1865 | /* | ||
1866 | * Set up scratch C and DRS IO registers to map the 32 bit | ||
1867 | * DMA address range our data structures are located in. | ||
1868 | */ | ||
1869 | if (np->use_dac) { | ||
1870 | np->dmap_bah[0] = 0; /* ??? */ | ||
1871 | OUTL(np, nc_scrx[0], np->dmap_bah[0]); | ||
1872 | OUTL(np, nc_drs, np->dmap_bah[0]); | ||
1873 | } | ||
1874 | #endif | ||
1875 | |||
1876 | /* | ||
1877 | * If phase mismatch handled by scripts (895A/896/1010), | ||
1878 | * set PM jump addresses. | ||
1879 | */ | ||
1880 | if (np->features & FE_NOPM) { | ||
1881 | OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle)); | ||
1882 | OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle)); | ||
1883 | } | ||
1884 | |||
1885 | /* | ||
1886 | * Enable GPIO0 pin for writing if LED support from SCRIPTS. | ||
1887 | * Also set GPIO5 and clear GPIO6 if hardware LED control. | ||
1888 | */ | ||
1889 | if (np->features & FE_LED0) | ||
1890 | OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01); | ||
1891 | else if (np->features & FE_LEDC) | ||
1892 | OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20); | ||
1893 | |||
1894 | /* | ||
1895 | * enable ints | ||
1896 | */ | ||
1897 | OUTW(np, nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR); | ||
1898 | OUTB(np, nc_dien , MDPE|BF|SSI|SIR|IID); | ||
1899 | |||
1900 | /* | ||
1901 | * For 895/6 enable SBMC interrupt and save current SCSI bus mode. | ||
1902 | * Try to eat the spurious SBMC interrupt that may occur when | ||
1903 | * we reset the chip but not the SCSI BUS (at initialization). | ||
1904 | */ | ||
1905 | if (np->features & (FE_ULTRA2|FE_ULTRA3)) { | ||
1906 | OUTONW(np, nc_sien, SBMC); | ||
1907 | if (reason == 0) { | ||
1908 | mdelay(100); | ||
1909 | INW(np, nc_sist); | ||
1910 | } | ||
1911 | np->scsi_mode = INB(np, nc_stest4) & SMODE; | ||
1912 | } | ||
1913 | |||
1914 | /* | ||
1915 | * Fill in target structure. | ||
1916 | * Reinitialize usrsync. | ||
1917 | * Reinitialize usrwide. | ||
1918 | * Prepare sync negotiation according to actual SCSI bus mode. | ||
1919 | */ | ||
1920 | for (i=0;i<SYM_CONF_MAX_TARGET;i++) { | ||
1921 | struct sym_tcb *tp = &np->target[i]; | ||
1922 | |||
1923 | tp->to_reset = 0; | ||
1924 | tp->head.sval = 0; | ||
1925 | tp->head.wval = np->rv_scntl3; | ||
1926 | tp->head.uval = 0; | ||
1927 | } | ||
1928 | |||
1929 | /* | ||
1930 | * Download SCSI SCRIPTS to on-chip RAM if present, | ||
1931 | * and start script processor. | ||
1932 | * We preferably do the download from the CPU. | ||
1933 | * For platforms that may not support PCI memory mapping, | ||
1934 | * we use simple SCRIPTS that perform MEMORY MOVEs. | ||
1935 | */ | ||
1936 | phys = SCRIPTA_BA(np, init); | ||
1937 | if (np->ram_ba) { | ||
1938 | if (sym_verbose >= 2) | ||
1939 | printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np)); | ||
1940 | memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz); | ||
1941 | if (np->ram_ws == 8192) { | ||
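| /* | ||
| * Chips with an 8K SCRIPTS RAM window also get SCRIPTB | ||
| * loaded into the upper 4K and are started through the | ||
| * start64 entry of SCRIPTB. | ||
| */ | ||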
1942 | memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz); | ||
1943 | phys = scr_to_cpu(np->scr_ram_seg); | ||
1944 | OUTL(np, nc_mmws, phys); | ||
1945 | OUTL(np, nc_mmrs, phys); | ||
1946 | OUTL(np, nc_sfs, phys); | ||
1947 | phys = SCRIPTB_BA(np, start64); | ||
1948 | } | ||
1949 | } | ||
1950 | |||
1951 | np->istat_sem = 0; | ||
1952 | |||
1953 | OUTL(np, nc_dsa, np->hcb_ba); | ||
1954 | OUTL_DSP(np, phys); | ||
1955 | |||
1956 | /* | ||
1957 | * Notify the XPT about the RESET condition. | ||
1958 | */ | ||
1959 | if (reason != 0) | ||
1960 | sym_xpt_async_bus_reset(np); | ||
1961 | } | ||
1962 | |||
1963 | /* | ||
1964 | * Switch trans mode for current job and its target. | ||
1965 | */ | ||
1966 | static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs, | ||
1967 | u_char per, u_char wide, u_char div, u_char fak) | ||
1968 | { | ||
1969 | SYM_QUEHEAD *qp; | ||
1970 | u_char sval, wval, uval; | ||
1971 | struct sym_tcb *tp = &np->target[target]; | ||
1972 | |||
1973 | assert(target == (INB(np, nc_sdid) & 0x0f)); | ||
1974 | |||
1975 | sval = tp->head.sval; | ||
1976 | wval = tp->head.wval; | ||
1977 | uval = tp->head.uval; | ||
1978 | |||
1979 | #if 0 | ||
1980 | printf("XXXX sval=%x wval=%x uval=%x (%x)\n", | ||
1981 | sval, wval, uval, np->rv_scntl3); | ||
1982 | #endif | ||
1983 | /* | ||
1984 | * Set the offset. | ||
1985 | */ | ||
1986 | if (!(np->features & FE_C10)) | ||
1987 | sval = (sval & ~0x1f) | ofs; | ||
1988 | else | ||
1989 | sval = (sval & ~0x3f) | ofs; | ||
1990 | |||
1991 | /* | ||
1992 | * Set the sync divisor and extra clock factor. | ||
1993 | */ | ||
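| /* | ||
| * On non-C10 chips the extra clock factor goes into bits 5-7 | ||
| * of the SXFER value; on C10 chips it is encoded with the | ||
| * XCLKH/XCLKS bits of the SCNTL4 value instead. | ||
| */ | ||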
1994 | if (ofs != 0) { | ||
1995 | wval = (wval & ~0x70) | ((div+1) << 4); | ||
1996 | if (!(np->features & FE_C10)) | ||
1997 | sval = (sval & ~0xe0) | (fak << 5); | ||
1998 | else { | ||
1999 | uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT); | ||
2000 | if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT); | ||
2001 | if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT); | ||
2002 | } | ||
2003 | } | ||
2004 | |||
2005 | /* | ||
2006 | * Set the bus width. | ||
2007 | */ | ||
2008 | wval = wval & ~EWS; | ||
2009 | if (wide != 0) | ||
2010 | wval |= EWS; | ||
2011 | |||
2012 | /* | ||
2013 | * Set misc. ultra enable bits. | ||
2014 | */ | ||
2015 | if (np->features & FE_C10) { | ||
2016 | uval = uval & ~(U3EN|AIPCKEN); | ||
2017 | if (opts) { | ||
2018 | assert(np->features & FE_U3EN); | ||
2019 | uval |= U3EN; | ||
2020 | } | ||
2021 | } else { | ||
2022 | wval = wval & ~ULTRA; | ||
2023 | if (per <= 12) wval |= ULTRA; | ||
2024 | } | ||
2025 | |||
2026 | /* | ||
2027 | * Stop there if sync parameters are unchanged. | ||
2028 | */ | ||
2029 | if (tp->head.sval == sval && | ||
2030 | tp->head.wval == wval && | ||
2031 | tp->head.uval == uval) | ||
2032 | return; | ||
2033 | tp->head.sval = sval; | ||
2034 | tp->head.wval = wval; | ||
2035 | tp->head.uval = uval; | ||
2036 | |||
2037 | /* | ||
2038 | * Disable extended Sreq/Sack filtering if per < 50. | ||
2039 | * Not supported on the C1010. | ||
2040 | */ | ||
2041 | if (per < 50 && !(np->features & FE_C10)) | ||
2042 | OUTOFFB(np, nc_stest2, EXT); | ||
2043 | |||
2044 | /* | ||
2045 | * set actual value and sync_status | ||
2046 | */ | ||
2047 | OUTB(np, nc_sxfer, tp->head.sval); | ||
2048 | OUTB(np, nc_scntl3, tp->head.wval); | ||
2049 | |||
2050 | if (np->features & FE_C10) { | ||
2051 | OUTB(np, nc_scntl4, tp->head.uval); | ||
2052 | } | ||
2053 | |||
2054 | /* | ||
2055 | * patch ALL busy ccbs of this target. | ||
2056 | */ | ||
2057 | FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { | ||
2058 | struct sym_ccb *cp; | ||
2059 | cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); | ||
2060 | if (cp->target != target) | ||
2061 | continue; | ||
2062 | cp->phys.select.sel_scntl3 = tp->head.wval; | ||
2063 | cp->phys.select.sel_sxfer = tp->head.sval; | ||
2064 | if (np->features & FE_C10) { | ||
2065 | cp->phys.select.sel_scntl4 = tp->head.uval; | ||
2066 | } | ||
2067 | } | ||
2068 | } | ||
2069 | |||
2070 | /* | ||
2071 | * We received a WDTR. | ||
2072 | * Let everything be aware of the changes. | ||
2073 | */ | ||
2074 | static void sym_setwide(struct sym_hcb *np, int target, u_char wide) | ||
2075 | { | ||
2076 | struct sym_tcb *tp = &np->target[target]; | ||
2077 | struct scsi_target *starget = tp->sdev->sdev_target; | ||
2078 | |||
2079 | if (spi_width(starget) == wide) | ||
2080 | return; | ||
2081 | |||
2082 | sym_settrans(np, target, 0, 0, 0, wide, 0, 0); | ||
2083 | |||
2084 | tp->tgoal.width = wide; | ||
2085 | spi_offset(starget) = 0; | ||
2086 | spi_period(starget) = 0; | ||
2087 | spi_width(starget) = wide; | ||
2088 | spi_iu(starget) = 0; | ||
2089 | spi_dt(starget) = 0; | ||
2090 | spi_qas(starget) = 0; | ||
2091 | |||
2092 | if (sym_verbose >= 3) | ||
2093 | spi_display_xfer_agreement(starget); | ||
2094 | } | ||
2095 | |||
2096 | /* | ||
2097 | * We received a SDTR. | ||
2098 | * Let everything be aware of the changes. | ||
2099 | */ | ||
2100 | static void | ||
2101 | sym_setsync(struct sym_hcb *np, int target, | ||
2102 | u_char ofs, u_char per, u_char div, u_char fak) | ||
2103 | { | ||
2104 | struct sym_tcb *tp = &np->target[target]; | ||
2105 | struct scsi_target *starget = tp->sdev->sdev_target; | ||
2106 | u_char wide = (tp->head.wval & EWS) ? BUS_16_BIT : BUS_8_BIT; | ||
2107 | |||
2108 | sym_settrans(np, target, 0, ofs, per, wide, div, fak); | ||
2109 | |||
2110 | spi_period(starget) = per; | ||
2111 | spi_offset(starget) = ofs; | ||
2112 | spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0; | ||
2113 | |||
2114 | if (!tp->tgoal.dt && !tp->tgoal.iu && !tp->tgoal.qas) { | ||
2115 | tp->tgoal.period = per; | ||
2116 | tp->tgoal.offset = ofs; | ||
2117 | tp->tgoal.check_nego = 0; | ||
2118 | } | ||
2119 | |||
2120 | spi_display_xfer_agreement(starget); | ||
2121 | } | ||
2122 | |||
2123 | /* | ||
2124 | * We received a PPR. | ||
2125 | * Let everything be aware of the changes. | ||
2126 | */ | ||
2127 | static void | ||
2128 | sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs, | ||
2129 | u_char per, u_char wide, u_char div, u_char fak) | ||
2130 | { | ||
2131 | struct sym_tcb *tp = &np->target[target]; | ||
2132 | struct scsi_target *starget = tp->sdev->sdev_target; | ||
2133 | |||
2134 | sym_settrans(np, target, opts, ofs, per, wide, div, fak); | ||
2135 | |||
2136 | spi_width(starget) = tp->tgoal.width = wide; | ||
2137 | spi_period(starget) = tp->tgoal.period = per; | ||
2138 | spi_offset(starget) = tp->tgoal.offset = ofs; | ||
2139 | spi_iu(starget) = tp->tgoal.iu = !!(opts & PPR_OPT_IU); | ||
2140 | spi_dt(starget) = tp->tgoal.dt = !!(opts & PPR_OPT_DT); | ||
2141 | spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS); | ||
2142 | tp->tgoal.check_nego = 0; | ||
2143 | |||
2144 | spi_display_xfer_agreement(starget); | ||
2145 | } | ||
2146 | |||
2147 | /* | ||
2148 | * generic recovery from scsi interrupt | ||
2149 | * | ||
2150 | * The doc says that when the chip gets an SCSI interrupt, | ||
2151 | * it tries to stop in an orderly fashion, by completing | ||
2152 | * an instruction fetch that had started or by flushing | ||
2153 | * the DMA fifo for a write to memory that was executing. | ||
2154 | * That is not enough to know whether the instruction | ||
2155 | * just before the current DSP value has been | ||
2156 | * executed or not. | ||
2157 | * | ||
2158 | * There are some small SCRIPTS sections that deal with | ||
2159 | * the start queue and the done queue that may break any | ||
2160 | * assumption made by the C code if we are interrupted | ||
2161 | * inside, so we reset if this happens. Btw, since these | ||
2162 | * SCRIPTS sections are executed while the SCRIPTS hasn't | ||
2163 | * started SCSI operations, it is very unlikely to happen. | ||
2164 | * | ||
2165 | * All the driver data structures are supposed to be | ||
2166 | * allocated from the same 4 GB memory window, so there | ||
2167 | * is a 1 to 1 relationship between DSA and driver data | ||
2168 | * structures. Since we are careful :) to invalidate the | ||
2169 | * DSA when we complete a command or when the SCRIPTS | ||
2170 | * pushes a DSA into a queue, we can trust it when it | ||
2171 | * points to a CCB. | ||
2172 | */ | ||
2173 | static void sym_recover_scsi_int (struct sym_hcb *np, u_char hsts) | ||
2174 | { | ||
2175 | u32 dsp = INL(np, nc_dsp); | ||
2176 | u32 dsa = INL(np, nc_dsa); | ||
2177 | struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); | ||
2178 | |||
2179 | /* | ||
2180 | * If we haven't been interrupted inside the SCRIPTS | ||
2181 | * critical paths, we can safely restart the SCRIPTS | ||
2182 | * and trust the DSA value if it matches a CCB. | ||
2183 | */ | ||
2184 | if ((!(dsp > SCRIPTA_BA(np, getjob_begin) && | ||
2185 | dsp < SCRIPTA_BA(np, getjob_end) + 1)) && | ||
2186 | (!(dsp > SCRIPTA_BA(np, ungetjob) && | ||
2187 | dsp < SCRIPTA_BA(np, reselect) + 1)) && | ||
2188 | (!(dsp > SCRIPTB_BA(np, sel_for_abort) && | ||
2189 | dsp < SCRIPTB_BA(np, sel_for_abort_1) + 1)) && | ||
2190 | (!(dsp > SCRIPTA_BA(np, done) && | ||
2191 | dsp < SCRIPTA_BA(np, done_end) + 1))) { | ||
2192 | OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ | ||
2193 | OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */ | ||
2194 | /* | ||
2195 | * If we have a CCB, let the SCRIPTS call us back for | ||
2196 | * the handling of the error with SCRATCHA filled with | ||
2197 | * STARTPOS. This way, we will be able to freeze the | ||
2198 | * device queue and requeue awaiting IOs. | ||
2199 | */ | ||
2200 | if (cp) { | ||
2201 | cp->host_status = hsts; | ||
2202 | OUTL_DSP(np, SCRIPTA_BA(np, complete_error)); | ||
2203 | } | ||
2204 | /* | ||
2205 | * Otherwise just restart the SCRIPTS. | ||
2206 | */ | ||
2207 | else { | ||
2208 | OUTL(np, nc_dsa, 0xffffff); | ||
2209 | OUTL_DSP(np, SCRIPTA_BA(np, start)); | ||
2210 | } | ||
2211 | } | ||
2212 | else | ||
2213 | goto reset_all; | ||
2214 | |||
2215 | return; | ||
2216 | |||
2217 | reset_all: | ||
2218 | sym_start_reset(np); | ||
2219 | } | ||
2220 | |||
2221 | /* | ||
2222 | * chip exception handler for selection timeout | ||
2223 | */ | ||
2224 | static void sym_int_sto (struct sym_hcb *np) | ||
2225 | { | ||
2226 | u32 dsp = INL(np, nc_dsp); | ||
2227 | |||
2228 | if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); | ||
2229 | |||
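| /* | ||
| * A selection timeout is only considered recoverable if SCRIPTS | ||
| * stopped right past the wait-for-selection-done point; anything | ||
| * else gets a chip reset. | ||
| */ | ||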
2230 | if (dsp == SCRIPTA_BA(np, wf_sel_done) + 8) | ||
2231 | sym_recover_scsi_int(np, HS_SEL_TIMEOUT); | ||
2232 | else | ||
2233 | sym_start_reset(np); | ||
2234 | } | ||
2235 | |||
2236 | /* | ||
2237 | * chip exception handler for unexpected disconnect | ||
2238 | */ | ||
2239 | static void sym_int_udc (struct sym_hcb *np) | ||
2240 | { | ||
2241 | printf ("%s: unexpected disconnect\n", sym_name(np)); | ||
2242 | sym_recover_scsi_int(np, HS_UNEXPECTED); | ||
2243 | } | ||
2244 | |||
2245 | /* | ||
2246 | * chip exception handler for SCSI bus mode change | ||
2247 | * | ||
2248 | * spi2-r12 11.2.3 says a transceiver mode change must | ||
2249 | * generate a reset event and a device that detects a reset | ||
2250 | * event shall initiate a hard reset. It says also that a | ||
2251 | * device that detects a mode change shall set data transfer | ||
2252 | * mode to eight bit asynchronous, etc... | ||
2253 | * So, just reinitializing all except chip should be enough. | ||
2254 | */ | ||
2255 | static void sym_int_sbmc (struct sym_hcb *np) | ||
2256 | { | ||
2257 | u_char scsi_mode = INB(np, nc_stest4) & SMODE; | ||
2258 | |||
2259 | /* | ||
2260 | * Notify user. | ||
2261 | */ | ||
2262 | printf("%s: SCSI BUS mode change from %s to %s.\n", sym_name(np), | ||
2263 | sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode)); | ||
2264 | |||
2265 | /* | ||
2266 | * Should suspend command processing for a few seconds and | ||
2267 | * reinitialize all except the chip. | ||
2268 | */ | ||
2269 | sym_start_up (np, 2); | ||
2270 | } | ||
2271 | |||
2272 | /* | ||
2273 | * chip exception handler for SCSI parity error. | ||
2274 | * | ||
2275 | * When the chip detects a SCSI parity error and is | ||
2276 | * currently executing a (CH)MOV instruction, it does | ||
2277 | * not interrupt immediately, but tries to finish the | ||
2278 | * transfer of the current scatter entry before | ||
2279 | * interrupting. The following situations may occur: | ||
2280 | * | ||
2281 | * - The complete scatter entry has been transferred | ||
2282 | * without the device having changed phase. | ||
2283 | * The chip will then interrupt with the DSP pointing | ||
2284 | * to the instruction that follows the MOV. | ||
2285 | * | ||
2286 | * - A phase mismatch occurs before the MOV finished | ||
2287 | * and phase errors are to be handled by the C code. | ||
2288 | * The chip will then interrupt with both PAR and MA | ||
2289 | * conditions set. | ||
2290 | * | ||
2291 | * - A phase mismatch occurs before the MOV finished and | ||
2292 | * phase errors are to be handled by SCRIPTS. | ||
2293 | * The chip will load the DSP with the phase mismatch | ||
2294 | * JUMP address and interrupt the host processor. | ||
2295 | */ | ||
2296 | static void sym_int_par (struct sym_hcb *np, u_short sist) | ||
2297 | { | ||
2298 | u_char hsts = INB(np, HS_PRT); | ||
2299 | u32 dsp = INL(np, nc_dsp); | ||
2300 | u32 dbc = INL(np, nc_dbc); | ||
2301 | u32 dsa = INL(np, nc_dsa); | ||
2302 | u_char sbcl = INB(np, nc_sbcl); | ||
2303 | u_char cmd = dbc >> 24; | ||
2304 | int phase = cmd & 7; | ||
2305 | struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); | ||
2306 | |||
2307 | printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", | ||
2308 | sym_name(np), hsts, dbc, sbcl); | ||
2309 | |||
2310 | /* | ||
2311 | * Check that the chip is connected to the SCSI BUS. | ||
2312 | */ | ||
2313 | if (!(INB(np, nc_scntl1) & ISCON)) { | ||
2314 | sym_recover_scsi_int(np, HS_UNEXPECTED); | ||
2315 | return; | ||
2316 | } | ||
2317 | |||
2318 | /* | ||
2319 | * If the nexus is not clearly identified, reset the bus. | ||
2320 | * We will try to do better later. | ||
2321 | */ | ||
2322 | if (!cp) | ||
2323 | goto reset_all; | ||
2324 | |||
2325 | /* | ||
2326 | * Check instruction was a MOV, direction was INPUT and | ||
2327 | * ATN is asserted. | ||
2328 | */ | ||
2329 | if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8)) | ||
2330 | goto reset_all; | ||
2331 | |||
2332 | /* | ||
2333 | * Keep track of the parity error. | ||
2334 | */ | ||
2335 | OUTONB(np, HF_PRT, HF_EXT_ERR); | ||
2336 | cp->xerr_status |= XE_PARITY_ERR; | ||
2337 | |||
2338 | /* | ||
2339 | * Prepare the message to send to the device. | ||
2340 | */ | ||
2341 | np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR; | ||
2342 | |||
2343 | /* | ||
2344 | * If the old phase was DATA IN phase, we have to deal with | ||
2345 | * the 3 situations described above. | ||
2346 | * For other input phases (MSG IN and STATUS), the device | ||
2347 | * must resend the whole thing that failed parity checking | ||
2348 | * or signal error. So, jumping to dispatcher should be OK. | ||
2349 | */ | ||
2350 | if (phase == 1 || phase == 5) { | ||
2351 | /* Phase mismatch handled by SCRIPTS */ | ||
2352 | if (dsp == SCRIPTB_BA(np, pm_handle)) | ||
2353 | OUTL_DSP(np, dsp); | ||
2354 | /* Phase mismatch handled by the C code */ | ||
2355 | else if (sist & MA) | ||
2356 | sym_int_ma (np); | ||
2357 | /* No phase mismatch occurred */ | ||
2358 | else { | ||
2359 | sym_set_script_dp (np, cp, dsp); | ||
2360 | OUTL_DSP(np, SCRIPTA_BA(np, dispatch)); | ||
2361 | } | ||
2362 | } | ||
2363 | else if (phase == 7) /* We definitely cannot handle parity errors */ | ||
2364 | #if 1 /* in message-in phase due to the reselection */ | ||
2365 | goto reset_all; /* path and various message anticipations. */ | ||
2366 | #else | ||
2367 | OUTL_DSP(np, SCRIPTA_BA(np, clrack)); | ||
2368 | #endif | ||
2369 | else | ||
2370 | OUTL_DSP(np, SCRIPTA_BA(np, dispatch)); | ||
2371 | return; | ||
2372 | |||
2373 | reset_all: | ||
2374 | sym_start_reset(np); | ||
2375 | return; | ||
2376 | } | ||
2377 | |||
2378 | /* | ||
2379 | * chip exception handler for phase errors. | ||
2380 | * | ||
2381 | * We have to construct a new transfer descriptor, | ||
2382 | * to transfer the rest of the current block. | ||
2383 | */ | ||
2384 | static void sym_int_ma (struct sym_hcb *np) | ||
2385 | { | ||
2386 | u32 dbc; | ||
2387 | u32 rest; | ||
2388 | u32 dsp; | ||
2389 | u32 dsa; | ||
2390 | u32 nxtdsp; | ||
2391 | u32 *vdsp; | ||
2392 | u32 oadr, olen; | ||
2393 | u32 *tblp; | ||
2394 | u32 newcmd; | ||
2395 | u_int delta; | ||
2396 | u_char cmd; | ||
2397 | u_char hflags, hflags0; | ||
2398 | struct sym_pmc *pm; | ||
2399 | struct sym_ccb *cp; | ||
2400 | |||
2401 | dsp = INL(np, nc_dsp); | ||
2402 | dbc = INL(np, nc_dbc); | ||
2403 | dsa = INL(np, nc_dsa); | ||
2404 | |||
2405 | cmd = dbc >> 24; | ||
2406 | rest = dbc & 0xffffff; | ||
2407 | delta = 0; | ||
2408 | |||
2409 | /* | ||
2410 | * locate matching cp if any. | ||
2411 | */ | ||
2412 | cp = sym_ccb_from_dsa(np, dsa); | ||
2413 | |||
2414 | /* | ||
2415 | * Do not take into account the dma fifo and various buffers in | ||
2416 | * INPUT phase since the chip flushes everything before | ||
2417 | * raising the MA interrupt for interrupted INPUT phases. | ||
2418 | * For DATA IN phase, we will check for the SWIDE later. | ||
2419 | */ | ||
2420 | if ((cmd & 7) != 1 && (cmd & 7) != 5) { | ||
2421 | u_char ss0, ss2; | ||
2422 | |||
2423 | if (np->features & FE_DFBC) | ||
2424 | delta = INW(np, nc_dfbc); | ||
2425 | else { | ||
2426 | u32 dfifo; | ||
2427 | |||
2428 | /* | ||
2429 | * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership. | ||
2430 | */ | ||
2431 | dfifo = INL(np, nc_dfifo); | ||
2432 | |||
2433 | /* | ||
2434 | * Calculate remaining bytes in DMA fifo. | ||
2435 | * (CTEST5 = dfifo >> 16) | ||
2436 | */ | ||
2437 | if (dfifo & (DFS << 16)) | ||
2438 | delta = ((((dfifo >> 8) & 0x300) | | ||
2439 | (dfifo & 0xff)) - rest) & 0x3ff; | ||
2440 | else | ||
2441 | delta = ((dfifo & 0xff) - rest) & 0x7f; | ||
2442 | } | ||
2443 | |||
2444 | /* | ||
2445 | * The data in the dma fifo has not been transferred to | ||
2446 | * the target -> add the amount to the rest | ||
2447 | * and clear the data. | ||
2448 | * Check the sstat2 register in case of wide transfer. | ||
2449 | */ | ||
2450 | rest += delta; | ||
2451 | ss0 = INB(np, nc_sstat0); | ||
2452 | if (ss0 & OLF) rest++; | ||
2453 | if (!(np->features & FE_C10)) | ||
2454 | if (ss0 & ORF) rest++; | ||
2455 | if (cp && (cp->phys.select.sel_scntl3 & EWS)) { | ||
2456 | ss2 = INB(np, nc_sstat2); | ||
2457 | if (ss2 & OLF1) rest++; | ||
2458 | if (!(np->features & FE_C10)) | ||
2459 | if (ss2 & ORF1) rest++; | ||
2460 | } | ||
2461 | |||
2462 | /* | ||
2463 | * Clear fifos. | ||
2464 | */ | ||
2465 | OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */ | ||
2466 | OUTB(np, nc_stest3, TE|CSF); /* scsi fifo */ | ||
2467 | } | ||
2468 | |||
2469 | /* | ||
2470 | * log the information | ||
2471 | */ | ||
2472 | if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) | ||
2473 | printf ("P%x%x RL=%d D=%d ", cmd&7, INB(np, nc_sbcl)&7, | ||
2474 | (unsigned) rest, (unsigned) delta); | ||
2475 | |||
2476 | /* | ||
2477 | * try to find the interrupted script command, | ||
2478 | * and the address at which to continue. | ||
2479 | */ | ||
2480 | vdsp = NULL; | ||
2481 | nxtdsp = 0; | ||
2482 | if (dsp > np->scripta_ba && | ||
2483 | dsp <= np->scripta_ba + np->scripta_sz) { | ||
2484 | vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8)); | ||
2485 | nxtdsp = dsp; | ||
2486 | } | ||
2487 | else if (dsp > np->scriptb_ba && | ||
2488 | dsp <= np->scriptb_ba + np->scriptb_sz) { | ||
2489 | vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8)); | ||
2490 | nxtdsp = dsp; | ||
2491 | } | ||
2492 | |||
2493 | /* | ||
2494 | * log the information | ||
2495 | */ | ||
2496 | if (DEBUG_FLAGS & DEBUG_PHASE) { | ||
2497 | printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", | ||
2498 | cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd); | ||
2499 | } | ||
2500 | |||
2501 | if (!vdsp) { | ||
2502 | printf ("%s: interrupted SCRIPT address not found.\n", | ||
2503 | sym_name (np)); | ||
2504 | goto reset_all; | ||
2505 | } | ||
2506 | |||
2507 | if (!cp) { | ||
2508 | printf ("%s: SCSI phase error fixup: CCB already dequeued.\n", | ||
2509 | sym_name (np)); | ||
2510 | goto reset_all; | ||
2511 | } | ||
2512 | |||
2513 | /* | ||
2514 | * get old startaddress and old length. | ||
2515 | */ | ||
2516 | oadr = scr_to_cpu(vdsp[1]); | ||
2517 | |||
2518 | if (cmd & 0x10) { /* Table indirect */ | ||
2519 | tblp = (u32 *) ((char*) &cp->phys + oadr); | ||
2520 | olen = scr_to_cpu(tblp[0]); | ||
2521 | oadr = scr_to_cpu(tblp[1]); | ||
2522 | } else { | ||
2523 | tblp = (u32 *) 0; | ||
2524 | olen = scr_to_cpu(vdsp[0]) & 0xffffff; | ||
2525 | } | ||
2526 | |||
2527 | if (DEBUG_FLAGS & DEBUG_PHASE) { | ||
2528 | printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n", | ||
2529 | (unsigned) (scr_to_cpu(vdsp[0]) >> 24), | ||
2530 | tblp, | ||
2531 | (unsigned) olen, | ||
2532 | (unsigned) oadr); | ||
2533 | } | ||
2534 | |||
2535 | /* | ||
2536 | * Check cmd against the assumed interrupted script command. | ||
2537 | * In a DT data phase, the MOVE instruction does not have bit 4 of | ||
2538 | * the phase set. | ||
2539 | */ | ||
2540 | if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) { | ||
2541 | sym_print_addr(cp->cmd, | ||
2542 | "internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", | ||
2543 | cmd, scr_to_cpu(vdsp[0]) >> 24); | ||
2544 | |||
2545 | goto reset_all; | ||
2546 | } | ||
2547 | |||
2548 | /* | ||
2549 | * if old phase not dataphase, leave here. | ||
2550 | */ | ||
2551 | if (cmd & 2) { | ||
2552 | sym_print_addr(cp->cmd, | ||
2553 | "phase change %x-%x %d@%08x resid=%d.\n", | ||
2554 | cmd&7, INB(np, nc_sbcl)&7, (unsigned)olen, | ||
2555 | (unsigned)oadr, (unsigned)rest); | ||
2556 | goto unexpected_phase; | ||
2557 | } | ||
2558 | |||
2559 | /* | ||
2560 | * Choose the correct PM save area. | ||
2561 | * | ||
2562 | * Look at the PM_SAVE SCRIPT if you want to understand | ||
2563 | * this stuff. The equivalent code is implemented in | ||
2564 | * SCRIPTS for the 895A, 896 and 1010 that are able to | ||
2565 | * handle PM from the SCRIPTS processor. | ||
2566 | */ | ||
2567 | hflags0 = INB(np, HF_PRT); | ||
2568 | hflags = hflags0; | ||
2569 | |||
2570 | if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) { | ||
2571 | if (hflags & HF_IN_PM0) | ||
2572 | nxtdsp = scr_to_cpu(cp->phys.pm0.ret); | ||
2573 | else if (hflags & HF_IN_PM1) | ||
2574 | nxtdsp = scr_to_cpu(cp->phys.pm1.ret); | ||
2575 | |||
2576 | if (hflags & HF_DP_SAVED) | ||
2577 | hflags ^= HF_ACT_PM; | ||
2578 | } | ||
2579 | |||
2580 | if (!(hflags & HF_ACT_PM)) { | ||
2581 | pm = &cp->phys.pm0; | ||
2582 | newcmd = SCRIPTA_BA(np, pm0_data); | ||
2583 | } | ||
2584 | else { | ||
2585 | pm = &cp->phys.pm1; | ||
2586 | newcmd = SCRIPTA_BA(np, pm1_data); | ||
2587 | } | ||
2588 | |||
2589 | hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED); | ||
2590 | if (hflags != hflags0) | ||
2591 | OUTB(np, HF_PRT, hflags); | ||
2592 | |||
2593 | /* | ||
2594 | * Fill in the phase mismatch context. | ||
2595 | */ | ||
2596 | pm->sg.addr = cpu_to_scr(oadr + olen - rest); | ||
2597 | pm->sg.size = cpu_to_scr(rest); | ||
2598 | pm->ret = cpu_to_scr(nxtdsp); | ||
2599 | |||
2600 | /* | ||
2601 | * If we have a SWIDE, | ||
2602 | * - prepare the address to write the SWIDE from SCRIPTS, | ||
2603 | * - compute the SCRIPTS address to restart from, | ||
2604 | * - move current data pointer context by one byte. | ||
2605 | */ | ||
2606 | nxtdsp = SCRIPTA_BA(np, dispatch); | ||
2607 | if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) && | ||
2608 | (INB(np, nc_scntl2) & WSR)) { | ||
2609 | u32 tmp; | ||
2610 | |||
2611 | /* | ||
2612 | * Set up the table indirect for the MOVE | ||
2613 | * of the residual byte and adjust the data | ||
2614 | * pointer context. | ||
2615 | */ | ||
2616 | tmp = scr_to_cpu(pm->sg.addr); | ||
2617 | cp->phys.wresid.addr = cpu_to_scr(tmp); | ||
2618 | pm->sg.addr = cpu_to_scr(tmp + 1); | ||
2619 | tmp = scr_to_cpu(pm->sg.size); | ||
2620 | cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1); | ||
2621 | pm->sg.size = cpu_to_scr(tmp - 1); | ||
2622 | |||
2623 | /* | ||
2624 | * If only the residual byte is to be moved, | ||
2625 | * no PM context is needed. | ||
2626 | */ | ||
2627 | if ((tmp&0xffffff) == 1) | ||
2628 | newcmd = pm->ret; | ||
2629 | |||
2630 | /* | ||
2631 | * Prepare the address of SCRIPTS that will | ||
2632 | * move the residual byte to memory. | ||
2633 | */ | ||
2634 | nxtdsp = SCRIPTB_BA(np, wsr_ma_helper); | ||
2635 | } | ||
2636 | |||
2637 | if (DEBUG_FLAGS & DEBUG_PHASE) { | ||
2638 | sym_print_addr(cp->cmd, "PM %x %x %x / %x %x %x.\n", | ||
2639 | hflags0, hflags, newcmd, | ||
2640 | (unsigned)scr_to_cpu(pm->sg.addr), | ||
2641 | (unsigned)scr_to_cpu(pm->sg.size), | ||
2642 | (unsigned)scr_to_cpu(pm->ret)); | ||
2643 | } | ||
2644 | |||
2645 | /* | ||
2646 | * Restart the SCRIPTS processor. | ||
2647 | */ | ||
2648 | sym_set_script_dp (np, cp, newcmd); | ||
2649 | OUTL_DSP(np, nxtdsp); | ||
2650 | return; | ||
2651 | |||
2652 | /* | ||
2653 | * Unexpected phase changes that occur when the current phase | ||
2654 | * is not a DATA IN or DATA OUT phase are due to error conditions. | ||
2655 | * Such an event may only happen when the SCRIPTS is using a | ||
2656 | * multibyte SCSI MOVE. | ||
2657 | * | ||
2658 | * Phase change Some possible cause | ||
2659 | * | ||
2660 | * COMMAND --> MSG IN SCSI parity error detected by target. | ||
2661 | * COMMAND --> STATUS Bad command or refused by target. | ||
2662 | * MSG OUT --> MSG IN Message rejected by target. | ||
2663 | * MSG OUT --> COMMAND Bogus target that discards extended | ||
2664 | * negotiation messages. | ||
2665 | * | ||
2666 | * The code below does not care about the new phase and so | ||
2667 | * trusts the target. Why annoy it? | ||
2668 | * If the interrupted phase is COMMAND phase, we restart at | ||
2669 | * dispatcher. | ||
2670 | * If a target does not get all the messages after selection, | ||
2671 | * the code assumes blindly that the target discards extended | ||
2672 | * messages and clears the negotiation status. | ||
2673 | * If the target does not want all our response to negotiation, | ||
2674 | * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids | ||
2675 | * bloat for such a should_not_happen situation). | ||
2676 | * In all other situation, we reset the BUS. | ||
2677 | * Are these assumptions reasonable? (Wait and see ...) | ||
2678 | */ | ||
2679 | unexpected_phase: | ||
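| /* | ||
| * DSP points past the interrupted instruction; back up by one | ||
| * instruction (8 bytes) so it can be compared with the known | ||
| * send_* SCRIPTS addresses below. | ||
| */ | ||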
2680 | dsp -= 8; | ||
2681 | nxtdsp = 0; | ||
2682 | |||
2683 | switch (cmd & 7) { | ||
2684 | case 2: /* COMMAND phase */ | ||
2685 | nxtdsp = SCRIPTA_BA(np, dispatch); | ||
2686 | break; | ||
2687 | #if 0 | ||
2688 | case 3: /* STATUS phase */ | ||
2689 | nxtdsp = SCRIPTA_BA(np, dispatch); | ||
2690 | break; | ||
2691 | #endif | ||
2692 | case 6: /* MSG OUT phase */ | ||
2693 | /* | ||
2694 | * If the device may want to use untagged when we want | ||
2695 | * tagged, we prepare an IDENTIFY without disc. granted, | ||
2696 | * since we will not be able to handle reselect. | ||
2697 | * Otherwise, we just don't care. | ||
2698 | */ | ||
2699 | if (dsp == SCRIPTA_BA(np, send_ident)) { | ||
2700 | if (cp->tag != NO_TAG && olen - rest <= 3) { | ||
2701 | cp->host_status = HS_BUSY; | ||
2702 | np->msgout[0] = IDENTIFY(0, cp->lun); | ||
2703 | nxtdsp = SCRIPTB_BA(np, ident_break_atn); | ||
2704 | } | ||
2705 | else | ||
2706 | nxtdsp = SCRIPTB_BA(np, ident_break); | ||
2707 | } | ||
2708 | else if (dsp == SCRIPTB_BA(np, send_wdtr) || | ||
2709 | dsp == SCRIPTB_BA(np, send_sdtr) || | ||
2710 | dsp == SCRIPTB_BA(np, send_ppr)) { | ||
2711 | nxtdsp = SCRIPTB_BA(np, nego_bad_phase); | ||
2712 | if (dsp == SCRIPTB_BA(np, send_ppr)) { | ||
2713 | struct scsi_device *dev = cp->cmd->device; | ||
2714 | dev->ppr = 0; | ||
2715 | } | ||
2716 | } | ||
2717 | break; | ||
2718 | #if 0 | ||
2719 | case 7: /* MSG IN phase */ | ||
2720 | nxtdsp = SCRIPTA_BA(np, clrack); | ||
2721 | break; | ||
2722 | #endif | ||
2723 | } | ||
2724 | |||
2725 | if (nxtdsp) { | ||
2726 | OUTL_DSP(np, nxtdsp); | ||
2727 | return; | ||
2728 | } | ||
2729 | |||
2730 | reset_all: | ||
2731 | sym_start_reset(np); | ||
2732 | } | ||
2733 | |||
2734 | /* | ||
2735 | * chip interrupt handler | ||
2736 | * | ||
2737 | * In normal situations, interrupt conditions occur one at | ||
2738 | * a time. But when something bad happens on the SCSI BUS, | ||
2739 | * the chip may raise several interrupt flags before | ||
2740 | * stopping and interrupting the CPU. The additional | ||
2741 | * interrupt flags are stacked in some extra registers | ||
2742 | * after the SIP and/or DIP flag has been raised in the | ||
2743 | * ISTAT. After the CPU has read the interrupt condition | ||
2744 | * flag from SIST or DSTAT, the chip unstacks the other | ||
2745 | * interrupt flags and sets the corresponding bits in | ||
2746 | * SIST or DSTAT. Since the chip starts stacking once the | ||
2747 | * SIP or DIP flag is set, there is a small window of time | ||
2748 | * where the stacking does not occur. | ||
2749 | * | ||
2750 | * Typically, multiple interrupt conditions may happen in | ||
2751 | * the following situations: | ||
2752 | * | ||
2753 | * - SCSI parity error + Phase mismatch (PAR|MA) | ||
2754 | * When a parity error is detected in an input phase | ||
2755 | * and the device switches to msg-in phase inside a | ||
2756 | * block MOV. | ||
2757 | * - SCSI parity error + Unexpected disconnect (PAR|UDC) | ||
2758 | * When a stupid device does not want to handle the | ||
2759 | * recovery of an SCSI parity error. | ||
2760 | * - Some combinations of STO, PAR, UDC, ... | ||
2761 | * When using non-compliant SCSI stuff, when the user is | ||
2762 | * doing non-compliant hot tampering on the BUS, when | ||
2763 | * something really bad happens to a device, etc ... | ||
2764 | * | ||
2765 | * The heuristic suggested by SYMBIOS to handle | ||
2766 | * multiple interrupts is to try unstacking all | ||
2767 | * interrupt conditions and to handle them with some | ||
2768 | * priority based on error severity. | ||
2769 | * This will work when the unstacking has been | ||
2770 | * successful, but we cannot be 100 % sure of that, | ||
2771 | * since the CPU may have been faster to unstack than | ||
2772 | * the chip is able to stack. Hmmm ... But it seems that | ||
2773 | * such a situation is very unlikely to happen. | ||
2774 | * | ||
2775 | * If this happens, for example an STO caught by the CPU | ||
2776 | * and then a UDC happening before the CPU has restarted | ||
2777 | * the SCRIPTS, the driver may wrongly complete the | ||
2778 | * same command on UDC, since the SCRIPTS didn't restart | ||
2779 | * and the DSA still points to the same command. | ||
2780 | * We avoid this situation by setting the DSA to an | ||
2781 | * invalid value when the CCB is completed and before | ||
2782 | * restarting the SCRIPTS. | ||
2783 | * | ||
2784 | * Another issue is that we need some section of our | ||
2785 | * recovery procedures to be somehow uninterruptible but | ||
2786 | * the SCRIPTS processor does not provide such a | ||
2787 | * feature. For this reason, we preferably handle recovery | ||
2788 | * from the C code and check against some SCRIPTS critical | ||
2789 | * sections from the C code. | ||
2790 | * | ||
2791 | * Hopefully, the interrupt handling of the driver is now | ||
2792 | * able to withstand weird BUS error conditions, but do not | ||
2793 | * ask me for any guarantee that it will never fail. :-) | ||
2794 | * Use at your own discretion and risk. | ||
2795 | */ | ||
2796 | |||
2797 | void sym_interrupt (struct sym_hcb *np) | ||
2798 | { | ||
2799 | u_char istat, istatc; | ||
2800 | u_char dstat; | ||
2801 | u_short sist; | ||
2802 | |||
2803 | /* | ||
2804 | * interrupt on the fly ? | ||
2805 | * (SCRIPTS may still be running) | ||
2806 | * | ||
2807 | * A `dummy read' is needed to ensure that the | ||
2808 | * clear of the INTF flag reaches the device | ||
2809 | * and that posted writes are flushed to memory | ||
2810 | * before the scanning of the DONE queue. | ||
2811 | * Note that SCRIPTS also does a (dummy) read to memory | ||
2812 | * prior to delivering the INTF interrupt condition. | ||
2813 | */ | ||
2814 | istat = INB(np, nc_istat); | ||
2815 | if (istat & INTF) { | ||
2816 | OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem); | ||
2817 | istat = INB(np, nc_istat); /* DUMMY READ */ | ||
2818 | if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); | ||
2819 | sym_wakeup_done(np); | ||
2820 | } | ||
2821 | |||
2822 | if (!(istat & (SIP|DIP))) | ||
2823 | return; | ||
2824 | |||
2825 | #if 0 /* We should never get this one */ | ||
2826 | if (istat & CABRT) | ||
2827 | OUTB(np, nc_istat, CABRT); | ||
2828 | #endif | ||
2829 | |||
2830 | /* | ||
2831 | * PAR and MA interrupts may occur at the same time, | ||
2832 | * and we need to know of both in order to handle | ||
2833 | * this situation properly. We try to unstack SCSI | ||
2834 | * interrupts for that reason. BTW, I dislike such a | ||
2835 | * loop inside the interrupt routine a LOT. | ||
2836 | * Even if DMA interrupt stacking is very unlikely to | ||
2837 | * happen, we also try unstacking these ones, since | ||
2838 | * this has no performance impact. | ||
2839 | */ | ||
2840 | sist = 0; | ||
2841 | dstat = 0; | ||
2842 | istatc = istat; | ||
2843 | do { | ||
2844 | if (istatc & SIP) | ||
2845 | sist |= INW(np, nc_sist); | ||
2846 | if (istatc & DIP) | ||
2847 | dstat |= INB(np, nc_dstat); | ||
2848 | istatc = INB(np, nc_istat); | ||
2849 | istat |= istatc; | ||
2850 | } while (istatc & (SIP|DIP)); | ||
2851 | |||
2852 | if (DEBUG_FLAGS & DEBUG_TINY) | ||
2853 | printf ("<%d|%x:%x|%x:%x>", | ||
2854 | (int)INB(np, nc_scr0), | ||
2855 | dstat,sist, | ||
2856 | (unsigned)INL(np, nc_dsp), | ||
2857 | (unsigned)INL(np, nc_dbc)); | ||
2858 | /* | ||
2859 | * On paper, a memory read barrier may be needed here to | ||
2860 | * prevent out of order LOADs by the CPU from having | ||
2861 | * prefetched stale data prior to DMA having occurred. | ||
2862 | * And since we are paranoid ... :) | ||
2863 | */ | ||
2864 | MEMORY_READ_BARRIER(); | ||
2865 | |||
2866 | /* | ||
2867 | * First, interrupts we want to service cleanly. | ||
2868 | * | ||
2869 | * Phase mismatch (MA) is the most frequent interrupt | ||
2870 | * for chips earlier than the 896 and so we have to service | ||
2871 | * it as quickly as possible. | ||
2872 | * A SCSI parity error (PAR) may be combined with a phase | ||
2873 | * mismatch condition (MA). | ||
2874 | * Programmed interrupts (SIR) are used to call the C code | ||
2875 | * from SCRIPTS. | ||
2876 | * The single step interrupt (SSI) is not used in this | ||
2877 | * driver. | ||
2878 | */ | ||
2879 | if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) && | ||
2880 | !(dstat & (MDPE|BF|ABRT|IID))) { | ||
2881 | if (sist & PAR) sym_int_par (np, sist); | ||
2882 | else if (sist & MA) sym_int_ma (np); | ||
2883 | else if (dstat & SIR) sym_int_sir (np); | ||
2884 | else if (dstat & SSI) OUTONB_STD(); | ||
2885 | else goto unknown_int; | ||
2886 | return; | ||
2887 | } | ||
2888 | |||
2889 | /* | ||
2890 | * Now, interrupts that do not happen in normal | ||
2891 | * situations and that we may need to recover from. | ||
2892 | * | ||
2893 | * On SCSI RESET (RST), we reset everything. | ||
2894 | * On SCSI BUS MODE CHANGE (SBMC), we complete all | ||
2895 | * active CCBs with RESET status, prepare all devices | ||
2896 | * for negotiating again and restart the SCRIPTS. | ||
2897 | * On STO and UDC, we complete the CCB with the corres- | ||
2898 | * ponding status and restart the SCRIPTS. | ||
2899 | */ | ||
2900 | if (sist & RST) { | ||
2901 | printf("%s: SCSI BUS reset detected.\n", sym_name(np)); | ||
2902 | sym_start_up (np, 1); | ||
2903 | return; | ||
2904 | } | ||
2905 | |||
2906 | OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ | ||
2907 | OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */ | ||
2908 | |||
2909 | if (!(sist & (GEN|HTH|SGE)) && | ||
2910 | !(dstat & (MDPE|BF|ABRT|IID))) { | ||
2911 | if (sist & SBMC) sym_int_sbmc (np); | ||
2912 | else if (sist & STO) sym_int_sto (np); | ||
2913 | else if (sist & UDC) sym_int_udc (np); | ||
2914 | else goto unknown_int; | ||
2915 | return; | ||
2916 | } | ||
2917 | |||
2918 | /* | ||
2919 | * Now, interrupts we are not able to recover cleanly. | ||
2920 | * | ||
2921 | * Log message for hard errors. | ||
2922 | * Reset everything. | ||
2923 | */ | ||
2924 | |||
2925 | sym_log_hard_error(np, sist, dstat); | ||
2926 | |||
2927 | if ((sist & (GEN|HTH|SGE)) || | ||
2928 | (dstat & (MDPE|BF|ABRT|IID))) { | ||
2929 | sym_start_reset(np); | ||
2930 | return; | ||
2931 | } | ||
2932 | |||
2933 | unknown_int: | ||
2934 | /* | ||
2935 | * We just miss the cause of the interrupt. :( | ||
2936 | * Print a message. The timeout will do the real work. | ||
2937 | */ | ||
2938 | printf( "%s: unknown interrupt(s) ignored, " | ||
2939 | "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n", | ||
2940 | sym_name(np), istat, dstat, sist); | ||
2941 | } | ||
2942 | |||
2943 | /* | ||
2944 | * Dequeue from the START queue all CCBs that match | ||
2945 | * a given target/lun/task condition (-1 means all), | ||
2946 | * and move them from the BUSY queue to the COMP queue | ||
2947 | * with CAM_REQUEUE_REQ status condition. | ||
2948 | * This function is used during error handling/recovery. | ||
2949 | * It is called with SCRIPTS not running. | ||
2950 | */ | ||
2951 | static int | ||
2952 | sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task) | ||
2953 | { | ||
2954 | int j; | ||
2955 | struct sym_ccb *cp; | ||
2956 | |||
2957 | /* | ||
2958 | * Make sure the starting index is within range. | ||
2959 | */ | ||
2960 | assert((i >= 0) && (i < 2*MAX_QUEUE)); | ||
2961 | |||
2962 | /* | ||
2963 | * Walk until end of START queue and dequeue every job | ||
2964 | * that matches the target/lun/task condition. | ||
2965 | */ | ||
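| /* | ||
| * 'i' scans the queue while 'j' tracks where surviving entries | ||
| * are compacted to; each job occupies two entries, hence the | ||
| * final division by 2. | ||
| */ | ||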
2966 | j = i; | ||
2967 | while (i != np->squeueput) { | ||
2968 | cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i])); | ||
2969 | assert(cp); | ||
2970 | #ifdef SYM_CONF_IARB_SUPPORT | ||
2971 | /* Forget hints for IARB, they may be no longer relevant */ | ||
2972 | cp->host_flags &= ~HF_HINT_IARB; | ||
2973 | #endif | ||
2974 | if ((target == -1 || cp->target == target) && | ||
2975 | (lun == -1 || cp->lun == lun) && | ||
2976 | (task == -1 || cp->tag == task)) { | ||
2977 | sym_set_cam_status(cp->cmd, CAM_REQUEUE_REQ); | ||
2978 | sym_remque(&cp->link_ccbq); | ||
2979 | sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); | ||
2980 | } | ||
2981 | else { | ||
2982 | if (i != j) | ||
2983 | np->squeue[j] = np->squeue[i]; | ||
2984 | if ((j += 2) >= MAX_QUEUE*2) j = 0; | ||
2985 | } | ||
2986 | if ((i += 2) >= MAX_QUEUE*2) i = 0; | ||
2987 | } | ||
2988 | if (i != j) /* Copy back the idle task if needed */ | ||
2989 | np->squeue[j] = np->squeue[i]; | ||
2990 | np->squeueput = j; /* Update our current start queue pointer */ | ||
2991 | |||
2992 | return (i - j) / 2; | ||
2993 | } | ||
2994 | |||
2995 | /* | ||
2996 | * chip handler for bad SCSI status condition | ||
2997 | * | ||
2998 | * In case of bad SCSI status, we unqueue all the tasks | ||
2999 | * currently queued to the controller but not yet started | ||
3000 | * and then restart the SCRIPTS processor immediately. | ||
3001 | * | ||
3002 | * QUEUE FULL and BUSY conditions are handled the same way. | ||
3003 | * Basically all the not yet started tasks are requeued in the | ||
3004 | * device queue and the queue is frozen until a completion. | ||
3005 | * | ||
3006 | * For CHECK CONDITION and COMMAND TERMINATED status, we use | ||
3007 | * the CCB of the failed command to prepare a REQUEST SENSE | ||
3008 | * SCSI command and queue it to the controller queue. | ||
3009 | * | ||
3010 | * SCRATCHA is assumed to have been loaded with STARTPOS | ||
3011 | * before the SCRIPTS called the C code. | ||
3012 | */ | ||
3013 | static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb *cp) | ||
3014 | { | ||
3015 | u32 startp; | ||
3016 | u_char s_status = cp->ssss_status; | ||
3017 | u_char h_flags = cp->host_flags; | ||
3018 | int msglen; | ||
3019 | int i; | ||
3020 | |||
3021 | /* | ||
3022 | * Compute the index of the next job to start from SCRIPTS. | ||
3023 | */ | ||
3024 | i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; | ||
3025 | |||
3026 | /* | ||
3027 | * The last CCB queued, used for the IARB hint, may | ||
3028 | * no longer be relevant. Forget it. | ||
3029 | */ | ||
3030 | #ifdef SYM_CONF_IARB_SUPPORT | ||
3031 | if (np->last_cp) | ||
3032 | np->last_cp = 0; | ||
3033 | #endif | ||
3034 | |||
3035 | /* | ||
3036 | * Now deal with the SCSI status. | ||
3037 | */ | ||
3038 | switch(s_status) { | ||
3039 | case S_BUSY: | ||
3040 | case S_QUEUE_FULL: | ||
3041 | if (sym_verbose >= 2) { | ||
3042 | sym_print_addr(cp->cmd, "%s\n", | ||
3043 | s_status == S_BUSY ? "BUSY" : "QUEUE FULL"); | ||
3044 | } | ||
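| /* fall through */ | ||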
3045 | default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ | ||
3046 | sym_complete_error (np, cp); | ||
3047 | break; | ||
3048 | case S_TERMINATED: | ||
3049 | case S_CHECK_COND: | ||
3050 | /* | ||
3051 | * If we get an SCSI error when requesting sense, give up. | ||
3052 | */ | ||
3053 | if (h_flags & HF_SENSE) { | ||
3054 | sym_complete_error (np, cp); | ||
3055 | break; | ||
3056 | } | ||
3057 | |||
3058 | /* | ||
3059 | * Dequeue all queued CCBs for that device not yet started, | ||
3060 | * and restart the SCRIPTS processor immediately. | ||
3061 | */ | ||
3062 | sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); | ||
3063 | OUTL_DSP(np, SCRIPTA_BA(np, start)); | ||
3064 | |||
3065 | /* | ||
3066 | * Save some info of the actual IO. | ||
3067 | * Compute the data residual. | ||
3068 | */ | ||
3069 | cp->sv_scsi_status = cp->ssss_status; | ||
3070 | cp->sv_xerr_status = cp->xerr_status; | ||
3071 | cp->sv_resid = sym_compute_residual(np, cp); | ||
3072 | |||
3073 | /* | ||
3074 | * Prepare all needed data structures for | ||
3075 | * requesting sense data. | ||
3076 | */ | ||
3077 | |||
3078 | cp->scsi_smsg2[0] = IDENTIFY(0, cp->lun); | ||
3079 | msglen = 1; | ||
3080 | |||
3081 | /* | ||
3082 | * If we are currently using anything different from | ||
3083 | * async. 8 bit data transfers with that target, | ||
3084 | * start a negotiation, since the device may want | ||
3085 | * to report to us a UNIT ATTENTION condition due to | ||
3086 | * a cause we currently ignore, and we do not want | ||
3087 | * to be stuck with WIDE and/or SYNC data transfer. | ||
3088 | * | ||
3089 | * cp->nego_status is filled by sym_prepare_nego(). | ||
3090 | */ | ||
3091 | cp->nego_status = 0; | ||
3092 | msglen += sym_prepare_nego(np, cp, &cp->scsi_smsg2[msglen]); | ||
3093 | /* | ||
3094 | * Message table indirect structure. | ||
3095 | */ | ||
3096 | cp->phys.smsg.addr = cpu_to_scr(CCB_BA(cp, scsi_smsg2)); | ||
3097 | cp->phys.smsg.size = cpu_to_scr(msglen); | ||
3098 | |||
3099 | /* | ||
3100 | * sense command | ||
3101 | */ | ||
3102 | cp->phys.cmd.addr = cpu_to_scr(CCB_BA(cp, sensecmd)); | ||
3103 | cp->phys.cmd.size = cpu_to_scr(6); | ||
3104 | |||
3105 | /* | ||
3106 | * patch requested size into sense command | ||
3107 | */ | ||
3108 | cp->sensecmd[0] = REQUEST_SENSE; | ||
3109 | cp->sensecmd[1] = 0; | ||
3110 | if (cp->cmd->device->scsi_level <= SCSI_2 && cp->lun <= 7) | ||
3111 | cp->sensecmd[1] = cp->lun << 5; | ||
3112 | cp->sensecmd[4] = SYM_SNS_BBUF_LEN; | ||
3113 | cp->data_len = SYM_SNS_BBUF_LEN; | ||
3114 | |||
3115 | /* | ||
3116 | * sense data | ||
3117 | */ | ||
3118 | memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN); | ||
3119 | cp->phys.sense.addr = cpu_to_scr(CCB_BA(cp, sns_bbuf)); | ||
3120 | cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); | ||
3121 | |||
3122 | /* | ||
3123 | * requeue the command. | ||
3124 | */ | ||
3125 | startp = SCRIPTB_BA(np, sdata_in); | ||
3126 | |||
3127 | cp->phys.head.savep = cpu_to_scr(startp); | ||
3128 | cp->phys.head.lastp = cpu_to_scr(startp); | ||
3129 | cp->startp = cpu_to_scr(startp); | ||
3130 | cp->goalp = cpu_to_scr(startp + 16); | ||
3131 | |||
3132 | cp->host_xflags = 0; | ||
3133 | cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; | ||
3134 | cp->ssss_status = S_ILLEGAL; | ||
3135 | cp->host_flags = (HF_SENSE|HF_DATA_IN); | ||
3136 | cp->xerr_status = 0; | ||
3137 | cp->extra_bytes = 0; | ||
3138 | |||
3139 | cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); | ||
3140 | |||
3141 | /* | ||
3142 | * Requeue the command. | ||
3143 | */ | ||
3144 | sym_put_start_queue(np, cp); | ||
3145 | |||
3146 | /* | ||
3147 | * Give back to upper layer everything we have dequeued. | ||
3148 | */ | ||
3149 | sym_flush_comp_queue(np, 0); | ||
3150 | break; | ||
3151 | } | ||
3152 | } | ||
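/*
 * Illustrative sketch, not part of the original code: for a SCSI-2
 * device at LUN 2 (made-up example values), the auto-sense setup above
 * ends up issuing a 6-byte REQUEST SENSE CDB shaped as below, with the
 * sense bytes landing in cp->sns_bbuf once the CCB has been requeued
 * through the select/sdata_in path.
 */
#if 0
static const u_char example_sensecmd[6] = {
	REQUEST_SENSE,		/* opcode */
	2 << 5,			/* LUN in bits 7-5 for SCSI-2 and older */
	0, 0,
	SYM_SNS_BBUF_LEN,	/* allocation length */
	0
};
#endif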
3153 | |||
3154 | /* | ||
3155 | * After a device has accepted some management message | ||
3156 | * as BUS DEVICE RESET, ABORT TASK, etc ..., or when | ||
3157 | * a device signals a UNIT ATTENTION condition, some | ||
3158 | * tasks are thrown away by the device. We are required | ||
3159 | * to reflect that in our task list since the device | ||
3160 | * will never complete these tasks. | ||
3161 | * | ||
3162 | * This function moves from the BUSY queue to the COMP | ||
3163 | * queue all disconnected CCBs for a given target that | ||
3164 | * match the following criteria: | ||
3165 | * - lun=-1 means any logical UNIT, otherwise a given one. | ||
3166 | * - task=-1 means any task, otherwise a given one. | ||
3167 | */ | ||
3168 | int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task) | ||
3169 | { | ||
3170 | SYM_QUEHEAD qtmp, *qp; | ||
3171 | int i = 0; | ||
3172 | struct sym_ccb *cp; | ||
3173 | |||
3174 | /* | ||
3175 | * Move the entire BUSY queue to our temporary queue. | ||
3176 | */ | ||
3177 | sym_que_init(&qtmp); | ||
3178 | sym_que_splice(&np->busy_ccbq, &qtmp); | ||
3179 | sym_que_init(&np->busy_ccbq); | ||
3180 | |||
3181 | /* | ||
3182 | * Put all CCBs that match our criteria into | ||
3183 | * the COMP queue and put back other ones into | ||
3184 | * the BUSY queue. | ||
3185 | */ | ||
3186 | while ((qp = sym_remque_head(&qtmp)) != 0) { | ||
3187 | struct scsi_cmnd *cmd; | ||
3188 | cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); | ||
3189 | cmd = cp->cmd; | ||
3190 | if (cp->host_status != HS_DISCONNECT || | ||
3191 | cp->target != target || | ||
3192 | (lun != -1 && cp->lun != lun) || | ||
3193 | (task != -1 && | ||
3194 | (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { | ||
3195 | sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); | ||
3196 | continue; | ||
3197 | } | ||
3198 | sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); | ||
3199 | |||
3200 | /* Preserve the software timeout condition */ | ||
3201 | if (sym_get_cam_status(cmd) != CAM_CMD_TIMEOUT) | ||
3202 | sym_set_cam_status(cmd, cam_status); | ||
3203 | ++i; | ||
3204 | #if 0 | ||
3205 | printf("XXXX TASK @%p CLEARED\n", cp); | ||
3206 | #endif | ||
3207 | } | ||
3208 | return i; | ||
3209 | } | ||
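/*
 * Illustrative usage, not part of the original code: this is in essence
 * what the SIR_ABORT_SENT path of sym_sir_task_recovery() below does once
 * a BUS DEVICE RESET has been accepted by a target (target 3 here is a
 * made-up value) -- every disconnected task for that target is failed
 * back to the upper layer.
 */
#if 0
	/* i = index of the next start queue slot, read from SCRATCHA */
	sym_dequeue_from_squeue(np, i, 3, -1, -1);
	sym_clear_tasks(np, CAM_REQ_ABORTED, 3, -1, -1);
	sym_flush_comp_queue(np, 0);
#endif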
3210 | |||
3211 | /* | ||
3212 | * chip handler for TASKS recovery | ||
3213 | * | ||
3214 | * We cannot safely abort a command while the SCRIPTS | ||
3215 | * processor is running, since we would just be racing | ||
3216 | * with it. | ||
3217 | * | ||
3218 | * As long as we have tasks to abort, we keep the SEM | ||
3219 | * bit set in the ISTAT. When this bit is set, the | ||
3220 | * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) | ||
3221 | * each time it enters the scheduler. | ||
3222 | * | ||
3223 | * If we have to reset a target, clear tasks of a unit, | ||
3224 | * or to perform the abort of a disconnected job, we | ||
3225 | * restart the SCRIPTS for selecting the target. Once | ||
3226 | * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). | ||
3227 | * If it loses arbitration, the SCRIPTS will interrupt again | ||
3228 | * the next time it enters its scheduler, and so on ... | ||
3229 | * | ||
3230 | * On SIR_TARGET_SELECTED, we scan for the most | ||
3231 | * appropriate thing to do: | ||
3232 | * | ||
3233 | * - If nothing, we just send an M_ABORT message to the | ||
3234 | * target to get rid of the useless SCSI bus ownership. | ||
3235 | * According to the specs, no tasks shall be affected. | ||
3236 | * - If the target is to be reset, we send it a M_RESET | ||
3237 | * message. | ||
3238 | * - If a logical UNIT is to be cleared, we send the | ||
3239 | * IDENTIFY(lun) + M_ABORT. | ||
3240 | * - If an untagged task is to be aborted, we send the | ||
3241 | * IDENTIFY(lun) + M_ABORT. | ||
3242 | * - If a tagged task is to be aborted, we send the | ||
3243 | * IDENTIFY(lun) + task attributes + M_ABORT_TAG. | ||
3244 | * | ||
3245 | * Once our 'kiss of death' :) message has been accepted | ||
3246 | * by the target, the SCRIPTS interrupts again | ||
3247 | * (SIR_ABORT_SENT). On this interrupt, we complete | ||
3248 | * all the CCBs that should have been aborted by the | ||
3249 | * target according to our message. | ||
3250 | */ | ||
3251 | static void sym_sir_task_recovery(struct sym_hcb *np, int num) | ||
3252 | { | ||
3253 | SYM_QUEHEAD *qp; | ||
3254 | struct sym_ccb *cp; | ||
3255 | struct sym_tcb *tp = NULL; /* gcc isn't quite smart enough yet */ | ||
3256 | struct scsi_target *starget; | ||
3257 | int target=-1, lun=-1, task; | ||
3258 | int i, k; | ||
3259 | |||
3260 | switch(num) { | ||
3261 | /* | ||
3262 | * The SCRIPTS processor stopped before starting | ||
3263 | * the next command in order to allow us to perform | ||
3264 | * some task recovery. | ||
3265 | */ | ||
3266 | case SIR_SCRIPT_STOPPED: | ||
3267 | /* | ||
3268 | * Do we have any target to reset or unit to clear ? | ||
3269 | */ | ||
3270 | for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { | ||
3271 | tp = &np->target[i]; | ||
3272 | if (tp->to_reset || | ||
3273 | (tp->lun0p && tp->lun0p->to_clear)) { | ||
3274 | target = i; | ||
3275 | break; | ||
3276 | } | ||
3277 | if (!tp->lunmp) | ||
3278 | continue; | ||
3279 | for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { | ||
3280 | if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { | ||
3281 | target = i; | ||
3282 | break; | ||
3283 | } | ||
3284 | } | ||
3285 | if (target != -1) | ||
3286 | break; | ||
3287 | } | ||
3288 | |||
3289 | /* | ||
3290 | * If not, walk the busy queue for any | ||
3291 | * disconnected CCB to be aborted. | ||
3292 | */ | ||
3293 | if (target == -1) { | ||
3294 | FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { | ||
3295 | cp = sym_que_entry(qp,struct sym_ccb,link_ccbq); | ||
3296 | if (cp->host_status != HS_DISCONNECT) | ||
3297 | continue; | ||
3298 | if (cp->to_abort) { | ||
3299 | target = cp->target; | ||
3300 | break; | ||
3301 | } | ||
3302 | } | ||
3303 | } | ||
3304 | |||
3305 | /* | ||
3306 | * If some target is to be selected, | ||
3307 | * prepare and start the selection. | ||
3308 | */ | ||
3309 | if (target != -1) { | ||
3310 | tp = &np->target[target]; | ||
3311 | np->abrt_sel.sel_id = target; | ||
3312 | np->abrt_sel.sel_scntl3 = tp->head.wval; | ||
3313 | np->abrt_sel.sel_sxfer = tp->head.sval; | ||
3314 | OUTL(np, nc_dsa, np->hcb_ba); | ||
3315 | OUTL_DSP(np, SCRIPTB_BA(np, sel_for_abort)); | ||
3316 | return; | ||
3317 | } | ||
3318 | |||
3319 | /* | ||
3320 | * Now look for a CCB to abort that hasn't started yet. | ||
3321 | * Btw, the SCRIPTS processor is still stopped, so | ||
3322 | * we are not in a race with it. | ||
3323 | */ | ||
3324 | i = 0; | ||
3325 | cp = NULL; | ||
3326 | FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { | ||
3327 | cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); | ||
3328 | if (cp->host_status != HS_BUSY && | ||
3329 | cp->host_status != HS_NEGOTIATE) | ||
3330 | continue; | ||
3331 | if (!cp->to_abort) | ||
3332 | continue; | ||
3333 | #ifdef SYM_CONF_IARB_SUPPORT | ||
3334 | /* | ||
3335 | * If we are using IMMEDIATE ARBITRATION, we do not | ||
3336 | * want to cancel the last queued CCB, since the | ||
3337 | * SCRIPTS may have anticipated the selection. | ||
3338 | */ | ||
3339 | if (cp == np->last_cp) { | ||
3340 | cp->to_abort = 0; | ||
3341 | continue; | ||
3342 | } | ||
3343 | #endif | ||
3344 | i = 1; /* Means we have found some */ | ||
3345 | break; | ||
3346 | } | ||
3347 | if (!i) { | ||
3348 | /* | ||
3349 | * We are done, so we do not need | ||
3350 | * to synchronize with the SCRIPTS any longer. | ||
3351 | * Remove the SEM flag from the ISTAT. | ||
3352 | */ | ||
3353 | np->istat_sem = 0; | ||
3354 | OUTB(np, nc_istat, SIGP); | ||
3355 | break; | ||
3356 | } | ||
3357 | /* | ||
3358 | * Compute index of next position in the start | ||
3359 | * queue the SCRIPTS intends to start and dequeue | ||
3360 | * all CCBs for that device that haven't been started. | ||
3361 | */ | ||
3362 | i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; | ||
3363 | i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); | ||
3364 | |||
3365 | /* | ||
3366 | * Make sure at least our IO to abort has been dequeued. | ||
3367 | */ | ||
3368 | #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
3369 | assert(i && sym_get_cam_status(cp->cmd) == CAM_REQUEUE_REQ); | ||
3370 | #else | ||
3371 | sym_remque(&cp->link_ccbq); | ||
3372 | sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); | ||
3373 | #endif | ||
3374 | /* | ||
3375 | * Keep track in cam status of the reason of the abort. | ||
3376 | */ | ||
3377 | if (cp->to_abort == 2) | ||
3378 | sym_set_cam_status(cp->cmd, CAM_CMD_TIMEOUT); | ||
3379 | else | ||
3380 | sym_set_cam_status(cp->cmd, CAM_REQ_ABORTED); | ||
3381 | |||
3382 | /* | ||
3383 | * Complete with error everything that we have dequeued. | ||
3384 | */ | ||
3385 | sym_flush_comp_queue(np, 0); | ||
3386 | break; | ||
3387 | /* | ||
3388 | * The SCRIPTS processor has selected a target | ||
3389 | * for which we may have some manual recovery to perform. | ||
3390 | */ | ||
3391 | case SIR_TARGET_SELECTED: | ||
3392 | target = INB(np, nc_sdid) & 0xf; | ||
3393 | tp = &np->target[target]; | ||
3394 | |||
3395 | np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg)); | ||
3396 | |||
3397 | /* | ||
3398 | * If the target is to be reset, prepare a | ||
3399 | * M_RESET message and clear the to_reset flag | ||
3400 | * since we do not expect this operation to fail. | ||
3401 | */ | ||
3402 | if (tp->to_reset) { | ||
3403 | np->abrt_msg[0] = M_RESET; | ||
3404 | np->abrt_tbl.size = 1; | ||
3405 | tp->to_reset = 0; | ||
3406 | break; | ||
3407 | } | ||
3408 | |||
3409 | /* | ||
3410 | * Otherwise, look for some logical unit to be cleared. | ||
3411 | */ | ||
3412 | if (tp->lun0p && tp->lun0p->to_clear) | ||
3413 | lun = 0; | ||
3414 | else if (tp->lunmp) { | ||
3415 | for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { | ||
3416 | if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { | ||
3417 | lun = k; | ||
3418 | break; | ||
3419 | } | ||
3420 | } | ||
3421 | } | ||
3422 | |||
3423 | /* | ||
3424 | * If a logical unit is to be cleared, prepare | ||
3425 | * an IDENTIFY(lun) + ABORT MESSAGE. | ||
3426 | */ | ||
3427 | if (lun != -1) { | ||
3428 | struct sym_lcb *lp = sym_lp(tp, lun); | ||
3429 | lp->to_clear = 0; /* We don't expect to fail here */ | ||
3430 | np->abrt_msg[0] = IDENTIFY(0, lun); | ||
3431 | np->abrt_msg[1] = M_ABORT; | ||
3432 | np->abrt_tbl.size = 2; | ||
3433 | break; | ||
3434 | } | ||
3435 | |||
3436 | /* | ||
3437 | * Otherwise, look for some disconnected job to | ||
3438 | * abort for this target. | ||
3439 | */ | ||
3440 | i = 0; | ||
3441 | cp = NULL; | ||
3442 | FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { | ||
3443 | cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); | ||
3444 | if (cp->host_status != HS_DISCONNECT) | ||
3445 | continue; | ||
3446 | if (cp->target != target) | ||
3447 | continue; | ||
3448 | if (!cp->to_abort) | ||
3449 | continue; | ||
3450 | i = 1; /* Means we have some */ | ||
3451 | break; | ||
3452 | } | ||
3453 | |||
3454 | /* | ||
3455 | * If we have none, probably since the device has | ||
3456 | * completed the command before we won arbitration, | ||
3457 | * send an M_ABORT message without IDENTIFY. | ||
3458 | * According to the specs, the device must just | ||
3459 | * disconnect the BUS and not abort any task. | ||
3460 | */ | ||
3461 | if (!i) { | ||
3462 | np->abrt_msg[0] = M_ABORT; | ||
3463 | np->abrt_tbl.size = 1; | ||
3464 | break; | ||
3465 | } | ||
3466 | |||
3467 | /* | ||
3468 | * We have some task to abort. | ||
3469 | * Set the IDENTIFY(lun) | ||
3470 | */ | ||
3471 | np->abrt_msg[0] = IDENTIFY(0, cp->lun); | ||
3472 | |||
3473 | /* | ||
3474 | * If we want to abort an untagged command, we | ||
3475 | * will send an IDENTIFY + M_ABORT. | ||
3476 | * Otherwise (tagged command), we will send | ||
3477 | * an IDENTIFY + task attributes + ABORT TAG. | ||
3478 | */ | ||
3479 | if (cp->tag == NO_TAG) { | ||
3480 | np->abrt_msg[1] = M_ABORT; | ||
3481 | np->abrt_tbl.size = 2; | ||
3482 | } else { | ||
3483 | np->abrt_msg[1] = cp->scsi_smsg[1]; | ||
3484 | np->abrt_msg[2] = cp->scsi_smsg[2]; | ||
3485 | np->abrt_msg[3] = M_ABORT_TAG; | ||
3486 | np->abrt_tbl.size = 4; | ||
3487 | } | ||
3488 | /* | ||
3489 | * Keep track of software timeout condition, since the | ||
3490 | * peripheral driver may not count retries on abort | ||
3491 | * conditions not due to timeout. | ||
3492 | */ | ||
3493 | if (cp->to_abort == 2) | ||
3494 | sym_set_cam_status(cp->cmd, CAM_CMD_TIMEOUT); | ||
3495 | cp->to_abort = 0; /* We do not expect to fail here */ | ||
3496 | break; | ||
3497 | |||
3498 | /* | ||
3499 | * The target has accepted our message and switched | ||
3500 | * to BUS FREE phase as we expected. | ||
3501 | */ | ||
3502 | case SIR_ABORT_SENT: | ||
3503 | target = INB(np, nc_sdid) & 0xf; | ||
3504 | tp = &np->target[target]; | ||
3505 | starget = tp->sdev->sdev_target; | ||
3506 | |||
3507 | /* | ||
3508 | * If we didn't abort anything, leave here. | ||
3509 | */ | ||
3510 | if (np->abrt_msg[0] == M_ABORT) | ||
3511 | break; | ||
3512 | |||
3513 | /* | ||
3514 | * If we sent a M_RESET, then a hardware reset has | ||
3515 | * been performed by the target. | ||
3516 | * - Reset everything to async 8 bit | ||
3517 | * - Tell ourself to negotiate next time :-) | ||
3518 | * - Prepare to clear all disconnected CCBs for | ||
3519 | * this target from our task list (lun=task=-1) | ||
3520 | */ | ||
3521 | lun = -1; | ||
3522 | task = -1; | ||
3523 | if (np->abrt_msg[0] == M_RESET) { | ||
3524 | tp->head.sval = 0; | ||
3525 | tp->head.wval = np->rv_scntl3; | ||
3526 | tp->head.uval = 0; | ||
3527 | spi_period(starget) = 0; | ||
3528 | spi_offset(starget) = 0; | ||
3529 | spi_width(starget) = 0; | ||
3530 | spi_iu(starget) = 0; | ||
3531 | spi_dt(starget) = 0; | ||
3532 | spi_qas(starget) = 0; | ||
3533 | tp->tgoal.check_nego = 1; | ||
3534 | } | ||
3535 | |||
3536 | /* | ||
3537 | * Otherwise, check for the LUN and TASK(s) | ||
3538 | * concerned by the cancellation. | ||
3539 | * If it is not ABORT_TAG then it is CLEAR_QUEUE | ||
3540 | * or an ABORT message :-) | ||
3541 | */ | ||
3542 | else { | ||
3543 | lun = np->abrt_msg[0] & 0x3f; | ||
3544 | if (np->abrt_msg[1] == M_ABORT_TAG) | ||
3545 | task = np->abrt_msg[2]; | ||
3546 | } | ||
3547 | |||
3548 | /* | ||
3549 | * Complete all the CCBs the device should have | ||
3550 | * aborted due to our 'kiss of death' message. | ||
3551 | */ | ||
3552 | i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; | ||
3553 | sym_dequeue_from_squeue(np, i, target, lun, -1); | ||
3554 | sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task); | ||
3555 | sym_flush_comp_queue(np, 0); | ||
3556 | |||
3557 | /* | ||
3558 | * If we sent a BDR, make upper layer aware of that. | ||
3559 | */ | ||
3560 | if (np->abrt_msg[0] == M_RESET) | ||
3561 | sym_xpt_async_sent_bdr(np, target); | ||
3562 | break; | ||
3563 | } | ||
3564 | |||
3565 | /* | ||
3566 | * Print to the log the message we intend to send. | ||
3567 | */ | ||
3568 | if (num == SIR_TARGET_SELECTED) { | ||
3569 | dev_info(&tp->sdev->sdev_target->dev, "control msgout:"); | ||
3570 | sym_printl_hex(np->abrt_msg, np->abrt_tbl.size); | ||
3571 | np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size); | ||
3572 | } | ||
3573 | |||
3574 | /* | ||
3575 | * Let the SCRIPTS processor continue. | ||
3576 | */ | ||
3577 | OUTONB_STD(); | ||
3578 | } | ||
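/*
 * Illustrative sketch, not part of the original code: the recovery
 * machinery above is kicked from elsewhere in the driver by flagging
 * the CCB and waking the SCRIPTS scheduler, roughly as below (the
 * 'timed_out' flag is hypothetical; the real call site lives in the
 * abort/eh paths).
 */
#if 0
	cp->to_abort = timed_out ? 2 : 1;	/* 2 preserves CAM_CMD_TIMEOUT */
	np->istat_sem = SEM;
	OUTB(np, nc_istat, SIGP|SEM);		/* stop SCRIPTS at its scheduler */
#endif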
3579 | |||
3580 | /* | ||
3581 | * Gerard's alchemy:) that deals with the data | ||
3582 | * pointer for both MDP and the residual calculation. | ||
3583 | * | ||
3584 | * I didn't want to bloat the code by more than 200 | ||
3585 | * lines for the handling of both MDP and the residual. | ||
3586 | * This has been achieved by using a data pointer | ||
3587 | * representation consisting in an index in the data | ||
3588 | * array (dp_sg) and a negative offset (dp_ofs) that | ||
3589 | * have the following meaning: | ||
3590 | * | ||
3591 | * - dp_sg = SYM_CONF_MAX_SG | ||
3592 | * we are at the end of the data script. | ||
3593 | * - dp_sg < SYM_CONF_MAX_SG | ||
3594 | * dp_sg points to the next entry of the scatter array | ||
3595 | * we want to transfer. | ||
3596 | * - dp_ofs < 0 | ||
3597 | * dp_ofs represents the residual bytes of the | ||
3598 | * previous scatter entry we will send first. | ||
3599 | * - dp_ofs = 0 | ||
3600 | * no residual to send first. | ||
3601 | * | ||
3602 | * The function sym_evaluate_dp() accepts an arbitrary | ||
3603 | * offset (basically from the MDP message) and returns | ||
3604 | * the corresponding values of dp_sg and dp_ofs. | ||
3605 | */ | ||
3606 | |||
3607 | static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int *ofs) | ||
3608 | { | ||
3609 | u32 dp_scr; | ||
3610 | int dp_ofs, dp_sg, dp_sgmin; | ||
3611 | int tmp; | ||
3612 | struct sym_pmc *pm; | ||
3613 | |||
3614 | /* | ||
3615 | * Compute the resulting data pointer in terms of a script | ||
3616 | * address within some DATA script and a signed byte offset. | ||
3617 | */ | ||
3618 | dp_scr = scr; | ||
3619 | dp_ofs = *ofs; | ||
3620 | if (dp_scr == SCRIPTA_BA(np, pm0_data)) | ||
3621 | pm = &cp->phys.pm0; | ||
3622 | else if (dp_scr == SCRIPTA_BA(np, pm1_data)) | ||
3623 | pm = &cp->phys.pm1; | ||
3624 | else | ||
3625 | pm = NULL; | ||
3626 | |||
3627 | if (pm) { | ||
3628 | dp_scr = scr_to_cpu(pm->ret); | ||
3629 | dp_ofs -= scr_to_cpu(pm->sg.size); | ||
3630 | } | ||
3631 | |||
3632 | /* | ||
3633 | * If we are auto-sensing, then we are done. | ||
3634 | */ | ||
3635 | if (cp->host_flags & HF_SENSE) { | ||
3636 | *ofs = dp_ofs; | ||
3637 | return 0; | ||
3638 | } | ||
3639 | |||
3640 | /* | ||
3641 | * Deduce the index of the sg entry. | ||
3642 | * Keep track of the index of the first valid entry. | ||
3643 | * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the | ||
3644 | * end of the data. | ||
3645 | */ | ||
3646 | tmp = scr_to_cpu(sym_goalp(cp)); | ||
3647 | dp_sg = SYM_CONF_MAX_SG; | ||
3648 | if (dp_scr != tmp) | ||
3649 | dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4); | ||
3650 | dp_sgmin = SYM_CONF_MAX_SG - cp->segments; | ||
3651 | |||
3652 | /* | ||
3653 | * Move to the sg entry the data pointer belongs to. | ||
3654 | * | ||
3655 | * If we are inside the data area, we expect the result to be: | ||
3656 | * | ||
3657 | * Either, | ||
3658 | * dp_ofs = 0 and dp_sg is the index of the sg entry | ||
3659 | * the data pointer belongs to (or the end of the data) | ||
3660 | * Or, | ||
3661 | * dp_ofs < 0 and dp_sg is the index of the sg entry | ||
3662 | * the data pointer belongs to + 1. | ||
3663 | */ | ||
3664 | if (dp_ofs < 0) { | ||
3665 | int n; | ||
3666 | while (dp_sg > dp_sgmin) { | ||
3667 | --dp_sg; | ||
3668 | tmp = scr_to_cpu(cp->phys.data[dp_sg].size); | ||
3669 | n = dp_ofs + (tmp & 0xffffff); | ||
3670 | if (n > 0) { | ||
3671 | ++dp_sg; | ||
3672 | break; | ||
3673 | } | ||
3674 | dp_ofs = n; | ||
3675 | } | ||
3676 | } | ||
3677 | else if (dp_ofs > 0) { | ||
3678 | while (dp_sg < SYM_CONF_MAX_SG) { | ||
3679 | tmp = scr_to_cpu(cp->phys.data[dp_sg].size); | ||
3680 | dp_ofs -= (tmp & 0xffffff); | ||
3681 | ++dp_sg; | ||
3682 | if (dp_ofs <= 0) | ||
3683 | break; | ||
3684 | } | ||
3685 | } | ||
3686 | |||
3687 | /* | ||
3688 | * Make sure the data pointer is inside the data area. | ||
3689 | * If not, return some error. | ||
3690 | */ | ||
3691 | if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0)) | ||
3692 | goto out_err; | ||
3693 | else if (dp_sg > SYM_CONF_MAX_SG || | ||
3694 | (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0)) | ||
3695 | goto out_err; | ||
3696 | |||
3697 | /* | ||
3698 | * Save the extreme pointer if needed. | ||
3699 | */ | ||
3700 | if (dp_sg > cp->ext_sg || | ||
3701 | (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) { | ||
3702 | cp->ext_sg = dp_sg; | ||
3703 | cp->ext_ofs = dp_ofs; | ||
3704 | } | ||
3705 | |||
3706 | /* | ||
3707 | * Return data. | ||
3708 | */ | ||
3709 | *ofs = dp_ofs; | ||
3710 | return dp_sg; | ||
3711 | |||
3712 | out_err: | ||
3713 | return -1; | ||
3714 | } | ||
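/*
 * Worked example with made-up numbers: assume SYM_CONF_MAX_SG == 96,
 * cp->segments == 3 (valid entries data[93..95]) and 512 bytes per
 * entry. A saved pointer sitting 100 bytes into data[94] comes back
 * from sym_evaluate_dp() as dp_sg == 95 with *ofs == -412: the last
 * 412 bytes of data[94] must be transferred first, after which the
 * main data script resumes at entry 95.
 */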
3715 | |||
3716 | /* | ||
3717 | * chip handler for MODIFY DATA POINTER MESSAGE | ||
3718 | * | ||
3719 | * We also call this function on IGNORE WIDE RESIDUE | ||
3720 | * messages that do not match a SWIDE full condition. | ||
3721 | * Btw, we assume in that situation that such a message | ||
3722 | * is equivalent to a MODIFY DATA POINTER (offset=-1). | ||
3723 | */ | ||
3724 | |||
3725 | static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp, int ofs) | ||
3726 | { | ||
3727 | int dp_ofs = ofs; | ||
3728 | u32 dp_scr = sym_get_script_dp (np, cp); | ||
3729 | u32 dp_ret; | ||
3730 | u32 tmp; | ||
3731 | u_char hflags; | ||
3732 | int dp_sg; | ||
3733 | struct sym_pmc *pm; | ||
3734 | |||
3735 | /* | ||
3736 | * Not supported for auto-sense. | ||
3737 | */ | ||
3738 | if (cp->host_flags & HF_SENSE) | ||
3739 | goto out_reject; | ||
3740 | |||
3741 | /* | ||
3742 | * Apply our alchemy:) (see comments in sym_evaluate_dp()) | ||
3743 | * to the resulting data pointer. | ||
3744 | */ | ||
3745 | dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs); | ||
3746 | if (dp_sg < 0) | ||
3747 | goto out_reject; | ||
3748 | |||
3749 | /* | ||
3750 | * And our alchemy:) allows us to easily calculate the data | ||
3751 | * script address we want to return for the next data phase. | ||
3752 | */ | ||
3753 | dp_ret = cpu_to_scr(sym_goalp(cp)); | ||
3754 | dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4); | ||
3755 | |||
3756 | /* | ||
3757 | * If offset / scatter entry is zero we do not need | ||
3758 | * a context for the new current data pointer. | ||
3759 | */ | ||
3760 | if (dp_ofs == 0) { | ||
3761 | dp_scr = dp_ret; | ||
3762 | goto out_ok; | ||
3763 | } | ||
3764 | |||
3765 | /* | ||
3766 | * Get a context for the new current data pointer. | ||
3767 | */ | ||
3768 | hflags = INB(np, HF_PRT); | ||
3769 | |||
3770 | if (hflags & HF_DP_SAVED) | ||
3771 | hflags ^= HF_ACT_PM; | ||
3772 | |||
3773 | if (!(hflags & HF_ACT_PM)) { | ||
3774 | pm = &cp->phys.pm0; | ||
3775 | dp_scr = SCRIPTA_BA(np, pm0_data); | ||
3776 | } | ||
3777 | else { | ||
3778 | pm = &cp->phys.pm1; | ||
3779 | dp_scr = SCRIPTA_BA(np, pm1_data); | ||
3780 | } | ||
3781 | |||
3782 | hflags &= ~(HF_DP_SAVED); | ||
3783 | |||
3784 | OUTB(np, HF_PRT, hflags); | ||
3785 | |||
3786 | /* | ||
3787 | * Set up the new current data pointer. | ||
3788 | * ofs < 0 there, and for the next data phase, we | ||
3789 | * want to transfer part of the data of the sg entry | ||
3790 | * corresponding to index dp_sg-1 prior to returning | ||
3791 | * to the main data script. | ||
3792 | */ | ||
3793 | pm->ret = cpu_to_scr(dp_ret); | ||
3794 | tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr); | ||
3795 | tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs; | ||
3796 | pm->sg.addr = cpu_to_scr(tmp); | ||
3797 | pm->sg.size = cpu_to_scr(-dp_ofs); | ||
3798 | |||
3799 | out_ok: | ||
3800 | sym_set_script_dp (np, cp, dp_scr); | ||
3801 | OUTL_DSP(np, SCRIPTA_BA(np, clrack)); | ||
3802 | return; | ||
3803 | |||
3804 | out_reject: | ||
3805 | OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); | ||
3806 | } | ||
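/*
 * Illustrative note, values invented: a MODIFY DATA POINTER message
 * arrives as { M_EXTENDED, 5, M_X_MODIFY_DP, a3, a2, a1, a0 } and the
 * SIR_MSG_RECEIVED handler below rebuilds the signed argument as
 * (a3<<24)+(a2<<16)+(a1<<8)+a0 before calling sym_modify_dp(). In the
 * worked example above, an argument of -412 applied at the end of
 * data[94] parks the new pointer in a pm0/pm1 context so that the
 * remaining 412 bytes of that entry go out before the main data
 * script is re-entered.
 */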
3807 | |||
3808 | |||
3809 | /* | ||
3810 | * chip calculation of the data residual. | ||
3811 | * | ||
3812 | * As I used to say, the requirement of data residual | ||
3813 | * in SCSI is broken, useless and cannot be achieved | ||
3814 | * without huge complexity. | ||
3815 | * But most OSes and even the official CAM require it. | ||
3816 | * When stupidity happens to be so widespread inside | ||
3817 | * a community, it gets hard to convince anyone otherwise. | ||
3818 | * | ||
3819 | * Anyway, I don't care, since I am not going to use | ||
3820 | * any software that considers this data residual as | ||
3821 | * a relevant information. :) | ||
3822 | */ | ||
3823 | |||
3824 | int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp) | ||
3825 | { | ||
3826 | int dp_sg, dp_sgmin, resid = 0; | ||
3827 | int dp_ofs = 0; | ||
3828 | |||
3829 | /* | ||
3830 | * Check for some data lost or just thrown away. | ||
3831 | * We are not required to be quite accurate in this | ||
3832 | * situation. Btw, if we are odd for output and the | ||
3833 | * device claims some more data, it may well happen | ||
3834 | * that our residual is zero. :-) | ||
3835 | */ | ||
3836 | if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) { | ||
3837 | if (cp->xerr_status & XE_EXTRA_DATA) | ||
3838 | resid -= cp->extra_bytes; | ||
3839 | if (cp->xerr_status & XE_SODL_UNRUN) | ||
3840 | ++resid; | ||
3841 | if (cp->xerr_status & XE_SWIDE_OVRUN) | ||
3842 | --resid; | ||
3843 | } | ||
3844 | |||
3845 | /* | ||
3846 | * If all data has been transferred, | ||
3847 | * there is no residual. | ||
3848 | */ | ||
3849 | if (cp->phys.head.lastp == sym_goalp(cp)) | ||
3850 | return resid; | ||
3851 | |||
3852 | /* | ||
3853 | * If no data transfer occurs, or if the data | ||
3854 | * pointer is weird, return full residual. | ||
3855 | */ | ||
3856 | if (cp->startp == cp->phys.head.lastp || | ||
3857 | sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), | ||
3858 | &dp_ofs) < 0) { | ||
3859 | return cp->data_len; | ||
3860 | } | ||
3861 | |||
3862 | /* | ||
3863 | * If we were auto-sensing, then we are done. | ||
3864 | */ | ||
3865 | if (cp->host_flags & HF_SENSE) { | ||
3866 | return -dp_ofs; | ||
3867 | } | ||
3868 | |||
3869 | /* | ||
3870 | * We are now fully comfortable with the computation | ||
3871 | * of the data residual (2's complement). | ||
3872 | */ | ||
3873 | dp_sgmin = SYM_CONF_MAX_SG - cp->segments; | ||
3874 | resid = -cp->ext_ofs; | ||
3875 | for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { | ||
3876 | u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size); | ||
3877 | resid += (tmp & 0xffffff); | ||
3878 | } | ||
3879 | |||
3880 | /* | ||
3881 | * Hopefully, the result is not too wrong. | ||
3882 | */ | ||
3883 | return resid; | ||
3884 | } | ||
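/*
 * Worked example, continuing the made-up numbers used for
 * sym_evaluate_dp() above: three 512-byte entries (data[93..95],
 * 1536 bytes total) and a transfer that stopped 100 bytes into
 * data[94]. The extreme pointer is then ext_sg == 95, ext_ofs == -412,
 * so the residual is 412 + 512 == 924 bytes, i.e. 612 bytes were
 * actually moved.
 */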
3885 | |||
3886 | /* | ||
3887 | * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER. | ||
3888 | * | ||
3889 | * When we try to negotiate, we append the negotiation message | ||
3890 | * to the identify and (maybe) simple tag message. | ||
3891 | * The host status field is set to HS_NEGOTIATE to mark this | ||
3892 | * situation. | ||
3893 | * | ||
3894 | * If the target doesn't answer this message immediately | ||
3895 | * (as required by the standard), the SIR_NEGO_FAILED interrupt | ||
3896 | * will be raised eventually. | ||
3897 | * The handler removes the HS_NEGOTIATE status, and sets the | ||
3898 | * negotiated value to the default (async / nowide). | ||
3899 | * | ||
3900 | * If we receive a matching answer immediately, we check it | ||
3901 | * for validity, and set the values. | ||
3902 | * | ||
3903 | * If we receive a Reject message immediately, we assume the | ||
3904 | * negotiation has failed, and fall back to standard values. | ||
3905 | * | ||
3906 | * If we receive a negotiation message while not in HS_NEGOTIATE | ||
3907 | * state, it's a target initiated negotiation. We prepare a | ||
3908 | * (hopefully) valid answer, set our parameters, and send back | ||
3909 | * this answer to the target. | ||
3910 | * | ||
3911 | * If the target doesn't fetch the answer (no message out phase), | ||
3912 | * we assume the negotiation has failed, and fall back to default | ||
3913 | * settings (SIR_NEGO_PROTO interrupt). | ||
3914 | * | ||
3915 | * When we set the values, we adjust them in all ccbs belonging | ||
3916 | * to this target, in the controller's register, and in the "phys" | ||
3917 | * field of the controller's struct sym_hcb. | ||
3918 | */ | ||
3919 | |||
3920 | /* | ||
3921 | * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message. | ||
3922 | */ | ||
3923 | static int | ||
3924 | sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp) | ||
3925 | { | ||
3926 | int target = cp->target; | ||
3927 | u_char chg, ofs, per, fak, div; | ||
3928 | |||
3929 | if (DEBUG_FLAGS & DEBUG_NEGO) { | ||
3930 | sym_print_nego_msg(np, target, "sync msgin", np->msgin); | ||
3931 | } | ||
3932 | |||
3933 | /* | ||
3934 | * Get requested values. | ||
3935 | */ | ||
3936 | chg = 0; | ||
3937 | per = np->msgin[3]; | ||
3938 | ofs = np->msgin[4]; | ||
3939 | |||
3940 | /* | ||
3941 | * Check values against our limits. | ||
3942 | */ | ||
3943 | if (ofs) { | ||
3944 | if (ofs > np->maxoffs) | ||
3945 | {chg = 1; ofs = np->maxoffs;} | ||
3946 | } | ||
3947 | |||
3948 | if (ofs) { | ||
3949 | if (per < np->minsync) | ||
3950 | {chg = 1; per = np->minsync;} | ||
3951 | } | ||
3952 | |||
3953 | /* | ||
3954 | * Get new chip synchronous parameters value. | ||
3955 | */ | ||
3956 | div = fak = 0; | ||
3957 | if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0) | ||
3958 | goto reject_it; | ||
3959 | |||
3960 | if (DEBUG_FLAGS & DEBUG_NEGO) { | ||
3961 | sym_print_addr(cp->cmd, | ||
3962 | "sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n", | ||
3963 | ofs, per, div, fak, chg); | ||
3964 | } | ||
3965 | |||
3966 | /* | ||
3967 | * If it was an answer we want to change, | ||
3968 | * then it isn't acceptable. Reject it. | ||
3969 | */ | ||
3970 | if (!req && chg) | ||
3971 | goto reject_it; | ||
3972 | |||
3973 | /* | ||
3974 | * Apply new values. | ||
3975 | */ | ||
3976 | sym_setsync (np, target, ofs, per, div, fak); | ||
3977 | |||
3978 | /* | ||
3979 | * It was an answer. We are done. | ||
3980 | */ | ||
3981 | if (!req) | ||
3982 | return 0; | ||
3983 | |||
3984 | /* | ||
3985 | * It was a request. Prepare an answer message. | ||
3986 | */ | ||
3987 | np->msgout[0] = M_EXTENDED; | ||
3988 | np->msgout[1] = 3; | ||
3989 | np->msgout[2] = M_X_SYNC_REQ; | ||
3990 | np->msgout[3] = per; | ||
3991 | np->msgout[4] = ofs; | ||
3992 | |||
3993 | if (DEBUG_FLAGS & DEBUG_NEGO) { | ||
3994 | sym_print_nego_msg(np, target, "sync msgout", np->msgout); | ||
3995 | } | ||
3996 | |||
3997 | np->msgin [0] = M_NOOP; | ||
3998 | |||
3999 | return 0; | ||
4000 | |||
4001 | reject_it: | ||
4002 | sym_setsync (np, target, 0, 0, 0, 0); | ||
4003 | return -1; | ||
4004 | } | ||
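/*
 * Illustrative SDTR exchange, byte values invented: a target request of
 * { M_EXTENDED, 3, M_X_SYNC_REQ, 12, 16 } asks for a period factor of 12
 * with an offset of 16. If both fit within np->minsync and np->maxoffs,
 * sym_setsync() is applied as-is and, since it was a request, the same
 * five bytes are echoed back through np->msgout[]. Had either value
 * needed clipping on a target *answer*, the negotiation would have been
 * rejected instead.
 */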
4005 | |||
4006 | static void sym_sync_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) | ||
4007 | { | ||
4008 | int req = 1; | ||
4009 | int result; | ||
4010 | |||
4011 | /* | ||
4012 | * Request or answer ? | ||
4013 | */ | ||
4014 | if (INB(np, HS_PRT) == HS_NEGOTIATE) { | ||
4015 | OUTB(np, HS_PRT, HS_BUSY); | ||
4016 | if (cp->nego_status && cp->nego_status != NS_SYNC) | ||
4017 | goto reject_it; | ||
4018 | req = 0; | ||
4019 | } | ||
4020 | |||
4021 | /* | ||
4022 | * Check and apply new values. | ||
4023 | */ | ||
4024 | result = sym_sync_nego_check(np, req, cp); | ||
4025 | if (result) /* Not acceptable, reject it */ | ||
4026 | goto reject_it; | ||
4027 | if (req) { /* Was a request, send response. */ | ||
4028 | cp->nego_status = NS_SYNC; | ||
4029 | OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp)); | ||
4030 | } | ||
4031 | else /* Was a response, we are done. */ | ||
4032 | OUTL_DSP(np, SCRIPTA_BA(np, clrack)); | ||
4033 | return; | ||
4034 | |||
4035 | reject_it: | ||
4036 | OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); | ||
4037 | } | ||
4038 | |||
4039 | /* | ||
4040 | * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message. | ||
4041 | */ | ||
4042 | static int | ||
4043 | sym_ppr_nego_check(struct sym_hcb *np, int req, int target) | ||
4044 | { | ||
4045 | struct sym_tcb *tp = &np->target[target]; | ||
4046 | unsigned char fak, div; | ||
4047 | int dt, chg = 0; | ||
4048 | |||
4049 | unsigned char per = np->msgin[3]; | ||
4050 | unsigned char ofs = np->msgin[5]; | ||
4051 | unsigned char wide = np->msgin[6]; | ||
4052 | unsigned char opts = np->msgin[7] & PPR_OPT_MASK; | ||
4053 | |||
4054 | if (DEBUG_FLAGS & DEBUG_NEGO) { | ||
4055 | sym_print_nego_msg(np, target, "ppr msgin", np->msgin); | ||
4056 | } | ||
4057 | |||
4058 | /* | ||
4059 | * Check values against our limits. | ||
4060 | */ | ||
4061 | if (wide > np->maxwide) { | ||
4062 | chg = 1; | ||
4063 | wide = np->maxwide; | ||
4064 | } | ||
4065 | if (!wide || !(np->features & FE_U3EN)) | ||
4066 | opts = 0; | ||
4067 | |||
4068 | if (opts != (np->msgin[7] & PPR_OPT_MASK)) | ||
4069 | chg = 1; | ||
4070 | |||
4071 | dt = opts & PPR_OPT_DT; | ||
4072 | |||
4073 | if (ofs) { | ||
4074 | unsigned char maxoffs = dt ? np->maxoffs_dt : np->maxoffs; | ||
4075 | if (ofs > maxoffs) { | ||
4076 | chg = 1; | ||
4077 | ofs = maxoffs; | ||
4078 | } | ||
4079 | } | ||
4080 | |||
4081 | if (ofs) { | ||
4082 | unsigned char minsync = dt ? np->minsync_dt : np->minsync; | ||
4083 | if (per < minsync) { | ||
4084 | chg = 1; | ||
4085 | per = minsync; | ||
4086 | } | ||
4087 | } | ||
4088 | |||
4089 | /* | ||
4090 | * Get new chip synchronous parameters value. | ||
4091 | */ | ||
4092 | div = fak = 0; | ||
4093 | if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0) | ||
4094 | goto reject_it; | ||
4095 | |||
4096 | /* | ||
4097 | * If it was an answer we want to change, | ||
4098 | * then it isn't acceptable. Reject it. | ||
4099 | */ | ||
4100 | if (!req && chg) | ||
4101 | goto reject_it; | ||
4102 | |||
4103 | /* | ||
4104 | * Apply new values. | ||
4105 | */ | ||
4106 | sym_setpprot(np, target, opts, ofs, per, wide, div, fak); | ||
4107 | |||
4108 | /* | ||
4109 | * It was an answer. We are done. | ||
4110 | */ | ||
4111 | if (!req) | ||
4112 | return 0; | ||
4113 | |||
4114 | /* | ||
4115 | * It was a request. Prepare an answer message. | ||
4116 | */ | ||
4117 | np->msgout[0] = M_EXTENDED; | ||
4118 | np->msgout[1] = 6; | ||
4119 | np->msgout[2] = M_X_PPR_REQ; | ||
4120 | np->msgout[3] = per; | ||
4121 | np->msgout[4] = 0; | ||
4122 | np->msgout[5] = ofs; | ||
4123 | np->msgout[6] = wide; | ||
4124 | np->msgout[7] = opts; | ||
4125 | |||
4126 | if (DEBUG_FLAGS & DEBUG_NEGO) { | ||
4127 | sym_print_nego_msg(np, target, "ppr msgout", np->msgout); | ||
4128 | } | ||
4129 | |||
4130 | np->msgin [0] = M_NOOP; | ||
4131 | |||
4132 | return 0; | ||
4133 | |||
4134 | reject_it: | ||
4135 | sym_setpprot (np, target, 0, 0, 0, 0, 0, 0); | ||
4136 | /* | ||
4137 | * If it is a device response that should result in | ||
4138 | * ST, we may want to try a legacy negotiation later. | ||
4139 | */ | ||
4140 | if (!req && !opts) { | ||
4141 | tp->tgoal.period = per; | ||
4142 | tp->tgoal.offset = ofs; | ||
4143 | tp->tgoal.width = wide; | ||
4144 | tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; | ||
4145 | tp->tgoal.check_nego = 1; | ||
4146 | } | ||
4147 | return -1; | ||
4148 | } | ||
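/*
 * Illustrative PPR layout, values invented: the message arrives as
 * { M_EXTENDED, 6, M_X_PPR_REQ, per, 0, ofs, wide, opts }, i.e.
 * msgin[3] = period factor, msgin[5] = offset, msgin[6] = width,
 * msgin[7] = protocol options. On a chip without FE_U3EN the options
 * are stripped, which counts as a change; if that happens on a target
 * *answer* the PPR is rejected and a plain ST goal is recorded so that
 * a legacy WDTR/SDTR negotiation can be retried later.
 */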
4149 | |||
4150 | static void sym_ppr_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) | ||
4151 | { | ||
4152 | int req = 1; | ||
4153 | int result; | ||
4154 | |||
4155 | /* | ||
4156 | * Request or answer ? | ||
4157 | */ | ||
4158 | if (INB(np, HS_PRT) == HS_NEGOTIATE) { | ||
4159 | OUTB(np, HS_PRT, HS_BUSY); | ||
4160 | if (cp->nego_status && cp->nego_status != NS_PPR) | ||
4161 | goto reject_it; | ||
4162 | req = 0; | ||
4163 | } | ||
4164 | |||
4165 | /* | ||
4166 | * Check and apply new values. | ||
4167 | */ | ||
4168 | result = sym_ppr_nego_check(np, req, cp->target); | ||
4169 | if (result) /* Not acceptable, reject it */ | ||
4170 | goto reject_it; | ||
4171 | if (req) { /* Was a request, send response. */ | ||
4172 | cp->nego_status = NS_PPR; | ||
4173 | OUTL_DSP(np, SCRIPTB_BA(np, ppr_resp)); | ||
4174 | } | ||
4175 | else /* Was a response, we are done. */ | ||
4176 | OUTL_DSP(np, SCRIPTA_BA(np, clrack)); | ||
4177 | return; | ||
4178 | |||
4179 | reject_it: | ||
4180 | OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); | ||
4181 | } | ||
4182 | |||
4183 | /* | ||
4184 | * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message. | ||
4185 | */ | ||
4186 | static int | ||
4187 | sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp) | ||
4188 | { | ||
4189 | int target = cp->target; | ||
4190 | u_char chg, wide; | ||
4191 | |||
4192 | if (DEBUG_FLAGS & DEBUG_NEGO) { | ||
4193 | sym_print_nego_msg(np, target, "wide msgin", np->msgin); | ||
4194 | } | ||
4195 | |||
4196 | /* | ||
4197 | * Get requested values. | ||
4198 | */ | ||
4199 | chg = 0; | ||
4200 | wide = np->msgin[3]; | ||
4201 | |||
4202 | /* | ||
4203 | * Check values against our limits. | ||
4204 | */ | ||
4205 | if (wide > np->maxwide) { | ||
4206 | chg = 1; | ||
4207 | wide = np->maxwide; | ||
4208 | } | ||
4209 | |||
4210 | if (DEBUG_FLAGS & DEBUG_NEGO) { | ||
4211 | sym_print_addr(cp->cmd, "wdtr: wide=%d chg=%d.\n", | ||
4212 | wide, chg); | ||
4213 | } | ||
4214 | |||
4215 | /* | ||
4216 | * If it was an answer we want to change, | ||
4217 | * then it isn't acceptable. Reject it. | ||
4218 | */ | ||
4219 | if (!req && chg) | ||
4220 | goto reject_it; | ||
4221 | |||
4222 | /* | ||
4223 | * Apply new values. | ||
4224 | */ | ||
4225 | sym_setwide (np, target, wide); | ||
4226 | |||
4227 | /* | ||
4228 | * It was an answer. We are done. | ||
4229 | */ | ||
4230 | if (!req) | ||
4231 | return 0; | ||
4232 | |||
4233 | /* | ||
4234 | * It was a request. Prepare an answer message. | ||
4235 | */ | ||
4236 | np->msgout[0] = M_EXTENDED; | ||
4237 | np->msgout[1] = 2; | ||
4238 | np->msgout[2] = M_X_WIDE_REQ; | ||
4239 | np->msgout[3] = wide; | ||
4240 | |||
4241 | np->msgin [0] = M_NOOP; | ||
4242 | |||
4243 | if (DEBUG_FLAGS & DEBUG_NEGO) { | ||
4244 | sym_print_nego_msg(np, target, "wide msgout", np->msgout); | ||
4245 | } | ||
4246 | |||
4247 | return 0; | ||
4248 | |||
4249 | reject_it: | ||
4250 | return -1; | ||
4251 | } | ||
4252 | |||
4253 | static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) | ||
4254 | { | ||
4255 | int req = 1; | ||
4256 | int result; | ||
4257 | |||
4258 | /* | ||
4259 | * Request or answer ? | ||
4260 | */ | ||
4261 | if (INB(np, HS_PRT) == HS_NEGOTIATE) { | ||
4262 | OUTB(np, HS_PRT, HS_BUSY); | ||
4263 | if (cp->nego_status && cp->nego_status != NS_WIDE) | ||
4264 | goto reject_it; | ||
4265 | req = 0; | ||
4266 | } | ||
4267 | |||
4268 | /* | ||
4269 | * Check and apply new values. | ||
4270 | */ | ||
4271 | result = sym_wide_nego_check(np, req, cp); | ||
4272 | if (result) /* Not acceptable, reject it */ | ||
4273 | goto reject_it; | ||
4274 | if (req) { /* Was a request, send response. */ | ||
4275 | cp->nego_status = NS_WIDE; | ||
4276 | OUTL_DSP(np, SCRIPTB_BA(np, wdtr_resp)); | ||
4277 | } else { /* Was a response. */ | ||
4278 | /* | ||
4279 | * Negotiate for SYNC immediately after WIDE response. | ||
4280 | * This allows us to negotiate for both WIDE and SYNC on | ||
4281 | * a single SCSI command (Suggested by Justin Gibbs). | ||
4282 | */ | ||
4283 | if (tp->tgoal.offset) { | ||
4284 | np->msgout[0] = M_EXTENDED; | ||
4285 | np->msgout[1] = 3; | ||
4286 | np->msgout[2] = M_X_SYNC_REQ; | ||
4287 | np->msgout[3] = tp->tgoal.period; | ||
4288 | np->msgout[4] = tp->tgoal.offset; | ||
4289 | |||
4290 | if (DEBUG_FLAGS & DEBUG_NEGO) { | ||
4291 | sym_print_nego_msg(np, cp->target, | ||
4292 | "sync msgout", np->msgout); | ||
4293 | } | ||
4294 | |||
4295 | cp->nego_status = NS_SYNC; | ||
4296 | OUTB(np, HS_PRT, HS_NEGOTIATE); | ||
4297 | OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp)); | ||
4298 | return; | ||
4299 | } else | ||
4300 | OUTL_DSP(np, SCRIPTA_BA(np, clrack)); | ||
4301 | } | ||
4302 | |||
4303 | return; | ||
4304 | |||
4305 | reject_it: | ||
4306 | OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); | ||
4307 | } | ||
4308 | |||
4309 | /* | ||
4310 | * Reset DT, SYNC or WIDE to default settings. | ||
4311 | * | ||
4312 | * Called when a negotiation does not succeed either | ||
4313 | * on rejection or on protocol error. | ||
4314 | * | ||
4315 | * A target that understands a PPR message should never | ||
4316 | * reject it, and messing it up is very unlikely. | ||
4317 | * So, if a PPR causes problems, we may just want to | ||
4318 | * try a legacy negotiation later. | ||
4319 | */ | ||
4320 | static void sym_nego_default(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) | ||
4321 | { | ||
4322 | switch (cp->nego_status) { | ||
4323 | case NS_PPR: | ||
4324 | #if 0 | ||
4325 | sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0); | ||
4326 | #else | ||
4327 | if (tp->tgoal.period < np->minsync) | ||
4328 | tp->tgoal.period = np->minsync; | ||
4329 | if (tp->tgoal.offset > np->maxoffs) | ||
4330 | tp->tgoal.offset = np->maxoffs; | ||
4331 | tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; | ||
4332 | tp->tgoal.check_nego = 1; | ||
4333 | #endif | ||
4334 | break; | ||
4335 | case NS_SYNC: | ||
4336 | sym_setsync (np, cp->target, 0, 0, 0, 0); | ||
4337 | break; | ||
4338 | case NS_WIDE: | ||
4339 | sym_setwide (np, cp->target, 0); | ||
4340 | break; | ||
4341 | } | ||
4342 | np->msgin [0] = M_NOOP; | ||
4343 | np->msgout[0] = M_NOOP; | ||
4344 | cp->nego_status = 0; | ||
4345 | } | ||
4346 | |||
4347 | /* | ||
4348 | * chip handler for MESSAGE REJECT received in response to | ||
4349 | * PPR, WIDE or SYNCHRONOUS negotiation. | ||
4350 | */ | ||
4351 | static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) | ||
4352 | { | ||
4353 | sym_nego_default(np, tp, cp); | ||
4354 | OUTB(np, HS_PRT, HS_BUSY); | ||
4355 | } | ||
4356 | |||
4357 | /* | ||
4358 | * chip exception handler for programmed interrupts. | ||
4359 | */ | ||
4360 | static void sym_int_sir (struct sym_hcb *np) | ||
4361 | { | ||
4362 | u_char num = INB(np, nc_dsps); | ||
4363 | u32 dsa = INL(np, nc_dsa); | ||
4364 | struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); | ||
4365 | u_char target = INB(np, nc_sdid) & 0x0f; | ||
4366 | struct sym_tcb *tp = &np->target[target]; | ||
4367 | int tmp; | ||
4368 | |||
4369 | if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num); | ||
4370 | |||
4371 | switch (num) { | ||
4372 | #if SYM_CONF_DMA_ADDRESSING_MODE == 2 | ||
4373 | /* | ||
4374 | * SCRIPTS tell us that we may have to update | ||
4375 | * 64 bit DMA segment registers. | ||
4376 | */ | ||
4377 | case SIR_DMAP_DIRTY: | ||
4378 | sym_update_dmap_regs(np); | ||
4379 | goto out; | ||
4380 | #endif | ||
4381 | /* | ||
4382 | * Command has been completed with error condition | ||
4383 | * or has been auto-sensed. | ||
4384 | */ | ||
4385 | case SIR_COMPLETE_ERROR: | ||
4386 | sym_complete_error(np, cp); | ||
4387 | return; | ||
4388 | /* | ||
4389 | * The C code is currently trying to recover from something. | ||
4390 | * Typically, the user wants to abort some command. | ||
4391 | */ | ||
4392 | case SIR_SCRIPT_STOPPED: | ||
4393 | case SIR_TARGET_SELECTED: | ||
4394 | case SIR_ABORT_SENT: | ||
4395 | sym_sir_task_recovery(np, num); | ||
4396 | return; | ||
4397 | /* | ||
4398 | * The device didn't go to MSG OUT phase after having | ||
4399 | * been selected with ATN. We do not want to handle | ||
4400 | * that. | ||
4401 | */ | ||
4402 | case SIR_SEL_ATN_NO_MSG_OUT: | ||
4403 | printf ("%s:%d: No MSG OUT phase after selection with ATN.\n", | ||
4404 | sym_name (np), target); | ||
4405 | goto out_stuck; | ||
4406 | /* | ||
4407 | * The device didn't switch to MSG IN phase after | ||
4408 | * having reselected the initiator. | ||
4409 | */ | ||
4410 | case SIR_RESEL_NO_MSG_IN: | ||
4411 | printf ("%s:%d: No MSG IN phase after reselection.\n", | ||
4412 | sym_name (np), target); | ||
4413 | goto out_stuck; | ||
4414 | /* | ||
4415 | * After reselection, the device sent a message that wasn't | ||
4416 | * an IDENTIFY. | ||
4417 | */ | ||
4418 | case SIR_RESEL_NO_IDENTIFY: | ||
4419 | printf ("%s:%d: No IDENTIFY after reselection.\n", | ||
4420 | sym_name (np), target); | ||
4421 | goto out_stuck; | ||
4422 | /* | ||
4423 | * The device reselected a LUN we do not know about. | ||
4424 | */ | ||
4425 | case SIR_RESEL_BAD_LUN: | ||
4426 | np->msgout[0] = M_RESET; | ||
4427 | goto out; | ||
4428 | /* | ||
4429 | * The device reselected for an untagged nexus and we | ||
4430 | * don't have one. | ||
4431 | */ | ||
4432 | case SIR_RESEL_BAD_I_T_L: | ||
4433 | np->msgout[0] = M_ABORT; | ||
4434 | goto out; | ||
4435 | /* | ||
4436 | * The device reselected for a tagged nexus that we do not | ||
4437 | * have. | ||
4438 | */ | ||
4439 | case SIR_RESEL_BAD_I_T_L_Q: | ||
4440 | np->msgout[0] = M_ABORT_TAG; | ||
4441 | goto out; | ||
4442 | /* | ||
4443 | * The SCRIPTS let us know that the device has grabbed | ||
4444 | * our message and will abort the job. | ||
4445 | */ | ||
4446 | case SIR_RESEL_ABORTED: | ||
4447 | np->lastmsg = np->msgout[0]; | ||
4448 | np->msgout[0] = M_NOOP; | ||
4449 | printf ("%s:%d: message %x sent on bad reselection.\n", | ||
4450 | sym_name (np), target, np->lastmsg); | ||
4451 | goto out; | ||
4452 | /* | ||
4453 | * The SCRIPTS let us know that a message has been | ||
4454 | * successfully sent to the device. | ||
4455 | */ | ||
4456 | case SIR_MSG_OUT_DONE: | ||
4457 | np->lastmsg = np->msgout[0]; | ||
4458 | np->msgout[0] = M_NOOP; | ||
4459 | /* Should we really care about that? */ | ||
4460 | if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) { | ||
4461 | if (cp) { | ||
4462 | cp->xerr_status &= ~XE_PARITY_ERR; | ||
4463 | if (!cp->xerr_status) | ||
4464 | OUTOFFB(np, HF_PRT, HF_EXT_ERR); | ||
4465 | } | ||
4466 | } | ||
4467 | goto out; | ||
4468 | /* | ||
4469 | * The device didn't send a GOOD SCSI status. | ||
4470 | * We may have some work to do prior to allow | ||
4471 | * the SCRIPTS processor to continue. | ||
4472 | */ | ||
4473 | case SIR_BAD_SCSI_STATUS: | ||
4474 | if (!cp) | ||
4475 | goto out; | ||
4476 | sym_sir_bad_scsi_status(np, num, cp); | ||
4477 | return; | ||
4478 | /* | ||
4479 | * We are asked by the SCRIPTS to prepare a | ||
4480 | * REJECT message. | ||
4481 | */ | ||
4482 | case SIR_REJECT_TO_SEND: | ||
4483 | sym_print_msg(cp, "M_REJECT to send for ", np->msgin); | ||
4484 | np->msgout[0] = M_REJECT; | ||
4485 | goto out; | ||
4486 | /* | ||
4487 | * We have been ODD at the end of a DATA IN | ||
4488 | * transfer and the device didn't send an | ||
4489 | * IGNORE WIDE RESIDUE message. | ||
4490 | * It is a data overrun condition. | ||
4491 | */ | ||
4492 | case SIR_SWIDE_OVERRUN: | ||
4493 | if (cp) { | ||
4494 | OUTONB(np, HF_PRT, HF_EXT_ERR); | ||
4495 | cp->xerr_status |= XE_SWIDE_OVRUN; | ||
4496 | } | ||
4497 | goto out; | ||
4498 | /* | ||
4499 | * We have been ODD at the end of a DATA OUT | ||
4500 | * transfer. | ||
4501 | * It is a data underrun condition. | ||
4502 | */ | ||
4503 | case SIR_SODL_UNDERRUN: | ||
4504 | if (cp) { | ||
4505 | OUTONB(np, HF_PRT, HF_EXT_ERR); | ||
4506 | cp->xerr_status |= XE_SODL_UNRUN; | ||
4507 | } | ||
4508 | goto out; | ||
4509 | /* | ||
4510 | * The device wants us to transfer more data than | ||
4511 | * expected or in the wrong direction. | ||
4512 | * The number of extra bytes is in scratcha. | ||
4513 | * It is a data overrun condition. | ||
4514 | */ | ||
4515 | case SIR_DATA_OVERRUN: | ||
4516 | if (cp) { | ||
4517 | OUTONB(np, HF_PRT, HF_EXT_ERR); | ||
4518 | cp->xerr_status |= XE_EXTRA_DATA; | ||
4519 | cp->extra_bytes += INL(np, nc_scratcha); | ||
4520 | } | ||
4521 | goto out; | ||
4522 | /* | ||
4523 | * The device switched to an illegal phase (4/5). | ||
4524 | */ | ||
4525 | case SIR_BAD_PHASE: | ||
4526 | if (cp) { | ||
4527 | OUTONB(np, HF_PRT, HF_EXT_ERR); | ||
4528 | cp->xerr_status |= XE_BAD_PHASE; | ||
4529 | } | ||
4530 | goto out; | ||
4531 | /* | ||
4532 | * We received a message. | ||
4533 | */ | ||
4534 | case SIR_MSG_RECEIVED: | ||
4535 | if (!cp) | ||
4536 | goto out_stuck; | ||
4537 | switch (np->msgin [0]) { | ||
4538 | /* | ||
4539 | * We received an extended message. | ||
4540 | * We handle MODIFY DATA POINTER, SDTR, WDTR | ||
4541 | * and reject all other extended messages. | ||
4542 | */ | ||
4543 | case M_EXTENDED: | ||
4544 | switch (np->msgin [2]) { | ||
4545 | case M_X_MODIFY_DP: | ||
4546 | if (DEBUG_FLAGS & DEBUG_POINTER) | ||
4547 | sym_print_msg(cp,"modify DP",np->msgin); | ||
4548 | tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + | ||
4549 | (np->msgin[5]<<8) + (np->msgin[6]); | ||
4550 | sym_modify_dp(np, tp, cp, tmp); | ||
4551 | return; | ||
4552 | case M_X_SYNC_REQ: | ||
4553 | sym_sync_nego(np, tp, cp); | ||
4554 | return; | ||
4555 | case M_X_PPR_REQ: | ||
4556 | sym_ppr_nego(np, tp, cp); | ||
4557 | return; | ||
4558 | case M_X_WIDE_REQ: | ||
4559 | sym_wide_nego(np, tp, cp); | ||
4560 | return; | ||
4561 | default: | ||
4562 | goto out_reject; | ||
4563 | } | ||
4564 | break; | ||
4565 | /* | ||
4566 | * We received a 1/2 byte message not handled by SCRIPTS. | ||
4567 | * We are only expecting MESSAGE REJECT and IGNORE WIDE | ||
4568 | * RESIDUE messages that haven't been anticipated by | ||
4569 | * SCRIPTS on SWIDE full condition. Unanticipated IGNORE | ||
4570 | * WIDE RESIDUE messages are aliased as MODIFY DP (-1). | ||
4571 | */ | ||
4572 | case M_IGN_RESIDUE: | ||
4573 | if (DEBUG_FLAGS & DEBUG_POINTER) | ||
4574 | sym_print_msg(cp,"ign wide residue", np->msgin); | ||
4575 | if (cp->host_flags & HF_SENSE) | ||
4576 | OUTL_DSP(np, SCRIPTA_BA(np, clrack)); | ||
4577 | else | ||
4578 | sym_modify_dp(np, tp, cp, -1); | ||
4579 | return; | ||
4580 | case M_REJECT: | ||
4581 | if (INB(np, HS_PRT) == HS_NEGOTIATE) | ||
4582 | sym_nego_rejected(np, tp, cp); | ||
4583 | else { | ||
4584 | sym_print_addr(cp->cmd, | ||
4585 | "M_REJECT received (%x:%x).\n", | ||
4586 | scr_to_cpu(np->lastmsg), np->msgout[0]); | ||
4587 | } | ||
4588 | goto out_clrack; | ||
4589 | break; | ||
4590 | default: | ||
4591 | goto out_reject; | ||
4592 | } | ||
4593 | break; | ||
4594 | /* | ||
4595 | * We received an unknown message. | ||
4596 | * Ignore all MSG IN phases and reject it. | ||
4597 | */ | ||
4598 | case SIR_MSG_WEIRD: | ||
4599 | sym_print_msg(cp, "WEIRD message received", np->msgin); | ||
4600 | OUTL_DSP(np, SCRIPTB_BA(np, msg_weird)); | ||
4601 | return; | ||
4602 | /* | ||
4603 | * Negotiation failed. | ||
4604 | * Target does not send us the reply. | ||
4605 | * Remove the HS_NEGOTIATE status. | ||
4606 | */ | ||
4607 | case SIR_NEGO_FAILED: | ||
4608 | OUTB(np, HS_PRT, HS_BUSY); | ||
4609 | /* | ||
4610 | * Negotiation failed. | ||
4611 | * Target does not want answer message. | ||
4612 | */ | ||
4613 | case SIR_NEGO_PROTO: | ||
4614 | sym_nego_default(np, tp, cp); | ||
4615 | goto out; | ||
4616 | } | ||
4617 | |||
4618 | out: | ||
4619 | OUTONB_STD(); | ||
4620 | return; | ||
4621 | out_reject: | ||
4622 | OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); | ||
4623 | return; | ||
4624 | out_clrack: | ||
4625 | OUTL_DSP(np, SCRIPTA_BA(np, clrack)); | ||
4626 | return; | ||
4627 | out_stuck: | ||
4628 | return; | ||
4629 | } | ||
4630 | |||
4631 | /* | ||
4632 | * Acquire a control block | ||
4633 | */ | ||
4634 | struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order) | ||
4635 | { | ||
4636 | u_char tn = cmd->device->id; | ||
4637 | u_char ln = cmd->device->lun; | ||
4638 | struct sym_tcb *tp = &np->target[tn]; | ||
4639 | struct sym_lcb *lp = sym_lp(tp, ln); | ||
4640 | u_short tag = NO_TAG; | ||
4641 | SYM_QUEHEAD *qp; | ||
4642 | struct sym_ccb *cp = NULL; | ||
4643 | |||
4644 | /* | ||
4645 | * Look for a free CCB | ||
4646 | */ | ||
4647 | if (sym_que_empty(&np->free_ccbq)) | ||
4648 | sym_alloc_ccb(np); | ||
4649 | qp = sym_remque_head(&np->free_ccbq); | ||
4650 | if (!qp) | ||
4651 | goto out; | ||
4652 | cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); | ||
4653 | |||
4654 | #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
4655 | /* | ||
4656 | * If the LCB is not yet available and the LUN | ||
4657 | * has been probed ok, try to allocate the LCB. | ||
4658 | */ | ||
4659 | if (!lp && sym_is_bit(tp->lun_map, ln)) { | ||
4660 | lp = sym_alloc_lcb(np, tn, ln); | ||
4661 | if (!lp) | ||
4662 | goto out_free; | ||
4663 | } | ||
4664 | #endif | ||
4665 | |||
4666 | /* | ||
4667 | * If the LCB is not available here, then the | ||
4668 | * logical unit is not yet discovered. For those | ||
4669 | * we only accept 1 SCSI IO per logical unit, | ||
4670 | * since we cannot allow disconnections. | ||
4671 | */ | ||
4672 | if (!lp) { | ||
4673 | if (!sym_is_bit(tp->busy0_map, ln)) | ||
4674 | sym_set_bit(tp->busy0_map, ln); | ||
4675 | else | ||
4676 | goto out_free; | ||
4677 | } else { | ||
4678 | /* | ||
4679 | * If we have been asked for a tagged command. | ||
4680 | */ | ||
4681 | if (tag_order) { | ||
4682 | /* | ||
4683 | * Debugging purpose. | ||
4684 | */ | ||
4685 | #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
4686 | assert(lp->busy_itl == 0); | ||
4687 | #endif | ||
4688 | /* | ||
4689 | * Allocate resources for tags if not yet. | ||
4690 | */ | ||
4691 | if (!lp->cb_tags) { | ||
4692 | sym_alloc_lcb_tags(np, tn, ln); | ||
4693 | if (!lp->cb_tags) | ||
4694 | goto out_free; | ||
4695 | } | ||
4696 | /* | ||
4697 | * Get a tag for this SCSI IO and set up | ||
4698 | * the CCB bus address for reselection, | ||
4699 | * and count it for this LUN. | ||
4700 | * Toggle reselect path to tagged. | ||
4701 | */ | ||
4702 | if (lp->busy_itlq < SYM_CONF_MAX_TASK) { | ||
4703 | tag = lp->cb_tags[lp->ia_tag]; | ||
4704 | if (++lp->ia_tag == SYM_CONF_MAX_TASK) | ||
4705 | lp->ia_tag = 0; | ||
4706 | ++lp->busy_itlq; | ||
4707 | #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
4708 | lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba); | ||
4709 | lp->head.resel_sa = | ||
4710 | cpu_to_scr(SCRIPTA_BA(np, resel_tag)); | ||
4711 | #endif | ||
4712 | #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING | ||
4713 | cp->tags_si = lp->tags_si; | ||
4714 | ++lp->tags_sum[cp->tags_si]; | ||
4715 | ++lp->tags_since; | ||
4716 | #endif | ||
4717 | } | ||
4718 | else | ||
4719 | goto out_free; | ||
4720 | } | ||
4721 | /* | ||
4722 | * This command will not be tagged. | ||
4723 | * If we already have either a tagged or untagged | ||
4724 | * one, refuse to overlap this untagged one. | ||
4725 | */ | ||
4726 | else { | ||
4727 | /* | ||
4728 | * Debugging purpose. | ||
4729 | */ | ||
4730 | #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
4731 | assert(lp->busy_itl == 0 && lp->busy_itlq == 0); | ||
4732 | #endif | ||
4733 | /* | ||
4734 | * Count this nexus for this LUN. | ||
4735 | * Set up the CCB bus address for reselection. | ||
4736 | * Toggle reselect path to untagged. | ||
4737 | */ | ||
4738 | ++lp->busy_itl; | ||
4739 | #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
4740 | if (lp->busy_itl == 1) { | ||
4741 | lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); | ||
4742 | lp->head.resel_sa = | ||
4743 | cpu_to_scr(SCRIPTA_BA(np, resel_no_tag)); | ||
4744 | } | ||
4745 | else | ||
4746 | goto out_free; | ||
4747 | #endif | ||
4748 | } | ||
4749 | } | ||
4750 | /* | ||
4751 | * Put the CCB into the busy queue. | ||
4752 | */ | ||
4753 | sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); | ||
4754 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
4755 | if (lp) { | ||
4756 | sym_remque(&cp->link2_ccbq); | ||
4757 | sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq); | ||
4758 | } | ||
4759 | |||
4760 | #endif | ||
4761 | /* | ||
4762 | * Remember all the information needed to free this CCB. | ||
4763 | */ | ||
4764 | cp->to_abort = 0; | ||
4765 | cp->tag = tag; | ||
4766 | cp->order = tag_order; | ||
4767 | cp->target = tn; | ||
4768 | cp->lun = ln; | ||
4769 | |||
4770 | if (DEBUG_FLAGS & DEBUG_TAGS) { | ||
4771 | sym_print_addr(cmd, "ccb @%p using tag %d.\n", cp, tag); | ||
4772 | } | ||
4773 | |||
4774 | out: | ||
4775 | return cp; | ||
4776 | out_free: | ||
4777 | sym_insque_head(&cp->link_ccbq, &np->free_ccbq); | ||
4778 | return NULL; | ||
4779 | } | ||
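/*
 * Illustrative pairing sketch, not part of the original code: the
 * queuecommand path in the glue code acquires a CCB, fills in the
 * physical descriptors and queues it, and the completion or failure
 * paths hand it back, roughly as below. M_SIMPLE_TAG is the usual
 * tag_order for tagged devices; 0 asks for an untagged nexus.
 */
#if 0
	struct sym_ccb *cp = sym_get_ccb(np, cmd, M_SIMPLE_TAG);
	if (!cp)
		return;				/* queue full for now */
	/* ... build cp->phys from the scsi_cmnd ... */
	sym_put_start_queue(np, cp);
	/* ... later, on completion or on failure to start ... */
	sym_free_ccb(np, cp);
#endif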
4780 | |||
4781 | /* | ||
4782 | * Release one control block | ||
4783 | */ | ||
4784 | void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp) | ||
4785 | { | ||
4786 | struct sym_tcb *tp = &np->target[cp->target]; | ||
4787 | struct sym_lcb *lp = sym_lp(tp, cp->lun); | ||
4788 | |||
4789 | if (DEBUG_FLAGS & DEBUG_TAGS) { | ||
4790 | sym_print_addr(cp->cmd, "ccb @%p freeing tag %d.\n", | ||
4791 | cp, cp->tag); | ||
4792 | } | ||
4793 | |||
4794 | /* | ||
4795 | * If an LCB is available, | ||
4796 | */ | ||
4797 | if (lp) { | ||
4798 | /* | ||
4799 | * If tagged, release the tag and set the reselect path. | ||
4800 | */ | ||
4801 | if (cp->tag != NO_TAG) { | ||
4802 | #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING | ||
4803 | --lp->tags_sum[cp->tags_si]; | ||
4804 | #endif | ||
4805 | /* | ||
4806 | * Free the tag value. | ||
4807 | */ | ||
4808 | lp->cb_tags[lp->if_tag] = cp->tag; | ||
4809 | if (++lp->if_tag == SYM_CONF_MAX_TASK) | ||
4810 | lp->if_tag = 0; | ||
4811 | /* | ||
4812 | * Make the reselect path invalid, | ||
4813 | * and uncount this CCB. | ||
4814 | */ | ||
4815 | lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba); | ||
4816 | --lp->busy_itlq; | ||
4817 | } else { /* Untagged */ | ||
4818 | /* | ||
4819 | * Make the reselect path invalid, | ||
4820 | * and uncount this CCB. | ||
4821 | */ | ||
4822 | lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba); | ||
4823 | --lp->busy_itl; | ||
4824 | } | ||
4825 | /* | ||
4826 | * If no JOB active, make the LUN reselect path invalid. | ||
4827 | */ | ||
4828 | if (lp->busy_itlq == 0 && lp->busy_itl == 0) | ||
4829 | lp->head.resel_sa = | ||
4830 | cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); | ||
4831 | } | ||
4832 | /* | ||
4833 | * Otherwise, we only accept 1 IO per LUN. | ||
4834 | * Clear the bit that keeps track of this IO. | ||
4835 | */ | ||
4836 | else | ||
4837 | sym_clr_bit(tp->busy0_map, cp->lun); | ||
4838 | |||
4839 | /* | ||
4840 | * We do not queue more than 1 ccb per target | ||
4841 | * with negotiation at any time. If this ccb was | ||
4842 | * used for negotiation, clear this info in the tcb. | ||
4843 | */ | ||
4844 | if (cp == tp->nego_cp) | ||
4845 | tp->nego_cp = NULL; | ||
4846 | |||
4847 | #ifdef SYM_CONF_IARB_SUPPORT | ||
4848 | /* | ||
4849 | * If we just completed the last queued CCB, | ||
4850 | * clear this info that is no longer relevant. | ||
4851 | */ | ||
4852 | if (cp == np->last_cp) | ||
4853 | np->last_cp = 0; | ||
4854 | #endif | ||
4855 | |||
4856 | /* | ||
4857 | * Make this CCB available. | ||
4858 | */ | ||
4859 | cp->cmd = NULL; | ||
4860 | cp->host_status = HS_IDLE; | ||
4861 | sym_remque(&cp->link_ccbq); | ||
4862 | sym_insque_head(&cp->link_ccbq, &np->free_ccbq); | ||
4863 | |||
4864 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
4865 | if (lp) { | ||
4866 | sym_remque(&cp->link2_ccbq); | ||
4867 | sym_insque_tail(&cp->link2_ccbq, &np->dummy_ccbq); | ||
4868 | if (cp->started) { | ||
4869 | if (cp->tag != NO_TAG) | ||
4870 | --lp->started_tags; | ||
4871 | else | ||
4872 | --lp->started_no_tag; | ||
4873 | } | ||
4874 | } | ||
4875 | cp->started = 0; | ||
4876 | #endif | ||
4877 | } | ||
4878 | |||
4879 | /* | ||
4880 | * Allocate a CCB from memory and initialize its fixed part. | ||
4881 | */ | ||
4882 | static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np) | ||
4883 | { | ||
4884 | struct sym_ccb *cp = NULL; | ||
4885 | int hcode; | ||
4886 | |||
4887 | /* | ||
4888 | * Prevent allocating more CCBs than we can | ||
4889 | * queue to the controller. | ||
4890 | */ | ||
4891 | if (np->actccbs >= SYM_CONF_MAX_START) | ||
4892 | return NULL; | ||
4893 | |||
4894 | /* | ||
4895 | * Allocate memory for this CCB. | ||
4896 | */ | ||
4897 | cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB"); | ||
4898 | if (!cp) | ||
4899 | goto out_free; | ||
4900 | |||
4901 | /* | ||
4902 | * Count it. | ||
4903 | */ | ||
4904 | np->actccbs++; | ||
4905 | |||
4906 | /* | ||
4907 | * Compute the bus address of this ccb. | ||
4908 | */ | ||
4909 | cp->ccb_ba = vtobus(cp); | ||
4910 | |||
4911 | /* | ||
4912 | * Insert this ccb into the hashed list. | ||
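| * The chain is keyed by the CCB bus address (the DSA value), so | ||
| * that sym_ccb_from_dsa() below can map a reselection DSA back | ||
| * to its CCB. New CCBs are pushed at the head of the chain. | ||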
4913 | */ | ||
4914 | hcode = CCB_HASH_CODE(cp->ccb_ba); | ||
4915 | cp->link_ccbh = np->ccbh[hcode]; | ||
4916 | np->ccbh[hcode] = cp; | ||
4917 | |||
4918 | /* | ||
4919 | * Initialize the start and restart actions. | ||
4920 | */ | ||
4921 | cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, idle)); | ||
4922 | cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); | ||
4923 | |||
4924 | /* | ||
4925 | * Initialize some other fields. | ||
4926 | */ | ||
4927 | cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2])); | ||
4928 | |||
4929 | /* | ||
4930 | * Chain into free ccb queue. | ||
4931 | */ | ||
4932 | sym_insque_head(&cp->link_ccbq, &np->free_ccbq); | ||
4933 | |||
4934 | /* | ||
4935 | * Chain into optional lists. | ||
4936 | */ | ||
4937 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
4938 | sym_insque_head(&cp->link2_ccbq, &np->dummy_ccbq); | ||
4939 | #endif | ||
4940 | return cp; | ||
4941 | out_free: | ||
4942 | if (cp) | ||
4943 | sym_mfree_dma(cp, sizeof(*cp), "CCB"); | ||
4944 | return NULL; | ||
4945 | } | ||
4946 | |||
4947 | /* | ||
4948 | * Look up a CCB from a DSA value. | ||
4949 | */ | ||
4950 | static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa) | ||
4951 | { | ||
4952 | int hcode; | ||
4953 | struct sym_ccb *cp; | ||
4954 | |||
4955 | hcode = CCB_HASH_CODE(dsa); | ||
4956 | cp = np->ccbh[hcode]; | ||
4957 | while (cp) { | ||
4958 | if (cp->ccb_ba == dsa) | ||
4959 | break; | ||
4960 | cp = cp->link_ccbh; | ||
4961 | } | ||
4962 | |||
4963 | return cp; | ||
4964 | } | ||
4965 | |||
4966 | /* | ||
4967 | * Target control block initialisation. | ||
4968 | * Nothing important to do at the moment. | ||
4969 | */ | ||
4970 | static void sym_init_tcb (struct sym_hcb *np, u_char tn) | ||
4971 | { | ||
4972 | #if 0 /* Hmmm... this checking looks paranoid. */ | ||
4973 | /* | ||
4974 | * Check some alignments required by the chip. | ||
4975 | */ | ||
4976 | assert (((offsetof(struct sym_reg, nc_sxfer) ^ | ||
4977 | offsetof(struct sym_tcb, head.sval)) &3) == 0); | ||
4978 | assert (((offsetof(struct sym_reg, nc_scntl3) ^ | ||
4979 | offsetof(struct sym_tcb, head.wval)) &3) == 0); | ||
4980 | #endif | ||
4981 | } | ||
4982 | |||
4983 | /* | ||
4984 | * Lun control block allocation and initialization. | ||
4985 | */ | ||
4986 | struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) | ||
4987 | { | ||
4988 | struct sym_tcb *tp = &np->target[tn]; | ||
4989 | struct sym_lcb *lp = sym_lp(tp, ln); | ||
4990 | |||
4991 | /* | ||
4992 | * Already done, just return. | ||
4993 | */ | ||
4994 | if (lp) | ||
4995 | return lp; | ||
4996 | |||
4997 | /* | ||
4998 | * Do not allow LUN control block | ||
4999 | * allocation for LUNs that have not been probed. | ||
5000 | */ | ||
5001 | if (!sym_is_bit(tp->lun_map, ln)) | ||
5002 | return NULL; | ||
5003 | |||
5004 | /* | ||
5005 | * Initialize the target control block if not already done. | ||
5006 | */ | ||
5007 | sym_init_tcb (np, tn); | ||
5008 | |||
5009 | /* | ||
5010 | * Allocate the LCB bus address array. | ||
5011 | * Compute the bus address of this table. | ||
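| * It holds one 32-bit SCRIPTS address per LUN (64 LUNs, hence | ||
| * 256 bytes). Entries initially point at the bad-LUN handler | ||
| * and are patched below as LCBs get allocated. | ||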
5012 | */ | ||
5013 | if (ln && !tp->luntbl) { | ||
5014 | int i; | ||
5015 | |||
5016 | tp->luntbl = sym_calloc_dma(256, "LUNTBL"); | ||
5017 | if (!tp->luntbl) | ||
5018 | goto fail; | ||
5019 | for (i = 0 ; i < 64 ; i++) | ||
5020 | tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); | ||
5021 | tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl)); | ||
5022 | } | ||
5023 | |||
5024 | /* | ||
5025 | * Allocate the table of pointers for LUN(s) > 0, if needed. | ||
5026 | */ | ||
5027 | if (ln && !tp->lunmp) { | ||
5028 | tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *), | ||
5029 | GFP_KERNEL); | ||
5030 | if (!tp->lunmp) | ||
5031 | goto fail; | ||
5032 | } | ||
5033 | |||
5034 | /* | ||
5035 | * Allocate the lcb. | ||
5036 | * Make it available to the chip. | ||
5037 | */ | ||
5038 | lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB"); | ||
5039 | if (!lp) | ||
5040 | goto fail; | ||
5041 | if (ln) { | ||
5042 | tp->lunmp[ln] = lp; | ||
5043 | tp->luntbl[ln] = cpu_to_scr(vtobus(lp)); | ||
5044 | } | ||
5045 | else { | ||
5046 | tp->lun0p = lp; | ||
5047 | tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); | ||
5048 | } | ||
5049 | |||
5050 | /* | ||
5051 | * Let the itl task point to error handling. | ||
5052 | */ | ||
5053 | lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba); | ||
5054 | |||
5055 | /* | ||
5056 | * Set the reselect pattern to our default. :) | ||
5057 | */ | ||
5058 | lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); | ||
5059 | |||
5060 | /* | ||
5061 | * Set user capabilities. | ||
5062 | */ | ||
5063 | lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); | ||
5064 | |||
5065 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
5066 | /* | ||
5067 | * Initialize device queueing. | ||
5068 | */ | ||
5069 | sym_que_init(&lp->waiting_ccbq); | ||
5070 | sym_que_init(&lp->started_ccbq); | ||
5071 | lp->started_max = SYM_CONF_MAX_TASK; | ||
5072 | lp->started_limit = SYM_CONF_MAX_TASK; | ||
5073 | #endif | ||
5074 | /* | ||
5075 | * If we are busy, count the IO. | ||
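| * An untagged IO tracked so far by the per-target busy0_map | ||
| * bit is handed over to the new LCB's busy_itl counter. | ||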
5076 | */ | ||
5077 | if (sym_is_bit(tp->busy0_map, ln)) { | ||
5078 | lp->busy_itl = 1; | ||
5079 | sym_clr_bit(tp->busy0_map, ln); | ||
5080 | } | ||
5081 | fail: | ||
5082 | return lp; | ||
5083 | } | ||
5084 | |||
5085 | /* | ||
5086 | * Allocate LCB resources for tagged command queuing. | ||
5087 | */ | ||
5088 | static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln) | ||
5089 | { | ||
5090 | struct sym_tcb *tp = &np->target[tn]; | ||
5091 | struct sym_lcb *lp = sym_lp(tp, ln); | ||
5092 | int i; | ||
5093 | |||
5094 | /* | ||
5095 | * If LCB not available, try to allocate it. | ||
5096 | */ | ||
5097 | if (!lp && !(lp = sym_alloc_lcb(np, tn, ln))) | ||
5098 | goto fail; | ||
5099 | |||
5100 | /* | ||
5101 | * Allocate the task table and the tag allocation | ||
5102 | * circular buffer. We want both or none. | ||
5103 | */ | ||
5104 | lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); | ||
5105 | if (!lp->itlq_tbl) | ||
5106 | goto fail; | ||
5107 | lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_KERNEL); | ||
5108 | if (!lp->cb_tags) { | ||
5109 | sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); | ||
5110 | lp->itlq_tbl = NULL; | ||
5111 | goto fail; | ||
5112 | } | ||
5113 | |||
5114 | /* | ||
5115 | * Initialize the task table with invalid entries. | ||
5116 | */ | ||
5117 | for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++) | ||
5118 | lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba); | ||
5119 | |||
5120 | /* | ||
5121 | * Fill up the tag buffer with tag numbers. | ||
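| * The buffer is used as a circular list: tags are taken at | ||
| * index ia_tag (sym_get_ccb) and returned at index if_tag | ||
| * (sym_free_ccb), both wrapping at SYM_CONF_MAX_TASK. | ||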
5122 | */ | ||
5123 | for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++) | ||
5124 | lp->cb_tags[i] = i; | ||
5125 | |||
5126 | /* | ||
5127 | * Make the task table available to SCRIPTS, | ||
5128 | * and accept tagged commands now. | ||
5129 | */ | ||
5130 | lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl)); | ||
5131 | |||
5132 | return; | ||
5133 | fail: | ||
5134 | return; | ||
5135 | } | ||
5136 | |||
5137 | /* | ||
5138 | * Queue a SCSI IO to the controller. | ||
5139 | */ | ||
5140 | int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) | ||
5141 | { | ||
5142 | struct scsi_device *sdev = cmd->device; | ||
5143 | struct sym_tcb *tp; | ||
5144 | struct sym_lcb *lp; | ||
5145 | u_char *msgptr; | ||
5146 | u_int msglen; | ||
5147 | int can_disconnect; | ||
5148 | |||
5149 | /* | ||
5150 | * Keep track of the IO in our CCB. | ||
5151 | */ | ||
5152 | cp->cmd = cmd; | ||
5153 | |||
5154 | /* | ||
5155 | * Retrieve the target descriptor. | ||
5156 | */ | ||
5157 | tp = &np->target[cp->target]; | ||
5158 | |||
5159 | /* | ||
5160 | * Retrieve the lun descriptor. | ||
5161 | */ | ||
5162 | lp = sym_lp(tp, sdev->lun); | ||
5163 | |||
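| /* | ||
| * Disconnection is granted for tagged commands, or when the | ||
| * LUN descriptor has disconnects enabled; the privilege is | ||
| * carried by the IDENTIFY message built just below. | ||
| */ | ||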
5164 | can_disconnect = (cp->tag != NO_TAG) || | ||
5165 | (lp && (lp->curr_flags & SYM_DISC_ENABLED)); | ||
5166 | |||
5167 | msgptr = cp->scsi_smsg; | ||
5168 | msglen = 0; | ||
5169 | msgptr[msglen++] = IDENTIFY(can_disconnect, sdev->lun); | ||
5170 | |||
5171 | /* | ||
5172 | * Build the tag message if present. | ||
5173 | */ | ||
5174 | if (cp->tag != NO_TAG) { | ||
5175 | u_char order = cp->order; | ||
5176 | |||
5177 | switch(order) { | ||
5178 | case M_ORDERED_TAG: | ||
5179 | break; | ||
5180 | case M_HEAD_TAG: | ||
5181 | break; | ||
5182 | default: | ||
5183 | order = M_SIMPLE_TAG; | ||
5184 | } | ||
5185 | #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING | ||
5186 | /* | ||
5187 | * Avoid too much reordering of SCSI commands. | ||
5188 | * The algorithm tries to prevent the completion of any | ||
5189 | * tagged command from being delayed by more than | ||
5190 | * 3 times the maximum number of queued commands. | ||
5191 | */ | ||
5192 | if (lp && lp->tags_since > 3*SYM_CONF_MAX_TAG) { | ||
5193 | lp->tags_si = !(lp->tags_si); | ||
5194 | if (lp->tags_sum[lp->tags_si]) { | ||
5195 | order = M_ORDERED_TAG; | ||
5196 | if ((DEBUG_FLAGS & DEBUG_TAGS)||sym_verbose>1) { | ||
5197 | sym_print_addr(cmd, | ||
5198 | "ordered tag forced.\n"); | ||
5199 | } | ||
5200 | } | ||
5201 | lp->tags_since = 0; | ||
5202 | } | ||
5203 | #endif | ||
5204 | msgptr[msglen++] = order; | ||
5205 | |||
5206 | /* | ||
5207 | * For fewer than 128 tags, the actual tag values sent are | ||
5208 | * the odd numbers 1,3,5,...,2*MAXTAGS+1, since we may have to | ||
5209 | * deal with devices that have problems with tag #0 or with | ||
5210 | * very large tag numbers. For more tags (up to 256), | ||
5211 | * we use our tag number directly. | ||
5212 | */ | ||
5213 | #if SYM_CONF_MAX_TASK > (512/4) | ||
5214 | msgptr[msglen++] = cp->tag; | ||
5215 | #else | ||
5216 | msgptr[msglen++] = (cp->tag << 1) + 1; | ||
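| /* e.g. internal tag 0 is sent as 1, tag 1 as 3, tag 2 as 5, ... */ | ||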
5217 | #endif | ||
5218 | } | ||
5219 | |||
5220 | /* | ||
5221 | * Build a negotiation message if needed. | ||
5222 | * (nego_status is filled by sym_prepare_nego()) | ||
5223 | */ | ||
5224 | cp->nego_status = 0; | ||
5225 | if (tp->tgoal.check_nego && !tp->nego_cp && lp) { | ||
5226 | msglen += sym_prepare_nego(np, cp, msgptr + msglen); | ||
5227 | } | ||
5228 | |||
5229 | /* | ||
5230 | * Startqueue | ||
5231 | */ | ||
5232 | cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); | ||
5233 | cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa)); | ||
5234 | |||
5235 | /* | ||
5236 | * select | ||
5237 | */ | ||
5238 | cp->phys.select.sel_id = cp->target; | ||
5239 | cp->phys.select.sel_scntl3 = tp->head.wval; | ||
5240 | cp->phys.select.sel_sxfer = tp->head.sval; | ||
5241 | cp->phys.select.sel_scntl4 = tp->head.uval; | ||
5242 | |||
5243 | /* | ||
5244 | * message | ||
5245 | */ | ||
5246 | cp->phys.smsg.addr = cpu_to_scr(CCB_BA(cp, scsi_smsg)); | ||
5247 | cp->phys.smsg.size = cpu_to_scr(msglen); | ||
5248 | |||
5249 | /* | ||
5250 | * status | ||
5251 | */ | ||
5252 | cp->host_xflags = 0; | ||
5253 | cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; | ||
5254 | cp->ssss_status = S_ILLEGAL; | ||
5255 | cp->xerr_status = 0; | ||
5256 | cp->host_flags = 0; | ||
5257 | cp->extra_bytes = 0; | ||
5258 | |||
5259 | /* | ||
5260 | * Extreme data pointer. | ||
5261 | * Shall be positive, so -1 is lower than the lowest. :) | ||
5262 | */ | ||
5263 | cp->ext_sg = -1; | ||
5264 | cp->ext_ofs = 0; | ||
5265 | |||
5266 | /* | ||
5267 | * Build the CDB and DATA descriptor block | ||
5268 | * and start the IO. | ||
5269 | */ | ||
5270 | return sym_setup_data_and_start(np, cmd, cp); | ||
5271 | } | ||
5272 | |||
5273 | /* | ||
5274 | * Reset a SCSI target (all LUNs of this target). | ||
5275 | */ | ||
5276 | int sym_reset_scsi_target(struct sym_hcb *np, int target) | ||
5277 | { | ||
5278 | struct sym_tcb *tp; | ||
5279 | |||
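| /* Refuse to reset our own SCSI address or an out-of-range target. */ | ||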
5280 | if (target == np->myaddr || (u_int)target >= SYM_CONF_MAX_TARGET) | ||
5281 | return -1; | ||
5282 | |||
5283 | tp = &np->target[target]; | ||
5284 | tp->to_reset = 1; | ||
5285 | |||
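| /* | ||
| * Raise SIGP with the semaphore set so that the SCRIPTS | ||
| * processor stops and synchronizes with the C code, as in | ||
| * sym_abort_ccb() below. | ||
| */ | ||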
5286 | np->istat_sem = SEM; | ||
5287 | OUTB(np, nc_istat, SIGP|SEM); | ||
5288 | |||
5289 | return 0; | ||
5290 | } | ||
5291 | |||
5292 | /* | ||
5293 | * Abort a SCSI IO. | ||
5294 | */ | ||
5295 | static int sym_abort_ccb(struct sym_hcb *np, struct sym_ccb *cp, int timed_out) | ||
5296 | { | ||
5297 | /* | ||
5298 | * Check that the IO is active. | ||
5299 | */ | ||
5300 | if (!cp || !cp->host_status || cp->host_status == HS_WAIT) | ||
5301 | return -1; | ||
5302 | |||
5303 | /* | ||
5304 | * If a previous abort didn't succeed in time, | ||
5305 | * perform a BUS reset. | ||
5306 | */ | ||
5307 | if (cp->to_abort) { | ||
5308 | sym_reset_scsi_bus(np, 1); | ||
5309 | return 0; | ||
5310 | } | ||
5311 | |||
5312 | /* | ||
5313 | * Mark the CCB for abort and allow some time for the abort to complete. | ||
5314 | */ | ||
5315 | cp->to_abort = timed_out ? 2 : 1; | ||
5316 | |||
5317 | /* | ||
5318 | * Tell the SCRIPTS processor to stop and synchronize with us. | ||
5319 | */ | ||
5320 | np->istat_sem = SEM; | ||
5321 | OUTB(np, nc_istat, SIGP|SEM); | ||
5322 | return 0; | ||
5323 | } | ||
5324 | |||
5325 | int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, int timed_out) | ||
5326 | { | ||
5327 | struct sym_ccb *cp; | ||
5328 | SYM_QUEHEAD *qp; | ||
5329 | |||
5330 | /* | ||
5331 | * Look up the CCB that owns this command. | ||
5332 | */ | ||
5333 | cp = NULL; | ||
5334 | FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { | ||
5335 | struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq); | ||
5336 | if (cp2->cmd == cmd) { | ||
5337 | cp = cp2; | ||
5338 | break; | ||
5339 | } | ||
5340 | } | ||
5341 | |||
5342 | return sym_abort_ccb(np, cp, timed_out); | ||
5343 | } | ||
5344 | |||
5345 | /* | ||
5346 | * Complete execution of a SCSI command with extended | ||
5347 | * error, SCSI status error, or having been auto-sensed. | ||
5348 | * | ||
5349 | * The SCRIPTS processor is not running there, so we | ||
5350 | * can safely access IO registers and remove JOBs from | ||
5351 | * the START queue. | ||
5352 | * SCRATCHA is assumed to have been loaded with STARTPOS | ||
5353 | * before the SCRIPTS called the C code. | ||
5354 | */ | ||
5355 | void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp) | ||
5356 | { | ||
5357 | struct scsi_device *sdev; | ||
5358 | struct scsi_cmnd *cmd; | ||
5359 | struct sym_tcb *tp; | ||
5360 | struct sym_lcb *lp; | ||
5361 | int resid; | ||
5362 | int i; | ||
5363 | |||
5364 | /* | ||
5365 | * Paranoid check. :) | ||
5366 | */ | ||
5367 | if (!cp || !cp->cmd) | ||
5368 | return; | ||
5369 | |||
5370 | cmd = cp->cmd; | ||
5371 | sdev = cmd->device; | ||
5372 | if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) { | ||
5373 | dev_info(&sdev->sdev_gendev, "CCB=%p STAT=%x/%x/%x\n", cp, | ||
5374 | cp->host_status, cp->ssss_status, cp->host_flags); | ||
5375 | } | ||
5376 | |||
5377 | /* | ||
5378 | * Get target and lun pointers. | ||
5379 | */ | ||
5380 | tp = &np->target[cp->target]; | ||
5381 | lp = sym_lp(tp, sdev->lun); | ||
5382 | |||
5383 | /* | ||
5384 | * Check for extended errors. | ||
5385 | */ | ||
5386 | if (cp->xerr_status) { | ||
5387 | if (sym_verbose) | ||
5388 | sym_print_xerr(cmd, cp->xerr_status); | ||
5389 | if (cp->host_status == HS_COMPLETE) | ||
5390 | cp->host_status = HS_COMP_ERR; | ||
5391 | } | ||
5392 | |||
5393 | /* | ||
5394 | * Calculate the residual. | ||
5395 | */ | ||
5396 | resid = sym_compute_residual(np, cp); | ||
5397 | |||
5398 | if (!SYM_SETUP_RESIDUAL_SUPPORT) {/* If user does not want residuals */ | ||
5399 | resid = 0; /* throw them away. :) */ | ||
5400 | cp->sv_resid = 0; | ||
5401 | } | ||
5402 | #ifdef DEBUG_2_0_X | ||
5403 | if (resid) | ||
5404 | printf("XXXX RESID= %d - 0x%x\n", resid, resid); | ||
5405 | #endif | ||
5406 | |||
5407 | /* | ||
5408 | * Dequeue all queued CCBs for that device | ||
5409 | * not yet started by SCRIPTS. | ||
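| * SCRATCHA holds STARTPOS (see the function header comment); | ||
| * the byte offset from the start queue base is divided by 4 | ||
| * to obtain an index of 32-bit start queue entries. | ||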
5410 | */ | ||
5411 | i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; | ||
5412 | i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1); | ||
5413 | |||
5414 | /* | ||
5415 | * Restart the SCRIPTS processor. | ||
5416 | */ | ||
5417 | OUTL_DSP(np, SCRIPTA_BA(np, start)); | ||
5418 | |||
5419 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
5420 | if (cp->host_status == HS_COMPLETE && | ||
5421 | cp->ssss_status == S_QUEUE_FULL) { | ||
5422 | if (!lp || lp->started_tags - i < 2) | ||
5423 | goto weirdness; | ||
5424 | /* | ||
5425 | * Decrease queue depth as needed. | ||
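| * On QUEUE FULL, shrink started_max to the number of commands | ||
| * the device actually accepted: the started tags minus those | ||
| * just removed from the start queue and the one that failed. | ||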
5426 | */ | ||
5427 | lp->started_max = lp->started_tags - i - 1; | ||
5428 | lp->num_sgood = 0; | ||
5429 | |||
5430 | if (sym_verbose >= 2) { | ||
5431 | sym_print_addr(cmd, " queue depth is now %d\n", | ||
5432 | lp->started_max); | ||
5433 | } | ||
5434 | |||
5435 | /* | ||
5436 | * Repair the CCB. | ||
5437 | */ | ||
5438 | cp->host_status = HS_BUSY; | ||
5439 | cp->ssss_status = S_ILLEGAL; | ||
5440 | |||
5441 | /* | ||
5442 | * Let's requeue it to the device. | ||
5443 | */ | ||
5444 | sym_set_cam_status(cmd, CAM_REQUEUE_REQ); | ||
5445 | goto finish; | ||
5446 | } | ||
5447 | weirdness: | ||
5448 | #endif | ||
5449 | /* | ||
5450 | * Build result in CAM ccb. | ||
5451 | */ | ||
5452 | sym_set_cam_result_error(np, cp, resid); | ||
5453 | |||
5454 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
5455 | finish: | ||
5456 | #endif | ||
5457 | /* | ||
5458 | * Add this one to the COMP queue. | ||
5459 | */ | ||
5460 | sym_remque(&cp->link_ccbq); | ||
5461 | sym_insque_head(&cp->link_ccbq, &np->comp_ccbq); | ||
5462 | |||
5463 | /* | ||
5464 | * Complete all those commands with either error | ||
5465 | * or requeue condition. | ||
5466 | */ | ||
5467 | sym_flush_comp_queue(np, 0); | ||
5468 | |||
5469 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
5470 | /* | ||
5471 | * Do not start more than 1 command after an error. | ||
5472 | */ | ||
5473 | if (lp) | ||
5474 | sym_start_next_ccbs(np, lp, 1); | ||
5475 | #endif | ||
5476 | } | ||
5477 | |||
5478 | /* | ||
5479 | * Complete execution of a successful SCSI command. | ||
5480 | * | ||
5481 | * Only successful commands go to the DONE queue, | ||
5482 | * since we need to have the SCRIPTS processor | ||
5483 | * stopped on any error condition. | ||
5484 | * The SCRIPTS processor is running while we are | ||
5485 | * completing successful commands. | ||
5486 | */ | ||
5487 | void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp) | ||
5488 | { | ||
5489 | struct sym_tcb *tp; | ||
5490 | struct sym_lcb *lp; | ||
5491 | struct scsi_cmnd *cmd; | ||
5492 | int resid; | ||
5493 | |||
5494 | /* | ||
5495 | * Paranoid check. :) | ||
5496 | */ | ||
5497 | if (!cp || !cp->cmd) | ||
5498 | return; | ||
5499 | assert (cp->host_status == HS_COMPLETE); | ||
5500 | |||
5501 | /* | ||
5502 | * Get user command. | ||
5503 | */ | ||
5504 | cmd = cp->cmd; | ||
5505 | |||
5506 | /* | ||
5507 | * Get target and lun pointers. | ||
5508 | */ | ||
5509 | tp = &np->target[cp->target]; | ||
5510 | lp = sym_lp(tp, cp->lun); | ||
5511 | |||
5512 | /* | ||
5513 | * Assume device discovered on first success. | ||
5514 | */ | ||
5515 | if (!lp) | ||
5516 | sym_set_bit(tp->lun_map, cp->lun); | ||
5517 | |||
5518 | /* | ||
5519 | * If all data have been transferred and no extended | ||
5520 | * error occurred, there is no residual. | ||
5521 | */ | ||
5522 | resid = 0; | ||
5523 | if (cp->phys.head.lastp != sym_goalp(cp)) | ||
5524 | resid = sym_compute_residual(np, cp); | ||
5525 | |||
5526 | /* | ||
5527 | * Wrong transfer residuals may be worse than just always | ||
5528 | * returning zero. User can disable this feature in | ||
5529 | * sym53c8xx.h. Residual support is enabled by default. | ||
5530 | */ | ||
5531 | if (!SYM_SETUP_RESIDUAL_SUPPORT) | ||
5532 | resid = 0; | ||
5533 | #ifdef DEBUG_2_0_X | ||
5534 | if (resid) | ||
5535 | printf("XXXX RESID= %d - 0x%x\n", resid, resid); | ||
5536 | #endif | ||
5537 | |||
5538 | /* | ||
5539 | * Build result in CAM ccb. | ||
5540 | */ | ||
5541 | sym_set_cam_result_ok(cp, cmd, resid); | ||
5542 | |||
5543 | #ifdef SYM_OPT_SNIFF_INQUIRY | ||
5544 | /* | ||
5545 | * On standard INQUIRY response (EVPD and CmDt | ||
5546 | * not set), sniff out device capabilities. | ||
5547 | */ | ||
5548 | if (cp->cdb_buf[0] == INQUIRY && !(cp->cdb_buf[1] & 0x3)) | ||
5549 | sym_sniff_inquiry(np, cmd, resid); | ||
5550 | #endif | ||
5551 | |||
5552 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
5553 | /* | ||
5554 | * If the max number of started ccbs has been reduced, | ||
5555 | * increase it again once 200 good statuses have been received. | ||
5556 | */ | ||
5557 | if (lp && lp->started_max < lp->started_limit) { | ||
5558 | ++lp->num_sgood; | ||
5559 | if (lp->num_sgood >= 200) { | ||
5560 | lp->num_sgood = 0; | ||
5561 | ++lp->started_max; | ||
5562 | if (sym_verbose >= 2) { | ||
5563 | sym_print_addr(cmd, " queue depth is now %d\n", | ||
5564 | lp->started_max); | ||
5565 | } | ||
5566 | } | ||
5567 | } | ||
5568 | #endif | ||
5569 | |||
5570 | /* | ||
5571 | * Free our CCB. | ||
5572 | */ | ||
5573 | sym_free_ccb (np, cp); | ||
5574 | |||
5575 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
5576 | /* | ||
5577 | * Requeue a couple of waiting SCSI commands. | ||
5578 | */ | ||
5579 | if (lp && !sym_que_empty(&lp->waiting_ccbq)) | ||
5580 | sym_start_next_ccbs(np, lp, 2); | ||
5581 | #endif | ||
5582 | /* | ||
5583 | * Complete the command. | ||
5584 | */ | ||
5585 | sym_xpt_done(np, cmd); | ||
5586 | } | ||
5587 | |||
5588 | /* | ||
5589 | * Soft-attach the controller. | ||
5590 | */ | ||
5591 | int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram) | ||
5592 | { | ||
5593 | struct sym_hcb *np = sym_get_hcb(shost); | ||
5594 | int i; | ||
5595 | |||
5596 | /* | ||
5597 | * Get some info about the firmware. | ||
5598 | */ | ||
5599 | np->scripta_sz = fw->a_size; | ||
5600 | np->scriptb_sz = fw->b_size; | ||
5601 | np->scriptz_sz = fw->z_size; | ||
5602 | np->fw_setup = fw->setup; | ||
5603 | np->fw_patch = fw->patch; | ||
5604 | np->fw_name = fw->name; | ||
5605 | |||
5606 | /* | ||
5607 | * Save the settings of some IO registers, so we will | ||
5608 | * be able to probe specific implementations. | ||
5609 | */ | ||
5610 | sym_save_initial_setting (np); | ||
5611 | |||
5612 | /* | ||
5613 | * Reset the chip now, since it has been reported | ||
5614 | * that SCSI clock calibration may not work properly | ||
5615 | * if the chip is currently active. | ||
5616 | */ | ||
5617 | sym_chip_reset(np); | ||
5618 | |||
5619 | /* | ||
5620 | * Prepare controller and devices settings, according | ||
5621 | * to chip features, user set-up and driver set-up. | ||
5622 | */ | ||
5623 | sym_prepare_setting(shost, np, nvram); | ||
5624 | |||
5625 | /* | ||
5626 | * Check the PCI clock frequency. | ||
5627 | * Must be performed after prepare_setting since it destroys | ||
5628 | * STEST1 that is used to probe for the clock doubler. | ||
5629 | */ | ||
5630 | i = sym_getpciclock(np); | ||
5631 | if (i > 37000 && !(np->features & FE_66MHZ)) | ||
5632 | printf("%s: PCI BUS clock seems too high: %u KHz.\n", | ||
5633 | sym_name(np), i); | ||
5634 | |||
5635 | /* | ||
5636 | * Allocate the start queue. | ||
5637 | */ | ||
5638 | np->squeue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE"); | ||
5639 | if (!np->squeue) | ||
5640 | goto attach_failed; | ||
5641 | np->squeue_ba = vtobus(np->squeue); | ||
5642 | |||
5643 | /* | ||
5644 | * Allocate the done queue. | ||
5645 | */ | ||
5646 | np->dqueue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE"); | ||
5647 | if (!np->dqueue) | ||
5648 | goto attach_failed; | ||
5649 | np->dqueue_ba = vtobus(np->dqueue); | ||
5650 | |||
5651 | /* | ||
5652 | * Allocate the target bus address array. | ||
5653 | */ | ||
5654 | np->targtbl = sym_calloc_dma(256, "TARGTBL"); | ||
5655 | if (!np->targtbl) | ||
5656 | goto attach_failed; | ||
5657 | np->targtbl_ba = vtobus(np->targtbl); | ||
5658 | |||
5659 | /* | ||
5660 | * Allocate SCRIPTS areas. | ||
5661 | */ | ||
5662 | np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0"); | ||
5663 | np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0"); | ||
5664 | np->scriptz0 = sym_calloc_dma(np->scriptz_sz, "SCRIPTZ0"); | ||
5665 | if (!np->scripta0 || !np->scriptb0 || !np->scriptz0) | ||
5666 | goto attach_failed; | ||
5667 | |||
5668 | /* | ||
5669 | * Allocate the array of lists of CCBs hashed by DSA. | ||
5670 | */ | ||
5671 | np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb *), GFP_KERNEL); | ||
5672 | if (!np->ccbh) | ||
5673 | goto attach_failed; | ||
5674 | |||
5675 | /* | ||
5676 | * Initialize the CCB free and busy queues. | ||
5677 | */ | ||
5678 | sym_que_init(&np->free_ccbq); | ||
5679 | sym_que_init(&np->busy_ccbq); | ||
5680 | sym_que_init(&np->comp_ccbq); | ||
5681 | |||
5682 | /* | ||
5683 | * Initialization for optional handling | ||
5684 | * of device queueing. | ||
5685 | */ | ||
5686 | #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING | ||
5687 | sym_que_init(&np->dummy_ccbq); | ||
5688 | #endif | ||
5689 | /* | ||
5690 | * Allocate some CCBs. We need at least ONE. | ||
5691 | */ | ||
5692 | if (!sym_alloc_ccb(np)) | ||
5693 | goto attach_failed; | ||
5694 | |||
5695 | /* | ||
5696 | * Calculate BUS addresses where we are going | ||
5697 | * to load the SCRIPTS. | ||
5698 | */ | ||
5699 | np->scripta_ba = vtobus(np->scripta0); | ||
5700 | np->scriptb_ba = vtobus(np->scriptb0); | ||
5701 | np->scriptz_ba = vtobus(np->scriptz0); | ||
5702 | |||
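| /* | ||
| * If the chip has on-board SCRIPTS RAM, run SCRIPTA from it | ||
| * (and SCRIPTB from the second 4K page on 8K RAM parts) | ||
| * instead of from host memory. | ||
| */ | ||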
5703 | if (np->ram_ba) { | ||
5704 | np->scripta_ba = np->ram_ba; | ||
5705 | if (np->features & FE_RAM8K) { | ||
5706 | np->ram_ws = 8192; | ||
5707 | np->scriptb_ba = np->scripta_ba + 4096; | ||
5708 | #if 0 /* May get useful for 64 BIT PCI addressing */ | ||
5709 | np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); | ||
5710 | #endif | ||
5711 | } | ||
5712 | else | ||
5713 | np->ram_ws = 4096; | ||
5714 | } | ||
5715 | |||
5716 | /* | ||
5717 | * Copy scripts to controller instance. | ||
5718 | */ | ||
5719 | memcpy(np->scripta0, fw->a_base, np->scripta_sz); | ||
5720 | memcpy(np->scriptb0, fw->b_base, np->scriptb_sz); | ||
5721 | memcpy(np->scriptz0, fw->z_base, np->scriptz_sz); | ||
5722 | |||
5723 | /* | ||
5724 | * Setup variable parts in scripts and compute | ||
5725 | * scripts bus addresses used from the C code. | ||
5726 | */ | ||
5727 | np->fw_setup(np, fw); | ||
5728 | |||
5729 | /* | ||
5730 | * Bind SCRIPTS with physical addresses usable by the | ||
5731 | * SCRIPTS processor (as seen from the BUS = BUS addresses). | ||
5732 | */ | ||
5733 | sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz); | ||
5734 | sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz); | ||
5735 | sym_fw_bind_script(np, (u32 *) np->scriptz0, np->scriptz_sz); | ||
5736 | |||
5737 | #ifdef SYM_CONF_IARB_SUPPORT | ||
5738 | /* | ||
5739 | * If user wants IARB to be set when we win arbitration | ||
5740 | * and have other jobs, compute the max number of consecutive | ||
5741 | * settings of IARB hints before we leave devices a chance to | ||
5742 | * arbitrate for reselection. | ||
5743 | */ | ||
5744 | #ifdef SYM_SETUP_IARB_MAX | ||
5745 | np->iarb_max = SYM_SETUP_IARB_MAX; | ||
5746 | #else | ||
5747 | np->iarb_max = 4; | ||
5748 | #endif | ||
5749 | #endif | ||
5750 | |||
5751 | /* | ||
5752 | * Prepare the idle and invalid task actions. | ||
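| * Their bus addresses (e.g. notask_ba, bad_itl_ba, bad_itlq_ba) | ||
| * are what the per-LUN task pointers are reset to whenever no | ||
| * real CCB owns the nexus (see sym_free_ccb() and | ||
| * sym_alloc_lcb_tags() above). | ||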
5753 | */ | ||
5754 | np->idletask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); | ||
5755 | np->idletask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); | ||
5756 | np->idletask_ba = vtobus(&np->idletask); | ||
5757 | |||
5758 | np->notask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); | ||
5759 | np->notask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); | ||
5760 | np->notask_ba = vtobus(&np->notask); | ||
5761 | |||
5762 | np->bad_itl.start = cpu_to_scr(SCRIPTA_BA(np, idle)); | ||
5763 | np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); | ||
5764 | np->bad_itl_ba = vtobus(&np->bad_itl); | ||
5765 | |||
5766 | np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA(np, idle)); | ||
5767 | np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA(np,bad_i_t_l_q)); | ||
5768 | np->bad_itlq_ba = vtobus(&np->bad_itlq); | ||
5769 | |||
5770 | /* | ||
5771 | * Allocate and prepare the lun JUMP table that is used | ||
5772 | * for a target prior to the probing of devices (bad lun table). | ||
5773 | * A private table will be allocated for the target on the | ||
5774 | * first INQUIRY response received. | ||
5775 | */ | ||
5776 | np->badluntbl = sym_calloc_dma(256, "BADLUNTBL"); | ||
5777 | if (!np->badluntbl) | ||
5778 | goto attach_failed; | ||
5779 | |||
5780 | np->badlun_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); | ||
5781 | for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */ | ||
5782 | np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); | ||
5783 | |||
5784 | /* | ||
5785 | * Prepare the bus address array that contains the bus | ||
5786 | * address of each target control block. | ||
5787 | * For now, assume all logical units are wrong. :) | ||
5788 | */ | ||
5789 | for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { | ||
5790 | np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); | ||
5791 | np->target[i].head.luntbl_sa = | ||
5792 | cpu_to_scr(vtobus(np->badluntbl)); | ||
5793 | np->target[i].head.lun0_sa = | ||
5794 | cpu_to_scr(vtobus(&np->badlun_sa)); | ||
5795 | } | ||
5796 | |||
5797 | /* | ||
5798 | * Now check the cache handling of the pci chipset. | ||
5799 | */ | ||
5800 | if (sym_snooptest (np)) { | ||
5801 | printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np)); | ||
5802 | goto attach_failed; | ||
5803 | } | ||
5804 | |||
5805 | /* | ||
5806 | * Sigh! we are done. | ||
5807 | */ | ||
5808 | return 0; | ||
5809 | |||
5810 | attach_failed: | ||
5811 | return -ENXIO; | ||
5812 | } | ||
5813 | |||
5814 | /* | ||
5815 | * Free everything that has been allocated for this device. | ||
5816 | */ | ||
5817 | void sym_hcb_free(struct sym_hcb *np) | ||
5818 | { | ||
5819 | SYM_QUEHEAD *qp; | ||
5820 | struct sym_ccb *cp; | ||
5821 | struct sym_tcb *tp; | ||
5822 | struct sym_lcb *lp; | ||
5823 | int target, lun; | ||
5824 | |||
5825 | if (np->scriptz0) | ||
5826 | sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0"); | ||
5827 | if (np->scriptb0) | ||
5828 | sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0"); | ||
5829 | if (np->scripta0) | ||
5830 | sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0"); | ||
5831 | if (np->squeue) | ||
5832 | sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE"); | ||
5833 | if (np->dqueue) | ||
5834 | sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); | ||
5835 | |||
5836 | if (np->actccbs) { | ||
5837 | while ((qp = sym_remque_head(&np->free_ccbq)) != 0) { | ||
5838 | cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); | ||
5839 | sym_mfree_dma(cp, sizeof(*cp), "CCB"); | ||
5840 | } | ||
5841 | } | ||
5842 | kfree(np->ccbh); | ||
5843 | |||
5844 | if (np->badluntbl) | ||
5845 | sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL"); | ||
5846 | |||
5847 | for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { | ||
5848 | tp = &np->target[target]; | ||
5849 | for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) { | ||
5850 | lp = sym_lp(tp, lun); | ||
5851 | if (!lp) | ||
5852 | continue; | ||
5853 | if (lp->itlq_tbl) | ||
5854 | sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, | ||
5855 | "ITLQ_TBL"); | ||
5856 | kfree(lp->cb_tags); | ||
5857 | sym_mfree_dma(lp, sizeof(*lp), "LCB"); | ||
5858 | } | ||
5859 | #if SYM_CONF_MAX_LUN > 1 | ||
5860 | kfree(tp->lunmp); | ||
5861 | #endif | ||
5862 | } | ||
5863 | if (np->targtbl) | ||
5864 | sym_mfree_dma(np->targtbl, 256, "TARGTBL"); | ||
5865 | } | ||