diff options
Diffstat (limited to 'drivers/scsi/aic7xxx/aic7xxx_core.c')
-rw-r--r-- | drivers/scsi/aic7xxx/aic7xxx_core.c | 508 |
1 files changed, 506 insertions, 2 deletions
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c index 64e62ce59c1..d1d006b8b3a 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c | |||
@@ -237,6 +237,510 @@ static void ahc_update_scsiid(struct ahc_softc *ahc, | |||
237 | static int ahc_handle_target_cmd(struct ahc_softc *ahc, | 237 | static int ahc_handle_target_cmd(struct ahc_softc *ahc, |
238 | struct target_cmd *cmd); | 238 | struct target_cmd *cmd); |
239 | #endif | 239 | #endif |
240 | |||
241 | /************************* Sequencer Execution Control ************************/ | ||
242 | /* | ||
243 | * Work around any chip bugs related to halting sequencer execution. | ||
244 | * On Ultra2 controllers, we must clear the CIOBUS stretch signal by | ||
245 | * reading a register that will set this signal and deassert it. | ||
246 | * Without this workaround, if the chip is paused, by an interrupt or | ||
247 | * manual pause while accessing scb ram, accesses to certain registers | ||
248 | * will hang the system (infinite pci retries). | ||
249 | */ | ||
250 | void | ||
251 | ahc_pause_bug_fix(struct ahc_softc *ahc) | ||
252 | { | ||
253 | if ((ahc->features & AHC_ULTRA2) != 0) | ||
254 | (void)ahc_inb(ahc, CCSCBCTL); | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * Determine whether the sequencer has halted code execution. | ||
259 | * Returns non-zero status if the sequencer is stopped. | ||
260 | */ | ||
261 | int | ||
262 | ahc_is_paused(struct ahc_softc *ahc) | ||
263 | { | ||
264 | return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0); | ||
265 | } | ||
266 | |||
267 | /* | ||
268 | * Request that the sequencer stop and wait, indefinitely, for it | ||
269 | * to stop. The sequencer will only acknowledge that it is paused | ||
270 | * once it has reached an instruction boundary and PAUSEDIS is | ||
271 | * cleared in the SEQCTL register. The sequencer may use PAUSEDIS | ||
272 | * for critical sections. | ||
273 | */ | ||
274 | void | ||
275 | ahc_pause(struct ahc_softc *ahc) | ||
276 | { | ||
277 | ahc_outb(ahc, HCNTRL, ahc->pause); | ||
278 | |||
279 | /* | ||
280 | * Since the sequencer can disable pausing in a critical section, we | ||
281 | * must loop until it actually stops. | ||
282 | */ | ||
283 | while (ahc_is_paused(ahc) == 0) | ||
284 | ; | ||
285 | |||
286 | ahc_pause_bug_fix(ahc); | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * Allow the sequencer to continue program execution. | ||
291 | * We check here to ensure that no additional interrupt | ||
292 | * sources that would cause the sequencer to halt have been | ||
293 | * asserted. If, for example, a SCSI bus reset is detected | ||
294 | * while we are fielding a different, pausing, interrupt type, | ||
295 | * we don't want to release the sequencer before going back | ||
296 | * into our interrupt handler and dealing with this new | ||
297 | * condition. | ||
298 | */ | ||
299 | void | ||
300 | ahc_unpause(struct ahc_softc *ahc) | ||
301 | { | ||
302 | if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0) | ||
303 | ahc_outb(ahc, HCNTRL, ahc->unpause); | ||
304 | } | ||
305 | |||
306 | /************************** Memory mapping routines ***************************/ | ||
307 | struct ahc_dma_seg * | ||
308 | ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr) | ||
309 | { | ||
310 | int sg_index; | ||
311 | |||
312 | sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg); | ||
313 | /* sg_list_phys points to entry 1, not 0 */ | ||
314 | sg_index++; | ||
315 | |||
316 | return (&scb->sg_list[sg_index]); | ||
317 | } | ||
318 | |||
319 | uint32_t | ||
320 | ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg) | ||
321 | { | ||
322 | int sg_index; | ||
323 | |||
324 | /* sg_list_phys points to entry 1, not 0 */ | ||
325 | sg_index = sg - &scb->sg_list[1]; | ||
326 | |||
327 | return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list))); | ||
328 | } | ||
329 | |||
330 | uint32_t | ||
331 | ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index) | ||
332 | { | ||
333 | return (ahc->scb_data->hscb_busaddr | ||
334 | + (sizeof(struct hardware_scb) * index)); | ||
335 | } | ||
336 | |||
337 | void | ||
338 | ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op) | ||
339 | { | ||
340 | ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat, | ||
341 | ahc->scb_data->hscb_dmamap, | ||
342 | /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb), | ||
343 | /*len*/sizeof(*scb->hscb), op); | ||
344 | } | ||
345 | |||
346 | void | ||
347 | ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op) | ||
348 | { | ||
349 | if (scb->sg_count == 0) | ||
350 | return; | ||
351 | |||
352 | ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap, | ||
353 | /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr) | ||
354 | * sizeof(struct ahc_dma_seg), | ||
355 | /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op); | ||
356 | } | ||
357 | |||
358 | uint32_t | ||
359 | ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index) | ||
360 | { | ||
361 | return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo); | ||
362 | } | ||
363 | |||
364 | /*********************** Miscelaneous Support Functions ***********************/ | ||
365 | /* | ||
366 | * Determine whether the sequencer reported a residual | ||
367 | * for this SCB/transaction. | ||
368 | */ | ||
369 | void | ||
370 | ahc_update_residual(struct ahc_softc *ahc, struct scb *scb) | ||
371 | { | ||
372 | uint32_t sgptr; | ||
373 | |||
374 | sgptr = ahc_le32toh(scb->hscb->sgptr); | ||
375 | if ((sgptr & SG_RESID_VALID) != 0) | ||
376 | ahc_calc_residual(ahc, scb); | ||
377 | } | ||
378 | |||
379 | /* | ||
380 | * Return pointers to the transfer negotiation information | ||
381 | * for the specified our_id/remote_id pair. | ||
382 | */ | ||
383 | struct ahc_initiator_tinfo * | ||
384 | ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, | ||
385 | u_int remote_id, struct ahc_tmode_tstate **tstate) | ||
386 | { | ||
387 | /* | ||
388 | * Transfer data structures are stored from the perspective | ||
389 | * of the target role. Since the parameters for a connection | ||
390 | * in the initiator role to a given target are the same as | ||
391 | * when the roles are reversed, we pretend we are the target. | ||
392 | */ | ||
393 | if (channel == 'B') | ||
394 | our_id += 8; | ||
395 | *tstate = ahc->enabled_targets[our_id]; | ||
396 | return (&(*tstate)->transinfo[remote_id]); | ||
397 | } | ||
398 | |||
399 | uint16_t | ||
400 | ahc_inw(struct ahc_softc *ahc, u_int port) | ||
401 | { | ||
402 | uint16_t r = ahc_inb(ahc, port+1) << 8; | ||
403 | return r | ahc_inb(ahc, port); | ||
404 | } | ||
405 | |||
406 | void | ||
407 | ahc_outw(struct ahc_softc *ahc, u_int port, u_int value) | ||
408 | { | ||
409 | ahc_outb(ahc, port, value & 0xFF); | ||
410 | ahc_outb(ahc, port+1, (value >> 8) & 0xFF); | ||
411 | } | ||
412 | |||
413 | uint32_t | ||
414 | ahc_inl(struct ahc_softc *ahc, u_int port) | ||
415 | { | ||
416 | return ((ahc_inb(ahc, port)) | ||
417 | | (ahc_inb(ahc, port+1) << 8) | ||
418 | | (ahc_inb(ahc, port+2) << 16) | ||
419 | | (ahc_inb(ahc, port+3) << 24)); | ||
420 | } | ||
421 | |||
422 | void | ||
423 | ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value) | ||
424 | { | ||
425 | ahc_outb(ahc, port, (value) & 0xFF); | ||
426 | ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF); | ||
427 | ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF); | ||
428 | ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF); | ||
429 | } | ||
430 | |||
431 | uint64_t | ||
432 | ahc_inq(struct ahc_softc *ahc, u_int port) | ||
433 | { | ||
434 | return ((ahc_inb(ahc, port)) | ||
435 | | (ahc_inb(ahc, port+1) << 8) | ||
436 | | (ahc_inb(ahc, port+2) << 16) | ||
437 | | (ahc_inb(ahc, port+3) << 24) | ||
438 | | (((uint64_t)ahc_inb(ahc, port+4)) << 32) | ||
439 | | (((uint64_t)ahc_inb(ahc, port+5)) << 40) | ||
440 | | (((uint64_t)ahc_inb(ahc, port+6)) << 48) | ||
441 | | (((uint64_t)ahc_inb(ahc, port+7)) << 56)); | ||
442 | } | ||
443 | |||
444 | void | ||
445 | ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value) | ||
446 | { | ||
447 | ahc_outb(ahc, port, value & 0xFF); | ||
448 | ahc_outb(ahc, port+1, (value >> 8) & 0xFF); | ||
449 | ahc_outb(ahc, port+2, (value >> 16) & 0xFF); | ||
450 | ahc_outb(ahc, port+3, (value >> 24) & 0xFF); | ||
451 | ahc_outb(ahc, port+4, (value >> 32) & 0xFF); | ||
452 | ahc_outb(ahc, port+5, (value >> 40) & 0xFF); | ||
453 | ahc_outb(ahc, port+6, (value >> 48) & 0xFF); | ||
454 | ahc_outb(ahc, port+7, (value >> 56) & 0xFF); | ||
455 | } | ||
456 | |||
457 | /* | ||
458 | * Get a free scb. If there are none, see if we can allocate a new SCB. | ||
459 | */ | ||
460 | struct scb * | ||
461 | ahc_get_scb(struct ahc_softc *ahc) | ||
462 | { | ||
463 | struct scb *scb; | ||
464 | |||
465 | if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) { | ||
466 | ahc_alloc_scbs(ahc); | ||
467 | scb = SLIST_FIRST(&ahc->scb_data->free_scbs); | ||
468 | if (scb == NULL) | ||
469 | return (NULL); | ||
470 | } | ||
471 | SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle); | ||
472 | return (scb); | ||
473 | } | ||
474 | |||
475 | /* | ||
476 | * Return an SCB resource to the free list. | ||
477 | */ | ||
478 | void | ||
479 | ahc_free_scb(struct ahc_softc *ahc, struct scb *scb) | ||
480 | { | ||
481 | struct hardware_scb *hscb; | ||
482 | |||
483 | hscb = scb->hscb; | ||
484 | /* Clean up for the next user */ | ||
485 | ahc->scb_data->scbindex[hscb->tag] = NULL; | ||
486 | scb->flags = SCB_FREE; | ||
487 | hscb->control = 0; | ||
488 | |||
489 | SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle); | ||
490 | |||
491 | /* Notify the OSM that a resource is now available. */ | ||
492 | ahc_platform_scb_free(ahc, scb); | ||
493 | } | ||
494 | |||
495 | struct scb * | ||
496 | ahc_lookup_scb(struct ahc_softc *ahc, u_int tag) | ||
497 | { | ||
498 | struct scb* scb; | ||
499 | |||
500 | scb = ahc->scb_data->scbindex[tag]; | ||
501 | if (scb != NULL) | ||
502 | ahc_sync_scb(ahc, scb, | ||
503 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); | ||
504 | return (scb); | ||
505 | } | ||
506 | |||
/*
 * Exchange the hardware SCB backing "scb" with the one the sequencer
 * has been told to download next, preserving the tag <-> hscb-slot
 * correspondence.
 */
void
ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	u_int  saved_tag;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB to download, and we
	 * can't disappoint it.  To achieve this, the next
	 * SCB to download is saved off in ahc->next_queued_scb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahc->next_queued_scb->hscb;
	saved_tag = q_hscb->tag;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	if ((scb->flags & SCB_CDB32_PTR) != 0) {
		/*
		 * The copied hscb embeds a pointer to its own 32-byte CDB.
		 * NOTE(review): at this point q_hscb->tag still holds the
		 * tag just memcpy'd in (the incoming scb's tag), so the
		 * pointer targets the cdb32 of the hscb slot that is handed
		 * to next_queued_scb below -- confirm that slot's contents
		 * remain valid until the command's CDB is fetched.
		 */
		q_hscb->shared_data.cdb_ptr =
		    ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
			      + offsetof(struct hardware_scb, cdb32));
	}
	/* Restore this slot's identity and link it to the donated slot. */
	q_hscb->tag = saved_tag;
	q_hscb->next = scb->hscb->tag;

	/* Now swap HSCB pointers. */
	ahc->next_queued_scb->hscb = scb->hscb;
	scb->hscb = q_hscb;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahc->scb_data->scbindex[scb->hscb->tag] = scb;
}
543 | |||
/*
 * Tell the sequencer about a new transaction to execute.
 */
void
ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
{
	/* Trade hscbs with next_queued_scb so the card's prefetch stays valid. */
	ahc_swap_with_next_hscb(ahc, scb);

	if (scb->hscb->tag == SCB_LIST_NULL
	 || scb->hscb->next == SCB_LIST_NULL)
		panic("Attempt to queue invalid SCB tag %x:%x\n",
		      scb->hscb->tag, scb->hscb->next);

	/*
	 * Setup data "oddness".  The odd-transfer-length flag rides in
	 * the bits of the lun field outside the LID mask.
	 */
	scb->hscb->lun &= LID;
	if (ahc_get_transfer_length(scb) & 0x1)
		scb->hscb->lun |= SCB_XFERLEN_ODD;

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Tell the adapter about the newly queued SCB */
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		/* Chips with queue registers accept the new index directly. */
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		/*
		 * Older chips must be paused (unless they auto-pause on
		 * this register access) before KERNEL_QINPOS is updated.
		 */
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_pause(ahc);
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_unpause(ahc);
	}
}
586 | |||
587 | struct scsi_sense_data * | ||
588 | ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb) | ||
589 | { | ||
590 | int offset; | ||
591 | |||
592 | offset = scb - ahc->scb_data->scbarray; | ||
593 | return (&ahc->scb_data->sense[offset]); | ||
594 | } | ||
595 | |||
596 | uint32_t | ||
597 | ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb) | ||
598 | { | ||
599 | int offset; | ||
600 | |||
601 | offset = scb - ahc->scb_data->scbarray; | ||
602 | return (ahc->scb_data->sense_busaddr | ||
603 | + (offset * sizeof(struct scsi_sense_data))); | ||
604 | } | ||
605 | |||
/************************** Interrupt Processing ******************************/
/*
 * Synchronize the DMA view of the command-complete (qoutfifo) region.
 */
void
ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
{
	/*
	 * The qoutfifo occupies the first 256 bytes of the shared data
	 * area -- presumably one byte per possible SCB tag; TODO confirm
	 * against the shared-data allocation.
	 */
	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/0, /*len*/256, op);
}
613 | |||
/*
 * Synchronize the DMA view of the target-mode command fifo.
 * A no-op unless compiled for, and running in, the target role.
 */
void
ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
{
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) == 0)
		return;

	ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
			ahc->shared_data_dmamap,
			ahc_targetcmd_offset(ahc, 0),
			sizeof(struct target_cmd) * AHC_TMODE_CMDS,
			op);
#endif
}
627 | |||
628 | /* | ||
629 | * See if the firmware has posted any completed commands | ||
630 | * into our in-core command complete fifos. | ||
631 | */ | ||
632 | #define AHC_RUN_QOUTFIFO 0x1 | ||
633 | #define AHC_RUN_TQINFIFO 0x2 | ||
634 | u_int | ||
635 | ahc_check_cmdcmpltqueues(struct ahc_softc *ahc) | ||
636 | { | ||
637 | u_int retval; | ||
638 | |||
639 | retval = 0; | ||
640 | ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, | ||
641 | /*offset*/ahc->qoutfifonext, /*len*/1, | ||
642 | BUS_DMASYNC_POSTREAD); | ||
643 | if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) | ||
644 | retval |= AHC_RUN_QOUTFIFO; | ||
645 | #ifdef AHC_TARGET_MODE | ||
646 | if ((ahc->flags & AHC_TARGETROLE) != 0 | ||
647 | && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) { | ||
648 | ahc_dmamap_sync(ahc, ahc->shared_data_dmat, | ||
649 | ahc->shared_data_dmamap, | ||
650 | ahc_targetcmd_offset(ahc, ahc->tqinfifofnext), | ||
651 | /*len*/sizeof(struct target_cmd), | ||
652 | BUS_DMASYNC_POSTREAD); | ||
653 | if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0) | ||
654 | retval |= AHC_RUN_TQINFIFO; | ||
655 | } | ||
656 | #endif | ||
657 | return (retval); | ||
658 | } | ||
659 | |||
/*
 * Catch an interrupt from the adapter.
 * Returns 1 if the interrupt was ours, 0 if it should be passed on
 * (shared line, spurious, or interrupts disabled).
 */
int
ahc_intr(struct ahc_softc *ahc)
{
	u_int	intstat;

	if ((ahc->pause & INTEN) == 0) {
		/*
		 * Our interrupt is not enabled on the chip
		 * and may be disabled for re-entrancy reasons,
		 * so just return.  This is likely just a shared
		 * interrupt.
		 */
		return (0);
	}
	/*
	 * Instead of directly reading the interrupt status register,
	 * infer the cause of the interrupt by checking our in-core
	 * completion queues.  This avoids a costly PCI bus read in
	 * most cases.
	 */
	if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
	 && (ahc_check_cmdcmpltqueues(ahc) != 0))
		intstat = CMDCMPLT;
	else {
		intstat = ahc_inb(ahc, INTSTAT);
	}

	if ((intstat & INT_PEND) == 0) {
		/* Nothing pending: count it as unsolicited. */
#if AHC_PCI_CONFIG > 0
		/*
		 * After a burst of unsolicited interrupts (threshold 500,
		 * presumably empirical -- TODO confirm), check for a latched
		 * PCI error and let the bus-specific handler deal with it.
		 */
		if (ahc->unsolicited_ints > 500) {
			ahc->unsolicited_ints = 0;
			if ((ahc->chip & AHC_PCI) != 0
			 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
				ahc->bus_intr(ahc);
		}
#endif
		ahc->unsolicited_ints++;
		return (0);
	}
	ahc->unsolicited_ints = 0;

	if (intstat & CMDCMPLT) {
		ahc_outb(ahc, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		ahc_flush_device_writes(ahc);
		ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & BRKADRINT) {
		ahc_handle_brkadrint(ahc);
	} else if ((intstat & (SEQINT|SCSIINT)) != 0) {

		/* The chip is paused here; apply the Ultra2 pause fix. */
		ahc_pause_bug_fix(ahc);

		if ((intstat & SEQINT) != 0)
			ahc_handle_seqint(ahc, intstat);

		if ((intstat & SCSIINT) != 0)
			ahc_handle_scsiint(ahc, intstat);
	}
	return (1);
}
743 | |||
240 | /************************* Sequencer Execution Control ************************/ | 744 | /************************* Sequencer Execution Control ************************/ |
241 | /* | 745 | /* |
242 | * Restart the sequencer program from address zero | 746 | * Restart the sequencer program from address zero |
@@ -2655,7 +3159,7 @@ proto_violation_reset: | |||
2655 | */ | 3159 | */ |
2656 | static void | 3160 | static void |
2657 | ahc_handle_message_phase(struct ahc_softc *ahc) | 3161 | ahc_handle_message_phase(struct ahc_softc *ahc) |
2658 | { | 3162 | { |
2659 | struct ahc_devinfo devinfo; | 3163 | struct ahc_devinfo devinfo; |
2660 | u_int bus_phase; | 3164 | u_int bus_phase; |
2661 | int end_session; | 3165 | int end_session; |
@@ -5707,7 +6211,7 @@ ahc_add_curscb_to_free_list(struct ahc_softc *ahc) | |||
5707 | */ | 6211 | */ |
5708 | static u_int | 6212 | static u_int |
5709 | ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) | 6213 | ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) |
5710 | { | 6214 | { |
5711 | u_int curscb, next; | 6215 | u_int curscb, next; |
5712 | 6216 | ||
5713 | /* | 6217 | /* |