author		Denys Vlasenko <vda.linux@googlemail.com>	2008-03-22 23:41:22 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2008-04-24 10:09:18 -0400
commit		be0d67680d524981dd65c661efe3c9cbd52a684f (patch)
tree		c9f48421ee7396bcc593c0a0ef8415dd18e1eaba
parent		93c20a59af4624aedf53f8320606b355aa951bc1 (diff)

[SCSI] aic7xxx, aic79xx: deinline functions

Deinlines and moves big functions from .h to .c files. Adds prototypes
for ahc_lookup_scb and ahd_lookup_scb to .h files.

Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
-rw-r--r--	drivers/scsi/aic7xxx/aic79xx.reg	|   2
-rw-r--r--	drivers/scsi/aic7xxx/aic79xx_core.c	| 771
-rw-r--r--	drivers/scsi/aic7xxx/aic79xx_inline.h	| 903
-rw-r--r--	drivers/scsi/aic7xxx/aic79xx_osm.c	| 162
-rw-r--r--	drivers/scsi/aic7xxx/aic79xx_osm.h	| 176
-rw-r--r--	drivers/scsi/aic7xxx/aic7xxx.reg	|   2
-rw-r--r--	drivers/scsi/aic7xxx/aic7xxx_core.c	| 508
-rw-r--r--	drivers/scsi/aic7xxx/aic7xxx_inline.h	| 609
-rw-r--r--	drivers/scsi/aic7xxx/aic7xxx_osm.c	|  77
-rw-r--r--	drivers/scsi/aic7xxx/aic7xxx_osm.h	| 137
-rw-r--r--	drivers/scsi/aic7xxx/aic7xxx_osm_pci.c	|  51
11 files changed, 1719 insertions, 1679 deletions
diff --git a/drivers/scsi/aic7xxx/aic79xx.reg b/drivers/scsi/aic7xxx/aic79xx.reg
index be14e2ecb8f7..6ab514d7f980 100644
--- a/drivers/scsi/aic7xxx/aic79xx.reg
+++ b/drivers/scsi/aic7xxx/aic79xx.reg
@@ -3649,7 +3649,7 @@ scratch_ram {
3649	KERNEL_TQINPOS {
3650		size		1
3651	}
3652	TQINPOS {
3653		size		1
3654	}
3655	/*
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index ade0fb8fbdb2..336f4bea251b 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -266,8 +266,752 @@ static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
266			    int target, char channel, int lun,
267			    u_int tag, role_t role);
268
269/******************************** Private Inlines *****************************/
269/************************ Sequencer Execution Control *************************/
270void
271ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
272{
273 if (ahd->src_mode == src && ahd->dst_mode == dst)
274 return;
275#ifdef AHD_DEBUG
276 if (ahd->src_mode == AHD_MODE_UNKNOWN
277 || ahd->dst_mode == AHD_MODE_UNKNOWN)
278 panic("Setting mode prior to saving it.\n");
279 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
280 printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
281 ahd_build_mode_state(ahd, src, dst));
282#endif
283 ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
284 ahd->src_mode = src;
285 ahd->dst_mode = dst;
286}
287
288void
289ahd_update_modes(struct ahd_softc *ahd)
290{
291 ahd_mode_state mode_ptr;
292 ahd_mode src;
293 ahd_mode dst;
294
295 mode_ptr = ahd_inb(ahd, MODE_PTR);
296#ifdef AHD_DEBUG
297 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
298 printf("Reading mode 0x%x\n", mode_ptr);
299#endif
300 ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
301 ahd_known_modes(ahd, src, dst);
302}
303
304void
305ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
306 ahd_mode dstmode, const char *file, int line)
307{
308#ifdef AHD_DEBUG
309 if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
310 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
311 panic("%s:%s:%d: Mode assertion failed.\n",
312 ahd_name(ahd), file, line);
313 }
314#endif
315}
316
317#define AHD_ASSERT_MODES(ahd, source, dest) \
318 ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
319
320ahd_mode_state
321ahd_save_modes(struct ahd_softc *ahd)
322{
323 if (ahd->src_mode == AHD_MODE_UNKNOWN
324 || ahd->dst_mode == AHD_MODE_UNKNOWN)
325 ahd_update_modes(ahd);
326
327 return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
328}
329
330void
331ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
332{
333 ahd_mode src;
334 ahd_mode dst;
335
336 ahd_extract_mode_state(ahd, state, &src, &dst);
337 ahd_set_modes(ahd, src, dst);
338}
339
340/*
341 * Determine whether the sequencer has halted code execution.
342 * Returns non-zero status if the sequencer is stopped.
343 */
344int
345ahd_is_paused(struct ahd_softc *ahd)
346{
347 return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
348}
349
350/*
351 * Request that the sequencer stop and wait, indefinitely, for it
352 * to stop. The sequencer will only acknowledge that it is paused
353 * once it has reached an instruction boundary and PAUSEDIS is
354 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
355 * for critical sections.
356 */
357void
358ahd_pause(struct ahd_softc *ahd)
359{
360 ahd_outb(ahd, HCNTRL, ahd->pause);
361
362 /*
363 * Since the sequencer can disable pausing in a critical section, we
364 * must loop until it actually stops.
365 */
366 while (ahd_is_paused(ahd) == 0)
367 ;
368}
369
370/*
371 * Allow the sequencer to continue program execution.
372 * We check here to ensure that no additional interrupt
373 * sources that would cause the sequencer to halt have been
374 * asserted. If, for example, a SCSI bus reset is detected
375 * while we are fielding a different, pausing, interrupt type,
376 * we don't want to release the sequencer before going back
377 * into our interrupt handler and dealing with this new
378 * condition.
379 */
380void
381ahd_unpause(struct ahd_softc *ahd)
382{
383 /*
384 * Automatically restore our modes to those saved
385 * prior to the first change of the mode.
386 */
387 if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
388 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
389 if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
390 ahd_reset_cmds_pending(ahd);
391 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
392 }
393
394 if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
395 ahd_outb(ahd, HCNTRL, ahd->unpause);
396
397 ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
398}
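
As an illustration (not part of this commit): the usual calling pattern for the pause/mode helpers above, assuming the aic79xx driver headers. The helper name and the register poked are only examples.

static void
ahd_example_poke_hardware(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;

	ahd_pause(ahd);				/* halt the sequencer first */
	saved_modes = ahd_save_modes(ahd);	/* remember current MODE_PTR */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, CLRINT, CLRSEQINT);	/* example register access */
	ahd_restore_modes(ahd, saved_modes);	/* put MODE_PTR back */
	ahd_unpause(ahd);			/* let the sequencer run again */
}
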
399
400/*********************** Scatter Gather List Handling *************************/
401void *
402ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
403 void *sgptr, dma_addr_t addr, bus_size_t len, int last)
404{
405 scb->sg_count++;
406 if (sizeof(dma_addr_t) > 4
407 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
408 struct ahd_dma64_seg *sg;
409
410 sg = (struct ahd_dma64_seg *)sgptr;
411 sg->addr = ahd_htole64(addr);
412 sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
413 return (sg + 1);
414 } else {
415 struct ahd_dma_seg *sg;
416
417 sg = (struct ahd_dma_seg *)sgptr;
418 sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
419 sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
420 | (last ? AHD_DMA_LAST_SEG : 0));
421 return (sg + 1);
422 }
423}
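
As an illustration (not part of this commit): how the 32-bit branch above folds bits 32..38 of a 39-bit DMA address into the top of the length word. The AHD_DMA_LAST_SEG value below is an assumption, and the snippet is a standalone sketch rather than driver code.

#include <stdint.h>
#include <stdio.h>

#define AHD_DMA_LAST_SEG	0x80000000u	/* assumed "last segment" flag */

int main(void)
{
	uint64_t addr = (0x5FULL << 32) | 0x12345678;	/* 39-bit bus address */
	uint32_t len  = 0x1000;
	uint32_t sg_addr = (uint32_t)(addr & 0xFFFFFFFF);
	uint32_t sg_len  = len
			 | (uint32_t)((addr >> 8) & 0x7F000000)	/* bits 32..38 */
			 | AHD_DMA_LAST_SEG;

	printf("addr word 0x%08x, len word 0x%08x\n",
	       (unsigned)sg_addr, (unsigned)sg_len);
	return 0;
}
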
424
425void
426ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
427{
428 /* XXX Handle target mode SCBs. */
429 scb->crc_retry_count = 0;
430 if ((scb->flags & SCB_PACKETIZED) != 0) {
431 /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
432 scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
433 } else {
434 if (ahd_get_transfer_length(scb) & 0x01)
435 scb->hscb->task_attribute = SCB_XFERLEN_ODD;
436 else
437 scb->hscb->task_attribute = 0;
438 }
439
440 if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
441 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
442 scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
443 ahd_htole32(scb->sense_busaddr);
444}
445
446void
447ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
448{
449 /*
450 * Copy the first SG into the "current" data ponter area.
451 */
452 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
453 struct ahd_dma64_seg *sg;
454
455 sg = (struct ahd_dma64_seg *)scb->sg_list;
456 scb->hscb->dataptr = sg->addr;
457 scb->hscb->datacnt = sg->len;
458 } else {
459 struct ahd_dma_seg *sg;
460 uint32_t *dataptr_words;
461
462 sg = (struct ahd_dma_seg *)scb->sg_list;
463 dataptr_words = (uint32_t*)&scb->hscb->dataptr;
464 dataptr_words[0] = sg->addr;
465 dataptr_words[1] = 0;
466 if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
467 uint64_t high_addr;
468
469 high_addr = ahd_le32toh(sg->len) & 0x7F000000;
470 scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
471 }
472 scb->hscb->datacnt = sg->len;
473 }
474 /*
475 * Note where to find the SG entries in bus space.
476 * We also set the full residual flag which the
477 * sequencer will clear as soon as a data transfer
478 * occurs.
479 */
480 scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
481}
482
483void
484ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
485{
486 scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
487 scb->hscb->dataptr = 0;
488 scb->hscb->datacnt = 0;
489}
490
491/************************** Memory mapping routines ***************************/
492void *
493ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
494{
495 dma_addr_t sg_offset;
496
497 /* sg_list_phys points to entry 1, not 0 */
498 sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
499 return ((uint8_t *)scb->sg_list + sg_offset);
500}
501
502uint32_t
503ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
504{
505 dma_addr_t sg_offset;
506
507 /* sg_list_phys points to entry 1, not 0 */
508 sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
509 - ahd_sg_size(ahd);
510
511 return (scb->sg_list_busaddr + sg_offset);
512}
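
As an illustration (not part of this commit): because scb->sg_list_busaddr refers to SG entry 1 rather than entry 0, the two converters above are exact inverses. A minimal sketch, assuming the driver headers.

static void
ahd_example_sg_roundtrip(struct ahd_softc *ahd, struct scb *scb)
{
	void *sg = scb->sg_list;			/* entry 0 */
	uint32_t busaddr = ahd_sg_virt_to_bus(ahd, scb, sg);

	/* busaddr equals scb->sg_list_busaddr - ahd_sg_size(ahd) here. */
	BUG_ON(ahd_sg_bus_to_virt(ahd, scb, busaddr) != sg);
}
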
513
514void
515ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
516{
517 ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
518 scb->hscb_map->dmamap,
519 /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
520 /*len*/sizeof(*scb->hscb), op);
521}
522
523void
524ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
525{
526 if (scb->sg_count == 0)
527 return;
528
529 ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
530 scb->sg_map->dmamap,
531 /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
532 /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
533}
534
535void
536ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
537{
538 ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
539 scb->sense_map->dmamap,
540 /*offset*/scb->sense_busaddr,
541 /*len*/AHD_SENSE_BUFSIZE, op);
542}
543
544uint32_t
545ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
546{
547 return (((uint8_t *)&ahd->targetcmds[index])
548 - (uint8_t *)ahd->qoutfifo);
549}
550
551/*********************** Miscelaneous Support Functions ***********************/
552/*
553 * Return pointers to the transfer negotiation information
554 * for the specified our_id/remote_id pair.
555 */
556struct ahd_initiator_tinfo *
557ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
558 u_int remote_id, struct ahd_tmode_tstate **tstate)
559{
560 /*
561 * Transfer data structures are stored from the perspective
562 * of the target role. Since the parameters for a connection
563 * in the initiator role to a given target are the same as
564 * when the roles are reversed, we pretend we are the target.
565 */
566 if (channel == 'B')
567 our_id += 8;
568 *tstate = ahd->enabled_targets[our_id];
569 return (&(*tstate)->transinfo[remote_id]);
570}
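
As an illustration (not part of this commit): a minimal sketch, assuming the driver headers, of looking up the negotiation data for one initiator/target pair via the helper above.

static u_int
ahd_example_current_period(struct ahd_softc *ahd, u_int target)
{
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo;

	tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, target, &tstate);
	return (tinfo->curr.period);	/* currently negotiated sync period */
}
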
571
572uint16_t
573ahd_inw(struct ahd_softc *ahd, u_int port)
574{
575 /*
576 * Read high byte first as some registers increment
577 * or have other side effects when the low byte is
578 * read.
579 */
580 uint16_t r = ahd_inb(ahd, port+1) << 8;
581 return r | ahd_inb(ahd, port);
582}
583
584void
585ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
586{
587 /*
588 * Write low byte first to accomodate registers
589 * such as PRGMCNT where the order maters.
590 */
591 ahd_outb(ahd, port, value & 0xFF);
592 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
593}
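
As an illustration (not part of this commit): why the byte order in the composed accessors matters. A sketch, assuming the driver headers, that reads and rewrites the 16-bit sequencer program counter mentioned in the comment above.

static u_int
ahd_example_swap_prgmcnt(struct ahd_softc *ahd, u_int new_pc)
{
	u_int old_pc;

	old_pc = ahd_inw(ahd, PRGMCNT);		/* high byte is read first */
	ahd_outw(ahd, PRGMCNT, new_pc);		/* low byte is written first */
	return (old_pc);
}
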
594
595uint32_t
596ahd_inl(struct ahd_softc *ahd, u_int port)
597{
598 return ((ahd_inb(ahd, port))
599 | (ahd_inb(ahd, port+1) << 8)
600 | (ahd_inb(ahd, port+2) << 16)
601 | (ahd_inb(ahd, port+3) << 24));
602}
603
604void
605ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
606{
607 ahd_outb(ahd, port, (value) & 0xFF);
608 ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
609 ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
610 ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
611}
612
613uint64_t
614ahd_inq(struct ahd_softc *ahd, u_int port)
615{
616 return ((ahd_inb(ahd, port))
617 | (ahd_inb(ahd, port+1) << 8)
618 | (ahd_inb(ahd, port+2) << 16)
619 | (ahd_inb(ahd, port+3) << 24)
620 | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
621 | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
622 | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
623 | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
624}
625
626void
627ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
628{
629 ahd_outb(ahd, port, value & 0xFF);
630 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
631 ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
632 ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
633 ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
634 ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
635 ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
636 ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
637}
638
639u_int
640ahd_get_scbptr(struct ahd_softc *ahd)
641{
642 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
643 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
644 return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
645}
646
647void
648ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
649{
650 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
651 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
652 ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
653 ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
654}
655
656u_int
657ahd_get_hnscb_qoff(struct ahd_softc *ahd)
658{
659 return (ahd_inw_atomic(ahd, HNSCB_QOFF));
660}
270 661
662void
663ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
664{
665 ahd_outw_atomic(ahd, HNSCB_QOFF, value);
666}
667
668u_int
669ahd_get_hescb_qoff(struct ahd_softc *ahd)
670{
671 return (ahd_inb(ahd, HESCB_QOFF));
672}
673
674void
675ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
676{
677 ahd_outb(ahd, HESCB_QOFF, value);
678}
679
680u_int
681ahd_get_snscb_qoff(struct ahd_softc *ahd)
682{
683 u_int oldvalue;
684
685 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
686 oldvalue = ahd_inw(ahd, SNSCB_QOFF);
687 ahd_outw(ahd, SNSCB_QOFF, oldvalue);
688 return (oldvalue);
689}
690
691void
692ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
693{
694 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
695 ahd_outw(ahd, SNSCB_QOFF, value);
696}
697
698u_int
699ahd_get_sescb_qoff(struct ahd_softc *ahd)
700{
701 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
702 return (ahd_inb(ahd, SESCB_QOFF));
703}
704
705void
706ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
707{
708 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
709 ahd_outb(ahd, SESCB_QOFF, value);
710}
711
712u_int
713ahd_get_sdscb_qoff(struct ahd_softc *ahd)
714{
715 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
716 return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
717}
718
719void
720ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
721{
722 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
723 ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
724 ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
725}
726
727u_int
728ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
729{
730 u_int value;
731
732 /*
733 * Workaround PCI-X Rev A. hardware bug.
734 * After a host read of SCB memory, the chip
735 * may become confused into thinking prefetch
736 * was required. This starts the discard timer
737 * running and can cause an unexpected discard
738 * timer interrupt. The work around is to read
739 * a normal register prior to the exhaustion of
740 * the discard timer. The mode pointer register
741 * has no side effects and so serves well for
742 * this purpose.
743 *
744 * Razor #528
745 */
746 value = ahd_inb(ahd, offset);
747 if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
748 ahd_inb(ahd, MODE_PTR);
749 return (value);
750}
751
752u_int
753ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
754{
755 return (ahd_inb_scbram(ahd, offset)
756 | (ahd_inb_scbram(ahd, offset+1) << 8));
757}
758
759uint32_t
760ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
761{
762 return (ahd_inw_scbram(ahd, offset)
763 | (ahd_inw_scbram(ahd, offset+2) << 16));
764}
765
766uint64_t
767ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
768{
769 return (ahd_inl_scbram(ahd, offset)
770 | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
771}
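
As an illustration (not part of this commit): the composed SCB-RAM readers let callers fetch wider SCB fields while still applying the PCI-X workaround on every byte. A sketch, assuming the driver headers and a paused sequencer; SCB_DATACNT is assumed here to name the 32-bit data-count field of the selected SCB.

static uint32_t
ahd_example_scb_datacnt(struct ahd_softc *ahd, u_int tag)
{
	uint32_t datacnt;
	u_int saved_scbptr = ahd_get_scbptr(ahd);

	ahd_set_scbptr(ahd, tag);		/* select the SCB of interest */
	datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
	ahd_set_scbptr(ahd, saved_scbptr);	/* restore the old selection */
	return (datacnt);
}
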
772
773struct scb *
774ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
775{
776 struct scb* scb;
777
778 if (tag >= AHD_SCB_MAX)
779 return (NULL);
780 scb = ahd->scb_data.scbindex[tag];
781 if (scb != NULL)
782 ahd_sync_scb(ahd, scb,
783 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
784 return (scb);
785}
786
787void
788ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
789{
790 struct hardware_scb *q_hscb;
791 struct map_node *q_hscb_map;
792 uint32_t saved_hscb_busaddr;
793
794 /*
795 * Our queuing method is a bit tricky. The card
796 * knows in advance which HSCB (by address) to download,
797 * and we can't disappoint it. To achieve this, the next
798 * HSCB to download is saved off in ahd->next_queued_hscb.
799 * When we are called to queue "an arbitrary scb",
800 * we copy the contents of the incoming HSCB to the one
801 * the sequencer knows about, swap HSCB pointers and
802 * finally assign the SCB to the tag indexed location
803 * in the scb_array. This makes sure that we can still
804 * locate the correct SCB by SCB_TAG.
805 */
806 q_hscb = ahd->next_queued_hscb;
807 q_hscb_map = ahd->next_queued_hscb_map;
808 saved_hscb_busaddr = q_hscb->hscb_busaddr;
809 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
810 q_hscb->hscb_busaddr = saved_hscb_busaddr;
811 q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
812
813 /* Now swap HSCB pointers. */
814 ahd->next_queued_hscb = scb->hscb;
815 ahd->next_queued_hscb_map = scb->hscb_map;
816 scb->hscb = q_hscb;
817 scb->hscb_map = q_hscb_map;
818
819 /* Now define the mapping from tag to SCB in the scbindex */
820 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
821}
822
823/*
824 * Tell the sequencer about a new transaction to execute.
825 */
826void
827ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
828{
829 ahd_swap_with_next_hscb(ahd, scb);
830
831 if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
832 panic("Attempt to queue invalid SCB tag %x\n",
833 SCB_GET_TAG(scb));
834
835 /*
836 * Keep a history of SCBs we've downloaded in the qinfifo.
837 */
838 ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
839 ahd->qinfifonext++;
840
841 if (scb->sg_count != 0)
842 ahd_setup_data_scb(ahd, scb);
843 else
844 ahd_setup_noxfer_scb(ahd, scb);
845 ahd_setup_scb_common(ahd, scb);
846
847 /*
848 * Make sure our data is consistent from the
849 * perspective of the adapter.
850 */
851 ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
852
853#ifdef AHD_DEBUG
854 if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
855 uint64_t host_dataptr;
856
857 host_dataptr = ahd_le64toh(scb->hscb->dataptr);
858 printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
859 ahd_name(ahd),
860 SCB_GET_TAG(scb), scb->hscb->scsiid,
861 ahd_le32toh(scb->hscb->hscb_busaddr),
862 (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
863 (u_int)(host_dataptr & 0xFFFFFFFF),
864 ahd_le32toh(scb->hscb->datacnt));
865 }
866#endif
867 /* Tell the adapter about the newly queued SCB */
868 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
869}
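
As an illustration (not part of this commit): a minimal sketch, assuming the driver headers, of what the queueing primitive above guarantees to its caller once it returns.

static void
ahd_example_submit(struct ahd_softc *ahd, struct scb *scb)
{
	ahd_queue_scb(ahd, scb);	/* swaps HSCBs and bumps HNSCB_QOFF */

	/* The tag-to-SCB mapping set up during the swap must now hold. */
	BUG_ON(ahd_lookup_scb(ahd, SCB_GET_TAG(scb)) != scb);
}
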
870
871/************************** Interrupt Processing ******************************/
872void
873ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
874{
875 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
876 /*offset*/0,
877 /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
878}
879
880void
881ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
882{
883#ifdef AHD_TARGET_MODE
884 if ((ahd->flags & AHD_TARGETROLE) != 0) {
885 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
886 ahd->shared_data_map.dmamap,
887 ahd_targetcmd_offset(ahd, 0),
888 sizeof(struct target_cmd) * AHD_TMODE_CMDS,
889 op);
890 }
891#endif
892}
893
894/*
895 * See if the firmware has posted any completed commands
896 * into our in-core command complete fifos.
897 */
898#define AHD_RUN_QOUTFIFO 0x1
899#define AHD_RUN_TQINFIFO 0x2
900u_int
901ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
902{
903 u_int retval;
904
905 retval = 0;
906 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
907 /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
908 /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
909 if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
910 == ahd->qoutfifonext_valid_tag)
911 retval |= AHD_RUN_QOUTFIFO;
912#ifdef AHD_TARGET_MODE
913 if ((ahd->flags & AHD_TARGETROLE) != 0
914 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
915 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
916 ahd->shared_data_map.dmamap,
917 ahd_targetcmd_offset(ahd, ahd->tqinfifofnext),
918 /*len*/sizeof(struct target_cmd),
919 BUS_DMASYNC_POSTREAD);
920 if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
921 retval |= AHD_RUN_TQINFIFO;
922 }
923#endif
924 return (retval);
925}
926
927/*
928 * Catch an interrupt from the adapter
929 */
930int
931ahd_intr(struct ahd_softc *ahd)
932{
933 u_int intstat;
934
935 if ((ahd->pause & INTEN) == 0) {
936 /*
937 * Our interrupt is not enabled on the chip
938 * and may be disabled for re-entrancy reasons,
939 * so just return. This is likely just a shared
940 * interrupt.
941 */
942 return (0);
943 }
944
945 /*
946 * Instead of directly reading the interrupt status register,
947 * infer the cause of the interrupt by checking our in-core
948 * completion queues. This avoids a costly PCI bus read in
949 * most cases.
950 */
951 if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
952 && (ahd_check_cmdcmpltqueues(ahd) != 0))
953 intstat = CMDCMPLT;
954 else
955 intstat = ahd_inb(ahd, INTSTAT);
956
957 if ((intstat & INT_PEND) == 0)
958 return (0);
959
960 if (intstat & CMDCMPLT) {
961 ahd_outb(ahd, CLRINT, CLRCMDINT);
962
963 /*
964 * Ensure that the chip sees that we've cleared
965 * this interrupt before we walk the output fifo.
966 * Otherwise, we may, due to posted bus writes,
967 * clear the interrupt after we finish the scan,
968 * and after the sequencer has added new entries
969 * and asserted the interrupt again.
970 */
971 if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
972 if (ahd_is_paused(ahd)) {
973 /*
974 * Potentially lost SEQINT.
975 * If SEQINTCODE is non-zero,
976 * simulate the SEQINT.
977 */
978 if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
979 intstat |= SEQINT;
980 }
981 } else {
982 ahd_flush_device_writes(ahd);
983 }
984 ahd_run_qoutfifo(ahd);
985 ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
986 ahd->cmdcmplt_total++;
987#ifdef AHD_TARGET_MODE
988 if ((ahd->flags & AHD_TARGETROLE) != 0)
989 ahd_run_tqinfifo(ahd, /*paused*/FALSE);
990#endif
991 }
992
993 /*
994 * Handle statuses that may invalidate our cached
995 * copy of INTSTAT separately.
996 */
997 if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
998 /* Hot eject. Do nothing */
999 } else if (intstat & HWERRINT) {
1000 ahd_handle_hwerrint(ahd);
1001 } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
1002 ahd->bus_intr(ahd);
1003 } else {
1004
1005 if ((intstat & SEQINT) != 0)
1006 ahd_handle_seqint(ahd, intstat);
1007
1008 if ((intstat & SCSIINT) != 0)
1009 ahd_handle_scsiint(ahd, intstat);
1010 }
1011 return (1);
1012}
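
As an illustration (not part of this commit): ahd_intr() returns non-zero only when it serviced the chip, so an OS interrupt wrapper can translate that into the shared-interrupt convention. A simplified sketch, assuming the driver headers; the real wrapper lives in aic79xx_osm.c.

static irqreturn_t
ahd_example_isr(int irq, void *dev_id)
{
	struct ahd_softc *ahd = dev_id;
	unsigned long flags;
	int ours;

	ahd_lock(ahd, &flags);
	ours = ahd_intr(ahd);		/* 0 means "not our interrupt" */
	ahd_unlock(ahd, &flags);

	return (ours ? IRQ_HANDLED : IRQ_NONE);
}
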
1013
1014/******************************** Private Inlines *****************************/
271static __inline void
272ahd_assert_atn(struct ahd_softc *ahd)
273{
@@ -280,7 +1024,7 @@ ahd_assert_atn(struct ahd_softc *ahd)
280 * are currently in a packetized transfer. We could
281 * just as easily be sending or receiving a message.
282 */
283static __inline int
1027static int
284ahd_currently_packetized(struct ahd_softc *ahd)
285{
286	ahd_mode_state saved_modes;
@@ -3941,7 +4685,7 @@ ahd_clear_msg_state(struct ahd_softc *ahd)
3941 */
3942static void
3943ahd_handle_message_phase(struct ahd_softc *ahd)
3944{
3945	struct ahd_devinfo devinfo;
3946	u_int bus_phase;
3947	int end_session;
@@ -5983,8 +6727,7 @@ found:
5983 */
5984void
5985ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
5986{
5987
5988	/* Clean up for the next user */
5989	scb->flags = SCB_FLAG_NONE;
5990	scb->hscb->control = 0;
@@ -6272,6 +7015,24 @@ static const char *termstat_strings[] = {
6272 "Not Configured" 7015 "Not Configured"
6273}; 7016};
6274 7017
7018/***************************** Timer Facilities *******************************/
7019#define ahd_timer_init init_timer
7020#define ahd_timer_stop del_timer_sync
7021typedef void ahd_linux_callback_t (u_long);
7022
7023static void
7024ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
7025{
7026 struct ahd_softc *ahd;
7027
7028 ahd = (struct ahd_softc *)arg;
7029 del_timer(timer);
7030 timer->data = (u_long)arg;
7031 timer->expires = jiffies + (usec * HZ)/1000000;
7032 timer->function = (ahd_linux_callback_t*)func;
7033 add_timer(timer);
7034}
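
As an illustration (not part of this commit): arming a periodic callback with the helper above. A sketch, assuming the driver headers; the stat_timer field and the 500 ms interval are assumptions, and the callback simply receives the pointer passed as 'arg'.

static void
ahd_example_stat_timer(void *arg)
{
	struct ahd_softc *ahd = arg;

	/* ... sample ahd->cmdcmplt_total and friends here ... */

	/* Re-arm for another interval. */
	ahd_timer_reset(&ahd->stat_timer, 500000, ahd_example_stat_timer, ahd);
}
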
7035
6275/*
6276 * Start the board, ready for normal operation
6277 */
diff --git a/drivers/scsi/aic7xxx/aic79xx_inline.h b/drivers/scsi/aic7xxx/aic79xx_inline.h
index 45e55575a0fa..875137262156 100644
--- a/drivers/scsi/aic7xxx/aic79xx_inline.h
+++ b/drivers/scsi/aic7xxx/aic79xx_inline.h
@@ -63,18 +63,19 @@ static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd,
63static __inline void ahd_extract_mode_state(struct ahd_softc *ahd,
64					      ahd_mode_state state,
65					      ahd_mode *src, ahd_mode *dst);
66static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
67				    ahd_mode dst);
68static __inline void ahd_update_modes(struct ahd_softc *ahd);
69static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
70				       ahd_mode dstmode, const char *file,
71				       int line);
72static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
73static __inline void ahd_restore_modes(struct ahd_softc *ahd,
74				       ahd_mode_state state);
75static __inline int ahd_is_paused(struct ahd_softc *ahd);
76static __inline void ahd_pause(struct ahd_softc *ahd);
77static __inline void ahd_unpause(struct ahd_softc *ahd);
66
67void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
68		   ahd_mode dst);
69void ahd_update_modes(struct ahd_softc *ahd);
70void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
71		      ahd_mode dstmode, const char *file,
72		      int line);
73ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
74void ahd_restore_modes(struct ahd_softc *ahd,
75		       ahd_mode_state state);
76int ahd_is_paused(struct ahd_softc *ahd);
77void ahd_pause(struct ahd_softc *ahd);
78void ahd_unpause(struct ahd_softc *ahd);
78
79static __inline void
80ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
@@ -99,256 +100,37 @@ ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
99	*dst = (state & DST_MODE) >> DST_MODE_SHIFT;
100}
101
102static __inline void
103ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
104{
105 if (ahd->src_mode == src && ahd->dst_mode == dst)
106 return;
107#ifdef AHD_DEBUG
108 if (ahd->src_mode == AHD_MODE_UNKNOWN
109 || ahd->dst_mode == AHD_MODE_UNKNOWN)
110 panic("Setting mode prior to saving it.\n");
111 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
112 printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
113 ahd_build_mode_state(ahd, src, dst));
114#endif
115 ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
116 ahd->src_mode = src;
117 ahd->dst_mode = dst;
118}
119
120static __inline void
121ahd_update_modes(struct ahd_softc *ahd)
122{
123 ahd_mode_state mode_ptr;
124 ahd_mode src;
125 ahd_mode dst;
126
127 mode_ptr = ahd_inb(ahd, MODE_PTR);
128#ifdef AHD_DEBUG
129 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
130 printf("Reading mode 0x%x\n", mode_ptr);
131#endif
132 ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
133 ahd_known_modes(ahd, src, dst);
134}
135
136static __inline void
137ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
138 ahd_mode dstmode, const char *file, int line)
139{
140#ifdef AHD_DEBUG
141 if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
142 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
143 panic("%s:%s:%d: Mode assertion failed.\n",
144 ahd_name(ahd), file, line);
145 }
146#endif
147}
148
149static __inline ahd_mode_state
150ahd_save_modes(struct ahd_softc *ahd)
151{
152 if (ahd->src_mode == AHD_MODE_UNKNOWN
153 || ahd->dst_mode == AHD_MODE_UNKNOWN)
154 ahd_update_modes(ahd);
155
156 return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
157}
158
159static __inline void
160ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
161{
162 ahd_mode src;
163 ahd_mode dst;
164
165 ahd_extract_mode_state(ahd, state, &src, &dst);
166 ahd_set_modes(ahd, src, dst);
167}
168
169#define AHD_ASSERT_MODES(ahd, source, dest) \
170 ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
171
172/*
173 * Determine whether the sequencer has halted code execution.
174 * Returns non-zero status if the sequencer is stopped.
175 */
176static __inline int
177ahd_is_paused(struct ahd_softc *ahd)
178{
179 return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
180}
181
182/*
183 * Request that the sequencer stop and wait, indefinitely, for it
184 * to stop. The sequencer will only acknowledge that it is paused
185 * once it has reached an instruction boundary and PAUSEDIS is
186 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
187 * for critical sections.
188 */
189static __inline void
190ahd_pause(struct ahd_softc *ahd)
191{
192 ahd_outb(ahd, HCNTRL, ahd->pause);
193
194 /*
195 * Since the sequencer can disable pausing in a critical section, we
196 * must loop until it actually stops.
197 */
198 while (ahd_is_paused(ahd) == 0)
199 ;
200}
201
202/*
203 * Allow the sequencer to continue program execution.
204 * We check here to ensure that no additional interrupt
205 * sources that would cause the sequencer to halt have been
206 * asserted. If, for example, a SCSI bus reset is detected
207 * while we are fielding a different, pausing, interrupt type,
208 * we don't want to release the sequencer before going back
209 * into our interrupt handler and dealing with this new
210 * condition.
211 */
212static __inline void
213ahd_unpause(struct ahd_softc *ahd)
214{
215 /*
216 * Automatically restore our modes to those saved
217 * prior to the first change of the mode.
218 */
219 if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
220 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
221 if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
222 ahd_reset_cmds_pending(ahd);
223 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
224 }
225
226 if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
227 ahd_outb(ahd, HCNTRL, ahd->unpause);
228
229 ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
230}
231
232/*********************** Scatter Gather List Handling *************************/
233static __inline void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
234				    void *sgptr, dma_addr_t addr,
235				    bus_size_t len, int last);
236static __inline void ahd_setup_scb_common(struct ahd_softc *ahd,
237					   struct scb *scb);
238static __inline void ahd_setup_data_scb(struct ahd_softc *ahd,
239					 struct scb *scb);
240static __inline void ahd_setup_noxfer_scb(struct ahd_softc *ahd,
241					   struct scb *scb);
242
104void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
105		    void *sgptr, dma_addr_t addr,
106		    bus_size_t len, int last);
107void ahd_setup_scb_common(struct ahd_softc *ahd,
108			   struct scb *scb);
109void ahd_setup_data_scb(struct ahd_softc *ahd,
110			 struct scb *scb);
111void ahd_setup_noxfer_scb(struct ahd_softc *ahd,
112			   struct scb *scb);
243static __inline void *
244ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
245 void *sgptr, dma_addr_t addr, bus_size_t len, int last)
246{
247 scb->sg_count++;
248 if (sizeof(dma_addr_t) > 4
249 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
250 struct ahd_dma64_seg *sg;
251
252 sg = (struct ahd_dma64_seg *)sgptr;
253 sg->addr = ahd_htole64(addr);
254 sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
255 return (sg + 1);
256 } else {
257 struct ahd_dma_seg *sg;
258
259 sg = (struct ahd_dma_seg *)sgptr;
260 sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
261 sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
262 | (last ? AHD_DMA_LAST_SEG : 0));
263 return (sg + 1);
264 }
265}
266
267static __inline void
268ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
269{
270 /* XXX Handle target mode SCBs. */
271 scb->crc_retry_count = 0;
272 if ((scb->flags & SCB_PACKETIZED) != 0) {
273 /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
274 scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
275 } else {
276 if (ahd_get_transfer_length(scb) & 0x01)
277 scb->hscb->task_attribute = SCB_XFERLEN_ODD;
278 else
279 scb->hscb->task_attribute = 0;
280 }
281
282 if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
283 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
284 scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
285 ahd_htole32(scb->sense_busaddr);
286}
287
288static __inline void
289ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
290{
291 /*
292 * Copy the first SG into the "current" data ponter area.
293 */
294 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
295 struct ahd_dma64_seg *sg;
296
297 sg = (struct ahd_dma64_seg *)scb->sg_list;
298 scb->hscb->dataptr = sg->addr;
299 scb->hscb->datacnt = sg->len;
300 } else {
301 struct ahd_dma_seg *sg;
302 uint32_t *dataptr_words;
303
304 sg = (struct ahd_dma_seg *)scb->sg_list;
305 dataptr_words = (uint32_t*)&scb->hscb->dataptr;
306 dataptr_words[0] = sg->addr;
307 dataptr_words[1] = 0;
308 if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
309 uint64_t high_addr;
310
311 high_addr = ahd_le32toh(sg->len) & 0x7F000000;
312 scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
313 }
314 scb->hscb->datacnt = sg->len;
315 }
316 /*
317 * Note where to find the SG entries in bus space.
318 * We also set the full residual flag which the
319 * sequencer will clear as soon as a data transfer
320 * occurs.
321 */
322 scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
323}
324
325static __inline void
326ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
327{
328 scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
329 scb->hscb->dataptr = 0;
330 scb->hscb->datacnt = 0;
331}
332 113
333/************************** Memory mapping routines ***************************/
334static __inline size_t ahd_sg_size(struct ahd_softc *ahd);
335static __inline void *
336			ahd_sg_bus_to_virt(struct ahd_softc *ahd,
337					   struct scb *scb,
338					   uint32_t sg_busaddr);
339static __inline uint32_t
340			ahd_sg_virt_to_bus(struct ahd_softc *ahd,
341					   struct scb *scb,
342					   void *sg);
343static __inline void ahd_sync_scb(struct ahd_softc *ahd,
344				   struct scb *scb, int op);
345static __inline void ahd_sync_sglist(struct ahd_softc *ahd,
346				      struct scb *scb, int op);
347static __inline void ahd_sync_sense(struct ahd_softc *ahd,
348				     struct scb *scb, int op);
349static __inline uint32_t
350			ahd_targetcmd_offset(struct ahd_softc *ahd,
351					     u_int index);
116
117void *
118	ahd_sg_bus_to_virt(struct ahd_softc *ahd,
119			   struct scb *scb,
120			   uint32_t sg_busaddr);
121uint32_t
122	ahd_sg_virt_to_bus(struct ahd_softc *ahd,
123			   struct scb *scb,
124			   void *sg);
125void ahd_sync_scb(struct ahd_softc *ahd,
126		  struct scb *scb, int op);
127void ahd_sync_sglist(struct ahd_softc *ahd,
128		     struct scb *scb, int op);
129void ahd_sync_sense(struct ahd_softc *ahd,
130		    struct scb *scb, int op);
131uint32_t
132	ahd_targetcmd_offset(struct ahd_softc *ahd,
133			     u_int index);
352
353static __inline size_t
354ahd_sg_size(struct ahd_softc *ahd)
@@ -358,104 +140,48 @@ ahd_sg_size(struct ahd_softc *ahd)
358	return (sizeof(struct ahd_dma_seg));
359}
360
361static __inline void *
362ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
363{
364 dma_addr_t sg_offset;
365
366 /* sg_list_phys points to entry 1, not 0 */
367 sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
368 return ((uint8_t *)scb->sg_list + sg_offset);
369}
370
371static __inline uint32_t
372ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
373{
374 dma_addr_t sg_offset;
375
376 /* sg_list_phys points to entry 1, not 0 */
377 sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
378 - ahd_sg_size(ahd);
379
380 return (scb->sg_list_busaddr + sg_offset);
381}
382
383static __inline void
384ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
385{
386 ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
387 scb->hscb_map->dmamap,
388 /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
389 /*len*/sizeof(*scb->hscb), op);
390}
391
392static __inline void
393ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
394{
395 if (scb->sg_count == 0)
396 return;
397
398 ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
399 scb->sg_map->dmamap,
400 /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
401 /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
402}
403
404static __inline void
405ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
406{
407 ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
408 scb->sense_map->dmamap,
409 /*offset*/scb->sense_busaddr,
410 /*len*/AHD_SENSE_BUFSIZE, op);
411}
412
413static __inline uint32_t
414ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
415{
416 return (((uint8_t *)&ahd->targetcmds[index])
417 - (uint8_t *)ahd->qoutfifo);
418}
419
420/*********************** Miscellaneous Support Functions ***********************/
421static __inline struct ahd_initiator_tinfo *
422			ahd_fetch_transinfo(struct ahd_softc *ahd,
423					    char channel, u_int our_id,
424					    u_int remote_id,
425					    struct ahd_tmode_tstate **tstate);
426static __inline uint16_t
427			ahd_inw(struct ahd_softc *ahd, u_int port);
428static __inline void ahd_outw(struct ahd_softc *ahd, u_int port,
429			       u_int value);
430static __inline uint32_t
431			ahd_inl(struct ahd_softc *ahd, u_int port);
432static __inline void ahd_outl(struct ahd_softc *ahd, u_int port,
433			       uint32_t value);
434static __inline uint64_t
435			ahd_inq(struct ahd_softc *ahd, u_int port);
436static __inline void ahd_outq(struct ahd_softc *ahd, u_int port,
437			       uint64_t value);
438static __inline u_int ahd_get_scbptr(struct ahd_softc *ahd);
439static __inline void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
440static __inline u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd);
441static __inline void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value);
442static __inline u_int ahd_get_hescb_qoff(struct ahd_softc *ahd);
443static __inline void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value);
444static __inline u_int ahd_get_snscb_qoff(struct ahd_softc *ahd);
445static __inline void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value);
446static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *ahd);
447static __inline void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value);
448static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd);
449static __inline void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value);
450static __inline u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
451static __inline u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
452static __inline uint32_t
453			ahd_inl_scbram(struct ahd_softc *ahd, u_int offset);
454static __inline uint64_t
455			ahd_inq_scbram(struct ahd_softc *ahd, u_int offset);
456static __inline void ahd_swap_with_next_hscb(struct ahd_softc *ahd,
457					      struct scb *scb);
458static __inline void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
144struct ahd_initiator_tinfo *
145	ahd_fetch_transinfo(struct ahd_softc *ahd,
146			    char channel, u_int our_id,
147			    u_int remote_id,
148			    struct ahd_tmode_tstate **tstate);
149uint16_t
150	ahd_inw(struct ahd_softc *ahd, u_int port);
151void ahd_outw(struct ahd_softc *ahd, u_int port,
152	      u_int value);
153uint32_t
154	ahd_inl(struct ahd_softc *ahd, u_int port);
155void ahd_outl(struct ahd_softc *ahd, u_int port,
156	      uint32_t value);
157uint64_t
158	ahd_inq(struct ahd_softc *ahd, u_int port);
159void ahd_outq(struct ahd_softc *ahd, u_int port,
160	      uint64_t value);
161u_int ahd_get_scbptr(struct ahd_softc *ahd);
162void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
163u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd);
164void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value);
165u_int ahd_get_hescb_qoff(struct ahd_softc *ahd);
166void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value);
167u_int ahd_get_snscb_qoff(struct ahd_softc *ahd);
168void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value);
169u_int ahd_get_sescb_qoff(struct ahd_softc *ahd);
170void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value);
171u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd);
172void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value);
173u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
174u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
175uint32_t
176	ahd_inl_scbram(struct ahd_softc *ahd, u_int offset);
177uint64_t
178	ahd_inq_scbram(struct ahd_softc *ahd, u_int offset);
179struct scb *
180	ahd_lookup_scb(struct ahd_softc *ahd, u_int tag);
181void ahd_swap_with_next_hscb(struct ahd_softc *ahd,
182			     struct scb *scb);
183void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
184
459static __inline uint8_t *
460			ahd_get_sense_buf(struct ahd_softc *ahd,
461					  struct scb *scb);
@@ -463,25 +189,7 @@ static __inline uint32_t
463			ahd_get_sense_bufaddr(struct ahd_softc *ahd,
464					      struct scb *scb);
465
466/*
192#if 0 /* unused */
467 * Return pointers to the transfer negotiation information
468 * for the specified our_id/remote_id pair.
469 */
470static __inline struct ahd_initiator_tinfo *
471ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
472 u_int remote_id, struct ahd_tmode_tstate **tstate)
473{
474 /*
475 * Transfer data structures are stored from the perspective
476 * of the target role. Since the parameters for a connection
477 * in the initiator role to a given target are the same as
478 * when the roles are reversed, we pretend we are the target.
479 */
480 if (channel == 'B')
481 our_id += 8;
482 *tstate = ahd->enabled_targets[our_id];
483 return (&(*tstate)->transinfo[remote_id]);
484}
485
486#define AHD_COPY_COL_IDX(dst, src) \
487do { \
@@ -489,304 +197,7 @@ do { \
489	dst->hscb->lun = src->hscb->lun; \
490} while (0)
491
492static __inline uint16_t
493ahd_inw(struct ahd_softc *ahd, u_int port)
494{
495 /*
496 * Read high byte first as some registers increment
497 * or have other side effects when the low byte is
498 * read.
499 */
500 uint16_t r = ahd_inb(ahd, port+1) << 8;
501 return r | ahd_inb(ahd, port);
502}
503
504static __inline void
505ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
506{
507 /*
508 * Write low byte first to accomodate registers
509 * such as PRGMCNT where the order maters.
510 */
511 ahd_outb(ahd, port, value & 0xFF);
512 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
513}
514
515static __inline uint32_t
516ahd_inl(struct ahd_softc *ahd, u_int port)
517{
518 return ((ahd_inb(ahd, port))
519 | (ahd_inb(ahd, port+1) << 8)
520 | (ahd_inb(ahd, port+2) << 16)
521 | (ahd_inb(ahd, port+3) << 24));
522}
523
524static __inline void
525ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
526{
527 ahd_outb(ahd, port, (value) & 0xFF);
528 ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
529 ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
530 ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
531}
532
533static __inline uint64_t
534ahd_inq(struct ahd_softc *ahd, u_int port)
535{
536 return ((ahd_inb(ahd, port))
537 | (ahd_inb(ahd, port+1) << 8)
538 | (ahd_inb(ahd, port+2) << 16)
539 | (ahd_inb(ahd, port+3) << 24)
540 | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
541 | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
542 | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
543 | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
544}
545
546static __inline void
547ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
548{
549 ahd_outb(ahd, port, value & 0xFF);
550 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
551 ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
552 ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
553 ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
554 ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
555 ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
556 ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
557}
558
559static __inline u_int
560ahd_get_scbptr(struct ahd_softc *ahd)
561{
562 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
563 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
564 return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
565}
566
567static __inline void
568ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
569{
570 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
571 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
572 ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
573 ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
574}
575
576static __inline u_int
577ahd_get_hnscb_qoff(struct ahd_softc *ahd)
578{
579 return (ahd_inw_atomic(ahd, HNSCB_QOFF));
580}
581
582static __inline void
583ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
584{
585 ahd_outw_atomic(ahd, HNSCB_QOFF, value);
586}
587
588static __inline u_int
589ahd_get_hescb_qoff(struct ahd_softc *ahd)
590{
591 return (ahd_inb(ahd, HESCB_QOFF));
592}
593
594static __inline void
595ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
596{
597 ahd_outb(ahd, HESCB_QOFF, value);
598}
599
600static __inline u_int
601ahd_get_snscb_qoff(struct ahd_softc *ahd)
602{
603 u_int oldvalue;
604
605 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
606 oldvalue = ahd_inw(ahd, SNSCB_QOFF);
607 ahd_outw(ahd, SNSCB_QOFF, oldvalue);
608 return (oldvalue);
609}
610
611static __inline void
612ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
613{
614 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
615 ahd_outw(ahd, SNSCB_QOFF, value);
616}
617
618static __inline u_int
619ahd_get_sescb_qoff(struct ahd_softc *ahd)
620{
621 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
622 return (ahd_inb(ahd, SESCB_QOFF));
623}
624
625static __inline void
626ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
627{
628 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
629 ahd_outb(ahd, SESCB_QOFF, value);
630}
631
632static __inline u_int
633ahd_get_sdscb_qoff(struct ahd_softc *ahd)
634{
635 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
636 return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
637}
638
639static __inline void
640ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
641{
642 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
643 ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
644 ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
645}
646
647static __inline u_int
648ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
649{
650 u_int value;
651
652 /*
653 * Workaround PCI-X Rev A. hardware bug.
654 * After a host read of SCB memory, the chip
655 * may become confused into thinking prefetch
656 * was required. This starts the discard timer
657 * running and can cause an unexpected discard
658 * timer interrupt. The work around is to read
659 * a normal register prior to the exhaustion of
660 * the discard timer. The mode pointer register
661 * has no side effects and so serves well for
662 * this purpose.
663 *
664 * Razor #528
665 */
666 value = ahd_inb(ahd, offset);
667 if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
668 ahd_inb(ahd, MODE_PTR);
669 return (value);
670}
671
672static __inline u_int
673ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
674{
675 return (ahd_inb_scbram(ahd, offset)
676 | (ahd_inb_scbram(ahd, offset+1) << 8));
677}
678
679static __inline uint32_t
680ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
681{
682 return (ahd_inw_scbram(ahd, offset)
683 | (ahd_inw_scbram(ahd, offset+2) << 16));
684}
685
686static __inline uint64_t
687ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
688{
689 return (ahd_inl_scbram(ahd, offset)
690 | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
691}
692
693static __inline struct scb *
694ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
695{
696 struct scb* scb;
697
698 if (tag >= AHD_SCB_MAX)
699 return (NULL);
700 scb = ahd->scb_data.scbindex[tag];
701 if (scb != NULL)
702 ahd_sync_scb(ahd, scb,
703 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
704 return (scb);
705}
706
707static __inline void
708ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
709{
710 struct hardware_scb *q_hscb;
711 struct map_node *q_hscb_map;
712 uint32_t saved_hscb_busaddr;
713
714 /*
715 * Our queuing method is a bit tricky. The card
716 * knows in advance which HSCB (by address) to download,
717 * and we can't disappoint it. To achieve this, the next
718 * HSCB to download is saved off in ahd->next_queued_hscb.
719 * When we are called to queue "an arbitrary scb",
720 * we copy the contents of the incoming HSCB to the one
721 * the sequencer knows about, swap HSCB pointers and
722 * finally assign the SCB to the tag indexed location
723 * in the scb_array. This makes sure that we can still
724 * locate the correct SCB by SCB_TAG.
725 */
726 q_hscb = ahd->next_queued_hscb;
727 q_hscb_map = ahd->next_queued_hscb_map;
728 saved_hscb_busaddr = q_hscb->hscb_busaddr;
729 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
730 q_hscb->hscb_busaddr = saved_hscb_busaddr;
731 q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
732
733 /* Now swap HSCB pointers. */
734 ahd->next_queued_hscb = scb->hscb;
735 ahd->next_queued_hscb_map = scb->hscb_map;
736 scb->hscb = q_hscb;
737 scb->hscb_map = q_hscb_map;
738
739 /* Now define the mapping from tag to SCB in the scbindex */
740 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
741}
742
743/*
744 * Tell the sequencer about a new transaction to execute.
745 */
746static __inline void
747ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
748{
749 ahd_swap_with_next_hscb(ahd, scb);
750
751 if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
752 panic("Attempt to queue invalid SCB tag %x\n",
753 SCB_GET_TAG(scb));
754
755 /*
756 * Keep a history of SCBs we've downloaded in the qinfifo.
757 */
758 ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
759 ahd->qinfifonext++;
760
761 if (scb->sg_count != 0)
762 ahd_setup_data_scb(ahd, scb);
763 else
764 ahd_setup_noxfer_scb(ahd, scb);
765 ahd_setup_scb_common(ahd, scb);
766
767 /*
768 * Make sure our data is consistent from the
769 * perspective of the adapter.
770 */
771 ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
772
773#ifdef AHD_DEBUG
774 if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
775 uint64_t host_dataptr;
776
777 host_dataptr = ahd_le64toh(scb->hscb->dataptr);
778 printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
779 ahd_name(ahd),
780 SCB_GET_TAG(scb), scb->hscb->scsiid,
781 ahd_le32toh(scb->hscb->hscb_busaddr),
782 (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
783 (u_int)(host_dataptr & 0xFFFFFFFF),
784 ahd_le32toh(scb->hscb->datacnt));
785 }
786#endif
200#endif
787 /* Tell the adapter about the newly queued SCB */
788 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
789}
790
791static __inline uint8_t *
792ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
@@ -801,151 +212,9 @@ ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
801}
802
803/************************** Interrupt Processing ******************************/
804static __inline void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op);
805static __inline void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op);
806static __inline u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd);
807static __inline int ahd_intr(struct ahd_softc *ahd);
215void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op);
216void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op);
217u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd);
218int ahd_intr(struct ahd_softc *ahd);
808
809static __inline void
810ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
811{
812 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
813 /*offset*/0,
814 /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
815}
816
817static __inline void
818ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
819{
820#ifdef AHD_TARGET_MODE
821 if ((ahd->flags & AHD_TARGETROLE) != 0) {
822 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
823 ahd->shared_data_map.dmamap,
824 ahd_targetcmd_offset(ahd, 0),
825 sizeof(struct target_cmd) * AHD_TMODE_CMDS,
826 op);
827 }
828#endif
829}
830
831/*
832 * See if the firmware has posted any completed commands
833 * into our in-core command complete fifos.
834 */
835#define AHD_RUN_QOUTFIFO 0x1
836#define AHD_RUN_TQINFIFO 0x2
837static __inline u_int
838ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
839{
840 u_int retval;
841
842 retval = 0;
843 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
844 /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
845 /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
846 if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
847 == ahd->qoutfifonext_valid_tag)
848 retval |= AHD_RUN_QOUTFIFO;
849#ifdef AHD_TARGET_MODE
850 if ((ahd->flags & AHD_TARGETROLE) != 0
851 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
852 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
853 ahd->shared_data_map.dmamap,
854 ahd_targetcmd_offset(ahd, ahd->tqinfifofnext),
855 /*len*/sizeof(struct target_cmd),
856 BUS_DMASYNC_POSTREAD);
857 if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
858 retval |= AHD_RUN_TQINFIFO;
859 }
860#endif
861 return (retval);
862}
863
864/*
865 * Catch an interrupt from the adapter
866 */
867static __inline int
868ahd_intr(struct ahd_softc *ahd)
869{
870 u_int intstat;
871
872 if ((ahd->pause & INTEN) == 0) {
873 /*
874 * Our interrupt is not enabled on the chip
875 * and may be disabled for re-entrancy reasons,
876 * so just return. This is likely just a shared
877 * interrupt.
878 */
879 return (0);
880 }
881
882 /*
883 * Instead of directly reading the interrupt status register,
884 * infer the cause of the interrupt by checking our in-core
885 * completion queues. This avoids a costly PCI bus read in
886 * most cases.
887 */
888 if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
889 && (ahd_check_cmdcmpltqueues(ahd) != 0))
890 intstat = CMDCMPLT;
891 else
892 intstat = ahd_inb(ahd, INTSTAT);
893
894 if ((intstat & INT_PEND) == 0)
895 return (0);
896
897 if (intstat & CMDCMPLT) {
898 ahd_outb(ahd, CLRINT, CLRCMDINT);
899
900 /*
901 * Ensure that the chip sees that we've cleared
902 * this interrupt before we walk the output fifo.
903 * Otherwise, we may, due to posted bus writes,
904 * clear the interrupt after we finish the scan,
905 * and after the sequencer has added new entries
906 * and asserted the interrupt again.
907 */
908 if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
909 if (ahd_is_paused(ahd)) {
910 /*
911 * Potentially lost SEQINT.
912 * If SEQINTCODE is non-zero,
913 * simulate the SEQINT.
914 */
915 if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
916 intstat |= SEQINT;
917 }
918 } else {
919 ahd_flush_device_writes(ahd);
920 }
921 ahd_run_qoutfifo(ahd);
922 ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
923 ahd->cmdcmplt_total++;
924#ifdef AHD_TARGET_MODE
925 if ((ahd->flags & AHD_TARGETROLE) != 0)
926 ahd_run_tqinfifo(ahd, /*paused*/FALSE);
927#endif
928 }
929
930 /*
931 * Handle statuses that may invalidate our cached
932 * copy of INTSTAT separately.
933 */
934 if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
935 /* Hot eject. Do nothing */
936 } else if (intstat & HWERRINT) {
937 ahd_handle_hwerrint(ahd);
938 } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
939 ahd->bus_intr(ahd);
940 } else {
941
942 if ((intstat & SEQINT) != 0)
943 ahd_handle_seqint(ahd, intstat);
944
945 if ((intstat & SCSIINT) != 0)
946 ahd_handle_scsiint(ahd, intstat);
947 }
948 return (1);
949}
950 219
951#endif /* _AIC79XX_INLINE_H_ */ 220#endif /* _AIC79XX_INLINE_H_ */
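The ahd_intr() body removed from this header keeps an important fast path after being moved out of line by this patch: rather than always reading INTSTAT over the bus, it first checks a DMA-visible completion slot in host memory and only falls back to the register read when nothing has been posted. A minimal standalone sketch of that idea follows; every type, field name, and the simulated register here is illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

#define FAKE_CMDCMPLT 0x01	/* stand-in for the CMDCMPLT status bit */

struct fake_hba {
	uint8_t qoutfifo_valid[4];	/* completion tags the firmware DMAs to host memory */
	unsigned int qoutfifonext;	/* next slot the host expects to complete */
	uint8_t expected_tag;		/* tag value that marks a fresh completion */
	uint8_t intstat_reg;		/* simulated interrupt status register */
	unsigned long slow_reads;	/* how often the "bus" had to be read */
};

/* Simulated equivalent of reading INTSTAT over the PCI bus. */
static uint8_t slow_read_intstat(struct fake_hba *hba)
{
	hba->slow_reads++;
	return hba->intstat_reg;
}

/* Cheap host-memory check that replaces the bus read in the common case. */
static int completion_pending(const struct fake_hba *hba)
{
	return hba->qoutfifo_valid[hba->qoutfifonext] == hba->expected_tag;
}

static uint8_t infer_intstat(struct fake_hba *hba)
{
	if (completion_pending(hba))
		return FAKE_CMDCMPLT;
	return slow_read_intstat(hba);
}

int main(void)
{
	struct fake_hba hba = { .expected_tag = 0xAA };

	hba.qoutfifo_valid[0] = 0xAA;	/* firmware posted a completion */
	printf("intstat=0x%02x slow_reads=%lu\n", infer_intstat(&hba), hba.slow_reads);

	hba.qoutfifo_valid[0] = 0x00;	/* nothing posted: must read the register */
	printf("intstat=0x%02x slow_reads=%lu\n", infer_intstat(&hba), hba.slow_reads);
	return 0;
}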
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 0081aa357c8b..6c5287722465 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -369,10 +369,166 @@ static void ahd_release_simq(struct ahd_softc *ahd);
369static int ahd_linux_unit; 369static int ahd_linux_unit;
370 370
371 371
372/************************** OS Utility Wrappers *******************************/
373void ahd_delay(long);
374void
375ahd_delay(long usec)
376{
377 /*
378 * udelay on Linux can have problems for
379 * multi-millisecond waits. Wait at most
380 * 1024us per call.
381 */
382 while (usec > 0) {
383 udelay(usec % 1024);
384 usec -= 1024;
385 }
386}
387
388
389/***************************** Low Level I/O **********************************/
390uint8_t ahd_inb(struct ahd_softc * ahd, long port);
391uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port);
392void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
393void ahd_outw_atomic(struct ahd_softc * ahd,
394 long port, uint16_t val);
395void ahd_outsb(struct ahd_softc * ahd, long port,
396 uint8_t *, int count);
397void ahd_insb(struct ahd_softc * ahd, long port,
398 uint8_t *, int count);
399
400uint8_t
401ahd_inb(struct ahd_softc * ahd, long port)
402{
403 uint8_t x;
404
405 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
406 x = readb(ahd->bshs[0].maddr + port);
407 } else {
408 x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
409 }
410 mb();
411 return (x);
412}
413
414uint16_t
415ahd_inw_atomic(struct ahd_softc * ahd, long port)
416{
417	uint16_t x;
418
419 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
420 x = readw(ahd->bshs[0].maddr + port);
421 } else {
422 x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
423 }
424 mb();
425 return (x);
426}
427
428void
429ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
430{
431 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
432 writeb(val, ahd->bshs[0].maddr + port);
433 } else {
434 outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
435 }
436 mb();
437}
438
439void
440ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
441{
442 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
443 writew(val, ahd->bshs[0].maddr + port);
444 } else {
445 outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
446 }
447 mb();
448}
449
450void
451ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
452{
453 int i;
454
455 /*
456 * There is probably a more efficient way to do this on Linux
457 * but we don't use this for anything speed critical and this
458 * should work.
459 */
460 for (i = 0; i < count; i++)
461 ahd_outb(ahd, port, *array++);
462}
463
464void
465ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
466{
467 int i;
468
469 /*
470 * There is probably a more efficient way to do this on Linux
471 * but we don't use this for anything speed critical and this
472 * should work.
473 */
474 for (i = 0; i < count; i++)
475 *array++ = ahd_inb(ahd, port);
476}
477
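One detail worth noting in the I/O helpers just added above: in the port-I/O branch the long "port" argument acts as a two-part cookie, with the high bits selecting one of the mapped regions in ahd->bshs[] and the low byte giving the offset within it. A tiny standalone sketch of that decoding (the region bases here are invented; the real ones come from the PCI BARs):

#include <stdint.h>
#include <stdio.h>

/* Two illustrative I/O regions; real base addresses come from PCI BARs. */
static const unsigned long region_base[2] = { 0xd000, 0xe000 };

/* Split a driver-style port cookie into region index and byte offset. */
static unsigned long decode_port(long port)
{
	unsigned int region = (unsigned int)(port >> 8);
	unsigned int offset = (unsigned int)(port & 0xFF);

	return region_base[region] + offset;
}

int main(void)
{
	printf("port 0x013 -> 0x%lx\n", decode_port(0x013));	/* region 0, offset 0x13 */
	printf("port 0x113 -> 0x%lx\n", decode_port(0x113));	/* region 1, offset 0x13 */
	return 0;
}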
478/******************************* PCI Routines *********************************/
479uint32_t
480ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
481{
482 switch (width) {
483 case 1:
484 {
485 uint8_t retval;
486
487 pci_read_config_byte(pci, reg, &retval);
488 return (retval);
489 }
490 case 2:
491 {
492 uint16_t retval;
493 pci_read_config_word(pci, reg, &retval);
494 return (retval);
495 }
496 case 4:
497 {
498 uint32_t retval;
499 pci_read_config_dword(pci, reg, &retval);
500 return (retval);
501 }
502 default:
503 panic("ahd_pci_read_config: Read size too big");
504 /* NOTREACHED */
505 return (0);
506 }
507}
508
509void
510ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
511{
512 switch (width) {
513 case 1:
514 pci_write_config_byte(pci, reg, value);
515 break;
516 case 2:
517 pci_write_config_word(pci, reg, value);
518 break;
519 case 4:
520 pci_write_config_dword(pci, reg, value);
521 break;
522 default:
523 panic("ahd_pci_write_config: Write size too big");
524 /* NOTREACHED */
525 }
526}
527
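The PCI wrappers above only dispatch on the caller-supplied width to the kernel's pci_read_config_{byte,word,dword}() and pci_write_config_*() helpers. As a rough standalone illustration of that width dispatch against a fake in-memory config space (offsets and values are made up, and error returns are ignored just as in the wrappers):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint8_t cfg[256];	/* fake 256-byte config space for one function */

/* Width-dispatched read in the style of ahd_pci_read_config(). */
static uint32_t cfg_read(int reg, int width)
{
	uint8_t v8;
	uint16_t v16;
	uint32_t v32;

	switch (width) {
	case 1:
		memcpy(&v8, &cfg[reg], 1);
		return v8;
	case 2:
		memcpy(&v16, &cfg[reg], 2);
		return v16;
	case 4:
		memcpy(&v32, &cfg[reg], 4);
		return v32;
	default:
		return 0;	/* the driver panics on any other width */
	}
}

int main(void)
{
	/* Pretend a vendor ID lives at offset 0x00 and a device ID at 0x02. */
	cfg[0] = 0x28; cfg[1] = 0x90;
	cfg[2] = 0xf0; cfg[3] = 0x80;

	printf("vendor=0x%04x device=0x%04x dword=0x%08x\n",
	       (unsigned int)cfg_read(0x00, 2),
	       (unsigned int)cfg_read(0x02, 2),
	       (unsigned int)cfg_read(0x00, 4));
	return 0;
}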
372/****************************** Inlines ***************************************/ 528/****************************** Inlines ***************************************/
373static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*); 529static void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
374 530
375static __inline void 531static void
376ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb) 532ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
377{ 533{
378 struct scsi_cmnd *cmd; 534 struct scsi_cmnd *cmd;
@@ -432,7 +588,7 @@ ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
432 return rtn; 588 return rtn;
433} 589}
434 590
435static inline struct scsi_target ** 591static struct scsi_target **
436ahd_linux_target_in_softc(struct scsi_target *starget) 592ahd_linux_target_in_softc(struct scsi_target *starget)
437{ 593{
438 struct ahd_softc *ahd = 594 struct ahd_softc *ahd =
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 853998be1474..b252803e05f6 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -222,22 +222,6 @@ typedef struct timer_list ahd_timer_t;
222/***************************** Timer Facilities *******************************/ 222/***************************** Timer Facilities *******************************/
223#define ahd_timer_init init_timer 223#define ahd_timer_init init_timer
224#define ahd_timer_stop del_timer_sync 224#define ahd_timer_stop del_timer_sync
225typedef void ahd_linux_callback_t (u_long);
226static __inline void ahd_timer_reset(ahd_timer_t *timer, int usec,
227 ahd_callback_t *func, void *arg);
228
229static __inline void
230ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
231{
232 struct ahd_softc *ahd;
233
234 ahd = (struct ahd_softc *)arg;
235 del_timer(timer);
236 timer->data = (u_long)arg;
237 timer->expires = jiffies + (usec * HZ)/1000000;
238 timer->function = (ahd_linux_callback_t*)func;
239 add_timer(timer);
240}
241 225
242/***************************** SMP support ************************************/ 226/***************************** SMP support ************************************/
243#include <linux/spinlock.h> 227#include <linux/spinlock.h>
@@ -386,111 +370,19 @@ struct ahd_platform_data {
386#define malloc(size, type, flags) kmalloc(size, flags) 370#define malloc(size, type, flags) kmalloc(size, flags)
387#define free(ptr, type) kfree(ptr) 371#define free(ptr, type) kfree(ptr)
388 372
389static __inline void ahd_delay(long); 373void ahd_delay(long);
390static __inline void
391ahd_delay(long usec)
392{
393 /*
394 * udelay on Linux can have problems for
395 * multi-millisecond waits. Wait at most
396 * 1024us per call.
397 */
398 while (usec > 0) {
399 udelay(usec % 1024);
400 usec -= 1024;
401 }
402}
403
404 374
405/***************************** Low Level I/O **********************************/ 375/***************************** Low Level I/O **********************************/
406static __inline uint8_t ahd_inb(struct ahd_softc * ahd, long port); 376uint8_t ahd_inb(struct ahd_softc * ahd, long port);
407static __inline uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port); 377uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port);
408static __inline void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val); 378void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
409static __inline void ahd_outw_atomic(struct ahd_softc * ahd, 379void ahd_outw_atomic(struct ahd_softc * ahd,
410 long port, uint16_t val); 380 long port, uint16_t val);
411static __inline void ahd_outsb(struct ahd_softc * ahd, long port, 381void ahd_outsb(struct ahd_softc * ahd, long port,
412 uint8_t *, int count); 382 uint8_t *, int count);
413static __inline void ahd_insb(struct ahd_softc * ahd, long port, 383void ahd_insb(struct ahd_softc * ahd, long port,
414 uint8_t *, int count); 384 uint8_t *, int count);
415 385
416static __inline uint8_t
417ahd_inb(struct ahd_softc * ahd, long port)
418{
419 uint8_t x;
420
421 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
422 x = readb(ahd->bshs[0].maddr + port);
423 } else {
424 x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
425 }
426 mb();
427 return (x);
428}
429
430static __inline uint16_t
431ahd_inw_atomic(struct ahd_softc * ahd, long port)
432{
433 uint8_t x;
434
435 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
436 x = readw(ahd->bshs[0].maddr + port);
437 } else {
438 x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
439 }
440 mb();
441 return (x);
442}
443
444static __inline void
445ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
446{
447 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
448 writeb(val, ahd->bshs[0].maddr + port);
449 } else {
450 outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
451 }
452 mb();
453}
454
455static __inline void
456ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
457{
458 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
459 writew(val, ahd->bshs[0].maddr + port);
460 } else {
461 outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
462 }
463 mb();
464}
465
466static __inline void
467ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
468{
469 int i;
470
471 /*
472 * There is probably a more efficient way to do this on Linux
473 * but we don't use this for anything speed critical and this
474 * should work.
475 */
476 for (i = 0; i < count; i++)
477 ahd_outb(ahd, port, *array++);
478}
479
480static __inline void
481ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
482{
483 int i;
484
485 /*
486 * There is probably a more efficient way to do this on Linux
487 * but we don't use this for anything speed critical and this
488 * should work.
489 */
490 for (i = 0; i < count; i++)
491 *array++ = ahd_inb(ahd, port);
492}
493
494/**************************** Initialization **********************************/ 386/**************************** Initialization **********************************/
495int ahd_linux_register_host(struct ahd_softc *, 387int ahd_linux_register_host(struct ahd_softc *,
496 struct scsi_host_template *); 388 struct scsi_host_template *);
@@ -593,62 +485,12 @@ void ahd_linux_pci_exit(void);
593int ahd_pci_map_registers(struct ahd_softc *ahd); 485int ahd_pci_map_registers(struct ahd_softc *ahd);
594int ahd_pci_map_int(struct ahd_softc *ahd); 486int ahd_pci_map_int(struct ahd_softc *ahd);
595 487
596static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci, 488uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
597 int reg, int width); 489 int reg, int width);
598 490void ahd_pci_write_config(ahd_dev_softc_t pci,
599static __inline uint32_t
600ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
601{
602 switch (width) {
603 case 1:
604 {
605 uint8_t retval;
606
607 pci_read_config_byte(pci, reg, &retval);
608 return (retval);
609 }
610 case 2:
611 {
612 uint16_t retval;
613 pci_read_config_word(pci, reg, &retval);
614 return (retval);
615 }
616 case 4:
617 {
618 uint32_t retval;
619 pci_read_config_dword(pci, reg, &retval);
620 return (retval);
621 }
622 default:
623 panic("ahd_pci_read_config: Read size too big");
624 /* NOTREACHED */
625 return (0);
626 }
627}
628
629static __inline void ahd_pci_write_config(ahd_dev_softc_t pci,
630 int reg, uint32_t value, 491 int reg, uint32_t value,
631 int width); 492 int width);
632 493
633static __inline void
634ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
635{
636 switch (width) {
637 case 1:
638 pci_write_config_byte(pci, reg, value);
639 break;
640 case 2:
641 pci_write_config_word(pci, reg, value);
642 break;
643 case 4:
644 pci_write_config_dword(pci, reg, value);
645 break;
646 default:
647 panic("ahd_pci_write_config: Write size too big");
648 /* NOTREACHED */
649 }
650}
651
652static __inline int ahd_get_pci_function(ahd_dev_softc_t); 494static __inline int ahd_get_pci_function(ahd_dev_softc_t);
653static __inline int 495static __inline int
654ahd_get_pci_function(ahd_dev_softc_t pci) 496ahd_get_pci_function(ahd_dev_softc_t pci)
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index e196d83b93c7..2a103534df9f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -1436,7 +1436,7 @@ scratch_ram {
1436 KERNEL_TQINPOS { 1436 KERNEL_TQINPOS {
1437 size 1 1437 size 1
1438 } 1438 }
1439 TQINPOS { 1439 TQINPOS {
1440 size 1 1440 size 1
1441 } 1441 }
1442 ARG_1 { 1442 ARG_1 {
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 64e62ce59c15..d1d006b8b3a9 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -237,6 +237,510 @@ static void ahc_update_scsiid(struct ahc_softc *ahc,
237static int ahc_handle_target_cmd(struct ahc_softc *ahc, 237static int ahc_handle_target_cmd(struct ahc_softc *ahc,
238 struct target_cmd *cmd); 238 struct target_cmd *cmd);
239#endif 239#endif
240
241/************************* Sequencer Execution Control ************************/
242/*
243 * Work around any chip bugs related to halting sequencer execution.
244 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
245 * reading a register that will set this signal and deassert it.
246 * Without this workaround, if the chip is paused, by an interrupt or
247 * manual pause while accessing scb ram, accesses to certain registers
248 * will hang the system (infinite pci retries).
249 */
250void
251ahc_pause_bug_fix(struct ahc_softc *ahc)
252{
253 if ((ahc->features & AHC_ULTRA2) != 0)
254 (void)ahc_inb(ahc, CCSCBCTL);
255}
256
257/*
258 * Determine whether the sequencer has halted code execution.
259 * Returns non-zero status if the sequencer is stopped.
260 */
261int
262ahc_is_paused(struct ahc_softc *ahc)
263{
264 return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
265}
266
267/*
268 * Request that the sequencer stop and wait, indefinitely, for it
269 * to stop. The sequencer will only acknowledge that it is paused
270 * once it has reached an instruction boundary and PAUSEDIS is
271 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
272 * for critical sections.
273 */
274void
275ahc_pause(struct ahc_softc *ahc)
276{
277 ahc_outb(ahc, HCNTRL, ahc->pause);
278
279 /*
280 * Since the sequencer can disable pausing in a critical section, we
281 * must loop until it actually stops.
282 */
283 while (ahc_is_paused(ahc) == 0)
284 ;
285
286 ahc_pause_bug_fix(ahc);
287}
288
289/*
290 * Allow the sequencer to continue program execution.
291 * We check here to ensure that no additional interrupt
292 * sources that would cause the sequencer to halt have been
293 * asserted. If, for example, a SCSI bus reset is detected
294 * while we are fielding a different, pausing, interrupt type,
295 * we don't want to release the sequencer before going back
296 * into our interrupt handler and dealing with this new
297 * condition.
298 */
299void
300ahc_unpause(struct ahc_softc *ahc)
301{
302	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
303 ahc_outb(ahc, HCNTRL, ahc->unpause);
304}
305
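The comments on ahc_pause() and ahc_unpause() above amount to a small protocol: ask the sequencer to stop, spin until it acknowledges at an instruction boundary, do the register work, and only release it again if no pausing interrupt has appeared in the meantime. A toy model of that handshake follows; every name in it is invented, and it stands in for the real HCNTRL/INTSTAT interaction only loosely.

#include <stdio.h>

/* Toy handshake: the host requests a pause, the "sequencer" acknowledges it
 * once it reaches an instruction boundary. */
struct toy_seq {
	int pause_requested;
	int paused;		/* acknowledgement from the sequencer side */
	int pending_irq;	/* a SCSIINT/SEQINT-style pausing condition */
};

/* Model of the sequencer reaching its next instruction boundary. */
static void seq_step(struct toy_seq *s)
{
	if (s->pause_requested)
		s->paused = 1;
}

static void toy_pause(struct toy_seq *s)
{
	s->pause_requested = 1;
	while (!s->paused)	/* mirror the "loop until it actually stops" wait */
		seq_step(s);
}

static void toy_unpause(struct toy_seq *s)
{
	/* Only release the sequencer if nothing else wants it stopped. */
	if (!s->pending_irq) {
		s->pause_requested = 0;
		s->paused = 0;
	}
}

int main(void)
{
	struct toy_seq s = { 0, 0, 0 };

	toy_pause(&s);
	printf("paused=%d (safe to touch sequencer-owned registers)\n", s.paused);
	toy_unpause(&s);
	printf("paused=%d\n", s.paused);
	return 0;
}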
306/************************** Memory mapping routines ***************************/
307struct ahc_dma_seg *
308ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
309{
310 int sg_index;
311
312 sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
313 /* sg_list_phys points to entry 1, not 0 */
314 sg_index++;
315
316 return (&scb->sg_list[sg_index]);
317}
318
319uint32_t
320ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
321{
322 int sg_index;
323
324 /* sg_list_phys points to entry 1, not 0 */
325 sg_index = sg - &scb->sg_list[1];
326
327 return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
328}
329
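Both conversions above lean on the convention spelled out in their comments: scb->sg_list_phys is the bus address of S/G entry 1, not entry 0, so each direction of the mapping carries a one-element bias. A small standalone check of that round trip with a stand-in segment type (element size and addresses are invented):

#include <stdint.h>
#include <stdio.h>

struct toy_seg { uint32_t addr; uint32_t len; };

#define NSEGS 8

static struct toy_seg sg_list[NSEGS];
/* Bus address of sg_list[1], matching the driver's convention. */
static const uint32_t sg_list_phys = 0x1000 + sizeof(struct toy_seg);

static struct toy_seg *bus_to_virt(uint32_t busaddr)
{
	int idx = (busaddr - sg_list_phys) / sizeof(struct toy_seg);

	return &sg_list[idx + 1];	/* entry 1 bias, as in ahc_sg_bus_to_virt() */
}

static uint32_t virt_to_bus(struct toy_seg *sg)
{
	int idx = sg - &sg_list[1];	/* same bias in the other direction */

	return sg_list_phys + idx * sizeof(struct toy_seg);
}

int main(void)
{
	uint32_t bus = virt_to_bus(&sg_list[3]);

	printf("bus=0x%x maps back to index %ld\n",
	       (unsigned int)bus, (long)(bus_to_virt(bus) - sg_list));	/* expect 3 */
	return 0;
}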
330uint32_t
331ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
332{
333 return (ahc->scb_data->hscb_busaddr
334 + (sizeof(struct hardware_scb) * index));
335}
336
337void
338ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
339{
340 ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
341 ahc->scb_data->hscb_dmamap,
342 /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
343 /*len*/sizeof(*scb->hscb), op);
344}
345
346void
347ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
348{
349 if (scb->sg_count == 0)
350 return;
351
352 ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
353 /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
354 * sizeof(struct ahc_dma_seg),
355 /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
356}
357
358uint32_t
359ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
360{
361 return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
362}
363
364/*********************** Miscellaneous Support Functions ***********************/
365/*
366 * Determine whether the sequencer reported a residual
367 * for this SCB/transaction.
368 */
369void
370ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
371{
372 uint32_t sgptr;
373
374 sgptr = ahc_le32toh(scb->hscb->sgptr);
375 if ((sgptr & SG_RESID_VALID) != 0)
376 ahc_calc_residual(ahc, scb);
377}
378
379/*
380 * Return pointers to the transfer negotiation information
381 * for the specified our_id/remote_id pair.
382 */
383struct ahc_initiator_tinfo *
384ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
385 u_int remote_id, struct ahc_tmode_tstate **tstate)
386{
387 /*
388 * Transfer data structures are stored from the perspective
389 * of the target role. Since the parameters for a connection
390 * in the initiator role to a given target are the same as
391 * when the roles are reversed, we pretend we are the target.
392 */
393 if (channel == 'B')
394 our_id += 8;
395 *tstate = ahc->enabled_targets[our_id];
396 return (&(*tstate)->transinfo[remote_id]);
397}
398
399uint16_t
400ahc_inw(struct ahc_softc *ahc, u_int port)
401{
402 uint16_t r = ahc_inb(ahc, port+1) << 8;
403 return r | ahc_inb(ahc, port);
404}
405
406void
407ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
408{
409 ahc_outb(ahc, port, value & 0xFF);
410 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
411}
412
413uint32_t
414ahc_inl(struct ahc_softc *ahc, u_int port)
415{
416 return ((ahc_inb(ahc, port))
417 | (ahc_inb(ahc, port+1) << 8)
418 | (ahc_inb(ahc, port+2) << 16)
419 | (ahc_inb(ahc, port+3) << 24));
420}
421
422void
423ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
424{
425 ahc_outb(ahc, port, (value) & 0xFF);
426 ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
427 ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
428 ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
429}
430
431uint64_t
432ahc_inq(struct ahc_softc *ahc, u_int port)
433{
434 return ((ahc_inb(ahc, port))
435 | (ahc_inb(ahc, port+1) << 8)
436 | (ahc_inb(ahc, port+2) << 16)
437 | (ahc_inb(ahc, port+3) << 24)
438 | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
439 | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
440 | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
441 | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
442}
443
444void
445ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
446{
447 ahc_outb(ahc, port, value & 0xFF);
448 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
449 ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
450 ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
451 ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
452 ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
453 ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
454 ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
455}
456
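The accessors above build 16-, 32- and 64-bit values out of byte-wide register accesses at consecutive port offsets, least-significant byte at the lowest offset (ahc_inw simply issues its high-byte read first). A standalone round trip of the same shift-and-mask pattern over a plain byte array, with nothing driver-specific in it:

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[16];	/* stand-in for consecutive byte-wide ports */

static void put64(unsigned int port, uint64_t value)
{
	int i;

	for (i = 0; i < 8; i++)
		regs[port + i] = (value >> (8 * i)) & 0xFF;	/* LSB at lowest offset */
}

static uint64_t get64(unsigned int port)
{
	uint64_t value = 0;
	int i;

	for (i = 0; i < 8; i++)
		value |= (uint64_t)regs[port + i] << (8 * i);
	return value;
}

int main(void)
{
	put64(0, 0x0123456789abcdefULL);
	printf("0x%016llx\n", (unsigned long long)get64(0));	/* round-trips intact */
	return 0;
}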
457/*
458 * Get a free scb. If there are none, see if we can allocate a new SCB.
459 */
460struct scb *
461ahc_get_scb(struct ahc_softc *ahc)
462{
463 struct scb *scb;
464
465 if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
466 ahc_alloc_scbs(ahc);
467 scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
468 if (scb == NULL)
469 return (NULL);
470 }
471 SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
472 return (scb);
473}
474
475/*
476 * Return an SCB resource to the free list.
477 */
478void
479ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
480{
481 struct hardware_scb *hscb;
482
483 hscb = scb->hscb;
484 /* Clean up for the next user */
485 ahc->scb_data->scbindex[hscb->tag] = NULL;
486 scb->flags = SCB_FREE;
487 hscb->control = 0;
488
489 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
490
491 /* Notify the OSM that a resource is now available. */
492 ahc_platform_scb_free(ahc, scb);
493}
494
495struct scb *
496ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
497{
498 struct scb* scb;
499
500 scb = ahc->scb_data->scbindex[tag];
501 if (scb != NULL)
502 ahc_sync_scb(ahc, scb,
503 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
504 return (scb);
505}
506
507void
508ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
509{
510 struct hardware_scb *q_hscb;
511 u_int saved_tag;
512
513 /*
514 * Our queuing method is a bit tricky. The card
515 * knows in advance which HSCB to download, and we
516 * can't disappoint it. To achieve this, the next
517 * SCB to download is saved off in ahc->next_queued_scb.
518 * When we are called to queue "an arbitrary scb",
519 * we copy the contents of the incoming HSCB to the one
520 * the sequencer knows about, swap HSCB pointers and
521 * finally assign the SCB to the tag indexed location
522 * in the scb_array. This makes sure that we can still
523 * locate the correct SCB by SCB_TAG.
524 */
525 q_hscb = ahc->next_queued_scb->hscb;
526 saved_tag = q_hscb->tag;
527 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
528 if ((scb->flags & SCB_CDB32_PTR) != 0) {
529 q_hscb->shared_data.cdb_ptr =
530 ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
531 + offsetof(struct hardware_scb, cdb32));
532 }
533 q_hscb->tag = saved_tag;
534 q_hscb->next = scb->hscb->tag;
535
536 /* Now swap HSCB pointers. */
537 ahc->next_queued_scb->hscb = scb->hscb;
538 scb->hscb = q_hscb;
539
540 /* Now define the mapping from tag to SCB in the scbindex */
541 ahc->scb_data->scbindex[scb->hscb->tag] = scb;
542}
543
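The comment inside ahc_swap_with_next_hscb() above is the subtle part of queuing: the controller has already been told which hardware SCB it will fetch next, so the incoming command is copied into that pre-advertised slot, the two hscb pointers are exchanged, and the SCB is re-registered under the tag it now carries so that a lookup by tag still finds it. A toy model of that swap and of the lookup invariant it preserves; the types and fields here are invented, and much of the real bookkeeping (the next-tag link, the CDB pointer fixup) is omitted.

#include <stdio.h>
#include <string.h>

struct toy_hscb {
	unsigned int tag;	/* fixed identity of the hardware slot */
	char payload[16];	/* the command the hardware will fetch */
};

struct toy_scb {
	struct toy_hscb *hscb;
};

#define NTAGS 4
static struct toy_hscb hscbs[NTAGS] = {
	{ .tag = 0 }, { .tag = 1 }, { .tag = 2 }, { .tag = 3 }
};
static struct toy_scb *scbindex[NTAGS];

/* The slot the hardware has already been told to fetch next. */
static struct toy_scb next_queued = { &hscbs[0] };

static void swap_with_next_hscb(struct toy_scb *scb)
{
	struct toy_hscb *q_hscb = next_queued.hscb;
	struct toy_hscb tmp;

	/* Copy the incoming command into the pre-advertised slot while
	 * preserving that slot's own tag (its identity to the hardware). */
	tmp = *scb->hscb;
	tmp.tag = q_hscb->tag;
	*q_hscb = tmp;

	/* Exchange hscb pointers: the freed slot is advertised next time. */
	next_queued.hscb = scb->hscb;
	scb->hscb = q_hscb;

	/* Re-register so a lookup by the tag the hardware reports finds scb. */
	scbindex[scb->hscb->tag] = scb;
}

int main(void)
{
	struct toy_scb scb = { &hscbs[2] };

	strcpy(hscbs[2].payload, "INQUIRY");
	swap_with_next_hscb(&scb);

	printf("slot 0 now carries \"%s\"; lookup(tag 0) -> %s\n",
	       hscbs[0].payload,
	       scbindex[0] == &scb ? "the queued scb" : "miss");
	return 0;
}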
544/*
545 * Tell the sequencer about a new transaction to execute.
546 */
547void
548ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
549{
550 ahc_swap_with_next_hscb(ahc, scb);
551
552 if (scb->hscb->tag == SCB_LIST_NULL
553 || scb->hscb->next == SCB_LIST_NULL)
554 panic("Attempt to queue invalid SCB tag %x:%x\n",
555 scb->hscb->tag, scb->hscb->next);
556
557 /*
558 * Setup data "oddness".
559 */
560 scb->hscb->lun &= LID;
561 if (ahc_get_transfer_length(scb) & 0x1)
562 scb->hscb->lun |= SCB_XFERLEN_ODD;
563
564 /*
565 * Keep a history of SCBs we've downloaded in the qinfifo.
566 */
567 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
568
569 /*
570 * Make sure our data is consistent from the
571 * perspective of the adapter.
572 */
573 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
574
575 /* Tell the adapter about the newly queued SCB */
576 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
577 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
578 } else {
579		if ((ahc->features & AHC_AUTOPAUSE) == 0)
580 ahc_pause(ahc);
581 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
582		if ((ahc->features & AHC_AUTOPAUSE) == 0)
583 ahc_unpause(ahc);
584 }
585}
586
587struct scsi_sense_data *
588ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
589{
590 int offset;
591
592 offset = scb - ahc->scb_data->scbarray;
593 return (&ahc->scb_data->sense[offset]);
594}
595
596uint32_t
597ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
598{
599 int offset;
600
601 offset = scb - ahc->scb_data->scbarray;
602 return (ahc->scb_data->sense_busaddr
603 + (offset * sizeof(struct scsi_sense_data)));
604}
605
606/************************** Interrupt Processing ******************************/
607void
608ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
609{
610 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
611 /*offset*/0, /*len*/256, op);
612}
613
614void
615ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
616{
617#ifdef AHC_TARGET_MODE
618 if ((ahc->flags & AHC_TARGETROLE) != 0) {
619 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
620 ahc->shared_data_dmamap,
621 ahc_targetcmd_offset(ahc, 0),
622 sizeof(struct target_cmd) * AHC_TMODE_CMDS,
623 op);
624 }
625#endif
626}
627
628/*
629 * See if the firmware has posted any completed commands
630 * into our in-core command complete fifos.
631 */
632#define AHC_RUN_QOUTFIFO 0x1
633#define AHC_RUN_TQINFIFO 0x2
634u_int
635ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
636{
637 u_int retval;
638
639 retval = 0;
640 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
641 /*offset*/ahc->qoutfifonext, /*len*/1,
642 BUS_DMASYNC_POSTREAD);
643 if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
644 retval |= AHC_RUN_QOUTFIFO;
645#ifdef AHC_TARGET_MODE
646 if ((ahc->flags & AHC_TARGETROLE) != 0
647 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
648 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
649 ahc->shared_data_dmamap,
650 ahc_targetcmd_offset(ahc, ahc->tqinfifofnext),
651 /*len*/sizeof(struct target_cmd),
652 BUS_DMASYNC_POSTREAD);
653 if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
654 retval |= AHC_RUN_TQINFIFO;
655 }
656#endif
657 return (retval);
658}
659
660/*
661 * Catch an interrupt from the adapter
662 */
663int
664ahc_intr(struct ahc_softc *ahc)
665{
666 u_int intstat;
667
668 if ((ahc->pause & INTEN) == 0) {
669 /*
670 * Our interrupt is not enabled on the chip
671 * and may be disabled for re-entrancy reasons,
672 * so just return. This is likely just a shared
673 * interrupt.
674 */
675 return (0);
676 }
677 /*
678 * Instead of directly reading the interrupt status register,
679 * infer the cause of the interrupt by checking our in-core
680 * completion queues. This avoids a costly PCI bus read in
681 * most cases.
682 */
683 if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
684 && (ahc_check_cmdcmpltqueues(ahc) != 0))
685 intstat = CMDCMPLT;
686 else {
687 intstat = ahc_inb(ahc, INTSTAT);
688 }
689
690 if ((intstat & INT_PEND) == 0) {
691#if AHC_PCI_CONFIG > 0
692 if (ahc->unsolicited_ints > 500) {
693 ahc->unsolicited_ints = 0;
694 if ((ahc->chip & AHC_PCI) != 0
695 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
696 ahc->bus_intr(ahc);
697 }
698#endif
699 ahc->unsolicited_ints++;
700 return (0);
701 }
702 ahc->unsolicited_ints = 0;
703
704 if (intstat & CMDCMPLT) {
705 ahc_outb(ahc, CLRINT, CLRCMDINT);
706
707 /*
708 * Ensure that the chip sees that we've cleared
709 * this interrupt before we walk the output fifo.
710 * Otherwise, we may, due to posted bus writes,
711 * clear the interrupt after we finish the scan,
712 * and after the sequencer has added new entries
713 * and asserted the interrupt again.
714 */
715 ahc_flush_device_writes(ahc);
716 ahc_run_qoutfifo(ahc);
717#ifdef AHC_TARGET_MODE
718 if ((ahc->flags & AHC_TARGETROLE) != 0)
719 ahc_run_tqinfifo(ahc, /*paused*/FALSE);
720#endif
721 }
722
723 /*
724 * Handle statuses that may invalidate our cached
725 * copy of INTSTAT separately.
726 */
727 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
728 /* Hot eject. Do nothing */
729 } else if (intstat & BRKADRINT) {
730 ahc_handle_brkadrint(ahc);
731 } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
732
733 ahc_pause_bug_fix(ahc);
734
735 if ((intstat & SEQINT) != 0)
736 ahc_handle_seqint(ahc, intstat);
737
738 if ((intstat & SCSIINT) != 0)
739 ahc_handle_scsiint(ahc, intstat);
740 }
741 return (1);
742}
743
240/************************* Sequencer Execution Control ************************/ 744/************************* Sequencer Execution Control ************************/
241/* 745/*
242 * Restart the sequencer program from address zero 746 * Restart the sequencer program from address zero
@@ -2655,7 +3159,7 @@ proto_violation_reset:
2655 */ 3159 */
2656static void 3160static void
2657ahc_handle_message_phase(struct ahc_softc *ahc) 3161ahc_handle_message_phase(struct ahc_softc *ahc)
2658{ 3162{
2659 struct ahc_devinfo devinfo; 3163 struct ahc_devinfo devinfo;
2660 u_int bus_phase; 3164 u_int bus_phase;
2661 int end_session; 3165 int end_session;
@@ -5707,7 +6211,7 @@ ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
5707 */ 6211 */
5708static u_int 6212static u_int
5709ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) 6213ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5710{ 6214{
5711 u_int curscb, next; 6215 u_int curscb, next;
5712 6216
5713 /* 6217 /*
diff --git a/drivers/scsi/aic7xxx/aic7xxx_inline.h b/drivers/scsi/aic7xxx/aic7xxx_inline.h
index cba2f23bbe79..d18cd743618d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_inline.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_inline.h
@@ -46,74 +46,10 @@
46#define _AIC7XXX_INLINE_H_ 46#define _AIC7XXX_INLINE_H_
47 47
48/************************* Sequencer Execution Control ************************/ 48/************************* Sequencer Execution Control ************************/
49static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc); 49void ahc_pause_bug_fix(struct ahc_softc *ahc);
50static __inline int ahc_is_paused(struct ahc_softc *ahc); 50int ahc_is_paused(struct ahc_softc *ahc);
51static __inline void ahc_pause(struct ahc_softc *ahc); 51void ahc_pause(struct ahc_softc *ahc);
52static __inline void ahc_unpause(struct ahc_softc *ahc); 52void ahc_unpause(struct ahc_softc *ahc);
53
54/*
55 * Work around any chip bugs related to halting sequencer execution.
56 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
57 * reading a register that will set this signal and deassert it.
58 * Without this workaround, if the chip is paused, by an interrupt or
59 * manual pause while accessing scb ram, accesses to certain registers
60 * will hang the system (infinite pci retries).
61 */
62static __inline void
63ahc_pause_bug_fix(struct ahc_softc *ahc)
64{
65 if ((ahc->features & AHC_ULTRA2) != 0)
66 (void)ahc_inb(ahc, CCSCBCTL);
67}
68
69/*
70 * Determine whether the sequencer has halted code execution.
71 * Returns non-zero status if the sequencer is stopped.
72 */
73static __inline int
74ahc_is_paused(struct ahc_softc *ahc)
75{
76 return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
77}
78
79/*
80 * Request that the sequencer stop and wait, indefinitely, for it
81 * to stop. The sequencer will only acknowledge that it is paused
82 * once it has reached an instruction boundary and PAUSEDIS is
83 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
84 * for critical sections.
85 */
86static __inline void
87ahc_pause(struct ahc_softc *ahc)
88{
89 ahc_outb(ahc, HCNTRL, ahc->pause);
90
91 /*
92 * Since the sequencer can disable pausing in a critical section, we
93 * must loop until it actually stops.
94 */
95 while (ahc_is_paused(ahc) == 0)
96 ;
97
98 ahc_pause_bug_fix(ahc);
99}
100
101/*
102 * Allow the sequencer to continue program execution.
103 * We check here to ensure that no additional interrupt
104 * sources that would cause the sequencer to halt have been
105 * asserted. If, for example, a SCSI bus reset is detected
106 * while we are fielding a different, pausing, interrupt type,
107 * we don't want to release the sequencer before going back
108 * into our interrupt handler and dealing with this new
109 * condition.
110 */
111static __inline void
112ahc_unpause(struct ahc_softc *ahc)
113{
114	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
115 ahc_outb(ahc, HCNTRL, ahc->unpause);
116}
117 53
118/*********************** Untagged Transaction Routines ************************/ 54/*********************** Untagged Transaction Routines ************************/
119static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc); 55static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
@@ -147,78 +83,21 @@ ahc_release_untagged_queues(struct ahc_softc *ahc)
147} 83}
148 84
149/************************** Memory mapping routines ***************************/ 85/************************** Memory mapping routines ***************************/
150static __inline struct ahc_dma_seg * 86struct ahc_dma_seg *
151 ahc_sg_bus_to_virt(struct scb *scb, 87 ahc_sg_bus_to_virt(struct scb *scb,
152 uint32_t sg_busaddr); 88 uint32_t sg_busaddr);
153static __inline uint32_t 89uint32_t
154 ahc_sg_virt_to_bus(struct scb *scb, 90 ahc_sg_virt_to_bus(struct scb *scb,
155 struct ahc_dma_seg *sg); 91 struct ahc_dma_seg *sg);
156static __inline uint32_t 92uint32_t
157 ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index); 93 ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
158static __inline void ahc_sync_scb(struct ahc_softc *ahc, 94void ahc_sync_scb(struct ahc_softc *ahc,
159 struct scb *scb, int op); 95 struct scb *scb, int op);
160static __inline void ahc_sync_sglist(struct ahc_softc *ahc, 96void ahc_sync_sglist(struct ahc_softc *ahc,
161 struct scb *scb, int op); 97 struct scb *scb, int op);
162static __inline uint32_t 98uint32_t
163 ahc_targetcmd_offset(struct ahc_softc *ahc, 99 ahc_targetcmd_offset(struct ahc_softc *ahc,
164 u_int index); 100 u_int index);
165
166static __inline struct ahc_dma_seg *
167ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
168{
169 int sg_index;
170
171 sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
172 /* sg_list_phys points to entry 1, not 0 */
173 sg_index++;
174
175 return (&scb->sg_list[sg_index]);
176}
177
178static __inline uint32_t
179ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
180{
181 int sg_index;
182
183 /* sg_list_phys points to entry 1, not 0 */
184 sg_index = sg - &scb->sg_list[1];
185
186 return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
187}
188
189static __inline uint32_t
190ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
191{
192 return (ahc->scb_data->hscb_busaddr
193 + (sizeof(struct hardware_scb) * index));
194}
195
196static __inline void
197ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
198{
199 ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
200 ahc->scb_data->hscb_dmamap,
201 /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
202 /*len*/sizeof(*scb->hscb), op);
203}
204
205static __inline void
206ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
207{
208 if (scb->sg_count == 0)
209 return;
210
211 ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
212 /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
213 * sizeof(struct ahc_dma_seg),
214 /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
215}
216
217static __inline uint32_t
218ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
219{
220 return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
221}
222 101
223/******************************** Debugging ***********************************/ 102/******************************** Debugging ***********************************/
224static __inline char *ahc_name(struct ahc_softc *ahc); 103static __inline char *ahc_name(struct ahc_softc *ahc);
@@ -231,420 +110,44 @@ ahc_name(struct ahc_softc *ahc)
231 110
232/*********************** Miscellaneous Support Functions ***********************/ 111/*********************** Miscellaneous Support Functions ***********************/
233 112
234static __inline void ahc_update_residual(struct ahc_softc *ahc, 113void ahc_update_residual(struct ahc_softc *ahc,
235 struct scb *scb); 114 struct scb *scb);
236static __inline struct ahc_initiator_tinfo * 115struct ahc_initiator_tinfo *
237 ahc_fetch_transinfo(struct ahc_softc *ahc, 116 ahc_fetch_transinfo(struct ahc_softc *ahc,
238 char channel, u_int our_id, 117 char channel, u_int our_id,
239 u_int remote_id, 118 u_int remote_id,
240 struct ahc_tmode_tstate **tstate); 119 struct ahc_tmode_tstate **tstate);
241static __inline uint16_t 120uint16_t
242 ahc_inw(struct ahc_softc *ahc, u_int port); 121 ahc_inw(struct ahc_softc *ahc, u_int port);
243static __inline void ahc_outw(struct ahc_softc *ahc, u_int port, 122void ahc_outw(struct ahc_softc *ahc, u_int port,
244 u_int value); 123 u_int value);
245static __inline uint32_t 124uint32_t
246 ahc_inl(struct ahc_softc *ahc, u_int port); 125 ahc_inl(struct ahc_softc *ahc, u_int port);
247static __inline void ahc_outl(struct ahc_softc *ahc, u_int port, 126void ahc_outl(struct ahc_softc *ahc, u_int port,
248 uint32_t value); 127 uint32_t value);
249static __inline uint64_t 128uint64_t
250 ahc_inq(struct ahc_softc *ahc, u_int port); 129 ahc_inq(struct ahc_softc *ahc, u_int port);
251static __inline void ahc_outq(struct ahc_softc *ahc, u_int port, 130void ahc_outq(struct ahc_softc *ahc, u_int port,
252 uint64_t value); 131 uint64_t value);
253static __inline struct scb* 132struct scb*
254 ahc_get_scb(struct ahc_softc *ahc); 133 ahc_get_scb(struct ahc_softc *ahc);
255static __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb); 134void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
256static __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc, 135struct scb *
257 struct scb *scb); 136 ahc_lookup_scb(struct ahc_softc *ahc, u_int tag);
258static __inline void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb); 137void ahc_swap_with_next_hscb(struct ahc_softc *ahc,
259static __inline struct scsi_sense_data * 138 struct scb *scb);
260 ahc_get_sense_buf(struct ahc_softc *ahc, 139void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
261 struct scb *scb); 140struct scsi_sense_data *
262static __inline uint32_t 141 ahc_get_sense_buf(struct ahc_softc *ahc,
263 ahc_get_sense_bufaddr(struct ahc_softc *ahc, 142 struct scb *scb);
264 struct scb *scb); 143uint32_t
265 144 ahc_get_sense_bufaddr(struct ahc_softc *ahc,
266/* 145 struct scb *scb);
267 * Determine whether the sequencer reported a residual
268 * for this SCB/transaction.
269 */
270static __inline void
271ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
272{
273 uint32_t sgptr;
274
275 sgptr = ahc_le32toh(scb->hscb->sgptr);
276 if ((sgptr & SG_RESID_VALID) != 0)
277 ahc_calc_residual(ahc, scb);
278}
279
280/*
281 * Return pointers to the transfer negotiation information
282 * for the specified our_id/remote_id pair.
283 */
284static __inline struct ahc_initiator_tinfo *
285ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
286 u_int remote_id, struct ahc_tmode_tstate **tstate)
287{
288 /*
289 * Transfer data structures are stored from the perspective
290 * of the target role. Since the parameters for a connection
291 * in the initiator role to a given target are the same as
292 * when the roles are reversed, we pretend we are the target.
293 */
294 if (channel == 'B')
295 our_id += 8;
296 *tstate = ahc->enabled_targets[our_id];
297 return (&(*tstate)->transinfo[remote_id]);
298}
299
300static __inline uint16_t
301ahc_inw(struct ahc_softc *ahc, u_int port)
302{
303 uint16_t r = ahc_inb(ahc, port+1) << 8;
304 return r | ahc_inb(ahc, port);
305}
306
307static __inline void
308ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
309{
310 ahc_outb(ahc, port, value & 0xFF);
311 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
312}
313
314static __inline uint32_t
315ahc_inl(struct ahc_softc *ahc, u_int port)
316{
317 return ((ahc_inb(ahc, port))
318 | (ahc_inb(ahc, port+1) << 8)
319 | (ahc_inb(ahc, port+2) << 16)
320 | (ahc_inb(ahc, port+3) << 24));
321}
322
323static __inline void
324ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
325{
326 ahc_outb(ahc, port, (value) & 0xFF);
327 ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
328 ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
329 ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
330}
331
332static __inline uint64_t
333ahc_inq(struct ahc_softc *ahc, u_int port)
334{
335 return ((ahc_inb(ahc, port))
336 | (ahc_inb(ahc, port+1) << 8)
337 | (ahc_inb(ahc, port+2) << 16)
338 | (ahc_inb(ahc, port+3) << 24)
339 | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
340 | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
341 | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
342 | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
343}
344
345static __inline void
346ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
347{
348 ahc_outb(ahc, port, value & 0xFF);
349 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
350 ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
351 ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
352 ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
353 ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
354 ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
355 ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
356}
357
358/*
359 * Get a free scb. If there are none, see if we can allocate a new SCB.
360 */
361static __inline struct scb *
362ahc_get_scb(struct ahc_softc *ahc)
363{
364 struct scb *scb;
365
366 if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
367 ahc_alloc_scbs(ahc);
368 scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
369 if (scb == NULL)
370 return (NULL);
371 }
372 SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
373 return (scb);
374}
375
376/*
377 * Return an SCB resource to the free list.
378 */
379static __inline void
380ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
381{
382 struct hardware_scb *hscb;
383
384 hscb = scb->hscb;
385 /* Clean up for the next user */
386 ahc->scb_data->scbindex[hscb->tag] = NULL;
387 scb->flags = SCB_FREE;
388 hscb->control = 0;
389
390 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
391
392 /* Notify the OSM that a resource is now available. */
393 ahc_platform_scb_free(ahc, scb);
394}
395
396static __inline struct scb *
397ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
398{
399 struct scb* scb;
400
401 scb = ahc->scb_data->scbindex[tag];
402 if (scb != NULL)
403 ahc_sync_scb(ahc, scb,
404 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
405 return (scb);
406}
407
408static __inline void
409ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
410{
411 struct hardware_scb *q_hscb;
412 u_int saved_tag;
413
414 /*
415 * Our queuing method is a bit tricky. The card
416 * knows in advance which HSCB to download, and we
417 * can't disappoint it. To achieve this, the next
418 * SCB to download is saved off in ahc->next_queued_scb.
419 * When we are called to queue "an arbitrary scb",
420 * we copy the contents of the incoming HSCB to the one
421 * the sequencer knows about, swap HSCB pointers and
422 * finally assign the SCB to the tag indexed location
423 * in the scb_array. This makes sure that we can still
424 * locate the correct SCB by SCB_TAG.
425 */
426 q_hscb = ahc->next_queued_scb->hscb;
427 saved_tag = q_hscb->tag;
428 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
429 if ((scb->flags & SCB_CDB32_PTR) != 0) {
430 q_hscb->shared_data.cdb_ptr =
431 ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
432 + offsetof(struct hardware_scb, cdb32));
433 }
434 q_hscb->tag = saved_tag;
435 q_hscb->next = scb->hscb->tag;
436
437 /* Now swap HSCB pointers. */
438 ahc->next_queued_scb->hscb = scb->hscb;
439 scb->hscb = q_hscb;
440
441 /* Now define the mapping from tag to SCB in the scbindex */
442 ahc->scb_data->scbindex[scb->hscb->tag] = scb;
443}
444
445/*
446 * Tell the sequencer about a new transaction to execute.
447 */
448static __inline void
449ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
450{
451 ahc_swap_with_next_hscb(ahc, scb);
452
453 if (scb->hscb->tag == SCB_LIST_NULL
454 || scb->hscb->next == SCB_LIST_NULL)
455 panic("Attempt to queue invalid SCB tag %x:%x\n",
456 scb->hscb->tag, scb->hscb->next);
457
458 /*
459 * Setup data "oddness".
460 */
461 scb->hscb->lun &= LID;
462 if (ahc_get_transfer_length(scb) & 0x1)
463 scb->hscb->lun |= SCB_XFERLEN_ODD;
464
465 /*
466 * Keep a history of SCBs we've downloaded in the qinfifo.
467 */
468 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
469
470 /*
471 * Make sure our data is consistent from the
472 * perspective of the adapter.
473 */
474 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
475
476 /* Tell the adapter about the newly queued SCB */
477 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
478 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
479 } else {
480		if ((ahc->features & AHC_AUTOPAUSE) == 0)
481 ahc_pause(ahc);
482 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
483		if ((ahc->features & AHC_AUTOPAUSE) == 0)
484 ahc_unpause(ahc);
485 }
486}
487
488static __inline struct scsi_sense_data *
489ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
490{
491 int offset;
492
493 offset = scb - ahc->scb_data->scbarray;
494 return (&ahc->scb_data->sense[offset]);
495}
496
497static __inline uint32_t
498ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
499{
500 int offset;
501
502 offset = scb - ahc->scb_data->scbarray;
503 return (ahc->scb_data->sense_busaddr
504 + (offset * sizeof(struct scsi_sense_data)));
505}
506 146
507/************************** Interrupt Processing ******************************/ 147/************************** Interrupt Processing ******************************/
508static __inline void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op); 148void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op);
509static __inline void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op); 149void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
510static __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc); 150u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
511static __inline int ahc_intr(struct ahc_softc *ahc); 151int ahc_intr(struct ahc_softc *ahc);
512
513static __inline void
514ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
515{
516 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
517 /*offset*/0, /*len*/256, op);
518}
519
520static __inline void
521ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
522{
523#ifdef AHC_TARGET_MODE
524 if ((ahc->flags & AHC_TARGETROLE) != 0) {
525 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
526 ahc->shared_data_dmamap,
527 ahc_targetcmd_offset(ahc, 0),
528 sizeof(struct target_cmd) * AHC_TMODE_CMDS,
529 op);
530 }
531#endif
532}
533
534/*
535 * See if the firmware has posted any completed commands
536 * into our in-core command complete fifos.
537 */
538#define AHC_RUN_QOUTFIFO 0x1
539#define AHC_RUN_TQINFIFO 0x2
540static __inline u_int
541ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
542{
543 u_int retval;
544
545 retval = 0;
546 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
547 /*offset*/ahc->qoutfifonext, /*len*/1,
548 BUS_DMASYNC_POSTREAD);
549 if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
550 retval |= AHC_RUN_QOUTFIFO;
551#ifdef AHC_TARGET_MODE
552 if ((ahc->flags & AHC_TARGETROLE) != 0
553 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
554 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
555 ahc->shared_data_dmamap,
556 ahc_targetcmd_offset(ahc, ahc->tqinfifofnext),
557 /*len*/sizeof(struct target_cmd),
558 BUS_DMASYNC_POSTREAD);
559 if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
560 retval |= AHC_RUN_TQINFIFO;
561 }
562#endif
563 return (retval);
564}
565
566/*
567 * Catch an interrupt from the adapter
568 */
569static __inline int
570ahc_intr(struct ahc_softc *ahc)
571{
572 u_int intstat;
573
574 if ((ahc->pause & INTEN) == 0) {
575 /*
576 * Our interrupt is not enabled on the chip
577 * and may be disabled for re-entrancy reasons,
578 * so just return. This is likely just a shared
579 * interrupt.
580 */
581 return (0);
582 }
583 /*
584 * Instead of directly reading the interrupt status register,
585 * infer the cause of the interrupt by checking our in-core
586 * completion queues. This avoids a costly PCI bus read in
587 * most cases.
588 */
589 if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
590 && (ahc_check_cmdcmpltqueues(ahc) != 0))
591 intstat = CMDCMPLT;
592 else {
593 intstat = ahc_inb(ahc, INTSTAT);
594 }
595
596 if ((intstat & INT_PEND) == 0) {
597#if AHC_PCI_CONFIG > 0
598 if (ahc->unsolicited_ints > 500) {
599 ahc->unsolicited_ints = 0;
600 if ((ahc->chip & AHC_PCI) != 0
601 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
602 ahc->bus_intr(ahc);
603 }
604#endif
605 ahc->unsolicited_ints++;
606 return (0);
607 }
608 ahc->unsolicited_ints = 0;
609
610 if (intstat & CMDCMPLT) {
611 ahc_outb(ahc, CLRINT, CLRCMDINT);
612
613 /*
614 * Ensure that the chip sees that we've cleared
615 * this interrupt before we walk the output fifo.
616 * Otherwise, we may, due to posted bus writes,
617 * clear the interrupt after we finish the scan,
618 * and after the sequencer has added new entries
619 * and asserted the interrupt again.
620 */
621 ahc_flush_device_writes(ahc);
622 ahc_run_qoutfifo(ahc);
623#ifdef AHC_TARGET_MODE
624 if ((ahc->flags & AHC_TARGETROLE) != 0)
625 ahc_run_tqinfifo(ahc, /*paused*/FALSE);
626#endif
627 }
628
629 /*
630 * Handle statuses that may invalidate our cached
631 * copy of INTSTAT separately.
632 */
633 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
634 /* Hot eject. Do nothing */
635 } else if (intstat & BRKADRINT) {
636 ahc_handle_brkadrint(ahc);
637 } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
638
639 ahc_pause_bug_fix(ahc);
640
641 if ((intstat & SEQINT) != 0)
642 ahc_handle_seqint(ahc, intstat);
643
644 if ((intstat & SCSIINT) != 0)
645 ahc_handle_scsiint(ahc, intstat);
646 }
647 return (1);
648}
649 152
650#endif /* _AIC7XXX_INLINE_H_ */ 153#endif /* _AIC7XXX_INLINE_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 42ad48e09f02..c5a354b39d88 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -388,14 +388,83 @@ static int aic7xxx_setup(char *s);
388static int ahc_linux_unit; 388static int ahc_linux_unit;
389 389
390 390
391/************************** OS Utility Wrappers *******************************/
392void
393ahc_delay(long usec)
394{
395 /*
396 * udelay on Linux can have problems for
397 * multi-millisecond waits. Wait at most
398 * 1024us per call.
399 */
400 while (usec > 0) {
401 udelay(usec % 1024);
402 usec -= 1024;
403 }
404}
405
406/***************************** Low Level I/O **********************************/
407uint8_t
408ahc_inb(struct ahc_softc * ahc, long port)
409{
410 uint8_t x;
411
412 if (ahc->tag == BUS_SPACE_MEMIO) {
413 x = readb(ahc->bsh.maddr + port);
414 } else {
415 x = inb(ahc->bsh.ioport + port);
416 }
417 mb();
418 return (x);
419}
420
421void
422ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
423{
424 if (ahc->tag == BUS_SPACE_MEMIO) {
425 writeb(val, ahc->bsh.maddr + port);
426 } else {
427 outb(val, ahc->bsh.ioport + port);
428 }
429 mb();
430}
431
432void
433ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
434{
435 int i;
436
437 /*
438 * There is probably a more efficient way to do this on Linux
439 * but we don't use this for anything speed critical and this
440 * should work.
441 */
442 for (i = 0; i < count; i++)
443 ahc_outb(ahc, port, *array++);
444}
445
446void
447ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
448{
449 int i;
450
451 /*
452 * There is probably a more efficient way to do this on Linux
453 * but we don't use this for anything speed critical and this
454 * should work.
455 */
456 for (i = 0; i < count; i++)
457 *array++ = ahc_inb(ahc, port);
458}
459
391/********************************* Inlines ************************************/ 460/********************************* Inlines ************************************/
392static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*); 461static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
393 462
394static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, 463static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
395 struct ahc_dma_seg *sg, 464 struct ahc_dma_seg *sg,
396 dma_addr_t addr, bus_size_t len); 465 dma_addr_t addr, bus_size_t len);
397 466
398static __inline void 467static void
399ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb) 468ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
400{ 469{
401 struct scsi_cmnd *cmd; 470 struct scsi_cmnd *cmd;
@@ -406,7 +475,7 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
406 scsi_dma_unmap(cmd); 475 scsi_dma_unmap(cmd);
407} 476}
408 477
409static __inline int 478static int
410ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, 479ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
411 struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len) 480 struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
412{ 481{
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index b48dab447bde..9d6e0660ddbc 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -375,82 +375,16 @@ struct ahc_platform_data {
375#define malloc(size, type, flags) kmalloc(size, flags) 375#define malloc(size, type, flags) kmalloc(size, flags)
376#define free(ptr, type) kfree(ptr) 376#define free(ptr, type) kfree(ptr)
377 377
378static __inline void ahc_delay(long); 378void ahc_delay(long);
379static __inline void
380ahc_delay(long usec)
381{
382 /*
383 * udelay on Linux can have problems for
384 * multi-millisecond waits. Wait at most
385 * 1024us per call.
386 */
387 while (usec > 0) {
388 udelay(usec % 1024);
389 usec -= 1024;
390 }
391}
392 379
393 380
394/***************************** Low Level I/O **********************************/ 381/***************************** Low Level I/O **********************************/
395static __inline uint8_t ahc_inb(struct ahc_softc * ahc, long port); 382uint8_t ahc_inb(struct ahc_softc * ahc, long port);
396static __inline void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val); 383void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
397static __inline void ahc_outsb(struct ahc_softc * ahc, long port, 384void ahc_outsb(struct ahc_softc * ahc, long port,
398 uint8_t *, int count); 385 uint8_t *, int count);
399static __inline void ahc_insb(struct ahc_softc * ahc, long port, 386void ahc_insb(struct ahc_softc * ahc, long port,
400 uint8_t *, int count); 387 uint8_t *, int count);
401
402static __inline uint8_t
403ahc_inb(struct ahc_softc * ahc, long port)
404{
405 uint8_t x;
406
407 if (ahc->tag == BUS_SPACE_MEMIO) {
408 x = readb(ahc->bsh.maddr + port);
409 } else {
410 x = inb(ahc->bsh.ioport + port);
411 }
412 mb();
413 return (x);
414}
415
416static __inline void
417ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
418{
419 if (ahc->tag == BUS_SPACE_MEMIO) {
420 writeb(val, ahc->bsh.maddr + port);
421 } else {
422 outb(val, ahc->bsh.ioport + port);
423 }
424 mb();
425}
426
427static __inline void
428ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
429{
430 int i;
431
432 /*
433 * There is probably a more efficient way to do this on Linux
434 * but we don't use this for anything speed critical and this
435 * should work.
436 */
437 for (i = 0; i < count; i++)
438 ahc_outb(ahc, port, *array++);
439}
440
441static __inline void
442ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
443{
444 int i;
445
446 /*
447 * There is probably a more efficient way to do this on Linux
448 * but we don't use this for anything speed critical and this
449 * should work.
450 */
451 for (i = 0; i < count; i++)
452 *array++ = ahc_inb(ahc, port);
453}
454 388
455/**************************** Initialization **********************************/ 389/**************************** Initialization **********************************/
456int ahc_linux_register_host(struct ahc_softc *, 390int ahc_linux_register_host(struct ahc_softc *,
@@ -555,61 +489,12 @@ void ahc_linux_pci_exit(void);
555int ahc_pci_map_registers(struct ahc_softc *ahc); 489int ahc_pci_map_registers(struct ahc_softc *ahc);
556int ahc_pci_map_int(struct ahc_softc *ahc); 490int ahc_pci_map_int(struct ahc_softc *ahc);
557 491
558static __inline uint32_t ahc_pci_read_config(ahc_dev_softc_t pci, 492uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
559 int reg, int width); 493 int reg, int width);
560 494
561static __inline uint32_t 495void ahc_pci_write_config(ahc_dev_softc_t pci,
562ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width) 496 int reg, uint32_t value,
563{ 497 int width);
564 switch (width) {
565 case 1:
566 {
567 uint8_t retval;
568
569 pci_read_config_byte(pci, reg, &retval);
570 return (retval);
571 }
572 case 2:
573 {
574 uint16_t retval;
575 pci_read_config_word(pci, reg, &retval);
576 return (retval);
577 }
578 case 4:
579 {
580 uint32_t retval;
581 pci_read_config_dword(pci, reg, &retval);
582 return (retval);
583 }
584 default:
585 panic("ahc_pci_read_config: Read size too big");
586 /* NOTREACHED */
587 return (0);
588 }
589}
590
591static __inline void ahc_pci_write_config(ahc_dev_softc_t pci,
592 int reg, uint32_t value,
593 int width);
594
595static __inline void
596ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
597{
598 switch (width) {
599 case 1:
600 pci_write_config_byte(pci, reg, value);
601 break;
602 case 2:
603 pci_write_config_word(pci, reg, value);
604 break;
605 case 4:
606 pci_write_config_dword(pci, reg, value);
607 break;
608 default:
609 panic("ahc_pci_write_config: Write size too big");
610 /* NOTREACHED */
611 }
612}
613 498
614static __inline int ahc_get_pci_function(ahc_dev_softc_t); 499static __inline int ahc_get_pci_function(ahc_dev_softc_t);
615static __inline int 500static __inline int
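Every hunk in this header follows the same deinlining pattern: the static __inline definition is dropped from the .h file, only an extern prototype stays behind, and the body is compiled once in the corresponding .c file (as the aic7xxx_osm_pci.c hunk below shows for the PCI config accessors). A minimal, hypothetical sketch of that pattern is given here for illustration only; it is not part of the patch, and the foo_* names are made up:

/* foo_osm.h -- before: prototype and inline body both live in the header,
 * so every translation unit that includes it carries its own copy. */
static __inline void foo_delay(long usec);
static __inline void
foo_delay(long usec)
{
	/* udelay() is unreliable for multi-millisecond waits, so break the
	 * wait into chunks of at most 1024us per call. */
	while (usec > 1024) {
		udelay(1024);
		usec -= 1024;
	}
	udelay(usec);
}

/* foo_osm.h -- after: only the declaration remains in the header. */
void foo_delay(long usec);

/* foo_osm.c -- after: the out-of-line body is compiled exactly once. */
#include <linux/delay.h>
#include "foo_osm.h"

void
foo_delay(long usec)
{
	/* Same chunked wait as before, just no longer inline. */
	while (usec > 1024) {
		udelay(1024);
		usec -= 1024;
	}
	udelay(usec);
}

The observable behaviour is unchanged; the trade-off is a call instruction per use in exchange for one copy of the code and smaller object files, which is what the diffstat at the top of this commit reflects.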
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 3d3eaef65fb3..bd422a80e9d5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -269,6 +269,57 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
269 return (0); 269 return (0);
270} 270}
271 271
272/******************************* PCI Routines *********************************/
273uint32_t
274ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
275{
276 switch (width) {
277 case 1:
278 {
279 uint8_t retval;
280
281 pci_read_config_byte(pci, reg, &retval);
282 return (retval);
283 }
284 case 2:
285 {
286 uint16_t retval;
287 pci_read_config_word(pci, reg, &retval);
288 return (retval);
289 }
290 case 4:
291 {
292 uint32_t retval;
293 pci_read_config_dword(pci, reg, &retval);
294 return (retval);
295 }
296 default:
297 panic("ahc_pci_read_config: Read size too big");
298 /* NOTREACHED */
299 return (0);
300 }
301}
302
303void
304ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
305{
306 switch (width) {
307 case 1:
308 pci_write_config_byte(pci, reg, value);
309 break;
310 case 2:
311 pci_write_config_word(pci, reg, value);
312 break;
313 case 4:
314 pci_write_config_dword(pci, reg, value);
315 break;
316 default:
317 panic("ahc_pci_write_config: Write size too big");
318 /* NOTREACHED */
319 }
320}
321
322
272static struct pci_driver aic7xxx_pci_driver = { 323static struct pci_driver aic7xxx_pci_driver = {
273 .name = "aic7xxx", 324 .name = "aic7xxx",
274 .probe = ahc_linux_pci_dev_probe, 325 .probe = ahc_linux_pci_dev_probe,