Diffstat (limited to 'drivers/scsi/aic7xxx/aic7xxx_osm.c')
-rw-r--r--   drivers/scsi/aic7xxx/aic7xxx_osm.c | 1403
1 file changed, 339 insertions(+), 1064 deletions(-)
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index d978e4a3e973..c13e56320010 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -134,11 +134,6 @@ static struct scsi_transport_template *ahc_linux_transport_template = NULL; | |||
134 | #include "aiclib.c" | 134 | #include "aiclib.c" |
135 | 135 | ||
136 | #include <linux/init.h> /* __setup */ | 136 | #include <linux/init.h> /* __setup */ |
137 | |||
138 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) | ||
139 | #include "sd.h" /* For geometry detection */ | ||
140 | #endif | ||
141 | |||
142 | #include <linux/mm.h> /* For fetching system memory size */ | 137 | #include <linux/mm.h> /* For fetching system memory size */ |
143 | #include <linux/blkdev.h> /* For block_size() */ | 138 | #include <linux/blkdev.h> /* For block_size() */ |
144 | #include <linux/delay.h> /* For ssleep/msleep */ | 139 | #include <linux/delay.h> /* For ssleep/msleep */ |
@@ -148,11 +143,6 @@ static struct scsi_transport_template *ahc_linux_transport_template = NULL; | |||
148 | */ | 143 | */ |
149 | spinlock_t ahc_list_spinlock; | 144 | spinlock_t ahc_list_spinlock; |
150 | 145 | ||
151 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) | ||
152 | /* For dynamic sglist size calculation. */ | ||
153 | u_int ahc_linux_nseg; | ||
154 | #endif | ||
155 | |||
156 | /* | 146 | /* |
157 | * Set this to the delay in seconds after SCSI bus reset. | 147 | * Set this to the delay in seconds after SCSI bus reset. |
158 | * Note, we honor this only for the initial bus reset. | 148 | * Note, we honor this only for the initial bus reset. |
@@ -436,15 +426,12 @@ static void ahc_linux_handle_scsi_status(struct ahc_softc *, | |||
436 | struct ahc_linux_device *, | 426 | struct ahc_linux_device *, |
437 | struct scb *); | 427 | struct scb *); |
438 | static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, | 428 | static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, |
439 | Scsi_Cmnd *cmd); | 429 | struct scsi_cmnd *cmd); |
440 | static void ahc_linux_sem_timeout(u_long arg); | 430 | static void ahc_linux_sem_timeout(u_long arg); |
441 | static void ahc_linux_freeze_simq(struct ahc_softc *ahc); | 431 | static void ahc_linux_freeze_simq(struct ahc_softc *ahc); |
442 | static void ahc_linux_release_simq(u_long arg); | 432 | static void ahc_linux_release_simq(u_long arg); |
443 | static void ahc_linux_dev_timed_unfreeze(u_long arg); | 433 | static int ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag); |
444 | static int ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag); | ||
445 | static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc); | 434 | static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc); |
446 | static void ahc_linux_size_nseg(void); | ||
447 | static void ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc); | ||
448 | static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc, | 435 | static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc, |
449 | struct ahc_devinfo *devinfo); | 436 | struct ahc_devinfo *devinfo); |
450 | static void ahc_linux_device_queue_depth(struct ahc_softc *ahc, | 437 | static void ahc_linux_device_queue_depth(struct ahc_softc *ahc, |
@@ -458,54 +445,27 @@ static struct ahc_linux_device* ahc_linux_alloc_device(struct ahc_softc*, | |||
458 | u_int); | 445 | u_int); |
459 | static void ahc_linux_free_device(struct ahc_softc*, | 446 | static void ahc_linux_free_device(struct ahc_softc*, |
460 | struct ahc_linux_device*); | 447 | struct ahc_linux_device*); |
461 | static void ahc_linux_run_device_queue(struct ahc_softc*, | 448 | static int ahc_linux_run_command(struct ahc_softc*, |
462 | struct ahc_linux_device*); | 449 | struct ahc_linux_device *, |
450 | struct scsi_cmnd *); | ||
463 | static void ahc_linux_setup_tag_info_global(char *p); | 451 | static void ahc_linux_setup_tag_info_global(char *p); |
464 | static aic_option_callback_t ahc_linux_setup_tag_info; | 452 | static aic_option_callback_t ahc_linux_setup_tag_info; |
465 | static int aic7xxx_setup(char *s); | 453 | static int aic7xxx_setup(char *s); |
466 | static int ahc_linux_next_unit(void); | 454 | static int ahc_linux_next_unit(void); |
467 | static void ahc_runq_tasklet(unsigned long data); | ||
468 | static struct ahc_cmd *ahc_linux_run_complete_queue(struct ahc_softc *ahc); | ||
469 | 455 | ||
470 | /********************************* Inlines ************************************/ | 456 | /********************************* Inlines ************************************/ |
471 | static __inline void ahc_schedule_runq(struct ahc_softc *ahc); | ||
472 | static __inline struct ahc_linux_device* | 457 | static __inline struct ahc_linux_device* |
473 | ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, | 458 | ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, |
474 | u_int target, u_int lun, int alloc); | 459 | u_int target, u_int lun); |
475 | static __inline void ahc_schedule_completeq(struct ahc_softc *ahc); | ||
476 | static __inline void ahc_linux_check_device_queue(struct ahc_softc *ahc, | ||
477 | struct ahc_linux_device *dev); | ||
478 | static __inline struct ahc_linux_device * | ||
479 | ahc_linux_next_device_to_run(struct ahc_softc *ahc); | ||
480 | static __inline void ahc_linux_run_device_queues(struct ahc_softc *ahc); | ||
481 | static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*); | 460 | static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*); |
482 | 461 | ||
483 | static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, | 462 | static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, |
484 | struct ahc_dma_seg *sg, | 463 | struct ahc_dma_seg *sg, |
485 | dma_addr_t addr, bus_size_t len); | 464 | dma_addr_t addr, bus_size_t len); |
486 | 465 | ||
487 | static __inline void | ||
488 | ahc_schedule_completeq(struct ahc_softc *ahc) | ||
489 | { | ||
490 | if ((ahc->platform_data->flags & AHC_RUN_CMPLT_Q_TIMER) == 0) { | ||
491 | ahc->platform_data->flags |= AHC_RUN_CMPLT_Q_TIMER; | ||
492 | ahc->platform_data->completeq_timer.expires = jiffies; | ||
493 | add_timer(&ahc->platform_data->completeq_timer); | ||
494 | } | ||
495 | } | ||
496 | |||
497 | /* | ||
498 | * Must be called with our lock held. | ||
499 | */ | ||
500 | static __inline void | ||
501 | ahc_schedule_runq(struct ahc_softc *ahc) | ||
502 | { | ||
503 | tasklet_schedule(&ahc->platform_data->runq_tasklet); | ||
504 | } | ||
505 | |||
506 | static __inline struct ahc_linux_device* | 466 | static __inline struct ahc_linux_device* |
507 | ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target, | 467 | ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target, |
508 | u_int lun, int alloc) | 468 | u_int lun) |
509 | { | 469 | { |
510 | struct ahc_linux_target *targ; | 470 | struct ahc_linux_target *targ; |
511 | struct ahc_linux_device *dev; | 471 | struct ahc_linux_device *dev; |
@@ -515,102 +475,15 @@ ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target, | |||
515 | if (channel != 0) | 475 | if (channel != 0) |
516 | target_offset += 8; | 476 | target_offset += 8; |
517 | targ = ahc->platform_data->targets[target_offset]; | 477 | targ = ahc->platform_data->targets[target_offset]; |
518 | if (targ == NULL) { | 478 | BUG_ON(targ == NULL); |
519 | if (alloc != 0) { | ||
520 | targ = ahc_linux_alloc_target(ahc, channel, target); | ||
521 | if (targ == NULL) | ||
522 | return (NULL); | ||
523 | } else | ||
524 | return (NULL); | ||
525 | } | ||
526 | dev = targ->devices[lun]; | 479 | dev = targ->devices[lun]; |
527 | if (dev == NULL && alloc != 0) | 480 | return dev; |
528 | dev = ahc_linux_alloc_device(ahc, targ, lun); | ||
529 | return (dev); | ||
530 | } | ||
531 | |||
532 | #define AHC_LINUX_MAX_RETURNED_ERRORS 4 | ||
533 | static struct ahc_cmd * | ||
534 | ahc_linux_run_complete_queue(struct ahc_softc *ahc) | ||
535 | { | ||
536 | struct ahc_cmd *acmd; | ||
537 | u_long done_flags; | ||
538 | int with_errors; | ||
539 | |||
540 | with_errors = 0; | ||
541 | ahc_done_lock(ahc, &done_flags); | ||
542 | while ((acmd = TAILQ_FIRST(&ahc->platform_data->completeq)) != NULL) { | ||
543 | Scsi_Cmnd *cmd; | ||
544 | |||
545 | if (with_errors > AHC_LINUX_MAX_RETURNED_ERRORS) { | ||
546 | /* | ||
547 | * Linux uses stack recursion to requeue | ||
548 | * commands that need to be retried. Avoid | ||
549 | * blowing out the stack by "spoon feeding" | ||
550 | * commands that completed with error back | ||
551 | * the operating system in case they are going | ||
552 | * to be retried. "ick" | ||
553 | */ | ||
554 | ahc_schedule_completeq(ahc); | ||
555 | break; | ||
556 | } | ||
557 | TAILQ_REMOVE(&ahc->platform_data->completeq, | ||
558 | acmd, acmd_links.tqe); | ||
559 | cmd = &acmd_scsi_cmd(acmd); | ||
560 | cmd->host_scribble = NULL; | ||
561 | if (ahc_cmd_get_transaction_status(cmd) != DID_OK | ||
562 | || (cmd->result & 0xFF) != SCSI_STATUS_OK) | ||
563 | with_errors++; | ||
564 | |||
565 | cmd->scsi_done(cmd); | ||
566 | } | ||
567 | ahc_done_unlock(ahc, &done_flags); | ||
568 | return (acmd); | ||
569 | } | ||
570 | |||
571 | static __inline void | ||
572 | ahc_linux_check_device_queue(struct ahc_softc *ahc, | ||
573 | struct ahc_linux_device *dev) | ||
574 | { | ||
575 | if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) != 0 | ||
576 | && dev->active == 0) { | ||
577 | dev->flags &= ~AHC_DEV_FREEZE_TIL_EMPTY; | ||
578 | dev->qfrozen--; | ||
579 | } | ||
580 | |||
581 | if (TAILQ_FIRST(&dev->busyq) == NULL | ||
582 | || dev->openings == 0 || dev->qfrozen != 0) | ||
583 | return; | ||
584 | |||
585 | ahc_linux_run_device_queue(ahc, dev); | ||
586 | } | ||
587 | |||
588 | static __inline struct ahc_linux_device * | ||
589 | ahc_linux_next_device_to_run(struct ahc_softc *ahc) | ||
590 | { | ||
591 | |||
592 | if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0 | ||
593 | || (ahc->platform_data->qfrozen != 0)) | ||
594 | return (NULL); | ||
595 | return (TAILQ_FIRST(&ahc->platform_data->device_runq)); | ||
596 | } | ||
597 | |||
598 | static __inline void | ||
599 | ahc_linux_run_device_queues(struct ahc_softc *ahc) | ||
600 | { | ||
601 | struct ahc_linux_device *dev; | ||
602 | |||
603 | while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) { | ||
604 | TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links); | ||
605 | dev->flags &= ~AHC_DEV_ON_RUN_LIST; | ||
606 | ahc_linux_check_device_queue(ahc, dev); | ||
607 | } | ||
608 | } | 481 | } |
609 | 482 | ||
610 | static __inline void | 483 | static __inline void |
611 | ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb) | 484 | ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb) |
612 | { | 485 | { |
613 | Scsi_Cmnd *cmd; | 486 | struct scsi_cmnd *cmd; |
614 | 487 | ||
615 | cmd = scb->io_ctx; | 488 | cmd = scb->io_ctx; |
616 | ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE); | 489 | ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE); |
@@ -650,109 +523,15 @@ ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, | |||
650 | return (consumed); | 523 | return (consumed); |
651 | } | 524 | } |
652 | 525 | ||
653 | /************************ Host template entry points *************************/ | ||
654 | static int ahc_linux_detect(Scsi_Host_Template *); | ||
655 | static int ahc_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *)); | ||
656 | static const char *ahc_linux_info(struct Scsi_Host *); | ||
657 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | ||
658 | static int ahc_linux_slave_alloc(Scsi_Device *); | ||
659 | static int ahc_linux_slave_configure(Scsi_Device *); | ||
660 | static void ahc_linux_slave_destroy(Scsi_Device *); | ||
661 | #if defined(__i386__) | ||
662 | static int ahc_linux_biosparam(struct scsi_device*, | ||
663 | struct block_device*, | ||
664 | sector_t, int[]); | ||
665 | #endif | ||
666 | #else | ||
667 | static int ahc_linux_release(struct Scsi_Host *); | ||
668 | static void ahc_linux_select_queue_depth(struct Scsi_Host *host, | ||
669 | Scsi_Device *scsi_devs); | ||
670 | #if defined(__i386__) | ||
671 | static int ahc_linux_biosparam(Disk *, kdev_t, int[]); | ||
672 | #endif | ||
673 | #endif | ||
674 | static int ahc_linux_bus_reset(Scsi_Cmnd *); | ||
675 | static int ahc_linux_dev_reset(Scsi_Cmnd *); | ||
676 | static int ahc_linux_abort(Scsi_Cmnd *); | ||
677 | |||
678 | /* | ||
679 | * Calculate a safe value for AHC_NSEG (as expressed through ahc_linux_nseg). | ||
680 | * | ||
681 | * In pre-2.5.X... | ||
682 | * The midlayer allocates an S/G array dynamically when a command is issued | ||
683 | * using SCSI malloc. This array, which is in an OS dependent format that | ||
684 | * must later be copied to our private S/G list, is sized to house just the | ||
685 | * number of segments needed for the current transfer. Since the code that | ||
686 | * sizes the SCSI malloc pool does not take into consideration fragmentation | ||
687 | * of the pool, executing transactions numbering just a fraction of our | ||
688 | * concurrent transaction limit with list lengths aproaching AHC_NSEG will | ||
689 | * quickly depleat the SCSI malloc pool of usable space. Unfortunately, the | ||
690 | * mid-layer does not properly handle this scsi malloc failures for the S/G | ||
691 | * array and the result can be a lockup of the I/O subsystem. We try to size | ||
692 | * our S/G list so that it satisfies our drivers allocation requirements in | ||
693 | * addition to avoiding fragmentation of the SCSI malloc pool. | ||
694 | */ | ||
695 | static void | ||
696 | ahc_linux_size_nseg(void) | ||
697 | { | ||
698 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) | ||
699 | u_int cur_size; | ||
700 | u_int best_size; | ||
701 | |||
702 | /* | ||
703 | * The SCSI allocator rounds to the nearest 512 bytes | ||
704 | * an cannot allocate across a page boundary. Our algorithm | ||
705 | * is to start at 1K of scsi malloc space per-command and | ||
706 | * loop through all factors of the PAGE_SIZE and pick the best. | ||
707 | */ | ||
708 | best_size = 0; | ||
709 | for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) { | ||
710 | u_int nseg; | ||
711 | |||
712 | nseg = cur_size / sizeof(struct scatterlist); | ||
713 | if (nseg < AHC_LINUX_MIN_NSEG) | ||
714 | continue; | ||
715 | |||
716 | if (best_size == 0) { | ||
717 | best_size = cur_size; | ||
718 | ahc_linux_nseg = nseg; | ||
719 | } else { | ||
720 | u_int best_rem; | ||
721 | u_int cur_rem; | ||
722 | |||
723 | /* | ||
724 | * Compare the traits of the current "best_size" | ||
725 | * with the current size to determine if the | ||
726 | * current size is a better size. | ||
727 | */ | ||
728 | best_rem = best_size % sizeof(struct scatterlist); | ||
729 | cur_rem = cur_size % sizeof(struct scatterlist); | ||
730 | if (cur_rem < best_rem) { | ||
731 | best_size = cur_size; | ||
732 | ahc_linux_nseg = nseg; | ||
733 | } | ||
734 | } | ||
735 | } | ||
736 | #endif | ||
737 | } | ||
738 | |||
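/*
 * Illustration only (not part of the commit): a minimal, standalone
 * sketch of the sizing heuristic implemented by the removed
 * ahc_linux_size_nseg() above.  The 32-byte scatterlist size, the
 * 4096-byte page size and the 64-segment minimum are assumed values
 * chosen purely for this example.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096u
#define EXAMPLE_SG_SIZE		32u	/* stand-in for sizeof(struct scatterlist) */
#define EXAMPLE_MIN_NSEG	64u	/* stand-in for AHC_LINUX_MIN_NSEG */

int main(void)
{
	unsigned int cur_size, best_size = 0, nseg_choice = 0;

	/* Walk the power-of-two allocation sizes from 1K up to a page. */
	for (cur_size = 1024; cur_size <= EXAMPLE_PAGE_SIZE; cur_size *= 2) {
		unsigned int nseg = cur_size / EXAMPLE_SG_SIZE;

		if (nseg < EXAMPLE_MIN_NSEG)
			continue;

		/* Prefer the size that wastes the least space per allocation. */
		if (best_size == 0 ||
		    cur_size % EXAMPLE_SG_SIZE < best_size % EXAMPLE_SG_SIZE) {
			best_size = cur_size;
			nseg_choice = nseg;
		}
	}
	printf("best_size=%u nseg=%u\n", best_size, nseg_choice);
	return 0;
}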
739 | /* | 526 | /* |
740 | * Try to detect an Adaptec 7XXX controller. | 527 | * Try to detect an Adaptec 7XXX controller. |
741 | */ | 528 | */ |
742 | static int | 529 | static int |
743 | ahc_linux_detect(Scsi_Host_Template *template) | 530 | ahc_linux_detect(struct scsi_host_template *template) |
744 | { | 531 | { |
745 | struct ahc_softc *ahc; | 532 | struct ahc_softc *ahc; |
746 | int found = 0; | 533 | int found = 0; |
747 | 534 | ||
748 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) | ||
749 | /* | ||
750 | * It is a bug that the upper layer takes | ||
751 | * this lock just prior to calling us. | ||
752 | */ | ||
753 | spin_unlock_irq(&io_request_lock); | ||
754 | #endif | ||
755 | |||
756 | /* | 535 | /* |
757 | * Sanity checking of Linux SCSI data structures so | 536 | * Sanity checking of Linux SCSI data structures so |
758 | * that some of our hacks^H^H^H^H^Hassumptions aren't | 537 | * that some of our hacks^H^H^H^H^Hassumptions aren't |
@@ -764,7 +543,6 @@ ahc_linux_detect(Scsi_Host_Template *template) | |||
764 | printf("ahc_linux_detect: Unable to attach\n"); | 543 | printf("ahc_linux_detect: Unable to attach\n"); |
765 | return (0); | 544 | return (0); |
766 | } | 545 | } |
767 | ahc_linux_size_nseg(); | ||
768 | /* | 546 | /* |
769 | * If we've been passed any parameters, process them now. | 547 | * If we've been passed any parameters, process them now. |
770 | */ | 548 | */ |
@@ -793,48 +571,11 @@ ahc_linux_detect(Scsi_Host_Template *template) | |||
793 | found++; | 571 | found++; |
794 | } | 572 | } |
795 | 573 | ||
796 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) | ||
797 | spin_lock_irq(&io_request_lock); | ||
798 | #endif | ||
799 | aic7xxx_detect_complete++; | 574 | aic7xxx_detect_complete++; |
800 | 575 | ||
801 | return (found); | 576 | return (found); |
802 | } | 577 | } |
803 | 578 | ||
804 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) | ||
805 | /* | ||
806 | * Free the passed in Scsi_Host memory structures prior to unloading the | ||
807 | * module. | ||
808 | */ | ||
809 | int | ||
810 | ahc_linux_release(struct Scsi_Host * host) | ||
811 | { | ||
812 | struct ahc_softc *ahc; | ||
813 | u_long l; | ||
814 | |||
815 | ahc_list_lock(&l); | ||
816 | if (host != NULL) { | ||
817 | |||
818 | /* | ||
819 | * We should be able to just perform | ||
820 | * the free directly, but check our | ||
821 | * list for extra sanity. | ||
822 | */ | ||
823 | ahc = ahc_find_softc(*(struct ahc_softc **)host->hostdata); | ||
824 | if (ahc != NULL) { | ||
825 | u_long s; | ||
826 | |||
827 | ahc_lock(ahc, &s); | ||
828 | ahc_intr_enable(ahc, FALSE); | ||
829 | ahc_unlock(ahc, &s); | ||
830 | ahc_free(ahc); | ||
831 | } | ||
832 | } | ||
833 | ahc_list_unlock(&l); | ||
834 | return (0); | ||
835 | } | ||
836 | #endif | ||
837 | |||
838 | /* | 579 | /* |
839 | * Return a string describing the driver. | 580 | * Return a string describing the driver. |
840 | */ | 581 | */ |
@@ -867,11 +608,10 @@ ahc_linux_info(struct Scsi_Host *host) | |||
867 | * Queue an SCB to the controller. | 608 | * Queue an SCB to the controller. |
868 | */ | 609 | */ |
869 | static int | 610 | static int |
870 | ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *)) | 611 | ahc_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) |
871 | { | 612 | { |
872 | struct ahc_softc *ahc; | 613 | struct ahc_softc *ahc; |
873 | struct ahc_linux_device *dev; | 614 | struct ahc_linux_device *dev; |
874 | u_long flags; | ||
875 | 615 | ||
876 | ahc = *(struct ahc_softc **)cmd->device->host->hostdata; | 616 | ahc = *(struct ahc_softc **)cmd->device->host->hostdata; |
877 | 617 | ||
@@ -880,205 +620,152 @@ ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *)) | |||
880 | */ | 620 | */ |
881 | cmd->scsi_done = scsi_done; | 621 | cmd->scsi_done = scsi_done; |
882 | 622 | ||
883 | ahc_midlayer_entrypoint_lock(ahc, &flags); | ||
884 | |||
885 | /* | 623 | /* |
886 | * Close the race of a command that was in the process of | 624 | * Close the race of a command that was in the process of |
887 | * being queued to us just as our simq was frozen. Let | 625 | * being queued to us just as our simq was frozen. Let |
888 | * DV commands through so long as we are only frozen to | 626 | * DV commands through so long as we are only frozen to |
889 | * perform DV. | 627 | * perform DV. |
890 | */ | 628 | */ |
891 | if (ahc->platform_data->qfrozen != 0) { | 629 | if (ahc->platform_data->qfrozen != 0) |
630 | return SCSI_MLQUEUE_HOST_BUSY; | ||
892 | 631 | ||
893 | ahc_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ); | ||
894 | ahc_linux_queue_cmd_complete(ahc, cmd); | ||
895 | ahc_schedule_completeq(ahc); | ||
896 | ahc_midlayer_entrypoint_unlock(ahc, &flags); | ||
897 | return (0); | ||
898 | } | ||
899 | dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id, | 632 | dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id, |
900 | cmd->device->lun, /*alloc*/TRUE); | 633 | cmd->device->lun); |
901 | if (dev == NULL) { | 634 | BUG_ON(dev == NULL); |
902 | ahc_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL); | 635 | |
903 | ahc_linux_queue_cmd_complete(ahc, cmd); | ||
904 | ahc_schedule_completeq(ahc); | ||
905 | ahc_midlayer_entrypoint_unlock(ahc, &flags); | ||
906 | printf("%s: aic7xxx_linux_queue - Unable to allocate device!\n", | ||
907 | ahc_name(ahc)); | ||
908 | return (0); | ||
909 | } | ||
910 | cmd->result = CAM_REQ_INPROG << 16; | 636 | cmd->result = CAM_REQ_INPROG << 16; |
911 | TAILQ_INSERT_TAIL(&dev->busyq, (struct ahc_cmd *)cmd, acmd_links.tqe); | 637 | |
912 | if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) { | 638 | return ahc_linux_run_command(ahc, dev, cmd); |
913 | TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links); | ||
914 | dev->flags |= AHC_DEV_ON_RUN_LIST; | ||
915 | ahc_linux_run_device_queues(ahc); | ||
916 | } | ||
917 | ahc_midlayer_entrypoint_unlock(ahc, &flags); | ||
918 | return (0); | ||
919 | } | 639 | } |
920 | 640 | ||
921 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | ||
922 | static int | 641 | static int |
923 | ahc_linux_slave_alloc(Scsi_Device *device) | 642 | ahc_linux_slave_alloc(struct scsi_device *device) |
924 | { | 643 | { |
925 | struct ahc_softc *ahc; | 644 | struct ahc_softc *ahc; |
645 | struct ahc_linux_target *targ; | ||
646 | struct scsi_target *starget = device->sdev_target; | ||
647 | struct ahc_linux_device *dev; | ||
648 | unsigned int target_offset; | ||
649 | unsigned long flags; | ||
650 | int retval = -ENOMEM; | ||
651 | |||
652 | target_offset = starget->id; | ||
653 | if (starget->channel != 0) | ||
654 | target_offset += 8; | ||
926 | 655 | ||
927 | ahc = *((struct ahc_softc **)device->host->hostdata); | 656 | ahc = *((struct ahc_softc **)device->host->hostdata); |
928 | if (bootverbose) | 657 | if (bootverbose) |
929 | printf("%s: Slave Alloc %d\n", ahc_name(ahc), device->id); | 658 | printf("%s: Slave Alloc %d\n", ahc_name(ahc), device->id); |
930 | return (0); | 659 | ahc_lock(ahc, &flags); |
660 | targ = ahc->platform_data->targets[target_offset]; | ||
661 | if (targ == NULL) { | ||
662 | struct seeprom_config *sc; | ||
663 | |||
664 | targ = ahc_linux_alloc_target(ahc, starget->channel, | ||
665 | starget->id); | ||
666 | sc = ahc->seep_config; | ||
667 | if (targ == NULL) | ||
668 | goto out; | ||
669 | |||
670 | if (sc) { | ||
671 | unsigned short scsirate; | ||
672 | struct ahc_devinfo devinfo; | ||
673 | struct ahc_initiator_tinfo *tinfo; | ||
674 | struct ahc_tmode_tstate *tstate; | ||
675 | char channel = starget->channel + 'A'; | ||
676 | unsigned int our_id = ahc->our_id; | ||
677 | |||
678 | if (starget->channel) | ||
679 | our_id = ahc->our_id_b; | ||
680 | |||
681 | if ((ahc->features & AHC_ULTRA2) != 0) { | ||
682 | scsirate = sc->device_flags[target_offset] & CFXFER; | ||
683 | } else { | ||
684 | scsirate = (sc->device_flags[target_offset] & CFXFER) << 4; | ||
685 | if (sc->device_flags[target_offset] & CFSYNCH) | ||
686 | scsirate |= SOFS; | ||
687 | } | ||
688 | if (sc->device_flags[target_offset] & CFWIDEB) { | ||
689 | scsirate |= WIDEXFER; | ||
690 | spi_max_width(starget) = 1; | ||
691 | } else | ||
692 | spi_max_width(starget) = 0; | ||
693 | spi_min_period(starget) = | ||
694 | ahc_find_period(ahc, scsirate, AHC_SYNCRATE_DT); | ||
695 | tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, | ||
696 | targ->target, &tstate); | ||
697 | ahc_compile_devinfo(&devinfo, our_id, targ->target, | ||
698 | CAM_LUN_WILDCARD, channel, | ||
699 | ROLE_INITIATOR); | ||
700 | ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0, | ||
701 | AHC_TRANS_GOAL, /*paused*/FALSE); | ||
702 | ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, | ||
703 | AHC_TRANS_GOAL, /*paused*/FALSE); | ||
704 | } | ||
705 | |||
706 | } | ||
707 | dev = targ->devices[device->lun]; | ||
708 | if (dev == NULL) { | ||
709 | dev = ahc_linux_alloc_device(ahc, targ, device->lun); | ||
710 | if (dev == NULL) | ||
711 | goto out; | ||
712 | } | ||
713 | retval = 0; | ||
714 | |||
715 | out: | ||
716 | ahc_unlock(ahc, &flags); | ||
717 | return retval; | ||
931 | } | 718 | } |
932 | 719 | ||
933 | static int | 720 | static int |
934 | ahc_linux_slave_configure(Scsi_Device *device) | 721 | ahc_linux_slave_configure(struct scsi_device *device) |
935 | { | 722 | { |
936 | struct ahc_softc *ahc; | 723 | struct ahc_softc *ahc; |
937 | struct ahc_linux_device *dev; | 724 | struct ahc_linux_device *dev; |
938 | u_long flags; | ||
939 | 725 | ||
940 | ahc = *((struct ahc_softc **)device->host->hostdata); | 726 | ahc = *((struct ahc_softc **)device->host->hostdata); |
727 | |||
941 | if (bootverbose) | 728 | if (bootverbose) |
942 | printf("%s: Slave Configure %d\n", ahc_name(ahc), device->id); | 729 | printf("%s: Slave Configure %d\n", ahc_name(ahc), device->id); |
943 | ahc_midlayer_entrypoint_lock(ahc, &flags); | 730 | |
944 | /* | 731 | dev = ahc_linux_get_device(ahc, device->channel, device->id, |
945 | * Since Linux has attached to the device, configure | 732 | device->lun); |
946 | * it so we don't free and allocate the device | 733 | dev->scsi_device = device; |
947 | * structure on every command. | 734 | ahc_linux_device_queue_depth(ahc, dev); |
948 | */ | ||
949 | dev = ahc_linux_get_device(ahc, device->channel, | ||
950 | device->id, device->lun, | ||
951 | /*alloc*/TRUE); | ||
952 | if (dev != NULL) { | ||
953 | dev->flags &= ~AHC_DEV_UNCONFIGURED; | ||
954 | dev->scsi_device = device; | ||
955 | ahc_linux_device_queue_depth(ahc, dev); | ||
956 | } | ||
957 | ahc_midlayer_entrypoint_unlock(ahc, &flags); | ||
958 | 735 | ||
959 | /* Initial Domain Validation */ | 736 | /* Initial Domain Validation */ |
960 | if (!spi_initial_dv(device->sdev_target)) | 737 | if (!spi_initial_dv(device->sdev_target)) |
961 | spi_dv_device(device); | 738 | spi_dv_device(device); |
962 | 739 | ||
963 | return (0); | 740 | return 0; |
964 | } | 741 | } |
965 | 742 | ||
966 | static void | 743 | static void |
967 | ahc_linux_slave_destroy(Scsi_Device *device) | 744 | ahc_linux_slave_destroy(struct scsi_device *device) |
968 | { | 745 | { |
969 | struct ahc_softc *ahc; | 746 | struct ahc_softc *ahc; |
970 | struct ahc_linux_device *dev; | 747 | struct ahc_linux_device *dev; |
971 | u_long flags; | ||
972 | 748 | ||
973 | ahc = *((struct ahc_softc **)device->host->hostdata); | 749 | ahc = *((struct ahc_softc **)device->host->hostdata); |
974 | if (bootverbose) | 750 | if (bootverbose) |
975 | printf("%s: Slave Destroy %d\n", ahc_name(ahc), device->id); | 751 | printf("%s: Slave Destroy %d\n", ahc_name(ahc), device->id); |
976 | ahc_midlayer_entrypoint_lock(ahc, &flags); | ||
977 | dev = ahc_linux_get_device(ahc, device->channel, | 752 | dev = ahc_linux_get_device(ahc, device->channel, |
978 | device->id, device->lun, | 753 | device->id, device->lun); |
979 | /*alloc*/FALSE); | ||
980 | /* | ||
981 | * Filter out "silly" deletions of real devices by only | ||
982 | * deleting devices that have had slave_configure() | ||
983 | * called on them. All other devices that have not | ||
984 | * been configured will automatically be deleted by | ||
985 | * the refcounting process. | ||
986 | */ | ||
987 | if (dev != NULL | ||
988 | && (dev->flags & AHC_DEV_SLAVE_CONFIGURED) != 0) { | ||
989 | dev->flags |= AHC_DEV_UNCONFIGURED; | ||
990 | if (TAILQ_EMPTY(&dev->busyq) | ||
991 | && dev->active == 0 | ||
992 | && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0) | ||
993 | ahc_linux_free_device(ahc, dev); | ||
994 | } | ||
995 | ahc_midlayer_entrypoint_unlock(ahc, &flags); | ||
996 | } | ||
997 | #else | ||
998 | /* | ||
999 | * Sets the queue depth for each SCSI device hanging | ||
1000 | * off the input host adapter. | ||
1001 | */ | ||
1002 | static void | ||
1003 | ahc_linux_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs) | ||
1004 | { | ||
1005 | Scsi_Device *device; | ||
1006 | Scsi_Device *ldev; | ||
1007 | struct ahc_softc *ahc; | ||
1008 | u_long flags; | ||
1009 | 754 | ||
1010 | ahc = *((struct ahc_softc **)host->hostdata); | 755 | BUG_ON(dev->active); |
1011 | ahc_lock(ahc, &flags); | ||
1012 | for (device = scsi_devs; device != NULL; device = device->next) { | ||
1013 | 756 | ||
1014 | /* | 757 | ahc_linux_free_device(ahc, dev); |
1015 | * Watch out for duplicate devices. This works around | ||
1016 | * some quirks in how the SCSI scanning code does its | ||
1017 | * device management. | ||
1018 | */ | ||
1019 | for (ldev = scsi_devs; ldev != device; ldev = ldev->next) { | ||
1020 | if (ldev->host == device->host | ||
1021 | && ldev->channel == device->channel | ||
1022 | && ldev->id == device->id | ||
1023 | && ldev->lun == device->lun) | ||
1024 | break; | ||
1025 | } | ||
1026 | /* Skip duplicate. */ | ||
1027 | if (ldev != device) | ||
1028 | continue; | ||
1029 | |||
1030 | if (device->host == host) { | ||
1031 | struct ahc_linux_device *dev; | ||
1032 | |||
1033 | /* | ||
1034 | * Since Linux has attached to the device, configure | ||
1035 | * it so we don't free and allocate the device | ||
1036 | * structure on every command. | ||
1037 | */ | ||
1038 | dev = ahc_linux_get_device(ahc, device->channel, | ||
1039 | device->id, device->lun, | ||
1040 | /*alloc*/TRUE); | ||
1041 | if (dev != NULL) { | ||
1042 | dev->flags &= ~AHC_DEV_UNCONFIGURED; | ||
1043 | dev->scsi_device = device; | ||
1044 | ahc_linux_device_queue_depth(ahc, dev); | ||
1045 | device->queue_depth = dev->openings | ||
1046 | + dev->active; | ||
1047 | if ((dev->flags & (AHC_DEV_Q_BASIC | ||
1048 | | AHC_DEV_Q_TAGGED)) == 0) { | ||
1049 | /* | ||
1050 | * We allow the OS to queue 2 untagged | ||
1051 | * transactions to us at any time even | ||
1052 | * though we can only execute them | ||
1053 | * serially on the controller/device. | ||
1054 | * This should remove some latency. | ||
1055 | */ | ||
1056 | device->queue_depth = 2; | ||
1057 | } | ||
1058 | } | ||
1059 | } | ||
1060 | } | ||
1061 | ahc_unlock(ahc, &flags); | ||
1062 | } | 758 | } |
1063 | #endif | ||
1064 | 759 | ||
1065 | #if defined(__i386__) | 760 | #if defined(__i386__) |
1066 | /* | 761 | /* |
1067 | * Return the disk geometry for the given SCSI device. | 762 | * Return the disk geometry for the given SCSI device. |
1068 | */ | 763 | */ |
1069 | static int | 764 | static int |
1070 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | ||
1071 | ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, | 765 | ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, |
1072 | sector_t capacity, int geom[]) | 766 | sector_t capacity, int geom[]) |
1073 | { | 767 | { |
1074 | uint8_t *bh; | 768 | uint8_t *bh; |
1075 | #else | ||
1076 | ahc_linux_biosparam(Disk *disk, kdev_t dev, int geom[]) | ||
1077 | { | ||
1078 | struct scsi_device *sdev = disk->device; | ||
1079 | u_long capacity = disk->capacity; | ||
1080 | struct buffer_head *bh; | ||
1081 | #endif | ||
1082 | int heads; | 769 | int heads; |
1083 | int sectors; | 770 | int sectors; |
1084 | int cylinders; | 771 | int cylinders; |
@@ -1090,22 +777,11 @@ ahc_linux_biosparam(Disk *disk, kdev_t dev, int geom[]) | |||
1090 | ahc = *((struct ahc_softc **)sdev->host->hostdata); | 777 | ahc = *((struct ahc_softc **)sdev->host->hostdata); |
1091 | channel = sdev->channel; | 778 | channel = sdev->channel; |
1092 | 779 | ||
1093 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | ||
1094 | bh = scsi_bios_ptable(bdev); | 780 | bh = scsi_bios_ptable(bdev); |
1095 | #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,17) | ||
1096 | bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, block_size(dev)); | ||
1097 | #else | ||
1098 | bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, 1024); | ||
1099 | #endif | ||
1100 | |||
1101 | if (bh) { | 781 | if (bh) { |
1102 | ret = scsi_partsize(bh, capacity, | 782 | ret = scsi_partsize(bh, capacity, |
1103 | &geom[2], &geom[0], &geom[1]); | 783 | &geom[2], &geom[0], &geom[1]); |
1104 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | ||
1105 | kfree(bh); | 784 | kfree(bh); |
1106 | #else | ||
1107 | brelse(bh); | ||
1108 | #endif | ||
1109 | if (ret != -1) | 785 | if (ret != -1) |
1110 | return (ret); | 786 | return (ret); |
1111 | } | 787 | } |
@@ -1135,7 +811,7 @@ ahc_linux_biosparam(Disk *disk, kdev_t dev, int geom[]) | |||
1135 | * Abort the current SCSI command(s). | 811 | * Abort the current SCSI command(s). |
1136 | */ | 812 | */ |
1137 | static int | 813 | static int |
1138 | ahc_linux_abort(Scsi_Cmnd *cmd) | 814 | ahc_linux_abort(struct scsi_cmnd *cmd) |
1139 | { | 815 | { |
1140 | int error; | 816 | int error; |
1141 | 817 | ||
@@ -1149,7 +825,7 @@ ahc_linux_abort(Scsi_Cmnd *cmd) | |||
1149 | * Attempt to send a target reset message to the device that timed out. | 825 | * Attempt to send a target reset message to the device that timed out. |
1150 | */ | 826 | */ |
1151 | static int | 827 | static int |
1152 | ahc_linux_dev_reset(Scsi_Cmnd *cmd) | 828 | ahc_linux_dev_reset(struct scsi_cmnd *cmd) |
1153 | { | 829 | { |
1154 | int error; | 830 | int error; |
1155 | 831 | ||
@@ -1163,18 +839,14 @@ ahc_linux_dev_reset(Scsi_Cmnd *cmd) | |||
1163 | * Reset the SCSI bus. | 839 | * Reset the SCSI bus. |
1164 | */ | 840 | */ |
1165 | static int | 841 | static int |
1166 | ahc_linux_bus_reset(Scsi_Cmnd *cmd) | 842 | ahc_linux_bus_reset(struct scsi_cmnd *cmd) |
1167 | { | 843 | { |
1168 | struct ahc_softc *ahc; | 844 | struct ahc_softc *ahc; |
1169 | u_long s; | ||
1170 | int found; | 845 | int found; |
1171 | 846 | ||
1172 | ahc = *(struct ahc_softc **)cmd->device->host->hostdata; | 847 | ahc = *(struct ahc_softc **)cmd->device->host->hostdata; |
1173 | ahc_midlayer_entrypoint_lock(ahc, &s); | ||
1174 | found = ahc_reset_channel(ahc, cmd->device->channel + 'A', | 848 | found = ahc_reset_channel(ahc, cmd->device->channel + 'A', |
1175 | /*initiate reset*/TRUE); | 849 | /*initiate reset*/TRUE); |
1176 | ahc_linux_run_complete_queue(ahc); | ||
1177 | ahc_midlayer_entrypoint_unlock(ahc, &s); | ||
1178 | 850 | ||
1179 | if (bootverbose) | 851 | if (bootverbose) |
1180 | printf("%s: SCSI bus reset delivered. " | 852 | printf("%s: SCSI bus reset delivered. " |
@@ -1183,7 +855,7 @@ ahc_linux_bus_reset(Scsi_Cmnd *cmd) | |||
1183 | return SUCCESS; | 855 | return SUCCESS; |
1184 | } | 856 | } |
1185 | 857 | ||
1186 | Scsi_Host_Template aic7xxx_driver_template = { | 858 | struct scsi_host_template aic7xxx_driver_template = { |
1187 | .module = THIS_MODULE, | 859 | .module = THIS_MODULE, |
1188 | .name = "aic7xxx", | 860 | .name = "aic7xxx", |
1189 | .proc_info = ahc_linux_proc_info, | 861 | .proc_info = ahc_linux_proc_info, |
@@ -1206,33 +878,6 @@ Scsi_Host_Template aic7xxx_driver_template = { | |||
1206 | 878 | ||
1207 | /**************************** Tasklet Handler *********************************/ | 879 | /**************************** Tasklet Handler *********************************/ |
1208 | 880 | ||
1209 | /* | ||
1210 | * In 2.4.X and above, this routine is called from a tasklet, | ||
1211 | * so we must re-acquire our lock prior to executing this code. | ||
1212 | * In all prior kernels, ahc_schedule_runq() calls this routine | ||
1213 | * directly and ahc_schedule_runq() is called with our lock held. | ||
1214 | */ | ||
1215 | static void | ||
1216 | ahc_runq_tasklet(unsigned long data) | ||
1217 | { | ||
1218 | struct ahc_softc* ahc; | ||
1219 | struct ahc_linux_device *dev; | ||
1220 | u_long flags; | ||
1221 | |||
1222 | ahc = (struct ahc_softc *)data; | ||
1223 | ahc_lock(ahc, &flags); | ||
1224 | while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) { | ||
1225 | |||
1226 | TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links); | ||
1227 | dev->flags &= ~AHC_DEV_ON_RUN_LIST; | ||
1228 | ahc_linux_check_device_queue(ahc, dev); | ||
1229 | /* Yeild to our interrupt handler */ | ||
1230 | ahc_unlock(ahc, &flags); | ||
1231 | ahc_lock(ahc, &flags); | ||
1232 | } | ||
1233 | ahc_unlock(ahc, &flags); | ||
1234 | } | ||
1235 | |||
1236 | /******************************** Macros **************************************/ | 881 | /******************************** Macros **************************************/ |
1237 | #define BUILD_SCSIID(ahc, cmd) \ | 882 | #define BUILD_SCSIID(ahc, cmd) \ |
1238 | ((((cmd)->device->id << TID_SHIFT) & TID) \ | 883 | ((((cmd)->device->id << TID_SHIFT) & TID) \ |
@@ -1278,37 +923,11 @@ int | |||
1278 | ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr, | 923 | ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr, |
1279 | int flags, bus_dmamap_t *mapp) | 924 | int flags, bus_dmamap_t *mapp) |
1280 | { | 925 | { |
1281 | bus_dmamap_t map; | ||
1282 | |||
1283 | map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT); | ||
1284 | if (map == NULL) | ||
1285 | return (ENOMEM); | ||
1286 | /* | ||
1287 | * Although we can dma data above 4GB, our | ||
1288 | * "consistent" memory is below 4GB for | ||
1289 | * space efficiency reasons (only need a 4byte | ||
1290 | * address). For this reason, we have to reset | ||
1291 | * our dma mask when doing allocations. | ||
1292 | */ | ||
1293 | if (ahc->dev_softc != NULL) | ||
1294 | if (pci_set_dma_mask(ahc->dev_softc, 0xFFFFFFFF)) { | ||
1295 | printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); | ||
1296 | kfree(map); | ||
1297 | return (ENODEV); | ||
1298 | } | ||
1299 | *vaddr = pci_alloc_consistent(ahc->dev_softc, | 926 | *vaddr = pci_alloc_consistent(ahc->dev_softc, |
1300 | dmat->maxsize, &map->bus_addr); | 927 | dmat->maxsize, mapp); |
1301 | if (ahc->dev_softc != NULL) | ||
1302 | if (pci_set_dma_mask(ahc->dev_softc, | ||
1303 | ahc->platform_data->hw_dma_mask)) { | ||
1304 | printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); | ||
1305 | kfree(map); | ||
1306 | return (ENODEV); | ||
1307 | } | ||
1308 | if (*vaddr == NULL) | 928 | if (*vaddr == NULL) |
1309 | return (ENOMEM); | 929 | return ENOMEM; |
1310 | *mapp = map; | 930 | return 0; |
1311 | return(0); | ||
1312 | } | 931 | } |
1313 | 932 | ||
1314 | void | 933 | void |
@@ -1316,7 +935,7 @@ ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat, | |||
1316 | void* vaddr, bus_dmamap_t map) | 935 | void* vaddr, bus_dmamap_t map) |
1317 | { | 936 | { |
1318 | pci_free_consistent(ahc->dev_softc, dmat->maxsize, | 937 | pci_free_consistent(ahc->dev_softc, dmat->maxsize, |
1319 | vaddr, map->bus_addr); | 938 | vaddr, map); |
1320 | } | 939 | } |
1321 | 940 | ||
1322 | int | 941 | int |
@@ -1330,7 +949,7 @@ ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map, | |||
1330 | */ | 949 | */ |
1331 | bus_dma_segment_t stack_sg; | 950 | bus_dma_segment_t stack_sg; |
1332 | 951 | ||
1333 | stack_sg.ds_addr = map->bus_addr; | 952 | stack_sg.ds_addr = map; |
1334 | stack_sg.ds_len = dmat->maxsize; | 953 | stack_sg.ds_len = dmat->maxsize; |
1335 | cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0); | 954 | cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0); |
1336 | return (0); | 955 | return (0); |
@@ -1339,12 +958,6 @@ ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map, | |||
1339 | void | 958 | void |
1340 | ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map) | 959 | ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map) |
1341 | { | 960 | { |
1342 | /* | ||
1343 | * The map may is NULL in our < 2.3.X implementation. | ||
1344 | * Now it's 2.6.5, but just in case... | ||
1345 | */ | ||
1346 | BUG_ON(map == NULL); | ||
1347 | free(map, M_DEVBUF); | ||
1348 | } | 961 | } |
1349 | 962 | ||
1350 | int | 963 | int |
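/*
 * Illustration only (not part of the commit): the consistent-DMA
 * pattern the simplified wrappers above now rely on.  With the
 * bus_dmamap_t reduced to a bare dma_addr_t, an allocation is just a
 * pci_alloc_consistent()/pci_free_consistent() pair keyed by the
 * tag's maxsize.  "example_alloc", "example_free" and "my_tag_size"
 * are hypothetical names used only for this sketch.
 */
#include <linux/pci.h>

static void *example_alloc(struct pci_dev *pdev, size_t my_tag_size,
			   dma_addr_t *busaddr)
{
	/* Returns the CPU pointer; the bus address comes back in *busaddr. */
	return pci_alloc_consistent(pdev, my_tag_size, busaddr);
}

static void example_free(struct pci_dev *pdev, size_t my_tag_size,
			 void *vaddr, dma_addr_t busaddr)
{
	pci_free_consistent(pdev, my_tag_size, vaddr, busaddr);
}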
@@ -1550,7 +1163,7 @@ __setup("aic7xxx=", aic7xxx_setup); | |||
1550 | uint32_t aic7xxx_verbose; | 1163 | uint32_t aic7xxx_verbose; |
1551 | 1164 | ||
1552 | int | 1165 | int |
1553 | ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template) | 1166 | ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *template) |
1554 | { | 1167 | { |
1555 | char buf[80]; | 1168 | char buf[80]; |
1556 | struct Scsi_Host *host; | 1169 | struct Scsi_Host *host; |
@@ -1564,11 +1177,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template) | |||
1564 | 1177 | ||
1565 | *((struct ahc_softc **)host->hostdata) = ahc; | 1178 | *((struct ahc_softc **)host->hostdata) = ahc; |
1566 | ahc_lock(ahc, &s); | 1179 | ahc_lock(ahc, &s); |
1567 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | ||
1568 | scsi_assign_lock(host, &ahc->platform_data->spin_lock); | 1180 | scsi_assign_lock(host, &ahc->platform_data->spin_lock); |
1569 | #elif AHC_SCSI_HAS_HOST_LOCK != 0 | ||
1570 | host->lock = &ahc->platform_data->spin_lock; | ||
1571 | #endif | ||
1572 | ahc->platform_data->host = host; | 1181 | ahc->platform_data->host = host; |
1573 | host->can_queue = AHC_MAX_QUEUE; | 1182 | host->can_queue = AHC_MAX_QUEUE; |
1574 | host->cmd_per_lun = 2; | 1183 | host->cmd_per_lun = 2; |
@@ -1587,19 +1196,14 @@ ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template) | |||
1587 | ahc_set_name(ahc, new_name); | 1196 | ahc_set_name(ahc, new_name); |
1588 | } | 1197 | } |
1589 | host->unique_id = ahc->unit; | 1198 | host->unique_id = ahc->unit; |
1590 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) | ||
1591 | scsi_set_pci_device(host, ahc->dev_softc); | ||
1592 | #endif | ||
1593 | ahc_linux_initialize_scsi_bus(ahc); | 1199 | ahc_linux_initialize_scsi_bus(ahc); |
1594 | ahc_intr_enable(ahc, TRUE); | 1200 | ahc_intr_enable(ahc, TRUE); |
1595 | ahc_unlock(ahc, &s); | 1201 | ahc_unlock(ahc, &s); |
1596 | 1202 | ||
1597 | host->transportt = ahc_linux_transport_template; | 1203 | host->transportt = ahc_linux_transport_template; |
1598 | 1204 | ||
1599 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | ||
1600 | scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); /* XXX handle failure */ | 1205 | scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); /* XXX handle failure */ |
1601 | scsi_scan_host(host); | 1206 | scsi_scan_host(host); |
1602 | #endif | ||
1603 | return (0); | 1207 | return (0); |
1604 | } | 1208 | } |
1605 | 1209 | ||
@@ -1717,19 +1321,9 @@ ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg) | |||
1717 | if (ahc->platform_data == NULL) | 1321 | if (ahc->platform_data == NULL) |
1718 | return (ENOMEM); | 1322 | return (ENOMEM); |
1719 | memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data)); | 1323 | memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data)); |
1720 | TAILQ_INIT(&ahc->platform_data->completeq); | ||
1721 | TAILQ_INIT(&ahc->platform_data->device_runq); | ||
1722 | ahc->platform_data->irq = AHC_LINUX_NOIRQ; | 1324 | ahc->platform_data->irq = AHC_LINUX_NOIRQ; |
1723 | ahc->platform_data->hw_dma_mask = 0xFFFFFFFF; | ||
1724 | ahc_lockinit(ahc); | 1325 | ahc_lockinit(ahc); |
1725 | ahc_done_lockinit(ahc); | ||
1726 | init_timer(&ahc->platform_data->completeq_timer); | ||
1727 | ahc->platform_data->completeq_timer.data = (u_long)ahc; | ||
1728 | ahc->platform_data->completeq_timer.function = | ||
1729 | (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue; | ||
1730 | init_MUTEX_LOCKED(&ahc->platform_data->eh_sem); | 1326 | init_MUTEX_LOCKED(&ahc->platform_data->eh_sem); |
1731 | tasklet_init(&ahc->platform_data->runq_tasklet, ahc_runq_tasklet, | ||
1732 | (unsigned long)ahc); | ||
1733 | ahc->seltime = (aic7xxx_seltime & 0x3) << 4; | 1327 | ahc->seltime = (aic7xxx_seltime & 0x3) << 4; |
1734 | ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4; | 1328 | ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4; |
1735 | if (aic7xxx_pci_parity == 0) | 1329 | if (aic7xxx_pci_parity == 0) |
@@ -1746,12 +1340,8 @@ ahc_platform_free(struct ahc_softc *ahc) | |||
1746 | int i, j; | 1340 | int i, j; |
1747 | 1341 | ||
1748 | if (ahc->platform_data != NULL) { | 1342 | if (ahc->platform_data != NULL) { |
1749 | del_timer_sync(&ahc->platform_data->completeq_timer); | ||
1750 | tasklet_kill(&ahc->platform_data->runq_tasklet); | ||
1751 | if (ahc->platform_data->host != NULL) { | 1343 | if (ahc->platform_data->host != NULL) { |
1752 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | ||
1753 | scsi_remove_host(ahc->platform_data->host); | 1344 | scsi_remove_host(ahc->platform_data->host); |
1754 | #endif | ||
1755 | scsi_host_put(ahc->platform_data->host); | 1345 | scsi_host_put(ahc->platform_data->host); |
1756 | } | 1346 | } |
1757 | 1347 | ||
@@ -1787,16 +1377,7 @@ ahc_platform_free(struct ahc_softc *ahc) | |||
1787 | release_mem_region(ahc->platform_data->mem_busaddr, | 1377 | release_mem_region(ahc->platform_data->mem_busaddr, |
1788 | 0x1000); | 1378 | 0x1000); |
1789 | } | 1379 | } |
1790 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) | 1380 | |
1791 | /* | ||
1792 | * In 2.4 we detach from the scsi midlayer before the PCI | ||
1793 | * layer invokes our remove callback. No per-instance | ||
1794 | * detach is provided, so we must reach inside the PCI | ||
1795 | * subsystem's internals and detach our driver manually. | ||
1796 | */ | ||
1797 | if (ahc->dev_softc != NULL) | ||
1798 | ahc->dev_softc->driver = NULL; | ||
1799 | #endif | ||
1800 | free(ahc->platform_data, M_DEVBUF); | 1381 | free(ahc->platform_data, M_DEVBUF); |
1801 | } | 1382 | } |
1802 | } | 1383 | } |
@@ -1820,7 +1401,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, | |||
1820 | 1401 | ||
1821 | dev = ahc_linux_get_device(ahc, devinfo->channel - 'A', | 1402 | dev = ahc_linux_get_device(ahc, devinfo->channel - 'A', |
1822 | devinfo->target, | 1403 | devinfo->target, |
1823 | devinfo->lun, /*alloc*/FALSE); | 1404 | devinfo->lun); |
1824 | if (dev == NULL) | 1405 | if (dev == NULL) |
1825 | return; | 1406 | return; |
1826 | was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED); | 1407 | was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED); |
@@ -1873,7 +1454,6 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, | |||
1873 | dev->maxtags = 0; | 1454 | dev->maxtags = 0; |
1874 | dev->openings = 1 - dev->active; | 1455 | dev->openings = 1 - dev->active; |
1875 | } | 1456 | } |
1876 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | ||
1877 | if (dev->scsi_device != NULL) { | 1457 | if (dev->scsi_device != NULL) { |
1878 | switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) { | 1458 | switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) { |
1879 | case AHC_DEV_Q_BASIC: | 1459 | case AHC_DEV_Q_BASIC: |
@@ -1899,90 +1479,13 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, | |||
1899 | break; | 1479 | break; |
1900 | } | 1480 | } |
1901 | } | 1481 | } |
1902 | #endif | ||
1903 | } | 1482 | } |
1904 | 1483 | ||
1905 | int | 1484 | int |
1906 | ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel, | 1485 | ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel, |
1907 | int lun, u_int tag, role_t role, uint32_t status) | 1486 | int lun, u_int tag, role_t role, uint32_t status) |
1908 | { | 1487 | { |
1909 | int chan; | 1488 | return 0; |
1910 | int maxchan; | ||
1911 | int targ; | ||
1912 | int maxtarg; | ||
1913 | int clun; | ||
1914 | int maxlun; | ||
1915 | int count; | ||
1916 | |||
1917 | if (tag != SCB_LIST_NULL) | ||
1918 | return (0); | ||
1919 | |||
1920 | chan = 0; | ||
1921 | if (channel != ALL_CHANNELS) { | ||
1922 | chan = channel - 'A'; | ||
1923 | maxchan = chan + 1; | ||
1924 | } else { | ||
1925 | maxchan = (ahc->features & AHC_TWIN) ? 2 : 1; | ||
1926 | } | ||
1927 | targ = 0; | ||
1928 | if (target != CAM_TARGET_WILDCARD) { | ||
1929 | targ = target; | ||
1930 | maxtarg = targ + 1; | ||
1931 | } else { | ||
1932 | maxtarg = (ahc->features & AHC_WIDE) ? 16 : 8; | ||
1933 | } | ||
1934 | clun = 0; | ||
1935 | if (lun != CAM_LUN_WILDCARD) { | ||
1936 | clun = lun; | ||
1937 | maxlun = clun + 1; | ||
1938 | } else { | ||
1939 | maxlun = AHC_NUM_LUNS; | ||
1940 | } | ||
1941 | |||
1942 | count = 0; | ||
1943 | for (; chan < maxchan; chan++) { | ||
1944 | |||
1945 | for (; targ < maxtarg; targ++) { | ||
1946 | |||
1947 | for (; clun < maxlun; clun++) { | ||
1948 | struct ahc_linux_device *dev; | ||
1949 | struct ahc_busyq *busyq; | ||
1950 | struct ahc_cmd *acmd; | ||
1951 | |||
1952 | dev = ahc_linux_get_device(ahc, chan, | ||
1953 | targ, clun, | ||
1954 | /*alloc*/FALSE); | ||
1955 | if (dev == NULL) | ||
1956 | continue; | ||
1957 | |||
1958 | busyq = &dev->busyq; | ||
1959 | while ((acmd = TAILQ_FIRST(busyq)) != NULL) { | ||
1960 | Scsi_Cmnd *cmd; | ||
1961 | |||
1962 | cmd = &acmd_scsi_cmd(acmd); | ||
1963 | TAILQ_REMOVE(busyq, acmd, | ||
1964 | acmd_links.tqe); | ||
1965 | count++; | ||
1966 | cmd->result = status << 16; | ||
1967 | ahc_linux_queue_cmd_complete(ahc, cmd); | ||
1968 | } | ||
1969 | } | ||
1970 | } | ||
1971 | } | ||
1972 | |||
1973 | return (count); | ||
1974 | } | ||
1975 | |||
1976 | static void | ||
1977 | ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc) | ||
1978 | { | ||
1979 | u_long flags; | ||
1980 | |||
1981 | ahc_lock(ahc, &flags); | ||
1982 | del_timer(&ahc->platform_data->completeq_timer); | ||
1983 | ahc->platform_data->flags &= ~AHC_RUN_CMPLT_Q_TIMER; | ||
1984 | ahc_linux_run_complete_queue(ahc); | ||
1985 | ahc_unlock(ahc, &flags); | ||
1986 | } | 1489 | } |
1987 | 1490 | ||
1988 | static u_int | 1491 | static u_int |
@@ -2045,213 +1548,200 @@ ahc_linux_device_queue_depth(struct ahc_softc *ahc, | |||
2045 | } | 1548 | } |
2046 | } | 1549 | } |
2047 | 1550 | ||
2048 | static void | 1551 | static int |
2049 | ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev) | 1552 | ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev, |
1553 | struct scsi_cmnd *cmd) | ||
2050 | { | 1554 | { |
2051 | struct ahc_cmd *acmd; | ||
2052 | struct scsi_cmnd *cmd; | ||
2053 | struct scb *scb; | 1555 | struct scb *scb; |
2054 | struct hardware_scb *hscb; | 1556 | struct hardware_scb *hscb; |
2055 | struct ahc_initiator_tinfo *tinfo; | 1557 | struct ahc_initiator_tinfo *tinfo; |
2056 | struct ahc_tmode_tstate *tstate; | 1558 | struct ahc_tmode_tstate *tstate; |
2057 | uint16_t mask; | 1559 | uint16_t mask; |
1560 | struct scb_tailq *untagged_q = NULL; | ||
2058 | 1561 | ||
2059 | if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0) | 1562 | /* |
2060 | panic("running device on run list"); | 1563 | * Schedule us to run later. The only reason we are not |
1564 | * running is because the whole controller Q is frozen. | ||
1565 | */ | ||
1566 | if (ahc->platform_data->qfrozen != 0) | ||
1567 | return SCSI_MLQUEUE_HOST_BUSY; | ||
2061 | 1568 | ||
2062 | while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL | 1569 | /* |
2063 | && dev->openings > 0 && dev->qfrozen == 0) { | 1570 | * We only allow one untagged transaction |
1571 | * per target in the initiator role unless | ||
1572 | * we are storing a full busy target *lun* | ||
1573 | * table in SCB space. | ||
1574 | */ | ||
1575 | if (!blk_rq_tagged(cmd->request) | ||
1576 | && (ahc->features & AHC_SCB_BTT) == 0) { | ||
1577 | int target_offset; | ||
2064 | 1578 | ||
2065 | /* | 1579 | target_offset = cmd->device->id + cmd->device->channel * 8; |
2066 | * Schedule us to run later. The only reason we are not | 1580 | untagged_q = &(ahc->untagged_queues[target_offset]); |
2067 | * running is because the whole controller Q is frozen. | 1581 | if (!TAILQ_EMPTY(untagged_q)) |
2068 | */ | 1582 | /* if we're already executing an untagged command |
2069 | if (ahc->platform_data->qfrozen != 0) { | 1583 | * we're busy to another */ |
2070 | TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, | 1584 | return SCSI_MLQUEUE_DEVICE_BUSY; |
2071 | dev, links); | 1585 | } |
2072 | dev->flags |= AHC_DEV_ON_RUN_LIST; | ||
2073 | return; | ||
2074 | } | ||
2075 | /* | ||
2076 | * Get an scb to use. | ||
2077 | */ | ||
2078 | if ((scb = ahc_get_scb(ahc)) == NULL) { | ||
2079 | TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, | ||
2080 | dev, links); | ||
2081 | dev->flags |= AHC_DEV_ON_RUN_LIST; | ||
2082 | ahc->flags |= AHC_RESOURCE_SHORTAGE; | ||
2083 | return; | ||
2084 | } | ||
2085 | TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe); | ||
2086 | cmd = &acmd_scsi_cmd(acmd); | ||
2087 | scb->io_ctx = cmd; | ||
2088 | scb->platform_data->dev = dev; | ||
2089 | hscb = scb->hscb; | ||
2090 | cmd->host_scribble = (char *)scb; | ||
2091 | 1586 | ||
2092 | /* | 1587 | /* |
2093 | * Fill out basics of the HSCB. | 1588 | * Get an scb to use. |
2094 | */ | 1589 | */ |
2095 | hscb->control = 0; | 1590 | if ((scb = ahc_get_scb(ahc)) == NULL) { |
2096 | hscb->scsiid = BUILD_SCSIID(ahc, cmd); | 1591 | ahc->flags |= AHC_RESOURCE_SHORTAGE; |
2097 | hscb->lun = cmd->device->lun; | 1592 | return SCSI_MLQUEUE_HOST_BUSY; |
2098 | mask = SCB_GET_TARGET_MASK(ahc, scb); | 1593 | } |
2099 | tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb), | ||
2100 | SCB_GET_OUR_ID(scb), | ||
2101 | SCB_GET_TARGET(ahc, scb), &tstate); | ||
2102 | hscb->scsirate = tinfo->scsirate; | ||
2103 | hscb->scsioffset = tinfo->curr.offset; | ||
2104 | if ((tstate->ultraenb & mask) != 0) | ||
2105 | hscb->control |= ULTRAENB; | ||
2106 | |||
2107 | if ((ahc->user_discenable & mask) != 0) | ||
2108 | hscb->control |= DISCENB; | ||
2109 | |||
2110 | if ((tstate->auto_negotiate & mask) != 0) { | ||
2111 | scb->flags |= SCB_AUTO_NEGOTIATE; | ||
2112 | scb->hscb->control |= MK_MESSAGE; | ||
2113 | } | ||
2114 | 1594 | ||
2115 | if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) { | 1595 | scb->io_ctx = cmd; |
2116 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | 1596 | scb->platform_data->dev = dev; |
2117 | int msg_bytes; | 1597 | hscb = scb->hscb; |
2118 | uint8_t tag_msgs[2]; | 1598 | cmd->host_scribble = (char *)scb; |
2119 | 1599 | ||
2120 | msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs); | 1600 | /* |
2121 | if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) { | 1601 | * Fill out basics of the HSCB. |
2122 | hscb->control |= tag_msgs[0]; | 1602 | */ |
2123 | if (tag_msgs[0] == MSG_ORDERED_TASK) | 1603 | hscb->control = 0; |
2124 | dev->commands_since_idle_or_otag = 0; | 1604 | hscb->scsiid = BUILD_SCSIID(ahc, cmd); |
2125 | } else | 1605 | hscb->lun = cmd->device->lun; |
2126 | #endif | 1606 | mask = SCB_GET_TARGET_MASK(ahc, scb); |
2127 | if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH | 1607 | tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb), |
2128 | && (dev->flags & AHC_DEV_Q_TAGGED) != 0) { | 1608 | SCB_GET_OUR_ID(scb), |
2129 | hscb->control |= MSG_ORDERED_TASK; | 1609 | SCB_GET_TARGET(ahc, scb), &tstate); |
1610 | hscb->scsirate = tinfo->scsirate; | ||
1611 | hscb->scsioffset = tinfo->curr.offset; | ||
1612 | if ((tstate->ultraenb & mask) != 0) | ||
1613 | hscb->control |= ULTRAENB; | ||
1614 | |||
1615 | if ((ahc->user_discenable & mask) != 0) | ||
1616 | hscb->control |= DISCENB; | ||
1617 | |||
1618 | if ((tstate->auto_negotiate & mask) != 0) { | ||
1619 | scb->flags |= SCB_AUTO_NEGOTIATE; | ||
1620 | scb->hscb->control |= MK_MESSAGE; | ||
1621 | } | ||
1622 | |||
1623 | if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) { | ||
1624 | int msg_bytes; | ||
1625 | uint8_t tag_msgs[2]; | ||
1626 | |||
1627 | msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs); | ||
1628 | if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) { | ||
1629 | hscb->control |= tag_msgs[0]; | ||
1630 | if (tag_msgs[0] == MSG_ORDERED_TASK) | ||
2130 | dev->commands_since_idle_or_otag = 0; | 1631 | dev->commands_since_idle_or_otag = 0; |
2131 | } else { | 1632 | } else if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH |
2132 | hscb->control |= MSG_SIMPLE_TASK; | 1633 | && (dev->flags & AHC_DEV_Q_TAGGED) != 0) { |
2133 | } | 1634 | hscb->control |= MSG_ORDERED_TASK; |
2134 | } | 1635 | dev->commands_since_idle_or_otag = 0; |
2135 | |||
2136 | hscb->cdb_len = cmd->cmd_len; | ||
2137 | if (hscb->cdb_len <= 12) { | ||
2138 | memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len); | ||
2139 | } else { | 1636 | } else { |
2140 | memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len); | 1637 | hscb->control |= MSG_SIMPLE_TASK; |
2141 | scb->flags |= SCB_CDB32_PTR; | ||
2142 | } | 1638 | } |
1639 | } | ||
2143 | 1640 | ||
2144 | scb->platform_data->xfer_len = 0; | 1641 | hscb->cdb_len = cmd->cmd_len; |
2145 | ahc_set_residual(scb, 0); | 1642 | if (hscb->cdb_len <= 12) { |
2146 | ahc_set_sense_residual(scb, 0); | 1643 | memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len); |
2147 | scb->sg_count = 0; | 1644 | } else { |
2148 | if (cmd->use_sg != 0) { | 1645 | memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len); |
2149 | struct ahc_dma_seg *sg; | 1646 | scb->flags |= SCB_CDB32_PTR; |
2150 | struct scatterlist *cur_seg; | 1647 | } |
2151 | struct scatterlist *end_seg; | ||
2152 | int nseg; | ||
2153 | |||
2154 | cur_seg = (struct scatterlist *)cmd->request_buffer; | ||
2155 | nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg, | ||
2156 | cmd->sc_data_direction); | ||
2157 | end_seg = cur_seg + nseg; | ||
2158 | /* Copy the segments into the SG list. */ | ||
2159 | sg = scb->sg_list; | ||
2160 | /* | ||
2161 | * The sg_count may be larger than nseg if | ||
2162 | * a transfer crosses a 32bit page. | ||
2163 | */ | ||
2164 | while (cur_seg < end_seg) { | ||
2165 | dma_addr_t addr; | ||
2166 | bus_size_t len; | ||
2167 | int consumed; | ||
2168 | |||
2169 | addr = sg_dma_address(cur_seg); | ||
2170 | len = sg_dma_len(cur_seg); | ||
2171 | consumed = ahc_linux_map_seg(ahc, scb, | ||
2172 | sg, addr, len); | ||
2173 | sg += consumed; | ||
2174 | scb->sg_count += consumed; | ||
2175 | cur_seg++; | ||
2176 | } | ||
2177 | sg--; | ||
2178 | sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); | ||
2179 | |||
2180 | /* | ||
2181 | * Reset the sg list pointer. | ||
2182 | */ | ||
2183 | scb->hscb->sgptr = | ||
2184 | ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); | ||
2185 | 1648 | ||
2186 | /* | 1649 | scb->platform_data->xfer_len = 0; |
2187 | * Copy the first SG into the "current" | 1650 | ahc_set_residual(scb, 0); |
2188 | * data pointer area. | 1651 | ahc_set_sense_residual(scb, 0); |
2189 | */ | 1652 | scb->sg_count = 0; |
2190 | scb->hscb->dataptr = scb->sg_list->addr; | 1653 | if (cmd->use_sg != 0) { |
2191 | scb->hscb->datacnt = scb->sg_list->len; | 1654 | struct ahc_dma_seg *sg; |
2192 | } else if (cmd->request_bufflen != 0) { | 1655 | struct scatterlist *cur_seg; |
2193 | struct ahc_dma_seg *sg; | 1656 | struct scatterlist *end_seg; |
1657 | int nseg; | ||
1658 | |||
1659 | cur_seg = (struct scatterlist *)cmd->request_buffer; | ||
1660 | nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg, | ||
1661 | cmd->sc_data_direction); | ||
1662 | end_seg = cur_seg + nseg; | ||
1663 | /* Copy the segments into the SG list. */ | ||
1664 | sg = scb->sg_list; | ||
1665 | /* | ||
1666 | * The sg_count may be larger than nseg if | ||
1667 | * a transfer crosses a 32bit page. | ||
1668 | */ | ||
1669 | while (cur_seg < end_seg) { | ||
2194 | dma_addr_t addr; | 1670 | dma_addr_t addr; |
2195 | 1671 | bus_size_t len; | |
2196 | sg = scb->sg_list; | 1672 | int consumed; |
2197 | addr = pci_map_single(ahc->dev_softc, | 1673 | |
2198 | cmd->request_buffer, | 1674 | addr = sg_dma_address(cur_seg); |
2199 | cmd->request_bufflen, | 1675 | len = sg_dma_len(cur_seg); |
2200 | cmd->sc_data_direction); | 1676 | consumed = ahc_linux_map_seg(ahc, scb, |
2201 | scb->platform_data->buf_busaddr = addr; | 1677 | sg, addr, len); |
2202 | scb->sg_count = ahc_linux_map_seg(ahc, scb, | 1678 | sg += consumed; |
2203 | sg, addr, | 1679 | scb->sg_count += consumed; |
2204 | cmd->request_bufflen); | 1680 | cur_seg++; |
2205 | sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); | ||
2206 | |||
2207 | /* | ||
2208 | * Reset the sg list pointer. | ||
2209 | */ | ||
2210 | scb->hscb->sgptr = | ||
2211 | ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); | ||
2212 | |||
2213 | /* | ||
2214 | * Copy the first SG into the "current" | ||
2215 | * data pointer area. | ||
2216 | */ | ||
2217 | scb->hscb->dataptr = sg->addr; | ||
2218 | scb->hscb->datacnt = sg->len; | ||
2219 | } else { | ||
2220 | scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL); | ||
2221 | scb->hscb->dataptr = 0; | ||
2222 | scb->hscb->datacnt = 0; | ||
2223 | scb->sg_count = 0; | ||
2224 | } | 1681 | } |
1682 | sg--; | ||
1683 | sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); | ||
2225 | 1684 | ||
2226 | ahc_sync_sglist(ahc, scb, BUS_DMASYNC_PREWRITE); | 1685 | /* |
2227 | LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links); | 1686 | * Reset the sg list pointer. |
2228 | dev->openings--; | 1687 | */ |
2229 | dev->active++; | 1688 | scb->hscb->sgptr = |
2230 | dev->commands_issued++; | 1689 | ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); |
2231 | if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0) | 1690 | |
2232 | dev->commands_since_idle_or_otag++; | 1691 | /* |
1692 | * Copy the first SG into the "current" | ||
1693 | * data pointer area. | ||
1694 | */ | ||
1695 | scb->hscb->dataptr = scb->sg_list->addr; | ||
1696 | scb->hscb->datacnt = scb->sg_list->len; | ||
1697 | } else if (cmd->request_bufflen != 0) { | ||
1698 | struct ahc_dma_seg *sg; | ||
1699 | dma_addr_t addr; | ||
1700 | |||
1701 | sg = scb->sg_list; | ||
1702 | addr = pci_map_single(ahc->dev_softc, | ||
1703 | cmd->request_buffer, | ||
1704 | cmd->request_bufflen, | ||
1705 | cmd->sc_data_direction); | ||
1706 | scb->platform_data->buf_busaddr = addr; | ||
1707 | scb->sg_count = ahc_linux_map_seg(ahc, scb, | ||
1708 | sg, addr, | ||
1709 | cmd->request_bufflen); | ||
1710 | sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); | ||
2233 | 1711 | ||
2234 | /* | 1712 | /* |
2235 | * We only allow one untagged transaction | 1713 | * Reset the sg list pointer. |
2236 | * per target in the initiator role unless | ||
2237 | * we are storing a full busy target *lun* | ||
2238 | * table in SCB space. | ||
2239 | */ | 1714 | */ |
2240 | if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0 | 1715 | scb->hscb->sgptr = |
2241 | && (ahc->features & AHC_SCB_BTT) == 0) { | 1716 | ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); |
2242 | struct scb_tailq *untagged_q; | 1717 | |
2243 | int target_offset; | 1718 | /* |
2244 | 1719 | * Copy the first SG into the "current" | |
2245 | target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); | 1720 | * data pointer area. |
2246 | untagged_q = &(ahc->untagged_queues[target_offset]); | 1721 | */ |
2247 | TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe); | 1722 | scb->hscb->dataptr = sg->addr; |
2248 | scb->flags |= SCB_UNTAGGEDQ; | 1723 | scb->hscb->datacnt = sg->len; |
2249 | if (TAILQ_FIRST(untagged_q) != scb) | 1724 | } else { |
2250 | continue; | 1725 | scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL); |
2251 | } | 1726 | scb->hscb->dataptr = 0; |
2252 | scb->flags |= SCB_ACTIVE; | 1727 | scb->hscb->datacnt = 0; |
2253 | ahc_queue_scb(ahc, scb); | 1728 | scb->sg_count = 0; |
2254 | } | 1729 | } |
1730 | |||
1731 | LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links); | ||
1732 | dev->openings--; | ||
1733 | dev->active++; | ||
1734 | dev->commands_issued++; | ||
1735 | if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0) | ||
1736 | dev->commands_since_idle_or_otag++; | ||
1737 | |||
1738 | scb->flags |= SCB_ACTIVE; | ||
1739 | if (untagged_q) { | ||
1740 | TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe); | ||
1741 | scb->flags |= SCB_UNTAGGEDQ; | ||
1742 | } | ||
1743 | ahc_queue_scb(ahc, scb); | ||
1744 | return 0; | ||
2255 | } | 1745 | } |
2256 | 1746 | ||
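The hunk above builds the command's data path: the scatterlist is mapped with pci_map_sg(), each returned DMA segment is copied into the controller's segment list through ahc_linux_map_seg(), the final descriptor is tagged with AHC_DMA_LAST_SEG, and hscb->sgptr/dataptr/datacnt are primed from the first entry. The sketch below is a minimal user-space illustration of that walk-copy-mark-last pattern; struct dma_seg, struct hw_seg, HW_DMA_LAST_SEG and build_sg_list() are hypothetical stand-ins, and it deliberately ignores the boundary splitting hinted at by the "sg_count may be larger than nseg" comment.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the mapped scatterlist and the HBA's SG format. */
struct dma_seg { uint64_t addr; uint32_t len; };	/* what pci_map_sg() yields */
struct hw_seg  { uint32_t addr; uint32_t len; };	/* controller SG descriptor */

#define HW_DMA_LAST_SEG	0x80000000u			/* models AHC_DMA_LAST_SEG */

/* Copy mapped segments into the hardware SG list and mark the final entry. */
static int build_sg_list(struct hw_seg *sg, const struct dma_seg *seg, int nseg)
{
	int sg_count = 0;

	for (int i = 0; i < nseg; i++) {
		/* Low 32 address bits only; the real driver handles high bits separately. */
		sg[sg_count].addr = (uint32_t)seg[i].addr;
		sg[sg_count].len  = seg[i].len;
		sg_count++;
	}
	assert(sg_count > 0);
	sg[sg_count - 1].len |= HW_DMA_LAST_SEG;	/* terminate the list */
	return sg_count;
}

int main(void)
{
	struct dma_seg mapped[2] = { { 0x1000, 4096 }, { 0x3000, 512 } };
	struct hw_seg hw[2];
	int n = build_sg_list(hw, mapped, 2);

	/* The HSCB would then point at hw[0]: dataptr = addr, datacnt = len. */
	printf("%d segs, first: %#x/%u\n", n, hw[0].addr, hw[0].len & ~HW_DMA_LAST_SEG);
	return 0;
}

In the driver itself sg_count is accumulated from the return value of ahc_linux_map_seg() rather than assumed equal to nseg, for exactly the reason given in that comment.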
2257 | /* | 1747 | /* |
@@ -2267,9 +1757,6 @@ ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs) | |||
2267 | ahc = (struct ahc_softc *) dev_id; | 1757 | ahc = (struct ahc_softc *) dev_id; |
2268 | ahc_lock(ahc, &flags); | 1758 | ahc_lock(ahc, &flags); |
2269 | ours = ahc_intr(ahc); | 1759 | ours = ahc_intr(ahc); |
2270 | if (ahc_linux_next_device_to_run(ahc) != NULL) | ||
2271 | ahc_schedule_runq(ahc); | ||
2272 | ahc_linux_run_complete_queue(ahc); | ||
2273 | ahc_unlock(ahc, &flags); | 1760 | ahc_unlock(ahc, &flags); |
2274 | return IRQ_RETVAL(ours); | 1761 | return IRQ_RETVAL(ours); |
2275 | } | 1762 | } |
@@ -2278,8 +1765,6 @@ void | |||
2278 | ahc_platform_flushwork(struct ahc_softc *ahc) | 1765 | ahc_platform_flushwork(struct ahc_softc *ahc) |
2279 | { | 1766 | { |
2280 | 1767 | ||
2281 | while (ahc_linux_run_complete_queue(ahc) != NULL) | ||
2282 | ; | ||
2283 | } | 1768 | } |
2284 | 1769 | ||
2285 | static struct ahc_linux_target* | 1770 | static struct ahc_linux_target* |
@@ -2348,9 +1833,6 @@ ahc_linux_alloc_device(struct ahc_softc *ahc, | |||
2348 | if (dev == NULL) | 1833 | if (dev == NULL) |
2349 | return (NULL); | 1834 | return (NULL); |
2350 | memset(dev, 0, sizeof(*dev)); | 1835 | memset(dev, 0, sizeof(*dev)); |
2351 | init_timer(&dev->timer); | ||
2352 | TAILQ_INIT(&dev->busyq); | ||
2353 | dev->flags = AHC_DEV_UNCONFIGURED; | ||
2354 | dev->lun = lun; | 1836 | dev->lun = lun; |
2355 | dev->target = targ; | 1837 | dev->target = targ; |
2356 | 1838 | ||
@@ -2373,7 +1855,7 @@ ahc_linux_alloc_device(struct ahc_softc *ahc, | |||
2373 | } | 1855 | } |
2374 | 1856 | ||
2375 | static void | 1857 | static void |
2376 | __ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev) | 1858 | ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev) |
2377 | { | 1859 | { |
2378 | struct ahc_linux_target *targ; | 1860 | struct ahc_linux_target *targ; |
2379 | 1861 | ||
@@ -2385,13 +1867,6 @@ __ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev) | |||
2385 | ahc_linux_free_target(ahc, targ); | 1867 | ahc_linux_free_target(ahc, targ); |
2386 | } | 1868 | } |
2387 | 1869 | ||
2388 | static void | ||
2389 | ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev) | ||
2390 | { | ||
2391 | del_timer_sync(&dev->timer); | ||
2392 | __ahc_linux_free_device(ahc, dev); | ||
2393 | } | ||
2394 | |||
2395 | void | 1870 | void |
2396 | ahc_send_async(struct ahc_softc *ahc, char channel, | 1871 | ahc_send_async(struct ahc_softc *ahc, char channel, |
2397 | u_int target, u_int lun, ac_code code, void *arg) | 1872 | u_int target, u_int lun, ac_code code, void *arg) |
@@ -2463,28 +1938,9 @@ ahc_send_async(struct ahc_softc *ahc, char channel, | |||
2463 | } | 1938 | } |
2464 | case AC_SENT_BDR: | 1939 | case AC_SENT_BDR: |
2465 | { | 1940 | { |
2466 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | ||
2467 | WARN_ON(lun != CAM_LUN_WILDCARD); | 1941 | WARN_ON(lun != CAM_LUN_WILDCARD); |
2468 | scsi_report_device_reset(ahc->platform_data->host, | 1942 | scsi_report_device_reset(ahc->platform_data->host, |
2469 | channel - 'A', target); | 1943 | channel - 'A', target); |
2470 | #else | ||
2471 | Scsi_Device *scsi_dev; | ||
2472 | |||
2473 | /* | ||
2474 | * Find the SCSI device associated with this | ||
2475 | * request and indicate that a UA is expected. | ||
2476 | */ | ||
2477 | for (scsi_dev = ahc->platform_data->host->host_queue; | ||
2478 | scsi_dev != NULL; scsi_dev = scsi_dev->next) { | ||
2479 | if (channel - 'A' == scsi_dev->channel | ||
2480 | && target == scsi_dev->id | ||
2481 | && (lun == CAM_LUN_WILDCARD | ||
2482 | || lun == scsi_dev->lun)) { | ||
2483 | scsi_dev->was_reset = 1; | ||
2484 | scsi_dev->expecting_cc_ua = 1; | ||
2485 | } | ||
2486 | } | ||
2487 | #endif | ||
2488 | break; | 1944 | break; |
2489 | } | 1945 | } |
2490 | case AC_BUS_RESET: | 1946 | case AC_BUS_RESET: |
@@ -2504,7 +1960,7 @@ ahc_send_async(struct ahc_softc *ahc, char channel, | |||
2504 | void | 1960 | void |
2505 | ahc_done(struct ahc_softc *ahc, struct scb *scb) | 1961 | ahc_done(struct ahc_softc *ahc, struct scb *scb) |
2506 | { | 1962 | { |
2507 | Scsi_Cmnd *cmd; | 1963 | struct scsi_cmnd *cmd; |
2508 | struct ahc_linux_device *dev; | 1964 | struct ahc_linux_device *dev; |
2509 | 1965 | ||
2510 | LIST_REMOVE(scb, pending_links); | 1966 | LIST_REMOVE(scb, pending_links); |
@@ -2515,7 +1971,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb) | |||
2515 | target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); | 1971 | target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); |
2516 | untagged_q = &(ahc->untagged_queues[target_offset]); | 1972 | untagged_q = &(ahc->untagged_queues[target_offset]); |
2517 | TAILQ_REMOVE(untagged_q, scb, links.tqe); | 1973 | TAILQ_REMOVE(untagged_q, scb, links.tqe); |
2518 | ahc_run_untagged_queue(ahc, untagged_q); | 1974 | BUG_ON(!TAILQ_EMPTY(untagged_q)); |
2519 | } | 1975 | } |
2520 | 1976 | ||
2521 | if ((scb->flags & SCB_ACTIVE) == 0) { | 1977 | if ((scb->flags & SCB_ACTIVE) == 0) { |
@@ -2583,8 +2039,6 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb) | |||
2583 | } | 2039 | } |
2584 | } else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) { | 2040 | } else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) { |
2585 | ahc_linux_handle_scsi_status(ahc, dev, scb); | 2041 | ahc_linux_handle_scsi_status(ahc, dev, scb); |
2586 | } else if (ahc_get_transaction_status(scb) == CAM_SEL_TIMEOUT) { | ||
2587 | dev->flags |= AHC_DEV_UNCONFIGURED; | ||
2588 | } | 2042 | } |
2589 | 2043 | ||
2590 | if (dev->openings == 1 | 2044 | if (dev->openings == 1 |
@@ -2606,16 +2060,6 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb) | |||
2606 | if (dev->active == 0) | 2060 | if (dev->active == 0) |
2607 | dev->commands_since_idle_or_otag = 0; | 2061 | dev->commands_since_idle_or_otag = 0; |
2608 | 2062 | ||
2609 | if (TAILQ_EMPTY(&dev->busyq)) { | ||
2610 | if ((dev->flags & AHC_DEV_UNCONFIGURED) != 0 | ||
2611 | && dev->active == 0 | ||
2612 | && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0) | ||
2613 | ahc_linux_free_device(ahc, dev); | ||
2614 | } else if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) { | ||
2615 | TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links); | ||
2616 | dev->flags |= AHC_DEV_ON_RUN_LIST; | ||
2617 | } | ||
2618 | |||
2619 | if ((scb->flags & SCB_RECOVERY_SCB) != 0) { | 2063 | if ((scb->flags & SCB_RECOVERY_SCB) != 0) { |
2620 | printf("Recovery SCB completes\n"); | 2064 | printf("Recovery SCB completes\n"); |
2621 | if (ahc_get_transaction_status(scb) == CAM_BDR_SENT | 2065 | if (ahc_get_transaction_status(scb) == CAM_BDR_SENT |
@@ -2659,7 +2103,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc, | |||
2659 | case SCSI_STATUS_CHECK_COND: | 2103 | case SCSI_STATUS_CHECK_COND: |
2660 | case SCSI_STATUS_CMD_TERMINATED: | 2104 | case SCSI_STATUS_CMD_TERMINATED: |
2661 | { | 2105 | { |
2662 | Scsi_Cmnd *cmd; | 2106 | struct scsi_cmnd *cmd; |
2663 | 2107 | ||
2664 | /* | 2108 | /* |
2665 | * Copy sense information to the OS's cmd | 2109 | * Copy sense information to the OS's cmd |
@@ -2754,52 +2198,15 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc, | |||
2754 | ahc_platform_set_tags(ahc, &devinfo, | 2198 | ahc_platform_set_tags(ahc, &devinfo, |
2755 | (dev->flags & AHC_DEV_Q_BASIC) | 2199 | (dev->flags & AHC_DEV_Q_BASIC) |
2756 | ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED); | 2200 | ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED); |
2757 | /* FALLTHROUGH */ | ||
2758 | } | ||
2759 | case SCSI_STATUS_BUSY: | ||
2760 | { | ||
2761 | /* | ||
2762 | * Set a short timer to defer sending commands for | ||
2763 | * a bit since Linux will not delay in this case. | ||
2764 | */ | ||
2765 | if ((dev->flags & AHC_DEV_TIMER_ACTIVE) != 0) { | ||
2766 | printf("%s:%c:%d: Device Timer still active during " | ||
2767 | "busy processing\n", ahc_name(ahc), | ||
2768 | dev->target->channel, dev->target->target); | ||
2769 | break; | ||
2770 | } | ||
2771 | dev->flags |= AHC_DEV_TIMER_ACTIVE; | ||
2772 | dev->qfrozen++; | ||
2773 | init_timer(&dev->timer); | ||
2774 | dev->timer.data = (u_long)dev; | ||
2775 | dev->timer.expires = jiffies + (HZ/2); | ||
2776 | dev->timer.function = ahc_linux_dev_timed_unfreeze; | ||
2777 | add_timer(&dev->timer); | ||
2778 | break; | 2201 | break; |
2779 | } | 2202 | } |
2780 | } | 2203 | } |
2781 | } | 2204 | } |
2782 | 2205 | ||
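The deleted SCSI_STATUS_BUSY branch used the 2.6-era kernel timer API to back off before retrying: HZ ticks make one second, so an expiry of jiffies + (HZ/2) fires roughly 500 ms later, at which point ahc_linux_dev_timed_unfreeze() (also removed by this patch) unfroze the device queue. With the per-device busy queue gone, the midlayer's own requeue handling is relied on instead. For reference, a condensed sketch of the removed pattern, using the old init_timer()/.data/.function interface of that era rather than anything current:

/* Sketch of the removed busy-deferral pattern (pre-timer_setup() API). */
static void example_arm_unfreeze(struct ahc_linux_device *dev)
{
	dev->qfrozen++;
	init_timer(&dev->timer);
	dev->timer.data = (u_long)dev;			/* passed to the handler */
	dev->timer.expires = jiffies + (HZ / 2);	/* fire in ~500 ms       */
	dev->timer.function = ahc_linux_dev_timed_unfreeze;
	add_timer(&dev->timer);
}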
2783 | static void | 2206 | static void |
2784 | ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd) | 2207 | ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd) |
2785 | { | 2208 | { |
2786 | /* | 2209 | /* |
2787 | * Typically, the complete queue has very few entries | ||
2788 | * queued to it before the queue is emptied by | ||
2789 | * ahc_linux_run_complete_queue, so sorting the entries | ||
2790 | * by generation number should be inexpensive. | ||
2791 | * We perform the sort so that commands that complete | ||
2792 | * with an error are returned in the order originally | ||
2793 | * queued to the controller so that any subsequent retries | ||
2794 | * are performed in order. The underlying ahc routines do | ||
2795 | * not guarantee the order that aborted commands will be | ||
2796 | * returned to us. | ||
2797 | */ | ||
2798 | struct ahc_completeq *completeq; | ||
2799 | struct ahc_cmd *list_cmd; | ||
2800 | struct ahc_cmd *acmd; | ||
2801 | |||
2802 | /* | ||
2803 | * Map CAM error codes into Linux Error codes. We | 2210 | * Map CAM error codes into Linux Error codes. We |
2804 | * avoid the conversion so that the DV code has the | 2211 | * avoid the conversion so that the DV code has the |
2805 | * full error information available when making | 2212 | * full error information available when making |
@@ -2852,26 +2259,7 @@ ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd) | |||
2852 | new_status = DID_ERROR; | 2259 | new_status = DID_ERROR; |
2853 | break; | 2260 | break; |
2854 | case CAM_REQUEUE_REQ: | 2261 | case CAM_REQUEUE_REQ: |
2855 | /* | 2262 | new_status = DID_REQUEUE; |
2856 | * If we want the request requeued, make sure there | ||
2857 | * are sufficient retries. In the old scsi error code, | ||
2858 | * we used to be able to specify a result code that | ||
2859 | * bypassed the retry count. Now we must use this | ||
2860 | * hack. We also "fake" a check condition with | ||
2861 | * a sense code of ABORTED COMMAND. This seems to | ||
2862 | * evoke a retry even if this command is being sent | ||
2863 | * via the eh thread. Ick! Ick! Ick! | ||
2864 | */ | ||
2865 | if (cmd->retries > 0) | ||
2866 | cmd->retries--; | ||
2867 | new_status = DID_OK; | ||
2868 | ahc_cmd_set_scsi_status(cmd, SCSI_STATUS_CHECK_COND); | ||
2869 | cmd->result |= (DRIVER_SENSE << 24); | ||
2870 | memset(cmd->sense_buffer, 0, | ||
2871 | sizeof(cmd->sense_buffer)); | ||
2872 | cmd->sense_buffer[0] = SSD_ERRCODE_VALID | ||
2873 | | SSD_CURRENT_ERROR; | ||
2874 | cmd->sense_buffer[2] = SSD_KEY_ABORTED_COMMAND; | ||
2875 | break; | 2263 | break; |
2876 | default: | 2264 | default: |
2877 | /* We should never get here */ | 2265 | /* We should never get here */ |
@@ -2882,17 +2270,7 @@ ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd) | |||
2882 | ahc_cmd_set_transaction_status(cmd, new_status); | 2270 | ahc_cmd_set_transaction_status(cmd, new_status); |
2883 | } | 2271 | } |
2884 | 2272 | ||
2885 | completeq = &ahc->platform_data->completeq; | 2273 | cmd->scsi_done(cmd); |
2886 | list_cmd = TAILQ_FIRST(completeq); | ||
2887 | acmd = (struct ahc_cmd *)cmd; | ||
2888 | while (list_cmd != NULL | ||
2889 | && acmd_scsi_cmd(list_cmd).serial_number | ||
2890 | < acmd_scsi_cmd(acmd).serial_number) | ||
2891 | list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe); | ||
2892 | if (list_cmd != NULL) | ||
2893 | TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe); | ||
2894 | else | ||
2895 | TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe); | ||
2896 | } | 2274 | } |
2897 | 2275 | ||
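ahc_linux_queue_cmd_complete() now maps the CAM transaction status stored with the SCB onto a Linux host-byte code and completes the command directly through cmd->scsi_done(); CAM_REQUEUE_REQ simply becomes DID_REQUEUE instead of the old decrement-retries-and-fake-a-sense-buffer workaround, and the serial-number-sorted complete queue disappears. A reduced sketch of that kind of translation as a pure function follows; the EX_CAM_* and EX_DID_* enums are illustrative stand-ins covering only the codes visible in this hunk, while the real switch handles many more CAM statuses.

#include <stdio.h>

/* Hypothetical CAM-style transport statuses, for illustration only. */
enum cam_status_ex { EX_CAM_REQ_CMP, EX_CAM_REQUEUE_REQ, EX_CAM_UNCOR_PARITY };

/* Modelled after the Linux host-byte codes DID_OK, DID_ERROR, DID_REQUEUE. */
enum host_byte_ex { EX_DID_OK = 0x00, EX_DID_ERROR = 0x07, EX_DID_REQUEUE = 0x0d };

/* Translate a transport status into the host byte handed back with the command. */
static enum host_byte_ex map_cam_status(enum cam_status_ex cam)
{
	switch (cam) {
	case EX_CAM_REQ_CMP:
		return EX_DID_OK;	/* completed without transport error */
	case EX_CAM_REQUEUE_REQ:
		return EX_DID_REQUEUE;	/* ask the midlayer to resubmit      */
	default:
		return EX_DID_ERROR;	/* "we should never get here" path   */
	}
}

int main(void)
{
	printf("requeue maps to %#x\n", map_cam_status(EX_CAM_REQUEUE_REQ));
	return 0;
}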
2898 | static void | 2276 | static void |
@@ -2940,7 +2318,6 @@ ahc_linux_release_simq(u_long arg) | |||
2940 | ahc->platform_data->qfrozen--; | 2318 | ahc->platform_data->qfrozen--; |
2941 | if (ahc->platform_data->qfrozen == 0) | 2319 | if (ahc->platform_data->qfrozen == 0) |
2942 | unblock_reqs = 1; | 2320 | unblock_reqs = 1; |
2943 | ahc_schedule_runq(ahc); | ||
2944 | ahc_unlock(ahc, &s); | 2321 | ahc_unlock(ahc, &s); |
2945 | /* | 2322 | /* |
2946 | * There is still a race here. The mid-layer | 2323 | * There is still a race here. The mid-layer |
@@ -2952,37 +2329,12 @@ ahc_linux_release_simq(u_long arg) | |||
2952 | scsi_unblock_requests(ahc->platform_data->host); | 2329 | scsi_unblock_requests(ahc->platform_data->host); |
2953 | } | 2330 | } |
2954 | 2331 | ||
2955 | static void | ||
2956 | ahc_linux_dev_timed_unfreeze(u_long arg) | ||
2957 | { | ||
2958 | struct ahc_linux_device *dev; | ||
2959 | struct ahc_softc *ahc; | ||
2960 | u_long s; | ||
2961 | |||
2962 | dev = (struct ahc_linux_device *)arg; | ||
2963 | ahc = dev->target->ahc; | ||
2964 | ahc_lock(ahc, &s); | ||
2965 | dev->flags &= ~AHC_DEV_TIMER_ACTIVE; | ||
2966 | if (dev->qfrozen > 0) | ||
2967 | dev->qfrozen--; | ||
2968 | if (dev->qfrozen == 0 | ||
2969 | && (dev->flags & AHC_DEV_ON_RUN_LIST) == 0) | ||
2970 | ahc_linux_run_device_queue(ahc, dev); | ||
2971 | if (TAILQ_EMPTY(&dev->busyq) | ||
2972 | && dev->active == 0) | ||
2973 | __ahc_linux_free_device(ahc, dev); | ||
2974 | ahc_unlock(ahc, &s); | ||
2975 | } | ||
2976 | |||
2977 | static int | 2332 | static int |
2978 | ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag) | 2333 | ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag) |
2979 | { | 2334 | { |
2980 | struct ahc_softc *ahc; | 2335 | struct ahc_softc *ahc; |
2981 | struct ahc_cmd *acmd; | ||
2982 | struct ahc_cmd *list_acmd; | ||
2983 | struct ahc_linux_device *dev; | 2336 | struct ahc_linux_device *dev; |
2984 | struct scb *pending_scb; | 2337 | struct scb *pending_scb; |
2985 | u_long s; | ||
2986 | u_int saved_scbptr; | 2338 | u_int saved_scbptr; |
2987 | u_int active_scb_index; | 2339 | u_int active_scb_index; |
2988 | u_int last_phase; | 2340 | u_int last_phase; |
@@ -2998,7 +2350,6 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag) | |||
2998 | paused = FALSE; | 2350 | paused = FALSE; |
2999 | wait = FALSE; | 2351 | wait = FALSE; |
3000 | ahc = *(struct ahc_softc **)cmd->device->host->hostdata; | 2352 | ahc = *(struct ahc_softc **)cmd->device->host->hostdata; |
3001 | acmd = (struct ahc_cmd *)cmd; | ||
3002 | 2353 | ||
3003 | printf("%s:%d:%d:%d: Attempting to queue a%s message\n", | 2354 | printf("%s:%d:%d:%d: Attempting to queue a%s message\n", |
3004 | ahc_name(ahc), cmd->device->channel, | 2355 | ahc_name(ahc), cmd->device->channel, |
@@ -3011,22 +2362,6 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag) | |||
3011 | printf("\n"); | 2362 | printf("\n"); |
3012 | 2363 | ||
3013 | /* | 2364 | /* |
3014 | * In all versions of Linux, we have to work around | ||
3015 | * a major flaw in how the mid-layer is locked down | ||
3016 | * if we are to sleep successfully in our error handler | ||
3017 | * while allowing our interrupt handler to run. Since | ||
3018 | * the midlayer acquires either the io_request_lock or | ||
3019 | * our lock prior to calling us, we must use the | ||
3020 | * spin_unlock_irq() method for unlocking our lock. | ||
3021 | * This will force interrupts to be enabled on the | ||
3022 | * current CPU. Since the EH thread should not have | ||
3023 | * been running with CPU interrupts disabled other than | ||
3024 | * by acquiring either the io_request_lock or our own | ||
3025 | * lock, this *should* be safe. | ||
3026 | */ | ||
3027 | ahc_midlayer_entrypoint_lock(ahc, &s); | ||
3028 | |||
3029 | /* | ||
3030 | * First determine if we currently own this command. | 2365 | * First determine if we currently own this command. |
3031 | * Start by searching the device queue. If not found | 2366 | * Start by searching the device queue. If not found |
3032 | * there, check the pending_scb list. If not found | 2367 | * there, check the pending_scb list. If not found |
@@ -3034,7 +2369,7 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag) | |||
3034 | * command, return success. | 2369 | * command, return success. |
3035 | */ | 2370 | */ |
3036 | dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id, | 2371 | dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id, |
3037 | cmd->device->lun, /*alloc*/FALSE); | 2372 | cmd->device->lun); |
3038 | 2373 | ||
3039 | if (dev == NULL) { | 2374 | if (dev == NULL) { |
3040 | /* | 2375 | /* |
@@ -3048,24 +2383,6 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag) | |||
3048 | goto no_cmd; | 2383 | goto no_cmd; |
3049 | } | 2384 | } |
3050 | 2385 | ||
3051 | TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) { | ||
3052 | if (list_acmd == acmd) | ||
3053 | break; | ||
3054 | } | ||
3055 | |||
3056 | if (list_acmd != NULL) { | ||
3057 | printf("%s:%d:%d:%d: Command found on device queue\n", | ||
3058 | ahc_name(ahc), cmd->device->channel, cmd->device->id, | ||
3059 | cmd->device->lun); | ||
3060 | if (flag == SCB_ABORT) { | ||
3061 | TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe); | ||
3062 | cmd->result = DID_ABORT << 16; | ||
3063 | ahc_linux_queue_cmd_complete(ahc, cmd); | ||
3064 | retval = SUCCESS; | ||
3065 | goto done; | ||
3066 | } | ||
3067 | } | ||
3068 | |||
3069 | if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0 | 2386 | if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0 |
3070 | && ahc_search_untagged_queues(ahc, cmd, cmd->device->id, | 2387 | && ahc_search_untagged_queues(ahc, cmd, cmd->device->id, |
3071 | cmd->device->channel + 'A', | 2388 | cmd->device->channel + 'A', |
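Before attempting an abort or bus device reset, the recovery entry point has to establish that the driver still owns the command; with the per-device busy queue removed, that comes down to looking the struct scsi_cmnd up in the controller's pending SCB list via the io_ctx back-pointer set when the command was queued, plus the untagged-queue search shown above for untagged commands. A hedged, self-contained sketch of such a scan with the BSD LIST macros and simplified stand-in types (struct cmd_ex, struct scb_ex, find_pending_scb) is below; it mirrors the idea, not the exact driver code.

#include <assert.h>
#include <stddef.h>
#include <sys/queue.h>

struct cmd_ex { int tag; };			/* stand-in for struct scsi_cmnd  */

struct scb_ex {					/* stand-in for struct scb        */
	struct cmd_ex *io_ctx;			/* back-pointer set at queue time */
	LIST_ENTRY(scb_ex) pending_links;
};

LIST_HEAD(scb_list, scb_ex);

/* Return the pending SCB that owns 'cmd', or NULL if we no longer own it. */
static struct scb_ex *find_pending_scb(struct scb_list *pending, struct cmd_ex *cmd)
{
	struct scb_ex *scb;

	LIST_FOREACH(scb, pending, pending_links) {
		if (scb->io_ctx == cmd)
			return scb;
	}
	return NULL;
}

int main(void)
{
	struct scb_list pending;
	struct cmd_ex cmd = { .tag = 7 };
	struct scb_ex scb = { .io_ctx = &cmd };

	LIST_INIT(&pending);
	LIST_INSERT_HEAD(&pending, &scb, pending_links);
	assert(find_pending_scb(&pending, &cmd) == &scb);
	return 0;
}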
@@ -3299,53 +2616,42 @@ done: | |||
3299 | } | 2616 | } |
3300 | spin_lock_irq(&ahc->platform_data->spin_lock); | 2617 | spin_lock_irq(&ahc->platform_data->spin_lock); |
3301 | } | 2618 | } |
3302 | ahc_schedule_runq(ahc); | ||
3303 | ahc_linux_run_complete_queue(ahc); | ||
3304 | ahc_midlayer_entrypoint_unlock(ahc, &s); | ||
3305 | return (retval); | 2619 | return (retval); |
3306 | } | 2620 | } |
3307 | 2621 | ||
3308 | void | 2622 | void |
3309 | ahc_platform_dump_card_state(struct ahc_softc *ahc) | 2623 | ahc_platform_dump_card_state(struct ahc_softc *ahc) |
3310 | { | 2624 | { |
3311 | struct ahc_linux_device *dev; | 2625 | } |
3312 | int channel; | ||
3313 | int maxchannel; | ||
3314 | int target; | ||
3315 | int maxtarget; | ||
3316 | int lun; | ||
3317 | int i; | ||
3318 | |||
3319 | maxchannel = (ahc->features & AHC_TWIN) ? 1 : 0; | ||
3320 | maxtarget = (ahc->features & AHC_WIDE) ? 15 : 7; | ||
3321 | for (channel = 0; channel <= maxchannel; channel++) { | ||
3322 | 2626 | ||
3323 | for (target = 0; target <=maxtarget; target++) { | 2627 | static void ahc_linux_exit(void); |
3324 | 2628 | ||
3325 | for (lun = 0; lun < AHC_NUM_LUNS; lun++) { | 2629 | static void ahc_linux_get_width(struct scsi_target *starget) |
3326 | struct ahc_cmd *acmd; | 2630 | { |
2631 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | ||
2632 | struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); | ||
2633 | struct ahc_tmode_tstate *tstate; | ||
2634 | struct ahc_initiator_tinfo *tinfo | ||
2635 | = ahc_fetch_transinfo(ahc, | ||
2636 | starget->channel + 'A', | ||
2637 | shost->this_id, starget->id, &tstate); | ||
2638 | spi_width(starget) = tinfo->curr.width; | ||
2639 | } | ||
3327 | 2640 | ||
3328 | dev = ahc_linux_get_device(ahc, channel, target, | 2641 | static void ahc_linux_set_width(struct scsi_target *starget, int width) |
3329 | lun, /*alloc*/FALSE); | 2642 | { |
3330 | if (dev == NULL) | 2643 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
3331 | continue; | 2644 | struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); |
2645 | struct ahc_devinfo devinfo; | ||
2646 | unsigned long flags; | ||
3332 | 2647 | ||
3333 | printf("DevQ(%d:%d:%d): ", | 2648 | ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, |
3334 | channel, target, lun); | 2649 | starget->channel + 'A', ROLE_INITIATOR); |
3335 | i = 0; | 2650 | ahc_lock(ahc, &flags); |
3336 | TAILQ_FOREACH(acmd, &dev->busyq, | 2651 | ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE); |
3337 | acmd_links.tqe) { | 2652 | ahc_unlock(ahc, &flags); |
3338 | if (i++ > AHC_SCB_MAX) | ||
3339 | break; | ||
3340 | } | ||
3341 | printf("%d waiting\n", i); | ||
3342 | } | ||
3343 | } | ||
3344 | } | ||
3345 | } | 2653 | } |
3346 | 2654 | ||
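ahc_linux_get_width() and ahc_linux_set_width() are the SPI transport class accessors, moved up here so they can be used by the set_period() path: the getter publishes the negotiated width through spi_width(starget), the setter compiles an ahc_devinfo and updates the goal width under the host lock. They only take effect once referenced from the driver's spi_function_template (the start of which is visible near the end of this diff) and registered with spi_attach_transport(). The fragment below sketches that wiring; the .show_width flag, the example_* names and the error handling are assumptions about the 2.6 scsi_transport_spi interface rather than text from this patch, and it reuses the two accessors defined above instead of defining its own.

/*
 * Sketch only: how the width accessors plug into the SPI transport class.
 * Assumes the 2.6 scsi_transport_spi interface; lives in the same file as
 * ahc_linux_get_width()/ahc_linux_set_width().
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <scsi/scsi_transport_spi.h>

static struct scsi_transport_template *example_transport_template;

static struct spi_function_template example_transport_functions = {
	.get_width	= ahc_linux_get_width,
	.set_width	= ahc_linux_set_width,
	.show_width	= 1,
	/* period/offset/DT/QAS/IU accessors would be wired up the same way */
};

static int __init example_attach(void)
{
	example_transport_template =
		spi_attach_transport(&example_transport_functions);
	return example_transport_template ? 0 : -ENODEV;
}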
3347 | static void ahc_linux_exit(void); | ||
3348 | |||
3349 | static void ahc_linux_get_period(struct scsi_target *starget) | 2655 | static void ahc_linux_get_period(struct scsi_target *starget) |
3350 | { | 2656 | { |
3351 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 2657 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
@@ -3376,8 +2682,21 @@ static void ahc_linux_set_period(struct scsi_target *starget, int period) | |||
3376 | if (offset == 0) | 2682 | if (offset == 0) |
3377 | offset = MAX_OFFSET; | 2683 | offset = MAX_OFFSET; |
3378 | 2684 | ||
2685 | if (period < 9) | ||
2686 | period = 9; /* 12.5ns is our minimum */ | ||
2687 | if (period == 9) | ||
2688 | ppr_options |= MSG_EXT_PPR_DT_REQ; | ||
2689 | |||
3379 | ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, | 2690 | ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, |
3380 | starget->channel + 'A', ROLE_INITIATOR); | 2691 | starget->channel + 'A', ROLE_INITIATOR); |
2692 | |||
2693 | /* all PPR requests apart from QAS require wide transfers */ | ||
2694 | if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) { | ||
2695 | ahc_linux_get_width(starget); | ||
2696 | if (spi_width(starget) == 0) | ||
2697 | ppr_options &= MSG_EXT_PPR_QAS_REQ; | ||
2698 | } | ||
2699 | |||
3381 | syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); | 2700 | syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); |
3382 | ahc_lock(ahc, &flags); | 2701 | ahc_lock(ahc, &flags); |
3383 | ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, | 2702 | ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, |
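ahc_linux_set_period() works in SCSI period-factor units rather than nanoseconds: per the comments in this hunk, factor 9 is the 12.5 ns DT-only rate and is the controller's minimum, so faster requests are clamped to 9 and a request for 9 implies MSG_EXT_PPR_DT_REQ; and since every PPR option other than QAS needs a wide bus, the goal is trimmed to QAS-only when the target is currently narrow. A small worked sketch of that clamping as a pure function follows; the EX_PPR_* constants and clamp_period() are illustrative, and only the 12.5 ns and wide-bus facts are taken from the diff.

#include <stdbool.h>
#include <stdio.h>

#define EX_PPR_DT_REQ	0x02	/* modelled after MSG_EXT_PPR_DT_REQ  */
#define EX_PPR_QAS_REQ	0x04	/* modelled after MSG_EXT_PPR_QAS_REQ */

/* Clamp a requested period factor and adjust the PPR goal bits to match. */
static int clamp_period(int period, unsigned int *ppr_options, bool wide)
{
	if (period < 9)
		period = 9;			/* 12.5 ns is the minimum    */
	if (period == 9)
		*ppr_options |= EX_PPR_DT_REQ;	/* 12.5 ns only exists in DT */

	/* Every PPR option except QAS needs a wide (16-bit) bus. */
	if ((*ppr_options & ~EX_PPR_QAS_REQ) != 0 && !wide)
		*ppr_options &= EX_PPR_QAS_REQ;

	return period;
}

int main(void)
{
	unsigned int ppr = 0;
	int factor = clamp_period(8, &ppr, true);	/* too fast: clamps to 9 */

	printf("factor %d, ppr %#x\n", factor, ppr);
	return 0;
}

The set_dt() hunk further down applies the inverse rule: leaving DT while at factor 9 relaxes the period to factor 10, i.e. 25 ns.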
@@ -3425,32 +2744,6 @@ static void ahc_linux_set_offset(struct scsi_target *starget, int offset) | |||
3425 | ahc_unlock(ahc, &flags); | 2744 | ahc_unlock(ahc, &flags); |
3426 | } | 2745 | } |
3427 | 2746 | ||
3428 | static void ahc_linux_get_width(struct scsi_target *starget) | ||
3429 | { | ||
3430 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | ||
3431 | struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); | ||
3432 | struct ahc_tmode_tstate *tstate; | ||
3433 | struct ahc_initiator_tinfo *tinfo | ||
3434 | = ahc_fetch_transinfo(ahc, | ||
3435 | starget->channel + 'A', | ||
3436 | shost->this_id, starget->id, &tstate); | ||
3437 | spi_width(starget) = tinfo->curr.width; | ||
3438 | } | ||
3439 | |||
3440 | static void ahc_linux_set_width(struct scsi_target *starget, int width) | ||
3441 | { | ||
3442 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | ||
3443 | struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); | ||
3444 | struct ahc_devinfo devinfo; | ||
3445 | unsigned long flags; | ||
3446 | |||
3447 | ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, | ||
3448 | starget->channel + 'A', ROLE_INITIATOR); | ||
3449 | ahc_lock(ahc, &flags); | ||
3450 | ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE); | ||
3451 | ahc_unlock(ahc, &flags); | ||
3452 | } | ||
3453 | |||
3454 | static void ahc_linux_get_dt(struct scsi_target *starget) | 2747 | static void ahc_linux_get_dt(struct scsi_target *starget) |
3455 | { | 2748 | { |
3456 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 2749 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
@@ -3479,10 +2772,15 @@ static void ahc_linux_set_dt(struct scsi_target *starget, int dt) | |||
3479 | unsigned long flags; | 2772 | unsigned long flags; |
3480 | struct ahc_syncrate *syncrate; | 2773 | struct ahc_syncrate *syncrate; |
3481 | 2774 | ||
2775 | if (dt) { | ||
2776 | period = 9; /* 12.5ns is the only period valid for DT */ | ||
2777 | ppr_options |= MSG_EXT_PPR_DT_REQ; | ||
2778 | } else if (period == 9) | ||
2779 | period = 10; /* if resetting DT, period must be >= 25ns */ | ||
2780 | |||
3482 | ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, | 2781 | ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, |
3483 | starget->channel + 'A', ROLE_INITIATOR); | 2782 | starget->channel + 'A', ROLE_INITIATOR); |
3484 | syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, | 2783 | syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,AHC_SYNCRATE_DT); |
3485 | dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2); | ||
3486 | ahc_lock(ahc, &flags); | 2784 | ahc_lock(ahc, &flags); |
3487 | ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset, | 2785 | ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset, |
3488 | ppr_options, AHC_TRANS_GOAL, FALSE); | 2786 | ppr_options, AHC_TRANS_GOAL, FALSE); |
@@ -3514,7 +2812,6 @@ static void ahc_linux_set_qas(struct scsi_target *starget, int qas) | |||
3514 | unsigned int ppr_options = tinfo->curr.ppr_options | 2812 | unsigned int ppr_options = tinfo->curr.ppr_options |
3515 | & ~MSG_EXT_PPR_QAS_REQ; | 2813 | & ~MSG_EXT_PPR_QAS_REQ; |
3516 | unsigned int period = tinfo->curr.period; | 2814 | unsigned int period = tinfo->curr.period; |
3517 | unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; | ||
3518 | unsigned long flags; | 2815 | unsigned long flags; |
3519 | struct ahc_syncrate *syncrate; | 2816 | struct ahc_syncrate *syncrate; |
3520 | 2817 | ||
@@ -3523,8 +2820,7 @@ static void ahc_linux_set_qas(struct scsi_target *starget, int qas) | |||
3523 | 2820 | ||
3524 | ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, | 2821 | ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, |
3525 | starget->channel + 'A', ROLE_INITIATOR); | 2822 | starget->channel + 'A', ROLE_INITIATOR); |
3526 | syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, | 2823 | syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); |
3527 | dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2); | ||
3528 | ahc_lock(ahc, &flags); | 2824 | ahc_lock(ahc, &flags); |
3529 | ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset, | 2825 | ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset, |
3530 | ppr_options, AHC_TRANS_GOAL, FALSE); | 2826 | ppr_options, AHC_TRANS_GOAL, FALSE); |
@@ -3556,7 +2852,6 @@ static void ahc_linux_set_iu(struct scsi_target *starget, int iu) | |||
3556 | unsigned int ppr_options = tinfo->curr.ppr_options | 2852 | unsigned int ppr_options = tinfo->curr.ppr_options |
3557 | & ~MSG_EXT_PPR_IU_REQ; | 2853 | & ~MSG_EXT_PPR_IU_REQ; |
3558 | unsigned int period = tinfo->curr.period; | 2854 | unsigned int period = tinfo->curr.period; |
3559 | unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; | ||
3560 | unsigned long flags; | 2855 | unsigned long flags; |
3561 | struct ahc_syncrate *syncrate; | 2856 | struct ahc_syncrate *syncrate; |
3562 | 2857 | ||
@@ -3565,8 +2860,7 @@ static void ahc_linux_set_iu(struct scsi_target *starget, int iu) | |||
3565 | 2860 | ||
3566 | ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, | 2861 | ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, |
3567 | starget->channel + 'A', ROLE_INITIATOR); | 2862 | starget->channel + 'A', ROLE_INITIATOR); |
3568 | syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, | 2863 | syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); |
3569 | dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2); | ||
3570 | ahc_lock(ahc, &flags); | 2864 | ahc_lock(ahc, &flags); |
3571 | ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset, | 2865 | ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset, |
3572 | ppr_options, AHC_TRANS_GOAL, FALSE); | 2866 | ppr_options, AHC_TRANS_GOAL, FALSE); |
@@ -3599,7 +2893,6 @@ static struct spi_function_template ahc_linux_transport_functions = { | |||
3599 | static int __init | 2893 | static int __init |
3600 | ahc_linux_init(void) | 2894 | ahc_linux_init(void) |
3601 | { | 2895 | { |
3602 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) | ||
3603 | ahc_linux_transport_template = spi_attach_transport(&ahc_linux_transport_functions); | 2896 | ahc_linux_transport_template = spi_attach_transport(&ahc_linux_transport_functions); |
3604 | if (!ahc_linux_transport_template) | 2897 | if (!ahc_linux_transport_template) |
3605 | return -ENODEV; | 2898 | return -ENODEV; |
@@ -3608,29 +2901,11 @@ ahc_linux_init(void) | |||
3608 | spi_release_transport(ahc_linux_transport_template); | 2901 | spi_release_transport(ahc_linux_transport_template); |
3609 | ahc_linux_exit(); | 2902 | ahc_linux_exit(); |
3610 | return -ENODEV; | 2903 | return -ENODEV; |
3611 | #else | ||
3612 | scsi_register_module(MODULE_SCSI_HA, &aic7xxx_driver_template); | ||
3613 | if (aic7xxx_driver_template.present == 0) { | ||
3614 | scsi_unregister_module(MODULE_SCSI_HA, | ||
3615 | &aic7xxx_driver_template); | ||
3616 | return (-ENODEV); | ||
3617 | } | ||
3618 | |||
3619 | return (0); | ||
3620 | #endif | ||
3621 | } | 2904 | } |
3622 | 2905 | ||
3623 | static void | 2906 | static void |
3624 | ahc_linux_exit(void) | 2907 | ahc_linux_exit(void) |
3625 | { | 2908 | { |
3626 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) | ||
3627 | /* | ||
3628 | * In 2.4 we have to unregister from the PCI core _after_ | ||
3629 | * unregistering from the scsi midlayer to avoid dangling | ||
3630 | * references. | ||
3631 | */ | ||
3632 | scsi_unregister_module(MODULE_SCSI_HA, &aic7xxx_driver_template); | ||
3633 | #endif | ||
3634 | ahc_linux_pci_exit(); | 2909 | ahc_linux_pci_exit(); |
3635 | ahc_linux_eisa_exit(); | 2910 | ahc_linux_eisa_exit(); |
3636 | spi_release_transport(ahc_linux_transport_template); | 2911 | spi_release_transport(ahc_linux_transport_template); |