author     Linus Torvalds <torvalds@g5.osdl.org>  2005-10-28 12:06:50 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-10-28 12:06:50 -0400
commit     5fadd053d9bb4345ec6f405d24db4e7eb49cf81e (patch)
tree       73924189ef46511301d004946fbdd9937b00c484
parent     e5dfa9282f3db461a896a6692b529e1823ba98c6 (diff)
parent     26ba2a7a9f4d8921f095af646a7d22c15a148028 (diff)
Merge branch 'upstream' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev
-rw-r--r--  Documentation/DocBook/libata.tmpl  1072
-rw-r--r--  drivers/scsi/Kconfig                 22
-rw-r--r--  drivers/scsi/Makefile                 2
-rw-r--r--  drivers/scsi/ahci.c                  41
-rw-r--r--  drivers/scsi/ata_piix.c               4
-rw-r--r--  drivers/scsi/libata-core.c          874
-rw-r--r--  drivers/scsi/libata-scsi.c          730
-rw-r--r--  drivers/scsi/libata.h                19
-rw-r--r--  drivers/scsi/pdc_adma.c             739
-rw-r--r--  drivers/scsi/sata_mv.c             1145
-rw-r--r--  drivers/scsi/sata_nv.c                8
-rw-r--r--  drivers/scsi/sata_promise.c          26
-rw-r--r--  drivers/scsi/sata_qstor.c             8
-rw-r--r--  drivers/scsi/sata_sil.c               6
-rw-r--r--  drivers/scsi/sata_sil24.c           875
-rw-r--r--  drivers/scsi/sata_sis.c               4
-rw-r--r--  drivers/scsi/sata_svw.c               4
-rw-r--r--  drivers/scsi/sata_sx4.c              29
-rw-r--r--  drivers/scsi/sata_uli.c               4
-rw-r--r--  drivers/scsi/sata_via.c               4
-rw-r--r--  drivers/scsi/sata_vsc.c              14
-rw-r--r--  include/linux/ata.h                  41
-rw-r--r--  include/linux/libata.h              113
23 files changed, 4969 insertions, 815 deletions
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
index 375ae760dc1e..d260d92089ad 100644
--- a/Documentation/DocBook/libata.tmpl
+++ b/Documentation/DocBook/libata.tmpl
@@ -415,6 +415,362 @@ and other resources, etc.
415 </sect1>
416 </chapter>
417
418 <chapter id="libataEH">
419 <title>Error handling</title>
420
421 <para>
422 This chapter describes how errors are handled under libata.
423 Readers are advised to read SCSI EH
424 (Documentation/scsi/scsi_eh.txt) and ATA exceptions doc first.
425 </para>
426
427 <sect1><title>Origins of commands</title>
428 <para>
429 In libata, a command is represented with struct ata_queued_cmd
430 or qc. qc's are preallocated during port initialization and
431 used repeatedly for command execution. Currently only one
432 qc is allocated per port, but the yet-to-be-merged NCQ branch
433 allocates one for each tag and maps each qc to an NCQ tag 1-to-1.
434 </para>
435 <para>
436 libata commands can originate from two sources - libata itself
437 and the SCSI midlayer. libata internal commands are used for
438 initialization and error handling. All normal blk requests
439 and commands for SCSI emulation are passed as SCSI commands
440 through the queuecommand callback of the SCSI host template.
441 </para>
442 </sect1>
443
444 <sect1><title>How commands are issued</title>
445
446 <variablelist>
447
448 <varlistentry><term>Internal commands</term>
449 <listitem>
450 <para>
451 First, qc is allocated and initialized using
452 ata_qc_new_init(). Although ata_qc_new_init() doesn't
453 implement any wait or retry mechanism when qc is not
454 available, internal commands are currently issued only during
455 initialization and error recovery, so no other command is
456 active and allocation is guaranteed to succeed.
457 </para>
458 <para>
459 Once allocated, the qc's taskfile is initialized for the command to
460 be executed. qc currently has two mechanisms to notify
461 completion. One is via qc->complete_fn() callback and the
462 other is the completion qc->waiting. qc->complete_fn() callback
463 is the asynchronous path used by normal SCSI translated
464 commands and qc->waiting is the synchronous (issuer sleeps in
465 process context) path used by internal commands.
466 </para>
467 <para>
468 Once initialization is complete, host_set lock is acquired
469 and the qc is issued.
470 </para>
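    <para>
    A minimal sketch of this synchronous path (a hypothetical helper;
    names follow the description above and error handling is omitted):
    </para>
    <programlisting>
static void issue_internal_cmd(struct ata_port *ap, struct ata_taskfile *tf)
{
	DECLARE_COMPLETION(wait);               /* issuer sleeps on this */
	struct ata_queued_cmd *qc;
	unsigned long flags;

	qc = ata_qc_new_init(ap, &amp;ap->device[0]); /* succeeds during init/EH */
	qc->tf = *tf;                           /* taskfile to execute */
	qc->waiting = &amp;wait;                    /* synchronous completion */

	spin_lock_irqsave(&amp;ap->host_set->lock, flags);
	ata_qc_issue(qc);
	spin_unlock_irqrestore(&amp;ap->host_set->lock, flags);

	wait_for_completion(&amp;wait);             /* sleep in process context */
}
    </programlisting>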
471 </listitem>
472 </varlistentry>
473
474 <varlistentry><term>SCSI commands</term>
475 <listitem>
476 <para>
477 All libata drivers use ata_scsi_queuecmd() as
478 hostt->queuecommand callback. scmds can either be simulated
479 or translated. No qc is involved in processing a simulated
480 scmd. The result is computed right away and the scmd is
481 completed.
482 </para>
483 <para>
484 For a translated scmd, ata_qc_new_init() is invoked to
485 allocate a qc and the scmd is translated into the qc. SCSI
486 midlayer's completion notification function pointer is stored
487 into qc->scsidone.
488 </para>
489 <para>
490 qc->complete_fn() callback is used for completion
491 notification. ATA commands use ata_scsi_qc_complete() while
492 ATAPI commands use atapi_qc_complete(). Both functions end up
493 calling qc->scsidone to notify the upper layer when the qc is
494 finished. After translation is completed, the qc is issued
495 with ata_qc_issue().
496 </para>
497 <para>
498 Note that the SCSI midlayer invokes hostt->queuecommand while
499 holding the host_set lock, so all of the above occurs while
500 holding the host_set lock.
501 </para>
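    <para>
    In outline, the translated path looks like this (a sketch of the
    flow described above, not the actual ata_scsi_queuecmd() body;
    xlat_func stands for the per-command translation routine):
    </para>
    <programlisting>
/* inside hostt->queuecommand; host_set lock is already held */
qc = ata_qc_new_init(ap, dev);      /* allocate a qc for the scmd */
qc->scsidone = done;                /* SCSI midlayer completion fn */
xlat_func(qc, scsicmd);             /* translate scmd into the qc and
                                     * set qc->complete_fn() */
ata_qc_issue(qc);                   /* hand off to the controller */
    </programlisting>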
502 </listitem>
503 </varlistentry>
504
505 </variablelist>
506 </sect1>
507
508 <sect1><title>How commands are processed</title>
509 <para>
510 Depending on which protocol and which controller are used,
511 commands are processed differently. For the purpose of
512 discussion, a controller which uses taskfile interface and all
513 standard callbacks is assumed.
514 </para>
515 <para>
516 Currently 6 ATA command protocols are used. They can be
517 sorted into the following four categories according to how
518 they are processed.
519 </para>
520
521 <variablelist>
522 <varlistentry><term>ATA NO DATA or DMA</term>
523 <listitem>
524 <para>
525 ATA_PROT_NODATA and ATA_PROT_DMA fall into this category.
526 These types of commands don't require any software
527 intervention once issued. The device will raise an interrupt on
528 completion.
529 </para>
530 </listitem>
531 </varlistentry>
532
533 <varlistentry><term>ATA PIO</term>
534 <listitem>
535 <para>
536 ATA_PROT_PIO is in this category. libata currently
537 implements PIO with polling. The ATA_NIEN bit is set to turn
538 off interrupts, and pio_task on ata_wq performs polling and
539 IO.
540 </para>
541 </listitem>
542 </varlistentry>
543
544 <varlistentry><term>ATAPI NODATA or DMA</term>
545 <listitem>
546 <para>
547 ATA_PROT_ATAPI_NODATA and ATA_PROT_ATAPI_DMA are in this
548 category. packet_task is used to poll the BSY bit after
549 issuing the PACKET command. Once BSY is turned off by the
550 device, packet_task transfers the CDB and hands off processing
551 to the interrupt handler.
552 </para>
553 </listitem>
554 </varlistentry>
555
556 <varlistentry><term>ATAPI PIO</term>
557 <listitem>
558 <para>
559 ATA_PROT_ATAPI is in this category. The ATA_NIEN bit is set
560 and, as in ATAPI NODATA or DMA, packet_task submits the CDB.
561 However, after submitting the CDB, further processing (data
562 transfer) is handed off to pio_task.
563 </para>
564 </listitem>
565 </varlistentry>
566 </variablelist>
567 </sect1>
568
569 <sect1><title>How commands are completed</title>
570 <para>
571 Once issued, all qc's are either completed with
572 ata_qc_complete() or time out. For commands which are handled
573 by interrupts, ata_host_intr() invokes ata_qc_complete(), and,
574 for PIO tasks, pio_task invokes ata_qc_complete(). In error
575 cases, packet_task may also complete commands.
576 </para>
577 <para>
578 ata_qc_complete() does the following.
579 </para>
580
581 <orderedlist>
582
583 <listitem>
584 <para>
585 DMA memory is unmapped.
586 </para>
587 </listitem>
588
589 <listitem>
590 <para>
591 ATA_QCFLAG_ACTIVE is cleared from qc->flags.
592 </para>
593 </listitem>
594
595 <listitem>
596 <para>
597 qc->complete_fn() callback is invoked. If the return value of
598 the callback is not zero, completion is short-circuited and
599 ata_qc_complete() returns.
600 </para>
601 </listitem>
602
603 <listitem>
604 <para>
605 __ata_qc_complete() is called, which does
606 <orderedlist>
607
608 <listitem>
609 <para>
610 qc->flags is cleared to zero.
611 </para>
612 </listitem>
613
614 <listitem>
615 <para>
616 ap->active_tag and qc->tag are poisoned.
617 </para>
618 </listitem>
619
620 <listitem>
621 <para>
622 qc->waiting is cleared &amp; completed (in that order).
623 </para>
624 </listitem>
625
626 <listitem>
627 <para>
628 qc is deallocated by clearing the appropriate bit in ap->qactive.
629 </para>
630 </listitem>
631
632 </orderedlist>
633 </para>
634 </listitem>
635
636 </orderedlist>
637
638 <para>
639 So, it basically notifies the upper layer and deallocates the qc.
640 One exception is the short-circuit path in #3, which is used by
641 atapi_qc_complete().
642 </para>
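    <para>
    Condensed into code, the sequence above looks like the following
    (a simplified sketch of ata_qc_complete(), not the verbatim
    function):
    </para>
    <programlisting>
void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
{
	ata_sg_clean(qc);                   /* 1. unmap DMA memory */
	qc->flags &amp;= ~ATA_QCFLAG_ACTIVE;    /* 2. clear ACTIVE flag */

	if (qc->complete_fn(qc, drv_stat))  /* 3. notify upper layer */
		return;                     /* short-circuit path */

	__ata_qc_complete(qc);              /* 4. poison tags, complete
					     *    qc->waiting, deallocate */
}
    </programlisting>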
643 <para>
644 For all non-ATAPI commands, whether they fail or not, almost
645 the same code path is taken and very little error handling
646 takes place. A qc is completed with success status if it
647 succeeded, with failed status otherwise.
648 </para>
649 <para>
650 However, failed ATAPI commands require more handling as
651 REQUEST SENSE is needed to acquire sense data. If an ATAPI
652 command fails, ata_qc_complete() is invoked with error status,
653 which in turn invokes atapi_qc_complete() via
654 qc->complete_fn() callback.
655 </para>
656 <para>
657 This makes atapi_qc_complete() set scmd->result to
658 SAM_STAT_CHECK_CONDITION, complete the scmd and return 1. As
659 the sense data is empty but scmd->result is CHECK CONDITION,
660 the SCSI midlayer will invoke EH for the scmd, and returning 1
661 makes ata_qc_complete() return without deallocating the qc.
662 This leads us to ata_scsi_error() with a partially completed qc.
663 </para>
664
665 </sect1>
666
667 <sect1><title>ata_scsi_error()</title>
668 <para>
669 ata_scsi_error() is the current hostt->eh_strategy_handler()
670 for libata. As discussed above, this will be entered in two
671 cases - timeout and ATAPI error completion. This function
672 calls the low level libata driver's eng_timeout() callback, the
673 standard callback for which is ata_eng_timeout(). It checks
674 if a qc is active and calls ata_qc_timeout() on the qc if so.
675 Actual error handling occurs in ata_qc_timeout().
676 </para>
677 <para>
678 If EH is invoked for timeout, ata_qc_timeout() stops BMDMA and
679 completes the qc. Note that as we're currently in EH, we
680 cannot call scsi_done. As described in SCSI EH doc, a
681 recovered scmd should be either retried with
682 scsi_queue_insert() or finished with scsi_finish_command().
683 Here, we override qc->scsidone with scsi_finish_command() and
684 call ata_qc_complete().
685 </para>
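    <para>
    In code form, the timeout completion described above reduces to the
    following (a sketch; BMDMA shutdown and locking are omitted):
    </para>
    <programlisting>
/* in ata_qc_timeout(): complete the qc from EH context */
qc->scsidone = scsi_finish_command;  /* legal inside EH, unlike scsi_done */
ata_qc_complete(qc, drv_stat);       /* notify and deallocate as usual */
    </programlisting>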
686 <para>
687 If EH is invoked due to a failed ATAPI qc, the qc here is
688 completed but not deallocated. The purpose of this
689 half-completion is to use the qc as a place holder to make EH
690 code reach this place. This is a bit hackish, but it works.
691 </para>
692 <para>
693 Once control reaches here, the qc is deallocated by invoking
694 __ata_qc_complete() explicitly. Then, an internal qc for REQUEST
695 SENSE is issued. Once sense data is acquired, the scmd is
696 finished by directly invoking scsi_finish_command() on the
697 scmd. Note that as we already have completed and deallocated
698 the qc which was associated with the scmd, we don't need
699 to/cannot call ata_qc_complete() again.
700 </para>
701
702 </sect1>
703
704 <sect1><title>Problems with the current EH</title>
705
706 <itemizedlist>
707
708 <listitem>
709 <para>
710 Error representation is too crude. Currently any and all
711 error conditions are represented with ATA STATUS and ERROR
712 registers. Errors which aren't ATA device errors are treated
713 as ATA device errors by setting the ATA_ERR bit. A better error
714 descriptor which can properly represent ATA and other
715 errors/exceptions is needed.
716 </para>
717 </listitem>
718
719 <listitem>
720 <para>
721 When handling timeouts, no action is taken to make the device
722 forget about the timed out command and become ready for new commands.
723 </para>
724 </listitem>
725
726 <listitem>
727 <para>
728 EH handling via ata_scsi_error() is not properly protected
729 from usual command processing. On EH entrance, the device is
730 not in a quiescent state. Timed out commands may succeed or
731 fail at any time. pio_task and atapi_task may still be running.
732 </para>
733 </listitem>
734
735 <listitem>
736 <para>
737 Too weak error recovery. Devices / controllers causing HSM
738 mismatch errors and other errors quite often require a reset to
739 return to a known state. Also, advanced error handling is
740 necessary to support features like NCQ and hotplug.
741 </para>
742 </listitem>
743
744 <listitem>
745 <para>
746 ATA errors are directly handled in the interrupt handler and
747 PIO errors in pio_task. This is problematic for advanced
748 error handling for the following reasons.
749 </para>
750 <para>
751 First, advanced error handling often requires context and
752 internal qc execution.
753 </para>
754 <para>
755 Second, even a simple failure (say, CRC error) needs
756 information gathering and could trigger complex error handling
757 (say, resetting &amp; reconfiguring). Having multiple code
758 paths to gather information, enter EH and trigger actions
759 makes life painful.
760 </para>
761 <para>
762 Third, scattered EH code makes implementing low level drivers
763 difficult. Low level drivers override libata callbacks. If
764 EH is scattered over several places, each affected callback
765 must perform its part of error handling. This can be
766 error-prone and painful.
767 </para>
768 </listitem>
769
770 </itemizedlist>
771 </sect1>
772 </chapter>
773
774 <chapter id="libataExt">
775 <title>libata Library</title>
776 !Edrivers/scsi/libata-core.c
@@ -431,6 +787,722 @@ and other resources, etc.
787 !Idrivers/scsi/libata-scsi.c
788 </chapter>
789
790 <chapter id="ataExceptions">
791 <title>ATA errors &amp; exceptions</title>
792
793 <para>
794 This chapter tries to identify what error/exception conditions exist
795 for ATA/ATAPI devices and describe how they should be handled in an
796 implementation-neutral way.
797 </para>
798
799 <para>
800 The term 'error' is used to describe conditions where either an
801 explicit error condition is reported from device or a command has
802 timed out.
803 </para>
804
805 <para>
806 The term 'exception' is either used to describe exceptional
807 conditions which are not errors (say, power or hotplug events), or
808 to describe both errors and non-error exceptional conditions. Where
809 explicit distinction between error and exception is necessary, the
810 term 'non-error exception' is used.
811 </para>
812
813 <sect1 id="excat">
814 <title>Exception categories</title>
815 <para>
816 Exceptions are described primarily with respect to legacy
817 taskfile + bus master IDE interface. If a controller provides
818 another, better mechanism for error reporting, mapping it into the
819 categories described below shouldn't be difficult.
820 </para>
821
822 <para>
823 In the following sections, two recovery actions - reset and
824 reconfiguring transport - are mentioned. These are described
825 further in <xref linkend="exrec"/>.
826 </para>
827
828 <sect2 id="excatHSMviolation">
829 <title>HSM violation</title>
830 <para>
831 This error is indicated when the STATUS value doesn't match the HSM
832 requirement during issuing or execution of any ATA/ATAPI command.
833 </para>
834
835 <itemizedlist>
836 <title>Examples</title>
837
838 <listitem>
839 <para>
840 ATA_STATUS doesn't contain !BSY &amp;&amp; DRDY &amp;&amp; !DRQ while trying
841 to issue a command.
842 </para>
843 </listitem>
844
845 <listitem>
846 <para>
847 !BSY &amp;&amp; !DRQ during PIO data transfer.
848 </para>
849 </listitem>
850
851 <listitem>
852 <para>
853 DRQ on command completion.
854 </para>
855 </listitem>
856
857 <listitem>
858 <para>
859 !BSY &amp;&amp; ERR after CDB transfer starts but before the
860 last byte of CDB is transferred. The ATA/ATAPI standard states
861 that &quot;The device shall not terminate the PACKET command
862 with an error before the last byte of the command packet has
863 been written&quot; in the error outputs description of PACKET
864 command and the state diagram doesn't include such
865 transitions.
866 </para>
867 </listitem>
868
869 </itemizedlist>
870
871 <para>
872 In these cases, HSM is violated and not much information
873 regarding the error can be acquired from STATUS or ERROR
874 register. IOW, this error can be anything - driver bug,
875 faulty device, controller and/or cable.
876 </para>
877
878 <para>
879 As HSM is violated, a reset is necessary to restore a known state.
880 Reconfiguring transport for lower speed might be helpful too
881 as transmission errors sometimes cause this kind of error.
882 </para>
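    <para>
    For illustration, the first example above corresponds to a check
    like the following sketch (a hypothetical helper built from the
    standard status bit masks in &lt;linux/ata.h&gt;):
    </para>
    <programlisting>
/* a command may be issued only when !BSY &amp;&amp; DRDY &amp;&amp; !DRQ */
static inline int hsm_ok_to_issue(u8 status)
{
	return (status &amp; (ATA_BUSY | ATA_DRDY | ATA_DRQ)) == ATA_DRDY;
}
    </programlisting>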
883 </sect2>
884
885 <sect2 id="excatDevErr">
886 <title>ATA/ATAPI device error (non-NCQ / non-CHECK CONDITION)</title>
887
888 <para>
889 These are errors detected and reported by ATA/ATAPI devices
890 indicating device problems. For this type of error, STATUS
891 and ERROR register values are valid and describe the error
892 condition. Note that some ATA bus errors are detected by
893 ATA/ATAPI devices and reported using the same mechanism as
894 device errors. Those cases are described later in this
895 section.
896 </para>
897
898 <para>
899 For ATA commands, this type of error is indicated by !BSY
900 &amp;&amp; ERR during command execution and on completion.
901 </para>
902
903 <para>For ATAPI commands,</para>
904
905 <itemizedlist>
906
907 <listitem>
908 <para>
909 !BSY &amp;&amp; ERR &amp;&amp; ABRT right after issuing PACKET
910 indicates that PACKET command is not supported and falls in
911 this category.
912 </para>
913 </listitem>
914
915 <listitem>
916 <para>
917 !BSY &amp;&amp; ERR(==CHK) &amp;&amp; !ABRT after the last
918 byte of CDB is transferred indicates CHECK CONDITION and
919 doesn't fall in this category.
920 </para>
921 </listitem>
922
923 <listitem>
924 <para>
925 !BSY &amp;&amp; ERR(==CHK) &amp;&amp; ABRT after the last byte
926 of CDB is transferred *probably* indicates CHECK CONDITION and
927 doesn't fall in this category.
928 </para>
929 </listitem>
930
931 </itemizedlist>
932
933 <para>
934 Of the errors detected as above, the following are not ATA/ATAPI
935 device errors but ATA bus errors and should be handled
936 according to <xref linkend="excatATAbusErr"/>.
937 </para>
938
939 <variablelist>
940
941 <varlistentry>
942 <term>CRC error during data transfer</term>
943 <listitem>
944 <para>
945 This is indicated by the ICRC bit in the ERROR register and
946 means that corruption occurred during data transfer. Up to
947 ATA/ATAPI-7, the standard specifies that this bit is only
948 applicable to UDMA transfers but ATA/ATAPI-8 draft revision
949 1f says that the bit may be applicable to multiword DMA and
950 PIO.
951 </para>
952 </listitem>
953 </varlistentry>
954
955 <varlistentry>
956 <term>ABRT error during data transfer or on completion</term>
957 <listitem>
958 <para>
959 Up to ATA/ATAPI-7, the standard specifies that ABRT could be
960 set on ICRC errors and on cases where a device is not able
961 to complete a command. Combined with the fact that MWDMA
962 and PIO transfer errors aren't allowed to use the ICRC bit up to
963 ATA/ATAPI-7, it seems to imply that the ABRT bit alone could
964 indicate transfer errors.
965 </para>
966 <para>
967 However, ATA/ATAPI-8 draft revision 1f removes the part
968 that ICRC errors can turn on ABRT. So, this is kind of a
969 gray area. Some heuristics are needed here.
970 </para>
971 </listitem>
972 </varlistentry>
973
974 </variablelist>
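    <para>
    A sketch of the screening implied above (ATA_ICRC and ATA_ABORTED
    are assumed ERROR register bit masks; the ABRT case is heuristic
    by nature):
    </para>
    <programlisting>
if (err &amp; ATA_ICRC) {
	/* definitely an ATA bus error */
} else if (err &amp; ATA_ABORTED) {
	/* could be a bus error, an unsupported command or an
	 * address error - apply heuristics */
}
    </programlisting>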
975
976 <para>
977 ATA/ATAPI device errors can be further categorized as follows.
978 </para>
979
980 <variablelist>
981
982 <varlistentry>
983 <term>Media errors</term>
984 <listitem>
985 <para>
986 This is indicated by the UNC bit in the ERROR register. ATA
987 devices report a UNC error only after a certain number of
988 retries have failed to recover the data, so there's nothing
989 much else to do other than notifying the upper layer.
990 </para>
991 <para>
992 READ and WRITE commands report the CHS or LBA of the first
993 failed sector, but the ATA/ATAPI standard specifies that the
994 amount of transferred data on error completion is
995 indeterminate, so we cannot assume that sectors preceding
996 the failed sector have been transferred and thus cannot
997 complete those sectors successfully as SCSI does.
998 </para>
999 </listitem>
1000 </varlistentry>
1001
1002 <varlistentry>
1003 <term>Media changed / media change requested error</term>
1004 <listitem>
1005 <para>
1006 &lt;&lt;TODO: fill here&gt;&gt;
1007 </para>
1008 </listitem>
1009 </varlistentry>
1010
1011 <varlistentry><term>Address error</term>
1012 <listitem>
1013 <para>
1014 This is indicated by the IDNF bit in the ERROR register.
1015 Report it to the upper layer.
1016 </para>
1017 </listitem>
1018 </varlistentry>
1019
1020 <varlistentry><term>Other errors</term>
1021 <listitem>
1022 <para>
1023 This can be an invalid command or parameter, indicated by the
1024 ABRT bit in the ERROR register, or some other error condition.
1025 Note that the ABRT bit can indicate a lot of things including
1026 ICRC and address errors. Heuristics are needed.
1027 </para>
1028 </listitem>
1029 </varlistentry>
1030
1031 </variablelist>
1032
1033 <para>
1034 Depending on the command, not all STATUS/ERROR bits are
1035 applicable. These non-applicable bits are marked with
1036 &quot;na&quot; in the output descriptions but up to ATA/ATAPI-7
1037 no definition of &quot;na&quot; can be found. However,
1038 ATA/ATAPI-8 draft revision 1f describes &quot;N/A&quot; as
1039 follows.
1040 </para>
1041
1042 <blockquote>
1043 <variablelist>
1044 <varlistentry><term>3.2.3.3a N/A</term>
1045 <listitem>
1046 <para>
1047 A keyword that indicates a field has no defined value in
1048 this standard and should not be checked by the host or
1049 device. N/A fields should be cleared to zero.
1050 </para>
1051 </listitem>
1052 </varlistentry>
1053 </variablelist>
1054 </blockquote>
1055
1056 <para>
1057 So, it seems reasonable to assume that &quot;na&quot; bits are
1058 cleared to zero by devices and thus need no explicit masking.
1059 </para>
1060
1061 </sect2>
1062
1063 <sect2 id="excatATAPIcc">
1064 <title>ATAPI device CHECK CONDITION</title>
1065
1066 <para>
1067 An ATAPI device CHECK CONDITION error is indicated by the CHK bit
1068 (ERR bit) being set in the STATUS register after the last byte of
1069 the CDB is transferred for a PACKET command. For this kind of
1070 error, sense data should be acquired to gather information regarding
1071 the error. The REQUEST SENSE packet command should be used to
1072 acquire sense data.
1073 </para>
1074
1075 <para>
1076 Once sense data is acquired, this type of error can be
1077 handled similarly to other SCSI errors. Note that sense data
1078 may indicate ATA bus error (e.g. Sense Key 04h HARDWARE ERROR
1079 &amp;&amp; ASC/ASCQ 47h/00h SCSI PARITY ERROR). In such
1080 cases, the error should be considered as an ATA bus error and
1081 handled according to <xref linkend="excatATAbusErr"/>.
1082 </para>
1083
1084 </sect2>
1085
1086 <sect2 id="excatNCQerr">
1087 <title>ATA device error (NCQ)</title>
1088
1089 <para>
1090 An NCQ command error is indicated by a cleared BSY bit and a set
1091 ERR bit during the NCQ command phase (one or more NCQ commands
1092 outstanding). Although STATUS and ERROR registers will
1093 contain valid values describing the error, READ LOG EXT is
1094 required to clear the error condition, determine which command
1095 has failed and acquire more information.
1096 </para>
1097
1098 <para>
1099 READ LOG EXT Log Page 10h reports which tag has failed and
1100 taskfile register values describing the error. With this
1101 information the failed command can be handled as a normal ATA
1102 command error as in <xref linkend="excatDevErr"/> and all
1103 other in-flight commands must be retried. Note that this
1104 retry should not be counted - it's likely that commands
1105 retried this way would have completed normally if it were not
1106 for the failed command.
1107 </para>
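    <para>
    A hypothetical taskfile setup for fetching the log (opcode and log
    page number per the SATA specification; ATA_CMD_READ_LOG_EXT is an
    assumed constant name):
    </para>
    <programlisting>
tf.command = ATA_CMD_READ_LOG_EXT;  /* 0x2f */
tf.lbal    = 0x10;                  /* log page 10h: NCQ error log */
tf.nsect   = 1;                     /* one 512-byte log sector */
    </programlisting>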
1108
1109 <para>
1110 Note that ATA bus errors can be reported as ATA device NCQ
1111 errors. This should be handled as described in <xref
1112 linkend="excatATAbusErr"/>.
1113 </para>
1114
1115 <para>
1116 If READ LOG EXT Log Page 10h fails or reports NQ, we're
1117 thoroughly screwed. This condition should be treated
1118 according to <xref linkend="excatHSMviolation"/>.
1119 </para>
1120
1121 </sect2>
1122
1123 <sect2 id="excatATAbusErr">
1124 <title>ATA bus error</title>
1125
1126 <para>
1127 An ATA bus error means that data corruption occurred during
1128 transmission over the ATA bus (SATA or PATA). This type of error
1129 can be indicated by
1130 </para>
1131
1132 <itemizedlist>
1133
1134 <listitem>
1135 <para>
1136 ICRC or ABRT error as described in <xref linkend="excatDevErr"/>.
1137 </para>
1138 </listitem>
1139
1140 <listitem>
1141 <para>
1142 Controller-specific error completion with error information
1143 indicating transmission error.
1144 </para>
1145 </listitem>
1146
1147 <listitem>
1148 <para>
1149 On some controllers, command timeout. In this case, there may
1150 be a mechanism to determine that the timeout is due to
1151 transmission error.
1152 </para>
1153 </listitem>
1154
1155 <listitem>
1156 <para>
1157 Unknown/random errors, timeouts and all sorts of weirdities.
1158 </para>
1159 </listitem>
1160
1161 </itemizedlist>
1162
1163 <para>
1164 As described above, transmission errors can cause a wide variety
1165 of symptoms ranging from a device ICRC error to random device
1166 lockup, and, for many cases, there is no way to tell if an
1167 error condition is due to transmission error or not;
1168 therefore, it's necessary to employ some kind of heuristic
1169 when dealing with errors and timeouts. For example,
1170 encountering repetitive ABRT errors for a known supported
1171 command is likely to indicate an ATA bus error.
1172 </para>
1173
1174 <para>
1175 Once it's determined that ATA bus errors have possibly
1176 occurred, lowering the ATA bus transmission speed is one of the
1177 actions which may alleviate the problem. See <xref
1178 linkend="exrecReconf"/> for more information.
1179 </para>
1180
1181 </sect2>
1182
1183 <sect2 id="excatPCIbusErr">
1184 <title>PCI bus error</title>
1185
1186 <para>
1187 Data corruption or other failures during transmission over PCI
1188 (or other system bus). For standard BMDMA, this is indicated
1189 by the Error bit in the BMDMA Status register. This type of
1190 error must be logged, as it indicates something is very wrong
1191 with the system. Resetting the host controller is recommended.
1192 </para>
1193
1194 </sect2>
1195
1196 <sect2 id="excatLateCompletion">
1197 <title>Late completion</title>
1198
1199 <para>
1200 This occurs when a timeout expires and the timeout handler finds
1201 out that the timed out command has completed, successfully or
1202 with error. This is usually caused by lost interrupts. This
1203 type of error must be logged. Resetting the host controller is
1204 recommended.
1205 </para>
1206
1207 </sect2>
1208
1209 <sect2 id="excatUnknown">
1210 <title>Unknown error (timeout)</title>
1211
1212 <para>
1213 This is when a timeout occurs and the command is still
1214 processing or the host and device are in an unknown state. When
1215 this occurs, HSM could be in any valid or invalid state. To
1216 bring the device to a known state and make it forget about the
1217 timed out command, resetting is necessary. The timed out
1218 command may be retried.
1219 </para>
1220
1221 <para>
1222 Timeouts can also be caused by transmission errors. Refer to
1223 <xref linkend="excatATAbusErr"/> for more details.
1224 </para>
1225
1226 </sect2>
1227
1228 <sect2 id="excatHoplugPM">
1229 <title>Hotplug and power management exceptions</title>
1230
1231 <para>
1232 &lt;&lt;TODO: fill here&gt;&gt;
1233 </para>
1234
1235 </sect2>
1236
1237 </sect1>
1238
1239 <sect1 id="exrec">
1240 <title>EH recovery actions</title>
1241
1242 <para>
1243 This section discusses several important recovery actions.
1244 </para>
1245
1246 <sect2 id="exrecClr">
1247 <title>Clearing error condition</title>
1248
1249 <para>
1250 Many controllers require their error registers to be cleared by
1251 the error handler. Different controllers may have different
1252 requirements.
1253 </para>
1254
1255 <para>
1256 For SATA, it's strongly recommended to clear at least the SError
1257 register during error handling.
1258 </para>
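    <para>
    On SATA the usual idiom is to write back the value just read, as
    SError bits are write-one-to-clear (a sketch, assuming the port
    provides SCR access via its scr_read/scr_write operations):
    </para>
    <programlisting>
u32 serror = ap->ops->scr_read(ap, SCR_ERROR);
ap->ops->scr_write(ap, SCR_ERROR, serror);  /* clear all logged errors */
    </programlisting>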
1259 </sect2>
1260
1261 <sect2 id="exrecRst">
1262 <title>Reset</title>
1263
1264 <para>
1265 During EH, resetting is necessary in the following cases.
1266 </para>
1267
1268 <itemizedlist>
1269
1270 <listitem>
1271 <para>
1272 HSM is in unknown or invalid state
1273 </para>
1274 </listitem>
1275
1276 <listitem>
1277 <para>
1278 HBA is in unknown or invalid state
1279 </para>
1280 </listitem>
1281
1282 <listitem>
1283 <para>
1284 EH needs to make HBA/device forget about in-flight commands
1285 </para>
1286 </listitem>
1287
1288 <listitem>
1289 <para>
1290 HBA/device behaves weirdly
1291 </para>
1292 </listitem>
1293
1294 </itemizedlist>
1295
1296 <para>
1297 Resetting during EH might be a good idea regardless of error
1298 condition to improve EH robustness. Whether to reset both or
1299 either one of HBA and device depends on situation but the
1300 following scheme is recommended.
1301 </para>
1302
1303 <itemizedlist>
1304
1305 <listitem>
1306 <para>
1307 When it's known that the HBA is in a ready state but the ATA/ATAPI
1308 device is in an unknown state, reset only the device.
1309 </para>
1310 </listitem>
1311
1312 <listitem>
1313 <para>
1314 If HBA is in unknown state, reset both HBA and device.
1315 </para>
1316 </listitem>
1317
1318 </itemizedlist>
1319
1320 <para>
1321 HBA resetting is implementation specific. For a controller
1322 complying to the taskfile/BMDMA PCI IDE interface, stopping the
1323 active DMA transaction may be sufficient iff BMDMA state is the
1324 only HBA context. But even mostly compliant taskfile/BMDMA PCI
1325 IDE controllers may have implementation specific requirements
1326 and mechanisms to reset themselves. This must be addressed by
1327 specific drivers.
1328 </para>
1329
1330 <para>
1331 OTOH, ATA/ATAPI standard describes in detail ways to reset
1332 ATA/ATAPI devices.
1333 </para>
1334
1335 <variablelist>
1336
1337 <varlistentry><term>PATA hardware reset</term>
1338 <listitem>
1339 <para>
1340 This is a hardware initiated device reset signalled by an
1341 asserted PATA RESET- signal. There is no standard way to
1342 initiate a hardware reset from software, although some
1343 hardware provides registers that allow the driver to directly
1344 tweak the RESET- signal.
1345 </para>
1346 </listitem>
1347 </varlistentry>
1348
1349 <varlistentry><term>Software reset</term>
1350 <listitem>
1351 <para>
1352 This is achieved by turning the CONTROL SRST bit on for at
1353 least 5us. Both PATA and SATA support it but, in the case of
1354 SATA, this may require controller-specific support as the
1355 second Register FIS to clear SRST should be transmitted
1356 while the BSY bit is still set. Note that on PATA, this resets
1357 both master and slave devices on a channel.
1358 </para>
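      <para>
      A sketch of the classic PATA sequence for a PIO-port controller
      (timings are the standard's minimums rounded up):
      </para>
      <programlisting>
outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);  /* assert SRST */
udelay(10);                                  /* spec minimum is 5us */
outb(ap->ctl, ioaddr->ctl_addr);             /* deassert SRST */
      </programlisting>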
1359 </listitem>
1360 </varlistentry>
1361
1362 <varlistentry><term>EXECUTE DEVICE DIAGNOSTIC command</term>
1363 <listitem>
1364 <para>
1365 Although the ATA/ATAPI standard doesn't describe it exactly, EDD
1366 implies some level of resetting, possibly at a level similar
1367 to software reset. The host-side EDD protocol can be handled
1368 with normal command processing and most SATA controllers
1369 should be able to handle EDD's just like other commands.
1370 As in software reset, EDD affects both devices on a PATA
1371 bus.
1372 </para>
1373 <para>
1374 Although EDD does reset devices, this doesn't suit error
1375 handling as EDD cannot be issued while BSY is set and it's
1376 unclear how it will act when device is in unknown/weird
1377 state.
1378 </para>
1379 </listitem>
1380 </varlistentry>
1381
1382 <varlistentry><term>ATAPI DEVICE RESET command</term>
1383 <listitem>
1384 <para>
1385 This is very similar to software reset except that reset
1386 can be restricted to the selected device without affecting
1387 the other device sharing the cable.
1388 </para>
1389 </listitem>
1390 </varlistentry>
1391
1392 <varlistentry><term>SATA phy reset</term>
1393 <listitem>
1394 <para>
1395 This is the preferred way of resetting a SATA device. In
1396 effect, it's identical to PATA hardware reset. Note that
1397 this can be done with the standard SCR Control register.
1398 As such, it's usually easier to implement than software
1399 reset.
1400 </para>
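      <para>
      A sketch using the SCR Control register (values per the SATA
      specification: DET=1 issues COMRESET, DET=0 releases it):
      </para>
      <programlisting>
ap->ops->scr_write(ap, SCR_CONTROL, 0x301);  /* DET=1: issue COMRESET */
mdelay(1);                                   /* hold for at least 1ms */
ap->ops->scr_write(ap, SCR_CONTROL, 0x300);  /* DET=0: PHY renegotiates */
      </programlisting>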
1401 </listitem>
1402 </varlistentry>
1403
1404 </variablelist>
1405
1406 <para>
1407 One more thing to consider when resetting devices is that
1408 resetting clears certain configuration parameters and they
1409 need to be set to their previous or newly adjusted values
1410 after reset.
1411 </para>
1412
1413 <para>
1414 The affected parameters are:
1415 </para>
1416
1417 <itemizedlist>
1418
1419 <listitem>
1420 <para>
1421 CHS set up with INITIALIZE DEVICE PARAMETERS (seldom used)
1422 </para>
1423 </listitem>
1424
1425 <listitem>
1426 <para>
1427 Parameters set with SET FEATURES including transfer mode setting
1428 </para>
1429 </listitem>
1430
1431 <listitem>
1432 <para>
1433 Block count set with SET MULTIPLE MODE
1434 </para>
1435 </listitem>
1436
1437 <listitem>
1438 <para>
1439 Other parameters (SET MAX, MEDIA LOCK...)
1440 </para>
1441 </listitem>
1442
1443 </itemizedlist>
1444
1445 <para>
1446 ATA/ATAPI standard specifies that some parameters must be
1447 maintained across hardware or software reset, but doesn't
1448 strictly specify all of them. Always reconfiguring needed
1449 parameters after reset is required for robustness. Note that
1450 this also applies when resuming from deep sleep (power-off).
1451 </para>
1452
1453 <para>
1454 Also, the ATA/ATAPI standard requires that IDENTIFY DEVICE /
1455 IDENTIFY PACKET DEVICE is issued after any configuration
1456 parameter is updated or a hardware reset and the result used
1457 for further operation. The OS driver is required to implement a
1458 revalidation mechanism to support this.
1459 </para>
1460
1461 </sect2>
1462
1463 <sect2 id="exrecReconf">
1464 <title>Reconfigure transport</title>
1465
1466 <para>
1467 For both PATA and SATA, a lot of corners are cut for cheap
1468 connectors, cables or controllers and it's quite common to see
1469 high transmission error rate. This can be mitigated by
1470 lowering transmission speed.
1471 </para>
1472
1473 <para>
1474 The following is a possible scheme Jeff Garzik suggested.
1475 </para>
1476
1477 <blockquote>
1478 <para>
1479 If more than $N (3?) transmission errors happen in 15 minutes,
1480 </para>
1481 <itemizedlist>
1482 <listitem>
1483 <para>
1484 if SATA, decrease SATA PHY speed. if speed cannot be decreased,
1485 </para>
1486 </listitem>
1487 <listitem>
1488 <para>
1489 decrease UDMA xfer speed. if at UDMA0, switch to PIO4,
1490 </para>
1491 </listitem>
1492 <listitem>
1493 <para>
1494 decrease PIO xfer speed. if at PIO3, complain, but continue
1495 </para>
1496 </listitem>
1497 </itemizedlist>
1498 </blockquote>
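    <para>
    In C-like pseudocode (all helpers here are hypothetical; nothing of
    this sort exists in libata yet):
    </para>
    <programlisting>
if (xmit_err_count(ap, 15 * 60 * HZ) > 3) {
	if (is_sata(ap) &amp;&amp; sata_phy_speed_down(ap) == 0)
		return;                 /* retry at the lower PHY speed */
	if (udma_speed_down(dev) == 0)  /* at UDMA0, switches to PIO4 */
		return;
	pio_speed_down(dev);            /* at PIO3: complain, continue */
}
    </programlisting>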
1499
1500 </sect2>
1501
1502 </sect1>
1503
1504 </chapter>
1505
1506 <chapter id="PiixInt">
1507 <title>ata_piix Internals</title>
1508 !Idrivers/scsi/ata_piix.c
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3ee9b8b33be0..9c9f162bd6ed 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -489,11 +489,11 @@ config SCSI_SATA_NV
 
 	  If unsure, say N.
 
-config SCSI_SATA_PROMISE
-	tristate "Promise SATA TX2/TX4 support"
+config SCSI_PDC_ADMA
+	tristate "Pacific Digital ADMA support"
 	depends on SCSI_SATA && PCI
 	help
-	  This option enables support for Promise Serial ATA TX2/TX4.
+	  This option enables support for Pacific Digital ADMA controllers
 
 	  If unsure, say N.
 
@@ -505,6 +505,14 @@ config SCSI_SATA_QSTOR
 
 	  If unsure, say N.
 
+config SCSI_SATA_PROMISE
+	tristate "Promise SATA TX2/TX4 support"
+	depends on SCSI_SATA && PCI
+	help
+	  This option enables support for Promise Serial ATA TX2/TX4.
+
+	  If unsure, say N.
+
 config SCSI_SATA_SX4
 	tristate "Promise SATA SX4 support"
 	depends on SCSI_SATA && PCI && EXPERIMENTAL
@@ -521,6 +529,14 @@ config SCSI_SATA_SIL
 
 	  If unsure, say N.
 
+config SCSI_SATA_SIL24
+	tristate "Silicon Image 3124/3132 SATA support"
+	depends on SCSI_SATA && PCI && EXPERIMENTAL
+	help
+	  This option enables support for Silicon Image 3124/3132 Serial ATA.
+
+	  If unsure, say N.
+
 config SCSI_SATA_SIS
 	tristate "SiS 964/180 SATA support"
 	depends on SCSI_SATA && PCI && EXPERIMENTAL
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 48529d180ca8..2d4439826c08 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_ATA_PIIX)	+= libata.o ata_piix.o
 obj-$(CONFIG_SCSI_SATA_PROMISE)	+= libata.o sata_promise.o
 obj-$(CONFIG_SCSI_SATA_QSTOR)	+= libata.o sata_qstor.o
 obj-$(CONFIG_SCSI_SATA_SIL)	+= libata.o sata_sil.o
+obj-$(CONFIG_SCSI_SATA_SIL24)	+= libata.o sata_sil24.o
 obj-$(CONFIG_SCSI_SATA_VIA)	+= libata.o sata_via.o
 obj-$(CONFIG_SCSI_SATA_VITESSE)	+= libata.o sata_vsc.o
 obj-$(CONFIG_SCSI_SATA_SIS)	+= libata.o sata_sis.o
@@ -137,6 +138,7 @@ obj-$(CONFIG_SCSI_SATA_SX4)	+= libata.o sata_sx4.o
 obj-$(CONFIG_SCSI_SATA_NV)	+= libata.o sata_nv.o
 obj-$(CONFIG_SCSI_SATA_ULI)	+= libata.o sata_uli.o
 obj-$(CONFIG_SCSI_SATA_MV)	+= libata.o sata_mv.o
+obj-$(CONFIG_SCSI_PDC_ADMA)	+= libata.o pdc_adma.o
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index c2c8fa828e24..fe8187d6f58b 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -216,7 +216,7 @@ static Scsi_Host_Template ahci_sht = {
 	.ordered_flush		= 1,
 };
 
-static struct ata_port_operations ahci_ops = {
+static const struct ata_port_operations ahci_ops = {
 	.port_disable		= ata_port_disable,
 
 	.check_status		= ahci_check_status,
@@ -407,7 +407,7 @@ static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
 		return 0xffffffffU;
 	}
 
-	return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 
@@ -425,7 +425,7 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
 		return;
 	}
 
-	writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 static void ahci_phy_reset(struct ata_port *ap)
@@ -453,14 +453,14 @@ static void ahci_phy_reset(struct ata_port *ap)
 
 static u8 ahci_check_status(struct ata_port *ap)
 {
-	void *mmio = (void *) ap->ioaddr.cmd_addr;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
 
 	return readl(mmio + PORT_TFDATA) & 0xFF;
 }
 
 static u8 ahci_check_err(struct ata_port *ap)
 {
-	void *mmio = (void *) ap->ioaddr.cmd_addr;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
 
 	return (readl(mmio + PORT_TFDATA) >> 8) & 0xFF;
 }
@@ -672,17 +672,36 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 
 	for (i = 0; i < host_set->n_ports; i++) {
 		struct ata_port *ap;
-		u32 tmp;
 
-		VPRINTK("port %u\n", i);
+		if (!(irq_stat & (1 << i)))
+			continue;
+
 		ap = host_set->ports[i];
-		tmp = irq_stat & (1 << i);
-		if (tmp && ap) {
+		if (ap) {
 			struct ata_queued_cmd *qc;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (ahci_host_intr(ap, qc))
-				irq_ack |= (1 << i);
+			if (!ahci_host_intr(ap, qc))
+				if (ata_ratelimit()) {
+					struct pci_dev *pdev =
+						to_pci_dev(ap->host_set->dev);
+					printk(KERN_WARNING
+					       "ahci(%s): unhandled interrupt on port %u\n",
+					       pci_name(pdev), i);
+				}
+
+			VPRINTK("port %u\n", i);
+		} else {
+			VPRINTK("port %u (no irq)\n", i);
+			if (ata_ratelimit()) {
+				struct pci_dev *pdev =
+					to_pci_dev(ap->host_set->dev);
+				printk(KERN_WARNING
+				       "ahci(%s): interrupt on disabled port %u\n",
+				       pci_name(pdev), i);
+			}
 		}
+
+		irq_ack |= (1 << i);
 	}
 
 	if (irq_ack) {
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index d71cef767cec..be021478f416 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -147,7 +147,7 @@ static Scsi_Host_Template piix_sht = {
 	.ordered_flush		= 1,
 };
 
-static struct ata_port_operations piix_pata_ops = {
+static const struct ata_port_operations piix_pata_ops = {
 	.port_disable		= ata_port_disable,
 	.set_piomode		= piix_set_piomode,
 	.set_dmamode		= piix_set_dmamode,
@@ -177,7 +177,7 @@ static struct ata_port_operations piix_pata_ops = {
 	.host_stop		= ata_host_stop,
 };
 
-static struct ata_port_operations piix_sata_ops = {
+static const struct ata_port_operations piix_sata_ops = {
 	.port_disable		= ata_port_disable,
 
 	.tf_load		= ata_tf_load,
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index e5b01997117a..f53d7b8ac33f 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -48,6 +48,7 @@
 #include <linux/completion.h>
 #include <linux/suspend.h>
 #include <linux/workqueue.h>
+#include <linux/jiffies.h>
 #include <scsi/scsi.h>
 #include "scsi.h"
 #include "scsi_priv.h"
@@ -62,14 +63,15 @@
 static unsigned int ata_busy_sleep (struct ata_port *ap,
 				    unsigned long tmout_pat,
 				    unsigned long tmout);
+static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
+static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
 static void ata_set_mode(struct ata_port *ap);
 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
-static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
+static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
 static int fgb(u32 bitmap);
-static int ata_choose_xfer_mode(struct ata_port *ap,
+static int ata_choose_xfer_mode(const struct ata_port *ap,
 				u8 *xfer_mode_out,
 				unsigned int *xfer_shift_out);
-static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
 static void __ata_qc_complete(struct ata_queued_cmd *qc);
 
 static unsigned int ata_unique_id = 1;
@@ -85,7 +87,7 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
 /**
- *	ata_tf_load - send taskfile registers to host controller
+ *	ata_tf_load_pio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
@@ -95,7 +97,7 @@ MODULE_VERSION(DRV_VERSION);
 *	Inherited from caller.
 */
 
-static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -153,7 +155,7 @@ static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
 *	Inherited from caller.
 */
 
-static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -222,7 +224,7 @@ static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
 *	LOCKING:
 *	Inherited from caller.
 */
-void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
+void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	if (ap->flags & ATA_FLAG_MMIO)
 		ata_tf_load_mmio(ap, tf);
@@ -242,7 +244,7 @@ void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
 *	spin_lock_irqsave(host_set lock)
 */
 
-static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
 
@@ -263,7 +265,7 @@ static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
 *	spin_lock_irqsave(host_set lock)
 */
 
-static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
 
@@ -283,7 +285,7 @@ static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
-void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
+void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	if (ap->flags & ATA_FLAG_MMIO)
 		ata_exec_command_mmio(ap, tf);
@@ -303,7 +305,7 @@ void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
 *	Obtains host_set lock.
 */
 
-static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
+static inline void ata_exec(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	unsigned long flags;
 
@@ -326,7 +328,7 @@ static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
 *	Obtains host_set lock.
 */
 
-static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_tf_to_host(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	ap->ops->tf_load(ap, tf);
 
@@ -346,7 +348,7 @@ static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
 *	spin_lock_irqsave(host_set lock)
 */
 
-void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
+void ata_tf_to_host_nolock(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	ap->ops->tf_load(ap, tf);
 	ap->ops->exec_command(ap, tf);
@@ -556,7 +558,7 @@ u8 ata_chk_err(struct ata_port *ap)
 *	Inherited from caller.
 */
 
-void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
 {
 	fis[0] = 0x27;	/* Register - Host to Device FIS */
 	fis[1] = (pmp & 0xf) | (1 << 7);	/* Port multiplier number,
@@ -597,7 +599,7 @@ void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
 *	Inherited from caller.
 */
 
-void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
+void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
 {
 	tf->command	= fis[2];	/* status */
 	tf->feature	= fis[3];	/* error */
@@ -615,79 +617,53 @@ void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
 	tf->hob_nsect	= fis[13];
 }
 
-/**
- *	ata_prot_to_cmd - determine which read/write opcodes to use
- *	@protocol: ATA_PROT_xxx taskfile protocol
- *	@lba48: true is lba48 is present
- *
- *	Given necessary input, determine which read/write commands
- *	to use to transfer data.
- *
- *	LOCKING:
- *	None.
- */
-static int ata_prot_to_cmd(int protocol, int lba48)
-{
-	int rcmd = 0, wcmd = 0;
-
-	switch (protocol) {
-	case ATA_PROT_PIO:
-		if (lba48) {
-			rcmd = ATA_CMD_PIO_READ_EXT;
-			wcmd = ATA_CMD_PIO_WRITE_EXT;
-		} else {
-			rcmd = ATA_CMD_PIO_READ;
-			wcmd = ATA_CMD_PIO_WRITE;
-		}
-		break;
-
-	case ATA_PROT_DMA:
-		if (lba48) {
-			rcmd = ATA_CMD_READ_EXT;
-			wcmd = ATA_CMD_WRITE_EXT;
-		} else {
-			rcmd = ATA_CMD_READ;
-			wcmd = ATA_CMD_WRITE;
-		}
-		break;
-
-	default:
-		return -1;
-	}
-
-	return rcmd | (wcmd << 8);
-}
+static const u8 ata_rw_cmds[] = {
+	/* pio multi */
+	ATA_CMD_READ_MULTI,
+	ATA_CMD_WRITE_MULTI,
+	ATA_CMD_READ_MULTI_EXT,
+	ATA_CMD_WRITE_MULTI_EXT,
+	/* pio */
+	ATA_CMD_PIO_READ,
+	ATA_CMD_PIO_WRITE,
+	ATA_CMD_PIO_READ_EXT,
+	ATA_CMD_PIO_WRITE_EXT,
+	/* dma */
+	ATA_CMD_READ,
+	ATA_CMD_WRITE,
+	ATA_CMD_READ_EXT,
+	ATA_CMD_WRITE_EXT
+};
 
 /**
- *	ata_dev_set_protocol - set taskfile protocol and r/w commands
- *	@dev: device to examine and configure
+ *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
+ *	@qc: command to examine and configure
 *
- *	Examine the device configuration, after we have
- *	read the identify-device page and configured the
- *	data transfer mode.  Set internal state related to
- *	the ATA taskfile protocol (pio, pio mult, dma, etc.)
- *	and calculate the proper read/write commands to use.
+ *	Examine the device configuration and tf->flags to calculate
+ *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
-static void ata_dev_set_protocol(struct ata_device *dev)
+void ata_rwcmd_protocol(struct ata_queued_cmd *qc)
 {
-	int pio = (dev->flags & ATA_DFLAG_PIO);
-	int lba48 = (dev->flags & ATA_DFLAG_LBA48);
-	int proto, cmd;
+	struct ata_taskfile *tf = &qc->tf;
+	struct ata_device *dev = qc->dev;
 
-	if (pio)
-		proto = dev->xfer_protocol = ATA_PROT_PIO;
-	else
-		proto = dev->xfer_protocol = ATA_PROT_DMA;
+	int index, lba48, write;
 
-	cmd = ata_prot_to_cmd(proto, lba48);
-	if (cmd < 0)
-		BUG();
+	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
+	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
 
-	dev->read_cmd = cmd & 0xff;
-	dev->write_cmd = (cmd >> 8) & 0xff;
+	if (dev->flags & ATA_DFLAG_PIO) {
+		tf->protocol = ATA_PROT_PIO;
+		index = dev->multi_count ? 0 : 4;
+	} else {
+		tf->protocol = ATA_PROT_DMA;
+		index = 8;
+	}
+
+	tf->command = ata_rw_cmds[index + lba48 + write];
 }
 
 static const char * xfer_mode_str[] = {
@@ -869,7 +845,7 @@ static unsigned int ata_devchk(struct ata_port *ap,
 *	the event of failure.
 */
 
-unsigned int ata_dev_classify(struct ata_taskfile *tf)
+unsigned int ata_dev_classify(const struct ata_taskfile *tf)
 {
 	/* Apple's open source Darwin code hints that some devices only
 	 * put a proper signature into the LBA mid/high registers,
@@ -961,7 +937,7 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
 *	caller.
 */
 
-void ata_dev_id_string(u16 *id, unsigned char *s,
+void ata_dev_id_string(const u16 *id, unsigned char *s,
 		       unsigned int ofs, unsigned int len)
 {
 	unsigned int c;
@@ -1078,7 +1054,7 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
 *	caller.
 */
 
-static inline void ata_dump_id(struct ata_device *dev)
+static inline void ata_dump_id(const struct ata_device *dev)
 {
 	DPRINTK("49==0x%04x  "
 		"53==0x%04x  "
@@ -1106,6 +1082,31 @@ static inline void ata_dump_id(struct ata_device *dev)
 		dev->id[93]);
 }
 
+/*
+ *	Compute the PIO modes available for this device. This is not as
+ *	trivial as it seems if we must consider early devices correctly.
+ *
+ *	FIXME: pre IDE drive timing (do we care ?).
+ */
+
+static unsigned int ata_pio_modes(const struct ata_device *adev)
+{
+	u16 modes;
+
+	/* Usual case. Word 53 indicates word 88 is valid */
+	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
+		modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
+		modes <<= 3;
+		modes |= 0x7;
+		return modes;
+	}
+
+	/* If word 88 isn't valid then Word 51 holds the PIO timing number
+	   for the maximum. Turn it into a mask and return it */
+	modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;
+	return modes;
+}
+
 /**
 *	ata_dev_identify - obtain IDENTIFY x DEVICE page
 *	@ap: port on which device we wish to probe resides
@@ -1131,7 +1132,7 @@ static inline void ata_dump_id(struct ata_device *dev)
 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
 {
 	struct ata_device *dev = &ap->device[device];
-	unsigned int i;
+	unsigned int major_version;
 	u16 tmp;
 	unsigned long xfer_modes;
 	u8 status;
@@ -1229,9 +1230,9 @@ retry:
 	 * common ATA, ATAPI feature tests
 	 */
 
-	/* we require LBA and DMA support (bits 8 & 9 of word 49) */
-	if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
-		printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
+	/* we require DMA support (bits 8 of word 49) */
+	if (!ata_id_has_dma(dev->id)) {
+		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
 		goto err_out_nosup;
 	}
 
@@ -1239,10 +1240,8 @@ retry:
 	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
 	if (!xfer_modes)
 		xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
-	if (!xfer_modes) {
-		xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
-		xfer_modes |= (0x7 << ATA_SHIFT_PIO);
-	}
+	if (!xfer_modes)
+		xfer_modes = ata_pio_modes(dev);
 
 	ata_dump_id(dev);
 
@@ -1251,32 +1250,75 @@ retry:
 	if (!ata_id_is_ata(dev->id))	/* sanity check */
 		goto err_out_nosup;
 
+	/* get major version */
 	tmp = dev->id[ATA_ID_MAJOR_VER];
-	for (i = 14; i >= 1; i--)
-		if (tmp & (1 << i))
+	for (major_version = 14; major_version >= 1; major_version--)
+		if (tmp & (1 << major_version))
 			break;
 
-	/* we require at least ATA-3 */
-	if (i < 3) {
-		printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
-		goto err_out_nosup;
+	/*
+	 * The exact sequence expected by certain pre-ATA4 drives is:
+	 * SRST RESET
+	 * IDENTIFY
+	 * INITIALIZE DEVICE PARAMETERS
+	 * anything else..
+	 * Some drives were very specific about that exact sequence.
+	 */
+	if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
+		ata_dev_init_params(ap, dev);
+
+		/* current CHS translation info (id[53-58]) might be
+		 * changed. reread the identify device info.
+		 */
+		ata_dev_reread_id(ap, dev);
 	}
 
-	if (ata_id_has_lba48(dev->id)) {
-		dev->flags |= ATA_DFLAG_LBA48;
-		dev->n_sectors = ata_id_u64(dev->id, 100);
-	} else {
-		dev->n_sectors = ata_id_u32(dev->id, 60);
+	if (ata_id_has_lba(dev->id)) {
+		dev->flags |= ATA_DFLAG_LBA;
+
+		if (ata_id_has_lba48(dev->id)) {
+			dev->flags |= ATA_DFLAG_LBA48;
+			dev->n_sectors = ata_id_u64(dev->id, 100);
+		} else {
+			dev->n_sectors = ata_id_u32(dev->id, 60);
+		}
+
+		/* print device info to dmesg */
+		printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
+		       ap->id, device,
+		       major_version,
+		       ata_mode_string(xfer_modes),
+		       (unsigned long long)dev->n_sectors,
+		       dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
+	} else {
+		/* CHS */
+
+		/* Default translation */
+		dev->cylinders	= dev->id[1];
+		dev->heads	= dev->id[3];
+		dev->sectors	= dev->id[6];
+		dev->n_sectors	= dev->cylinders * dev->heads * dev->sectors;
+
+		if (ata_id_current_chs_valid(dev->id)) {
+			/* Current CHS translation is valid. */
+			dev->cylinders = dev->id[54];
+			dev->heads = dev->id[55];
+			dev->sectors = dev->id[56];
+
+			dev->n_sectors = ata_id_u32(dev->id, 57);
+		}
+
+		/* print device info to dmesg */
+		printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
+		       ap->id, device,
+		       major_version,
+		       ata_mode_string(xfer_modes),
+		       (unsigned long long)dev->n_sectors,
+		       (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
+
 	}
 
 	ap->host->max_cmd_len = 16;
1273
1274 /* print device info to dmesg */
1275 printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1276 ap->id, device,
1277 ata_mode_string(xfer_modes),
1278 (unsigned long long)dev->n_sectors,
1279 dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
1280 } 1322 }
1281 1323
1282 /* ATAPI-specific feature tests */ 1324 /* ATAPI-specific feature tests */
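
One detail worth spelling out from the hunk above: the major-version scan in ata_dev_identify() simply locates the highest set bit in the IDENTIFY major-version word, where bit n advertises ATA-n support. A standalone check with a hypothetical word value:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical major-version word: bits 1-5 set => ATA-1 .. ATA-5. */
        unsigned short major_ver_word = 0x003e;
        unsigned int major_version;

        /* Same loop shape as the patch: scan from bit 14 down. */
        for (major_version = 14; major_version >= 1; major_version--)
            if (major_ver_word & (1 << major_version))
                break;

        printf("highest claimed standard: ATA-%u\n", major_version); /* ATA-5 */
        return 0;
    }
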
@@ -1310,7 +1352,7 @@ err_out:
1310} 1352}
1311 1353
1312 1354
1313static inline u8 ata_dev_knobble(struct ata_port *ap) 1355static inline u8 ata_dev_knobble(const struct ata_port *ap)
1314{ 1356{
1315 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id))); 1357 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
1316} 1358}
@@ -1496,7 +1538,153 @@ void ata_port_disable(struct ata_port *ap)
1496 ap->flags |= ATA_FLAG_PORT_DISABLED; 1538 ap->flags |= ATA_FLAG_PORT_DISABLED;
1497} 1539}
1498 1540
1499static struct { 1541/*
1542 * This mode timing computation functionality is ported over from
1543 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1544 */
1545/*
1546 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1547 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1548 * for PIO 5, which is a nonstandard extension and UDMA6, which
1549 * is currently supported only by Maxtor drives.
1550 */
1551
1552static const struct ata_timing ata_timing[] = {
1553
1554 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1555 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1556 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1557 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1558
1559 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1560 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1561 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1562
1563/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1564
1565 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1566 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1567 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1568
1569 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1570 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1571 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1572
1573/* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1574 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1575 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1576
1577 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1578 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1579 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1580
1581/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1582
1583 { 0xFF }
1584};
1585
1586#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1587#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1588
1589static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1590{
1591 q->setup = EZ(t->setup * 1000, T);
1592 q->act8b = EZ(t->act8b * 1000, T);
1593 q->rec8b = EZ(t->rec8b * 1000, T);
1594 q->cyc8b = EZ(t->cyc8b * 1000, T);
1595 q->active = EZ(t->active * 1000, T);
1596 q->recover = EZ(t->recover * 1000, T);
1597 q->cycle = EZ(t->cycle * 1000, T);
1598 q->udma = EZ(t->udma * 1000, UT);
1599}
1600
1601void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1602 struct ata_timing *m, unsigned int what)
1603{
1604 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1605 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1606 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1607 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1608 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1609 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1610 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1611 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1612}
1613
1614static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1615{
1616 const struct ata_timing *t;
1617
1618 for (t = ata_timing; t->mode != speed; t++)
1619 if (t->mode == 0xFF)
1620 return NULL;
1621 return t;
1622}
1623
1624int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1625 struct ata_timing *t, int T, int UT)
1626{
1627 const struct ata_timing *s;
1628 struct ata_timing p;
1629
1630 /*
1631 * Find the mode.
1632 */
1633
1634 if (!(s = ata_timing_find_mode(speed)))
1635 return -EINVAL;
1636
1637 /*
1638 * If the drive is an EIDE drive, it can tell us it needs extended
1639 * PIO/MW_DMA cycle timing.
1640 */
1641
1642 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1643 memset(&p, 0, sizeof(p));
1644 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1645 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1646 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1647 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1648 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1649 }
1650 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1651 }
1652
1653 /*
1654 * Convert the timing to bus clock counts.
1655 */
1656
1657 ata_timing_quantize(s, t, T, UT);
1658
1659 /*
1660 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T
1661 * and some other commands. We have to ensure that the DMA cycle timing is
 1662 * slower than or equal to the fastest PIO timing.
1663 */
1664
1665 if (speed > XFER_PIO_4) {
1666 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1667 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1668 }
1669
1670 /*
 1671 * Lengthen active & recovery time so that cycle time is correct.
1672 */
1673
1674 if (t->act8b + t->rec8b < t->cyc8b) {
1675 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1676 t->rec8b = t->cyc8b - t->act8b;
1677 }
1678
1679 if (t->active + t->recover < t->cycle) {
1680 t->active += (t->cycle - (t->active + t->recover)) / 2;
1681 t->recover = t->cycle - t->active;
1682 }
1683
1684 return 0;
1685}
1686
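
The ENOUGH()/EZ() macros round a nanosecond figure up to a whole number of bus clocks, leaving zero ("don't care") entries untouched, and the tail of ata_timing_compute() then stretches active/recovery so they fill the whole cycle. A standalone sketch of those two steps, using the PIO mode 0 row from the table above and assuming, for illustration, that T is the clock period in picoseconds (roughly 30000 for a 33 MHz bus):

    #include <stdio.h>

    /* Round v up to whole units; zero means "don't care" and stays zero. */
    #define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
    #define EZ(v, unit)     ((v) ? ENOUGH(v, unit) : 0)

    int main(void)
    {
        int T = 30000;                                 /* assumed clock period, ps */
        int active = 165, recover = 150, cycle = 600;  /* PIO_0 row, nanoseconds */

        int q_active  = EZ(active * 1000, T);   /* 6 clocks  */
        int q_recover = EZ(recover * 1000, T);  /* 5 clocks  */
        int q_cycle   = EZ(cycle * 1000, T);    /* 20 clocks */

        /* Same fix-up as ata_timing_compute(): lengthen active and
         * recovery so the quantized cycle time stays correct. */
        if (q_active + q_recover < q_cycle) {
            q_active += (q_cycle - (q_active + q_recover)) / 2;
            q_recover = q_cycle - q_active;
        }

        /* prints active=10 recover=10 cycle=20 */
        printf("active=%d recover=%d cycle=%d\n", q_active, q_recover, q_cycle);
        return 0;
    }
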
1687static const struct {
1500 unsigned int shift; 1688 unsigned int shift;
1501 u8 base; 1689 u8 base;
1502} xfer_mode_classes[] = { 1690} xfer_mode_classes[] = {
@@ -1603,7 +1791,7 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1603 */ 1791 */
1604static void ata_set_mode(struct ata_port *ap) 1792static void ata_set_mode(struct ata_port *ap)
1605{ 1793{
1606 unsigned int i, xfer_shift; 1794 unsigned int xfer_shift;
1607 u8 xfer_mode; 1795 u8 xfer_mode;
1608 int rc; 1796 int rc;
1609 1797
@@ -1632,11 +1820,6 @@ static void ata_set_mode(struct ata_port *ap)
1632 if (ap->ops->post_set_mode) 1820 if (ap->ops->post_set_mode)
1633 ap->ops->post_set_mode(ap); 1821 ap->ops->post_set_mode(ap);
1634 1822
1635 for (i = 0; i < 2; i++) {
1636 struct ata_device *dev = &ap->device[i];
1637 ata_dev_set_protocol(dev);
1638 }
1639
1640 return; 1823 return;
1641 1824
1642err_out: 1825err_out:
@@ -1910,7 +2093,8 @@ err_out:
1910 DPRINTK("EXIT\n"); 2093 DPRINTK("EXIT\n");
1911} 2094}
1912 2095
1913static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev) 2096static void ata_pr_blacklisted(const struct ata_port *ap,
2097 const struct ata_device *dev)
1914{ 2098{
1915 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n", 2099 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
1916 ap->id, dev->devno); 2100 ap->id, dev->devno);
@@ -1948,7 +2132,7 @@ static const char * ata_dma_blacklist [] = {
1948 "_NEC DV5800A", 2132 "_NEC DV5800A",
1949}; 2133};
1950 2134
1951static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev) 2135static int ata_dma_blacklisted(const struct ata_device *dev)
1952{ 2136{
1953 unsigned char model_num[40]; 2137 unsigned char model_num[40];
1954 char *s; 2138 char *s;
@@ -1973,9 +2157,9 @@ static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
1973 return 0; 2157 return 0;
1974} 2158}
1975 2159
1976static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift) 2160static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
1977{ 2161{
1978 struct ata_device *master, *slave; 2162 const struct ata_device *master, *slave;
1979 unsigned int mask; 2163 unsigned int mask;
1980 2164
1981 master = &ap->device[0]; 2165 master = &ap->device[0];
@@ -1987,14 +2171,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
1987 mask = ap->udma_mask; 2171 mask = ap->udma_mask;
1988 if (ata_dev_present(master)) { 2172 if (ata_dev_present(master)) {
1989 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff); 2173 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
1990 if (ata_dma_blacklisted(ap, master)) { 2174 if (ata_dma_blacklisted(master)) {
1991 mask = 0; 2175 mask = 0;
1992 ata_pr_blacklisted(ap, master); 2176 ata_pr_blacklisted(ap, master);
1993 } 2177 }
1994 } 2178 }
1995 if (ata_dev_present(slave)) { 2179 if (ata_dev_present(slave)) {
1996 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff); 2180 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
1997 if (ata_dma_blacklisted(ap, slave)) { 2181 if (ata_dma_blacklisted(slave)) {
1998 mask = 0; 2182 mask = 0;
1999 ata_pr_blacklisted(ap, slave); 2183 ata_pr_blacklisted(ap, slave);
2000 } 2184 }
@@ -2004,14 +2188,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
2004 mask = ap->mwdma_mask; 2188 mask = ap->mwdma_mask;
2005 if (ata_dev_present(master)) { 2189 if (ata_dev_present(master)) {
2006 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07); 2190 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2007 if (ata_dma_blacklisted(ap, master)) { 2191 if (ata_dma_blacklisted(master)) {
2008 mask = 0; 2192 mask = 0;
2009 ata_pr_blacklisted(ap, master); 2193 ata_pr_blacklisted(ap, master);
2010 } 2194 }
2011 } 2195 }
2012 if (ata_dev_present(slave)) { 2196 if (ata_dev_present(slave)) {
2013 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07); 2197 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2014 if (ata_dma_blacklisted(ap, slave)) { 2198 if (ata_dma_blacklisted(slave)) {
2015 mask = 0; 2199 mask = 0;
2016 ata_pr_blacklisted(ap, slave); 2200 ata_pr_blacklisted(ap, slave);
2017 } 2201 }
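
The shape of ata_get_mode_mask() is the same for every transfer class: intersect what the controller offers with what each present drive reports in its IDENTIFY mode words, and zero the result outright if the drive is blacklisted. A standalone sketch with hypothetical capability values:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical capabilities. */
        unsigned int port_udma_mask  = 0x3f;   /* controller: UDMA 0-5 */
        unsigned int drive_udma_word = 0x1f;   /* IDENTIFY: UDMA 0-4   */
        int          blacklisted     = 0;      /* not on the blacklist */

        unsigned int mask = port_udma_mask;
        mask &= drive_udma_word & 0xff;        /* intersect capabilities */
        if (blacklisted)
            mask = 0;                          /* force the drive to PIO */

        printf("usable UDMA mask: 0x%02x\n", mask); /* prints 0x1f */
        return 0;
    }
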
@@ -2075,7 +2259,7 @@ static int fgb(u32 bitmap)
2075 * Zero on success, negative on error. 2259 * Zero on success, negative on error.
2076 */ 2260 */
2077 2261
2078static int ata_choose_xfer_mode(struct ata_port *ap, 2262static int ata_choose_xfer_mode(const struct ata_port *ap,
2079 u8 *xfer_mode_out, 2263 u8 *xfer_mode_out,
2080 unsigned int *xfer_shift_out) 2264 unsigned int *xfer_shift_out)
2081{ 2265{
@@ -2144,6 +2328,110 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2144} 2328}
2145 2329
2146/** 2330/**
2331 * ata_dev_reread_id - Reread the device identify device info
2332 * @ap: port where the device is
2333 * @dev: device to reread the identify device info
2334 *
2335 * LOCKING:
2336 */
2337
2338static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2339{
2340 DECLARE_COMPLETION(wait);
2341 struct ata_queued_cmd *qc;
2342 unsigned long flags;
2343 int rc;
2344
2345 qc = ata_qc_new_init(ap, dev);
2346 BUG_ON(qc == NULL);
2347
2348 ata_sg_init_one(qc, dev->id, sizeof(dev->id));
2349 qc->dma_dir = DMA_FROM_DEVICE;
2350
2351 if (dev->class == ATA_DEV_ATA) {
2352 qc->tf.command = ATA_CMD_ID_ATA;
2353 DPRINTK("do ATA identify\n");
2354 } else {
2355 qc->tf.command = ATA_CMD_ID_ATAPI;
2356 DPRINTK("do ATAPI identify\n");
2357 }
2358
2359 qc->tf.flags |= ATA_TFLAG_DEVICE;
2360 qc->tf.protocol = ATA_PROT_PIO;
2361 qc->nsect = 1;
2362
2363 qc->waiting = &wait;
2364 qc->complete_fn = ata_qc_complete_noop;
2365
2366 spin_lock_irqsave(&ap->host_set->lock, flags);
2367 rc = ata_qc_issue(qc);
2368 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2369
2370 if (rc)
2371 goto err_out;
2372
2373 wait_for_completion(&wait);
2374
2375 swap_buf_le16(dev->id, ATA_ID_WORDS);
2376
2377 ata_dump_id(dev);
2378
2379 DPRINTK("EXIT\n");
2380
2381 return;
2382err_out:
2383 ata_port_disable(ap);
2384}
2385
2386/**
2387 * ata_dev_init_params - Issue INIT DEV PARAMS command
2388 * @ap: Port associated with device @dev
2389 * @dev: Device to which command will be sent
2390 *
2391 * LOCKING:
2392 */
2393
2394static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2395{
2396 DECLARE_COMPLETION(wait);
2397 struct ata_queued_cmd *qc;
2398 int rc;
2399 unsigned long flags;
2400 u16 sectors = dev->id[6];
2401 u16 heads = dev->id[3];
2402
2403 /* Number of sectors per track 1-255. Number of heads 1-16 */
2404 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2405 return;
2406
2407 /* set up init dev params taskfile */
2408 DPRINTK("init dev params \n");
2409
2410 qc = ata_qc_new_init(ap, dev);
2411 BUG_ON(qc == NULL);
2412
2413 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
2414 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2415 qc->tf.protocol = ATA_PROT_NODATA;
2416 qc->tf.nsect = sectors;
2417 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2418
2419 qc->waiting = &wait;
2420 qc->complete_fn = ata_qc_complete_noop;
2421
2422 spin_lock_irqsave(&ap->host_set->lock, flags);
2423 rc = ata_qc_issue(qc);
2424 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2425
2426 if (rc)
2427 ata_port_disable(ap);
2428 else
2429 wait_for_completion(&wait);
2430
2431 DPRINTK("EXIT\n");
2432}
2433
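
The taskfile packing in ata_dev_init_params() is compact: sectors-per-track goes into the sector count register as-is, and the low nibble of the device register carries the maximum head number, i.e. heads - 1. A standalone check of that encoding with a hard-coded geometry; the 0xa0 base value for the device register is an assumption about the obsolete fixed bits, not something this hunk shows:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed geometry from IDENTIFY words 6 and 3. */
        unsigned short sectors = 63, heads = 16;
        unsigned char tf_nsect, tf_device = 0xa0;  /* assumed base bits */

        /* Same range guard as the patch: skip the command otherwise. */
        if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
            return 1;

        tf_nsect   = (unsigned char)sectors;
        tf_device |= (heads - 1) & 0x0f;   /* max head = heads - 1 */

        printf("nsect=0x%02x device=0x%02x\n", tf_nsect, tf_device);
        return 0;                          /* nsect=0x3f device=0xaf */
    }
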
2434/**
2147 * ata_sg_clean - Unmap DMA memory associated with command 2435 * ata_sg_clean - Unmap DMA memory associated with command
2148 * @qc: Command containing DMA memory to be released 2436 * @qc: Command containing DMA memory to be released
2149 * 2437 *
@@ -2413,32 +2701,32 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2413 2701
2414/** 2702/**
2415 * ata_pio_poll - 2703 * ata_pio_poll -
2416 * @ap: 2704 * @ap: the target ata_port
2417 * 2705 *
2418 * LOCKING: 2706 * LOCKING:
2419 * None. (executing in kernel thread context) 2707 * None. (executing in kernel thread context)
2420 * 2708 *
2421 * RETURNS: 2709 * RETURNS:
2422 * 2710 * timeout value to use
2423 */ 2711 */
2424 2712
2425static unsigned long ata_pio_poll(struct ata_port *ap) 2713static unsigned long ata_pio_poll(struct ata_port *ap)
2426{ 2714{
2427 u8 status; 2715 u8 status;
2428 unsigned int poll_state = PIO_ST_UNKNOWN; 2716 unsigned int poll_state = HSM_ST_UNKNOWN;
2429 unsigned int reg_state = PIO_ST_UNKNOWN; 2717 unsigned int reg_state = HSM_ST_UNKNOWN;
2430 const unsigned int tmout_state = PIO_ST_TMOUT; 2718 const unsigned int tmout_state = HSM_ST_TMOUT;
2431 2719
2432 switch (ap->pio_task_state) { 2720 switch (ap->hsm_task_state) {
2433 case PIO_ST: 2721 case HSM_ST:
2434 case PIO_ST_POLL: 2722 case HSM_ST_POLL:
2435 poll_state = PIO_ST_POLL; 2723 poll_state = HSM_ST_POLL;
2436 reg_state = PIO_ST; 2724 reg_state = HSM_ST;
2437 break; 2725 break;
2438 case PIO_ST_LAST: 2726 case HSM_ST_LAST:
2439 case PIO_ST_LAST_POLL: 2727 case HSM_ST_LAST_POLL:
2440 poll_state = PIO_ST_LAST_POLL; 2728 poll_state = HSM_ST_LAST_POLL;
2441 reg_state = PIO_ST_LAST; 2729 reg_state = HSM_ST_LAST;
2442 break; 2730 break;
2443 default: 2731 default:
2444 BUG(); 2732 BUG();
@@ -2448,20 +2736,20 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2448 status = ata_chk_status(ap); 2736 status = ata_chk_status(ap);
2449 if (status & ATA_BUSY) { 2737 if (status & ATA_BUSY) {
2450 if (time_after(jiffies, ap->pio_task_timeout)) { 2738 if (time_after(jiffies, ap->pio_task_timeout)) {
2451 ap->pio_task_state = tmout_state; 2739 ap->hsm_task_state = tmout_state;
2452 return 0; 2740 return 0;
2453 } 2741 }
2454 ap->pio_task_state = poll_state; 2742 ap->hsm_task_state = poll_state;
2455 return ATA_SHORT_PAUSE; 2743 return ATA_SHORT_PAUSE;
2456 } 2744 }
2457 2745
2458 ap->pio_task_state = reg_state; 2746 ap->hsm_task_state = reg_state;
2459 return 0; 2747 return 0;
2460} 2748}
2461 2749
2462/** 2750/**
2463 * ata_pio_complete - 2751 * ata_pio_complete - check if drive is busy or idle
2464 * @ap: 2752 * @ap: the target ata_port
2465 * 2753 *
2466 * LOCKING: 2754 * LOCKING:
2467 * None. (executing in kernel thread context) 2755 * None. (executing in kernel thread context)
@@ -2480,14 +2768,14 @@ static int ata_pio_complete (struct ata_port *ap)
2480 * we enter, BSY will be cleared in a chk-status or two. If not, 2768 * we enter, BSY will be cleared in a chk-status or two. If not,
2481 * the drive is probably seeking or something. Snooze for a couple 2769 * the drive is probably seeking or something. Snooze for a couple
2482 * msecs, then chk-status again. If still busy, fall back to 2770 * msecs, then chk-status again. If still busy, fall back to
2483 * PIO_ST_POLL state. 2771 * HSM_ST_POLL state.
2484 */ 2772 */
2485 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2773 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2486 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2774 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2487 msleep(2); 2775 msleep(2);
2488 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2776 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2489 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2777 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2490 ap->pio_task_state = PIO_ST_LAST_POLL; 2778 ap->hsm_task_state = HSM_ST_LAST_POLL;
2491 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2779 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2492 return 0; 2780 return 0;
2493 } 2781 }
@@ -2495,14 +2783,14 @@ static int ata_pio_complete (struct ata_port *ap)
2495 2783
2496 drv_stat = ata_wait_idle(ap); 2784 drv_stat = ata_wait_idle(ap);
2497 if (!ata_ok(drv_stat)) { 2785 if (!ata_ok(drv_stat)) {
2498 ap->pio_task_state = PIO_ST_ERR; 2786 ap->hsm_task_state = HSM_ST_ERR;
2499 return 0; 2787 return 0;
2500 } 2788 }
2501 2789
2502 qc = ata_qc_from_tag(ap, ap->active_tag); 2790 qc = ata_qc_from_tag(ap, ap->active_tag);
2503 assert(qc != NULL); 2791 assert(qc != NULL);
2504 2792
2505 ap->pio_task_state = PIO_ST_IDLE; 2793 ap->hsm_task_state = HSM_ST_IDLE;
2506 2794
2507 ata_poll_qc_complete(qc, drv_stat); 2795 ata_poll_qc_complete(qc, drv_stat);
2508 2796
@@ -2513,7 +2801,7 @@ static int ata_pio_complete (struct ata_port *ap)
2513 2801
2514 2802
2515/** 2803/**
2516 * swap_buf_le16 - 2804 * swap_buf_le16 - swap halves of 16-bit words in place
2517 * @buf: Buffer to swap 2805 * @buf: Buffer to swap
2518 * @buf_words: Number of 16-bit words in buffer. 2806 * @buf_words: Number of 16-bit words in buffer.
2519 * 2807 *
@@ -2522,6 +2810,7 @@ static int ata_pio_complete (struct ata_port *ap)
2522 * vice-versa. 2810 * vice-versa.
2523 * 2811 *
2524 * LOCKING: 2812 * LOCKING:
2813 * Inherited from caller.
2525 */ 2814 */
2526void swap_buf_le16(u16 *buf, unsigned int buf_words) 2815void swap_buf_le16(u16 *buf, unsigned int buf_words)
2527{ 2816{
@@ -2544,7 +2833,6 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
2544 * 2833 *
2545 * LOCKING: 2834 * LOCKING:
2546 * Inherited from caller. 2835 * Inherited from caller.
2547 *
2548 */ 2836 */
2549 2837
2550static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, 2838static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -2590,7 +2878,6 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2590 * 2878 *
2591 * LOCKING: 2879 * LOCKING:
2592 * Inherited from caller. 2880 * Inherited from caller.
2593 *
2594 */ 2881 */
2595 2882
2596static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, 2883static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -2630,7 +2917,6 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2630 * 2917 *
2631 * LOCKING: 2918 * LOCKING:
2632 * Inherited from caller. 2919 * Inherited from caller.
2633 *
2634 */ 2920 */
2635 2921
2636static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, 2922static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -2662,7 +2948,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
2662 unsigned char *buf; 2948 unsigned char *buf;
2663 2949
2664 if (qc->cursect == (qc->nsect - 1)) 2950 if (qc->cursect == (qc->nsect - 1))
2665 ap->pio_task_state = PIO_ST_LAST; 2951 ap->hsm_task_state = HSM_ST_LAST;
2666 2952
2667 page = sg[qc->cursg].page; 2953 page = sg[qc->cursg].page;
2668 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; 2954 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
@@ -2712,7 +2998,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2712 unsigned int offset, count; 2998 unsigned int offset, count;
2713 2999
2714 if (qc->curbytes + bytes >= qc->nbytes) 3000 if (qc->curbytes + bytes >= qc->nbytes)
2715 ap->pio_task_state = PIO_ST_LAST; 3001 ap->hsm_task_state = HSM_ST_LAST;
2716 3002
2717next_sg: 3003next_sg:
2718 if (unlikely(qc->cursg >= qc->n_elem)) { 3004 if (unlikely(qc->cursg >= qc->n_elem)) {
@@ -2734,7 +3020,7 @@ next_sg:
2734 for (i = 0; i < words; i++) 3020 for (i = 0; i < words; i++)
2735 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 3021 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
2736 3022
2737 ap->pio_task_state = PIO_ST_LAST; 3023 ap->hsm_task_state = HSM_ST_LAST;
2738 return; 3024 return;
2739 } 3025 }
2740 3026
@@ -2783,7 +3069,6 @@ next_sg:
2783 * 3069 *
2784 * LOCKING: 3070 * LOCKING:
2785 * Inherited from caller. 3071 * Inherited from caller.
2786 *
2787 */ 3072 */
2788 3073
2789static void atapi_pio_bytes(struct ata_queued_cmd *qc) 3074static void atapi_pio_bytes(struct ata_queued_cmd *qc)
@@ -2815,12 +3100,12 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2815err_out: 3100err_out:
2816 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3101 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2817 ap->id, dev->devno); 3102 ap->id, dev->devno);
2818 ap->pio_task_state = PIO_ST_ERR; 3103 ap->hsm_task_state = HSM_ST_ERR;
2819} 3104}
2820 3105
2821/** 3106/**
2822 * ata_pio_sector - 3107 * ata_pio_block - start PIO on a block
2823 * @ap: 3108 * @ap: the target ata_port
2824 * 3109 *
2825 * LOCKING: 3110 * LOCKING:
2826 * None. (executing in kernel thread context) 3111 * None. (executing in kernel thread context)
@@ -2832,19 +3117,19 @@ static void ata_pio_block(struct ata_port *ap)
2832 u8 status; 3117 u8 status;
2833 3118
2834 /* 3119 /*
2835 * This is purely hueristic. This is a fast path. 3120 * This is purely heuristic. This is a fast path.
2836 * Sometimes when we enter, BSY will be cleared in 3121 * Sometimes when we enter, BSY will be cleared in
2837 * a chk-status or two. If not, the drive is probably seeking 3122 * a chk-status or two. If not, the drive is probably seeking
2838 * or something. Snooze for a couple msecs, then 3123 * or something. Snooze for a couple msecs, then
2839 * chk-status again. If still busy, fall back to 3124 * chk-status again. If still busy, fall back to
2840 * PIO_ST_POLL state. 3125 * HSM_ST_POLL state.
2841 */ 3126 */
2842 status = ata_busy_wait(ap, ATA_BUSY, 5); 3127 status = ata_busy_wait(ap, ATA_BUSY, 5);
2843 if (status & ATA_BUSY) { 3128 if (status & ATA_BUSY) {
2844 msleep(2); 3129 msleep(2);
2845 status = ata_busy_wait(ap, ATA_BUSY, 10); 3130 status = ata_busy_wait(ap, ATA_BUSY, 10);
2846 if (status & ATA_BUSY) { 3131 if (status & ATA_BUSY) {
2847 ap->pio_task_state = PIO_ST_POLL; 3132 ap->hsm_task_state = HSM_ST_POLL;
2848 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 3133 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2849 return; 3134 return;
2850 } 3135 }
@@ -2856,7 +3141,7 @@ static void ata_pio_block(struct ata_port *ap)
2856 if (is_atapi_taskfile(&qc->tf)) { 3141 if (is_atapi_taskfile(&qc->tf)) {
2857 /* no more data to transfer or unsupported ATAPI command */ 3142 /* no more data to transfer or unsupported ATAPI command */
2858 if ((status & ATA_DRQ) == 0) { 3143 if ((status & ATA_DRQ) == 0) {
2859 ap->pio_task_state = PIO_ST_LAST; 3144 ap->hsm_task_state = HSM_ST_LAST;
2860 return; 3145 return;
2861 } 3146 }
2862 3147
@@ -2864,7 +3149,7 @@ static void ata_pio_block(struct ata_port *ap)
2864 } else { 3149 } else {
2865 /* handle BSY=0, DRQ=0 as error */ 3150 /* handle BSY=0, DRQ=0 as error */
2866 if ((status & ATA_DRQ) == 0) { 3151 if ((status & ATA_DRQ) == 0) {
2867 ap->pio_task_state = PIO_ST_ERR; 3152 ap->hsm_task_state = HSM_ST_ERR;
2868 return; 3153 return;
2869 } 3154 }
2870 3155
@@ -2884,7 +3169,7 @@ static void ata_pio_error(struct ata_port *ap)
2884 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n", 3169 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2885 ap->id, drv_stat); 3170 ap->id, drv_stat);
2886 3171
2887 ap->pio_task_state = PIO_ST_IDLE; 3172 ap->hsm_task_state = HSM_ST_IDLE;
2888 3173
2889 ata_poll_qc_complete(qc, drv_stat | ATA_ERR); 3174 ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
2890} 3175}
@@ -2899,25 +3184,25 @@ fsm_start:
2899 timeout = 0; 3184 timeout = 0;
2900 qc_completed = 0; 3185 qc_completed = 0;
2901 3186
2902 switch (ap->pio_task_state) { 3187 switch (ap->hsm_task_state) {
2903 case PIO_ST_IDLE: 3188 case HSM_ST_IDLE:
2904 return; 3189 return;
2905 3190
2906 case PIO_ST: 3191 case HSM_ST:
2907 ata_pio_block(ap); 3192 ata_pio_block(ap);
2908 break; 3193 break;
2909 3194
2910 case PIO_ST_LAST: 3195 case HSM_ST_LAST:
2911 qc_completed = ata_pio_complete(ap); 3196 qc_completed = ata_pio_complete(ap);
2912 break; 3197 break;
2913 3198
2914 case PIO_ST_POLL: 3199 case HSM_ST_POLL:
2915 case PIO_ST_LAST_POLL: 3200 case HSM_ST_LAST_POLL:
2916 timeout = ata_pio_poll(ap); 3201 timeout = ata_pio_poll(ap);
2917 break; 3202 break;
2918 3203
2919 case PIO_ST_TMOUT: 3204 case HSM_ST_TMOUT:
2920 case PIO_ST_ERR: 3205 case HSM_ST_ERR:
2921 ata_pio_error(ap); 3206 ata_pio_error(ap);
2922 return; 3207 return;
2923 } 3208 }
@@ -2928,52 +3213,6 @@ fsm_start:
2928 goto fsm_start; 3213 goto fsm_start;
2929} 3214}
2930 3215
2931static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2932 struct scsi_cmnd *cmd)
2933{
2934 DECLARE_COMPLETION(wait);
2935 struct ata_queued_cmd *qc;
2936 unsigned long flags;
2937 int rc;
2938
2939 DPRINTK("ATAPI request sense\n");
2940
2941 qc = ata_qc_new_init(ap, dev);
2942 BUG_ON(qc == NULL);
2943
2944 /* FIXME: is this needed? */
2945 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2946
2947 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2948 qc->dma_dir = DMA_FROM_DEVICE;
2949
2950 memset(&qc->cdb, 0, ap->cdb_len);
2951 qc->cdb[0] = REQUEST_SENSE;
2952 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2953
2954 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2955 qc->tf.command = ATA_CMD_PACKET;
2956
2957 qc->tf.protocol = ATA_PROT_ATAPI;
2958 qc->tf.lbam = (8 * 1024) & 0xff;
2959 qc->tf.lbah = (8 * 1024) >> 8;
2960 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2961
2962 qc->waiting = &wait;
2963 qc->complete_fn = ata_qc_complete_noop;
2964
2965 spin_lock_irqsave(&ap->host_set->lock, flags);
2966 rc = ata_qc_issue(qc);
2967 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2968
2969 if (rc)
2970 ata_port_disable(ap);
2971 else
2972 wait_for_completion(&wait);
2973
2974 DPRINTK("EXIT\n");
2975}
2976
2977/** 3216/**
2978 * ata_qc_timeout - Handle timeout of queued command 3217 * ata_qc_timeout - Handle timeout of queued command
2979 * @qc: Command that timed out 3218 * @qc: Command that timed out
@@ -3091,14 +3330,14 @@ void ata_eng_timeout(struct ata_port *ap)
3091 DPRINTK("ENTER\n"); 3330 DPRINTK("ENTER\n");
3092 3331
3093 qc = ata_qc_from_tag(ap, ap->active_tag); 3332 qc = ata_qc_from_tag(ap, ap->active_tag);
3094 if (!qc) { 3333 if (qc)
3334 ata_qc_timeout(qc);
3335 else {
3095 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 3336 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3096 ap->id); 3337 ap->id);
3097 goto out; 3338 goto out;
3098 } 3339 }
3099 3340
3100 ata_qc_timeout(qc);
3101
3102out: 3341out:
3103 DPRINTK("EXIT\n"); 3342 DPRINTK("EXIT\n");
3104} 3343}
@@ -3155,15 +3394,12 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3155 qc->nbytes = qc->curbytes = 0; 3394 qc->nbytes = qc->curbytes = 0;
3156 3395
3157 ata_tf_init(ap, &qc->tf, dev->devno); 3396 ata_tf_init(ap, &qc->tf, dev->devno);
3158
3159 if (dev->flags & ATA_DFLAG_LBA48)
3160 qc->tf.flags |= ATA_TFLAG_LBA48;
3161 } 3397 }
3162 3398
3163 return qc; 3399 return qc;
3164} 3400}
3165 3401
3166static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat) 3402int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
3167{ 3403{
3168 return 0; 3404 return 0;
3169} 3405}
@@ -3201,7 +3437,6 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc)
3201 * 3437 *
3202 * LOCKING: 3438 * LOCKING:
3203 * spin_lock_irqsave(host_set lock) 3439 * spin_lock_irqsave(host_set lock)
3204 *
3205 */ 3440 */
3206void ata_qc_free(struct ata_queued_cmd *qc) 3441void ata_qc_free(struct ata_queued_cmd *qc)
3207{ 3442{
@@ -3221,7 +3456,6 @@ void ata_qc_free(struct ata_queued_cmd *qc)
3221 * 3456 *
3222 * LOCKING: 3457 * LOCKING:
3223 * spin_lock_irqsave(host_set lock) 3458 * spin_lock_irqsave(host_set lock)
3224 *
3225 */ 3459 */
3226 3460
3227void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 3461void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
@@ -3360,7 +3594,7 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3360 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 3594 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3361 ata_qc_set_polling(qc); 3595 ata_qc_set_polling(qc);
3362 ata_tf_to_host_nolock(ap, &qc->tf); 3596 ata_tf_to_host_nolock(ap, &qc->tf);
3363 ap->pio_task_state = PIO_ST; 3597 ap->hsm_task_state = HSM_ST;
3364 queue_work(ata_wq, &ap->pio_task); 3598 queue_work(ata_wq, &ap->pio_task);
3365 break; 3599 break;
3366 3600
@@ -3586,7 +3820,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
3586 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; 3820 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3587 host_stat = readb(mmio + ATA_DMA_STATUS); 3821 host_stat = readb(mmio + ATA_DMA_STATUS);
3588 } else 3822 } else
3589 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 3823 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3590 return host_stat; 3824 return host_stat;
3591} 3825}
3592 3826
@@ -3715,7 +3949,6 @@ idle_irq:
3715 * 3949 *
3716 * RETURNS: 3950 * RETURNS:
3717 * IRQ_NONE or IRQ_HANDLED. 3951 * IRQ_NONE or IRQ_HANDLED.
3718 *
3719 */ 3952 */
3720 3953
3721irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) 3954irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
@@ -3806,7 +4039,7 @@ static void atapi_packet_task(void *_data)
3806 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4039 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3807 4040
3808 /* PIO commands are handled by polling */ 4041 /* PIO commands are handled by polling */
3809 ap->pio_task_state = PIO_ST; 4042 ap->hsm_task_state = HSM_ST;
3810 queue_work(ata_wq, &ap->pio_task); 4043 queue_work(ata_wq, &ap->pio_task);
3811 } 4044 }
3812 4045
@@ -3827,6 +4060,7 @@ err_out:
3827 * May be used as the port_start() entry in ata_port_operations. 4060 * May be used as the port_start() entry in ata_port_operations.
3828 * 4061 *
3829 * LOCKING: 4062 * LOCKING:
4063 * Inherited from caller.
3830 */ 4064 */
3831 4065
3832int ata_port_start (struct ata_port *ap) 4066int ata_port_start (struct ata_port *ap)
@@ -3852,6 +4086,7 @@ int ata_port_start (struct ata_port *ap)
3852 * May be used as the port_stop() entry in ata_port_operations. 4086 * May be used as the port_stop() entry in ata_port_operations.
3853 * 4087 *
3854 * LOCKING: 4088 * LOCKING:
4089 * Inherited from caller.
3855 */ 4090 */
3856 4091
3857void ata_port_stop (struct ata_port *ap) 4092void ata_port_stop (struct ata_port *ap)
@@ -3874,6 +4109,7 @@ void ata_host_stop (struct ata_host_set *host_set)
3874 * @do_unregister: 1 if we fully unregister, 0 to just stop the port 4109 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3875 * 4110 *
3876 * LOCKING: 4111 * LOCKING:
4112 * Inherited from caller.
3877 */ 4113 */
3878 4114
3879static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) 4115static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
@@ -3901,12 +4137,11 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3901 * 4137 *
3902 * LOCKING: 4138 * LOCKING:
3903 * Inherited from caller. 4139 * Inherited from caller.
3904 *
3905 */ 4140 */
3906 4141
3907static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, 4142static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3908 struct ata_host_set *host_set, 4143 struct ata_host_set *host_set,
3909 struct ata_probe_ent *ent, unsigned int port_no) 4144 const struct ata_probe_ent *ent, unsigned int port_no)
3910{ 4145{
3911 unsigned int i; 4146 unsigned int i;
3912 4147
@@ -3962,10 +4197,9 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3962 * 4197 *
3963 * RETURNS: 4198 * RETURNS:
3964 * New ata_port on success, or NULL on error. 4199 * New ata_port on success, or NULL on error.
3965 *
3966 */ 4200 */
3967 4201
3968static struct ata_port * ata_host_add(struct ata_probe_ent *ent, 4202static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
3969 struct ata_host_set *host_set, 4203 struct ata_host_set *host_set,
3970 unsigned int port_no) 4204 unsigned int port_no)
3971{ 4205{
@@ -4010,10 +4244,9 @@ err_out:
4010 * 4244 *
4011 * RETURNS: 4245 * RETURNS:
4012 * Number of ports registered. Zero on error (no ports registered). 4246 * Number of ports registered. Zero on error (no ports registered).
4013 *
4014 */ 4247 */
4015 4248
4016int ata_device_add(struct ata_probe_ent *ent) 4249int ata_device_add(const struct ata_probe_ent *ent)
4017{ 4250{
4018 unsigned int count = 0, i; 4251 unsigned int count = 0, i;
4019 struct device *dev = ent->dev; 4252 struct device *dev = ent->dev;
@@ -4113,7 +4346,7 @@ int ata_device_add(struct ata_probe_ent *ent)
4113 for (i = 0; i < count; i++) { 4346 for (i = 0; i < count; i++) {
4114 struct ata_port *ap = host_set->ports[i]; 4347 struct ata_port *ap = host_set->ports[i];
4115 4348
4116 scsi_scan_host(ap->host); 4349 ata_scsi_scan_host(ap);
4117 } 4350 }
4118 4351
4119 dev_set_drvdata(dev, host_set); 4352 dev_set_drvdata(dev, host_set);
@@ -4142,7 +4375,6 @@ err_out:
4142 * Inherited from calling layer (may sleep). 4375 * Inherited from calling layer (may sleep).
4143 */ 4376 */
4144 4377
4145
4146void ata_host_set_remove(struct ata_host_set *host_set) 4378void ata_host_set_remove(struct ata_host_set *host_set)
4147{ 4379{
4148 struct ata_port *ap; 4380 struct ata_port *ap;
@@ -4232,7 +4464,7 @@ void ata_std_ports(struct ata_ioports *ioaddr)
4232} 4464}
4233 4465
4234static struct ata_probe_ent * 4466static struct ata_probe_ent *
4235ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port) 4467ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
4236{ 4468{
4237 struct ata_probe_ent *probe_ent; 4469 struct ata_probe_ent *probe_ent;
4238 4470
@@ -4273,85 +4505,86 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4273 * ata_pci_init_native_mode - Initialize native-mode driver 4505 * ata_pci_init_native_mode - Initialize native-mode driver
4274 * @pdev: pci device to be initialized 4506 * @pdev: pci device to be initialized
4275 * @port: array[2] of pointers to port info structures. 4507 * @port: array[2] of pointers to port info structures.
4508 * @ports: bitmap of ports present
4276 * 4509 *
4277 * Utility function which allocates and initializes an 4510 * Utility function which allocates and initializes an
4278 * ata_probe_ent structure for a standard dual-port 4511 * ata_probe_ent structure for a standard dual-port
4279 * PIO-based IDE controller. The returned ata_probe_ent 4512 * PIO-based IDE controller. The returned ata_probe_ent
4280 * structure can be passed to ata_device_add(). The returned 4513 * structure can be passed to ata_device_add(). The returned
4281 * ata_probe_ent structure should then be freed with kfree(). 4514 * ata_probe_ent structure should then be freed with kfree().
4515 *
4516 * The caller need only pass the address of the primary port; the
4517 * secondary will be deduced automatically. If the device has non-
4518 * standard secondary port mappings this function can be called twice,
4519 * once for each interface.
4282 */ 4520 */
4283 4521
4284struct ata_probe_ent * 4522struct ata_probe_ent *
4285ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port) 4523ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4286{ 4524{
4287 struct ata_probe_ent *probe_ent = 4525 struct ata_probe_ent *probe_ent =
4288 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4526 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4527 int p = 0;
4528
4289 if (!probe_ent) 4529 if (!probe_ent)
4290 return NULL; 4530 return NULL;
4291 4531
4292 probe_ent->n_ports = 2;
4293 probe_ent->irq = pdev->irq; 4532 probe_ent->irq = pdev->irq;
4294 probe_ent->irq_flags = SA_SHIRQ; 4533 probe_ent->irq_flags = SA_SHIRQ;
4295 4534
4296 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); 4535 if (ports & ATA_PORT_PRIMARY) {
4297 probe_ent->port[0].altstatus_addr = 4536 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4298 probe_ent->port[0].ctl_addr = 4537 probe_ent->port[p].altstatus_addr =
4299 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; 4538 probe_ent->port[p].ctl_addr =
4300 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); 4539 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4301 4540 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4302 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); 4541 ata_std_ports(&probe_ent->port[p]);
4303 probe_ent->port[1].altstatus_addr = 4542 p++;
4304 probe_ent->port[1].ctl_addr = 4543 }
4305 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4306 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4307 4544
4308 ata_std_ports(&probe_ent->port[0]); 4545 if (ports & ATA_PORT_SECONDARY) {
4309 ata_std_ports(&probe_ent->port[1]); 4546 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4547 probe_ent->port[p].altstatus_addr =
4548 probe_ent->port[p].ctl_addr =
4549 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4550 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4551 ata_std_ports(&probe_ent->port[p]);
4552 p++;
4553 }
4310 4554
4555 probe_ent->n_ports = p;
4311 return probe_ent; 4556 return probe_ent;
4312} 4557}
4313 4558
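
For a driver, the reworked signature means the probe path can request exactly the channels it owns. A rough sketch of how a hypothetical native-mode driver might use it, following the kfree-after-ata_device_add pattern visible in ata_pci_init_one() below; this is an illustration, not a complete module:

    #include <linux/pci.h>
    #include <linux/slab.h>
    #include <linux/libata.h>

    /* Hypothetical probe helper for a standard dual-channel controller. */
    static int my_native_probe(struct pci_dev *pdev, struct ata_port_info **port)
    {
        struct ata_probe_ent *probe_ent;

        probe_ent = ata_pci_init_native_mode(pdev, port,
                                             ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
        if (!probe_ent)
            return -ENOMEM;

        /* ata_device_add() returns the number of ports registered;
         * zero means nothing was brought up. */
        if (!ata_device_add(probe_ent)) {
            kfree(probe_ent);
            return -ENODEV;
        }

        kfree(probe_ent);
        return 0;
    }
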
4314static struct ata_probe_ent * 4559static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info **port, int port_num)
4315ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4316 struct ata_probe_ent **ppe2)
4317{ 4560{
4318 struct ata_probe_ent *probe_ent, *probe_ent2; 4561 struct ata_probe_ent *probe_ent;
4319 4562
4320 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4563 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4321 if (!probe_ent) 4564 if (!probe_ent)
4322 return NULL; 4565 return NULL;
4323 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
4324 if (!probe_ent2) {
4325 kfree(probe_ent);
4326 return NULL;
4327 }
4328
4329 probe_ent->n_ports = 1;
4330 probe_ent->irq = 14;
4331 4566
4332 probe_ent->hard_port_no = 0;
4333 probe_ent->legacy_mode = 1; 4567 probe_ent->legacy_mode = 1;
4334 4568 probe_ent->n_ports = 1;
4335 probe_ent2->n_ports = 1; 4569 probe_ent->hard_port_no = port_num;
4336 probe_ent2->irq = 15; 4570
4337 4571 switch(port_num)
4338 probe_ent2->hard_port_no = 1; 4572 {
4339 probe_ent2->legacy_mode = 1; 4573 case 0:
4340 4574 probe_ent->irq = 14;
4341 probe_ent->port[0].cmd_addr = 0x1f0; 4575 probe_ent->port[0].cmd_addr = 0x1f0;
4342 probe_ent->port[0].altstatus_addr = 4576 probe_ent->port[0].altstatus_addr =
4343 probe_ent->port[0].ctl_addr = 0x3f6; 4577 probe_ent->port[0].ctl_addr = 0x3f6;
4344 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); 4578 break;
4345 4579 case 1:
4346 probe_ent2->port[0].cmd_addr = 0x170; 4580 probe_ent->irq = 15;
4347 probe_ent2->port[0].altstatus_addr = 4581 probe_ent->port[0].cmd_addr = 0x170;
4348 probe_ent2->port[0].ctl_addr = 0x376; 4582 probe_ent->port[0].altstatus_addr =
4349 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8; 4583 probe_ent->port[0].ctl_addr = 0x376;
4350 4584 break;
4585 }
4586 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4351 ata_std_ports(&probe_ent->port[0]); 4587 ata_std_ports(&probe_ent->port[0]);
4352 ata_std_ports(&probe_ent2->port[0]);
4353
4354 *ppe2 = probe_ent2;
4355 return probe_ent; 4588 return probe_ent;
4356} 4589}
4357 4590
@@ -4374,13 +4607,12 @@ ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4374 * 4607 *
4375 * RETURNS: 4608 * RETURNS:
4376 * Zero on success, negative on errno-based value on error. 4609 * Zero on success, negative on errno-based value on error.
4377 *
4378 */ 4610 */
4379 4611
4380int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 4612int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4381 unsigned int n_ports) 4613 unsigned int n_ports)
4382{ 4614{
4383 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL; 4615 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4384 struct ata_port_info *port[2]; 4616 struct ata_port_info *port[2];
4385 u8 tmp8, mask; 4617 u8 tmp8, mask;
4386 unsigned int legacy_mode = 0; 4618 unsigned int legacy_mode = 0;
@@ -4397,7 +4629,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4397 4629
4398 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0 4630 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4399 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 4631 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4400 /* TODO: support transitioning to native mode? */ 4632 /* TODO: What if one channel is in native mode ... */
4401 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); 4633 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4402 mask = (1 << 2) | (1 << 0); 4634 mask = (1 << 2) | (1 << 0);
4403 if ((tmp8 & mask) != mask) 4635 if ((tmp8 & mask) != mask)
@@ -4405,11 +4637,20 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4405 } 4637 }
4406 4638
4407 /* FIXME... */ 4639 /* FIXME... */
4408 if ((!legacy_mode) && (n_ports > 1)) { 4640 if ((!legacy_mode) && (n_ports > 2)) {
4409 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n"); 4641 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4410 return -EINVAL; 4642 n_ports = 2;
4643 /* For now */
4411 } 4644 }
4412 4645
 4646 /* FIXME: Really, for ATA this isn't safe because the device may be
 4647 multi-purpose and we want to leave it alone if it was already
 4648 enabled. Secondly, for shared use, as Arjan says, we want refcounting.
 4649
 4650 Checking dev->is_enabled is insufficient, as this is not set at
 4651 boot for the primary video, which is BIOS-enabled.
 4652 */
4653
4413 rc = pci_enable_device(pdev); 4654 rc = pci_enable_device(pdev);
4414 if (rc) 4655 if (rc)
4415 return rc; 4656 return rc;
@@ -4420,6 +4661,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4420 goto err_out; 4661 goto err_out;
4421 } 4662 }
4422 4663
4664 /* FIXME: Should use platform specific mappers for legacy port ranges */
4423 if (legacy_mode) { 4665 if (legacy_mode) {
4424 if (!request_region(0x1f0, 8, "libata")) { 4666 if (!request_region(0x1f0, 8, "libata")) {
4425 struct resource *conflict, res; 4667 struct resource *conflict, res;
@@ -4464,10 +4706,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4464 goto err_out_regions; 4706 goto err_out_regions;
4465 4707
4466 if (legacy_mode) { 4708 if (legacy_mode) {
4467 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2); 4709 if (legacy_mode & (1 << 0))
4468 } else 4710 probe_ent = ata_pci_init_legacy_port(pdev, port, 0);
4469 probe_ent = ata_pci_init_native_mode(pdev, port); 4711 if (legacy_mode & (1 << 1))
4470 if (!probe_ent) { 4712 probe_ent2 = ata_pci_init_legacy_port(pdev, port, 1);
4713 } else {
4714 if (n_ports == 2)
4715 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4716 else
4717 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4718 }
4719 if (!probe_ent && !probe_ent2) {
4471 rc = -ENOMEM; 4720 rc = -ENOMEM;
4472 goto err_out_regions; 4721 goto err_out_regions;
4473 } 4722 }
@@ -4505,7 +4754,7 @@ err_out:
4505 * @pdev: PCI device that was removed 4754 * @pdev: PCI device that was removed
4506 * 4755 *
4507 * PCI layer indicates to libata via this hook that 4756 * PCI layer indicates to libata via this hook that
4508 * hot-unplug or module unload event has occured. 4757 * hot-unplug or module unload event has occurred.
4509 * Handle this by unregistering all objects associated 4758 * Handle this by unregistering all objects associated
4510 * with this PCI device. Free those objects. Then finally 4759 * with this PCI device. Free those objects. Then finally
4511 * release PCI resources and disable device. 4760 * release PCI resources and disable device.
@@ -4526,7 +4775,7 @@ void ata_pci_remove_one (struct pci_dev *pdev)
4526} 4775}
4527 4776
4528/* move to PCI subsystem */ 4777/* move to PCI subsystem */
4529int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits) 4778int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4530{ 4779{
4531 unsigned long tmp = 0; 4780 unsigned long tmp = 0;
4532 4781
@@ -4579,6 +4828,27 @@ static void __exit ata_exit(void)
4579module_init(ata_init); 4828module_init(ata_init);
4580module_exit(ata_exit); 4829module_exit(ata_exit);
4581 4830
4831static unsigned long ratelimit_time;
4832static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4833
4834int ata_ratelimit(void)
4835{
4836 int rc;
4837 unsigned long flags;
4838
4839 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4840
4841 if (time_after(jiffies, ratelimit_time)) {
4842 rc = 1;
4843 ratelimit_time = jiffies + (HZ/5);
4844 } else
4845 rc = 0;
4846
4847 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4848
4849 return rc;
4850}
4851
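
ata_ratelimit() returns nonzero at most once per HZ/5 jiffies, i.e. about five times a second, so hot paths can throttle their warnings with a simple "if (ata_ratelimit()) printk(...)" guard. The following userspace re-creation of the same idea is for illustration only (single-threaded, so the spinlock is omitted):

    #include <stdio.h>
    #include <time.h>

    /* Allow one event per 200 ms window, mirroring jiffies + HZ/5. */
    static int ratelimit(void)
    {
        static struct timespec next;   /* zero-initialized: first call passes */
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        if (now.tv_sec > next.tv_sec ||
            (now.tv_sec == next.tv_sec && now.tv_nsec >= next.tv_nsec)) {
            next = now;
            next.tv_nsec += 200 * 1000 * 1000;
            if (next.tv_nsec >= 1000000000L) {
                next.tv_nsec -= 1000000000L;
                next.tv_sec++;
            }
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            printf("event %d: %s\n", i, ratelimit() ? "logged" : "dropped");
        return 0;
    }
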
4582/* 4852/*
4583 * libata is essentially a library of internal helper functions for 4853 * libata is essentially a library of internal helper functions for
4584 * low-level ATA host controller drivers. As such, the API/ABI is 4854 * low-level ATA host controller drivers. As such, the API/ABI is
@@ -4620,6 +4890,7 @@ EXPORT_SYMBOL_GPL(sata_phy_reset);
4620EXPORT_SYMBOL_GPL(__sata_phy_reset); 4890EXPORT_SYMBOL_GPL(__sata_phy_reset);
4621EXPORT_SYMBOL_GPL(ata_bus_reset); 4891EXPORT_SYMBOL_GPL(ata_bus_reset);
4622EXPORT_SYMBOL_GPL(ata_port_disable); 4892EXPORT_SYMBOL_GPL(ata_port_disable);
4893EXPORT_SYMBOL_GPL(ata_ratelimit);
4623EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 4894EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4624EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 4895EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4625EXPORT_SYMBOL_GPL(ata_scsi_error); 4896EXPORT_SYMBOL_GPL(ata_scsi_error);
@@ -4631,6 +4902,9 @@ EXPORT_SYMBOL_GPL(ata_dev_id_string);
4631EXPORT_SYMBOL_GPL(ata_dev_config); 4902EXPORT_SYMBOL_GPL(ata_dev_config);
4632EXPORT_SYMBOL_GPL(ata_scsi_simulate); 4903EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4633 4904
4905EXPORT_SYMBOL_GPL(ata_timing_compute);
4906EXPORT_SYMBOL_GPL(ata_timing_merge);
4907
4634#ifdef CONFIG_PCI 4908#ifdef CONFIG_PCI
4635EXPORT_SYMBOL_GPL(pci_test_config_bits); 4909EXPORT_SYMBOL_GPL(pci_test_config_bits);
4636EXPORT_SYMBOL_GPL(ata_pci_host_stop); 4910EXPORT_SYMBOL_GPL(ata_pci_host_stop);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 104fd9a63e73..58858886d751 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -44,11 +44,19 @@
44 44
45#include "libata.h" 45#include "libata.h"
46 46
47typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, u8 *scsicmd); 47typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
48static struct ata_device * 48static struct ata_device *
49ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev); 49ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev);
50 50
51 51
52static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
53 void (*done)(struct scsi_cmnd *))
54{
55 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
56 /* "Invalid field in cbd" */
57 done(cmd);
58}
59
52/** 60/**
53 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd. 61 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
54 * @sdev: SCSI device for which BIOS geometry is to be determined 62 * @sdev: SCSI device for which BIOS geometry is to be determined
@@ -182,7 +190,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
182{ 190{
183 struct scsi_cmnd *cmd = qc->scsicmd; 191 struct scsi_cmnd *cmd = qc->scsicmd;
184 u8 err = 0; 192 u8 err = 0;
185 unsigned char *sb = cmd->sense_buffer;
186 /* Based on the 3ware driver translation table */ 193 /* Based on the 3ware driver translation table */
187 static unsigned char sense_table[][4] = { 194 static unsigned char sense_table[][4] = {
188 /* BBD|ECC|ID|MAR */ 195 /* BBD|ECC|ID|MAR */
@@ -225,8 +232,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
225 }; 232 };
226 int i = 0; 233 int i = 0;
227 234
228 cmd->result = SAM_STAT_CHECK_CONDITION;
229
230 /* 235 /*
231 * Is this an error we can process/parse 236 * Is this an error we can process/parse
232 */ 237 */
@@ -281,11 +286,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
281 /* Look for best matches first */ 286 /* Look for best matches first */
282 if((sense_table[i][0] & err) == sense_table[i][0]) 287 if((sense_table[i][0] & err) == sense_table[i][0])
283 { 288 {
284 sb[0] = 0x70; 289 ata_scsi_set_sense(cmd, sense_table[i][1] /* sk */,
285 sb[2] = sense_table[i][1]; 290 sense_table[i][2] /* asc */,
286 sb[7] = 0x0a; 291 sense_table[i][3] /* ascq */ );
287 sb[12] = sense_table[i][2];
288 sb[13] = sense_table[i][3];
289 return; 292 return;
290 } 293 }
291 i++; 294 i++;
@@ -300,11 +303,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
300 { 303 {
301 if(stat_table[i][0] & drv_stat) 304 if(stat_table[i][0] & drv_stat)
302 { 305 {
303 sb[0] = 0x70; 306 ata_scsi_set_sense(cmd, sense_table[i][1] /* sk */,
304 sb[2] = stat_table[i][1]; 307 sense_table[i][2] /* asc */,
305 sb[7] = 0x0a; 308 sense_table[i][3] /* ascq */ );
306 sb[12] = stat_table[i][2];
307 sb[13] = stat_table[i][3];
308 return; 309 return;
309 } 310 }
310 i++; 311 i++;
@@ -313,15 +314,12 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
313 printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat); 314 printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat);
314 /* additional-sense-code[-qualifier] */ 315 /* additional-sense-code[-qualifier] */
315 316
316 sb[0] = 0x70;
317 sb[2] = MEDIUM_ERROR;
318 sb[7] = 0x0A;
319 if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 317 if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
320 sb[12] = 0x11; /* "unrecovered read error" */ 318 ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0x11, 0x4);
321 sb[13] = 0x04; 319 /* "unrecovered read error" */
322 } else { 320 } else {
323 sb[12] = 0x0C; /* "write error - */ 321 ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0xc, 0x2);
324 sb[13] = 0x02; /* auto-reallocation failed" */ 322 /* "write error - auto-reallocation failed" */
325 } 323 }
326} 324}
327 325
@@ -420,7 +418,7 @@ int ata_scsi_error(struct Scsi_Host *host)
420 */ 418 */
421 419
422static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc, 420static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
423 u8 *scsicmd) 421 const u8 *scsicmd)
424{ 422{
425 struct ata_taskfile *tf = &qc->tf; 423 struct ata_taskfile *tf = &qc->tf;
426 424
@@ -430,15 +428,26 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
430 ; /* ignore IMMED bit, violates sat-r05 */ 428 ; /* ignore IMMED bit, violates sat-r05 */
431 } 429 }
432 if (scsicmd[4] & 0x2) 430 if (scsicmd[4] & 0x2)
433 return 1; /* LOEJ bit set not supported */ 431 goto invalid_fld; /* LOEJ bit set not supported */
434 if (((scsicmd[4] >> 4) & 0xf) != 0) 432 if (((scsicmd[4] >> 4) & 0xf) != 0)
435 return 1; /* power conditions not supported */ 433 goto invalid_fld; /* power conditions not supported */
436 if (scsicmd[4] & 0x1) { 434 if (scsicmd[4] & 0x1) {
437 tf->nsect = 1; /* 1 sector, lba=0 */ 435 tf->nsect = 1; /* 1 sector, lba=0 */
438 tf->lbah = 0x0; 436
439 tf->lbam = 0x0; 437 if (qc->dev->flags & ATA_DFLAG_LBA) {
440 tf->lbal = 0x0; 438 qc->tf.flags |= ATA_TFLAG_LBA;
441 tf->device |= ATA_LBA; 439
440 tf->lbah = 0x0;
441 tf->lbam = 0x0;
442 tf->lbal = 0x0;
443 tf->device |= ATA_LBA;
444 } else {
445 /* CHS */
446 tf->lbal = 0x1; /* sect */
447 tf->lbam = 0x0; /* cyl low */
448 tf->lbah = 0x0; /* cyl high */
449 }
450
442 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ 451 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
443 } else { 452 } else {
444 tf->nsect = 0; /* time period value (0 implies now) */ 453 tf->nsect = 0; /* time period value (0 implies now) */
@@ -453,6 +462,11 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
453 */ 462 */
454 463
455 return 0; 464 return 0;
465
466invalid_fld:
467 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
468 /* "Invalid field in CDB" */
469 return 1;
456} 470}
457 471
458 472
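The byte-4 tests above follow the START STOP UNIT CDB layout: bit 0 is START, bit 1 is LOEJ, and bits 4-7 select a power condition. A standalone sketch of the same decode, with an illustrative input byte:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t b4 = 0x01;		/* START=1, LOEJ=0, power cond 0 */

		if (b4 & 0x2)			/* LOEJ set: not supported */
			return 1;
		if ((b4 >> 4) & 0xf)		/* power condition: not supported */
			return 1;
		if (b4 & 0x1)
			puts("spin up: issue READ VERIFY of one sector at LBA/CHS 0");
		else
			puts("spin down: issue STANDBY with nsect=0 (now)");
		return 0;
	}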
@@ -471,14 +485,14 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
471 * Zero on success, non-zero on error. 485 * Zero on success, non-zero on error.
472 */ 486 */
473 487
474static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 488static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
475{ 489{
476 struct ata_taskfile *tf = &qc->tf; 490 struct ata_taskfile *tf = &qc->tf;
477 491
478 tf->flags |= ATA_TFLAG_DEVICE; 492 tf->flags |= ATA_TFLAG_DEVICE;
479 tf->protocol = ATA_PROT_NODATA; 493 tf->protocol = ATA_PROT_NODATA;
480 494
481 if ((tf->flags & ATA_TFLAG_LBA48) && 495 if ((qc->dev->flags & ATA_DFLAG_LBA48) &&
482 (ata_id_has_flush_ext(qc->dev->id))) 496 (ata_id_has_flush_ext(qc->dev->id)))
483 tf->command = ATA_CMD_FLUSH_EXT; 497 tf->command = ATA_CMD_FLUSH_EXT;
484 else 498 else
@@ -488,6 +502,99 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
488} 502}
489 503
490/** 504/**
505 * scsi_6_lba_len - Get LBA and transfer length
506 * @scsicmd: SCSI command to translate
507 *
508 * Calculate LBA and transfer length for 6-byte commands.
509 *
510 * RETURNS:
511 * @plba: the LBA
512 * @plen: the transfer length
513 */
514
515static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
516{
517 u64 lba = 0;
518 u32 len = 0;
519
520 VPRINTK("six-byte command\n");
521
522 lba |= ((u64)scsicmd[2]) << 8;
523 lba |= ((u64)scsicmd[3]);
524
525 len |= ((u32)scsicmd[4]);
526
527 *plba = lba;
528 *plen = len;
529}
530
531/**
532 * scsi_10_lba_len - Get LBA and transfer length
533 * @scsicmd: SCSI command to translate
534 *
535 * Calculate LBA and transfer length for 10-byte commands.
536 *
537 * RETURNS:
538 * @plba: the LBA
539 * @plen: the transfer length
540 */
541
542static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
543{
544 u64 lba = 0;
545 u32 len = 0;
546
547 VPRINTK("ten-byte command\n");
548
549 lba |= ((u64)scsicmd[2]) << 24;
550 lba |= ((u64)scsicmd[3]) << 16;
551 lba |= ((u64)scsicmd[4]) << 8;
552 lba |= ((u64)scsicmd[5]);
553
554 len |= ((u32)scsicmd[7]) << 8;
555 len |= ((u32)scsicmd[8]);
556
557 *plba = lba;
558 *plen = len;
559}
560
561/**
562 * scsi_16_lba_len - Get LBA and transfer length
563 * @scsicmd: SCSI command to translate
564 *
565 * Calculate LBA and transfer length for 16-byte commands.
566 *
567 * RETURNS:
568 * @plba: the LBA
569 * @plen: the transfer length
570 */
571
572static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
573{
574 u64 lba = 0;
575 u32 len = 0;
576
577 VPRINTK("sixteen-byte command\n");
578
579 lba |= ((u64)scsicmd[2]) << 56;
580 lba |= ((u64)scsicmd[3]) << 48;
581 lba |= ((u64)scsicmd[4]) << 40;
582 lba |= ((u64)scsicmd[5]) << 32;
583 lba |= ((u64)scsicmd[6]) << 24;
584 lba |= ((u64)scsicmd[7]) << 16;
585 lba |= ((u64)scsicmd[8]) << 8;
586 lba |= ((u64)scsicmd[9]);
587
588 len |= ((u32)scsicmd[10]) << 24;
589 len |= ((u32)scsicmd[11]) << 16;
590 len |= ((u32)scsicmd[12]) << 8;
591 len |= ((u32)scsicmd[13]);
592
593 *plba = lba;
594 *plen = len;
595}
596
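These helpers just reassemble big-endian CDB fields into a host LBA and transfer length. A standalone copy of the 10-byte variant, checked against a sample READ(10) CDB (the values are chosen for illustration):

	#include <assert.h>
	#include <stdint.h>

	static void demo_10_lba_len(const uint8_t *cdb, uint64_t *plba, uint32_t *plen)
	{
		*plba = ((uint64_t)cdb[2] << 24) | ((uint64_t)cdb[3] << 16) |
			((uint64_t)cdb[4] << 8) | cdb[5];
		*plen = ((uint32_t)cdb[7] << 8) | cdb[8];
	}

	int main(void)
	{
		/* READ(10), 8 blocks at LBA 0x12345678 */
		const uint8_t cdb[10] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78, 0, 0x00, 0x08, 0 };
		uint64_t lba;
		uint32_t len;

		demo_10_lba_len(cdb, &lba, &len);
		assert(lba == 0x12345678 && len == 8);
		return 0;
	}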
597/**
491 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one 598 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
492 * @qc: Storage for translated ATA taskfile 599 * @qc: Storage for translated ATA taskfile
493 * @scsicmd: SCSI command to translate 600 * @scsicmd: SCSI command to translate
@@ -501,82 +608,110 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
501 * Zero on success, non-zero on error. 608 * Zero on success, non-zero on error.
502 */ 609 */
503 610
504static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 611static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
505{ 612{
506 struct ata_taskfile *tf = &qc->tf; 613 struct ata_taskfile *tf = &qc->tf;
507 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; 614 struct ata_device *dev = qc->dev;
508 u64 dev_sectors = qc->dev->n_sectors; 615 u64 dev_sectors = qc->dev->n_sectors;
509 u64 sect = 0; 616 u64 block;
510 u32 n_sect = 0; 617 u32 n_block;
511 618
512 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 619 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
513 tf->protocol = ATA_PROT_NODATA; 620 tf->protocol = ATA_PROT_NODATA;
514 tf->device |= ATA_LBA;
515 621
516 if (scsicmd[0] == VERIFY) { 622 if (scsicmd[0] == VERIFY)
517 sect |= ((u64)scsicmd[2]) << 24; 623 scsi_10_lba_len(scsicmd, &block, &n_block);
518 sect |= ((u64)scsicmd[3]) << 16; 624 else if (scsicmd[0] == VERIFY_16)
519 sect |= ((u64)scsicmd[4]) << 8; 625 scsi_16_lba_len(scsicmd, &block, &n_block);
520 sect |= ((u64)scsicmd[5]); 626 else
627 goto invalid_fld;
521 628
522 n_sect |= ((u32)scsicmd[7]) << 8; 629 if (!n_block)
523 n_sect |= ((u32)scsicmd[8]); 630 goto nothing_to_do;
524 } 631 if (block >= dev_sectors)
632 goto out_of_range;
633 if ((block + n_block) > dev_sectors)
634 goto out_of_range;
525 635
526 else if (scsicmd[0] == VERIFY_16) { 636 if (dev->flags & ATA_DFLAG_LBA) {
527 sect |= ((u64)scsicmd[2]) << 56; 637 tf->flags |= ATA_TFLAG_LBA;
528 sect |= ((u64)scsicmd[3]) << 48;
529 sect |= ((u64)scsicmd[4]) << 40;
530 sect |= ((u64)scsicmd[5]) << 32;
531 sect |= ((u64)scsicmd[6]) << 24;
532 sect |= ((u64)scsicmd[7]) << 16;
533 sect |= ((u64)scsicmd[8]) << 8;
534 sect |= ((u64)scsicmd[9]);
535
536 n_sect |= ((u32)scsicmd[10]) << 24;
537 n_sect |= ((u32)scsicmd[11]) << 16;
538 n_sect |= ((u32)scsicmd[12]) << 8;
539 n_sect |= ((u32)scsicmd[13]);
540 }
541 638
542 else 639 if (dev->flags & ATA_DFLAG_LBA48) {
543 return 1; 640 if (n_block > (64 * 1024))
641 goto invalid_fld;
544 642
545 if (!n_sect) 643 /* use LBA48 */
546 return 1; 644 tf->flags |= ATA_TFLAG_LBA48;
547 if (sect >= dev_sectors) 645 tf->command = ATA_CMD_VERIFY_EXT;
548 return 1;
549 if ((sect + n_sect) > dev_sectors)
550 return 1;
551 if (lba48) {
552 if (n_sect > (64 * 1024))
553 return 1;
554 } else {
555 if (n_sect > 256)
556 return 1;
557 }
558 646
559 if (lba48) { 647 tf->hob_nsect = (n_block >> 8) & 0xff;
560 tf->command = ATA_CMD_VERIFY_EXT;
561 648
562 tf->hob_nsect = (n_sect >> 8) & 0xff; 649 tf->hob_lbah = (block >> 40) & 0xff;
650 tf->hob_lbam = (block >> 32) & 0xff;
651 tf->hob_lbal = (block >> 24) & 0xff;
652 } else {
653 if (n_block > 256)
654 goto invalid_fld;
563 655
564 tf->hob_lbah = (sect >> 40) & 0xff; 656 /* use LBA28 */
565 tf->hob_lbam = (sect >> 32) & 0xff; 657 tf->command = ATA_CMD_VERIFY;
566 tf->hob_lbal = (sect >> 24) & 0xff; 658
659 tf->device |= (block >> 24) & 0xf;
660 }
661
662 tf->nsect = n_block & 0xff;
663
664 tf->lbah = (block >> 16) & 0xff;
665 tf->lbam = (block >> 8) & 0xff;
666 tf->lbal = block & 0xff;
667
668 tf->device |= ATA_LBA;
567 } else { 669 } else {
670 /* CHS */
671 u32 sect, head, cyl, track;
672
673 if (n_block > 256)
674 goto invalid_fld;
675
676 /* Convert LBA to CHS */
677 track = (u32)block / dev->sectors;
678 cyl = track / dev->heads;
679 head = track % dev->heads;
680 sect = (u32)block % dev->sectors + 1;
681
682 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
683 (u32)block, track, cyl, head, sect);
684
685 /* Check whether the converted CHS can fit.
686 Cylinder: 0-65535
687 Head: 0-15
688 Sector: 1-255 */
689 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
690 goto out_of_range;
691
568 tf->command = ATA_CMD_VERIFY; 692 tf->command = ATA_CMD_VERIFY;
569 693 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
570 tf->device |= (sect >> 24) & 0xf; 694 tf->lbal = sect;
695 tf->lbam = cyl;
696 tf->lbah = cyl >> 8;
697 tf->device |= head;
571 } 698 }
572 699
573 tf->nsect = n_sect & 0xff; 700 return 0;
701
702invalid_fld:
703 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
704 /* "Invalid field in CDB" */
705 return 1;
574 706
575 tf->lbah = (sect >> 16) & 0xff; 707out_of_range:
576 tf->lbam = (sect >> 8) & 0xff; 708 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
577 tf->lbal = sect & 0xff; 709 /* "Logical Block Address out of range" */
710 return 1;
578 711
579 return 0; 712nothing_to_do:
713 qc->scsicmd->result = SAM_STAT_GOOD;
714 return 1;
580} 715}
581 716
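The CHS branch above divides the logical block number by the drive's sectors-per-track and head count to recover cylinder/head/sector, then range-checks the result. The same arithmetic in a standalone sketch, with an assumed geometry (in the driver, heads and sectors come from struct ata_device):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t heads = 16, sectors = 63;	/* assumed geometry */
		uint32_t block = 123456;

		uint32_t track = block / sectors;
		uint32_t cyl   = track / heads;
		uint32_t head  = track % heads;
		uint32_t sect  = block % sectors + 1;	/* sectors are 1-based */

		/* Cylinder 0-65535, head 0-15, sector 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || !sect)
			return 1;			/* out of range */

		printf("cyl=%u head=%u sect=%u\n", cyl, head, sect);
		return 0;
	}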
582/** 717/**
@@ -599,106 +734,137 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
599 * Zero on success, non-zero on error. 734 * Zero on success, non-zero on error.
600 */ 735 */
601 736
602static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 737static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
603{ 738{
604 struct ata_taskfile *tf = &qc->tf; 739 struct ata_taskfile *tf = &qc->tf;
605 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; 740 struct ata_device *dev = qc->dev;
741 u64 block;
742 u32 n_block;
606 743
607 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 744 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
608 tf->protocol = qc->dev->xfer_protocol;
609 tf->device |= ATA_LBA;
610 745
611 if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 || 746 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
612 scsicmd[0] == READ_16) { 747 scsicmd[0] == WRITE_16)
613 tf->command = qc->dev->read_cmd;
614 } else {
615 tf->command = qc->dev->write_cmd;
616 tf->flags |= ATA_TFLAG_WRITE; 748 tf->flags |= ATA_TFLAG_WRITE;
617 }
618 749
619 if (scsicmd[0] == READ_10 || scsicmd[0] == WRITE_10) { 750 /* Calculate the SCSI LBA and transfer length. */
620 if (lba48) { 751 switch (scsicmd[0]) {
621 tf->hob_nsect = scsicmd[7]; 752 case READ_10:
622 tf->hob_lbal = scsicmd[2]; 753 case WRITE_10:
754 scsi_10_lba_len(scsicmd, &block, &n_block);
755 break;
756 case READ_6:
757 case WRITE_6:
758 scsi_6_lba_len(scsicmd, &block, &n_block);
623 759
624 qc->nsect = ((unsigned int)scsicmd[7] << 8) | 760 /* for 6-byte r/w commands, transfer length 0
625 scsicmd[8]; 761 * means 256 blocks of data, not 0 block.
626 } else { 762 */
627 /* if we don't support LBA48 addressing, the request 763 if (!n_block)
628 * -may- be too large. */ 764 n_block = 256;
629 if ((scsicmd[2] & 0xf0) || scsicmd[7]) 765 break;
630 return 1; 766 case READ_16:
767 case WRITE_16:
768 scsi_16_lba_len(scsicmd, &block, &n_block);
769 break;
770 default:
771 DPRINTK("no-byte command\n");
772 goto invalid_fld;
773 }
631 774
632 /* stores LBA27:24 in lower 4 bits of device reg */ 775 /* Check and compose ATA command */
633 tf->device |= scsicmd[2]; 776 if (!n_block)
777 /* For 10-byte and 16-byte SCSI R/W commands, transfer
778 * length 0 means transfer 0 block of data.
779 * However, for ATA R/W commands, sector count 0 means
780 * 256 or 65536 sectors, not 0 sectors as in SCSI.
781 */
782 goto nothing_to_do;
634 783
635 qc->nsect = scsicmd[8]; 784 if (dev->flags & ATA_DFLAG_LBA) {
636 } 785 tf->flags |= ATA_TFLAG_LBA;
637 786
638 tf->nsect = scsicmd[8]; 787 if (dev->flags & ATA_DFLAG_LBA48) {
639 tf->lbal = scsicmd[5]; 788 /* The request -may- be too large for LBA48. */
640 tf->lbam = scsicmd[4]; 789 if ((block >> 48) || (n_block > 65536))
641 tf->lbah = scsicmd[3]; 790 goto out_of_range;
642 791
643 VPRINTK("ten-byte command\n"); 792 /* use LBA48 */
644 if (qc->nsect == 0) /* we don't support length==0 cmds */ 793 tf->flags |= ATA_TFLAG_LBA48;
645 return 1;
646 return 0;
647 }
648 794
649 if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) { 795 tf->hob_nsect = (n_block >> 8) & 0xff;
650 qc->nsect = tf->nsect = scsicmd[4];
651 if (!qc->nsect) {
652 qc->nsect = 256;
653 if (lba48)
654 tf->hob_nsect = 1;
655 }
656 796
657 tf->lbal = scsicmd[3]; 797 tf->hob_lbah = (block >> 40) & 0xff;
658 tf->lbam = scsicmd[2]; 798 tf->hob_lbam = (block >> 32) & 0xff;
659 tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */ 799 tf->hob_lbal = (block >> 24) & 0xff;
800 } else {
801 /* use LBA28 */
660 802
661 VPRINTK("six-byte command\n"); 803 /* The request -may- be too large for LBA28. */
662 return 0; 804 if ((block >> 28) || (n_block > 256))
663 } 805 goto out_of_range;
664 806
665 if (scsicmd[0] == READ_16 || scsicmd[0] == WRITE_16) { 807 tf->device |= (block >> 24) & 0xf;
666 /* rule out impossible LBAs and sector counts */ 808 }
667 if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11])
668 return 1;
669 809
670 if (lba48) { 810 ata_rwcmd_protocol(qc);
671 tf->hob_nsect = scsicmd[12];
672 tf->hob_lbal = scsicmd[6];
673 tf->hob_lbam = scsicmd[5];
674 tf->hob_lbah = scsicmd[4];
675 811
676 qc->nsect = ((unsigned int)scsicmd[12] << 8) | 812 qc->nsect = n_block;
677 scsicmd[13]; 813 tf->nsect = n_block & 0xff;
678 } else {
679 /* once again, filter out impossible non-zero values */
680 if (scsicmd[4] || scsicmd[5] || scsicmd[12] ||
681 (scsicmd[6] & 0xf0))
682 return 1;
683 814
684 /* stores LBA27:24 in lower 4 bits of device reg */ 815 tf->lbah = (block >> 16) & 0xff;
685 tf->device |= scsicmd[6]; 816 tf->lbam = (block >> 8) & 0xff;
817 tf->lbal = block & 0xff;
686 818
687 qc->nsect = scsicmd[13]; 819 tf->device |= ATA_LBA;
688 } 820 } else {
821 /* CHS */
822 u32 sect, head, cyl, track;
823
824 /* The request -may- be too large for CHS addressing. */
825 if ((block >> 28) || (n_block > 256))
826 goto out_of_range;
827
828 ata_rwcmd_protocol(qc);
829
830 /* Convert LBA to CHS */
831 track = (u32)block / dev->sectors;
832 cyl = track / dev->heads;
833 head = track % dev->heads;
834 sect = (u32)block % dev->sectors + 1;
835
836 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
837 (u32)block, track, cyl, head, sect);
838
839 /* Check whether the converted CHS can fit.
840 Cylinder: 0-65535
841 Head: 0-15
842 Sector: 1-255 */
843 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
844 goto out_of_range;
845
846 qc->nsect = n_block;
847 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
848 tf->lbal = sect;
849 tf->lbam = cyl;
850 tf->lbah = cyl >> 8;
851 tf->device |= head;
852 }
689 853
690 tf->nsect = scsicmd[13]; 854 return 0;
691 tf->lbal = scsicmd[9];
692 tf->lbam = scsicmd[8];
693 tf->lbah = scsicmd[7];
694 855
695 VPRINTK("sixteen-byte command\n"); 856invalid_fld:
696 if (qc->nsect == 0) /* we don't support length==0 cmds */ 857 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
697 return 1; 858 /* "Invalid field in CDB" */
698 return 0; 859 return 1;
699 } 860
861out_of_range:
862 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
863 /* "Logical Block Address out of range" */
864 return 1;
700 865
701 DPRINTK("no-byte command\n"); 866nothing_to_do:
867 qc->scsicmd->result = SAM_STAT_GOOD;
702 return 1; 868 return 1;
703} 869}
704 870
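A point worth keeping straight from the comments above: SCSI and ATA disagree on what a zero count means. A SCSI transfer length of 0 means "do nothing", while an ATA sector-count register of 0 means 256 sectors (LBA28) or, taken with the high-order byte, 65536 (LBA48). A small sketch of the register encoding, with an illustrative request size:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t n_block = 256;			/* a full LBA28-sized request */
		uint8_t nsect = n_block & 0xff;		/* 256 -> 0, which ATA reads as 256 */
		uint8_t hob_nsect = (n_block >> 8) & 0xff; /* high byte, used only for LBA48 */

		printf("nsect=0x%02x hob_nsect=0x%02x\n", nsect, hob_nsect);
		return 0;
	}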
@@ -731,6 +897,12 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
731 * This function sets up an ata_queued_cmd structure for the 897 * This function sets up an ata_queued_cmd structure for the
732 * SCSI command, and sends that ata_queued_cmd to the hardware. 898 * SCSI command, and sends that ata_queued_cmd to the hardware.
733 * 899 *
900 * The xlat_func argument (actor) returns 0 if ready to execute
901 * ATA command, else 1 to finish translation. If 1 is returned
902 * then cmd->result (and possibly cmd->sense_buffer) are assumed
903 * to be set reflecting an error condition or clean (early)
904 * termination.
905 *
734 * LOCKING: 906 * LOCKING:
735 * spin_lock_irqsave(host_set lock) 907 * spin_lock_irqsave(host_set lock)
736 */ 908 */
@@ -747,7 +919,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
747 919
748 qc = ata_scsi_qc_new(ap, dev, cmd, done); 920 qc = ata_scsi_qc_new(ap, dev, cmd, done);
749 if (!qc) 921 if (!qc)
750 return; 922 goto err_mem;
751 923
752 /* data is present; dma-map it */ 924 /* data is present; dma-map it */
753 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 925 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
@@ -755,7 +927,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
755 if (unlikely(cmd->request_bufflen < 1)) { 927 if (unlikely(cmd->request_bufflen < 1)) {
756 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", 928 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n",
757 ap->id, dev->devno); 929 ap->id, dev->devno);
758 goto err_out; 930 goto err_did;
759 } 931 }
760 932
761 if (cmd->use_sg) 933 if (cmd->use_sg)
@@ -770,19 +942,28 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
770 qc->complete_fn = ata_scsi_qc_complete; 942 qc->complete_fn = ata_scsi_qc_complete;
771 943
772 if (xlat_func(qc, scsicmd)) 944 if (xlat_func(qc, scsicmd))
773 goto err_out; 945 goto early_finish;
774 946
775 /* select device, send command to hardware */ 947 /* select device, send command to hardware */
776 if (ata_qc_issue(qc)) 948 if (ata_qc_issue(qc))
777 goto err_out; 949 goto err_did;
778 950
779 VPRINTK("EXIT\n"); 951 VPRINTK("EXIT\n");
780 return; 952 return;
781 953
782err_out: 954early_finish:
955 ata_qc_free(qc);
956 done(cmd);
957 DPRINTK("EXIT - early finish (good or error)\n");
958 return;
959
960err_did:
783 ata_qc_free(qc); 961 ata_qc_free(qc);
784 ata_bad_cdb(cmd, done); 962err_mem:
785 DPRINTK("EXIT - badcmd\n"); 963 cmd->result = (DID_ERROR << 16);
964 done(cmd);
965 DPRINTK("EXIT - internal\n");
966 return;
786} 967}
787 968
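The actor contract described in the comment above is easy to show with stand-in types (the struct names and values here are illustrative, not libata's):

	#include <stdio.h>
	#include <stdint.h>

	struct demo_cmd { int result; };
	struct demo_qc { struct demo_cmd *scsicmd; uint8_t command; };

	/* Return 0: taskfile built, issue it. Return 1: translation finished
	 * early and cmd->result (plus any sense data) is already set. */
	static unsigned int demo_xlat(struct demo_qc *qc, const uint8_t *scsicmd)
	{
		if (scsicmd[1] & 0x1) {			/* some unsupported CDB bit */
			qc->scsicmd->result = 2;	/* CHECK CONDITION, say */
			return 1;			/* early finish */
		}
		qc->command = 0x40;			/* e.g. ATA READ VERIFY */
		return 0;				/* hand to hardware */
	}

	int main(void)
	{
		struct demo_cmd cmd = { 0 };
		struct demo_qc qc = { &cmd, 0 };
		const uint8_t cdb[6] = { 0x2f, 0x01 };	/* bit set -> early finish */
		unsigned int rc = demo_xlat(&qc, cdb);

		printf("rc=%u result=%d\n", rc, cmd.result);
		return 0;
	}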
788/** 969/**
@@ -849,7 +1030,8 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
849 * Mapping the response buffer, calling the command's handler, 1030 * Mapping the response buffer, calling the command's handler,
850 * and handling the handler's return value. This return value 1031 * and handling the handler's return value. This return value
851 * indicates whether the handler wishes the SCSI command to be 1032 * indicates whether the handler wishes the SCSI command to be
852 * completed successfully, or not. 1033 * completed successfully (0), or not (in which case cmd->result
1034 * and sense buffer are assumed to be set).
853 * 1035 *
854 * LOCKING: 1036 * LOCKING:
855 * spin_lock_irqsave(host_set lock) 1037 * spin_lock_irqsave(host_set lock)
@@ -868,12 +1050,9 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
868 rc = actor(args, rbuf, buflen); 1050 rc = actor(args, rbuf, buflen);
869 ata_scsi_rbuf_put(cmd, rbuf); 1051 ata_scsi_rbuf_put(cmd, rbuf);
870 1052
871 if (rc) 1053 if (rc == 0)
872 ata_bad_cdb(cmd, args->done);
873 else {
874 cmd->result = SAM_STAT_GOOD; 1054 cmd->result = SAM_STAT_GOOD;
875 args->done(cmd); 1055 args->done(cmd);
876 }
877} 1056}
878 1057
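The same pattern in miniature: the simulate path hands the actor a response buffer and completes with GOOD status only on a zero return (buffer size and payload byte below are illustrative, and the buffer is cleared here for determinism):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	typedef unsigned int (*actor_fn)(uint8_t *rbuf, unsigned int buflen);

	static unsigned int demo_actor(uint8_t *rbuf, unsigned int buflen)
	{
		if (buflen < 36)
			return 1;		/* error: caller's result stands */
		rbuf[2] = 0x05;			/* illustrative payload byte */
		return 0;
	}

	int main(void)
	{
		uint8_t rbuf[96];
		actor_fn actor = demo_actor;

		memset(rbuf, 0, sizeof(rbuf));	/* start from a clean buffer */
		puts(actor(rbuf, sizeof(rbuf)) == 0 ? "GOOD" : "error already set");
		return 0;
	}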
879/** 1058/**
@@ -1179,8 +1358,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1179 * in the same manner) 1358 * in the same manner)
1180 */ 1359 */
1181 page_control = scsicmd[2] >> 6; 1360 page_control = scsicmd[2] >> 6;
1182 if ((page_control != 0) && (page_control != 3)) 1361 switch (page_control) {
1183 return 1; 1362 case 0: /* current */
1363 break; /* supported */
1364 case 3: /* saved */
1365 goto saving_not_supp;
1366 case 1: /* changeable */
1367 case 2: /* defaults */
1368 default:
1369 goto invalid_fld;
1370 }
1184 1371
1185 if (six_byte) 1372 if (six_byte)
1186 output_len = 4; 1373 output_len = 4;
@@ -1211,7 +1398,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1211 break; 1398 break;
1212 1399
1213 default: /* invalid page code */ 1400 default: /* invalid page code */
1214 return 1; 1401 goto invalid_fld;
1215 } 1402 }
1216 1403
1217 if (six_byte) { 1404 if (six_byte) {
@@ -1224,6 +1411,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1224 } 1411 }
1225 1412
1226 return 0; 1413 return 0;
1414
1415invalid_fld:
1416 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
1417 /* "Invalid field in CDB" */
1418 return 1;
1419
1420saving_not_supp:
1421 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
1422 /* "Saving parameters not supported" */
1423 return 1;
1227} 1424}
1228 1425
1229/** 1426/**
@@ -1246,10 +1443,20 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
1246 1443
1247 VPRINTK("ENTER\n"); 1444 VPRINTK("ENTER\n");
1248 1445
1249 if (ata_id_has_lba48(args->id)) 1446 if (ata_id_has_lba(args->id)) {
1250 n_sectors = ata_id_u64(args->id, 100); 1447 if (ata_id_has_lba48(args->id))
1251 else 1448 n_sectors = ata_id_u64(args->id, 100);
1252 n_sectors = ata_id_u32(args->id, 60); 1449 else
1450 n_sectors = ata_id_u32(args->id, 60);
1451 } else {
1452 /* CHS default translation */
1453 n_sectors = args->id[1] * args->id[3] * args->id[6];
1454
1455 if (ata_id_current_chs_valid(args->id))
1456 /* CHS current translation */
1457 n_sectors = ata_id_u32(args->id, 57);
1458 }
1459
1253 n_sectors--; /* ATA TotalUserSectors - 1 */ 1460 n_sectors--; /* ATA TotalUserSectors - 1 */
1254 1461
1255 if (args->cmd->cmnd[0] == READ_CAPACITY) { 1462 if (args->cmd->cmnd[0] == READ_CAPACITY) {
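The capacity math above, restated on a made-up IDENTIFY DEVICE block (word numbering per ATA; in the driver the LBA test is ata_id_has_lba(), i.e. word 49 bit 9):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t id[256] = { 0 };
		uint64_t n_sectors;
		int has_lba = 1;			/* assumed; word 49 bit 9 in real IDs */

		id[1] = 16383;				/* default cylinders */
		id[3] = 16;				/* default heads */
		id[6] = 63;				/* default sectors per track */
		id[60] = 0x5678; id[61] = 0x1234;	/* LBA28 capacity, low/high words */

		if (has_lba)
			n_sectors = ((uint32_t)id[61] << 16) | id[60];	/* ata_id_u32(id, 60) */
		else
			n_sectors = (uint64_t)id[1] * id[3] * id[6];	/* CHS default */

		printf("last LBA = %llu\n", (unsigned long long)(n_sectors - 1));
		return 0;
	}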
@@ -1313,6 +1520,34 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
1313} 1520}
1314 1521
1315/** 1522/**
1523 * ata_scsi_set_sense - Set SCSI sense data and status
1524 * @cmd: SCSI request to be handled
1525 * @sk: SCSI-defined sense key
1526 * @asc: SCSI-defined additional sense code
1527 * @ascq: SCSI-defined additional sense code qualifier
1528 *
1529 * Helper function that builds a valid fixed format, current
1530 * response code and the given sense key (sk), additional sense
1531 * code (asc) and additional sense code qualifier (ascq) with
1532 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and
1533 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result .
1534 *
1535 * LOCKING:
1536 * Not required
1537 */
1538
1539void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
1540{
1541 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1542
1543 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
1544 cmd->sense_buffer[2] = sk;
1545 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
1546 cmd->sense_buffer[12] = asc;
1547 cmd->sense_buffer[13] = ascq;
1548}
1549
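A standalone check of the fixed-format sense layout this helper builds (byte offsets per SPC fixed-format sense data):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t sb[18] = { 0 };
		uint8_t sk = 0x05, asc = 0x24, ascq = 0x00;	/* ILLEGAL REQUEST, invalid field */

		sb[0] = 0x70;		/* current error, fixed format */
		sb[2] = sk;
		sb[7] = 18 - 8;		/* additional sense length: bytes after the 8-byte header */
		sb[12] = asc;
		sb[13] = ascq;

		assert(sb[7] == 10);
		return 0;
	}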
1550/**
1316 * ata_scsi_badcmd - End a SCSI request with an error 1551 * ata_scsi_badcmd - End a SCSI request with an error
1317 * @cmd: SCSI request to be handled 1552 * @cmd: SCSI request to be handled
1318 * @done: SCSI command completion function 1553 * @done: SCSI command completion function
@@ -1330,30 +1565,84 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
1330void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) 1565void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
1331{ 1566{
1332 DPRINTK("ENTER\n"); 1567 DPRINTK("ENTER\n");
1333 cmd->result = SAM_STAT_CHECK_CONDITION; 1568 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
1334
1335 cmd->sense_buffer[0] = 0x70;
1336 cmd->sense_buffer[2] = ILLEGAL_REQUEST;
1337 cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */
1338 cmd->sense_buffer[12] = asc;
1339 cmd->sense_buffer[13] = ascq;
1340 1569
1341 done(cmd); 1570 done(cmd);
1342} 1571}
1343 1572
1573void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
1574 struct scsi_cmnd *cmd)
1575{
1576 DECLARE_COMPLETION(wait);
1577 struct ata_queued_cmd *qc;
1578 unsigned long flags;
1579 int rc;
1580
1581 DPRINTK("ATAPI request sense\n");
1582
1583 qc = ata_qc_new_init(ap, dev);
1584 BUG_ON(qc == NULL);
1585
1586 /* FIXME: is this needed? */
1587 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
1588
1589 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
1590 qc->dma_dir = DMA_FROM_DEVICE;
1591
1592 memset(&qc->cdb, 0, ap->cdb_len);
1593 qc->cdb[0] = REQUEST_SENSE;
1594 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
1595
1596 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1597 qc->tf.command = ATA_CMD_PACKET;
1598
1599 qc->tf.protocol = ATA_PROT_ATAPI;
1600 qc->tf.lbam = (8 * 1024) & 0xff;
1601 qc->tf.lbah = (8 * 1024) >> 8;
1602 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
1603
1604 qc->waiting = &wait;
1605 qc->complete_fn = ata_qc_complete_noop;
1606
1607 spin_lock_irqsave(&ap->host_set->lock, flags);
1608 rc = ata_qc_issue(qc);
1609 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1610
1611 if (rc)
1612 ata_port_disable(ap);
1613 else
1614 wait_for_completion(&wait);
1615
1616 DPRINTK("EXIT\n");
1617}
1618
1344static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 1619static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1345{ 1620{
1346 struct scsi_cmnd *cmd = qc->scsicmd; 1621 struct scsi_cmnd *cmd = qc->scsicmd;
1347 1622
1348 if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) { 1623 VPRINTK("ENTER, drv_stat == 0x%x\n", drv_stat);
1624
1625 if (unlikely(drv_stat & (ATA_BUSY | ATA_DRQ)))
1626 ata_to_sense_error(qc, drv_stat);
1627
1628 else if (unlikely(drv_stat & ATA_ERR)) {
1349 DPRINTK("request check condition\n"); 1629 DPRINTK("request check condition\n");
1350 1630
1631 /* FIXME: command completion with check condition
1632 * but no sense causes the error handler to run,
1633 * which then issues REQUEST SENSE, fills in the sense
1634 * buffer, and completes the command (for the second
1635 * time). We need to issue REQUEST SENSE some other
1636 * way, to avoid completing the command twice.
1637 */
1351 cmd->result = SAM_STAT_CHECK_CONDITION; 1638 cmd->result = SAM_STAT_CHECK_CONDITION;
1352 1639
1353 qc->scsidone(cmd); 1640 qc->scsidone(cmd);
1354 1641
1355 return 1; 1642 return 1;
1356 } else { 1643 }
1644
1645 else {
1357 u8 *scsicmd = cmd->cmnd; 1646 u8 *scsicmd = cmd->cmnd;
1358 1647
1359 if (scsicmd[0] == INQUIRY) { 1648 if (scsicmd[0] == INQUIRY) {
@@ -1361,15 +1650,30 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1361 unsigned int buflen; 1650 unsigned int buflen;
1362 1651
1363 buflen = ata_scsi_rbuf_get(cmd, &buf); 1652 buflen = ata_scsi_rbuf_get(cmd, &buf);
1364 buf[2] = 0x5; 1653
1365 buf[3] = (buf[3] & 0xf0) | 2; 1654 /* ATAPI devices typically report zero for their SCSI version,
1655 * and sometimes deviate from the spec WRT response data
1656 * format. If SCSI version is reported as zero like normal,
1657 * then we make the following fixups: 1) Fake MMC-5 version,
1658 * to indicate to the Linux scsi midlayer this is a modern
1659 * device. 2) Ensure response data format / ATAPI information
1660 * are always correct.
1661 */
1662 /* FIXME: do we ever override EVPD pages and the like, with
1663 * this code?
1664 */
1665 if (buf[2] == 0) {
1666 buf[2] = 0x5;
1667 buf[3] = 0x32;
1668 }
1669
1366 ata_scsi_rbuf_put(cmd, buf); 1670 ata_scsi_rbuf_put(cmd, buf);
1367 } 1671 }
1672
1368 cmd->result = SAM_STAT_GOOD; 1673 cmd->result = SAM_STAT_GOOD;
1369 } 1674 }
1370 1675
1371 qc->scsidone(cmd); 1676 qc->scsidone(cmd);
1372
1373 return 0; 1677 return 0;
1374} 1678}
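The INQUIRY fixup above in isolation: if the device reports SCSI version 0, byte 2 is bumped to a modern version and byte 3 forced to 0x32 (response data format 2, plus bits the midlayer expects). The sample response bytes below are made up:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t buf[36] = { 0x05, 0x80, 0x00, 0x21 };	/* version 0 device */

		if (buf[2] == 0) {
			buf[2] = 0x5;		/* claim a modern SCSI version */
			buf[3] = 0x32;		/* response data format 2 */
		}
		printf("version=%#x rdf=%#x\n", buf[2], buf[3] & 0xf);
		return 0;
	}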
1375/** 1679/**
@@ -1384,7 +1688,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1384 * Zero on success, non-zero on failure. 1688 * Zero on success, non-zero on failure.
1385 */ 1689 */
1386 1690
1387static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 1691static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1388{ 1692{
1389 struct scsi_cmnd *cmd = qc->scsicmd; 1693 struct scsi_cmnd *cmd = qc->scsicmd;
1390 struct ata_device *dev = qc->dev; 1694 struct ata_device *dev = qc->dev;
@@ -1453,7 +1757,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
1453 */ 1757 */
1454 1758
1455static struct ata_device * 1759static struct ata_device *
1456ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev) 1760ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
1457{ 1761{
1458 struct ata_device *dev; 1762 struct ata_device *dev;
1459 1763
@@ -1610,7 +1914,7 @@ void ata_scsi_simulate(u16 *id,
1610 void (*done)(struct scsi_cmnd *)) 1914 void (*done)(struct scsi_cmnd *))
1611{ 1915{
1612 struct ata_scsi_args args; 1916 struct ata_scsi_args args;
1613 u8 *scsicmd = cmd->cmnd; 1917 const u8 *scsicmd = cmd->cmnd;
1614 1918
1615 args.id = id; 1919 args.id = id;
1616 args.cmd = cmd; 1920 args.cmd = cmd;
@@ -1630,7 +1934,7 @@ void ata_scsi_simulate(u16 *id,
1630 1934
1631 case INQUIRY: 1935 case INQUIRY:
1632 if (scsicmd[1] & 2) /* is CmdDt set? */ 1936 if (scsicmd[1] & 2) /* is CmdDt set? */
1633 ata_bad_cdb(cmd, done); 1937 ata_scsi_invalid_field(cmd, done);
1634 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ 1938 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
1635 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 1939 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
1636 else if (scsicmd[2] == 0x00) 1940 else if (scsicmd[2] == 0x00)
@@ -1640,7 +1944,7 @@ void ata_scsi_simulate(u16 *id,
1640 else if (scsicmd[2] == 0x83) 1944 else if (scsicmd[2] == 0x83)
1641 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 1945 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
1642 else 1946 else
1643 ata_bad_cdb(cmd, done); 1947 ata_scsi_invalid_field(cmd, done);
1644 break; 1948 break;
1645 1949
1646 case MODE_SENSE: 1950 case MODE_SENSE:
@@ -1650,7 +1954,7 @@ void ata_scsi_simulate(u16 *id,
1650 1954
1651 case MODE_SELECT: /* unconditionally return */ 1955 case MODE_SELECT: /* unconditionally return */
1652 case MODE_SELECT_10: /* bad-field-in-cdb */ 1956 case MODE_SELECT_10: /* bad-field-in-cdb */
1653 ata_bad_cdb(cmd, done); 1957 ata_scsi_invalid_field(cmd, done);
1654 break; 1958 break;
1655 1959
1656 case READ_CAPACITY: 1960 case READ_CAPACITY:
@@ -1661,7 +1965,7 @@ void ata_scsi_simulate(u16 *id,
1661 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 1965 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
1662 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 1966 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
1663 else 1967 else
1664 ata_bad_cdb(cmd, done); 1968 ata_scsi_invalid_field(cmd, done);
1665 break; 1969 break;
1666 1970
1667 case REPORT_LUNS: 1971 case REPORT_LUNS:
@@ -1673,8 +1977,26 @@ void ata_scsi_simulate(u16 *id,
1673 1977
1674 /* all other commands */ 1978 /* all other commands */
1675 default: 1979 default:
1676 ata_bad_scsiop(cmd, done); 1980 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
1981 /* "Invalid command operation code" */
1982 done(cmd);
1677 break; 1983 break;
1678 } 1984 }
1679} 1985}
1680 1986
1987void ata_scsi_scan_host(struct ata_port *ap)
1988{
1989 struct ata_device *dev;
1990 unsigned int i;
1991
1992 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1993 return;
1994
1995 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1996 dev = &ap->device[i];
1997
1998 if (ata_dev_present(dev))
1999 scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0);
2000 }
2001}
2002
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index d608b3a0f6fe..3d60190584ba 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -39,18 +39,23 @@ struct ata_scsi_args {
39 39
40/* libata-core.c */ 40/* libata-core.c */
41extern int atapi_enabled; 41extern int atapi_enabled;
42extern int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
42extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 43extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
43 struct ata_device *dev); 44 struct ata_device *dev);
45extern void ata_rwcmd_protocol(struct ata_queued_cmd *qc);
44extern void ata_qc_free(struct ata_queued_cmd *qc); 46extern void ata_qc_free(struct ata_queued_cmd *qc);
45extern int ata_qc_issue(struct ata_queued_cmd *qc); 47extern int ata_qc_issue(struct ata_queued_cmd *qc);
46extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 48extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
47extern void ata_dev_select(struct ata_port *ap, unsigned int device, 49extern void ata_dev_select(struct ata_port *ap, unsigned int device,
48 unsigned int wait, unsigned int can_sleep); 50 unsigned int wait, unsigned int can_sleep);
49extern void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf); 51extern void ata_tf_to_host_nolock(struct ata_port *ap, const struct ata_taskfile *tf);
50extern void swap_buf_le16(u16 *buf, unsigned int buf_words); 52extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
51 53
52 54
53/* libata-scsi.c */ 55/* libata-scsi.c */
56extern void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
57 struct scsi_cmnd *cmd);
58extern void ata_scsi_scan_host(struct ata_port *ap);
54extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat); 59extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat);
55extern int ata_scsi_error(struct Scsi_Host *host); 60extern int ata_scsi_error(struct Scsi_Host *host);
56extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 61extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
@@ -76,18 +81,10 @@ extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
76extern void ata_scsi_badcmd(struct scsi_cmnd *cmd, 81extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
77 void (*done)(struct scsi_cmnd *), 82 void (*done)(struct scsi_cmnd *),
78 u8 asc, u8 ascq); 83 u8 asc, u8 ascq);
84extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
85 u8 sk, u8 asc, u8 ascq);
79extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 86extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
80 unsigned int (*actor) (struct ata_scsi_args *args, 87 unsigned int (*actor) (struct ata_scsi_args *args,
81 u8 *rbuf, unsigned int buflen)); 88 u8 *rbuf, unsigned int buflen));
82 89
83static inline void ata_bad_scsiop(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
84{
85 ata_scsi_badcmd(cmd, done, 0x20, 0x00);
86}
87
88static inline void ata_bad_cdb(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
89{
90 ata_scsi_badcmd(cmd, done, 0x24, 0x00);
91}
92
93#endif /* __LIBATA_H__ */ 90#endif /* __LIBATA_H__ */
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
new file mode 100644
index 000000000000..9820f272f889
--- /dev/null
+++ b/drivers/scsi/pdc_adma.c
@@ -0,0 +1,739 @@
1/*
2 * pdc_adma.c - Pacific Digital Corporation ADMA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Mark Lord
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 *
27 * Supports ATA disks in single-packet ADMA mode.
28 * Uses PIO for everything else.
29 *
30 * TODO: Use ADMA transfers for ATAPI devices, when possible.
31 * This requires careful attention to a number of quirks of the chip.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include "scsi.h"
44#include <scsi/scsi_host.h>
45#include <asm/io.h>
46#include <linux/libata.h>
47
48#define DRV_NAME "pdc_adma"
49#define DRV_VERSION "0.01"
50
51/* macro to calculate base address for ATA regs */
52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
53
54/* macro to calculate base address for ADMA regs */
55#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))
56
57enum {
58 ADMA_PORTS = 2,
59 ADMA_CPB_BYTES = 40,
60 ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16,
61 ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES,
62
63 ADMA_DMA_BOUNDARY = 0xffffffff,
64
65 /* global register offsets */
66 ADMA_MODE_LOCK = 0x00c7,
67
68 /* per-channel register offsets */
69 ADMA_CONTROL = 0x0000, /* ADMA control */
70 ADMA_STATUS = 0x0002, /* ADMA status */
71 ADMA_CPB_COUNT = 0x0004, /* CPB count */
72 ADMA_CPB_CURRENT = 0x000c, /* current CPB address */
73 ADMA_CPB_NEXT = 0x000c, /* next CPB address */
74 ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */
75 ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */
76 ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */
77
78 /* ADMA_CONTROL register bits */
79 aNIEN = (1 << 8), /* irq mask: 1==masked */
80 aGO = (1 << 7), /* packet trigger ("Go!") */
81 aRSTADM = (1 << 5), /* ADMA logic reset */
82 aRSTA = (1 << 2), /* ATA hard reset */
83 aPIOMD4 = 0x0003, /* PIO mode 4 */
84
85 /* ADMA_STATUS register bits */
86 aPSD = (1 << 6),
87 aUIRQ = (1 << 4),
88 aPERR = (1 << 0),
89
90 /* CPB bits */
91 cDONE = (1 << 0),
92 cVLD = (1 << 0),
93 cDAT = (1 << 2),
94 cIEN = (1 << 3),
95
96 /* PRD bits */
97 pORD = (1 << 4),
98 pDIRO = (1 << 5),
99 pEND = (1 << 7),
100
101 /* ATA register flags */
102 rIGN = (1 << 5),
103 rEND = (1 << 7),
104
105 /* ATA register addresses */
106 ADMA_REGS_CONTROL = 0x0e,
107 ADMA_REGS_SECTOR_COUNT = 0x12,
108 ADMA_REGS_LBA_LOW = 0x13,
109 ADMA_REGS_LBA_MID = 0x14,
110 ADMA_REGS_LBA_HIGH = 0x15,
111 ADMA_REGS_DEVICE = 0x16,
112 ADMA_REGS_COMMAND = 0x17,
113
114 /* PCI device IDs */
115 board_1841_idx = 0, /* ADMA 2-port controller */
116};
117
118typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
119
120struct adma_port_priv {
121 u8 *pkt;
122 dma_addr_t pkt_dma;
123 adma_state_t state;
124};
125
126static int adma_ata_init_one (struct pci_dev *pdev,
127 const struct pci_device_id *ent);
128static irqreturn_t adma_intr (int irq, void *dev_instance,
129 struct pt_regs *regs);
130static int adma_port_start(struct ata_port *ap);
131static void adma_host_stop(struct ata_host_set *host_set);
132static void adma_port_stop(struct ata_port *ap);
133static void adma_phy_reset(struct ata_port *ap);
134static void adma_qc_prep(struct ata_queued_cmd *qc);
135static int adma_qc_issue(struct ata_queued_cmd *qc);
136static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
137static void adma_bmdma_stop(struct ata_queued_cmd *qc);
138static u8 adma_bmdma_status(struct ata_port *ap);
139static void adma_irq_clear(struct ata_port *ap);
140static void adma_eng_timeout(struct ata_port *ap);
141
142static Scsi_Host_Template adma_ata_sht = {
143 .module = THIS_MODULE,
144 .name = DRV_NAME,
145 .ioctl = ata_scsi_ioctl,
146 .queuecommand = ata_scsi_queuecmd,
147 .eh_strategy_handler = ata_scsi_error,
148 .can_queue = ATA_DEF_QUEUE,
149 .this_id = ATA_SHT_THIS_ID,
150 .sg_tablesize = LIBATA_MAX_PRD,
151 .max_sectors = ATA_MAX_SECTORS,
152 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
153 .emulated = ATA_SHT_EMULATED,
154 .use_clustering = ENABLE_CLUSTERING,
155 .proc_name = DRV_NAME,
156 .dma_boundary = ADMA_DMA_BOUNDARY,
157 .slave_configure = ata_scsi_slave_config,
158 .bios_param = ata_std_bios_param,
159};
160
161static const struct ata_port_operations adma_ata_ops = {
162 .port_disable = ata_port_disable,
163 .tf_load = ata_tf_load,
164 .tf_read = ata_tf_read,
165 .check_status = ata_check_status,
166 .check_atapi_dma = adma_check_atapi_dma,
167 .exec_command = ata_exec_command,
168 .dev_select = ata_std_dev_select,
169 .phy_reset = adma_phy_reset,
170 .qc_prep = adma_qc_prep,
171 .qc_issue = adma_qc_issue,
172 .eng_timeout = adma_eng_timeout,
173 .irq_handler = adma_intr,
174 .irq_clear = adma_irq_clear,
175 .port_start = adma_port_start,
176 .port_stop = adma_port_stop,
177 .host_stop = adma_host_stop,
178 .bmdma_stop = adma_bmdma_stop,
179 .bmdma_status = adma_bmdma_status,
180};
181
182static struct ata_port_info adma_port_info[] = {
183 /* board_1841_idx */
184 {
185 .sht = &adma_ata_sht,
186 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
187 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO,
188 .pio_mask = 0x10, /* pio4 */
189 .udma_mask = 0x1f, /* udma0-4 */
190 .port_ops = &adma_ata_ops,
191 },
192};
193
194static struct pci_device_id adma_ata_pci_tbl[] = {
195 { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
196 board_1841_idx },
197
198 { } /* terminate list */
199};
200
201static struct pci_driver adma_ata_pci_driver = {
202 .name = DRV_NAME,
203 .id_table = adma_ata_pci_tbl,
204 .probe = adma_ata_init_one,
205 .remove = ata_pci_remove_one,
206};
207
208static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
209{
210 return 1; /* ATAPI DMA not yet supported */
211}
212
213static void adma_bmdma_stop(struct ata_queued_cmd *qc)
214{
215 /* nothing */
216}
217
218static u8 adma_bmdma_status(struct ata_port *ap)
219{
220 return 0;
221}
222
223static void adma_irq_clear(struct ata_port *ap)
224{
225 /* nothing */
226}
227
228static void adma_reset_engine(void __iomem *chan)
229{
230 /* reset ADMA to idle state */
231 writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
232 udelay(2);
233 writew(aPIOMD4, chan + ADMA_CONTROL);
234 udelay(2);
235}
236
237static void adma_reinit_engine(struct ata_port *ap)
238{
239 struct adma_port_priv *pp = ap->private_data;
240 void __iomem *mmio_base = ap->host_set->mmio_base;
241 void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
242
243 /* mask/clear ATA interrupts */
244 writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr);
245 ata_check_status(ap);
246
247 /* reset the ADMA engine */
248 adma_reset_engine(chan);
249
250 /* set in-FIFO threshold to 0x100 */
251 writew(0x100, chan + ADMA_FIFO_IN);
252
253 /* set CPB pointer */
254 writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);
255
256 /* set out-FIFO threshold to 0x100 */
257 writew(0x100, chan + ADMA_FIFO_OUT);
258
259 /* set CPB count */
260 writew(1, chan + ADMA_CPB_COUNT);
261
262 /* read/discard ADMA status */
263 readb(chan + ADMA_STATUS);
264}
265
266static inline void adma_enter_reg_mode(struct ata_port *ap)
267{
268 void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
269
270 writew(aPIOMD4, chan + ADMA_CONTROL);
271 readb(chan + ADMA_STATUS); /* flush */
272}
273
274static void adma_phy_reset(struct ata_port *ap)
275{
276 struct adma_port_priv *pp = ap->private_data;
277
278 pp->state = adma_state_idle;
279 adma_reinit_engine(ap);
280 ata_port_probe(ap);
281 ata_bus_reset(ap);
282}
283
284static void adma_eng_timeout(struct ata_port *ap)
285{
286 struct adma_port_priv *pp = ap->private_data;
287
288 if (pp->state != adma_state_idle) /* healthy paranoia */
289 pp->state = adma_state_mmio;
290 adma_reinit_engine(ap);
291 ata_eng_timeout(ap);
292}
293
294static int adma_fill_sg(struct ata_queued_cmd *qc)
295{
296 struct scatterlist *sg = qc->sg;
297 struct ata_port *ap = qc->ap;
298 struct adma_port_priv *pp = ap->private_data;
299 u8 *buf = pp->pkt;
300 int nelem, i = (2 + buf[3]) * 8;
301 u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
302
303 for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) {
304 u32 addr;
305 u32 len;
306
307 addr = (u32)sg_dma_address(sg);
308 *(__le32 *)(buf + i) = cpu_to_le32(addr);
309 i += 4;
310
311 len = sg_dma_len(sg) >> 3;
312 *(__le32 *)(buf + i) = cpu_to_le32(len);
313 i += 4;
314
315 if ((nelem + 1) == qc->n_elem)
316 pFLAGS |= pEND;
317 buf[i++] = pFLAGS;
318 buf[i++] = qc->dev->dma_mode & 0xf;
319 buf[i++] = 0; /* pPKLW */
320 buf[i++] = 0; /* reserved */
321
322 *(__le32 *)(buf + i)
323 = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
324 i += 4;
325
326 VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", nelem,
327 (unsigned long)addr, len);
328 }
329 return i;
330}
331
332static void adma_qc_prep(struct ata_queued_cmd *qc)
333{
334 struct adma_port_priv *pp = qc->ap->private_data;
335 u8 *buf = pp->pkt;
336 u32 pkt_dma = (u32)pp->pkt_dma;
337 int i = 0;
338
339 VPRINTK("ENTER\n");
340
341 adma_enter_reg_mode(qc->ap);
342 if (qc->tf.protocol != ATA_PROT_DMA) {
343 ata_qc_prep(qc);
344 return;
345 }
346
347 buf[i++] = 0; /* Response flags */
348 buf[i++] = 0; /* reserved */
349 buf[i++] = cVLD | cDAT | cIEN;
350 i++; /* cLEN, gets filled in below */
351
352 *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */
353 i += 4; /* cNCPB */
354 i += 4; /* cPRD, gets filled in below */
355
356 buf[i++] = 0; /* reserved */
357 buf[i++] = 0; /* reserved */
358 buf[i++] = 0; /* reserved */
359 buf[i++] = 0; /* reserved */
360
361 /* ATA registers; must be a multiple of 4 */
362 buf[i++] = qc->tf.device;
363 buf[i++] = ADMA_REGS_DEVICE;
364 if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
365 buf[i++] = qc->tf.hob_nsect;
366 buf[i++] = ADMA_REGS_SECTOR_COUNT;
367 buf[i++] = qc->tf.hob_lbal;
368 buf[i++] = ADMA_REGS_LBA_LOW;
369 buf[i++] = qc->tf.hob_lbam;
370 buf[i++] = ADMA_REGS_LBA_MID;
371 buf[i++] = qc->tf.hob_lbah;
372 buf[i++] = ADMA_REGS_LBA_HIGH;
373 }
374 buf[i++] = qc->tf.nsect;
375 buf[i++] = ADMA_REGS_SECTOR_COUNT;
376 buf[i++] = qc->tf.lbal;
377 buf[i++] = ADMA_REGS_LBA_LOW;
378 buf[i++] = qc->tf.lbam;
379 buf[i++] = ADMA_REGS_LBA_MID;
380 buf[i++] = qc->tf.lbah;
381 buf[i++] = ADMA_REGS_LBA_HIGH;
382 buf[i++] = 0;
383 buf[i++] = ADMA_REGS_CONTROL;
384 buf[i++] = rIGN;
385 buf[i++] = 0;
386 buf[i++] = qc->tf.command;
387 buf[i++] = ADMA_REGS_COMMAND | rEND;
388
389 buf[3] = (i >> 3) - 2; /* cLEN */
390 *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */
391
392 i = adma_fill_sg(qc);
393 wmb(); /* flush PRDs and pkt to memory */
394#if 0
395 /* dump out CPB + PRDs for debug */
396 {
397 int j, len = 0;
398 static char obuf[2048];
399 for (j = 0; j < i; ++j) {
400 len += sprintf(obuf+len, "%02x ", buf[j]);
401 if ((j & 7) == 7) {
402 printk("%s\n", obuf);
403 len = 0;
404 }
405 }
406 if (len)
407 printk("%s\n", obuf);
408 }
409#endif
410}
411
412static inline void adma_packet_start(struct ata_queued_cmd *qc)
413{
414 struct ata_port *ap = qc->ap;
415 void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
416
417 VPRINTK("ENTER, ap %p\n", ap);
418
419 /* fire up the ADMA engine */
420 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
421}
422
423static int adma_qc_issue(struct ata_queued_cmd *qc)
424{
425 struct adma_port_priv *pp = qc->ap->private_data;
426
427 switch (qc->tf.protocol) {
428 case ATA_PROT_DMA:
429 pp->state = adma_state_pkt;
430 adma_packet_start(qc);
431 return 0;
432
433 case ATA_PROT_ATAPI_DMA:
434 BUG();
435 break;
436
437 default:
438 break;
439 }
440
441 pp->state = adma_state_mmio;
442 return ata_qc_issue_prot(qc);
443}
444
445static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
446{
447 unsigned int handled = 0, port_no;
448 u8 __iomem *mmio_base = host_set->mmio_base;
449
450 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
451 struct ata_port *ap = host_set->ports[port_no];
452 struct adma_port_priv *pp;
453 struct ata_queued_cmd *qc;
454 void __iomem *chan = ADMA_REGS(mmio_base, port_no);
455 u8 drv_stat, status = readb(chan + ADMA_STATUS);
456
457 if (status == 0)
458 continue;
459 handled = 1;
460 adma_enter_reg_mode(ap);
461 if ((ap->flags & ATA_FLAG_PORT_DISABLED))
462 continue;
463 pp = ap->private_data;
464 if (!pp || pp->state != adma_state_pkt)
465 continue;
466 qc = ata_qc_from_tag(ap, ap->active_tag);
467 drv_stat = 0;
468 if ((status & (aPERR | aPSD | aUIRQ)))
469 drv_stat = ATA_ERR;
470 else if (pp->pkt[0] != cDONE)
471 drv_stat = ATA_ERR;
472 ata_qc_complete(qc, drv_stat);
473 }
474 return handled;
475}
476
477static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
478{
479 unsigned int handled = 0, port_no;
480
481 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
482 struct ata_port *ap;
483 ap = host_set->ports[port_no];
484 if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) {
485 struct ata_queued_cmd *qc;
486 struct adma_port_priv *pp = ap->private_data;
487 if (!pp || pp->state != adma_state_mmio)
488 continue;
489 qc = ata_qc_from_tag(ap, ap->active_tag);
490 if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
491
492 /* check main status, clearing INTRQ */
493 u8 status = ata_chk_status(ap);
494 if ((status & ATA_BUSY))
495 continue;
496 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
497 ap->id, qc->tf.protocol, status);
498
499 /* complete taskfile transaction */
500 pp->state = adma_state_idle;
501 ata_qc_complete(qc, status);
502 handled = 1;
503 }
504 }
505 }
506 return handled;
507}
508
509static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
510{
511 struct ata_host_set *host_set = dev_instance;
512 unsigned int handled = 0;
513
514 VPRINTK("ENTER\n");
515
516 spin_lock(&host_set->lock);
517 handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set);
518 spin_unlock(&host_set->lock);
519
520 VPRINTK("EXIT\n");
521
522 return IRQ_RETVAL(handled);
523}
524
525static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
526{
527 port->cmd_addr =
528 port->data_addr = base + 0x000;
529 port->error_addr =
530 port->feature_addr = base + 0x004;
531 port->nsect_addr = base + 0x008;
532 port->lbal_addr = base + 0x00c;
533 port->lbam_addr = base + 0x010;
534 port->lbah_addr = base + 0x014;
535 port->device_addr = base + 0x018;
536 port->status_addr =
537 port->command_addr = base + 0x01c;
538 port->altstatus_addr =
539 port->ctl_addr = base + 0x038;
540}
541
542static int adma_port_start(struct ata_port *ap)
543{
544 struct device *dev = ap->host_set->dev;
545 struct adma_port_priv *pp;
546 int rc;
547
548 rc = ata_port_start(ap);
549 if (rc)
550 return rc;
551 adma_enter_reg_mode(ap);
552 rc = -ENOMEM;
553 pp = kcalloc(1, sizeof(*pp), GFP_KERNEL);
554 if (!pp)
555 goto err_out;
556 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
557 GFP_KERNEL);
558 if (!pp->pkt)
559 goto err_out_kfree;
560 /* paranoia? */
561 if ((pp->pkt_dma & 7) != 0) {
562 printk("bad alignment for pp->pkt_dma: %08x\n",
563 (u32)pp->pkt_dma);
564 goto err_out_kfree2;
565 }
566 memset(pp->pkt, 0, ADMA_PKT_BYTES);
567 ap->private_data = pp;
568 adma_reinit_engine(ap);
569 return 0;
570
571err_out_kfree2:
572 dma_free_coherent(dev, ADMA_PKT_BYTES, pp->pkt, pp->pkt_dma);
573err_out_kfree:
574 kfree(pp);
575err_out:
576 ata_port_stop(ap);
577 return rc;
578}
579
580static void adma_port_stop(struct ata_port *ap)
581{
582 struct device *dev = ap->host_set->dev;
583 struct adma_port_priv *pp = ap->private_data;
584
585 adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no));
586 if (pp != NULL) {
587 ap->private_data = NULL;
588 if (pp->pkt != NULL)
589 dma_free_coherent(dev, ADMA_PKT_BYTES,
590 pp->pkt, pp->pkt_dma);
591 kfree(pp);
592 }
593 ata_port_stop(ap);
594}
595
596static void adma_host_stop(struct ata_host_set *host_set)
597{
598 unsigned int port_no;
599
600 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
601 adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no));
602
603 ata_pci_host_stop(host_set);
604}
605
606static void adma_host_init(unsigned int chip_id,
607 struct ata_probe_ent *probe_ent)
608{
609 unsigned int port_no;
610 void __iomem *mmio_base = probe_ent->mmio_base;
611
612 /* enable/lock aGO operation */
613 writeb(7, mmio_base + ADMA_MODE_LOCK);
614
615 /* reset the ADMA logic */
616 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
617 adma_reset_engine(ADMA_REGS(mmio_base, port_no));
618}
619
620static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
621{
622 int rc;
623
624 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
625 if (rc) {
626 printk(KERN_ERR DRV_NAME
627 "(%s): 32-bit DMA enable failed\n",
628 pci_name(pdev));
629 return rc;
630 }
631 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
632 if (rc) {
633 printk(KERN_ERR DRV_NAME
634 "(%s): 32-bit consistent DMA enable failed\n",
635 pci_name(pdev));
636 return rc;
637 }
638 return 0;
639}
640
641static int adma_ata_init_one(struct pci_dev *pdev,
642 const struct pci_device_id *ent)
643{
644 static int printed_version;
645 struct ata_probe_ent *probe_ent = NULL;
646 void __iomem *mmio_base;
647 unsigned int board_idx = (unsigned int) ent->driver_data;
648 int rc, port_no;
649
650 if (!printed_version++)
651 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
652
653 rc = pci_enable_device(pdev);
654 if (rc)
655 return rc;
656
657 rc = pci_request_regions(pdev, DRV_NAME);
658 if (rc)
659 goto err_out;
660
661 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
662 rc = -ENODEV;
663 goto err_out_regions;
664 }
665
666 mmio_base = pci_iomap(pdev, 4, 0);
667 if (mmio_base == NULL) {
668 rc = -ENOMEM;
669 goto err_out_regions;
670 }
671
672 rc = adma_set_dma_masks(pdev, mmio_base);
673 if (rc)
674 goto err_out_iounmap;
675
676 probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL);
677 if (probe_ent == NULL) {
678 rc = -ENOMEM;
679 goto err_out_iounmap;
680 }
681
682 probe_ent->dev = pci_dev_to_dev(pdev);
683 INIT_LIST_HEAD(&probe_ent->node);
684
685 probe_ent->sht = adma_port_info[board_idx].sht;
686 probe_ent->host_flags = adma_port_info[board_idx].host_flags;
687 probe_ent->pio_mask = adma_port_info[board_idx].pio_mask;
688 probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask;
689 probe_ent->udma_mask = adma_port_info[board_idx].udma_mask;
690 probe_ent->port_ops = adma_port_info[board_idx].port_ops;
691
692 probe_ent->irq = pdev->irq;
693 probe_ent->irq_flags = SA_SHIRQ;
694 probe_ent->mmio_base = mmio_base;
695 probe_ent->n_ports = ADMA_PORTS;
696
697 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
698 adma_ata_setup_port(&probe_ent->port[port_no],
699 ADMA_ATA_REGS((unsigned long)mmio_base, port_no));
700 }
701
702 pci_set_master(pdev);
703
704 /* initialize adapter */
705 adma_host_init(board_idx, probe_ent);
706
707 rc = ata_device_add(probe_ent);
708 kfree(probe_ent);
709 if (rc != ADMA_PORTS)
710 goto err_out_iounmap;
711 return 0;
712
713err_out_iounmap:
714 pci_iounmap(pdev, mmio_base);
715err_out_regions:
716 pci_release_regions(pdev);
717err_out:
718 pci_disable_device(pdev);
719 return rc;
720}
721
722static int __init adma_ata_init(void)
723{
724 return pci_module_init(&adma_ata_pci_driver);
725}
726
727static void __exit adma_ata_exit(void)
728{
729 pci_unregister_driver(&adma_ata_pci_driver);
730}
731
732MODULE_AUTHOR("Mark Lord");
733MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
734MODULE_LICENSE("GPL");
735MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
736MODULE_VERSION(DRV_VERSION);
737
738module_init(adma_ata_init);
739module_exit(adma_ata_exit);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index ea76fe44585e..422e0b6f603a 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -35,7 +35,7 @@
35#include <asm/io.h> 35#include <asm/io.h>
36 36
37#define DRV_NAME "sata_mv" 37#define DRV_NAME "sata_mv"
38#define DRV_VERSION "0.12" 38#define DRV_VERSION "0.25"
39 39
40enum { 40enum {
41 /* BAR's are enumerated in terms of pci_resource_start() terms */ 41 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -55,31 +55,61 @@ enum {
55 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ 55 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
56 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, 56 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
57 57
58 MV_Q_CT = 32, 58 MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
59 MV_CRQB_SZ = 32,
60 MV_CRPB_SZ = 8,
61 59
62 MV_DMA_BOUNDARY = 0xffffffffU, 60 MV_MAX_Q_DEPTH = 32,
63 SATAHC_MASK = (~(MV_SATAHC_REG_SZ - 1)), 61 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
62
63 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
64 * CRPB needs alignment on a 256B boundary. Size == 256B
65 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
66 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
67 */
68 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
69 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
70 MV_MAX_SG_CT = 176,
71 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
72 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
73
74 /* Our DMA boundary is determined by an ePRD being unable to handle
75 * anything larger than 64KB
76 */
77 MV_DMA_BOUNDARY = 0xffffU,
64 78
65 MV_PORTS_PER_HC = 4, 79 MV_PORTS_PER_HC = 4,
66 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ 80 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
67 MV_PORT_HC_SHIFT = 2, 81 MV_PORT_HC_SHIFT = 2,
68 /* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */ 82 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
69 MV_PORT_MASK = 3, 83 MV_PORT_MASK = 3,
70 84
71 /* Host Flags */ 85 /* Host Flags */
72 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 86 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
73 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 87 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
74 MV_FLAG_BDMA = (1 << 28), /* Basic DMA */ 88 MV_FLAG_GLBL_SFT_RST = (1 << 28), /* Global Soft Reset support */
89 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
90 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
91 MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE |
92 MV_FLAG_GLBL_SFT_RST),
75 93
76 chip_504x = 0, 94 chip_504x = 0,
77 chip_508x = 1, 95 chip_508x = 1,
78 chip_604x = 2, 96 chip_604x = 2,
79 chip_608x = 3, 97 chip_608x = 3,
80 98
99 CRQB_FLAG_READ = (1 << 0),
100 CRQB_TAG_SHIFT = 1,
101 CRQB_CMD_ADDR_SHIFT = 8,
102 CRQB_CMD_CS = (0x2 << 11),
103 CRQB_CMD_LAST = (1 << 15),
104
105 CRPB_FLAG_STATUS_SHIFT = 8,
106
107 EPRD_FLAG_END_OF_TBL = (1 << 31),
108
81 /* PCI interface registers */ 109 /* PCI interface registers */
82 110
111 PCI_COMMAND_OFS = 0xc00,
112
83 PCI_MAIN_CMD_STS_OFS = 0xd30, 113 PCI_MAIN_CMD_STS_OFS = 0xd30,
84 STOP_PCI_MASTER = (1 << 2), 114 STOP_PCI_MASTER = (1 << 2),
85 PCI_MASTER_EMPTY = (1 << 3), 115 PCI_MASTER_EMPTY = (1 << 3),
@@ -111,20 +141,13 @@ enum {
111 HC_CFG_OFS = 0, 141 HC_CFG_OFS = 0,
112 142
113 HC_IRQ_CAUSE_OFS = 0x14, 143 HC_IRQ_CAUSE_OFS = 0x14,
114 CRBP_DMA_DONE = (1 << 0), /* shift by port # */ 144 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
115 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */ 145 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
116 DEV_IRQ = (1 << 8), /* shift by port # */ 146 DEV_IRQ = (1 << 8), /* shift by port # */
117 147
118 /* Shadow block registers */ 148 /* Shadow block registers */
119 SHD_PIO_DATA_OFS = 0x100, 149 SHD_BLK_OFS = 0x100,
120 SHD_FEA_ERR_OFS = 0x104, 150 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
121 SHD_SECT_CNT_OFS = 0x108,
122 SHD_LBA_L_OFS = 0x10C,
123 SHD_LBA_M_OFS = 0x110,
124 SHD_LBA_H_OFS = 0x114,
125 SHD_DEV_HD_OFS = 0x118,
126 SHD_CMD_STA_OFS = 0x11C,
127 SHD_CTL_AST_OFS = 0x120,
128 151
129 /* SATA registers */ 152 /* SATA registers */
130 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 153 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
@@ -132,6 +155,11 @@ enum {
132 155
133 /* Port registers */ 156 /* Port registers */
134 EDMA_CFG_OFS = 0, 157 EDMA_CFG_OFS = 0,
158 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
159 EDMA_CFG_NCQ = (1 << 5),
160 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
161 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
162 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
135 163
136 EDMA_ERR_IRQ_CAUSE_OFS = 0x8, 164 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
137 EDMA_ERR_IRQ_MASK_OFS = 0xc, 165 EDMA_ERR_IRQ_MASK_OFS = 0xc,
@@ -161,33 +189,85 @@ enum {
161 EDMA_ERR_LNK_DATA_TX | 189 EDMA_ERR_LNK_DATA_TX |
162 EDMA_ERR_TRANS_PROTO), 190 EDMA_ERR_TRANS_PROTO),
163 191
192 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
193 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
194 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
195
196 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
197 EDMA_REQ_Q_PTR_SHIFT = 5,
198
199 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
200 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
201 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
202 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
203 EDMA_RSP_Q_PTR_SHIFT = 3,
204
164 EDMA_CMD_OFS = 0x28, 205 EDMA_CMD_OFS = 0x28,
165 EDMA_EN = (1 << 0), 206 EDMA_EN = (1 << 0),
166 EDMA_DS = (1 << 1), 207 EDMA_DS = (1 << 1),
167 ATA_RST = (1 << 2), 208 ATA_RST = (1 << 2),
168 209
169 /* BDMA is 6xxx part only */ 210 /* Host private flags (hp_flags) */
170 BDMA_CMD_OFS = 0x224, 211 MV_HP_FLAG_MSI = (1 << 0),
171 BDMA_START = (1 << 0),
172 212
173 MV_UNDEF = 0, 213 /* Port private flags (pp_flags) */
214 MV_PP_FLAG_EDMA_EN = (1 << 0),
215 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
174}; 216};
175 217
176struct mv_port_priv { 218/* Command ReQuest Block: 32B */
219struct mv_crqb {
220 u32 sg_addr;
221 u32 sg_addr_hi;
222 u16 ctrl_flags;
223 u16 ata_cmd[11];
224};
177 225
226/* Command ResPonse Block: 8B */
227struct mv_crpb {
228 u16 id;
229 u16 flags;
230 u32 tmstmp;
178}; 231};
179 232
180struct mv_host_priv { 233/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
234struct mv_sg {
235 u32 addr;
236 u32 flags_size;
237 u32 addr_hi;
238 u32 reserved;
239};
181 240
241struct mv_port_priv {
242 struct mv_crqb *crqb;
243 dma_addr_t crqb_dma;
244 struct mv_crpb *crpb;
245 dma_addr_t crpb_dma;
246 struct mv_sg *sg_tbl;
247 dma_addr_t sg_tbl_dma;
248
249 unsigned req_producer; /* cp of req_in_ptr */
250 unsigned rsp_consumer; /* cp of rsp_out_ptr */
251 u32 pp_flags;
252};
253
254struct mv_host_priv {
255 u32 hp_flags;
182}; 256};
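
The queue geometry above is easy to sanity-check: 32 CRQB slots of 32 B make 1 KB, 32 CRPB slots of 8 B make 256 B, and 176 ePRDs of 16 B make 2816 B, so MV_PORT_PRIV_DMA_SZ comes to exactly 4096 B -- one page of coherent memory per port. A hypothetical compile-time guard (not in the patch) that would catch any drift between the constants:

static inline void mv_check_geometry(void)
{
	/* 1024 + 256 + 2816 == 4096 == MV_PORT_PRIV_DMA_SZ */
	BUILD_BUG_ON(MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ !=
		     MV_PORT_PRIV_DMA_SZ);
}
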
183 257
184static void mv_irq_clear(struct ata_port *ap); 258static void mv_irq_clear(struct ata_port *ap);
185static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); 259static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
186static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 260static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
261static u8 mv_check_err(struct ata_port *ap);
187static void mv_phy_reset(struct ata_port *ap); 262static void mv_phy_reset(struct ata_port *ap);
188static int mv_master_reset(void __iomem *mmio_base); 263static void mv_host_stop(struct ata_host_set *host_set);
264static int mv_port_start(struct ata_port *ap);
265static void mv_port_stop(struct ata_port *ap);
266static void mv_qc_prep(struct ata_queued_cmd *qc);
267static int mv_qc_issue(struct ata_queued_cmd *qc);
189static irqreturn_t mv_interrupt(int irq, void *dev_instance, 268static irqreturn_t mv_interrupt(int irq, void *dev_instance,
190 struct pt_regs *regs); 269 struct pt_regs *regs);
270static void mv_eng_timeout(struct ata_port *ap);
191static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 271static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
192 272
193static Scsi_Host_Template mv_sht = { 273static Scsi_Host_Template mv_sht = {
@@ -196,13 +276,13 @@ static Scsi_Host_Template mv_sht = {
196 .ioctl = ata_scsi_ioctl, 276 .ioctl = ata_scsi_ioctl,
197 .queuecommand = ata_scsi_queuecmd, 277 .queuecommand = ata_scsi_queuecmd,
198 .eh_strategy_handler = ata_scsi_error, 278 .eh_strategy_handler = ata_scsi_error,
199 .can_queue = ATA_DEF_QUEUE, 279 .can_queue = MV_USE_Q_DEPTH,
200 .this_id = ATA_SHT_THIS_ID, 280 .this_id = ATA_SHT_THIS_ID,
201 .sg_tablesize = MV_UNDEF, 281 .sg_tablesize = MV_MAX_SG_CT,
202 .max_sectors = ATA_MAX_SECTORS, 282 .max_sectors = ATA_MAX_SECTORS,
203 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 283 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
204 .emulated = ATA_SHT_EMULATED, 284 .emulated = ATA_SHT_EMULATED,
205 .use_clustering = MV_UNDEF, 285 .use_clustering = ATA_SHT_USE_CLUSTERING,
206 .proc_name = DRV_NAME, 286 .proc_name = DRV_NAME,
207 .dma_boundary = MV_DMA_BOUNDARY, 287 .dma_boundary = MV_DMA_BOUNDARY,
208 .slave_configure = ata_scsi_slave_config, 288 .slave_configure = ata_scsi_slave_config,
@@ -210,21 +290,22 @@ static Scsi_Host_Template mv_sht = {
210 .ordered_flush = 1, 290 .ordered_flush = 1,
211}; 291};
212 292
213static struct ata_port_operations mv_ops = { 293static const struct ata_port_operations mv_ops = {
214 .port_disable = ata_port_disable, 294 .port_disable = ata_port_disable,
215 295
216 .tf_load = ata_tf_load, 296 .tf_load = ata_tf_load,
217 .tf_read = ata_tf_read, 297 .tf_read = ata_tf_read,
218 .check_status = ata_check_status, 298 .check_status = ata_check_status,
299 .check_err = mv_check_err,
219 .exec_command = ata_exec_command, 300 .exec_command = ata_exec_command,
220 .dev_select = ata_std_dev_select, 301 .dev_select = ata_std_dev_select,
221 302
222 .phy_reset = mv_phy_reset, 303 .phy_reset = mv_phy_reset,
223 304
224 .qc_prep = ata_qc_prep, 305 .qc_prep = mv_qc_prep,
225 .qc_issue = ata_qc_issue_prot, 306 .qc_issue = mv_qc_issue,
226 307
227 .eng_timeout = ata_eng_timeout, 308 .eng_timeout = mv_eng_timeout,
228 309
229 .irq_handler = mv_interrupt, 310 .irq_handler = mv_interrupt,
230 .irq_clear = mv_irq_clear, 311 .irq_clear = mv_irq_clear,
@@ -232,46 +313,39 @@ static struct ata_port_operations mv_ops = {
232 .scr_read = mv_scr_read, 313 .scr_read = mv_scr_read,
233 .scr_write = mv_scr_write, 314 .scr_write = mv_scr_write,
234 315
235 .port_start = ata_port_start, 316 .port_start = mv_port_start,
236 .port_stop = ata_port_stop, 317 .port_stop = mv_port_stop,
237 .host_stop = ata_host_stop, 318 .host_stop = mv_host_stop,
238}; 319};
239 320
240static struct ata_port_info mv_port_info[] = { 321static struct ata_port_info mv_port_info[] = {
241 { /* chip_504x */ 322 { /* chip_504x */
242 .sht = &mv_sht, 323 .sht = &mv_sht,
243 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 324 .host_flags = MV_COMMON_FLAGS,
244 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), 325 .pio_mask = 0x1f, /* pio0-4 */
245 .pio_mask = 0x1f, /* pio4-0 */ 326 .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */
246 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
247 .port_ops = &mv_ops, 327 .port_ops = &mv_ops,
248 }, 328 },
249 { /* chip_508x */ 329 { /* chip_508x */
250 .sht = &mv_sht, 330 .sht = &mv_sht,
251 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 331 .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
252 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 332 .pio_mask = 0x1f, /* pio0-4 */
253 MV_FLAG_DUAL_HC), 333 .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */
254 .pio_mask = 0x1f, /* pio4-0 */
255 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
256 .port_ops = &mv_ops, 334 .port_ops = &mv_ops,
257 }, 335 },
258 { /* chip_604x */ 336 { /* chip_604x */
259 .sht = &mv_sht, 337 .sht = &mv_sht,
260 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 338 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
261 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 339 .pio_mask = 0x1f, /* pio0-4 */
262 MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA), 340 .udma_mask = 0x7f, /* udma0-6 */
263 .pio_mask = 0x1f, /* pio4-0 */
264 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
265 .port_ops = &mv_ops, 341 .port_ops = &mv_ops,
266 }, 342 },
267 { /* chip_608x */ 343 { /* chip_608x */
268 .sht = &mv_sht, 344 .sht = &mv_sht,
269 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 345 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
270 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 346 MV_FLAG_DUAL_HC),
271 MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC | 347 .pio_mask = 0x1f, /* pio0-4 */
272 MV_FLAG_BDMA), 348 .udma_mask = 0x7f, /* udma0-6 */
273 .pio_mask = 0x1f, /* pio4-0 */
274 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
275 .port_ops = &mv_ops, 349 .port_ops = &mv_ops,
276 }, 350 },
277}; 351};
@@ -306,12 +380,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr)
306 (void) readl(addr); /* flush to avoid PCI posted write */ 380 (void) readl(addr); /* flush to avoid PCI posted write */
307} 381}
308 382
309static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio)
310{
311 return ((void __iomem *)((unsigned long)port_mmio &
312 (unsigned long)SATAHC_MASK));
313}
314
315static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) 383static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
316{ 384{
317 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); 385 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
@@ -329,24 +397,150 @@ static inline void __iomem *mv_ap_base(struct ata_port *ap)
329 return mv_port_base(ap->host_set->mmio_base, ap->port_no); 397 return mv_port_base(ap->host_set->mmio_base, ap->port_no);
330} 398}
331 399
332static inline int mv_get_hc_count(unsigned long flags) 400static inline int mv_get_hc_count(unsigned long hp_flags)
333{ 401{
334 return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1); 402 return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
335} 403}
336 404
337static inline int mv_is_edma_active(struct ata_port *ap) 405static void mv_irq_clear(struct ata_port *ap)
406{
407}
408
409/**
410 * mv_start_dma - Enable eDMA engine
411 * @base: port base address
412 * @pp: port private data
413 *
414 * Verify the local cache of the eDMA state is accurate with an
415 * assert.
416 *
417 * LOCKING:
418 * Inherited from caller.
419 */
420static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
421{
422 if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
423 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
424 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
425 }
426 assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
427}
428
429/**
430 * mv_stop_dma - Disable eDMA engine
431 * @ap: ATA channel to manipulate
432 *
433 * Verify the local cache of the eDMA state is accurate with an
434 * assert.
435 *
436 * LOCKING:
437 * Inherited from caller.
438 */
439static void mv_stop_dma(struct ata_port *ap)
338{ 440{
339 void __iomem *port_mmio = mv_ap_base(ap); 441 void __iomem *port_mmio = mv_ap_base(ap);
340 return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); 442 struct mv_port_priv *pp = ap->private_data;
443 u32 reg;
444 int i;
445
446 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
447 /* Disable EDMA if active. The disable bit auto clears.
448 */
449 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
450 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
451 } else {
452 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
453 }
454
455 /* now properly wait for the eDMA to stop */
456 for (i = 1000; i > 0; i--) {
457 reg = readl(port_mmio + EDMA_CMD_OFS);
458 if (!(EDMA_EN & reg)) {
459 break;
460 }
461 udelay(100);
462 }
463
464 if (EDMA_EN & reg) {
465 printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
466 /* FIXME: Consider doing a reset here to recover */
467 }
341} 468}
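
mv_stop_dma() is the standard bounded-poll idiom: request the state change, then spin on the status bit with a hard iteration cap (1000 passes of udelay(100), roughly 100 ms) so a wedged engine cannot hang the caller. Distilled into a hypothetical helper:

/* Sketch only; the register, bit and 100 us step are illustrative. */
static int mv_wait_bit_clear(void __iomem *reg, u32 bit, int tries)
{
	while (tries-- > 0) {
		if (!(readl(reg) & bit))
			return 0;	/* state change observed */
		udelay(100);
	}
	return -EBUSY;			/* caller picks the recovery */
}
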
342 469
343static inline int mv_port_bdma_capable(struct ata_port *ap) 470#ifdef ATA_DEBUG
471static void mv_dump_mem(void __iomem *start, unsigned bytes)
344{ 472{
345 return (ap->flags & MV_FLAG_BDMA); 473 int b, w;
474 for (b = 0; b < bytes; ) {
475 DPRINTK("%p: ", start + b);
476 for (w = 0; b < bytes && w < 4; w++) {
477 printk("%08x ",readl(start + b));
478 b += sizeof(u32);
479 }
480 printk("\n");
481 }
346} 482}
483#endif
347 484
348static void mv_irq_clear(struct ata_port *ap) 485static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
486{
487#ifdef ATA_DEBUG
488 int b, w;
489 u32 dw;
490 for (b = 0; b < bytes; ) {
491 DPRINTK("%02x: ", b);
492 for (w = 0; b < bytes && w < 4; w++) {
493 (void) pci_read_config_dword(pdev,b,&dw);
494 printk("%08x ",dw);
495 b += sizeof(u32);
496 }
497 printk("\n");
498 }
499#endif
500}
501static void mv_dump_all_regs(void __iomem *mmio_base, int port,
502 struct pci_dev *pdev)
349{ 503{
504#ifdef ATA_DEBUG
505 void __iomem *hc_base = mv_hc_base(mmio_base,
506 port >> MV_PORT_HC_SHIFT);
507 void __iomem *port_base;
508 int start_port, num_ports, p, start_hc, num_hcs, hc;
509
510 if (0 > port) {
511 start_hc = start_port = 0;
512 num_ports = 8; /* should be benign for 4 port devs */
513 num_hcs = 2;
514 } else {
515 start_hc = port >> MV_PORT_HC_SHIFT;
516 start_port = port;
517 num_ports = num_hcs = 1;
518 }
519 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
520 num_ports > 1 ? num_ports - 1 : start_port);
521
522 if (NULL != pdev) {
523 DPRINTK("PCI config space regs:\n");
524 mv_dump_pci_cfg(pdev, 0x68);
525 }
526 DPRINTK("PCI regs:\n");
527 mv_dump_mem(mmio_base+0xc00, 0x3c);
528 mv_dump_mem(mmio_base+0xd00, 0x34);
529 mv_dump_mem(mmio_base+0xf00, 0x4);
530 mv_dump_mem(mmio_base+0x1d00, 0x6c);
531 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
532 hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT);
533 DPRINTK("HC regs (HC %i):\n", hc);
534 mv_dump_mem(hc_base, 0x1c);
535 }
536 for (p = start_port; p < start_port + num_ports; p++) {
537 port_base = mv_port_base(mmio_base, p);
538 DPRINTK("EDMA regs (port %i):\n",p);
539 mv_dump_mem(port_base, 0x54);
540 DPRINTK("SATA regs (port %i):\n",p);
541 mv_dump_mem(port_base+0x300, 0x60);
542 }
543#endif
350} 544}
351 545
352static unsigned int mv_scr_offset(unsigned int sc_reg_in) 546static unsigned int mv_scr_offset(unsigned int sc_reg_in)
@@ -389,30 +583,37 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
389 } 583 }
390} 584}
391 585
392static int mv_master_reset(void __iomem *mmio_base) 586/**
587 * mv_global_soft_reset - Perform the 6xxx global soft reset
588 * @mmio_base: base address of the HBA
589 *
590 * This routine only applies to 6xxx parts.
591 *
592 * LOCKING:
593 * Inherited from caller.
594 */
595static int mv_global_soft_reset(void __iomem *mmio_base)
393{ 596{
394 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; 597 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
395 int i, rc = 0; 598 int i, rc = 0;
396 u32 t; 599 u32 t;
397 600
398 VPRINTK("ENTER\n");
399
400 /* Following procedure defined in PCI "main command and status 601 /* Following procedure defined in PCI "main command and status
401 * register" table. 602 * register" table.
402 */ 603 */
403 t = readl(reg); 604 t = readl(reg);
404 writel(t | STOP_PCI_MASTER, reg); 605 writel(t | STOP_PCI_MASTER, reg);
405 606
406 for (i = 0; i < 100; i++) { 607 for (i = 0; i < 1000; i++) {
407 msleep(10); 608 udelay(1);
408 t = readl(reg); 609 t = readl(reg);
409 if (PCI_MASTER_EMPTY & t) { 610 if (PCI_MASTER_EMPTY & t) {
410 break; 611 break;
411 } 612 }
412 } 613 }
413 if (!(PCI_MASTER_EMPTY & t)) { 614 if (!(PCI_MASTER_EMPTY & t)) {
414 printk(KERN_ERR DRV_NAME "PCI master won't flush\n"); 615 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
415 rc = 1; /* broken HW? */ 616 rc = 1;
416 goto done; 617 goto done;
417 } 618 }
418 619
@@ -425,39 +626,399 @@ static int mv_master_reset(void __iomem *mmio_base)
425 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 626 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
426 627
427 if (!(GLOB_SFT_RST & t)) { 628 if (!(GLOB_SFT_RST & t)) {
428 printk(KERN_ERR DRV_NAME "can't set global reset\n"); 629 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
429 rc = 1; /* broken HW? */ 630 rc = 1;
430 goto done; 631 goto done;
431 } 632 }
432 633
433 /* clear reset */ 634 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
434 i = 5; 635 i = 5;
435 do { 636 do {
436 writel(t & ~GLOB_SFT_RST, reg); 637 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
437 t = readl(reg); 638 t = readl(reg);
438 udelay(1); 639 udelay(1);
439 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 640 } while ((GLOB_SFT_RST & t) && (i-- > 0));
440 641
441 if (GLOB_SFT_RST & t) { 642 if (GLOB_SFT_RST & t) {
442 printk(KERN_ERR DRV_NAME "can't clear global reset\n"); 643 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
443 rc = 1; /* broken HW? */ 644 rc = 1;
444 } 645 }
445 646done:
446 done:
447 VPRINTK("EXIT, rc = %i\n", rc);
448 return rc; 647 return rc;
449} 648}
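
The reset follows the chip's documented order: stop the PCI master, wait for its posted writes to drain, assert the global soft reset, then deassert it while restarting the master (the restart being the step the spec omits). The drain phase, pulled out as a hypothetical helper with the same timings as the loop above:

static int mv_drain_pci_master(void __iomem *reg)
{
	int i;

	writel(readl(reg) | STOP_PCI_MASTER, reg);	/* quiesce */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		if (readl(reg) & PCI_MASTER_EMPTY)
			return 0;	/* posted writes flushed */
	}
	return -EIO;			/* master won't flush */
}
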
450 649
451static void mv_err_intr(struct ata_port *ap) 650/**
651 * mv_host_stop - Host specific cleanup/stop routine.
652 * @host_set: host data structure
653 *
654 * Disable ints, cleanup host memory, call general purpose
655 * host_stop.
656 *
657 * LOCKING:
658 * Inherited from caller.
659 */
660static void mv_host_stop(struct ata_host_set *host_set)
452{ 661{
453 void __iomem *port_mmio; 662 struct mv_host_priv *hpriv = host_set->private_data;
454 u32 edma_err_cause, serr = 0; 663 struct pci_dev *pdev = to_pci_dev(host_set->dev);
664
665 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
666 pci_disable_msi(pdev);
667 } else {
668 pci_intx(pdev, 0);
669 }
670 kfree(hpriv);
671 ata_host_stop(host_set);
672}
673
674/**
675 * mv_port_start - Port specific init/start routine.
676 * @ap: ATA channel to manipulate
677 *
678 * Allocate and point to DMA memory, init port private memory,
679 * zero indices.
680 *
681 * LOCKING:
682 * Inherited from caller.
683 */
684static int mv_port_start(struct ata_port *ap)
685{
686 struct device *dev = ap->host_set->dev;
687 struct mv_port_priv *pp;
688 void __iomem *port_mmio = mv_ap_base(ap);
689 void *mem;
690 dma_addr_t mem_dma;
691
692 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
693 if (!pp) {
694 return -ENOMEM;
695 }
696 memset(pp, 0, sizeof(*pp));
697
698 mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
699 GFP_KERNEL);
700 if (!mem) {
701 kfree(pp);
702 return -ENOMEM;
703 }
704 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
705
706 /* First item in chunk of DMA memory:
707 * 32-slot command request table (CRQB), 32 bytes each in size
708 */
709 pp->crqb = mem;
710 pp->crqb_dma = mem_dma;
711 mem += MV_CRQB_Q_SZ;
712 mem_dma += MV_CRQB_Q_SZ;
713
714 /* Second item:
715 * 32-slot command response table (CRPB), 8 bytes each in size
716 */
717 pp->crpb = mem;
718 pp->crpb_dma = mem_dma;
719 mem += MV_CRPB_Q_SZ;
720 mem_dma += MV_CRPB_Q_SZ;
721
722 /* Third item:
723 * Table of scatter-gather descriptors (ePRD), 16 bytes each
724 */
725 pp->sg_tbl = mem;
726 pp->sg_tbl_dma = mem_dma;
727
728 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
729 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
730
731 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
732 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
733 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
734
735 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
736 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
737
738 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
739 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
740 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
741
742 pp->req_producer = pp->rsp_consumer = 0;
743
744 /* Don't turn on EDMA here...do it before DMA commands only. Else
745 * we'll be unable to send non-data, PIO, etc due to restricted access
746 * to shadow regs.
747 */
748 ap->private_data = pp;
749 return 0;
750}
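
Two idioms in mv_port_start() are worth spelling out. All three per-port tables come from one dma_alloc_coherent() call, with the CPU pointer and bus address advanced in lock step as the buffer is carved up, which preserves the alignment relationships from the enum. And the (x >> 16) >> 16 double shift splits a dma_addr_t that may be only 32 bits wide without invoking an undefined shift by 32. A condensed sketch of the carve-up (hypothetical helper, two regions only):

static int example_carve(struct device *dev, struct mv_port_priv *pp)
{
	dma_addr_t bus;
	void *cpu = dma_alloc_coherent(dev, MV_CRQB_Q_SZ + MV_CRPB_Q_SZ,
				       &bus, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;
	memset(cpu, 0, MV_CRQB_Q_SZ + MV_CRPB_Q_SZ);

	pp->crqb = cpu;			/* 1KB-aligned by allocation */
	pp->crqb_dma = bus;
	cpu += MV_CRQB_Q_SZ;		/* advance both views together */
	bus += MV_CRQB_Q_SZ;
	pp->crpb = cpu;			/* 256B-aligned by construction */
	pp->crpb_dma = bus;
	return 0;
}
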
751
752/**
753 * mv_port_stop - Port specific cleanup/stop routine.
754 * @ap: ATA channel to manipulate
755 *
756 * Stop DMA, cleanup port memory.
757 *
758 * LOCKING:
759 * This routine uses the host_set lock to protect the DMA stop.
760 */
761static void mv_port_stop(struct ata_port *ap)
762{
763 struct device *dev = ap->host_set->dev;
764 struct mv_port_priv *pp = ap->private_data;
765 unsigned long flags;
766
767 spin_lock_irqsave(&ap->host_set->lock, flags);
768 mv_stop_dma(ap);
769 spin_unlock_irqrestore(&ap->host_set->lock, flags);
770
771 ap->private_data = NULL;
772 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
773 kfree(pp);
774}
775
776/**
777 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
778 * @qc: queued command whose SG list to source from
779 *
780 * Populate the SG list and mark the last entry.
781 *
782 * LOCKING:
783 * Inherited from caller.
784 */
785static void mv_fill_sg(struct ata_queued_cmd *qc)
786{
787 struct mv_port_priv *pp = qc->ap->private_data;
788 unsigned int i;
789
790 for (i = 0; i < qc->n_elem; i++) {
791 u32 sg_len;
792 dma_addr_t addr;
793
794 addr = sg_dma_address(&qc->sg[i]);
795 sg_len = sg_dma_len(&qc->sg[i]);
796
797 pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
798 pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
799 assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
800 pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
801 }
802 if (0 < qc->n_elem) {
803 pp->sg_tbl[qc->n_elem - 1].flags_size |=
804 cpu_to_le32(EPRD_FLAG_END_OF_TBL);
805 }
806}
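
The per-entry packing in mv_fill_sg(), restated as a standalone helper (illustrative, not driver API): split the bus address across the two address words, store the byte count, and tag the final descriptor so the EDMA engine knows where the table ends.

static void example_fill_eprd(struct mv_sg *e, dma_addr_t addr,
			      u32 len, int last)
{
	e->addr       = cpu_to_le32(addr & 0xffffffff);
	e->addr_hi    = cpu_to_le32((addr >> 16) >> 16);
	e->flags_size = cpu_to_le32(len);	/* capped at 64KB per ePRD */
	if (last)
		e->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
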
807
808static inline unsigned mv_inc_q_index(unsigned *index)
809{
810 *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
811 return *index;
812}
813
814static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
815{
816 *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
817 (last ? CRQB_CMD_LAST : 0);
818}
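
A worked example of the command-word encoding, assuming ATA_REG_CMD == 0x07 as in <linux/ata.h>: packing the final register write for READ DMA (opcode 0xc8) gives 0x00c8 | (0x07 << 8) | (0x2 << 11) | (1 << 15) == 0x97c8.

u16 cw;

mv_crqb_pack_cmd(&cw, 0xc8, ATA_REG_CMD, 1);
/* cw == 0x97c8: data 0x00c8 + addr 0x0700 + CS 0x1000 + LAST 0x8000 */
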
455 819
456 /* bug here b/c we got an err int on a port we don't know about, 820/**
457 * so there's no way to clear it 821 * mv_qc_prep - Host specific command preparation.
822 * @qc: queued command to prepare
823 *
824 * This routine simply redirects to the general purpose routine
825 * if command is not DMA. Else, it handles prep of the CRQB
826 * (command request block), does some sanity checking, and calls
827 * the SG load routine.
828 *
829 * LOCKING:
830 * Inherited from caller.
831 */
832static void mv_qc_prep(struct ata_queued_cmd *qc)
833{
834 struct ata_port *ap = qc->ap;
835 struct mv_port_priv *pp = ap->private_data;
836 u16 *cw;
837 struct ata_taskfile *tf;
838 u16 flags = 0;
839
840 if (ATA_PROT_DMA != qc->tf.protocol) {
841 return;
842 }
843
844 /* the req producer index should be the same as we remember it */
845 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
846 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
847 pp->req_producer);
848
849 /* Fill in command request block
458 */ 850 */
459 BUG_ON(NULL == ap); 851 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
460 port_mmio = mv_ap_base(ap); 852 flags |= CRQB_FLAG_READ;
853 }
854 assert(MV_MAX_Q_DEPTH > qc->tag);
855 flags |= qc->tag << CRQB_TAG_SHIFT;
856
857 pp->crqb[pp->req_producer].sg_addr =
858 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
859 pp->crqb[pp->req_producer].sg_addr_hi =
860 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
861 pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
862
863 cw = &pp->crqb[pp->req_producer].ata_cmd[0];
864 tf = &qc->tf;
865
866 /* Sadly, the CRQB cannot accommodate all registers--there are
867 * only 11 bytes...so we must pick and choose required
868 * registers based on the command. So, we drop feature and
869 * hob_feature for [RW] DMA commands, but they are needed for
870 * NCQ. NCQ will drop hob_nsect.
871 */
872 switch (tf->command) {
873 case ATA_CMD_READ:
874 case ATA_CMD_READ_EXT:
875 case ATA_CMD_WRITE:
876 case ATA_CMD_WRITE_EXT:
877 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
878 break;
879#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
880 case ATA_CMD_FPDMA_READ:
881 case ATA_CMD_FPDMA_WRITE:
882 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
883 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
884 break;
885#endif /* FIXME: remove this line when NCQ added */
886 default:
887 /* The only other commands EDMA supports in non-queued and
888 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
889 * of which are defined/used by Linux. If we get here, this
890 * driver needs work.
891 *
892 * FIXME: modify libata to give qc_prep a return value and
893 * return error here.
894 */
895 BUG_ON(tf->command);
896 break;
897 }
898 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
899 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
900 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
901 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
902 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
903 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
904 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
905 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
906 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
907
908 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
909 return;
910 }
911 mv_fill_sg(qc);
912}
913
914/**
915 * mv_qc_issue - Initiate a command to the host
916 * @qc: queued command to start
917 *
918 * This routine simply redirects to the general purpose routine
919 * if command is not DMA. Else, it sanity checks our local
920 * caches of the request producer/consumer indices then enables
921 * DMA and bumps the request producer index.
922 *
923 * LOCKING:
924 * Inherited from caller.
925 */
926static int mv_qc_issue(struct ata_queued_cmd *qc)
927{
928 void __iomem *port_mmio = mv_ap_base(qc->ap);
929 struct mv_port_priv *pp = qc->ap->private_data;
930 u32 in_ptr;
931
932 if (ATA_PROT_DMA != qc->tf.protocol) {
933 /* We're about to send a non-EDMA capable command to the
934 * port. Turn off EDMA so there won't be problems accessing
935 * shadow block, etc registers.
936 */
937 mv_stop_dma(qc->ap);
938 return ata_qc_issue_prot(qc);
939 }
940
941 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
942
943 /* the req producer index should be the same as we remember it */
944 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
945 pp->req_producer);
946 /* until we do queuing, the queue should be empty at this point */
947 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
948 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
949 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
950
951 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
952
953 mv_start_dma(port_mmio, pp);
954
955 /* and write the request in pointer to kick the EDMA to life */
956 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
957 in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
958 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
959
960 return 0;
961}
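
The request queue is a classic power-of-two ring: mv_inc_q_index() wraps with a mask instead of a modulo, and the hardware learns of new work through an index field spliced into the IN-pointer register while the base-address bits are preserved. The doorbell write, factored into a hypothetical helper:

static void example_ring_kick(void __iomem *port_mmio, u32 in_ptr,
			      unsigned new_idx)
{
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;	   /* keep queue base bits */
	in_ptr |= new_idx << EDMA_REQ_Q_PTR_SHIFT; /* splice in the index */
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
}
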
962
963/**
964 * mv_get_crpb_status - get status from most recently completed cmd
965 * @ap: ATA channel to manipulate
966 *
967 * This routine is for use when the port is in DMA mode, when it
968 * will be using the CRPB (command response block) method of
969 * returning command completion information. We assert indices
970 * are good, grab status, and bump the response consumer index to
971 * prove that we're up to date.
972 *
973 * LOCKING:
974 * Inherited from caller.
975 */
976static u8 mv_get_crpb_status(struct ata_port *ap)
977{
978 void __iomem *port_mmio = mv_ap_base(ap);
979 struct mv_port_priv *pp = ap->private_data;
980 u32 out_ptr;
981
982 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
983
984 /* the response consumer index should be the same as we remember it */
985 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
986 pp->rsp_consumer);
987
988 /* increment our consumer index... */
989 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
990
991 /* and, until we do NCQ, there should only be 1 CRPB waiting */
992 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
993 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
994 pp->rsp_consumer);
995
996 /* write out our inc'd consumer index so EDMA knows we're caught up */
997 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
998 out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
999 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1000
1001 /* Return ATA status register for completed CRPB */
1002 return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
1003}
1004
1005/**
1006 * mv_err_intr - Handle error interrupts on the port
1007 * @ap: ATA channel to manipulate
1008 *
1009 * In most cases, just clear the interrupt and move on. However,
1010 * some cases require an eDMA reset, which is done right before
1011 * the COMRESET in mv_phy_reset(). The SERR case requires a
1012 * clear of pending errors in the SATA SERROR register. Finally,
1013 * if the port disabled DMA, update our cached copy to match.
1014 *
1015 * LOCKING:
1016 * Inherited from caller.
1017 */
1018static void mv_err_intr(struct ata_port *ap)
1019{
1020 void __iomem *port_mmio = mv_ap_base(ap);
1021 u32 edma_err_cause, serr = 0;
461 1022
462 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1023 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
463 1024
@@ -465,8 +1026,12 @@ static void mv_err_intr(struct ata_port *ap)
465 serr = scr_read(ap, SCR_ERROR); 1026 serr = scr_read(ap, SCR_ERROR);
466 scr_write_flush(ap, SCR_ERROR, serr); 1027 scr_write_flush(ap, SCR_ERROR, serr);
467 } 1028 }
468 DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n", 1029 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
469 ap->port_no, edma_err_cause, serr); 1030 struct mv_port_priv *pp = ap->private_data;
1031 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1032 }
1033 DPRINTK("ata%u: port error; EDMA err cause: 0x%08x "
1034 "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
470 1035
471 /* Clear EDMA now that SERR cleanup done */ 1036 /* Clear EDMA now that SERR cleanup done */
472 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1037 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -477,7 +1042,21 @@ static void mv_err_intr(struct ata_port *ap)
477 } 1042 }
478} 1043}
479 1044
480/* Handle any outstanding interrupts in a single SATAHC 1045/**
1046 * mv_host_intr - Handle all interrupts on the given host controller
1047 * @host_set: host specific structure
1048 * @relevant: port error bits relevant to this host controller
1049 * @hc: which host controller we're to look at
1050 *
1051 * Read then write clear the HC interrupt status then walk each
1052 * port connected to the HC and see if it needs servicing. Port
1053 * success ints are reported in the HC interrupt status reg, the
1054 * port error ints are reported in the higher level main
1055 * interrupt status register and thus are passed in via the
1056 * 'relevant' argument.
1057 *
1058 * LOCKING:
1059 * Inherited from caller.
481 */ 1060 */
482static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, 1061static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
483 unsigned int hc) 1062 unsigned int hc)
@@ -487,8 +1066,8 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
487 struct ata_port *ap; 1066 struct ata_port *ap;
488 struct ata_queued_cmd *qc; 1067 struct ata_queued_cmd *qc;
489 u32 hc_irq_cause; 1068 u32 hc_irq_cause;
490 int shift, port, port0, hard_port; 1069 int shift, port, port0, hard_port, handled;
491 u8 ata_status; 1070 u8 ata_status = 0;
492 1071
493 if (hc == 0) { 1072 if (hc == 0) {
494 port0 = 0; 1073 port0 = 0;
@@ -499,7 +1078,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
499 /* we'll need the HC success int register in most cases */ 1078 /* we'll need the HC success int register in most cases */
500 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); 1079 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
501 if (hc_irq_cause) { 1080 if (hc_irq_cause) {
502 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); 1081 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
503 } 1082 }
504 1083
505 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", 1084 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
@@ -508,35 +1087,38 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
508 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1087 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
509 ap = host_set->ports[port]; 1088 ap = host_set->ports[port];
510 hard_port = port & MV_PORT_MASK; /* range 0-3 */ 1089 hard_port = port & MV_PORT_MASK; /* range 0-3 */
511 ata_status = 0xffU; 1090 handled = 0; /* ensure ata_status is set if handled++ */
512 1091
513 if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) { 1092 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
514 BUG_ON(NULL == ap); 1093 /* new CRPB on the queue; just one at a time until NCQ
515 /* rcv'd new resp, basic DMA complete, or ATA IRQ */ 1094 */
516 /* This is needed to clear the ATA INTRQ. 1095 ata_status = mv_get_crpb_status(ap);
517 * FIXME: don't read the status reg in EDMA mode! 1096 handled++;
1097 } else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1098 /* received ATA IRQ; read the status reg to clear INTRQ
518 */ 1099 */
519 ata_status = readb((void __iomem *) 1100 ata_status = readb((void __iomem *)
520 ap->ioaddr.status_addr); 1101 ap->ioaddr.status_addr);
1102 handled++;
521 } 1103 }
522 1104
523 shift = port * 2; 1105 shift = port << 1; /* (port * 2) */
524 if (port >= MV_PORTS_PER_HC) { 1106 if (port >= MV_PORTS_PER_HC) {
525 shift++; /* skip bit 8 in the HC Main IRQ reg */ 1107 shift++; /* skip bit 8 in the HC Main IRQ reg */
526 } 1108 }
527 if ((PORT0_ERR << shift) & relevant) { 1109 if ((PORT0_ERR << shift) & relevant) {
528 mv_err_intr(ap); 1110 mv_err_intr(ap);
529 /* FIXME: smart to OR in ATA_ERR? */ 1111 /* OR in ATA_ERR to ensure libata knows we took one */
530 ata_status = readb((void __iomem *) 1112 ata_status = readb((void __iomem *)
531 ap->ioaddr.status_addr) | ATA_ERR; 1113 ap->ioaddr.status_addr) | ATA_ERR;
1114 handled++;
532 } 1115 }
533 1116
534 if (ap) { 1117 if (handled && ap) {
535 qc = ata_qc_from_tag(ap, ap->active_tag); 1118 qc = ata_qc_from_tag(ap, ap->active_tag);
536 if (NULL != qc) { 1119 if (NULL != qc) {
537 VPRINTK("port %u IRQ found for qc, " 1120 VPRINTK("port %u IRQ found for qc, "
538 "ata_status 0x%x\n", port,ata_status); 1121 "ata_status 0x%x\n", port,ata_status);
539 BUG_ON(0xffU == ata_status);
540 /* mark qc status appropriately */ 1122 /* mark qc status appropriately */
541 ata_qc_complete(qc, ata_status); 1123 ata_qc_complete(qc, ata_status);
542 } 1124 }
@@ -545,17 +1127,30 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
545 VPRINTK("EXIT\n"); 1127 VPRINTK("EXIT\n");
546} 1128}
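
Note the change from writelfl(0, ...) to writelfl(~hc_irq_cause, ...) when clearing the HC cause register: clearing only the bits just observed means events that arrive between the read and the write are not silently discarded. The 'shift' arithmetic encodes the main-cause layout, where each port owns a done/err bit pair and bit 8 is skipped; the same computation as a hypothetical helper:

static int example_err_shift(int port)
{
	int shift = port << 1;		/* two bits per port */

	if (port >= MV_PORTS_PER_HC)
		shift++;		/* bit 8 is not a port bit */
	return shift;			/* test with PORT0_ERR << shift */
}
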
547 1129
1130/**
1131 * mv_interrupt - Main interrupt event handler
1132 * @irq: unused
1133 * @dev_instance: private data; in this case the host structure
1134 * @regs: unused
1135 *
1136 * Read the read-only register to determine if any host
1137 * controllers have pending interrupts. If so, call lower level
1138 * routine to handle. Also check for PCI errors which are only
1139 * reported here.
1140 *
1141 * LOCKING:
1142 * This routine holds the host_set lock while processing pending
1143 * interrupts.
1144 */
548static irqreturn_t mv_interrupt(int irq, void *dev_instance, 1145static irqreturn_t mv_interrupt(int irq, void *dev_instance,
549 struct pt_regs *regs) 1146 struct pt_regs *regs)
550{ 1147{
551 struct ata_host_set *host_set = dev_instance; 1148 struct ata_host_set *host_set = dev_instance;
552 unsigned int hc, handled = 0, n_hcs; 1149 unsigned int hc, handled = 0, n_hcs;
553 void __iomem *mmio; 1150 void __iomem *mmio = host_set->mmio_base;
554 u32 irq_stat; 1151 u32 irq_stat;
555 1152
556 mmio = host_set->mmio_base;
557 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); 1153 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
558 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
559 1154
560 /* check the cases where we either have nothing pending or have read 1155 /* check the cases where we either have nothing pending or have read
561 * a bogus register value which can indicate HW removal or PCI fault 1156 * a bogus register value which can indicate HW removal or PCI fault
@@ -564,64 +1159,105 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
564 return IRQ_NONE; 1159 return IRQ_NONE;
565 } 1160 }
566 1161
1162 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
567 spin_lock(&host_set->lock); 1163 spin_lock(&host_set->lock);
568 1164
569 for (hc = 0; hc < n_hcs; hc++) { 1165 for (hc = 0; hc < n_hcs; hc++) {
570 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); 1166 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
571 if (relevant) { 1167 if (relevant) {
572 mv_host_intr(host_set, relevant, hc); 1168 mv_host_intr(host_set, relevant, hc);
573 handled = 1; 1169 handled++;
574 } 1170 }
575 } 1171 }
576 if (PCI_ERR & irq_stat) { 1172 if (PCI_ERR & irq_stat) {
577 /* FIXME: these are all masked by default, but still need 1173 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
578 * to recover from them properly. 1174 readl(mmio + PCI_IRQ_CAUSE_OFS));
579 */
580 }
581 1175
1176 DPRINTK("All regs @ PCI error\n");
1177 mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
1178
1179 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1180 handled++;
1181 }
582 spin_unlock(&host_set->lock); 1182 spin_unlock(&host_set->lock);
583 1183
584 return IRQ_RETVAL(handled); 1184 return IRQ_RETVAL(handled);
585} 1185}
586 1186
1187/**
1188 * mv_check_err - Return the error shadow register to caller.
1189 * @ap: ATA channel to manipulate
1190 *
1191 * Marvell requires DMA to be stopped before accessing shadow
1192 * registers. So we do that, then return the needed register.
1193 *
1194 * LOCKING:
1195 * Inherited from caller. FIXME: protect mv_stop_dma with lock?
1196 */
1197static u8 mv_check_err(struct ata_port *ap)
1198{
1199 mv_stop_dma(ap); /* can't read shadow regs if DMA on */
1200 return readb((void __iomem *) ap->ioaddr.error_addr);
1201}
1202
1203/**
1204 * mv_phy_reset - Perform eDMA reset followed by COMRESET
1205 * @ap: ATA channel to manipulate
1206 *
1207 * Part of this is taken from __sata_phy_reset and modified to
1208 * not sleep since this routine gets called from interrupt level.
1209 *
1210 * LOCKING:
1211 * Inherited from caller. This is coded to be safe to call at
1212 * interrupt level, i.e. it does not sleep.
1213 */
587static void mv_phy_reset(struct ata_port *ap) 1214static void mv_phy_reset(struct ata_port *ap)
588{ 1215{
589 void __iomem *port_mmio = mv_ap_base(ap); 1216 void __iomem *port_mmio = mv_ap_base(ap);
590 struct ata_taskfile tf; 1217 struct ata_taskfile tf;
591 struct ata_device *dev = &ap->device[0]; 1218 struct ata_device *dev = &ap->device[0];
592 u32 edma = 0, bdma; 1219 unsigned long timeout;
593 1220
594 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); 1221 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
595 1222
596 edma = readl(port_mmio + EDMA_CMD_OFS); 1223 mv_stop_dma(ap);
597 if (EDMA_EN & edma) {
598 /* disable EDMA if active */
599 edma &= ~EDMA_EN;
600 writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS);
601 udelay(1);
602 } else if (mv_port_bdma_capable(ap) &&
603 (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) {
604 /* disable BDMA if active */
605 writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS);
606 }
607 1224
608 writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS); 1225 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
609 udelay(25); /* allow reset propagation */ 1226 udelay(25); /* allow reset propagation */
610 1227
611 /* Spec never mentions clearing the bit. Marvell's driver does 1228 /* Spec never mentions clearing the bit. Marvell's driver does
612 * clear the bit, however. 1229 * clear the bit, however.
613 */ 1230 */
614 writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS); 1231 writelfl(0, port_mmio + EDMA_CMD_OFS);
615 1232
616 VPRINTK("Done. Now calling __sata_phy_reset()\n"); 1233 VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1234 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1235 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
617 1236
618 /* proceed to init communications via the scr_control reg */ 1237 /* proceed to init communications via the scr_control reg */
619 __sata_phy_reset(ap); 1238 scr_write_flush(ap, SCR_CONTROL, 0x301);
1239 mdelay(1);
1240 scr_write_flush(ap, SCR_CONTROL, 0x300);
1241 timeout = jiffies + (HZ * 1);
1242 do {
1243 mdelay(10);
1244 if ((scr_read(ap, SCR_STATUS) & 0xf) != 1)
1245 break;
1246 } while (time_before(jiffies, timeout));
620 1247
621 if (ap->flags & ATA_FLAG_PORT_DISABLED) { 1248 VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
622 VPRINTK("Port disabled pre-sig. Exiting.\n"); 1249 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1250 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1251
1252 if (sata_dev_present(ap)) {
1253 ata_port_probe(ap);
1254 } else {
1255 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1256 ap->id, scr_read(ap, SCR_STATUS));
1257 ata_port_disable(ap);
623 return; 1258 return;
624 } 1259 }
1260 ap->cbl = ATA_CBL_SATA;
625 1261
626 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); 1262 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
627 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); 1263 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
@@ -636,37 +1272,118 @@ static void mv_phy_reset(struct ata_port *ap)
636 VPRINTK("EXIT\n"); 1272 VPRINTK("EXIT\n");
637} 1273}
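
The SControl writes are the usual COMRESET handshake done without sleeping: 0x301 (DET=1) asserts the reset, 0x300 releases it, and the loop then polls until SStatus.DET leaves state 1. Per the SATA spec, DET 0x0 means no device, 0x1 device detected but phy communication not yet established, 0x3 device present with communication up, and 0x4 phy offline; sata_dev_present() accepts only 0x3. The final check as an illustrative predicate:

static int example_link_established(struct ata_port *ap)
{
	/* SStatus.DET == 0x3: device detected, phy communication up */
	return (scr_read(ap, SCR_STATUS) & 0xf) == 0x3;
}
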
638 1274
639static void mv_port_init(struct ata_ioports *port, unsigned long base) 1275/**
1276 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
1277 * @ap: ATA channel to manipulate
1278 *
1279 * Intent is to clear all pending error conditions, reset the
1280 * chip/bus, fail the command, and move on.
1281 *
1282 * LOCKING:
1283 * This routine holds the host_set lock while failing the command.
1284 */
1285static void mv_eng_timeout(struct ata_port *ap)
1286{
1287 struct ata_queued_cmd *qc;
1288 unsigned long flags;
1289
1290 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1291 DPRINTK("All regs @ start of eng_timeout\n");
1292 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
1293 to_pci_dev(ap->host_set->dev));
1294
1295 qc = ata_qc_from_tag(ap, ap->active_tag);
1296 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
1297 ap->host_set->mmio_base, ap, qc, qc->scsicmd,
1298 &qc->scsicmd->cmnd);
1299
1300 mv_err_intr(ap);
1301 mv_phy_reset(ap);
1302
1303 if (!qc) {
1304 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
1305 ap->id);
1306 } else {
1307 /* hack alert! We cannot use the supplied completion
1308 * function from inside the ->eh_strategy_handler() thread.
1309 * libata is the only user of ->eh_strategy_handler() in
1310 * any kernel, so the default scsi_done() assumes it is
1311 * not being called from the SCSI EH.
1312 */
1313 spin_lock_irqsave(&ap->host_set->lock, flags);
1314 qc->scsidone = scsi_finish_command;
1315 ata_qc_complete(qc, ATA_ERR);
1316 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1317 }
1318}
1319
1320/**
1321 * mv_port_init - Perform some early initialization on a single port.
1322 * @port: libata data structure storing shadow register addresses
1323 * @port_mmio: base address of the port
1324 *
1325 * Initialize shadow register mmio addresses, clear outstanding
1326 * interrupts on the port, and unmask interrupts for the future
1327 * start of the port.
1328 *
1329 * LOCKING:
1330 * Inherited from caller.
1331 */
1332static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
640{ 1333{
641 /* PIO related setup */ 1334 unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
642 port->data_addr = base + SHD_PIO_DATA_OFS; 1335 unsigned serr_ofs;
643 port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS; 1336
644 port->nsect_addr = base + SHD_SECT_CNT_OFS; 1337 /* PIO related setup
645 port->lbal_addr = base + SHD_LBA_L_OFS; 1338 */
646 port->lbam_addr = base + SHD_LBA_M_OFS; 1339 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
647 port->lbah_addr = base + SHD_LBA_H_OFS; 1340 port->error_addr =
648 port->device_addr = base + SHD_DEV_HD_OFS; 1341 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
649 port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS; 1342 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
650 port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS; 1343 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
651 /* unused */ 1344 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
1345 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
1346 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
1347 port->status_addr =
1348 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
1349 /* special case: control/altstatus doesn't have ATA_REG_ address */
1350 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
1351
1352 /* unused: */
652 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; 1353 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
653 1354
1355 /* Clear any currently outstanding port interrupt conditions */
1356 serr_ofs = mv_scr_offset(SCR_ERROR);
1357 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
1358 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1359
654 /* unmask all EDMA error interrupts */ 1360 /* unmask all EDMA error interrupts */
655 writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS); 1361 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
656 1362
657 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 1363 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
658 readl((void __iomem *)base + EDMA_CFG_OFS), 1364 readl(port_mmio + EDMA_CFG_OFS),
659 readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS), 1365 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
660 readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS)); 1366 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
661} 1367}
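
The rewrite replaces eight hard-coded shadow-register offsets with one formula: each byte-wide taskfile register occupies its own 32-bit word, so its address is SHD_BLK_OFS + 4 * register index. Spot check, assuming ATA_REG_NSECT == 0x02 as in <linux/ata.h>: 0x100 + 4 * 2 = 0x108, which matches the SHD_SECT_CNT_OFS constant deleted above.
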
662 1368
1369/**
1370 * mv_host_init - Perform some early initialization of the host.
1371 * @probe_ent: early data struct representing the host
1372 *
1373 * If possible, do an early global reset of the host. Then do
1374 * our port init and clear/unmask all/relevant host interrupts.
1375 *
1376 * LOCKING:
1377 * Inherited from caller.
1378 */
663static int mv_host_init(struct ata_probe_ent *probe_ent) 1379static int mv_host_init(struct ata_probe_ent *probe_ent)
664{ 1380{
665 int rc = 0, n_hc, port, hc; 1381 int rc = 0, n_hc, port, hc;
666 void __iomem *mmio = probe_ent->mmio_base; 1382 void __iomem *mmio = probe_ent->mmio_base;
667 void __iomem *port_mmio; 1383 void __iomem *port_mmio;
668 1384
669 if (mv_master_reset(probe_ent->mmio_base)) { 1385 if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) &&
1386 mv_global_soft_reset(probe_ent->mmio_base)) {
670 rc = 1; 1387 rc = 1;
671 goto done; 1388 goto done;
672 } 1389 }
@@ -676,17 +1393,27 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
676 1393
677 for (port = 0; port < probe_ent->n_ports; port++) { 1394 for (port = 0; port < probe_ent->n_ports; port++) {
678 port_mmio = mv_port_base(mmio, port); 1395 port_mmio = mv_port_base(mmio, port);
679 mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio); 1396 mv_port_init(&probe_ent->port[port], port_mmio);
680 } 1397 }
681 1398
682 for (hc = 0; hc < n_hc; hc++) { 1399 for (hc = 0; hc < n_hc; hc++) {
683 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc, 1400 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
684 readl(mv_hc_base(mmio, hc) + HC_CFG_OFS), 1401
685 readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS)); 1402 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
1403 "(before clear)=0x%08x\n", hc,
1404 readl(hc_mmio + HC_CFG_OFS),
1405 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
1406
1407 /* Clear any currently outstanding hc interrupt conditions */
1408 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
686 } 1409 }
687 1410
688 writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); 1411 /* Clear any currently outstanding host interrupt conditions */
689 writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); 1412 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1413
1414 /* and unmask interrupt generation for host regs */
1415 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
1416 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
690 1417
691 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " 1418 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
692 "PCI int cause/mask=0x%08x/0x%08x\n", 1419 "PCI int cause/mask=0x%08x/0x%08x\n",
@@ -694,11 +1421,53 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
694 readl(mmio + HC_MAIN_IRQ_MASK_OFS), 1421 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
695 readl(mmio + PCI_IRQ_CAUSE_OFS), 1422 readl(mmio + PCI_IRQ_CAUSE_OFS),
696 readl(mmio + PCI_IRQ_MASK_OFS)); 1423 readl(mmio + PCI_IRQ_MASK_OFS));
697 1424done:
698 done:
699 return rc; 1425 return rc;
700} 1426}
701 1427
1428/**
1429 * mv_print_info - Dump key info to kernel log for perusal.
1430 * @probe_ent: early data struct representing the host
1431 *
1432 * FIXME: complete this.
1433 *
1434 * LOCKING:
1435 * Inherited from caller.
1436 */
1437static void mv_print_info(struct ata_probe_ent *probe_ent)
1438{
1439 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1440 struct mv_host_priv *hpriv = probe_ent->private_data;
1441 u8 rev_id, scc;
1442 const char *scc_s;
1443
1444 /* Use this to determine the HW stepping of the chip so we know
1445 * what errata to work around
1446 */
1447 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1448
1449 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
1450 if (scc == 0)
1451 scc_s = "SCSI";
1452 else if (scc == 0x01)
1453 scc_s = "RAID";
1454 else
1455 scc_s = "unknown";
1456
1457 printk(KERN_INFO DRV_NAME
1458 "(%s) %u slots %u ports %s mode IRQ via %s\n",
1459 pci_name(pdev), (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
1460 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
1461}
1462
1463/**
1464 * mv_init_one - handle a positive probe of a Marvell host
1465 * @pdev: PCI device found
1466 * @ent: PCI device ID entry for the matched host
1467 *
1468 * LOCKING:
1469 * Inherited from caller.
1470 */
702static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1471static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
703{ 1472{
704 static int printed_version = 0; 1473 static int printed_version = 0;
@@ -706,16 +1475,12 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
706 struct mv_host_priv *hpriv; 1475 struct mv_host_priv *hpriv;
707 unsigned int board_idx = (unsigned int)ent->driver_data; 1476 unsigned int board_idx = (unsigned int)ent->driver_data;
708 void __iomem *mmio_base; 1477 void __iomem *mmio_base;
709 int pci_dev_busy = 0; 1478 int pci_dev_busy = 0, rc;
710 int rc;
711 1479
712 if (!printed_version++) { 1480 if (!printed_version++) {
713 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 1481 printk(KERN_INFO DRV_NAME " version " DRV_VERSION "\n");
714 } 1482 }
715 1483
716 VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number,
717 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
718
719 rc = pci_enable_device(pdev); 1484 rc = pci_enable_device(pdev);
720 if (rc) { 1485 if (rc) {
721 return rc; 1486 return rc;
@@ -727,8 +1492,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
727 goto err_out; 1492 goto err_out;
728 } 1493 }
729 1494
730 pci_intx(pdev, 1);
731
732 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 1495 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
733 if (probe_ent == NULL) { 1496 if (probe_ent == NULL) {
734 rc = -ENOMEM; 1497 rc = -ENOMEM;
@@ -739,8 +1502,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
739 probe_ent->dev = pci_dev_to_dev(pdev); 1502 probe_ent->dev = pci_dev_to_dev(pdev);
740 INIT_LIST_HEAD(&probe_ent->node); 1503 INIT_LIST_HEAD(&probe_ent->node);
741 1504
742 mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR), 1505 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
743 pci_resource_len(pdev, MV_PRIMARY_BAR));
744 if (mmio_base == NULL) { 1506 if (mmio_base == NULL) {
745 rc = -ENOMEM; 1507 rc = -ENOMEM;
746 goto err_out_free_ent; 1508 goto err_out_free_ent;
@@ -769,37 +1531,40 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
769 if (rc) { 1531 if (rc) {
770 goto err_out_hpriv; 1532 goto err_out_hpriv;
771 } 1533 }
772/* mv_print_info(probe_ent); */
773 1534
774 { 1535 /* Enable interrupts */
775 int b, w; 1536 if (pci_enable_msi(pdev) == 0) {
776 u32 dw[4]; /* hold a line of 16b */ 1537 hpriv->hp_flags |= MV_HP_FLAG_MSI;
777 VPRINTK("PCI config space:\n"); 1538 } else {
778 for (b = 0; b < 0x40; ) { 1539 pci_intx(pdev, 1);
779 for (w = 0; w < 4; w++) {
780 (void) pci_read_config_dword(pdev,b,&dw[w]);
781 b += sizeof(*dw);
782 }
783 VPRINTK("%08x %08x %08x %08x\n",
784 dw[0],dw[1],dw[2],dw[3]);
785 }
786 } 1540 }
787 1541
788 /* FIXME: check ata_device_add return value */ 1542 mv_dump_pci_cfg(pdev, 0x68);
789 ata_device_add(probe_ent); 1543 mv_print_info(probe_ent);
790 kfree(probe_ent); 1544
1545 if (ata_device_add(probe_ent) == 0) {
1546 rc = -ENODEV; /* No devices discovered */
1547 goto err_out_dev_add;
1548 }
791 1549
1550 kfree(probe_ent);
792 return 0; 1551 return 0;
793 1552
794 err_out_hpriv: 1553err_out_dev_add:
1554 if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
1555 pci_disable_msi(pdev);
1556 } else {
1557 pci_intx(pdev, 0);
1558 }
1559err_out_hpriv:
795 kfree(hpriv); 1560 kfree(hpriv);
796 err_out_iounmap: 1561err_out_iounmap:
797 iounmap(mmio_base); 1562 pci_iounmap(pdev, mmio_base);
798 err_out_free_ent: 1563err_out_free_ent:
799 kfree(probe_ent); 1564 kfree(probe_ent);
800 err_out_regions: 1565err_out_regions:
801 pci_release_regions(pdev); 1566 pci_release_regions(pdev);
802 err_out: 1567err_out:
803 if (!pci_dev_busy) { 1568 if (!pci_dev_busy) {
804 pci_disable_device(pdev); 1569 pci_disable_device(pdev);
805 } 1570 }
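
Two probe-path patterns above are worth calling out. Interrupt setup tries MSI and falls back to legacy INTx, recording the outcome in hp_flags so that teardown (both mv_host_stop() and the error path) mirrors whichever mode was enabled. The error handling itself is the canonical goto ladder: each label undoes exactly one acquisition, in reverse order. A shape-only sketch with hypothetical names:

static int example_probe(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, "example");
	if (rc)
		goto err_disable;
	if (pci_enable_msi(pdev))	/* MSI denied: fall back to INTx */
		pci_intx(pdev, 1);
	return 0;

err_disable:
	pci_disable_device(pdev);
	return rc;
}
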
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index cb832b03ec5e..1a56d6c79ddd 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -238,7 +238,7 @@ static Scsi_Host_Template nv_sht = {
238 .ordered_flush = 1, 238 .ordered_flush = 1,
239}; 239};
240 240
241static struct ata_port_operations nv_ops = { 241static const struct ata_port_operations nv_ops = {
242 .port_disable = ata_port_disable, 242 .port_disable = ata_port_disable,
243 .tf_load = ata_tf_load, 243 .tf_load = ata_tf_load,
244 .tf_read = ata_tf_read, 244 .tf_read = ata_tf_read,
@@ -331,7 +331,7 @@ static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
331 return 0xffffffffU; 331 return 0xffffffffU;
332 332
333 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) 333 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
334 return readl((void*)ap->ioaddr.scr_addr + (sc_reg * 4)); 334 return readl((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
335 else 335 else
336 return inl(ap->ioaddr.scr_addr + (sc_reg * 4)); 336 return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
337} 337}
@@ -345,7 +345,7 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
345 return; 345 return;
346 346
347 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) 347 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
348 writel(val, (void*)ap->ioaddr.scr_addr + (sc_reg * 4)); 348 writel(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
349 else 349 else
350 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 350 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
351} 351}
@@ -405,7 +405,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
405 rc = -ENOMEM; 405 rc = -ENOMEM;
406 406
407 ppi = &nv_port_info; 407 ppi = &nv_port_info;
408 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 408 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
409 if (!probe_ent) 409 if (!probe_ent)
410 goto err_out_regions; 410 goto err_out_regions;
411 411
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 538ad727bd2e..eee93b0016df 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -87,8 +87,8 @@ static void pdc_port_stop(struct ata_port *ap);
87static void pdc_pata_phy_reset(struct ata_port *ap); 87static void pdc_pata_phy_reset(struct ata_port *ap);
88static void pdc_sata_phy_reset(struct ata_port *ap); 88static void pdc_sata_phy_reset(struct ata_port *ap);
89static void pdc_qc_prep(struct ata_queued_cmd *qc); 89static void pdc_qc_prep(struct ata_queued_cmd *qc);
90static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf); 90static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
91static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf); 91static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
92static void pdc_irq_clear(struct ata_port *ap); 92static void pdc_irq_clear(struct ata_port *ap);
93static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 93static int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
94 94
@@ -113,7 +113,7 @@ static Scsi_Host_Template pdc_ata_sht = {
113 .ordered_flush = 1, 113 .ordered_flush = 1,
114}; 114};
115 115
116static struct ata_port_operations pdc_sata_ops = { 116static const struct ata_port_operations pdc_sata_ops = {
117 .port_disable = ata_port_disable, 117 .port_disable = ata_port_disable,
118 .tf_load = pdc_tf_load_mmio, 118 .tf_load = pdc_tf_load_mmio,
119 .tf_read = ata_tf_read, 119 .tf_read = ata_tf_read,
@@ -136,7 +136,7 @@ static struct ata_port_operations pdc_sata_ops = {
136 .host_stop = ata_pci_host_stop, 136 .host_stop = ata_pci_host_stop,
137}; 137};
138 138
139static struct ata_port_operations pdc_pata_ops = { 139static const struct ata_port_operations pdc_pata_ops = {
140 .port_disable = ata_port_disable, 140 .port_disable = ata_port_disable,
141 .tf_load = pdc_tf_load_mmio, 141 .tf_load = pdc_tf_load_mmio,
142 .tf_read = ata_tf_read, 142 .tf_read = ata_tf_read,
@@ -324,7 +324,7 @@ static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
324{ 324{
325 if (sc_reg > SCR_CONTROL) 325 if (sc_reg > SCR_CONTROL)
326 return 0xffffffffU; 326 return 0xffffffffU;
327 return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 327 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
328} 328}
329 329
330 330
@@ -333,7 +333,7 @@ static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
333{ 333{
334 if (sc_reg > SCR_CONTROL) 334 if (sc_reg > SCR_CONTROL)
335 return; 335 return;
336 writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 336 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
337} 337}
338 338
339static void pdc_qc_prep(struct ata_queued_cmd *qc) 339static void pdc_qc_prep(struct ata_queued_cmd *qc)
@@ -438,11 +438,11 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
438 break; 438 break;
439 439
440 default: 440 default:
441 ap->stats.idle_irq++; 441 ap->stats.idle_irq++;
442 break; 442 break;
443 } 443 }
444 444
445 return handled; 445 return handled;
446} 446}
447 447
448static void pdc_irq_clear(struct ata_port *ap) 448static void pdc_irq_clear(struct ata_port *ap)
@@ -523,8 +523,8 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
523 523
524 pp->pkt[2] = seq; 524 pp->pkt[2] = seq;
525 wmb(); /* flush PRD, pkt writes */ 525 wmb(); /* flush PRD, pkt writes */
526 writel(pp->pkt_dma, (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 526 writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
527 readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 527 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
528} 528}
529 529
530static int pdc_qc_issue_prot(struct ata_queued_cmd *qc) 530static int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
@@ -546,7 +546,7 @@ static int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
546 return ata_qc_issue_prot(qc); 546 return ata_qc_issue_prot(qc);
547} 547}
548 548
549static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) 549static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
550{ 550{
551 WARN_ON (tf->protocol == ATA_PROT_DMA || 551 WARN_ON (tf->protocol == ATA_PROT_DMA ||
552 tf->protocol == ATA_PROT_NODATA); 552 tf->protocol == ATA_PROT_NODATA);
@@ -554,7 +554,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
554} 554}
555 555
556 556
557static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) 557static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
558{ 558{
559 WARN_ON (tf->protocol == ATA_PROT_DMA || 559 WARN_ON (tf->protocol == ATA_PROT_DMA ||
560 tf->protocol == ATA_PROT_NODATA); 560 tf->protocol == ATA_PROT_NODATA);
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index ffcdeb68641c..250dafa6bc36 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -51,8 +51,6 @@ enum {
51 QS_PRD_BYTES = QS_MAX_PRD * 16, 51 QS_PRD_BYTES = QS_MAX_PRD * 16,
52 QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES, 52 QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES,
53 53
54 QS_DMA_BOUNDARY = ~0UL,
55
56 /* global register offsets */ 54 /* global register offsets */
57 QS_HCF_CNFG3 = 0x0003, /* host configuration offset */ 55 QS_HCF_CNFG3 = 0x0003, /* host configuration offset */
58 QS_HID_HPHY = 0x0004, /* host physical interface info */ 56 QS_HID_HPHY = 0x0004, /* host physical interface info */
@@ -101,6 +99,10 @@ enum {
101 board_2068_idx = 0, /* QStor 4-port SATA/RAID */ 99 board_2068_idx = 0, /* QStor 4-port SATA/RAID */
102}; 100};
103 101
102enum {
103 QS_DMA_BOUNDARY = ~0UL
104};
105
104typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t; 106typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t;
105 107
106struct qs_port_priv { 108struct qs_port_priv {
@@ -145,7 +147,7 @@ static Scsi_Host_Template qs_ata_sht = {
145 .bios_param = ata_std_bios_param, 147 .bios_param = ata_std_bios_param,
146}; 148};
147 149
148static struct ata_port_operations qs_ata_ops = { 150static const struct ata_port_operations qs_ata_ops = {
149 .port_disable = ata_port_disable, 151 .port_disable = ata_port_disable,
150 .tf_load = ata_tf_load, 152 .tf_load = ata_tf_load,
151 .tf_read = ata_tf_read, 153 .tf_read = ata_tf_read,
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index ba98a175ee3a..3a056173fb95 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -150,7 +150,7 @@ static Scsi_Host_Template sil_sht = {
150 .ordered_flush = 1, 150 .ordered_flush = 1,
151}; 151};
152 152
153static struct ata_port_operations sil_ops = { 153static const struct ata_port_operations sil_ops = {
154 .port_disable = ata_port_disable, 154 .port_disable = ata_port_disable,
155 .dev_config = sil_dev_config, 155 .dev_config = sil_dev_config,
156 .tf_load = ata_tf_load, 156 .tf_load = ata_tf_load,
@@ -289,7 +289,7 @@ static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_re
289 289
290static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg) 290static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
291{ 291{
292 void *mmio = (void *) sil_scr_addr(ap, sc_reg); 292 void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
293 if (mmio) 293 if (mmio)
294 return readl(mmio); 294 return readl(mmio);
295 return 0xffffffffU; 295 return 0xffffffffU;
@@ -297,7 +297,7 @@ static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
297 297
298static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) 298static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
299{ 299{
300 void *mmio = (void *) sil_scr_addr(ap, sc_reg); 300 void *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
301 if (mmio) 301 if (mmio)
302 writel(val, mmio); 302 writel(val, mmio);
303} 303}
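The recurring (void __iomem *) conversions in this and the surrounding hunks are sparse annotations: an __iomem pointer must only be accessed through MMIO helpers such as readl()/writel(), never dereferenced directly. A small sketch of the convention (the function name is hypothetical; sil_scr_addr() is the driver's own):

	static u32 sil_scr_read_sketch(struct ata_port *ap, unsigned int sc_reg)
	{
		void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);

		/* readl() is the only legitimate access; *mmio would trip sparse */
		return mmio ? readl(mmio) : 0xffffffffU;
	}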
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
new file mode 100644
index 000000000000..32d730bd5bb6
--- /dev/null
+++ b/drivers/scsi/sata_sil24.c
@@ -0,0 +1,875 @@
1/*
2 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
3 *
4 * Copyright 2005 Tejun Heo
5 *
6 * Based on preview driver from Silicon Image.
7 *
8 * NOTE: No NCQ/ATAPI support yet. The preview driver didn't support
 9 * NCQ or ATAPI, and, unfortunately, I couldn't find out how to make
10 * those work. Enabling those shouldn't be difficult. Basic
11 * structure is all there (in libata-dev tree). If you have any
12 * information about this hardware, please contact me or linux-ide.
13 * Info is needed on...
14 *
15 * - How to issue tagged commands and turn on sactive on issue accordingly.
16 * - Where to put an ATAPI command and how to tell the device to send it.
17 * - How to enable/use 64bit.
18 *
19 * This program is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by the
21 * Free Software Foundation; either version 2, or (at your option) any
22 * later version.
23 *
24 * This program is distributed in the hope that it will be useful, but
25 * WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27 * General Public License for more details.
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/blkdev.h>
35#include <linux/delay.h>
36#include <linux/interrupt.h>
37#include <linux/dma-mapping.h>
38#include <scsi/scsi_host.h>
39#include "scsi.h"
40#include <linux/libata.h>
41#include <asm/io.h>
42
43#define DRV_NAME "sata_sil24"
44#define DRV_VERSION "0.22" /* Silicon Image's preview driver was 0.10 */
45
46/*
47 * Port request block (PRB) 32 bytes
48 */
49struct sil24_prb {
50 u16 ctrl;
51 u16 prot;
52 u32 rx_cnt;
53 u8 fis[6 * 4];
54};
55
56/*
57 * Scatter gather entry (SGE) 16 bytes
58 */
59struct sil24_sge {
60 u64 addr;
61 u32 cnt;
62 u32 flags;
63};
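The hardware fixes the PRB at 32 bytes and each SGE at 16; a compile-time sketch (not part of the patch) that pins down those assumptions:

	static inline void sil24_check_layout(void)
	{
		BUILD_BUG_ON(sizeof(struct sil24_prb) != 32);	/* 2 + 2 + 4 + 24 */
		BUILD_BUG_ON(sizeof(struct sil24_sge) != 16);	/* 8 + 4 + 4 */
	}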
64
65/*
66 * Port multiplier
67 */
68struct sil24_port_multiplier {
69 u32 diag;
70 u32 sactive;
71};
72
73enum {
74 /*
75 * Global controller registers (128 bytes @ BAR0)
76 */
77 /* 32 bit regs */
78 HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
79 HOST_CTRL = 0x40,
80 HOST_IRQ_STAT = 0x44,
81 HOST_PHY_CFG = 0x48,
82 HOST_BIST_CTRL = 0x50,
83 HOST_BIST_PTRN = 0x54,
84 HOST_BIST_STAT = 0x58,
85 HOST_MEM_BIST_STAT = 0x5c,
86 HOST_FLASH_CMD = 0x70,
87 /* 8 bit regs */
88 HOST_FLASH_DATA = 0x74,
89 HOST_TRANSITION_DETECT = 0x75,
90 HOST_GPIO_CTRL = 0x76,
91 HOST_I2C_ADDR = 0x78, /* 32 bit */
92 HOST_I2C_DATA = 0x7c,
93 HOST_I2C_XFER_CNT = 0x7e,
94 HOST_I2C_CTRL = 0x7f,
95
96 /* HOST_SLOT_STAT bits */
97 HOST_SSTAT_ATTN = (1 << 31),
98
99 /*
100 * Port registers
101 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
102 */
103 PORT_REGS_SIZE = 0x2000,
104 PORT_PRB = 0x0000, /* (32 bytes PRB + 16 bytes SGEs * 6) * 31 (3968 bytes) */
105
106 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
107 /* 32 bit regs */
108 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
109 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
110 PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
111 PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
112 PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
113 PORT_ACTIVATE_UPPER_ADDR= 0x101c,
114 PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
115 PORT_CMD_ERR = 0x1024, /* command error number */
116 PORT_FIS_CFG = 0x1028,
117 PORT_FIFO_THRES = 0x102c,
118 /* 16 bit regs */
119 PORT_DECODE_ERR_CNT = 0x1040,
120 PORT_DECODE_ERR_THRESH = 0x1042,
121 PORT_CRC_ERR_CNT = 0x1044,
122 PORT_CRC_ERR_THRESH = 0x1046,
123 PORT_HSHK_ERR_CNT = 0x1048,
124 PORT_HSHK_ERR_THRESH = 0x104a,
125 /* 32 bit regs */
126 PORT_PHY_CFG = 0x1050,
127 PORT_SLOT_STAT = 0x1800,
128 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
129 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
130 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
131 PORT_SCONTROL = 0x1f00,
132 PORT_SSTATUS = 0x1f04,
133 PORT_SERROR = 0x1f08,
134 PORT_SACTIVE = 0x1f0c,
135
136 /* PORT_CTRL_STAT bits */
137 PORT_CS_PORT_RST = (1 << 0), /* port reset */
138 PORT_CS_DEV_RST = (1 << 1), /* device reset */
139 PORT_CS_INIT = (1 << 2), /* port initialize */
140 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
141 PORT_CS_RESUME = (1 << 6), /* port resume */
142 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
143 PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */
144 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
145
146 /* PORT_IRQ_STAT/ENABLE_SET/CLR */
147 /* bits[11:0] are masked */
148 PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
149 PORT_IRQ_ERROR = (1 << 1), /* command execution error */
150 PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
151 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
152 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
153 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
154 PORT_IRQ_UNK_FIS = (1 << 6), /* Unknown FIS received */
155 PORT_IRQ_SDB_FIS = (1 << 11), /* SDB FIS received */
156
157 /* bits[27:16] are unmasked (raw) */
158 PORT_IRQ_RAW_SHIFT = 16,
159 PORT_IRQ_MASKED_MASK = 0x7ff,
160 PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),
161
162 /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
163 PORT_IRQ_STEER_SHIFT = 30,
164 PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),
165
166 /* PORT_CMD_ERR constants */
167 PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
168 PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
169 PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
170 PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
171 PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
172 PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
173 PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
174 PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
175 PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
176 PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
177 PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
178 PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
179 PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
180 PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
181 PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
182 PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
183 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
184 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
185 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
186 PORT_CERR_XFR_MSGABRT = 34, /* PSD ecode 10 - master abort */
 187	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
188 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
189
190 /*
191 * Other constants
192 */
193 SGE_TRM = (1 << 31), /* Last SGE in chain */
194 PRB_SOFT_RST = (1 << 7), /* Soft reset request (ign BSY?) */
195
196 /* board id */
197 BID_SIL3124 = 0,
198 BID_SIL3132 = 1,
199 BID_SIL3131 = 2,
200
201 IRQ_STAT_4PORTS = 0xf,
202};
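Per the comments above, PORT_IRQ_STAT carries the maskable interrupt bits in its low half and raw status in bits 27:16. A sketch (helper name assumed) of splitting the register with these masks:

	static inline void sil24_split_irq_stat(u32 irq_stat, u32 *irq, u32 *raw)
	{
		*irq = irq_stat & PORT_IRQ_MASKED_MASK;
		*raw = (irq_stat & PORT_IRQ_RAW_MASK) >> PORT_IRQ_RAW_SHIFT;
	}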
203
204struct sil24_cmd_block {
205 struct sil24_prb prb;
206 struct sil24_sge sge[LIBATA_MAX_PRD];
207};
208
209/*
210 * ap->private_data
211 *
212 * The preview driver always returned 0 for status. We emulate it
213 * here from the previous interrupt.
214 */
215struct sil24_port_priv {
216 struct sil24_cmd_block *cmd_block; /* 32 cmd blocks */
217 dma_addr_t cmd_block_dma; /* DMA base addr for them */
218 struct ata_taskfile tf; /* Cached taskfile registers */
219};
220
221/* ap->host_set->private_data */
222struct sil24_host_priv {
223 void *host_base; /* global controller control (128 bytes @BAR0) */
224 void *port_base; /* port registers (4 * 8192 bytes @BAR2) */
225};
226
227static u8 sil24_check_status(struct ata_port *ap);
228static u8 sil24_check_err(struct ata_port *ap);
229static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
230static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
231static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
232static void sil24_phy_reset(struct ata_port *ap);
233static void sil24_qc_prep(struct ata_queued_cmd *qc);
234static int sil24_qc_issue(struct ata_queued_cmd *qc);
235static void sil24_irq_clear(struct ata_port *ap);
236static void sil24_eng_timeout(struct ata_port *ap);
237static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
238static int sil24_port_start(struct ata_port *ap);
239static void sil24_port_stop(struct ata_port *ap);
240static void sil24_host_stop(struct ata_host_set *host_set);
241static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
242
243static struct pci_device_id sil24_pci_tbl[] = {
244 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
245 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
246 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
247 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
248 { } /* terminate list */
249};
250
251static struct pci_driver sil24_pci_driver = {
252 .name = DRV_NAME,
253 .id_table = sil24_pci_tbl,
254 .probe = sil24_init_one,
255 .remove = ata_pci_remove_one, /* safe? */
256};
257
258static Scsi_Host_Template sil24_sht = {
259 .module = THIS_MODULE,
260 .name = DRV_NAME,
261 .ioctl = ata_scsi_ioctl,
262 .queuecommand = ata_scsi_queuecmd,
263 .eh_strategy_handler = ata_scsi_error,
264 .can_queue = ATA_DEF_QUEUE,
265 .this_id = ATA_SHT_THIS_ID,
266 .sg_tablesize = LIBATA_MAX_PRD,
267 .max_sectors = ATA_MAX_SECTORS,
268 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
269 .emulated = ATA_SHT_EMULATED,
270 .use_clustering = ATA_SHT_USE_CLUSTERING,
271 .proc_name = DRV_NAME,
272 .dma_boundary = ATA_DMA_BOUNDARY,
273 .slave_configure = ata_scsi_slave_config,
274 .bios_param = ata_std_bios_param,
275 .ordered_flush = 1, /* NCQ not supported yet */
276};
277
278static const struct ata_port_operations sil24_ops = {
279 .port_disable = ata_port_disable,
280
281 .check_status = sil24_check_status,
282 .check_altstatus = sil24_check_status,
283 .check_err = sil24_check_err,
284 .dev_select = ata_noop_dev_select,
285
286 .tf_read = sil24_tf_read,
287
288 .phy_reset = sil24_phy_reset,
289
290 .qc_prep = sil24_qc_prep,
291 .qc_issue = sil24_qc_issue,
292
293 .eng_timeout = sil24_eng_timeout,
294
295 .irq_handler = sil24_interrupt,
296 .irq_clear = sil24_irq_clear,
297
298 .scr_read = sil24_scr_read,
299 .scr_write = sil24_scr_write,
300
301 .port_start = sil24_port_start,
302 .port_stop = sil24_port_stop,
303 .host_stop = sil24_host_stop,
304};
305
306/*
307 * Use bits 30-31 of host_flags to encode available port numbers.
 308 * Current maximum is 4.
309 */
310#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
311#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
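An illustrative round-trip of the two macros (the selftest function is hypothetical):

	static int sil24_nports_selftest(void)
	{
		unsigned long flags = SIL24_NPORTS2FLAG(4);	/* encodes as 0x3 << 30 */
		return SIL24_FLAG2NPORTS(flags);		/* decodes back to 4 */
	}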
312
313static struct ata_port_info sil24_port_info[] = {
314 /* sil_3124 */
315 {
316 .sht = &sil24_sht,
317 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
318 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
319 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4),
320 .pio_mask = 0x1f, /* pio0-4 */
321 .mwdma_mask = 0x07, /* mwdma0-2 */
322 .udma_mask = 0x3f, /* udma0-5 */
323 .port_ops = &sil24_ops,
324 },
325 /* sil_3132 */
326 {
327 .sht = &sil24_sht,
328 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
329 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
330 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2),
331 .pio_mask = 0x1f, /* pio0-4 */
332 .mwdma_mask = 0x07, /* mwdma0-2 */
333 .udma_mask = 0x3f, /* udma0-5 */
334 .port_ops = &sil24_ops,
335 },
336 /* sil_3131/sil_3531 */
337 {
338 .sht = &sil24_sht,
339 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
340 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
341 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1),
342 .pio_mask = 0x1f, /* pio0-4 */
343 .mwdma_mask = 0x07, /* mwdma0-2 */
344 .udma_mask = 0x3f, /* udma0-5 */
345 .port_ops = &sil24_ops,
346 },
347};
348
349static inline void sil24_update_tf(struct ata_port *ap)
350{
351 struct sil24_port_priv *pp = ap->private_data;
352 void *port = (void *)ap->ioaddr.cmd_addr;
353 struct sil24_prb *prb = port;
354
355 ata_tf_from_fis(prb->fis, &pp->tf);
356}
357
358static u8 sil24_check_status(struct ata_port *ap)
359{
360 struct sil24_port_priv *pp = ap->private_data;
361 return pp->tf.command;
362}
363
364static u8 sil24_check_err(struct ata_port *ap)
365{
366 struct sil24_port_priv *pp = ap->private_data;
367 return pp->tf.feature;
368}
369
370static int sil24_scr_map[] = {
371 [SCR_CONTROL] = 0,
372 [SCR_STATUS] = 1,
373 [SCR_ERROR] = 2,
374 [SCR_ACTIVE] = 3,
375};
376
377static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
378{
379 void *scr_addr = (void *)ap->ioaddr.scr_addr;
380 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
381 void *addr;
382 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
 383		return readl(addr);
384 }
385 return 0xffffffffU;
386}
387
388static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
389{
390 void *scr_addr = (void *)ap->ioaddr.scr_addr;
391 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
392 void *addr;
393 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
 394		writel(val, addr);
395 }
396}
397
398static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
399{
400 struct sil24_port_priv *pp = ap->private_data;
401 *tf = pp->tf;
402}
403
404static void sil24_phy_reset(struct ata_port *ap)
405{
406 __sata_phy_reset(ap);
407 /*
408 * No ATAPI yet. Just unconditionally indicate ATA device.
409 * If ATAPI device is attached, it will fail ATA_CMD_ID_ATA
410 * and libata core will ignore the device.
411 */
412 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
413 ap->device[0].class = ATA_DEV_ATA;
414}
415
416static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
417 struct sil24_cmd_block *cb)
418{
419 struct scatterlist *sg = qc->sg;
420 struct sil24_sge *sge = cb->sge;
421 unsigned i;
422
423 for (i = 0; i < qc->n_elem; i++, sg++, sge++) {
424 sge->addr = cpu_to_le64(sg_dma_address(sg));
425 sge->cnt = cpu_to_le32(sg_dma_len(sg));
 426		/* terminate the chain on the final SGE */
 427		sge->flags = i < qc->n_elem - 1 ? 0 : cpu_to_le32(SGE_TRM);
428 }
429}
430
431static void sil24_qc_prep(struct ata_queued_cmd *qc)
432{
433 struct ata_port *ap = qc->ap;
434 struct sil24_port_priv *pp = ap->private_data;
435 struct sil24_cmd_block *cb = pp->cmd_block + qc->tag;
436 struct sil24_prb *prb = &cb->prb;
437
438 switch (qc->tf.protocol) {
439 case ATA_PROT_PIO:
440 case ATA_PROT_DMA:
441 case ATA_PROT_NODATA:
442 break;
443 default:
444 /* ATAPI isn't supported yet */
445 BUG();
446 }
447
448 ata_tf_to_fis(&qc->tf, prb->fis, 0);
449
450 if (qc->flags & ATA_QCFLAG_DMAMAP)
451 sil24_fill_sg(qc, cb);
452}
453
454static int sil24_qc_issue(struct ata_queued_cmd *qc)
455{
456 struct ata_port *ap = qc->ap;
457 void *port = (void *)ap->ioaddr.cmd_addr;
458 struct sil24_port_priv *pp = ap->private_data;
459 dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block);
460
461 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
462 return 0;
463}
464
465static void sil24_irq_clear(struct ata_port *ap)
466{
467 /* unused */
468}
469
470static int __sil24_reset_controller(void *port)
471{
472 int cnt;
473 u32 tmp;
474
475 /* Reset controller state. Is this correct? */
476 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
477 readl(port + PORT_CTRL_STAT); /* sync */
478
479 /* Max ~100ms */
480 for (cnt = 0; cnt < 1000; cnt++) {
481 udelay(100);
482 tmp = readl(port + PORT_CTRL_STAT);
483 if (!(tmp & PORT_CS_DEV_RST))
484 break;
485 }
486
487 if (tmp & PORT_CS_DEV_RST)
488 return -1;
489 return 0;
490}
491
492static void sil24_reset_controller(struct ata_port *ap)
493{
494 printk(KERN_NOTICE DRV_NAME
495 " ata%u: resetting controller...\n", ap->id);
496 if (__sil24_reset_controller((void *)ap->ioaddr.cmd_addr))
497 printk(KERN_ERR DRV_NAME
498 " ata%u: failed to reset controller\n", ap->id);
499}
500
501static void sil24_eng_timeout(struct ata_port *ap)
502{
503 struct ata_queued_cmd *qc;
504
505 qc = ata_qc_from_tag(ap, ap->active_tag);
506 if (!qc) {
 507		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
508 ap->id);
509 return;
510 }
511
512 /*
513 * hack alert! We cannot use the supplied completion
514 * function from inside the ->eh_strategy_handler() thread.
515 * libata is the only user of ->eh_strategy_handler() in
516 * any kernel, so the default scsi_done() assumes it is
517 * not being called from the SCSI EH.
518 */
519 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
520 qc->scsidone = scsi_finish_command;
521 ata_qc_complete(qc, ATA_ERR);
522
523 sil24_reset_controller(ap);
524}
525
526static void sil24_error_intr(struct ata_port *ap, u32 slot_stat)
527{
528 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
529 struct sil24_port_priv *pp = ap->private_data;
530 void *port = (void *)ap->ioaddr.cmd_addr;
531 u32 irq_stat, cmd_err, sstatus, serror;
532
533 irq_stat = readl(port + PORT_IRQ_STAT);
534 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
535
536 if (!(irq_stat & PORT_IRQ_ERROR)) {
537 /* ignore non-completion, non-error irqs for now */
538 printk(KERN_WARNING DRV_NAME
 539		       " ata%u: non-error exception irq (irq_stat %x)\n",
540 ap->id, irq_stat);
541 return;
542 }
543
544 cmd_err = readl(port + PORT_CMD_ERR);
545 sstatus = readl(port + PORT_SSTATUS);
546 serror = readl(port + PORT_SERROR);
547 if (serror)
548 writel(serror, port + PORT_SERROR);
549
550 printk(KERN_ERR DRV_NAME " ata%u: error interrupt on port%d\n"
551 " stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n",
552 ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror);
553
554 if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
555 /*
556 * Device is reporting error, tf registers are valid.
557 */
558 sil24_update_tf(ap);
559 } else {
560 /*
561 * Other errors. libata currently doesn't have any
562 * mechanism to report these errors. Just turn on
563 * ATA_ERR.
564 */
565 pp->tf.command = ATA_ERR;
566 }
567
568 if (qc)
569 ata_qc_complete(qc, pp->tf.command);
570
571 sil24_reset_controller(ap);
572}
573
574static inline void sil24_host_intr(struct ata_port *ap)
575{
576 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
577 void *port = (void *)ap->ioaddr.cmd_addr;
578 u32 slot_stat;
579
580 slot_stat = readl(port + PORT_SLOT_STAT);
581 if (!(slot_stat & HOST_SSTAT_ATTN)) {
582 struct sil24_port_priv *pp = ap->private_data;
583 /*
 584		 * !HOST_SSTAT_ATTN guarantees successful completion,
585 * so reading back tf registers is unnecessary for
586 * most commands. TODO: read tf registers for
587 * commands which require these values on successful
588 * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
 589		 * DEVICE RESET and READ PORT MULTIPLIER (any more?)).
590 */
591 sil24_update_tf(ap);
592
593 if (qc)
594 ata_qc_complete(qc, pp->tf.command);
595 } else
596 sil24_error_intr(ap, slot_stat);
597}
598
599static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
600{
601 struct ata_host_set *host_set = dev_instance;
602 struct sil24_host_priv *hpriv = host_set->private_data;
603 unsigned handled = 0;
604 u32 status;
605 int i;
606
607 status = readl(hpriv->host_base + HOST_IRQ_STAT);
608
609 if (status == 0xffffffff) {
610 printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
611 "PCI fault or device removal?\n");
612 goto out;
613 }
614
615 if (!(status & IRQ_STAT_4PORTS))
616 goto out;
617
618 spin_lock(&host_set->lock);
619
620 for (i = 0; i < host_set->n_ports; i++)
621 if (status & (1 << i)) {
622 struct ata_port *ap = host_set->ports[i];
623 if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 624				sil24_host_intr(ap);
625 handled++;
626 } else
627 printk(KERN_ERR DRV_NAME
628 ": interrupt from disabled port %d\n", i);
629 }
630
631 spin_unlock(&host_set->lock);
632 out:
633 return IRQ_RETVAL(handled);
634}
635
636static int sil24_port_start(struct ata_port *ap)
637{
638 struct device *dev = ap->host_set->dev;
639 struct sil24_port_priv *pp;
640 struct sil24_cmd_block *cb;
641 size_t cb_size = sizeof(*cb);
642 dma_addr_t cb_dma;
643
644 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
645 if (!pp)
646 return -ENOMEM;
647 memset(pp, 0, sizeof(*pp));
648
649 pp->tf.command = ATA_DRDY;
650
651 cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
652 if (!cb) {
653 kfree(pp);
654 return -ENOMEM;
655 }
656 memset(cb, 0, cb_size);
657
658 pp->cmd_block = cb;
659 pp->cmd_block_dma = cb_dma;
660
661 ap->private_data = pp;
662
663 return 0;
664}
665
666static void sil24_port_stop(struct ata_port *ap)
667{
668 struct device *dev = ap->host_set->dev;
669 struct sil24_port_priv *pp = ap->private_data;
670 size_t cb_size = sizeof(*pp->cmd_block);
671
672 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
673 kfree(pp);
674}
675
676static void sil24_host_stop(struct ata_host_set *host_set)
677{
678 struct sil24_host_priv *hpriv = host_set->private_data;
679
680 iounmap(hpriv->host_base);
681 iounmap(hpriv->port_base);
682 kfree(hpriv);
683}
684
685static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
686{
687 static int printed_version = 0;
688 unsigned int board_id = (unsigned int)ent->driver_data;
689 struct ata_port_info *pinfo = &sil24_port_info[board_id];
690 struct ata_probe_ent *probe_ent = NULL;
691 struct sil24_host_priv *hpriv = NULL;
692 void *host_base = NULL, *port_base = NULL;
693 int i, rc;
694
695 if (!printed_version++)
696 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
697
698 rc = pci_enable_device(pdev);
699 if (rc)
700 return rc;
701
702 rc = pci_request_regions(pdev, DRV_NAME);
703 if (rc)
704 goto out_disable;
705
706 rc = -ENOMEM;
707 /* ioremap mmio registers */
708 host_base = ioremap(pci_resource_start(pdev, 0),
709 pci_resource_len(pdev, 0));
710 if (!host_base)
711 goto out_free;
712 port_base = ioremap(pci_resource_start(pdev, 2),
713 pci_resource_len(pdev, 2));
714 if (!port_base)
715 goto out_free;
716
717 /* allocate & init probe_ent and hpriv */
718 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
719 if (!probe_ent)
720 goto out_free;
721
722 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
723 if (!hpriv)
724 goto out_free;
725
726 memset(probe_ent, 0, sizeof(*probe_ent));
727 probe_ent->dev = pci_dev_to_dev(pdev);
728 INIT_LIST_HEAD(&probe_ent->node);
729
730 probe_ent->sht = pinfo->sht;
731 probe_ent->host_flags = pinfo->host_flags;
732 probe_ent->pio_mask = pinfo->pio_mask;
733 probe_ent->udma_mask = pinfo->udma_mask;
734 probe_ent->port_ops = pinfo->port_ops;
735 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags);
736
737 probe_ent->irq = pdev->irq;
738 probe_ent->irq_flags = SA_SHIRQ;
739 probe_ent->mmio_base = port_base;
740 probe_ent->private_data = hpriv;
741
742 memset(hpriv, 0, sizeof(*hpriv));
743 hpriv->host_base = host_base;
744 hpriv->port_base = port_base;
745
746 /*
747 * Configure the device
748 */
749 /*
750 * FIXME: This device is certainly 64-bit capable. We just
751 * don't know how to use it. After fixing 32bit activation in
752 * this function, enable 64bit masks here.
753 */
754 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
755 if (rc) {
756 printk(KERN_ERR DRV_NAME "(%s): 32-bit DMA enable failed\n",
757 pci_name(pdev));
758 goto out_free;
759 }
760 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
761 if (rc) {
762 printk(KERN_ERR DRV_NAME "(%s): 32-bit consistent DMA enable failed\n",
763 pci_name(pdev));
764 goto out_free;
765 }
766
767 /* GPIO off */
768 writel(0, host_base + HOST_FLASH_CMD);
769
770 /* Mask interrupts during initialization */
771 writel(0, host_base + HOST_CTRL);
772
773 for (i = 0; i < probe_ent->n_ports; i++) {
774 void *port = port_base + i * PORT_REGS_SIZE;
775 unsigned long portu = (unsigned long)port;
776 u32 tmp;
777 int cnt;
778
779 probe_ent->port[i].cmd_addr = portu + PORT_PRB;
780 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
781
782 ata_std_ports(&probe_ent->port[i]);
783
784 /* Initial PHY setting */
785 writel(0x20c, port + PORT_PHY_CFG);
786
787 /* Clear port RST */
788 tmp = readl(port + PORT_CTRL_STAT);
789 if (tmp & PORT_CS_PORT_RST) {
790 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
791 readl(port + PORT_CTRL_STAT); /* sync */
792 for (cnt = 0; cnt < 10; cnt++) {
793 msleep(10);
794 tmp = readl(port + PORT_CTRL_STAT);
795 if (!(tmp & PORT_CS_PORT_RST))
796 break;
797 }
798 if (tmp & PORT_CS_PORT_RST)
799 printk(KERN_ERR DRV_NAME
800 "(%s): failed to clear port RST\n",
801 pci_name(pdev));
802 }
803
804 /* Zero error counters. */
805 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
806 writel(0x8000, port + PORT_CRC_ERR_THRESH);
807 writel(0x8000, port + PORT_HSHK_ERR_THRESH);
808 writel(0x0000, port + PORT_DECODE_ERR_CNT);
809 writel(0x0000, port + PORT_CRC_ERR_CNT);
810 writel(0x0000, port + PORT_HSHK_ERR_CNT);
811
812 /* FIXME: 32bit activation? */
813 writel(0, port + PORT_ACTIVATE_UPPER_ADDR);
814 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_STAT);
815
816 /* Configure interrupts */
817 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
818 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_SDB_FIS,
819 port + PORT_IRQ_ENABLE_SET);
820
821 /* Clear interrupts */
822 writel(0x0fff0fff, port + PORT_IRQ_STAT);
823 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
824
825 /* Clear port multiplier enable and resume bits */
826 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
827
828 /* Reset itself */
829 if (__sil24_reset_controller(port))
830 printk(KERN_ERR DRV_NAME
831 "(%s): failed to reset controller\n",
832 pci_name(pdev));
833 }
834
835 /* Turn on interrupts */
836 writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
837
838 pci_set_master(pdev);
839
840 /* FIXME: check ata_device_add return value */
841 ata_device_add(probe_ent);
842
843 kfree(probe_ent);
844 return 0;
845
846 out_free:
847 if (host_base)
848 iounmap(host_base);
849 if (port_base)
850 iounmap(port_base);
851 kfree(probe_ent);
852 kfree(hpriv);
853 pci_release_regions(pdev);
854 out_disable:
855 pci_disable_device(pdev);
856 return rc;
857}
858
859static int __init sil24_init(void)
860{
861 return pci_module_init(&sil24_pci_driver);
862}
863
864static void __exit sil24_exit(void)
865{
866 pci_unregister_driver(&sil24_pci_driver);
867}
868
869MODULE_AUTHOR("Tejun Heo");
870MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
871MODULE_LICENSE("GPL");
872MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
873
874module_init(sil24_init);
875module_exit(sil24_exit);
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index b227e51d12f4..057f7b98b6c4 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -102,7 +102,7 @@ static Scsi_Host_Template sis_sht = {
102 .ordered_flush = 1, 102 .ordered_flush = 1,
103}; 103};
104 104
105static struct ata_port_operations sis_ops = { 105static const struct ata_port_operations sis_ops = {
106 .port_disable = ata_port_disable, 106 .port_disable = ata_port_disable,
107 .tf_load = ata_tf_load, 107 .tf_load = ata_tf_load,
108 .tf_read = ata_tf_read, 108 .tf_read = ata_tf_read,
@@ -263,7 +263,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
263 goto err_out_regions; 263 goto err_out_regions;
264 264
265 ppi = &sis_port_info; 265 ppi = &sis_port_info;
266 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 266 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
267 if (!probe_ent) { 267 if (!probe_ent) {
268 rc = -ENOMEM; 268 rc = -ENOMEM;
269 goto err_out_regions; 269 goto err_out_regions;
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index d89d968bedac..e0f9570bc6dd 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -102,7 +102,7 @@ static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
102} 102}
103 103
104 104
105static void k2_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) 105static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
106{ 106{
107 struct ata_ioports *ioaddr = &ap->ioaddr; 107 struct ata_ioports *ioaddr = &ap->ioaddr;
108 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 108 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -297,7 +297,7 @@ static Scsi_Host_Template k2_sata_sht = {
297}; 297};
298 298
299 299
300static struct ata_port_operations k2_sata_ops = { 300static const struct ata_port_operations k2_sata_ops = {
301 .port_disable = ata_port_disable, 301 .port_disable = ata_port_disable,
302 .tf_load = k2_sata_tf_load, 302 .tf_load = k2_sata_tf_load,
303 .tf_read = k2_sata_tf_read, 303 .tf_read = k2_sata_tf_read,
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 540a85191172..af08f4f650c1 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -137,7 +137,7 @@ struct pdc_port_priv {
137}; 137};
138 138
139struct pdc_host_priv { 139struct pdc_host_priv {
140 void *dimm_mmio; 140 void __iomem *dimm_mmio;
141 141
142 unsigned int doing_hdma; 142 unsigned int doing_hdma;
143 unsigned int hdma_prod; 143 unsigned int hdma_prod;
@@ -157,8 +157,8 @@ static void pdc_20621_phy_reset (struct ata_port *ap);
157static int pdc_port_start(struct ata_port *ap); 157static int pdc_port_start(struct ata_port *ap);
158static void pdc_port_stop(struct ata_port *ap); 158static void pdc_port_stop(struct ata_port *ap);
159static void pdc20621_qc_prep(struct ata_queued_cmd *qc); 159static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
160static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf); 160static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
161static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf); 161static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
162static void pdc20621_host_stop(struct ata_host_set *host_set); 162static void pdc20621_host_stop(struct ata_host_set *host_set);
163static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe); 163static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
164static int pdc20621_detect_dimm(struct ata_probe_ent *pe); 164static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
@@ -196,7 +196,7 @@ static Scsi_Host_Template pdc_sata_sht = {
196 .ordered_flush = 1, 196 .ordered_flush = 1,
197}; 197};
198 198
199static struct ata_port_operations pdc_20621_ops = { 199static const struct ata_port_operations pdc_20621_ops = {
200 .port_disable = ata_port_disable, 200 .port_disable = ata_port_disable,
201 .tf_load = pdc_tf_load_mmio, 201 .tf_load = pdc_tf_load_mmio,
202 .tf_read = ata_tf_read, 202 .tf_read = ata_tf_read,
@@ -247,7 +247,7 @@ static void pdc20621_host_stop(struct ata_host_set *host_set)
247{ 247{
248 struct pci_dev *pdev = to_pci_dev(host_set->dev); 248 struct pci_dev *pdev = to_pci_dev(host_set->dev);
249 struct pdc_host_priv *hpriv = host_set->private_data; 249 struct pdc_host_priv *hpriv = host_set->private_data;
250 void *dimm_mmio = hpriv->dimm_mmio; 250 void __iomem *dimm_mmio = hpriv->dimm_mmio;
251 251
252 pci_iounmap(pdev, dimm_mmio); 252 pci_iounmap(pdev, dimm_mmio);
253 kfree(hpriv); 253 kfree(hpriv);
@@ -669,8 +669,8 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
669 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ 669 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
670 670
671 writel(port_ofs + PDC_DIMM_ATA_PKT, 671 writel(port_ofs + PDC_DIMM_ATA_PKT,
672 (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 672 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
673 readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 673 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
674 VPRINTK("submitted ofs 0x%x (%u), seq %u\n", 674 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
675 port_ofs + PDC_DIMM_ATA_PKT, 675 port_ofs + PDC_DIMM_ATA_PKT,
676 port_ofs + PDC_DIMM_ATA_PKT, 676 port_ofs + PDC_DIMM_ATA_PKT,
@@ -747,8 +747,8 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
747 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); 747 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
748 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); 748 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
749 writel(port_ofs + PDC_DIMM_ATA_PKT, 749 writel(port_ofs + PDC_DIMM_ATA_PKT,
750 (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 750 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
751 readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 751 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
752 } 752 }
753 753
754 /* step two - execute ATA command */ 754 /* step two - execute ATA command */
@@ -899,7 +899,7 @@ out:
899 DPRINTK("EXIT\n"); 899 DPRINTK("EXIT\n");
900} 900}
901 901
902static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) 902static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
903{ 903{
904 WARN_ON (tf->protocol == ATA_PROT_DMA || 904 WARN_ON (tf->protocol == ATA_PROT_DMA ||
905 tf->protocol == ATA_PROT_NODATA); 905 tf->protocol == ATA_PROT_NODATA);
@@ -907,7 +907,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
907} 907}
908 908
909 909
910static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) 910static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
911{ 911{
912 WARN_ON (tf->protocol == ATA_PROT_DMA || 912 WARN_ON (tf->protocol == ATA_PROT_DMA ||
913 tf->protocol == ATA_PROT_NODATA); 913 tf->protocol == ATA_PROT_NODATA);
@@ -1014,7 +1014,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1014 idx++; 1014 idx++;
1015 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size : 1015 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1016 (long) (window_size - offset); 1016 (long) (window_size - offset);
1017 memcpy_toio((char *) (dimm_mmio + offset / 4), (char *) psource, dist); 1017 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1018 writel(0x01, mmio + PDC_GENERAL_CTLR); 1018 writel(0x01, mmio + PDC_GENERAL_CTLR);
1019 readl(mmio + PDC_GENERAL_CTLR); 1019 readl(mmio + PDC_GENERAL_CTLR);
1020 1020
@@ -1023,8 +1023,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1023 for (; (long) size >= (long) window_size ;) { 1023 for (; (long) size >= (long) window_size ;) {
1024 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1024 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1025 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1025 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1026 memcpy_toio((char *) (dimm_mmio), (char *) psource, 1026 memcpy_toio(dimm_mmio, psource, window_size / 4);
1027 window_size / 4);
1028 writel(0x01, mmio + PDC_GENERAL_CTLR); 1027 writel(0x01, mmio + PDC_GENERAL_CTLR);
1029 readl(mmio + PDC_GENERAL_CTLR); 1028 readl(mmio + PDC_GENERAL_CTLR);
1030 psource += window_size; 1029 psource += window_size;
@@ -1035,7 +1034,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1035 if (size) { 1034 if (size) {
1036 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1035 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1037 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1036 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1038 memcpy_toio((char *) (dimm_mmio), (char *) psource, size / 4); 1037 memcpy_toio(dimm_mmio, psource, size / 4);
1039 writel(0x01, mmio + PDC_GENERAL_CTLR); 1038 writel(0x01, mmio + PDC_GENERAL_CTLR);
1040 readl(mmio + PDC_GENERAL_CTLR); 1039 readl(mmio + PDC_GENERAL_CTLR);
1041 } 1040 }
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 4c9fb8b71be1..d68dc7d3422c 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -90,7 +90,7 @@ static Scsi_Host_Template uli_sht = {
90 .ordered_flush = 1, 90 .ordered_flush = 1,
91}; 91};
92 92
93static struct ata_port_operations uli_ops = { 93static const struct ata_port_operations uli_ops = {
94 .port_disable = ata_port_disable, 94 .port_disable = ata_port_disable,
95 95
96 .tf_load = ata_tf_load, 96 .tf_load = ata_tf_load,
@@ -202,7 +202,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
202 goto err_out_regions; 202 goto err_out_regions;
203 203
204 ppi = &uli_port_info; 204 ppi = &uli_port_info;
205 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 205 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
206 if (!probe_ent) { 206 if (!probe_ent) {
207 rc = -ENOMEM; 207 rc = -ENOMEM;
208 goto err_out_regions; 208 goto err_out_regions;
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 128b996b07b7..80e291a909a9 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -109,7 +109,7 @@ static Scsi_Host_Template svia_sht = {
109 .ordered_flush = 1, 109 .ordered_flush = 1,
110}; 110};
111 111
112static struct ata_port_operations svia_sata_ops = { 112static const struct ata_port_operations svia_sata_ops = {
113 .port_disable = ata_port_disable, 113 .port_disable = ata_port_disable,
114 114
115 .tf_load = ata_tf_load, 115 .tf_load = ata_tf_load,
@@ -212,7 +212,7 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
212 struct ata_probe_ent *probe_ent; 212 struct ata_probe_ent *probe_ent;
213 struct ata_port_info *ppi = &svia_port_info; 213 struct ata_port_info *ppi = &svia_port_info;
214 214
215 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 215 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
216 if (!probe_ent) 216 if (!probe_ent)
217 return NULL; 217 return NULL;
218 218
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index cf94e0158a8d..5af05fdf8544 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -86,7 +86,7 @@ static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
86{ 86{
87 if (sc_reg > SCR_CONTROL) 87 if (sc_reg > SCR_CONTROL)
88 return 0xffffffffU; 88 return 0xffffffffU;
89 return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 89 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
90} 90}
91 91
92 92
@@ -95,16 +95,16 @@ static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
95{ 95{
96 if (sc_reg > SCR_CONTROL) 96 if (sc_reg > SCR_CONTROL)
97 return; 97 return;
98 writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 98 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
99} 99}
100 100
101 101
102static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl) 102static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
103{ 103{
104 unsigned long mask_addr; 104 void __iomem *mask_addr;
105 u8 mask; 105 u8 mask;
106 106
107 mask_addr = (unsigned long) ap->host_set->mmio_base + 107 mask_addr = ap->host_set->mmio_base +
108 VSC_SATA_INT_MASK_OFFSET + ap->port_no; 108 VSC_SATA_INT_MASK_OFFSET + ap->port_no;
109 mask = readb(mask_addr); 109 mask = readb(mask_addr);
110 if (ctl & ATA_NIEN) 110 if (ctl & ATA_NIEN)
@@ -115,7 +115,7 @@ static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
115} 115}
116 116
117 117
118static void vsc_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) 118static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
119{ 119{
120 struct ata_ioports *ioaddr = &ap->ioaddr; 120 struct ata_ioports *ioaddr = &ap->ioaddr;
121 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 121 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -231,7 +231,7 @@ static Scsi_Host_Template vsc_sata_sht = {
231}; 231};
232 232
233 233
234static struct ata_port_operations vsc_sata_ops = { 234static const struct ata_port_operations vsc_sata_ops = {
235 .port_disable = ata_port_disable, 235 .port_disable = ata_port_disable,
236 .tf_load = vsc_sata_tf_load, 236 .tf_load = vsc_sata_tf_load,
237 .tf_read = vsc_sata_tf_read, 237 .tf_read = vsc_sata_tf_read,
@@ -283,7 +283,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
283 struct ata_probe_ent *probe_ent = NULL; 283 struct ata_probe_ent *probe_ent = NULL;
284 unsigned long base; 284 unsigned long base;
285 int pci_dev_busy = 0; 285 int pci_dev_busy = 0;
286 void *mmio_base; 286 void __iomem *mmio_base;
287 int rc; 287 int rc;
288 288
289 if (!printed_version++) 289 if (!printed_version++)
diff --git a/include/linux/ata.h b/include/linux/ata.h
index a5b74efab067..d2873b732bb1 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -42,13 +42,18 @@ enum {
42 ATA_SECT_SIZE = 512, 42 ATA_SECT_SIZE = 512,
43 43
44 ATA_ID_WORDS = 256, 44 ATA_ID_WORDS = 256,
45 ATA_ID_PROD_OFS = 27,
46 ATA_ID_FW_REV_OFS = 23,
47 ATA_ID_SERNO_OFS = 10, 45 ATA_ID_SERNO_OFS = 10,
48 ATA_ID_MAJOR_VER = 80, 46 ATA_ID_FW_REV_OFS = 23,
49 ATA_ID_PIO_MODES = 64, 47 ATA_ID_PROD_OFS = 27,
48 ATA_ID_OLD_PIO_MODES = 51,
49 ATA_ID_FIELD_VALID = 53,
50 ATA_ID_MWDMA_MODES = 63, 50 ATA_ID_MWDMA_MODES = 63,
51 ATA_ID_PIO_MODES = 64,
52 ATA_ID_EIDE_DMA_MIN = 65,
53 ATA_ID_EIDE_PIO = 67,
54 ATA_ID_EIDE_PIO_IORDY = 68,
51 ATA_ID_UDMA_MODES = 88, 55 ATA_ID_UDMA_MODES = 88,
56 ATA_ID_MAJOR_VER = 80,
52 ATA_ID_PIO4 = (1 << 1), 57 ATA_ID_PIO4 = (1 << 1),
53 58
54 ATA_PCI_CTL_OFS = 2, 59 ATA_PCI_CTL_OFS = 2,
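The reordered IDENTIFY-word offsets make the dependency on word 53 visible: ATA_ID_FIELD_VALID says which later words are meaningful. A hypothetical helper (name assumed) checking for UDMA/5 support:

	static inline int ata_id_supports_udma5(const u16 *id)
	{
		return (id[ATA_ID_FIELD_VALID] & (1 << 2)) &&	/* word 88 valid */
		       (id[ATA_ID_UDMA_MODES] & (1 << 5));	/* UDMA/5 supported */
	}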
@@ -128,10 +133,15 @@ enum {
128 ATA_CMD_PIO_READ_EXT = 0x24, 133 ATA_CMD_PIO_READ_EXT = 0x24,
129 ATA_CMD_PIO_WRITE = 0x30, 134 ATA_CMD_PIO_WRITE = 0x30,
130 ATA_CMD_PIO_WRITE_EXT = 0x34, 135 ATA_CMD_PIO_WRITE_EXT = 0x34,
136 ATA_CMD_READ_MULTI = 0xC4,
137 ATA_CMD_READ_MULTI_EXT = 0x29,
138 ATA_CMD_WRITE_MULTI = 0xC5,
139 ATA_CMD_WRITE_MULTI_EXT = 0x39,
131 ATA_CMD_SET_FEATURES = 0xEF, 140 ATA_CMD_SET_FEATURES = 0xEF,
132 ATA_CMD_PACKET = 0xA0, 141 ATA_CMD_PACKET = 0xA0,
133 ATA_CMD_VERIFY = 0x40, 142 ATA_CMD_VERIFY = 0x40,
134 ATA_CMD_VERIFY_EXT = 0x42, 143 ATA_CMD_VERIFY_EXT = 0x42,
144 ATA_CMD_INIT_DEV_PARAMS = 0x91,
135 145
136 /* SETFEATURES stuff */ 146 /* SETFEATURES stuff */
137 SETFEATURES_XFER = 0x03, 147 SETFEATURES_XFER = 0x03,
@@ -146,14 +156,14 @@ enum {
146 XFER_MW_DMA_2 = 0x22, 156 XFER_MW_DMA_2 = 0x22,
147 XFER_MW_DMA_1 = 0x21, 157 XFER_MW_DMA_1 = 0x21,
148 XFER_MW_DMA_0 = 0x20, 158 XFER_MW_DMA_0 = 0x20,
159 XFER_SW_DMA_2 = 0x12,
160 XFER_SW_DMA_1 = 0x11,
161 XFER_SW_DMA_0 = 0x10,
149 XFER_PIO_4 = 0x0C, 162 XFER_PIO_4 = 0x0C,
150 XFER_PIO_3 = 0x0B, 163 XFER_PIO_3 = 0x0B,
151 XFER_PIO_2 = 0x0A, 164 XFER_PIO_2 = 0x0A,
152 XFER_PIO_1 = 0x09, 165 XFER_PIO_1 = 0x09,
153 XFER_PIO_0 = 0x08, 166 XFER_PIO_0 = 0x08,
154 XFER_SW_DMA_2 = 0x12,
155 XFER_SW_DMA_1 = 0x11,
156 XFER_SW_DMA_0 = 0x10,
157 XFER_PIO_SLOW = 0x00, 167 XFER_PIO_SLOW = 0x00,
158 168
159 /* ATAPI stuff */ 169 /* ATAPI stuff */
@@ -181,6 +191,7 @@ enum {
181 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ 191 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
182 ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ 192 ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
183 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ 193 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
194 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
184}; 195};
185 196
186enum ata_tf_protocols { 197enum ata_tf_protocols {
@@ -250,7 +261,19 @@ struct ata_taskfile {
250 ((u64) (id)[(n) + 1] << 16) | \ 261 ((u64) (id)[(n) + 1] << 16) | \
251 ((u64) (id)[(n) + 0]) ) 262 ((u64) (id)[(n) + 0]) )
252 263
253static inline int atapi_cdb_len(u16 *dev_id) 264static inline int ata_id_current_chs_valid(const u16 *id)
265{
266 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
267 has not been issued to the device then the values of
268 id[54] to id[56] are vendor specific. */
269 return (id[53] & 0x01) && /* Current translation valid */
270 id[54] && /* cylinders in current translation */
271 id[55] && /* heads in current translation */
272 id[55] <= 16 &&
273 id[56]; /* sectors in current translation */
274}
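A hypothetical use of the helper above: derive the addressable capacity from the current translation once it is known to be valid (function name assumed):

	static inline u64 ata_id_chs_capacity(const u16 *id)
	{
		if (!ata_id_current_chs_valid(id))
			return 0;
		return (u64) id[54] * id[55] * id[56];	/* cyls * heads * sectors */
	}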
275
276static inline int atapi_cdb_len(const u16 *dev_id)
254{ 277{
255 u16 tmp = dev_id[0] & 0x3; 278 u16 tmp = dev_id[0] & 0x3;
256 switch (tmp) { 279 switch (tmp) {
@@ -260,7 +283,7 @@ static inline int atapi_cdb_len(u16 *dev_id)
260 } 283 }
261} 284}
262 285
263static inline int is_atapi_taskfile(struct ata_taskfile *tf) 286static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
264{ 287{
265 return (tf->protocol == ATA_PROT_ATAPI) || 288 return (tf->protocol == ATA_PROT_ATAPI) ||
266 (tf->protocol == ATA_PROT_ATAPI_NODATA) || 289 (tf->protocol == ATA_PROT_ATAPI_NODATA) ||
diff --git a/include/linux/libata.h b/include/linux/libata.h
index ceee1fc42c60..00a8a5738858 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -91,12 +91,13 @@ enum {
91 ATA_SHT_EMULATED = 1, 91 ATA_SHT_EMULATED = 1,
92 ATA_SHT_CMD_PER_LUN = 1, 92 ATA_SHT_CMD_PER_LUN = 1,
93 ATA_SHT_THIS_ID = -1, 93 ATA_SHT_THIS_ID = -1,
94 ATA_SHT_USE_CLUSTERING = 0, 94 ATA_SHT_USE_CLUSTERING = 1,
95 95
96 /* struct ata_device stuff */ 96 /* struct ata_device stuff */
97 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ 97 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
98 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ 98 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
99 ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */ 99 ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */
100 ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */
100 101
101 ATA_DEV_UNKNOWN = 0, /* unknown device */ 102 ATA_DEV_UNKNOWN = 0, /* unknown device */
102 ATA_DEV_ATA = 1, /* ATA device */ 103 ATA_DEV_ATA = 1, /* ATA device */
@@ -154,17 +155,21 @@ enum {
154 ATA_SHIFT_UDMA = 0, 155 ATA_SHIFT_UDMA = 0,
155 ATA_SHIFT_MWDMA = 8, 156 ATA_SHIFT_MWDMA = 8,
156 ATA_SHIFT_PIO = 11, 157 ATA_SHIFT_PIO = 11,
158
159 /* Masks for port functions */
160 ATA_PORT_PRIMARY = (1 << 0),
161 ATA_PORT_SECONDARY = (1 << 1),
157}; 162};
158 163
159enum pio_task_states { 164enum hsm_task_states {
160 PIO_ST_UNKNOWN, 165 HSM_ST_UNKNOWN,
161 PIO_ST_IDLE, 166 HSM_ST_IDLE,
162 PIO_ST_POLL, 167 HSM_ST_POLL,
163 PIO_ST_TMOUT, 168 HSM_ST_TMOUT,
164 PIO_ST, 169 HSM_ST,
165 PIO_ST_LAST, 170 HSM_ST_LAST,
166 PIO_ST_LAST_POLL, 171 HSM_ST_LAST_POLL,
167 PIO_ST_ERR, 172 HSM_ST_ERR,
168}; 173};
169 174
170/* forward declarations */ 175/* forward declarations */
@@ -197,7 +202,7 @@ struct ata_ioports {
197struct ata_probe_ent { 202struct ata_probe_ent {
198 struct list_head node; 203 struct list_head node;
199 struct device *dev; 204 struct device *dev;
200 struct ata_port_operations *port_ops; 205 const struct ata_port_operations *port_ops;
201 Scsi_Host_Template *sht; 206 Scsi_Host_Template *sht;
202 struct ata_ioports port[ATA_MAX_PORTS]; 207 struct ata_ioports port[ATA_MAX_PORTS];
203 unsigned int n_ports; 208 unsigned int n_ports;
@@ -220,7 +225,7 @@ struct ata_host_set {
220 void __iomem *mmio_base; 225 void __iomem *mmio_base;
221 unsigned int n_ports; 226 unsigned int n_ports;
222 void *private_data; 227 void *private_data;
223 struct ata_port_operations *ops; 228 const struct ata_port_operations *ops;
224 struct ata_port * ports[0]; 229 struct ata_port * ports[0];
225}; 230};
226 231
@@ -278,15 +283,18 @@ struct ata_device {
278 u8 xfer_mode; 283 u8 xfer_mode;
279 unsigned int xfer_shift; /* ATA_SHIFT_xxx */ 284 unsigned int xfer_shift; /* ATA_SHIFT_xxx */
280 285
281 /* cache info about current transfer mode */ 286 unsigned int multi_count; /* sectors count for
282 u8 xfer_protocol; /* taskfile xfer protocol */ 287 READ/WRITE MULTIPLE */
283 u8 read_cmd; /* opcode to use on read */ 288
284 u8 write_cmd; /* opcode to use on write */ 289 /* for CHS addressing */
290 u16 cylinders; /* Number of cylinders */
291 u16 heads; /* Number of heads */
292 u16 sectors; /* Number of sectors per track */
285}; 293};
286 294
287struct ata_port { 295struct ata_port {
288 struct Scsi_Host *host; /* our co-allocated scsi host */ 296 struct Scsi_Host *host; /* our co-allocated scsi host */
289 struct ata_port_operations *ops; 297 const struct ata_port_operations *ops;
290 unsigned long flags; /* ATA_FLAG_xxx */ 298 unsigned long flags; /* ATA_FLAG_xxx */
291 unsigned int id; /* unique id req'd by scsi midlyr */ 299 unsigned int id; /* unique id req'd by scsi midlyr */
292 unsigned int port_no; /* unique port #; from zero */ 300 unsigned int port_no; /* unique port #; from zero */
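struct ata_device gains multi_count plus explicit CHS geometry (cylinders, heads, sectors). For devices addressed by CHS, the standard conversion is LBA = (C * heads + H) * sectors_per_track + (S - 1), sectors being 1-based. A self-contained check of that arithmetic:

#include <stdio.h>

struct chs { unsigned short cyl, head, sect; };

static unsigned long chs_to_lba(struct chs a, unsigned short heads,
				unsigned short spt)
{
	/* LBA = (C * heads + H) * sectors_per_track + (S - 1) */
	return ((unsigned long)a.cyl * heads + a.head) * spt + (a.sect - 1);
}

int main(void)
{
	struct chs a = { .cyl = 2, .head = 3, .sect = 4 };

	/* 16 heads, 63 sectors/track: (2*16 + 3)*63 + 3 = 2208 */
	printf("%lu\n", chs_to_lba(a, 16, 63));
	return 0;
}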
@@ -319,7 +327,7 @@ struct ata_port {
319 struct work_struct packet_task; 327 struct work_struct packet_task;
320 328
321 struct work_struct pio_task; 329 struct work_struct pio_task;
322 unsigned int pio_task_state; 330 unsigned int hsm_task_state;
323 unsigned long pio_task_timeout; 331 unsigned long pio_task_timeout;
324 332
325 void *private_data; 333 void *private_data;
@@ -333,10 +341,10 @@ struct ata_port_operations {
333 void (*set_piomode) (struct ata_port *, struct ata_device *); 341 void (*set_piomode) (struct ata_port *, struct ata_device *);
334 void (*set_dmamode) (struct ata_port *, struct ata_device *); 342 void (*set_dmamode) (struct ata_port *, struct ata_device *);
335 343
336 void (*tf_load) (struct ata_port *ap, struct ata_taskfile *tf); 344 void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
337 void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf); 345 void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
338 346
339 void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf); 347 void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
340 u8 (*check_status)(struct ata_port *ap); 348 u8 (*check_status)(struct ata_port *ap);
341 u8 (*check_altstatus)(struct ata_port *ap); 349 u8 (*check_altstatus)(struct ata_port *ap);
342 u8 (*check_err)(struct ata_port *ap); 350 u8 (*check_err)(struct ata_port *ap);
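Several ops now take const struct ata_taskfile *: tf_load and exec_command only read the taskfile, while tf_read fills it in and so stays non-const. A minimal stand-in showing what the qualifier buys (a stray write through the pointer becomes a compile error); struct tf_min and my_tf_load are invented for illustration.

struct tf_min { unsigned char command, device; };

static void my_tf_load(const struct tf_min *tf)
{
	/* register I/O elided; the point is that
	 * tf->command = 0; would no longer compile */
	(void)tf->command;
	(void)tf->device;
}

int main(void)
{
	struct tf_min tf = { 0xec, 0 };	/* 0xec: IDENTIFY DEVICE opcode */

	my_tf_load(&tf);
	return 0;
}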
@@ -377,9 +385,22 @@ struct ata_port_info {
377 unsigned long pio_mask; 385 unsigned long pio_mask;
378 unsigned long mwdma_mask; 386 unsigned long mwdma_mask;
379 unsigned long udma_mask; 387 unsigned long udma_mask;
380 struct ata_port_operations *port_ops; 388 const struct ata_port_operations *port_ops;
389};
390
391struct ata_timing {
392 unsigned short mode; /* ATA mode */
393 unsigned short setup; /* t1 */
394 unsigned short act8b; /* t2 for 8-bit I/O */
395 unsigned short rec8b; /* t2i for 8-bit I/O */
396 unsigned short cyc8b; /* t0 for 8-bit I/O */
397 unsigned short active; /* t2 or tD */
398 unsigned short recover; /* t2i or tK */
399 unsigned short cycle; /* t0 */
400 unsigned short udma; /* t2CYCTYP/2 */
381}; 401};
382 402
403#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin)
383 404
384extern void ata_port_probe(struct ata_port *); 405extern void ata_port_probe(struct ata_port *);
385extern void __sata_phy_reset(struct ata_port *ap); 406extern void __sata_phy_reset(struct ata_port *ap);
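The new struct ata_timing carries one PATA mode's parameters (setup/active/recover and their 8-bit register-access counterparts, plus the UDMA half-cycle), and FIT() clamps a value into a legal range as a short. A plain-C restatement of FIT(), since max_t/min_t are kernel-only macros:

#include <stdio.h>

static short fit(long v, short vmin, short vmax)
{
	short s = v < vmax ? (short)v : vmax;	/* min_t(short, v, vmax) */
	return s > vmin ? s : vmin;		/* max_t(short, ..., vmin) */
}

int main(void)
{
	/* e.g. clamp a computed recovery time into a mode's legal range */
	printf("%d %d %d\n", fit(-5, 0, 120), fit(70, 0, 120), fit(300, 0, 120));
	/* -> 0 70 120 */
	return 0;
}

This lets timing computation saturate a derived value into the bounds a mode entry allows rather than overshooting the hardware's register fields.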
@@ -392,7 +413,7 @@ extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_i
392 unsigned int n_ports); 413 unsigned int n_ports);
393extern void ata_pci_remove_one (struct pci_dev *pdev); 414extern void ata_pci_remove_one (struct pci_dev *pdev);
394#endif /* CONFIG_PCI */ 415#endif /* CONFIG_PCI */
395extern int ata_device_add(struct ata_probe_ent *ent); 416extern int ata_device_add(const struct ata_probe_ent *ent);
396extern void ata_host_set_remove(struct ata_host_set *host_set); 417extern void ata_host_set_remove(struct ata_host_set *host_set);
397extern int ata_scsi_detect(Scsi_Host_Template *sht); 418extern int ata_scsi_detect(Scsi_Host_Template *sht);
398extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 419extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
@@ -400,19 +421,21 @@ extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmn
400extern int ata_scsi_error(struct Scsi_Host *host); 421extern int ata_scsi_error(struct Scsi_Host *host);
401extern int ata_scsi_release(struct Scsi_Host *host); 422extern int ata_scsi_release(struct Scsi_Host *host);
402extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 423extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
424extern int ata_ratelimit(void);
425
403/* 426/*
404 * Default driver ops implementations 427 * Default driver ops implementations
405 */ 428 */
406extern void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf); 429extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
407extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 430extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
408extern void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp); 431extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
409extern void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf); 432extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
410extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device); 433extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
411extern void ata_std_dev_select (struct ata_port *ap, unsigned int device); 434extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
412extern u8 ata_check_status(struct ata_port *ap); 435extern u8 ata_check_status(struct ata_port *ap);
413extern u8 ata_altstatus(struct ata_port *ap); 436extern u8 ata_altstatus(struct ata_port *ap);
414extern u8 ata_chk_err(struct ata_port *ap); 437extern u8 ata_chk_err(struct ata_port *ap);
415extern void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf); 438extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
416extern int ata_port_start (struct ata_port *ap); 439extern int ata_port_start (struct ata_port *ap);
417extern void ata_port_stop (struct ata_port *ap); 440extern void ata_port_stop (struct ata_port *ap);
418extern void ata_host_stop (struct ata_host_set *host_set); 441extern void ata_host_stop (struct ata_host_set *host_set);
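ata_ratelimit() is exported so interrupt-path diagnostics can be throttled instead of flooding the log. The kernel version presumably defers to printk rate limiting; purely to illustrate the contract (return nonzero when printing is allowed), a userspace analog:

#include <stdio.h>
#include <time.h>

static int ratelimit(void)
{
	static time_t window;
	static int burst;
	time_t now = time(NULL);

	if (now != window) {		/* new one-second window */
		window = now;
		burst = 0;
	}
	return burst++ < 10;		/* allow 10 messages per window */
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		if (ratelimit())
			printf("message %d\n", i);
	return 0;
}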
@@ -423,8 +446,8 @@ extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
423 unsigned int buflen); 446 unsigned int buflen);
424extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 447extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
425 unsigned int n_elem); 448 unsigned int n_elem);
426extern unsigned int ata_dev_classify(struct ata_taskfile *tf); 449extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
427extern void ata_dev_id_string(u16 *id, unsigned char *s, 450extern void ata_dev_id_string(const u16 *id, unsigned char *s,
428 unsigned int ofs, unsigned int len); 451 unsigned int ofs, unsigned int len);
429extern void ata_dev_config(struct ata_port *ap, unsigned int i); 452extern void ata_dev_config(struct ata_port *ap, unsigned int i);
430extern void ata_bmdma_setup (struct ata_queued_cmd *qc); 453extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
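ata_dev_classify() and ata_dev_id_string() are now const-correct on their inputs. IDENTIFY strings such as the model name store two ASCII bytes per 16-bit word, high byte first, which is why a copy helper is needed at all. A sketch matching the prototype above, assuming ofs indexes words and len counts bytes:

#include <stdio.h>

static void id_string(const unsigned short *id, unsigned char *s,
		      unsigned int ofs, unsigned int len)
{
	while (len >= 2) {
		*s++ = id[ofs] >> 8;	/* high byte of the word first */
		*s++ = id[ofs] & 0xff;
		ofs++;
		len -= 2;
	}
}

int main(void)
{
	unsigned short id[256] = { 0 };
	unsigned char model[8] = { 0 };

	id[27] = ('S' << 8) | 'T';	/* fabricated model field */
	id[28] = ('3' << 8) | '2';
	id_string(id, model, 27, 4);
	printf("%s\n", model);		/* -> ST32 */
	return 0;
}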
@@ -441,6 +464,32 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
441 sector_t capacity, int geom[]); 464 sector_t capacity, int geom[]);
442extern int ata_scsi_slave_config(struct scsi_device *sdev); 465extern int ata_scsi_slave_config(struct scsi_device *sdev);
443 466
467/*
468 * Timing helpers
469 */
470extern int ata_timing_compute(struct ata_device *, unsigned short,
471 struct ata_timing *, int, int);
472extern void ata_timing_merge(const struct ata_timing *,
473 const struct ata_timing *, struct ata_timing *,
474 unsigned int);
475
476enum {
477 ATA_TIMING_SETUP = (1 << 0),
478 ATA_TIMING_ACT8B = (1 << 1),
479 ATA_TIMING_REC8B = (1 << 2),
480 ATA_TIMING_CYC8B = (1 << 3),
481 ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
482 ATA_TIMING_CYC8B,
483 ATA_TIMING_ACTIVE = (1 << 4),
484 ATA_TIMING_RECOVER = (1 << 5),
485 ATA_TIMING_CYCLE = (1 << 6),
486 ATA_TIMING_UDMA = (1 << 7),
487 ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
488 ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
489 ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
490 ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
491};
492
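ata_timing_merge() plus the ATA_TIMING_* mask enum let callers combine two timings field by field, e.g. to derive one cable timing that is safe for both devices on a channel. A sketch of the merge semantics the masks suggest, keeping the slower (larger) value per selected field; only two of the eight fields are modeled, and the kernel's own merge rule should be taken as authoritative.

#include <stdio.h>

#define ATA_TIMING_SETUP	(1 << 0)
#define ATA_TIMING_ACTIVE	(1 << 4)

struct timing_min { unsigned short setup, active; };

static void timing_merge(const struct timing_min *a,
			 const struct timing_min *b,
			 struct timing_min *m, unsigned int what)
{
	/* for each selected field keep the slower (larger) value */
	if (what & ATA_TIMING_SETUP)
		m->setup = a->setup > b->setup ? a->setup : b->setup;
	if (what & ATA_TIMING_ACTIVE)
		m->active = a->active > b->active ? a->active : b->active;
}

int main(void)
{
	struct timing_min a = { 30, 290 }, b = { 25, 330 }, m = { 0, 0 };

	timing_merge(&a, &b, &m, ATA_TIMING_SETUP | ATA_TIMING_ACTIVE);
	printf("setup=%u active=%u\n", m.setup, m.active);	/* 30 330 */
	return 0;
}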
444 493
445#ifdef CONFIG_PCI 494#ifdef CONFIG_PCI
446struct pci_bits { 495struct pci_bits {
@@ -452,8 +501,8 @@ struct pci_bits {
452 501
453extern void ata_pci_host_stop (struct ata_host_set *host_set); 502extern void ata_pci_host_stop (struct ata_host_set *host_set);
454extern struct ata_probe_ent * 503extern struct ata_probe_ent *
455ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port); 504ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
456extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits); 505extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
457 506
458#endif /* CONFIG_PCI */ 507#endif /* CONFIG_PCI */
459 508
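ata_pci_init_native_mode() grows an int portmask so drivers can enable the primary and secondary channels independently, using the ATA_PORT_PRIMARY/ATA_PORT_SECONDARY masks added earlier in this diff. A hedged fragment of how a driver probe path might call it; my_init_one and port_info are hypothetical, the error handling is an assumption, and this is kernel-context code, not standalone.

static int my_init_one(struct pci_dev *pdev)
{
	struct ata_probe_ent *probe_ent;
	int nports;

	/* enable both channels of a dual-channel controller */
	probe_ent = ata_pci_init_native_mode(pdev, port_info,
				ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	/* ata_device_add() returns how many ports it registered */
	nports = ata_device_add(probe_ent);
	kfree(probe_ent);

	return nports ? 0 : -ENODEV;
}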
@@ -463,7 +512,7 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
463 return (tag < ATA_MAX_QUEUE) ? 1 : 0; 512 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
464} 513}
465 514
466static inline unsigned int ata_dev_present(struct ata_device *dev) 515static inline unsigned int ata_dev_present(const struct ata_device *dev)
467{ 516{
468 return ((dev->class == ATA_DEV_ATA) || 517 return ((dev->class == ATA_DEV_ATA) ||
469 (dev->class == ATA_DEV_ATAPI)); 518 (dev->class == ATA_DEV_ATAPI));
@@ -662,7 +711,7 @@ static inline unsigned int sata_dev_present(struct ata_port *ap)
662 return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0; 711 return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
663} 712}
664 713
665static inline int ata_try_flush_cache(struct ata_device *dev) 714static inline int ata_try_flush_cache(const struct ata_device *dev)
666{ 715{
667 return ata_id_wcache_enabled(dev->id) || 716 return ata_id_wcache_enabled(dev->id) ||
668 ata_id_has_flush(dev->id) || 717 ata_id_has_flush(dev->id) ||