-rw-r--r--  Documentation/DocBook/libata.tmpl  1072
-rw-r--r--  drivers/scsi/ahci.c                  31
-rw-r--r--  drivers/scsi/libata-core.c          433
-rw-r--r--  drivers/scsi/libata-scsi.c          491
-rw-r--r--  drivers/scsi/libata.h                 4
-rw-r--r--  drivers/scsi/sata_mv.c             1142
-rw-r--r--  drivers/scsi/sata_nv.c                2
-rw-r--r--  drivers/scsi/sata_promise.c           6
-rw-r--r--  drivers/scsi/sata_sis.c               2
-rw-r--r--  drivers/scsi/sata_uli.c               2
-rw-r--r--  drivers/scsi/sata_via.c               2
-rw-r--r--  include/linux/ata.h                  20
-rw-r--r--  include/linux/libata.h               34
13 files changed, 2720 insertions, 521 deletions
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
index 375ae760dc1e..d260d92089ad 100644
--- a/Documentation/DocBook/libata.tmpl
+++ b/Documentation/DocBook/libata.tmpl
@@ -415,6 +415,362 @@ and other resources, etc.
415 </sect1> 415 </sect1>
416 </chapter> 416 </chapter>
417 417
418 <chapter id="libataEH">
419 <title>Error handling</title>
420
421 <para>
422 This chapter describes how errors are handled under libata.
423 Readers are advised to read SCSI EH
424 (Documentation/scsi/scsi_eh.txt) and ATA exceptions doc first.
425 </para>
426
427 <sect1><title>Origins of commands</title>
428 <para>
429 In libata, a command is represented with struct ata_queued_cmd
430 or qc. qc's are preallocated during port initialization and
431 used repeatedly for command execution. Currently only one
432 qc is allocated per port, but the yet-to-be-merged NCQ branch
433 allocates one for each tag and maps each qc to an NCQ tag 1-to-1.
434 </para>
435 <para>
436 libata commands can originate from two sources - libata itself
437 and the SCSI midlayer. libata internal commands are used for
438 initialization and error handling. All normal blk requests
439 and commands for SCSI emulation are passed as SCSI commands
440 through the queuecommand callback of the SCSI host template.
441 </para>
442 </sect1>
443
444 <sect1><title>How commands are issued</title>
445
446 <variablelist>
447
448 <varlistentry><term>Internal commands</term>
449 <listitem>
450 <para>
451 First, a qc is allocated and initialized using
452 ata_qc_new_init(). Although ata_qc_new_init() doesn't
453 implement any wait or retry mechanism when a qc is not
454 available, internal commands are currently issued only during
455 initialization and error recovery, so no other command is
456 active and allocation is guaranteed to succeed.
457 </para>
458 <para>
459 Once allocated, the qc's taskfile is initialized for the command
460 to be executed. A qc currently has two mechanisms to notify
461 completion. One is via the qc->complete_fn() callback and the
462 other is the completion qc->waiting. qc->complete_fn() callback
463 is the asynchronous path used by normal SCSI translated
464 commands and qc->waiting is the synchronous (issuer sleeps in
465 process context) path used by internal commands.
466 </para>
467 <para>
468 Once initialization is complete, the host_set lock is acquired
469 and the qc is issued.
470 </para>
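	<para>
	A minimal sketch of this synchronous path, modeled on the
	internal command issuers in libata (e.g. ata_dev_init_params()
	added by this patch), is shown below. The taskfile setup is
	elided and the helper name is illustrative only.
	</para>

	<programlisting>
static void issue_internal_cmd(struct ata_port *ap, struct ata_device *dev)
{
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int rc;

	/* only used during init/EH, so allocation cannot fail */
	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	/* ... fill qc->tf for the command to be executed ... */

	qc->waiting = &amp;wait;		/* synchronous completion path */
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&amp;ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&amp;ap->host_set->lock, flags);

	if (rc)
		ata_port_disable(ap);
	else
		wait_for_completion(&amp;wait);	/* issuer sleeps here */
}
	</programlisting>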
471 </listitem>
472 </varlistentry>
473
474 <varlistentry><term>SCSI commands</term>
475 <listitem>
476 <para>
477 All libata drivers use ata_scsi_queuecmd() as the
478 hostt->queuecommand callback. scmds can be either simulated
479 or translated. No qc is involved in processing a simulated
480 scmd. The result is computed right away and the scmd is
481 completed.
482 </para>
483 <para>
484 For a translated scmd, ata_qc_new_init() is invoked to
485 allocate a qc and the scmd is translated into the qc. The
486 SCSI midlayer's completion notification function pointer is
487 stored in qc->scsidone.
488 </para>
489 <para>
490 The qc->complete_fn() callback is used for completion
491 notification. ATA commands use ata_scsi_qc_complete() while
492 ATAPI commands use atapi_qc_complete(). Both functions end up
493 calling qc->scsidone to notify the upper layer when the qc is
494 finished. After translation is completed, the qc is issued
495 with ata_qc_issue().
496 </para>
497 <para>
498 Note that the SCSI midlayer invokes hostt->queuecommand while
499 holding the host_set lock, so all of the above occurs while
500 holding the host_set lock.
501 </para>
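	<para>
	A condensed sketch of the translated path follows; xlat_func
	stands for the per-command translation routine, and error
	handling is simplified.
	</para>

	<programlisting>
/* inside ata_scsi_queuecmd(), host_set lock already held */
qc = ata_qc_new_init(ap, dev);
if (qc == NULL)
	return;				/* SCSI midlayer will retry */

qc->scsicmd = cmd;
qc->scsidone = done;			/* midlayer completion hook */
qc->complete_fn = ata_scsi_qc_complete;	/* atapi_qc_complete() for ATAPI */

if (xlat_func(qc, cmd->cmnd))		/* translate scmd into qc->tf */
	goto err_out;

ata_qc_issue(qc);
	</programlisting>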
502 </listitem>
503 </varlistentry>
504
505 </variablelist>
506 </sect1>
507
508 <sect1><title>How commands are processed</title>
509 <para>
510 Depending on which protocol and which controller are used,
511 commands are processed differently. For the purpose of
512 discussion, a controller which uses the taskfile interface
513 and all standard callbacks is assumed.
514 </para>
515 <para>
516 Currently 6 ATA command protocols are used. They can be
517 sorted into the following four categories according to how
518 they are processed.
519 </para>
520
521 <variablelist>
522 <varlistentry><term>ATA NO DATA or DMA</term>
523 <listitem>
524 <para>
525 ATA_PROT_NODATA and ATA_PROT_DMA fall into this category.
526 These types of commands don't require any software
527 intervention once issued. The device will raise an
528 interrupt on completion.
529 </para>
530 </listitem>
531 </varlistentry>
532
533 <varlistentry><term>ATA PIO</term>
534 <listitem>
535 <para>
536 ATA_PROT_PIO is in this category. libata currently
537 implements PIO with polling. The ATA_NIEN bit is set to turn
538 off interrupts and pio_task on ata_wq performs polling and
539 IO.
540 </para>
541 </listitem>
542 </varlistentry>
543
544 <varlistentry><term>ATAPI NODATA or DMA</term>
545 <listitem>
546 <para>
547 ATA_PROT_ATAPI_NODATA and ATA_PROT_ATAPI_DMA are in this
548 category. packet_task is used to poll the BSY bit after
549 issuing the PACKET command. Once BSY is turned off by the
550 device, packet_task transfers the CDB and hands off processing
551 to the interrupt handler.
552 </para>
553 </listitem>
554 </varlistentry>
555
556 <varlistentry><term>ATAPI PIO</term>
557 <listitem>
558 <para>
559 ATA_PROT_ATAPI is in this category. The ATA_NIEN bit is set
560 and, as in ATAPI NODATA or DMA, packet_task submits the CDB.
561 However, after submitting the CDB, further processing (data
562 transfer) is handed off to pio_task.
563 </para>
564 </listitem>
565 </varlistentry>
566 </variablelist>
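	<para>
	The dispatch among these categories happens in
	ata_qc_issue_prot(). A trimmed sketch is shown below; only the
	polling PIO arm is spelled out (it matches a hunk later in
	this patch) and the other arms are abbreviated.
	</para>

	<programlisting>
switch (qc->tf.protocol) {
case ATA_PROT_NODATA:
case ATA_PROT_DMA:
	/* load the taskfile (and start the DMA engine for
	 * ATA_PROT_DMA), then wait for the completion interrupt */
	/* ... */
	break;

case ATA_PROT_PIO:	/* load tf registers, initiate polling pio */
	ata_qc_set_polling(qc);
	ata_tf_to_host_nolock(ap, &amp;qc->tf);
	ap->hsm_task_state = HSM_ST;
	queue_work(ata_wq, &amp;ap->pio_task);
	break;

case ATA_PROT_ATAPI_NODATA:
case ATA_PROT_ATAPI_DMA:
case ATA_PROT_ATAPI:
	/* issue PACKET, then let packet_task poll BSY and send the CDB */
	/* ... */
	break;
}
	</programlisting>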
567 </sect1>
568
569 <sect1><title>How commands are completed</title>
570 <para>
571 Once issued, all qc's are either completed with
572 ata_qc_complete() or time out. For commands which are handled
573 by interrupts, ata_host_intr() invokes ata_qc_complete(), and,
574 for PIO tasks, pio_task invokes ata_qc_complete(). In error
575 cases, packet_task may also complete commands.
576 </para>
577 <para>
578 ata_qc_complete() does the following.
579 </para>
580
581 <orderedlist>
582
583 <listitem>
584 <para>
585 DMA memory is unmapped.
586 </para>
587 </listitem>
588
589 <listitem>
590 <para>
591 ATA_QCFLAG_ACTIVE is cleared from qc->flags.
592 </para>
593 </listitem>
594
595 <listitem>
596 <para>
597 qc->complete_fn() callback is invoked. If the return value of
598 the callback is not zero, completion is short-circuited and
599 ata_qc_complete() returns.
600 </para>
601 </listitem>
602
603 <listitem>
604 <para>
605 __ata_qc_complete() is called, which does
606 <orderedlist>
607
608 <listitem>
609 <para>
610 qc->flags is cleared to zero.
611 </para>
612 </listitem>
613
614 <listitem>
615 <para>
616 ap->active_tag and qc->tag are poisoned.
617 </para>
618 </listitem>
619
620 <listitem>
621 <para>
622 qc->waiting is cleared &amp; completed (in that order).
623 </para>
624 </listitem>
625
626 <listitem>
627 <para>
628 qc is deallocated by clearing the appropriate bit in ap->qactive.
629 </para>
630 </listitem>
631
632 </orderedlist>
633 </para>
634 </listitem>
635
636 </orderedlist>
637
638 <para>
639 So, it basically notifies the upper layer and deallocates the
640 qc. One exception is the short-circuit path in #3, which is
641 used by atapi_qc_complete().
642 </para>
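	<para>
	Condensed into code, the flow above corresponds roughly to the
	following; this is a simplified sketch of ata_qc_complete(),
	not its verbatim body.
	</para>

	<programlisting>
void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
{
	int rc;

	if (qc->flags &amp; ATA_QCFLAG_DMAMAP)	/* 1. unmap DMA memory */
		ata_sg_clean(qc);

	qc->flags &amp;= ~ATA_QCFLAG_ACTIVE;	/* 2. clear ACTIVE */

	rc = qc->complete_fn(qc, drv_stat);	/* 3. notify upper layer */
	if (rc)
		return;			/* short-circuit path */

	__ata_qc_complete(qc);		/* 4. poison tags, deallocate */
}
	</programlisting>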
643 <para>
644 For all non-ATAPI commands, whether they fail or not, almost
645 the same code path is taken and very little error handling
646 takes place. A qc is completed with success status if it
647 succeeded, or with failed status otherwise.
648 </para>
649 <para>
650 However, failed ATAPI commands require more handling as
651 REQUEST SENSE is needed to acquire sense data. If an ATAPI
652 command fails, ata_qc_complete() is invoked with error status,
653 which in turn invokes atapi_qc_complete() via
654 qc->complete_fn() callback.
655 </para>
656 <para>
657 This makes atapi_qc_complete() set scmd->result to
658 SAM_STAT_CHECK_CONDITION, complete the scmd and return 1. As
659 the sense data is empty but scmd->result is CHECK CONDITION,
660 the SCSI midlayer will invoke EH for the scmd, and returning 1
661 makes ata_qc_complete() return without deallocating the qc.
662 This leads us to ata_scsi_error() with a partially completed qc.
663 </para>
664
665 </sect1>
666
667 <sect1><title>ata_scsi_error()</title>
668 <para>
669 ata_scsi_error() is the current hostt->eh_strategy_handler()
670 for libata. As discussed above, this will be entered in two
671 cases - timeout and ATAPI error completion. This function
672 calls the low level libata driver's eng_timeout() callback, the
673 standard implementation of which is ata_eng_timeout(). It checks
674 if a qc is active and calls ata_qc_timeout() on the qc if so.
675 Actual error handling occurs in ata_qc_timeout().
676 </para>
677 <para>
678 If EH is invoked for a timeout, ata_qc_timeout() stops BMDMA and
679 completes the qc. Note that as we're currently in EH, we
680 cannot call scsi_done. As described in the SCSI EH doc, a
681 recovered scmd should be either retried with
682 scsi_queue_insert() or finished with scsi_finish_command().
683 Here, we override qc->scsidone with scsi_finish_command() and
684 call ata_qc_complete().
685 </para>
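	<para>
	In code, the override described above amounts to the fragment
	below; this is a sketch of the relevant part of
	ata_qc_timeout() and the helper name is illustrative.
	</para>

	<programlisting>
/* EH context: finish a timed out qc through the EH-safe path */
static void eh_complete_qc(struct ata_queued_cmd *qc, u8 drv_stat)
{
	/* scsi_done() must not be used from EH; route the scmd
	 * through scsi_finish_command() instead (see scsi_eh.txt)
	 */
	qc->scsidone = scsi_finish_command;
	ata_qc_complete(qc, drv_stat);
}
	</programlisting>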
686 <para>
687 If EH is invoked due to a failed ATAPI qc, the qc here is
688 completed but not deallocated. The purpose of this
689 half-completion is to use the qc as a placeholder to make the
690 EH code reach this place. This is a bit hackish, but it works.
691 </para>
692 <para>
693 Once control reaches here, the qc is deallocated by invoking
694 __ata_qc_complete() explicitly. Then, an internal qc for REQUEST
695 SENSE is issued. Once sense data is acquired, the scmd is
696 finished by directly invoking scsi_finish_command() on the
697 scmd. Note that as we have already completed and deallocated
698 the qc which was associated with the scmd, we don't need
699 to/cannot call ata_qc_complete() again.
700 </para>
701
702 </sect1>
703
704 <sect1><title>Problems with the current EH</title>
705
706 <itemizedlist>
707
708 <listitem>
709 <para>
710 Error representation is too crude. Currently any and all
711 error conditions are represented with the ATA STATUS and ERROR
712 registers. Errors which aren't ATA device errors are treated
713 as ATA device errors by setting the ATA_ERR bit. A better error
714 descriptor which can properly represent ATA and other
715 errors/exceptions is needed.
716 </para>
717 </listitem>
718
719 <listitem>
720 <para>
721 When handling timeouts, no action is taken to make the device
722 forget about the timed out command and become ready for new commands.
723 </para>
724 </listitem>
725
726 <listitem>
727 <para>
728 EH handling via ata_scsi_error() is not properly protected
729 from usual command processing. On EH entrance, the device is
730 not in a quiescent state. Timed out commands may succeed or
731 fail at any time. pio_task and packet_task may still be
732 running.
732 </para>
733 </listitem>
734
735 <listitem>
736 <para>
737 Error recovery is too weak. Devices / controllers causing HSM
738 violations and other errors quite often require a reset to
739 return to a known state. Also, advanced error handling is
740 necessary to support features like NCQ and hotplug.
741 </para>
742 </listitem>
743
744 <listitem>
745 <para>
746 ATA errors are directly handled in the interrupt handler and
747 PIO errors in pio_task. This is problematic for advanced
748 error handling for the following reasons.
749 </para>
750 <para>
751 First, advanced error handling often requires a process
752 context and internal qc execution.
753 </para>
754 <para>
755 Second, even a simple failure (say, CRC error) needs
756 information gathering and could trigger complex error handling
757 (say, resetting &amp; reconfiguring). Having multiple code
758 paths to gather information, enter EH and trigger actions
759 makes life painful.
760 </para>
761 <para>
762 Third, scattered EH code makes implementing low level drivers
763 difficult. Low level drivers override libata callbacks. If
764 EH is scattered over several places, each affected callback
765 must perform its part of error handling. This can be error
766 prone and painful.
767 </para>
768 </listitem>
769
770 </itemizedlist>
771 </sect1>
772 </chapter>
773
418 <chapter id="libataExt"> 774 <chapter id="libataExt">
419 <title>libata Library</title> 775 <title>libata Library</title>
420!Edrivers/scsi/libata-core.c 776!Edrivers/scsi/libata-core.c
@@ -431,6 +787,722 @@ and other resources, etc.
431!Idrivers/scsi/libata-scsi.c 787!Idrivers/scsi/libata-scsi.c
432 </chapter> 788 </chapter>
433 789
790 <chapter id="ataExceptions">
791 <title>ATA errors &amp; exceptions</title>
792
793 <para>
794 This chapter tries to identify what error/exception conditions exist
795 for ATA/ATAPI devices and describe how they should be handled in
796 an implementation-neutral way.
797 </para>
798
799 <para>
800 The term 'error' is used to describe conditions where either an
801 explicit error condition is reported by the device or a command has
802 timed out.
803 </para>
804
805 <para>
806 The term 'exception' is either used to describe exceptional
807 conditions which are not errors (say, power or hotplug events), or
808 to describe both errors and non-error exceptional conditions. Where
809 an explicit distinction between error and exception is necessary, the
810 term 'non-error exception' is used.
811 </para>
812
813 <sect1 id="excat">
814 <title>Exception categories</title>
815 <para>
816 Exceptions are described primarily with respect to the legacy
817 taskfile + bus master IDE interface. If a controller provides
818 a better mechanism for error reporting, mapping it onto the
819 categories described below shouldn't be difficult.
820 </para>
821
822 <para>
823 In the following sections, two recovery actions - reset and
824 reconfiguring transport - are mentioned. These are described
825 further in <xref linkend="exrec"/>.
826 </para>
827
828 <sect2 id="excatHSMviolation">
829 <title>HSM violation</title>
830 <para>
831 This error is indicated when the STATUS value doesn't match the HSM
832 requirement during the issuing or execution of any ATA/ATAPI command.
833 </para>
834
835 <itemizedlist>
836 <title>Examples</title>
837
838 <listitem>
839 <para>
840 ATA_STATUS doesn't contain !BSY &amp;&amp; DRDY &amp;&amp; !DRQ while trying
841 to issue a command.
842 </para>
843 </listitem>
844
845 <listitem>
846 <para>
847 !BSY &amp;&amp; !DRQ during PIO data transfer.
848 </para>
849 </listitem>
850
851 <listitem>
852 <para>
853 DRQ on command completion.
854 </para>
855 </listitem>
856
857 <listitem>
858 <para>
859 !BSY &amp;&amp; ERR after CDB transfer starts but before the
860 last byte of the CDB is transferred. The ATA/ATAPI standard
861 states that &quot;The device shall not terminate the PACKET
862 command with an error before the last byte of the command
863 packet has been written&quot; in the error outputs description
864 of the PACKET command, and the state diagram doesn't include
865 such transitions.
866 </para>
867 </listitem>
868
869 </itemizedlist>
870
871 <para>
872 In these cases, HSM is violated and not much information
873 regarding the error can be acquired from the STATUS or ERROR
874 register. IOW, this error can be anything - a driver bug, a
875 faulty device, controller and/or cable.
876 </para>
877
878 <para>
879 As HSM is violated, a reset is necessary to restore a known
880 state. Reconfiguring the transport for a lower speed might be
881 helpful too, as transmission errors sometimes cause this kind of error.
882 </para>
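	<para>
	As a concrete example, the first bullet above can be checked
	with a small predicate like the sketch below; ATA_BUSY,
	ATA_DRDY and ATA_DRQ are the STATUS bit masks from
	&lt;linux/ata.h&gt; and the helper itself is illustrative.
	</para>

	<programlisting>
/* HSM requires BSY=0, DRDY=1, DRQ=0 before a command is issued */
static inline int hsm_ok_to_issue(u8 status)
{
	return (status &amp; (ATA_BUSY | ATA_DRDY | ATA_DRQ)) == ATA_DRDY;
}
	</programlisting>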
883 </sect2>
884
885 <sect2 id="excatDevErr">
886 <title>ATA/ATAPI device error (non-NCQ / non-CHECK CONDITION)</title>
887
888 <para>
889 These are errors detected and reported by ATA/ATAPI devices
890 indicating device problems. For this type of error, the STATUS
891 and ERROR register values are valid and describe the error
892 condition. Note that some ATA bus errors are detected by
893 ATA/ATAPI devices and reported using the same mechanism as
894 device errors. Those cases are described later in this
895 section.
896 </para>
897
898 <para>
899 For ATA commands, this type of error is indicated by !BSY
900 &amp;&amp; ERR during command execution and on completion.
901 </para>
902
903 <para>For ATAPI commands,</para>
904
905 <itemizedlist>
906
907 <listitem>
908 <para>
909 !BSY &amp;&amp; ERR &amp;&amp; ABRT right after issuing PACKET
910 indicates that the PACKET command is not supported and falls
911 in this category.
912 </para>
913 </listitem>
914
915 <listitem>
916 <para>
917 !BSY &amp;&amp; ERR(==CHK) &amp;&amp; !ABRT after the last
918 byte of CDB is transferred indicates CHECK CONDITION and
919 doesn't fall in this category.
920 </para>
921 </listitem>
922
923 <listitem>
924 <para>
925 !BSY &amp;&amp; ERR(==CHK) &amp;&amp; ABRT after the last byte
926 of CDB is transferred *probably* indicates CHECK CONDITION and
927 doesn't fall in this category.
928 </para>
929 </listitem>
930
931 </itemizedlist>
932
933 <para>
934 Of the errors detected as above, the following are not ATA/ATAPI
935 device errors but ATA bus errors and should be handled
936 according to <xref linkend="excatATAbusErr"/>.
937 </para>
938
939 <variablelist>
940
941 <varlistentry>
942 <term>CRC error during data transfer</term>
943 <listitem>
944 <para>
945 This is indicated by the ICRC bit in the ERROR register and
946 means that corruption occurred during data transfer. Up to
947 ATA/ATAPI-7, the standard specifies that this bit is only
948 applicable to UDMA transfers but ATA/ATAPI-8 draft revision
949 1f says that the bit may be applicable to multiword DMA and
950 PIO.
951 </para>
952 </listitem>
953 </varlistentry>
954
955 <varlistentry>
956 <term>ABRT error during data transfer or on completion</term>
957 <listitem>
958 <para>
959 Up to ATA/ATAPI-7, the standard specifies that ABRT could be
960 set on ICRC errors and in cases where a device is not able
961 to complete a command. Combined with the fact that MWDMA
962 and PIO transfer errors aren't allowed to use the ICRC bit up
963 to ATA/ATAPI-7, this seems to imply that the ABRT bit alone
964 could indicate transfer errors.
965 </para>
966 <para>
967 However, ATA/ATAPI-8 draft revision 1f removes the part
968 saying that ICRC errors can turn on ABRT. So, this is kind of
969 a gray area. Some heuristics are needed here.
970 </para>
971 </listitem>
972 </varlistentry>
973
974 </variablelist>
975
976 <para>
977 ATA/ATAPI device errors can be further categorized as follows.
978 </para>
979
980 <variablelist>
981
982 <varlistentry>
983 <term>Media errors</term>
984 <listitem>
985 <para>
986 This is indicated by the UNC bit in the ERROR register. ATA
987 devices report a UNC error only after a certain number of
988 retries cannot recover the data, so there's not much
989 else to do other than notifying the upper layer.
990 </para>
991 <para>
992 READ and WRITE commands report the CHS or LBA of the first
993 failed sector, but the ATA/ATAPI standard specifies that the
994 amount of transferred data on error completion is
995 indeterminate, so we cannot assume that sectors preceding
996 the failed sector have been transferred and thus cannot
997 complete those sectors successfully as SCSI does.
998 </para>
999 </listitem>
1000 </varlistentry>
1001
1002 <varlistentry>
1003 <term>Media changed / media change requested error</term>
1004 <listitem>
1005 <para>
1006 &lt;&lt;TODO: fill here&gt;&gt;
1007 </para>
1008 </listitem>
1009 </varlistentry>
1010
1011 <varlistentry><term>Address error</term>
1012 <listitem>
1013 <para>
1014 This is indicated by the IDNF bit in the ERROR register.
1015 Report to the upper layer.
1016 </para>
1017 </listitem>
1018 </varlistentry>
1019
1020 <varlistentry><term>Other errors</term>
1021 <listitem>
1022 <para>
1023 This can be an invalid command or parameter, indicated by the
1024 ABRT bit in the ERROR register, or some other error condition.
1025 Note that the ABRT bit can indicate a lot of things including
1026 ICRC and Address errors. Heuristics are needed.
1027 </para>
1028 </listitem>
1029 </varlistentry>
1030
1031 </variablelist>
1032
1033 <para>
1034 Depending on the command, not all STATUS/ERROR bits are
1035 applicable. These non-applicable bits are marked with
1036 &quot;na&quot; in the output descriptions, but up to ATA/ATAPI-7
1037 no definition of &quot;na&quot; can be found. However,
1038 ATA/ATAPI-8 draft revision 1f describes &quot;N/A&quot; as
1039 follows.
1040 </para>
1041
1042 <blockquote>
1043 <variablelist>
1044 <varlistentry><term>3.2.3.3a N/A</term>
1045 <listitem>
1046 <para>
1047 A keyword that indicates a field has no defined value in
1048 this standard and should not be checked by the host or
1049 device. N/A fields should be cleared to zero.
1050 </para>
1051 </listitem>
1052 </varlistentry>
1053 </variablelist>
1054 </blockquote>
1055
1056 <para>
1057 So, it seems reasonable to assume that &quot;na&quot; bits are
1058 cleared to zero by devices and thus need no explicit masking.
1059 </para>
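	<para>
	Putting the categories above together, a first-cut ERROR
	register classifier might look like the sketch below. The bit
	positions follow the ATA spec (ICRC bit 7, UNC bit 6, IDNF bit
	4, ABRT bit 2); the enum and function are illustrative, not
	existing libata API.
	</para>

	<programlisting>
enum ata_err_kind {
	ERR_KIND_BUS,		/* handle as ATA bus error */
	ERR_KIND_MEDIA,		/* uncorrectable media error */
	ERR_KIND_ADDRESS,	/* requested sector not found */
	ERR_KIND_OTHER,		/* ABRT or anything else; heuristics */
};

static enum ata_err_kind classify_ata_error(u8 err)
{
	if (err &amp; (1 &lt;&lt; 7))	/* ICRC: interface CRC error */
		return ERR_KIND_BUS;
	if (err &amp; (1 &lt;&lt; 6))	/* UNC: uncorrectable media error */
		return ERR_KIND_MEDIA;
	if (err &amp; (1 &lt;&lt; 4))	/* IDNF: ID not found */
		return ERR_KIND_ADDRESS;
	/* ABRT (bit 2) may mean an invalid command, but can also
	 * accompany ICRC and address errors - heuristics needed.
	 */
	return ERR_KIND_OTHER;
}
	</programlisting>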
1060
1061 </sect2>
1062
1063 <sect2 id="excatATAPIcc">
1064 <title>ATAPI device CHECK CONDITION</title>
1065
1066 <para>
1067 An ATAPI device CHECK CONDITION error is indicated by a set CHK
1068 bit (the ERR bit) in the STATUS register after the last byte of
1069 the CDB is transferred for a PACKET command. For this kind of
1070 error, sense data should be acquired to gather information
1071 regarding the error. The REQUEST SENSE packet command should be
1072 used to acquire the sense data.
1073 </para>
1074
1075 <para>
1076 Once sense data is acquired, this type of error can be
1077 handled similarly to other SCSI errors. Note that sense data
1078 may indicate an ATA bus error (e.g. Sense Key 04h HARDWARE ERROR
1079 &amp;&amp; ASC/ASCQ 47h/00h SCSI PARITY ERROR). In such
1080 cases, the error should be considered as an ATA bus error and
1081 handled according to <xref linkend="excatATAbusErr"/>.
1082 </para>
1083
1084 </sect2>
1085
1086 <sect2 id="excatNCQerr">
1087 <title>ATA device error (NCQ)</title>
1088
1089 <para>
1090 An NCQ command error is indicated by a cleared BSY bit and a set
1091 ERR bit during the NCQ command phase (one or more NCQ commands
1092 outstanding). Although the STATUS and ERROR registers will
1093 contain valid values describing the error, READ LOG EXT is
1094 required to clear the error condition, determine which command
1095 has failed and acquire more information.
1096 </para>
1097
1098 <para>
1099 READ LOG EXT Log Page 10h reports which tag has failed and
1100 taskfile register values describing the error. With this
1101 information the failed command can be handled as a normal ATA
1102 command error as in <xref linkend="excatDevErr"/> and all
1103 other in-flight commands must be retried. Note that this
1104 retry should not be counted - it's likely that commands
1105 retried this way would have completed normally if it were not
1106 for the failed command.
1107 </para>
1108
1109 <para>
1110 Note that ATA bus errors can be reported as ATA device NCQ
1111 errors. This should be handled as described in <xref
1112 linkend="excatATAbusErr"/>.
1113 </para>
1114
1115 <para>
1116 If READ LOG EXT Log Page 10h fails or reports NQ (i.e. the error
1117 is for a non-queued command), we're thoroughly screwed. This
1118 condition should be treated according to <xref linkend="excatHSMviolation"/>.
1119 </para>
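	<para>
	A sketch of pulling the failed tag out of log page 10h is
	shown below. The taskfile setup and the buffer layout (tag in
	bits 4:0 of byte 0, NQ in bit 7) follow the ATA spec; the
	function itself is illustrative and not part of libata as of
	this patch.
	</para>

	<programlisting>
/* read the 512-byte log page 10h into buf; return failed tag or -1 */
static int read_ncq_error_log(struct ata_port *ap, struct ata_device *dev,
			      u8 *buf)
{
	struct ata_taskfile tf;

	ata_tf_init(ap, &amp;tf, dev->devno);
	tf.command = 0x2f;		/* READ LOG EXT */
	tf.lbal = 0x10;			/* log page 10h */
	tf.nsect = 1;			/* one 512-byte sector */
	tf.protocol = ATA_PROT_PIO;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* ... issue tf as an internal PIO-in command reading into buf ... */

	if (buf[0] &amp; 0x80)		/* NQ: not an NCQ error */
		return -1;

	return buf[0] &amp; 0x1f;		/* tag of the failed command */
}
	</programlisting>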
1120
1121 </sect2>
1122
1123 <sect2 id="excatATAbusErr">
1124 <title>ATA bus error</title>
1125
1126 <para>
1127 An ATA bus error means that data corruption occurred during
1128 transmission over the ATA bus (SATA or PATA). This type of
1129 error can be indicated by
1130 </para>
1131
1132 <itemizedlist>
1133
1134 <listitem>
1135 <para>
1136 ICRC or ABRT error as described in <xref linkend="excatDevErr"/>.
1137 </para>
1138 </listitem>
1139
1140 <listitem>
1141 <para>
1142 Controller-specific error completion with error information
1143 indicating transmission error.
1144 </para>
1145 </listitem>
1146
1147 <listitem>
1148 <para>
1149 On some controllers, command timeout. In this case, there may
1150 be a mechanism to determine that the timeout is due to a
1151 transmission error.
1152 </para>
1153 </listitem>
1154
1155 <listitem>
1156 <para>
1157 Unknown/random errors, timeouts and all sorts of weirdities.
1158 </para>
1159 </listitem>
1160
1161 </itemizedlist>
1162
1163 <para>
1164 As described above, transmission errors can cause a wide variety
1165 of symptoms ranging from device ICRC errors to random device
1166 lockup, and, in many cases, there is no way to tell if an
1167 error condition is due to a transmission error or not;
1168 therefore, it's necessary to employ some kind of heuristic
1169 when dealing with errors and timeouts. For example,
1170 encountering repetitive ABRT errors for a known supported
1171 command is likely to indicate an ATA bus error.
1172 </para>
1173
1174 <para>
1175 Once it's determined that ATA bus errors have possibly
1176 occurred, lowering the ATA bus transmission speed is one of the
1177 actions which may alleviate the problem. See <xref
1178 linkend="exrecReconf"/> for more information.
1179 </para>
1180
1181 </sect2>
1182
1183 <sect2 id="excatPCIbusErr">
1184 <title>PCI bus error</title>
1185
1186 <para>
1187 Data corruption or other failures during transmission over PCI
1188 (or another system bus). For standard BMDMA, this is indicated
1189 by the Error bit in the BMDMA Status register. This type of
1190 error must be logged as it indicates something is very wrong
1191 with the system. Resetting the host controller is recommended.
1192 </para>
1193
1194 </sect2>
1195
1196 <sect2 id="excatLateCompletion">
1197 <title>Late completion</title>
1198
1199 <para>
1200 This occurs when a timeout expires and the timeout handler
1201 finds that the timed out command has already completed,
1202 successfully or with an error. This is usually caused by lost
1203 interrupts. This type of error must be logged. Resetting the
1204 host controller is recommended.
1205 </para>
1206
1207 </sect2>
1208
1209 <sect2 id="excatUnknown">
1210 <title>Unknown error (timeout)</title>
1211
1212 <para>
1213 This is when a timeout occurs and the command is still in
1214 progress, or the host and device are in an unknown state. When
1215 this occurs, the HSM could be in any valid or invalid state. To
1216 bring the device to a known state and make it forget about the
1217 timed out command, resetting is necessary. The timed out
1218 command may be retried.
1219 </para>
1220
1221 <para>
1222 Timeouts can also be caused by transmission errors. Refer to
1223 <xref linkend="excatATAbusErr"/> for more details.
1224 </para>
1225
1226 </sect2>
1227
1228 <sect2 id="excatHoplugPM">
1229 <title>Hotplug and power management exceptions</title>
1230
1231 <para>
1232 &lt;&lt;TODO: fill here&gt;&gt;
1233 </para>
1234
1235 </sect2>
1236
1237 </sect1>
1238
1239 <sect1 id="exrec">
1240 <title>EH recovery actions</title>
1241
1242 <para>
1243 This section discusses several important recovery actions.
1244 </para>
1245
1246 <sect2 id="exrecClr">
1247 <title>Clearing error condition</title>
1248
1249 <para>
1250 Many controllers require their error registers to be cleared by
1251 the error handler. Different controllers may have different
1252 requirements.
1253 </para>
1254
1255 <para>
1256 For SATA, it's strongly recommended to clear at least the SError
1257 register during error handling.
1258 </para>
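	<para>
	SError bits are write-1-to-clear, so a driver with SCR access
	can clear the register as in the sketch below; scr_read() and
	scr_write() stand for the driver's SCR accessors and SCR_ERROR
	is from &lt;linux/ata.h&gt;.
	</para>

	<programlisting>
/* clear all currently set SError bits on this port */
static void clear_serror(struct ata_port *ap)
{
	u32 serror = scr_read(ap, SCR_ERROR);

	scr_write(ap, SCR_ERROR, serror);	/* write back to clear */
}
	</programlisting>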
1259 </sect2>
1260
1261 <sect2 id="exrecRst">
1262 <title>Reset</title>
1263
1264 <para>
1265 During EH, resetting is necessary in the following cases.
1266 </para>
1267
1268 <itemizedlist>
1269
1270 <listitem>
1271 <para>
1272 HSM is in unknown or invalid state
1273 </para>
1274 </listitem>
1275
1276 <listitem>
1277 <para>
1278 HBA is in unknown or invalid state
1279 </para>
1280 </listitem>
1281
1282 <listitem>
1283 <para>
1284 EH needs to make HBA/device forget about in-flight commands
1285 </para>
1286 </listitem>
1287
1288 <listitem>
1289 <para>
1290 HBA/device behaves weirdly
1291 </para>
1292 </listitem>
1293
1294 </itemizedlist>
1295
1296 <para>
1297 Resetting during EH might be a good idea regardless of the error
1298 condition, to improve EH robustness. Whether to reset both or
1299 either one of the HBA and the device depends on the situation,
1300 but the following scheme is recommended.
1301 </para>
1302
1303 <itemizedlist>
1304
1305 <listitem>
1306 <para>
1307 When it's known that the HBA is in a ready state but the
1308 ATA/ATAPI device is in an unknown state, reset only the device.
1309 </para>
1310 </listitem>
1311
1312 <listitem>
1313 <para>
1314 If the HBA is in an unknown state, reset both the HBA and the device.
1315 </para>
1316 </listitem>
1317
1318 </itemizedlist>
1319
1320 <para>
1321 HBA resetting is implementation specific. For a controller
1322 complying with taskfile/BMDMA PCI IDE, stopping the active DMA
1323 transaction may be sufficient iff the BMDMA state is the only HBA
1324 context. But even mostly compliant taskfile/BMDMA PCI IDE
1325 controllers may have implementation specific requirements and
1326 mechanisms to reset themselves. This must be addressed by
1327 specific drivers.
1328 </para>
1329
1330 <para>
1331 OTOH, the ATA/ATAPI standard describes ways to reset
1332 ATA/ATAPI devices in detail.
1333 </para>
1334
1335 <variablelist>
1336
1337 <varlistentry><term>PATA hardware reset</term>
1338 <listitem>
1339 <para>
1340 This is a hardware initiated device reset signalled with the
1341 asserted PATA RESET- signal. There is no standard way to
1342 initiate a hardware reset from software, although some
1343 hardware provides registers that allow the driver to directly
1344 tweak the RESET- signal.
1345 </para>
1346 </listitem>
1347 </varlistentry>
1348
1349 <varlistentry><term>Software reset</term>
1350 <listitem>
1351 <para>
1352 This is achieved by turning the CONTROL SRST bit on for at
1353 least 5us. Both PATA and SATA support it but, in the case of
1354 SATA, this may require controller-specific support as the
1355 second Register FIS to clear SRST should be transmitted
1356 while the BSY bit is still set. Note that on PATA, this resets
1357 both the master and slave devices on a channel.
1358 </para>
1359 </listitem>
1360 </varlistentry>
1361
1362 <varlistentry><term>EXECUTE DEVICE DIAGNOSTIC command</term>
1363 <listitem>
1364 <para>
1365 Although the ATA/ATAPI standard doesn't describe it exactly,
1366 EDD implies some level of resetting, possibly a level similar
1367 to software reset. The host-side EDD protocol can be handled
1368 with normal command processing and most SATA controllers
1369 should be able to handle EDDs just like other commands.
1370 As with software reset, EDD affects both devices on a PATA
1371 bus.
1372 </para>
1373 <para>
1374 Although EDD does reset devices, this doesn't suit error
1375 handling as EDD cannot be issued while BSY is set and it's
1376 unclear how it will act when the device is in an unknown/weird
1377 state.
1378 </para>
1379 </listitem>
1380 </varlistentry>
1381
1382 <varlistentry><term>ATAPI DEVICE RESET command</term>
1383 <listitem>
1384 <para>
1385 This is very similar to software reset except that reset
1386 can be restricted to the selected device without affecting
1387 the other device sharing the cable.
1388 </para>
1389 </listitem>
1390 </varlistentry>
1391
1392 <varlistentry><term>SATA phy reset</term>
1393 <listitem>
1394 <para>
1395 This is the preferred way of resetting a SATA device. In
1396 effect, it's identical to PATA hardware reset. Note that
1397 this can be done with the standard SCR Control register.
1398 As such, it's usually easier to implement than software
1399 reset.
1400 </para>
1401 </listitem>
1402 </varlistentry>
1403
1404 </variablelist>
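	<para>
	For reference, a SATA phy reset through the SCR Control
	register is sketched below: DET is set to 1 (held for at least
	1ms, per AHCI 10.4.2) and then cleared to 0 to resume normal
	operation. This is a sketch; a real implementation also waits
	for the phy to come back online via SCR Status and then
	revalidates the device.
	</para>

	<programlisting>
/* sketch: hardware-reset a SATA device via SCR Control */
static void sata_phy_reset_sketch(struct ata_port *ap)
{
	scr_write(ap, SCR_CONTROL, 0x301);	/* DET=1: issue COMRESET */
	mdelay(1);				/* hold for at least 1ms */
	scr_write(ap, SCR_CONTROL, 0x300);	/* DET=0: end reset */

	/* ... poll SCR_STATUS until the device is detected,
	 *     then reconfigure/revalidate the device ...
	 */
}
	</programlisting>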
1405
1406 <para>
1407 One more thing to consider when resetting devices is that
1408 resetting clears certain configuration parameters and they
1409 need to be set to their previous or newly adjusted values
1410 after reset.
1411 </para>
1412
1413 <para>
1414 The affected parameters are:
1415 </para>
1416
1417 <itemizedlist>
1418
1419 <listitem>
1420 <para>
1421 CHS set up with INITIALIZE DEVICE PARAMETERS (seldom used)
1422 </para>
1423 </listitem>
1424
1425 <listitem>
1426 <para>
1427 Parameters set with SET FEATURES including transfer mode setting
1428 </para>
1429 </listitem>
1430
1431 <listitem>
1432 <para>
1433 Block count set with SET MULTIPLE MODE
1434 </para>
1435 </listitem>
1436
1437 <listitem>
1438 <para>
1439 Other parameters (SET MAX, MEDIA LOCK...)
1440 </para>
1441 </listitem>
1442
1443 </itemizedlist>
1444
1445 <para>
1446 The ATA/ATAPI standard specifies that some parameters must be
1447 maintained across a hardware or software reset, but doesn't
1448 strictly specify all of them. Always reconfiguring the needed
1449 parameters after a reset is required for robustness. Note that
1450 this also applies when resuming from deep sleep (power-off).
1451 </para>
1452
1453 <para>
1454 Also, the ATA/ATAPI standard requires that IDENTIFY DEVICE /
1455 IDENTIFY PACKET DEVICE is issued after any configuration
1456 parameter is updated or a hardware reset occurs, and that the
1457 result be used for further operation. The OS driver is required
1458 to implement a revalidation mechanism to support this.
1459 </para>
1460
1461 </sect2>
1462
1463 <sect2 id="exrecReconf">
1464 <title>Reconfigure transport</title>
1465
1466 <para>
1467 For both PATA and SATA, a lot of corners are cut on cheap
1468 connectors, cables and controllers, and it's quite common to
1469 see a high transmission error rate. This can be mitigated by
1470 lowering the transmission speed.
1471 </para>
1472
1473 <para>
1474 The following is a possible scheme Jeff Garzik suggested.
1475 </para>
1476
1477 <blockquote>
1478 <para>
1479 If more than $N (3?) transmission errors happen in 15 minutes,
1480 </para>
1481 <itemizedlist>
1482 <listitem>
1483 <para>
1484 if SATA, decrease SATA PHY speed. if speed cannot be decreased,
1485 </para>
1486 </listitem>
1487 <listitem>
1488 <para>
1489 decrease UDMA xfer speed. if at UDMA0, switch to PIO4,
1490 </para>
1491 </listitem>
1492 <listitem>
1493 <para>
1494 decrease PIO xfer speed. if at PIO3, complain, but continue
1495 </para>
1496 </listitem>
1497 </itemizedlist>
1498 </blockquote>
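	<para>
	The windowed error counting in the scheme above could be kept
	per port with something like the sketch below; the structure
	and constants are hypothetical, not existing libata state.
	</para>

	<programlisting>
#define XFER_ERR_WINDOW		(15 * 60 * HZ)	/* 15 minutes, in jiffies */
#define XFER_ERR_LIMIT		3		/* the $N above */

struct xfer_err_stats {
	unsigned long	window_start;	/* jiffies when the window began */
	unsigned int	nr_errs;	/* transmission errors seen so far */
};

/* call on each suspected transmission error; non-zero means it is
 * time to step down the SATA phy / UDMA / PIO speed as listed above
 */
static int xfer_err_should_slow_down(struct xfer_err_stats *st)
{
	if (time_after(jiffies, st->window_start + XFER_ERR_WINDOW)) {
		st->window_start = jiffies;	/* start a new window */
		st->nr_errs = 0;
	}
	return ++st->nr_errs > XFER_ERR_LIMIT;
}
	</programlisting>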
1499
1500 </sect2>
1501
1502 </sect1>
1503
1504 </chapter>
1505
434 <chapter id="PiixInt"> 1506 <chapter id="PiixInt">
435 <title>ata_piix Internals</title> 1507 <title>ata_piix Internals</title>
436!Idrivers/scsi/ata_piix.c 1508!Idrivers/scsi/ata_piix.c
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index c2c8fa828e24..5ec866b00479 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -672,17 +672,36 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
672 672
673 for (i = 0; i < host_set->n_ports; i++) { 673 for (i = 0; i < host_set->n_ports; i++) {
674 struct ata_port *ap; 674 struct ata_port *ap;
675 u32 tmp;
676 675
677 VPRINTK("port %u\n", i); 676 if (!(irq_stat & (1 << i)))
677 continue;
678
678 ap = host_set->ports[i]; 679 ap = host_set->ports[i];
679 tmp = irq_stat & (1 << i); 680 if (ap) {
680 if (tmp && ap) {
681 struct ata_queued_cmd *qc; 681 struct ata_queued_cmd *qc;
682 qc = ata_qc_from_tag(ap, ap->active_tag); 682 qc = ata_qc_from_tag(ap, ap->active_tag);
683 if (ahci_host_intr(ap, qc)) 683 if (!ahci_host_intr(ap, qc))
684 irq_ack |= (1 << i); 684 if (ata_ratelimit()) {
685 struct pci_dev *pdev =
686 to_pci_dev(ap->host_set->dev);
687 printk(KERN_WARNING
688 "ahci(%s): unhandled interrupt on port %u\n",
689 pci_name(pdev), i);
690 }
691
692 VPRINTK("port %u\n", i);
693 } else {
694 VPRINTK("port %u (no irq)\n", i);
695 if (ata_ratelimit()) {
696 struct pci_dev *pdev =
697 to_pci_dev(ap->host_set->dev);
698 printk(KERN_WARNING
699 "ahci(%s): interrupt on disabled port %u\n",
700 pci_name(pdev), i);
701 }
685 } 702 }
703
704 irq_ack |= (1 << i);
686 } 705 }
687 706
688 if (irq_ack) { 707 if (irq_ack) {
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index e5b01997117a..d568914c4344 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -48,6 +48,7 @@
48#include <linux/completion.h> 48#include <linux/completion.h>
49#include <linux/suspend.h> 49#include <linux/suspend.h>
50#include <linux/workqueue.h> 50#include <linux/workqueue.h>
51#include <linux/jiffies.h>
51#include <scsi/scsi.h> 52#include <scsi/scsi.h>
52#include "scsi.h" 53#include "scsi.h"
53#include "scsi_priv.h" 54#include "scsi_priv.h"
@@ -62,6 +63,7 @@
62static unsigned int ata_busy_sleep (struct ata_port *ap, 63static unsigned int ata_busy_sleep (struct ata_port *ap,
63 unsigned long tmout_pat, 64 unsigned long tmout_pat,
64 unsigned long tmout); 65 unsigned long tmout);
66static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
65static void ata_set_mode(struct ata_port *ap); 67static void ata_set_mode(struct ata_port *ap);
66static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); 68static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
67static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift); 69static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
@@ -69,7 +71,6 @@ static int fgb(u32 bitmap);
69static int ata_choose_xfer_mode(struct ata_port *ap, 71static int ata_choose_xfer_mode(struct ata_port *ap,
70 u8 *xfer_mode_out, 72 u8 *xfer_mode_out,
71 unsigned int *xfer_shift_out); 73 unsigned int *xfer_shift_out);
72static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
73static void __ata_qc_complete(struct ata_queued_cmd *qc); 74static void __ata_qc_complete(struct ata_queued_cmd *qc);
74 75
75static unsigned int ata_unique_id = 1; 76static unsigned int ata_unique_id = 1;
@@ -1131,7 +1132,7 @@ static inline void ata_dump_id(struct ata_device *dev)
1131static void ata_dev_identify(struct ata_port *ap, unsigned int device) 1132static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1132{ 1133{
1133 struct ata_device *dev = &ap->device[device]; 1134 struct ata_device *dev = &ap->device[device];
1134 unsigned int i; 1135 unsigned int major_version;
1135 u16 tmp; 1136 u16 tmp;
1136 unsigned long xfer_modes; 1137 unsigned long xfer_modes;
1137 u8 status; 1138 u8 status;
@@ -1229,9 +1230,9 @@ retry:
1229 * common ATA, ATAPI feature tests 1230 * common ATA, ATAPI feature tests
1230 */ 1231 */
1231 1232
1232 /* we require LBA and DMA support (bits 8 & 9 of word 49) */ 1233 /* we require DMA support (bits 8 of word 49) */
1233 if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) { 1234 if (!ata_id_has_dma(dev->id)) {
1234 printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id); 1235 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1235 goto err_out_nosup; 1236 goto err_out_nosup;
1236 } 1237 }
1237 1238
@@ -1251,32 +1252,69 @@ retry:
1251 if (!ata_id_is_ata(dev->id)) /* sanity check */ 1252 if (!ata_id_is_ata(dev->id)) /* sanity check */
1252 goto err_out_nosup; 1253 goto err_out_nosup;
1253 1254
1255 /* get major version */
1254 tmp = dev->id[ATA_ID_MAJOR_VER]; 1256 tmp = dev->id[ATA_ID_MAJOR_VER];
1255 for (i = 14; i >= 1; i--) 1257 for (major_version = 14; major_version >= 1; major_version--)
1256 if (tmp & (1 << i)) 1258 if (tmp & (1 << major_version))
1257 break; 1259 break;
1258 1260
1259 /* we require at least ATA-3 */ 1261 /*
1260 if (i < 3) { 1262 * The exact sequence expected by certain pre-ATA4 drives is:
1261 printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id); 1263 * SRST RESET
1262 goto err_out_nosup; 1264 * IDENTIFY
1263 } 1265 * INITIALIZE DEVICE PARAMETERS
1266 * anything else..
1267 * Some drives were very specific about that exact sequence.
1268 */
1269 if (major_version < 4 || (!ata_id_has_lba(dev->id)))
1270 ata_dev_init_params(ap, dev);
1271
1272 if (ata_id_has_lba(dev->id)) {
1273 dev->flags |= ATA_DFLAG_LBA;
1274
1275 if (ata_id_has_lba48(dev->id)) {
1276 dev->flags |= ATA_DFLAG_LBA48;
1277 dev->n_sectors = ata_id_u64(dev->id, 100);
1278 } else {
1279 dev->n_sectors = ata_id_u32(dev->id, 60);
1280 }
1281
1282 /* print device info to dmesg */
1283 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
1284 ap->id, device,
1285 major_version,
1286 ata_mode_string(xfer_modes),
1287 (unsigned long long)dev->n_sectors,
1288 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
1289 } else {
1290 /* CHS */
1291
1292 /* Default translation */
1293 dev->cylinders = dev->id[1];
1294 dev->heads = dev->id[3];
1295 dev->sectors = dev->id[6];
1296 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1297
1298 if (ata_id_current_chs_valid(dev->id)) {
1299 /* Current CHS translation is valid. */
1300 dev->cylinders = dev->id[54];
1301 dev->heads = dev->id[55];
1302 dev->sectors = dev->id[56];
1303
1304 dev->n_sectors = ata_id_u32(dev->id, 57);
1305 }
1306
1307 /* print device info to dmesg */
1308 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
1309 ap->id, device,
1310 major_version,
1311 ata_mode_string(xfer_modes),
1312 (unsigned long long)dev->n_sectors,
1313 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
1264 1314
1265 if (ata_id_has_lba48(dev->id)) {
1266 dev->flags |= ATA_DFLAG_LBA48;
1267 dev->n_sectors = ata_id_u64(dev->id, 100);
1268 } else {
1269 dev->n_sectors = ata_id_u32(dev->id, 60);
1270 } 1315 }
1271 1316
1272 ap->host->max_cmd_len = 16; 1317 ap->host->max_cmd_len = 16;
1273
1274 /* print device info to dmesg */
1275 printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1276 ap->id, device,
1277 ata_mode_string(xfer_modes),
1278 (unsigned long long)dev->n_sectors,
1279 dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
1280 } 1318 }
1281 1319
1282 /* ATAPI-specific feature tests */ 1320 /* ATAPI-specific feature tests */
@@ -2144,6 +2182,54 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2144} 2182}
2145 2183
2146/** 2184/**
2185 * ata_dev_init_params - Issue INIT DEV PARAMS command
2186 * @ap: Port associated with device @dev
2187 * @dev: Device to which command will be sent
2188 *
2189 * LOCKING:
2190 */
2191
2192static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2193{
2194 DECLARE_COMPLETION(wait);
2195 struct ata_queued_cmd *qc;
2196 int rc;
2197 unsigned long flags;
2198 u16 sectors = dev->id[6];
2199 u16 heads = dev->id[3];
2200
2201 /* Number of sectors per track 1-255. Number of heads 1-16 */
2202 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2203 return;
2204
2205 /* set up init dev params taskfile */
2206 DPRINTK("init dev params \n");
2207
2208 qc = ata_qc_new_init(ap, dev);
2209 BUG_ON(qc == NULL);
2210
2211 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
2212 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2213 qc->tf.protocol = ATA_PROT_NODATA;
2214 qc->tf.nsect = sectors;
2215 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2216
2217 qc->waiting = &wait;
2218 qc->complete_fn = ata_qc_complete_noop;
2219
2220 spin_lock_irqsave(&ap->host_set->lock, flags);
2221 rc = ata_qc_issue(qc);
2222 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2223
2224 if (rc)
2225 ata_port_disable(ap);
2226 else
2227 wait_for_completion(&wait);
2228
2229 DPRINTK("EXIT\n");
2230}
2231
2232/**
2147 * ata_sg_clean - Unmap DMA memory associated with command 2233 * ata_sg_clean - Unmap DMA memory associated with command
2148 * @qc: Command containing DMA memory to be released 2234 * @qc: Command containing DMA memory to be released
2149 * 2235 *
@@ -2425,20 +2511,20 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2425static unsigned long ata_pio_poll(struct ata_port *ap) 2511static unsigned long ata_pio_poll(struct ata_port *ap)
2426{ 2512{
2427 u8 status; 2513 u8 status;
2428 unsigned int poll_state = PIO_ST_UNKNOWN; 2514 unsigned int poll_state = HSM_ST_UNKNOWN;
2429 unsigned int reg_state = PIO_ST_UNKNOWN; 2515 unsigned int reg_state = HSM_ST_UNKNOWN;
2430 const unsigned int tmout_state = PIO_ST_TMOUT; 2516 const unsigned int tmout_state = HSM_ST_TMOUT;
2431 2517
2432 switch (ap->pio_task_state) { 2518 switch (ap->hsm_task_state) {
2433 case PIO_ST: 2519 case HSM_ST:
2434 case PIO_ST_POLL: 2520 case HSM_ST_POLL:
2435 poll_state = PIO_ST_POLL; 2521 poll_state = HSM_ST_POLL;
2436 reg_state = PIO_ST; 2522 reg_state = HSM_ST;
2437 break; 2523 break;
2438 case PIO_ST_LAST: 2524 case HSM_ST_LAST:
2439 case PIO_ST_LAST_POLL: 2525 case HSM_ST_LAST_POLL:
2440 poll_state = PIO_ST_LAST_POLL; 2526 poll_state = HSM_ST_LAST_POLL;
2441 reg_state = PIO_ST_LAST; 2527 reg_state = HSM_ST_LAST;
2442 break; 2528 break;
2443 default: 2529 default:
2444 BUG(); 2530 BUG();
@@ -2448,14 +2534,14 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2448 status = ata_chk_status(ap); 2534 status = ata_chk_status(ap);
2449 if (status & ATA_BUSY) { 2535 if (status & ATA_BUSY) {
2450 if (time_after(jiffies, ap->pio_task_timeout)) { 2536 if (time_after(jiffies, ap->pio_task_timeout)) {
2451 ap->pio_task_state = tmout_state; 2537 ap->hsm_task_state = tmout_state;
2452 return 0; 2538 return 0;
2453 } 2539 }
2454 ap->pio_task_state = poll_state; 2540 ap->hsm_task_state = poll_state;
2455 return ATA_SHORT_PAUSE; 2541 return ATA_SHORT_PAUSE;
2456 } 2542 }
2457 2543
2458 ap->pio_task_state = reg_state; 2544 ap->hsm_task_state = reg_state;
2459 return 0; 2545 return 0;
2460} 2546}
2461 2547
@@ -2480,14 +2566,14 @@ static int ata_pio_complete (struct ata_port *ap)
2480 * we enter, BSY will be cleared in a chk-status or two. If not, 2566 * we enter, BSY will be cleared in a chk-status or two. If not,
2481 * the drive is probably seeking or something. Snooze for a couple 2567 * the drive is probably seeking or something. Snooze for a couple
2482 * msecs, then chk-status again. If still busy, fall back to 2568 * msecs, then chk-status again. If still busy, fall back to
2483 * PIO_ST_POLL state. 2569 * HSM_ST_POLL state.
2484 */ 2570 */
2485 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2571 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2486 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2572 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2487 msleep(2); 2573 msleep(2);
2488 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2574 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2489 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2575 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2490 ap->pio_task_state = PIO_ST_LAST_POLL; 2576 ap->hsm_task_state = HSM_ST_LAST_POLL;
2491 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2577 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2492 return 0; 2578 return 0;
2493 } 2579 }
@@ -2495,14 +2581,14 @@ static int ata_pio_complete (struct ata_port *ap)
2495 2581
2496 drv_stat = ata_wait_idle(ap); 2582 drv_stat = ata_wait_idle(ap);
2497 if (!ata_ok(drv_stat)) { 2583 if (!ata_ok(drv_stat)) {
2498 ap->pio_task_state = PIO_ST_ERR; 2584 ap->hsm_task_state = HSM_ST_ERR;
2499 return 0; 2585 return 0;
2500 } 2586 }
2501 2587
2502 qc = ata_qc_from_tag(ap, ap->active_tag); 2588 qc = ata_qc_from_tag(ap, ap->active_tag);
2503 assert(qc != NULL); 2589 assert(qc != NULL);
2504 2590
2505 ap->pio_task_state = PIO_ST_IDLE; 2591 ap->hsm_task_state = HSM_ST_IDLE;
2506 2592
2507 ata_poll_qc_complete(qc, drv_stat); 2593 ata_poll_qc_complete(qc, drv_stat);
2508 2594
@@ -2662,7 +2748,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
2662 unsigned char *buf; 2748 unsigned char *buf;
2663 2749
2664 if (qc->cursect == (qc->nsect - 1)) 2750 if (qc->cursect == (qc->nsect - 1))
2665 ap->pio_task_state = PIO_ST_LAST; 2751 ap->hsm_task_state = HSM_ST_LAST;
2666 2752
2667 page = sg[qc->cursg].page; 2753 page = sg[qc->cursg].page;
2668 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; 2754 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
@@ -2712,7 +2798,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2712 unsigned int offset, count; 2798 unsigned int offset, count;
2713 2799
2714 if (qc->curbytes + bytes >= qc->nbytes) 2800 if (qc->curbytes + bytes >= qc->nbytes)
2715 ap->pio_task_state = PIO_ST_LAST; 2801 ap->hsm_task_state = HSM_ST_LAST;
2716 2802
2717next_sg: 2803next_sg:
2718 if (unlikely(qc->cursg >= qc->n_elem)) { 2804 if (unlikely(qc->cursg >= qc->n_elem)) {
@@ -2734,7 +2820,7 @@ next_sg:
2734 for (i = 0; i < words; i++) 2820 for (i = 0; i < words; i++)
2735 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 2821 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
2736 2822
2737 ap->pio_task_state = PIO_ST_LAST; 2823 ap->hsm_task_state = HSM_ST_LAST;
2738 return; 2824 return;
2739 } 2825 }
2740 2826
@@ -2815,7 +2901,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2815err_out: 2901err_out:
2816 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 2902 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2817 ap->id, dev->devno); 2903 ap->id, dev->devno);
2818 ap->pio_task_state = PIO_ST_ERR; 2904 ap->hsm_task_state = HSM_ST_ERR;
2819} 2905}
2820 2906
2821/** 2907/**
@@ -2837,14 +2923,14 @@ static void ata_pio_block(struct ata_port *ap)
2837 * a chk-status or two. If not, the drive is probably seeking 2923 * a chk-status or two. If not, the drive is probably seeking
2838 * or something. Snooze for a couple msecs, then 2924 * or something. Snooze for a couple msecs, then
2839 * chk-status again. If still busy, fall back to 2925 * chk-status again. If still busy, fall back to
2840 * PIO_ST_POLL state. 2926 * HSM_ST_POLL state.
2841 */ 2927 */
2842 status = ata_busy_wait(ap, ATA_BUSY, 5); 2928 status = ata_busy_wait(ap, ATA_BUSY, 5);
2843 if (status & ATA_BUSY) { 2929 if (status & ATA_BUSY) {
2844 msleep(2); 2930 msleep(2);
2845 status = ata_busy_wait(ap, ATA_BUSY, 10); 2931 status = ata_busy_wait(ap, ATA_BUSY, 10);
2846 if (status & ATA_BUSY) { 2932 if (status & ATA_BUSY) {
2847 ap->pio_task_state = PIO_ST_POLL; 2933 ap->hsm_task_state = HSM_ST_POLL;
2848 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2934 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2849 return; 2935 return;
2850 } 2936 }
@@ -2856,7 +2942,7 @@ static void ata_pio_block(struct ata_port *ap)
2856 if (is_atapi_taskfile(&qc->tf)) { 2942 if (is_atapi_taskfile(&qc->tf)) {
2857 /* no more data to transfer or unsupported ATAPI command */ 2943 /* no more data to transfer or unsupported ATAPI command */
2858 if ((status & ATA_DRQ) == 0) { 2944 if ((status & ATA_DRQ) == 0) {
2859 ap->pio_task_state = PIO_ST_LAST; 2945 ap->hsm_task_state = HSM_ST_LAST;
2860 return; 2946 return;
2861 } 2947 }
2862 2948
@@ -2864,7 +2950,7 @@ static void ata_pio_block(struct ata_port *ap)
2864 } else { 2950 } else {
2865 /* handle BSY=0, DRQ=0 as error */ 2951 /* handle BSY=0, DRQ=0 as error */
2866 if ((status & ATA_DRQ) == 0) { 2952 if ((status & ATA_DRQ) == 0) {
2867 ap->pio_task_state = PIO_ST_ERR; 2953 ap->hsm_task_state = HSM_ST_ERR;
2868 return; 2954 return;
2869 } 2955 }
2870 2956
@@ -2884,7 +2970,7 @@ static void ata_pio_error(struct ata_port *ap)
2884 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n", 2970 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2885 ap->id, drv_stat); 2971 ap->id, drv_stat);
2886 2972
2887 ap->pio_task_state = PIO_ST_IDLE; 2973 ap->hsm_task_state = HSM_ST_IDLE;
2888 2974
2889 ata_poll_qc_complete(qc, drv_stat | ATA_ERR); 2975 ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
2890} 2976}
@@ -2899,25 +2985,25 @@ fsm_start:
2899 timeout = 0; 2985 timeout = 0;
2900 qc_completed = 0; 2986 qc_completed = 0;
2901 2987
2902 switch (ap->pio_task_state) { 2988 switch (ap->hsm_task_state) {
2903 case PIO_ST_IDLE: 2989 case HSM_ST_IDLE:
2904 return; 2990 return;
2905 2991
2906 case PIO_ST: 2992 case HSM_ST:
2907 ata_pio_block(ap); 2993 ata_pio_block(ap);
2908 break; 2994 break;
2909 2995
2910 case PIO_ST_LAST: 2996 case HSM_ST_LAST:
2911 qc_completed = ata_pio_complete(ap); 2997 qc_completed = ata_pio_complete(ap);
2912 break; 2998 break;
2913 2999
2914 case PIO_ST_POLL: 3000 case HSM_ST_POLL:
2915 case PIO_ST_LAST_POLL: 3001 case HSM_ST_LAST_POLL:
2916 timeout = ata_pio_poll(ap); 3002 timeout = ata_pio_poll(ap);
2917 break; 3003 break;
2918 3004
2919 case PIO_ST_TMOUT: 3005 case HSM_ST_TMOUT:
2920 case PIO_ST_ERR: 3006 case HSM_ST_ERR:
2921 ata_pio_error(ap); 3007 ata_pio_error(ap);
2922 return; 3008 return;
2923 } 3009 }
@@ -2928,52 +3014,6 @@ fsm_start:
2928 goto fsm_start; 3014 goto fsm_start;
2929} 3015}
2930 3016
2931static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2932 struct scsi_cmnd *cmd)
2933{
2934 DECLARE_COMPLETION(wait);
2935 struct ata_queued_cmd *qc;
2936 unsigned long flags;
2937 int rc;
2938
2939 DPRINTK("ATAPI request sense\n");
2940
2941 qc = ata_qc_new_init(ap, dev);
2942 BUG_ON(qc == NULL);
2943
2944 /* FIXME: is this needed? */
2945 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2946
2947 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2948 qc->dma_dir = DMA_FROM_DEVICE;
2949
2950 memset(&qc->cdb, 0, ap->cdb_len);
2951 qc->cdb[0] = REQUEST_SENSE;
2952 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2953
2954 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2955 qc->tf.command = ATA_CMD_PACKET;
2956
2957 qc->tf.protocol = ATA_PROT_ATAPI;
2958 qc->tf.lbam = (8 * 1024) & 0xff;
2959 qc->tf.lbah = (8 * 1024) >> 8;
2960 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2961
2962 qc->waiting = &wait;
2963 qc->complete_fn = ata_qc_complete_noop;
2964
2965 spin_lock_irqsave(&ap->host_set->lock, flags);
2966 rc = ata_qc_issue(qc);
2967 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2968
2969 if (rc)
2970 ata_port_disable(ap);
2971 else
2972 wait_for_completion(&wait);
2973
2974 DPRINTK("EXIT\n");
2975}
2976
2977/** 3017/**
2978 * ata_qc_timeout - Handle timeout of queued command 3018 * ata_qc_timeout - Handle timeout of queued command
2979 * @qc: Command that timed out 3019 * @qc: Command that timed out
@@ -3091,14 +3131,14 @@ void ata_eng_timeout(struct ata_port *ap)
3091 DPRINTK("ENTER\n"); 3131 DPRINTK("ENTER\n");
3092 3132
3093 qc = ata_qc_from_tag(ap, ap->active_tag); 3133 qc = ata_qc_from_tag(ap, ap->active_tag);
3094 if (!qc) { 3134 if (qc)
3135 ata_qc_timeout(qc);
3136 else {
3095 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 3137 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3096 ap->id); 3138 ap->id);
3097 goto out; 3139 goto out;
3098 } 3140 }
3099 3141
3100 ata_qc_timeout(qc);
3101
3102out: 3142out:
3103 DPRINTK("EXIT\n"); 3143 DPRINTK("EXIT\n");
3104} 3144}
@@ -3156,14 +3196,18 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3156 3196
3157 ata_tf_init(ap, &qc->tf, dev->devno); 3197 ata_tf_init(ap, &qc->tf, dev->devno);
3158 3198
3159 if (dev->flags & ATA_DFLAG_LBA48) 3199 if (dev->flags & ATA_DFLAG_LBA) {
3160 qc->tf.flags |= ATA_TFLAG_LBA48; 3200 qc->tf.flags |= ATA_TFLAG_LBA;
3201
3202 if (dev->flags & ATA_DFLAG_LBA48)
3203 qc->tf.flags |= ATA_TFLAG_LBA48;
3204 }
3161 } 3205 }
3162 3206
3163 return qc; 3207 return qc;
3164} 3208}
3165 3209
3166static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat) 3210int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
3167{ 3211{
3168 return 0; 3212 return 0;
3169} 3213}
@@ -3360,7 +3404,7 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3360 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 3404 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3361 ata_qc_set_polling(qc); 3405 ata_qc_set_polling(qc);
3362 ata_tf_to_host_nolock(ap, &qc->tf); 3406 ata_tf_to_host_nolock(ap, &qc->tf);
3363 ap->pio_task_state = PIO_ST; 3407 ap->hsm_task_state = HSM_ST;
3364 queue_work(ata_wq, &ap->pio_task); 3408 queue_work(ata_wq, &ap->pio_task);
3365 break; 3409 break;
3366 3410
@@ -3586,7 +3630,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
3586 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; 3630 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3587 host_stat = readb(mmio + ATA_DMA_STATUS); 3631 host_stat = readb(mmio + ATA_DMA_STATUS);
3588 } else 3632 } else
3589 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 3633 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3590 return host_stat; 3634 return host_stat;
3591} 3635}
3592 3636
@@ -3806,7 +3850,7 @@ static void atapi_packet_task(void *_data)
3806 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 3850 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3807 3851
3808 /* PIO commands are handled by polling */ 3852 /* PIO commands are handled by polling */
3809 ap->pio_task_state = PIO_ST; 3853 ap->hsm_task_state = HSM_ST;
3810 queue_work(ata_wq, &ap->pio_task); 3854 queue_work(ata_wq, &ap->pio_task);
3811 } 3855 }
3812 3856
@@ -4113,7 +4157,7 @@ int ata_device_add(struct ata_probe_ent *ent)
4113 for (i = 0; i < count; i++) { 4157 for (i = 0; i < count; i++) {
4114 struct ata_port *ap = host_set->ports[i]; 4158 struct ata_port *ap = host_set->ports[i];
4115 4159
4116 scsi_scan_host(ap->host); 4160 ata_scsi_scan_host(ap);
4117 } 4161 }
4118 4162
4119 dev_set_drvdata(dev, host_set); 4163 dev_set_drvdata(dev, host_set);
@@ -4273,85 +4317,87 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4273 * ata_pci_init_native_mode - Initialize native-mode driver 4317 * ata_pci_init_native_mode - Initialize native-mode driver
4274 * @pdev: pci device to be initialized 4318 * @pdev: pci device to be initialized
4275 * @port: array[2] of pointers to port info structures. 4319 * @port: array[2] of pointers to port info structures.
4320 * @ports: bitmap of ports present
4276 * 4321 *
4277 * Utility function which allocates and initializes an 4322 * Utility function which allocates and initializes an
4278 * ata_probe_ent structure for a standard dual-port 4323 * ata_probe_ent structure for a standard dual-port
4279 * PIO-based IDE controller. The returned ata_probe_ent 4324 * PIO-based IDE controller. The returned ata_probe_ent
4280 * structure can be passed to ata_device_add(). The returned 4325 * structure can be passed to ata_device_add(). The returned
4281 * ata_probe_ent structure should then be freed with kfree(). 4326 * ata_probe_ent structure should then be freed with kfree().
4327 *
 4328 * The caller need only pass the address of the primary port; the
 4329 * secondary will be deduced automatically. If the device has
 4330 * non-standard secondary port mappings this function can be called
 4331 * twice, once for each interface.
4282 */ 4332 */
4283 4333
4284struct ata_probe_ent * 4334struct ata_probe_ent *
4285ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port) 4335ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4286{ 4336{
4287 struct ata_probe_ent *probe_ent = 4337 struct ata_probe_ent *probe_ent =
4288 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4338 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4339 int p = 0;
4340
4289 if (!probe_ent) 4341 if (!probe_ent)
4290 return NULL; 4342 return NULL;
4291 4343
4292 probe_ent->n_ports = 2;
4293 probe_ent->irq = pdev->irq; 4344 probe_ent->irq = pdev->irq;
4294 probe_ent->irq_flags = SA_SHIRQ; 4345 probe_ent->irq_flags = SA_SHIRQ;
4295 4346
4296 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); 4347 if (ports & ATA_PORT_PRIMARY) {
4297 probe_ent->port[0].altstatus_addr = 4348 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4298 probe_ent->port[0].ctl_addr = 4349 probe_ent->port[p].altstatus_addr =
4299 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; 4350 probe_ent->port[p].ctl_addr =
4300 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); 4351 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4301 4352 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4302 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); 4353 ata_std_ports(&probe_ent->port[p]);
4303 probe_ent->port[1].altstatus_addr = 4354 p++;
4304 probe_ent->port[1].ctl_addr = 4355 }
4305 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4306 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4307 4356
4308 ata_std_ports(&probe_ent->port[0]); 4357 if (ports & ATA_PORT_SECONDARY) {
4309 ata_std_ports(&probe_ent->port[1]); 4358 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4359 probe_ent->port[p].altstatus_addr =
4360 probe_ent->port[p].ctl_addr =
4361 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4362 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4363 ata_std_ports(&probe_ent->port[p]);
4364 p++;
4365 }
4310 4366
4367 probe_ent->n_ports = p;
4311 return probe_ent; 4368 return probe_ent;
4312} 4369}
4313 4370
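The ports bitmap lets a driver with non-standard mappings bring up only one channel; enabled channels are compacted into consecutive probe_ent->port[] slots via the running index p. A standalone sketch of that compaction, assuming ATA_PORT_PRIMARY and ATA_PORT_SECONDARY are the bit-0/bit-1 flags defined elsewhere in this patch:

#include <stdio.h>

enum { PORT_PRIMARY = 1 << 0, PORT_SECONDARY = 1 << 1 };  /* assumed values */

int main(void)
{
	unsigned int ports = PORT_SECONDARY;	/* e.g. only the second channel */
	int map[2], p = 0;

	if (ports & PORT_PRIMARY)
		map[p++] = 0;	/* hardware channel 0 -> probe_ent->port[0] */
	if (ports & PORT_SECONDARY)
		map[p++] = 1;	/* lands in slot 0 if primary is absent */

	printf("n_ports = %d\n", p);
	for (int i = 0; i < p; i++)
		printf("port[%d] uses hardware channel %d\n", i, map[i]);
	return 0;
}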
4314static struct ata_probe_ent * 4371static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info **port, int port_num)
4315ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4316 struct ata_probe_ent **ppe2)
4317{ 4372{
4318 struct ata_probe_ent *probe_ent, *probe_ent2; 4373 struct ata_probe_ent *probe_ent;
4319 4374
4320 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4375 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4321 if (!probe_ent) 4376 if (!probe_ent)
4322 return NULL; 4377 return NULL;
4323 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
4324 if (!probe_ent2) {
4325 kfree(probe_ent);
4326 return NULL;
4327 }
4328 4378
4329 probe_ent->n_ports = 1; 4379
4330 probe_ent->irq = 14;
4331
4332 probe_ent->hard_port_no = 0;
4333 probe_ent->legacy_mode = 1; 4380 probe_ent->legacy_mode = 1;
4334 4381 probe_ent->n_ports = 1;
4335 probe_ent2->n_ports = 1; 4382 probe_ent->hard_port_no = port_num;
4336 probe_ent2->irq = 15; 4383
4337 4384 switch(port_num)
4338 probe_ent2->hard_port_no = 1; 4385 {
4339 probe_ent2->legacy_mode = 1; 4386 case 0:
4340 4387 probe_ent->irq = 14;
4341 probe_ent->port[0].cmd_addr = 0x1f0; 4388 probe_ent->port[0].cmd_addr = 0x1f0;
4342 probe_ent->port[0].altstatus_addr = 4389 probe_ent->port[0].altstatus_addr =
4343 probe_ent->port[0].ctl_addr = 0x3f6; 4390 probe_ent->port[0].ctl_addr = 0x3f6;
4344 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); 4391 break;
4345 4392 case 1:
4346 probe_ent2->port[0].cmd_addr = 0x170; 4393 probe_ent->irq = 15;
4347 probe_ent2->port[0].altstatus_addr = 4394 probe_ent->port[0].cmd_addr = 0x170;
4348 probe_ent2->port[0].ctl_addr = 0x376; 4395 probe_ent->port[0].altstatus_addr =
4349 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8; 4396 probe_ent->port[0].ctl_addr = 0x376;
4350 4397 break;
4398 }
4399 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4351 ata_std_ports(&probe_ent->port[0]); 4400 ata_std_ports(&probe_ent->port[0]);
4352 ata_std_ports(&probe_ent2->port[0]);
4353
4354 *ppe2 = probe_ent2;
4355 return probe_ent; 4401 return probe_ent;
4356} 4402}
4357 4403
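Each legacy channel gets the canonical ISA resources: IRQ 14 with command block 0x1f0 and control 0x3f6 for the primary, IRQ 15 with 0x170/0x376 for the secondary, plus an 8-byte slice of the BAR 4 BMDMA range. A table-driven sketch of the same lookup (the BAR 4 base below is a made-up value standing in for pci_resource_start(pdev, 4)):

#include <stdio.h>

struct legacy_rsrc { int irq; unsigned int cmd, ctl; };

/* Canonical ISA resources hardwired by ata_pci_init_legacy_port() above. */
static const struct legacy_rsrc legacy[2] = {
	{ 14, 0x1f0, 0x3f6 },	/* primary   */
	{ 15, 0x170, 0x376 },	/* secondary */
};

int main(void)
{
	unsigned int bar4 = 0xf000;	/* hypothetical BMDMA base */

	for (int ch = 0; ch < 2; ch++)
		printf("ch%d: irq %d cmd 0x%x ctl 0x%x bmdma 0x%x\n",
		       ch, legacy[ch].irq, legacy[ch].cmd, legacy[ch].ctl,
		       bar4 + 8 * ch);
	return 0;
}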
@@ -4380,7 +4426,7 @@ ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4380int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 4426int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4381 unsigned int n_ports) 4427 unsigned int n_ports)
4382{ 4428{
4383 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL; 4429 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4384 struct ata_port_info *port[2]; 4430 struct ata_port_info *port[2];
4385 u8 tmp8, mask; 4431 u8 tmp8, mask;
4386 unsigned int legacy_mode = 0; 4432 unsigned int legacy_mode = 0;
@@ -4397,7 +4443,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4397 4443
4398 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0 4444 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4399 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 4445 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4400 /* TODO: support transitioning to native mode? */ 4446 /* TODO: What if one channel is in native mode ... */
4401 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); 4447 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4402 mask = (1 << 2) | (1 << 0); 4448 mask = (1 << 2) | (1 << 0);
4403 if ((tmp8 & mask) != mask) 4449 if ((tmp8 & mask) != mask)
@@ -4405,11 +4451,20 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4405 } 4451 }
4406 4452
4407 /* FIXME... */ 4453 /* FIXME... */
4408 if ((!legacy_mode) && (n_ports > 1)) { 4454 if ((!legacy_mode) && (n_ports > 2)) {
4409 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n"); 4455 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4410 return -EINVAL; 4456 n_ports = 2;
4457 /* For now */
4411 } 4458 }
4412 4459
4460 /* FIXME: Really for ATA it isn't safe because the device may be
4461 multi-purpose and we want to leave it alone if it was already
 4462 enabled. Secondly, for shared use, as Arjan notes, we want refcounting.
4463
4464 Checking dev->is_enabled is insufficient as this is not set at
 4465 boot for the primary video device, which is BIOS-enabled.
4466 */
4467
4413 rc = pci_enable_device(pdev); 4468 rc = pci_enable_device(pdev);
4414 if (rc) 4469 if (rc)
4415 return rc; 4470 return rc;
@@ -4420,6 +4475,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4420 goto err_out; 4475 goto err_out;
4421 } 4476 }
4422 4477
4478 /* FIXME: Should use platform specific mappers for legacy port ranges */
4423 if (legacy_mode) { 4479 if (legacy_mode) {
4424 if (!request_region(0x1f0, 8, "libata")) { 4480 if (!request_region(0x1f0, 8, "libata")) {
4425 struct resource *conflict, res; 4481 struct resource *conflict, res;
@@ -4464,10 +4520,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4464 goto err_out_regions; 4520 goto err_out_regions;
4465 4521
4466 if (legacy_mode) { 4522 if (legacy_mode) {
4467 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2); 4523 if (legacy_mode & (1 << 0))
4468 } else 4524 probe_ent = ata_pci_init_legacy_port(pdev, port, 0);
4469 probe_ent = ata_pci_init_native_mode(pdev, port); 4525 if (legacy_mode & (1 << 1))
4470 if (!probe_ent) { 4526 probe_ent2 = ata_pci_init_legacy_port(pdev, port, 1);
4527 } else {
4528 if (n_ports == 2)
4529 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4530 else
4531 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4532 }
4533 if (!probe_ent && !probe_ent2) {
4471 rc = -ENOMEM; 4534 rc = -ENOMEM;
4472 goto err_out_regions; 4535 goto err_out_regions;
4473 } 4536 }
@@ -4579,6 +4642,27 @@ static void __exit ata_exit(void)
4579module_init(ata_init); 4642module_init(ata_init);
4580module_exit(ata_exit); 4643module_exit(ata_exit);
4581 4644
4645static unsigned long ratelimit_time;
4646static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4647
4648int ata_ratelimit(void)
4649{
4650 int rc;
4651 unsigned long flags;
4652
4653 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4654
4655 if (time_after(jiffies, ratelimit_time)) {
4656 rc = 1;
4657 ratelimit_time = jiffies + (HZ/5);
4658 } else
4659 rc = 0;
4660
4661 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4662
4663 return rc;
4664}
4665
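ata_ratelimit() grants at most one pass per HZ/5 jiffies (200 ms), with a spinlock serializing the timestamp update. A userspace analogue using CLOCK_MONOTONIC and a pthread mutex in place of jiffies and the spinlock (sketch only; the tight loop should report exactly one pass; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t rl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct timespec rl_next;		/* earliest time of the next pass */

/* Return 1 at most once per 200 ms, mirroring jiffies + HZ/5 above. */
static int ratelimit(void)
{
	struct timespec now;
	int rc = 0;

	clock_gettime(CLOCK_MONOTONIC, &now);

	pthread_mutex_lock(&rl_lock);
	if (now.tv_sec > rl_next.tv_sec ||
	    (now.tv_sec == rl_next.tv_sec && now.tv_nsec >= rl_next.tv_nsec)) {
		rc = 1;
		rl_next = now;
		rl_next.tv_nsec += 200 * 1000000L;	/* HZ/5 == 200 ms */
		if (rl_next.tv_nsec >= 1000000000L) {
			rl_next.tv_sec++;
			rl_next.tv_nsec -= 1000000000L;
		}
	}
	pthread_mutex_unlock(&rl_lock);
	return rc;
}

int main(void)
{
	int passed = 0;

	for (int i = 0; i < 1000000; i++)
		passed += ratelimit();
	printf("passed %d of 1000000 calls\n", passed);	/* expect 1 */
	return 0;
}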
4582/* 4666/*
4583 * libata is essentially a library of internal helper functions for 4667 * libata is essentially a library of internal helper functions for
4584 * low-level ATA host controller drivers. As such, the API/ABI is 4668 * low-level ATA host controller drivers. As such, the API/ABI is
@@ -4620,6 +4704,7 @@ EXPORT_SYMBOL_GPL(sata_phy_reset);
4620EXPORT_SYMBOL_GPL(__sata_phy_reset); 4704EXPORT_SYMBOL_GPL(__sata_phy_reset);
4621EXPORT_SYMBOL_GPL(ata_bus_reset); 4705EXPORT_SYMBOL_GPL(ata_bus_reset);
4622EXPORT_SYMBOL_GPL(ata_port_disable); 4706EXPORT_SYMBOL_GPL(ata_port_disable);
4707EXPORT_SYMBOL_GPL(ata_ratelimit);
4623EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 4708EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4624EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 4709EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4625EXPORT_SYMBOL_GPL(ata_scsi_error); 4710EXPORT_SYMBOL_GPL(ata_scsi_error);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 104fd9a63e73..1c3a10fb3c44 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -225,7 +225,7 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
225 }; 225 };
226 int i = 0; 226 int i = 0;
227 227
228 cmd->result = SAM_STAT_CHECK_CONDITION; 228 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
229 229
230 /* 230 /*
231 * Is this an error we can process/parse 231 * Is this an error we can process/parse
@@ -435,10 +435,21 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
435 return 1; /* power conditions not supported */ 435 return 1; /* power conditions not supported */
436 if (scsicmd[4] & 0x1) { 436 if (scsicmd[4] & 0x1) {
437 tf->nsect = 1; /* 1 sector, lba=0 */ 437 tf->nsect = 1; /* 1 sector, lba=0 */
438 tf->lbah = 0x0; 438
439 tf->lbam = 0x0; 439 if (qc->dev->flags & ATA_DFLAG_LBA) {
440 tf->lbal = 0x0; 440 qc->tf.flags |= ATA_TFLAG_LBA;
441 tf->device |= ATA_LBA; 441
442 tf->lbah = 0x0;
443 tf->lbam = 0x0;
444 tf->lbal = 0x0;
445 tf->device |= ATA_LBA;
446 } else {
447 /* CHS */
448 tf->lbal = 0x1; /* sect */
449 tf->lbam = 0x0; /* cyl low */
450 tf->lbah = 0x0; /* cyl high */
451 }
452
442 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ 453 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
443 } else { 454 } else {
444 tf->nsect = 0; /* time period value (0 implies now) */ 455 tf->nsect = 0; /* time period value (0 implies now) */
@@ -488,6 +499,99 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
488} 499}
489 500
490/** 501/**
502 * scsi_6_lba_len - Get LBA and transfer length
503 * @scsicmd: SCSI command to translate
504 *
505 * Calculate LBA and transfer length for 6-byte commands.
506 *
 507 * Output:
508 * @plba: the LBA
509 * @plen: the transfer length
510 */
511
512static void scsi_6_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
513{
514 u64 lba = 0;
515 u32 len = 0;
516
517 VPRINTK("six-byte command\n");
518
519 lba |= ((u64)scsicmd[2]) << 8;
520 lba |= ((u64)scsicmd[3]);
521
522 len |= ((u32)scsicmd[4]);
523
524 *plba = lba;
525 *plen = len;
526}
527
528/**
529 * scsi_10_lba_len - Get LBA and transfer length
530 * @scsicmd: SCSI command to translate
531 *
532 * Calculate LBA and transfer length for 10-byte commands.
533 *
 534 * Output:
535 * @plba: the LBA
536 * @plen: the transfer length
537 */
538
539static void scsi_10_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
540{
541 u64 lba = 0;
542 u32 len = 0;
543
544 VPRINTK("ten-byte command\n");
545
546 lba |= ((u64)scsicmd[2]) << 24;
547 lba |= ((u64)scsicmd[3]) << 16;
548 lba |= ((u64)scsicmd[4]) << 8;
549 lba |= ((u64)scsicmd[5]);
550
551 len |= ((u32)scsicmd[7]) << 8;
552 len |= ((u32)scsicmd[8]);
553
554 *plba = lba;
555 *plen = len;
556}
557
558/**
559 * scsi_16_lba_len - Get LBA and transfer length
560 * @scsicmd: SCSI command to translate
561 *
562 * Calculate LBA and transfer length for 16-byte commands.
563 *
 564 * Output:
565 * @plba: the LBA
566 * @plen: the transfer length
567 */
568
569static void scsi_16_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
570{
571 u64 lba = 0;
572 u32 len = 0;
573
574 VPRINTK("sixteen-byte command\n");
575
576 lba |= ((u64)scsicmd[2]) << 56;
577 lba |= ((u64)scsicmd[3]) << 48;
578 lba |= ((u64)scsicmd[4]) << 40;
579 lba |= ((u64)scsicmd[5]) << 32;
580 lba |= ((u64)scsicmd[6]) << 24;
581 lba |= ((u64)scsicmd[7]) << 16;
582 lba |= ((u64)scsicmd[8]) << 8;
583 lba |= ((u64)scsicmd[9]);
584
585 len |= ((u32)scsicmd[10]) << 24;
586 len |= ((u32)scsicmd[11]) << 16;
587 len |= ((u32)scsicmd[12]) << 8;
588 len |= ((u32)scsicmd[13]);
589
590 *plba = lba;
591 *plen = len;
592}
593
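All three helpers simply reassemble the big-endian LBA and transfer-length fields from their fixed offsets in the CDB. A self-checking sketch of the 10-byte case:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same big-endian extraction as scsi_10_lba_len() above. */
static void cdb10_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
{
	*lba = ((uint64_t)cdb[2] << 24) | ((uint64_t)cdb[3] << 16) |
	       ((uint64_t)cdb[4] << 8)  |  (uint64_t)cdb[5];
	*len = ((uint32_t)cdb[7] << 8) | cdb[8];
}

int main(void)
{
	/* READ(10) at LBA 0x01020304, 0x0010 blocks */
	uint8_t cdb[10] = { 0x28, 0, 0x01, 0x02, 0x03, 0x04, 0, 0x00, 0x10, 0 };
	uint64_t lba;
	uint32_t len;

	cdb10_lba_len(cdb, &lba, &len);
	assert(lba == 0x01020304 && len == 0x10);
	printf("lba=%llu len=%u\n", (unsigned long long)lba, len);
	return 0;
}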
594/**
491 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one 595 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
492 * @qc: Storage for translated ATA taskfile 596 * @qc: Storage for translated ATA taskfile
493 * @scsicmd: SCSI command to translate 597 * @scsicmd: SCSI command to translate
@@ -504,77 +608,86 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
504static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 608static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
505{ 609{
506 struct ata_taskfile *tf = &qc->tf; 610 struct ata_taskfile *tf = &qc->tf;
611 struct ata_device *dev = qc->dev;
612 unsigned int lba = tf->flags & ATA_TFLAG_LBA;
507 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; 613 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
508 u64 dev_sectors = qc->dev->n_sectors; 614 u64 dev_sectors = qc->dev->n_sectors;
509 u64 sect = 0; 615 u64 block;
510 u32 n_sect = 0; 616 u32 n_block;
511 617
512 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 618 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
513 tf->protocol = ATA_PROT_NODATA; 619 tf->protocol = ATA_PROT_NODATA;
514 tf->device |= ATA_LBA;
515
516 if (scsicmd[0] == VERIFY) {
517 sect |= ((u64)scsicmd[2]) << 24;
518 sect |= ((u64)scsicmd[3]) << 16;
519 sect |= ((u64)scsicmd[4]) << 8;
520 sect |= ((u64)scsicmd[5]);
521
522 n_sect |= ((u32)scsicmd[7]) << 8;
523 n_sect |= ((u32)scsicmd[8]);
524 }
525
526 else if (scsicmd[0] == VERIFY_16) {
527 sect |= ((u64)scsicmd[2]) << 56;
528 sect |= ((u64)scsicmd[3]) << 48;
529 sect |= ((u64)scsicmd[4]) << 40;
530 sect |= ((u64)scsicmd[5]) << 32;
531 sect |= ((u64)scsicmd[6]) << 24;
532 sect |= ((u64)scsicmd[7]) << 16;
533 sect |= ((u64)scsicmd[8]) << 8;
534 sect |= ((u64)scsicmd[9]);
535
536 n_sect |= ((u32)scsicmd[10]) << 24;
537 n_sect |= ((u32)scsicmd[11]) << 16;
538 n_sect |= ((u32)scsicmd[12]) << 8;
539 n_sect |= ((u32)scsicmd[13]);
540 }
541 620
621 if (scsicmd[0] == VERIFY)
622 scsi_10_lba_len(scsicmd, &block, &n_block);
623 else if (scsicmd[0] == VERIFY_16)
624 scsi_16_lba_len(scsicmd, &block, &n_block);
542 else 625 else
543 return 1; 626 return 1;
544 627
545 if (!n_sect) 628 if (!n_block)
546 return 1; 629 return 1;
547 if (sect >= dev_sectors) 630 if (block >= dev_sectors)
548 return 1; 631 return 1;
549 if ((sect + n_sect) > dev_sectors) 632 if ((block + n_block) > dev_sectors)
550 return 1; 633 return 1;
551 if (lba48) { 634 if (lba48) {
552 if (n_sect > (64 * 1024)) 635 if (n_block > (64 * 1024))
553 return 1; 636 return 1;
554 } else { 637 } else {
555 if (n_sect > 256) 638 if (n_block > 256)
556 return 1; 639 return 1;
557 } 640 }
558 641
559 if (lba48) { 642 if (lba) {
560 tf->command = ATA_CMD_VERIFY_EXT; 643 if (lba48) {
644 tf->command = ATA_CMD_VERIFY_EXT;
561 645
562 tf->hob_nsect = (n_sect >> 8) & 0xff; 646 tf->hob_nsect = (n_block >> 8) & 0xff;
563 647
564 tf->hob_lbah = (sect >> 40) & 0xff; 648 tf->hob_lbah = (block >> 40) & 0xff;
565 tf->hob_lbam = (sect >> 32) & 0xff; 649 tf->hob_lbam = (block >> 32) & 0xff;
566 tf->hob_lbal = (sect >> 24) & 0xff; 650 tf->hob_lbal = (block >> 24) & 0xff;
567 } else { 651 } else {
568 tf->command = ATA_CMD_VERIFY; 652 tf->command = ATA_CMD_VERIFY;
569 653
570 tf->device |= (sect >> 24) & 0xf; 654 tf->device |= (block >> 24) & 0xf;
571 } 655 }
656
657 tf->nsect = n_block & 0xff;
572 658
573 tf->nsect = n_sect & 0xff; 659 tf->lbah = (block >> 16) & 0xff;
660 tf->lbam = (block >> 8) & 0xff;
661 tf->lbal = block & 0xff;
574 662
575 tf->lbah = (sect >> 16) & 0xff; 663 tf->device |= ATA_LBA;
576 tf->lbam = (sect >> 8) & 0xff; 664 } else {
577 tf->lbal = sect & 0xff; 665 /* CHS */
666 u32 sect, head, cyl, track;
667
668 /* Convert LBA to CHS */
669 track = (u32)block / dev->sectors;
670 cyl = track / dev->heads;
671 head = track % dev->heads;
672 sect = (u32)block % dev->sectors + 1;
673
674 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
675 (u32)block, track, cyl, head, sect);
676
677 /* Check whether the converted CHS can fit.
678 Cylinder: 0-65535
679 Head: 0-15
 680 Sector: 1-255 */
681 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
682 return 1;
683
684 tf->command = ATA_CMD_VERIFY;
685 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
686 tf->lbal = sect;
687 tf->lbam = cyl;
688 tf->lbah = cyl >> 8;
689 tf->device |= head;
690 }
578 691
579 return 0; 692 return 0;
580} 693}
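The CHS fallback divides the linear block address by the drive geometry and rejects anything the taskfile registers cannot represent. A standalone sketch of the conversion; the 16-head/63-sector geometry is an assumption for the example, and the real values come from IDENTIFY DEVICE:

#include <stdint.h>
#include <stdio.h>

/* LBA -> CHS split as in the fallback above; returns -1 if the result
 * cannot fit (cylinder 0-65535, head 0-15, sector 1-255).
 */
static int lba_to_chs(uint32_t block, uint32_t heads, uint32_t sectors,
		      uint32_t *cyl, uint32_t *head, uint32_t *sect)
{
	uint32_t track = block / sectors;

	*cyl  = track / heads;
	*head = track % heads;
	*sect = block % sectors + 1;

	if ((*cyl >> 16) || (*head >> 4) || (*sect >> 8) || !*sect)
		return -1;
	return 0;
}

int main(void)
{
	uint32_t cyl, head, sect;

	if (lba_to_chs(123456, 16, 63, &cyl, &head, &sect) == 0)
		printf("cyl %u head %u sect %u\n", cyl, head, sect);
	return 0;
}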
@@ -602,11 +715,14 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
602static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 715static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
603{ 716{
604 struct ata_taskfile *tf = &qc->tf; 717 struct ata_taskfile *tf = &qc->tf;
718 struct ata_device *dev = qc->dev;
719 unsigned int lba = tf->flags & ATA_TFLAG_LBA;
605 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; 720 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
721 u64 block;
722 u32 n_block;
606 723
607 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 724 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
608 tf->protocol = qc->dev->xfer_protocol; 725 tf->protocol = qc->dev->xfer_protocol;
609 tf->device |= ATA_LBA;
610 726
611 if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 || 727 if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 ||
612 scsicmd[0] == READ_16) { 728 scsicmd[0] == READ_16) {
@@ -616,90 +732,102 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
616 tf->flags |= ATA_TFLAG_WRITE; 732 tf->flags |= ATA_TFLAG_WRITE;
617 } 733 }
618 734
619 if (scsicmd[0] == READ_10 || scsicmd[0] == WRITE_10) { 735 /* Calculate the SCSI LBA and transfer length. */
620 if (lba48) { 736 switch (scsicmd[0]) {
621 tf->hob_nsect = scsicmd[7]; 737 case READ_10:
622 tf->hob_lbal = scsicmd[2]; 738 case WRITE_10:
623 739 scsi_10_lba_len(scsicmd, &block, &n_block);
624 qc->nsect = ((unsigned int)scsicmd[7] << 8) | 740 break;
625 scsicmd[8]; 741 case READ_6:
626 } else { 742 case WRITE_6:
627 /* if we don't support LBA48 addressing, the request 743 scsi_6_lba_len(scsicmd, &block, &n_block);
628 * -may- be too large. */
629 if ((scsicmd[2] & 0xf0) || scsicmd[7])
630 return 1;
631 744
632 /* stores LBA27:24 in lower 4 bits of device reg */ 745 /* for 6-byte r/w commands, transfer length 0
633 tf->device |= scsicmd[2]; 746 * means 256 blocks of data, not 0 block.
747 */
748 if (!n_block)
749 n_block = 256;
750 break;
751 case READ_16:
752 case WRITE_16:
753 scsi_16_lba_len(scsicmd, &block, &n_block);
754 break;
755 default:
756 DPRINTK("no-byte command\n");
757 return 1;
758 }
634 759
635 qc->nsect = scsicmd[8]; 760 /* Check and compose ATA command */
636 } 761 if (!n_block)
762 /* For 10-byte and 16-byte SCSI R/W commands, transfer
763 * length 0 means transfer 0 block of data.
764 * However, for ATA R/W commands, sector count 0 means
765 * 256 or 65536 sectors, not 0 sectors as in SCSI.
766 */
767 return 1;
637 768
638 tf->nsect = scsicmd[8]; 769 if (lba) {
639 tf->lbal = scsicmd[5]; 770 if (lba48) {
640 tf->lbam = scsicmd[4]; 771 /* The request -may- be too large for LBA48. */
641 tf->lbah = scsicmd[3]; 772 if ((block >> 48) || (n_block > 65536))
773 return 1;
642 774
643 VPRINTK("ten-byte command\n"); 775 tf->hob_nsect = (n_block >> 8) & 0xff;
644 if (qc->nsect == 0) /* we don't support length==0 cmds */
645 return 1;
646 return 0;
647 }
648 776
649 if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) { 777 tf->hob_lbah = (block >> 40) & 0xff;
650 qc->nsect = tf->nsect = scsicmd[4]; 778 tf->hob_lbam = (block >> 32) & 0xff;
651 if (!qc->nsect) { 779 tf->hob_lbal = (block >> 24) & 0xff;
652 qc->nsect = 256; 780 } else {
653 if (lba48) 781 /* LBA28 */
654 tf->hob_nsect = 1;
655 }
656 782
657 tf->lbal = scsicmd[3]; 783 /* The request -may- be too large for LBA28. */
658 tf->lbam = scsicmd[2]; 784 if ((block >> 28) || (n_block > 256))
659 tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */ 785 return 1;
660 786
661 VPRINTK("six-byte command\n"); 787 tf->device |= (block >> 24) & 0xf;
662 return 0; 788 }
663 }
664 789
665 if (scsicmd[0] == READ_16 || scsicmd[0] == WRITE_16) { 790 qc->nsect = n_block;
666 /* rule out impossible LBAs and sector counts */ 791 tf->nsect = n_block & 0xff;
667 if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11])
668 return 1;
669 792
670 if (lba48) { 793 tf->lbah = (block >> 16) & 0xff;
671 tf->hob_nsect = scsicmd[12]; 794 tf->lbam = (block >> 8) & 0xff;
672 tf->hob_lbal = scsicmd[6]; 795 tf->lbal = block & 0xff;
673 tf->hob_lbam = scsicmd[5];
674 tf->hob_lbah = scsicmd[4];
675 796
676 qc->nsect = ((unsigned int)scsicmd[12] << 8) | 797 tf->device |= ATA_LBA;
677 scsicmd[13]; 798 } else {
678 } else { 799 /* CHS */
679 /* once again, filter out impossible non-zero values */ 800 u32 sect, head, cyl, track;
680 if (scsicmd[4] || scsicmd[5] || scsicmd[12] ||
681 (scsicmd[6] & 0xf0))
682 return 1;
683 801
684 /* stores LBA27:24 in lower 4 bits of device reg */ 802 /* The request -may- be too large for CHS addressing. */
685 tf->device |= scsicmd[6]; 803 if ((block >> 28) || (n_block > 256))
804 return 1;
686 805
687 qc->nsect = scsicmd[13]; 806 /* Convert LBA to CHS */
688 } 807 track = (u32)block / dev->sectors;
808 cyl = track / dev->heads;
809 head = track % dev->heads;
810 sect = (u32)block % dev->sectors + 1;
689 811
690 tf->nsect = scsicmd[13]; 812 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
691 tf->lbal = scsicmd[9]; 813 (u32)block, track, cyl, head, sect);
692 tf->lbam = scsicmd[8];
693 tf->lbah = scsicmd[7];
694 814
695 VPRINTK("sixteen-byte command\n"); 815 /* Check whether the converted CHS can fit.
696 if (qc->nsect == 0) /* we don't support length==0 cmds */ 816 Cylinder: 0-65535
817 Head: 0-15
818 Sector: 1-255*/
819 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
697 return 1; 820 return 1;
698 return 0; 821
822 qc->nsect = n_block;
823 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
824 tf->lbal = sect;
825 tf->lbam = cyl;
826 tf->lbah = cyl >> 8;
827 tf->device |= head;
699 } 828 }
700 829
701 DPRINTK("no-byte command\n"); 830 return 0;
702 return 1;
703} 831}
704 832
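For LBA48 the upper address bytes (bits 47:24) land in the HOB registers, while LBA28 squeezes bits 27:24 into the low nibble of the device register; both variants share the low three LBA registers. A self-checking sketch of that register split (struct tf below is a stripped-down stand-in for ata_taskfile):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct tf {
	uint8_t hob_lbah, hob_lbam, hob_lbal;
	uint8_t lbah, lbam, lbal, device;
};

/* Spread a linear block address across the taskfile as done above;
 * returns -1 when the address exceeds the addressing mode's range.
 */
static int tf_set_lba(struct tf *tf, uint64_t block, int lba48)
{
	if (lba48) {
		if (block >> 48)
			return -1;
		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
	} else {
		if (block >> 28)
			return -1;
		tf->device |= (block >> 24) & 0xf;
	}
	tf->lbah = (block >> 16) & 0xff;
	tf->lbam = (block >> 8) & 0xff;
	tf->lbal = block & 0xff;
	return 0;
}

int main(void)
{
	struct tf tf = { 0 };

	assert(tf_set_lba(&tf, 0x123456789aULL, 1) == 0);
	assert(tf.hob_lbam == 0x12 && tf.hob_lbal == 0x34);
	assert(tf.lbah == 0x56 && tf.lbam == 0x78 && tf.lbal == 0x9a);
	printf("ok\n");
	return 0;
}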
705static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 833static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
@@ -1246,10 +1374,20 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
1246 1374
1247 VPRINTK("ENTER\n"); 1375 VPRINTK("ENTER\n");
1248 1376
1249 if (ata_id_has_lba48(args->id)) 1377 if (ata_id_has_lba(args->id)) {
1250 n_sectors = ata_id_u64(args->id, 100); 1378 if (ata_id_has_lba48(args->id))
1251 else 1379 n_sectors = ata_id_u64(args->id, 100);
1252 n_sectors = ata_id_u32(args->id, 60); 1380 else
1381 n_sectors = ata_id_u32(args->id, 60);
1382 } else {
1383 /* CHS default translation */
1384 n_sectors = args->id[1] * args->id[3] * args->id[6];
1385
1386 if (ata_id_current_chs_valid(args->id))
1387 /* CHS current translation */
1388 n_sectors = ata_id_u32(args->id, 57);
1389 }
1390
1253 n_sectors--; /* ATA TotalUserSectors - 1 */ 1391 n_sectors--; /* ATA TotalUserSectors - 1 */
1254 1392
1255 if (args->cmd->cmnd[0] == READ_CAPACITY) { 1393 if (args->cmd->cmnd[0] == READ_CAPACITY) {
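Capacity selection now walks the same ladder as the code above: LBA48 capacity from IDENTIFY words 100-103, LBA28 from words 60-61, otherwise the default CHS product of words 1, 3 and 6, overridden by the current-CHS capacity in words 57-58 when valid. A sketch with mocked IDENTIFY data (the has_lba/has_lba48/chs_valid flags stand in for the ata_id_*() feature tests):

#include <stdint.h>
#include <stdio.h>

static uint32_t id_u32(const uint16_t *id, int w)
{
	return (uint32_t)id[w] | ((uint32_t)id[w + 1] << 16);
}

static uint64_t total_sectors(const uint16_t *id, int has_lba,
			      int has_lba48, int chs_valid)
{
	if (has_lba) {
		if (has_lba48)
			return (uint64_t)id_u32(id, 100) |
			       ((uint64_t)id_u32(id, 102) << 32);
		return id_u32(id, 60);
	}
	/* default CHS: cyls (word 1) * heads (word 3) * sectors (word 6) */
	uint64_t n = (uint64_t)id[1] * id[3] * id[6];

	if (chs_valid)
		n = id_u32(id, 57);	/* current CHS capacity */
	return n;
}

int main(void)
{
	uint16_t id[256] = { 0 };

	id[1] = 16383; id[3] = 16; id[6] = 63;	/* typical default CHS */
	printf("CHS capacity: %llu sectors\n",
	       (unsigned long long)total_sectors(id, 0, 0, 0));
	return 0;
}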
@@ -1330,7 +1468,7 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
1330void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) 1468void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
1331{ 1469{
1332 DPRINTK("ENTER\n"); 1470 DPRINTK("ENTER\n");
1333 cmd->result = SAM_STAT_CHECK_CONDITION; 1471 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1334 1472
1335 cmd->sense_buffer[0] = 0x70; 1473 cmd->sense_buffer[0] = 0x70;
1336 cmd->sense_buffer[2] = ILLEGAL_REQUEST; 1474 cmd->sense_buffer[2] = ILLEGAL_REQUEST;
@@ -1341,19 +1479,79 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
1341 done(cmd); 1479 done(cmd);
1342} 1480}
1343 1481
1482void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
1483 struct scsi_cmnd *cmd)
1484{
1485 DECLARE_COMPLETION(wait);
1486 struct ata_queued_cmd *qc;
1487 unsigned long flags;
1488 int rc;
1489
1490 DPRINTK("ATAPI request sense\n");
1491
1492 qc = ata_qc_new_init(ap, dev);
1493 BUG_ON(qc == NULL);
1494
1495 /* FIXME: is this needed? */
1496 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
1497
1498 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
1499 qc->dma_dir = DMA_FROM_DEVICE;
1500
1501 memset(&qc->cdb, 0, ap->cdb_len);
1502 qc->cdb[0] = REQUEST_SENSE;
1503 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
1504
1505 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1506 qc->tf.command = ATA_CMD_PACKET;
1507
1508 qc->tf.protocol = ATA_PROT_ATAPI;
1509 qc->tf.lbam = (8 * 1024) & 0xff;
1510 qc->tf.lbah = (8 * 1024) >> 8;
1511 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
1512
1513 qc->waiting = &wait;
1514 qc->complete_fn = ata_qc_complete_noop;
1515
1516 spin_lock_irqsave(&ap->host_set->lock, flags);
1517 rc = ata_qc_issue(qc);
1518 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1519
1520 if (rc)
1521 ata_port_disable(ap);
1522 else
1523 wait_for_completion(&wait);
1524
1525 DPRINTK("EXIT\n");
1526}
1527
1344static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 1528static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1345{ 1529{
1346 struct scsi_cmnd *cmd = qc->scsicmd; 1530 struct scsi_cmnd *cmd = qc->scsicmd;
1347 1531
1348 if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) { 1532 VPRINTK("ENTER, drv_stat == 0x%x\n", drv_stat);
1533
1534 if (unlikely(drv_stat & (ATA_BUSY | ATA_DRQ)))
1535 ata_to_sense_error(qc, drv_stat);
1536
1537 else if (unlikely(drv_stat & ATA_ERR)) {
1349 DPRINTK("request check condition\n"); 1538 DPRINTK("request check condition\n");
1350 1539
1540 /* FIXME: command completion with check condition
1541 * but no sense causes the error handler to run,
1542 * which then issues REQUEST SENSE, fills in the sense
1543 * buffer, and completes the command (for the second
1544 * time). We need to issue REQUEST SENSE some other
1545 * way, to avoid completing the command twice.
1546 */
1351 cmd->result = SAM_STAT_CHECK_CONDITION; 1547 cmd->result = SAM_STAT_CHECK_CONDITION;
1352 1548
1353 qc->scsidone(cmd); 1549 qc->scsidone(cmd);
1354 1550
1355 return 1; 1551 return 1;
1356 } else { 1552 }
1553
1554 else {
1357 u8 *scsicmd = cmd->cmnd; 1555 u8 *scsicmd = cmd->cmnd;
1358 1556
1359 if (scsicmd[0] == INQUIRY) { 1557 if (scsicmd[0] == INQUIRY) {
@@ -1361,15 +1559,30 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1361 unsigned int buflen; 1559 unsigned int buflen;
1362 1560
1363 buflen = ata_scsi_rbuf_get(cmd, &buf); 1561 buflen = ata_scsi_rbuf_get(cmd, &buf);
1364 buf[2] = 0x5; 1562
1365 buf[3] = (buf[3] & 0xf0) | 2; 1563 /* ATAPI devices typically report zero for their SCSI version,
1564 * and sometimes deviate from the spec WRT response data
1565 * format. If SCSI version is reported as zero like normal,
1566 * then we make the following fixups: 1) Fake MMC-5 version,
1567 * to indicate to the Linux scsi midlayer this is a modern
1568 * device. 2) Ensure response data format / ATAPI information
1569 * are always correct.
1570 */
1571 /* FIXME: do we ever override EVPD pages and the like, with
1572 * this code?
1573 */
1574 if (buf[2] == 0) {
1575 buf[2] = 0x5;
1576 buf[3] = 0x32;
1577 }
1578
1366 ata_scsi_rbuf_put(cmd, buf); 1579 ata_scsi_rbuf_put(cmd, buf);
1367 } 1580 }
1581
1368 cmd->result = SAM_STAT_GOOD; 1582 cmd->result = SAM_STAT_GOOD;
1369 } 1583 }
1370 1584
1371 qc->scsidone(cmd); 1585 qc->scsidone(cmd);
1372
1373 return 0; 1586 return 0;
1374} 1587}
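The INQUIRY fixup above only fires when the device reports SCSI version 0, in which case byte 2 is faked up to MMC-5 (0x05) and byte 3 forced to a sane response data format (0x32). A sketch of just that byte patching on a mocked 36-byte INQUIRY response:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* INQUIRY response fixup as in atapi_qc_complete() above. */
static void fixup_inquiry(uint8_t *buf)
{
	if (buf[2] == 0) {
		buf[2] = 0x05;	/* fake MMC-5 version */
		buf[3] = 0x32;	/* response data format / ATAPI bits */
	}
}

int main(void)
{
	uint8_t inq[36];

	memset(inq, 0, sizeof(inq));
	inq[0] = 0x05;		/* peripheral type: CD/DVD */
	fixup_inquiry(inq);
	printf("version=0x%02x resp_fmt=0x%02x\n", inq[2], inq[3]);
	return 0;
}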
1375/** 1588/**
@@ -1678,3 +1891,19 @@ void ata_scsi_simulate(u16 *id,
1678 } 1891 }
1679} 1892}
1680 1893
1894void ata_scsi_scan_host(struct ata_port *ap)
1895{
1896 struct ata_device *dev;
1897 unsigned int i;
1898
1899 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1900 return;
1901
1902 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1903 dev = &ap->device[i];
1904
1905 if (ata_dev_present(dev))
1906 scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0);
1907 }
1908}
1909
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index d608b3a0f6fe..a4b55dc9c698 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -39,6 +39,7 @@ struct ata_scsi_args {
39 39
40/* libata-core.c */ 40/* libata-core.c */
41extern int atapi_enabled; 41extern int atapi_enabled;
42extern int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
42extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 43extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
43 struct ata_device *dev); 44 struct ata_device *dev);
44extern void ata_qc_free(struct ata_queued_cmd *qc); 45extern void ata_qc_free(struct ata_queued_cmd *qc);
@@ -51,6 +52,9 @@ extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
51 52
52 53
53/* libata-scsi.c */ 54/* libata-scsi.c */
55extern void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
56 struct scsi_cmnd *cmd);
57extern void ata_scsi_scan_host(struct ata_port *ap);
54extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat); 58extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat);
55extern int ata_scsi_error(struct Scsi_Host *host); 59extern int ata_scsi_error(struct Scsi_Host *host);
56extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 60extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index ea76fe44585e..d457f5673476 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -35,7 +35,7 @@
35#include <asm/io.h> 35#include <asm/io.h>
36 36
37#define DRV_NAME "sata_mv" 37#define DRV_NAME "sata_mv"
38#define DRV_VERSION "0.12" 38#define DRV_VERSION "0.24"
39 39
40enum { 40enum {
41 /* BAR's are enumerated in terms of pci_resource_start() terms */ 41 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -55,31 +55,61 @@ enum {
55 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ 55 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
56 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, 56 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
57 57
58 MV_Q_CT = 32, 58 MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
59 MV_CRQB_SZ = 32,
60 MV_CRPB_SZ = 8,
61 59
62 MV_DMA_BOUNDARY = 0xffffffffU, 60 MV_MAX_Q_DEPTH = 32,
63 SATAHC_MASK = (~(MV_SATAHC_REG_SZ - 1)), 61 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
62
63 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
64 * CRPB needs alignment on a 256B boundary. Size == 256B
65 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
66 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
67 */
68 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
69 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
70 MV_MAX_SG_CT = 176,
71 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
72 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
73
74 /* Our DMA boundary is determined by an ePRD being unable to handle
75 * anything larger than 64KB
76 */
77 MV_DMA_BOUNDARY = 0xffffU,
64 78
65 MV_PORTS_PER_HC = 4, 79 MV_PORTS_PER_HC = 4,
66 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ 80 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
67 MV_PORT_HC_SHIFT = 2, 81 MV_PORT_HC_SHIFT = 2,
68 /* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */ 82 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
69 MV_PORT_MASK = 3, 83 MV_PORT_MASK = 3,
70 84
71 /* Host Flags */ 85 /* Host Flags */
72 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 86 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
73 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 87 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
74 MV_FLAG_BDMA = (1 << 28), /* Basic DMA */ 88 MV_FLAG_GLBL_SFT_RST = (1 << 28), /* Global Soft Reset support */
89 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
90 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
91 MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE |
92 MV_FLAG_GLBL_SFT_RST),
75 93
76 chip_504x = 0, 94 chip_504x = 0,
77 chip_508x = 1, 95 chip_508x = 1,
78 chip_604x = 2, 96 chip_604x = 2,
79 chip_608x = 3, 97 chip_608x = 3,
80 98
99 CRQB_FLAG_READ = (1 << 0),
100 CRQB_TAG_SHIFT = 1,
101 CRQB_CMD_ADDR_SHIFT = 8,
102 CRQB_CMD_CS = (0x2 << 11),
103 CRQB_CMD_LAST = (1 << 15),
104
105 CRPB_FLAG_STATUS_SHIFT = 8,
106
107 EPRD_FLAG_END_OF_TBL = (1 << 31),
108
81 /* PCI interface registers */ 109 /* PCI interface registers */
82 110
111 PCI_COMMAND_OFS = 0xc00,
112
83 PCI_MAIN_CMD_STS_OFS = 0xd30, 113 PCI_MAIN_CMD_STS_OFS = 0xd30,
84 STOP_PCI_MASTER = (1 << 2), 114 STOP_PCI_MASTER = (1 << 2),
85 PCI_MASTER_EMPTY = (1 << 3), 115 PCI_MASTER_EMPTY = (1 << 3),
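The comment block above fixes the per-port DMA carve-up: a 1KB CRQB queue, a 256B CRPB queue, then 176 16-byte ePRDs, totalling the 4KB handed out by a single dma_alloc_coherent() call in mv_port_start(). A sketch that recomputes the offsets and checks the claimed alignments (they hold provided the base allocation is at least 1KB aligned, which a page-aligned coherent allocation satisfies):

#include <stdio.h>

/* Mirror of the sizing constants above. */
enum {
	MAX_Q_DEPTH	 = 32,
	CRQB_Q_SZ	 = 32 * MAX_Q_DEPTH,	/* 1024 */
	CRPB_Q_SZ	 = 8 * MAX_Q_DEPTH,	/* 256  */
	MAX_SG_CT	 = 176,
	SG_TBL_SZ	 = 16 * MAX_SG_CT,	/* 2816 */
	PORT_PRIV_DMA_SZ = CRQB_Q_SZ + CRPB_Q_SZ + SG_TBL_SZ,
};

int main(void)
{
	/* CRPB queue starts right after the CRQB queue, so its offset must
	 * keep 256B alignment; the SG table follows and needs 16B alignment.
	 */
	printf("total %u bytes, crpb off %u, sg off %u\n",
	       PORT_PRIV_DMA_SZ, CRQB_Q_SZ, CRQB_Q_SZ + CRPB_Q_SZ);
	return (CRQB_Q_SZ % 256) || ((CRQB_Q_SZ + CRPB_Q_SZ) % 16);
}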
@@ -111,20 +141,13 @@ enum {
111 HC_CFG_OFS = 0, 141 HC_CFG_OFS = 0,
112 142
113 HC_IRQ_CAUSE_OFS = 0x14, 143 HC_IRQ_CAUSE_OFS = 0x14,
114 CRBP_DMA_DONE = (1 << 0), /* shift by port # */ 144 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
115 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */ 145 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
116 DEV_IRQ = (1 << 8), /* shift by port # */ 146 DEV_IRQ = (1 << 8), /* shift by port # */
117 147
118 /* Shadow block registers */ 148 /* Shadow block registers */
119 SHD_PIO_DATA_OFS = 0x100, 149 SHD_BLK_OFS = 0x100,
120 SHD_FEA_ERR_OFS = 0x104, 150 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
121 SHD_SECT_CNT_OFS = 0x108,
122 SHD_LBA_L_OFS = 0x10C,
123 SHD_LBA_M_OFS = 0x110,
124 SHD_LBA_H_OFS = 0x114,
125 SHD_DEV_HD_OFS = 0x118,
126 SHD_CMD_STA_OFS = 0x11C,
127 SHD_CTL_AST_OFS = 0x120,
128 151
129 /* SATA registers */ 152 /* SATA registers */
130 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 153 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
@@ -132,6 +155,11 @@ enum {
132 155
133 /* Port registers */ 156 /* Port registers */
134 EDMA_CFG_OFS = 0, 157 EDMA_CFG_OFS = 0,
158 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
159 EDMA_CFG_NCQ = (1 << 5),
160 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
161 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
162 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
135 163
136 EDMA_ERR_IRQ_CAUSE_OFS = 0x8, 164 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
137 EDMA_ERR_IRQ_MASK_OFS = 0xc, 165 EDMA_ERR_IRQ_MASK_OFS = 0xc,
@@ -161,33 +189,85 @@ enum {
161 EDMA_ERR_LNK_DATA_TX | 189 EDMA_ERR_LNK_DATA_TX |
162 EDMA_ERR_TRANS_PROTO), 190 EDMA_ERR_TRANS_PROTO),
163 191
192 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
193 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
194 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
195
196 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
197 EDMA_REQ_Q_PTR_SHIFT = 5,
198
199 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
200 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
201 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
202 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
203 EDMA_RSP_Q_PTR_SHIFT = 3,
204
164 EDMA_CMD_OFS = 0x28, 205 EDMA_CMD_OFS = 0x28,
165 EDMA_EN = (1 << 0), 206 EDMA_EN = (1 << 0),
166 EDMA_DS = (1 << 1), 207 EDMA_DS = (1 << 1),
167 ATA_RST = (1 << 2), 208 ATA_RST = (1 << 2),
168 209
169 /* BDMA is 6xxx part only */ 210 /* Host private flags (hp_flags) */
170 BDMA_CMD_OFS = 0x224, 211 MV_HP_FLAG_MSI = (1 << 0),
171 BDMA_START = (1 << 0),
172 212
173 MV_UNDEF = 0, 213 /* Port private flags (pp_flags) */
214 MV_PP_FLAG_EDMA_EN = (1 << 0),
215 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
174}; 216};
175 217
176struct mv_port_priv { 218/* Command ReQuest Block: 32B */
219struct mv_crqb {
220 u32 sg_addr;
221 u32 sg_addr_hi;
222 u16 ctrl_flags;
223 u16 ata_cmd[11];
224};
177 225
226/* Command ResPonse Block: 8B */
227struct mv_crpb {
228 u16 id;
229 u16 flags;
230 u32 tmstmp;
178}; 231};
179 232
180struct mv_host_priv { 233/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
234struct mv_sg {
235 u32 addr;
236 u32 flags_size;
237 u32 addr_hi;
238 u32 reserved;
239};
181 240
241struct mv_port_priv {
242 struct mv_crqb *crqb;
243 dma_addr_t crqb_dma;
244 struct mv_crpb *crpb;
245 dma_addr_t crpb_dma;
246 struct mv_sg *sg_tbl;
247 dma_addr_t sg_tbl_dma;
248
249 unsigned req_producer; /* cp of req_in_ptr */
250 unsigned rsp_consumer; /* cp of rsp_out_ptr */
251 u32 pp_flags;
252};
253
254struct mv_host_priv {
255 u32 hp_flags;
182}; 256};
183 257
184static void mv_irq_clear(struct ata_port *ap); 258static void mv_irq_clear(struct ata_port *ap);
185static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); 259static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
186static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 260static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
261static u8 mv_check_err(struct ata_port *ap);
187static void mv_phy_reset(struct ata_port *ap); 262static void mv_phy_reset(struct ata_port *ap);
188static int mv_master_reset(void __iomem *mmio_base); 263static void mv_host_stop(struct ata_host_set *host_set);
264static int mv_port_start(struct ata_port *ap);
265static void mv_port_stop(struct ata_port *ap);
266static void mv_qc_prep(struct ata_queued_cmd *qc);
267static int mv_qc_issue(struct ata_queued_cmd *qc);
189static irqreturn_t mv_interrupt(int irq, void *dev_instance, 268static irqreturn_t mv_interrupt(int irq, void *dev_instance,
190 struct pt_regs *regs); 269 struct pt_regs *regs);
270static void mv_eng_timeout(struct ata_port *ap);
191static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 271static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
192 272
193static Scsi_Host_Template mv_sht = { 273static Scsi_Host_Template mv_sht = {
@@ -196,13 +276,13 @@ static Scsi_Host_Template mv_sht = {
196 .ioctl = ata_scsi_ioctl, 276 .ioctl = ata_scsi_ioctl,
197 .queuecommand = ata_scsi_queuecmd, 277 .queuecommand = ata_scsi_queuecmd,
198 .eh_strategy_handler = ata_scsi_error, 278 .eh_strategy_handler = ata_scsi_error,
199 .can_queue = ATA_DEF_QUEUE, 279 .can_queue = MV_USE_Q_DEPTH,
200 .this_id = ATA_SHT_THIS_ID, 280 .this_id = ATA_SHT_THIS_ID,
201 .sg_tablesize = MV_UNDEF, 281 .sg_tablesize = MV_MAX_SG_CT,
202 .max_sectors = ATA_MAX_SECTORS, 282 .max_sectors = ATA_MAX_SECTORS,
203 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 283 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
204 .emulated = ATA_SHT_EMULATED, 284 .emulated = ATA_SHT_EMULATED,
205 .use_clustering = MV_UNDEF, 285 .use_clustering = ATA_SHT_USE_CLUSTERING,
206 .proc_name = DRV_NAME, 286 .proc_name = DRV_NAME,
207 .dma_boundary = MV_DMA_BOUNDARY, 287 .dma_boundary = MV_DMA_BOUNDARY,
208 .slave_configure = ata_scsi_slave_config, 288 .slave_configure = ata_scsi_slave_config,
@@ -216,15 +296,16 @@ static struct ata_port_operations mv_ops = {
216 .tf_load = ata_tf_load, 296 .tf_load = ata_tf_load,
217 .tf_read = ata_tf_read, 297 .tf_read = ata_tf_read,
218 .check_status = ata_check_status, 298 .check_status = ata_check_status,
299 .check_err = mv_check_err,
219 .exec_command = ata_exec_command, 300 .exec_command = ata_exec_command,
220 .dev_select = ata_std_dev_select, 301 .dev_select = ata_std_dev_select,
221 302
222 .phy_reset = mv_phy_reset, 303 .phy_reset = mv_phy_reset,
223 304
224 .qc_prep = ata_qc_prep, 305 .qc_prep = mv_qc_prep,
225 .qc_issue = ata_qc_issue_prot, 306 .qc_issue = mv_qc_issue,
226 307
227 .eng_timeout = ata_eng_timeout, 308 .eng_timeout = mv_eng_timeout,
228 309
229 .irq_handler = mv_interrupt, 310 .irq_handler = mv_interrupt,
230 .irq_clear = mv_irq_clear, 311 .irq_clear = mv_irq_clear,
@@ -232,46 +313,39 @@ static struct ata_port_operations mv_ops = {
232 .scr_read = mv_scr_read, 313 .scr_read = mv_scr_read,
233 .scr_write = mv_scr_write, 314 .scr_write = mv_scr_write,
234 315
235 .port_start = ata_port_start, 316 .port_start = mv_port_start,
236 .port_stop = ata_port_stop, 317 .port_stop = mv_port_stop,
237 .host_stop = ata_host_stop, 318 .host_stop = mv_host_stop,
238}; 319};
239 320
240static struct ata_port_info mv_port_info[] = { 321static struct ata_port_info mv_port_info[] = {
241 { /* chip_504x */ 322 { /* chip_504x */
242 .sht = &mv_sht, 323 .sht = &mv_sht,
243 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 324 .host_flags = MV_COMMON_FLAGS,
244 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), 325 .pio_mask = 0x1f, /* pio0-4 */
245 .pio_mask = 0x1f, /* pio4-0 */ 326 .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */
246 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
247 .port_ops = &mv_ops, 327 .port_ops = &mv_ops,
248 }, 328 },
249 { /* chip_508x */ 329 { /* chip_508x */
250 .sht = &mv_sht, 330 .sht = &mv_sht,
251 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 331 .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
252 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 332 .pio_mask = 0x1f, /* pio0-4 */
253 MV_FLAG_DUAL_HC), 333 .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */
254 .pio_mask = 0x1f, /* pio4-0 */
255 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
256 .port_ops = &mv_ops, 334 .port_ops = &mv_ops,
257 }, 335 },
258 { /* chip_604x */ 336 { /* chip_604x */
259 .sht = &mv_sht, 337 .sht = &mv_sht,
260 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 338 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
261 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 339 .pio_mask = 0x1f, /* pio0-4 */
262 MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA), 340 .udma_mask = 0x7f, /* udma0-6 */
263 .pio_mask = 0x1f, /* pio4-0 */
264 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
265 .port_ops = &mv_ops, 341 .port_ops = &mv_ops,
266 }, 342 },
267 { /* chip_608x */ 343 { /* chip_608x */
268 .sht = &mv_sht, 344 .sht = &mv_sht,
269 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 345 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
270 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 346 MV_FLAG_DUAL_HC),
271 MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC | 347 .pio_mask = 0x1f, /* pio0-4 */
272 MV_FLAG_BDMA), 348 .udma_mask = 0x7f, /* udma0-6 */
273 .pio_mask = 0x1f, /* pio4-0 */
274 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
275 .port_ops = &mv_ops, 349 .port_ops = &mv_ops,
276 }, 350 },
277}; 351};
@@ -306,12 +380,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr)
306 (void) readl(addr); /* flush to avoid PCI posted write */ 380 (void) readl(addr); /* flush to avoid PCI posted write */
307} 381}
308 382
309static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio)
310{
311 return ((void __iomem *)((unsigned long)port_mmio &
312 (unsigned long)SATAHC_MASK));
313}
314
315static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) 383static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
316{ 384{
317 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); 385 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
@@ -329,24 +397,150 @@ static inline void __iomem *mv_ap_base(struct ata_port *ap)
329 return mv_port_base(ap->host_set->mmio_base, ap->port_no); 397 return mv_port_base(ap->host_set->mmio_base, ap->port_no);
330} 398}
331 399
332static inline int mv_get_hc_count(unsigned long flags) 400static inline int mv_get_hc_count(unsigned long hp_flags)
333{ 401{
334 return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1); 402 return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
335} 403}
336 404
337static inline int mv_is_edma_active(struct ata_port *ap) 405static void mv_irq_clear(struct ata_port *ap)
406{
407}
408
409/**
410 * mv_start_dma - Enable eDMA engine
411 * @base: port base address
412 * @pp: port private data
413 *
414 * Verify the local cache of the eDMA state is accurate with an
415 * assert.
416 *
417 * LOCKING:
418 * Inherited from caller.
419 */
420static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
421{
422 if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
423 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
424 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
425 }
426 assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
427}
428
429/**
430 * mv_stop_dma - Disable eDMA engine
431 * @ap: ATA channel to manipulate
432 *
433 * Verify the local cache of the eDMA state is accurate with an
434 * assert.
435 *
436 * LOCKING:
437 * Inherited from caller.
438 */
439static void mv_stop_dma(struct ata_port *ap)
338{ 440{
339 void __iomem *port_mmio = mv_ap_base(ap); 441 void __iomem *port_mmio = mv_ap_base(ap);
340 return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); 442 struct mv_port_priv *pp = ap->private_data;
443 u32 reg;
444 int i;
445
446 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
447 /* Disable EDMA if active. The disable bit auto clears.
448 */
449 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
450 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
451 } else {
452 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
453 }
454
455 /* now properly wait for the eDMA to stop */
456 for (i = 1000; i > 0; i--) {
457 reg = readl(port_mmio + EDMA_CMD_OFS);
458 if (!(EDMA_EN & reg)) {
459 break;
460 }
461 udelay(100);
462 }
463
464 if (EDMA_EN & reg) {
465 printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
466 /* FIXME: Consider doing a reset here to recover */
467 }
341} 468}
342 469
343static inline int mv_port_bdma_capable(struct ata_port *ap) 470#ifdef ATA_DEBUG
471static void mv_dump_mem(void __iomem *start, unsigned bytes)
344{ 472{
345 return (ap->flags & MV_FLAG_BDMA); 473 int b, w;
474 for (b = 0; b < bytes; ) {
475 DPRINTK("%p: ", start + b);
476 for (w = 0; b < bytes && w < 4; w++) {
477 printk("%08x ",readl(start + b));
478 b += sizeof(u32);
479 }
480 printk("\n");
481 }
346} 482}
483#endif
347 484
348static void mv_irq_clear(struct ata_port *ap) 485static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
486{
487#ifdef ATA_DEBUG
488 int b, w;
489 u32 dw;
490 for (b = 0; b < bytes; ) {
491 DPRINTK("%02x: ", b);
492 for (w = 0; b < bytes && w < 4; w++) {
493 (void) pci_read_config_dword(pdev,b,&dw);
494 printk("%08x ",dw);
495 b += sizeof(u32);
496 }
497 printk("\n");
498 }
499#endif
500}
501static void mv_dump_all_regs(void __iomem *mmio_base, int port,
502 struct pci_dev *pdev)
349{ 503{
504#ifdef ATA_DEBUG
505 void __iomem *hc_base = mv_hc_base(mmio_base,
506 port >> MV_PORT_HC_SHIFT);
507 void __iomem *port_base;
508 int start_port, num_ports, p, start_hc, num_hcs, hc;
509
510 if (0 > port) {
511 start_hc = start_port = 0;
 512 num_ports = 8; /* should be benign for 4-port devices */
513 num_hcs = 2;
514 } else {
515 start_hc = port >> MV_PORT_HC_SHIFT;
516 start_port = port;
517 num_ports = num_hcs = 1;
518 }
519 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
520 num_ports > 1 ? num_ports - 1 : start_port);
521
522 if (NULL != pdev) {
523 DPRINTK("PCI config space regs:\n");
524 mv_dump_pci_cfg(pdev, 0x68);
525 }
526 DPRINTK("PCI regs:\n");
527 mv_dump_mem(mmio_base+0xc00, 0x3c);
528 mv_dump_mem(mmio_base+0xd00, 0x34);
529 mv_dump_mem(mmio_base+0xf00, 0x4);
530 mv_dump_mem(mmio_base+0x1d00, 0x6c);
531 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
532 hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT);
533 DPRINTK("HC regs (HC %i):\n", hc);
534 mv_dump_mem(hc_base, 0x1c);
535 }
536 for (p = start_port; p < start_port + num_ports; p++) {
537 port_base = mv_port_base(mmio_base, p);
538 DPRINTK("EDMA regs (port %i):\n",p);
539 mv_dump_mem(port_base, 0x54);
540 DPRINTK("SATA regs (port %i):\n",p);
541 mv_dump_mem(port_base+0x300, 0x60);
542 }
543#endif
350} 544}
351 545
352static unsigned int mv_scr_offset(unsigned int sc_reg_in) 546static unsigned int mv_scr_offset(unsigned int sc_reg_in)
@@ -389,30 +583,37 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
389 } 583 }
390} 584}
391 585
392static int mv_master_reset(void __iomem *mmio_base) 586/**
587 * mv_global_soft_reset - Perform the 6xxx global soft reset
588 * @mmio_base: base address of the HBA
589 *
590 * This routine only applies to 6xxx parts.
591 *
592 * LOCKING:
593 * Inherited from caller.
594 */
595static int mv_global_soft_reset(void __iomem *mmio_base)
393{ 596{
394 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; 597 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
395 int i, rc = 0; 598 int i, rc = 0;
396 u32 t; 599 u32 t;
397 600
398 VPRINTK("ENTER\n");
399
400 /* Following procedure defined in PCI "main command and status 601 /* Following procedure defined in PCI "main command and status
401 * register" table. 602 * register" table.
402 */ 603 */
403 t = readl(reg); 604 t = readl(reg);
404 writel(t | STOP_PCI_MASTER, reg); 605 writel(t | STOP_PCI_MASTER, reg);
405 606
406 for (i = 0; i < 100; i++) { 607 for (i = 0; i < 1000; i++) {
407 msleep(10); 608 udelay(1);
408 t = readl(reg); 609 t = readl(reg);
409 if (PCI_MASTER_EMPTY & t) { 610 if (PCI_MASTER_EMPTY & t) {
410 break; 611 break;
411 } 612 }
412 } 613 }
413 if (!(PCI_MASTER_EMPTY & t)) { 614 if (!(PCI_MASTER_EMPTY & t)) {
414 printk(KERN_ERR DRV_NAME "PCI master won't flush\n"); 615 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
415 rc = 1; /* broken HW? */ 616 rc = 1;
416 goto done; 617 goto done;
417 } 618 }
418 619
@@ -425,39 +626,398 @@ static int mv_master_reset(void __iomem *mmio_base)
425 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 626 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
426 627
427 if (!(GLOB_SFT_RST & t)) { 628 if (!(GLOB_SFT_RST & t)) {
428 printk(KERN_ERR DRV_NAME "can't set global reset\n"); 629 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
429 rc = 1; /* broken HW? */ 630 rc = 1;
430 goto done; 631 goto done;
431 } 632 }
432 633
433 /* clear reset */ 634 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
434 i = 5; 635 i = 5;
435 do { 636 do {
436 writel(t & ~GLOB_SFT_RST, reg); 637 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
437 t = readl(reg); 638 t = readl(reg);
438 udelay(1); 639 udelay(1);
439 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 640 } while ((GLOB_SFT_RST & t) && (i-- > 0));
440 641
441 if (GLOB_SFT_RST & t) { 642 if (GLOB_SFT_RST & t) {
442 printk(KERN_ERR DRV_NAME "can't clear global reset\n"); 643 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
443 rc = 1; /* broken HW? */ 644 rc = 1;
444 } 645 }
445 646done:
446 done:
447 VPRINTK("EXIT, rc = %i\n", rc);
448 return rc; 647 return rc;
449} 648}
450 649
451static void mv_err_intr(struct ata_port *ap) 650/**
651 * mv_host_stop - Host specific cleanup/stop routine.
652 * @host_set: host data structure
653 *
654 * Disable ints, cleanup host memory, call general purpose
655 * host_stop.
656 *
657 * LOCKING:
658 * Inherited from caller.
659 */
660static void mv_host_stop(struct ata_host_set *host_set)
452{ 661{
453 void __iomem *port_mmio; 662 struct mv_host_priv *hpriv = host_set->private_data;
454 u32 edma_err_cause, serr = 0; 663 struct pci_dev *pdev = to_pci_dev(host_set->dev);
664
665 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
666 pci_disable_msi(pdev);
667 } else {
668 pci_intx(pdev, 0);
669 }
670 kfree(hpriv);
671 ata_host_stop(host_set);
672}
673
674/**
675 * mv_port_start - Port specific init/start routine.
676 * @ap: ATA channel to manipulate
677 *
678 * Allocate and point to DMA memory, init port private memory,
679 * zero indices.
680 *
681 * LOCKING:
682 * Inherited from caller.
683 */
684static int mv_port_start(struct ata_port *ap)
685{
686 struct device *dev = ap->host_set->dev;
687 struct mv_port_priv *pp;
688 void __iomem *port_mmio = mv_ap_base(ap);
689 void *mem;
690 dma_addr_t mem_dma;
691
692 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
693 if (!pp) {
694 return -ENOMEM;
695 }
696 memset(pp, 0, sizeof(*pp));
697
698 mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
699 GFP_KERNEL);
700 if (!mem) {
701 kfree(pp);
702 return -ENOMEM;
703 }
704 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
705
706 /* First item in chunk of DMA memory:
707 * 32-slot command request table (CRQB), 32 bytes each in size
708 */
709 pp->crqb = mem;
710 pp->crqb_dma = mem_dma;
711 mem += MV_CRQB_Q_SZ;
712 mem_dma += MV_CRQB_Q_SZ;
713
714 /* Second item:
715 * 32-slot command response table (CRPB), 8 bytes each in size
716 */
717 pp->crpb = mem;
718 pp->crpb_dma = mem_dma;
719 mem += MV_CRPB_Q_SZ;
720 mem_dma += MV_CRPB_Q_SZ;
721
722 /* Third item:
723 * Table of scatter-gather descriptors (ePRD), 16 bytes each
724 */
725 pp->sg_tbl = mem;
726 pp->sg_tbl_dma = mem_dma;
727
728 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
729 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
730
731 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
732 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
733 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
734
735 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
736 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
737
738 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
739 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
740 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
741
742 pp->req_producer = pp->rsp_consumer = 0;
743
744 /* Don't turn on EDMA here...do it before DMA commands only. Else
745 * we'll be unable to send non-data, PIO, etc due to restricted access
746 * to shadow regs.
747 */
748 ap->private_data = pp;
749 return 0;
750}
751
752/**
753 * mv_port_stop - Port specific cleanup/stop routine.
754 * @ap: ATA channel to manipulate
755 *
756 * Stop DMA, cleanup port memory.
757 *
758 * LOCKING:
759 * This routine uses the host_set lock to protect the DMA stop.
760 */
761static void mv_port_stop(struct ata_port *ap)
762{
763 struct device *dev = ap->host_set->dev;
764 struct mv_port_priv *pp = ap->private_data;
765 unsigned long flags;
766
767 spin_lock_irqsave(&ap->host_set->lock, flags);
768 mv_stop_dma(ap);
769 spin_unlock_irqrestore(&ap->host_set->lock, flags);
770
771 ap->private_data = NULL;
772 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
773 kfree(pp);
774}
775
776/**
777 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
778 * @qc: queued command whose SG list to source from
779 *
780 * Populate the SG list and mark the last entry.
781 *
782 * LOCKING:
783 * Inherited from caller.
784 */
785static void mv_fill_sg(struct ata_queued_cmd *qc)
786{
787 struct mv_port_priv *pp = qc->ap->private_data;
788 unsigned int i;
789
790 for (i = 0; i < qc->n_elem; i++) {
791 u32 sg_len;
792 dma_addr_t addr;
793
794 addr = sg_dma_address(&qc->sg[i]);
795 sg_len = sg_dma_len(&qc->sg[i]);
796
797 pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
798 pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
799 assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
800 pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
801 }
802 if (0 < qc->n_elem) {
803 pp->sg_tbl[qc->n_elem - 1].flags_size |= EPRD_FLAG_END_OF_TBL;
804 }
805}
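The table written here holds 16-byte ePRD entries matching the three fields touched above; a plausible layout (the struct name, field order, and pad word are assumptions, not taken from this hunk):

	struct sk_mv_sg {		/* hypothetical; one ePRD entry */
		u32 addr;	/* low 32 bits of the buffer bus address */
		u32 flags_size;	/* byte count; EPRD_FLAG_END_OF_TBL = last */
		u32 addr_hi;	/* high 32 bits of the buffer bus address */
		u32 reserved;	/* pads the entry to 16 bytes */
	};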
806
807static inline unsigned mv_inc_q_index(unsigned *index)
808{
809 *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
810 return *index;
811}
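Since the ring depth is a power of two, the AND gives free wraparound; with 32 slots (mask 0x1f), for example:

	unsigned idx = 31;
	mv_inc_q_index(&idx);	/* (31 + 1) & 0x1f == 0 -- wraps to slot 0 */
	mv_inc_q_index(&idx);	/* idx == 1 */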
812
813static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
814{
815 *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
816 (last ? CRQB_CMD_LAST : 0);
817}
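Each 16-bit CRQB command word thus packs a register value, a shadow-register address, and control bits. Packing the terminating command byte of a READ DMA with the helper just defined would look like:

	u16 cmdw;

	/* data = opcode, addr = command register, last = 1 ends the list */
	mv_crqb_pack_cmd(&cmdw, ATA_CMD_READ, ATA_REG_CMD, 1);
	/* cmdw == ATA_CMD_READ | (ATA_REG_CMD << CRQB_CMD_ADDR_SHIFT)
	 *         | CRQB_CMD_CS | CRQB_CMD_LAST
	 */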
455
456 /* bug here b/c we got an err int on a port we don't know about,
457 * so there's no way to clear it
458 */
459 BUG_ON(NULL == ap);
460 port_mmio = mv_ap_base(ap);
 818
 819/**
 820 * mv_qc_prep - Host specific command preparation.
 821 * @qc: queued command to prepare
 822 *
 823 * This routine simply redirects to the general purpose routine
 824 * if command is not DMA. Else, it handles prep of the CRQB
 825 * (command request block), does some sanity checking, and calls
 826 * the SG load routine.
 827 *
 828 * LOCKING:
 829 * Inherited from caller.
 830 */
 831static void mv_qc_prep(struct ata_queued_cmd *qc)
 832{
 833 struct ata_port *ap = qc->ap;
 834 struct mv_port_priv *pp = ap->private_data;
 835 u16 *cw;
 836 struct ata_taskfile *tf;
 837 u16 flags = 0;
 838
 839 if (ATA_PROT_DMA != qc->tf.protocol) {
 840 return;
 841 }
 842
 843 /* the req producer index should be the same as we remember it */
 844 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
 845 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
 846 pp->req_producer);
 847
 848 /* Fill in command request block
 849 */
 850 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
 851 flags |= CRQB_FLAG_READ;
852 }
853 assert(MV_MAX_Q_DEPTH > qc->tag);
854 flags |= qc->tag << CRQB_TAG_SHIFT;
855
856 pp->crqb[pp->req_producer].sg_addr =
857 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
858 pp->crqb[pp->req_producer].sg_addr_hi =
859 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
860 pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
861
862 cw = &pp->crqb[pp->req_producer].ata_cmd[0];
863 tf = &qc->tf;
864
 865 /* Sadly, the CRQB cannot accommodate all registers--there are
866 * only 11 bytes...so we must pick and choose required
867 * registers based on the command. So, we drop feature and
868 * hob_feature for [RW] DMA commands, but they are needed for
869 * NCQ. NCQ will drop hob_nsect.
870 */
871 switch (tf->command) {
872 case ATA_CMD_READ:
873 case ATA_CMD_READ_EXT:
874 case ATA_CMD_WRITE:
875 case ATA_CMD_WRITE_EXT:
876 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
877 break;
878#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
879 case ATA_CMD_FPDMA_READ:
880 case ATA_CMD_FPDMA_WRITE:
881 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
882 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
883 break;
884#endif /* FIXME: remove this line when NCQ added */
885 default:
886 /* The only other commands EDMA supports in non-queued and
887 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
888 * of which are defined/used by Linux. If we get here, this
889 * driver needs work.
890 *
891 * FIXME: modify libata to give qc_prep a return value and
892 * return error here.
893 */
894 BUG_ON(tf->command);
895 break;
896 }
897 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
898 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
899 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
900 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
901 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
902 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
903 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
904 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
905 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
906
907 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
908 return;
909 }
910 mv_fill_sg(qc);
911}
912
913/**
914 * mv_qc_issue - Initiate a command to the host
915 * @qc: queued command to start
916 *
917 * This routine simply redirects to the general purpose routine
918 * if command is not DMA. Else, it sanity checks our local
919 * caches of the request producer/consumer indices then enables
920 * DMA and bumps the request producer index.
921 *
922 * LOCKING:
923 * Inherited from caller.
924 */
925static int mv_qc_issue(struct ata_queued_cmd *qc)
926{
927 void __iomem *port_mmio = mv_ap_base(qc->ap);
928 struct mv_port_priv *pp = qc->ap->private_data;
929 u32 in_ptr;
930
931 if (ATA_PROT_DMA != qc->tf.protocol) {
932 /* We're about to send a non-EDMA capable command to the
933 * port. Turn off EDMA so there won't be problems accessing
934 * shadow block, etc registers.
935 */
936 mv_stop_dma(qc->ap);
937 return ata_qc_issue_prot(qc);
938 }
939
940 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
941
942 /* the req producer index should be the same as we remember it */
943 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
944 pp->req_producer);
945 /* until we do queuing, the queue should be empty at this point */
946 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
947 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
948 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
949
950 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
951
952 mv_start_dma(port_mmio, pp);
953
954 /* and write the request in pointer to kick the EDMA to life */
955 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
956 in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
957 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
958
959 return 0;
960}
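Together, mv_qc_prep() and mv_qc_issue() form the producer half of the ring protocol; mv_get_crpb_status() below is the consumer half. One command round-trip, compressed into a sketch of the calls staged above:

	/*
	 *	mv_qc_prep(qc);		CRQB + ePRDs written to host memory
	 *	mv_qc_issue(qc);	req_producer bumped, REQ_Q_IN_PTR
	 *				written -- EDMA starts the command
	 *	  ...EDMA completes, posts a CRPB, raises CRPB_DMA_DONE...
	 *	status = mv_get_crpb_status(ap);   rsp_consumer bumped,
	 *					   RSP_Q_OUT_PTR written
	 *	ata_qc_complete(qc, status);
	 */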
961
962/**
963 * mv_get_crpb_status - get status from most recently completed cmd
964 * @ap: ATA channel to manipulate
965 *
966 * This routine is for use when the port is in DMA mode, when it
967 * will be using the CRPB (command response block) method of
968 * returning command completion information. We assert indices
969 * are good, grab status, and bump the response consumer index to
970 * prove that we're up to date.
971 *
972 * LOCKING:
973 * Inherited from caller.
974 */
975static u8 mv_get_crpb_status(struct ata_port *ap)
976{
977 void __iomem *port_mmio = mv_ap_base(ap);
978 struct mv_port_priv *pp = ap->private_data;
979 u32 out_ptr;
980
981 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
982
983 /* the response consumer index should be the same as we remember it */
984 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
985 pp->rsp_consumer);
986
987 /* increment our consumer index... */
988 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
989
990 /* and, until we do NCQ, there should only be 1 CRPB waiting */
991 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
992 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
993 pp->rsp_consumer);
994
995 /* write out our inc'd consumer index so EDMA knows we're caught up */
996 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
997 out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
998 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
999
1000 /* Return ATA status register for completed CRPB */
1001 return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
1002}
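What reaches libata is the ATA status byte extracted from the CRPB flags word. Assuming CRPB_FLAG_STATUS_SHIFT places the status in bits 15:8 (an assumption; the constant's value is not in this hunk), a successful command decodes as:

	u16 flags = 0x5000;		/* example CRPB flags value */
	u8 status = flags >> 8;		/* 0x50 == ATA_DRDY | ATA_DSC */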
1003
1004/**
1005 * mv_err_intr - Handle error interrupts on the port
1006 * @ap: ATA channel to manipulate
1007 *
1008 * In most cases, just clear the interrupt and move on. However,
1009 * some cases require an eDMA reset, which is done right before
1010 * the COMRESET in mv_phy_reset(). The SERR case requires a
1011 * clear of pending errors in the SATA SERROR register. Finally,
1012 * if the port disabled DMA, update our cached copy to match.
1013 *
1014 * LOCKING:
1015 * Inherited from caller.
1016 */
1017static void mv_err_intr(struct ata_port *ap)
1018{
1019 void __iomem *port_mmio = mv_ap_base(ap);
1020 u32 edma_err_cause, serr = 0;
461 1021
462 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1022 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
463 1023
@@ -465,8 +1025,12 @@ static void mv_err_intr(struct ata_port *ap)
465 serr = scr_read(ap, SCR_ERROR); 1025 serr = scr_read(ap, SCR_ERROR);
466 scr_write_flush(ap, SCR_ERROR, serr); 1026 scr_write_flush(ap, SCR_ERROR, serr);
467 } 1027 }
468 DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n",
469 ap->port_no, edma_err_cause, serr);
 1028 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
 1029 struct mv_port_priv *pp = ap->private_data;
1030 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1031 }
1032 DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1033 "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
470 1034
471 /* Clear EDMA now that SERR cleanup done */ 1035 /* Clear EDMA now that SERR cleanup done */
472 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1036 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -477,7 +1041,21 @@ static void mv_err_intr(struct ata_port *ap)
477 } 1041 }
478} 1042}
479 1043
480/* Handle any outstanding interrupts in a single SATAHC
 1044/**
1045 * mv_host_intr - Handle all interrupts on the given host controller
1046 * @host_set: host specific structure
1047 * @relevant: port error bits relevant to this host controller
1048 * @hc: which host controller we're to look at
1049 *
1050 * Read then write clear the HC interrupt status then walk each
1051 * port connected to the HC and see if it needs servicing. Port
1052 * success ints are reported in the HC interrupt status reg, the
1053 * port error ints are reported in the higher level main
1054 * interrupt status register and thus are passed in via the
1055 * 'relevant' argument.
1056 *
1057 * LOCKING:
1058 * Inherited from caller.
481 */ 1059 */
482static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, 1060static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
483 unsigned int hc) 1061 unsigned int hc)
@@ -487,8 +1065,8 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
487 struct ata_port *ap; 1065 struct ata_port *ap;
488 struct ata_queued_cmd *qc; 1066 struct ata_queued_cmd *qc;
489 u32 hc_irq_cause; 1067 u32 hc_irq_cause;
490 int shift, port, port0, hard_port;
491 u8 ata_status;
 1068 int shift, port, port0, hard_port, handled;
 1069 u8 ata_status = 0;
492 1070
493 if (hc == 0) { 1071 if (hc == 0) {
494 port0 = 0; 1072 port0 = 0;
@@ -499,7 +1077,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
499 /* we'll need the HC success int register in most cases */ 1077 /* we'll need the HC success int register in most cases */
500 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); 1078 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
501 if (hc_irq_cause) { 1079 if (hc_irq_cause) {
502 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
 1080 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
503 } 1081 }
504 1082
505 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", 1083 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
@@ -508,35 +1086,38 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
508 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1086 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
509 ap = host_set->ports[port]; 1087 ap = host_set->ports[port];
510 hard_port = port & MV_PORT_MASK; /* range 0-3 */ 1088 hard_port = port & MV_PORT_MASK; /* range 0-3 */
511 ata_status = 0xffU;
 1089 handled = 0; /* ensure ata_status is set if handled++ */
512 1090
513 if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) {
514 BUG_ON(NULL == ap);
515 /* rcv'd new resp, basic DMA complete, or ATA IRQ */
516 /* This is needed to clear the ATA INTRQ.
517 * FIXME: don't read the status reg in EDMA mode!
 1091 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
 1092 /* new CRPB on the queue; just one at a time until NCQ
 1093 */
 1094 ata_status = mv_get_crpb_status(ap);
 1095 handled++;
1096 } else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1097 /* received ATA IRQ; read the status reg to clear INTRQ
518 */ 1098 */
519 ata_status = readb((void __iomem *) 1099 ata_status = readb((void __iomem *)
520 ap->ioaddr.status_addr); 1100 ap->ioaddr.status_addr);
1101 handled++;
521 } 1102 }
522 1103
523 shift = port * 2;
 1104 shift = port << 1; /* (port * 2) */
524 if (port >= MV_PORTS_PER_HC) { 1105 if (port >= MV_PORTS_PER_HC) {
525 shift++; /* skip bit 8 in the HC Main IRQ reg */ 1106 shift++; /* skip bit 8 in the HC Main IRQ reg */
526 } 1107 }
527 if ((PORT0_ERR << shift) & relevant) { 1108 if ((PORT0_ERR << shift) & relevant) {
528 mv_err_intr(ap); 1109 mv_err_intr(ap);
529 /* FIXME: smart to OR in ATA_ERR? */
 1110 /* OR in ATA_ERR to ensure libata knows we took one */
530 ata_status = readb((void __iomem *) 1111 ata_status = readb((void __iomem *)
531 ap->ioaddr.status_addr) | ATA_ERR; 1112 ap->ioaddr.status_addr) | ATA_ERR;
1113 handled++;
532 } 1114 }
533 1115
534 if (ap) {
 1116 if (handled && ap) {
535 qc = ata_qc_from_tag(ap, ap->active_tag); 1117 qc = ata_qc_from_tag(ap, ap->active_tag);
536 if (NULL != qc) { 1118 if (NULL != qc) {
537 VPRINTK("port %u IRQ found for qc, " 1119 VPRINTK("port %u IRQ found for qc, "
538 "ata_status 0x%x\n", port,ata_status); 1120 "ata_status 0x%x\n", port,ata_status);
539 BUG_ON(0xffU == ata_status);
540 /* mark qc status appropriately */ 1121 /* mark qc status appropriately */
541 ata_qc_complete(qc, ata_status); 1122 ata_qc_complete(qc, ata_status);
542 } 1123 }
@@ -545,17 +1126,30 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
545 VPRINTK("EXIT\n"); 1126 VPRINTK("EXIT\n");
546} 1127}
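The shift computation above maps up to eight ports onto the main IRQ cause register, skipping bit 8 for ports behind the second HC; spelled out (with MV_PORTS_PER_HC == 4, per the "range 0-3" comment):

	/* port 0..3 (HC0): shift = 0, 2, 4, 6
	 * port 4..7 (HC1): shift = 9, 11, 13, 15	(bit 8 skipped)
	 */
	int shift = port << 1;
	if (port >= MV_PORTS_PER_HC)
		shift++;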
547 1128
1129/**
 1130 * mv_interrupt - Main interrupt event handler
1131 * @irq: unused
1132 * @dev_instance: private data; in this case the host structure
1133 * @regs: unused
1134 *
1135 * Read the read only register to determine if any host
1136 * controllers have pending interrupts. If so, call lower level
1137 * routine to handle. Also check for PCI errors which are only
1138 * reported here.
1139 *
1140 * LOCKING:
1141 * This routine holds the host_set lock while processing pending
1142 * interrupts.
1143 */
548static irqreturn_t mv_interrupt(int irq, void *dev_instance, 1144static irqreturn_t mv_interrupt(int irq, void *dev_instance,
549 struct pt_regs *regs) 1145 struct pt_regs *regs)
550{ 1146{
551 struct ata_host_set *host_set = dev_instance; 1147 struct ata_host_set *host_set = dev_instance;
552 unsigned int hc, handled = 0, n_hcs; 1148 unsigned int hc, handled = 0, n_hcs;
553 void __iomem *mmio;
 1149 void __iomem *mmio = host_set->mmio_base;
554 u32 irq_stat; 1150 u32 irq_stat;
555 1151
556 mmio = host_set->mmio_base;
557 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); 1152 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
558 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
559 1153
560 /* check the cases where we either have nothing pending or have read 1154 /* check the cases where we either have nothing pending or have read
561 * a bogus register value which can indicate HW removal or PCI fault 1155 * a bogus register value which can indicate HW removal or PCI fault
@@ -564,64 +1158,105 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
564 return IRQ_NONE; 1158 return IRQ_NONE;
565 } 1159 }
566 1160
1161 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
567 spin_lock(&host_set->lock); 1162 spin_lock(&host_set->lock);
568 1163
569 for (hc = 0; hc < n_hcs; hc++) { 1164 for (hc = 0; hc < n_hcs; hc++) {
570 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); 1165 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
571 if (relevant) { 1166 if (relevant) {
572 mv_host_intr(host_set, relevant, hc); 1167 mv_host_intr(host_set, relevant, hc);
573 handled = 1;
 1168 handled++;
574 } 1169 }
575 } 1170 }
576 if (PCI_ERR & irq_stat) { 1171 if (PCI_ERR & irq_stat) {
577 /* FIXME: these are all masked by default, but still need
578 * to recover from them properly.
579 */
580 }
 1172 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
 1173 readl(mmio + PCI_IRQ_CAUSE_OFS));
581 1174
1175 DPRINTK("All regs @ PCI error\n");
1176 mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
1177
1178 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1179 handled++;
1180 }
582 spin_unlock(&host_set->lock); 1181 spin_unlock(&host_set->lock);
583 1182
584 return IRQ_RETVAL(handled); 1183 return IRQ_RETVAL(handled);
585} 1184}
586 1185
1186/**
1187 * mv_check_err - Return the error shadow register to caller.
1188 * @ap: ATA channel to manipulate
1189 *
1190 * Marvell requires DMA to be stopped before accessing shadow
1191 * registers. So we do that, then return the needed register.
1192 *
1193 * LOCKING:
1194 * Inherited from caller. FIXME: protect mv_stop_dma with lock?
1195 */
1196static u8 mv_check_err(struct ata_port *ap)
1197{
1198 mv_stop_dma(ap); /* can't read shadow regs if DMA on */
1199 return readb((void __iomem *) ap->ioaddr.error_addr);
1200}
1201
1202/**
1203 * mv_phy_reset - Perform eDMA reset followed by COMRESET
1204 * @ap: ATA channel to manipulate
1205 *
1206 * Part of this is taken from __sata_phy_reset and modified to
1207 * not sleep since this routine gets called from interrupt level.
1208 *
1209 * LOCKING:
 1210 * Inherited from caller. This is coded to be safe to call at
1211 * interrupt level, i.e. it does not sleep.
1212 */
587static void mv_phy_reset(struct ata_port *ap) 1213static void mv_phy_reset(struct ata_port *ap)
588{ 1214{
589 void __iomem *port_mmio = mv_ap_base(ap); 1215 void __iomem *port_mmio = mv_ap_base(ap);
590 struct ata_taskfile tf; 1216 struct ata_taskfile tf;
591 struct ata_device *dev = &ap->device[0]; 1217 struct ata_device *dev = &ap->device[0];
592 u32 edma = 0, bdma;
 1218 unsigned long timeout;
593 1219
594 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); 1220 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
595 1221
596 edma = readl(port_mmio + EDMA_CMD_OFS);
597 if (EDMA_EN & edma) {
598 /* disable EDMA if active */
599 edma &= ~EDMA_EN;
600 writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS);
601 udelay(1);
602 } else if (mv_port_bdma_capable(ap) &&
603 (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) {
604 /* disable BDMA if active */
605 writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS);
606 }
 1222 mv_stop_dma(ap);
607 1223
608 writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS);
 1224 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
609 udelay(25); /* allow reset propagation */ 1225 udelay(25); /* allow reset propagation */
610 1226
611 /* Spec never mentions clearing the bit. Marvell's driver does 1227 /* Spec never mentions clearing the bit. Marvell's driver does
612 * clear the bit, however. 1228 * clear the bit, however.
613 */ 1229 */
614 writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS);
 1230 writelfl(0, port_mmio + EDMA_CMD_OFS);
615 1231
616 VPRINTK("Done. Now calling __sata_phy_reset()\n");
 1232 VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1233 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1234 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
617 1235
618 /* proceed to init communications via the scr_control reg */ 1236 /* proceed to init communications via the scr_control reg */
619 __sata_phy_reset(ap);
 1237 scr_write_flush(ap, SCR_CONTROL, 0x301);
1238 mdelay(1);
1239 scr_write_flush(ap, SCR_CONTROL, 0x300);
1240 timeout = jiffies + (HZ * 1);
1241 do {
1242 mdelay(10);
1243 if ((scr_read(ap, SCR_STATUS) & 0xf) != 1)
1244 break;
1245 } while (time_before(jiffies, timeout));
620 1246
621 if (ap->flags & ATA_FLAG_PORT_DISABLED) {
622 VPRINTK("Port disabled pre-sig. Exiting.\n");
 1247 VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
 1248 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1249 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1250
1251 if (sata_dev_present(ap)) {
1252 ata_port_probe(ap);
1253 } else {
1254 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1255 ap->id, scr_read(ap, SCR_STATUS));
1256 ata_port_disable(ap);
623 return; 1257 return;
624 } 1258 }
1259 ap->cbl = ATA_CBL_SATA;
625 1260
626 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); 1261 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
627 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); 1262 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
@@ -636,37 +1271,118 @@ static void mv_phy_reset(struct ata_port *ap)
636 VPRINTK("EXIT\n"); 1271 VPRINTK("EXIT\n");
637} 1272}
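The hardwired 0x301/0x300 pair drives the DET field (bits 3:0) of SControl: writing 1 requests a COMRESET, writing 0 releases it, and the loop then polls the DET field of SStatus for up to a second. Per the standard SATA SCR encoding:

	u32 det = scr_read(ap, SCR_STATUS) & 0xf;
	/* det == 0: no device detected
	 * det == 1: device detected, no PHY communication yet
	 * det == 3: device detected, PHY communication established
	 * The wait loop above exits as soon as det leaves state 1.
	 */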
638 1273
639static void mv_port_init(struct ata_ioports *port, unsigned long base)
 1274/**
1275 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
1276 * @ap: ATA channel to manipulate
1277 *
1278 * Intent is to clear all pending error conditions, reset the
1279 * chip/bus, fail the command, and move on.
1280 *
1281 * LOCKING:
1282 * This routine holds the host_set lock while failing the command.
1283 */
1284static void mv_eng_timeout(struct ata_port *ap)
1285{
1286 struct ata_queued_cmd *qc;
1287 unsigned long flags;
1288
1289 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1290 DPRINTK("All regs @ start of eng_timeout\n");
1291 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
1292 to_pci_dev(ap->host_set->dev));
1293
1294 qc = ata_qc_from_tag(ap, ap->active_tag);
1295 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
1296 ap->host_set->mmio_base, ap, qc, qc->scsicmd,
1297 &qc->scsicmd->cmnd);
1298
1299 mv_err_intr(ap);
1300 mv_phy_reset(ap);
1301
1302 if (!qc) {
1303 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
1304 ap->id);
1305 } else {
1306 /* hack alert! We cannot use the supplied completion
1307 * function from inside the ->eh_strategy_handler() thread.
1308 * libata is the only user of ->eh_strategy_handler() in
1309 * any kernel, so the default scsi_done() assumes it is
1310 * not being called from the SCSI EH.
1311 */
1312 spin_lock_irqsave(&ap->host_set->lock, flags);
1313 qc->scsidone = scsi_finish_command;
1314 ata_qc_complete(qc, ATA_ERR);
1315 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1316 }
1317}
1318
1319/**
1320 * mv_port_init - Perform some early initialization on a single port.
1321 * @port: libata data structure storing shadow register addresses
1322 * @port_mmio: base address of the port
1323 *
1324 * Initialize shadow register mmio addresses, clear outstanding
1325 * interrupts on the port, and unmask interrupts for the future
1326 * start of the port.
1327 *
1328 * LOCKING:
1329 * Inherited from caller.
1330 */
1331static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
640{ 1332{
641 /* PIO related setup */
642 port->data_addr = base + SHD_PIO_DATA_OFS;
643 port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS;
644 port->nsect_addr = base + SHD_SECT_CNT_OFS;
645 port->lbal_addr = base + SHD_LBA_L_OFS;
646 port->lbam_addr = base + SHD_LBA_M_OFS;
647 port->lbah_addr = base + SHD_LBA_H_OFS;
648 port->device_addr = base + SHD_DEV_HD_OFS;
649 port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS;
650 port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS;
651 /* unused */
 1333 unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
 1334 unsigned serr_ofs;
 1335
 1336 /* PIO related setup
 1337 */
 1338 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
 1339 port->error_addr =
 1340 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
 1341 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
 1342 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
 1343 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
1344 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
1345 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
1346 port->status_addr =
1347 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
1348 /* special case: control/altstatus doesn't have ATA_REG_ address */
1349 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
1350
1351 /* unused: */
652 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; 1352 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
653 1353
1354 /* Clear any currently outstanding port interrupt conditions */
1355 serr_ofs = mv_scr_offset(SCR_ERROR);
1356 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
1357 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1358
654 /* unmask all EDMA error interrupts */ 1359 /* unmask all EDMA error interrupts */
655 writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS);
 1360 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
656 1361
657 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 1362 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
658 readl((void __iomem *)base + EDMA_CFG_OFS),
659 readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS),
660 readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS));
 1363 readl(port_mmio + EDMA_CFG_OFS),
 1364 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
 1365 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
661} 1366}
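The sizeof(u32) multiples work because the Marvell shadow block exposes the taskfile registers on 32-bit strides in ATA_REG_* order; with the usual indices from include/linux/ata.h (ATA_REG_DATA == 0x0, ATA_REG_STATUS == 0x7), for example:

	/*
	 *	port->data_addr   = shd_base + 4 * 0x0;	(shd_base + 0x00)
	 *	port->status_addr = shd_base + 4 * 0x7;	(shd_base + 0x1c)
	 */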
662 1367
1368/**
1369 * mv_host_init - Perform some early initialization of the host.
1370 * @probe_ent: early data struct representing the host
1371 *
1372 * If possible, do an early global reset of the host. Then do
1373 * our port init and clear/unmask all/relevant host interrupts.
1374 *
1375 * LOCKING:
1376 * Inherited from caller.
1377 */
663static int mv_host_init(struct ata_probe_ent *probe_ent) 1378static int mv_host_init(struct ata_probe_ent *probe_ent)
664{ 1379{
665 int rc = 0, n_hc, port, hc; 1380 int rc = 0, n_hc, port, hc;
666 void __iomem *mmio = probe_ent->mmio_base; 1381 void __iomem *mmio = probe_ent->mmio_base;
667 void __iomem *port_mmio; 1382 void __iomem *port_mmio;
668 1383
669 if (mv_master_reset(probe_ent->mmio_base)) {
 1384 if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) &&
1385 mv_global_soft_reset(probe_ent->mmio_base)) {
670 rc = 1; 1386 rc = 1;
671 goto done; 1387 goto done;
672 } 1388 }
@@ -676,17 +1392,27 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
676 1392
677 for (port = 0; port < probe_ent->n_ports; port++) { 1393 for (port = 0; port < probe_ent->n_ports; port++) {
678 port_mmio = mv_port_base(mmio, port); 1394 port_mmio = mv_port_base(mmio, port);
679 mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio);
 1395 mv_port_init(&probe_ent->port[port], port_mmio);
680 } 1396 }
681 1397
682 for (hc = 0; hc < n_hc; hc++) { 1398 for (hc = 0; hc < n_hc; hc++) {
683 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc,
684 readl(mv_hc_base(mmio, hc) + HC_CFG_OFS),
685 readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS));
 1399 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
 1400
 1401 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
1402 "(before clear)=0x%08x\n", hc,
1403 readl(hc_mmio + HC_CFG_OFS),
1404 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
1405
1406 /* Clear any currently outstanding hc interrupt conditions */
1407 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
686 } 1408 }
687 1409
688 writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
689 writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
 1410 /* Clear any currently outstanding host interrupt conditions */
 1411 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1412
1413 /* and unmask interrupt generation for host regs */
1414 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
1415 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
690 1416
691 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " 1417 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
692 "PCI int cause/mask=0x%08x/0x%08x\n", 1418 "PCI int cause/mask=0x%08x/0x%08x\n",
@@ -694,11 +1420,53 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
694 readl(mmio + HC_MAIN_IRQ_MASK_OFS), 1420 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
695 readl(mmio + PCI_IRQ_CAUSE_OFS), 1421 readl(mmio + PCI_IRQ_CAUSE_OFS),
696 readl(mmio + PCI_IRQ_MASK_OFS)); 1422 readl(mmio + PCI_IRQ_MASK_OFS));
697
698 done:
 1423done:
699 return rc; 1424 return rc;
700} 1425}
701 1426
1427/**
1428 * mv_print_info - Dump key info to kernel log for perusal.
1429 * @probe_ent: early data struct representing the host
1430 *
1431 * FIXME: complete this.
1432 *
1433 * LOCKING:
1434 * Inherited from caller.
1435 */
1436static void mv_print_info(struct ata_probe_ent *probe_ent)
1437{
1438 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1439 struct mv_host_priv *hpriv = probe_ent->private_data;
1440 u8 rev_id, scc;
1441 const char *scc_s;
1442
1443 /* Use this to determine the HW stepping of the chip so we know
1444 * what errata to workaround
1445 */
1446 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1447
1448 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
1449 if (scc == 0)
1450 scc_s = "SCSI";
1451 else if (scc == 0x01)
1452 scc_s = "RAID";
1453 else
1454 scc_s = "unknown";
1455
1456 printk(KERN_INFO DRV_NAME
1457 "(%s) %u slots %u ports %s mode IRQ via %s\n",
1458 pci_name(pdev), (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
1459 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
1460}
1461
1462/**
1463 * mv_init_one - handle a positive probe of a Marvell host
1464 * @pdev: PCI device found
1465 * @ent: PCI device ID entry for the matched host
1466 *
1467 * LOCKING:
1468 * Inherited from caller.
1469 */
702static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1470static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
703{ 1471{
704 static int printed_version = 0; 1472 static int printed_version = 0;
@@ -706,16 +1474,12 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
706 struct mv_host_priv *hpriv; 1474 struct mv_host_priv *hpriv;
707 unsigned int board_idx = (unsigned int)ent->driver_data; 1475 unsigned int board_idx = (unsigned int)ent->driver_data;
708 void __iomem *mmio_base; 1476 void __iomem *mmio_base;
709 int pci_dev_busy = 0; 1477 int pci_dev_busy = 0, rc;
710 int rc;
711 1478
712 if (!printed_version++) { 1479 if (!printed_version++) {
713 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 1480 printk(KERN_INFO DRV_NAME " version " DRV_VERSION "\n");
714 } 1481 }
715 1482
716 VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number,
717 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
718
719 rc = pci_enable_device(pdev); 1483 rc = pci_enable_device(pdev);
720 if (rc) { 1484 if (rc) {
721 return rc; 1485 return rc;
@@ -727,8 +1491,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
727 goto err_out; 1491 goto err_out;
728 } 1492 }
729 1493
730 pci_intx(pdev, 1);
731
732 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 1494 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
733 if (probe_ent == NULL) { 1495 if (probe_ent == NULL) {
734 rc = -ENOMEM; 1496 rc = -ENOMEM;
@@ -739,8 +1501,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
739 probe_ent->dev = pci_dev_to_dev(pdev); 1501 probe_ent->dev = pci_dev_to_dev(pdev);
740 INIT_LIST_HEAD(&probe_ent->node); 1502 INIT_LIST_HEAD(&probe_ent->node);
741 1503
742 mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR),
743 pci_resource_len(pdev, MV_PRIMARY_BAR));
 1504 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
744 if (mmio_base == NULL) { 1505 if (mmio_base == NULL) {
745 rc = -ENOMEM; 1506 rc = -ENOMEM;
746 goto err_out_free_ent; 1507 goto err_out_free_ent;
@@ -769,37 +1530,40 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
769 if (rc) { 1530 if (rc) {
770 goto err_out_hpriv; 1531 goto err_out_hpriv;
771 } 1532 }
772/* mv_print_info(probe_ent); */
773 1533
774 {
775 int b, w;
776 u32 dw[4]; /* hold a line of 16b */
777 VPRINTK("PCI config space:\n");
778 for (b = 0; b < 0x40; ) {
779 for (w = 0; w < 4; w++) {
780 (void) pci_read_config_dword(pdev,b,&dw[w]);
781 b += sizeof(*dw);
782 }
783 VPRINTK("%08x %08x %08x %08x\n",
784 dw[0],dw[1],dw[2],dw[3]);
785 }
 1534 /* Enable interrupts */
 1535 if (pci_enable_msi(pdev) == 0) {
 1536 hpriv->hp_flags |= MV_HP_FLAG_MSI;
 1537 } else {
 1538 pci_intx(pdev, 1);
786 } 1539 }
787 1540
788 /* FIXME: check ata_device_add return value */
789 ata_device_add(probe_ent);
790 kfree(probe_ent);
 1541 mv_dump_pci_cfg(pdev, 0x68);
 1542 mv_print_info(probe_ent);
 1543
1544 if (ata_device_add(probe_ent) == 0) {
1545 rc = -ENODEV; /* No devices discovered */
1546 goto err_out_dev_add;
1547 }
791 1548
1549 kfree(probe_ent);
792 return 0; 1550 return 0;
793 1551
794 err_out_hpriv:
 1552err_out_dev_add:
1553 if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
1554 pci_disable_msi(pdev);
1555 } else {
1556 pci_intx(pdev, 0);
1557 }
1558err_out_hpriv:
795 kfree(hpriv); 1559 kfree(hpriv);
796 err_out_iounmap:
 1560err_out_iounmap:
797 iounmap(mmio_base);
 1561 pci_iounmap(pdev, mmio_base);
798 err_out_free_ent:
 1562err_out_free_ent:
799 kfree(probe_ent); 1563 kfree(probe_ent);
800 err_out_regions:
 1564err_out_regions:
801 pci_release_regions(pdev); 1565 pci_release_regions(pdev);
802 err_out:
 1566err_out:
803 if (!pci_dev_busy) { 1567 if (!pci_dev_busy) {
804 pci_disable_device(pdev); 1568 pci_disable_device(pdev);
805 } 1569 }
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index c05653c7779d..749ff92d8c63 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -405,7 +405,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
405 rc = -ENOMEM; 405 rc = -ENOMEM;
406 406
407 ppi = &nv_port_info; 407 ppi = &nv_port_info;
408 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 408 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
409 if (!probe_ent) 409 if (!probe_ent)
410 goto err_out_regions; 410 goto err_out_regions;
411 411
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 538ad727bd2e..def7e0d9dacb 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -438,11 +438,11 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
438 break; 438 break;
439 439
440 default: 440 default:
441 ap->stats.idle_irq++; 441 ap->stats.idle_irq++;
442 break; 442 break;
443 } 443 }
444 444
445 return handled; 445 return handled;
446} 446}
447 447
448static void pdc_irq_clear(struct ata_port *ap) 448static void pdc_irq_clear(struct ata_port *ap)
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index b227e51d12f4..0761a3234fcf 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -263,7 +263,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
263 goto err_out_regions; 263 goto err_out_regions;
264 264
265 ppi = &sis_port_info; 265 ppi = &sis_port_info;
266 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 266 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
267 if (!probe_ent) { 267 if (!probe_ent) {
268 rc = -ENOMEM; 268 rc = -ENOMEM;
269 goto err_out_regions; 269 goto err_out_regions;
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 4c9fb8b71be1..9c06f2abe7f7 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -202,7 +202,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
202 goto err_out_regions; 202 goto err_out_regions;
203 203
204 ppi = &uli_port_info; 204 ppi = &uli_port_info;
205 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 205 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
206 if (!probe_ent) { 206 if (!probe_ent) {
207 rc = -ENOMEM; 207 rc = -ENOMEM;
208 goto err_out_regions; 208 goto err_out_regions;
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 128b996b07b7..565872479b9a 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -212,7 +212,7 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
212 struct ata_probe_ent *probe_ent; 212 struct ata_probe_ent *probe_ent;
213 struct ata_port_info *ppi = &svia_port_info; 213 struct ata_port_info *ppi = &svia_port_info;
214 214
215 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 215 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
216 if (!probe_ent) 216 if (!probe_ent)
217 return NULL; 217 return NULL;
218 218
diff --git a/include/linux/ata.h b/include/linux/ata.h
index a5b74efab067..ecb7346d0c16 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -132,6 +132,7 @@ enum {
132 ATA_CMD_PACKET = 0xA0, 132 ATA_CMD_PACKET = 0xA0,
133 ATA_CMD_VERIFY = 0x40, 133 ATA_CMD_VERIFY = 0x40,
134 ATA_CMD_VERIFY_EXT = 0x42, 134 ATA_CMD_VERIFY_EXT = 0x42,
135 ATA_CMD_INIT_DEV_PARAMS = 0x91,
135 136
136 /* SETFEATURES stuff */ 137 /* SETFEATURES stuff */
137 SETFEATURES_XFER = 0x03, 138 SETFEATURES_XFER = 0x03,
@@ -146,14 +147,14 @@ enum {
146 XFER_MW_DMA_2 = 0x22, 147 XFER_MW_DMA_2 = 0x22,
147 XFER_MW_DMA_1 = 0x21, 148 XFER_MW_DMA_1 = 0x21,
148 XFER_MW_DMA_0 = 0x20, 149 XFER_MW_DMA_0 = 0x20,
150 XFER_SW_DMA_2 = 0x12,
151 XFER_SW_DMA_1 = 0x11,
152 XFER_SW_DMA_0 = 0x10,
149 XFER_PIO_4 = 0x0C, 153 XFER_PIO_4 = 0x0C,
150 XFER_PIO_3 = 0x0B, 154 XFER_PIO_3 = 0x0B,
151 XFER_PIO_2 = 0x0A, 155 XFER_PIO_2 = 0x0A,
152 XFER_PIO_1 = 0x09, 156 XFER_PIO_1 = 0x09,
153 XFER_PIO_0 = 0x08, 157 XFER_PIO_0 = 0x08,
154 XFER_SW_DMA_2 = 0x12,
155 XFER_SW_DMA_1 = 0x11,
156 XFER_SW_DMA_0 = 0x10,
157 XFER_PIO_SLOW = 0x00, 158 XFER_PIO_SLOW = 0x00,
158 159
159 /* ATAPI stuff */ 160 /* ATAPI stuff */
@@ -181,6 +182,7 @@ enum {
181 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ 182 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
182 ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ 183 ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
183 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ 184 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
185 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
184}; 186};
185 187
186enum ata_tf_protocols { 188enum ata_tf_protocols {
@@ -250,6 +252,18 @@ struct ata_taskfile {
250 ((u64) (id)[(n) + 1] << 16) | \ 252 ((u64) (id)[(n) + 1] << 16) | \
251 ((u64) (id)[(n) + 0]) ) 253 ((u64) (id)[(n) + 0]) )
252 254
255static inline int ata_id_current_chs_valid(u16 *id)
256{
257 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
258 has not been issued to the device then the values of
259 id[54] to id[56] are vendor specific. */
260 return (id[53] & 0x01) && /* Current translation valid */
261 id[54] && /* cylinders in current translation */
262 id[55] && /* heads in current translation */
263 id[55] <= 16 &&
264 id[56]; /* sectors in current translation */
265}
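In short, the helper trusts IDENTIFY words 54-56 only when word 53 bit 0 flags the current translation as valid and the geometry is sane (all non-zero, at most 16 heads). Typical use, sketched against the cylinders/heads/sectors fields this patch adds to struct ata_device:

	u16 id[256];	/* IDENTIFY DEVICE data, assumed already read in */

	if (ata_id_current_chs_valid(id)) {
		dev->cylinders = id[54];	/* current cylinders */
		dev->heads     = id[55];	/* current heads */
		dev->sectors   = id[56];	/* current sectors/track */
	}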
266
253static inline int atapi_cdb_len(u16 *dev_id) 267static inline int atapi_cdb_len(u16 *dev_id)
254{ 268{
255 u16 tmp = dev_id[0] & 0x3; 269 u16 tmp = dev_id[0] & 0x3;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index ceee1fc42c60..7929cfc9318d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -97,6 +97,7 @@ enum {
97 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ 97 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
98 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ 98 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
99 ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */ 99 ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */
100 ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */
100 101
101 ATA_DEV_UNKNOWN = 0, /* unknown device */ 102 ATA_DEV_UNKNOWN = 0, /* unknown device */
102 ATA_DEV_ATA = 1, /* ATA device */ 103 ATA_DEV_ATA = 1, /* ATA device */
@@ -154,17 +155,21 @@ enum {
154 ATA_SHIFT_UDMA = 0, 155 ATA_SHIFT_UDMA = 0,
155 ATA_SHIFT_MWDMA = 8, 156 ATA_SHIFT_MWDMA = 8,
156 ATA_SHIFT_PIO = 11, 157 ATA_SHIFT_PIO = 11,
158
159 /* Masks for port functions */
160 ATA_PORT_PRIMARY = (1 << 0),
161 ATA_PORT_SECONDARY = (1 << 1),
157}; 162};
158 163
159enum pio_task_states { 164enum hsm_task_states {
160 PIO_ST_UNKNOWN, 165 HSM_ST_UNKNOWN,
161 PIO_ST_IDLE, 166 HSM_ST_IDLE,
162 PIO_ST_POLL, 167 HSM_ST_POLL,
163 PIO_ST_TMOUT, 168 HSM_ST_TMOUT,
164 PIO_ST, 169 HSM_ST,
165 PIO_ST_LAST, 170 HSM_ST_LAST,
166 PIO_ST_LAST_POLL, 171 HSM_ST_LAST_POLL,
167 PIO_ST_ERR, 172 HSM_ST_ERR,
168}; 173};
169 174
170/* forward declarations */ 175/* forward declarations */
@@ -282,6 +287,11 @@ struct ata_device {
282 u8 xfer_protocol; /* taskfile xfer protocol */ 287 u8 xfer_protocol; /* taskfile xfer protocol */
283 u8 read_cmd; /* opcode to use on read */ 288 u8 read_cmd; /* opcode to use on read */
284 u8 write_cmd; /* opcode to use on write */ 289 u8 write_cmd; /* opcode to use on write */
290
291 /* for CHS addressing */
292 u16 cylinders; /* Number of cylinders */
293 u16 heads; /* Number of heads */
294 u16 sectors; /* Number of sectors per track */
285}; 295};
286 296
287struct ata_port { 297struct ata_port {
@@ -319,7 +329,7 @@ struct ata_port {
319 struct work_struct packet_task; 329 struct work_struct packet_task;
320 330
321 struct work_struct pio_task; 331 struct work_struct pio_task;
322 unsigned int pio_task_state; 332 unsigned int hsm_task_state;
323 unsigned long pio_task_timeout; 333 unsigned long pio_task_timeout;
324 334
325 void *private_data; 335 void *private_data;
@@ -400,6 +410,8 @@ extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmn
400extern int ata_scsi_error(struct Scsi_Host *host); 410extern int ata_scsi_error(struct Scsi_Host *host);
401extern int ata_scsi_release(struct Scsi_Host *host); 411extern int ata_scsi_release(struct Scsi_Host *host);
402extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 412extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
413extern int ata_ratelimit(void);
414
403/* 415/*
404 * Default driver ops implementations 416 * Default driver ops implementations
405 */ 417 */
@@ -452,7 +464,7 @@ struct pci_bits {
452 464
453extern void ata_pci_host_stop (struct ata_host_set *host_set); 465extern void ata_pci_host_stop (struct ata_host_set *host_set);
454extern struct ata_probe_ent * 466extern struct ata_probe_ent *
455ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port); 467ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
456extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits); 468extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits);
457 469
458#endif /* CONFIG_PCI */ 470#endif /* CONFIG_PCI */