135 files changed, 4484 insertions, 1630 deletions
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl index 375ae760dc1e..d260d92089ad 100644 --- a/Documentation/DocBook/libata.tmpl +++ b/Documentation/DocBook/libata.tmpl | |||
@@ -415,6 +415,362 @@ and other resources, etc. | |||
415 | </sect1> | 415 | </sect1> |
416 | </chapter> | 416 | </chapter> |
417 | 417 | ||
418 | <chapter id="libataEH"> | ||
419 | <title>Error handling</title> | ||
420 | |||
421 | <para> | ||
422 | This chapter describes how errors are handled under libata. | ||
423 | Readers are advised to read SCSI EH | ||
424 | (Documentation/scsi/scsi_eh.txt) and ATA exceptions doc first. | ||
425 | </para> | ||
426 | |||
427 | <sect1><title>Origins of commands</title> | ||
428 | <para> | ||
429 | In libata, a command is represented with struct ata_queued_cmd | ||
430 | or qc. qc's are preallocated during port initialization and | ||
431 | used repeatedly for command execution. Currently only one | ||
432 | qc is allocated per port, but the yet-to-be-merged NCQ branch | ||
433 | allocates one for each tag and maps each qc to an NCQ tag 1-to-1. | ||
434 | </para> | ||
435 | <para> | ||
436 | libata commands can originate from two sources - libata itself | ||
437 | and SCSI midlayer. libata internal commands are used for | ||
438 | initialization and error handling. All normal blk requests | ||
439 | and commands for SCSI emulation are passed as SCSI commands | ||
440 | through queuecommand callback of SCSI host template. | ||
441 | </para> | ||
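As a rough, standalone illustration of the qc bookkeeping described above (one qc per tag, freed by clearing a bit, much like the ap->qactive bitmap mentioned later in this chapter), the following sketch uses invented names and is not the actual libata code:

#include <stdio.h>

#define MODEL_MAX_TAGS 32              /* one qc per tag in the NCQ model */

struct model_port {
	unsigned int qactive;          /* bit N set => tag N (and its qc) is in use */
};

/* Allocate a free tag, or return -1 if every qc is busy; there is no
 * wait/retry here, mirroring the note about ata_qc_new_init() below. */
static int model_qc_new(struct model_port *ap)
{
	int tag;

	for (tag = 0; tag < MODEL_MAX_TAGS; tag++) {
		if (!(ap->qactive & (1u << tag))) {
			ap->qactive |= 1u << tag;
			return tag;
		}
	}
	return -1;
}

/* Free a qc by clearing its bit, as the completion path does. */
static void model_qc_free(struct model_port *ap, int tag)
{
	ap->qactive &= ~(1u << tag);
}

int main(void)
{
	struct model_port ap = { .qactive = 0 };
	int tag = model_qc_new(&ap);

	printf("allocated tag %d, qactive=%#x\n", tag, ap.qactive);
	model_qc_free(&ap, tag);
	printf("freed tag %d, qactive=%#x\n", tag, ap.qactive);
	return 0;
}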
442 | </sect1> | ||
443 | |||
444 | <sect1><title>How commands are issued</title> | ||
445 | |||
446 | <variablelist> | ||
447 | |||
448 | <varlistentry><term>Internal commands</term> | ||
449 | <listitem> | ||
450 | <para> | ||
451 | First, qc is allocated and initialized using | ||
452 | ata_qc_new_init(). Although ata_qc_new_init() doesn't | ||
453 | implement any wait or retry mechanism when qc is not | ||
454 | available, internal commands are currently issued only during | ||
455 | initialization and error recovery, so no other command is | ||
456 | active and allocation is guaranteed to succeed. | ||
457 | </para> | ||
458 | <para> | ||
459 | Once allocated, the qc's taskfile is initialized for the command to | ||
460 | be executed. qc currently has two mechanisms to notify | ||
461 | completion. One is via qc->complete_fn() callback and the | ||
462 | other is completion qc->waiting. qc->complete_fn() callback | ||
463 | is the asynchronous path used by normal SCSI translated | ||
464 | commands and qc->waiting is the synchronous (issuer sleeps in | ||
465 | process context) path used by internal commands. | ||
466 | </para> | ||
467 | <para> | ||
468 | Once initialization is complete, host_set lock is acquired | ||
469 | and the qc is issued. | ||
470 | </para> | ||
471 | </listitem> | ||
472 | </varlistentry> | ||
473 | |||
474 | <varlistentry><term>SCSI commands</term> | ||
475 | <listitem> | ||
476 | <para> | ||
477 | All libata drivers use ata_scsi_queuecmd() as | ||
478 | hostt->queuecommand callback. scmds can either be simulated | ||
479 | or translated. No qc is involved in processing a simulated | ||
480 | scmd. The result is computed right away and the scmd is | ||
481 | completed. | ||
482 | </para> | ||
483 | <para> | ||
484 | For a translated scmd, ata_qc_new_init() is invoked to | ||
485 | allocate a qc and the scmd is translated into the qc. SCSI | ||
486 | midlayer's completion notification function pointer is stored | ||
487 | into qc->scsidone. | ||
488 | </para> | ||
489 | <para> | ||
490 | qc->complete_fn() callback is used for completion | ||
491 | notification. ATA commands use ata_scsi_qc_complete() while | ||
492 | ATAPI commands use atapi_qc_complete(). Both functions end up | ||
493 | calling qc->scsidone to notify upper layer when the qc is | ||
494 | finished. After translation is completed, the qc is issued | ||
495 | with ata_qc_issue(). | ||
496 | </para> | ||
497 | <para> | ||
498 | Note that the SCSI midlayer invokes hostt->queuecommand while | ||
499 | holding the host_set lock, so all of the above occurs while | ||
500 | holding the host_set lock. | ||
501 | </para> | ||
502 | </listitem> | ||
503 | </varlistentry> | ||
504 | |||
505 | </variablelist> | ||
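The two completion mechanisms mentioned above can be modelled as below; this is only an illustrative stand-in (the struct, flag and function names are invented), with a plain flag playing the role of qc->waiting and a callback playing the role of qc->complete_fn():

#include <stdio.h>

struct model_qc {
	/* asynchronous path: callback invoked on completion (SCSI-translated) */
	void (*complete_fn)(struct model_qc *qc);
	/* synchronous path: issuer waits for this flag (stands in for qc->waiting) */
	int done;
};

static void scsi_style_done(struct model_qc *qc)
{
	printf("asynchronous completion: notify the SCSI midlayer\n");
}

static void model_complete(struct model_qc *qc)
{
	if (qc->complete_fn)
		qc->complete_fn(qc);    /* asynchronous notification */
	else
		qc->done = 1;           /* wake the sleeping internal issuer */
}

int main(void)
{
	struct model_qc internal = { .complete_fn = NULL, .done = 0 };
	struct model_qc translated = { .complete_fn = scsi_style_done, .done = 0 };

	model_complete(&translated);
	model_complete(&internal);
	printf("internal command done flag: %d\n", internal.done);
	return 0;
}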
506 | </sect1> | ||
507 | |||
508 | <sect1><title>How commands are processed</title> | ||
509 | <para> | ||
510 | Depending on which protocol and which controller are used, | ||
511 | commands are processed differently. For the purpose of | ||
512 | discussion, a controller which uses taskfile interface and all | ||
513 | standard callbacks is assumed. | ||
514 | </para> | ||
515 | <para> | ||
516 | Currently 6 ATA command protocols are used. They can be | ||
517 | sorted into the following four categories according to how | ||
518 | they are processed. | ||
519 | </para> | ||
520 | |||
521 | <variablelist> | ||
522 | <varlistentry><term>ATA NO DATA or DMA</term> | ||
523 | <listitem> | ||
524 | <para> | ||
525 | ATA_PROT_NODATA and ATA_PROT_DMA fall into this category. | ||
526 | These types of commands don't require any software | ||
527 | intervention once issued. Device will raise interrupt on | ||
528 | completion. | ||
529 | </para> | ||
530 | </listitem> | ||
531 | </varlistentry> | ||
532 | |||
533 | <varlistentry><term>ATA PIO</term> | ||
534 | <listitem> | ||
535 | <para> | ||
536 | ATA_PROT_PIO is in this category. libata currently | ||
537 | implements PIO with polling. ATA_NIEN bit is set to turn | ||
538 | off interrupt and pio_task on ata_wq performs polling and | ||
539 | IO. | ||
540 | </para> | ||
541 | </listitem> | ||
542 | </varlistentry> | ||
543 | |||
544 | <varlistentry><term>ATAPI NODATA or DMA</term> | ||
545 | <listitem> | ||
546 | <para> | ||
547 | ATA_PROT_ATAPI_NODATA and ATA_PROT_ATAPI_DMA are in this | ||
548 | category. packet_task is used to poll BSY bit after | ||
549 | issuing PACKET command. Once BSY is turned off by the | ||
550 | device, packet_task transfers CDB and hands off processing | ||
551 | to interrupt handler. | ||
552 | </para> | ||
553 | </listitem> | ||
554 | </varlistentry> | ||
555 | |||
556 | <varlistentry><term>ATAPI PIO</term> | ||
557 | <listitem> | ||
558 | <para> | ||
559 | ATA_PROT_ATAPI is in this category. ATA_NIEN bit is set | ||
560 | and, as in ATAPI NODATA or DMA, packet_task submits cdb. | ||
561 | However, after submitting cdb, further processing (data | ||
562 | transfer) is handed off to pio_task. | ||
563 | </para> | ||
564 | </listitem> | ||
565 | </varlistentry> | ||
566 | </variablelist> | ||
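The four categories can be summarized in code roughly as follows; the enum values mirror the protocol names used above, but the helper is a descriptive sketch, not the actual libata dispatch logic:

#include <stdio.h>

enum model_prot {
	PROT_NODATA, PROT_DMA, PROT_PIO,
	PROT_ATAPI_NODATA, PROT_ATAPI_DMA, PROT_ATAPI_PIO,
};

/* Map a protocol to how it is driven after issue, per the text above. */
static const char *how_processed(enum model_prot prot)
{
	switch (prot) {
	case PROT_NODATA:
	case PROT_DMA:
		return "no software intervention; device interrupts on completion";
	case PROT_PIO:
		return "interrupts off (ATA_NIEN); pio_task polls and does the IO";
	case PROT_ATAPI_NODATA:
	case PROT_ATAPI_DMA:
		return "packet_task polls BSY, sends the CDB, then interrupt driven";
	case PROT_ATAPI_PIO:
		return "packet_task sends the CDB, then pio_task does the transfer";
	}
	return "unknown";
}

int main(void)
{
	printf("%s\n", how_processed(PROT_ATAPI_PIO));
	return 0;
}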
567 | </sect1> | ||
568 | |||
569 | <sect1><title>How commands are completed</title> | ||
570 | <para> | ||
571 | Once issued, all qc's are either completed with | ||
572 | ata_qc_complete() or time out. For commands which are handled | ||
573 | by interrupts, ata_host_intr() invokes ata_qc_complete(), and, | ||
574 | for PIO tasks, pio_task invokes ata_qc_complete(). In error | ||
575 | cases, packet_task may also complete commands. | ||
576 | </para> | ||
577 | <para> | ||
578 | ata_qc_complete() does the following. | ||
579 | </para> | ||
580 | |||
581 | <orderedlist> | ||
582 | |||
583 | <listitem> | ||
584 | <para> | ||
585 | DMA memory is unmapped. | ||
586 | </para> | ||
587 | </listitem> | ||
588 | |||
589 | <listitem> | ||
590 | <para> | ||
591 | ATA_QCFLAG_ACTIVE is cleared from qc->flags. | ||
592 | </para> | ||
593 | </listitem> | ||
594 | |||
595 | <listitem> | ||
596 | <para> | ||
597 | qc->complete_fn() callback is invoked. If the return value of | ||
598 | the callback is not zero, completion is short-circuited and | ||
599 | ata_qc_complete() returns. | ||
600 | </para> | ||
601 | </listitem> | ||
602 | |||
603 | <listitem> | ||
604 | <para> | ||
605 | __ata_qc_complete() is called, which does | ||
606 | <orderedlist> | ||
607 | |||
608 | <listitem> | ||
609 | <para> | ||
610 | qc->flags is cleared to zero. | ||
611 | </para> | ||
612 | </listitem> | ||
613 | |||
614 | <listitem> | ||
615 | <para> | ||
616 | ap->active_tag and qc->tag are poisoned. | ||
617 | </para> | ||
618 | </listitem> | ||
619 | |||
620 | <listitem> | ||
621 | <para> | ||
622 | qc->waiting is cleared & completed (in that order). | ||
623 | </para> | ||
624 | </listitem> | ||
625 | |||
626 | <listitem> | ||
627 | <para> | ||
628 | qc is deallocated by clearing the appropriate bit in ap->qactive. | ||
629 | </para> | ||
630 | </listitem> | ||
631 | |||
632 | </orderedlist> | ||
633 | </para> | ||
634 | </listitem> | ||
635 | |||
636 | </orderedlist> | ||
637 | |||
638 | <para> | ||
639 | So, it basically notifies the upper layer and deallocates the qc. One | ||
640 | exception is the short-circuit path in #3, which is used by | ||
641 | atapi_qc_complete(). | ||
642 | </para> | ||
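The numbered steps can be condensed into a simplified model like the one below. The real functions take different arguments and do more work (DMA unmapping, tag poisoning); the sketch only shows the ordering and the short-circuit behaviour, with invented names:

#include <stdio.h>

#define QCFLAG_ACTIVE 0x1

struct model_qc {
	unsigned int flags;
	unsigned int tag;
	int (*complete_fn)(struct model_qc *qc);
	unsigned int *qactive;             /* port's qc allocation bitmap */
	int waiting_done;                  /* stands in for qc->waiting */
};

/* Models __ata_qc_complete(): notify any waiter and free the qc. */
static void model_finish(struct model_qc *qc)
{
	qc->flags = 0;                     /* 4a: clear flags (tag poisoning omitted) */
	qc->waiting_done = 1;              /* 4c: complete the waiter */
	*qc->qactive &= ~(1u << qc->tag);  /* 4d: deallocate by clearing the bit */
}

/* Models the ata_qc_complete() sequence in the list above. */
static void model_qc_complete(struct model_qc *qc)
{
	/* 1: DMA unmapping omitted in this model */
	qc->flags &= ~QCFLAG_ACTIVE;       /* 2: clear ACTIVE */
	if (qc->complete_fn && qc->complete_fn(qc))
		return;                    /* 3: non-zero return short-circuits */
	model_finish(qc);                  /* 4: notify and free */
}

/* Models atapi_qc_complete() on failure: keep the qc around for EH. */
static int keep_qc_for_eh(struct model_qc *qc)
{
	return 1;
}

int main(void)
{
	unsigned int qactive = 1u << 0;
	struct model_qc qc = { .flags = QCFLAG_ACTIVE, .tag = 0,
	                       .complete_fn = keep_qc_for_eh, .qactive = &qactive };

	model_qc_complete(&qc);
	printf("qactive=%#x (qc still allocated: completion short-circuited)\n", qactive);
	return 0;
}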
643 | <para> | ||
644 | For all non-ATAPI commands, whether they fail or not, almost | ||
645 | the same code path is taken and very little error handling | ||
646 | takes place. A qc is completed with success status if it | ||
647 | succeeded, with failed status otherwise. | ||
648 | </para> | ||
649 | <para> | ||
650 | However, failed ATAPI commands require more handling as | ||
651 | REQUEST SENSE is needed to acquire sense data. If an ATAPI | ||
652 | command fails, ata_qc_complete() is invoked with error status, | ||
653 | which in turn invokes atapi_qc_complete() via | ||
654 | qc->complete_fn() callback. | ||
655 | </para> | ||
656 | <para> | ||
657 | This makes atapi_qc_complete() set scmd->result to | ||
658 | SAM_STAT_CHECK_CONDITION, complete the scmd and return 1. As | ||
659 | the sense data is empty but scmd->result is CHECK CONDITION, | ||
660 | SCSI midlayer will invoke EH for the scmd, and returning 1 | ||
661 | makes ata_qc_complete() return without deallocating the qc. | ||
662 | This leads us to ata_scsi_error() with a partially completed qc. | ||
663 | </para> | ||
664 | |||
665 | </sect1> | ||
666 | |||
667 | <sect1><title>ata_scsi_error()</title> | ||
668 | <para> | ||
669 | ata_scsi_error() is the current hostt->eh_strategy_handler() | ||
670 | for libata. As discussed above, this will be entered in two | ||
671 | cases - timeout and ATAPI error completion. This function | ||
672 | calls low level libata driver's eng_timeout() callback, the | ||
673 | standard callback for which is ata_eng_timeout(). It checks | ||
674 | if a qc is active and calls ata_qc_timeout() on the qc if so. | ||
675 | Actual error handling occurs in ata_qc_timeout(). | ||
676 | </para> | ||
677 | <para> | ||
678 | If EH is invoked for timeout, ata_qc_timeout() stops BMDMA and | ||
679 | completes the qc. Note that as we're currently in EH, we | ||
680 | cannot call scsi_done. As described in SCSI EH doc, a | ||
681 | recovered scmd should be either retried with | ||
682 | scsi_queue_insert() or finished with scsi_finish_command(). | ||
683 | Here, we override qc->scsidone with scsi_finish_command() and | ||
684 | call ata_qc_complete(). | ||
685 | </para> | ||
686 | <para> | ||
687 | If EH is invoked due to a failed ATAPI qc, the qc here is | ||
688 | completed but not deallocated. The purpose of this | ||
689 | half-completion is to use the qc as a placeholder to make EH | ||
690 | code reach this place. This is a bit hackish, but it works. | ||
691 | </para> | ||
692 | <para> | ||
693 | Once control reaches here, the qc is deallocated by invoking | ||
694 | __ata_qc_complete() explicitly. Then, internal qc for REQUEST | ||
695 | SENSE is issued. Once sense data is acquired, scmd is | ||
696 | finished by directly invoking scsi_finish_command() on the | ||
697 | scmd. Note that as we already have completed and deallocated | ||
698 | the qc which was associated with the scmd, we don't need | ||
699 | to/cannot call ata_qc_complete() again. | ||
700 | </para> | ||
701 | |||
702 | </sect1> | ||
703 | |||
704 | <sect1><title>Problems with the current EH</title> | ||
705 | |||
706 | <itemizedlist> | ||
707 | |||
708 | <listitem> | ||
709 | <para> | ||
710 | Error representation is too crude. Currently any and all | ||
711 | error conditions are represented with ATA STATUS and ERROR | ||
712 | registers. Errors which aren't ATA device errors are treated | ||
713 | as ATA device errors by setting the ATA_ERR bit. A better error | ||
714 | descriptor which can properly represent ATA and other | ||
715 | errors/exceptions is needed. | ||
716 | </para> | ||
717 | </listitem> | ||
718 | |||
719 | <listitem> | ||
720 | <para> | ||
721 | When handling timeouts, no action is taken to make the device | ||
722 | forget about the timed out command and become ready for new commands. | ||
723 | </para> | ||
724 | </listitem> | ||
725 | |||
726 | <listitem> | ||
727 | <para> | ||
728 | EH handling via ata_scsi_error() is not properly protected | ||
729 | from usual command processing. On EH entrance, the device is | ||
730 | not in a quiescent state. Timed out commands may succeed or | ||
731 | fail at any time. pio_task and atapi_task may still be running. | ||
732 | </para> | ||
733 | </listitem> | ||
734 | |||
735 | <listitem> | ||
736 | <para> | ||
737 | Error recovery is too weak. Devices / controllers causing HSM | ||
738 | mismatch errors and other errors quite often require a reset to | ||
739 | return to a known state. Also, advanced error handling is | ||
740 | necessary to support features like NCQ and hotplug. | ||
741 | </para> | ||
742 | </listitem> | ||
743 | |||
744 | <listitem> | ||
745 | <para> | ||
746 | ATA errors are directly handled in the interrupt handler and | ||
747 | PIO errors in pio_task. This is problematic for advanced | ||
748 | error handling for the following reasons. | ||
749 | </para> | ||
750 | <para> | ||
751 | First, advanced error handling often requires context and | ||
752 | internal qc execution. | ||
753 | </para> | ||
754 | <para> | ||
755 | Second, even a simple failure (say, CRC error) needs | ||
756 | information gathering and could trigger complex error handling | ||
757 | (say, resetting & reconfiguring). Having multiple code | ||
758 | paths to gather information, enter EH and trigger actions | ||
759 | makes life painful. | ||
760 | </para> | ||
761 | <para> | ||
762 | Third, scattered EH code makes implementing low level drivers | ||
763 | difficult. Low level drivers override libata callbacks. If | ||
764 | EH is scattered over several places, each affected callback | ||
765 | should perform its part of error handling. This can be error | ||
766 | prone and painful. | ||
767 | </para> | ||
768 | </listitem> | ||
769 | |||
770 | </itemizedlist> | ||
771 | </sect1> | ||
772 | </chapter> | ||
773 | |||
418 | <chapter id="libataExt"> | 774 | <chapter id="libataExt"> |
419 | <title>libata Library</title> | 775 | <title>libata Library</title> |
420 | !Edrivers/scsi/libata-core.c | 776 | !Edrivers/scsi/libata-core.c |
@@ -431,6 +787,722 @@ and other resources, etc. | |||
431 | !Idrivers/scsi/libata-scsi.c | 787 | !Idrivers/scsi/libata-scsi.c |
432 | </chapter> | 788 | </chapter> |
433 | 789 | ||
790 | <chapter id="ataExceptions"> | ||
791 | <title>ATA errors & exceptions</title> | ||
792 | |||
793 | <para> | ||
794 | This chapter tries to identify what error/exception conditions exist | ||
795 | for ATA/ATAPI devices and describe how they should be handled in | ||
796 | an implementation-neutral way. | ||
797 | </para> | ||
798 | |||
799 | <para> | ||
800 | The term 'error' is used to describe conditions where either an | ||
801 | explicit error condition is reported from device or a command has | ||
802 | timed out. | ||
803 | </para> | ||
804 | |||
805 | <para> | ||
806 | The term 'exception' is either used to describe exceptional | ||
807 | conditions which are not errors (say, power or hotplug events), or | ||
808 | to describe both errors and non-error exceptional conditions. Where | ||
809 | explicit distinction between error and exception is necessary, the | ||
810 | term 'non-error exception' is used. | ||
811 | </para> | ||
812 | |||
813 | <sect1 id="excat"> | ||
814 | <title>Exception categories</title> | ||
815 | <para> | ||
816 | Exceptions are described primarily with respect to legacy | ||
817 | taskfile + bus master IDE interface. If a controller provides | ||
818 | another, better mechanism for error reporting, mapping those into | ||
819 | the categories described below shouldn't be difficult. | ||
820 | </para> | ||
821 | |||
822 | <para> | ||
823 | In the following sections, two recovery actions - reset and | ||
824 | reconfiguring transport - are mentioned. These are described | ||
825 | further in <xref linkend="exrec"/>. | ||
826 | </para> | ||
827 | |||
828 | <sect2 id="excatHSMviolation"> | ||
829 | <title>HSM violation</title> | ||
830 | <para> | ||
831 | This error is indicated when the STATUS value doesn't match the HSM | ||
832 | requirement while issuing or executing any ATA/ATAPI command. | ||
833 | </para> | ||
834 | |||
835 | <itemizedlist> | ||
836 | <title>Examples</title> | ||
837 | |||
838 | <listitem> | ||
839 | <para> | ||
840 | ATA_STATUS doesn't contain !BSY && DRDY && !DRQ while trying | ||
841 | to issue a command. | ||
842 | </para> | ||
843 | </listitem> | ||
844 | |||
845 | <listitem> | ||
846 | <para> | ||
847 | !BSY && !DRQ during PIO data transfer. | ||
848 | </para> | ||
849 | </listitem> | ||
850 | |||
851 | <listitem> | ||
852 | <para> | ||
853 | DRQ on command completion. | ||
854 | </para> | ||
855 | </listitem> | ||
856 | |||
857 | <listitem> | ||
858 | <para> | ||
859 | !BSY && ERR after CDB transfer starts but before the | ||
860 | last byte of CDB is transferred. ATA/ATAPI standard states | ||
861 | that "The device shall not terminate the PACKET command | ||
862 | with an error before the last byte of the command packet has | ||
863 | been written" in the error outputs description of PACKET | ||
864 | command and the state diagram doesn't include such | ||
865 | transitions. | ||
866 | </para> | ||
867 | </listitem> | ||
868 | |||
869 | </itemizedlist> | ||
870 | |||
871 | <para> | ||
872 | In these cases, HSM is violated and not much information | ||
873 | regarding the error can be acquired from STATUS or ERROR | ||
874 | register. IOW, this error can be anything - driver bug, | ||
875 | faulty device, controller and/or cable. | ||
876 | </para> | ||
877 | |||
878 | <para> | ||
879 | As HSM is violated, reset is necessary to restore known state. | ||
880 | Reconfiguring transport for lower speed might be helpful too | ||
881 | as transmission errors sometimes cause this kind of error. | ||
882 | </para> | ||
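For reference, the first example above amounts to a bit test on the STATUS register. The bit values below are the standard ATA status bits; the helper itself is only an illustrative sketch:

#include <stdio.h>

/* Standard ATA STATUS register bits */
#define ATA_BUSY 0x80   /* BSY  */
#define ATA_DRDY 0x40   /* DRDY */
#define ATA_DRQ  0x08   /* DRQ  */
#define ATA_ERR  0x01   /* ERR  */

/* HSM requires !BSY && DRDY && !DRQ before issuing a new ATA command. */
static int ok_to_issue(unsigned char status)
{
	return !(status & ATA_BUSY) &&
	        (status & ATA_DRDY) &&
	       !(status & ATA_DRQ);
}

int main(void)
{
	unsigned char status = ATA_DRDY | ATA_DRQ;   /* DRQ still set: HSM violation */

	printf("status=%#x, ok to issue: %s\n", status,
	       ok_to_issue(status) ? "yes" : "no");
	return 0;
}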
883 | </sect2> | ||
884 | |||
885 | <sect2 id="excatDevErr"> | ||
886 | <title>ATA/ATAPI device error (non-NCQ / non-CHECK CONDITION)</title> | ||
887 | |||
888 | <para> | ||
889 | These are errors detected and reported by ATA/ATAPI devices | ||
890 | indicating device problems. For this type of error, STATUS | ||
891 | and ERROR register values are valid and describe the error | ||
892 | condition. Note that some ATA bus errors are detected by | ||
893 | ATA/ATAPI devices and reported using the same mechanism as | ||
894 | device errors. Those cases are described later in this | ||
895 | section. | ||
896 | </para> | ||
897 | |||
898 | <para> | ||
899 | For ATA commands, this type of error is indicated by !BSY | ||
900 | && ERR during command execution and on completion. | ||
901 | </para> | ||
902 | |||
903 | <para>For ATAPI commands,</para> | ||
904 | |||
905 | <itemizedlist> | ||
906 | |||
907 | <listitem> | ||
908 | <para> | ||
909 | !BSY && ERR && ABRT right after issuing PACKET | ||
910 | indicates that PACKET command is not supported and falls in | ||
911 | this category. | ||
912 | </para> | ||
913 | </listitem> | ||
914 | |||
915 | <listitem> | ||
916 | <para> | ||
917 | !BSY && ERR(==CHK) && !ABRT after the last | ||
918 | byte of CDB is transferred indicates CHECK CONDITION and | ||
919 | doesn't fall in this category. | ||
920 | </para> | ||
921 | </listitem> | ||
922 | |||
923 | <listitem> | ||
924 | <para> | ||
925 | !BSY && ERR(==CHK) && ABRT after the last byte | ||
926 | of CDB is transferred *probably* indicates CHECK CONDITION and | ||
927 | doesn't fall in this category. | ||
928 | </para> | ||
929 | </listitem> | ||
930 | |||
931 | </itemizedlist> | ||
932 | |||
933 | <para> | ||
934 | Of the errors detected as above, the following are not ATA/ATAPI | ||
935 | device errors but ATA bus errors and should be handled | ||
936 | according to <xref linkend="excatATAbusErr"/>. | ||
937 | </para> | ||
938 | |||
939 | <variablelist> | ||
940 | |||
941 | <varlistentry> | ||
942 | <term>CRC error during data transfer</term> | ||
943 | <listitem> | ||
944 | <para> | ||
945 | This is indicated by ICRC bit in the ERROR register and | ||
946 | means that corruption occurred during data transfer. Up to | ||
947 | ATA/ATAPI-7, the standard specifies that this bit is only | ||
948 | applicable to UDMA transfers but ATA/ATAPI-8 draft revision | ||
949 | 1f says that the bit may be applicable to multiword DMA and | ||
950 | PIO. | ||
951 | </para> | ||
952 | </listitem> | ||
953 | </varlistentry> | ||
954 | |||
955 | <varlistentry> | ||
956 | <term>ABRT error during data transfer or on completion</term> | ||
957 | <listitem> | ||
958 | <para> | ||
959 | Up to ATA/ATAPI-7, the standard specifies that ABRT could be | ||
960 | set on ICRC errors and in cases where a device is not able | ||
961 | to complete a command. Combined with the fact that MWDMA | ||
962 | and PIO transfer errors aren't allowed to use the ICRC bit up to | ||
963 | ATA/ATAPI-7, it seems to imply that the ABRT bit alone could | ||
964 | indicate transfer errors. | ||
965 | </para> | ||
966 | <para> | ||
967 | However, ATA/ATAPI-8 draft revision 1f removes the part | ||
968 | that ICRC errors can turn on ABRT. So, this is kind of a | ||
969 | gray area. Some heuristics are needed here. | ||
970 | </para> | ||
971 | </listitem> | ||
972 | </varlistentry> | ||
973 | |||
974 | </variablelist> | ||
975 | |||
976 | <para> | ||
977 | ATA/ATAPI device errors can be further categorized as follows. | ||
978 | </para> | ||
979 | |||
980 | <variablelist> | ||
981 | |||
982 | <varlistentry> | ||
983 | <term>Media errors</term> | ||
984 | <listitem> | ||
985 | <para> | ||
986 | This is indicated by the UNC bit in the ERROR register. ATA | ||
987 | devices report a UNC error only after a certain number of | ||
988 | retries cannot recover the data, so there's not much | ||
989 | else to do other than notifying the upper layer. | ||
990 | </para> | ||
991 | <para> | ||
992 | READ and WRITE commands report CHS or LBA of the first | ||
993 | failed sector but ATA/ATAPI standard specifies that the | ||
994 | amount of transferred data on error completion is | ||
995 | indeterminate, so we cannot assume that sectors preceding | ||
996 | the failed sector have been transferred and thus cannot | ||
997 | complete those sectors successfully as SCSI does. | ||
998 | </para> | ||
999 | </listitem> | ||
1000 | </varlistentry> | ||
1001 | |||
1002 | <varlistentry> | ||
1003 | <term>Media changed / media change requested error</term> | ||
1004 | <listitem> | ||
1005 | <para> | ||
1006 | <<TODO: fill here>> | ||
1007 | </para> | ||
1008 | </listitem> | ||
1009 | </varlistentry> | ||
1010 | |||
1011 | <varlistentry><term>Address error</term> | ||
1012 | <listitem> | ||
1013 | <para> | ||
1014 | This is indicated by IDNF bit in the ERROR register. | ||
1015 | Report to upper layer. | ||
1016 | </para> | ||
1017 | </listitem> | ||
1018 | </varlistentry> | ||
1019 | |||
1020 | <varlistentry><term>Other errors</term> | ||
1021 | <listitem> | ||
1022 | <para> | ||
1023 | This can be an invalid command or parameter indicated by the ABRT | ||
1024 | bit in the ERROR register, or some other error condition. Note | ||
1025 | that the ABRT bit can indicate a lot of things including ICRC and | ||
1026 | address errors. Heuristics are needed. | ||
1027 | </para> | ||
1028 | </listitem> | ||
1029 | </varlistentry> | ||
1030 | |||
1031 | </variablelist> | ||
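A rough classifier over the ERROR register, following the categories above, might look like the sketch below. The bit positions are the conventional ATA ERROR register bits; the decision order is only a summary of the text, not a complete heuristic:

#include <stdio.h>

/* Conventional ATA ERROR register bits */
#define ERR_ICRC 0x80   /* interface CRC error          */
#define ERR_UNC  0x40   /* uncorrectable media error    */
#define ERR_MC   0x20   /* media changed                */
#define ERR_IDNF 0x10   /* ID not found (address error) */
#define ERR_MCR  0x08   /* media change requested       */
#define ERR_ABRT 0x04   /* command aborted              */

static const char *classify_error(unsigned char err)
{
	if (err & ERR_ICRC)
		return "ATA bus error (ICRC)";
	if (err & ERR_UNC)
		return "media error";
	if (err & (ERR_MC | ERR_MCR))
		return "media changed / change requested";
	if (err & ERR_IDNF)
		return "address error";
	if (err & ERR_ABRT)
		return "aborted - invalid command or possibly a bus error (heuristics)";
	return "other device error";
}

int main(void)
{
	printf("%s\n", classify_error(ERR_ICRC | ERR_ABRT));
	return 0;
}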
1032 | |||
1033 | <para> | ||
1034 | Depending on commands, not all STATUS/ERROR bits are | ||
1035 | applicable. These non-applicable bits are marked with | ||
1036 | "na" in the output descriptions but up to ATA/ATAPI-7 | ||
1037 | no definition of "na" can be found. However, | ||
1038 | ATA/ATAPI-8 draft revision 1f describes "N/A" as | ||
1039 | follows. | ||
1040 | </para> | ||
1041 | |||
1042 | <blockquote> | ||
1043 | <variablelist> | ||
1044 | <varlistentry><term>3.2.3.3a N/A</term> | ||
1045 | <listitem> | ||
1046 | <para> | ||
1047 | A keyword that indicates a field has no defined value in | ||
1048 | this standard and should not be checked by the host or | ||
1049 | device. N/A fields should be cleared to zero. | ||
1050 | </para> | ||
1051 | </listitem> | ||
1052 | </varlistentry> | ||
1053 | </variablelist> | ||
1054 | </blockquote> | ||
1055 | |||
1056 | <para> | ||
1057 | So, it seems reasonable to assume that "na" bits are | ||
1058 | cleared to zero by devices and thus need no explicit masking. | ||
1059 | </para> | ||
1060 | |||
1061 | </sect2> | ||
1062 | |||
1063 | <sect2 id="excatATAPIcc"> | ||
1064 | <title>ATAPI device CHECK CONDITION</title> | ||
1065 | |||
1066 | <para> | ||
1067 | ATAPI device CHECK CONDITION error is indicated by set CHK bit | ||
1068 | (ERR bit) in the STATUS register after the last byte of CDB is | ||
1069 | transferred for a PACKET command. For this kind of error, | ||
1070 | sense data should be acquired to gather information regarding | ||
1071 | the error. The REQUEST SENSE packet command should be used to | ||
1072 | acquire sense data. | ||
1073 | </para> | ||
1074 | |||
1075 | <para> | ||
1076 | Once sense data is acquired, this type of error can be | ||
1077 | handled similarly to other SCSI errors. Note that sense data | ||
1078 | may indicate ATA bus error (e.g. Sense Key 04h HARDWARE ERROR | ||
1079 | && ASC/ASCQ 47h/00h SCSI PARITY ERROR). In such | ||
1080 | cases, the error should be considered as an ATA bus error and | ||
1081 | handled according to <xref linkend="excatATAbusErr"/>. | ||
1082 | </para> | ||
1083 | |||
1084 | </sect2> | ||
1085 | |||
1086 | <sect2 id="excatNCQerr"> | ||
1087 | <title>ATA device error (NCQ)</title> | ||
1088 | |||
1089 | <para> | ||
1090 | NCQ command error is indicated by a cleared BSY bit and a set ERR bit | ||
1091 | during NCQ command phase (one or more NCQ commands | ||
1092 | outstanding). Although STATUS and ERROR registers will | ||
1093 | contain valid values describing the error, READ LOG EXT is | ||
1094 | required to clear the error condition, determine which command | ||
1095 | has failed and acquire more information. | ||
1096 | </para> | ||
1097 | |||
1098 | <para> | ||
1099 | READ LOG EXT Log Page 10h reports which tag has failed and | ||
1100 | taskfile register values describing the error. With this | ||
1101 | information the failed command can be handled as a normal ATA | ||
1102 | command error as in <xref linkend="excatDevErr"/> and all | ||
1103 | other in-flight commands must be retried. Note that this | ||
1104 | retry should not be counted - it's likely that commands | ||
1105 | retried this way would have completed normally if it were not | ||
1106 | for the failed command. | ||
1107 | </para> | ||
1108 | |||
1109 | <para> | ||
1110 | Note that ATA bus errors can be reported as ATA device NCQ | ||
1111 | errors. This should be handled as described in <xref | ||
1112 | linkend="excatATAbusErr"/>. | ||
1113 | </para> | ||
1114 | |||
1115 | <para> | ||
1116 | If READ LOG EXT Log Page 10h fails or reports NQ, we're | ||
1117 | thoroughly screwed. This condition should be treated | ||
1118 | according to <xref linkend="excatHSMviolation"/>. | ||
1119 | </para> | ||
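For the common case, the routing decision comes from the first byte of the Log Page 10h data: bit 7 is the NQ flag and the low five bits hold the failed tag. A minimal parse of that byte could look like this (the rest of the page layout is not modelled here):

#include <stdio.h>

#define NCQ_ERR_LOG_NQ   0x80   /* bit 7: error was for a non-queued command */
#define NCQ_ERR_LOG_TAG  0x1f   /* bits 4:0: tag of the failed NCQ command   */

/* Returns the failed tag, or -1 if NQ is set (treat like an HSM violation). */
static int parse_ncq_error_log(const unsigned char *log_page)
{
	if (log_page[0] & NCQ_ERR_LOG_NQ)
		return -1;
	return log_page[0] & NCQ_ERR_LOG_TAG;
}

int main(void)
{
	unsigned char page[512] = { 0x05 };   /* pretend tag 5 failed */
	int tag = parse_ncq_error_log(page);

	if (tag < 0)
		printf("NQ set - fall back to reset-style recovery\n");
	else
		printf("tag %d failed - handle as a device error, retry the rest\n", tag);
	return 0;
}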
1120 | |||
1121 | </sect2> | ||
1122 | |||
1123 | <sect2 id="excatATAbusErr"> | ||
1124 | <title>ATA bus error</title> | ||
1125 | |||
1126 | <para> | ||
1127 | ATA bus error means that data corruption occurred during | ||
1128 | transmission over the ATA bus (SATA or PATA). This type of error | ||
1129 | can be indicated by | ||
1130 | </para> | ||
1131 | |||
1132 | <itemizedlist> | ||
1133 | |||
1134 | <listitem> | ||
1135 | <para> | ||
1136 | ICRC or ABRT error as described in <xref linkend="excatDevErr"/>. | ||
1137 | </para> | ||
1138 | </listitem> | ||
1139 | |||
1140 | <listitem> | ||
1141 | <para> | ||
1142 | Controller-specific error completion with error information | ||
1143 | indicating transmission error. | ||
1144 | </para> | ||
1145 | </listitem> | ||
1146 | |||
1147 | <listitem> | ||
1148 | <para> | ||
1149 | On some controllers, command timeout. In this case, there may | ||
1150 | be a mechanism to determine that the timeout is due to | ||
1151 | transmission error. | ||
1152 | </para> | ||
1153 | </listitem> | ||
1154 | |||
1155 | <listitem> | ||
1156 | <para> | ||
1157 | Unknown/random errors, timeouts and all sorts of weirdities. | ||
1158 | </para> | ||
1159 | </listitem> | ||
1160 | |||
1161 | </itemizedlist> | ||
1162 | |||
1163 | <para> | ||
1164 | As described above, transmission errors can cause a wide variety | ||
1165 | of symptoms ranging from device ICRC errors to random device | ||
1166 | lockup, and, in many cases, there is no way to tell if an | ||
1167 | error condition is due to a transmission error or not; | ||
1168 | therefore, it's necessary to employ some kind of heuristic | ||
1169 | when dealing with errors and timeouts. For example, | ||
1170 | encountering repetitive ABRT errors for a known supported | ||
1171 | command is likely to indicate an ATA bus error. | ||
1172 | </para> | ||
1173 | |||
1174 | <para> | ||
1175 | Once it's determined that ATA bus errors have possibly | ||
1176 | occurred, lowering the ATA bus transmission speed is one of the | ||
1177 | actions which may alleviate the problem. See <xref | ||
1178 | linkend="exrecReconf"/> for more information. | ||
1179 | </para> | ||
1180 | |||
1181 | </sect2> | ||
1182 | |||
1183 | <sect2 id="excatPCIbusErr"> | ||
1184 | <title>PCI bus error</title> | ||
1185 | |||
1186 | <para> | ||
1187 | Data corruption or other failures during transmission over PCI | ||
1188 | (or other system bus). For standard BMDMA, this is indicated | ||
1189 | by the Error bit in the BMDMA Status register. This type of | ||
1190 | error must be logged, as it indicates something is very wrong | ||
1191 | with the system. Resetting the host controller is recommended. | ||
1192 | </para> | ||
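For a standard BMDMA controller the check boils down to testing one bit of the BMDMA Status register. The bit values below are the conventional ones; how the register is read (port IO vs MMIO) is controller specific, so this is only a sketch:

#include <stdio.h>

/* Conventional BMDMA Status register bits */
#define BMDMA_ACTIVE 0x01
#define BMDMA_ERR    0x02
#define BMDMA_INTR   0x04

/* Returns non-zero if the host reported a bus-mastering (PCI-side) error. */
static int bmdma_host_error(unsigned char bmdma_status)
{
	return bmdma_status & BMDMA_ERR;
}

int main(void)
{
	unsigned char status = BMDMA_INTR | BMDMA_ERR;

	if (bmdma_host_error(status))
		printf("PCI bus error: log it and consider resetting the host controller\n");
	return 0;
}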
1193 | |||
1194 | </sect2> | ||
1195 | |||
1196 | <sect2 id="excatLateCompletion"> | ||
1197 | <title>Late completion</title> | ||
1198 | |||
1199 | <para> | ||
1200 | This occurs when a timeout fires and the timeout handler finds | ||
1201 | out that the timed out command has completed successfully or | ||
1202 | with an error. This is usually caused by lost interrupts. This | ||
1203 | type of error must be logged. Resetting the host controller is | ||
1204 | recommended. | ||
1205 | </para> | ||
1206 | |||
1207 | </sect2> | ||
1208 | |||
1209 | <sect2 id="excatUnknown"> | ||
1210 | <title>Unknown error (timeout)</title> | ||
1211 | |||
1212 | <para> | ||
1213 | This is when a timeout occurs and the command is still | ||
1214 | processing, or the host and device are in an unknown state. When | ||
1215 | this occurs, the HSM could be in any valid or invalid state. To | ||
1216 | bring the device to a known state and make it forget about the | ||
1217 | timed out command, resetting is necessary. The timed out | ||
1218 | command may be retried. | ||
1219 | </para> | ||
1220 | |||
1221 | <para> | ||
1222 | Timeouts can also be caused by transmission errors. Refer to | ||
1223 | <xref linkend="excatATAbusErr"/> for more details. | ||
1224 | </para> | ||
1225 | |||
1226 | </sect2> | ||
1227 | |||
1228 | <sect2 id="excatHoplugPM"> | ||
1229 | <title>Hotplug and power management exceptions</title> | ||
1230 | |||
1231 | <para> | ||
1232 | <<TODO: fill here>> | ||
1233 | </para> | ||
1234 | |||
1235 | </sect2> | ||
1236 | |||
1237 | </sect1> | ||
1238 | |||
1239 | <sect1 id="exrec"> | ||
1240 | <title>EH recovery actions</title> | ||
1241 | |||
1242 | <para> | ||
1243 | This section discusses several important recovery actions. | ||
1244 | </para> | ||
1245 | |||
1246 | <sect2 id="exrecClr"> | ||
1247 | <title>Clearing error condition</title> | ||
1248 | |||
1249 | <para> | ||
1250 | Many controllers require their error registers to be cleared by | ||
1251 | the error handler. Different controllers may have different | ||
1252 | requirements. | ||
1253 | </para> | ||
1254 | |||
1255 | <para> | ||
1256 | For SATA, it's strongly recommended to clear at least the SError | ||
1257 | register during error handling. | ||
1258 | </para> | ||
1259 | </sect2> | ||
1260 | |||
1261 | <sect2 id="exrecRst"> | ||
1262 | <title>Reset</title> | ||
1263 | |||
1264 | <para> | ||
1265 | During EH, resetting is necessary in the following cases. | ||
1266 | </para> | ||
1267 | |||
1268 | <itemizedlist> | ||
1269 | |||
1270 | <listitem> | ||
1271 | <para> | ||
1272 | HSM is in unknown or invalid state | ||
1273 | </para> | ||
1274 | </listitem> | ||
1275 | |||
1276 | <listitem> | ||
1277 | <para> | ||
1278 | HBA is in unknown or invalid state | ||
1279 | </para> | ||
1280 | </listitem> | ||
1281 | |||
1282 | <listitem> | ||
1283 | <para> | ||
1284 | EH needs to make HBA/device forget about in-flight commands | ||
1285 | </para> | ||
1286 | </listitem> | ||
1287 | |||
1288 | <listitem> | ||
1289 | <para> | ||
1290 | HBA/device behaves weirdly | ||
1291 | </para> | ||
1292 | </listitem> | ||
1293 | |||
1294 | </itemizedlist> | ||
1295 | |||
1296 | <para> | ||
1297 | Resetting during EH might be a good idea regardless of error | ||
1298 | condition to improve EH robustness. Whether to reset both or | ||
1299 | either one of HBA and device depends on situation but the | ||
1300 | following scheme is recommended. | ||
1301 | </para> | ||
1302 | |||
1303 | <itemizedlist> | ||
1304 | |||
1305 | <listitem> | ||
1306 | <para> | ||
1307 | When it's known that the HBA is in a ready state but the ATA/ATAPI | ||
1308 | device is in an unknown state, reset only the device. | ||
1309 | </para> | ||
1310 | </listitem> | ||
1311 | |||
1312 | <listitem> | ||
1313 | <para> | ||
1314 | If HBA is in unknown state, reset both HBA and device. | ||
1315 | </para> | ||
1316 | </listitem> | ||
1317 | |||
1318 | </itemizedlist> | ||
1319 | |||
1320 | <para> | ||
1321 | HBA resetting is implementation specific. For a controller | ||
1322 | complying with taskfile/BMDMA PCI IDE, stopping the active DMA | ||
1323 | transaction may be sufficient iff BMDMA state is the only HBA | ||
1324 | context. But even mostly taskfile/BMDMA PCI IDE compliant | ||
1325 | controllers may have implementation-specific requirements and | ||
1326 | mechanisms to reset themselves. This must be addressed by | ||
1327 | specific drivers. | ||
1328 | </para> | ||
1329 | |||
1330 | <para> | ||
1331 | OTOH, ATA/ATAPI standard describes in detail ways to reset | ||
1332 | ATA/ATAPI devices. | ||
1333 | </para> | ||
1334 | |||
1335 | <variablelist> | ||
1336 | |||
1337 | <varlistentry><term>PATA hardware reset</term> | ||
1338 | <listitem> | ||
1339 | <para> | ||
1340 | This is a hardware-initiated device reset signalled by an | ||
1341 | asserted PATA RESET- signal. There is no standard way to | ||
1342 | initiate a hardware reset from software, although some | ||
1343 | hardware provides registers that allow the driver to directly | ||
1344 | tweak the RESET- signal. | ||
1345 | </para> | ||
1346 | </listitem> | ||
1347 | </varlistentry> | ||
1348 | |||
1349 | <varlistentry><term>Software reset</term> | ||
1350 | <listitem> | ||
1351 | <para> | ||
1352 | This is achieved by turning the CONTROL SRST bit on for at | ||
1353 | least 5us. Both PATA and SATA support it but, in the case of | ||
1354 | SATA, this may require controller-specific support as the | ||
1355 | second Register FIS to clear SRST should be transmitted | ||
1356 | while BSY bit is still set. Note that on PATA, this resets | ||
1357 | both master and slave devices on a channel. | ||
1358 | </para> | ||
1359 | </listitem> | ||
1360 | </varlistentry> | ||
1361 | |||
1362 | <varlistentry><term>EXECUTE DEVICE DIAGNOSTIC command</term> | ||
1363 | <listitem> | ||
1364 | <para> | ||
1365 | Although the ATA/ATAPI standard doesn't describe it exactly, EDD | ||
1366 | implies some level of resetting, possibly a level similar | ||
1367 | to software reset. The host-side EDD protocol can be handled | ||
1368 | with normal command processing and most SATA controllers | ||
1369 | should be able to handle EDDs just like other commands. | ||
1370 | As in software reset, EDD affects both devices on a PATA | ||
1371 | bus. | ||
1372 | </para> | ||
1373 | <para> | ||
1374 | Although EDD does reset devices, this doesn't suit error | ||
1375 | handling as EDD cannot be issued while BSY is set and it's | ||
1376 | unclear how it will act when the device is in an unknown/weird | ||
1377 | state. | ||
1378 | </para> | ||
1379 | </listitem> | ||
1380 | </varlistentry> | ||
1381 | |||
1382 | <varlistentry><term>ATAPI DEVICE RESET command</term> | ||
1383 | <listitem> | ||
1384 | <para> | ||
1385 | This is very similar to software reset except that reset | ||
1386 | can be restricted to the selected device without affecting | ||
1387 | the other device sharing the cable. | ||
1388 | </para> | ||
1389 | </listitem> | ||
1390 | </varlistentry> | ||
1391 | |||
1392 | <varlistentry><term>SATA phy reset</term> | ||
1393 | <listitem> | ||
1394 | <para> | ||
1395 | This is the preferred way of resetting a SATA device. In | ||
1396 | effect, it's identical to PATA hardware reset. Note that | ||
1397 | this can be done with the standard SCR Control register. | ||
1398 | As such, it's usually easier to implement than software | ||
1399 | reset. | ||
1400 | </para> | ||
1401 | </listitem> | ||
1402 | </varlistentry> | ||
1403 | |||
1404 | </variablelist> | ||
1405 | |||
1406 | <para> | ||
1407 | One more thing to consider when resetting devices is that | ||
1408 | resetting clears certain configuration parameters and they | ||
1409 | need to be set to their previous or newly adjusted values | ||
1410 | after reset. | ||
1411 | </para> | ||
1412 | |||
1413 | <para> | ||
1414 | The affected parameters are: | ||
1415 | </para> | ||
1416 | |||
1417 | <itemizedlist> | ||
1418 | |||
1419 | <listitem> | ||
1420 | <para> | ||
1421 | CHS set up with INITIALIZE DEVICE PARAMETERS (seldom used) | ||
1422 | </para> | ||
1423 | </listitem> | ||
1424 | |||
1425 | <listitem> | ||
1426 | <para> | ||
1427 | Parameters set with SET FEATURES including transfer mode setting | ||
1428 | </para> | ||
1429 | </listitem> | ||
1430 | |||
1431 | <listitem> | ||
1432 | <para> | ||
1433 | Block count set with SET MULTIPLE MODE | ||
1434 | </para> | ||
1435 | </listitem> | ||
1436 | |||
1437 | <listitem> | ||
1438 | <para> | ||
1439 | Other parameters (SET MAX, MEDIA LOCK...) | ||
1440 | </para> | ||
1441 | </listitem> | ||
1442 | |||
1443 | </itemizedlist> | ||
1444 | |||
1445 | <para> | ||
1446 | ATA/ATAPI standard specifies that some parameters must be | ||
1447 | maintained across hardware or software reset, but doesn't | ||
1448 | strictly specify all of them. Always reconfiguring needed | ||
1449 | parameters after reset is required for robustness. Note that | ||
1450 | this also applies when resuming from deep sleep (power-off). | ||
1451 | </para> | ||
1452 | |||
1453 | <para> | ||
1454 | Also, the ATA/ATAPI standard requires that IDENTIFY DEVICE / | ||
1455 | IDENTIFY PACKET DEVICE be issued after any configuration | ||
1456 | parameter is updated or after a hardware reset, and that the | ||
1457 | result be used for further operation. The OS driver is required | ||
1458 | to implement a revalidation mechanism to support this. | ||
1459 | </para> | ||
1460 | |||
1461 | </sect2> | ||
1462 | |||
1463 | <sect2 id="exrecReconf"> | ||
1464 | <title>Reconfigure transport</title> | ||
1465 | |||
1466 | <para> | ||
1467 | For both PATA and SATA, a lot of corners are cut for cheap | ||
1468 | connectors, cables or controllers and it's quite common to see | ||
1469 | a high transmission error rate. This can be mitigated by | ||
1470 | lowering the transmission speed. | ||
1471 | </para> | ||
1472 | |||
1473 | <para> | ||
1474 | The following is a possible scheme Jeff Garzik suggested. | ||
1475 | </para> | ||
1476 | |||
1477 | <blockquote> | ||
1478 | <para> | ||
1479 | If more than $N (3?) transmission errors happen in 15 minutes, | ||
1480 | </para> | ||
1481 | <itemizedlist> | ||
1482 | <listitem> | ||
1483 | <para> | ||
1484 | if SATA, decrease SATA PHY speed. if speed cannot be decreased, | ||
1485 | </para> | ||
1486 | </listitem> | ||
1487 | <listitem> | ||
1488 | <para> | ||
1489 | decrease UDMA xfer speed. if at UDMA0, switch to PIO4, | ||
1490 | </para> | ||
1491 | </listitem> | ||
1492 | <listitem> | ||
1493 | <para> | ||
1494 | decrease PIO xfer speed. if at PIO3, complain, but continue | ||
1495 | </para> | ||
1496 | </listitem> | ||
1497 | </itemizedlist> | ||
1498 | </blockquote> | ||
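This scheme could be prototyped along the following lines; the 15-minute window, the threshold and the ordering of the speed steps come from the text above, while the state structure, function names and the simplified speed ladder are invented for the sketch:

#include <stdio.h>
#include <time.h>

#define XFER_ERR_WINDOW   (15 * 60)   /* seconds */
#define XFER_ERR_LIMIT    3           /* the "$N (3?)" above */

enum link_speed { SPEED_SATA_FULL, SPEED_SATA_LOW, SPEED_UDMA, SPEED_PIO4, SPEED_PIO3 };

struct xfer_err_state {
	time_t window_start;
	int count;
	enum link_speed speed;
};

/* Record one transmission error; step the speed down when the threshold
 * is crossed inside the window.  Returns 1 if the speed was lowered. */
static int note_xfer_error(struct xfer_err_state *st, time_t now)
{
	if (now - st->window_start > XFER_ERR_WINDOW) {
		st->window_start = now;
		st->count = 0;
	}
	if (++st->count <= XFER_ERR_LIMIT)
		return 0;

	st->count = 0;
	switch (st->speed) {
	case SPEED_SATA_FULL: st->speed = SPEED_SATA_LOW; break;  /* lower PHY speed */
	case SPEED_SATA_LOW:  st->speed = SPEED_UDMA;     break;  /* then UDMA       */
	case SPEED_UDMA:      st->speed = SPEED_PIO4;     break;  /* UDMA0 -> PIO4   */
	case SPEED_PIO4:      st->speed = SPEED_PIO3;     break;  /* lower PIO       */
	case SPEED_PIO3:      /* already at PIO3: complain, but continue */ break;
	}
	return 1;
}

int main(void)
{
	struct xfer_err_state st = { .window_start = time(NULL), .count = 0,
	                             .speed = SPEED_SATA_FULL };
	time_t now = time(NULL);
	int i;

	for (i = 0; i < 4; i++)
		if (note_xfer_error(&st, now))
			printf("lowered speed to level %d\n", (int)st.speed);
	return 0;
}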
1499 | |||
1500 | </sect2> | ||
1501 | |||
1502 | </sect1> | ||
1503 | |||
1504 | </chapter> | ||
1505 | |||
434 | <chapter id="PiixInt"> | 1506 | <chapter id="PiixInt"> |
435 | <title>ata_piix Internals</title> | 1507 | <title>ata_piix Internals</title> |
436 | !Idrivers/scsi/ata_piix.c | 1508 | !Idrivers/scsi/ata_piix.c |
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index 42629ff84f5a..ea569ba482b1 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c | |||
@@ -305,7 +305,7 @@ long execve(const char *filename, char **argv, char **envp) | |||
305 | "Ir" (THREAD_START_SP - sizeof(regs)), | 305 | "Ir" (THREAD_START_SP - sizeof(regs)), |
306 | "r" (®s), | 306 | "r" (®s), |
307 | "Ir" (sizeof(regs)) | 307 | "Ir" (sizeof(regs)) |
308 | : "r0", "r1", "r2", "r3", "ip", "memory"); | 308 | : "r0", "r1", "r2", "r3", "ip", "lr", "memory"); |
309 | 309 | ||
310 | out: | 310 | out: |
311 | return ret; | 311 | return ret; |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index e7d22dbcb691..f6de76e0a45d 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -504,7 +504,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) | |||
504 | 504 | ||
505 | bad_access: | 505 | bad_access: |
506 | spin_unlock(&mm->page_table_lock); | 506 | spin_unlock(&mm->page_table_lock); |
507 | /* simulate a read access fault */ | 507 | /* simulate a write access fault */ |
508 | do_DataAbort(addr, 15 + (1 << 11), regs); | 508 | do_DataAbort(addr, 15 + (1 << 11), regs); |
509 | return -1; | 509 | return -1; |
510 | } | 510 | } |
diff --git a/arch/arm/mach-imx/generic.c b/arch/arm/mach-imx/generic.c index 41e5849ae8da..f8a742bb2d5b 100644 --- a/arch/arm/mach-imx/generic.c +++ b/arch/arm/mach-imx/generic.c | |||
@@ -28,14 +28,15 @@ | |||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <asm/arch/imxfb.h> | 29 | #include <asm/arch/imxfb.h> |
30 | #include <asm/hardware.h> | 30 | #include <asm/hardware.h> |
31 | #include <asm/arch/imx-regs.h> | ||
31 | 32 | ||
32 | #include <asm/mach/map.h> | 33 | #include <asm/mach/map.h> |
33 | 34 | ||
34 | void imx_gpio_mode(int gpio_mode) | 35 | void imx_gpio_mode(int gpio_mode) |
35 | { | 36 | { |
36 | unsigned int pin = gpio_mode & GPIO_PIN_MASK; | 37 | unsigned int pin = gpio_mode & GPIO_PIN_MASK; |
37 | unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> 5; | 38 | unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT; |
38 | unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> 10; | 39 | unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> GPIO_OCR_SHIFT; |
39 | unsigned int tmp; | 40 | unsigned int tmp; |
40 | 41 | ||
41 | /* Pullup enable */ | 42 | /* Pullup enable */ |
@@ -57,7 +58,7 @@ void imx_gpio_mode(int gpio_mode) | |||
57 | GPR(port) &= ~(1<<pin); | 58 | GPR(port) &= ~(1<<pin); |
58 | 59 | ||
59 | /* use as gpio? */ | 60 | /* use as gpio? */ |
60 | if( ocr == 3 ) | 61 | if(gpio_mode & GPIO_GIUS) |
61 | GIUS(port) |= (1<<pin); | 62 | GIUS(port) |= (1<<pin); |
62 | else | 63 | else |
63 | GIUS(port) &= ~(1<<pin); | 64 | GIUS(port) &= ~(1<<pin); |
@@ -72,20 +73,20 @@ void imx_gpio_mode(int gpio_mode) | |||
72 | tmp |= (ocr << (pin*2)); | 73 | tmp |= (ocr << (pin*2)); |
73 | OCR1(port) = tmp; | 74 | OCR1(port) = tmp; |
74 | 75 | ||
75 | if( gpio_mode & GPIO_AOUT ) | 76 | ICONFA1(port) &= ~( 3<<(pin*2)); |
76 | ICONFA1(port) &= ~( 3<<(pin*2)); | 77 | ICONFA1(port) |= ((gpio_mode >> GPIO_AOUT_SHIFT) & 3) << (pin * 2); |
77 | if( gpio_mode & GPIO_BOUT ) | 78 | ICONFB1(port) &= ~( 3<<(pin*2)); |
78 | ICONFB1(port) &= ~( 3<<(pin*2)); | 79 | ICONFB1(port) |= ((gpio_mode >> GPIO_BOUT_SHIFT) & 3) << (pin * 2); |
79 | } else { | 80 | } else { |
80 | tmp = OCR2(port); | 81 | tmp = OCR2(port); |
81 | tmp &= ~( 3<<((pin-16)*2)); | 82 | tmp &= ~( 3<<((pin-16)*2)); |
82 | tmp |= (ocr << ((pin-16)*2)); | 83 | tmp |= (ocr << ((pin-16)*2)); |
83 | OCR2(port) = tmp; | 84 | OCR2(port) = tmp; |
84 | 85 | ||
85 | if( gpio_mode & GPIO_AOUT ) | 86 | ICONFA2(port) &= ~( 3<<((pin-16)*2)); |
86 | ICONFA2(port) &= ~( 3<<((pin-16)*2)); | 87 | ICONFA2(port) |= ((gpio_mode >> GPIO_AOUT_SHIFT) & 3) << ((pin-16) * 2); |
87 | if( gpio_mode & GPIO_BOUT ) | 88 | ICONFB2(port) &= ~( 3<<((pin-16)*2)); |
88 | ICONFB2(port) &= ~( 3<<((pin-16)*2)); | 89 | ICONFB2(port) |= ((gpio_mode >> GPIO_BOUT_SHIFT) & 3) << ((pin-16) * 2); |
89 | } | 90 | } |
90 | } | 91 | } |
91 | 92 | ||
diff --git a/arch/arm/mach-imx/mx1ads.c b/arch/arm/mach-imx/mx1ads.c index 5d25434d332c..a7511ddfe364 100644 --- a/arch/arm/mach-imx/mx1ads.c +++ b/arch/arm/mach-imx/mx1ads.c | |||
@@ -55,7 +55,7 @@ static void __init | |||
55 | mx1ads_init(void) | 55 | mx1ads_init(void) |
56 | { | 56 | { |
57 | #ifdef CONFIG_LEDS | 57 | #ifdef CONFIG_LEDS |
58 | imx_gpio_mode(GPIO_PORTA | GPIO_OUT | GPIO_GPIO | 2); | 58 | imx_gpio_mode(GPIO_PORTA | GPIO_OUT | 2); |
59 | #endif | 59 | #endif |
60 | platform_add_devices(devices, ARRAY_SIZE(devices)); | 60 | platform_add_devices(devices, ARRAY_SIZE(devices)); |
61 | } | 61 | } |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index db5e47dfc303..c54e04c995ee 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -370,21 +370,21 @@ config CPU_BIG_ENDIAN | |||
370 | 370 | ||
371 | config CPU_ICACHE_DISABLE | 371 | config CPU_ICACHE_DISABLE |
372 | bool "Disable I-Cache" | 372 | bool "Disable I-Cache" |
373 | depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 | 373 | depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6 |
374 | help | 374 | help |
375 | Say Y here to disable the processor instruction cache. Unless | 375 | Say Y here to disable the processor instruction cache. Unless |
376 | you have a reason not to or are unsure, say N. | 376 | you have a reason not to or are unsure, say N. |
377 | 377 | ||
378 | config CPU_DCACHE_DISABLE | 378 | config CPU_DCACHE_DISABLE |
379 | bool "Disable D-Cache" | 379 | bool "Disable D-Cache" |
380 | depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 | 380 | depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6 |
381 | help | 381 | help |
382 | Say Y here to disable the processor data cache. Unless | 382 | Say Y here to disable the processor data cache. Unless |
383 | you have a reason not to or are unsure, say N. | 383 | you have a reason not to or are unsure, say N. |
384 | 384 | ||
385 | config CPU_DCACHE_WRITETHROUGH | 385 | config CPU_DCACHE_WRITETHROUGH |
386 | bool "Force write through D-cache" | 386 | bool "Force write through D-cache" |
387 | depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020) && !CPU_DCACHE_DISABLE | 387 | depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE |
388 | default y if CPU_ARM925T | 388 | default y if CPU_ARM925T |
389 | help | 389 | help |
390 | Say Y here to use the data cache in writethrough mode. Unless you | 390 | Say Y here to use the data cache in writethrough mode. Unless you |
@@ -399,7 +399,7 @@ config CPU_CACHE_ROUND_ROBIN | |||
399 | 399 | ||
400 | config CPU_BPREDICT_DISABLE | 400 | config CPU_BPREDICT_DISABLE |
401 | bool "Disable branch prediction" | 401 | bool "Disable branch prediction" |
402 | depends on CPU_ARM1020 | 402 | depends on CPU_ARM1020 || CPU_V6 |
403 | help | 403 | help |
404 | Say Y here to disable branch prediction. If unsure, say N. | 404 | Say Y here to disable branch prediction. If unsure, say N. |
405 | 405 | ||
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 6dc726ad7137..d0a5106fba24 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -1016,6 +1016,11 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs) | |||
1016 | 1016 | ||
1017 | cmc_polling_enabled = 1; | 1017 | cmc_polling_enabled = 1; |
1018 | spin_unlock(&cmc_history_lock); | 1018 | spin_unlock(&cmc_history_lock); |
1019 | /* If we're being hit with CMC interrupts, we won't | ||
1020 | * ever execute the schedule_work() below. Need to | ||
1021 | * disable CMC interrupts on this processor now. | ||
1022 | */ | ||
1023 | ia64_mca_cmc_vector_disable(NULL); | ||
1019 | schedule_work(&cmc_disable_work); | 1024 | schedule_work(&cmc_disable_work); |
1020 | 1025 | ||
1021 | /* | 1026 | /* |
diff --git a/arch/ppc/platforms/pmac_time.c b/arch/ppc/platforms/pmac_time.c index 778ce4fec368..efb819f9490d 100644 --- a/arch/ppc/platforms/pmac_time.c +++ b/arch/ppc/platforms/pmac_time.c | |||
@@ -195,7 +195,7 @@ via_calibrate_decr(void) | |||
195 | ; | 195 | ; |
196 | dend = get_dec(); | 196 | dend = get_dec(); |
197 | 197 | ||
198 | tb_ticks_per_jiffy = (dstart - dend) / (6 * (HZ/100)); | 198 | tb_ticks_per_jiffy = (dstart - dend) / ((6 * HZ)/100); |
199 | tb_to_us = mulhwu_scale_factor(dstart - dend, 60000); | 199 | tb_to_us = mulhwu_scale_factor(dstart - dend, 60000); |
200 | 200 | ||
201 | printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n", | 201 | printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n", |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index aba05394d30a..6537445dac0e 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -25,62 +25,6 @@ source "init/Kconfig" | |||
25 | 25 | ||
26 | menu "General machine setup" | 26 | menu "General machine setup" |
27 | 27 | ||
28 | config VT | ||
29 | bool | ||
30 | select INPUT | ||
31 | default y | ||
32 | ---help--- | ||
33 | If you say Y here, you will get support for terminal devices with | ||
34 | display and keyboard devices. These are called "virtual" because you | ||
35 | can run several virtual terminals (also called virtual consoles) on | ||
36 | one physical terminal. This is rather useful, for example one | ||
37 | virtual terminal can collect system messages and warnings, another | ||
38 | one can be used for a text-mode user session, and a third could run | ||
39 | an X session, all in parallel. Switching between virtual terminals | ||
40 | is done with certain key combinations, usually Alt-<function key>. | ||
41 | |||
42 | The setterm command ("man setterm") can be used to change the | ||
43 | properties (such as colors or beeping) of a virtual terminal. The | ||
44 | man page console_codes(4) ("man console_codes") contains the special | ||
45 | character sequences that can be used to change those properties | ||
46 | directly. The fonts used on virtual terminals can be changed with | ||
47 | the setfont ("man setfont") command and the key bindings are defined | ||
48 | with the loadkeys ("man loadkeys") command. | ||
49 | |||
50 | You need at least one virtual terminal device in order to make use | ||
51 | of your keyboard and monitor. Therefore, only people configuring an | ||
52 | embedded system would want to say N here in order to save some | ||
53 | memory; the only way to log into such a system is then via a serial | ||
54 | or network connection. | ||
55 | |||
56 | If unsure, say Y, or else you won't be able to do much with your new | ||
57 | shiny Linux system :-) | ||
58 | |||
59 | config VT_CONSOLE | ||
60 | bool | ||
61 | default y | ||
62 | ---help--- | ||
63 | The system console is the device which receives all kernel messages | ||
64 | and warnings and which allows logins in single user mode. If you | ||
65 | answer Y here, a virtual terminal (the device used to interact with | ||
66 | a physical terminal) can be used as system console. This is the most | ||
67 | common mode of operations, so you should say Y here unless you want | ||
68 | the kernel messages be output only to a serial port (in which case | ||
69 | you should say Y to "Console on serial port", below). | ||
70 | |||
71 | If you do say Y here, by default the currently visible virtual | ||
72 | terminal (/dev/tty0) will be used as system console. You can change | ||
73 | that with a kernel command line option such as "console=tty3" which | ||
74 | would use the third virtual terminal as system console. (Try "man | ||
75 | bootparam" or see the documentation of your boot loader (lilo or | ||
76 | loadlin) about how to pass options to the kernel at boot time.) | ||
77 | |||
78 | If unsure, say Y. | ||
79 | |||
80 | config HW_CONSOLE | ||
81 | bool | ||
82 | default y | ||
83 | |||
84 | config SMP | 28 | config SMP |
85 | bool "Symmetric multi-processing support (does not work on sun4/sun4c)" | 29 | bool "Symmetric multi-processing support (does not work on sun4/sun4c)" |
86 | depends on BROKEN | 30 | depends on BROKEN |
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c index bc015e980341..279a62627c10 100644 --- a/arch/sparc/kernel/time.c +++ b/arch/sparc/kernel/time.c | |||
@@ -457,7 +457,7 @@ void __init time_init(void) | |||
457 | sbus_time_init(); | 457 | sbus_time_init(); |
458 | } | 458 | } |
459 | 459 | ||
460 | extern __inline__ unsigned long do_gettimeoffset(void) | 460 | static inline unsigned long do_gettimeoffset(void) |
461 | { | 461 | { |
462 | return (*master_l10_counter >> 10) & 0x1fffff; | 462 | return (*master_l10_counter >> 10) & 0x1fffff; |
463 | } | 463 | } |
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index c89a803cbc20..c664b962987c 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c | |||
@@ -260,7 +260,7 @@ static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot) | |||
260 | { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } | 260 | { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } |
261 | 261 | ||
262 | /* to find an entry in a top-level page table... */ | 262 | /* to find an entry in a top-level page table... */ |
263 | extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) | 263 | static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) |
264 | { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } | 264 | { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } |
265 | 265 | ||
266 | /* Find an entry in the second-level page table.. */ | 266 | /* Find an entry in the second-level page table.. */ |
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index 2879b1072921..f685035dbdb8 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S | |||
@@ -97,8 +97,8 @@ do_fpdis: | |||
97 | faddd %f0, %f2, %f4 | 97 | faddd %f0, %f2, %f4 |
98 | fmuld %f0, %f2, %f6 | 98 | fmuld %f0, %f2, %f6 |
99 | ldxa [%g3] ASI_DMMU, %g5 | 99 | ldxa [%g3] ASI_DMMU, %g5 |
100 | cplus_fptrap_insn_1: | 100 | sethi %hi(sparc64_kern_sec_context), %g2 |
101 | sethi %hi(0), %g2 | 101 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 |
102 | stxa %g2, [%g3] ASI_DMMU | 102 | stxa %g2, [%g3] ASI_DMMU |
103 | membar #Sync | 103 | membar #Sync |
104 | add %g6, TI_FPREGS + 0xc0, %g2 | 104 | add %g6, TI_FPREGS + 0xc0, %g2 |
@@ -126,8 +126,8 @@ cplus_fptrap_insn_1: | |||
126 | fzero %f34 | 126 | fzero %f34 |
127 | ldxa [%g3] ASI_DMMU, %g5 | 127 | ldxa [%g3] ASI_DMMU, %g5 |
128 | add %g6, TI_FPREGS, %g1 | 128 | add %g6, TI_FPREGS, %g1 |
129 | cplus_fptrap_insn_2: | 129 | sethi %hi(sparc64_kern_sec_context), %g2 |
130 | sethi %hi(0), %g2 | 130 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 |
131 | stxa %g2, [%g3] ASI_DMMU | 131 | stxa %g2, [%g3] ASI_DMMU |
132 | membar #Sync | 132 | membar #Sync |
133 | add %g6, TI_FPREGS + 0x40, %g2 | 133 | add %g6, TI_FPREGS + 0x40, %g2 |
@@ -153,8 +153,8 @@ cplus_fptrap_insn_2: | |||
153 | 3: mov SECONDARY_CONTEXT, %g3 | 153 | 3: mov SECONDARY_CONTEXT, %g3 |
154 | add %g6, TI_FPREGS, %g1 | 154 | add %g6, TI_FPREGS, %g1 |
155 | ldxa [%g3] ASI_DMMU, %g5 | 155 | ldxa [%g3] ASI_DMMU, %g5 |
156 | cplus_fptrap_insn_3: | 156 | sethi %hi(sparc64_kern_sec_context), %g2 |
157 | sethi %hi(0), %g2 | 157 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 |
158 | stxa %g2, [%g3] ASI_DMMU | 158 | stxa %g2, [%g3] ASI_DMMU |
159 | membar #Sync | 159 | membar #Sync |
160 | mov 0x40, %g2 | 160 | mov 0x40, %g2 |
@@ -319,8 +319,8 @@ do_fptrap_after_fsr: | |||
319 | stx %g3, [%g6 + TI_GSR] | 319 | stx %g3, [%g6 + TI_GSR] |
320 | mov SECONDARY_CONTEXT, %g3 | 320 | mov SECONDARY_CONTEXT, %g3 |
321 | ldxa [%g3] ASI_DMMU, %g5 | 321 | ldxa [%g3] ASI_DMMU, %g5 |
322 | cplus_fptrap_insn_4: | 322 | sethi %hi(sparc64_kern_sec_context), %g2 |
323 | sethi %hi(0), %g2 | 323 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 |
324 | stxa %g2, [%g3] ASI_DMMU | 324 | stxa %g2, [%g3] ASI_DMMU |
325 | membar #Sync | 325 | membar #Sync |
326 | add %g6, TI_FPREGS, %g2 | 326 | add %g6, TI_FPREGS, %g2 |
@@ -341,33 +341,6 @@ cplus_fptrap_insn_4: | |||
341 | ba,pt %xcc, etrap | 341 | ba,pt %xcc, etrap |
342 | wr %g0, 0, %fprs | 342 | wr %g0, 0, %fprs |
343 | 343 | ||
344 | cplus_fptrap_1: | ||
345 | sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 | ||
346 | |||
347 | .globl cheetah_plus_patch_fpdis | ||
348 | cheetah_plus_patch_fpdis: | ||
349 | /* We configure the dTLB512_0 for 4MB pages and the | ||
350 | * dTLB512_1 for 8K pages when in context zero. | ||
351 | */ | ||
352 | sethi %hi(cplus_fptrap_1), %o0 | ||
353 | lduw [%o0 + %lo(cplus_fptrap_1)], %o1 | ||
354 | |||
355 | set cplus_fptrap_insn_1, %o2 | ||
356 | stw %o1, [%o2] | ||
357 | flush %o2 | ||
358 | set cplus_fptrap_insn_2, %o2 | ||
359 | stw %o1, [%o2] | ||
360 | flush %o2 | ||
361 | set cplus_fptrap_insn_3, %o2 | ||
362 | stw %o1, [%o2] | ||
363 | flush %o2 | ||
364 | set cplus_fptrap_insn_4, %o2 | ||
365 | stw %o1, [%o2] | ||
366 | flush %o2 | ||
367 | |||
368 | retl | ||
369 | nop | ||
370 | |||
371 | /* The registers for cross calls will be: | 344 | /* The registers for cross calls will be: |
372 | * | 345 | * |
373 | * DATA 0: [low 32-bits] Address of function to call, jmp to this | 346 | * DATA 0: [low 32-bits] Address of function to call, jmp to this |
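This hunk, together with the etrap.S, rtrap.S, head.S, setup.c, trampoline.S and winfixup.S hunks below, removes the boot-time instruction patching (the cplus_*_insn_* slots rewritten by the cheetah_plus_patch_* routines) and instead loads the kernel context values from ordinary variables with a sethi/ldx pair. Those variables are filled in once by remap_kernel() in arch/sparc64/mm/init.c later in this patch; the relevant lines, repeated here only for orientation, are:

    if (tlb_type == cheetah_plus) {
            sparc64_kern_pri_context  = CTX_CHEETAH_PLUS_CTX0 | CTX_CHEETAH_PLUS_NUC;
            sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
            sparc64_kern_sec_context  = CTX_CHEETAH_PLUS_CTX0;
    }

On other CPU types the variables keep their zero initial value, which appears to match the old behaviour of leaving the sethi %hi(0) placeholders unpatched.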
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index 50d2af1d98ae..0d8eba21111b 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S | |||
@@ -68,12 +68,8 @@ etrap_irq: | |||
68 | 68 | ||
69 | wrpr %g3, 0, %otherwin | 69 | wrpr %g3, 0, %otherwin |
70 | wrpr %g2, 0, %wstate | 70 | wrpr %g2, 0, %wstate |
71 | cplus_etrap_insn_1: | 71 | sethi %hi(sparc64_kern_pri_context), %g2 |
72 | sethi %hi(0), %g3 | 72 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 |
73 | sllx %g3, 32, %g3 | ||
74 | cplus_etrap_insn_2: | ||
75 | sethi %hi(0), %g2 | ||
76 | or %g3, %g2, %g3 | ||
77 | stxa %g3, [%l4] ASI_DMMU | 73 | stxa %g3, [%l4] ASI_DMMU |
78 | flush %l6 | 74 | flush %l6 |
79 | wr %g0, ASI_AIUS, %asi | 75 | wr %g0, ASI_AIUS, %asi |
@@ -215,12 +211,8 @@ scetrap: rdpr %pil, %g2 | |||
215 | mov PRIMARY_CONTEXT, %l4 | 211 | mov PRIMARY_CONTEXT, %l4 |
216 | wrpr %g3, 0, %otherwin | 212 | wrpr %g3, 0, %otherwin |
217 | wrpr %g2, 0, %wstate | 213 | wrpr %g2, 0, %wstate |
218 | cplus_etrap_insn_3: | 214 | sethi %hi(sparc64_kern_pri_context), %g2 |
219 | sethi %hi(0), %g3 | 215 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 |
220 | sllx %g3, 32, %g3 | ||
221 | cplus_etrap_insn_4: | ||
222 | sethi %hi(0), %g2 | ||
223 | or %g3, %g2, %g3 | ||
224 | stxa %g3, [%l4] ASI_DMMU | 216 | stxa %g3, [%l4] ASI_DMMU |
225 | flush %l6 | 217 | flush %l6 |
226 | 218 | ||
@@ -264,38 +256,3 @@ cplus_etrap_insn_4: | |||
264 | 256 | ||
265 | #undef TASK_REGOFF | 257 | #undef TASK_REGOFF |
266 | #undef ETRAP_PSTATE1 | 258 | #undef ETRAP_PSTATE1 |
267 | |||
268 | cplus_einsn_1: | ||
269 | sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3 | ||
270 | cplus_einsn_2: | ||
271 | sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 | ||
272 | |||
273 | .globl cheetah_plus_patch_etrap | ||
274 | cheetah_plus_patch_etrap: | ||
275 | /* We configure the dTLB512_0 for 4MB pages and the | ||
276 | * dTLB512_1 for 8K pages when in context zero. | ||
277 | */ | ||
278 | sethi %hi(cplus_einsn_1), %o0 | ||
279 | sethi %hi(cplus_etrap_insn_1), %o2 | ||
280 | lduw [%o0 + %lo(cplus_einsn_1)], %o1 | ||
281 | or %o2, %lo(cplus_etrap_insn_1), %o2 | ||
282 | stw %o1, [%o2] | ||
283 | flush %o2 | ||
284 | sethi %hi(cplus_etrap_insn_3), %o2 | ||
285 | or %o2, %lo(cplus_etrap_insn_3), %o2 | ||
286 | stw %o1, [%o2] | ||
287 | flush %o2 | ||
288 | |||
289 | sethi %hi(cplus_einsn_2), %o0 | ||
290 | sethi %hi(cplus_etrap_insn_2), %o2 | ||
291 | lduw [%o0 + %lo(cplus_einsn_2)], %o1 | ||
292 | or %o2, %lo(cplus_etrap_insn_2), %o2 | ||
293 | stw %o1, [%o2] | ||
294 | flush %o2 | ||
295 | sethi %hi(cplus_etrap_insn_4), %o2 | ||
296 | or %o2, %lo(cplus_etrap_insn_4), %o2 | ||
297 | stw %o1, [%o2] | ||
298 | flush %o2 | ||
299 | |||
300 | retl | ||
301 | nop | ||
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index 89406f9649a9..24340496cdd3 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S | |||
@@ -325,23 +325,7 @@ cheetah_tlb_fixup: | |||
325 | 1: sethi %hi(tlb_type), %g1 | 325 | 1: sethi %hi(tlb_type), %g1 |
326 | stw %g2, [%g1 + %lo(tlb_type)] | 326 | stw %g2, [%g1 + %lo(tlb_type)] |
327 | 327 | ||
328 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) | 328 | /* Patch copy/page operations to cheetah optimized versions. */ |
329 | ba,pt %xcc, 2f | ||
330 | nop | ||
331 | |||
332 | 1: /* Patch context register writes to support nucleus page | ||
333 | * size correctly. | ||
334 | */ | ||
335 | call cheetah_plus_patch_etrap | ||
336 | nop | ||
337 | call cheetah_plus_patch_rtrap | ||
338 | nop | ||
339 | call cheetah_plus_patch_fpdis | ||
340 | nop | ||
341 | call cheetah_plus_patch_winfixup | ||
342 | nop | ||
343 | |||
344 | 2: /* Patch copy/page operations to cheetah optimized versions. */ | ||
345 | call cheetah_patch_copyops | 329 | call cheetah_patch_copyops |
346 | nop | 330 | nop |
347 | call cheetah_patch_copy_page | 331 | call cheetah_patch_copy_page |
@@ -484,20 +468,13 @@ spitfire_vpte_base: | |||
484 | call prom_set_trap_table | 468 | call prom_set_trap_table |
485 | sethi %hi(sparc64_ttable_tl0), %o0 | 469 | sethi %hi(sparc64_ttable_tl0), %o0 |
486 | 470 | ||
487 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f) | 471 | /* Start using proper page size encodings in ctx register. */ |
488 | ba,pt %xcc, 2f | 472 | sethi %hi(sparc64_kern_pri_context), %g3 |
489 | nop | 473 | ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 |
490 | |||
491 | 1: /* Start using proper page size encodings in ctx register. */ | ||
492 | sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3 | ||
493 | mov PRIMARY_CONTEXT, %g1 | 474 | mov PRIMARY_CONTEXT, %g1 |
494 | sllx %g3, 32, %g3 | 475 | stxa %g2, [%g1] ASI_DMMU |
495 | sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 | ||
496 | or %g3, %g2, %g3 | ||
497 | stxa %g3, [%g1] ASI_DMMU | ||
498 | membar #Sync | 476 | membar #Sync |
499 | 477 | ||
500 | 2: | ||
501 | rdpr %pstate, %o1 | 478 | rdpr %pstate, %o1 |
502 | or %o1, PSTATE_IE, %o1 | 479 | or %o1, PSTATE_IE, %o1 |
503 | wrpr %o1, 0, %pstate | 480 | wrpr %o1, 0, %pstate |
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index fafd227735fa..ecfb42a69a44 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S | |||
@@ -256,9 +256,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 | |||
256 | brnz,pn %l3, kern_rtt | 256 | brnz,pn %l3, kern_rtt |
257 | mov PRIMARY_CONTEXT, %l7 | 257 | mov PRIMARY_CONTEXT, %l7 |
258 | ldxa [%l7 + %l7] ASI_DMMU, %l0 | 258 | ldxa [%l7 + %l7] ASI_DMMU, %l0 |
259 | cplus_rtrap_insn_1: | 259 | sethi %hi(sparc64_kern_pri_nuc_bits), %l1 |
260 | sethi %hi(0), %l1 | 260 | ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1 |
261 | sllx %l1, 32, %l1 | ||
262 | or %l0, %l1, %l0 | 261 | or %l0, %l1, %l0 |
263 | stxa %l0, [%l7] ASI_DMMU | 262 | stxa %l0, [%l7] ASI_DMMU |
264 | flush %g6 | 263 | flush %g6 |
@@ -345,21 +344,3 @@ kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5 | |||
345 | wr %g0, FPRS_DU, %fprs | 344 | wr %g0, FPRS_DU, %fprs |
346 | ba,pt %xcc, rt_continue | 345 | ba,pt %xcc, rt_continue |
347 | stb %l5, [%g6 + TI_FPDEPTH] | 346 | stb %l5, [%g6 + TI_FPDEPTH] |
348 | |||
349 | cplus_rinsn_1: | ||
350 | sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1 | ||
351 | |||
352 | .globl cheetah_plus_patch_rtrap | ||
353 | cheetah_plus_patch_rtrap: | ||
354 | /* We configure the dTLB512_0 for 4MB pages and the | ||
355 | * dTLB512_1 for 8K pages when in context zero. | ||
356 | */ | ||
357 | sethi %hi(cplus_rinsn_1), %o0 | ||
358 | sethi %hi(cplus_rtrap_insn_1), %o2 | ||
359 | lduw [%o0 + %lo(cplus_rinsn_1)], %o1 | ||
360 | or %o2, %lo(cplus_rtrap_insn_1), %o2 | ||
361 | stw %o1, [%o2] | ||
362 | flush %o2 | ||
363 | |||
364 | retl | ||
365 | nop | ||
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 4c9c8f241748..c1f34237cdf2 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c | |||
@@ -187,17 +187,13 @@ int prom_callback(long *args) | |||
187 | } | 187 | } |
188 | 188 | ||
189 | if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) { | 189 | if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) { |
190 | unsigned long kernel_pctx = 0; | 190 | extern unsigned long sparc64_kern_pri_context; |
191 | |||
192 | if (tlb_type == cheetah_plus) | ||
193 | kernel_pctx |= (CTX_CHEETAH_PLUS_NUC | | ||
194 | CTX_CHEETAH_PLUS_CTX0); | ||
195 | 191 | ||
196 | /* Spitfire Errata #32 workaround */ | 192 | /* Spitfire Errata #32 workaround */ |
197 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | 193 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" |
198 | "flush %%g6" | 194 | "flush %%g6" |
199 | : /* No outputs */ | 195 | : /* No outputs */ |
200 | : "r" (kernel_pctx), | 196 | : "r" (sparc64_kern_pri_context), |
201 | "r" (PRIMARY_CONTEXT), | 197 | "r" (PRIMARY_CONTEXT), |
202 | "i" (ASI_DMMU)); | 198 | "i" (ASI_DMMU)); |
203 | 199 | ||
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 89f2fcfcd662..9478551cb020 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S | |||
@@ -336,20 +336,13 @@ do_unlock: | |||
336 | call init_irqwork_curcpu | 336 | call init_irqwork_curcpu |
337 | nop | 337 | nop |
338 | 338 | ||
339 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f) | 339 | /* Start using proper page size encodings in ctx register. */ |
340 | ba,pt %xcc, 2f | 340 | sethi %hi(sparc64_kern_pri_context), %g3 |
341 | nop | 341 | ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 |
342 | |||
343 | 1: /* Start using proper page size encodings in ctx register. */ | ||
344 | sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3 | ||
345 | mov PRIMARY_CONTEXT, %g1 | 342 | mov PRIMARY_CONTEXT, %g1 |
346 | sllx %g3, 32, %g3 | 343 | stxa %g2, [%g1] ASI_DMMU |
347 | sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 | ||
348 | or %g3, %g2, %g3 | ||
349 | stxa %g3, [%g1] ASI_DMMU | ||
350 | membar #Sync | 344 | membar #Sync |
351 | 345 | ||
352 | 2: | ||
353 | rdpr %pstate, %o1 | 346 | rdpr %pstate, %o1 |
354 | or %o1, PSTATE_IE, %o1 | 347 | or %o1, PSTATE_IE, %o1 |
355 | wrpr %o1, 0, %pstate | 348 | wrpr %o1, 0, %pstate |
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index 99c809a1e5ac..39160926267b 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S | |||
@@ -16,23 +16,14 @@ | |||
16 | .text | 16 | .text |
17 | 17 | ||
18 | set_pcontext: | 18 | set_pcontext: |
19 | cplus_winfixup_insn_1: | 19 | sethi %hi(sparc64_kern_pri_context), %l1 |
20 | sethi %hi(0), %l1 | 20 | ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1 |
21 | mov PRIMARY_CONTEXT, %g1 | 21 | mov PRIMARY_CONTEXT, %g1 |
22 | sllx %l1, 32, %l1 | ||
23 | cplus_winfixup_insn_2: | ||
24 | sethi %hi(0), %g2 | ||
25 | or %l1, %g2, %l1 | ||
26 | stxa %l1, [%g1] ASI_DMMU | 22 | stxa %l1, [%g1] ASI_DMMU |
27 | flush %g6 | 23 | flush %g6 |
28 | retl | 24 | retl |
29 | nop | 25 | nop |
30 | 26 | ||
31 | cplus_wfinsn_1: | ||
32 | sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1 | ||
33 | cplus_wfinsn_2: | ||
34 | sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 | ||
35 | |||
36 | .align 32 | 27 | .align 32 |
37 | 28 | ||
38 | /* Here are the rules, pay attention. | 29 | /* Here are the rules, pay attention. |
@@ -395,23 +386,3 @@ window_dax_from_user_common: | |||
395 | add %sp, PTREGS_OFF, %o0 | 386 | add %sp, PTREGS_OFF, %o0 |
396 | ba,pt %xcc, rtrap | 387 | ba,pt %xcc, rtrap |
397 | clr %l6 | 388 | clr %l6 |
398 | |||
399 | |||
400 | .globl cheetah_plus_patch_winfixup | ||
401 | cheetah_plus_patch_winfixup: | ||
402 | sethi %hi(cplus_wfinsn_1), %o0 | ||
403 | sethi %hi(cplus_winfixup_insn_1), %o2 | ||
404 | lduw [%o0 + %lo(cplus_wfinsn_1)], %o1 | ||
405 | or %o2, %lo(cplus_winfixup_insn_1), %o2 | ||
406 | stw %o1, [%o2] | ||
407 | flush %o2 | ||
408 | |||
409 | sethi %hi(cplus_wfinsn_2), %o0 | ||
410 | sethi %hi(cplus_winfixup_insn_2), %o2 | ||
411 | lduw [%o0 + %lo(cplus_wfinsn_2)], %o1 | ||
412 | or %o2, %lo(cplus_winfixup_insn_2), %o2 | ||
413 | stw %o1, [%o2] | ||
414 | flush %o2 | ||
415 | |||
416 | retl | ||
417 | nop | ||
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 5db50524f20d..0d2e967c7200 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -133,6 +133,12 @@ extern unsigned int sparc_ramdisk_size; | |||
133 | 133 | ||
134 | struct page *mem_map_zero __read_mostly; | 134 | struct page *mem_map_zero __read_mostly; |
135 | 135 | ||
136 | unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; | ||
137 | |||
138 | unsigned long sparc64_kern_pri_context __read_mostly; | ||
139 | unsigned long sparc64_kern_pri_nuc_bits __read_mostly; | ||
140 | unsigned long sparc64_kern_sec_context __read_mostly; | ||
141 | |||
136 | int bigkernel = 0; | 142 | int bigkernel = 0; |
137 | 143 | ||
138 | /* XXX Tune this... */ | 144 | /* XXX Tune this... */ |
@@ -362,6 +368,7 @@ struct linux_prom_translation { | |||
362 | unsigned long data; | 368 | unsigned long data; |
363 | }; | 369 | }; |
364 | static struct linux_prom_translation prom_trans[512] __initdata; | 370 | static struct linux_prom_translation prom_trans[512] __initdata; |
371 | static unsigned int prom_trans_ents __initdata; | ||
365 | 372 | ||
366 | extern unsigned long prom_boot_page; | 373 | extern unsigned long prom_boot_page; |
367 | extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); | 374 | extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); |
@@ -375,57 +382,7 @@ unsigned long kern_locked_tte_data; | |||
375 | unsigned long prom_pmd_phys __read_mostly; | 382 | unsigned long prom_pmd_phys __read_mostly; |
376 | unsigned int swapper_pgd_zero __read_mostly; | 383 | unsigned int swapper_pgd_zero __read_mostly; |
377 | 384 | ||
378 | /* Allocate power-of-2 aligned chunks from the end of the | 385 | static pmd_t *prompmd __read_mostly; |
379 | * kernel image. Return physical address. | ||
380 | */ | ||
381 | static inline unsigned long early_alloc_phys(unsigned long size) | ||
382 | { | ||
383 | unsigned long base; | ||
384 | |||
385 | BUILD_BUG_ON(size & (size - 1)); | ||
386 | |||
387 | kern_size = (kern_size + (size - 1)) & ~(size - 1); | ||
388 | base = kern_base + kern_size; | ||
389 | kern_size += size; | ||
390 | |||
391 | return base; | ||
392 | } | ||
393 | |||
394 | static inline unsigned long load_phys32(unsigned long pa) | ||
395 | { | ||
396 | unsigned long val; | ||
397 | |||
398 | __asm__ __volatile__("lduwa [%1] %2, %0" | ||
399 | : "=&r" (val) | ||
400 | : "r" (pa), "i" (ASI_PHYS_USE_EC)); | ||
401 | |||
402 | return val; | ||
403 | } | ||
404 | |||
405 | static inline unsigned long load_phys64(unsigned long pa) | ||
406 | { | ||
407 | unsigned long val; | ||
408 | |||
409 | __asm__ __volatile__("ldxa [%1] %2, %0" | ||
410 | : "=&r" (val) | ||
411 | : "r" (pa), "i" (ASI_PHYS_USE_EC)); | ||
412 | |||
413 | return val; | ||
414 | } | ||
415 | |||
416 | static inline void store_phys32(unsigned long pa, unsigned long val) | ||
417 | { | ||
418 | __asm__ __volatile__("stwa %0, [%1] %2" | ||
419 | : /* no outputs */ | ||
420 | : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC)); | ||
421 | } | ||
422 | |||
423 | static inline void store_phys64(unsigned long pa, unsigned long val) | ||
424 | { | ||
425 | __asm__ __volatile__("stxa %0, [%1] %2" | ||
426 | : /* no outputs */ | ||
427 | : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC)); | ||
428 | } | ||
429 | 386 | ||
430 | #define BASE_PAGE_SIZE 8192 | 387 | #define BASE_PAGE_SIZE 8192 |
431 | 388 | ||
@@ -435,34 +392,28 @@ static inline void store_phys64(unsigned long pa, unsigned long val) | |||
435 | */ | 392 | */ |
436 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) | 393 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) |
437 | { | 394 | { |
438 | unsigned long pmd_phys = (prom_pmd_phys + | 395 | pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff); |
439 | ((promva >> 23) & 0x7ff) * sizeof(pmd_t)); | 396 | pte_t *ptep; |
440 | unsigned long pte_phys; | ||
441 | pmd_t pmd_ent; | ||
442 | pte_t pte_ent; | ||
443 | unsigned long base; | 397 | unsigned long base; |
444 | 398 | ||
445 | pmd_val(pmd_ent) = load_phys32(pmd_phys); | 399 | if (pmd_none(*pmdp)) { |
446 | if (pmd_none(pmd_ent)) { | ||
447 | if (error) | 400 | if (error) |
448 | *error = 1; | 401 | *error = 1; |
449 | return 0; | 402 | return 0; |
450 | } | 403 | } |
451 | 404 | ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff); | |
452 | pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL; | 405 | if (!pte_present(*ptep)) { |
453 | pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t); | ||
454 | pte_val(pte_ent) = load_phys64(pte_phys); | ||
455 | if (!pte_present(pte_ent)) { | ||
456 | if (error) | 406 | if (error) |
457 | *error = 1; | 407 | *error = 1; |
458 | return 0; | 408 | return 0; |
459 | } | 409 | } |
460 | if (error) { | 410 | if (error) { |
461 | *error = 0; | 411 | *error = 0; |
462 | return pte_val(pte_ent); | 412 | return pte_val(*ptep); |
463 | } | 413 | } |
464 | base = pte_val(pte_ent) & _PAGE_PADDR; | 414 | base = pte_val(*ptep) & _PAGE_PADDR; |
465 | return (base + (promva & (BASE_PAGE_SIZE - 1))); | 415 | |
416 | return base + (promva & (BASE_PAGE_SIZE - 1)); | ||
466 | } | 417 | } |
467 | 418 | ||
468 | /* The obp translations are saved based on 8k pagesize, since obp can | 419 | /* The obp translations are saved based on 8k pagesize, since obp can |
@@ -475,25 +426,20 @@ static void __init build_obp_range(unsigned long start, unsigned long end, unsig | |||
475 | unsigned long vaddr; | 426 | unsigned long vaddr; |
476 | 427 | ||
477 | for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) { | 428 | for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) { |
478 | unsigned long val, pte_phys, pmd_phys; | 429 | unsigned long val; |
479 | pmd_t pmd_ent; | 430 | pmd_t *pmd; |
480 | int i; | 431 | pte_t *pte; |
481 | |||
482 | pmd_phys = (prom_pmd_phys + | ||
483 | (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t))); | ||
484 | pmd_val(pmd_ent) = load_phys32(pmd_phys); | ||
485 | if (pmd_none(pmd_ent)) { | ||
486 | pte_phys = early_alloc_phys(BASE_PAGE_SIZE); | ||
487 | |||
488 | for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++) | ||
489 | store_phys64(pte_phys+i*sizeof(pte_t),0); | ||
490 | 432 | ||
491 | pmd_val(pmd_ent) = pte_phys >> 11UL; | 433 | pmd = prompmd + ((vaddr >> 23) & 0x7ff); |
492 | store_phys32(pmd_phys, pmd_val(pmd_ent)); | 434 | if (pmd_none(*pmd)) { |
435 | pte = __alloc_bootmem(BASE_PAGE_SIZE, BASE_PAGE_SIZE, | ||
436 | PAGE_SIZE); | ||
437 | if (!pte) | ||
438 | prom_halt(); | ||
439 | memset(pte, 0, BASE_PAGE_SIZE); | ||
440 | pmd_set(pmd, pte); | ||
493 | } | 441 | } |
494 | 442 | pte = (pte_t *) __pmd_page(*pmd) + ((vaddr >> 13) & 0x3ff); | |
495 | pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL; | ||
496 | pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t)); | ||
497 | 443 | ||
498 | val = data; | 444 | val = data; |
499 | 445 | ||
@@ -501,7 +447,8 @@ static void __init build_obp_range(unsigned long start, unsigned long end, unsig | |||
501 | if (tlb_type == spitfire) | 447 | if (tlb_type == spitfire) |
502 | val &= ~0x0003fe0000000000UL; | 448 | val &= ~0x0003fe0000000000UL; |
503 | 449 | ||
504 | store_phys64(pte_phys, val | _PAGE_MODIFIED); | 450 | set_pte_at(&init_mm, vaddr, pte, |
451 | __pte(val | _PAGE_MODIFIED)); | ||
505 | 452 | ||
506 | data += BASE_PAGE_SIZE; | 453 | data += BASE_PAGE_SIZE; |
507 | } | 454 | } |
@@ -514,13 +461,17 @@ static inline int in_obp_range(unsigned long vaddr) | |||
514 | } | 461 | } |
515 | 462 | ||
516 | #define OBP_PMD_SIZE 2048 | 463 | #define OBP_PMD_SIZE 2048 |
517 | static void __init build_obp_pgtable(int prom_trans_ents) | 464 | static void __init build_obp_pgtable(void) |
518 | { | 465 | { |
519 | unsigned long i; | 466 | unsigned long i; |
520 | 467 | ||
521 | prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE); | 468 | prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, PAGE_SIZE); |
522 | for (i = 0; i < OBP_PMD_SIZE; i += 4) | 469 | if (!prompmd) |
523 | store_phys32(prom_pmd_phys + i, 0); | 470 | prom_halt(); |
471 | |||
472 | memset(prompmd, 0, OBP_PMD_SIZE); | ||
473 | |||
474 | prom_pmd_phys = __pa(prompmd); | ||
524 | 475 | ||
525 | for (i = 0; i < prom_trans_ents; i++) { | 476 | for (i = 0; i < prom_trans_ents; i++) { |
526 | unsigned long start, end; | 477 | unsigned long start, end; |
@@ -540,7 +491,7 @@ static void __init build_obp_pgtable(int prom_trans_ents) | |||
540 | /* Read OBP translations property into 'prom_trans[]'. | 491 | /* Read OBP translations property into 'prom_trans[]'. |
541 | * Return the number of entries. | 492 | * Return the number of entries. |
542 | */ | 493 | */ |
543 | static int __init read_obp_translations(void) | 494 | static void __init read_obp_translations(void) |
544 | { | 495 | { |
545 | int n, node; | 496 | int n, node; |
546 | 497 | ||
@@ -561,8 +512,10 @@ static int __init read_obp_translations(void) | |||
561 | prom_printf("prom_mappings: Couldn't get property.\n"); | 512 | prom_printf("prom_mappings: Couldn't get property.\n"); |
562 | prom_halt(); | 513 | prom_halt(); |
563 | } | 514 | } |
515 | |||
564 | n = n / sizeof(struct linux_prom_translation); | 516 | n = n / sizeof(struct linux_prom_translation); |
565 | return n; | 517 | |
518 | prom_trans_ents = n; | ||
566 | } | 519 | } |
567 | 520 | ||
568 | static void __init remap_kernel(void) | 521 | static void __init remap_kernel(void) |
@@ -582,28 +535,38 @@ static void __init remap_kernel(void) | |||
582 | prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); | 535 | prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); |
583 | prom_itlb_load(tlb_ent, tte_data, tte_vaddr); | 536 | prom_itlb_load(tlb_ent, tte_data, tte_vaddr); |
584 | if (bigkernel) { | 537 | if (bigkernel) { |
585 | prom_dtlb_load(tlb_ent - 1, | 538 | tlb_ent -= 1; |
539 | prom_dtlb_load(tlb_ent, | ||
586 | tte_data + 0x400000, | 540 | tte_data + 0x400000, |
587 | tte_vaddr + 0x400000); | 541 | tte_vaddr + 0x400000); |
588 | prom_itlb_load(tlb_ent - 1, | 542 | prom_itlb_load(tlb_ent, |
589 | tte_data + 0x400000, | 543 | tte_data + 0x400000, |
590 | tte_vaddr + 0x400000); | 544 | tte_vaddr + 0x400000); |
591 | } | 545 | } |
546 | sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; | ||
547 | if (tlb_type == cheetah_plus) { | ||
548 | sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | | ||
549 | CTX_CHEETAH_PLUS_NUC); | ||
550 | sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; | ||
551 | sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; | ||
552 | } | ||
592 | } | 553 | } |
593 | 554 | ||
594 | static void __init inherit_prom_mappings(void) | ||
595 | { | ||
596 | int n; | ||
597 | 555 | ||
598 | n = read_obp_translations(); | 556 | static void __init inherit_prom_mappings_pre(void) |
599 | build_obp_pgtable(n); | 557 | { |
558 | read_obp_translations(); | ||
600 | 559 | ||
601 | /* Now fixup OBP's idea about where we really are mapped. */ | 560 | /* Now fixup OBP's idea about where we really are mapped. */ |
602 | prom_printf("Remapping the kernel... "); | 561 | prom_printf("Remapping the kernel... "); |
603 | remap_kernel(); | 562 | remap_kernel(); |
604 | 563 | ||
605 | prom_printf("done.\n"); | 564 | prom_printf("done.\n"); |
565 | } | ||
606 | 566 | ||
567 | static void __init inherit_prom_mappings_post(void) | ||
568 | { | ||
569 | build_obp_pgtable(); | ||
607 | register_prom_callbacks(); | 570 | register_prom_callbacks(); |
608 | } | 571 | } |
609 | 572 | ||
@@ -788,8 +751,8 @@ void inherit_locked_prom_mappings(int save_p) | |||
788 | } | 751 | } |
789 | } | 752 | } |
790 | if (tlb_type == spitfire) { | 753 | if (tlb_type == spitfire) { |
791 | int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel; | 754 | int high = sparc64_highest_unlocked_tlb_ent; |
792 | for (i = 0; i < high; i++) { | 755 | for (i = 0; i <= high; i++) { |
793 | unsigned long data; | 756 | unsigned long data; |
794 | 757 | ||
795 | /* Spitfire Errata #32 workaround */ | 758 | /* Spitfire Errata #32 workaround */ |
@@ -877,9 +840,9 @@ void inherit_locked_prom_mappings(int save_p) | |||
877 | } | 840 | } |
878 | } | 841 | } |
879 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | 842 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
880 | int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel; | 843 | int high = sparc64_highest_unlocked_tlb_ent; |
881 | 844 | ||
882 | for (i = 0; i < high; i++) { | 845 | for (i = 0; i <= high; i++) { |
883 | unsigned long data; | 846 | unsigned long data; |
884 | 847 | ||
885 | data = cheetah_get_ldtlb_data(i); | 848 | data = cheetah_get_ldtlb_data(i); |
@@ -1556,8 +1519,7 @@ void __init paging_init(void) | |||
1556 | 1519 | ||
1557 | swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); | 1520 | swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); |
1558 | 1521 | ||
1559 | /* Inherit non-locked OBP mappings. */ | 1522 | inherit_prom_mappings_pre(); |
1560 | inherit_prom_mappings(); | ||
1561 | 1523 | ||
1562 | /* Ok, we can use our TLB miss and window trap handlers safely. | 1524 | /* Ok, we can use our TLB miss and window trap handlers safely. |
1563 | * We need to do a quick peek here to see if we are on StarFire | 1525 | * We need to do a quick peek here to see if we are on StarFire |
@@ -1568,15 +1530,23 @@ void __init paging_init(void) | |||
1568 | extern void setup_tba(int); | 1530 | extern void setup_tba(int); |
1569 | setup_tba(this_is_starfire); | 1531 | setup_tba(this_is_starfire); |
1570 | } | 1532 | } |
1571 | |||
1572 | inherit_locked_prom_mappings(1); | ||
1573 | |||
1574 | __flush_tlb_all(); | 1533 | __flush_tlb_all(); |
1575 | 1534 | ||
1535 | /* Everything from this point forward, until we are done with | ||
1536 | * inherit_prom_mappings_post(), must complete successfully | ||
1537 | * without calling into the firmware. The firmware page tables | ||
1538 | * have not been built, but we are running on the Linux kernel's | ||
1539 | * trap table. | ||
1540 | */ | ||
1541 | |||
1576 | /* Setup bootmem... */ | 1542 | /* Setup bootmem... */ |
1577 | pages_avail = 0; | 1543 | pages_avail = 0; |
1578 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail); | 1544 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail); |
1579 | 1545 | ||
1546 | inherit_prom_mappings_post(); | ||
1547 | |||
1548 | inherit_locked_prom_mappings(1); | ||
1549 | |||
1580 | #ifdef CONFIG_DEBUG_PAGEALLOC | 1550 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1581 | kernel_physical_mapping_init(); | 1551 | kernel_physical_mapping_init(); |
1582 | #endif | 1552 | #endif |
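The init.c changes above rework the early MM bring-up: the private physical-memory accessors (early_alloc_phys, load_phys32/64, store_phys32/64) are dropped, the OBP page table is now built from bootmem through ordinary pmd_t/pte_t pointers, and inherit_prom_mappings() is split so that the bootmem-dependent half runs only after bootmem_init(). The resulting ordering in paging_init(), condensed from the hunk above (a sketch, not a verbatim quote):

    inherit_prom_mappings_pre();    /* read OBP translations, remap the kernel   */
    /* Linux trap table is live from here on; no firmware calls are allowed
     * until inherit_prom_mappings_post() has built the OBP page tables.
     */
    __flush_tlb_all();
    last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
    inherit_prom_mappings_post();   /* build OBP page table with __alloc_bootmem */
    inherit_locked_prom_mappings(1);

The locked-TLB bookkeeping also moves into remap_kernel(), which records sparc64_highest_unlocked_tlb_ent so that inherit_locked_prom_mappings() iterates up to and including that entry instead of recomputing the bound from bigkernel.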
diff --git a/arch/um/include/registers.h b/arch/um/include/registers.h index 0a35e6d0baa0..4892e5fcef07 100644 --- a/arch/um/include/registers.h +++ b/arch/um/include/registers.h | |||
@@ -15,16 +15,6 @@ extern void save_registers(int pid, union uml_pt_regs *regs); | |||
15 | extern void restore_registers(int pid, union uml_pt_regs *regs); | 15 | extern void restore_registers(int pid, union uml_pt_regs *regs); |
16 | extern void init_registers(int pid); | 16 | extern void init_registers(int pid); |
17 | extern void get_safe_registers(unsigned long * regs); | 17 | extern void get_safe_registers(unsigned long * regs); |
18 | extern void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer); | ||
18 | 19 | ||
19 | #endif | 20 | #endif |
20 | |||
21 | /* | ||
22 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
23 | * Emacs will notice this stuff at the end of the file and automatically | ||
24 | * adjust the settings for this buffer only. This must remain at the end | ||
25 | * of the file. | ||
26 | * --------------------------------------------------------------------------- | ||
27 | * Local variables: | ||
28 | * c-file-style: "linux" | ||
29 | * End: | ||
30 | */ | ||
diff --git a/arch/um/include/sysdep-x86_64/ptrace.h b/arch/um/include/sysdep-x86_64/ptrace.h index 331aa2d1f3f5..8f0656766c21 100644 --- a/arch/um/include/sysdep-x86_64/ptrace.h +++ b/arch/um/include/sysdep-x86_64/ptrace.h | |||
@@ -218,10 +218,6 @@ struct syscall_args { | |||
218 | case RBP: UPT_RBP(regs) = __upt_val; break; \ | 218 | case RBP: UPT_RBP(regs) = __upt_val; break; \ |
219 | case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \ | 219 | case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \ |
220 | case CS: UPT_CS(regs) = __upt_val; break; \ | 220 | case CS: UPT_CS(regs) = __upt_val; break; \ |
221 | case DS: UPT_DS(regs) = __upt_val; break; \ | ||
222 | case ES: UPT_ES(regs) = __upt_val; break; \ | ||
223 | case FS: UPT_FS(regs) = __upt_val; break; \ | ||
224 | case GS: UPT_GS(regs) = __upt_val; break; \ | ||
225 | case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \ | 221 | case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \ |
226 | default : \ | 222 | default : \ |
227 | panic("Bad register in UPT_SET : %d\n", reg); \ | 223 | panic("Bad register in UPT_SET : %d\n", reg); \ |
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index f80850091e79..b331e970002f 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c | |||
@@ -62,13 +62,7 @@ void show_stack(struct task_struct *task, unsigned long *esp) | |||
62 | 62 | ||
63 | if (esp == NULL) { | 63 | if (esp == NULL) { |
64 | if (task != current && task != NULL) { | 64 | if (task != current && task != NULL) { |
65 | /* XXX: Isn't this bogus? I.e. isn't this the | ||
66 | * *userspace* stack of this task? If not so, use this | ||
67 | * even when task == current (as in i386). | ||
68 | */ | ||
69 | esp = (unsigned long *) KSTK_ESP(task); | 65 | esp = (unsigned long *) KSTK_ESP(task); |
70 | /* Which one? No actual difference - just coding style.*/ | ||
71 | //esp = (unsigned long *) PT_REGS_IP(&task->thread.regs); | ||
72 | } else { | 66 | } else { |
73 | esp = (unsigned long *) &esp; | 67 | esp = (unsigned long *) &esp; |
74 | } | 68 | } |
@@ -84,5 +78,5 @@ void show_stack(struct task_struct *task, unsigned long *esp) | |||
84 | } | 78 | } |
85 | 79 | ||
86 | printk("Call Trace: \n"); | 80 | printk("Call Trace: \n"); |
87 | show_trace(current, esp); | 81 | show_trace(task, esp); |
88 | } | 82 | } |
diff --git a/arch/um/os-Linux/sys-i386/registers.c b/arch/um/os-Linux/sys-i386/registers.c index 3125d320722c..aee4812333c6 100644 --- a/arch/um/os-Linux/sys-i386/registers.c +++ b/arch/um/os-Linux/sys-i386/registers.c | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #include <errno.h> | 6 | #include <errno.h> |
7 | #include <string.h> | 7 | #include <string.h> |
8 | #include <setjmp.h> | ||
8 | #include "sysdep/ptrace_user.h" | 9 | #include "sysdep/ptrace_user.h" |
9 | #include "sysdep/ptrace.h" | 10 | #include "sysdep/ptrace.h" |
10 | #include "uml-config.h" | 11 | #include "uml-config.h" |
@@ -126,13 +127,11 @@ void get_safe_registers(unsigned long *regs) | |||
126 | memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long)); | 127 | memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long)); |
127 | } | 128 | } |
128 | 129 | ||
129 | /* | 130 | void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer) |
130 | * Overrides for Emacs so that we follow Linus's tabbing style. | 131 | { |
131 | * Emacs will notice this stuff at the end of the file and automatically | 132 | struct __jmp_buf_tag *jmpbuf = buffer; |
132 | * adjust the settings for this buffer only. This must remain at the end | 133 | |
133 | * of the file. | 134 | UPT_SET(uml_regs, EIP, jmpbuf->__jmpbuf[JB_PC]); |
134 | * --------------------------------------------------------------------------- | 135 | UPT_SET(uml_regs, UESP, jmpbuf->__jmpbuf[JB_SP]); |
135 | * Local variables: | 136 | UPT_SET(uml_regs, EBP, jmpbuf->__jmpbuf[JB_BP]); |
136 | * c-file-style: "linux" | 137 | } |
137 | * End: | ||
138 | */ | ||
diff --git a/arch/um/os-Linux/sys-x86_64/registers.c b/arch/um/os-Linux/sys-x86_64/registers.c index 44438d15c3d6..4b638dfb52b0 100644 --- a/arch/um/os-Linux/sys-x86_64/registers.c +++ b/arch/um/os-Linux/sys-x86_64/registers.c | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #include <errno.h> | 6 | #include <errno.h> |
7 | #include <string.h> | 7 | #include <string.h> |
8 | #include <setjmp.h> | ||
8 | #include "ptrace_user.h" | 9 | #include "ptrace_user.h" |
9 | #include "uml-config.h" | 10 | #include "uml-config.h" |
10 | #include "skas_ptregs.h" | 11 | #include "skas_ptregs.h" |
@@ -74,13 +75,11 @@ void get_safe_registers(unsigned long *regs) | |||
74 | memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long)); | 75 | memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long)); |
75 | } | 76 | } |
76 | 77 | ||
77 | /* | 78 | void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer) |
78 | * Overrides for Emacs so that we follow Linus's tabbing style. | 79 | { |
79 | * Emacs will notice this stuff at the end of the file and automatically | 80 | struct __jmp_buf_tag *jmpbuf = buffer; |
80 | * adjust the settings for this buffer only. This must remain at the end | 81 | |
81 | * of the file. | 82 | UPT_SET(uml_regs, RIP, jmpbuf->__jmpbuf[JB_PC]); |
82 | * --------------------------------------------------------------------------- | 83 | UPT_SET(uml_regs, RSP, jmpbuf->__jmpbuf[JB_RSP]); |
83 | * Local variables: | 84 | UPT_SET(uml_regs, RBP, jmpbuf->__jmpbuf[JB_RBP]); |
84 | * c-file-style: "linux" | 85 | } |
85 | * End: | ||
86 | */ | ||
diff --git a/arch/um/sys-i386/sysrq.c b/arch/um/sys-i386/sysrq.c index e3706d15c4f5..d5244f070539 100644 --- a/arch/um/sys-i386/sysrq.c +++ b/arch/um/sys-i386/sysrq.c | |||
@@ -88,9 +88,7 @@ void show_trace(struct task_struct* task, unsigned long * stack) | |||
88 | task = current; | 88 | task = current; |
89 | 89 | ||
90 | if (task != current) { | 90 | if (task != current) { |
91 | //ebp = (unsigned long) KSTK_EBP(task); | 91 | ebp = (unsigned long) KSTK_EBP(task); |
92 | /* Which one? No actual difference - just coding style.*/ | ||
93 | ebp = (unsigned long) PT_REGS_EBP(&task->thread.regs); | ||
94 | } else { | 92 | } else { |
95 | asm ("movl %%ebp, %0" : "=r" (ebp) : ); | 93 | asm ("movl %%ebp, %0" : "=r" (ebp) : ); |
96 | } | 94 | } |
@@ -99,15 +97,6 @@ void show_trace(struct task_struct* task, unsigned long * stack) | |||
99 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); | 97 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); |
100 | print_context_stack(context, stack, ebp); | 98 | print_context_stack(context, stack, ebp); |
101 | 99 | ||
102 | /*while (((long) stack & (THREAD_SIZE-1)) != 0) { | ||
103 | addr = *stack; | ||
104 | if (__kernel_text_address(addr)) { | ||
105 | printk("%08lx: [<%08lx>]", (unsigned long) stack, addr); | ||
106 | print_symbol(" %s", addr); | ||
107 | printk("\n"); | ||
108 | } | ||
109 | stack++; | ||
110 | }*/ | ||
111 | printk("\n"); | 100 | printk("\n"); |
112 | } | 101 | } |
113 | 102 | ||
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c index 677fc26a9bbe..26b68675053d 100644 --- a/arch/um/sys-i386/user-offsets.c +++ b/arch/um/sys-i386/user-offsets.c | |||
@@ -46,7 +46,7 @@ void foo(void) | |||
46 | OFFSET(HOST_SC_FP_ST, _fpstate, _st); | 46 | OFFSET(HOST_SC_FP_ST, _fpstate, _st); |
47 | OFFSET(HOST_SC_FXSR_ENV, _fpstate, _fxsr_env); | 47 | OFFSET(HOST_SC_FXSR_ENV, _fpstate, _fxsr_env); |
48 | 48 | ||
49 | DEFINE_LONGS(HOST_FRAME_SIZE, FRAME_SIZE); | 49 | DEFINE(HOST_FRAME_SIZE, FRAME_SIZE); |
50 | DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_i387_struct)); | 50 | DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_i387_struct)); |
51 | DEFINE_LONGS(HOST_XFP_SIZE, sizeof(struct user_fxsr_struct)); | 51 | DEFINE_LONGS(HOST_XFP_SIZE, sizeof(struct user_fxsr_struct)); |
52 | 52 | ||
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S index 4592bf21fcaf..b92e5f45ed46 100644 --- a/arch/x86_64/kernel/head.S +++ b/arch/x86_64/kernel/head.S | |||
@@ -270,26 +270,26 @@ ENTRY(level3_kernel_pgt) | |||
270 | .org 0x4000 | 270 | .org 0x4000 |
271 | ENTRY(level2_ident_pgt) | 271 | ENTRY(level2_ident_pgt) |
272 | /* 40MB for bootup. */ | 272 | /* 40MB for bootup. */ |
273 | .quad 0x0000000000000183 | 273 | .quad 0x0000000000000083 |
274 | .quad 0x0000000000200183 | 274 | .quad 0x0000000000200083 |
275 | .quad 0x0000000000400183 | 275 | .quad 0x0000000000400083 |
276 | .quad 0x0000000000600183 | 276 | .quad 0x0000000000600083 |
277 | .quad 0x0000000000800183 | 277 | .quad 0x0000000000800083 |
278 | .quad 0x0000000000A00183 | 278 | .quad 0x0000000000A00083 |
279 | .quad 0x0000000000C00183 | 279 | .quad 0x0000000000C00083 |
280 | .quad 0x0000000000E00183 | 280 | .quad 0x0000000000E00083 |
281 | .quad 0x0000000001000183 | 281 | .quad 0x0000000001000083 |
282 | .quad 0x0000000001200183 | 282 | .quad 0x0000000001200083 |
283 | .quad 0x0000000001400183 | 283 | .quad 0x0000000001400083 |
284 | .quad 0x0000000001600183 | 284 | .quad 0x0000000001600083 |
285 | .quad 0x0000000001800183 | 285 | .quad 0x0000000001800083 |
286 | .quad 0x0000000001A00183 | 286 | .quad 0x0000000001A00083 |
287 | .quad 0x0000000001C00183 | 287 | .quad 0x0000000001C00083 |
288 | .quad 0x0000000001E00183 | 288 | .quad 0x0000000001E00083 |
289 | .quad 0x0000000002000183 | 289 | .quad 0x0000000002000083 |
290 | .quad 0x0000000002200183 | 290 | .quad 0x0000000002200083 |
291 | .quad 0x0000000002400183 | 291 | .quad 0x0000000002400083 |
292 | .quad 0x0000000002600183 | 292 | .quad 0x0000000002600083 |
293 | /* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */ | 293 | /* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */ |
294 | .globl temp_boot_pmds | 294 | .globl temp_boot_pmds |
295 | temp_boot_pmds: | 295 | temp_boot_pmds: |
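The level2_ident_pgt change flips every 2MB boot mapping from 0x...183 to 0x...083, i.e. the entries are no longer marked global. Decoded with the conventional x86 page-table flag values (shown here for reference, not part of the patch):

    #define _PAGE_PRESENT  0x001
    #define _PAGE_RW       0x002
    #define _PAGE_PSE      0x080    /* 2MB page at the PMD level */
    #define _PAGE_GLOBAL   0x100

    /* old entries: _PAGE_PRESENT | _PAGE_RW | _PAGE_PSE | _PAGE_GLOBAL  (0x183)
     * new entries: _PAGE_PRESENT | _PAGE_RW | _PAGE_PSE                 (0x083)
     */

With the global bit cleared, these early identity mappings are flushed like any other entry when CR3 is reloaded rather than surviving TLB flushes.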
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 2bf723a7b6e6..6f1a83c9d9e0 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c | |||
@@ -178,14 +178,12 @@ fore200e_irq_itoa(int irq) | |||
178 | 178 | ||
179 | 179 | ||
180 | static void* | 180 | static void* |
181 | fore200e_kmalloc(int size, int flags) | 181 | fore200e_kmalloc(int size, unsigned int __nocast flags) |
182 | { | 182 | { |
183 | void* chunk = kmalloc(size, flags); | 183 | void *chunk = kzalloc(size, flags); |
184 | 184 | ||
185 | if (chunk) | 185 | if (!chunk) |
186 | memset(chunk, 0x00, size); | 186 | printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags); |
187 | else | ||
188 | printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags); | ||
189 | 187 | ||
190 | return chunk; | 188 | return chunk; |
191 | } | 189 | } |
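fore200e_kmalloc() is reduced to a single kzalloc() call, which allocates and zeroes in one step, leaving only the failure message to handle (the gfp argument also picks up the __nocast annotation discussed after the connector.c hunk below). A minimal sketch of the kzalloc() idiom, using an assumed structure name:

    #include <linux/slab.h>

    struct foo_state {
            int count;
            void *ring;
    };

    static struct foo_state *foo_state_alloc(void)
    {
            /* zeroed memory on success, NULL on failure; no memset() needed */
            return kzalloc(sizeof(struct foo_state), GFP_KERNEL);
    }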
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c index 95a976c96eb8..70458cb061c6 100644 --- a/drivers/char/drm/drm_stub.c +++ b/drivers/char/drm/drm_stub.c | |||
@@ -47,7 +47,7 @@ MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards"); | |||
47 | MODULE_PARM_DESC(debug, "Enable debug output"); | 47 | MODULE_PARM_DESC(debug, "Enable debug output"); |
48 | 48 | ||
49 | module_param_named(cards_limit, drm_cards_limit, int, 0444); | 49 | module_param_named(cards_limit, drm_cards_limit, int, 0444); |
50 | module_param_named(debug, drm_debug, int, 0666); | 50 | module_param_named(debug, drm_debug, int, 0600); |
51 | 51 | ||
52 | drm_head_t **drm_heads; | 52 | drm_head_t **drm_heads; |
53 | struct drm_sysfs_class *drm_class; | 53 | struct drm_sysfs_class *drm_class; |
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index bb0b3a8de14b..1422285d537c 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c | |||
@@ -69,7 +69,8 @@ int cn_already_initialized = 0; | |||
69 | * a new message. | 69 | * a new message. |
70 | * | 70 | * |
71 | */ | 71 | */ |
72 | int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask) | 72 | int cn_netlink_send(struct cn_msg *msg, u32 __group, |
73 | unsigned int __nocast gfp_mask) | ||
73 | { | 74 | { |
74 | struct cn_callback_entry *__cbq; | 75 | struct cn_callback_entry *__cbq; |
75 | unsigned int size; | 76 | unsigned int size; |
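cn_netlink_send() now takes its allocation flags as unsigned int __nocast, the same annotation added to fore200e_kmalloc() above and bond_mc_list_copy() below. The annotation is only meaningful to sparse (for a regular gcc build it expands to nothing) and is intended to make sparse warn when the allocation-flags argument is mixed with plain integers. A sketch of such a signature with assumed names, not taken from the patch:

    #include <linux/compiler.h>
    #include <linux/slab.h>

    /* gfp_mask is annotated so sparse can flag suspicious implicit
     * conversions; gcc ignores the annotation entirely.
     */
    static void *tagged_alloc(size_t size, unsigned int __nocast gfp_mask)
    {
            return kzalloc(size, gfp_mask);
    }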
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index ffbcd40418d5..23a3f56c7899 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c | |||
@@ -503,6 +503,25 @@ err_free_aux: | |||
503 | return err; | 503 | return err; |
504 | } | 504 | } |
505 | 505 | ||
506 | static void mthca_free_icms(struct mthca_dev *mdev) | ||
507 | { | ||
508 | u8 status; | ||
509 | |||
510 | mthca_free_icm_table(mdev, mdev->mcg_table.table); | ||
511 | if (mdev->mthca_flags & MTHCA_FLAG_SRQ) | ||
512 | mthca_free_icm_table(mdev, mdev->srq_table.table); | ||
513 | mthca_free_icm_table(mdev, mdev->cq_table.table); | ||
514 | mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); | ||
515 | mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); | ||
516 | mthca_free_icm_table(mdev, mdev->qp_table.qp_table); | ||
517 | mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); | ||
518 | mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); | ||
519 | mthca_unmap_eq_icm(mdev); | ||
520 | |||
521 | mthca_UNMAP_ICM_AUX(mdev, &status); | ||
522 | mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); | ||
523 | } | ||
524 | |||
506 | static int __devinit mthca_init_arbel(struct mthca_dev *mdev) | 525 | static int __devinit mthca_init_arbel(struct mthca_dev *mdev) |
507 | { | 526 | { |
508 | struct mthca_dev_lim dev_lim; | 527 | struct mthca_dev_lim dev_lim; |
@@ -580,18 +599,7 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev) | |||
580 | return 0; | 599 | return 0; |
581 | 600 | ||
582 | err_free_icm: | 601 | err_free_icm: |
583 | if (mdev->mthca_flags & MTHCA_FLAG_SRQ) | 602 | mthca_free_icms(mdev); |
584 | mthca_free_icm_table(mdev, mdev->srq_table.table); | ||
585 | mthca_free_icm_table(mdev, mdev->cq_table.table); | ||
586 | mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); | ||
587 | mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); | ||
588 | mthca_free_icm_table(mdev, mdev->qp_table.qp_table); | ||
589 | mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); | ||
590 | mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); | ||
591 | mthca_unmap_eq_icm(mdev); | ||
592 | |||
593 | mthca_UNMAP_ICM_AUX(mdev, &status); | ||
594 | mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); | ||
595 | 603 | ||
596 | err_stop_fw: | 604 | err_stop_fw: |
597 | mthca_UNMAP_FA(mdev, &status); | 605 | mthca_UNMAP_FA(mdev, &status); |
@@ -611,18 +619,7 @@ static void mthca_close_hca(struct mthca_dev *mdev) | |||
611 | mthca_CLOSE_HCA(mdev, 0, &status); | 619 | mthca_CLOSE_HCA(mdev, 0, &status); |
612 | 620 | ||
613 | if (mthca_is_memfree(mdev)) { | 621 | if (mthca_is_memfree(mdev)) { |
614 | if (mdev->mthca_flags & MTHCA_FLAG_SRQ) | 622 | mthca_free_icms(mdev); |
615 | mthca_free_icm_table(mdev, mdev->srq_table.table); | ||
616 | mthca_free_icm_table(mdev, mdev->cq_table.table); | ||
617 | mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); | ||
618 | mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); | ||
619 | mthca_free_icm_table(mdev, mdev->qp_table.qp_table); | ||
620 | mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); | ||
621 | mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); | ||
622 | mthca_unmap_eq_icm(mdev); | ||
623 | |||
624 | mthca_UNMAP_ICM_AUX(mdev, &status); | ||
625 | mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); | ||
626 | 623 | ||
627 | mthca_UNMAP_FA(mdev, &status); | 624 | mthca_UNMAP_FA(mdev, &status); |
628 | mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); | 625 | mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); |
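The two copies of the ICM teardown sequence (one on the mthca_init_arbel() error path, one in mthca_close_hca()) are folded into the new mthca_free_icms() helper, so the tables are always released in the same reverse-of-allocation order from a single place. The general shape of that pattern, sketched with placeholder names only:

    struct dev_ctx {
            void *a, *b, *c;        /* allocated in the order a, b, c */
    };

    /* Shared by the init error path and the shutdown path: release in
     * reverse order of allocation, from one place.
     */
    static void dev_free_resources(struct dev_ctx *ctx)
    {
            release_resource_c(ctx->c);
            release_resource_b(ctx->b);
            release_resource_a(ctx->a);
    }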
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 704f48e0b6a7..6c5bf07489f4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -474,7 +474,7 @@ err: | |||
474 | spin_unlock(&priv->lock); | 474 | spin_unlock(&priv->lock); |
475 | } | 475 | } |
476 | 476 | ||
477 | static void path_lookup(struct sk_buff *skb, struct net_device *dev) | 477 | static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) |
478 | { | 478 | { |
479 | struct ipoib_dev_priv *priv = netdev_priv(skb->dev); | 479 | struct ipoib_dev_priv *priv = netdev_priv(skb->dev); |
480 | 480 | ||
@@ -569,7 +569,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
569 | 569 | ||
570 | if (skb->dst && skb->dst->neighbour) { | 570 | if (skb->dst && skb->dst->neighbour) { |
571 | if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { | 571 | if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { |
572 | path_lookup(skb, dev); | 572 | ipoib_path_lookup(skb, dev); |
573 | goto out; | 573 | goto out; |
574 | } | 574 | } |
575 | 575 | ||
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c index 10f6ce1bc0ab..612564ac6f7b 100644 --- a/drivers/mfd/ucb1x00-core.c +++ b/drivers/mfd/ucb1x00-core.c | |||
@@ -642,8 +642,6 @@ static void __exit ucb1x00_exit(void) | |||
642 | module_init(ucb1x00_init); | 642 | module_init(ucb1x00_init); |
643 | module_exit(ucb1x00_exit); | 643 | module_exit(ucb1x00_exit); |
644 | 644 | ||
645 | EXPORT_SYMBOL(ucb1x00_class); | ||
646 | |||
647 | EXPORT_SYMBOL(ucb1x00_io_set_dir); | 645 | EXPORT_SYMBOL(ucb1x00_io_set_dir); |
648 | EXPORT_SYMBOL(ucb1x00_io_write); | 646 | EXPORT_SYMBOL(ucb1x00_io_write); |
649 | EXPORT_SYMBOL(ucb1x00_io_read); | 647 | EXPORT_SYMBOL(ucb1x00_io_read); |
diff --git a/drivers/mfd/ucb1x00.h b/drivers/mfd/ucb1x00.h index 6b632644f59a..9c9a647d8b7b 100644 --- a/drivers/mfd/ucb1x00.h +++ b/drivers/mfd/ucb1x00.h | |||
@@ -106,8 +106,6 @@ struct ucb1x00_irq { | |||
106 | void (*fn)(int, void *); | 106 | void (*fn)(int, void *); |
107 | }; | 107 | }; |
108 | 108 | ||
109 | extern struct class ucb1x00_class; | ||
110 | |||
111 | struct ucb1x00 { | 109 | struct ucb1x00 { |
112 | spinlock_t lock; | 110 | spinlock_t lock; |
113 | struct mcp *mcp; | 111 | struct mcp *mcp; |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 2a908c4690a7..c748b0e16419 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -1655,7 +1655,7 @@ config LAN_SAA9730 | |||
1655 | 1655 | ||
1656 | config NET_POCKET | 1656 | config NET_POCKET |
1657 | bool "Pocket and portable adapters" | 1657 | bool "Pocket and portable adapters" |
1658 | depends on NET_ETHERNET && ISA | 1658 | depends on NET_ETHERNET && PARPORT |
1659 | ---help--- | 1659 | ---help--- |
1660 | Cute little network (Ethernet) devices which attach to the parallel | 1660 | Cute little network (Ethernet) devices which attach to the parallel |
1661 | port ("pocket adapters"), commonly used with laptops. If you have | 1661 | port ("pocket adapters"), commonly used with laptops. If you have |
@@ -1679,7 +1679,7 @@ config NET_POCKET | |||
1679 | 1679 | ||
1680 | config ATP | 1680 | config ATP |
1681 | tristate "AT-LAN-TEC/RealTek pocket adapter support" | 1681 | tristate "AT-LAN-TEC/RealTek pocket adapter support" |
1682 | depends on NET_POCKET && ISA && X86 | 1682 | depends on NET_POCKET && PARPORT && X86 |
1683 | select CRC32 | 1683 | select CRC32 |
1684 | ---help--- | 1684 | ---help--- |
1685 | This is a network (Ethernet) device which attaches to your parallel | 1685 | This is a network (Ethernet) device which attaches to your parallel |
@@ -1694,7 +1694,7 @@ config ATP | |||
1694 | 1694 | ||
1695 | config DE600 | 1695 | config DE600 |
1696 | tristate "D-Link DE600 pocket adapter support" | 1696 | tristate "D-Link DE600 pocket adapter support" |
1697 | depends on NET_POCKET && ISA | 1697 | depends on NET_POCKET && PARPORT |
1698 | ---help--- | 1698 | ---help--- |
1699 | This is a network (Ethernet) device which attaches to your parallel | 1699 | This is a network (Ethernet) device which attaches to your parallel |
1700 | port. Read <file:Documentation/networking/DLINK.txt> as well as the | 1700 | port. Read <file:Documentation/networking/DLINK.txt> as well as the |
@@ -1709,7 +1709,7 @@ config DE600 | |||
1709 | 1709 | ||
1710 | config DE620 | 1710 | config DE620 |
1711 | tristate "D-Link DE620 pocket adapter support" | 1711 | tristate "D-Link DE620 pocket adapter support" |
1712 | depends on NET_POCKET && ISA | 1712 | depends on NET_POCKET && PARPORT |
1713 | ---help--- | 1713 | ---help--- |
1714 | This is a network (Ethernet) device which attaches to your parallel | 1714 | This is a network (Ethernet) device which attaches to your parallel |
1715 | port. Read <file:Documentation/networking/DLINK.txt> as well as the | 1715 | port. Read <file:Documentation/networking/DLINK.txt> as well as the |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bf81cd45e4d4..f0a5b772a386 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -487,6 +487,8 @@ | |||
487 | * * Added xmit_hash_policy_layer34() | 487 | * * Added xmit_hash_policy_layer34() |
488 | * - Modified by Jay Vosburgh <fubar@us.ibm.com> to also support mode 4. | 488 | * - Modified by Jay Vosburgh <fubar@us.ibm.com> to also support mode 4. |
489 | * Set version to 2.6.3. | 489 | * Set version to 2.6.3. |
490 | * 2005/09/26 - Jay Vosburgh <fubar@us.ibm.com> | ||
491 | * - Removed backwards compatibility for old ifenslaves. Version 2.6.4. | ||
490 | */ | 492 | */ |
491 | 493 | ||
492 | //#define BONDING_DEBUG 1 | 494 | //#define BONDING_DEBUG 1 |
@@ -595,14 +597,7 @@ static int arp_ip_count = 0; | |||
595 | static int bond_mode = BOND_MODE_ROUNDROBIN; | 597 | static int bond_mode = BOND_MODE_ROUNDROBIN; |
596 | static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2; | 598 | static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2; |
597 | static int lacp_fast = 0; | 599 | static int lacp_fast = 0; |
598 | static int app_abi_ver = 0; | 600 | |
599 | static int orig_app_abi_ver = -1; /* This is used to save the first ABI version | ||
600 | * we receive from the application. Once set, | ||
601 | * it won't be changed, and the module will | ||
602 | * refuse to enslave/release interfaces if the | ||
603 | * command comes from an application using | ||
604 | * another ABI version. | ||
605 | */ | ||
606 | struct bond_parm_tbl { | 601 | struct bond_parm_tbl { |
607 | char *modename; | 602 | char *modename; |
608 | int mode; | 603 | int mode; |
@@ -1294,12 +1289,13 @@ static void bond_mc_list_destroy(struct bonding *bond) | |||
1294 | /* | 1289 | /* |
1295 | * Copy all the Multicast addresses from src to the bonding device dst | 1290 | * Copy all the Multicast addresses from src to the bonding device dst |
1296 | */ | 1291 | */ |
1297 | static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, int gpf_flag) | 1292 | static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, |
1293 | unsigned int __nocast gfp_flag) | ||
1298 | { | 1294 | { |
1299 | struct dev_mc_list *dmi, *new_dmi; | 1295 | struct dev_mc_list *dmi, *new_dmi; |
1300 | 1296 | ||
1301 | for (dmi = mc_list; dmi; dmi = dmi->next) { | 1297 | for (dmi = mc_list; dmi; dmi = dmi->next) { |
1302 | new_dmi = kmalloc(sizeof(struct dev_mc_list), gpf_flag); | 1298 | new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag); |
1303 | 1299 | ||
1304 | if (!new_dmi) { | 1300 | if (!new_dmi) { |
1305 | /* FIXME: Potential memory leak !!! */ | 1301 | /* FIXME: Potential memory leak !!! */ |
@@ -1702,51 +1698,29 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de | |||
1702 | } | 1698 | } |
1703 | } | 1699 | } |
1704 | 1700 | ||
1705 | if (app_abi_ver >= 1) { | 1701 | /* |
1706 | /* The application is using an ABI, which requires the | 1702 | * Old ifenslave binaries are no longer supported. These can |
1707 | * slave interface to be closed. | 1703 | * be identified with moderate accuracy by the state of the slave: |
1708 | */ | 1704 | * the current ifenslave will set the interface down prior to |
1709 | if ((slave_dev->flags & IFF_UP)) { | 1705 | * enslaving it; the old ifenslave will not. |
1710 | printk(KERN_ERR DRV_NAME | 1706 | */ |
1711 | ": Error: %s is up\n", | 1707 | if ((slave_dev->flags & IFF_UP)) { |
1712 | slave_dev->name); | 1708 | printk(KERN_ERR DRV_NAME ": %s is up. " |
1713 | res = -EPERM; | 1709 | "This may be due to an out of date ifenslave.\n", |
1714 | goto err_undo_flags; | 1710 | slave_dev->name); |
1715 | } | 1711 | res = -EPERM; |
1716 | 1712 | goto err_undo_flags; | |
1717 | if (slave_dev->set_mac_address == NULL) { | 1713 | } |
1718 | printk(KERN_ERR DRV_NAME | ||
1719 | ": Error: The slave device you specified does " | ||
1720 | "not support setting the MAC address.\n"); | ||
1721 | printk(KERN_ERR | ||
1722 | "Your kernel likely does not support slave " | ||
1723 | "devices.\n"); | ||
1724 | 1714 | ||
1725 | res = -EOPNOTSUPP; | 1715 | if (slave_dev->set_mac_address == NULL) { |
1726 | goto err_undo_flags; | 1716 | printk(KERN_ERR DRV_NAME |
1727 | } | 1717 | ": Error: The slave device you specified does " |
1728 | } else { | 1718 | "not support setting the MAC address.\n"); |
1729 | /* The application is not using an ABI, which requires the | 1719 | printk(KERN_ERR |
1730 | * slave interface to be open. | 1720 | "Your kernel likely does not support slave devices.\n"); |
1731 | */ | ||
1732 | if (!(slave_dev->flags & IFF_UP)) { | ||
1733 | printk(KERN_ERR DRV_NAME | ||
1734 | ": Error: %s is not running\n", | ||
1735 | slave_dev->name); | ||
1736 | res = -EINVAL; | ||
1737 | goto err_undo_flags; | ||
1738 | } | ||
1739 | 1721 | ||
1740 | if ((bond->params.mode == BOND_MODE_8023AD) || | 1722 | res = -EOPNOTSUPP; |
1741 | (bond->params.mode == BOND_MODE_TLB) || | 1723 | goto err_undo_flags; |
1742 | (bond->params.mode == BOND_MODE_ALB)) { | ||
1743 | printk(KERN_ERR DRV_NAME | ||
1744 | ": Error: to use %s mode, you must upgrade " | ||
1745 | "ifenslave.\n", | ||
1746 | bond_mode_name(bond->params.mode)); | ||
1747 | res = -EOPNOTSUPP; | ||
1748 | goto err_undo_flags; | ||
1749 | } | ||
1750 | } | 1724 | } |
1751 | 1725 | ||
1752 | new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL); | 1726 | new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL); |
@@ -1762,41 +1736,36 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de | |||
1762 | */ | 1736 | */ |
1763 | new_slave->original_flags = slave_dev->flags; | 1737 | new_slave->original_flags = slave_dev->flags; |
1764 | 1738 | ||
1765 | if (app_abi_ver >= 1) { | 1739 | /* |
1766 | /* save slave's original ("permanent") mac address for | 1740 | * Save slave's original ("permanent") mac address for modes |
1767 | * modes that needs it, and for restoring it upon release, | 1741 | * that need it, and for restoring it upon release, and then |
1768 | * and then set it to the master's address | 1742 | * set it to the master's address |
1769 | */ | 1743 | */ |
1770 | memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN); | 1744 | memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN); |
1771 | 1745 | ||
1772 | /* set slave to master's mac address | 1746 | /* |
1773 | * The application already set the master's | 1747 | * Set slave to master's mac address. The application already |
1774 | * mac address to that of the first slave | 1748 | * set the master's mac address to that of the first slave |
1775 | */ | 1749 | */ |
1776 | memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len); | 1750 | memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len); |
1777 | addr.sa_family = slave_dev->type; | 1751 | addr.sa_family = slave_dev->type; |
1778 | res = dev_set_mac_address(slave_dev, &addr); | 1752 | res = dev_set_mac_address(slave_dev, &addr); |
1779 | if (res) { | 1753 | if (res) { |
1780 | dprintk("Error %d calling set_mac_address\n", res); | 1754 | dprintk("Error %d calling set_mac_address\n", res); |
1781 | goto err_free; | 1755 | goto err_free; |
1782 | } | 1756 | } |
1783 | 1757 | ||
1784 | /* open the slave since the application closed it */ | 1758 | /* open the slave since the application closed it */ |
1785 | res = dev_open(slave_dev); | 1759 | res = dev_open(slave_dev); |
1786 | if (res) { | 1760 | if (res) { |
1787 | dprintk("Openning slave %s failed\n", slave_dev->name); | 1761 | dprintk("Openning slave %s failed\n", slave_dev->name); |
1788 | goto err_restore_mac; | 1762 | goto err_restore_mac; |
1789 | } | ||
1790 | } | 1763 | } |
1791 | 1764 | ||
1792 | res = netdev_set_master(slave_dev, bond_dev); | 1765 | res = netdev_set_master(slave_dev, bond_dev); |
1793 | if (res) { | 1766 | if (res) { |
1794 | dprintk("Error %d calling netdev_set_master\n", res); | 1767 | dprintk("Error %d calling netdev_set_master\n", res); |
1795 | if (app_abi_ver < 1) { | 1768 | goto err_close; |
1796 | goto err_free; | ||
1797 | } else { | ||
1798 | goto err_close; | ||
1799 | } | ||
1800 | } | 1769 | } |
1801 | 1770 | ||
1802 | new_slave->dev = slave_dev; | 1771 | new_slave->dev = slave_dev; |
@@ -1997,39 +1966,6 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de | |||
1997 | 1966 | ||
1998 | write_unlock_bh(&bond->lock); | 1967 | write_unlock_bh(&bond->lock); |
1999 | 1968 | ||
2000 | if (app_abi_ver < 1) { | ||
2001 | /* | ||
2002 | * !!! This is to support old versions of ifenslave. | ||
2003 | * We can remove this in 2.5 because our ifenslave takes | ||
2004 | * care of this for us. | ||
2005 | * We check to see if the master has a mac address yet. | ||
2006 | * If not, we'll give it the mac address of our slave device. | ||
2007 | */ | ||
2008 | int ndx = 0; | ||
2009 | |||
2010 | for (ndx = 0; ndx < bond_dev->addr_len; ndx++) { | ||
2011 | dprintk("Checking ndx=%d of bond_dev->dev_addr\n", | ||
2012 | ndx); | ||
2013 | if (bond_dev->dev_addr[ndx] != 0) { | ||
2014 | dprintk("Found non-zero byte at ndx=%d\n", | ||
2015 | ndx); | ||
2016 | break; | ||
2017 | } | ||
2018 | } | ||
2019 | |||
2020 | if (ndx == bond_dev->addr_len) { | ||
2021 | /* | ||
2022 | * We got all the way through the address and it was | ||
2023 | * all 0's. | ||
2024 | */ | ||
2025 | dprintk("%s doesn't have a MAC address yet. \n", | ||
2026 | bond_dev->name); | ||
2027 | dprintk("Going to give assign it from %s.\n", | ||
2028 | slave_dev->name); | ||
2029 | bond_sethwaddr(bond_dev, slave_dev); | ||
2030 | } | ||
2031 | } | ||
2032 | |||
2033 | printk(KERN_INFO DRV_NAME | 1969 | printk(KERN_INFO DRV_NAME |
2034 | ": %s: enslaving %s as a%s interface with a%s link.\n", | 1970 | ": %s: enslaving %s as a%s interface with a%s link.\n", |
2035 | bond_dev->name, slave_dev->name, | 1971 | bond_dev->name, slave_dev->name, |
@@ -2227,12 +2163,10 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de | |||
2227 | /* close slave before restoring its mac address */ | 2163 | /* close slave before restoring its mac address */ |
2228 | dev_close(slave_dev); | 2164 | dev_close(slave_dev); |
2229 | 2165 | ||
2230 | if (app_abi_ver >= 1) { | 2166 | /* restore original ("permanent") mac address */ |
2231 | /* restore original ("permanent") mac address */ | 2167 | memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); |
2232 | memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); | 2168 | addr.sa_family = slave_dev->type; |
2233 | addr.sa_family = slave_dev->type; | 2169 | dev_set_mac_address(slave_dev, &addr); |
2234 | dev_set_mac_address(slave_dev, &addr); | ||
2235 | } | ||
2236 | 2170 | ||
2237 | /* restore the original state of the | 2171 | /* restore the original state of the |
2238 | * IFF_NOARP flag that might have been | 2172 | * IFF_NOARP flag that might have been |
@@ -2320,12 +2254,10 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2320 | /* close slave before restoring its mac address */ | 2254 | /* close slave before restoring its mac address */ |
2321 | dev_close(slave_dev); | 2255 | dev_close(slave_dev); |
2322 | 2256 | ||
2323 | if (app_abi_ver >= 1) { | 2257 | /* restore original ("permanent") mac address*/ |
2324 | /* restore original ("permanent") mac address*/ | 2258 | memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); |
2325 | memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); | 2259 | addr.sa_family = slave_dev->type; |
2326 | addr.sa_family = slave_dev->type; | 2260 | dev_set_mac_address(slave_dev, &addr); |
2327 | dev_set_mac_address(slave_dev, &addr); | ||
2328 | } | ||
2329 | 2261 | ||
2330 | /* restore the original state of the IFF_NOARP flag that might have | 2262 | /* restore the original state of the IFF_NOARP flag that might have |
2331 | * been set by bond_set_slave_inactive_flags() | 2263 | * been set by bond_set_slave_inactive_flags() |
@@ -2423,57 +2355,6 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi | |||
2423 | return res; | 2355 | return res; |
2424 | } | 2356 | } |
2425 | 2357 | ||
2426 | static int bond_ethtool_ioctl(struct net_device *bond_dev, struct ifreq *ifr) | ||
2427 | { | ||
2428 | struct ethtool_drvinfo info; | ||
2429 | void __user *addr = ifr->ifr_data; | ||
2430 | uint32_t cmd; | ||
2431 | |||
2432 | if (get_user(cmd, (uint32_t __user *)addr)) { | ||
2433 | return -EFAULT; | ||
2434 | } | ||
2435 | |||
2436 | switch (cmd) { | ||
2437 | case ETHTOOL_GDRVINFO: | ||
2438 | if (copy_from_user(&info, addr, sizeof(info))) { | ||
2439 | return -EFAULT; | ||
2440 | } | ||
2441 | |||
2442 | if (strcmp(info.driver, "ifenslave") == 0) { | ||
2443 | int new_abi_ver; | ||
2444 | char *endptr; | ||
2445 | |||
2446 | new_abi_ver = simple_strtoul(info.fw_version, | ||
2447 | &endptr, 0); | ||
2448 | if (*endptr) { | ||
2449 | printk(KERN_ERR DRV_NAME | ||
2450 | ": Error: got invalid ABI " | ||
2451 | "version from application\n"); | ||
2452 | |||
2453 | return -EINVAL; | ||
2454 | } | ||
2455 | |||
2456 | if (orig_app_abi_ver == -1) { | ||
2457 | orig_app_abi_ver = new_abi_ver; | ||
2458 | } | ||
2459 | |||
2460 | app_abi_ver = new_abi_ver; | ||
2461 | } | ||
2462 | |||
2463 | strncpy(info.driver, DRV_NAME, 32); | ||
2464 | strncpy(info.version, DRV_VERSION, 32); | ||
2465 | snprintf(info.fw_version, 32, "%d", BOND_ABI_VERSION); | ||
2466 | |||
2467 | if (copy_to_user(addr, &info, sizeof(info))) { | ||
2468 | return -EFAULT; | ||
2469 | } | ||
2470 | |||
2471 | return 0; | ||
2472 | default: | ||
2473 | return -EOPNOTSUPP; | ||
2474 | } | ||
2475 | } | ||
2476 | |||
2477 | static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) | 2358 | static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) |
2478 | { | 2359 | { |
2479 | struct bonding *bond = bond_dev->priv; | 2360 | struct bonding *bond = bond_dev->priv; |
@@ -3442,16 +3323,11 @@ static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave | |||
3442 | seq_printf(seq, "Link Failure Count: %d\n", | 3323 | seq_printf(seq, "Link Failure Count: %d\n", |
3443 | slave->link_failure_count); | 3324 | slave->link_failure_count); |
3444 | 3325 | ||
3445 | if (app_abi_ver >= 1) { | 3326 | seq_printf(seq, |
3446 | seq_printf(seq, | 3327 | "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n", |
3447 | "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n", | 3328 | slave->perm_hwaddr[0], slave->perm_hwaddr[1], |
3448 | slave->perm_hwaddr[0], | 3329 | slave->perm_hwaddr[2], slave->perm_hwaddr[3], |
3449 | slave->perm_hwaddr[1], | 3330 | slave->perm_hwaddr[4], slave->perm_hwaddr[5]); |
3450 | slave->perm_hwaddr[2], | ||
3451 | slave->perm_hwaddr[3], | ||
3452 | slave->perm_hwaddr[4], | ||
3453 | slave->perm_hwaddr[5]); | ||
3454 | } | ||
3455 | 3331 | ||
3456 | if (bond->params.mode == BOND_MODE_8023AD) { | 3332 | if (bond->params.mode == BOND_MODE_8023AD) { |
3457 | const struct aggregator *agg | 3333 | const struct aggregator *agg |
@@ -4010,15 +3886,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd | |||
4010 | struct ifslave k_sinfo; | 3886 | struct ifslave k_sinfo; |
4011 | struct ifslave __user *u_sinfo = NULL; | 3887 | struct ifslave __user *u_sinfo = NULL; |
4012 | struct mii_ioctl_data *mii = NULL; | 3888 | struct mii_ioctl_data *mii = NULL; |
4013 | int prev_abi_ver = orig_app_abi_ver; | ||
4014 | int res = 0; | 3889 | int res = 0; |
4015 | 3890 | ||
4016 | dprintk("bond_ioctl: master=%s, cmd=%d\n", | 3891 | dprintk("bond_ioctl: master=%s, cmd=%d\n", |
4017 | bond_dev->name, cmd); | 3892 | bond_dev->name, cmd); |
4018 | 3893 | ||
4019 | switch (cmd) { | 3894 | switch (cmd) { |
4020 | case SIOCETHTOOL: | ||
4021 | return bond_ethtool_ioctl(bond_dev, ifr); | ||
4022 | case SIOCGMIIPHY: | 3895 | case SIOCGMIIPHY: |
4023 | mii = if_mii(ifr); | 3896 | mii = if_mii(ifr); |
4024 | if (!mii) { | 3897 | if (!mii) { |
@@ -4090,21 +3963,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd | |||
4090 | return -EPERM; | 3963 | return -EPERM; |
4091 | } | 3964 | } |
4092 | 3965 | ||
4093 | if (orig_app_abi_ver == -1) { | ||
4094 | /* no orig_app_abi_ver was provided yet, so we'll use the | ||
4095 | * current one from now on, even if it's 0 | ||
4096 | */ | ||
4097 | orig_app_abi_ver = app_abi_ver; | ||
4098 | |||
4099 | } else if (orig_app_abi_ver != app_abi_ver) { | ||
4100 | printk(KERN_ERR DRV_NAME | ||
4101 | ": Error: already using ifenslave ABI version %d; to " | ||
4102 | "upgrade ifenslave to version %d, you must first " | ||
4103 | "reload bonding.\n", | ||
4104 | orig_app_abi_ver, app_abi_ver); | ||
4105 | return -EINVAL; | ||
4106 | } | ||
4107 | |||
4108 | slave_dev = dev_get_by_name(ifr->ifr_slave); | 3966 | slave_dev = dev_get_by_name(ifr->ifr_slave); |
4109 | 3967 | ||
4110 | dprintk("slave_dev=%p: \n", slave_dev); | 3968 | dprintk("slave_dev=%p: \n", slave_dev); |
@@ -4137,14 +3995,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd | |||
4137 | dev_put(slave_dev); | 3995 | dev_put(slave_dev); |
4138 | } | 3996 | } |
4139 | 3997 | ||
4140 | if (res < 0) { | ||
4141 | /* The ioctl failed, so there's no point in changing the | ||
4142 | * orig_app_abi_ver. We'll restore it's value just in case | ||
4143 | * we've changed it earlier in this function. | ||
4144 | */ | ||
4145 | orig_app_abi_ver = prev_abi_ver; | ||
4146 | } | ||
4147 | |||
4148 | return res; | 3998 | return res; |
4149 | } | 3999 | } |
4150 | 4000 | ||
@@ -4578,9 +4428,18 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode) | |||
4578 | } | 4428 | } |
4579 | } | 4429 | } |
4580 | 4430 | ||
4431 | static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, | ||
4432 | struct ethtool_drvinfo *drvinfo) | ||
4433 | { | ||
4434 | strncpy(drvinfo->driver, DRV_NAME, 32); | ||
4435 | strncpy(drvinfo->version, DRV_VERSION, 32); | ||
4436 | snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION); | ||
4437 | } | ||
4438 | |||
4581 | static struct ethtool_ops bond_ethtool_ops = { | 4439 | static struct ethtool_ops bond_ethtool_ops = { |
4582 | .get_tx_csum = ethtool_op_get_tx_csum, | 4440 | .get_tx_csum = ethtool_op_get_tx_csum, |
4583 | .get_sg = ethtool_op_get_sg, | 4441 | .get_sg = ethtool_op_get_sg, |
4442 | .get_drvinfo = bond_ethtool_get_drvinfo, | ||
4584 | }; | 4443 | }; |
4585 | 4444 | ||
4586 | /* | 4445 | /* |
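The bond_main.c hunks above drop the driver's private SIOCETHTOOL handler (which parsed an ifenslave ABI version out of the fw_version field) along with the app_abi_ver bookkeeping, and report DRV_NAME, DRV_VERSION and BOND_ABI_VERSION through the standard ethtool_ops .get_drvinfo hook instead. A minimal userspace sketch of reading that information back with the same ETHTOOL_GDRVINFO command ethtool itself uses; the interface name "bond0" is only an example.

/* Sketch: query drvinfo via the ethtool ioctl; "bond0" is an example name. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "bond0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&info;	/* kernel fills driver/version/fw_version */

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}
	printf("driver %s, version %s, ABI %s\n",
	       info.driver, info.version, info.fw_version);
	return 0;
}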
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 388196980862..bbf9da8af624 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -40,8 +40,8 @@ | |||
40 | #include "bond_3ad.h" | 40 | #include "bond_3ad.h" |
41 | #include "bond_alb.h" | 41 | #include "bond_alb.h" |
42 | 42 | ||
43 | #define DRV_VERSION "2.6.3" | 43 | #define DRV_VERSION "2.6.4" |
44 | #define DRV_RELDATE "June 8, 2005" | 44 | #define DRV_RELDATE "September 26, 2005" |
45 | #define DRV_NAME "bonding" | 45 | #define DRV_NAME "bonding" |
46 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" | 46 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" |
47 | 47 | ||
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c index 0de3bb906174..14e9b6315f20 100644 --- a/drivers/net/ibm_emac/ibm_emac_core.c +++ b/drivers/net/ibm_emac/ibm_emac_core.c | |||
@@ -1875,6 +1875,9 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal) | |||
1875 | rc = -ENODEV; | 1875 | rc = -ENODEV; |
1876 | goto bail; | 1876 | goto bail; |
1877 | } | 1877 | } |
1878 | |||
1879 | /* Disable any PHY features not supported by the platform */ | ||
1880 | ep->phy_mii.def->features &= ~emacdata->phy_feat_exc; | ||
1878 | 1881 | ||
1879 | /* Setup initial PHY config & startup aneg */ | 1882 | /* Setup initial PHY config & startup aneg */ |
1880 | if (ep->phy_mii.def->ops->init) | 1883 | if (ep->phy_mii.def->ops->init) |
@@ -1882,6 +1885,34 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal) | |||
1882 | netif_carrier_off(ndev); | 1885 | netif_carrier_off(ndev); |
1883 | if (ep->phy_mii.def->features & SUPPORTED_Autoneg) | 1886 | if (ep->phy_mii.def->features & SUPPORTED_Autoneg) |
1884 | ep->want_autoneg = 1; | 1887 | ep->want_autoneg = 1; |
1888 | else { | ||
1889 | ep->want_autoneg = 0; | ||
1890 | |||
1891 | /* Select highest supported speed/duplex */ | ||
1892 | if (ep->phy_mii.def->features & SUPPORTED_1000baseT_Full) { | ||
1893 | ep->phy_mii.speed = SPEED_1000; | ||
1894 | ep->phy_mii.duplex = DUPLEX_FULL; | ||
1895 | } else if (ep->phy_mii.def->features & | ||
1896 | SUPPORTED_1000baseT_Half) { | ||
1897 | ep->phy_mii.speed = SPEED_1000; | ||
1898 | ep->phy_mii.duplex = DUPLEX_HALF; | ||
1899 | } else if (ep->phy_mii.def->features & | ||
1900 | SUPPORTED_100baseT_Full) { | ||
1901 | ep->phy_mii.speed = SPEED_100; | ||
1902 | ep->phy_mii.duplex = DUPLEX_FULL; | ||
1903 | } else if (ep->phy_mii.def->features & | ||
1904 | SUPPORTED_100baseT_Half) { | ||
1905 | ep->phy_mii.speed = SPEED_100; | ||
1906 | ep->phy_mii.duplex = DUPLEX_HALF; | ||
1907 | } else if (ep->phy_mii.def->features & | ||
1908 | SUPPORTED_10baseT_Full) { | ||
1909 | ep->phy_mii.speed = SPEED_10; | ||
1910 | ep->phy_mii.duplex = DUPLEX_FULL; | ||
1911 | } else { | ||
1912 | ep->phy_mii.speed = SPEED_10; | ||
1913 | ep->phy_mii.duplex = DUPLEX_HALF; | ||
1914 | } | ||
1915 | } | ||
1885 | emac_start_link(ep, NULL); | 1916 | emac_start_link(ep, NULL); |
1886 | 1917 | ||
1887 | /* read the MAC Address */ | 1918 | /* read the MAC Address */ |
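When the PHY advertises no autonegotiation support, the added ibm_emac code forces the best speed/duplex the feature mask allows, walking from 1000/full down to 10/half. The same selection order as a standalone helper, using the SUPPORTED_*, SPEED_* and DUPLEX_* constants from <linux/ethtool.h>; pick_forced_mode() is a hypothetical name, not part of the driver.

/* Sketch: choose a forced speed/duplex from an ethtool feature mask,
 * highest first, mirroring the hunk above. */
#include <linux/types.h>
#include <linux/ethtool.h>

static void pick_forced_mode(u32 features, int *speed, int *duplex)
{
	if (features & SUPPORTED_1000baseT_Full) {
		*speed = SPEED_1000; *duplex = DUPLEX_FULL;
	} else if (features & SUPPORTED_1000baseT_Half) {
		*speed = SPEED_1000; *duplex = DUPLEX_HALF;
	} else if (features & SUPPORTED_100baseT_Full) {
		*speed = SPEED_100;  *duplex = DUPLEX_FULL;
	} else if (features & SUPPORTED_100baseT_Half) {
		*speed = SPEED_100;  *duplex = DUPLEX_HALF;
	} else if (features & SUPPORTED_10baseT_Full) {
		*speed = SPEED_10;   *duplex = DUPLEX_FULL;
	} else {
		*speed = SPEED_10;   *duplex = DUPLEX_HALF;	/* lowest common denominator */
	}
}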
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index e64df4d0800b..83334db2921c 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c | |||
@@ -584,7 +584,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb) | |||
584 | return 0; | 584 | return 0; |
585 | } | 585 | } |
586 | 586 | ||
587 | static inline int rx_refill(struct net_device *ndev, int gfp) | 587 | static inline int rx_refill(struct net_device *ndev, unsigned int __nocast gfp) |
588 | { | 588 | { |
589 | struct ns83820 *dev = PRIV(ndev); | 589 | struct ns83820 *dev = PRIV(ndev); |
590 | unsigned i; | 590 | unsigned i; |
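The rx_refill() change (and the matching gem_alloc_skb() change in sungem.h further down) only touches the prototype: the gfp argument gains a __nocast annotation so the sparse checker can flag implicit conversions of allocation flags. A rough sketch of how that annotation is typically wired up, assuming the compiler.h convention of this era; under a normal gcc build it expands to nothing, and the helper below is purely illustrative.

/* Sketch: __nocast only matters to sparse (__CHECKER__ builds). */
#ifdef __CHECKER__
# define __nocast __attribute__((nocast))
#else
# define __nocast
#endif

static int fill_ring(unsigned int count, unsigned int __nocast gfp_flags)
{
	/* the real code would pass gfp_flags straight to alloc_skb()/kmalloc() */
	(void)count;
	(void)gfp_flags;
	return 0;
}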
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index d652e1eddb45..c7cca842e5ee 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -1832,7 +1832,7 @@ static void fill_multicast_tbl(int count, struct dev_mc_list *addrs, | |||
1832 | { | 1832 | { |
1833 | struct dev_mc_list *mc_addr; | 1833 | struct dev_mc_list *mc_addr; |
1834 | 1834 | ||
1835 | for (mc_addr = addrs; mc_addr && --count > 0; mc_addr = mc_addr->next) { | 1835 | for (mc_addr = addrs; mc_addr && count-- > 0; mc_addr = mc_addr->next) { |
1836 | u_int position = ether_crc(6, mc_addr->dmi_addr); | 1836 | u_int position = ether_crc(6, mc_addr->dmi_addr); |
1837 | #ifndef final_version /* Verify multicast address. */ | 1837 | #ifndef final_version /* Verify multicast address. */ |
1838 | if ((mc_addr->dmi_addr[0] & 1) == 0) | 1838 | if ((mc_addr->dmi_addr[0] & 1) == 0) |
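The smc91c92 fix is a single operator: with --count > 0 the loop body ran only count-1 times, so the last multicast address never made it into the hash table; count-- > 0 visits all count entries. A tiny self-contained demonstration of the difference, independent of the driver:

/* Sketch: pre- vs post-decrement in a loop guard (count = 3). */
#include <stdio.h>

int main(void)
{
	int count, iterations;

	for (count = 3, iterations = 0; --count > 0; )
		iterations++;
	printf("--count > 0: %d iterations\n", iterations);	/* prints 2 */

	for (count = 3, iterations = 0; count-- > 0; )
		iterations++;
	printf("count-- > 0: %d iterations\n", iterations);	/* prints 3 */

	return 0;
}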
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index fd398da4993b..c2e6484ef138 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -2837,21 +2837,29 @@ static void skge_netpoll(struct net_device *dev) | |||
2837 | static int skge_set_mac_address(struct net_device *dev, void *p) | 2837 | static int skge_set_mac_address(struct net_device *dev, void *p) |
2838 | { | 2838 | { |
2839 | struct skge_port *skge = netdev_priv(dev); | 2839 | struct skge_port *skge = netdev_priv(dev); |
2840 | struct sockaddr *addr = p; | 2840 | struct skge_hw *hw = skge->hw; |
2841 | int err = 0; | 2841 | unsigned port = skge->port; |
2842 | const struct sockaddr *addr = p; | ||
2842 | 2843 | ||
2843 | if (!is_valid_ether_addr(addr->sa_data)) | 2844 | if (!is_valid_ether_addr(addr->sa_data)) |
2844 | return -EADDRNOTAVAIL; | 2845 | return -EADDRNOTAVAIL; |
2845 | 2846 | ||
2846 | skge_down(dev); | 2847 | spin_lock_bh(&hw->phy_lock); |
2847 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | 2848 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); |
2848 | memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8, | 2849 | memcpy_toio(hw->regs + B2_MAC_1 + port*8, |
2849 | dev->dev_addr, ETH_ALEN); | 2850 | dev->dev_addr, ETH_ALEN); |
2850 | memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8, | 2851 | memcpy_toio(hw->regs + B2_MAC_2 + port*8, |
2851 | dev->dev_addr, ETH_ALEN); | 2852 | dev->dev_addr, ETH_ALEN); |
2852 | if (dev->flags & IFF_UP) | 2853 | |
2853 | err = skge_up(dev); | 2854 | if (hw->chip_id == CHIP_ID_GENESIS) |
2854 | return err; | 2855 | xm_outaddr(hw, port, XM_SA, dev->dev_addr); |
2856 | else { | ||
2857 | gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); | ||
2858 | gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); | ||
2859 | } | ||
2860 | spin_unlock_bh(&hw->phy_lock); | ||
2861 | |||
2862 | return 0; | ||
2855 | } | 2863 | } |
2856 | 2864 | ||
2857 | static const struct { | 2865 | static const struct { |
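The skge change stops bouncing the link (skge_down()/skge_up()) just to change the station address; it updates dev->dev_addr and the chip's address registers directly under the phy_lock, for both Genesis (XMAC) and Yukon (GMA) variants. The general shape of such a locked set_mac_address handler, reduced to a sketch; example_lock and push_addr_to_hw() stand in for the driver's hw->phy_lock and register writes.

/* Sketch of a set_mac_address handler that does not take the link down. */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
#include <linux/string.h>

static DEFINE_SPINLOCK(example_lock);		/* stands in for hw->phy_lock */

static void push_addr_to_hw(struct net_device *dev)
{
	/* hypothetical: memcpy_toio()/register writes with the new address */
}

static int example_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;		/* reject zero/multicast addresses */

	spin_lock_bh(&example_lock);		/* serialize against softirq users */
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	push_addr_to_hw(dev);			/* hardware copy follows the software copy */
	spin_unlock_bh(&example_lock);

	return 0;				/* link stays up throughout */
}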
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 88b89dc95c77..efdb179ecc8c 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c | |||
@@ -133,14 +133,18 @@ | |||
133 | - finally added firmware (GPL'ed by Adaptec) | 133 | - finally added firmware (GPL'ed by Adaptec) |
134 | - removed compatibility code for 2.2.x | 134 | - removed compatibility code for 2.2.x |
135 | 135 | ||
136 | LK1.4.2.1 (Ion Badulescu) | ||
137 | - fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM | ||
138 | - added 32-bit padding to outgoing skb's, removed previous workaround | ||
139 | |||
136 | TODO: - fix forced speed/duplexing code (broken a long time ago, when | 140 | TODO: - fix forced speed/duplexing code (broken a long time ago, when |
137 | somebody converted the driver to use the generic MII code) | 141 | somebody converted the driver to use the generic MII code) |
138 | - fix VLAN support | 142 | - fix VLAN support |
139 | */ | 143 | */ |
140 | 144 | ||
141 | #define DRV_NAME "starfire" | 145 | #define DRV_NAME "starfire" |
142 | #define DRV_VERSION "1.03+LK1.4.2" | 146 | #define DRV_VERSION "1.03+LK1.4.2.1" |
143 | #define DRV_RELDATE "January 19, 2005" | 147 | #define DRV_RELDATE "October 3, 2005" |
144 | 148 | ||
145 | #include <linux/config.h> | 149 | #include <linux/config.h> |
146 | #include <linux/version.h> | 150 | #include <linux/version.h> |
@@ -165,6 +169,14 @@ TODO: - fix forced speed/duplexing code (broken a long time ago, when | |||
165 | * of length 1. If and when this is fixed, the #define below can be removed. | 169 | * of length 1. If and when this is fixed, the #define below can be removed. |
166 | */ | 170 | */ |
167 | #define HAS_BROKEN_FIRMWARE | 171 | #define HAS_BROKEN_FIRMWARE |
172 | |||
173 | /* | ||
174 | * If using the broken firmware, data must be padded to the next 32-bit boundary. | ||
175 | */ | ||
176 | #ifdef HAS_BROKEN_FIRMWARE | ||
177 | #define PADDING_MASK 3 | ||
178 | #endif | ||
179 | |||
168 | /* | 180 | /* |
169 | * Define this if using the driver with the zero-copy patch | 181 | * Define this if using the driver with the zero-copy patch |
170 | */ | 182 | */ |
@@ -257,9 +269,10 @@ static int full_duplex[MAX_UNITS] = {0, }; | |||
257 | * This SUCKS. | 269 | * This SUCKS. |
258 | * We need a much better method to determine if dma_addr_t is 64-bit. | 270 | * We need a much better method to determine if dma_addr_t is 64-bit. |
259 | */ | 271 | */ |
260 | #if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) | 272 | #if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) |
261 | /* 64-bit dma_addr_t */ | 273 | /* 64-bit dma_addr_t */ |
262 | #define ADDR_64BITS /* This chip uses 64 bit addresses. */ | 274 | #define ADDR_64BITS /* This chip uses 64 bit addresses. */ |
275 | #define netdrv_addr_t u64 | ||
263 | #define cpu_to_dma(x) cpu_to_le64(x) | 276 | #define cpu_to_dma(x) cpu_to_le64(x) |
264 | #define dma_to_cpu(x) le64_to_cpu(x) | 277 | #define dma_to_cpu(x) le64_to_cpu(x) |
265 | #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit | 278 | #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit |
@@ -268,6 +281,7 @@ static int full_duplex[MAX_UNITS] = {0, }; | |||
268 | #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit | 281 | #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit |
269 | #define RX_DESC_ADDR_SIZE RxDescAddr64bit | 282 | #define RX_DESC_ADDR_SIZE RxDescAddr64bit |
270 | #else /* 32-bit dma_addr_t */ | 283 | #else /* 32-bit dma_addr_t */ |
284 | #define netdrv_addr_t u32 | ||
271 | #define cpu_to_dma(x) cpu_to_le32(x) | 285 | #define cpu_to_dma(x) cpu_to_le32(x) |
272 | #define dma_to_cpu(x) le32_to_cpu(x) | 286 | #define dma_to_cpu(x) le32_to_cpu(x) |
273 | #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit | 287 | #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit |
@@ -1333,21 +1347,10 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1333 | } | 1347 | } |
1334 | 1348 | ||
1335 | #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) | 1349 | #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) |
1336 | { | 1350 | if (skb->ip_summed == CHECKSUM_HW) { |
1337 | int has_bad_length = 0; | 1351 | skb = skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK); |
1338 | 1352 | if (skb == NULL) | |
1339 | if (skb_first_frag_len(skb) == 1) | 1353 | return NETDEV_TX_OK; |
1340 | has_bad_length = 1; | ||
1341 | else { | ||
1342 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | ||
1343 | if (skb_shinfo(skb)->frags[i].size == 1) { | ||
1344 | has_bad_length = 1; | ||
1345 | break; | ||
1346 | } | ||
1347 | } | ||
1348 | |||
1349 | if (has_bad_length) | ||
1350 | skb_checksum_help(skb, 0); | ||
1351 | } | 1354 | } |
1352 | #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ | 1355 | #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ |
1353 | 1356 | ||
@@ -2127,13 +2130,12 @@ static int __init starfire_init (void) | |||
2127 | #endif | 2130 | #endif |
2128 | #endif | 2131 | #endif |
2129 | 2132 | ||
2130 | #ifndef ADDR_64BITS | ||
2131 | /* we can do this test only at run-time... sigh */ | 2133 | /* we can do this test only at run-time... sigh */ |
2132 | if (sizeof(dma_addr_t) == sizeof(u64)) { | 2134 | if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) { |
2133 | printk("This driver has not been ported to this 64-bit architecture yet\n"); | 2135 | printk("This driver has dma_addr_t issues, please send email to maintainer\n"); |
2134 | return -ENODEV; | 2136 | return -ENODEV; |
2135 | } | 2137 | } |
2136 | #endif /* not ADDR_64BITS */ | 2138 | |
2137 | return pci_module_init (&starfire_driver); | 2139 | return pci_module_init (&starfire_driver); |
2138 | } | 2140 | } |
2139 | 2141 | ||
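In start_tx() the old "force software checksumming when any fragment is 1 byte" workaround is replaced by padding the frame to the next 32-bit boundary with skb_padto(); in this kernel generation skb_padto() returns the (possibly reallocated) skb, or NULL when the padding allocation fails, which is why the hunk re-assigns skb and bails out with NETDEV_TX_OK. The rounding itself is the usual power-of-two mask idiom; a small standalone illustration:

/* Sketch: round a length up to a multiple of 4, as in
 * (skb->len + PADDING_MASK) & ~PADDING_MASK above. */
#include <stdio.h>

#define PADDING_MASK 3	/* alignment - 1; alignment must be a power of two */

static unsigned int pad_len(unsigned int len)
{
	return (len + PADDING_MASK) & ~PADDING_MASK;
}

int main(void)
{
	unsigned int len;

	for (len = 60; len <= 65; len++)
		printf("%u -> %u\n", len, pad_len(len));
	/* 60->60, 61->64, 62->64, 63->64, 64->64, 65->68 */
	return 0;
}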
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index ff8ae5f79970..16edbb1a4a7a 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h | |||
@@ -1035,7 +1035,8 @@ struct gem { | |||
1035 | 1035 | ||
1036 | #define ALIGNED_RX_SKB_ADDR(addr) \ | 1036 | #define ALIGNED_RX_SKB_ADDR(addr) \ |
1037 | ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) | 1037 | ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) |
1038 | static __inline__ struct sk_buff *gem_alloc_skb(int size, int gfp_flags) | 1038 | static __inline__ struct sk_buff *gem_alloc_skb(int size, |
1039 | unsigned int __nocast gfp_flags) | ||
1039 | { | 1040 | { |
1040 | struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); | 1041 | struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); |
1041 | 1042 | ||
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index e7b001017b9a..32057e65808b 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c | |||
@@ -531,7 +531,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) | |||
531 | if (!time_after(jiffies, timeout)) continue; | 531 | if (!time_after(jiffies, timeout)) continue; |
532 | DPRINTK( "Hardware timeout during initialization.\n"); | 532 | DPRINTK( "Hardware timeout during initialization.\n"); |
533 | iounmap(t_mmio); | 533 | iounmap(t_mmio); |
534 | kfree(ti); | ||
535 | return -ENODEV; | 534 | return -ENODEV; |
536 | } | 535 | } |
537 | ti->sram_phys = | 536 | ti->sram_phys = |
@@ -645,7 +644,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) | |||
645 | DPRINTK("Unknown shared ram paging info %01X\n", | 644 | DPRINTK("Unknown shared ram paging info %01X\n", |
646 | ti->shared_ram_paging); | 645 | ti->shared_ram_paging); |
647 | iounmap(t_mmio); | 646 | iounmap(t_mmio); |
648 | kfree(ti); | ||
649 | return -ENODEV; | 647 | return -ENODEV; |
650 | break; | 648 | break; |
651 | } /*end switch shared_ram_paging */ | 649 | } /*end switch shared_ram_paging */ |
@@ -675,7 +673,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) | |||
675 | "driver limit (%05x), adapter not started.\n", | 673 | "driver limit (%05x), adapter not started.\n", |
676 | chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE); | 674 | chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE); |
677 | iounmap(t_mmio); | 675 | iounmap(t_mmio); |
678 | kfree(ti); | ||
679 | return -ENODEV; | 676 | return -ENODEV; |
680 | } else { /* seems cool, record what we have figured out */ | 677 | } else { /* seems cool, record what we have figured out */ |
681 | ti->sram_base = new_base >> 12; | 678 | ti->sram_base = new_base >> 12; |
@@ -690,7 +687,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) | |||
690 | DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n", | 687 | DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n", |
691 | irq); | 688 | irq); |
692 | iounmap(t_mmio); | 689 | iounmap(t_mmio); |
693 | kfree(ti); | ||
694 | return -ENODEV; | 690 | return -ENODEV; |
695 | } | 691 | } |
696 | /*?? Now, allocate some of the PIO PORTs for this driver.. */ | 692 | /*?? Now, allocate some of the PIO PORTs for this driver.. */ |
@@ -699,7 +695,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) | |||
699 | DPRINTK("Could not grab PIO range. Halting driver.\n"); | 695 | DPRINTK("Could not grab PIO range. Halting driver.\n"); |
700 | free_irq(dev->irq, dev); | 696 | free_irq(dev->irq, dev); |
701 | iounmap(t_mmio); | 697 | iounmap(t_mmio); |
702 | kfree(ti); | ||
703 | return -EBUSY; | 698 | return -EBUSY; |
704 | } | 699 | } |
705 | 700 | ||
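The removed kfree(ti) calls in ibmtr_probe1() were freeing memory that was never allocated on its own: assuming the usual alloc_trdev()/netdev_priv() pairing, ti is embedded in the net_device allocation, so freeing it by hand corrupts the heap and the later free_netdev() frees it a second time. A minimal sketch of that ownership rule; struct example_priv and example_probe() are illustrative only.

/* Sketch: priv data allocated with the net_device is released by
 * free_netdev(); never kfree() what netdev_priv() returns. */
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>

struct example_priv {
	int state;
};

static int example_probe(void)
{
	struct net_device *dev = alloc_trdev(sizeof(struct example_priv));
	struct example_priv *priv;

	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);	/* lives inside dev's own allocation */
	priv->state = 1;

	/* on an error path: */
	free_netdev(dev);		/* frees priv too; kfree(priv) would double-free */
	return -ENODEV;
}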
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c index 5db694c4eb02..683f14b01c06 100644 --- a/drivers/net/tulip/21142.c +++ b/drivers/net/tulip/21142.c | |||
@@ -172,7 +172,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5) | |||
172 | int i; | 172 | int i; |
173 | for (i = 0; i < tp->mtable->leafcount; i++) | 173 | for (i = 0; i < tp->mtable->leafcount; i++) |
174 | if (tp->mtable->mleaf[i].media == dev->if_port) { | 174 | if (tp->mtable->mleaf[i].media == dev->if_port) { |
175 | int startup = ! ((tp->chip_id == DC21143 && tp->revision == 65)); | 175 | int startup = ! ((tp->chip_id == DC21143 && (tp->revision == 48 || tp->revision == 65))); |
176 | tp->cur_index = i; | 176 | tp->cur_index = i; |
177 | tulip_select_media(dev, startup); | 177 | tulip_select_media(dev, startup); |
178 | setup_done = 1; | 178 | setup_done = 1; |
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 6deb7cc810cc..cf3daaa1b369 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c | |||
@@ -503,9 +503,14 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev) | |||
503 | return 0; | 503 | return 0; |
504 | } | 504 | } |
505 | 505 | ||
506 | /* Length of the packet body */ | 506 | /* Check packet length, pad short packets, round up odd length */ |
507 | /* FIXME: what if the skb is smaller than this? */ | 507 | len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN); |
508 | len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN); | 508 | if (skb->len < len) { |
509 | skb = skb_padto(skb, len); | ||
510 | if (skb == NULL) | ||
511 | goto fail; | ||
512 | } | ||
513 | len -= ETH_HLEN; | ||
509 | 514 | ||
510 | eh = (struct ethhdr *)skb->data; | 515 | eh = (struct ethhdr *)skb->data; |
511 | 516 | ||
@@ -557,8 +562,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev) | |||
557 | p = skb->data; | 562 | p = skb->data; |
558 | } | 563 | } |
559 | 564 | ||
560 | /* Round up for odd length packets */ | 565 | err = hermes_bap_pwrite(hw, USER_BAP, p, data_len, |
561 | err = hermes_bap_pwrite(hw, USER_BAP, p, ALIGN(data_len, 2), | ||
562 | txfid, data_off); | 566 | txfid, data_off); |
563 | if (err) { | 567 | if (err) { |
564 | printk(KERN_ERR "%s: Error %d writing packet to BAP\n", | 568 | printk(KERN_ERR "%s: Error %d writing packet to BAP\n", |
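The orinoco_xmit() rework answers the old FIXME: the transmit length is now the larger of the even-rounded skb length and ETH_ZLEN, short frames are padded in the skb itself with skb_padto(), and the later ALIGN() at the BAP write goes away because the buffer is already even. The length rule in isolation, with the kernel constants written out (ETH_ZLEN is 60, ETH_HLEN is 14):

/* Sketch: minimum/even frame length as computed above; values are examples. */
#include <stdio.h>

#define ETH_ZLEN 60	/* minimum frame length, without FCS */
#define ETH_HLEN 14	/* Ethernet header */
#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))
#define MAX(a, b)    ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int skb_len = 47;			/* odd-length runt frame */
	unsigned int len = MAX(ALIGN(skb_len, 2), ETH_ZLEN);

	printf("frame %u -> padded %u, body %u\n",
	       skb_len, len, len - ETH_HLEN);		/* 47 -> 60, body 46 */
	return 0;
}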
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h index 2ad4797ce024..9963479ba89f 100644 --- a/drivers/s390/net/qeth.h +++ b/drivers/s390/net/qeth.h | |||
@@ -686,6 +686,7 @@ struct qeth_seqno { | |||
686 | __u32 pdu_hdr; | 686 | __u32 pdu_hdr; |
687 | __u32 pdu_hdr_ack; | 687 | __u32 pdu_hdr_ack; |
688 | __u16 ipa; | 688 | __u16 ipa; |
689 | __u32 pkt_seqno; | ||
689 | }; | 690 | }; |
690 | 691 | ||
691 | struct qeth_reply { | 692 | struct qeth_reply { |
@@ -848,6 +849,7 @@ qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size) | |||
848 | "on interface %s", QETH_CARD_IFNAME(card)); | 849 | "on interface %s", QETH_CARD_IFNAME(card)); |
849 | return -ENOMEM; | 850 | return -ENOMEM; |
850 | } | 851 | } |
852 | kfree_skb(*skb); | ||
851 | *skb = new_skb; | 853 | *skb = new_skb; |
852 | } | 854 | } |
853 | return 0; | 855 | return 0; |
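The one-line qeth_realloc_headroom() fix plugs an skb leak: skb_realloc_headroom() returns a separate copy and does not consume its argument, so the original buffer must be freed before the pointer is replaced. The pattern reduced to a sketch, with the driver's tracing and accounting left out:

/* Sketch: grow an skb's headroom; the caller owns (and must free) the old skb. */
#include <linux/errno.h>
#include <linux/skbuff.h>

static int grow_headroom(struct sk_buff **skb, unsigned int headroom)
{
	struct sk_buff *new_skb;

	if (skb_headroom(*skb) >= headroom)
		return 0;			/* already enough room */

	new_skb = skb_realloc_headroom(*skb, headroom);
	if (!new_skb)
		return -ENOMEM;

	kfree_skb(*skb);			/* without this the old buffer leaks */
	*skb = new_skb;
	return 0;
}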
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 71de834ece1a..bd28e2438d7f 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c | |||
@@ -511,7 +511,7 @@ static int | |||
511 | __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode) | 511 | __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode) |
512 | { | 512 | { |
513 | struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data; | 513 | struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data; |
514 | int rc = 0; | 514 | int rc = 0, rc2 = 0, rc3 = 0; |
515 | enum qeth_card_states recover_flag; | 515 | enum qeth_card_states recover_flag; |
516 | 516 | ||
517 | QETH_DBF_TEXT(setup, 3, "setoffl"); | 517 | QETH_DBF_TEXT(setup, 3, "setoffl"); |
@@ -523,11 +523,13 @@ __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode) | |||
523 | CARD_BUS_ID(card)); | 523 | CARD_BUS_ID(card)); |
524 | return -ERESTARTSYS; | 524 | return -ERESTARTSYS; |
525 | } | 525 | } |
526 | if ((rc = ccw_device_set_offline(CARD_DDEV(card))) || | 526 | rc = ccw_device_set_offline(CARD_DDEV(card)); |
527 | (rc = ccw_device_set_offline(CARD_WDEV(card))) || | 527 | rc2 = ccw_device_set_offline(CARD_WDEV(card)); |
528 | (rc = ccw_device_set_offline(CARD_RDEV(card)))) { | 528 | rc3 = ccw_device_set_offline(CARD_RDEV(card)); |
529 | if (!rc) | ||
530 | rc = (rc2) ? rc2 : rc3; | ||
531 | if (rc) | ||
529 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 532 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); |
530 | } | ||
531 | if (recover_flag == CARD_STATE_UP) | 533 | if (recover_flag == CARD_STATE_UP) |
532 | card->state = CARD_STATE_RECOVER; | 534 | card->state = CARD_STATE_RECOVER; |
533 | qeth_notify_processes(); | 535 | qeth_notify_processes(); |
@@ -1046,6 +1048,7 @@ qeth_setup_card(struct qeth_card *card) | |||
1046 | spin_lock_init(&card->vlanlock); | 1048 | spin_lock_init(&card->vlanlock); |
1047 | card->vlangrp = NULL; | 1049 | card->vlangrp = NULL; |
1048 | #endif | 1050 | #endif |
1051 | spin_lock_init(&card->lock); | ||
1049 | spin_lock_init(&card->ip_lock); | 1052 | spin_lock_init(&card->ip_lock); |
1050 | spin_lock_init(&card->thread_mask_lock); | 1053 | spin_lock_init(&card->thread_mask_lock); |
1051 | card->thread_start_mask = 0; | 1054 | card->thread_start_mask = 0; |
@@ -1626,16 +1629,6 @@ qeth_cmd_timeout(unsigned long data) | |||
1626 | spin_unlock_irqrestore(&reply->card->lock, flags); | 1629 | spin_unlock_irqrestore(&reply->card->lock, flags); |
1627 | } | 1630 | } |
1628 | 1631 | ||
1629 | static void | ||
1630 | qeth_reset_ip_addresses(struct qeth_card *card) | ||
1631 | { | ||
1632 | QETH_DBF_TEXT(trace, 2, "rstipadd"); | ||
1633 | |||
1634 | qeth_clear_ip_list(card, 0, 1); | ||
1635 | /* this function will also schedule the SET_IP_THREAD */ | ||
1636 | qeth_set_multicast_list(card->dev); | ||
1637 | } | ||
1638 | |||
1639 | static struct qeth_ipa_cmd * | 1632 | static struct qeth_ipa_cmd * |
1640 | qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) | 1633 | qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) |
1641 | { | 1634 | { |
@@ -1664,9 +1657,8 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) | |||
1664 | "IP address reset.\n", | 1657 | "IP address reset.\n", |
1665 | QETH_CARD_IFNAME(card), | 1658 | QETH_CARD_IFNAME(card), |
1666 | card->info.chpid); | 1659 | card->info.chpid); |
1667 | card->lan_online = 1; | ||
1668 | netif_carrier_on(card->dev); | 1660 | netif_carrier_on(card->dev); |
1669 | qeth_reset_ip_addresses(card); | 1661 | qeth_schedule_recovery(card); |
1670 | return NULL; | 1662 | return NULL; |
1671 | case IPA_CMD_REGISTER_LOCAL_ADDR: | 1663 | case IPA_CMD_REGISTER_LOCAL_ADDR: |
1672 | QETH_DBF_TEXT(trace,3, "irla"); | 1664 | QETH_DBF_TEXT(trace,3, "irla"); |
@@ -2387,6 +2379,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | |||
2387 | skb_pull(skb, VLAN_HLEN); | 2379 | skb_pull(skb, VLAN_HLEN); |
2388 | } | 2380 | } |
2389 | #endif | 2381 | #endif |
2382 | *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; | ||
2390 | return vlan_id; | 2383 | return vlan_id; |
2391 | } | 2384 | } |
2392 | 2385 | ||
@@ -3014,7 +3007,7 @@ qeth_alloc_buffer_pool(struct qeth_card *card) | |||
3014 | return -ENOMEM; | 3007 | return -ENOMEM; |
3015 | } | 3008 | } |
3016 | for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){ | 3009 | for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){ |
3017 | ptr = (void *) __get_free_page(GFP_KERNEL); | 3010 | ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA); |
3018 | if (!ptr) { | 3011 | if (!ptr) { |
3019 | while (j > 0) | 3012 | while (j > 0) |
3020 | free_page((unsigned long) | 3013 | free_page((unsigned long) |
@@ -3058,7 +3051,8 @@ qeth_alloc_qdio_buffers(struct qeth_card *card) | |||
3058 | if (card->qdio.state == QETH_QDIO_ALLOCATED) | 3051 | if (card->qdio.state == QETH_QDIO_ALLOCATED) |
3059 | return 0; | 3052 | return 0; |
3060 | 3053 | ||
3061 | card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL); | 3054 | card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), |
3055 | GFP_KERNEL|GFP_DMA); | ||
3062 | if (!card->qdio.in_q) | 3056 | if (!card->qdio.in_q) |
3063 | return - ENOMEM; | 3057 | return - ENOMEM; |
3064 | QETH_DBF_TEXT(setup, 2, "inq"); | 3058 | QETH_DBF_TEXT(setup, 2, "inq"); |
@@ -3083,7 +3077,7 @@ qeth_alloc_qdio_buffers(struct qeth_card *card) | |||
3083 | } | 3077 | } |
3084 | for (i = 0; i < card->qdio.no_out_queues; ++i){ | 3078 | for (i = 0; i < card->qdio.no_out_queues; ++i){ |
3085 | card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q), | 3079 | card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q), |
3086 | GFP_KERNEL); | 3080 | GFP_KERNEL|GFP_DMA); |
3087 | if (!card->qdio.out_qs[i]){ | 3081 | if (!card->qdio.out_qs[i]){ |
3088 | while (i > 0) | 3082 | while (i > 0) |
3089 | kfree(card->qdio.out_qs[--i]); | 3083 | kfree(card->qdio.out_qs[--i]); |
@@ -6470,6 +6464,9 @@ qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply, | |||
6470 | if (cmd->hdr.prot_version == QETH_PROT_IPV4) { | 6464 | if (cmd->hdr.prot_version == QETH_PROT_IPV4) { |
6471 | card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; | 6465 | card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; |
6472 | card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; | 6466 | card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; |
6467 | /* Disable IPV6 support hard coded for Hipersockets */ | ||
6468 | if(card->info.type == QETH_CARD_TYPE_IQD) | ||
6469 | card->options.ipa4.supported_funcs &= ~IPA_IPV6; | ||
6473 | } else { | 6470 | } else { |
6474 | #ifdef CONFIG_QETH_IPV6 | 6471 | #ifdef CONFIG_QETH_IPV6 |
6475 | card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; | 6472 | card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; |
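Among the qeth_main.c changes, note __qeth_set_offline(): the old chained (rc = ...) || (rc = ...) || (rc = ...) short-circuits, so the first channel that failed to go offline left the remaining two untouched; the new rc/rc2/rc3 form always calls all three and still reports the first non-zero code. A tiny illustration of that difference in plain C:

/* Sketch: short-circuit || with embedded assignments stops at the
 * first failure; running every step needs separate calls. */
#include <stdio.h>

static int calls;

static int step(int ret)
{
	calls++;
	return ret;
}

int main(void)
{
	int rc, rc2, rc3;

	calls = 0;
	if ((rc = step(-1)) || (rc = step(0)) || (rc = step(0)))
		;	/* first step fails, the other two never run */
	printf("short-circuit: rc=%d after %d call(s)\n", rc, calls);	/* 1 call */

	calls = 0;
	rc  = step(-1);
	rc2 = step(0);
	rc3 = step(0);
	if (!rc)
		rc = rc2 ? rc2 : rc3;	/* keep the first error seen */
	printf("run-all: rc=%d after %d call(s)\n", rc, calls);		/* 3 calls */
	return 0;
}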
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 20019b82b4a8..be96cb78e3b5 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -521,6 +521,14 @@ config SCSI_SATA_SIL | |||
521 | 521 | ||
522 | If unsure, say N. | 522 | If unsure, say N. |
523 | 523 | ||
524 | config SCSI_SATA_SIL24 | ||
525 | tristate "Silicon Image 3124/3132 SATA support" | ||
526 | depends on SCSI_SATA && PCI && EXPERIMENTAL | ||
527 | help | ||
528 | This option enables support for Silicon Image 3124/3132 Serial ATA. | ||
529 | |||
530 | If unsure, say N. | ||
531 | |||
524 | config SCSI_SATA_SIS | 532 | config SCSI_SATA_SIS |
525 | tristate "SiS 964/180 SATA support" | 533 | tristate "SiS 964/180 SATA support" |
526 | depends on SCSI_SATA && PCI && EXPERIMENTAL | 534 | depends on SCSI_SATA && PCI && EXPERIMENTAL |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 48529d180ca8..e2e3d8671930 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_ATA_PIIX) += libata.o ata_piix.o | |||
130 | obj-$(CONFIG_SCSI_SATA_PROMISE) += libata.o sata_promise.o | 130 | obj-$(CONFIG_SCSI_SATA_PROMISE) += libata.o sata_promise.o |
131 | obj-$(CONFIG_SCSI_SATA_QSTOR) += libata.o sata_qstor.o | 131 | obj-$(CONFIG_SCSI_SATA_QSTOR) += libata.o sata_qstor.o |
132 | obj-$(CONFIG_SCSI_SATA_SIL) += libata.o sata_sil.o | 132 | obj-$(CONFIG_SCSI_SATA_SIL) += libata.o sata_sil.o |
133 | obj-$(CONFIG_SCSI_SATA_SIL24) += libata.o sata_sil24.o | ||
133 | obj-$(CONFIG_SCSI_SATA_VIA) += libata.o sata_via.o | 134 | obj-$(CONFIG_SCSI_SATA_VIA) += libata.o sata_via.o |
134 | obj-$(CONFIG_SCSI_SATA_VITESSE) += libata.o sata_vsc.o | 135 | obj-$(CONFIG_SCSI_SATA_VITESSE) += libata.o sata_vsc.o |
135 | obj-$(CONFIG_SCSI_SATA_SIS) += libata.o sata_sis.o | 136 | obj-$(CONFIG_SCSI_SATA_SIS) += libata.o sata_sis.o |
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c index 6e4bb36f8d7c..f0d8f89b5d40 100644 --- a/drivers/scsi/ahci.c +++ b/drivers/scsi/ahci.c | |||
@@ -680,17 +680,36 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs * | |||
680 | 680 | ||
681 | for (i = 0; i < host_set->n_ports; i++) { | 681 | for (i = 0; i < host_set->n_ports; i++) { |
682 | struct ata_port *ap; | 682 | struct ata_port *ap; |
683 | u32 tmp; | ||
684 | 683 | ||
685 | VPRINTK("port %u\n", i); | 684 | if (!(irq_stat & (1 << i))) |
685 | continue; | ||
686 | |||
686 | ap = host_set->ports[i]; | 687 | ap = host_set->ports[i]; |
687 | tmp = irq_stat & (1 << i); | 688 | if (ap) { |
688 | if (tmp && ap) { | ||
689 | struct ata_queued_cmd *qc; | 689 | struct ata_queued_cmd *qc; |
690 | qc = ata_qc_from_tag(ap, ap->active_tag); | 690 | qc = ata_qc_from_tag(ap, ap->active_tag); |
691 | if (ahci_host_intr(ap, qc)) | 691 | if (!ahci_host_intr(ap, qc)) |
692 | irq_ack |= (1 << i); | 692 | if (ata_ratelimit()) { |
693 | struct pci_dev *pdev = | ||
694 | to_pci_dev(ap->host_set->dev); | ||
695 | printk(KERN_WARNING | ||
696 | "ahci(%s): unhandled interrupt on port %u\n", | ||
697 | pci_name(pdev), i); | ||
698 | } | ||
699 | |||
700 | VPRINTK("port %u\n", i); | ||
701 | } else { | ||
702 | VPRINTK("port %u (no irq)\n", i); | ||
703 | if (ata_ratelimit()) { | ||
704 | struct pci_dev *pdev = | ||
705 | to_pci_dev(ap->host_set->dev); | ||
706 | printk(KERN_WARNING | ||
707 | "ahci(%s): interrupt on disabled port %u\n", | ||
708 | pci_name(pdev), i); | ||
709 | } | ||
693 | } | 710 | } |
711 | |||
712 | irq_ack |= (1 << i); | ||
694 | } | 713 | } |
695 | 714 | ||
696 | if (irq_ack) { | 715 | if (irq_ack) { |
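The reworked ahci_interrupt() loop skips ports whose bit is clear in irq_stat, acknowledges every port that did raise a bit whether or not it was handled (so a stray status bit cannot wedge the interrupt line), and rate-limits the warnings for the unexpected cases via ata_ratelimit(). The same idea with the generic printk_ratelimit() helper; handle_port() and the register write-back are placeholders, not libata code.

/* Sketch: scan pending port bits, warn (rate-limited) about unhandled ones,
 * and ack everything that was pending. */
#include <linux/kernel.h>

static int handle_port(unsigned int port)
{
	return 0;	/* placeholder: 0 means "nothing handled" */
}

static unsigned int example_irq_scan(unsigned int irq_stat, unsigned int n_ports)
{
	unsigned int i, irq_ack = 0;

	for (i = 0; i < n_ports; i++) {
		if (!(irq_stat & (1U << i)))
			continue;		/* nothing pending on this port */

		if (!handle_port(i) && printk_ratelimit())
			printk(KERN_WARNING "unhandled interrupt on port %u\n", i);

		irq_ack |= 1U << i;		/* ack even the unhandled bits */
	}
	return irq_ack;				/* caller writes this back to the HBA */
}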
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 943b44c3c16f..9aa93087d495 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/completion.h> | 48 | #include <linux/completion.h> |
49 | #include <linux/suspend.h> | 49 | #include <linux/suspend.h> |
50 | #include <linux/workqueue.h> | 50 | #include <linux/workqueue.h> |
51 | #include <linux/jiffies.h> | ||
51 | #include <scsi/scsi.h> | 52 | #include <scsi/scsi.h> |
52 | #include "scsi.h" | 53 | #include "scsi.h" |
53 | #include "scsi_priv.h" | 54 | #include "scsi_priv.h" |
@@ -62,6 +63,7 @@ | |||
62 | static unsigned int ata_busy_sleep (struct ata_port *ap, | 63 | static unsigned int ata_busy_sleep (struct ata_port *ap, |
63 | unsigned long tmout_pat, | 64 | unsigned long tmout_pat, |
64 | unsigned long tmout); | 65 | unsigned long tmout); |
66 | static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev); | ||
65 | static void ata_set_mode(struct ata_port *ap); | 67 | static void ata_set_mode(struct ata_port *ap); |
66 | static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); | 68 | static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); |
67 | static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift); | 69 | static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift); |
@@ -69,7 +71,6 @@ static int fgb(u32 bitmap); | |||
69 | static int ata_choose_xfer_mode(struct ata_port *ap, | 71 | static int ata_choose_xfer_mode(struct ata_port *ap, |
70 | u8 *xfer_mode_out, | 72 | u8 *xfer_mode_out, |
71 | unsigned int *xfer_shift_out); | 73 | unsigned int *xfer_shift_out); |
72 | static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat); | ||
73 | static void __ata_qc_complete(struct ata_queued_cmd *qc); | 74 | static void __ata_qc_complete(struct ata_queued_cmd *qc); |
74 | 75 | ||
75 | static unsigned int ata_unique_id = 1; | 76 | static unsigned int ata_unique_id = 1; |
@@ -1131,7 +1132,7 @@ static inline void ata_dump_id(struct ata_device *dev) | |||
1131 | static void ata_dev_identify(struct ata_port *ap, unsigned int device) | 1132 | static void ata_dev_identify(struct ata_port *ap, unsigned int device) |
1132 | { | 1133 | { |
1133 | struct ata_device *dev = &ap->device[device]; | 1134 | struct ata_device *dev = &ap->device[device]; |
1134 | unsigned int i; | 1135 | unsigned int major_version; |
1135 | u16 tmp; | 1136 | u16 tmp; |
1136 | unsigned long xfer_modes; | 1137 | unsigned long xfer_modes; |
1137 | u8 status; | 1138 | u8 status; |
@@ -1229,9 +1230,9 @@ retry: | |||
1229 | * common ATA, ATAPI feature tests | 1230 | * common ATA, ATAPI feature tests |
1230 | */ | 1231 | */ |
1231 | 1232 | ||
1232 | /* we require LBA and DMA support (bits 8 & 9 of word 49) */ | 1233 | /* we require DMA support (bits 8 of word 49) */ |
1233 | if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) { | 1234 | if (!ata_id_has_dma(dev->id)) { |
1234 | printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id); | 1235 | printk(KERN_DEBUG "ata%u: no dma\n", ap->id); |
1235 | goto err_out_nosup; | 1236 | goto err_out_nosup; |
1236 | } | 1237 | } |
1237 | 1238 | ||
@@ -1251,32 +1252,69 @@ retry: | |||
1251 | if (!ata_id_is_ata(dev->id)) /* sanity check */ | 1252 | if (!ata_id_is_ata(dev->id)) /* sanity check */ |
1252 | goto err_out_nosup; | 1253 | goto err_out_nosup; |
1253 | 1254 | ||
1255 | /* get major version */ | ||
1254 | tmp = dev->id[ATA_ID_MAJOR_VER]; | 1256 | tmp = dev->id[ATA_ID_MAJOR_VER]; |
1255 | for (i = 14; i >= 1; i--) | 1257 | for (major_version = 14; major_version >= 1; major_version--) |
1256 | if (tmp & (1 << i)) | 1258 | if (tmp & (1 << major_version)) |
1257 | break; | 1259 | break; |
1258 | 1260 | ||
1259 | /* we require at least ATA-3 */ | 1261 | /* |
1260 | if (i < 3) { | 1262 | * The exact sequence expected by certain pre-ATA4 drives is: |
1261 | printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id); | 1263 | * SRST RESET |
1262 | goto err_out_nosup; | 1264 | * IDENTIFY |
1263 | } | 1265 | * INITIALIZE DEVICE PARAMETERS |
1266 | * anything else.. | ||
1267 | * Some drives were very specific about that exact sequence. | ||
1268 | */ | ||
1269 | if (major_version < 4 || (!ata_id_has_lba(dev->id))) | ||
1270 | ata_dev_init_params(ap, dev); | ||
1271 | |||
1272 | if (ata_id_has_lba(dev->id)) { | ||
1273 | dev->flags |= ATA_DFLAG_LBA; | ||
1274 | |||
1275 | if (ata_id_has_lba48(dev->id)) { | ||
1276 | dev->flags |= ATA_DFLAG_LBA48; | ||
1277 | dev->n_sectors = ata_id_u64(dev->id, 100); | ||
1278 | } else { | ||
1279 | dev->n_sectors = ata_id_u32(dev->id, 60); | ||
1280 | } | ||
1281 | |||
1282 | /* print device info to dmesg */ | ||
1283 | printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n", | ||
1284 | ap->id, device, | ||
1285 | major_version, | ||
1286 | ata_mode_string(xfer_modes), | ||
1287 | (unsigned long long)dev->n_sectors, | ||
1288 | dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA"); | ||
1289 | } else { | ||
1290 | /* CHS */ | ||
1291 | |||
1292 | /* Default translation */ | ||
1293 | dev->cylinders = dev->id[1]; | ||
1294 | dev->heads = dev->id[3]; | ||
1295 | dev->sectors = dev->id[6]; | ||
1296 | dev->n_sectors = dev->cylinders * dev->heads * dev->sectors; | ||
1297 | |||
1298 | if (ata_id_current_chs_valid(dev->id)) { | ||
1299 | /* Current CHS translation is valid. */ | ||
1300 | dev->cylinders = dev->id[54]; | ||
1301 | dev->heads = dev->id[55]; | ||
1302 | dev->sectors = dev->id[56]; | ||
1303 | |||
1304 | dev->n_sectors = ata_id_u32(dev->id, 57); | ||
1305 | } | ||
1306 | |||
1307 | /* print device info to dmesg */ | ||
1308 | printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n", | ||
1309 | ap->id, device, | ||
1310 | major_version, | ||
1311 | ata_mode_string(xfer_modes), | ||
1312 | (unsigned long long)dev->n_sectors, | ||
1313 | (int)dev->cylinders, (int)dev->heads, (int)dev->sectors); | ||
1264 | 1314 | ||
1265 | if (ata_id_has_lba48(dev->id)) { | ||
1266 | dev->flags |= ATA_DFLAG_LBA48; | ||
1267 | dev->n_sectors = ata_id_u64(dev->id, 100); | ||
1268 | } else { | ||
1269 | dev->n_sectors = ata_id_u32(dev->id, 60); | ||
1270 | } | 1315 | } |
1271 | 1316 | ||
1272 | ap->host->max_cmd_len = 16; | 1317 | ap->host->max_cmd_len = 16; |
1273 | |||
1274 | /* print device info to dmesg */ | ||
1275 | printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n", | ||
1276 | ap->id, device, | ||
1277 | ata_mode_string(xfer_modes), | ||
1278 | (unsigned long long)dev->n_sectors, | ||
1279 | dev->flags & ATA_DFLAG_LBA48 ? " lba48" : ""); | ||
1280 | } | 1318 | } |
1281 | 1319 | ||
1282 | /* ATAPI-specific feature tests */ | 1320 | /* ATAPI-specific feature tests */ |
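The reworked ata_dev_identify() above scans IDENTIFY word 80 for the highest supported ATA major revision, issues INIT DEVICE PARAMETERS for pre-ATA-4 or non-LBA disks, and, instead of rejecting non-LBA devices outright, falls back to CHS geometry (default words 1/3/6, or the current translation in words 54-56 when valid). The bit scan and the CHS capacity arithmetic in isolation; the sample word values are made up.

/* Sketch: highest ATA major version from IDENTIFY word 80, plus CHS capacity. */
#include <stdio.h>

int main(void)
{
	unsigned short word80 = 0x007e;		/* bits 1-6 set: ATA-1 .. ATA-6 */
	int major;

	for (major = 14; major >= 1; major--)
		if (word80 & (1 << major))
			break;
	printf("highest supported: ATA-%d\n", major);		/* ATA-6 */

	/* default geometry: word 1 = cylinders, word 3 = heads, word 6 = sectors */
	{
		unsigned int cylinders = 16383, heads = 16, sectors = 63;
		unsigned long long capacity =
			(unsigned long long)cylinders * heads * sectors;
		printf("CHS capacity: %llu sectors\n", capacity);	/* 16514064 */
	}
	return 0;
}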
@@ -2144,6 +2182,54 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) | |||
2144 | } | 2182 | } |
2145 | 2183 | ||
2146 | /** | 2184 | /** |
2185 | * ata_dev_init_params - Issue INIT DEV PARAMS command | ||
2186 | * @ap: Port associated with device @dev | ||
2187 | * @dev: Device to which command will be sent | ||
2188 | * | ||
2189 | * LOCKING: | ||
2190 | */ | ||
2191 | |||
2192 | static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) | ||
2193 | { | ||
2194 | DECLARE_COMPLETION(wait); | ||
2195 | struct ata_queued_cmd *qc; | ||
2196 | int rc; | ||
2197 | unsigned long flags; | ||
2198 | u16 sectors = dev->id[6]; | ||
2199 | u16 heads = dev->id[3]; | ||
2200 | |||
2201 | /* Number of sectors per track 1-255. Number of heads 1-16 */ | ||
2202 | if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) | ||
2203 | return; | ||
2204 | |||
2205 | /* set up init dev params taskfile */ | ||
2206 | DPRINTK("init dev params \n"); | ||
2207 | |||
2208 | qc = ata_qc_new_init(ap, dev); | ||
2209 | BUG_ON(qc == NULL); | ||
2210 | |||
2211 | qc->tf.command = ATA_CMD_INIT_DEV_PARAMS; | ||
2212 | qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | ||
2213 | qc->tf.protocol = ATA_PROT_NODATA; | ||
2214 | qc->tf.nsect = sectors; | ||
2215 | qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ | ||
2216 | |||
2217 | qc->waiting = &wait; | ||
2218 | qc->complete_fn = ata_qc_complete_noop; | ||
2219 | |||
2220 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
2221 | rc = ata_qc_issue(qc); | ||
2222 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
2223 | |||
2224 | if (rc) | ||
2225 | ata_port_disable(ap); | ||
2226 | else | ||
2227 | wait_for_completion(&wait); | ||
2228 | |||
2229 | DPRINTK("EXIT\n"); | ||
2230 | } | ||
2231 | |||
2232 | /** | ||
2147 | * ata_sg_clean - Unmap DMA memory associated with command | 2233 | * ata_sg_clean - Unmap DMA memory associated with command |
2148 | * @qc: Command containing DMA memory to be released | 2234 | * @qc: Command containing DMA memory to be released |
2149 | * | 2235 | * |
@@ -2507,20 +2593,20 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) | |||
2507 | static unsigned long ata_pio_poll(struct ata_port *ap) | 2593 | static unsigned long ata_pio_poll(struct ata_port *ap) |
2508 | { | 2594 | { |
2509 | u8 status; | 2595 | u8 status; |
2510 | unsigned int poll_state = PIO_ST_UNKNOWN; | 2596 | unsigned int poll_state = HSM_ST_UNKNOWN; |
2511 | unsigned int reg_state = PIO_ST_UNKNOWN; | 2597 | unsigned int reg_state = HSM_ST_UNKNOWN; |
2512 | const unsigned int tmout_state = PIO_ST_TMOUT; | 2598 | const unsigned int tmout_state = HSM_ST_TMOUT; |
2513 | 2599 | ||
2514 | switch (ap->pio_task_state) { | 2600 | switch (ap->hsm_task_state) { |
2515 | case PIO_ST: | 2601 | case HSM_ST: |
2516 | case PIO_ST_POLL: | 2602 | case HSM_ST_POLL: |
2517 | poll_state = PIO_ST_POLL; | 2603 | poll_state = HSM_ST_POLL; |
2518 | reg_state = PIO_ST; | 2604 | reg_state = HSM_ST; |
2519 | break; | 2605 | break; |
2520 | case PIO_ST_LAST: | 2606 | case HSM_ST_LAST: |
2521 | case PIO_ST_LAST_POLL: | 2607 | case HSM_ST_LAST_POLL: |
2522 | poll_state = PIO_ST_LAST_POLL; | 2608 | poll_state = HSM_ST_LAST_POLL; |
2523 | reg_state = PIO_ST_LAST; | 2609 | reg_state = HSM_ST_LAST; |
2524 | break; | 2610 | break; |
2525 | default: | 2611 | default: |
2526 | BUG(); | 2612 | BUG(); |
@@ -2530,14 +2616,14 @@ static unsigned long ata_pio_poll(struct ata_port *ap) | |||
2530 | status = ata_chk_status(ap); | 2616 | status = ata_chk_status(ap); |
2531 | if (status & ATA_BUSY) { | 2617 | if (status & ATA_BUSY) { |
2532 | if (time_after(jiffies, ap->pio_task_timeout)) { | 2618 | if (time_after(jiffies, ap->pio_task_timeout)) { |
2533 | ap->pio_task_state = tmout_state; | 2619 | ap->hsm_task_state = tmout_state; |
2534 | return 0; | 2620 | return 0; |
2535 | } | 2621 | } |
2536 | ap->pio_task_state = poll_state; | 2622 | ap->hsm_task_state = poll_state; |
2537 | return ATA_SHORT_PAUSE; | 2623 | return ATA_SHORT_PAUSE; |
2538 | } | 2624 | } |
2539 | 2625 | ||
2540 | ap->pio_task_state = reg_state; | 2626 | ap->hsm_task_state = reg_state; |
2541 | return 0; | 2627 | return 0; |
2542 | } | 2628 | } |
2543 | 2629 | ||
@@ -2562,14 +2648,14 @@ static int ata_pio_complete (struct ata_port *ap) | |||
2562 | * we enter, BSY will be cleared in a chk-status or two. If not, | 2648 | * we enter, BSY will be cleared in a chk-status or two. If not, |
2563 | * the drive is probably seeking or something. Snooze for a couple | 2649 | * the drive is probably seeking or something. Snooze for a couple |
2564 | * msecs, then chk-status again. If still busy, fall back to | 2650 | * msecs, then chk-status again. If still busy, fall back to |
2565 | * PIO_ST_POLL state. | 2651 | * HSM_ST_POLL state. |
2566 | */ | 2652 | */ |
2567 | drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); | 2653 | drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); |
2568 | if (drv_stat & (ATA_BUSY | ATA_DRQ)) { | 2654 | if (drv_stat & (ATA_BUSY | ATA_DRQ)) { |
2569 | msleep(2); | 2655 | msleep(2); |
2570 | drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); | 2656 | drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); |
2571 | if (drv_stat & (ATA_BUSY | ATA_DRQ)) { | 2657 | if (drv_stat & (ATA_BUSY | ATA_DRQ)) { |
2572 | ap->pio_task_state = PIO_ST_LAST_POLL; | 2658 | ap->hsm_task_state = HSM_ST_LAST_POLL; |
2573 | ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; | 2659 | ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; |
2574 | return 0; | 2660 | return 0; |
2575 | } | 2661 | } |
@@ -2577,14 +2663,14 @@ static int ata_pio_complete (struct ata_port *ap) | |||
2577 | 2663 | ||
2578 | drv_stat = ata_wait_idle(ap); | 2664 | drv_stat = ata_wait_idle(ap); |
2579 | if (!ata_ok(drv_stat)) { | 2665 | if (!ata_ok(drv_stat)) { |
2580 | ap->pio_task_state = PIO_ST_ERR; | 2666 | ap->hsm_task_state = HSM_ST_ERR; |
2581 | return 0; | 2667 | return 0; |
2582 | } | 2668 | } |
2583 | 2669 | ||
2584 | qc = ata_qc_from_tag(ap, ap->active_tag); | 2670 | qc = ata_qc_from_tag(ap, ap->active_tag); |
2585 | assert(qc != NULL); | 2671 | assert(qc != NULL); |
2586 | 2672 | ||
2587 | ap->pio_task_state = PIO_ST_IDLE; | 2673 | ap->hsm_task_state = HSM_ST_IDLE; |
2588 | 2674 | ||
2589 | ata_poll_qc_complete(qc, drv_stat); | 2675 | ata_poll_qc_complete(qc, drv_stat); |
2590 | 2676 | ||
@@ -2744,7 +2830,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | |||
2744 | unsigned char *buf; | 2830 | unsigned char *buf; |
2745 | 2831 | ||
2746 | if (qc->cursect == (qc->nsect - 1)) | 2832 | if (qc->cursect == (qc->nsect - 1)) |
2747 | ap->pio_task_state = PIO_ST_LAST; | 2833 | ap->hsm_task_state = HSM_ST_LAST; |
2748 | 2834 | ||
2749 | page = sg[qc->cursg].page; | 2835 | page = sg[qc->cursg].page; |
2750 | offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; | 2836 | offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; |
@@ -2794,7 +2880,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) | |||
2794 | unsigned int offset, count; | 2880 | unsigned int offset, count; |
2795 | 2881 | ||
2796 | if (qc->curbytes + bytes >= qc->nbytes) | 2882 | if (qc->curbytes + bytes >= qc->nbytes) |
2797 | ap->pio_task_state = PIO_ST_LAST; | 2883 | ap->hsm_task_state = HSM_ST_LAST; |
2798 | 2884 | ||
2799 | next_sg: | 2885 | next_sg: |
2800 | if (unlikely(qc->cursg >= qc->n_elem)) { | 2886 | if (unlikely(qc->cursg >= qc->n_elem)) { |
@@ -2816,7 +2902,7 @@ next_sg: | |||
2816 | for (i = 0; i < words; i++) | 2902 | for (i = 0; i < words; i++) |
2817 | ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); | 2903 | ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); |
2818 | 2904 | ||
2819 | ap->pio_task_state = PIO_ST_LAST; | 2905 | ap->hsm_task_state = HSM_ST_LAST; |
2820 | return; | 2906 | return; |
2821 | } | 2907 | } |
2822 | 2908 | ||
@@ -2897,7 +2983,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) | |||
2897 | err_out: | 2983 | err_out: |
2898 | printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", | 2984 | printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", |
2899 | ap->id, dev->devno); | 2985 | ap->id, dev->devno); |
2900 | ap->pio_task_state = PIO_ST_ERR; | 2986 | ap->hsm_task_state = HSM_ST_ERR; |
2901 | } | 2987 | } |
2902 | 2988 | ||
2903 | /** | 2989 | /** |
@@ -2919,14 +3005,14 @@ static void ata_pio_block(struct ata_port *ap) | |||
2919 | * a chk-status or two. If not, the drive is probably seeking | 3005 | * a chk-status or two. If not, the drive is probably seeking |
2920 | * or something. Snooze for a couple msecs, then | 3006 | * or something. Snooze for a couple msecs, then |
2921 | * chk-status again. If still busy, fall back to | 3007 | * chk-status again. If still busy, fall back to |
2922 | * PIO_ST_POLL state. | 3008 | * HSM_ST_POLL state. |
2923 | */ | 3009 | */ |
2924 | status = ata_busy_wait(ap, ATA_BUSY, 5); | 3010 | status = ata_busy_wait(ap, ATA_BUSY, 5); |
2925 | if (status & ATA_BUSY) { | 3011 | if (status & ATA_BUSY) { |
2926 | msleep(2); | 3012 | msleep(2); |
2927 | status = ata_busy_wait(ap, ATA_BUSY, 10); | 3013 | status = ata_busy_wait(ap, ATA_BUSY, 10); |
2928 | if (status & ATA_BUSY) { | 3014 | if (status & ATA_BUSY) { |
2929 | ap->pio_task_state = PIO_ST_POLL; | 3015 | ap->hsm_task_state = HSM_ST_POLL; |
2930 | ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; | 3016 | ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; |
2931 | return; | 3017 | return; |
2932 | } | 3018 | } |
@@ -2938,7 +3024,7 @@ static void ata_pio_block(struct ata_port *ap) | |||
2938 | if (is_atapi_taskfile(&qc->tf)) { | 3024 | if (is_atapi_taskfile(&qc->tf)) { |
2939 | /* no more data to transfer or unsupported ATAPI command */ | 3025 | /* no more data to transfer or unsupported ATAPI command */ |
2940 | if ((status & ATA_DRQ) == 0) { | 3026 | if ((status & ATA_DRQ) == 0) { |
2941 | ap->pio_task_state = PIO_ST_LAST; | 3027 | ap->hsm_task_state = HSM_ST_LAST; |
2942 | return; | 3028 | return; |
2943 | } | 3029 | } |
2944 | 3030 | ||
@@ -2946,7 +3032,7 @@ static void ata_pio_block(struct ata_port *ap) | |||
2946 | } else { | 3032 | } else { |
2947 | /* handle BSY=0, DRQ=0 as error */ | 3033 | /* handle BSY=0, DRQ=0 as error */ |
2948 | if ((status & ATA_DRQ) == 0) { | 3034 | if ((status & ATA_DRQ) == 0) { |
2949 | ap->pio_task_state = PIO_ST_ERR; | 3035 | ap->hsm_task_state = HSM_ST_ERR; |
2950 | return; | 3036 | return; |
2951 | } | 3037 | } |
2952 | 3038 | ||
@@ -2966,7 +3052,7 @@ static void ata_pio_error(struct ata_port *ap) | |||
2966 | printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n", | 3052 | printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n", |
2967 | ap->id, drv_stat); | 3053 | ap->id, drv_stat); |
2968 | 3054 | ||
2969 | ap->pio_task_state = PIO_ST_IDLE; | 3055 | ap->hsm_task_state = HSM_ST_IDLE; |
2970 | 3056 | ||
2971 | ata_poll_qc_complete(qc, drv_stat | ATA_ERR); | 3057 | ata_poll_qc_complete(qc, drv_stat | ATA_ERR); |
2972 | } | 3058 | } |
@@ -2981,25 +3067,25 @@ fsm_start: | |||
2981 | timeout = 0; | 3067 | timeout = 0; |
2982 | qc_completed = 0; | 3068 | qc_completed = 0; |
2983 | 3069 | ||
2984 | switch (ap->pio_task_state) { | 3070 | switch (ap->hsm_task_state) { |
2985 | case PIO_ST_IDLE: | 3071 | case HSM_ST_IDLE: |
2986 | return; | 3072 | return; |
2987 | 3073 | ||
2988 | case PIO_ST: | 3074 | case HSM_ST: |
2989 | ata_pio_block(ap); | 3075 | ata_pio_block(ap); |
2990 | break; | 3076 | break; |
2991 | 3077 | ||
2992 | case PIO_ST_LAST: | 3078 | case HSM_ST_LAST: |
2993 | qc_completed = ata_pio_complete(ap); | 3079 | qc_completed = ata_pio_complete(ap); |
2994 | break; | 3080 | break; |
2995 | 3081 | ||
2996 | case PIO_ST_POLL: | 3082 | case HSM_ST_POLL: |
2997 | case PIO_ST_LAST_POLL: | 3083 | case HSM_ST_LAST_POLL: |
2998 | timeout = ata_pio_poll(ap); | 3084 | timeout = ata_pio_poll(ap); |
2999 | break; | 3085 | break; |
3000 | 3086 | ||
3001 | case PIO_ST_TMOUT: | 3087 | case HSM_ST_TMOUT: |
3002 | case PIO_ST_ERR: | 3088 | case HSM_ST_ERR: |
3003 | ata_pio_error(ap); | 3089 | ata_pio_error(ap); |
3004 | return; | 3090 | return; |
3005 | } | 3091 | } |
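The hunks above rename the polled-PIO state machine: pio_task_state becomes hsm_task_state and the PIO_ST_* states become HSM_ST_*, with ata_pio_task() dispatching on the current state and re-running until the command completes, needs to poll, or errors out. The following is a minimal userspace sketch of that switch-driven dispatch shape only; the enum values, struct and helpers are invented for illustration and are not libata symbols.

/* Illustrative userspace sketch of a switch-driven polling state machine,
 * loosely modelled on the HSM_ST_* dispatch above. All names are made up. */
#include <stdio.h>

enum hsm_state { HSM_IDLE, HSM_XFER, HSM_LAST, HSM_POLL, HSM_ERR };

struct port_sim {
	enum hsm_state state;
	int sectors_left;
};

/* Transfer one "sector"; move to HSM_LAST when nothing is left. */
static void do_xfer(struct port_sim *p)
{
	if (--p->sectors_left == 0)
		p->state = HSM_LAST;
}

static void run_hsm(struct port_sim *p)
{
	for (;;) {
		switch (p->state) {
		case HSM_IDLE:
		case HSM_ERR:
			return;			/* nothing to do / give up */
		case HSM_XFER:
			do_xfer(p);
			break;			/* go around the loop again */
		case HSM_POLL:
			p->state = HSM_XFER;	/* pretend BSY cleared */
			break;
		case HSM_LAST:
			printf("command complete\n");
			p->state = HSM_IDLE;
			break;
		}
	}
}

int main(void)
{
	struct port_sim p = { .state = HSM_XFER, .sectors_left = 4 };
	run_hsm(&p);
	return 0;
}

The real ata_pio_task() additionally re-queues itself on the libata workqueue when it has to wait for the device, rather than spinning inline as this sketch does.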
@@ -3010,52 +3096,6 @@ fsm_start: | |||
3010 | goto fsm_start; | 3096 | goto fsm_start; |
3011 | } | 3097 | } |
3012 | 3098 | ||
3013 | static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev, | ||
3014 | struct scsi_cmnd *cmd) | ||
3015 | { | ||
3016 | DECLARE_COMPLETION(wait); | ||
3017 | struct ata_queued_cmd *qc; | ||
3018 | unsigned long flags; | ||
3019 | int rc; | ||
3020 | |||
3021 | DPRINTK("ATAPI request sense\n"); | ||
3022 | |||
3023 | qc = ata_qc_new_init(ap, dev); | ||
3024 | BUG_ON(qc == NULL); | ||
3025 | |||
3026 | /* FIXME: is this needed? */ | ||
3027 | memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); | ||
3028 | |||
3029 | ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); | ||
3030 | qc->dma_dir = DMA_FROM_DEVICE; | ||
3031 | |||
3032 | memset(&qc->cdb, 0, ap->cdb_len); | ||
3033 | qc->cdb[0] = REQUEST_SENSE; | ||
3034 | qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; | ||
3035 | |||
3036 | qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | ||
3037 | qc->tf.command = ATA_CMD_PACKET; | ||
3038 | |||
3039 | qc->tf.protocol = ATA_PROT_ATAPI; | ||
3040 | qc->tf.lbam = (8 * 1024) & 0xff; | ||
3041 | qc->tf.lbah = (8 * 1024) >> 8; | ||
3042 | qc->nbytes = SCSI_SENSE_BUFFERSIZE; | ||
3043 | |||
3044 | qc->waiting = &wait; | ||
3045 | qc->complete_fn = ata_qc_complete_noop; | ||
3046 | |||
3047 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
3048 | rc = ata_qc_issue(qc); | ||
3049 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
3050 | |||
3051 | if (rc) | ||
3052 | ata_port_disable(ap); | ||
3053 | else | ||
3054 | wait_for_completion(&wait); | ||
3055 | |||
3056 | DPRINTK("EXIT\n"); | ||
3057 | } | ||
3058 | |||
3059 | /** | 3099 | /** |
3060 | * ata_qc_timeout - Handle timeout of queued command | 3100 | * ata_qc_timeout - Handle timeout of queued command |
3061 | * @qc: Command that timed out | 3101 | * @qc: Command that timed out |
@@ -3173,14 +3213,14 @@ void ata_eng_timeout(struct ata_port *ap) | |||
3173 | DPRINTK("ENTER\n"); | 3213 | DPRINTK("ENTER\n"); |
3174 | 3214 | ||
3175 | qc = ata_qc_from_tag(ap, ap->active_tag); | 3215 | qc = ata_qc_from_tag(ap, ap->active_tag); |
3176 | if (!qc) { | 3216 | if (qc) |
3217 | ata_qc_timeout(qc); | ||
3218 | else { | ||
3177 | printk(KERN_ERR "ata%u: BUG: timeout without command\n", | 3219 | printk(KERN_ERR "ata%u: BUG: timeout without command\n", |
3178 | ap->id); | 3220 | ap->id); |
3179 | goto out; | 3221 | goto out; |
3180 | } | 3222 | } |
3181 | 3223 | ||
3182 | ata_qc_timeout(qc); | ||
3183 | |||
3184 | out: | 3224 | out: |
3185 | DPRINTK("EXIT\n"); | 3225 | DPRINTK("EXIT\n"); |
3186 | } | 3226 | } |
@@ -3238,14 +3278,18 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, | |||
3238 | 3278 | ||
3239 | ata_tf_init(ap, &qc->tf, dev->devno); | 3279 | ata_tf_init(ap, &qc->tf, dev->devno); |
3240 | 3280 | ||
3241 | if (dev->flags & ATA_DFLAG_LBA48) | 3281 | if (dev->flags & ATA_DFLAG_LBA) { |
3242 | qc->tf.flags |= ATA_TFLAG_LBA48; | 3282 | qc->tf.flags |= ATA_TFLAG_LBA; |
3283 | |||
3284 | if (dev->flags & ATA_DFLAG_LBA48) | ||
3285 | qc->tf.flags |= ATA_TFLAG_LBA48; | ||
3286 | } | ||
3243 | } | 3287 | } |
3244 | 3288 | ||
3245 | return qc; | 3289 | return qc; |
3246 | } | 3290 | } |
3247 | 3291 | ||
3248 | static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat) | 3292 | int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat) |
3249 | { | 3293 | { |
3250 | return 0; | 3294 | return 0; |
3251 | } | 3295 | } |
@@ -3442,7 +3486,7 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc) | |||
3442 | case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ | 3486 | case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ |
3443 | ata_qc_set_polling(qc); | 3487 | ata_qc_set_polling(qc); |
3444 | ata_tf_to_host_nolock(ap, &qc->tf); | 3488 | ata_tf_to_host_nolock(ap, &qc->tf); |
3445 | ap->pio_task_state = PIO_ST; | 3489 | ap->hsm_task_state = HSM_ST; |
3446 | queue_work(ata_wq, &ap->pio_task); | 3490 | queue_work(ata_wq, &ap->pio_task); |
3447 | break; | 3491 | break; |
3448 | 3492 | ||
@@ -3668,7 +3712,7 @@ u8 ata_bmdma_status(struct ata_port *ap) | |||
3668 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | 3712 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; |
3669 | host_stat = readb(mmio + ATA_DMA_STATUS); | 3713 | host_stat = readb(mmio + ATA_DMA_STATUS); |
3670 | } else | 3714 | } else |
3671 | host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | 3715 | host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); |
3672 | return host_stat; | 3716 | return host_stat; |
3673 | } | 3717 | } |
3674 | 3718 | ||
@@ -3888,7 +3932,7 @@ static void atapi_packet_task(void *_data) | |||
3888 | ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); | 3932 | ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); |
3889 | 3933 | ||
3890 | /* PIO commands are handled by polling */ | 3934 | /* PIO commands are handled by polling */ |
3891 | ap->pio_task_state = PIO_ST; | 3935 | ap->hsm_task_state = HSM_ST; |
3892 | queue_work(ata_wq, &ap->pio_task); | 3936 | queue_work(ata_wq, &ap->pio_task); |
3893 | } | 3937 | } |
3894 | 3938 | ||
@@ -4202,7 +4246,7 @@ int ata_device_add(struct ata_probe_ent *ent) | |||
4202 | for (i = 0; i < count; i++) { | 4246 | for (i = 0; i < count; i++) { |
4203 | struct ata_port *ap = host_set->ports[i]; | 4247 | struct ata_port *ap = host_set->ports[i]; |
4204 | 4248 | ||
4205 | scsi_scan_host(ap->host); | 4249 | ata_scsi_scan_host(ap); |
4206 | } | 4250 | } |
4207 | 4251 | ||
4208 | dev_set_drvdata(dev, host_set); | 4252 | dev_set_drvdata(dev, host_set); |
@@ -4362,85 +4406,87 @@ void ata_pci_host_stop (struct ata_host_set *host_set) | |||
4362 | * ata_pci_init_native_mode - Initialize native-mode driver | 4406 | * ata_pci_init_native_mode - Initialize native-mode driver |
4363 | * @pdev: pci device to be initialized | 4407 | * @pdev: pci device to be initialized |
4364 | * @port: array[2] of pointers to port info structures. | 4408 | * @port: array[2] of pointers to port info structures. |
4409 | * @ports: bitmap of ports present | ||
4365 | * | 4410 | * |
4366 | * Utility function which allocates and initializes an | 4411 | * Utility function which allocates and initializes an |
4367 | * ata_probe_ent structure for a standard dual-port | 4412 | * ata_probe_ent structure for a standard dual-port |
4368 | * PIO-based IDE controller. The returned ata_probe_ent | 4413 | * PIO-based IDE controller. The returned ata_probe_ent |
4369 | * structure can be passed to ata_device_add(). The returned | 4414 | * structure can be passed to ata_device_add(). The returned |
4370 | * ata_probe_ent structure should then be freed with kfree(). | 4415 | * ata_probe_ent structure should then be freed with kfree(). |
4416 | * | ||
4417 | * The caller need only pass the address of the primary port, the | ||
4418 | * secondary will be deduced automatically. If the device has non | ||
4419 | * standard secondary port mappings this function can be called twice, | ||
4420 | * once for each interface. | ||
4371 | */ | 4421 | */ |
4372 | 4422 | ||
4373 | struct ata_probe_ent * | 4423 | struct ata_probe_ent * |
4374 | ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port) | 4424 | ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports) |
4375 | { | 4425 | { |
4376 | struct ata_probe_ent *probe_ent = | 4426 | struct ata_probe_ent *probe_ent = |
4377 | ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); | 4427 | ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); |
4428 | int p = 0; | ||
4429 | |||
4378 | if (!probe_ent) | 4430 | if (!probe_ent) |
4379 | return NULL; | 4431 | return NULL; |
4380 | 4432 | ||
4381 | probe_ent->n_ports = 2; | ||
4382 | probe_ent->irq = pdev->irq; | 4433 | probe_ent->irq = pdev->irq; |
4383 | probe_ent->irq_flags = SA_SHIRQ; | 4434 | probe_ent->irq_flags = SA_SHIRQ; |
4384 | 4435 | ||
4385 | probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); | 4436 | if (ports & ATA_PORT_PRIMARY) { |
4386 | probe_ent->port[0].altstatus_addr = | 4437 | probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0); |
4387 | probe_ent->port[0].ctl_addr = | 4438 | probe_ent->port[p].altstatus_addr = |
4388 | pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; | 4439 | probe_ent->port[p].ctl_addr = |
4389 | probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); | 4440 | pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; |
4390 | 4441 | probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4); | |
4391 | probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); | 4442 | ata_std_ports(&probe_ent->port[p]); |
4392 | probe_ent->port[1].altstatus_addr = | 4443 | p++; |
4393 | probe_ent->port[1].ctl_addr = | 4444 | } |
4394 | pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; | ||
4395 | probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8; | ||
4396 | 4445 | ||
4397 | ata_std_ports(&probe_ent->port[0]); | 4446 | if (ports & ATA_PORT_SECONDARY) { |
4398 | ata_std_ports(&probe_ent->port[1]); | 4447 | probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2); |
4448 | probe_ent->port[p].altstatus_addr = | ||
4449 | probe_ent->port[p].ctl_addr = | ||
4450 | pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; | ||
4451 | probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8; | ||
4452 | ata_std_ports(&probe_ent->port[p]); | ||
4453 | p++; | ||
4454 | } | ||
4399 | 4455 | ||
4456 | probe_ent->n_ports = p; | ||
4400 | return probe_ent; | 4457 | return probe_ent; |
4401 | } | 4458 | } |
4402 | 4459 | ||
4403 | static struct ata_probe_ent * | 4460 | static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info **port, int port_num) |
4404 | ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port, | ||
4405 | struct ata_probe_ent **ppe2) | ||
4406 | { | 4461 | { |
4407 | struct ata_probe_ent *probe_ent, *probe_ent2; | 4462 | struct ata_probe_ent *probe_ent; |
4408 | 4463 | ||
4409 | probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); | 4464 | probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); |
4410 | if (!probe_ent) | 4465 | if (!probe_ent) |
4411 | return NULL; | 4466 | return NULL; |
4412 | probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]); | ||
4413 | if (!probe_ent2) { | ||
4414 | kfree(probe_ent); | ||
4415 | return NULL; | ||
4416 | } | ||
4417 | 4467 | ||
4418 | probe_ent->n_ports = 1; | 4468 | |
4419 | probe_ent->irq = 14; | ||
4420 | |||
4421 | probe_ent->hard_port_no = 0; | ||
4422 | probe_ent->legacy_mode = 1; | 4469 | probe_ent->legacy_mode = 1; |
4423 | 4470 | probe_ent->n_ports = 1; | |
4424 | probe_ent2->n_ports = 1; | 4471 | probe_ent->hard_port_no = port_num; |
4425 | probe_ent2->irq = 15; | 4472 | |
4426 | 4473 | switch(port_num) | |
4427 | probe_ent2->hard_port_no = 1; | 4474 | { |
4428 | probe_ent2->legacy_mode = 1; | 4475 | case 0: |
4429 | 4476 | probe_ent->irq = 14; | |
4430 | probe_ent->port[0].cmd_addr = 0x1f0; | 4477 | probe_ent->port[0].cmd_addr = 0x1f0; |
4431 | probe_ent->port[0].altstatus_addr = | 4478 | probe_ent->port[0].altstatus_addr = |
4432 | probe_ent->port[0].ctl_addr = 0x3f6; | 4479 | probe_ent->port[0].ctl_addr = 0x3f6; |
4433 | probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); | 4480 | break; |
4434 | 4481 | case 1: | |
4435 | probe_ent2->port[0].cmd_addr = 0x170; | 4482 | probe_ent->irq = 15; |
4436 | probe_ent2->port[0].altstatus_addr = | 4483 | probe_ent->port[0].cmd_addr = 0x170; |
4437 | probe_ent2->port[0].ctl_addr = 0x376; | 4484 | probe_ent->port[0].altstatus_addr = |
4438 | probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8; | 4485 | probe_ent->port[0].ctl_addr = 0x376; |
4439 | 4486 | break; | |
4487 | } | ||
4488 | probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num; | ||
4440 | ata_std_ports(&probe_ent->port[0]); | 4489 | ata_std_ports(&probe_ent->port[0]); |
4441 | ata_std_ports(&probe_ent2->port[0]); | ||
4442 | |||
4443 | *ppe2 = probe_ent2; | ||
4444 | return probe_ent; | 4490 | return probe_ent; |
4445 | } | 4491 | } |
4446 | 4492 | ||
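ata_pci_init_native_mode() now takes a bitmap of ports to bring up and fills probe_ent->port[] compactly, bumping an index only for the channels actually requested and deriving n_ports from the final count; legacy setup is likewise split into a per-channel ata_pci_init_legacy_port(). A small userspace sketch of the fill-by-bitmask pattern is below; the PORT_* names and addresses are illustrative stand-ins, not the ATA_PORT_* definitions.

/* Userspace sketch of filling a compact array from a bitmask of requested
 * ports, mirroring the p++ pattern above. Constants are illustrative only. */
#include <stdio.h>

#define PORT_PRIMARY   (1 << 0)
#define PORT_SECONDARY (1 << 1)

struct port_addr { unsigned long cmd_addr; };

static int init_ports(struct port_addr *port, int mask)
{
	int p = 0;

	if (mask & PORT_PRIMARY)
		port[p++].cmd_addr = 0x1f0;	/* primary channel base */
	if (mask & PORT_SECONDARY)
		port[p++].cmd_addr = 0x170;	/* secondary channel base */

	return p;				/* becomes n_ports */
}

int main(void)
{
	struct port_addr port[2];
	int n = init_ports(port, PORT_PRIMARY | PORT_SECONDARY);

	for (int i = 0; i < n; i++)
		printf("port %d at 0x%lx\n", i, port[i].cmd_addr);
	return 0;
}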
@@ -4469,7 +4515,7 @@ ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port, | |||
4469 | int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | 4515 | int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, |
4470 | unsigned int n_ports) | 4516 | unsigned int n_ports) |
4471 | { | 4517 | { |
4472 | struct ata_probe_ent *probe_ent, *probe_ent2 = NULL; | 4518 | struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL; |
4473 | struct ata_port_info *port[2]; | 4519 | struct ata_port_info *port[2]; |
4474 | u8 tmp8, mask; | 4520 | u8 tmp8, mask; |
4475 | unsigned int legacy_mode = 0; | 4521 | unsigned int legacy_mode = 0; |
@@ -4486,7 +4532,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
4486 | 4532 | ||
4487 | if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0 | 4533 | if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0 |
4488 | && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { | 4534 | && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { |
4489 | /* TODO: support transitioning to native mode? */ | 4535 | /* TODO: What if one channel is in native mode ... */ |
4490 | pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); | 4536 | pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); |
4491 | mask = (1 << 2) | (1 << 0); | 4537 | mask = (1 << 2) | (1 << 0); |
4492 | if ((tmp8 & mask) != mask) | 4538 | if ((tmp8 & mask) != mask) |
@@ -4494,11 +4540,20 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
4494 | } | 4540 | } |
4495 | 4541 | ||
4496 | /* FIXME... */ | 4542 | /* FIXME... */ |
4497 | if ((!legacy_mode) && (n_ports > 1)) { | 4543 | if ((!legacy_mode) && (n_ports > 2)) { |
4498 | printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n"); | 4544 | printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n"); |
4499 | return -EINVAL; | 4545 | n_ports = 2; |
4546 | /* For now */ | ||
4500 | } | 4547 | } |
4501 | 4548 | ||
4549 | /* FIXME: Really for ATA it isn't safe because the device may be | ||
4550 | multi-purpose and we want to leave it alone if it was already | ||
4551 | enabled. Secondly for shared use as Arjan says we want refcounting | ||
4552 | |||
4553 | Checking dev->is_enabled is insufficient as this is not set at | ||
4554 | boot for the primary video which is BIOS enabled | ||
4555 | */ | ||
4556 | |||
4502 | rc = pci_enable_device(pdev); | 4557 | rc = pci_enable_device(pdev); |
4503 | if (rc) | 4558 | if (rc) |
4504 | return rc; | 4559 | return rc; |
@@ -4509,6 +4564,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
4509 | goto err_out; | 4564 | goto err_out; |
4510 | } | 4565 | } |
4511 | 4566 | ||
4567 | /* FIXME: Should use platform specific mappers for legacy port ranges */ | ||
4512 | if (legacy_mode) { | 4568 | if (legacy_mode) { |
4513 | if (!request_region(0x1f0, 8, "libata")) { | 4569 | if (!request_region(0x1f0, 8, "libata")) { |
4514 | struct resource *conflict, res; | 4570 | struct resource *conflict, res; |
@@ -4553,10 +4609,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
4553 | goto err_out_regions; | 4609 | goto err_out_regions; |
4554 | 4610 | ||
4555 | if (legacy_mode) { | 4611 | if (legacy_mode) { |
4556 | probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2); | 4612 | if (legacy_mode & (1 << 0)) |
4557 | } else | 4613 | probe_ent = ata_pci_init_legacy_port(pdev, port, 0); |
4558 | probe_ent = ata_pci_init_native_mode(pdev, port); | 4614 | if (legacy_mode & (1 << 1)) |
4559 | if (!probe_ent) { | 4615 | probe_ent2 = ata_pci_init_legacy_port(pdev, port, 1); |
4616 | } else { | ||
4617 | if (n_ports == 2) | ||
4618 | probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); | ||
4619 | else | ||
4620 | probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY); | ||
4621 | } | ||
4622 | if (!probe_ent && !probe_ent2) { | ||
4560 | rc = -ENOMEM; | 4623 | rc = -ENOMEM; |
4561 | goto err_out_regions; | 4624 | goto err_out_regions; |
4562 | } | 4625 | } |
@@ -4668,6 +4731,27 @@ static void __exit ata_exit(void) | |||
4668 | module_init(ata_init); | 4731 | module_init(ata_init); |
4669 | module_exit(ata_exit); | 4732 | module_exit(ata_exit); |
4670 | 4733 | ||
4734 | static unsigned long ratelimit_time; | ||
4735 | static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED; | ||
4736 | |||
4737 | int ata_ratelimit(void) | ||
4738 | { | ||
4739 | int rc; | ||
4740 | unsigned long flags; | ||
4741 | |||
4742 | spin_lock_irqsave(&ata_ratelimit_lock, flags); | ||
4743 | |||
4744 | if (time_after(jiffies, ratelimit_time)) { | ||
4745 | rc = 1; | ||
4746 | ratelimit_time = jiffies + (HZ/5); | ||
4747 | } else | ||
4748 | rc = 0; | ||
4749 | |||
4750 | spin_unlock_irqrestore(&ata_ratelimit_lock, flags); | ||
4751 | |||
4752 | return rc; | ||
4753 | } | ||
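ata_ratelimit() above lets callers print at most one message every HZ/5 jiffies by testing and advancing a spinlock-protected timestamp. Below is a rough userspace analogue using CLOCK_MONOTONIC and a pthread mutex in place of jiffies and the spinlock; the 200 ms window mirrors HZ/5, everything else is an assumption of the sketch.

/* Userspace analogue of the jiffies-based rate limiter added above:
 * returns 1 at most once every 200 ms, 0 otherwise. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t rl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct timespec rl_next;

static int ratelimit(void)
{
	struct timespec now;
	int ok = 0;

	clock_gettime(CLOCK_MONOTONIC, &now);

	pthread_mutex_lock(&rl_lock);
	if (now.tv_sec > rl_next.tv_sec ||
	    (now.tv_sec == rl_next.tv_sec && now.tv_nsec >= rl_next.tv_nsec)) {
		ok = 1;
		rl_next = now;
		rl_next.tv_nsec += 200 * 1000000L;	/* ~HZ/5 */
		if (rl_next.tv_nsec >= 1000000000L) {
			rl_next.tv_sec++;
			rl_next.tv_nsec -= 1000000000L;
		}
	}
	pthread_mutex_unlock(&rl_lock);

	return ok;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		if (ratelimit())
			printf("message %d let through\n", i);
	return 0;
}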
4754 | |||
4671 | /* | 4755 | /* |
4672 | * libata is essentially a library of internal helper functions for | 4756 | * libata is essentially a library of internal helper functions for |
4673 | * low-level ATA host controller drivers. As such, the API/ABI is | 4757 | * low-level ATA host controller drivers. As such, the API/ABI is |
@@ -4709,6 +4793,7 @@ EXPORT_SYMBOL_GPL(sata_phy_reset); | |||
4709 | EXPORT_SYMBOL_GPL(__sata_phy_reset); | 4793 | EXPORT_SYMBOL_GPL(__sata_phy_reset); |
4710 | EXPORT_SYMBOL_GPL(ata_bus_reset); | 4794 | EXPORT_SYMBOL_GPL(ata_bus_reset); |
4711 | EXPORT_SYMBOL_GPL(ata_port_disable); | 4795 | EXPORT_SYMBOL_GPL(ata_port_disable); |
4796 | EXPORT_SYMBOL_GPL(ata_ratelimit); | ||
4712 | EXPORT_SYMBOL_GPL(ata_scsi_ioctl); | 4797 | EXPORT_SYMBOL_GPL(ata_scsi_ioctl); |
4713 | EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); | 4798 | EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); |
4714 | EXPORT_SYMBOL_GPL(ata_scsi_error); | 4799 | EXPORT_SYMBOL_GPL(ata_scsi_error); |
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c index ee3f1050fb5f..4cf43de4060e 100644 --- a/drivers/scsi/libata-scsi.c +++ b/drivers/scsi/libata-scsi.c | |||
@@ -49,6 +49,14 @@ static struct ata_device * | |||
49 | ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev); | 49 | ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev); |
50 | 50 | ||
51 | 51 | ||
52 | static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, | ||
53 | void (*done)(struct scsi_cmnd *)) | ||
54 | { | ||
55 | ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0); | ||
56 | /* "Invalid field in cbd" */ | ||
57 | done(cmd); | ||
58 | } | ||
59 | |||
52 | /** | 60 | /** |
53 | * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd. | 61 | * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd. |
54 | * @sdev: SCSI device for which BIOS geometry is to be determined | 62 | * @sdev: SCSI device for which BIOS geometry is to be determined |
@@ -182,7 +190,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat) | |||
182 | { | 190 | { |
183 | struct scsi_cmnd *cmd = qc->scsicmd; | 191 | struct scsi_cmnd *cmd = qc->scsicmd; |
184 | u8 err = 0; | 192 | u8 err = 0; |
185 | unsigned char *sb = cmd->sense_buffer; | ||
186 | /* Based on the 3ware driver translation table */ | 193 | /* Based on the 3ware driver translation table */ |
187 | static unsigned char sense_table[][4] = { | 194 | static unsigned char sense_table[][4] = { |
188 | /* BBD|ECC|ID|MAR */ | 195 | /* BBD|ECC|ID|MAR */ |
@@ -225,8 +232,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat) | |||
225 | }; | 232 | }; |
226 | int i = 0; | 233 | int i = 0; |
227 | 234 | ||
228 | cmd->result = SAM_STAT_CHECK_CONDITION; | ||
229 | |||
230 | /* | 235 | /* |
231 | * Is this an error we can process/parse | 236 | * Is this an error we can process/parse |
232 | */ | 237 | */ |
@@ -281,11 +286,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat) | |||
281 | /* Look for best matches first */ | 286 | /* Look for best matches first */ |
282 | if((sense_table[i][0] & err) == sense_table[i][0]) | 287 | if((sense_table[i][0] & err) == sense_table[i][0]) |
283 | { | 288 | { |
284 | sb[0] = 0x70; | 289 | ata_scsi_set_sense(cmd, sense_table[i][1] /* sk */, |
285 | sb[2] = sense_table[i][1]; | 290 | sense_table[i][2] /* asc */, |
286 | sb[7] = 0x0a; | 291 | sense_table[i][3] /* ascq */ ); |
287 | sb[12] = sense_table[i][2]; | ||
288 | sb[13] = sense_table[i][3]; | ||
289 | return; | 292 | return; |
290 | } | 293 | } |
291 | i++; | 294 | i++; |
@@ -300,11 +303,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat) | |||
300 | { | 303 | { |
301 | if(stat_table[i][0] & drv_stat) | 304 | if(stat_table[i][0] & drv_stat) |
302 | { | 305 | { |
303 | sb[0] = 0x70; | 306 | ata_scsi_set_sense(cmd, stat_table[i][1] /* sk */, |
304 | sb[2] = stat_table[i][1]; | 307 | stat_table[i][2] /* asc */, |
305 | sb[7] = 0x0a; | 308 | stat_table[i][3] /* ascq */ ); |
306 | sb[12] = stat_table[i][2]; | ||
307 | sb[13] = stat_table[i][3]; | ||
308 | return; | 309 | return; |
309 | } | 310 | } |
310 | i++; | 311 | i++; |
@@ -313,15 +314,12 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat) | |||
313 | printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat); | 314 | printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat); |
314 | /* additional-sense-code[-qualifier] */ | 315 | /* additional-sense-code[-qualifier] */ |
315 | 316 | ||
316 | sb[0] = 0x70; | ||
317 | sb[2] = MEDIUM_ERROR; | ||
318 | sb[7] = 0x0A; | ||
319 | if (cmd->sc_data_direction == DMA_FROM_DEVICE) { | 317 | if (cmd->sc_data_direction == DMA_FROM_DEVICE) { |
320 | sb[12] = 0x11; /* "unrecovered read error" */ | 318 | ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0x11, 0x4); |
321 | sb[13] = 0x04; | 319 | /* "unrecovered read error" */ |
322 | } else { | 320 | } else { |
323 | sb[12] = 0x0C; /* "write error - */ | 321 | ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0xc, 0x2); |
324 | sb[13] = 0x02; /* auto-reallocation failed" */ | 322 | /* "write error - auto-reallocation failed" */ |
325 | } | 323 | } |
326 | } | 324 | } |
327 | 325 | ||
@@ -440,15 +438,26 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc, | |||
440 | ; /* ignore IMMED bit, violates sat-r05 */ | 438 | ; /* ignore IMMED bit, violates sat-r05 */ |
441 | } | 439 | } |
442 | if (scsicmd[4] & 0x2) | 440 | if (scsicmd[4] & 0x2) |
443 | return 1; /* LOEJ bit set not supported */ | 441 | goto invalid_fld; /* LOEJ bit set not supported */ |
444 | if (((scsicmd[4] >> 4) & 0xf) != 0) | 442 | if (((scsicmd[4] >> 4) & 0xf) != 0) |
445 | return 1; /* power conditions not supported */ | 443 | goto invalid_fld; /* power conditions not supported */ |
446 | if (scsicmd[4] & 0x1) { | 444 | if (scsicmd[4] & 0x1) { |
447 | tf->nsect = 1; /* 1 sector, lba=0 */ | 445 | tf->nsect = 1; /* 1 sector, lba=0 */ |
448 | tf->lbah = 0x0; | 446 | |
449 | tf->lbam = 0x0; | 447 | if (qc->dev->flags & ATA_DFLAG_LBA) { |
450 | tf->lbal = 0x0; | 448 | qc->tf.flags |= ATA_TFLAG_LBA; |
451 | tf->device |= ATA_LBA; | 449 | |
450 | tf->lbah = 0x0; | ||
451 | tf->lbam = 0x0; | ||
452 | tf->lbal = 0x0; | ||
453 | tf->device |= ATA_LBA; | ||
454 | } else { | ||
455 | /* CHS */ | ||
456 | tf->lbal = 0x1; /* sect */ | ||
457 | tf->lbam = 0x0; /* cyl low */ | ||
458 | tf->lbah = 0x0; /* cyl high */ | ||
459 | } | ||
460 | |||
452 | tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ | 461 | tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ |
453 | } else { | 462 | } else { |
454 | tf->nsect = 0; /* time period value (0 implies now) */ | 463 | tf->nsect = 0; /* time period value (0 implies now) */ |
@@ -463,6 +472,11 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc, | |||
463 | */ | 472 | */ |
464 | 473 | ||
465 | return 0; | 474 | return 0; |
475 | |||
476 | invalid_fld: | ||
477 | ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0); | ||
478 | /* "Invalid field in cbd" */ | ||
479 | return 1; | ||
466 | } | 480 | } |
467 | 481 | ||
468 | 482 | ||
@@ -498,6 +512,99 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) | |||
498 | } | 512 | } |
499 | 513 | ||
500 | /** | 514 | /** |
515 | * scsi_6_lba_len - Get LBA and transfer length | ||
516 | * @scsicmd: SCSI command to translate | ||
517 | * | ||
518 | * Calculate LBA and transfer length for 6-byte commands. | ||
519 | * | ||
520 | * RETURNS: | ||
521 | * @plba: the LBA | ||
522 | * @plen: the transfer length | ||
523 | */ | ||
524 | |||
525 | static void scsi_6_lba_len(u8 *scsicmd, u64 *plba, u32 *plen) | ||
526 | { | ||
527 | u64 lba = 0; | ||
528 | u32 len = 0; | ||
529 | |||
530 | VPRINTK("six-byte command\n"); | ||
531 | |||
532 | lba |= ((u64)scsicmd[2]) << 8; | ||
533 | lba |= ((u64)scsicmd[3]); | ||
534 | |||
535 | len |= ((u32)scsicmd[4]); | ||
536 | |||
537 | *plba = lba; | ||
538 | *plen = len; | ||
539 | } | ||
540 | |||
541 | /** | ||
542 | * scsi_10_lba_len - Get LBA and transfer length | ||
543 | * @scsicmd: SCSI command to translate | ||
544 | * | ||
545 | * Calculate LBA and transfer length for 10-byte commands. | ||
546 | * | ||
547 | * RETURNS: | ||
548 | * @plba: the LBA | ||
549 | * @plen: the transfer length | ||
550 | */ | ||
551 | |||
552 | static void scsi_10_lba_len(u8 *scsicmd, u64 *plba, u32 *plen) | ||
553 | { | ||
554 | u64 lba = 0; | ||
555 | u32 len = 0; | ||
556 | |||
557 | VPRINTK("ten-byte command\n"); | ||
558 | |||
559 | lba |= ((u64)scsicmd[2]) << 24; | ||
560 | lba |= ((u64)scsicmd[3]) << 16; | ||
561 | lba |= ((u64)scsicmd[4]) << 8; | ||
562 | lba |= ((u64)scsicmd[5]); | ||
563 | |||
564 | len |= ((u32)scsicmd[7]) << 8; | ||
565 | len |= ((u32)scsicmd[8]); | ||
566 | |||
567 | *plba = lba; | ||
568 | *plen = len; | ||
569 | } | ||
570 | |||
571 | /** | ||
572 | * scsi_16_lba_len - Get LBA and transfer length | ||
573 | * @scsicmd: SCSI command to translate | ||
574 | * | ||
575 | * Calculate LBA and transfer length for 16-byte commands. | ||
576 | * | ||
577 | * RETURNS: | ||
578 | * @plba: the LBA | ||
579 | * @plen: the transfer length | ||
580 | */ | ||
581 | |||
582 | static void scsi_16_lba_len(u8 *scsicmd, u64 *plba, u32 *plen) | ||
583 | { | ||
584 | u64 lba = 0; | ||
585 | u32 len = 0; | ||
586 | |||
587 | VPRINTK("sixteen-byte command\n"); | ||
588 | |||
589 | lba |= ((u64)scsicmd[2]) << 56; | ||
590 | lba |= ((u64)scsicmd[3]) << 48; | ||
591 | lba |= ((u64)scsicmd[4]) << 40; | ||
592 | lba |= ((u64)scsicmd[5]) << 32; | ||
593 | lba |= ((u64)scsicmd[6]) << 24; | ||
594 | lba |= ((u64)scsicmd[7]) << 16; | ||
595 | lba |= ((u64)scsicmd[8]) << 8; | ||
596 | lba |= ((u64)scsicmd[9]); | ||
597 | |||
598 | len |= ((u32)scsicmd[10]) << 24; | ||
599 | len |= ((u32)scsicmd[11]) << 16; | ||
600 | len |= ((u32)scsicmd[12]) << 8; | ||
601 | len |= ((u32)scsicmd[13]); | ||
602 | |||
603 | *plba = lba; | ||
604 | *plen = len; | ||
605 | } | ||
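The new scsi_6_lba_len(), scsi_10_lba_len() and scsi_16_lba_len() helpers centralize the big-endian CDB decoding that the translators previously open-coded. A standalone example of the 10-byte case is below; the sample CDB bytes are made up.

/* Decode the big-endian LBA and transfer length of a 10-byte CDB,
 * the same byte layout the helpers above parse. Sample CDB is invented. */
#include <stdint.h>
#include <stdio.h>

static void lba_len_10(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
{
	*lba = ((uint64_t)cdb[2] << 24) | ((uint64_t)cdb[3] << 16) |
	       ((uint64_t)cdb[4] << 8)  |  (uint64_t)cdb[5];
	*len = ((uint32_t)cdb[7] << 8) | cdb[8];
}

int main(void)
{
	/* READ(10), LBA 0x12345678, 16 blocks */
	uint8_t cdb[10] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78, 0, 0x00, 0x10, 0 };
	uint64_t lba;
	uint32_t len;

	lba_len_10(cdb, &lba, &len);
	printf("lba=0x%llx len=%u\n", (unsigned long long)lba, len);
	return 0;
}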
606 | |||
607 | /** | ||
501 | * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one | 608 | * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one |
502 | * @qc: Storage for translated ATA taskfile | 609 | * @qc: Storage for translated ATA taskfile |
503 | * @scsicmd: SCSI command to translate | 610 | * @scsicmd: SCSI command to translate |
@@ -514,79 +621,102 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) | |||
514 | static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) | 621 | static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) |
515 | { | 622 | { |
516 | struct ata_taskfile *tf = &qc->tf; | 623 | struct ata_taskfile *tf = &qc->tf; |
624 | struct ata_device *dev = qc->dev; | ||
625 | unsigned int lba = tf->flags & ATA_TFLAG_LBA; | ||
517 | unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; | 626 | unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; |
518 | u64 dev_sectors = qc->dev->n_sectors; | 627 | u64 dev_sectors = qc->dev->n_sectors; |
519 | u64 sect = 0; | 628 | u64 block; |
520 | u32 n_sect = 0; | 629 | u32 n_block; |
521 | 630 | ||
522 | tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | 631 | tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; |
523 | tf->protocol = ATA_PROT_NODATA; | 632 | tf->protocol = ATA_PROT_NODATA; |
524 | tf->device |= ATA_LBA; | ||
525 | |||
526 | if (scsicmd[0] == VERIFY) { | ||
527 | sect |= ((u64)scsicmd[2]) << 24; | ||
528 | sect |= ((u64)scsicmd[3]) << 16; | ||
529 | sect |= ((u64)scsicmd[4]) << 8; | ||
530 | sect |= ((u64)scsicmd[5]); | ||
531 | |||
532 | n_sect |= ((u32)scsicmd[7]) << 8; | ||
533 | n_sect |= ((u32)scsicmd[8]); | ||
534 | } | ||
535 | |||
536 | else if (scsicmd[0] == VERIFY_16) { | ||
537 | sect |= ((u64)scsicmd[2]) << 56; | ||
538 | sect |= ((u64)scsicmd[3]) << 48; | ||
539 | sect |= ((u64)scsicmd[4]) << 40; | ||
540 | sect |= ((u64)scsicmd[5]) << 32; | ||
541 | sect |= ((u64)scsicmd[6]) << 24; | ||
542 | sect |= ((u64)scsicmd[7]) << 16; | ||
543 | sect |= ((u64)scsicmd[8]) << 8; | ||
544 | sect |= ((u64)scsicmd[9]); | ||
545 | |||
546 | n_sect |= ((u32)scsicmd[10]) << 24; | ||
547 | n_sect |= ((u32)scsicmd[11]) << 16; | ||
548 | n_sect |= ((u32)scsicmd[12]) << 8; | ||
549 | n_sect |= ((u32)scsicmd[13]); | ||
550 | } | ||
551 | 633 | ||
634 | if (scsicmd[0] == VERIFY) | ||
635 | scsi_10_lba_len(scsicmd, &block, &n_block); | ||
636 | else if (scsicmd[0] == VERIFY_16) | ||
637 | scsi_16_lba_len(scsicmd, &block, &n_block); | ||
552 | else | 638 | else |
553 | return 1; | 639 | goto invalid_fld; |
554 | 640 | ||
555 | if (!n_sect) | 641 | if (!n_block) |
556 | return 1; | 642 | goto nothing_to_do; |
557 | if (sect >= dev_sectors) | 643 | if (block >= dev_sectors) |
558 | return 1; | 644 | goto out_of_range; |
559 | if ((sect + n_sect) > dev_sectors) | 645 | if ((block + n_block) > dev_sectors) |
560 | return 1; | 646 | goto out_of_range; |
561 | if (lba48) { | 647 | if (lba48) { |
562 | if (n_sect > (64 * 1024)) | 648 | if (n_block > (64 * 1024)) |
563 | return 1; | 649 | goto invalid_fld; |
564 | } else { | 650 | } else { |
565 | if (n_sect > 256) | 651 | if (n_block > 256) |
566 | return 1; | 652 | goto invalid_fld; |
567 | } | 653 | } |
568 | 654 | ||
569 | if (lba48) { | 655 | if (lba) { |
570 | tf->command = ATA_CMD_VERIFY_EXT; | 656 | if (lba48) { |
657 | tf->command = ATA_CMD_VERIFY_EXT; | ||
658 | |||
659 | tf->hob_nsect = (n_block >> 8) & 0xff; | ||
660 | |||
661 | tf->hob_lbah = (block >> 40) & 0xff; | ||
662 | tf->hob_lbam = (block >> 32) & 0xff; | ||
663 | tf->hob_lbal = (block >> 24) & 0xff; | ||
664 | } else { | ||
665 | tf->command = ATA_CMD_VERIFY; | ||
666 | |||
667 | tf->device |= (block >> 24) & 0xf; | ||
668 | } | ||
571 | 669 | ||
572 | tf->hob_nsect = (n_sect >> 8) & 0xff; | 670 | tf->nsect = n_block & 0xff; |
573 | 671 | ||
574 | tf->hob_lbah = (sect >> 40) & 0xff; | 672 | tf->lbah = (block >> 16) & 0xff; |
575 | tf->hob_lbam = (sect >> 32) & 0xff; | 673 | tf->lbam = (block >> 8) & 0xff; |
576 | tf->hob_lbal = (sect >> 24) & 0xff; | 674 | tf->lbal = block & 0xff; |
675 | |||
676 | tf->device |= ATA_LBA; | ||
577 | } else { | 677 | } else { |
678 | /* CHS */ | ||
679 | u32 sect, head, cyl, track; | ||
680 | |||
681 | /* Convert LBA to CHS */ | ||
682 | track = (u32)block / dev->sectors; | ||
683 | cyl = track / dev->heads; | ||
684 | head = track % dev->heads; | ||
685 | sect = (u32)block % dev->sectors + 1; | ||
686 | |||
687 | DPRINTK("block %u track %u cyl %u head %u sect %u\n", | ||
688 | (u32)block, track, cyl, head, sect); | ||
689 | |||
690 | /* Check whether the converted CHS can fit. | ||
691 | Cylinder: 0-65535 | ||
692 | Head: 0-15 | ||
693 | Sector: 1-255*/ | ||
694 | if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) | ||
695 | goto out_of_range; | ||
696 | |||
578 | tf->command = ATA_CMD_VERIFY; | 697 | tf->command = ATA_CMD_VERIFY; |
579 | 698 | tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ | |
580 | tf->device |= (sect >> 24) & 0xf; | 699 | tf->lbal = sect; |
700 | tf->lbam = cyl; | ||
701 | tf->lbah = cyl >> 8; | ||
702 | tf->device |= head; | ||
581 | } | 703 | } |
582 | 704 | ||
583 | tf->nsect = n_sect & 0xff; | 705 | return 0; |
584 | 706 | ||
585 | tf->lbah = (sect >> 16) & 0xff; | 707 | invalid_fld: |
586 | tf->lbam = (sect >> 8) & 0xff; | 708 | ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0); |
587 | tf->lbal = sect & 0xff; | 709 | /* "Invalid field in cdb" */ |
710 | return 1; | ||
588 | 711 | ||
589 | return 0; | 712 | out_of_range: |
713 | ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0); | ||
714 | /* "Logical Block Address out of range" */ | ||
715 | return 1; | ||
716 | |||
717 | nothing_to_do: | ||
718 | qc->scsicmd->result = SAM_STAT_GOOD; | ||
719 | return 1; | ||
590 | } | 720 | } |
591 | 721 | ||
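For devices without LBA, the rewritten VERIFY translation (and the read/write path further down) converts the SCSI block address to cylinder/head/sector from the drive's logical geometry and rejects anything that will not fit in a 16-bit cylinder, 4-bit head and 8-bit sector. The conversion in isolation looks like the sketch below; the geometry numbers are arbitrary examples.

/* LBA -> CHS conversion with the same range checks as the patch:
 * cylinder 0-65535, head 0-15, sector 1-255. Geometry values are examples. */
#include <stdint.h>
#include <stdio.h>

static int lba_to_chs(uint64_t block, uint32_t heads, uint32_t sectors,
		      uint32_t *cyl, uint32_t *head, uint32_t *sect)
{
	uint32_t track = (uint32_t)block / sectors;

	*cyl  = track / heads;
	*head = track % heads;
	*sect = (uint32_t)block % sectors + 1;	/* sectors are 1-based */

	/* Reject addresses the taskfile registers cannot express. */
	if ((*cyl >> 16) || (*head >> 4) || (*sect >> 8) || !*sect)
		return -1;
	return 0;
}

int main(void)
{
	uint32_t c, h, s;

	if (lba_to_chs(123456, 16, 63, &c, &h, &s) == 0)
		printf("cyl=%u head=%u sect=%u\n", c, h, s);
	return 0;
}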
592 | /** | 722 | /** |
@@ -612,11 +742,14 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) | |||
612 | static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) | 742 | static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) |
613 | { | 743 | { |
614 | struct ata_taskfile *tf = &qc->tf; | 744 | struct ata_taskfile *tf = &qc->tf; |
745 | struct ata_device *dev = qc->dev; | ||
746 | unsigned int lba = tf->flags & ATA_TFLAG_LBA; | ||
615 | unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; | 747 | unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; |
748 | u64 block; | ||
749 | u32 n_block; | ||
616 | 750 | ||
617 | tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | 751 | tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; |
618 | tf->protocol = qc->dev->xfer_protocol; | 752 | tf->protocol = qc->dev->xfer_protocol; |
619 | tf->device |= ATA_LBA; | ||
620 | 753 | ||
621 | if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 || | 754 | if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 || |
622 | scsicmd[0] == READ_16) { | 755 | scsicmd[0] == READ_16) { |
@@ -626,89 +759,115 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) | |||
626 | tf->flags |= ATA_TFLAG_WRITE; | 759 | tf->flags |= ATA_TFLAG_WRITE; |
627 | } | 760 | } |
628 | 761 | ||
629 | if (scsicmd[0] == READ_10 || scsicmd[0] == WRITE_10) { | 762 | /* Calculate the SCSI LBA and transfer length. */ |
630 | if (lba48) { | 763 | switch (scsicmd[0]) { |
631 | tf->hob_nsect = scsicmd[7]; | 764 | case READ_10: |
632 | tf->hob_lbal = scsicmd[2]; | 765 | case WRITE_10: |
633 | 766 | scsi_10_lba_len(scsicmd, &block, &n_block); | |
634 | qc->nsect = ((unsigned int)scsicmd[7] << 8) | | 767 | break; |
635 | scsicmd[8]; | 768 | case READ_6: |
636 | } else { | 769 | case WRITE_6: |
637 | /* if we don't support LBA48 addressing, the request | 770 | scsi_6_lba_len(scsicmd, &block, &n_block); |
638 | * -may- be too large. */ | ||
639 | if ((scsicmd[2] & 0xf0) || scsicmd[7]) | ||
640 | return 1; | ||
641 | |||
642 | /* stores LBA27:24 in lower 4 bits of device reg */ | ||
643 | tf->device |= scsicmd[2]; | ||
644 | 771 | ||
645 | qc->nsect = scsicmd[8]; | 772 | /* for 6-byte r/w commands, transfer length 0 |
646 | } | 773 | * means 256 blocks of data, not 0 block. |
774 | */ | ||
775 | if (!n_block) | ||
776 | n_block = 256; | ||
777 | break; | ||
778 | case READ_16: | ||
779 | case WRITE_16: | ||
780 | scsi_16_lba_len(scsicmd, &block, &n_block); | ||
781 | break; | ||
782 | default: | ||
783 | DPRINTK("no-byte command\n"); | ||
784 | goto invalid_fld; | ||
785 | } | ||
647 | 786 | ||
648 | tf->nsect = scsicmd[8]; | 787 | /* Check and compose ATA command */ |
649 | tf->lbal = scsicmd[5]; | 788 | if (!n_block) |
650 | tf->lbam = scsicmd[4]; | 789 | /* For 10-byte and 16-byte SCSI R/W commands, transfer |
651 | tf->lbah = scsicmd[3]; | 790 | * length 0 means transfer 0 block of data. |
791 | * However, for ATA R/W commands, sector count 0 means | ||
792 | * 256 or 65536 sectors, not 0 sectors as in SCSI. | ||
793 | */ | ||
794 | goto nothing_to_do; | ||
652 | 795 | ||
653 | VPRINTK("ten-byte command\n"); | 796 | if (lba) { |
654 | if (qc->nsect == 0) /* we don't support length==0 cmds */ | 797 | if (lba48) { |
655 | return 1; | 798 | /* The request -may- be too large for LBA48. */ |
656 | return 0; | 799 | if ((block >> 48) || (n_block > 65536)) |
657 | } | 800 | goto out_of_range; |
658 | 801 | ||
659 | if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) { | 802 | tf->hob_nsect = (n_block >> 8) & 0xff; |
660 | qc->nsect = tf->nsect = scsicmd[4]; | ||
661 | if (!qc->nsect) { | ||
662 | qc->nsect = 256; | ||
663 | if (lba48) | ||
664 | tf->hob_nsect = 1; | ||
665 | } | ||
666 | 803 | ||
667 | tf->lbal = scsicmd[3]; | 804 | tf->hob_lbah = (block >> 40) & 0xff; |
668 | tf->lbam = scsicmd[2]; | 805 | tf->hob_lbam = (block >> 32) & 0xff; |
669 | tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */ | 806 | tf->hob_lbal = (block >> 24) & 0xff; |
807 | } else { | ||
808 | /* LBA28 */ | ||
670 | 809 | ||
671 | VPRINTK("six-byte command\n"); | 810 | /* The request -may- be too large for LBA28. */ |
672 | return 0; | 811 | if ((block >> 28) || (n_block > 256)) |
673 | } | 812 | goto out_of_range; |
674 | 813 | ||
675 | if (scsicmd[0] == READ_16 || scsicmd[0] == WRITE_16) { | 814 | tf->device |= (block >> 24) & 0xf; |
676 | /* rule out impossible LBAs and sector counts */ | 815 | } |
677 | if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11]) | ||
678 | return 1; | ||
679 | 816 | ||
680 | if (lba48) { | 817 | qc->nsect = n_block; |
681 | tf->hob_nsect = scsicmd[12]; | 818 | tf->nsect = n_block & 0xff; |
682 | tf->hob_lbal = scsicmd[6]; | ||
683 | tf->hob_lbam = scsicmd[5]; | ||
684 | tf->hob_lbah = scsicmd[4]; | ||
685 | 819 | ||
686 | qc->nsect = ((unsigned int)scsicmd[12] << 8) | | 820 | tf->lbah = (block >> 16) & 0xff; |
687 | scsicmd[13]; | 821 | tf->lbam = (block >> 8) & 0xff; |
688 | } else { | 822 | tf->lbal = block & 0xff; |
689 | /* once again, filter out impossible non-zero values */ | ||
690 | if (scsicmd[4] || scsicmd[5] || scsicmd[12] || | ||
691 | (scsicmd[6] & 0xf0)) | ||
692 | return 1; | ||
693 | 823 | ||
694 | /* stores LBA27:24 in lower 4 bits of device reg */ | 824 | tf->device |= ATA_LBA; |
695 | tf->device |= scsicmd[6]; | 825 | } else { |
826 | /* CHS */ | ||
827 | u32 sect, head, cyl, track; | ||
828 | |||
829 | /* The request -may- be too large for CHS addressing. */ | ||
830 | if ((block >> 28) || (n_block > 256)) | ||
831 | goto out_of_range; | ||
832 | |||
833 | /* Convert LBA to CHS */ | ||
834 | track = (u32)block / dev->sectors; | ||
835 | cyl = track / dev->heads; | ||
836 | head = track % dev->heads; | ||
837 | sect = (u32)block % dev->sectors + 1; | ||
838 | |||
839 | DPRINTK("block %u track %u cyl %u head %u sect %u\n", | ||
840 | (u32)block, track, cyl, head, sect); | ||
841 | |||
842 | /* Check whether the converted CHS can fit. | ||
843 | Cylinder: 0-65535 | ||
844 | Head: 0-15 | ||
845 | Sector: 1-255*/ | ||
846 | if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) | ||
847 | goto out_of_range; | ||
848 | |||
849 | qc->nsect = n_block; | ||
850 | tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ | ||
851 | tf->lbal = sect; | ||
852 | tf->lbam = cyl; | ||
853 | tf->lbah = cyl >> 8; | ||
854 | tf->device |= head; | ||
855 | } | ||
696 | 856 | ||
697 | qc->nsect = scsicmd[13]; | 857 | return 0; |
698 | } | ||
699 | 858 | ||
700 | tf->nsect = scsicmd[13]; | 859 | invalid_fld: |
701 | tf->lbal = scsicmd[9]; | 860 | ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0); |
702 | tf->lbam = scsicmd[8]; | 861 | /* "Invalid field in cdb" */ |
703 | tf->lbah = scsicmd[7]; | 862 | return 1; |
704 | 863 | ||
705 | VPRINTK("sixteen-byte command\n"); | 864 | out_of_range: |
706 | if (qc->nsect == 0) /* we don't support length==0 cmds */ | 865 | ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0); |
707 | return 1; | 866 | /* "Logical Block Address out of range" */ |
708 | return 0; | 867 | return 1; |
709 | } | ||
710 | 868 | ||
711 | DPRINTK("no-byte command\n"); | 869 | nothing_to_do: |
870 | qc->scsicmd->result = SAM_STAT_GOOD; | ||
712 | return 1; | 871 | return 1; |
713 | } | 872 | } |
714 | 873 | ||
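On LBA devices the read/write translation above first checks that the address and sector count fit the addressing mode, then splits the block number across the taskfile: bits 27:24 go into the low nibble of the device register for LBA28, while LBA48 puts bits 47:24 into the hob_* shadow bytes. A minimal sketch of that packing, with invented struct and field names, follows.

/* Pack a 48-bit block address into LBA28 or LBA48 style register bytes,
 * mirroring the checks and shifts above. Struct/field names are invented. */
#include <stdint.h>
#include <stdio.h>

struct tf_regs {
	uint8_t hob_lbah, hob_lbam, hob_lbal;	/* LBA48 "previous" bytes */
	uint8_t lbah, lbam, lbal, device;
};

static int pack_lba(struct tf_regs *tf, uint64_t block, unsigned n_block,
		    int lba48)
{
	if (lba48) {
		if ((block >> 48) || n_block > 65536)
			return -1;		/* beyond LBA48 */
		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
	} else {
		if ((block >> 28) || n_block > 256)
			return -1;		/* beyond LBA28 */
		tf->device |= (block >> 24) & 0xf;
	}
	tf->lbah = (block >> 16) & 0xff;
	tf->lbam = (block >> 8) & 0xff;
	tf->lbal = block & 0xff;
	return 0;
}

int main(void)
{
	struct tf_regs tf = { 0 };

	if (pack_lba(&tf, 0x123456789aULL, 8, 1) == 0)
		printf("hob %02x %02x %02x low %02x %02x %02x\n",
		       tf.hob_lbah, tf.hob_lbam, tf.hob_lbal,
		       tf.lbah, tf.lbam, tf.lbal);
	return 0;
}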
@@ -741,6 +900,12 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) | |||
741 | * This function sets up an ata_queued_cmd structure for the | 900 | * This function sets up an ata_queued_cmd structure for the |
742 | * SCSI command, and sends that ata_queued_cmd to the hardware. | 901 | * SCSI command, and sends that ata_queued_cmd to the hardware. |
743 | * | 902 | * |
903 | * The xlat_func argument (actor) returns 0 if ready to execute | ||
904 | * ATA command, else 1 to finish translation. If 1 is returned | ||
905 | * then cmd->result (and possibly cmd->sense_buffer) are assumed | ||
906 | * to be set reflecting an error condition or clean (early) | ||
907 | * termination. | ||
908 | * | ||
744 | * LOCKING: | 909 | * LOCKING: |
745 | * spin_lock_irqsave(host_set lock) | 910 | * spin_lock_irqsave(host_set lock) |
746 | */ | 911 | */ |
@@ -757,7 +922,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev, | |||
757 | 922 | ||
758 | qc = ata_scsi_qc_new(ap, dev, cmd, done); | 923 | qc = ata_scsi_qc_new(ap, dev, cmd, done); |
759 | if (!qc) | 924 | if (!qc) |
760 | return; | 925 | goto err_mem; |
761 | 926 | ||
762 | /* data is present; dma-map it */ | 927 | /* data is present; dma-map it */ |
763 | if (cmd->sc_data_direction == DMA_FROM_DEVICE || | 928 | if (cmd->sc_data_direction == DMA_FROM_DEVICE || |
@@ -765,7 +930,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev, | |||
765 | if (unlikely(cmd->request_bufflen < 1)) { | 930 | if (unlikely(cmd->request_bufflen < 1)) { |
766 | printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", | 931 | printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", |
767 | ap->id, dev->devno); | 932 | ap->id, dev->devno); |
768 | goto err_out; | 933 | goto err_did; |
769 | } | 934 | } |
770 | 935 | ||
771 | if (cmd->use_sg) | 936 | if (cmd->use_sg) |
@@ -780,19 +945,28 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev, | |||
780 | qc->complete_fn = ata_scsi_qc_complete; | 945 | qc->complete_fn = ata_scsi_qc_complete; |
781 | 946 | ||
782 | if (xlat_func(qc, scsicmd)) | 947 | if (xlat_func(qc, scsicmd)) |
783 | goto err_out; | 948 | goto early_finish; |
784 | 949 | ||
785 | /* select device, send command to hardware */ | 950 | /* select device, send command to hardware */ |
786 | if (ata_qc_issue(qc)) | 951 | if (ata_qc_issue(qc)) |
787 | goto err_out; | 952 | goto err_did; |
788 | 953 | ||
789 | VPRINTK("EXIT\n"); | 954 | VPRINTK("EXIT\n"); |
790 | return; | 955 | return; |
791 | 956 | ||
792 | err_out: | 957 | early_finish: |
958 | ata_qc_free(qc); | ||
959 | done(cmd); | ||
960 | DPRINTK("EXIT - early finish (good or error)\n"); | ||
961 | return; | ||
962 | |||
963 | err_did: | ||
793 | ata_qc_free(qc); | 964 | ata_qc_free(qc); |
794 | ata_bad_cdb(cmd, done); | 965 | err_mem: |
795 | DPRINTK("EXIT - badcmd\n"); | 966 | cmd->result = (DID_ERROR << 16); |
967 | done(cmd); | ||
968 | DPRINTK("EXIT - internal\n"); | ||
969 | return; | ||
796 | } | 970 | } |
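ata_scsi_translate() now separates three exits: early_finish when the translator has already set cmd->result (clean completion or sense data), err_did when issuing fails after a qc exists, and err_mem when no qc could be allocated, each of which still completes the scmd. The sketch below shows only the goto-ladder shape of that unwinding with dummy resources; it is an illustration of the idiom, not the libata code.

/* Sketch of the goto-based unwind pattern used above: each failure point
 * jumps to a label that releases exactly what was acquired so far. */
#include <stdio.h>
#include <stdlib.h>

static int translate(int fail_at)
{
	char *qc = malloc(64);		/* stand-in for the queued command */
	if (!qc)
		goto err_mem;

	if (fail_at == 1)
		goto early_finish;	/* translator finished the command */
	if (fail_at == 2)
		goto err_did;		/* issue failed, report an error */

	printf("command issued\n");
	free(qc);
	return 0;

early_finish:
	free(qc);
	printf("early finish (result already set)\n");
	return 0;

err_did:
	free(qc);
err_mem:
	printf("internal error, completing command with DID_ERROR\n");
	return -1;
}

int main(void)
{
	translate(0);
	translate(1);
	translate(2);
	return 0;
}

Keeping the labels ordered from the latest failure point to the earliest means each path frees only what was actually acquired.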
797 | 971 | ||
798 | /** | 972 | /** |
@@ -859,7 +1033,8 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) | |||
859 | * Mapping the response buffer, calling the command's handler, | 1033 | * Mapping the response buffer, calling the command's handler, |
860 | * and handling the handler's return value. This return value | 1034 | * and handling the handler's return value. This return value |
861 | * indicates whether the handler wishes the SCSI command to be | 1035 | * indicates whether the handler wishes the SCSI command to be |
862 | * completed successfully, or not. | 1036 | * completed successfully (0), or not (in which case cmd->result |
1037 | * and sense buffer are assumed to be set). | ||
863 | * | 1038 | * |
864 | * LOCKING: | 1039 | * LOCKING: |
865 | * spin_lock_irqsave(host_set lock) | 1040 | * spin_lock_irqsave(host_set lock) |
@@ -878,12 +1053,9 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args, | |||
878 | rc = actor(args, rbuf, buflen); | 1053 | rc = actor(args, rbuf, buflen); |
879 | ata_scsi_rbuf_put(cmd, rbuf); | 1054 | ata_scsi_rbuf_put(cmd, rbuf); |
880 | 1055 | ||
881 | if (rc) | 1056 | if (rc == 0) |
882 | ata_bad_cdb(cmd, args->done); | ||
883 | else { | ||
884 | cmd->result = SAM_STAT_GOOD; | 1057 | cmd->result = SAM_STAT_GOOD; |
885 | args->done(cmd); | 1058 | args->done(cmd); |
886 | } | ||
887 | } | 1059 | } |
888 | 1060 | ||
889 | /** | 1061 | /** |
@@ -1189,8 +1361,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, | |||
1189 | * in the same manner) | 1361 | * in the same manner) |
1190 | */ | 1362 | */ |
1191 | page_control = scsicmd[2] >> 6; | 1363 | page_control = scsicmd[2] >> 6; |
1192 | if ((page_control != 0) && (page_control != 3)) | 1364 | switch (page_control) { |
1193 | return 1; | 1365 | case 0: /* current */ |
1366 | break; /* supported */ | ||
1367 | case 3: /* saved */ | ||
1368 | goto saving_not_supp; | ||
1369 | case 1: /* changeable */ | ||
1370 | case 2: /* defaults */ | ||
1371 | default: | ||
1372 | goto invalid_fld; | ||
1373 | } | ||
1194 | 1374 | ||
1195 | if (six_byte) | 1375 | if (six_byte) |
1196 | output_len = 4; | 1376 | output_len = 4; |
@@ -1221,7 +1401,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, | |||
1221 | break; | 1401 | break; |
1222 | 1402 | ||
1223 | default: /* invalid page code */ | 1403 | default: /* invalid page code */ |
1224 | return 1; | 1404 | goto invalid_fld; |
1225 | } | 1405 | } |
1226 | 1406 | ||
1227 | if (six_byte) { | 1407 | if (six_byte) { |
@@ -1234,6 +1414,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, | |||
1234 | } | 1414 | } |
1235 | 1415 | ||
1236 | return 0; | 1416 | return 0; |
1417 | |||
1418 | invalid_fld: | ||
1419 | ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0); | ||
1420 | /* "Invalid field in cbd" */ | ||
1421 | return 1; | ||
1422 | |||
1423 | saving_not_supp: | ||
1424 | ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0); | ||
1425 | /* "Saving parameters not supported" */ | ||
1426 | return 1; | ||
1237 | } | 1427 | } |
1238 | 1428 | ||
1239 | /** | 1429 | /** |
@@ -1256,10 +1446,20 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, | |||
1256 | 1446 | ||
1257 | VPRINTK("ENTER\n"); | 1447 | VPRINTK("ENTER\n"); |
1258 | 1448 | ||
1259 | if (ata_id_has_lba48(args->id)) | 1449 | if (ata_id_has_lba(args->id)) { |
1260 | n_sectors = ata_id_u64(args->id, 100); | 1450 | if (ata_id_has_lba48(args->id)) |
1261 | else | 1451 | n_sectors = ata_id_u64(args->id, 100); |
1262 | n_sectors = ata_id_u32(args->id, 60); | 1452 | else |
1453 | n_sectors = ata_id_u32(args->id, 60); | ||
1454 | } else { | ||
1455 | /* CHS default translation */ | ||
1456 | n_sectors = args->id[1] * args->id[3] * args->id[6]; | ||
1457 | |||
1458 | if (ata_id_current_chs_valid(args->id)) | ||
1459 | /* CHS current translation */ | ||
1460 | n_sectors = ata_id_u32(args->id, 57); | ||
1461 | } | ||
1462 | |||
1263 | n_sectors--; /* ATA TotalUserSectors - 1 */ | 1463 | n_sectors--; /* ATA TotalUserSectors - 1 */ |
1264 | 1464 | ||
1265 | if (args->cmd->cmnd[0] == READ_CAPACITY) { | 1465 | if (args->cmd->cmnd[0] == READ_CAPACITY) { |
@@ -1323,6 +1523,34 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, | |||
1323 | } | 1523 | } |
1324 | 1524 | ||
1325 | /** | 1525 | /** |
1526 | * ata_scsi_set_sense - Set SCSI sense data and status | ||
1527 | * @cmd: SCSI request to be handled | ||
1528 | * @sk: SCSI-defined sense key | ||
1529 | * @asc: SCSI-defined additional sense code | ||
1530 | * @ascq: SCSI-defined additional sense code qualifier | ||
1531 | * | ||
1532 | * Helper function that builds a valid fixed format, current | ||
1533 | * response code and the given sense key (sk), additional sense | ||
1534 | * code (asc) and additional sense code qualifier (ascq) with | ||
1535 | * a SCSI command status of %SAM_STAT_CHECK_CONDITION and | ||
1536 | * DRIVER_SENSE set in the upper bits of scsi_cmnd::result . | ||
1537 | * | ||
1538 | * LOCKING: | ||
1539 | * Not required | ||
1540 | */ | ||
1541 | |||
1542 | void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) | ||
1543 | { | ||
1544 | cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; | ||
1545 | |||
1546 | cmd->sense_buffer[0] = 0x70; /* fixed format, current */ | ||
1547 | cmd->sense_buffer[2] = sk; | ||
1548 | cmd->sense_buffer[7] = 18 - 8; /* additional sense length */ | ||
1549 | cmd->sense_buffer[12] = asc; | ||
1550 | cmd->sense_buffer[13] = ascq; | ||
1551 | } | ||
1552 | |||
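ata_scsi_set_sense() builds fixed-format, current sense data: response code 0x70 in byte 0, the sense key in byte 2, the additional sense length in byte 7 and ASC/ASCQ in bytes 12 and 13, and marks the command CHECK CONDITION with DRIVER_SENSE. A standalone sketch that fills an 18-byte buffer the same way is below; the 0x05 value is the standard ILLEGAL REQUEST sense key.

/* Fill an 18-byte fixed-format sense buffer the way the helper above does. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SK_ILLEGAL_REQUEST 0x05

static void set_sense(uint8_t *sb, uint8_t sk, uint8_t asc, uint8_t ascq)
{
	memset(sb, 0, 18);
	sb[0]  = 0x70;		/* fixed format, current error */
	sb[2]  = sk;		/* sense key */
	sb[7]  = 18 - 8;	/* additional sense length */
	sb[12] = asc;		/* additional sense code */
	sb[13] = ascq;		/* additional sense code qualifier */
}

int main(void)
{
	uint8_t sb[18];

	set_sense(sb, SK_ILLEGAL_REQUEST, 0x24, 0x00);	/* invalid field in CDB */
	for (int i = 0; i < 18; i++)
		printf("%02x ", sb[i]);
	printf("\n");
	return 0;
}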
1553 | /** | ||
1326 | * ata_scsi_badcmd - End a SCSI request with an error | 1554 | * ata_scsi_badcmd - End a SCSI request with an error |
1327 | * @cmd: SCSI request to be handled | 1555 | * @cmd: SCSI request to be handled |
1328 | * @done: SCSI command completion function | 1556 | * @done: SCSI command completion function |
@@ -1340,30 +1568,84 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, | |||
1340 | void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) | 1568 | void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) |
1341 | { | 1569 | { |
1342 | DPRINTK("ENTER\n"); | 1570 | DPRINTK("ENTER\n"); |
1343 | cmd->result = SAM_STAT_CHECK_CONDITION; | 1571 | ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq); |
1344 | |||
1345 | cmd->sense_buffer[0] = 0x70; | ||
1346 | cmd->sense_buffer[2] = ILLEGAL_REQUEST; | ||
1347 | cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */ | ||
1348 | cmd->sense_buffer[12] = asc; | ||
1349 | cmd->sense_buffer[13] = ascq; | ||
1350 | 1572 | ||
1351 | done(cmd); | 1573 | done(cmd); |
1352 | } | 1574 | } |
1353 | 1575 | ||
1576 | void atapi_request_sense(struct ata_port *ap, struct ata_device *dev, | ||
1577 | struct scsi_cmnd *cmd) | ||
1578 | { | ||
1579 | DECLARE_COMPLETION(wait); | ||
1580 | struct ata_queued_cmd *qc; | ||
1581 | unsigned long flags; | ||
1582 | int rc; | ||
1583 | |||
1584 | DPRINTK("ATAPI request sense\n"); | ||
1585 | |||
1586 | qc = ata_qc_new_init(ap, dev); | ||
1587 | BUG_ON(qc == NULL); | ||
1588 | |||
1589 | /* FIXME: is this needed? */ | ||
1590 | memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); | ||
1591 | |||
1592 | ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); | ||
1593 | qc->dma_dir = DMA_FROM_DEVICE; | ||
1594 | |||
1595 | memset(&qc->cdb, 0, ap->cdb_len); | ||
1596 | qc->cdb[0] = REQUEST_SENSE; | ||
1597 | qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; | ||
1598 | |||
1599 | qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | ||
1600 | qc->tf.command = ATA_CMD_PACKET; | ||
1601 | |||
1602 | qc->tf.protocol = ATA_PROT_ATAPI; | ||
1603 | qc->tf.lbam = (8 * 1024) & 0xff; | ||
1604 | qc->tf.lbah = (8 * 1024) >> 8; | ||
1605 | qc->nbytes = SCSI_SENSE_BUFFERSIZE; | ||
1606 | |||
1607 | qc->waiting = &wait; | ||
1608 | qc->complete_fn = ata_qc_complete_noop; | ||
1609 | |||
1610 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
1611 | rc = ata_qc_issue(qc); | ||
1612 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
1613 | |||
1614 | if (rc) | ||
1615 | ata_port_disable(ap); | ||
1616 | else | ||
1617 | wait_for_completion(&wait); | ||
1618 | |||
1619 | DPRINTK("EXIT\n"); | ||
1620 | } | ||
1621 | |||
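atapi_request_sense() issues REQUEST SENSE as a libata-internal command: it points qc->waiting at an on-stack completion, issues the qc under the host_set lock, then sleeps in wait_for_completion() until the interrupt path signals it. Below is a rough userspace analogue of that issue-then-wait handshake built on a pthread condition variable; all names are invented and a worker thread stands in for the interrupt handler.

/* Userspace analogue of the "issue, then wait_for_completion()" pattern:
 * a worker thread plays the role of the interrupt handler. */
#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void *irq_handler(void *arg)
{
	printf("command finished, completing waiter\n");
	complete(arg);
	return NULL;
}

int main(void)
{
	struct completion wait = { PTHREAD_MUTEX_INITIALIZER,
				   PTHREAD_COND_INITIALIZER, 0 };
	pthread_t t;

	pthread_create(&t, NULL, irq_handler, &wait);	/* "issue" the command */
	wait_for_completion(&wait);			/* sleep until done */
	pthread_join(t, NULL);
	return 0;
}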
1354 | static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) | 1622 | static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) |
1355 | { | 1623 | { |
1356 | struct scsi_cmnd *cmd = qc->scsicmd; | 1624 | struct scsi_cmnd *cmd = qc->scsicmd; |
1357 | 1625 | ||
1358 | if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) { | 1626 | VPRINTK("ENTER, drv_stat == 0x%x\n", drv_stat); |
1627 | |||
1628 | if (unlikely(drv_stat & (ATA_BUSY | ATA_DRQ))) | ||
1629 | ata_to_sense_error(qc, drv_stat); | ||
1630 | |||
1631 | else if (unlikely(drv_stat & ATA_ERR)) { | ||
1359 | DPRINTK("request check condition\n"); | 1632 | DPRINTK("request check condition\n"); |
1360 | 1633 | ||
1634 | /* FIXME: command completion with check condition | ||
1635 | * but no sense causes the error handler to run, | ||
1636 | * which then issues REQUEST SENSE, fills in the sense | ||
1637 | * buffer, and completes the command (for the second | ||
1638 | * time). We need to issue REQUEST SENSE some other | ||
1639 | * way, to avoid completing the command twice. | ||
1640 | */ | ||
1361 | cmd->result = SAM_STAT_CHECK_CONDITION; | 1641 | cmd->result = SAM_STAT_CHECK_CONDITION; |
1362 | 1642 | ||
1363 | qc->scsidone(cmd); | 1643 | qc->scsidone(cmd); |
1364 | 1644 | ||
1365 | return 1; | 1645 | return 1; |
1366 | } else { | 1646 | } |
1647 | |||
1648 | else { | ||
1367 | u8 *scsicmd = cmd->cmnd; | 1649 | u8 *scsicmd = cmd->cmnd; |
1368 | 1650 | ||
1369 | if (scsicmd[0] == INQUIRY) { | 1651 | if (scsicmd[0] == INQUIRY) { |
@@ -1371,15 +1653,30 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) | |||
1371 | unsigned int buflen; | 1653 | unsigned int buflen; |
1372 | 1654 | ||
1373 | buflen = ata_scsi_rbuf_get(cmd, &buf); | 1655 | buflen = ata_scsi_rbuf_get(cmd, &buf); |
1374 | buf[2] = 0x5; | 1656 | |
1375 | buf[3] = (buf[3] & 0xf0) | 2; | 1657 | /* ATAPI devices typically report zero for their SCSI version, |
1658 | * and sometimes deviate from the spec WRT response data | ||
1659 | * format. If SCSI version is reported as zero like normal, | ||
1660 | * then we make the following fixups: 1) Fake MMC-5 version, | ||
1661 | * to indicate to the Linux scsi midlayer this is a modern | ||
1662 | * device. 2) Ensure response data format / ATAPI information | ||
1663 | * are always correct. | ||
1664 | */ | ||
1665 | /* FIXME: do we ever override EVPD pages and the like, with | ||
1666 | * this code? | ||
1667 | */ | ||
1668 | if (buf[2] == 0) { | ||
1669 | buf[2] = 0x5; | ||
1670 | buf[3] = 0x32; | ||
1671 | } | ||
1672 | |||
1376 | ata_scsi_rbuf_put(cmd, buf); | 1673 | ata_scsi_rbuf_put(cmd, buf); |
1377 | } | 1674 | } |
1675 | |||
1378 | cmd->result = SAM_STAT_GOOD; | 1676 | cmd->result = SAM_STAT_GOOD; |
1379 | } | 1677 | } |
1380 | 1678 | ||
1381 | qc->scsidone(cmd); | 1679 | qc->scsidone(cmd); |
1382 | |||
1383 | return 0; | 1680 | return 0; |
1384 | } | 1681 | } |
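The INQUIRY fixup above rewrites two standard INQUIRY data bytes: byte 2 is the VERSION field (the 0x5 the comment calls a "fake MMC-5 version") and the low nibble of byte 3 is RESPONSE DATA FORMAT. Reading the upper bits of 0x32 as the HiSup/NormACA flags is my interpretation of SPC, not something the patch states. A standalone decode of the two values:

#include <stdio.h>

int main(void)
{
	unsigned char byte2 = 0x05;	/* VERSION, as written by the fixup */
	unsigned char byte3 = 0x32;	/* as written by the fixup */

	printf("VERSION field:        0x%02x\n", byte2);
	printf("RESPONSE DATA FORMAT: %u\n", byte3 & 0x0f);	/* 2 */
	printf("upper flag bits:      0x%02x\n", byte3 & 0xf0);	/* 0x30 */
	return 0;
}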
1385 | /** | 1682 | /** |
@@ -1640,7 +1937,7 @@ void ata_scsi_simulate(u16 *id, | |||
1640 | 1937 | ||
1641 | case INQUIRY: | 1938 | case INQUIRY: |
1642 | if (scsicmd[1] & 2) /* is CmdDt set? */ | 1939 | if (scsicmd[1] & 2) /* is CmdDt set? */ |
1643 | ata_bad_cdb(cmd, done); | 1940 | ata_scsi_invalid_field(cmd, done); |
1644 | else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ | 1941 | else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ |
1645 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); | 1942 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); |
1646 | else if (scsicmd[2] == 0x00) | 1943 | else if (scsicmd[2] == 0x00) |
@@ -1650,7 +1947,7 @@ void ata_scsi_simulate(u16 *id, | |||
1650 | else if (scsicmd[2] == 0x83) | 1947 | else if (scsicmd[2] == 0x83) |
1651 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); | 1948 | ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); |
1652 | else | 1949 | else |
1653 | ata_bad_cdb(cmd, done); | 1950 | ata_scsi_invalid_field(cmd, done); |
1654 | break; | 1951 | break; |
1655 | 1952 | ||
1656 | case MODE_SENSE: | 1953 | case MODE_SENSE: |
@@ -1660,7 +1957,7 @@ void ata_scsi_simulate(u16 *id, | |||
1660 | 1957 | ||
1661 | case MODE_SELECT: /* unconditionally return */ | 1958 | case MODE_SELECT: /* unconditionally return */ |
1662 | case MODE_SELECT_10: /* bad-field-in-cdb */ | 1959 | case MODE_SELECT_10: /* bad-field-in-cdb */ |
1663 | ata_bad_cdb(cmd, done); | 1960 | ata_scsi_invalid_field(cmd, done); |
1664 | break; | 1961 | break; |
1665 | 1962 | ||
1666 | case READ_CAPACITY: | 1963 | case READ_CAPACITY: |
@@ -1671,7 +1968,7 @@ void ata_scsi_simulate(u16 *id, | |||
1671 | if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) | 1968 | if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) |
1672 | ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); | 1969 | ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); |
1673 | else | 1970 | else |
1674 | ata_bad_cdb(cmd, done); | 1971 | ata_scsi_invalid_field(cmd, done); |
1675 | break; | 1972 | break; |
1676 | 1973 | ||
1677 | case REPORT_LUNS: | 1974 | case REPORT_LUNS: |
@@ -1683,8 +1980,26 @@ void ata_scsi_simulate(u16 *id, | |||
1683 | 1980 | ||
1684 | /* all other commands */ | 1981 | /* all other commands */ |
1685 | default: | 1982 | default: |
1686 | ata_bad_scsiop(cmd, done); | 1983 | ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); |
1984 | /* "Invalid command operation code" */ | ||
1985 | done(cmd); | ||
1687 | break; | 1986 | break; |
1688 | } | 1987 | } |
1689 | } | 1988 | } |
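The body of ata_scsi_set_sense() is not part of this hunk; presumably it fills a fixed-format sense buffer with the sense key / ASC / ASCQ triple, which is what the default: branch above relies on for its "invalid command operation code" report. A hedged, standalone sketch of that layout (offsets per SPC fixed-format sense data; the real helper may differ in detail):

#include <stdio.h>
#include <string.h>

static void sketch_set_sense(unsigned char *buf, unsigned char sk,
			     unsigned char asc, unsigned char ascq)
{
	memset(buf, 0, 18);
	buf[0]  = 0x70;		/* current error, fixed format */
	buf[2]  = sk;		/* sense key, e.g. ILLEGAL_REQUEST (0x05) */
	buf[7]  = 0x0a;		/* additional sense length */
	buf[12] = asc;
	buf[13] = ascq;
}

int main(void)
{
	unsigned char sense[18];

	/* the default: case above -- ILLEGAL_REQUEST, ASC/ASCQ 0x20/0x00 */
	sketch_set_sense(sense, 0x05, 0x20, 0x00);
	printf("key 0x%02x asc 0x%02x ascq 0x%02x\n",
	       sense[2], sense[12], sense[13]);
	return 0;
}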
1690 | 1989 | ||
1990 | void ata_scsi_scan_host(struct ata_port *ap) | ||
1991 | { | ||
1992 | struct ata_device *dev; | ||
1993 | unsigned int i; | ||
1994 | |||
1995 | if (ap->flags & ATA_FLAG_PORT_DISABLED) | ||
1996 | return; | ||
1997 | |||
1998 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | ||
1999 | dev = &ap->device[i]; | ||
2000 | |||
2001 | if (ata_dev_present(dev)) | ||
2002 | scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0); | ||
2003 | } | ||
2004 | } | ||
2005 | |||
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h index d608b3a0f6fe..a18f2ac1d4a1 100644 --- a/drivers/scsi/libata.h +++ b/drivers/scsi/libata.h | |||
@@ -39,6 +39,7 @@ struct ata_scsi_args { | |||
39 | 39 | ||
40 | /* libata-core.c */ | 40 | /* libata-core.c */ |
41 | extern int atapi_enabled; | 41 | extern int atapi_enabled; |
42 | extern int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat); | ||
42 | extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, | 43 | extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, |
43 | struct ata_device *dev); | 44 | struct ata_device *dev); |
44 | extern void ata_qc_free(struct ata_queued_cmd *qc); | 45 | extern void ata_qc_free(struct ata_queued_cmd *qc); |
@@ -51,6 +52,9 @@ extern void swap_buf_le16(u16 *buf, unsigned int buf_words); | |||
51 | 52 | ||
52 | 53 | ||
53 | /* libata-scsi.c */ | 54 | /* libata-scsi.c */ |
55 | extern void atapi_request_sense(struct ata_port *ap, struct ata_device *dev, | ||
56 | struct scsi_cmnd *cmd); | ||
57 | extern void ata_scsi_scan_host(struct ata_port *ap); | ||
54 | extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat); | 58 | extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat); |
55 | extern int ata_scsi_error(struct Scsi_Host *host); | 59 | extern int ata_scsi_error(struct Scsi_Host *host); |
56 | extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, | 60 | extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, |
@@ -76,18 +80,10 @@ extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, | |||
76 | extern void ata_scsi_badcmd(struct scsi_cmnd *cmd, | 80 | extern void ata_scsi_badcmd(struct scsi_cmnd *cmd, |
77 | void (*done)(struct scsi_cmnd *), | 81 | void (*done)(struct scsi_cmnd *), |
78 | u8 asc, u8 ascq); | 82 | u8 asc, u8 ascq); |
83 | extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, | ||
84 | u8 sk, u8 asc, u8 ascq); | ||
79 | extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, | 85 | extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, |
80 | unsigned int (*actor) (struct ata_scsi_args *args, | 86 | unsigned int (*actor) (struct ata_scsi_args *args, |
81 | u8 *rbuf, unsigned int buflen)); | 87 | u8 *rbuf, unsigned int buflen)); |
82 | 88 | ||
83 | static inline void ata_bad_scsiop(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | ||
84 | { | ||
85 | ata_scsi_badcmd(cmd, done, 0x20, 0x00); | ||
86 | } | ||
87 | |||
88 | static inline void ata_bad_cdb(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | ||
89 | { | ||
90 | ata_scsi_badcmd(cmd, done, 0x24, 0x00); | ||
91 | } | ||
92 | |||
93 | #endif /* __LIBATA_H__ */ | 89 | #endif /* __LIBATA_H__ */ |
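For reference, the two removed inlines differed only in the additional sense code passed to ata_scsi_badcmd(): 0x20 ("invalid command operation code") for ata_bad_scsiop() and 0x24 ("invalid field in CDB") for ata_bad_cdb(); both ASCs are conventionally paired with the ILLEGAL_REQUEST sense key. The call sites converted in libata-scsi.c above keep the same codes: ata_scsi_invalid_field() presumably reports 0x24 via the new ata_scsi_set_sense() helper, while the default branch open-codes the 0x20 case.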
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 1b3148e842af..c3f637395734 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/delay.h> | 34 | #include <linux/delay.h> |
35 | #include <linux/uio.h> | 35 | #include <linux/uio.h> |
36 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
37 | #include <linux/fs.h> | ||
37 | #include <linux/compat.h> | 38 | #include <linux/compat.h> |
38 | 39 | ||
39 | #include <scsi/scsi.h> | 40 | #include <scsi/scsi.h> |
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index ea76fe44585e..d457f5673476 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <asm/io.h> | 35 | #include <asm/io.h> |
36 | 36 | ||
37 | #define DRV_NAME "sata_mv" | 37 | #define DRV_NAME "sata_mv" |
38 | #define DRV_VERSION "0.12" | 38 | #define DRV_VERSION "0.24" |
39 | 39 | ||
40 | enum { | 40 | enum { |
41 | /* BAR's are enumerated in terms of pci_resource_start() terms */ | 41 | /* BAR's are enumerated in terms of pci_resource_start() terms */ |
@@ -55,31 +55,61 @@ enum { | |||
55 | MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ | 55 | MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ |
56 | MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, | 56 | MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, |
57 | 57 | ||
58 | MV_Q_CT = 32, | 58 | MV_USE_Q_DEPTH = ATA_DEF_QUEUE, |
59 | MV_CRQB_SZ = 32, | ||
60 | MV_CRPB_SZ = 8, | ||
61 | 59 | ||
62 | MV_DMA_BOUNDARY = 0xffffffffU, | 60 | MV_MAX_Q_DEPTH = 32, |
63 | SATAHC_MASK = (~(MV_SATAHC_REG_SZ - 1)), | 61 | MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, |
62 | |||
63 | /* CRQB needs alignment on a 1KB boundary. Size == 1KB | ||
64 | * CRPB needs alignment on a 256B boundary. Size == 256B | ||
65 | * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB | ||
66 | * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B | ||
67 | */ | ||
68 | MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), | ||
69 | MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), | ||
70 | MV_MAX_SG_CT = 176, | ||
71 | MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), | ||
72 | MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ), | ||
73 | |||
74 | /* Our DMA boundary is determined by an ePRD being unable to handle | ||
75 | * anything larger than 64KB | ||
76 | */ | ||
77 | MV_DMA_BOUNDARY = 0xffffU, | ||
64 | 78 | ||
65 | MV_PORTS_PER_HC = 4, | 79 | MV_PORTS_PER_HC = 4, |
66 | /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ | 80 | /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ |
67 | MV_PORT_HC_SHIFT = 2, | 81 | MV_PORT_HC_SHIFT = 2, |
68 | /* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */ | 82 | /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */ |
69 | MV_PORT_MASK = 3, | 83 | MV_PORT_MASK = 3, |
70 | 84 | ||
71 | /* Host Flags */ | 85 | /* Host Flags */ |
72 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ | 86 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ |
73 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ | 87 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ |
74 | MV_FLAG_BDMA = (1 << 28), /* Basic DMA */ | 88 | MV_FLAG_GLBL_SFT_RST = (1 << 28), /* Global Soft Reset support */ |
89 | MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
90 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), | ||
91 | MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE | | ||
92 | MV_FLAG_GLBL_SFT_RST), | ||
75 | 93 | ||
76 | chip_504x = 0, | 94 | chip_504x = 0, |
77 | chip_508x = 1, | 95 | chip_508x = 1, |
78 | chip_604x = 2, | 96 | chip_604x = 2, |
79 | chip_608x = 3, | 97 | chip_608x = 3, |
80 | 98 | ||
99 | CRQB_FLAG_READ = (1 << 0), | ||
100 | CRQB_TAG_SHIFT = 1, | ||
101 | CRQB_CMD_ADDR_SHIFT = 8, | ||
102 | CRQB_CMD_CS = (0x2 << 11), | ||
103 | CRQB_CMD_LAST = (1 << 15), | ||
104 | |||
105 | CRPB_FLAG_STATUS_SHIFT = 8, | ||
106 | |||
107 | EPRD_FLAG_END_OF_TBL = (1 << 31), | ||
108 | |||
81 | /* PCI interface registers */ | 109 | /* PCI interface registers */ |
82 | 110 | ||
111 | PCI_COMMAND_OFS = 0xc00, | ||
112 | |||
83 | PCI_MAIN_CMD_STS_OFS = 0xd30, | 113 | PCI_MAIN_CMD_STS_OFS = 0xd30, |
84 | STOP_PCI_MASTER = (1 << 2), | 114 | STOP_PCI_MASTER = (1 << 2), |
85 | PCI_MASTER_EMPTY = (1 << 3), | 115 | PCI_MASTER_EMPTY = (1 << 3), |
@@ -111,20 +141,13 @@ enum { | |||
111 | HC_CFG_OFS = 0, | 141 | HC_CFG_OFS = 0, |
112 | 142 | ||
113 | HC_IRQ_CAUSE_OFS = 0x14, | 143 | HC_IRQ_CAUSE_OFS = 0x14, |
114 | CRBP_DMA_DONE = (1 << 0), /* shift by port # */ | 144 | CRPB_DMA_DONE = (1 << 0), /* shift by port # */ |
115 | HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */ | 145 | HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */ |
116 | DEV_IRQ = (1 << 8), /* shift by port # */ | 146 | DEV_IRQ = (1 << 8), /* shift by port # */ |
117 | 147 | ||
118 | /* Shadow block registers */ | 148 | /* Shadow block registers */ |
119 | SHD_PIO_DATA_OFS = 0x100, | 149 | SHD_BLK_OFS = 0x100, |
120 | SHD_FEA_ERR_OFS = 0x104, | 150 | SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */ |
121 | SHD_SECT_CNT_OFS = 0x108, | ||
122 | SHD_LBA_L_OFS = 0x10C, | ||
123 | SHD_LBA_M_OFS = 0x110, | ||
124 | SHD_LBA_H_OFS = 0x114, | ||
125 | SHD_DEV_HD_OFS = 0x118, | ||
126 | SHD_CMD_STA_OFS = 0x11C, | ||
127 | SHD_CTL_AST_OFS = 0x120, | ||
128 | 151 | ||
129 | /* SATA registers */ | 152 | /* SATA registers */ |
130 | SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ | 153 | SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ |
@@ -132,6 +155,11 @@ enum { | |||
132 | 155 | ||
133 | /* Port registers */ | 156 | /* Port registers */ |
134 | EDMA_CFG_OFS = 0, | 157 | EDMA_CFG_OFS = 0, |
158 | EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */ | ||
159 | EDMA_CFG_NCQ = (1 << 5), | ||
160 | EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ | ||
161 | EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ | ||
162 | EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ | ||
135 | 163 | ||
136 | EDMA_ERR_IRQ_CAUSE_OFS = 0x8, | 164 | EDMA_ERR_IRQ_CAUSE_OFS = 0x8, |
137 | EDMA_ERR_IRQ_MASK_OFS = 0xc, | 165 | EDMA_ERR_IRQ_MASK_OFS = 0xc, |
@@ -161,33 +189,85 @@ enum { | |||
161 | EDMA_ERR_LNK_DATA_TX | | 189 | EDMA_ERR_LNK_DATA_TX | |
162 | EDMA_ERR_TRANS_PROTO), | 190 | EDMA_ERR_TRANS_PROTO), |
163 | 191 | ||
192 | EDMA_REQ_Q_BASE_HI_OFS = 0x10, | ||
193 | EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */ | ||
194 | EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, | ||
195 | |||
196 | EDMA_REQ_Q_OUT_PTR_OFS = 0x18, | ||
197 | EDMA_REQ_Q_PTR_SHIFT = 5, | ||
198 | |||
199 | EDMA_RSP_Q_BASE_HI_OFS = 0x1c, | ||
200 | EDMA_RSP_Q_IN_PTR_OFS = 0x20, | ||
201 | EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */ | ||
202 | EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, | ||
203 | EDMA_RSP_Q_PTR_SHIFT = 3, | ||
204 | |||
164 | EDMA_CMD_OFS = 0x28, | 205 | EDMA_CMD_OFS = 0x28, |
165 | EDMA_EN = (1 << 0), | 206 | EDMA_EN = (1 << 0), |
166 | EDMA_DS = (1 << 1), | 207 | EDMA_DS = (1 << 1), |
167 | ATA_RST = (1 << 2), | 208 | ATA_RST = (1 << 2), |
168 | 209 | ||
169 | /* BDMA is 6xxx part only */ | 210 | /* Host private flags (hp_flags) */ |
170 | BDMA_CMD_OFS = 0x224, | 211 | MV_HP_FLAG_MSI = (1 << 0), |
171 | BDMA_START = (1 << 0), | ||
172 | 212 | ||
173 | MV_UNDEF = 0, | 213 | /* Port private flags (pp_flags) */ |
214 | MV_PP_FLAG_EDMA_EN = (1 << 0), | ||
215 | MV_PP_FLAG_EDMA_DS_ACT = (1 << 1), | ||
174 | }; | 216 | }; |
175 | 217 | ||
176 | struct mv_port_priv { | 218 | /* Command ReQuest Block: 32B */ |
219 | struct mv_crqb { | ||
220 | u32 sg_addr; | ||
221 | u32 sg_addr_hi; | ||
222 | u16 ctrl_flags; | ||
223 | u16 ata_cmd[11]; | ||
224 | }; | ||
177 | 225 | ||
226 | /* Command ResPonse Block: 8B */ | ||
227 | struct mv_crpb { | ||
228 | u16 id; | ||
229 | u16 flags; | ||
230 | u32 tmstmp; | ||
178 | }; | 231 | }; |
179 | 232 | ||
180 | struct mv_host_priv { | 233 | /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ |
234 | struct mv_sg { | ||
235 | u32 addr; | ||
236 | u32 flags_size; | ||
237 | u32 addr_hi; | ||
238 | u32 reserved; | ||
239 | }; | ||
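The struct sizes above (32 B CRQB, 8 B CRPB, 16 B ePRD) match the sizes quoted in the enum comment, and the per-port DMA chunk they imply, MV_PORT_PRIV_DMA_SZ, comes to exactly one 4 KB page. A standalone check of the arithmetic using the constants from this patch:

#include <stdio.h>

int main(void)
{
	unsigned crqb_q = 32 * 32;	/* MV_CRQB_Q_SZ: 32 requests x 32 B */
	unsigned crpb_q = 8 * 32;	/* MV_CRPB_Q_SZ: 32 responses x 8 B */
	unsigned sg_tbl = 16 * 176;	/* MV_SG_TBL_SZ: 176 ePRDs x 16 B   */

	/* 1024 + 256 + 2816 = 4096 bytes */
	printf("MV_PORT_PRIV_DMA_SZ = %u\n", crqb_q + crpb_q + sg_tbl);
	return 0;
}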
181 | 240 | ||
241 | struct mv_port_priv { | ||
242 | struct mv_crqb *crqb; | ||
243 | dma_addr_t crqb_dma; | ||
244 | struct mv_crpb *crpb; | ||
245 | dma_addr_t crpb_dma; | ||
246 | struct mv_sg *sg_tbl; | ||
247 | dma_addr_t sg_tbl_dma; | ||
248 | |||
249 | unsigned req_producer; /* cp of req_in_ptr */ | ||
250 | unsigned rsp_consumer; /* cp of rsp_out_ptr */ | ||
251 | u32 pp_flags; | ||
252 | }; | ||
253 | |||
254 | struct mv_host_priv { | ||
255 | u32 hp_flags; | ||
182 | }; | 256 | }; |
183 | 257 | ||
184 | static void mv_irq_clear(struct ata_port *ap); | 258 | static void mv_irq_clear(struct ata_port *ap); |
185 | static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); | 259 | static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); |
186 | static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); | 260 | static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); |
261 | static u8 mv_check_err(struct ata_port *ap); | ||
187 | static void mv_phy_reset(struct ata_port *ap); | 262 | static void mv_phy_reset(struct ata_port *ap); |
188 | static int mv_master_reset(void __iomem *mmio_base); | 263 | static void mv_host_stop(struct ata_host_set *host_set); |
264 | static int mv_port_start(struct ata_port *ap); | ||
265 | static void mv_port_stop(struct ata_port *ap); | ||
266 | static void mv_qc_prep(struct ata_queued_cmd *qc); | ||
267 | static int mv_qc_issue(struct ata_queued_cmd *qc); | ||
189 | static irqreturn_t mv_interrupt(int irq, void *dev_instance, | 268 | static irqreturn_t mv_interrupt(int irq, void *dev_instance, |
190 | struct pt_regs *regs); | 269 | struct pt_regs *regs); |
270 | static void mv_eng_timeout(struct ata_port *ap); | ||
191 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | 271 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
192 | 272 | ||
193 | static Scsi_Host_Template mv_sht = { | 273 | static Scsi_Host_Template mv_sht = { |
@@ -196,13 +276,13 @@ static Scsi_Host_Template mv_sht = { | |||
196 | .ioctl = ata_scsi_ioctl, | 276 | .ioctl = ata_scsi_ioctl, |
197 | .queuecommand = ata_scsi_queuecmd, | 277 | .queuecommand = ata_scsi_queuecmd, |
198 | .eh_strategy_handler = ata_scsi_error, | 278 | .eh_strategy_handler = ata_scsi_error, |
199 | .can_queue = ATA_DEF_QUEUE, | 279 | .can_queue = MV_USE_Q_DEPTH, |
200 | .this_id = ATA_SHT_THIS_ID, | 280 | .this_id = ATA_SHT_THIS_ID, |
201 | .sg_tablesize = MV_UNDEF, | 281 | .sg_tablesize = MV_MAX_SG_CT, |
202 | .max_sectors = ATA_MAX_SECTORS, | 282 | .max_sectors = ATA_MAX_SECTORS, |
203 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | 283 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, |
204 | .emulated = ATA_SHT_EMULATED, | 284 | .emulated = ATA_SHT_EMULATED, |
205 | .use_clustering = MV_UNDEF, | 285 | .use_clustering = ATA_SHT_USE_CLUSTERING, |
206 | .proc_name = DRV_NAME, | 286 | .proc_name = DRV_NAME, |
207 | .dma_boundary = MV_DMA_BOUNDARY, | 287 | .dma_boundary = MV_DMA_BOUNDARY, |
208 | .slave_configure = ata_scsi_slave_config, | 288 | .slave_configure = ata_scsi_slave_config, |
@@ -216,15 +296,16 @@ static struct ata_port_operations mv_ops = { | |||
216 | .tf_load = ata_tf_load, | 296 | .tf_load = ata_tf_load, |
217 | .tf_read = ata_tf_read, | 297 | .tf_read = ata_tf_read, |
218 | .check_status = ata_check_status, | 298 | .check_status = ata_check_status, |
299 | .check_err = mv_check_err, | ||
219 | .exec_command = ata_exec_command, | 300 | .exec_command = ata_exec_command, |
220 | .dev_select = ata_std_dev_select, | 301 | .dev_select = ata_std_dev_select, |
221 | 302 | ||
222 | .phy_reset = mv_phy_reset, | 303 | .phy_reset = mv_phy_reset, |
223 | 304 | ||
224 | .qc_prep = ata_qc_prep, | 305 | .qc_prep = mv_qc_prep, |
225 | .qc_issue = ata_qc_issue_prot, | 306 | .qc_issue = mv_qc_issue, |
226 | 307 | ||
227 | .eng_timeout = ata_eng_timeout, | 308 | .eng_timeout = mv_eng_timeout, |
228 | 309 | ||
229 | .irq_handler = mv_interrupt, | 310 | .irq_handler = mv_interrupt, |
230 | .irq_clear = mv_irq_clear, | 311 | .irq_clear = mv_irq_clear, |
@@ -232,46 +313,39 @@ static struct ata_port_operations mv_ops = { | |||
232 | .scr_read = mv_scr_read, | 313 | .scr_read = mv_scr_read, |
233 | .scr_write = mv_scr_write, | 314 | .scr_write = mv_scr_write, |
234 | 315 | ||
235 | .port_start = ata_port_start, | 316 | .port_start = mv_port_start, |
236 | .port_stop = ata_port_stop, | 317 | .port_stop = mv_port_stop, |
237 | .host_stop = ata_host_stop, | 318 | .host_stop = mv_host_stop, |
238 | }; | 319 | }; |
239 | 320 | ||
240 | static struct ata_port_info mv_port_info[] = { | 321 | static struct ata_port_info mv_port_info[] = { |
241 | { /* chip_504x */ | 322 | { /* chip_504x */ |
242 | .sht = &mv_sht, | 323 | .sht = &mv_sht, |
243 | .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 324 | .host_flags = MV_COMMON_FLAGS, |
244 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), | 325 | .pio_mask = 0x1f, /* pio0-4 */ |
245 | .pio_mask = 0x1f, /* pio4-0 */ | 326 | .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */ |
246 | .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ | ||
247 | .port_ops = &mv_ops, | 327 | .port_ops = &mv_ops, |
248 | }, | 328 | }, |
249 | { /* chip_508x */ | 329 | { /* chip_508x */ |
250 | .sht = &mv_sht, | 330 | .sht = &mv_sht, |
251 | .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 331 | .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), |
252 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | 332 | .pio_mask = 0x1f, /* pio0-4 */ |
253 | MV_FLAG_DUAL_HC), | 333 | .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */ |
254 | .pio_mask = 0x1f, /* pio4-0 */ | ||
255 | .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ | ||
256 | .port_ops = &mv_ops, | 334 | .port_ops = &mv_ops, |
257 | }, | 335 | }, |
258 | { /* chip_604x */ | 336 | { /* chip_604x */ |
259 | .sht = &mv_sht, | 337 | .sht = &mv_sht, |
260 | .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 338 | .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), |
261 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | 339 | .pio_mask = 0x1f, /* pio0-4 */ |
262 | MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA), | 340 | .udma_mask = 0x7f, /* udma0-6 */ |
263 | .pio_mask = 0x1f, /* pio4-0 */ | ||
264 | .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ | ||
265 | .port_ops = &mv_ops, | 341 | .port_ops = &mv_ops, |
266 | }, | 342 | }, |
267 | { /* chip_608x */ | 343 | { /* chip_608x */ |
268 | .sht = &mv_sht, | 344 | .sht = &mv_sht, |
269 | .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 345 | .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | |
270 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | 346 | MV_FLAG_DUAL_HC), |
271 | MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC | | 347 | .pio_mask = 0x1f, /* pio0-4 */ |
272 | MV_FLAG_BDMA), | 348 | .udma_mask = 0x7f, /* udma0-6 */ |
273 | .pio_mask = 0x1f, /* pio4-0 */ | ||
274 | .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ | ||
275 | .port_ops = &mv_ops, | 349 | .port_ops = &mv_ops, |
276 | }, | 350 | }, |
277 | }; | 351 | }; |
@@ -306,12 +380,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr) | |||
306 | (void) readl(addr); /* flush to avoid PCI posted write */ | 380 | (void) readl(addr); /* flush to avoid PCI posted write */ |
307 | } | 381 | } |
308 | 382 | ||
309 | static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio) | ||
310 | { | ||
311 | return ((void __iomem *)((unsigned long)port_mmio & | ||
312 | (unsigned long)SATAHC_MASK)); | ||
313 | } | ||
314 | |||
315 | static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) | 383 | static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) |
316 | { | 384 | { |
317 | return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); | 385 | return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); |
@@ -329,24 +397,150 @@ static inline void __iomem *mv_ap_base(struct ata_port *ap) | |||
329 | return mv_port_base(ap->host_set->mmio_base, ap->port_no); | 397 | return mv_port_base(ap->host_set->mmio_base, ap->port_no); |
330 | } | 398 | } |
331 | 399 | ||
332 | static inline int mv_get_hc_count(unsigned long flags) | 400 | static inline int mv_get_hc_count(unsigned long hp_flags) |
333 | { | 401 | { |
334 | return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1); | 402 | return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1); |
335 | } | 403 | } |
336 | 404 | ||
337 | static inline int mv_is_edma_active(struct ata_port *ap) | 405 | static void mv_irq_clear(struct ata_port *ap) |
406 | { | ||
407 | } | ||
408 | |||
409 | /** | ||
410 | * mv_start_dma - Enable eDMA engine | ||
411 | * @base: port base address | ||
412 | * @pp: port private data | ||
413 | * | ||
414 | * Verify the local cache of the eDMA state is accurate with an | ||
415 | * assert. | ||
416 | * | ||
417 | * LOCKING: | ||
418 | * Inherited from caller. | ||
419 | */ | ||
420 | static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) | ||
421 | { | ||
422 | if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) { | ||
423 | writelfl(EDMA_EN, base + EDMA_CMD_OFS); | ||
424 | pp->pp_flags |= MV_PP_FLAG_EDMA_EN; | ||
425 | } | ||
426 | assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); | ||
427 | } | ||
428 | |||
429 | /** | ||
430 | * mv_stop_dma - Disable eDMA engine | ||
431 | * @ap: ATA channel to manipulate | ||
432 | * | ||
433 | * Verify the local cache of the eDMA state is accurate with an | ||
434 | * assert. | ||
435 | * | ||
436 | * LOCKING: | ||
437 | * Inherited from caller. | ||
438 | */ | ||
439 | static void mv_stop_dma(struct ata_port *ap) | ||
338 | { | 440 | { |
339 | void __iomem *port_mmio = mv_ap_base(ap); | 441 | void __iomem *port_mmio = mv_ap_base(ap); |
340 | return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); | 442 | struct mv_port_priv *pp = ap->private_data; |
443 | u32 reg; | ||
444 | int i; | ||
445 | |||
446 | if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) { | ||
447 | /* Disable EDMA if active. The disable bit auto clears. | ||
448 | */ | ||
449 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); | ||
450 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | ||
451 | } else { | ||
452 | assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); | ||
453 | } | ||
454 | |||
455 | /* now properly wait for the eDMA to stop */ | ||
456 | for (i = 1000; i > 0; i--) { | ||
457 | reg = readl(port_mmio + EDMA_CMD_OFS); | ||
458 | if (!(EDMA_EN & reg)) { | ||
459 | break; | ||
460 | } | ||
461 | udelay(100); | ||
462 | } | ||
463 | |||
464 | if (EDMA_EN & reg) { | ||
465 | printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id); | ||
466 | /* FIXME: Consider doing a reset here to recover */ | ||
467 | } | ||
341 | } | 468 | } |
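Note on the wait loop above: it polls EDMA_CMD_OFS up to 1000 times with udelay(100) between reads, so the eDMA gets at most 1000 x 100 us = 100 ms to drop EDMA_EN after the EDMA_DS write before the "Unable to stop eDMA" error is printed.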
342 | 469 | ||
343 | static inline int mv_port_bdma_capable(struct ata_port *ap) | 470 | #ifdef ATA_DEBUG |
471 | static void mv_dump_mem(void __iomem *start, unsigned bytes) | ||
344 | { | 472 | { |
345 | return (ap->flags & MV_FLAG_BDMA); | 473 | int b, w; |
474 | for (b = 0; b < bytes; ) { | ||
475 | DPRINTK("%p: ", start + b); | ||
476 | for (w = 0; b < bytes && w < 4; w++) { | ||
477 | printk("%08x ",readl(start + b)); | ||
478 | b += sizeof(u32); | ||
479 | } | ||
480 | printk("\n"); | ||
481 | } | ||
346 | } | 482 | } |
483 | #endif | ||
347 | 484 | ||
348 | static void mv_irq_clear(struct ata_port *ap) | 485 | static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) |
486 | { | ||
487 | #ifdef ATA_DEBUG | ||
488 | int b, w; | ||
489 | u32 dw; | ||
490 | for (b = 0; b < bytes; ) { | ||
491 | DPRINTK("%02x: ", b); | ||
492 | for (w = 0; b < bytes && w < 4; w++) { | ||
493 | (void) pci_read_config_dword(pdev,b,&dw); | ||
494 | printk("%08x ",dw); | ||
495 | b += sizeof(u32); | ||
496 | } | ||
497 | printk("\n"); | ||
498 | } | ||
499 | #endif | ||
500 | } | ||
501 | static void mv_dump_all_regs(void __iomem *mmio_base, int port, | ||
502 | struct pci_dev *pdev) | ||
349 | { | 503 | { |
504 | #ifdef ATA_DEBUG | ||
505 | void __iomem *hc_base = mv_hc_base(mmio_base, | ||
506 | port >> MV_PORT_HC_SHIFT); | ||
507 | void __iomem *port_base; | ||
508 | int start_port, num_ports, p, start_hc, num_hcs, hc; | ||
509 | |||
510 | if (0 > port) { | ||
511 | start_hc = start_port = 0; | ||
512 | num_ports = 8; /* should be benign for 4 port devs */ | ||
513 | num_hcs = 2; | ||
514 | } else { | ||
515 | start_hc = port >> MV_PORT_HC_SHIFT; | ||
516 | start_port = port; | ||
517 | num_ports = num_hcs = 1; | ||
518 | } | ||
519 | DPRINTK("All registers for port(s) %u-%u:\n", start_port, | ||
520 | num_ports > 1 ? num_ports - 1 : start_port); | ||
521 | |||
522 | if (NULL != pdev) { | ||
523 | DPRINTK("PCI config space regs:\n"); | ||
524 | mv_dump_pci_cfg(pdev, 0x68); | ||
525 | } | ||
526 | DPRINTK("PCI regs:\n"); | ||
527 | mv_dump_mem(mmio_base+0xc00, 0x3c); | ||
528 | mv_dump_mem(mmio_base+0xd00, 0x34); | ||
529 | mv_dump_mem(mmio_base+0xf00, 0x4); | ||
530 | mv_dump_mem(mmio_base+0x1d00, 0x6c); | ||
531 | for (hc = start_hc; hc < start_hc + num_hcs; hc++) { | ||
532 | hc_base = mv_hc_base(mmio_base, hc); | ||
533 | DPRINTK("HC regs (HC %i):\n", hc); | ||
534 | mv_dump_mem(hc_base, 0x1c); | ||
535 | } | ||
536 | for (p = start_port; p < start_port + num_ports; p++) { | ||
537 | port_base = mv_port_base(mmio_base, p); | ||
538 | DPRINTK("EDMA regs (port %i):\n",p); | ||
539 | mv_dump_mem(port_base, 0x54); | ||
540 | DPRINTK("SATA regs (port %i):\n",p); | ||
541 | mv_dump_mem(port_base+0x300, 0x60); | ||
542 | } | ||
543 | #endif | ||
350 | } | 544 | } |
351 | 545 | ||
352 | static unsigned int mv_scr_offset(unsigned int sc_reg_in) | 546 | static unsigned int mv_scr_offset(unsigned int sc_reg_in) |
@@ -389,30 +583,37 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) | |||
389 | } | 583 | } |
390 | } | 584 | } |
391 | 585 | ||
392 | static int mv_master_reset(void __iomem *mmio_base) | 586 | /** |
587 | * mv_global_soft_reset - Perform the 6xxx global soft reset | ||
588 | * @mmio_base: base address of the HBA | ||
589 | * | ||
590 | * This routine only applies to 6xxx parts. | ||
591 | * | ||
592 | * LOCKING: | ||
593 | * Inherited from caller. | ||
594 | */ | ||
595 | static int mv_global_soft_reset(void __iomem *mmio_base) | ||
393 | { | 596 | { |
394 | void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; | 597 | void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; |
395 | int i, rc = 0; | 598 | int i, rc = 0; |
396 | u32 t; | 599 | u32 t; |
397 | 600 | ||
398 | VPRINTK("ENTER\n"); | ||
399 | |||
400 | /* Following procedure defined in PCI "main command and status | 601 | /* Following procedure defined in PCI "main command and status |
401 | * register" table. | 602 | * register" table. |
402 | */ | 603 | */ |
403 | t = readl(reg); | 604 | t = readl(reg); |
404 | writel(t | STOP_PCI_MASTER, reg); | 605 | writel(t | STOP_PCI_MASTER, reg); |
405 | 606 | ||
406 | for (i = 0; i < 100; i++) { | 607 | for (i = 0; i < 1000; i++) { |
407 | msleep(10); | 608 | udelay(1); |
408 | t = readl(reg); | 609 | t = readl(reg); |
409 | if (PCI_MASTER_EMPTY & t) { | 610 | if (PCI_MASTER_EMPTY & t) { |
410 | break; | 611 | break; |
411 | } | 612 | } |
412 | } | 613 | } |
413 | if (!(PCI_MASTER_EMPTY & t)) { | 614 | if (!(PCI_MASTER_EMPTY & t)) { |
414 | printk(KERN_ERR DRV_NAME "PCI master won't flush\n"); | 615 | printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); |
415 | rc = 1; /* broken HW? */ | 616 | rc = 1; |
416 | goto done; | 617 | goto done; |
417 | } | 618 | } |
418 | 619 | ||
@@ -425,39 +626,398 @@ static int mv_master_reset(void __iomem *mmio_base) | |||
425 | } while (!(GLOB_SFT_RST & t) && (i-- > 0)); | 626 | } while (!(GLOB_SFT_RST & t) && (i-- > 0)); |
426 | 627 | ||
427 | if (!(GLOB_SFT_RST & t)) { | 628 | if (!(GLOB_SFT_RST & t)) { |
428 | printk(KERN_ERR DRV_NAME "can't set global reset\n"); | 629 | printk(KERN_ERR DRV_NAME ": can't set global reset\n"); |
429 | rc = 1; /* broken HW? */ | 630 | rc = 1; |
430 | goto done; | 631 | goto done; |
431 | } | 632 | } |
432 | 633 | ||
433 | /* clear reset */ | 634 | /* clear reset and *reenable the PCI master* (not mentioned in spec) */ |
434 | i = 5; | 635 | i = 5; |
435 | do { | 636 | do { |
436 | writel(t & ~GLOB_SFT_RST, reg); | 637 | writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); |
437 | t = readl(reg); | 638 | t = readl(reg); |
438 | udelay(1); | 639 | udelay(1); |
439 | } while ((GLOB_SFT_RST & t) && (i-- > 0)); | 640 | } while ((GLOB_SFT_RST & t) && (i-- > 0)); |
440 | 641 | ||
441 | if (GLOB_SFT_RST & t) { | 642 | if (GLOB_SFT_RST & t) { |
442 | printk(KERN_ERR DRV_NAME "can't clear global reset\n"); | 643 | printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); |
443 | rc = 1; /* broken HW? */ | 644 | rc = 1; |
444 | } | 645 | } |
445 | 646 | done: | |
446 | done: | ||
447 | VPRINTK("EXIT, rc = %i\n", rc); | ||
448 | return rc; | 647 | return rc; |
449 | } | 648 | } |
450 | 649 | ||
451 | static void mv_err_intr(struct ata_port *ap) | 650 | /** |
651 | * mv_host_stop - Host specific cleanup/stop routine. | ||
652 | * @host_set: host data structure | ||
653 | * | ||
654 | * Disable ints, cleanup host memory, call general purpose | ||
655 | * host_stop. | ||
656 | * | ||
657 | * LOCKING: | ||
658 | * Inherited from caller. | ||
659 | */ | ||
660 | static void mv_host_stop(struct ata_host_set *host_set) | ||
452 | { | 661 | { |
453 | void __iomem *port_mmio; | 662 | struct mv_host_priv *hpriv = host_set->private_data; |
454 | u32 edma_err_cause, serr = 0; | 663 | struct pci_dev *pdev = to_pci_dev(host_set->dev); |
664 | |||
665 | if (hpriv->hp_flags & MV_HP_FLAG_MSI) { | ||
666 | pci_disable_msi(pdev); | ||
667 | } else { | ||
668 | pci_intx(pdev, 0); | ||
669 | } | ||
670 | kfree(hpriv); | ||
671 | ata_host_stop(host_set); | ||
672 | } | ||
673 | |||
674 | /** | ||
675 | * mv_port_start - Port specific init/start routine. | ||
676 | * @ap: ATA channel to manipulate | ||
677 | * | ||
678 | * Allocate and point to DMA memory, init port private memory, | ||
679 | * zero indices. | ||
680 | * | ||
681 | * LOCKING: | ||
682 | * Inherited from caller. | ||
683 | */ | ||
684 | static int mv_port_start(struct ata_port *ap) | ||
685 | { | ||
686 | struct device *dev = ap->host_set->dev; | ||
687 | struct mv_port_priv *pp; | ||
688 | void __iomem *port_mmio = mv_ap_base(ap); | ||
689 | void *mem; | ||
690 | dma_addr_t mem_dma; | ||
691 | |||
692 | pp = kmalloc(sizeof(*pp), GFP_KERNEL); | ||
693 | if (!pp) { | ||
694 | return -ENOMEM; | ||
695 | } | ||
696 | memset(pp, 0, sizeof(*pp)); | ||
697 | |||
698 | mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma, | ||
699 | GFP_KERNEL); | ||
700 | if (!mem) { | ||
701 | kfree(pp); | ||
702 | return -ENOMEM; | ||
703 | } | ||
704 | memset(mem, 0, MV_PORT_PRIV_DMA_SZ); | ||
705 | |||
706 | /* First item in chunk of DMA memory: | ||
707 | * 32-slot command request table (CRQB), 32 bytes each in size | ||
708 | */ | ||
709 | pp->crqb = mem; | ||
710 | pp->crqb_dma = mem_dma; | ||
711 | mem += MV_CRQB_Q_SZ; | ||
712 | mem_dma += MV_CRQB_Q_SZ; | ||
713 | |||
714 | /* Second item: | ||
715 | * 32-slot command response table (CRPB), 8 bytes each in size | ||
716 | */ | ||
717 | pp->crpb = mem; | ||
718 | pp->crpb_dma = mem_dma; | ||
719 | mem += MV_CRPB_Q_SZ; | ||
720 | mem_dma += MV_CRPB_Q_SZ; | ||
721 | |||
722 | /* Third item: | ||
723 | * Table of scatter-gather descriptors (ePRD), 16 bytes each | ||
724 | */ | ||
725 | pp->sg_tbl = mem; | ||
726 | pp->sg_tbl_dma = mem_dma; | ||
727 | |||
728 | writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | | ||
729 | EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS); | ||
730 | |||
731 | writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); | ||
732 | writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, | ||
733 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | ||
734 | |||
735 | writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | ||
736 | writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | ||
737 | |||
738 | writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); | ||
739 | writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, | ||
740 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
741 | |||
742 | pp->req_producer = pp->rsp_consumer = 0; | ||
743 | |||
744 | /* Don't turn on EDMA here...do it before DMA commands only. Else | ||
745 | * we'll be unable to send non-data, PIO, etc due to restricted access | ||
746 | * to shadow regs. | ||
747 | */ | ||
748 | ap->private_data = pp; | ||
749 | return 0; | ||
750 | } | ||
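One detail in the queue-base programming above: the upper half of the 64-bit DMA address is computed as (addr >> 16) >> 16 instead of addr >> 32, which stays well defined when dma_addr_t is only 32 bits wide (shifting a 32-bit value by 32 is undefined in C, while the double shift simply yields 0). A standalone illustration with a hypothetical bus address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma = 0x000000012345fc00ULL;	/* hypothetical, 1 KB aligned */

	uint32_t hi = (uint32_t)((dma >> 16) >> 16);	/* same trick as mv_port_start() */
	uint32_t lo = (uint32_t)(dma & 0xfffffc00U);	/* EDMA_REQ_Q_BASE_LO_MASK */

	printf("BASE_HI 0x%08x  BASE_LO 0x%08x\n", hi, lo);	/* 0x00000001 0x2345fc00 */
	return 0;
}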
751 | |||
752 | /** | ||
753 | * mv_port_stop - Port specific cleanup/stop routine. | ||
754 | * @ap: ATA channel to manipulate | ||
755 | * | ||
756 | * Stop DMA, cleanup port memory. | ||
757 | * | ||
758 | * LOCKING: | ||
759 | * This routine uses the host_set lock to protect the DMA stop. | ||
760 | */ | ||
761 | static void mv_port_stop(struct ata_port *ap) | ||
762 | { | ||
763 | struct device *dev = ap->host_set->dev; | ||
764 | struct mv_port_priv *pp = ap->private_data; | ||
765 | unsigned long flags; | ||
766 | |||
767 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
768 | mv_stop_dma(ap); | ||
769 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
770 | |||
771 | ap->private_data = NULL; | ||
772 | dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); | ||
773 | kfree(pp); | ||
774 | } | ||
775 | |||
776 | /** | ||
777 | * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries | ||
778 | * @qc: queued command whose SG list to source from | ||
779 | * | ||
780 | * Populate the SG list and mark the last entry. | ||
781 | * | ||
782 | * LOCKING: | ||
783 | * Inherited from caller. | ||
784 | */ | ||
785 | static void mv_fill_sg(struct ata_queued_cmd *qc) | ||
786 | { | ||
787 | struct mv_port_priv *pp = qc->ap->private_data; | ||
788 | unsigned int i; | ||
789 | |||
790 | for (i = 0; i < qc->n_elem; i++) { | ||
791 | u32 sg_len; | ||
792 | dma_addr_t addr; | ||
793 | |||
794 | addr = sg_dma_address(&qc->sg[i]); | ||
795 | sg_len = sg_dma_len(&qc->sg[i]); | ||
796 | |||
797 | pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff); | ||
798 | pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16); | ||
799 | assert(0 == (sg_len & ~MV_DMA_BOUNDARY)); | ||
800 | pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len); | ||
801 | } | ||
802 | if (0 < qc->n_elem) { | ||
803 | pp->sg_tbl[qc->n_elem - 1].flags_size |= EPRD_FLAG_END_OF_TBL; | ||
804 | } | ||
805 | } | ||
806 | |||
807 | static inline unsigned mv_inc_q_index(unsigned *index) | ||
808 | { | ||
809 | *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK; | ||
810 | return *index; | ||
811 | } | ||
812 | |||
813 | static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) | ||
814 | { | ||
815 | *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | | ||
816 | (last ? CRQB_CMD_LAST : 0); | ||
817 | } | ||
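To make the request-block encoding concrete, here is a standalone rendering of the two helpers above with one packed word. The 0x07 used for the command-register address is an assumption based on the standard taskfile register offsets (ATA_REG_CMD); this patch itself does not spell out that value.

#include <stdio.h>
#include <stdint.h>

enum {
	PACK_ADDR_SHIFT = 8,		/* CRQB_CMD_ADDR_SHIFT */
	PACK_CS         = (0x2 << 11),	/* CRQB_CMD_CS */
	PACK_LAST       = (1 << 15),	/* CRQB_CMD_LAST */
	Q_DEPTH_MASK    = 32 - 1,	/* MV_MAX_Q_DEPTH_MASK */
};

static void pack_cmd(uint16_t *cmdw, uint8_t data, uint8_t addr, unsigned last)
{
	*cmdw = data | (addr << PACK_ADDR_SHIFT) | PACK_CS |
		(last ? PACK_LAST : 0);
}

int main(void)
{
	uint16_t w;
	unsigned idx = 31;

	/* last CRQB word: command 0x25 (READ DMA EXT) into the command
	 * register, assuming ATA_REG_CMD == 0x07
	 */
	pack_cmd(&w, 0x25, 0x07, 1);
	printf("packed command word: 0x%04x\n", w);	/* 0x9725 */

	/* the ring indices wrap at the 32-entry queue depth */
	idx = (idx + 1) & Q_DEPTH_MASK;
	printf("index after wrap: %u\n", idx);		/* 0 */
	return 0;
}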
455 | 818 | ||
456 | /* bug here b/c we got an err int on a port we don't know about, | 819 | /** |
457 | * so there's no way to clear it | 820 | * mv_qc_prep - Host specific command preparation. |
821 | * @qc: queued command to prepare | ||
822 | * | ||
823 | * This routine simply redirects to the general purpose routine | ||
824 | * if command is not DMA. Else, it handles prep of the CRQB | ||
825 | * (command request block), does some sanity checking, and calls | ||
826 | * the SG load routine. | ||
827 | * | ||
828 | * LOCKING: | ||
829 | * Inherited from caller. | ||
830 | */ | ||
831 | static void mv_qc_prep(struct ata_queued_cmd *qc) | ||
832 | { | ||
833 | struct ata_port *ap = qc->ap; | ||
834 | struct mv_port_priv *pp = ap->private_data; | ||
835 | u16 *cw; | ||
836 | struct ata_taskfile *tf; | ||
837 | u16 flags = 0; | ||
838 | |||
839 | if (ATA_PROT_DMA != qc->tf.protocol) { | ||
840 | return; | ||
841 | } | ||
842 | |||
843 | /* the req producer index should be the same as we remember it */ | ||
844 | assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> | ||
845 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | ||
846 | pp->req_producer); | ||
847 | |||
848 | /* Fill in command request block | ||
458 | */ | 849 | */ |
459 | BUG_ON(NULL == ap); | 850 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { |
460 | port_mmio = mv_ap_base(ap); | 851 | flags |= CRQB_FLAG_READ; |
852 | } | ||
853 | assert(MV_MAX_Q_DEPTH > qc->tag); | ||
854 | flags |= qc->tag << CRQB_TAG_SHIFT; | ||
855 | |||
856 | pp->crqb[pp->req_producer].sg_addr = | ||
857 | cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); | ||
858 | pp->crqb[pp->req_producer].sg_addr_hi = | ||
859 | cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); | ||
860 | pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags); | ||
861 | |||
862 | cw = &pp->crqb[pp->req_producer].ata_cmd[0]; | ||
863 | tf = &qc->tf; | ||
864 | |||
865 | /* Sadly, the CRQB cannot accommodate all registers--there are | ||
866 | * only 11 bytes...so we must pick and choose required | ||
867 | * registers based on the command. So, we drop feature and | ||
868 | * hob_feature for [RW] DMA commands, but they are needed for | ||
869 | * NCQ. NCQ will drop hob_nsect. | ||
870 | */ | ||
871 | switch (tf->command) { | ||
872 | case ATA_CMD_READ: | ||
873 | case ATA_CMD_READ_EXT: | ||
874 | case ATA_CMD_WRITE: | ||
875 | case ATA_CMD_WRITE_EXT: | ||
876 | mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); | ||
877 | break; | ||
878 | #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */ | ||
879 | case ATA_CMD_FPDMA_READ: | ||
880 | case ATA_CMD_FPDMA_WRITE: | ||
881 | mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); | ||
882 | mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); | ||
883 | break; | ||
884 | #endif /* FIXME: remove this line when NCQ added */ | ||
885 | default: | ||
886 | /* The only other commands EDMA supports in non-queued and | ||
887 | * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none | ||
888 | * of which are defined/used by Linux. If we get here, this | ||
889 | * driver needs work. | ||
890 | * | ||
891 | * FIXME: modify libata to give qc_prep a return value and | ||
892 | * return error here. | ||
893 | */ | ||
894 | BUG_ON(tf->command); | ||
895 | break; | ||
896 | } | ||
897 | mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); | ||
898 | mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); | ||
899 | mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); | ||
900 | mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); | ||
901 | mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); | ||
902 | mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); | ||
903 | mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); | ||
904 | mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); | ||
905 | mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ | ||
906 | |||
907 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { | ||
908 | return; | ||
909 | } | ||
910 | mv_fill_sg(qc); | ||
911 | } | ||
912 | |||
913 | /** | ||
914 | * mv_qc_issue - Initiate a command to the host | ||
915 | * @qc: queued command to start | ||
916 | * | ||
917 | * This routine simply redirects to the general purpose routine | ||
918 | * if command is not DMA. Else, it sanity checks our local | ||
919 | * caches of the request producer/consumer indices then enables | ||
920 | * DMA and bumps the request producer index. | ||
921 | * | ||
922 | * LOCKING: | ||
923 | * Inherited from caller. | ||
924 | */ | ||
925 | static int mv_qc_issue(struct ata_queued_cmd *qc) | ||
926 | { | ||
927 | void __iomem *port_mmio = mv_ap_base(qc->ap); | ||
928 | struct mv_port_priv *pp = qc->ap->private_data; | ||
929 | u32 in_ptr; | ||
930 | |||
931 | if (ATA_PROT_DMA != qc->tf.protocol) { | ||
932 | /* We're about to send a non-EDMA capable command to the | ||
933 | * port. Turn off EDMA so there won't be problems accessing | ||
934 | * shadow block, etc registers. | ||
935 | */ | ||
936 | mv_stop_dma(qc->ap); | ||
937 | return ata_qc_issue_prot(qc); | ||
938 | } | ||
939 | |||
940 | in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | ||
941 | |||
942 | /* the req producer index should be the same as we remember it */ | ||
943 | assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | ||
944 | pp->req_producer); | ||
945 | /* until we do queuing, the queue should be empty at this point */ | ||
946 | assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | ||
947 | ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> | ||
948 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); | ||
949 | |||
950 | mv_inc_q_index(&pp->req_producer); /* now incr producer index */ | ||
951 | |||
952 | mv_start_dma(port_mmio, pp); | ||
953 | |||
954 | /* and write the request in pointer to kick the EDMA to life */ | ||
955 | in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; | ||
956 | in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT; | ||
957 | writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | ||
958 | |||
959 | return 0; | ||
960 | } | ||
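The constants make the IN_PTR write above easy to follow: the request queue base is 1 KB aligned (mask 0xfffffc00), and the hardware's copy of the producer index lives in bits 5-9, i.e. the index times the 32-byte CRQB size. A standalone illustration with a hypothetical register readback:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t in_ptr   = 0x1fe00060;	/* hypothetical EDMA_REQ_Q_IN_PTR value */
	unsigned producer = 4;		/* software copy after mv_inc_q_index()  */

	in_ptr &= 0xfffffc00U;		/* EDMA_REQ_Q_BASE_LO_MASK: keep the base */
	in_ptr |= producer << 5;	/* EDMA_REQ_Q_PTR_SHIFT: new index field  */

	printf("IN_PTR to write back: 0x%08x\n", in_ptr);	/* 0x1fe00080 */
	return 0;
}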
961 | |||
962 | /** | ||
963 | * mv_get_crpb_status - get status from most recently completed cmd | ||
964 | * @ap: ATA channel to manipulate | ||
965 | * | ||
966 | * This routine is for use when the port is in DMA mode, where it | ||
967 | * will be using the CRPB (command response block) method of | ||
968 | * returning command completion information. We assert indices | ||
969 | * are good, grab status, and bump the response consumer index to | ||
970 | * prove that we're up to date. | ||
971 | * | ||
972 | * LOCKING: | ||
973 | * Inherited from caller. | ||
974 | */ | ||
975 | static u8 mv_get_crpb_status(struct ata_port *ap) | ||
976 | { | ||
977 | void __iomem *port_mmio = mv_ap_base(ap); | ||
978 | struct mv_port_priv *pp = ap->private_data; | ||
979 | u32 out_ptr; | ||
980 | |||
981 | out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
982 | |||
983 | /* the response consumer index should be the same as we remember it */ | ||
984 | assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | ||
985 | pp->rsp_consumer); | ||
986 | |||
987 | /* increment our consumer index... */ | ||
988 | pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); | ||
989 | |||
990 | /* and, until we do NCQ, there should only be 1 CRPB waiting */ | ||
991 | assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> | ||
992 | EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | ||
993 | pp->rsp_consumer); | ||
994 | |||
995 | /* write out our inc'd consumer index so EDMA knows we're caught up */ | ||
996 | out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; | ||
997 | out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT; | ||
998 | writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
999 | |||
1000 | /* Return ATA status register for completed CRPB */ | ||
1001 | return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT); | ||
1002 | } | ||
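For reference, CRPB_FLAG_STATUS_SHIFT == 8 means the ATA status register value comes back in the high byte of the 16-bit CRPB flags word. A quick standalone check, with a made-up flags value and the conventional ATA status bit layout assumed:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t crpb_flags = 0x5000;		/* hypothetical completed CRPB */
	uint8_t  ata_status = crpb_flags >> 8;	/* CRPB_FLAG_STATUS_SHIFT */

	/* 0x50 == DRDY | DSC, i.e. a clean completion */
	printf("ATA status from CRPB: 0x%02x\n", ata_status);
	return 0;
}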
1003 | |||
1004 | /** | ||
1005 | * mv_err_intr - Handle error interrupts on the port | ||
1006 | * @ap: ATA channel to manipulate | ||
1007 | * | ||
1008 | * In most cases, just clear the interrupt and move on. However, | ||
1009 | * some cases require an eDMA reset, which is done right before | ||
1010 | * the COMRESET in mv_phy_reset(). The SERR case requires a | ||
1011 | * clear of pending errors in the SATA SERROR register. Finally, | ||
1012 | * if the port disabled DMA, update our cached copy to match. | ||
1013 | * | ||
1014 | * LOCKING: | ||
1015 | * Inherited from caller. | ||
1016 | */ | ||
1017 | static void mv_err_intr(struct ata_port *ap) | ||
1018 | { | ||
1019 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1020 | u32 edma_err_cause, serr = 0; | ||
461 | 1021 | ||
462 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1022 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
463 | 1023 | ||
@@ -465,8 +1025,12 @@ static void mv_err_intr(struct ata_port *ap) | |||
465 | serr = scr_read(ap, SCR_ERROR); | 1025 | serr = scr_read(ap, SCR_ERROR); |
466 | scr_write_flush(ap, SCR_ERROR, serr); | 1026 | scr_write_flush(ap, SCR_ERROR, serr); |
467 | } | 1027 | } |
468 | DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n", | 1028 | if (EDMA_ERR_SELF_DIS & edma_err_cause) { |
469 | ap->port_no, edma_err_cause, serr); | 1029 | struct mv_port_priv *pp = ap->private_data; |
1030 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | ||
1031 | } | ||
1032 | DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x " | ||
1033 | "SERR: 0x%08x\n", ap->id, edma_err_cause, serr); | ||
470 | 1034 | ||
471 | /* Clear EDMA now that SERR cleanup done */ | 1035 | /* Clear EDMA now that SERR cleanup done */ |
472 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1036 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
@@ -477,7 +1041,21 @@ static void mv_err_intr(struct ata_port *ap) | |||
477 | } | 1041 | } |
478 | } | 1042 | } |
479 | 1043 | ||
480 | /* Handle any outstanding interrupts in a single SATAHC | 1044 | /** |
1045 | * mv_host_intr - Handle all interrupts on the given host controller | ||
1046 | * @host_set: host specific structure | ||
1047 | * @relevant: port error bits relevant to this host controller | ||
1048 | * @hc: which host controller we're to look at | ||
1049 | * | ||
1050 | * Read then write-clear the HC interrupt status, then walk each | ||
1051 | * port connected to the HC and see if it needs servicing. Port | ||
1052 | * success ints are reported in the HC interrupt status reg, the | ||
1053 | * port error ints are reported in the higher level main | ||
1054 | * interrupt status register and thus are passed in via the | ||
1055 | * 'relevant' argument. | ||
1056 | * | ||
1057 | * LOCKING: | ||
1058 | * Inherited from caller. | ||
481 | */ | 1059 | */ |
482 | static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | 1060 | static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, |
483 | unsigned int hc) | 1061 | unsigned int hc) |
@@ -487,8 +1065,8 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
487 | struct ata_port *ap; | 1065 | struct ata_port *ap; |
488 | struct ata_queued_cmd *qc; | 1066 | struct ata_queued_cmd *qc; |
489 | u32 hc_irq_cause; | 1067 | u32 hc_irq_cause; |
490 | int shift, port, port0, hard_port; | 1068 | int shift, port, port0, hard_port, handled; |
491 | u8 ata_status; | 1069 | u8 ata_status = 0; |
492 | 1070 | ||
493 | if (hc == 0) { | 1071 | if (hc == 0) { |
494 | port0 = 0; | 1072 | port0 = 0; |
@@ -499,7 +1077,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
499 | /* we'll need the HC success int register in most cases */ | 1077 | /* we'll need the HC success int register in most cases */ |
500 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); | 1078 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); |
501 | if (hc_irq_cause) { | 1079 | if (hc_irq_cause) { |
502 | writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); | 1080 | writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); |
503 | } | 1081 | } |
504 | 1082 | ||
505 | VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", | 1083 | VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", |
@@ -508,35 +1086,38 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
508 | for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { | 1086 | for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { |
509 | ap = host_set->ports[port]; | 1087 | ap = host_set->ports[port]; |
510 | hard_port = port & MV_PORT_MASK; /* range 0-3 */ | 1088 | hard_port = port & MV_PORT_MASK; /* range 0-3 */ |
511 | ata_status = 0xffU; | 1089 | handled = 0; /* ensure ata_status is set if handled++ */ |
512 | 1090 | ||
513 | if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) { | 1091 | if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { |
514 | BUG_ON(NULL == ap); | 1092 | /* new CRPB on the queue; just one at a time until NCQ |
515 | /* rcv'd new resp, basic DMA complete, or ATA IRQ */ | 1093 | */ |
516 | /* This is needed to clear the ATA INTRQ. | 1094 | ata_status = mv_get_crpb_status(ap); |
517 | * FIXME: don't read the status reg in EDMA mode! | 1095 | handled++; |
1096 | } else if ((DEV_IRQ << hard_port) & hc_irq_cause) { | ||
1097 | /* received ATA IRQ; read the status reg to clear INTRQ | ||
518 | */ | 1098 | */ |
519 | ata_status = readb((void __iomem *) | 1099 | ata_status = readb((void __iomem *) |
520 | ap->ioaddr.status_addr); | 1100 | ap->ioaddr.status_addr); |
1101 | handled++; | ||
521 | } | 1102 | } |
522 | 1103 | ||
523 | shift = port * 2; | 1104 | shift = port << 1; /* (port * 2) */ |
524 | if (port >= MV_PORTS_PER_HC) { | 1105 | if (port >= MV_PORTS_PER_HC) { |
525 | shift++; /* skip bit 8 in the HC Main IRQ reg */ | 1106 | shift++; /* skip bit 8 in the HC Main IRQ reg */ |
526 | } | 1107 | } |
527 | if ((PORT0_ERR << shift) & relevant) { | 1108 | if ((PORT0_ERR << shift) & relevant) { |
528 | mv_err_intr(ap); | 1109 | mv_err_intr(ap); |
529 | /* FIXME: smart to OR in ATA_ERR? */ | 1110 | /* OR in ATA_ERR to ensure libata knows we took one */ |
530 | ata_status = readb((void __iomem *) | 1111 | ata_status = readb((void __iomem *) |
531 | ap->ioaddr.status_addr) | ATA_ERR; | 1112 | ap->ioaddr.status_addr) | ATA_ERR; |
1113 | handled++; | ||
532 | } | 1114 | } |
533 | 1115 | ||
534 | if (ap) { | 1116 | if (handled && ap) { |
535 | qc = ata_qc_from_tag(ap, ap->active_tag); | 1117 | qc = ata_qc_from_tag(ap, ap->active_tag); |
536 | if (NULL != qc) { | 1118 | if (NULL != qc) { |
537 | VPRINTK("port %u IRQ found for qc, " | 1119 | VPRINTK("port %u IRQ found for qc, " |
538 | "ata_status 0x%x\n", port,ata_status); | 1120 | "ata_status 0x%x\n", port,ata_status); |
539 | BUG_ON(0xffU == ata_status); | ||
540 | /* mark qc status appropriately */ | 1121 | /* mark qc status appropriately */ |
541 | ata_qc_complete(qc, ata_status); | 1122 | ata_qc_complete(qc, ata_status); |
542 | } | 1123 | } |
@@ -545,17 +1126,30 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
545 | VPRINTK("EXIT\n"); | 1126 | VPRINTK("EXIT\n"); |
546 | } | 1127 | } |
547 | 1128 | ||
1129 | /** | ||
1130 | * mv_interrupt - Main interrupt event handler | ||
1131 | * @irq: unused | ||
1132 | * @dev_instance: private data; in this case the host structure | ||
1133 | * @regs: unused | ||
1134 | * | ||
1135 | * Read the read only register to determine if any host | ||
1136 | * controllers have pending interrupts. If so, call lower level | ||
1137 | * routine to handle. Also check for PCI errors which are only | ||
1138 | * reported here. | ||
1139 | * | ||
1140 | * LOCKING: | ||
1141 | * This routine holds the host_set lock while processing pending | ||
1142 | * interrupts. | ||
1143 | */ | ||
548 | static irqreturn_t mv_interrupt(int irq, void *dev_instance, | 1144 | static irqreturn_t mv_interrupt(int irq, void *dev_instance, |
549 | struct pt_regs *regs) | 1145 | struct pt_regs *regs) |
550 | { | 1146 | { |
551 | struct ata_host_set *host_set = dev_instance; | 1147 | struct ata_host_set *host_set = dev_instance; |
552 | unsigned int hc, handled = 0, n_hcs; | 1148 | unsigned int hc, handled = 0, n_hcs; |
553 | void __iomem *mmio; | 1149 | void __iomem *mmio = host_set->mmio_base; |
554 | u32 irq_stat; | 1150 | u32 irq_stat; |
555 | 1151 | ||
556 | mmio = host_set->mmio_base; | ||
557 | irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); | 1152 | irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); |
558 | n_hcs = mv_get_hc_count(host_set->ports[0]->flags); | ||
559 | 1153 | ||
560 | /* check the cases where we either have nothing pending or have read | 1154 | /* check the cases where we either have nothing pending or have read |
561 | * a bogus register value which can indicate HW removal or PCI fault | 1155 | * a bogus register value which can indicate HW removal or PCI fault |
@@ -564,64 +1158,105 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, | |||
564 | return IRQ_NONE; | 1158 | return IRQ_NONE; |
565 | } | 1159 | } |
566 | 1160 | ||
1161 | n_hcs = mv_get_hc_count(host_set->ports[0]->flags); | ||
567 | spin_lock(&host_set->lock); | 1162 | spin_lock(&host_set->lock); |
568 | 1163 | ||
569 | for (hc = 0; hc < n_hcs; hc++) { | 1164 | for (hc = 0; hc < n_hcs; hc++) { |
570 | u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); | 1165 | u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); |
571 | if (relevant) { | 1166 | if (relevant) { |
572 | mv_host_intr(host_set, relevant, hc); | 1167 | mv_host_intr(host_set, relevant, hc); |
573 | handled = 1; | 1168 | handled++; |
574 | } | 1169 | } |
575 | } | 1170 | } |
576 | if (PCI_ERR & irq_stat) { | 1171 | if (PCI_ERR & irq_stat) { |
577 | /* FIXME: these are all masked by default, but still need | 1172 | printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", |
578 | * to recover from them properly. | 1173 | readl(mmio + PCI_IRQ_CAUSE_OFS)); |
579 | */ | ||
580 | } | ||
581 | 1174 | ||
1175 | DPRINTK("All regs @ PCI error\n"); | ||
1176 | mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev)); | ||
1177 | |||
1178 | writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); | ||
1179 | handled++; | ||
1180 | } | ||
582 | spin_unlock(&host_set->lock); | 1181 | spin_unlock(&host_set->lock); |
583 | 1182 | ||
584 | return IRQ_RETVAL(handled); | 1183 | return IRQ_RETVAL(handled); |
585 | } | 1184 | } |
586 | 1185 | ||
1186 | /** | ||
1187 | * mv_check_err - Return the error shadow register to caller. | ||
1188 | * @ap: ATA channel to manipulate | ||
1189 | * | ||
1190 | * Marvell requires DMA to be stopped before accessing shadow | ||
1191 | * registers. So we do that, then return the needed register. | ||
1192 | * | ||
1193 | * LOCKING: | ||
1194 | * Inherited from caller. FIXME: protect mv_stop_dma with lock? | ||
1195 | */ | ||
1196 | static u8 mv_check_err(struct ata_port *ap) | ||
1197 | { | ||
1198 | mv_stop_dma(ap); /* can't read shadow regs if DMA on */ | ||
1199 | return readb((void __iomem *) ap->ioaddr.error_addr); | ||
1200 | } | ||
1201 | |||
1202 | /** | ||
1203 | * mv_phy_reset - Perform eDMA reset followed by COMRESET | ||
1204 | * @ap: ATA channel to manipulate | ||
1205 | * | ||
1206 | * Part of this is taken from __sata_phy_reset and modified to | ||
1207 | * not sleep since this routine gets called from interrupt level. | ||
1208 | * | ||
1209 | * LOCKING: | ||
1210 | * Inherited from caller. This is coded to be safe to call at | ||
1211 | * interrupt level, i.e. it does not sleep. | ||
1212 | */ | ||
587 | static void mv_phy_reset(struct ata_port *ap) | 1213 | static void mv_phy_reset(struct ata_port *ap) |
588 | { | 1214 | { |
589 | void __iomem *port_mmio = mv_ap_base(ap); | 1215 | void __iomem *port_mmio = mv_ap_base(ap); |
590 | struct ata_taskfile tf; | 1216 | struct ata_taskfile tf; |
591 | struct ata_device *dev = &ap->device[0]; | 1217 | struct ata_device *dev = &ap->device[0]; |
592 | u32 edma = 0, bdma; | 1218 | unsigned long timeout; |
593 | 1219 | ||
594 | VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); | 1220 | VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); |
595 | 1221 | ||
596 | edma = readl(port_mmio + EDMA_CMD_OFS); | 1222 | mv_stop_dma(ap); |
597 | if (EDMA_EN & edma) { | ||
598 | /* disable EDMA if active */ | ||
599 | edma &= ~EDMA_EN; | ||
600 | writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS); | ||
601 | udelay(1); | ||
602 | } else if (mv_port_bdma_capable(ap) && | ||
603 | (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) { | ||
604 | /* disable BDMA if active */ | ||
605 | writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS); | ||
606 | } | ||
607 | 1223 | ||
608 | writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS); | 1224 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); |
609 | udelay(25); /* allow reset propagation */ | 1225 | udelay(25); /* allow reset propagation */ |
610 | 1226 | ||
611 | /* Spec never mentions clearing the bit. Marvell's driver does | 1227 | /* Spec never mentions clearing the bit. Marvell's driver does |
612 | * clear the bit, however. | 1228 | * clear the bit, however. |
613 | */ | 1229 | */ |
614 | writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS); | 1230 | writelfl(0, port_mmio + EDMA_CMD_OFS); |
615 | 1231 | ||
616 | VPRINTK("Done. Now calling __sata_phy_reset()\n"); | 1232 | VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " |
1233 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), | ||
1234 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); | ||
617 | 1235 | ||
618 | /* proceed to init communications via the scr_control reg */ | 1236 | /* proceed to init communications via the scr_control reg */ |
619 | __sata_phy_reset(ap); | 1237 | scr_write_flush(ap, SCR_CONTROL, 0x301); |
1238 | mdelay(1); | ||
1239 | scr_write_flush(ap, SCR_CONTROL, 0x300); | ||
1240 | timeout = jiffies + (HZ * 1); | ||
1241 | do { | ||
1242 | mdelay(10); | ||
1243 | if ((scr_read(ap, SCR_STATUS) & 0xf) != 1) | ||
1244 | break; | ||
1245 | } while (time_before(jiffies, timeout)); | ||
620 | 1246 | ||
621 | if (ap->flags & ATA_FLAG_PORT_DISABLED) { | 1247 | VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x " |
622 | VPRINTK("Port disabled pre-sig. Exiting.\n"); | 1248 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), |
1249 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); | ||
1250 | |||
1251 | if (sata_dev_present(ap)) { | ||
1252 | ata_port_probe(ap); | ||
1253 | } else { | ||
1254 | printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", | ||
1255 | ap->id, scr_read(ap, SCR_STATUS)); | ||
1256 | ata_port_disable(ap); | ||
623 | return; | 1257 | return; |
624 | } | 1258 | } |
1259 | ap->cbl = ATA_CBL_SATA; | ||
625 | 1260 | ||
626 | tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); | 1261 | tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); |
627 | tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); | 1262 | tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); |
@@ -636,37 +1271,118 @@ static void mv_phy_reset(struct ata_port *ap) | |||
636 | VPRINTK("EXIT\n"); | 1271 | VPRINTK("EXIT\n"); |
637 | } | 1272 | } |
638 | 1273 | ||
639 | static void mv_port_init(struct ata_ioports *port, unsigned long base) | 1274 | /** |
1275 | * mv_eng_timeout - Routine called by libata when SCSI times out I/O | ||
1276 | * @ap: ATA channel to manipulate | ||
1277 | * | ||
1278 | * Intent is to clear all pending error conditions, reset the | ||
1279 | * chip/bus, fail the command, and move on. | ||
1280 | * | ||
1281 | * LOCKING: | ||
1282 | * This routine holds the host_set lock while failing the command. | ||
1283 | */ | ||
1284 | static void mv_eng_timeout(struct ata_port *ap) | ||
1285 | { | ||
1286 | struct ata_queued_cmd *qc; | ||
1287 | unsigned long flags; | ||
1288 | |||
1289 | printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); | ||
1290 | DPRINTK("All regs @ start of eng_timeout\n"); | ||
1291 | mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, | ||
1292 | to_pci_dev(ap->host_set->dev)); | ||
1293 | |||
1294 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
1295 | printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n", | ||
1296 | ap->host_set->mmio_base, ap, qc, qc->scsicmd, | ||
1297 | &qc->scsicmd->cmnd); | ||
1298 | |||
1299 | mv_err_intr(ap); | ||
1300 | mv_phy_reset(ap); | ||
1301 | |||
1302 | if (!qc) { | ||
1303 | printk(KERN_ERR "ata%u: BUG: timeout without command\n", | ||
1304 | ap->id); | ||
1305 | } else { | ||
1306 | /* hack alert! We cannot use the supplied completion | ||
1307 | * function from inside the ->eh_strategy_handler() thread. | ||
1308 | * libata is the only user of ->eh_strategy_handler() in | ||
1309 | * any kernel, so the default scsi_done() assumes it is | ||
1310 | * not being called from the SCSI EH. | ||
1311 | */ | ||
1312 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
1313 | qc->scsidone = scsi_finish_command; | ||
1314 | ata_qc_complete(qc, ATA_ERR); | ||
1315 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
1316 | } | ||
1317 | } | ||
1318 | |||
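The "hack alert" comment above is the heart of both eng_timeout handlers touched by this patch (mv_eng_timeout() here and sil24_eng_timeout() in the new sata_sil24.c further down): a timed-out qc has to be completed through scsi_finish_command() rather than the stored done callback, because the completion runs inside the SCSI EH thread. A minimal sketch of that shared pattern, using only calls that appear in the patch (example_eng_timeout() itself is a hypothetical name, not a drop-in replacement for either driver):

#include <linux/spinlock.h>
#include <linux/libata.h>
#include "scsi.h"		/* scsi_finish_command(), as in the drivers above */

/* Condensed form of the pattern for illustration only. */
static void example_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	unsigned long flags;

	if (!qc)
		return;		/* timeout without an active command */

	spin_lock_irqsave(&ap->host_set->lock, flags);
	/* scsi_done() must not be called from the EH thread, so complete
	 * the command straight through scsi_finish_command() instead.
	 */
	qc->scsidone = scsi_finish_command;
	ata_qc_complete(qc, ATA_ERR);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}

mv_eng_timeout() additionally dumps registers and calls mv_err_intr()/mv_phy_reset() before this step; sil24_eng_timeout() follows it with a controller reset.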
1319 | /** | ||
1320 | * mv_port_init - Perform some early initialization on a single port. | ||
1321 | * @port: libata data structure storing shadow register addresses | ||
1322 | * @port_mmio: base address of the port | ||
1323 | * | ||
1324 | * Initialize shadow register mmio addresses, clear outstanding | ||
1325 | * interrupts on the port, and unmask interrupts for the future | ||
1326 | * start of the port. | ||
1327 | * | ||
1328 | * LOCKING: | ||
1329 | * Inherited from caller. | ||
1330 | */ | ||
1331 | static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) | ||
640 | { | 1332 | { |
641 | /* PIO related setup */ | 1333 | unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS; |
642 | port->data_addr = base + SHD_PIO_DATA_OFS; | 1334 | unsigned serr_ofs; |
643 | port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS; | 1335 | |
644 | port->nsect_addr = base + SHD_SECT_CNT_OFS; | 1336 | /* PIO related setup |
645 | port->lbal_addr = base + SHD_LBA_L_OFS; | 1337 | */ |
646 | port->lbam_addr = base + SHD_LBA_M_OFS; | 1338 | port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); |
647 | port->lbah_addr = base + SHD_LBA_H_OFS; | 1339 | port->error_addr = |
648 | port->device_addr = base + SHD_DEV_HD_OFS; | 1340 | port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); |
649 | port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS; | 1341 | port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); |
650 | port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS; | 1342 | port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); |
651 | /* unused */ | 1343 | port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); |
1344 | port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); | ||
1345 | port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); | ||
1346 | port->status_addr = | ||
1347 | port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); | ||
1348 | /* special case: control/altstatus doesn't have ATA_REG_ address */ | ||
1349 | port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; | ||
1350 | |||
1351 | /* unused: */ | ||
652 | port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; | 1352 | port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; |
653 | 1353 | ||
1354 | /* Clear any currently outstanding port interrupt conditions */ | ||
1355 | serr_ofs = mv_scr_offset(SCR_ERROR); | ||
1356 | writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); | ||
1357 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | ||
1358 | |||
654 | /* unmask all EDMA error interrupts */ | 1359 | /* unmask all EDMA error interrupts */ |
655 | writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS); | 1360 | writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS); |
656 | 1361 | ||
657 | VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", | 1362 | VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", |
658 | readl((void __iomem *)base + EDMA_CFG_OFS), | 1363 | readl(port_mmio + EDMA_CFG_OFS), |
659 | readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS), | 1364 | readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS), |
660 | readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS)); | 1365 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); |
661 | } | 1366 | } |
662 | 1367 | ||
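mv_port_init() above spaces the shadow taskfile registers sizeof(u32) apart, starting at SHD_BLK_OFS. Assuming the conventional ATA_REG_* numbering from <linux/ata.h> (ATA_REG_DATA == 0 through ATA_REG_STATUS == 7 -- an assumption, since that header is not part of this hunk), the resulting per-port offsets can be tabulated with a small standalone sketch:

#include <stdio.h>

/* Standalone illustration only; nothing here is driver code, it just
 * prints the offsets mv_port_init() computes above.
 */
int main(void)
{
	static const char *const name[] = {
		"data", "error/feature", "nsect", "lbal",
		"lbam", "lbah", "device", "status/command",
	};
	unsigned int reg;

	for (reg = 0; reg < 8; reg++)		/* sizeof(u32) stride */
		printf("%-16s shd_base + 0x%02x\n", name[reg], 4 * reg);
	printf("ctl/altstatus    shd_base + SHD_CTL_AST_OFS (special case)\n");
	return 0;
}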
1368 | /** | ||
1369 | * mv_host_init - Perform some early initialization of the host. | ||
1370 | * @probe_ent: early data struct representing the host | ||
1371 | * | ||
1372 | * If possible, do an early global reset of the host. Then do | ||
1373 | * our port init and clear/unmask all/relevant host interrupts. | ||
1374 | * | ||
1375 | * LOCKING: | ||
1376 | * Inherited from caller. | ||
1377 | */ | ||
663 | static int mv_host_init(struct ata_probe_ent *probe_ent) | 1378 | static int mv_host_init(struct ata_probe_ent *probe_ent) |
664 | { | 1379 | { |
665 | int rc = 0, n_hc, port, hc; | 1380 | int rc = 0, n_hc, port, hc; |
666 | void __iomem *mmio = probe_ent->mmio_base; | 1381 | void __iomem *mmio = probe_ent->mmio_base; |
667 | void __iomem *port_mmio; | 1382 | void __iomem *port_mmio; |
668 | 1383 | ||
669 | if (mv_master_reset(probe_ent->mmio_base)) { | 1384 | if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) && |
1385 | mv_global_soft_reset(probe_ent->mmio_base)) { | ||
670 | rc = 1; | 1386 | rc = 1; |
671 | goto done; | 1387 | goto done; |
672 | } | 1388 | } |
@@ -676,17 +1392,27 @@ static int mv_host_init(struct ata_probe_ent *probe_ent) | |||
676 | 1392 | ||
677 | for (port = 0; port < probe_ent->n_ports; port++) { | 1393 | for (port = 0; port < probe_ent->n_ports; port++) { |
678 | port_mmio = mv_port_base(mmio, port); | 1394 | port_mmio = mv_port_base(mmio, port); |
679 | mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio); | 1395 | mv_port_init(&probe_ent->port[port], port_mmio); |
680 | } | 1396 | } |
681 | 1397 | ||
682 | for (hc = 0; hc < n_hc; hc++) { | 1398 | for (hc = 0; hc < n_hc; hc++) { |
683 | VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc, | 1399 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); |
684 | readl(mv_hc_base(mmio, hc) + HC_CFG_OFS), | 1400 | |
685 | readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS)); | 1401 | VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " |
1402 | "(before clear)=0x%08x\n", hc, | ||
1403 | readl(hc_mmio + HC_CFG_OFS), | ||
1404 | readl(hc_mmio + HC_IRQ_CAUSE_OFS)); | ||
1405 | |||
1406 | /* Clear any currently outstanding hc interrupt conditions */ | ||
1407 | writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
686 | } | 1408 | } |
687 | 1409 | ||
688 | writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); | 1410 | /* Clear any currently outstanding host interrupt conditions */ |
689 | writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); | 1411 | writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); |
1412 | |||
1413 | /* and unmask interrupt generation for host regs */ | ||
1414 | writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); | ||
1415 | writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); | ||
690 | 1416 | ||
691 | VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " | 1417 | VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " |
692 | "PCI int cause/mask=0x%08x/0x%08x\n", | 1418 | "PCI int cause/mask=0x%08x/0x%08x\n", |
@@ -694,11 +1420,53 @@ static int mv_host_init(struct ata_probe_ent *probe_ent) | |||
694 | readl(mmio + HC_MAIN_IRQ_MASK_OFS), | 1420 | readl(mmio + HC_MAIN_IRQ_MASK_OFS), |
695 | readl(mmio + PCI_IRQ_CAUSE_OFS), | 1421 | readl(mmio + PCI_IRQ_CAUSE_OFS), |
696 | readl(mmio + PCI_IRQ_MASK_OFS)); | 1422 | readl(mmio + PCI_IRQ_MASK_OFS)); |
697 | 1423 | done: | |
698 | done: | ||
699 | return rc; | 1424 | return rc; |
700 | } | 1425 | } |
701 | 1426 | ||
1427 | /** | ||
1428 | * mv_print_info - Dump key info to kernel log for perusal. | ||
1429 | * @probe_ent: early data struct representing the host | ||
1430 | * | ||
1431 | * FIXME: complete this. | ||
1432 | * | ||
1433 | * LOCKING: | ||
1434 | * Inherited from caller. | ||
1435 | */ | ||
1436 | static void mv_print_info(struct ata_probe_ent *probe_ent) | ||
1437 | { | ||
1438 | struct pci_dev *pdev = to_pci_dev(probe_ent->dev); | ||
1439 | struct mv_host_priv *hpriv = probe_ent->private_data; | ||
1440 | u8 rev_id, scc; | ||
1441 | const char *scc_s; | ||
1442 | |||
1443 | /* Use this to determine the HW stepping of the chip so we know | ||
1444 | * what errata to workaround | ||
1445 | */ | ||
1446 | pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); | ||
1447 | |||
1448 | pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); | ||
1449 | if (scc == 0) | ||
1450 | scc_s = "SCSI"; | ||
1451 | else if (scc == 0x01) | ||
1452 | scc_s = "RAID"; | ||
1453 | else | ||
1454 | scc_s = "unknown"; | ||
1455 | |||
1456 | printk(KERN_INFO DRV_NAME | ||
1457 | "(%s) %u slots %u ports %s mode IRQ via %s\n", | ||
1458 | pci_name(pdev), (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports, | ||
1459 | scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); | ||
1460 | } | ||
1461 | |||
1462 | /** | ||
1463 | * mv_init_one - handle a positive probe of a Marvell host | ||
1464 | * @pdev: PCI device found | ||
1465 | * @ent: PCI device ID entry for the matched host | ||
1466 | * | ||
1467 | * LOCKING: | ||
1468 | * Inherited from caller. | ||
1469 | */ | ||
702 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 1470 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
703 | { | 1471 | { |
704 | static int printed_version = 0; | 1472 | static int printed_version = 0; |
@@ -706,16 +1474,12 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
706 | struct mv_host_priv *hpriv; | 1474 | struct mv_host_priv *hpriv; |
707 | unsigned int board_idx = (unsigned int)ent->driver_data; | 1475 | unsigned int board_idx = (unsigned int)ent->driver_data; |
708 | void __iomem *mmio_base; | 1476 | void __iomem *mmio_base; |
709 | int pci_dev_busy = 0; | 1477 | int pci_dev_busy = 0, rc; |
710 | int rc; | ||
711 | 1478 | ||
712 | if (!printed_version++) { | 1479 | if (!printed_version++) { |
713 | printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); | 1480 | printk(KERN_INFO DRV_NAME " version " DRV_VERSION "\n"); |
714 | } | 1481 | } |
715 | 1482 | ||
716 | VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number, | ||
717 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | ||
718 | |||
719 | rc = pci_enable_device(pdev); | 1483 | rc = pci_enable_device(pdev); |
720 | if (rc) { | 1484 | if (rc) { |
721 | return rc; | 1485 | return rc; |
@@ -727,8 +1491,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
727 | goto err_out; | 1491 | goto err_out; |
728 | } | 1492 | } |
729 | 1493 | ||
730 | pci_intx(pdev, 1); | ||
731 | |||
732 | probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); | 1494 | probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); |
733 | if (probe_ent == NULL) { | 1495 | if (probe_ent == NULL) { |
734 | rc = -ENOMEM; | 1496 | rc = -ENOMEM; |
@@ -739,8 +1501,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
739 | probe_ent->dev = pci_dev_to_dev(pdev); | 1501 | probe_ent->dev = pci_dev_to_dev(pdev); |
740 | INIT_LIST_HEAD(&probe_ent->node); | 1502 | INIT_LIST_HEAD(&probe_ent->node); |
741 | 1503 | ||
742 | mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR), | 1504 | mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0); |
743 | pci_resource_len(pdev, MV_PRIMARY_BAR)); | ||
744 | if (mmio_base == NULL) { | 1505 | if (mmio_base == NULL) { |
745 | rc = -ENOMEM; | 1506 | rc = -ENOMEM; |
746 | goto err_out_free_ent; | 1507 | goto err_out_free_ent; |
@@ -769,37 +1530,40 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
769 | if (rc) { | 1530 | if (rc) { |
770 | goto err_out_hpriv; | 1531 | goto err_out_hpriv; |
771 | } | 1532 | } |
772 | /* mv_print_info(probe_ent); */ | ||
773 | 1533 | ||
774 | { | 1534 | /* Enable interrupts */ |
775 | int b, w; | 1535 | if (pci_enable_msi(pdev) == 0) { |
776 | u32 dw[4]; /* hold a line of 16b */ | 1536 | hpriv->hp_flags |= MV_HP_FLAG_MSI; |
777 | VPRINTK("PCI config space:\n"); | 1537 | } else { |
778 | for (b = 0; b < 0x40; ) { | 1538 | pci_intx(pdev, 1); |
779 | for (w = 0; w < 4; w++) { | ||
780 | (void) pci_read_config_dword(pdev,b,&dw[w]); | ||
781 | b += sizeof(*dw); | ||
782 | } | ||
783 | VPRINTK("%08x %08x %08x %08x\n", | ||
784 | dw[0],dw[1],dw[2],dw[3]); | ||
785 | } | ||
786 | } | 1539 | } |
787 | 1540 | ||
788 | /* FIXME: check ata_device_add return value */ | 1541 | mv_dump_pci_cfg(pdev, 0x68); |
789 | ata_device_add(probe_ent); | 1542 | mv_print_info(probe_ent); |
790 | kfree(probe_ent); | 1543 | |
1544 | if (ata_device_add(probe_ent) == 0) { | ||
1545 | rc = -ENODEV; /* No devices discovered */ | ||
1546 | goto err_out_dev_add; | ||
1547 | } | ||
791 | 1548 | ||
1549 | kfree(probe_ent); | ||
792 | return 0; | 1550 | return 0; |
793 | 1551 | ||
794 | err_out_hpriv: | 1552 | err_out_dev_add: |
1553 | if (MV_HP_FLAG_MSI & hpriv->hp_flags) { | ||
1554 | pci_disable_msi(pdev); | ||
1555 | } else { | ||
1556 | pci_intx(pdev, 0); | ||
1557 | } | ||
1558 | err_out_hpriv: | ||
795 | kfree(hpriv); | 1559 | kfree(hpriv); |
796 | err_out_iounmap: | 1560 | err_out_iounmap: |
797 | iounmap(mmio_base); | 1561 | pci_iounmap(pdev, mmio_base); |
798 | err_out_free_ent: | 1562 | err_out_free_ent: |
799 | kfree(probe_ent); | 1563 | kfree(probe_ent); |
800 | err_out_regions: | 1564 | err_out_regions: |
801 | pci_release_regions(pdev); | 1565 | pci_release_regions(pdev); |
802 | err_out: | 1566 | err_out: |
803 | if (!pci_dev_busy) { | 1567 | if (!pci_dev_busy) { |
804 | pci_disable_device(pdev); | 1568 | pci_disable_device(pdev); |
805 | } | 1569 | } |
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c index c05653c7779d..9fa2535dd937 100644 --- a/drivers/scsi/sata_nv.c +++ b/drivers/scsi/sata_nv.c | |||
@@ -29,6 +29,8 @@ | |||
29 | * NV-specific details such as register offsets, SATA phy location, | 29 | * NV-specific details such as register offsets, SATA phy location, |
30 | * hotplug info, etc. | 30 | * hotplug info, etc. |
31 | * | 31 | * |
32 | * 0.09 | ||
33 | * - Fixed bug introduced by 0.08's MCP51 and MCP55 support. | ||
32 | * | 34 | * |
33 | * 0.08 | 35 | * 0.08 |
34 | * - Added support for MCP51 and MCP55. | 36 | * - Added support for MCP51 and MCP55. |
@@ -132,9 +134,7 @@ enum nv_host_type | |||
132 | GENERIC, | 134 | GENERIC, |
133 | NFORCE2, | 135 | NFORCE2, |
134 | NFORCE3, | 136 | NFORCE3, |
135 | CK804, | 137 | CK804 |
136 | MCP51, | ||
137 | MCP55 | ||
138 | }; | 138 | }; |
139 | 139 | ||
140 | static struct pci_device_id nv_pci_tbl[] = { | 140 | static struct pci_device_id nv_pci_tbl[] = { |
@@ -153,13 +153,13 @@ static struct pci_device_id nv_pci_tbl[] = { | |||
153 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2, | 153 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2, |
154 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, | 154 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, |
155 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA, | 155 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA, |
156 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, | 156 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, |
157 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2, | 157 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2, |
158 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, | 158 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, |
159 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA, | 159 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA, |
160 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, | 160 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, |
161 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2, | 161 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2, |
162 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, | 162 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, |
163 | { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, | 163 | { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, |
164 | PCI_ANY_ID, PCI_ANY_ID, | 164 | PCI_ANY_ID, PCI_ANY_ID, |
165 | PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, | 165 | PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, |
@@ -405,7 +405,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
405 | rc = -ENOMEM; | 405 | rc = -ENOMEM; |
406 | 406 | ||
407 | ppi = &nv_port_info; | 407 | ppi = &nv_port_info; |
408 | probe_ent = ata_pci_init_native_mode(pdev, &ppi); | 408 | probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); |
409 | if (!probe_ent) | 409 | if (!probe_ent) |
410 | goto err_out_regions; | 410 | goto err_out_regions; |
411 | 411 | ||
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c index 538ad727bd2e..def7e0d9dacb 100644 --- a/drivers/scsi/sata_promise.c +++ b/drivers/scsi/sata_promise.c | |||
@@ -438,11 +438,11 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap, | |||
438 | break; | 438 | break; |
439 | 439 | ||
440 | default: | 440 | default: |
441 | ap->stats.idle_irq++; | 441 | ap->stats.idle_irq++; |
442 | break; | 442 | break; |
443 | } | 443 | } |
444 | 444 | ||
445 | return handled; | 445 | return handled; |
446 | } | 446 | } |
447 | 447 | ||
448 | static void pdc_irq_clear(struct ata_port *ap) | 448 | static void pdc_irq_clear(struct ata_port *ap) |
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c new file mode 100644 index 000000000000..19857814d69f --- /dev/null +++ b/drivers/scsi/sata_sil24.c | |||
@@ -0,0 +1,875 @@ | |||
1 | /* | ||
2 | * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers | ||
3 | * | ||
4 | * Copyright 2005 Tejun Heo | ||
5 | * | ||
6 | * Based on preview driver from Silicon Image. | ||
7 | * | ||
8 | * NOTE: No NCQ/ATAPI support yet. The preview driver didn't support | ||
9 | * NCQ nor ATAPI, and, unfortunately, I couldn't find out how to make | ||
10 | * those work. Enabling those shouldn't be difficult. Basic | ||
11 | * structure is all there (in libata-dev tree). If you have any | ||
12 | * information about this hardware, please contact me or linux-ide. | ||
13 | * Info is needed on... | ||
14 | * | ||
15 | * - How to issue tagged commands and set SActive accordingly on issue. | ||
16 | * - Where to put an ATAPI command and how to tell the device to send it. | ||
17 | * - How to enable/use 64bit. | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or modify it | ||
20 | * under the terms of the GNU General Public License as published by the | ||
21 | * Free Software Foundation; either version 2, or (at your option) any | ||
22 | * later version. | ||
23 | * | ||
24 | * This program is distributed in the hope that it will be useful, but | ||
25 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
26 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
27 | * General Public License for more details. | ||
28 | * | ||
29 | */ | ||
30 | |||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/pci.h> | ||
34 | #include <linux/blkdev.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | #include <linux/dma-mapping.h> | ||
38 | #include <scsi/scsi_host.h> | ||
39 | #include "scsi.h" | ||
40 | #include <linux/libata.h> | ||
41 | #include <asm/io.h> | ||
42 | |||
43 | #define DRV_NAME "sata_sil24" | ||
44 | #define DRV_VERSION "0.22" /* Silicon Image's preview driver was 0.10 */ | ||
45 | |||
46 | /* | ||
47 | * Port request block (PRB) 32 bytes | ||
48 | */ | ||
49 | struct sil24_prb { | ||
50 | u16 ctrl; | ||
51 | u16 prot; | ||
52 | u32 rx_cnt; | ||
53 | u8 fis[6 * 4]; | ||
54 | }; | ||
55 | |||
56 | /* | ||
57 | * Scatter gather entry (SGE) 16 bytes | ||
58 | */ | ||
59 | struct sil24_sge { | ||
60 | u64 addr; | ||
61 | u32 cnt; | ||
62 | u32 flags; | ||
63 | }; | ||
64 | |||
65 | /* | ||
66 | * Port multiplier | ||
67 | */ | ||
68 | struct sil24_port_multiplier { | ||
69 | u32 diag; | ||
70 | u32 sactive; | ||
71 | }; | ||
72 | |||
73 | enum { | ||
74 | /* | ||
75 | * Global controller registers (128 bytes @ BAR0) | ||
76 | */ | ||
77 | /* 32 bit regs */ | ||
78 | HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */ | ||
79 | HOST_CTRL = 0x40, | ||
80 | HOST_IRQ_STAT = 0x44, | ||
81 | HOST_PHY_CFG = 0x48, | ||
82 | HOST_BIST_CTRL = 0x50, | ||
83 | HOST_BIST_PTRN = 0x54, | ||
84 | HOST_BIST_STAT = 0x58, | ||
85 | HOST_MEM_BIST_STAT = 0x5c, | ||
86 | HOST_FLASH_CMD = 0x70, | ||
87 | /* 8 bit regs */ | ||
88 | HOST_FLASH_DATA = 0x74, | ||
89 | HOST_TRANSITION_DETECT = 0x75, | ||
90 | HOST_GPIO_CTRL = 0x76, | ||
91 | HOST_I2C_ADDR = 0x78, /* 32 bit */ | ||
92 | HOST_I2C_DATA = 0x7c, | ||
93 | HOST_I2C_XFER_CNT = 0x7e, | ||
94 | HOST_I2C_CTRL = 0x7f, | ||
95 | |||
96 | /* HOST_SLOT_STAT bits */ | ||
97 | HOST_SSTAT_ATTN = (1 << 31), | ||
98 | |||
99 | /* | ||
100 | * Port registers | ||
101 | * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2) | ||
102 | */ | ||
103 | PORT_REGS_SIZE = 0x2000, | ||
104 | PORT_PRB = 0x0000, /* (32 bytes PRB + 16 bytes SGEs * 6) * 31 (3968 bytes) */ | ||
105 | |||
106 | PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */ | ||
107 | /* 32 bit regs */ | ||
108 | PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */ | ||
109 | PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */ | ||
110 | PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */ | ||
111 | PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */ | ||
112 | PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */ | ||
113 | PORT_ACTIVATE_UPPER_ADDR= 0x101c, | ||
114 | PORT_EXEC_FIFO = 0x1020, /* command execution fifo */ | ||
115 | PORT_CMD_ERR = 0x1024, /* command error number */ | ||
116 | PORT_FIS_CFG = 0x1028, | ||
117 | PORT_FIFO_THRES = 0x102c, | ||
118 | /* 16 bit regs */ | ||
119 | PORT_DECODE_ERR_CNT = 0x1040, | ||
120 | PORT_DECODE_ERR_THRESH = 0x1042, | ||
121 | PORT_CRC_ERR_CNT = 0x1044, | ||
122 | PORT_CRC_ERR_THRESH = 0x1046, | ||
123 | PORT_HSHK_ERR_CNT = 0x1048, | ||
124 | PORT_HSHK_ERR_THRESH = 0x104a, | ||
125 | /* 32 bit regs */ | ||
126 | PORT_PHY_CFG = 0x1050, | ||
127 | PORT_SLOT_STAT = 0x1800, | ||
128 | PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */ | ||
129 | PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */ | ||
130 | PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */ | ||
131 | PORT_SCONTROL = 0x1f00, | ||
132 | PORT_SSTATUS = 0x1f04, | ||
133 | PORT_SERROR = 0x1f08, | ||
134 | PORT_SACTIVE = 0x1f0c, | ||
135 | |||
136 | /* PORT_CTRL_STAT bits */ | ||
137 | PORT_CS_PORT_RST = (1 << 0), /* port reset */ | ||
138 | PORT_CS_DEV_RST = (1 << 1), /* device reset */ | ||
139 | PORT_CS_INIT = (1 << 2), /* port initialize */ | ||
140 | PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */ | ||
141 | PORT_CS_RESUME = (1 << 6), /* port resume */ | ||
142 | PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */ | ||
143 | PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */ | ||
144 | PORT_CS_RDY = (1 << 31), /* port ready to accept commands */ | ||
145 | |||
146 | /* PORT_IRQ_STAT/ENABLE_SET/CLR */ | ||
147 | /* bits[11:0] are masked */ | ||
148 | PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */ | ||
149 | PORT_IRQ_ERROR = (1 << 1), /* command execution error */ | ||
150 | PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */ | ||
151 | PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */ | ||
152 | PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */ | ||
153 | PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */ | ||
154 | PORT_IRQ_UNK_FIS = (1 << 6), /* Unknown FIS received */ | ||
155 | PORT_IRQ_SDB_FIS = (1 << 11), /* SDB FIS received */ | ||
156 | |||
157 | /* bits[27:16] are unmasked (raw) */ | ||
158 | PORT_IRQ_RAW_SHIFT = 16, | ||
159 | PORT_IRQ_MASKED_MASK = 0x7ff, | ||
160 | PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT), | ||
161 | |||
162 | /* ENABLE_SET/CLR specific, intr steering - 2 bit field */ | ||
163 | PORT_IRQ_STEER_SHIFT = 30, | ||
164 | PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT), | ||
165 | |||
166 | /* PORT_CMD_ERR constants */ | ||
167 | PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */ | ||
168 | PORT_CERR_SDB = 2, /* Error bit in SDB FIS */ | ||
169 | PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */ | ||
170 | PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */ | ||
171 | PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */ | ||
172 | PORT_CERR_DIRECTION = 6, /* Data direction mismatch */ | ||
173 | PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */ | ||
174 | PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */ | ||
175 | PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */ | ||
176 | PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */ | ||
177 | PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */ | ||
178 | PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */ | ||
179 | PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */ | ||
180 | PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */ | ||
181 | PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */ | ||
182 | PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */ | ||
183 | PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */ | ||
184 | PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */ | ||
185 | PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */ | ||
186 | PORT_CERR_XFR_MSGABRT = 34, /* PSD ecode 10 - master abort */ | ||
187 | PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */ | ||
188 | PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */ | ||
189 | |||
190 | /* | ||
191 | * Other constants | ||
192 | */ | ||
193 | SGE_TRM = (1 << 31), /* Last SGE in chain */ | ||
194 | PRB_SOFT_RST = (1 << 7), /* Soft reset request (ign BSY?) */ | ||
195 | |||
196 | /* board id */ | ||
197 | BID_SIL3124 = 0, | ||
198 | BID_SIL3132 = 1, | ||
199 | BID_SIL3131 = 2, | ||
200 | |||
201 | IRQ_STAT_4PORTS = 0xf, | ||
202 | }; | ||
203 | |||
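The comments in the enum above make several size and layout claims: a 32-byte PRB, 16-byte SGEs, 8 KB register windows per port at BAR2, and a 3968-byte PRB area per port. A small standalone C sketch (userspace types, structures mirroring sil24_prb/sil24_sge above) that double-checks the arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors of the structures above, for size checking only. */
struct prb { uint16_t ctrl; uint16_t prot; uint32_t rx_cnt; uint8_t fis[6 * 4]; };
struct sge { uint64_t addr; uint32_t cnt; uint32_t flags; };

int main(void)
{
	unsigned int port;

	assert(sizeof(struct prb) == 32);		/* "32 bytes" */
	assert(sizeof(struct sge) == 16);		/* "16 bytes" */
	/* "(32 bytes PRB + 16 bytes SGEs * 6) * 31 (3968 bytes)" */
	assert((32 + 16 * 6) * 31 == 3968);

	/* Port register windows at BAR2 + port * 0x2000 (PORT_REGS_SIZE) */
	for (port = 0; port < 4; port++)
		printf("port %u regs at BAR2 + 0x%04x\n", port, port * 0x2000);
	return 0;
}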
204 | struct sil24_cmd_block { | ||
205 | struct sil24_prb prb; | ||
206 | struct sil24_sge sge[LIBATA_MAX_PRD]; | ||
207 | }; | ||
208 | |||
209 | /* | ||
210 | * ap->private_data | ||
211 | * | ||
212 | * The preview driver always returned 0 for status. We emulate it | ||
213 | * here from the previous interrupt. | ||
214 | */ | ||
215 | struct sil24_port_priv { | ||
216 | struct sil24_cmd_block *cmd_block; /* 32 cmd blocks */ | ||
217 | dma_addr_t cmd_block_dma; /* DMA base addr for them */ | ||
218 | struct ata_taskfile tf; /* Cached taskfile registers */ | ||
219 | }; | ||
220 | |||
221 | /* ap->host_set->private_data */ | ||
222 | struct sil24_host_priv { | ||
223 | void *host_base; /* global controller control (128 bytes @BAR0) */ | ||
224 | void *port_base; /* port registers (4 * 8192 bytes @BAR2) */ | ||
225 | }; | ||
226 | |||
227 | static u8 sil24_check_status(struct ata_port *ap); | ||
228 | static u8 sil24_check_err(struct ata_port *ap); | ||
229 | static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); | ||
230 | static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); | ||
231 | static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); | ||
232 | static void sil24_phy_reset(struct ata_port *ap); | ||
233 | static void sil24_qc_prep(struct ata_queued_cmd *qc); | ||
234 | static int sil24_qc_issue(struct ata_queued_cmd *qc); | ||
235 | static void sil24_irq_clear(struct ata_port *ap); | ||
236 | static void sil24_eng_timeout(struct ata_port *ap); | ||
237 | static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); | ||
238 | static int sil24_port_start(struct ata_port *ap); | ||
239 | static void sil24_port_stop(struct ata_port *ap); | ||
240 | static void sil24_host_stop(struct ata_host_set *host_set); | ||
241 | static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | ||
242 | |||
243 | static struct pci_device_id sil24_pci_tbl[] = { | ||
244 | { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, | ||
245 | { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 }, | ||
246 | { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, | ||
247 | { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, | ||
248 | { } /* terminate list */ | ||
249 | }; | ||
250 | |||
251 | static struct pci_driver sil24_pci_driver = { | ||
252 | .name = DRV_NAME, | ||
253 | .id_table = sil24_pci_tbl, | ||
254 | .probe = sil24_init_one, | ||
255 | .remove = ata_pci_remove_one, /* safe? */ | ||
256 | }; | ||
257 | |||
258 | static Scsi_Host_Template sil24_sht = { | ||
259 | .module = THIS_MODULE, | ||
260 | .name = DRV_NAME, | ||
261 | .ioctl = ata_scsi_ioctl, | ||
262 | .queuecommand = ata_scsi_queuecmd, | ||
263 | .eh_strategy_handler = ata_scsi_error, | ||
264 | .can_queue = ATA_DEF_QUEUE, | ||
265 | .this_id = ATA_SHT_THIS_ID, | ||
266 | .sg_tablesize = LIBATA_MAX_PRD, | ||
267 | .max_sectors = ATA_MAX_SECTORS, | ||
268 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
269 | .emulated = ATA_SHT_EMULATED, | ||
270 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
271 | .proc_name = DRV_NAME, | ||
272 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
273 | .slave_configure = ata_scsi_slave_config, | ||
274 | .bios_param = ata_std_bios_param, | ||
275 | .ordered_flush = 1, /* NCQ not supported yet */ | ||
276 | }; | ||
277 | |||
278 | static struct ata_port_operations sil24_ops = { | ||
279 | .port_disable = ata_port_disable, | ||
280 | |||
281 | .check_status = sil24_check_status, | ||
282 | .check_altstatus = sil24_check_status, | ||
283 | .check_err = sil24_check_err, | ||
284 | .dev_select = ata_noop_dev_select, | ||
285 | |||
286 | .tf_read = sil24_tf_read, | ||
287 | |||
288 | .phy_reset = sil24_phy_reset, | ||
289 | |||
290 | .qc_prep = sil24_qc_prep, | ||
291 | .qc_issue = sil24_qc_issue, | ||
292 | |||
293 | .eng_timeout = sil24_eng_timeout, | ||
294 | |||
295 | .irq_handler = sil24_interrupt, | ||
296 | .irq_clear = sil24_irq_clear, | ||
297 | |||
298 | .scr_read = sil24_scr_read, | ||
299 | .scr_write = sil24_scr_write, | ||
300 | |||
301 | .port_start = sil24_port_start, | ||
302 | .port_stop = sil24_port_stop, | ||
303 | .host_stop = sil24_host_stop, | ||
304 | }; | ||
305 | |||
306 | /* | ||
307 | * Use bits 30-31 of host_flags to encode the number of available ports. | ||
308 | * Current maximum is 4. | ||
309 | */ | ||
310 | #define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30) | ||
311 | #define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1) | ||
312 | |||
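SIL24_NPORTS2FLAG()/SIL24_FLAG2NPORTS() above stash the port count, minus one, in bits 30-31 of host_flags so a single flags word can describe each board variant. A quick standalone check of the round trip (macros copied verbatim from the file):

#include <assert.h>

#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
#define SIL24_FLAG2NPORTS(flag)   ((((flag) >> 30) & 0x3) + 1)

int main(void)
{
	/* 4 ports -> 3 << 30; decoding recovers the original count. */
	assert(SIL24_NPORTS2FLAG(4) == (3u << 30));
	assert(SIL24_FLAG2NPORTS(SIL24_NPORTS2FLAG(4)) == 4);
	assert(SIL24_FLAG2NPORTS(SIL24_NPORTS2FLAG(1)) == 1);
	/* Bits 30-31 stay clear of the ATA_FLAG_* bits stored lower down. */
	return 0;
}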
313 | static struct ata_port_info sil24_port_info[] = { | ||
314 | /* sil_3124 */ | ||
315 | { | ||
316 | .sht = &sil24_sht, | ||
317 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
318 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | ||
319 | ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4), | ||
320 | .pio_mask = 0x1f, /* pio0-4 */ | ||
321 | .mwdma_mask = 0x07, /* mwdma0-2 */ | ||
322 | .udma_mask = 0x3f, /* udma0-5 */ | ||
323 | .port_ops = &sil24_ops, | ||
324 | }, | ||
325 | /* sil_3132 */ | ||
326 | { | ||
327 | .sht = &sil24_sht, | ||
328 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
329 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | ||
330 | ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2), | ||
331 | .pio_mask = 0x1f, /* pio0-4 */ | ||
332 | .mwdma_mask = 0x07, /* mwdma0-2 */ | ||
333 | .udma_mask = 0x3f, /* udma0-5 */ | ||
334 | .port_ops = &sil24_ops, | ||
335 | }, | ||
336 | /* sil_3131/sil_3531 */ | ||
337 | { | ||
338 | .sht = &sil24_sht, | ||
339 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
340 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | ||
341 | ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1), | ||
342 | .pio_mask = 0x1f, /* pio0-4 */ | ||
343 | .mwdma_mask = 0x07, /* mwdma0-2 */ | ||
344 | .udma_mask = 0x3f, /* udma0-5 */ | ||
345 | .port_ops = &sil24_ops, | ||
346 | }, | ||
347 | }; | ||
348 | |||
349 | static inline void sil24_update_tf(struct ata_port *ap) | ||
350 | { | ||
351 | struct sil24_port_priv *pp = ap->private_data; | ||
352 | void *port = (void *)ap->ioaddr.cmd_addr; | ||
353 | struct sil24_prb *prb = port; | ||
354 | |||
355 | ata_tf_from_fis(prb->fis, &pp->tf); | ||
356 | } | ||
357 | |||
358 | static u8 sil24_check_status(struct ata_port *ap) | ||
359 | { | ||
360 | struct sil24_port_priv *pp = ap->private_data; | ||
361 | return pp->tf.command; | ||
362 | } | ||
363 | |||
364 | static u8 sil24_check_err(struct ata_port *ap) | ||
365 | { | ||
366 | struct sil24_port_priv *pp = ap->private_data; | ||
367 | return pp->tf.feature; | ||
368 | } | ||
369 | |||
370 | static int sil24_scr_map[] = { | ||
371 | [SCR_CONTROL] = 0, | ||
372 | [SCR_STATUS] = 1, | ||
373 | [SCR_ERROR] = 2, | ||
374 | [SCR_ACTIVE] = 3, | ||
375 | }; | ||
376 | |||
377 | static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg) | ||
378 | { | ||
379 | void *scr_addr = (void *)ap->ioaddr.scr_addr; | ||
380 | if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { | ||
381 | void *addr; | ||
382 | addr = scr_addr + sil24_scr_map[sc_reg] * 4; | ||
383 | return readl(addr); | ||
384 | } | ||
385 | return 0xffffffffU; | ||
386 | } | ||
387 | |||
388 | static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) | ||
389 | { | ||
390 | void *scr_addr = (void *)ap->ioaddr.scr_addr; | ||
391 | if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { | ||
392 | void *addr; | ||
393 | addr = scr_addr + sil24_scr_map[sc_reg] * 4; | ||
394 | writel(val, addr); | ||
395 | } | ||
396 | } | ||
397 | |||
398 | static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | ||
399 | { | ||
400 | struct sil24_port_priv *pp = ap->private_data; | ||
401 | *tf = pp->tf; | ||
402 | } | ||
403 | |||
404 | static void sil24_phy_reset(struct ata_port *ap) | ||
405 | { | ||
406 | __sata_phy_reset(ap); | ||
407 | /* | ||
408 | * No ATAPI yet. Just unconditionally indicate ATA device. | ||
409 | * If ATAPI device is attached, it will fail ATA_CMD_ID_ATA | ||
410 | * and libata core will ignore the device. | ||
411 | */ | ||
412 | if (!(ap->flags & ATA_FLAG_PORT_DISABLED)) | ||
413 | ap->device[0].class = ATA_DEV_ATA; | ||
414 | } | ||
415 | |||
416 | static inline void sil24_fill_sg(struct ata_queued_cmd *qc, | ||
417 | struct sil24_cmd_block *cb) | ||
418 | { | ||
419 | struct scatterlist *sg = qc->sg; | ||
420 | struct sil24_sge *sge = cb->sge; | ||
421 | unsigned i; | ||
422 | |||
423 | for (i = 0; i < qc->n_elem; i++, sg++, sge++) { | ||
424 | sge->addr = cpu_to_le64(sg_dma_address(sg)); | ||
425 | sge->cnt = cpu_to_le32(sg_dma_len(sg)); | ||
426 | sge->flags = 0; | ||
427 | sge->flags = i < qc->n_elem - 1 ? 0 : cpu_to_le32(SGE_TRM); | ||
428 | } | ||
429 | } | ||
430 | |||
431 | static void sil24_qc_prep(struct ata_queued_cmd *qc) | ||
432 | { | ||
433 | struct ata_port *ap = qc->ap; | ||
434 | struct sil24_port_priv *pp = ap->private_data; | ||
435 | struct sil24_cmd_block *cb = pp->cmd_block + qc->tag; | ||
436 | struct sil24_prb *prb = &cb->prb; | ||
437 | |||
438 | switch (qc->tf.protocol) { | ||
439 | case ATA_PROT_PIO: | ||
440 | case ATA_PROT_DMA: | ||
441 | case ATA_PROT_NODATA: | ||
442 | break; | ||
443 | default: | ||
444 | /* ATAPI isn't supported yet */ | ||
445 | BUG(); | ||
446 | } | ||
447 | |||
448 | ata_tf_to_fis(&qc->tf, prb->fis, 0); | ||
449 | |||
450 | if (qc->flags & ATA_QCFLAG_DMAMAP) | ||
451 | sil24_fill_sg(qc, cb); | ||
452 | } | ||
453 | |||
454 | static int sil24_qc_issue(struct ata_queued_cmd *qc) | ||
455 | { | ||
456 | struct ata_port *ap = qc->ap; | ||
457 | void *port = (void *)ap->ioaddr.cmd_addr; | ||
458 | struct sil24_port_priv *pp = ap->private_data; | ||
459 | dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block); | ||
460 | |||
461 | writel((u32)paddr, port + PORT_CMD_ACTIVATE); | ||
462 | return 0; | ||
463 | } | ||
464 | |||
465 | static void sil24_irq_clear(struct ata_port *ap) | ||
466 | { | ||
467 | /* unused */ | ||
468 | } | ||
469 | |||
470 | static int __sil24_reset_controller(void *port) | ||
471 | { | ||
472 | int cnt; | ||
473 | u32 tmp; | ||
474 | |||
475 | /* Reset controller state. Is this correct? */ | ||
476 | writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); | ||
477 | readl(port + PORT_CTRL_STAT); /* sync */ | ||
478 | |||
479 | /* Max ~100ms */ | ||
480 | for (cnt = 0; cnt < 1000; cnt++) { | ||
481 | udelay(100); | ||
482 | tmp = readl(port + PORT_CTRL_STAT); | ||
483 | if (!(tmp & PORT_CS_DEV_RST)) | ||
484 | break; | ||
485 | } | ||
486 | |||
487 | if (tmp & PORT_CS_DEV_RST) | ||
488 | return -1; | ||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | static void sil24_reset_controller(struct ata_port *ap) | ||
493 | { | ||
494 | printk(KERN_NOTICE DRV_NAME | ||
495 | " ata%u: resetting controller...\n", ap->id); | ||
496 | if (__sil24_reset_controller((void *)ap->ioaddr.cmd_addr)) | ||
497 | printk(KERN_ERR DRV_NAME | ||
498 | " ata%u: failed to reset controller\n", ap->id); | ||
499 | } | ||
500 | |||
501 | static void sil24_eng_timeout(struct ata_port *ap) | ||
502 | { | ||
503 | struct ata_queued_cmd *qc; | ||
504 | |||
505 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
506 | if (!qc) { | ||
507 | printk(KERN_ERR "ata%u: BUG: timeout without command\n", | ||
508 | ap->id); | ||
509 | return; | ||
510 | } | ||
511 | |||
512 | /* | ||
513 | * hack alert! We cannot use the supplied completion | ||
514 | * function from inside the ->eh_strategy_handler() thread. | ||
515 | * libata is the only user of ->eh_strategy_handler() in | ||
516 | * any kernel, so the default scsi_done() assumes it is | ||
517 | * not being called from the SCSI EH. | ||
518 | */ | ||
519 | printk(KERN_ERR "ata%u: command timeout\n", ap->id); | ||
520 | qc->scsidone = scsi_finish_command; | ||
521 | ata_qc_complete(qc, ATA_ERR); | ||
522 | |||
523 | sil24_reset_controller(ap); | ||
524 | } | ||
525 | |||
526 | static void sil24_error_intr(struct ata_port *ap, u32 slot_stat) | ||
527 | { | ||
528 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); | ||
529 | struct sil24_port_priv *pp = ap->private_data; | ||
530 | void *port = (void *)ap->ioaddr.cmd_addr; | ||
531 | u32 irq_stat, cmd_err, sstatus, serror; | ||
532 | |||
533 | irq_stat = readl(port + PORT_IRQ_STAT); | ||
534 | writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */ | ||
535 | |||
536 | if (!(irq_stat & PORT_IRQ_ERROR)) { | ||
537 | /* ignore non-completion, non-error irqs for now */ | ||
538 | printk(KERN_WARNING DRV_NAME | ||
539 | "ata%u: non-error exception irq (irq_stat %x)\n", | ||
540 | ap->id, irq_stat); | ||
541 | return; | ||
542 | } | ||
543 | |||
544 | cmd_err = readl(port + PORT_CMD_ERR); | ||
545 | sstatus = readl(port + PORT_SSTATUS); | ||
546 | serror = readl(port + PORT_SERROR); | ||
547 | if (serror) | ||
548 | writel(serror, port + PORT_SERROR); | ||
549 | |||
550 | printk(KERN_ERR DRV_NAME " ata%u: error interrupt on port%d\n" | ||
551 | " stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n", | ||
552 | ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror); | ||
553 | |||
554 | if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) { | ||
555 | /* | ||
556 | * Device is reporting error, tf registers are valid. | ||
557 | */ | ||
558 | sil24_update_tf(ap); | ||
559 | } else { | ||
560 | /* | ||
561 | * Other errors. libata currently doesn't have any | ||
562 | * mechanism to report these errors. Just turn on | ||
563 | * ATA_ERR. | ||
564 | */ | ||
565 | pp->tf.command = ATA_ERR; | ||
566 | } | ||
567 | |||
568 | if (qc) | ||
569 | ata_qc_complete(qc, pp->tf.command); | ||
570 | |||
571 | sil24_reset_controller(ap); | ||
572 | } | ||
573 | |||
574 | static inline void sil24_host_intr(struct ata_port *ap) | ||
575 | { | ||
576 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); | ||
577 | void *port = (void *)ap->ioaddr.cmd_addr; | ||
578 | u32 slot_stat; | ||
579 | |||
580 | slot_stat = readl(port + PORT_SLOT_STAT); | ||
581 | if (!(slot_stat & HOST_SSTAT_ATTN)) { | ||
582 | struct sil24_port_priv *pp = ap->private_data; | ||
583 | /* | ||
584 | * !HOST_SSTAT_ATTN guarantees successful completion, | ||
585 | * so reading back tf registers is unnecessary for | ||
586 | * most commands. TODO: read tf registers for | ||
587 | * commands which require these values on successful | ||
588 | * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER, | ||
589 | * DEVICE RESET and READ PORT MULTIPLIER; any more?). | ||
590 | */ | ||
591 | sil24_update_tf(ap); | ||
592 | |||
593 | if (qc) | ||
594 | ata_qc_complete(qc, pp->tf.command); | ||
595 | } else | ||
596 | sil24_error_intr(ap, slot_stat); | ||
597 | } | ||
598 | |||
599 | static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs) | ||
600 | { | ||
601 | struct ata_host_set *host_set = dev_instance; | ||
602 | struct sil24_host_priv *hpriv = host_set->private_data; | ||
603 | unsigned handled = 0; | ||
604 | u32 status; | ||
605 | int i; | ||
606 | |||
607 | status = readl(hpriv->host_base + HOST_IRQ_STAT); | ||
608 | |||
609 | if (status == 0xffffffff) { | ||
610 | printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, " | ||
611 | "PCI fault or device removal?\n"); | ||
612 | goto out; | ||
613 | } | ||
614 | |||
615 | if (!(status & IRQ_STAT_4PORTS)) | ||
616 | goto out; | ||
617 | |||
618 | spin_lock(&host_set->lock); | ||
619 | |||
620 | for (i = 0; i < host_set->n_ports; i++) | ||
621 | if (status & (1 << i)) { | ||
622 | struct ata_port *ap = host_set->ports[i]; | ||
623 | if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) { | ||
624 | sil24_host_intr(host_set->ports[i]); | ||
625 | handled++; | ||
626 | } else | ||
627 | printk(KERN_ERR DRV_NAME | ||
628 | ": interrupt from disabled port %d\n", i); | ||
629 | } | ||
630 | |||
631 | spin_unlock(&host_set->lock); | ||
632 | out: | ||
633 | return IRQ_RETVAL(handled); | ||
634 | } | ||
635 | |||
636 | static int sil24_port_start(struct ata_port *ap) | ||
637 | { | ||
638 | struct device *dev = ap->host_set->dev; | ||
639 | struct sil24_port_priv *pp; | ||
640 | struct sil24_cmd_block *cb; | ||
641 | size_t cb_size = sizeof(*cb); | ||
642 | dma_addr_t cb_dma; | ||
643 | |||
644 | pp = kmalloc(sizeof(*pp), GFP_KERNEL); | ||
645 | if (!pp) | ||
646 | return -ENOMEM; | ||
647 | memset(pp, 0, sizeof(*pp)); | ||
648 | |||
649 | pp->tf.command = ATA_DRDY; | ||
650 | |||
651 | cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL); | ||
652 | if (!cb) { | ||
653 | kfree(pp); | ||
654 | return -ENOMEM; | ||
655 | } | ||
656 | memset(cb, 0, cb_size); | ||
657 | |||
658 | pp->cmd_block = cb; | ||
659 | pp->cmd_block_dma = cb_dma; | ||
660 | |||
661 | ap->private_data = pp; | ||
662 | |||
663 | return 0; | ||
664 | } | ||
665 | |||
666 | static void sil24_port_stop(struct ata_port *ap) | ||
667 | { | ||
668 | struct device *dev = ap->host_set->dev; | ||
669 | struct sil24_port_priv *pp = ap->private_data; | ||
670 | size_t cb_size = sizeof(*pp->cmd_block); | ||
671 | |||
672 | dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma); | ||
673 | kfree(pp); | ||
674 | } | ||
675 | |||
676 | static void sil24_host_stop(struct ata_host_set *host_set) | ||
677 | { | ||
678 | struct sil24_host_priv *hpriv = host_set->private_data; | ||
679 | |||
680 | iounmap(hpriv->host_base); | ||
681 | iounmap(hpriv->port_base); | ||
682 | kfree(hpriv); | ||
683 | } | ||
684 | |||
685 | static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
686 | { | ||
687 | static int printed_version = 0; | ||
688 | unsigned int board_id = (unsigned int)ent->driver_data; | ||
689 | struct ata_port_info *pinfo = &sil24_port_info[board_id]; | ||
690 | struct ata_probe_ent *probe_ent = NULL; | ||
691 | struct sil24_host_priv *hpriv = NULL; | ||
692 | void *host_base = NULL, *port_base = NULL; | ||
693 | int i, rc; | ||
694 | |||
695 | if (!printed_version++) | ||
696 | printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); | ||
697 | |||
698 | rc = pci_enable_device(pdev); | ||
699 | if (rc) | ||
700 | return rc; | ||
701 | |||
702 | rc = pci_request_regions(pdev, DRV_NAME); | ||
703 | if (rc) | ||
704 | goto out_disable; | ||
705 | |||
706 | rc = -ENOMEM; | ||
707 | /* ioremap mmio registers */ | ||
708 | host_base = ioremap(pci_resource_start(pdev, 0), | ||
709 | pci_resource_len(pdev, 0)); | ||
710 | if (!host_base) | ||
711 | goto out_free; | ||
712 | port_base = ioremap(pci_resource_start(pdev, 2), | ||
713 | pci_resource_len(pdev, 2)); | ||
714 | if (!port_base) | ||
715 | goto out_free; | ||
716 | |||
717 | /* allocate & init probe_ent and hpriv */ | ||
718 | probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); | ||
719 | if (!probe_ent) | ||
720 | goto out_free; | ||
721 | |||
722 | hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL); | ||
723 | if (!hpriv) | ||
724 | goto out_free; | ||
725 | |||
726 | memset(probe_ent, 0, sizeof(*probe_ent)); | ||
727 | probe_ent->dev = pci_dev_to_dev(pdev); | ||
728 | INIT_LIST_HEAD(&probe_ent->node); | ||
729 | |||
730 | probe_ent->sht = pinfo->sht; | ||
731 | probe_ent->host_flags = pinfo->host_flags; | ||
732 | probe_ent->pio_mask = pinfo->pio_mask; | ||
733 | probe_ent->udma_mask = pinfo->udma_mask; | ||
734 | probe_ent->port_ops = pinfo->port_ops; | ||
735 | probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags); | ||
736 | |||
737 | probe_ent->irq = pdev->irq; | ||
738 | probe_ent->irq_flags = SA_SHIRQ; | ||
739 | probe_ent->mmio_base = port_base; | ||
740 | probe_ent->private_data = hpriv; | ||
741 | |||
742 | memset(hpriv, 0, sizeof(*hpriv)); | ||
743 | hpriv->host_base = host_base; | ||
744 | hpriv->port_base = port_base; | ||
745 | |||
746 | /* | ||
747 | * Configure the device | ||
748 | */ | ||
749 | /* | ||
750 | * FIXME: This device is certainly 64-bit capable. We just | ||
751 | * don't know how to use it. After fixing 32bit activation in | ||
752 | * this function, enable 64bit masks here. | ||
753 | */ | ||
754 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
755 | if (rc) { | ||
756 | printk(KERN_ERR DRV_NAME "(%s): 32-bit DMA enable failed\n", | ||
757 | pci_name(pdev)); | ||
758 | goto out_free; | ||
759 | } | ||
760 | rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
761 | if (rc) { | ||
762 | printk(KERN_ERR DRV_NAME "(%s): 32-bit consistent DMA enable failed\n", | ||
763 | pci_name(pdev)); | ||
764 | goto out_free; | ||
765 | } | ||
766 | |||
767 | /* GPIO off */ | ||
768 | writel(0, host_base + HOST_FLASH_CMD); | ||
769 | |||
770 | /* Mask interrupts during initialization */ | ||
771 | writel(0, host_base + HOST_CTRL); | ||
772 | |||
773 | for (i = 0; i < probe_ent->n_ports; i++) { | ||
774 | void *port = port_base + i * PORT_REGS_SIZE; | ||
775 | unsigned long portu = (unsigned long)port; | ||
776 | u32 tmp; | ||
777 | int cnt; | ||
778 | |||
779 | probe_ent->port[i].cmd_addr = portu + PORT_PRB; | ||
780 | probe_ent->port[i].scr_addr = portu + PORT_SCONTROL; | ||
781 | |||
782 | ata_std_ports(&probe_ent->port[i]); | ||
783 | |||
784 | /* Initial PHY setting */ | ||
785 | writel(0x20c, port + PORT_PHY_CFG); | ||
786 | |||
787 | /* Clear port RST */ | ||
788 | tmp = readl(port + PORT_CTRL_STAT); | ||
789 | if (tmp & PORT_CS_PORT_RST) { | ||
790 | writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); | ||
791 | readl(port + PORT_CTRL_STAT); /* sync */ | ||
792 | for (cnt = 0; cnt < 10; cnt++) { | ||
793 | msleep(10); | ||
794 | tmp = readl(port + PORT_CTRL_STAT); | ||
795 | if (!(tmp & PORT_CS_PORT_RST)) | ||
796 | break; | ||
797 | } | ||
798 | if (tmp & PORT_CS_PORT_RST) | ||
799 | printk(KERN_ERR DRV_NAME | ||
800 | "(%s): failed to clear port RST\n", | ||
801 | pci_name(pdev)); | ||
802 | } | ||
803 | |||
804 | /* Zero error counters. */ | ||
805 | writel(0x8000, port + PORT_DECODE_ERR_THRESH); | ||
806 | writel(0x8000, port + PORT_CRC_ERR_THRESH); | ||
807 | writel(0x8000, port + PORT_HSHK_ERR_THRESH); | ||
808 | writel(0x0000, port + PORT_DECODE_ERR_CNT); | ||
809 | writel(0x0000, port + PORT_CRC_ERR_CNT); | ||
810 | writel(0x0000, port + PORT_HSHK_ERR_CNT); | ||
811 | |||
812 | /* FIXME: 32bit activation? */ | ||
813 | writel(0, port + PORT_ACTIVATE_UPPER_ADDR); | ||
814 | writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_STAT); | ||
815 | |||
816 | /* Configure interrupts */ | ||
817 | writel(0xffff, port + PORT_IRQ_ENABLE_CLR); | ||
818 | writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_SDB_FIS, | ||
819 | port + PORT_IRQ_ENABLE_SET); | ||
820 | |||
821 | /* Clear interrupts */ | ||
822 | writel(0x0fff0fff, port + PORT_IRQ_STAT); | ||
823 | writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); | ||
824 | |||
825 | /* Clear port multiplier enable and resume bits */ | ||
826 | writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); | ||
827 | |||
828 | /* Reset itself */ | ||
829 | if (__sil24_reset_controller(port)) | ||
830 | printk(KERN_ERR DRV_NAME | ||
831 | "(%s): failed to reset controller\n", | ||
832 | pci_name(pdev)); | ||
833 | } | ||
834 | |||
835 | /* Turn on interrupts */ | ||
836 | writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL); | ||
837 | |||
838 | pci_set_master(pdev); | ||
839 | |||
840 | /* FIXME: check ata_device_add return value */ | ||
841 | ata_device_add(probe_ent); | ||
842 | |||
843 | kfree(probe_ent); | ||
844 | return 0; | ||
845 | |||
846 | out_free: | ||
847 | if (host_base) | ||
848 | iounmap(host_base); | ||
849 | if (port_base) | ||
850 | iounmap(port_base); | ||
851 | kfree(probe_ent); | ||
852 | kfree(hpriv); | ||
853 | pci_release_regions(pdev); | ||
854 | out_disable: | ||
855 | pci_disable_device(pdev); | ||
856 | return rc; | ||
857 | } | ||
858 | |||
859 | static int __init sil24_init(void) | ||
860 | { | ||
861 | return pci_module_init(&sil24_pci_driver); | ||
862 | } | ||
863 | |||
864 | static void __exit sil24_exit(void) | ||
865 | { | ||
866 | pci_unregister_driver(&sil24_pci_driver); | ||
867 | } | ||
868 | |||
869 | MODULE_AUTHOR("Tejun Heo"); | ||
870 | MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver"); | ||
871 | MODULE_LICENSE("GPL"); | ||
872 | MODULE_DEVICE_TABLE(pci, sil24_pci_tbl); | ||
873 | |||
874 | module_init(sil24_init); | ||
875 | module_exit(sil24_exit); | ||
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c index b227e51d12f4..0761a3234fcf 100644 --- a/drivers/scsi/sata_sis.c +++ b/drivers/scsi/sata_sis.c | |||
@@ -263,7 +263,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
263 | goto err_out_regions; | 263 | goto err_out_regions; |
264 | 264 | ||
265 | ppi = &sis_port_info; | 265 | ppi = &sis_port_info; |
266 | probe_ent = ata_pci_init_native_mode(pdev, &ppi); | 266 | probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); |
267 | if (!probe_ent) { | 267 | if (!probe_ent) { |
268 | rc = -ENOMEM; | 268 | rc = -ENOMEM; |
269 | goto err_out_regions; | 269 | goto err_out_regions; |
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c index 4c9fb8b71be1..9c06f2abe7f7 100644 --- a/drivers/scsi/sata_uli.c +++ b/drivers/scsi/sata_uli.c | |||
@@ -202,7 +202,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
202 | goto err_out_regions; | 202 | goto err_out_regions; |
203 | 203 | ||
204 | ppi = &uli_port_info; | 204 | ppi = &uli_port_info; |
205 | probe_ent = ata_pci_init_native_mode(pdev, &ppi); | 205 | probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); |
206 | if (!probe_ent) { | 206 | if (!probe_ent) { |
207 | rc = -ENOMEM; | 207 | rc = -ENOMEM; |
208 | goto err_out_regions; | 208 | goto err_out_regions; |
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c index 128b996b07b7..565872479b9a 100644 --- a/drivers/scsi/sata_via.c +++ b/drivers/scsi/sata_via.c | |||
@@ -212,7 +212,7 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev) | |||
212 | struct ata_probe_ent *probe_ent; | 212 | struct ata_probe_ent *probe_ent; |
213 | struct ata_port_info *ppi = &svia_port_info; | 213 | struct ata_port_info *ppi = &svia_port_info; |
214 | 214 | ||
215 | probe_ent = ata_pci_init_native_mode(pdev, &ppi); | 215 | probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); |
216 | if (!probe_ent) | 216 | if (!probe_ent) |
217 | return NULL; | 217 | return NULL; |
218 | 218 | ||
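
The three hunks above (sata_sis, sata_uli, sata_via) pass a new third argument to ata_pci_init_native_mode() naming which legacy ports to enable. A hedged sketch of an updated call site, with the surrounding probe logic reduced to the essentials:

/* Sketch of a probe path using the extended helper.  The port_info
 * contents are omitted; only the call that changed above matters here. */
static struct ata_port_info example_port_info;	/* placeholder */

static int example_init_one(struct pci_dev *pdev)
{
	struct ata_port_info *ppi = &example_port_info;
	struct ata_probe_ent *probe_ent;

	/* Enable both legacy channels; a single-channel controller could
	 * pass just one of the two flags. */
	probe_ent = ata_pci_init_native_mode(pdev, &ppi,
					     ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	/* ... remainder of the probe sequence unchanged ... */
	return 0;
}
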
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 5959e6755a81..656c0e8d160e 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c | |||
@@ -518,11 +518,7 @@ static void sunsu_change_mouse_baud(struct uart_sunsu_port *up) | |||
518 | 518 | ||
519 | quot = up->port.uartclk / (16 * new_baud); | 519 | quot = up->port.uartclk / (16 * new_baud); |
520 | 520 | ||
521 | spin_unlock(&up->port.lock); | ||
522 | |||
523 | sunsu_change_speed(&up->port, up->cflag, 0, quot); | 521 | sunsu_change_speed(&up->port, up->cflag, 0, quot); |
524 | |||
525 | spin_lock(&up->port.lock); | ||
526 | } | 522 | } |
527 | 523 | ||
528 | static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *regs, int is_break) | 524 | static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *regs, int is_break) |
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index e240c335eb23..5af928fa0449 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c | |||
@@ -108,7 +108,7 @@ static int bfs_create(struct inode * dir, struct dentry * dentry, int mode, | |||
108 | inode->i_mapping->a_ops = &bfs_aops; | 108 | inode->i_mapping->a_ops = &bfs_aops; |
109 | inode->i_mode = mode; | 109 | inode->i_mode = mode; |
110 | inode->i_ino = ino; | 110 | inode->i_ino = ino; |
111 | BFS_I(inode)->i_dsk_ino = cpu_to_le16(ino); | 111 | BFS_I(inode)->i_dsk_ino = ino; |
112 | BFS_I(inode)->i_sblock = 0; | 112 | BFS_I(inode)->i_sblock = 0; |
113 | BFS_I(inode)->i_eblock = 0; | 113 | BFS_I(inode)->i_eblock = 0; |
114 | insert_inode_hash(inode); | 114 | insert_inode_hash(inode); |
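
The bfs_create() hunk stops byte-swapping the in-core i_dsk_ino at creation time; the value now appears to stay in CPU order in memory, with any little-endian conversion confined to the point where the on-disk inode is written. A small user-space model of that convention (types and helper names are illustrative, not bfs's; the byte-order helpers are glibc's <endian.h> ones):

/* In-core fields kept in CPU byte order; conversion happens only when
 * the on-disk structure is filled in or read back. */
#include <stdint.h>
#include <endian.h>

struct ondisk_inode {
	uint16_t ino_le;	/* little-endian on disk */
};

struct incore_inode {
	unsigned long ino;	/* CPU order in memory */
};

static void incore_to_disk(const struct incore_inode *mem, struct ondisk_inode *disk)
{
	disk->ino_le = htole16((uint16_t)mem->ino);
}

static void disk_to_incore(const struct ondisk_inode *disk, struct incore_inode *mem)
{
	mem->ino = le16toh(disk->ino_le);
}
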
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index c7b39aa279d7..3af6c73c5b5a 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c | |||
@@ -357,28 +357,46 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) | |||
357 | } | 357 | } |
358 | 358 | ||
359 | info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1)>>BFS_BSIZE_BITS; /* for statfs(2) */ | 359 | info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1)>>BFS_BSIZE_BITS; /* for statfs(2) */ |
360 | info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 - cpu_to_le32(bfs_sb->s_start))>>BFS_BSIZE_BITS; | 360 | info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 - le32_to_cpu(bfs_sb->s_start))>>BFS_BSIZE_BITS; |
361 | info->si_freei = 0; | 361 | info->si_freei = 0; |
362 | info->si_lf_eblk = 0; | 362 | info->si_lf_eblk = 0; |
363 | info->si_lf_sblk = 0; | 363 | info->si_lf_sblk = 0; |
364 | info->si_lf_ioff = 0; | 364 | info->si_lf_ioff = 0; |
365 | bh = NULL; | ||
365 | for (i=BFS_ROOT_INO; i<=info->si_lasti; i++) { | 366 | for (i=BFS_ROOT_INO; i<=info->si_lasti; i++) { |
366 | inode = iget(s,i); | 367 | struct bfs_inode *di; |
367 | if (BFS_I(inode)->i_dsk_ino == 0) | 368 | int block = (i - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1; |
369 | int off = (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK; | ||
370 | unsigned long sblock, eblock; | ||
371 | |||
372 | if (!off) { | ||
373 | brelse(bh); | ||
374 | bh = sb_bread(s, block); | ||
375 | } | ||
376 | |||
377 | if (!bh) | ||
378 | continue; | ||
379 | |||
380 | di = (struct bfs_inode *)bh->b_data + off; | ||
381 | |||
382 | if (!di->i_ino) { | ||
368 | info->si_freei++; | 383 | info->si_freei++; |
369 | else { | 384 | continue; |
370 | set_bit(i, info->si_imap); | 385 | } |
371 | info->si_freeb -= inode->i_blocks; | 386 | set_bit(i, info->si_imap); |
372 | if (BFS_I(inode)->i_eblock > info->si_lf_eblk) { | 387 | info->si_freeb -= BFS_FILEBLOCKS(di); |
373 | info->si_lf_eblk = BFS_I(inode)->i_eblock; | 388 | |
374 | info->si_lf_sblk = BFS_I(inode)->i_sblock; | 389 | sblock = le32_to_cpu(di->i_sblock); |
375 | info->si_lf_ioff = BFS_INO2OFF(i); | 390 | eblock = le32_to_cpu(di->i_eblock); |
376 | } | 391 | if (eblock > info->si_lf_eblk) { |
392 | info->si_lf_eblk = eblock; | ||
393 | info->si_lf_sblk = sblock; | ||
394 | info->si_lf_ioff = BFS_INO2OFF(i); | ||
377 | } | 395 | } |
378 | iput(inode); | ||
379 | } | 396 | } |
397 | brelse(bh); | ||
380 | if (!(s->s_flags & MS_RDONLY)) { | 398 | if (!(s->s_flags & MS_RDONLY)) { |
381 | mark_buffer_dirty(bh); | 399 | mark_buffer_dirty(info->si_sbh); |
382 | s->s_dirt = 1; | 400 | s->s_dirt = 1; |
383 | } | 401 | } |
384 | dump_imap("read_super", s); | 402 | dump_imap("read_super", s); |
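
The rewritten superblock scan above walks raw on-disk inodes with sb_bread() instead of instantiating a full VFS inode per number with iget(), and it holds at most one buffer_head at a time (the previous one is brelse()d whenever the slot offset wraps to zero). The inode-number-to-(block, slot) arithmetic it relies on is worth spelling out; a plain C sketch with stand-in constants:

/* Map an inode number to the disk block holding it and its slot within
 * that block, mirroring the arithmetic in the new loop.  The constants
 * are stand-ins for BFS_INODES_PER_BLOCK and BFS_ROOT_INO, chosen only
 * for illustration. */
#include <stdio.h>

#define INODES_PER_BLOCK 8
#define ROOT_INO 2

static void locate_inode(unsigned long ino, unsigned long *block, unsigned long *off)
{
	/* the + 1 mirrors the hunk: the inode table starts one block in */
	*block = (ino - ROOT_INO) / INODES_PER_BLOCK + 1;
	*off   = (ino - ROOT_INO) % INODES_PER_BLOCK;
}

int main(void)
{
	unsigned long block, off;

	for (unsigned long ino = ROOT_INO; ino < ROOT_INO + 20; ino++) {
		locate_inode(ino, &block, &off);
		printf("ino %lu -> block %lu slot %lu\n", ino, block, off);
	}
	return 0;
}
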
diff --git a/fs/namei.c b/fs/namei.c index 043d587216b5..aa62dbda93ac 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1551,19 +1551,19 @@ do_link: | |||
1551 | if (nd->last_type != LAST_NORM) | 1551 | if (nd->last_type != LAST_NORM) |
1552 | goto exit; | 1552 | goto exit; |
1553 | if (nd->last.name[nd->last.len]) { | 1553 | if (nd->last.name[nd->last.len]) { |
1554 | putname(nd->last.name); | 1554 | __putname(nd->last.name); |
1555 | goto exit; | 1555 | goto exit; |
1556 | } | 1556 | } |
1557 | error = -ELOOP; | 1557 | error = -ELOOP; |
1558 | if (count++==32) { | 1558 | if (count++==32) { |
1559 | putname(nd->last.name); | 1559 | __putname(nd->last.name); |
1560 | goto exit; | 1560 | goto exit; |
1561 | } | 1561 | } |
1562 | dir = nd->dentry; | 1562 | dir = nd->dentry; |
1563 | down(&dir->d_inode->i_sem); | 1563 | down(&dir->d_inode->i_sem); |
1564 | path.dentry = __lookup_hash(&nd->last, nd->dentry, nd); | 1564 | path.dentry = __lookup_hash(&nd->last, nd->dentry, nd); |
1565 | path.mnt = nd->mnt; | 1565 | path.mnt = nd->mnt; |
1566 | putname(nd->last.name); | 1566 | __putname(nd->last.name); |
1567 | goto do_last; | 1567 | goto do_last; |
1568 | } | 1568 | } |
1569 | 1569 | ||
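
The namei.c hunk swaps putname() for __putname() when releasing nd->last.name in the do_link path. The pairing rule this appears to restore is that buffers obtained with __getname() are released with __putname(), while getname()/putname() form the other pair (putname() can involve extra bookkeeping, e.g. for audit, that a bare __getname() buffer never received). A sketch of the raw pairing only:

/* Pairing convention: __getname()/__putname() for raw name buffers;
 * sketch only, error handling trimmed. */
static int example_use_name_buffer(void)
{
	char *name = __getname();	/* raw allocation from names_cachep */

	if (!name)
		return -ENOMEM;

	/* ... fill and use the buffer ... */

	__putname(name);		/* matching raw free */
	return 0;
}
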
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog index 83f3322765cd..de58579a1d0e 100644 --- a/fs/ntfs/ChangeLog +++ b/fs/ntfs/ChangeLog | |||
@@ -102,6 +102,9 @@ ToDo/Notes: | |||
102 | inode instead of a vfs inode as parameter. | 102 | inode instead of a vfs inode as parameter. |
103 | - Fix the definition of the CHKD ntfs record magic. It had an off by | 103 | - Fix the definition of the CHKD ntfs record magic. It had an off by |
104 | two error causing it to be CHKB instead of CHKD. | 104 | two error causing it to be CHKB instead of CHKD. |
105 | - Fix a stupid bug in __ntfs_bitmap_set_bits_in_run() which caused the | ||
106 | count to become negative and hence we had a wild memset() scribbling | ||
107 | all over the system's ram. | ||
105 | 108 | ||
106 | 2.1.23 - Implement extension of resident files and make writing safe as well as | 109 | 2.1.23 - Implement extension of resident files and make writing safe as well as |
107 | many bug fixes, cleanups, and enhancements... | 110 | many bug fixes, cleanups, and enhancements... |
diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c index 12cf2e30c7dd..7a190cdc60e2 100644 --- a/fs/ntfs/bitmap.c +++ b/fs/ntfs/bitmap.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project. | 2 | * bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Anton Altaparmakov | 4 | * Copyright (c) 2004-2005 Anton Altaparmakov |
5 | * | 5 | * |
6 | * This program/include file is free software; you can redistribute it and/or | 6 | * This program/include file is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License as published | 7 | * modify it under the terms of the GNU General Public License as published |
@@ -90,7 +90,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, | |||
90 | /* If the first byte is partial, modify the appropriate bits in it. */ | 90 | /* If the first byte is partial, modify the appropriate bits in it. */ |
91 | if (bit) { | 91 | if (bit) { |
92 | u8 *byte = kaddr + pos; | 92 | u8 *byte = kaddr + pos; |
93 | while ((bit & 7) && cnt--) { | 93 | while ((bit & 7) && cnt) { |
94 | cnt--; | ||
94 | if (value) | 95 | if (value) |
95 | *byte |= 1 << bit++; | 96 | *byte |= 1 << bit++; |
96 | else | 97 | else |
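
The ChangeLog entry and the bitmap.c hunk describe the same defect: in `while ((bit & 7) && cnt--)` the test that finally terminates the loop still decrements cnt, so cnt leaves the loop at -1 instead of 0, and any length later derived from it (such as a memset size) becomes huge. A user-space model of the off-by-one, with the bit handling reduced to the bare loop shape:

/* Model of the decrement-in-condition bug fixed above. */
#include <stdio.h>

int main(void)
{
	long long cnt;
	int bit;

	/* buggy shape: the decrement happens even on the failing test */
	cnt = 3;
	bit = 1;
	while ((bit & 7) && cnt--)
		bit++;
	printf("buggy: cnt = %lld\n", cnt);	/* prints -1 */

	/* fixed shape: the decrement happens only when an iteration runs */
	cnt = 3;
	bit = 1;
	while ((bit & 7) && cnt) {
		cnt--;
		bit++;
	}
	printf("fixed: cnt = %lld\n", cnt);	/* prints 0 */
	return 0;
}

Moving the decrement into the loop body, as the fix does, guarantees cnt only drops when an iteration actually runs.
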
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h index 01f2dfa39cec..5c248d404f05 100644 --- a/fs/ntfs/layout.h +++ b/fs/ntfs/layout.h | |||
@@ -309,7 +309,7 @@ typedef le16 MFT_RECORD_FLAGS; | |||
309 | * Note: The _LE versions will return a CPU endian formatted value! | 309 | * Note: The _LE versions will return a CPU endian formatted value! |
310 | */ | 310 | */ |
311 | #define MFT_REF_MASK_CPU 0x0000ffffffffffffULL | 311 | #define MFT_REF_MASK_CPU 0x0000ffffffffffffULL |
312 | #define MFT_REF_MASK_LE const_cpu_to_le64(0x0000ffffffffffffULL) | 312 | #define MFT_REF_MASK_LE const_cpu_to_le64(MFT_REF_MASK_CPU) |
313 | 313 | ||
314 | typedef u64 MFT_REF; | 314 | typedef u64 MFT_REF; |
315 | typedef le64 leMFT_REF; | 315 | typedef le64 leMFT_REF; |
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 247586d1d5dc..b011369b5956 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c | |||
@@ -58,7 +58,8 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni) | |||
58 | * overflowing the unsigned long, but I don't think we would ever get | 58 | * overflowing the unsigned long, but I don't think we would ever get |
59 | * here if the volume was that big... | 59 | * here if the volume was that big... |
60 | */ | 60 | */ |
61 | index = ni->mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; | 61 | index = (u64)ni->mft_no << vol->mft_record_size_bits >> |
62 | PAGE_CACHE_SHIFT; | ||
62 | ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; | 63 | ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; |
63 | 64 | ||
64 | i_size = i_size_read(mft_vi); | 65 | i_size = i_size_read(mft_vi); |
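
The mft.c hunk widens the shift to 64 bits before converting an MFT record number into a page-cache index: on a 32-bit machine, `mft_no << mft_record_size_bits` is evaluated in unsigned long and can wrap for large record numbers, yielding the wrong page. A small model of the difference; the shift widths and the record number below are chosen only to trigger the wrap, not taken from any real volume:

/* Model of the 32-bit overflow the (u64) cast avoids. */
#include <stdio.h>
#include <stdint.h>

#define RECORD_SIZE_BITS 10	/* assumed 1 KiB records, for illustration */
#define PAGE_SHIFT 12		/* 4 KiB pages */

int main(void)
{
	uint32_t mft_no = 5000000;	/* large enough that a 32-bit shift by 10 wraps */

	uint32_t bad  = (mft_no << RECORD_SIZE_BITS) >> PAGE_SHIFT;
	uint64_t good = ((uint64_t)mft_no << RECORD_SIZE_BITS) >> PAGE_SHIFT;

	printf("32-bit index: %u\n", (unsigned)bad);
	printf("64-bit index: %llu\n", (unsigned long long)good);
	return 0;
}
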
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c index a389a5a16c84..0ea887fc859c 100644 --- a/fs/ntfs/unistr.c +++ b/fs/ntfs/unistr.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project. | 2 | * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2004 Anton Altaparmakov | 4 | * Copyright (c) 2001-2005 Anton Altaparmakov |
5 | * | 5 | * |
6 | * This program/include file is free software; you can redistribute it and/or | 6 | * This program/include file is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License as published | 7 | * modify it under the terms of the GNU General Public License as published |
diff --git a/include/asm-arm/arch-h720x/system.h b/include/asm-arm/arch-h720x/system.h index 0b025e227ec2..09eda84592ff 100644 --- a/include/asm-arm/arch-h720x/system.h +++ b/include/asm-arm/arch-h720x/system.h | |||
@@ -17,9 +17,11 @@ | |||
17 | static void arch_idle(void) | 17 | static void arch_idle(void) |
18 | { | 18 | { |
19 | CPU_REG (PMU_BASE, PMU_MODE) = PMU_MODE_IDLE; | 19 | CPU_REG (PMU_BASE, PMU_MODE) = PMU_MODE_IDLE; |
20 | __asm__ __volatile__( | 20 | nop(); |
21 | "mov r0, r0\n\t" | 21 | nop(); |
22 | "mov r0, r0"); | 22 | CPU_REG (PMU_BASE, PMU_MODE) = PMU_MODE_RUN; |
23 | nop(); | ||
24 | nop(); | ||
23 | } | 25 | } |
24 | 26 | ||
25 | 27 | ||
diff --git a/include/asm-arm/arch-imx/imx-regs.h b/include/asm-arm/arch-imx/imx-regs.h index 93b840e8fa60..229f7008d74f 100644 --- a/include/asm-arm/arch-imx/imx-regs.h +++ b/include/asm-arm/arch-imx/imx-regs.h | |||
@@ -76,6 +76,7 @@ | |||
76 | #define GPIO_PIN_MASK 0x1f | 76 | #define GPIO_PIN_MASK 0x1f |
77 | #define GPIO_PORT_MASK (0x3 << 5) | 77 | #define GPIO_PORT_MASK (0x3 << 5) |
78 | 78 | ||
79 | #define GPIO_PORT_SHIFT 5 | ||
79 | #define GPIO_PORTA (0<<5) | 80 | #define GPIO_PORTA (0<<5) |
80 | #define GPIO_PORTB (1<<5) | 81 | #define GPIO_PORTB (1<<5) |
81 | #define GPIO_PORTC (2<<5) | 82 | #define GPIO_PORTC (2<<5) |
@@ -88,24 +89,37 @@ | |||
88 | #define GPIO_PF (0<<9) | 89 | #define GPIO_PF (0<<9) |
89 | #define GPIO_AF (1<<9) | 90 | #define GPIO_AF (1<<9) |
90 | 91 | ||
92 | #define GPIO_OCR_SHIFT 10 | ||
91 | #define GPIO_OCR_MASK (3<<10) | 93 | #define GPIO_OCR_MASK (3<<10) |
92 | #define GPIO_AIN (0<<10) | 94 | #define GPIO_AIN (0<<10) |
93 | #define GPIO_BIN (1<<10) | 95 | #define GPIO_BIN (1<<10) |
94 | #define GPIO_CIN (2<<10) | 96 | #define GPIO_CIN (2<<10) |
95 | #define GPIO_GPIO (3<<10) | 97 | #define GPIO_DR (3<<10) |
96 | 98 | ||
97 | #define GPIO_AOUT (1<<12) | 99 | #define GPIO_AOUT_SHIFT 12 |
98 | #define GPIO_BOUT (1<<13) | 100 | #define GPIO_AOUT_MASK (3<<12) |
101 | #define GPIO_AOUT (0<<12) | ||
102 | #define GPIO_AOUT_ISR (1<<12) | ||
103 | #define GPIO_AOUT_0 (2<<12) | ||
104 | #define GPIO_AOUT_1 (3<<12) | ||
105 | |||
106 | #define GPIO_BOUT_SHIFT 14 | ||
107 | #define GPIO_BOUT_MASK (3<<14) | ||
108 | #define GPIO_BOUT (0<<14) | ||
109 | #define GPIO_BOUT_ISR (1<<14) | ||
110 | #define GPIO_BOUT_0 (2<<14) | ||
111 | #define GPIO_BOUT_1 (3<<14) | ||
112 | |||
113 | #define GPIO_GIUS (1<<16) | ||
99 | 114 | ||
100 | /* assignements for GPIO alternate/primary functions */ | 115 | /* assignements for GPIO alternate/primary functions */ |
101 | 116 | ||
102 | /* FIXME: This list is not completed. The correct directions are | 117 | /* FIXME: This list is not completed. The correct directions are |
103 | * missing on some (many) pins | 118 | * missing on some (many) pins |
104 | */ | 119 | */ |
105 | #define PA0_PF_A24 ( GPIO_PORTA | GPIO_PF | 0 ) | 120 | #define PA0_AIN_SPI2_CLK ( GPIO_GIUS | GPIO_PORTA | GPIO_OUT | 0 ) |
106 | #define PA0_AIN_SPI2_CLK ( GPIO_PORTA | GPIO_OUT | GPIO_AIN | 0 ) | ||
107 | #define PA0_AF_ETMTRACESYNC ( GPIO_PORTA | GPIO_AF | 0 ) | 121 | #define PA0_AF_ETMTRACESYNC ( GPIO_PORTA | GPIO_AF | 0 ) |
108 | #define PA1_AOUT_SPI2_RXD ( GPIO_PORTA | GPIO_IN | GPIO_AOUT | 1 ) | 122 | #define PA1_AOUT_SPI2_RXD ( GPIO_GIUS | GPIO_PORTA | GPIO_IN | 1 ) |
109 | #define PA1_PF_TIN ( GPIO_PORTA | GPIO_PF | 1 ) | 123 | #define PA1_PF_TIN ( GPIO_PORTA | GPIO_PF | 1 ) |
110 | #define PA2_PF_PWM0 ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 2 ) | 124 | #define PA2_PF_PWM0 ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 2 ) |
111 | #define PA3_PF_CSI_MCLK ( GPIO_PORTA | GPIO_PF | 3 ) | 125 | #define PA3_PF_CSI_MCLK ( GPIO_PORTA | GPIO_PF | 3 ) |
@@ -123,7 +137,7 @@ | |||
123 | #define PA15_PF_I2C_SDA ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 15 ) | 137 | #define PA15_PF_I2C_SDA ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 15 ) |
124 | #define PA16_PF_I2C_SCL ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 16 ) | 138 | #define PA16_PF_I2C_SCL ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 16 ) |
125 | #define PA17_AF_ETMTRACEPKT4 ( GPIO_PORTA | GPIO_AF | 17 ) | 139 | #define PA17_AF_ETMTRACEPKT4 ( GPIO_PORTA | GPIO_AF | 17 ) |
126 | #define PA17_AIN_SPI2_SS ( GPIO_PORTA | GPIO_AIN | 17 ) | 140 | #define PA17_AIN_SPI2_SS ( GPIO_GIUS | GPIO_PORTA | GPIO_OUT | 17 ) |
127 | #define PA18_AF_ETMTRACEPKT5 ( GPIO_PORTA | GPIO_AF | 18 ) | 141 | #define PA18_AF_ETMTRACEPKT5 ( GPIO_PORTA | GPIO_AF | 18 ) |
128 | #define PA19_AF_ETMTRACEPKT6 ( GPIO_PORTA | GPIO_AF | 19 ) | 142 | #define PA19_AF_ETMTRACEPKT6 ( GPIO_PORTA | GPIO_AF | 19 ) |
129 | #define PA20_AF_ETMTRACEPKT7 ( GPIO_PORTA | GPIO_AF | 20 ) | 143 | #define PA20_AF_ETMTRACEPKT7 ( GPIO_PORTA | GPIO_AF | 20 ) |
@@ -191,19 +205,27 @@ | |||
191 | #define PC15_PF_SPI1_SS ( GPIO_PORTC | GPIO_PF | 15 ) | 205 | #define PC15_PF_SPI1_SS ( GPIO_PORTC | GPIO_PF | 15 ) |
192 | #define PC16_PF_SPI1_MISO ( GPIO_PORTC | GPIO_PF | 16 ) | 206 | #define PC16_PF_SPI1_MISO ( GPIO_PORTC | GPIO_PF | 16 ) |
193 | #define PC17_PF_SPI1_MOSI ( GPIO_PORTC | GPIO_PF | 17 ) | 207 | #define PC17_PF_SPI1_MOSI ( GPIO_PORTC | GPIO_PF | 17 ) |
208 | #define PC24_BIN_UART3_RI ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 24 ) | ||
209 | #define PC25_BIN_UART3_DSR ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 25 ) | ||
210 | #define PC26_AOUT_UART3_DTR ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 26 ) | ||
211 | #define PC27_BIN_UART3_DCD ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 27 ) | ||
212 | #define PC28_BIN_UART3_CTS ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 28 ) | ||
213 | #define PC29_AOUT_UART3_RTS ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 29 ) | ||
214 | #define PC30_BIN_UART3_TX ( GPIO_GIUS | GPIO_PORTC | GPIO_BIN | 30 ) | ||
215 | #define PC31_AOUT_UART3_RX ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 31) | ||
194 | #define PD6_PF_LSCLK ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 6 ) | 216 | #define PD6_PF_LSCLK ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 6 ) |
195 | #define PD7_PF_REV ( GPIO_PORTD | GPIO_PF | 7 ) | 217 | #define PD7_PF_REV ( GPIO_PORTD | GPIO_PF | 7 ) |
196 | #define PD7_AF_UART2_DTR ( GPIO_PORTD | GPIO_IN | GPIO_AF | 7 ) | 218 | #define PD7_AF_UART2_DTR ( GPIO_PORTD | GPIO_IN | GPIO_AF | 7 ) |
197 | #define PD7_AIN_SPI2_SCLK ( GPIO_PORTD | GPIO_AIN | 7 ) | 219 | #define PD7_AIN_SPI2_SCLK ( GPIO_GIUS | GPIO_PORTD | GPIO_AIN | 7 ) |
198 | #define PD8_PF_CLS ( GPIO_PORTD | GPIO_PF | 8 ) | 220 | #define PD8_PF_CLS ( GPIO_PORTD | GPIO_PF | 8 ) |
199 | #define PD8_AF_UART2_DCD ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 8 ) | 221 | #define PD8_AF_UART2_DCD ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 8 ) |
200 | #define PD8_AIN_SPI2_SS ( GPIO_PORTD | GPIO_AIN | 8 ) | 222 | #define PD8_AIN_SPI2_SS ( GPIO_GIUS | GPIO_PORTD | GPIO_AIN | 8 ) |
201 | #define PD9_PF_PS ( GPIO_PORTD | GPIO_PF | 9 ) | 223 | #define PD9_PF_PS ( GPIO_PORTD | GPIO_PF | 9 ) |
202 | #define PD9_AF_UART2_RI ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 9 ) | 224 | #define PD9_AF_UART2_RI ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 9 ) |
203 | #define PD9_AOUT_SPI2_RXD ( GPIO_PORTD | GPIO_IN | GPIO_AOUT | 9 ) | 225 | #define PD9_AOUT_SPI2_RXD ( GPIO_GIUS | GPIO_PORTD | GPIO_IN | 9 ) |
204 | #define PD10_PF_SPL_SPR ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 10 ) | 226 | #define PD10_PF_SPL_SPR ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 10 ) |
205 | #define PD10_AF_UART2_DSR ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 10 ) | 227 | #define PD10_AF_UART2_DSR ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 10 ) |
206 | #define PD10_AIN_SPI2_TXD ( GPIO_PORTD | GPIO_OUT | GPIO_AIN | 10 ) | 228 | #define PD10_AIN_SPI2_TXD ( GPIO_GIUS | GPIO_PORTD | GPIO_OUT | 10 ) |
207 | #define PD11_PF_CONTRAST ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 11 ) | 229 | #define PD11_PF_CONTRAST ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 11 ) |
208 | #define PD12_PF_ACD_OE ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 12 ) | 230 | #define PD12_PF_ACD_OE ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 12 ) |
209 | #define PD13_PF_LP_HSYNC ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 13 ) | 231 | #define PD13_PF_LP_HSYNC ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 13 ) |
@@ -225,7 +247,7 @@ | |||
225 | #define PD29_PF_LD14 ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 29 ) | 247 | #define PD29_PF_LD14 ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 29 ) |
226 | #define PD30_PF_LD15 ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 30 ) | 248 | #define PD30_PF_LD15 ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 30 ) |
227 | #define PD31_PF_TMR2OUT ( GPIO_PORTD | GPIO_PF | 31 ) | 249 | #define PD31_PF_TMR2OUT ( GPIO_PORTD | GPIO_PF | 31 ) |
228 | #define PD31_BIN_SPI2_TXD ( GPIO_PORTD | GPIO_BIN | 31 ) | 250 | #define PD31_BIN_SPI2_TXD ( GPIO_GIUS | GPIO_PORTD | GPIO_BIN | 31 ) |
229 | 251 | ||
230 | /* | 252 | /* |
231 | * PWM controller | 253 | * PWM controller |
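
The imx-regs.h changes add explicit *_SHIFT/*_MASK pairs for the port, OCR and A/B output-path fields plus a GPIO_GIUS bit, and fold GPIO_GIUS into the pin descriptors that appear to route through the GPIO in-use select logic. A short, purely illustrative decode of such a packed descriptor using the new shift/mask scheme (the helper is not part of the header; the macro values are copied from the hunk):

/* Illustrative decode of a packed pin descriptor. */
#include <stdio.h>

#define GPIO_PIN_MASK   0x1f
#define GPIO_PORT_SHIFT 5
#define GPIO_PORT_MASK  (0x3 << 5)
#define GPIO_OCR_SHIFT  10
#define GPIO_OCR_MASK   (3 << 10)
#define GPIO_GIUS       (1 << 16)

static void decode_pin(unsigned int cfg)
{
	printf("port %u, pin %u, ocr %u, gius %s\n",
	       (cfg & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT,
	       cfg & GPIO_PIN_MASK,
	       (cfg & GPIO_OCR_MASK) >> GPIO_OCR_SHIFT,
	       (cfg & GPIO_GIUS) ? "yes" : "no");
}

int main(void)
{
	/* synthetic descriptor: port C, pin 17, OCR 3, GIUS set */
	decode_pin(GPIO_GIUS | (2u << GPIO_PORT_SHIFT) | (3u << GPIO_OCR_SHIFT) | 17);
	return 0;
}
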
diff --git a/include/asm-arm/arch-ixp4xx/platform.h b/include/asm-arm/arch-ixp4xx/platform.h index d13ee7f78c70..f14ed63590c3 100644 --- a/include/asm-arm/arch-ixp4xx/platform.h +++ b/include/asm-arm/arch-ixp4xx/platform.h | |||
@@ -93,7 +93,7 @@ extern struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys); | |||
93 | 93 | ||
94 | static inline void gpio_line_config(u8 line, u32 direction) | 94 | static inline void gpio_line_config(u8 line, u32 direction) |
95 | { | 95 | { |
96 | if (direction == IXP4XX_GPIO_OUT) | 96 | if (direction == IXP4XX_GPIO_IN) |
97 | *IXP4XX_GPIO_GPOER |= (1 << line); | 97 | *IXP4XX_GPIO_GPOER |= (1 << line); |
98 | else | 98 | else |
99 | *IXP4XX_GPIO_GPOER &= ~(1 << line); | 99 | *IXP4XX_GPIO_GPOER &= ~(1 << line); |
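
The ixp4xx hunk flips the test in gpio_line_config(): the GPOER bit appears to disable the output driver when set, so it has to be set for IXP4XX_GPIO_IN (driver off, pin readable) and cleared for output; the old code had the sense reversed. A trivial usage sketch of the corrected helper, with arbitrary line numbers:

/* Usage of the corrected helper. */
static void example_configure_lines(void)
{
	gpio_line_config(3, IXP4XX_GPIO_OUT);	/* GPOER bit 3 cleared: pin is driven */
	gpio_line_config(4, IXP4XX_GPIO_IN);	/* GPOER bit 4 set: driver off, pin is an input */
}
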
diff --git a/include/asm-sparc/btfixup.h b/include/asm-sparc/btfixup.h index b84c96c89581..c2868d0f60b6 100644 --- a/include/asm-sparc/btfixup.h +++ b/include/asm-sparc/btfixup.h | |||
@@ -49,17 +49,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void); | |||
49 | /* Put bottom 13bits into some register variable */ | 49 | /* Put bottom 13bits into some register variable */ |
50 | 50 | ||
51 | #define BTFIXUPDEF_SIMM13(__name) \ | 51 | #define BTFIXUPDEF_SIMM13(__name) \ |
52 | extern unsigned int ___sf_##__name(void) __attribute_const__; \ | 52 | static inline unsigned int ___sf_##__name(void) __attribute_const__; \ |
53 | extern unsigned ___ss_##__name[2]; \ | 53 | extern unsigned ___ss_##__name[2]; \ |
54 | extern __inline__ unsigned int ___sf_##__name(void) { \ | 54 | static inline unsigned int ___sf_##__name(void) { \ |
55 | unsigned int ret; \ | 55 | unsigned int ret; \ |
56 | __asm__ ("or %%g0, ___s_" #__name ", %0" : "=r"(ret)); \ | 56 | __asm__ ("or %%g0, ___s_" #__name ", %0" : "=r"(ret)); \ |
57 | return ret; \ | 57 | return ret; \ |
58 | } | 58 | } |
59 | #define BTFIXUPDEF_SIMM13_INIT(__name,__val) \ | 59 | #define BTFIXUPDEF_SIMM13_INIT(__name,__val) \ |
60 | extern unsigned int ___sf_##__name(void) __attribute_const__; \ | 60 | static inline unsigned int ___sf_##__name(void) __attribute_const__; \ |
61 | extern unsigned ___ss_##__name[2]; \ | 61 | extern unsigned ___ss_##__name[2]; \ |
62 | extern __inline__ unsigned int ___sf_##__name(void) { \ | 62 | static inline unsigned int ___sf_##__name(void) { \ |
63 | unsigned int ret; \ | 63 | unsigned int ret; \ |
64 | __asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\ | 64 | __asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\ |
65 | return ret; \ | 65 | return ret; \ |
@@ -71,17 +71,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void); | |||
71 | */ | 71 | */ |
72 | 72 | ||
73 | #define BTFIXUPDEF_HALF(__name) \ | 73 | #define BTFIXUPDEF_HALF(__name) \ |
74 | extern unsigned int ___af_##__name(void) __attribute_const__; \ | 74 | static inline unsigned int ___af_##__name(void) __attribute_const__; \ |
75 | extern unsigned ___as_##__name[2]; \ | 75 | extern unsigned ___as_##__name[2]; \ |
76 | extern __inline__ unsigned int ___af_##__name(void) { \ | 76 | static inline unsigned int ___af_##__name(void) { \ |
77 | unsigned int ret; \ | 77 | unsigned int ret; \ |
78 | __asm__ ("or %%g0, ___a_" #__name ", %0" : "=r"(ret)); \ | 78 | __asm__ ("or %%g0, ___a_" #__name ", %0" : "=r"(ret)); \ |
79 | return ret; \ | 79 | return ret; \ |
80 | } | 80 | } |
81 | #define BTFIXUPDEF_HALF_INIT(__name,__val) \ | 81 | #define BTFIXUPDEF_HALF_INIT(__name,__val) \ |
82 | extern unsigned int ___af_##__name(void) __attribute_const__; \ | 82 | static inline unsigned int ___af_##__name(void) __attribute_const__; \ |
83 | extern unsigned ___as_##__name[2]; \ | 83 | extern unsigned ___as_##__name[2]; \ |
84 | extern __inline__ unsigned int ___af_##__name(void) { \ | 84 | static inline unsigned int ___af_##__name(void) { \ |
85 | unsigned int ret; \ | 85 | unsigned int ret; \ |
86 | __asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\ | 86 | __asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\ |
87 | return ret; \ | 87 | return ret; \ |
@@ -90,17 +90,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void); | |||
90 | /* Put upper 22 bits into some register variable */ | 90 | /* Put upper 22 bits into some register variable */ |
91 | 91 | ||
92 | #define BTFIXUPDEF_SETHI(__name) \ | 92 | #define BTFIXUPDEF_SETHI(__name) \ |
93 | extern unsigned int ___hf_##__name(void) __attribute_const__; \ | 93 | static inline unsigned int ___hf_##__name(void) __attribute_const__; \ |
94 | extern unsigned ___hs_##__name[2]; \ | 94 | extern unsigned ___hs_##__name[2]; \ |
95 | extern __inline__ unsigned int ___hf_##__name(void) { \ | 95 | static inline unsigned int ___hf_##__name(void) { \ |
96 | unsigned int ret; \ | 96 | unsigned int ret; \ |
97 | __asm__ ("sethi %%hi(___h_" #__name "), %0" : "=r"(ret)); \ | 97 | __asm__ ("sethi %%hi(___h_" #__name "), %0" : "=r"(ret)); \ |
98 | return ret; \ | 98 | return ret; \ |
99 | } | 99 | } |
100 | #define BTFIXUPDEF_SETHI_INIT(__name,__val) \ | 100 | #define BTFIXUPDEF_SETHI_INIT(__name,__val) \ |
101 | extern unsigned int ___hf_##__name(void) __attribute_const__; \ | 101 | static inline unsigned int ___hf_##__name(void) __attribute_const__; \ |
102 | extern unsigned ___hs_##__name[2]; \ | 102 | extern unsigned ___hs_##__name[2]; \ |
103 | extern __inline__ unsigned int ___hf_##__name(void) { \ | 103 | static inline unsigned int ___hf_##__name(void) { \ |
104 | unsigned int ret; \ | 104 | unsigned int ret; \ |
105 | __asm__ ("sethi %%hi(___h_" #__name "__btset_" #__val "), %0" : \ | 105 | __asm__ ("sethi %%hi(___h_" #__name "__btset_" #__val "), %0" : \ |
106 | "=r"(ret)); \ | 106 | "=r"(ret)); \ |
diff --git a/include/asm-sparc/cache.h b/include/asm-sparc/cache.h index e6316fd7e1a4..a10522cb21b7 100644 --- a/include/asm-sparc/cache.h +++ b/include/asm-sparc/cache.h | |||
@@ -27,7 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | 28 | ||
29 | /* First, cache-tag access. */ | 29 | /* First, cache-tag access. */ |
30 | extern __inline__ unsigned int get_icache_tag(int setnum, int tagnum) | 30 | static inline unsigned int get_icache_tag(int setnum, int tagnum) |
31 | { | 31 | { |
32 | unsigned int vaddr, retval; | 32 | unsigned int vaddr, retval; |
33 | 33 | ||
@@ -38,7 +38,7 @@ extern __inline__ unsigned int get_icache_tag(int setnum, int tagnum) | |||
38 | return retval; | 38 | return retval; |
39 | } | 39 | } |
40 | 40 | ||
41 | extern __inline__ void put_icache_tag(int setnum, int tagnum, unsigned int entry) | 41 | static inline void put_icache_tag(int setnum, int tagnum, unsigned int entry) |
42 | { | 42 | { |
43 | unsigned int vaddr; | 43 | unsigned int vaddr; |
44 | 44 | ||
@@ -51,7 +51,7 @@ extern __inline__ void put_icache_tag(int setnum, int tagnum, unsigned int entry | |||
51 | /* Second cache-data access. The data is returned two-32bit quantities | 51 | /* Second cache-data access. The data is returned two-32bit quantities |
52 | * at a time. | 52 | * at a time. |
53 | */ | 53 | */ |
54 | extern __inline__ void get_icache_data(int setnum, int tagnum, int subblock, | 54 | static inline void get_icache_data(int setnum, int tagnum, int subblock, |
55 | unsigned int *data) | 55 | unsigned int *data) |
56 | { | 56 | { |
57 | unsigned int value1, value2, vaddr; | 57 | unsigned int value1, value2, vaddr; |
@@ -67,7 +67,7 @@ extern __inline__ void get_icache_data(int setnum, int tagnum, int subblock, | |||
67 | data[0] = value1; data[1] = value2; | 67 | data[0] = value1; data[1] = value2; |
68 | } | 68 | } |
69 | 69 | ||
70 | extern __inline__ void put_icache_data(int setnum, int tagnum, int subblock, | 70 | static inline void put_icache_data(int setnum, int tagnum, int subblock, |
71 | unsigned int *data) | 71 | unsigned int *data) |
72 | { | 72 | { |
73 | unsigned int value1, value2, vaddr; | 73 | unsigned int value1, value2, vaddr; |
@@ -92,35 +92,35 @@ extern __inline__ void put_icache_data(int setnum, int tagnum, int subblock, | |||
92 | */ | 92 | */ |
93 | 93 | ||
94 | /* Flushes which clear out both the on-chip and external caches */ | 94 | /* Flushes which clear out both the on-chip and external caches */ |
95 | extern __inline__ void flush_ei_page(unsigned int addr) | 95 | static inline void flush_ei_page(unsigned int addr) |
96 | { | 96 | { |
97 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | 97 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : |
98 | "r" (addr), "i" (ASI_M_FLUSH_PAGE) : | 98 | "r" (addr), "i" (ASI_M_FLUSH_PAGE) : |
99 | "memory"); | 99 | "memory"); |
100 | } | 100 | } |
101 | 101 | ||
102 | extern __inline__ void flush_ei_seg(unsigned int addr) | 102 | static inline void flush_ei_seg(unsigned int addr) |
103 | { | 103 | { |
104 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | 104 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : |
105 | "r" (addr), "i" (ASI_M_FLUSH_SEG) : | 105 | "r" (addr), "i" (ASI_M_FLUSH_SEG) : |
106 | "memory"); | 106 | "memory"); |
107 | } | 107 | } |
108 | 108 | ||
109 | extern __inline__ void flush_ei_region(unsigned int addr) | 109 | static inline void flush_ei_region(unsigned int addr) |
110 | { | 110 | { |
111 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | 111 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : |
112 | "r" (addr), "i" (ASI_M_FLUSH_REGION) : | 112 | "r" (addr), "i" (ASI_M_FLUSH_REGION) : |
113 | "memory"); | 113 | "memory"); |
114 | } | 114 | } |
115 | 115 | ||
116 | extern __inline__ void flush_ei_ctx(unsigned int addr) | 116 | static inline void flush_ei_ctx(unsigned int addr) |
117 | { | 117 | { |
118 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | 118 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : |
119 | "r" (addr), "i" (ASI_M_FLUSH_CTX) : | 119 | "r" (addr), "i" (ASI_M_FLUSH_CTX) : |
120 | "memory"); | 120 | "memory"); |
121 | } | 121 | } |
122 | 122 | ||
123 | extern __inline__ void flush_ei_user(unsigned int addr) | 123 | static inline void flush_ei_user(unsigned int addr) |
124 | { | 124 | { |
125 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | 125 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : |
126 | "r" (addr), "i" (ASI_M_FLUSH_USER) : | 126 | "r" (addr), "i" (ASI_M_FLUSH_USER) : |
diff --git a/include/asm-sparc/cypress.h b/include/asm-sparc/cypress.h index fc92fc839c3f..99599533efbc 100644 --- a/include/asm-sparc/cypress.h +++ b/include/asm-sparc/cypress.h | |||
@@ -48,25 +48,25 @@ | |||
48 | #define CYPRESS_NFAULT 0x00000002 | 48 | #define CYPRESS_NFAULT 0x00000002 |
49 | #define CYPRESS_MENABLE 0x00000001 | 49 | #define CYPRESS_MENABLE 0x00000001 |
50 | 50 | ||
51 | extern __inline__ void cypress_flush_page(unsigned long page) | 51 | static inline void cypress_flush_page(unsigned long page) |
52 | { | 52 | { |
53 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | 53 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : |
54 | "r" (page), "i" (ASI_M_FLUSH_PAGE)); | 54 | "r" (page), "i" (ASI_M_FLUSH_PAGE)); |
55 | } | 55 | } |
56 | 56 | ||
57 | extern __inline__ void cypress_flush_segment(unsigned long addr) | 57 | static inline void cypress_flush_segment(unsigned long addr) |
58 | { | 58 | { |
59 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | 59 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : |
60 | "r" (addr), "i" (ASI_M_FLUSH_SEG)); | 60 | "r" (addr), "i" (ASI_M_FLUSH_SEG)); |
61 | } | 61 | } |
62 | 62 | ||
63 | extern __inline__ void cypress_flush_region(unsigned long addr) | 63 | static inline void cypress_flush_region(unsigned long addr) |
64 | { | 64 | { |
65 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | 65 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : |
66 | "r" (addr), "i" (ASI_M_FLUSH_REGION)); | 66 | "r" (addr), "i" (ASI_M_FLUSH_REGION)); |
67 | } | 67 | } |
68 | 68 | ||
69 | extern __inline__ void cypress_flush_context(void) | 69 | static inline void cypress_flush_context(void) |
70 | { | 70 | { |
71 | __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : : | 71 | __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : : |
72 | "i" (ASI_M_FLUSH_CTX)); | 72 | "i" (ASI_M_FLUSH_CTX)); |
diff --git a/include/asm-sparc/delay.h b/include/asm-sparc/delay.h index 6edf2cbb246b..7ec8e9f7ad4f 100644 --- a/include/asm-sparc/delay.h +++ b/include/asm-sparc/delay.h | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/config.h> | 10 | #include <linux/config.h> |
11 | #include <asm/cpudata.h> | 11 | #include <asm/cpudata.h> |
12 | 12 | ||
13 | extern __inline__ void __delay(unsigned long loops) | 13 | static inline void __delay(unsigned long loops) |
14 | { | 14 | { |
15 | __asm__ __volatile__("cmp %0, 0\n\t" | 15 | __asm__ __volatile__("cmp %0, 0\n\t" |
16 | "1: bne 1b\n\t" | 16 | "1: bne 1b\n\t" |
diff --git a/include/asm-sparc/dma.h b/include/asm-sparc/dma.h index 07e6368a2521..8ec206aa5f2e 100644 --- a/include/asm-sparc/dma.h +++ b/include/asm-sparc/dma.h | |||
@@ -198,7 +198,7 @@ extern void dvma_init(struct sbus_bus *); | |||
198 | /* Pause until counter runs out or BIT isn't set in the DMA condition | 198 | /* Pause until counter runs out or BIT isn't set in the DMA condition |
199 | * register. | 199 | * register. |
200 | */ | 200 | */ |
201 | extern __inline__ void sparc_dma_pause(struct sparc_dma_registers *regs, | 201 | static inline void sparc_dma_pause(struct sparc_dma_registers *regs, |
202 | unsigned long bit) | 202 | unsigned long bit) |
203 | { | 203 | { |
204 | int ctr = 50000; /* Let's find some bugs ;) */ | 204 | int ctr = 50000; /* Let's find some bugs ;) */ |
diff --git a/include/asm-sparc/iommu.h b/include/asm-sparc/iommu.h index 8171362d56b9..70c589c05a10 100644 --- a/include/asm-sparc/iommu.h +++ b/include/asm-sparc/iommu.h | |||
@@ -108,12 +108,12 @@ struct iommu_struct { | |||
108 | struct bit_map usemap; | 108 | struct bit_map usemap; |
109 | }; | 109 | }; |
110 | 110 | ||
111 | extern __inline__ void iommu_invalidate(struct iommu_regs *regs) | 111 | static inline void iommu_invalidate(struct iommu_regs *regs) |
112 | { | 112 | { |
113 | regs->tlbflush = 0; | 113 | regs->tlbflush = 0; |
114 | } | 114 | } |
115 | 115 | ||
116 | extern __inline__ void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba) | 116 | static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba) |
117 | { | 117 | { |
118 | regs->pageflush = (ba & PAGE_MASK); | 118 | regs->pageflush = (ba & PAGE_MASK); |
119 | } | 119 | } |
diff --git a/include/asm-sparc/kdebug.h b/include/asm-sparc/kdebug.h index 3ea4916635ee..fba92485fdba 100644 --- a/include/asm-sparc/kdebug.h +++ b/include/asm-sparc/kdebug.h | |||
@@ -46,7 +46,7 @@ struct kernel_debug { | |||
46 | extern struct kernel_debug *linux_dbvec; | 46 | extern struct kernel_debug *linux_dbvec; |
47 | 47 | ||
48 | /* Use this macro in C-code to enter the debugger. */ | 48 | /* Use this macro in C-code to enter the debugger. */ |
49 | extern __inline__ void sp_enter_debugger(void) | 49 | static inline void sp_enter_debugger(void) |
50 | { | 50 | { |
51 | __asm__ __volatile__("jmpl %0, %%o7\n\t" | 51 | __asm__ __volatile__("jmpl %0, %%o7\n\t" |
52 | "nop\n\t" : : | 52 | "nop\n\t" : : |
diff --git a/include/asm-sparc/mbus.h b/include/asm-sparc/mbus.h index 5f2749015342..ecacdf4075d7 100644 --- a/include/asm-sparc/mbus.h +++ b/include/asm-sparc/mbus.h | |||
@@ -83,7 +83,7 @@ extern unsigned int hwbug_bitmask; | |||
83 | */ | 83 | */ |
84 | #define TBR_ID_SHIFT 20 | 84 | #define TBR_ID_SHIFT 20 |
85 | 85 | ||
86 | extern __inline__ int get_cpuid(void) | 86 | static inline int get_cpuid(void) |
87 | { | 87 | { |
88 | register int retval; | 88 | register int retval; |
89 | __asm__ __volatile__("rd %%tbr, %0\n\t" | 89 | __asm__ __volatile__("rd %%tbr, %0\n\t" |
@@ -93,7 +93,7 @@ extern __inline__ int get_cpuid(void) | |||
93 | return (retval & 3); | 93 | return (retval & 3); |
94 | } | 94 | } |
95 | 95 | ||
96 | extern __inline__ int get_modid(void) | 96 | static inline int get_modid(void) |
97 | { | 97 | { |
98 | return (get_cpuid() | 0x8); | 98 | return (get_cpuid() | 0x8); |
99 | } | 99 | } |
diff --git a/include/asm-sparc/msi.h b/include/asm-sparc/msi.h index b69543dd3b46..ff72cbd946a4 100644 --- a/include/asm-sparc/msi.h +++ b/include/asm-sparc/msi.h | |||
@@ -19,7 +19,7 @@ | |||
19 | #define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */ | 19 | #define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */ |
20 | 20 | ||
21 | 21 | ||
22 | extern __inline__ void msi_set_sync(void) | 22 | static inline void msi_set_sync(void) |
23 | { | 23 | { |
24 | __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t" | 24 | __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t" |
25 | "andn %%g3, %2, %%g3\n\t" | 25 | "andn %%g3, %2, %%g3\n\t" |
diff --git a/include/asm-sparc/mxcc.h b/include/asm-sparc/mxcc.h index 60ef9d6fe7bc..128fe9708135 100644 --- a/include/asm-sparc/mxcc.h +++ b/include/asm-sparc/mxcc.h | |||
@@ -85,7 +85,7 @@ | |||
85 | 85 | ||
86 | #ifndef __ASSEMBLY__ | 86 | #ifndef __ASSEMBLY__ |
87 | 87 | ||
88 | extern __inline__ void mxcc_set_stream_src(unsigned long *paddr) | 88 | static inline void mxcc_set_stream_src(unsigned long *paddr) |
89 | { | 89 | { |
90 | unsigned long data0 = paddr[0]; | 90 | unsigned long data0 = paddr[0]; |
91 | unsigned long data1 = paddr[1]; | 91 | unsigned long data1 = paddr[1]; |
@@ -98,7 +98,7 @@ extern __inline__ void mxcc_set_stream_src(unsigned long *paddr) | |||
98 | "i" (ASI_M_MXCC) : "g2", "g3"); | 98 | "i" (ASI_M_MXCC) : "g2", "g3"); |
99 | } | 99 | } |
100 | 100 | ||
101 | extern __inline__ void mxcc_set_stream_dst(unsigned long *paddr) | 101 | static inline void mxcc_set_stream_dst(unsigned long *paddr) |
102 | { | 102 | { |
103 | unsigned long data0 = paddr[0]; | 103 | unsigned long data0 = paddr[0]; |
104 | unsigned long data1 = paddr[1]; | 104 | unsigned long data1 = paddr[1]; |
@@ -111,7 +111,7 @@ extern __inline__ void mxcc_set_stream_dst(unsigned long *paddr) | |||
111 | "i" (ASI_M_MXCC) : "g2", "g3"); | 111 | "i" (ASI_M_MXCC) : "g2", "g3"); |
112 | } | 112 | } |
113 | 113 | ||
114 | extern __inline__ unsigned long mxcc_get_creg(void) | 114 | static inline unsigned long mxcc_get_creg(void) |
115 | { | 115 | { |
116 | unsigned long mxcc_control; | 116 | unsigned long mxcc_control; |
117 | 117 | ||
@@ -125,7 +125,7 @@ extern __inline__ unsigned long mxcc_get_creg(void) | |||
125 | return mxcc_control; | 125 | return mxcc_control; |
126 | } | 126 | } |
127 | 127 | ||
128 | extern __inline__ void mxcc_set_creg(unsigned long mxcc_control) | 128 | static inline void mxcc_set_creg(unsigned long mxcc_control) |
129 | { | 129 | { |
130 | __asm__ __volatile__("sta %0, [%1] %2\n\t" : : | 130 | __asm__ __volatile__("sta %0, [%1] %2\n\t" : : |
131 | "r" (mxcc_control), "r" (MXCC_CREG), | 131 | "r" (mxcc_control), "r" (MXCC_CREG), |
diff --git a/include/asm-sparc/obio.h b/include/asm-sparc/obio.h index 62e1d77965f3..47854a2a12cf 100644 --- a/include/asm-sparc/obio.h +++ b/include/asm-sparc/obio.h | |||
@@ -98,7 +98,7 @@ | |||
98 | 98 | ||
99 | #ifndef __ASSEMBLY__ | 99 | #ifndef __ASSEMBLY__ |
100 | 100 | ||
101 | extern __inline__ int bw_get_intr_mask(int sbus_level) | 101 | static inline int bw_get_intr_mask(int sbus_level) |
102 | { | 102 | { |
103 | int mask; | 103 | int mask; |
104 | 104 | ||
@@ -109,7 +109,7 @@ extern __inline__ int bw_get_intr_mask(int sbus_level) | |||
109 | return mask; | 109 | return mask; |
110 | } | 110 | } |
111 | 111 | ||
112 | extern __inline__ void bw_clear_intr_mask(int sbus_level, int mask) | 112 | static inline void bw_clear_intr_mask(int sbus_level, int mask) |
113 | { | 113 | { |
114 | __asm__ __volatile__ ("stha %0, [%1] %2" : : | 114 | __asm__ __volatile__ ("stha %0, [%1] %2" : : |
115 | "r" (mask), | 115 | "r" (mask), |
@@ -117,7 +117,7 @@ extern __inline__ void bw_clear_intr_mask(int sbus_level, int mask) | |||
117 | "i" (ASI_M_CTL)); | 117 | "i" (ASI_M_CTL)); |
118 | } | 118 | } |
119 | 119 | ||
120 | extern __inline__ unsigned bw_get_prof_limit(int cpu) | 120 | static inline unsigned bw_get_prof_limit(int cpu) |
121 | { | 121 | { |
122 | unsigned limit; | 122 | unsigned limit; |
123 | 123 | ||
@@ -128,7 +128,7 @@ extern __inline__ unsigned bw_get_prof_limit(int cpu) | |||
128 | return limit; | 128 | return limit; |
129 | } | 129 | } |
130 | 130 | ||
131 | extern __inline__ void bw_set_prof_limit(int cpu, unsigned limit) | 131 | static inline void bw_set_prof_limit(int cpu, unsigned limit) |
132 | { | 132 | { |
133 | __asm__ __volatile__ ("sta %0, [%1] %2" : : | 133 | __asm__ __volatile__ ("sta %0, [%1] %2" : : |
134 | "r" (limit), | 134 | "r" (limit), |
@@ -136,7 +136,7 @@ extern __inline__ void bw_set_prof_limit(int cpu, unsigned limit) | |||
136 | "i" (ASI_M_CTL)); | 136 | "i" (ASI_M_CTL)); |
137 | } | 137 | } |
138 | 138 | ||
139 | extern __inline__ unsigned bw_get_ctrl(int cpu) | 139 | static inline unsigned bw_get_ctrl(int cpu) |
140 | { | 140 | { |
141 | unsigned ctrl; | 141 | unsigned ctrl; |
142 | 142 | ||
@@ -147,7 +147,7 @@ extern __inline__ unsigned bw_get_ctrl(int cpu) | |||
147 | return ctrl; | 147 | return ctrl; |
148 | } | 148 | } |
149 | 149 | ||
150 | extern __inline__ void bw_set_ctrl(int cpu, unsigned ctrl) | 150 | static inline void bw_set_ctrl(int cpu, unsigned ctrl) |
151 | { | 151 | { |
152 | __asm__ __volatile__ ("sta %0, [%1] %2" : : | 152 | __asm__ __volatile__ ("sta %0, [%1] %2" : : |
153 | "r" (ctrl), | 153 | "r" (ctrl), |
@@ -157,7 +157,7 @@ extern __inline__ void bw_set_ctrl(int cpu, unsigned ctrl) | |||
157 | 157 | ||
158 | extern unsigned char cpu_leds[32]; | 158 | extern unsigned char cpu_leds[32]; |
159 | 159 | ||
160 | extern __inline__ void show_leds(int cpuid) | 160 | static inline void show_leds(int cpuid) |
161 | { | 161 | { |
162 | cpuid &= 0x1e; | 162 | cpuid &= 0x1e; |
163 | __asm__ __volatile__ ("stba %0, [%1] %2" : : | 163 | __asm__ __volatile__ ("stba %0, [%1] %2" : : |
@@ -166,7 +166,7 @@ extern __inline__ void show_leds(int cpuid) | |||
166 | "i" (ASI_M_CTL)); | 166 | "i" (ASI_M_CTL)); |
167 | } | 167 | } |
168 | 168 | ||
169 | extern __inline__ unsigned cc_get_ipen(void) | 169 | static inline unsigned cc_get_ipen(void) |
170 | { | 170 | { |
171 | unsigned pending; | 171 | unsigned pending; |
172 | 172 | ||
@@ -177,7 +177,7 @@ extern __inline__ unsigned cc_get_ipen(void) | |||
177 | return pending; | 177 | return pending; |
178 | } | 178 | } |
179 | 179 | ||
180 | extern __inline__ void cc_set_iclr(unsigned clear) | 180 | static inline void cc_set_iclr(unsigned clear) |
181 | { | 181 | { |
182 | __asm__ __volatile__ ("stha %0, [%1] %2" : : | 182 | __asm__ __volatile__ ("stha %0, [%1] %2" : : |
183 | "r" (clear), | 183 | "r" (clear), |
@@ -185,7 +185,7 @@ extern __inline__ void cc_set_iclr(unsigned clear) | |||
185 | "i" (ASI_M_MXCC)); | 185 | "i" (ASI_M_MXCC)); |
186 | } | 186 | } |
187 | 187 | ||
188 | extern __inline__ unsigned cc_get_imsk(void) | 188 | static inline unsigned cc_get_imsk(void) |
189 | { | 189 | { |
190 | unsigned mask; | 190 | unsigned mask; |
191 | 191 | ||
@@ -196,7 +196,7 @@ extern __inline__ unsigned cc_get_imsk(void) | |||
196 | return mask; | 196 | return mask; |
197 | } | 197 | } |
198 | 198 | ||
199 | extern __inline__ void cc_set_imsk(unsigned mask) | 199 | static inline void cc_set_imsk(unsigned mask) |
200 | { | 200 | { |
201 | __asm__ __volatile__ ("stha %0, [%1] %2" : : | 201 | __asm__ __volatile__ ("stha %0, [%1] %2" : : |
202 | "r" (mask), | 202 | "r" (mask), |
@@ -204,7 +204,7 @@ extern __inline__ void cc_set_imsk(unsigned mask) | |||
204 | "i" (ASI_M_MXCC)); | 204 | "i" (ASI_M_MXCC)); |
205 | } | 205 | } |
206 | 206 | ||
207 | extern __inline__ unsigned cc_get_imsk_other(int cpuid) | 207 | static inline unsigned cc_get_imsk_other(int cpuid) |
208 | { | 208 | { |
209 | unsigned mask; | 209 | unsigned mask; |
210 | 210 | ||
@@ -215,7 +215,7 @@ extern __inline__ unsigned cc_get_imsk_other(int cpuid) | |||
215 | return mask; | 215 | return mask; |
216 | } | 216 | } |
217 | 217 | ||
218 | extern __inline__ void cc_set_imsk_other(int cpuid, unsigned mask) | 218 | static inline void cc_set_imsk_other(int cpuid, unsigned mask) |
219 | { | 219 | { |
220 | __asm__ __volatile__ ("stha %0, [%1] %2" : : | 220 | __asm__ __volatile__ ("stha %0, [%1] %2" : : |
221 | "r" (mask), | 221 | "r" (mask), |
@@ -223,7 +223,7 @@ extern __inline__ void cc_set_imsk_other(int cpuid, unsigned mask) | |||
223 | "i" (ASI_M_CTL)); | 223 | "i" (ASI_M_CTL)); |
224 | } | 224 | } |
225 | 225 | ||
226 | extern __inline__ void cc_set_igen(unsigned gen) | 226 | static inline void cc_set_igen(unsigned gen) |
227 | { | 227 | { |
228 | __asm__ __volatile__ ("sta %0, [%1] %2" : : | 228 | __asm__ __volatile__ ("sta %0, [%1] %2" : : |
229 | "r" (gen), | 229 | "r" (gen), |
@@ -239,7 +239,7 @@ extern __inline__ void cc_set_igen(unsigned gen) | |||
239 | #define IGEN_MESSAGE(bcast, devid, sid, levels) \ | 239 | #define IGEN_MESSAGE(bcast, devid, sid, levels) \ |
240 | (((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels)) | 240 | (((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels)) |
241 | 241 | ||
242 | extern __inline__ void sun4d_send_ipi(int cpu, int level) | 242 | static inline void sun4d_send_ipi(int cpu, int level) |
243 | { | 243 | { |
244 | cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1))); | 244 | cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1))); |
245 | } | 245 | } |
diff --git a/include/asm-sparc/pci.h b/include/asm-sparc/pci.h index 97052baf90c1..38644742f011 100644 --- a/include/asm-sparc/pci.h +++ b/include/asm-sparc/pci.h | |||
@@ -15,12 +15,12 @@ | |||
15 | 15 | ||
16 | #define PCI_IRQ_NONE 0xffffffff | 16 | #define PCI_IRQ_NONE 0xffffffff |
17 | 17 | ||
18 | extern inline void pcibios_set_master(struct pci_dev *dev) | 18 | static inline void pcibios_set_master(struct pci_dev *dev) |
19 | { | 19 | { |
20 | /* No special bus mastering setup handling */ | 20 | /* No special bus mastering setup handling */ |
21 | } | 21 | } |
22 | 22 | ||
23 | extern inline void pcibios_penalize_isa_irq(int irq, int active) | 23 | static inline void pcibios_penalize_isa_irq(int irq, int active) |
24 | { | 24 | { |
25 | /* We don't do dynamic PCI IRQ allocation */ | 25 | /* We don't do dynamic PCI IRQ allocation */ |
26 | } | 26 | } |
@@ -137,7 +137,7 @@ extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist | |||
137 | * only drive the low 24-bits during PCI bus mastering, then | 137 | * only drive the low 24-bits during PCI bus mastering, then |
138 | * you would pass 0x00ffffff as the mask to this function. | 138 | * you would pass 0x00ffffff as the mask to this function. |
139 | */ | 139 | */ |
140 | extern inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask) | 140 | static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask) |
141 | { | 141 | { |
142 | return 1; | 142 | return 1; |
143 | } | 143 | } |
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h index 8395ad2f1c09..a14e98677500 100644 --- a/include/asm-sparc/pgtable.h +++ b/include/asm-sparc/pgtable.h | |||
@@ -154,7 +154,7 @@ BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t) | |||
154 | BTFIXUPDEF_CALL(void, pte_clear, pte_t *) | 154 | BTFIXUPDEF_CALL(void, pte_clear, pte_t *) |
155 | BTFIXUPDEF_CALL(int, pte_read, pte_t) | 155 | BTFIXUPDEF_CALL(int, pte_read, pte_t) |
156 | 156 | ||
157 | extern __inline__ int pte_none(pte_t pte) | 157 | static inline int pte_none(pte_t pte) |
158 | { | 158 | { |
159 | return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask)); | 159 | return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask)); |
160 | } | 160 | } |
@@ -167,7 +167,7 @@ BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t) | |||
167 | BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t) | 167 | BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t) |
168 | BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *) | 168 | BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *) |
169 | 169 | ||
170 | extern __inline__ int pmd_none(pmd_t pmd) | 170 | static inline int pmd_none(pmd_t pmd) |
171 | { | 171 | { |
172 | return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask)); | 172 | return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask)); |
173 | } | 173 | } |
@@ -194,20 +194,20 @@ BTFIXUPDEF_HALF(pte_writei) | |||
194 | BTFIXUPDEF_HALF(pte_dirtyi) | 194 | BTFIXUPDEF_HALF(pte_dirtyi) |
195 | BTFIXUPDEF_HALF(pte_youngi) | 195 | BTFIXUPDEF_HALF(pte_youngi) |
196 | 196 | ||
197 | extern int pte_write(pte_t pte) __attribute_const__; | 197 | static int pte_write(pte_t pte) __attribute_const__; |
198 | extern __inline__ int pte_write(pte_t pte) | 198 | static inline int pte_write(pte_t pte) |
199 | { | 199 | { |
200 | return pte_val(pte) & BTFIXUP_HALF(pte_writei); | 200 | return pte_val(pte) & BTFIXUP_HALF(pte_writei); |
201 | } | 201 | } |
202 | 202 | ||
203 | extern int pte_dirty(pte_t pte) __attribute_const__; | 203 | static int pte_dirty(pte_t pte) __attribute_const__; |
204 | extern __inline__ int pte_dirty(pte_t pte) | 204 | static inline int pte_dirty(pte_t pte) |
205 | { | 205 | { |
206 | return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi); | 206 | return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi); |
207 | } | 207 | } |
208 | 208 | ||
209 | extern int pte_young(pte_t pte) __attribute_const__; | 209 | static int pte_young(pte_t pte) __attribute_const__; |
210 | extern __inline__ int pte_young(pte_t pte) | 210 | static inline int pte_young(pte_t pte) |
211 | { | 211 | { |
212 | return pte_val(pte) & BTFIXUP_HALF(pte_youngi); | 212 | return pte_val(pte) & BTFIXUP_HALF(pte_youngi); |
213 | } | 213 | } |
@@ -217,8 +217,8 @@ extern __inline__ int pte_young(pte_t pte) | |||
217 | */ | 217 | */ |
218 | BTFIXUPDEF_HALF(pte_filei) | 218 | BTFIXUPDEF_HALF(pte_filei) |
219 | 219 | ||
220 | extern int pte_file(pte_t pte) __attribute_const__; | 220 | static int pte_file(pte_t pte) __attribute_const__; |
221 | extern __inline__ int pte_file(pte_t pte) | 221 | static inline int pte_file(pte_t pte) |
222 | { | 222 | { |
223 | return pte_val(pte) & BTFIXUP_HALF(pte_filei); | 223 | return pte_val(pte) & BTFIXUP_HALF(pte_filei); |
224 | } | 224 | } |
@@ -229,20 +229,20 @@ BTFIXUPDEF_HALF(pte_wrprotecti) | |||
229 | BTFIXUPDEF_HALF(pte_mkcleani) | 229 | BTFIXUPDEF_HALF(pte_mkcleani) |
230 | BTFIXUPDEF_HALF(pte_mkoldi) | 230 | BTFIXUPDEF_HALF(pte_mkoldi) |
231 | 231 | ||
232 | extern pte_t pte_wrprotect(pte_t pte) __attribute_const__; | 232 | static pte_t pte_wrprotect(pte_t pte) __attribute_const__; |
233 | extern __inline__ pte_t pte_wrprotect(pte_t pte) | 233 | static inline pte_t pte_wrprotect(pte_t pte) |
234 | { | 234 | { |
235 | return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti)); | 235 | return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti)); |
236 | } | 236 | } |
237 | 237 | ||
238 | extern pte_t pte_mkclean(pte_t pte) __attribute_const__; | 238 | static pte_t pte_mkclean(pte_t pte) __attribute_const__; |
239 | extern __inline__ pte_t pte_mkclean(pte_t pte) | 239 | static inline pte_t pte_mkclean(pte_t pte) |
240 | { | 240 | { |
241 | return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani)); | 241 | return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani)); |
242 | } | 242 | } |
243 | 243 | ||
244 | extern pte_t pte_mkold(pte_t pte) __attribute_const__; | 244 | static pte_t pte_mkold(pte_t pte) __attribute_const__; |
245 | extern __inline__ pte_t pte_mkold(pte_t pte) | 245 | static inline pte_t pte_mkold(pte_t pte) |
246 | { | 246 | { |
247 | return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi)); | 247 | return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi)); |
248 | } | 248 | } |
@@ -278,8 +278,8 @@ BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int) | |||
278 | 278 | ||
279 | BTFIXUPDEF_INT(pte_modify_mask) | 279 | BTFIXUPDEF_INT(pte_modify_mask) |
280 | 280 | ||
281 | extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__; | 281 | static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__; |
282 | extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot) | 282 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
283 | { | 283 | { |
284 | return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) | | 284 | return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) | |
285 | pgprot_val(newprot)); | 285 | pgprot_val(newprot)); |
@@ -386,13 +386,13 @@ extern struct ctx_list ctx_used; /* Head of used contexts list */ | |||
386 | 386 | ||
387 | #define NO_CONTEXT -1 | 387 | #define NO_CONTEXT -1 |
388 | 388 | ||
389 | extern __inline__ void remove_from_ctx_list(struct ctx_list *entry) | 389 | static inline void remove_from_ctx_list(struct ctx_list *entry) |
390 | { | 390 | { |
391 | entry->next->prev = entry->prev; | 391 | entry->next->prev = entry->prev; |
392 | entry->prev->next = entry->next; | 392 | entry->prev->next = entry->next; |
393 | } | 393 | } |
394 | 394 | ||
395 | extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) | 395 | static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) |
396 | { | 396 | { |
397 | entry->next = head; | 397 | entry->next = head; |
398 | (entry->prev = head->prev)->next = entry; | 398 | (entry->prev = head->prev)->next = entry; |
@@ -401,7 +401,7 @@ extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *e | |||
401 | #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry) | 401 | #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry) |
402 | #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry) | 402 | #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry) |
403 | 403 | ||
404 | extern __inline__ unsigned long | 404 | static inline unsigned long |
405 | __get_phys (unsigned long addr) | 405 | __get_phys (unsigned long addr) |
406 | { | 406 | { |
407 | switch (sparc_cpu_model){ | 407 | switch (sparc_cpu_model){ |
@@ -416,7 +416,7 @@ __get_phys (unsigned long addr) | |||
416 | } | 416 | } |
417 | } | 417 | } |
418 | 418 | ||
419 | extern __inline__ int | 419 | static inline int |
420 | __get_iospace (unsigned long addr) | 420 | __get_iospace (unsigned long addr) |
421 | { | 421 | { |
422 | switch (sparc_cpu_model){ | 422 | switch (sparc_cpu_model){ |
diff --git a/include/asm-sparc/pgtsrmmu.h b/include/asm-sparc/pgtsrmmu.h index ee3b9d93187c..edeb9811e728 100644 --- a/include/asm-sparc/pgtsrmmu.h +++ b/include/asm-sparc/pgtsrmmu.h | |||
@@ -148,7 +148,7 @@ extern void *srmmu_nocache_pool; | |||
148 | #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR)) | 148 | #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR)) |
149 | 149 | ||
150 | /* Accessing the MMU control register. */ | 150 | /* Accessing the MMU control register. */ |
151 | extern __inline__ unsigned int srmmu_get_mmureg(void) | 151 | static inline unsigned int srmmu_get_mmureg(void) |
152 | { | 152 | { |
153 | unsigned int retval; | 153 | unsigned int retval; |
154 | __asm__ __volatile__("lda [%%g0] %1, %0\n\t" : | 154 | __asm__ __volatile__("lda [%%g0] %1, %0\n\t" : |
@@ -157,14 +157,14 @@ extern __inline__ unsigned int srmmu_get_mmureg(void) | |||
157 | return retval; | 157 | return retval; |
158 | } | 158 | } |
159 | 159 | ||
160 | extern __inline__ void srmmu_set_mmureg(unsigned long regval) | 160 | static inline void srmmu_set_mmureg(unsigned long regval) |
161 | { | 161 | { |
162 | __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : : | 162 | __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : : |
163 | "r" (regval), "i" (ASI_M_MMUREGS) : "memory"); | 163 | "r" (regval), "i" (ASI_M_MMUREGS) : "memory"); |
164 | 164 | ||
165 | } | 165 | } |
166 | 166 | ||
167 | extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr) | 167 | static inline void srmmu_set_ctable_ptr(unsigned long paddr) |
168 | { | 168 | { |
169 | paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); | 169 | paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); |
170 | __asm__ __volatile__("sta %0, [%1] %2\n\t" : : | 170 | __asm__ __volatile__("sta %0, [%1] %2\n\t" : : |
@@ -173,7 +173,7 @@ extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr) | |||
173 | "memory"); | 173 | "memory"); |
174 | } | 174 | } |
175 | 175 | ||
176 | extern __inline__ unsigned long srmmu_get_ctable_ptr(void) | 176 | static inline unsigned long srmmu_get_ctable_ptr(void) |
177 | { | 177 | { |
178 | unsigned int retval; | 178 | unsigned int retval; |
179 | 179 | ||
@@ -184,14 +184,14 @@ extern __inline__ unsigned long srmmu_get_ctable_ptr(void) | |||
184 | return (retval & SRMMU_CTX_PMASK) << 4; | 184 | return (retval & SRMMU_CTX_PMASK) << 4; |
185 | } | 185 | } |
186 | 186 | ||
187 | extern __inline__ void srmmu_set_context(int context) | 187 | static inline void srmmu_set_context(int context) |
188 | { | 188 | { |
189 | __asm__ __volatile__("sta %0, [%1] %2\n\t" : : | 189 | __asm__ __volatile__("sta %0, [%1] %2\n\t" : : |
190 | "r" (context), "r" (SRMMU_CTX_REG), | 190 | "r" (context), "r" (SRMMU_CTX_REG), |
191 | "i" (ASI_M_MMUREGS) : "memory"); | 191 | "i" (ASI_M_MMUREGS) : "memory"); |
192 | } | 192 | } |
193 | 193 | ||
194 | extern __inline__ int srmmu_get_context(void) | 194 | static inline int srmmu_get_context(void) |
195 | { | 195 | { |
196 | register int retval; | 196 | register int retval; |
197 | __asm__ __volatile__("lda [%1] %2, %0\n\t" : | 197 | __asm__ __volatile__("lda [%1] %2, %0\n\t" : |
@@ -201,7 +201,7 @@ extern __inline__ int srmmu_get_context(void) | |||
201 | return retval; | 201 | return retval; |
202 | } | 202 | } |
203 | 203 | ||
204 | extern __inline__ unsigned int srmmu_get_fstatus(void) | 204 | static inline unsigned int srmmu_get_fstatus(void) |
205 | { | 205 | { |
206 | unsigned int retval; | 206 | unsigned int retval; |
207 | 207 | ||
@@ -211,7 +211,7 @@ extern __inline__ unsigned int srmmu_get_fstatus(void) | |||
211 | return retval; | 211 | return retval; |
212 | } | 212 | } |
213 | 213 | ||
214 | extern __inline__ unsigned int srmmu_get_faddr(void) | 214 | static inline unsigned int srmmu_get_faddr(void) |
215 | { | 215 | { |
216 | unsigned int retval; | 216 | unsigned int retval; |
217 | 217 | ||
@@ -222,7 +222,7 @@ extern __inline__ unsigned int srmmu_get_faddr(void) | |||
222 | } | 222 | } |
223 | 223 | ||
224 | /* This is guaranteed on all SRMMU's. */ | 224 | /* This is guaranteed on all SRMMU's. */ |
225 | extern __inline__ void srmmu_flush_whole_tlb(void) | 225 | static inline void srmmu_flush_whole_tlb(void) |
226 | { | 226 | { |
227 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : | 227 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : |
228 | "r" (0x400), /* Flush entire TLB!! */ | 228 | "r" (0x400), /* Flush entire TLB!! */ |
@@ -231,7 +231,7 @@ extern __inline__ void srmmu_flush_whole_tlb(void) | |||
231 | } | 231 | } |
232 | 232 | ||
233 | /* These flush types are not available on all chips... */ | 233 | /* These flush types are not available on all chips... */ |
234 | extern __inline__ void srmmu_flush_tlb_ctx(void) | 234 | static inline void srmmu_flush_tlb_ctx(void) |
235 | { | 235 | { |
236 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : | 236 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : |
237 | "r" (0x300), /* Flush TLB ctx.. */ | 237 | "r" (0x300), /* Flush TLB ctx.. */ |
@@ -239,7 +239,7 @@ extern __inline__ void srmmu_flush_tlb_ctx(void) | |||
239 | 239 | ||
240 | } | 240 | } |
241 | 241 | ||
242 | extern __inline__ void srmmu_flush_tlb_region(unsigned long addr) | 242 | static inline void srmmu_flush_tlb_region(unsigned long addr) |
243 | { | 243 | { |
244 | addr &= SRMMU_PGDIR_MASK; | 244 | addr &= SRMMU_PGDIR_MASK; |
245 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : | 245 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : |
@@ -249,7 +249,7 @@ extern __inline__ void srmmu_flush_tlb_region(unsigned long addr) | |||
249 | } | 249 | } |
250 | 250 | ||
251 | 251 | ||
252 | extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr) | 252 | static inline void srmmu_flush_tlb_segment(unsigned long addr) |
253 | { | 253 | { |
254 | addr &= SRMMU_REAL_PMD_MASK; | 254 | addr &= SRMMU_REAL_PMD_MASK; |
255 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : | 255 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : |
@@ -258,7 +258,7 @@ extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr) | |||
258 | 258 | ||
259 | } | 259 | } |
260 | 260 | ||
261 | extern __inline__ void srmmu_flush_tlb_page(unsigned long page) | 261 | static inline void srmmu_flush_tlb_page(unsigned long page) |
262 | { | 262 | { |
263 | page &= PAGE_MASK; | 263 | page &= PAGE_MASK; |
264 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : | 264 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : |
@@ -267,7 +267,7 @@ extern __inline__ void srmmu_flush_tlb_page(unsigned long page) | |||
267 | 267 | ||
268 | } | 268 | } |
269 | 269 | ||
270 | extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr) | 270 | static inline unsigned long srmmu_hwprobe(unsigned long vaddr) |
271 | { | 271 | { |
272 | unsigned long retval; | 272 | unsigned long retval; |
273 | 273 | ||
@@ -279,7 +279,7 @@ extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr) | |||
279 | return retval; | 279 | return retval; |
280 | } | 280 | } |
281 | 281 | ||
282 | extern __inline__ int | 282 | static inline int |
283 | srmmu_get_pte (unsigned long addr) | 283 | srmmu_get_pte (unsigned long addr) |
284 | { | 284 | { |
285 | register unsigned long entry; | 285 | register unsigned long entry; |
diff --git a/include/asm-sparc/processor.h b/include/asm-sparc/processor.h index 5a7a1a8d29ac..6fbb3f0af8d8 100644 --- a/include/asm-sparc/processor.h +++ b/include/asm-sparc/processor.h | |||
@@ -79,7 +79,7 @@ struct thread_struct { | |||
79 | extern unsigned long thread_saved_pc(struct task_struct *t); | 79 | extern unsigned long thread_saved_pc(struct task_struct *t); |
80 | 80 | ||
81 | /* Do necessary setup to start up a newly executed thread. */ | 81 | /* Do necessary setup to start up a newly executed thread. */ |
82 | extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc, | 82 | static inline void start_thread(struct pt_regs * regs, unsigned long pc, |
83 | unsigned long sp) | 83 | unsigned long sp) |
84 | { | 84 | { |
85 | register unsigned long zero asm("g1"); | 85 | register unsigned long zero asm("g1"); |
diff --git a/include/asm-sparc/psr.h b/include/asm-sparc/psr.h index 9778b8c8b15b..19c978051118 100644 --- a/include/asm-sparc/psr.h +++ b/include/asm-sparc/psr.h | |||
@@ -38,7 +38,7 @@ | |||
38 | 38 | ||
39 | #ifndef __ASSEMBLY__ | 39 | #ifndef __ASSEMBLY__ |
40 | /* Get the %psr register. */ | 40 | /* Get the %psr register. */ |
41 | extern __inline__ unsigned int get_psr(void) | 41 | static inline unsigned int get_psr(void) |
42 | { | 42 | { |
43 | unsigned int psr; | 43 | unsigned int psr; |
44 | __asm__ __volatile__( | 44 | __asm__ __volatile__( |
@@ -53,7 +53,7 @@ extern __inline__ unsigned int get_psr(void) | |||
53 | return psr; | 53 | return psr; |
54 | } | 54 | } |
55 | 55 | ||
56 | extern __inline__ void put_psr(unsigned int new_psr) | 56 | static inline void put_psr(unsigned int new_psr) |
57 | { | 57 | { |
58 | __asm__ __volatile__( | 58 | __asm__ __volatile__( |
59 | "wr %0, 0x0, %%psr\n\t" | 59 | "wr %0, 0x0, %%psr\n\t" |
@@ -72,7 +72,7 @@ extern __inline__ void put_psr(unsigned int new_psr) | |||
72 | 72 | ||
73 | extern unsigned int fsr_storage; | 73 | extern unsigned int fsr_storage; |
74 | 74 | ||
75 | extern __inline__ unsigned int get_fsr(void) | 75 | static inline unsigned int get_fsr(void) |
76 | { | 76 | { |
77 | unsigned int fsr = 0; | 77 | unsigned int fsr = 0; |
78 | 78 | ||
diff --git a/include/asm-sparc/sbi.h b/include/asm-sparc/sbi.h index 739ccac5dcf2..86a603ac7b20 100644 --- a/include/asm-sparc/sbi.h +++ b/include/asm-sparc/sbi.h | |||
@@ -65,7 +65,7 @@ struct sbi_regs { | |||
65 | 65 | ||
66 | #ifndef __ASSEMBLY__ | 66 | #ifndef __ASSEMBLY__ |
67 | 67 | ||
68 | extern __inline__ int acquire_sbi(int devid, int mask) | 68 | static inline int acquire_sbi(int devid, int mask) |
69 | { | 69 | { |
70 | __asm__ __volatile__ ("swapa [%2] %3, %0" : | 70 | __asm__ __volatile__ ("swapa [%2] %3, %0" : |
71 | "=r" (mask) : | 71 | "=r" (mask) : |
@@ -75,7 +75,7 @@ extern __inline__ int acquire_sbi(int devid, int mask) | |||
75 | return mask; | 75 | return mask; |
76 | } | 76 | } |
77 | 77 | ||
78 | extern __inline__ void release_sbi(int devid, int mask) | 78 | static inline void release_sbi(int devid, int mask) |
79 | { | 79 | { |
80 | __asm__ __volatile__ ("sta %0, [%1] %2" : : | 80 | __asm__ __volatile__ ("sta %0, [%1] %2" : : |
81 | "r" (mask), | 81 | "r" (mask), |
@@ -83,7 +83,7 @@ extern __inline__ void release_sbi(int devid, int mask) | |||
83 | "i" (ASI_M_CTL)); | 83 | "i" (ASI_M_CTL)); |
84 | } | 84 | } |
85 | 85 | ||
86 | extern __inline__ void set_sbi_tid(int devid, int targetid) | 86 | static inline void set_sbi_tid(int devid, int targetid) |
87 | { | 87 | { |
88 | __asm__ __volatile__ ("sta %0, [%1] %2" : : | 88 | __asm__ __volatile__ ("sta %0, [%1] %2" : : |
89 | "r" (targetid), | 89 | "r" (targetid), |
@@ -91,7 +91,7 @@ extern __inline__ void set_sbi_tid(int devid, int targetid) | |||
91 | "i" (ASI_M_CTL)); | 91 | "i" (ASI_M_CTL)); |
92 | } | 92 | } |
93 | 93 | ||
94 | extern __inline__ int get_sbi_ctl(int devid, int cfgno) | 94 | static inline int get_sbi_ctl(int devid, int cfgno) |
95 | { | 95 | { |
96 | int cfg; | 96 | int cfg; |
97 | 97 | ||
@@ -102,7 +102,7 @@ extern __inline__ int get_sbi_ctl(int devid, int cfgno) | |||
102 | return cfg; | 102 | return cfg; |
103 | } | 103 | } |
104 | 104 | ||
105 | extern __inline__ void set_sbi_ctl(int devid, int cfgno, int cfg) | 105 | static inline void set_sbi_ctl(int devid, int cfgno, int cfg) |
106 | { | 106 | { |
107 | __asm__ __volatile__ ("sta %0, [%1] %2" : : | 107 | __asm__ __volatile__ ("sta %0, [%1] %2" : : |
108 | "r" (cfg), | 108 | "r" (cfg), |
diff --git a/include/asm-sparc/sbus.h b/include/asm-sparc/sbus.h index 3a8b3908728a..a13cddcecec5 100644 --- a/include/asm-sparc/sbus.h +++ b/include/asm-sparc/sbus.h | |||
@@ -28,12 +28,12 @@ | |||
28 | * numbers + offsets, and vice versa. | 28 | * numbers + offsets, and vice versa. |
29 | */ | 29 | */ |
30 | 30 | ||
31 | extern __inline__ unsigned long sbus_devaddr(int slotnum, unsigned long offset) | 31 | static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset) |
32 | { | 32 | { |
33 | return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset)); | 33 | return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset)); |
34 | } | 34 | } |
35 | 35 | ||
36 | extern __inline__ int sbus_dev_slot(unsigned long dev_addr) | 36 | static inline int sbus_dev_slot(unsigned long dev_addr) |
37 | { | 37 | { |
38 | return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25); | 38 | return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25); |
39 | } | 39 | } |
@@ -80,7 +80,7 @@ struct sbus_bus { | |||
80 | 80 | ||
81 | extern struct sbus_bus *sbus_root; | 81 | extern struct sbus_bus *sbus_root; |
82 | 82 | ||
83 | extern __inline__ int | 83 | static inline int |
84 | sbus_is_slave(struct sbus_dev *dev) | 84 | sbus_is_slave(struct sbus_dev *dev) |
85 | { | 85 | { |
86 | /* XXX Have to write this for sun4c's */ | 86 | /* XXX Have to write this for sun4c's */ |
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h index 4f96d8333a12..580c51d011df 100644 --- a/include/asm-sparc/smp.h +++ b/include/asm-sparc/smp.h | |||
@@ -60,22 +60,22 @@ BTFIXUPDEF_BLACKBOX(load_current) | |||
60 | #define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5) | 60 | #define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5) |
61 | #define smp_message_pass(target,msg,data,wait) BTFIXUP_CALL(smp_message_pass)(target,msg,data,wait) | 61 | #define smp_message_pass(target,msg,data,wait) BTFIXUP_CALL(smp_message_pass)(target,msg,data,wait) |
62 | 62 | ||
63 | extern __inline__ void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); } | 63 | static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); } |
64 | extern __inline__ void xc1(smpfunc_t func, unsigned long arg1) | 64 | static inline void xc1(smpfunc_t func, unsigned long arg1) |
65 | { smp_cross_call(func, arg1, 0, 0, 0, 0); } | 65 | { smp_cross_call(func, arg1, 0, 0, 0, 0); } |
66 | extern __inline__ void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2) | 66 | static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2) |
67 | { smp_cross_call(func, arg1, arg2, 0, 0, 0); } | 67 | { smp_cross_call(func, arg1, arg2, 0, 0, 0); } |
68 | extern __inline__ void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2, | 68 | static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2, |
69 | unsigned long arg3) | 69 | unsigned long arg3) |
70 | { smp_cross_call(func, arg1, arg2, arg3, 0, 0); } | 70 | { smp_cross_call(func, arg1, arg2, arg3, 0, 0); } |
71 | extern __inline__ void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2, | 71 | static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2, |
72 | unsigned long arg3, unsigned long arg4) | 72 | unsigned long arg3, unsigned long arg4) |
73 | { smp_cross_call(func, arg1, arg2, arg3, arg4, 0); } | 73 | { smp_cross_call(func, arg1, arg2, arg3, arg4, 0); } |
74 | extern __inline__ void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2, | 74 | static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2, |
75 | unsigned long arg3, unsigned long arg4, unsigned long arg5) | 75 | unsigned long arg3, unsigned long arg4, unsigned long arg5) |
76 | { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); } | 76 | { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); } |
77 | 77 | ||
78 | extern __inline__ int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait) | 78 | static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait) |
79 | { | 79 | { |
80 | xc1((smpfunc_t)func, (unsigned long)info); | 80 | xc1((smpfunc_t)func, (unsigned long)info); |
81 | return 0; | 81 | return 0; |
@@ -84,16 +84,16 @@ extern __inline__ int smp_call_function(void (*func)(void *info), void *info, in | |||
84 | extern __volatile__ int __cpu_number_map[NR_CPUS]; | 84 | extern __volatile__ int __cpu_number_map[NR_CPUS]; |
85 | extern __volatile__ int __cpu_logical_map[NR_CPUS]; | 85 | extern __volatile__ int __cpu_logical_map[NR_CPUS]; |
86 | 86 | ||
87 | extern __inline__ int cpu_logical_map(int cpu) | 87 | static inline int cpu_logical_map(int cpu) |
88 | { | 88 | { |
89 | return __cpu_logical_map[cpu]; | 89 | return __cpu_logical_map[cpu]; |
90 | } | 90 | } |
91 | extern __inline__ int cpu_number_map(int cpu) | 91 | static inline int cpu_number_map(int cpu) |
92 | { | 92 | { |
93 | return __cpu_number_map[cpu]; | 93 | return __cpu_number_map[cpu]; |
94 | } | 94 | } |
95 | 95 | ||
96 | extern __inline__ int hard_smp4m_processor_id(void) | 96 | static inline int hard_smp4m_processor_id(void) |
97 | { | 97 | { |
98 | int cpuid; | 98 | int cpuid; |
99 | 99 | ||
@@ -104,7 +104,7 @@ extern __inline__ int hard_smp4m_processor_id(void) | |||
104 | return cpuid; | 104 | return cpuid; |
105 | } | 105 | } |
106 | 106 | ||
107 | extern __inline__ int hard_smp4d_processor_id(void) | 107 | static inline int hard_smp4d_processor_id(void) |
108 | { | 108 | { |
109 | int cpuid; | 109 | int cpuid; |
110 | 110 | ||
@@ -114,7 +114,7 @@ extern __inline__ int hard_smp4d_processor_id(void) | |||
114 | } | 114 | } |
115 | 115 | ||
116 | #ifndef MODULE | 116 | #ifndef MODULE |
117 | extern __inline__ int hard_smp_processor_id(void) | 117 | static inline int hard_smp_processor_id(void) |
118 | { | 118 | { |
119 | int cpuid; | 119 | int cpuid; |
120 | 120 | ||
@@ -136,7 +136,7 @@ extern __inline__ int hard_smp_processor_id(void) | |||
136 | return cpuid; | 136 | return cpuid; |
137 | } | 137 | } |
138 | #else | 138 | #else |
139 | extern __inline__ int hard_smp_processor_id(void) | 139 | static inline int hard_smp_processor_id(void) |
140 | { | 140 | { |
141 | int cpuid; | 141 | int cpuid; |
142 | 142 | ||
diff --git a/include/asm-sparc/smpprim.h b/include/asm-sparc/smpprim.h index 9b9c28ed748e..e7b6d346ae10 100644 --- a/include/asm-sparc/smpprim.h +++ b/include/asm-sparc/smpprim.h | |||
@@ -15,7 +15,7 @@ | |||
15 | * atomic. | 15 | * atomic. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | extern __inline__ __volatile__ char test_and_set(void *addr) | 18 | static inline __volatile__ char test_and_set(void *addr) |
19 | { | 19 | { |
20 | char state = 0; | 20 | char state = 0; |
21 | 21 | ||
@@ -27,7 +27,7 @@ extern __inline__ __volatile__ char test_and_set(void *addr) | |||
27 | } | 27 | } |
28 | 28 | ||
29 | /* Initialize a spin-lock. */ | 29 | /* Initialize a spin-lock. */ |
30 | extern __inline__ __volatile__ smp_initlock(void *spinlock) | 30 | static inline __volatile__ smp_initlock(void *spinlock) |
31 | { | 31 | { |
32 | /* Unset the lock. */ | 32 | /* Unset the lock. */ |
33 | *((unsigned char *) spinlock) = 0; | 33 | *((unsigned char *) spinlock) = 0; |
@@ -36,7 +36,7 @@ extern __inline__ __volatile__ smp_initlock(void *spinlock) | |||
36 | } | 36 | } |
37 | 37 | ||
38 | /* This routine spins until it acquires the lock at ADDR. */ | 38 | /* This routine spins until it acquires the lock at ADDR. */ |
39 | extern __inline__ __volatile__ smp_lock(void *addr) | 39 | static inline __volatile__ smp_lock(void *addr) |
40 | { | 40 | { |
41 | while(test_and_set(addr) == 0xff) | 41 | while(test_and_set(addr) == 0xff) |
42 | ; | 42 | ; |
@@ -46,7 +46,7 @@ extern __inline__ __volatile__ smp_lock(void *addr) | |||
46 | } | 46 | } |
47 | 47 | ||
48 | /* This routine releases the lock at ADDR. */ | 48 | /* This routine releases the lock at ADDR. */ |
49 | extern __inline__ __volatile__ smp_unlock(void *addr) | 49 | static inline __volatile__ smp_unlock(void *addr) |
50 | { | 50 | { |
51 | *((unsigned char *) addr) = 0; | 51 | *((unsigned char *) addr) = 0; |
52 | } | 52 | } |
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h index 111727a2bb4e..e344c98a6f5f 100644 --- a/include/asm-sparc/spinlock.h +++ b/include/asm-sparc/spinlock.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #define __raw_spin_unlock_wait(lock) \ | 17 | #define __raw_spin_unlock_wait(lock) \ |
18 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | 18 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
19 | 19 | ||
20 | extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock) | 20 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
21 | { | 21 | { |
22 | __asm__ __volatile__( | 22 | __asm__ __volatile__( |
23 | "\n1:\n\t" | 23 | "\n1:\n\t" |
@@ -37,7 +37,7 @@ extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock) | |||
37 | : "g2", "memory", "cc"); | 37 | : "g2", "memory", "cc"); |
38 | } | 38 | } |
39 | 39 | ||
40 | extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock) | 40 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
41 | { | 41 | { |
42 | unsigned int result; | 42 | unsigned int result; |
43 | __asm__ __volatile__("ldstub [%1], %0" | 43 | __asm__ __volatile__("ldstub [%1], %0" |
@@ -47,7 +47,7 @@ extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock) | |||
47 | return (result == 0); | 47 | return (result == 0); |
48 | } | 48 | } |
49 | 49 | ||
50 | extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) | 50 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
51 | { | 51 | { |
52 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); | 52 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); |
53 | } | 53 | } |
@@ -78,7 +78,7 @@ extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) | |||
78 | * | 78 | * |
79 | * Unfortunately this scheme limits us to ~16,000,000 cpus. | 79 | * Unfortunately this scheme limits us to ~16,000,000 cpus. |
80 | */ | 80 | */ |
81 | extern __inline__ void __read_lock(raw_rwlock_t *rw) | 81 | static inline void __read_lock(raw_rwlock_t *rw) |
82 | { | 82 | { |
83 | register raw_rwlock_t *lp asm("g1"); | 83 | register raw_rwlock_t *lp asm("g1"); |
84 | lp = rw; | 84 | lp = rw; |
@@ -98,7 +98,7 @@ do { unsigned long flags; \ | |||
98 | local_irq_restore(flags); \ | 98 | local_irq_restore(flags); \ |
99 | } while(0) | 99 | } while(0) |
100 | 100 | ||
101 | extern __inline__ void __read_unlock(raw_rwlock_t *rw) | 101 | static inline void __read_unlock(raw_rwlock_t *rw) |
102 | { | 102 | { |
103 | register raw_rwlock_t *lp asm("g1"); | 103 | register raw_rwlock_t *lp asm("g1"); |
104 | lp = rw; | 104 | lp = rw; |
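The sparc spinlock hunk above converts the ldstub-based primitives to static inline. A hedged usage sketch follows; the caller is hypothetical, but the calling convention (non-blocking attempt, explicit unlock on success) matches the prototypes in the hunk.

	/* Hypothetical caller, for illustration only. */
	static int demo_try_count(raw_spinlock_t *lock, int *counter)
	{
		if (!__raw_spin_trylock(lock))	/* ldstub saw a non-zero byte: busy */
			return 0;		/* caller backs off or retries */

		(*counter)++;			/* critical section */
		__raw_spin_unlock(lock);	/* stb %g0 clears the lock byte */
		return 1;
	}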
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h index 3557781a4bfd..1f6b71f9e1b6 100644 --- a/include/asm-sparc/system.h +++ b/include/asm-sparc/system.h | |||
@@ -204,7 +204,7 @@ static inline unsigned long getipl(void) | |||
204 | BTFIXUPDEF_CALL(void, ___xchg32, void) | 204 | BTFIXUPDEF_CALL(void, ___xchg32, void) |
205 | #endif | 205 | #endif |
206 | 206 | ||
207 | extern __inline__ unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) | 207 | static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) |
208 | { | 208 | { |
209 | #ifdef CONFIG_SMP | 209 | #ifdef CONFIG_SMP |
210 | __asm__ __volatile__("swap [%2], %0" | 210 | __asm__ __volatile__("swap [%2], %0" |
diff --git a/include/asm-sparc/traps.h b/include/asm-sparc/traps.h index 6690ab956ea6..f62c7f878ee1 100644 --- a/include/asm-sparc/traps.h +++ b/include/asm-sparc/traps.h | |||
@@ -22,7 +22,7 @@ struct tt_entry { | |||
22 | /* We set this to _start in system setup. */ | 22 | /* We set this to _start in system setup. */ |
23 | extern struct tt_entry *sparc_ttable; | 23 | extern struct tt_entry *sparc_ttable; |
24 | 24 | ||
25 | extern __inline__ unsigned long get_tbr(void) | 25 | static inline unsigned long get_tbr(void) |
26 | { | 26 | { |
27 | unsigned long tbr; | 27 | unsigned long tbr; |
28 | 28 | ||
diff --git a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h index 2d242360c3d6..075771c371f6 100644 --- a/include/asm-um/processor-generic.h +++ b/include/asm-um/processor-generic.h | |||
@@ -13,6 +13,7 @@ struct task_struct; | |||
13 | #include "linux/config.h" | 13 | #include "linux/config.h" |
14 | #include "asm/ptrace.h" | 14 | #include "asm/ptrace.h" |
15 | #include "choose-mode.h" | 15 | #include "choose-mode.h" |
16 | #include "registers.h" | ||
16 | 17 | ||
17 | struct mm_struct; | 18 | struct mm_struct; |
18 | 19 | ||
@@ -136,19 +137,15 @@ extern struct cpuinfo_um cpu_data[]; | |||
136 | #define current_cpu_data boot_cpu_data | 137 | #define current_cpu_data boot_cpu_data |
137 | #endif | 138 | #endif |
138 | 139 | ||
139 | #define KSTK_EIP(tsk) (PT_REGS_IP(&tsk->thread.regs)) | ||
140 | #define KSTK_ESP(tsk) (PT_REGS_SP(&tsk->thread.regs)) | ||
141 | #define get_wchan(p) (0) | ||
142 | 140 | ||
141 | #ifdef CONFIG_MODE_SKAS | ||
142 | #define KSTK_REG(tsk, reg) \ | ||
143 | ({ union uml_pt_regs regs; \ | ||
144 | get_thread_regs(®s, tsk->thread.mode.skas.switch_buf); \ | ||
145 | UPT_REG(®s, reg); }) | ||
146 | #else | ||
147 | #define KSTK_REG(tsk, reg) (0xbadbabe) | ||
143 | #endif | 148 | #endif |
149 | #define get_wchan(p) (0) | ||
144 | 150 | ||
145 | /* | 151 | #endif |
146 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
147 | * Emacs will notice this stuff at the end of the file and automatically | ||
148 | * adjust the settings for this buffer only. This must remain at the end | ||
149 | * of the file. | ||
150 | * --------------------------------------------------------------------------- | ||
151 | * Local variables: | ||
152 | * c-file-style: "linux" | ||
153 | * End: | ||
154 | */ | ||
diff --git a/include/asm-um/processor-i386.h b/include/asm-um/processor-i386.h index 431bad3ae9d7..4108a579eb92 100644 --- a/include/asm-um/processor-i386.h +++ b/include/asm-um/processor-i386.h | |||
@@ -43,17 +43,10 @@ static inline void rep_nop(void) | |||
43 | #define ARCH_IS_STACKGROW(address) \ | 43 | #define ARCH_IS_STACKGROW(address) \ |
44 | (address + 32 >= UPT_SP(¤t->thread.regs.regs)) | 44 | (address + 32 >= UPT_SP(¤t->thread.regs.regs)) |
45 | 45 | ||
46 | #define KSTK_EIP(tsk) KSTK_REG(tsk, EIP) | ||
47 | #define KSTK_ESP(tsk) KSTK_REG(tsk, UESP) | ||
48 | #define KSTK_EBP(tsk) KSTK_REG(tsk, EBP) | ||
49 | |||
46 | #include "asm/processor-generic.h" | 50 | #include "asm/processor-generic.h" |
47 | 51 | ||
48 | #endif | 52 | #endif |
49 | |||
50 | /* | ||
51 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
52 | * Emacs will notice this stuff at the end of the file and automatically | ||
53 | * adjust the settings for this buffer only. This must remain at the end | ||
54 | * of the file. | ||
55 | * --------------------------------------------------------------------------- | ||
56 | * Local variables: | ||
57 | * c-file-style: "linux" | ||
58 | * End: | ||
59 | */ | ||
diff --git a/include/asm-um/processor-x86_64.h b/include/asm-um/processor-x86_64.h index 0beb9a42ae05..e1e1255a1d36 100644 --- a/include/asm-um/processor-x86_64.h +++ b/include/asm-um/processor-x86_64.h | |||
@@ -36,17 +36,9 @@ extern inline void rep_nop(void) | |||
36 | #define ARCH_IS_STACKGROW(address) \ | 36 | #define ARCH_IS_STACKGROW(address) \ |
37 | (address + 128 >= UPT_SP(¤t->thread.regs.regs)) | 37 | (address + 128 >= UPT_SP(¤t->thread.regs.regs)) |
38 | 38 | ||
39 | #define KSTK_EIP(tsk) KSTK_REG(tsk, RIP) | ||
40 | #define KSTK_ESP(tsk) KSTK_REG(tsk, RSP) | ||
41 | |||
39 | #include "asm/processor-generic.h" | 42 | #include "asm/processor-generic.h" |
40 | 43 | ||
41 | #endif | 44 | #endif |
42 | |||
43 | /* | ||
44 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
45 | * Emacs will notice this stuff at the end of the file and automatically | ||
46 | * adjust the settings for this buffer only. This must remain at the end | ||
47 | * of the file. | ||
48 | * --------------------------------------------------------------------------- | ||
49 | * Local variables: | ||
50 | * c-file-style: "linux" | ||
51 | * End: | ||
52 | */ | ||
diff --git a/include/linux/ata.h b/include/linux/ata.h index a5b74efab067..ecb7346d0c16 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
@@ -132,6 +132,7 @@ enum { | |||
132 | ATA_CMD_PACKET = 0xA0, | 132 | ATA_CMD_PACKET = 0xA0, |
133 | ATA_CMD_VERIFY = 0x40, | 133 | ATA_CMD_VERIFY = 0x40, |
134 | ATA_CMD_VERIFY_EXT = 0x42, | 134 | ATA_CMD_VERIFY_EXT = 0x42, |
135 | ATA_CMD_INIT_DEV_PARAMS = 0x91, | ||
135 | 136 | ||
136 | /* SETFEATURES stuff */ | 137 | /* SETFEATURES stuff */ |
137 | SETFEATURES_XFER = 0x03, | 138 | SETFEATURES_XFER = 0x03, |
@@ -146,14 +147,14 @@ enum { | |||
146 | XFER_MW_DMA_2 = 0x22, | 147 | XFER_MW_DMA_2 = 0x22, |
147 | XFER_MW_DMA_1 = 0x21, | 148 | XFER_MW_DMA_1 = 0x21, |
148 | XFER_MW_DMA_0 = 0x20, | 149 | XFER_MW_DMA_0 = 0x20, |
150 | XFER_SW_DMA_2 = 0x12, | ||
151 | XFER_SW_DMA_1 = 0x11, | ||
152 | XFER_SW_DMA_0 = 0x10, | ||
149 | XFER_PIO_4 = 0x0C, | 153 | XFER_PIO_4 = 0x0C, |
150 | XFER_PIO_3 = 0x0B, | 154 | XFER_PIO_3 = 0x0B, |
151 | XFER_PIO_2 = 0x0A, | 155 | XFER_PIO_2 = 0x0A, |
152 | XFER_PIO_1 = 0x09, | 156 | XFER_PIO_1 = 0x09, |
153 | XFER_PIO_0 = 0x08, | 157 | XFER_PIO_0 = 0x08, |
154 | XFER_SW_DMA_2 = 0x12, | ||
155 | XFER_SW_DMA_1 = 0x11, | ||
156 | XFER_SW_DMA_0 = 0x10, | ||
157 | XFER_PIO_SLOW = 0x00, | 158 | XFER_PIO_SLOW = 0x00, |
158 | 159 | ||
159 | /* ATAPI stuff */ | 160 | /* ATAPI stuff */ |
@@ -181,6 +182,7 @@ enum { | |||
181 | ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ | 182 | ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ |
182 | ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ | 183 | ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ |
183 | ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ | 184 | ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ |
185 | ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ | ||
184 | }; | 186 | }; |
185 | 187 | ||
186 | enum ata_tf_protocols { | 188 | enum ata_tf_protocols { |
@@ -250,6 +252,18 @@ struct ata_taskfile { | |||
250 | ((u64) (id)[(n) + 1] << 16) | \ | 252 | ((u64) (id)[(n) + 1] << 16) | \ |
251 | ((u64) (id)[(n) + 0]) ) | 253 | ((u64) (id)[(n) + 0]) ) |
252 | 254 | ||
255 | static inline int ata_id_current_chs_valid(u16 *id) | ||
256 | { | ||
257 | /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command | ||
258 | has not been issued to the device then the values of | ||
259 | id[54] to id[56] are vendor specific. */ | ||
260 | return (id[53] & 0x01) && /* Current translation valid */ | ||
261 | id[54] && /* cylinders in current translation */ | ||
262 | id[55] && /* heads in current translation */ | ||
263 | id[55] <= 16 && | ||
264 | id[56]; /* sectors in current translation */ | ||
265 | } | ||
266 | |||
253 | static inline int atapi_cdb_len(u16 *dev_id) | 267 | static inline int atapi_cdb_len(u16 *dev_id) |
254 | { | 268 | { |
255 | u16 tmp = dev_id[0] & 0x3; | 269 | u16 tmp = dev_id[0] & 0x3; |
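The new ata_id_current_chs_valid() helper above decides whether the current-geometry words of an IDENTIFY DEVICE page can be trusted. Below is a sketch of how a caller might pick a CHS geometry with it; the function is hypothetical, and the fallback words 1, 3 and 6 are the standard default-geometry fields of the same identify page.

	/* Hypothetical caller; dev uses the cylinders/heads/sectors fields
	 * added to struct ata_device in this series. */
	static void demo_pick_chs(struct ata_device *dev, u16 *id)
	{
		if (ata_id_current_chs_valid(id)) {
			dev->cylinders = id[54];	/* current cylinders */
			dev->heads     = id[55];	/* current heads */
			dev->sectors   = id[56];	/* current sectors/track */
		} else {
			dev->cylinders = id[1];		/* default cylinders */
			dev->heads     = id[3];		/* default heads */
			dev->sectors   = id[6];		/* default sectors/track */
		}
	}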
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 9f374cfa1b05..f1fd849e5535 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h | |||
@@ -457,7 +457,7 @@ static inline void atm_dev_put(struct atm_dev *dev) | |||
457 | 457 | ||
458 | int atm_charge(struct atm_vcc *vcc,int truesize); | 458 | int atm_charge(struct atm_vcc *vcc,int truesize); |
459 | struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, | 459 | struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, |
460 | int gfp_flags); | 460 | unsigned int __nocast gfp_flags); |
461 | int atm_pcr_goal(struct atm_trafprm *tp); | 461 | int atm_pcr_goal(struct atm_trafprm *tp); |
462 | 462 | ||
463 | void vcc_release_async(struct atm_vcc *vcc, int reply); | 463 | void vcc_release_async(struct atm_vcc *vcc, int reply); |
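atm_alloc_charge() is the first of many prototypes in this series whose gfp argument changes from plain int to unsigned int __nocast. The annotation is for sparse only and does not change generated code: it marks the parameter as a type that must not be produced by silent integer conversion, so passing a bare int where GFP_* flags are expected can be flagged. A minimal sketch, assuming the sparse annotations of this tree:

	/* Illustration only: declaring and forwarding a __nocast gfp argument. */
	#include <linux/slab.h>

	static void *demo_alloc(size_t len, unsigned int __nocast gfp_flags)
	{
		/* The flags are forwarded unchanged; kmalloc() takes the same
		 * annotated type, so no implicit conversion is involved. */
		return kmalloc(len, gfp_flags);
	}

	/* Callers keep passing the usual constants, e.g. demo_alloc(64, GFP_ATOMIC). */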
diff --git a/include/linux/bfs_fs.h b/include/linux/bfs_fs.h index c1237aa92e38..8ed6dfdcd783 100644 --- a/include/linux/bfs_fs.h +++ b/include/linux/bfs_fs.h | |||
@@ -20,19 +20,19 @@ | |||
20 | 20 | ||
21 | /* BFS inode layout on disk */ | 21 | /* BFS inode layout on disk */ |
22 | struct bfs_inode { | 22 | struct bfs_inode { |
23 | __u16 i_ino; | 23 | __le16 i_ino; |
24 | __u16 i_unused; | 24 | __u16 i_unused; |
25 | __u32 i_sblock; | 25 | __le32 i_sblock; |
26 | __u32 i_eblock; | 26 | __le32 i_eblock; |
27 | __u32 i_eoffset; | 27 | __le32 i_eoffset; |
28 | __u32 i_vtype; | 28 | __le32 i_vtype; |
29 | __u32 i_mode; | 29 | __le32 i_mode; |
30 | __s32 i_uid; | 30 | __le32 i_uid; |
31 | __s32 i_gid; | 31 | __le32 i_gid; |
32 | __u32 i_nlink; | 32 | __le32 i_nlink; |
33 | __u32 i_atime; | 33 | __le32 i_atime; |
34 | __u32 i_mtime; | 34 | __le32 i_mtime; |
35 | __u32 i_ctime; | 35 | __le32 i_ctime; |
36 | __u32 i_padding[4]; | 36 | __u32 i_padding[4]; |
37 | }; | 37 | }; |
38 | 38 | ||
@@ -41,17 +41,17 @@ struct bfs_inode { | |||
41 | #define BFS_DIRS_PER_BLOCK 32 | 41 | #define BFS_DIRS_PER_BLOCK 32 |
42 | 42 | ||
43 | struct bfs_dirent { | 43 | struct bfs_dirent { |
44 | __u16 ino; | 44 | __le16 ino; |
45 | char name[BFS_NAMELEN]; | 45 | char name[BFS_NAMELEN]; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | /* BFS superblock layout on disk */ | 48 | /* BFS superblock layout on disk */ |
49 | struct bfs_super_block { | 49 | struct bfs_super_block { |
50 | __u32 s_magic; | 50 | __le32 s_magic; |
51 | __u32 s_start; | 51 | __le32 s_start; |
52 | __u32 s_end; | 52 | __le32 s_end; |
53 | __s32 s_from; | 53 | __le32 s_from; |
54 | __s32 s_to; | 54 | __le32 s_to; |
55 | __s32 s_bfrom; | 55 | __s32 s_bfrom; |
56 | __s32 s_bto; | 56 | __s32 s_bto; |
57 | char s_fsname[6]; | 57 | char s_fsname[6]; |
@@ -66,15 +66,15 @@ struct bfs_super_block { | |||
66 | #define BFS_INO2OFF(ino) \ | 66 | #define BFS_INO2OFF(ino) \ |
67 | ((__u32)(((ino) - BFS_ROOT_INO) * sizeof(struct bfs_inode)) + BFS_BSIZE) | 67 | ((__u32)(((ino) - BFS_ROOT_INO) * sizeof(struct bfs_inode)) + BFS_BSIZE) |
68 | #define BFS_NZFILESIZE(ip) \ | 68 | #define BFS_NZFILESIZE(ip) \ |
69 | ((cpu_to_le32((ip)->i_eoffset) + 1) - cpu_to_le32((ip)->i_sblock) * BFS_BSIZE) | 69 | ((le32_to_cpu((ip)->i_eoffset) + 1) - le32_to_cpu((ip)->i_sblock) * BFS_BSIZE) |
70 | 70 | ||
71 | #define BFS_FILESIZE(ip) \ | 71 | #define BFS_FILESIZE(ip) \ |
72 | ((ip)->i_sblock == 0 ? 0 : BFS_NZFILESIZE(ip)) | 72 | ((ip)->i_sblock == 0 ? 0 : BFS_NZFILESIZE(ip)) |
73 | 73 | ||
74 | #define BFS_FILEBLOCKS(ip) \ | 74 | #define BFS_FILEBLOCKS(ip) \ |
75 | ((ip)->i_sblock == 0 ? 0 : (cpu_to_le32((ip)->i_eblock) + 1) - cpu_to_le32((ip)->i_sblock)) | 75 | ((ip)->i_sblock == 0 ? 0 : (le32_to_cpu((ip)->i_eblock) + 1) - le32_to_cpu((ip)->i_sblock)) |
76 | #define BFS_UNCLEAN(bfs_sb, sb) \ | 76 | #define BFS_UNCLEAN(bfs_sb, sb) \ |
77 | ((cpu_to_le32(bfs_sb->s_from) != -1) && (cpu_to_le32(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY)) | 77 | ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY)) |
78 | 78 | ||
79 | 79 | ||
80 | #endif /* _LINUX_BFS_FS_H */ | 80 | #endif /* _LINUX_BFS_FS_H */ |
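The bfs_fs.h hunk switches the on-disk fields to __le16/__le32 and the size macros from cpu_to_le32() to le32_to_cpu(), which is the correct direction when reading disk-endian values into CPU order. A small sketch of the same pattern with a hypothetical reader:

	/* Hypothetical reader of an on-disk BFS inode. */
	static loff_t demo_bfs_size(const struct bfs_inode *ip)
	{
		/* Fields are little-endian on disk (__le32); convert to CPU
		 * byte order before doing arithmetic on them. */
		if (le32_to_cpu(ip->i_sblock) == 0)
			return 0;
		return (le32_to_cpu(ip->i_eoffset) + 1) -
		       le32_to_cpu(ip->i_sblock) * BFS_BSIZE;
	}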
diff --git a/include/linux/connector.h b/include/linux/connector.h index 86d4b0a81713..96582c9911ac 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h | |||
@@ -149,7 +149,7 @@ struct cn_dev { | |||
149 | 149 | ||
150 | int cn_add_callback(struct cb_id *, char *, void (*callback) (void *)); | 150 | int cn_add_callback(struct cb_id *, char *, void (*callback) (void *)); |
151 | void cn_del_callback(struct cb_id *); | 151 | void cn_del_callback(struct cb_id *); |
152 | int cn_netlink_send(struct cn_msg *, u32, int); | 152 | int cn_netlink_send(struct cn_msg *, u32, unsigned int __nocast); |
153 | 153 | ||
154 | int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); | 154 | int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); |
155 | void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); | 155 | void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 3ab67622ef93..0f89f4121fa3 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -97,6 +97,7 @@ enum { | |||
97 | ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ | 97 | ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ |
98 | ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ | 98 | ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ |
99 | ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */ | 99 | ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */ |
100 | ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */ | ||
100 | 101 | ||
101 | ATA_DEV_UNKNOWN = 0, /* unknown device */ | 102 | ATA_DEV_UNKNOWN = 0, /* unknown device */ |
102 | ATA_DEV_ATA = 1, /* ATA device */ | 103 | ATA_DEV_ATA = 1, /* ATA device */ |
@@ -158,17 +159,21 @@ enum { | |||
158 | /* size of buffer to pad xfers ending on unaligned boundaries */ | 159 | /* size of buffer to pad xfers ending on unaligned boundaries */ |
159 | ATA_DMA_PAD_SZ = 4, | 160 | ATA_DMA_PAD_SZ = 4, |
160 | ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE, | 161 | ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE, |
162 | |||
163 | /* Masks for port functions */ | ||
164 | ATA_PORT_PRIMARY = (1 << 0), | ||
165 | ATA_PORT_SECONDARY = (1 << 1), | ||
161 | }; | 166 | }; |
162 | 167 | ||
163 | enum pio_task_states { | 168 | enum hsm_task_states { |
164 | PIO_ST_UNKNOWN, | 169 | HSM_ST_UNKNOWN, |
165 | PIO_ST_IDLE, | 170 | HSM_ST_IDLE, |
166 | PIO_ST_POLL, | 171 | HSM_ST_POLL, |
167 | PIO_ST_TMOUT, | 172 | HSM_ST_TMOUT, |
168 | PIO_ST, | 173 | HSM_ST, |
169 | PIO_ST_LAST, | 174 | HSM_ST_LAST, |
170 | PIO_ST_LAST_POLL, | 175 | HSM_ST_LAST_POLL, |
171 | PIO_ST_ERR, | 176 | HSM_ST_ERR, |
172 | }; | 177 | }; |
173 | 178 | ||
174 | /* forward declarations */ | 179 | /* forward declarations */ |
@@ -291,6 +296,11 @@ struct ata_device { | |||
291 | u8 xfer_protocol; /* taskfile xfer protocol */ | 296 | u8 xfer_protocol; /* taskfile xfer protocol */ |
292 | u8 read_cmd; /* opcode to use on read */ | 297 | u8 read_cmd; /* opcode to use on read */ |
293 | u8 write_cmd; /* opcode to use on write */ | 298 | u8 write_cmd; /* opcode to use on write */ |
299 | |||
300 | /* for CHS addressing */ | ||
301 | u16 cylinders; /* Number of cylinders */ | ||
302 | u16 heads; /* Number of heads */ | ||
303 | u16 sectors; /* Number of sectors per track */ | ||
294 | }; | 304 | }; |
295 | 305 | ||
296 | struct ata_port { | 306 | struct ata_port { |
@@ -331,7 +341,7 @@ struct ata_port { | |||
331 | struct work_struct packet_task; | 341 | struct work_struct packet_task; |
332 | 342 | ||
333 | struct work_struct pio_task; | 343 | struct work_struct pio_task; |
334 | unsigned int pio_task_state; | 344 | unsigned int hsm_task_state; |
335 | unsigned long pio_task_timeout; | 345 | unsigned long pio_task_timeout; |
336 | 346 | ||
337 | void *private_data; | 347 | void *private_data; |
@@ -412,6 +422,8 @@ extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmn | |||
412 | extern int ata_scsi_error(struct Scsi_Host *host); | 422 | extern int ata_scsi_error(struct Scsi_Host *host); |
413 | extern int ata_scsi_release(struct Scsi_Host *host); | 423 | extern int ata_scsi_release(struct Scsi_Host *host); |
414 | extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); | 424 | extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); |
425 | extern int ata_ratelimit(void); | ||
426 | |||
415 | /* | 427 | /* |
416 | * Default driver ops implementations | 428 | * Default driver ops implementations |
417 | */ | 429 | */ |
@@ -464,7 +476,7 @@ struct pci_bits { | |||
464 | 476 | ||
465 | extern void ata_pci_host_stop (struct ata_host_set *host_set); | 477 | extern void ata_pci_host_stop (struct ata_host_set *host_set); |
466 | extern struct ata_probe_ent * | 478 | extern struct ata_probe_ent * |
467 | ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port); | 479 | ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask); |
468 | extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits); | 480 | extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits); |
469 | 481 | ||
470 | #endif /* CONFIG_PCI */ | 482 | #endif /* CONFIG_PCI */ |
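ata_pci_init_native_mode() above gains a portmask argument, paired with the new ATA_PORT_PRIMARY and ATA_PORT_SECONDARY bits. A hedged sketch of a driver requesting both channels; the probe helper is hypothetical.

	/* Hypothetical PCI init path requesting both channels. */
	static struct ata_probe_ent *demo_init_both(struct pci_dev *pdev,
						    struct ata_port_info **port_info)
	{
		return ata_pci_init_native_mode(pdev, port_info,
						ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	}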
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h index 941f45ac117a..1a4990e448e9 100644 --- a/include/linux/textsearch.h +++ b/include/linux/textsearch.h | |||
@@ -158,7 +158,8 @@ extern unsigned int textsearch_find_continuous(struct ts_config *, | |||
158 | #define TS_PRIV_ALIGNTO 8 | 158 | #define TS_PRIV_ALIGNTO 8 |
159 | #define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) | 159 | #define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) |
160 | 160 | ||
161 | static inline struct ts_config *alloc_ts_config(size_t payload, int gfp_mask) | 161 | static inline struct ts_config *alloc_ts_config(size_t payload, |
162 | unsigned int __nocast gfp_mask) | ||
162 | { | 163 | { |
163 | struct ts_config *conf; | 164 | struct ts_config *conf; |
164 | 165 | ||
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h index 6bbeafa73e8b..8a0891e2e888 100644 --- a/include/net/dn_nsp.h +++ b/include/net/dn_nsp.h | |||
@@ -19,9 +19,9 @@ extern void dn_nsp_send_data_ack(struct sock *sk); | |||
19 | extern void dn_nsp_send_oth_ack(struct sock *sk); | 19 | extern void dn_nsp_send_oth_ack(struct sock *sk); |
20 | extern void dn_nsp_delayed_ack(struct sock *sk); | 20 | extern void dn_nsp_delayed_ack(struct sock *sk); |
21 | extern void dn_send_conn_ack(struct sock *sk); | 21 | extern void dn_send_conn_ack(struct sock *sk); |
22 | extern void dn_send_conn_conf(struct sock *sk, int gfp); | 22 | extern void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp); |
23 | extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, | 23 | extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, |
24 | unsigned short reason, int gfp); | 24 | unsigned short reason, unsigned int __nocast gfp); |
25 | extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type, | 25 | extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type, |
26 | unsigned short reason); | 26 | unsigned short reason); |
27 | extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); | 27 | extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); |
@@ -29,14 +29,14 @@ extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags); | |||
29 | 29 | ||
30 | extern void dn_nsp_output(struct sock *sk); | 30 | extern void dn_nsp_output(struct sock *sk); |
31 | extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum); | 31 | extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum); |
32 | extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oob); | 32 | extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, unsigned int __nocast gfp, int oob); |
33 | extern unsigned long dn_nsp_persist(struct sock *sk); | 33 | extern unsigned long dn_nsp_persist(struct sock *sk); |
34 | extern int dn_nsp_xmit_timeout(struct sock *sk); | 34 | extern int dn_nsp_xmit_timeout(struct sock *sk); |
35 | 35 | ||
36 | extern int dn_nsp_rx(struct sk_buff *); | 36 | extern int dn_nsp_rx(struct sk_buff *); |
37 | extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); | 37 | extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); |
38 | 38 | ||
39 | extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri); | 39 | extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri); |
40 | extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err); | 40 | extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err); |
41 | 41 | ||
42 | #define NSP_REASON_OK 0 /* No error */ | 42 | #define NSP_REASON_OK 0 /* No error */ |
diff --git a/include/net/dn_route.h b/include/net/dn_route.h index d084721db198..11fe973cf383 100644 --- a/include/net/dn_route.h +++ b/include/net/dn_route.h | |||
@@ -15,7 +15,7 @@ | |||
15 | GNU General Public License for more details. | 15 | GNU General Public License for more details. |
16 | *******************************************************************************/ | 16 | *******************************************************************************/ |
17 | 17 | ||
18 | extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri); | 18 | extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri); |
19 | extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags); | 19 | extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags); |
20 | extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); | 20 | extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); |
21 | extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); | 21 | extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); |
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 35f49e65e295..f50f95968340 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h | |||
@@ -40,7 +40,7 @@ | |||
40 | struct inet_ehash_bucket { | 40 | struct inet_ehash_bucket { |
41 | rwlock_t lock; | 41 | rwlock_t lock; |
42 | struct hlist_head chain; | 42 | struct hlist_head chain; |
43 | } __attribute__((__aligned__(8))); | 43 | }; |
44 | 44 | ||
45 | /* There are a few simple rules, which allow for local port reuse by | 45 | /* There are a few simple rules, which allow for local port reuse by |
46 | * an application. In essence: | 46 | * an application. In essence: |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 06b4235aa016..ecb2b061f597 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -832,7 +832,7 @@ extern void ip_vs_app_inc_put(struct ip_vs_app *inc); | |||
832 | 832 | ||
833 | extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb); | 833 | extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb); |
834 | extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb); | 834 | extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb); |
835 | extern int ip_vs_skb_replace(struct sk_buff *skb, int pri, | 835 | extern int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri, |
836 | char *o_buf, int o_len, char *n_buf, int n_len); | 836 | char *o_buf, int o_len, char *n_buf, int n_len); |
837 | extern int ip_vs_app_init(void); | 837 | extern int ip_vs_app_init(void); |
838 | extern void ip_vs_app_cleanup(void); | 838 | extern void ip_vs_app_cleanup(void); |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index a9d0d8c5dfbf..b6e72f890c6c 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -875,7 +875,7 @@ static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsig | |||
875 | } | 875 | } |
876 | #endif | 876 | #endif |
877 | 877 | ||
878 | struct xfrm_policy *xfrm_policy_alloc(int gfp); | 878 | struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp); |
879 | extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *); | 879 | extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *); |
880 | int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); | 880 | int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); |
881 | struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel, | 881 | struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel, |
@@ -931,4 +931,9 @@ static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b, | |||
931 | } | 931 | } |
932 | } | 932 | } |
933 | 933 | ||
934 | static inline int xfrm_policy_id2dir(u32 index) | ||
935 | { | ||
936 | return index & 7; | ||
937 | } | ||
938 | |||
934 | #endif /* _NET_XFRM_H */ | 939 | #endif /* _NET_XFRM_H */ |
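The new xfrm_policy_id2dir() helper recovers the policy direction from a policy index by masking the low three bits; the assumption that indices are allocated as (sequence << 3) | dir is inferred from the mask, not stated in the hunk. A tiny illustrative check:

	/* Illustration: the direction lives in the low 3 bits of the index. */
	static int demo_dir(void)
	{
		u32 index = (42u << 3) | XFRM_POLICY_OUT;	/* hypothetical index */
		return xfrm_policy_id2dir(index);		/* == XFRM_POLICY_OUT */
	}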
diff --git a/include/rxrpc/call.h b/include/rxrpc/call.h index f48f27e9e0ab..8118731e7d96 100644 --- a/include/rxrpc/call.h +++ b/include/rxrpc/call.h | |||
@@ -203,7 +203,7 @@ extern int rxrpc_call_write_data(struct rxrpc_call *call, | |||
203 | size_t sioc, | 203 | size_t sioc, |
204 | struct kvec *siov, | 204 | struct kvec *siov, |
205 | uint8_t rxhdr_flags, | 205 | uint8_t rxhdr_flags, |
206 | int alloc_flags, | 206 | unsigned int __nocast alloc_flags, |
207 | int dup_data, | 207 | int dup_data, |
208 | size_t *size_sent); | 208 | size_t *size_sent); |
209 | 209 | ||
diff --git a/include/rxrpc/message.h b/include/rxrpc/message.h index 3a59df6870b2..983d9f9eee1a 100644 --- a/include/rxrpc/message.h +++ b/include/rxrpc/message.h | |||
@@ -63,7 +63,7 @@ extern int rxrpc_conn_newmsg(struct rxrpc_connection *conn, | |||
63 | uint8_t type, | 63 | uint8_t type, |
64 | int count, | 64 | int count, |
65 | struct kvec *diov, | 65 | struct kvec *diov, |
66 | int alloc_flags, | 66 | unsigned int __nocast alloc_flags, |
67 | struct rxrpc_message **_msg); | 67 | struct rxrpc_message **_msg); |
68 | 68 | ||
69 | extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg); | 69 | extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg); |
diff --git a/lib/ts_bm.c b/lib/ts_bm.c index 2cc79112ecc3..1b61fceef777 100644 --- a/lib/ts_bm.c +++ b/lib/ts_bm.c | |||
@@ -127,7 +127,7 @@ static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern, | |||
127 | } | 127 | } |
128 | 128 | ||
129 | static struct ts_config *bm_init(const void *pattern, unsigned int len, | 129 | static struct ts_config *bm_init(const void *pattern, unsigned int len, |
130 | int gfp_mask) | 130 | unsigned int __nocast gfp_mask) |
131 | { | 131 | { |
132 | struct ts_config *conf; | 132 | struct ts_config *conf; |
133 | struct ts_bm *bm; | 133 | struct ts_bm *bm; |
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c index d27c0a072940..ef9779e00506 100644 --- a/lib/ts_fsm.c +++ b/lib/ts_fsm.c | |||
@@ -258,7 +258,7 @@ found_match: | |||
258 | } | 258 | } |
259 | 259 | ||
260 | static struct ts_config *fsm_init(const void *pattern, unsigned int len, | 260 | static struct ts_config *fsm_init(const void *pattern, unsigned int len, |
261 | int gfp_mask) | 261 | unsigned int __nocast gfp_mask) |
262 | { | 262 | { |
263 | int i, err = -EINVAL; | 263 | int i, err = -EINVAL; |
264 | struct ts_config *conf; | 264 | struct ts_config *conf; |
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c index 73266b975585..e45f0f0c2379 100644 --- a/lib/ts_kmp.c +++ b/lib/ts_kmp.c | |||
@@ -87,7 +87,7 @@ static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len, | |||
87 | } | 87 | } |
88 | 88 | ||
89 | static struct ts_config *kmp_init(const void *pattern, unsigned int len, | 89 | static struct ts_config *kmp_init(const void *pattern, unsigned int len, |
90 | int gfp_mask) | 90 | unsigned int __nocast gfp_mask) |
91 | { | 91 | { |
92 | struct ts_config *conf; | 92 | struct ts_config *conf; |
93 | struct ts_kmp *kmp; | 93 | struct ts_kmp *kmp; |
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c index b2113c3454ae..71abc99ec815 100644 --- a/net/atm/atm_misc.c +++ b/net/atm/atm_misc.c | |||
@@ -25,7 +25,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize) | |||
25 | 25 | ||
26 | 26 | ||
27 | struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, | 27 | struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, |
28 | int gfp_flags) | 28 | unsigned int __nocast gfp_flags) |
29 | { | 29 | { |
30 | struct sock *sk = sk_atm(vcc); | 30 | struct sock *sk = sk_atm(vcc); |
31 | int guess = atm_guess_pdu2truesize(pdu_size); | 31 | int guess = atm_guess_pdu2truesize(pdu_size); |
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index 810c9c76c2e0..73cfc3411c46 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c | |||
@@ -123,7 +123,7 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) | |||
123 | } | 123 | } |
124 | 124 | ||
125 | skb_pull(skb, 1); /* Remove PID */ | 125 | skb_pull(skb, 1); /* Remove PID */ |
126 | skb->h.raw = skb->data; | 126 | skb->mac.raw = skb->nh.raw; |
127 | skb->nh.raw = skb->data; | 127 | skb->nh.raw = skb->data; |
128 | skb->dev = ax25->ax25_dev->dev; | 128 | skb->dev = ax25->ax25_dev->dev; |
129 | skb->pkt_type = PACKET_HOST; | 129 | skb->pkt_type = PACKET_HOST; |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 348f36b529f7..34d4128d56d5 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -452,7 +452,8 @@ static struct proto dn_proto = { | |||
452 | .obj_size = sizeof(struct dn_sock), | 452 | .obj_size = sizeof(struct dn_sock), |
453 | }; | 453 | }; |
454 | 454 | ||
455 | static struct sock *dn_alloc_sock(struct socket *sock, int gfp) | 455 | static struct sock *dn_alloc_sock(struct socket *sock, |
456 | unsigned int __nocast gfp) | ||
456 | { | 457 | { |
457 | struct dn_scp *scp; | 458 | struct dn_scp *scp; |
458 | struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1); | 459 | struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1); |
@@ -804,7 +805,8 @@ static int dn_auto_bind(struct socket *sock) | |||
804 | return rv; | 805 | return rv; |
805 | } | 806 | } |
806 | 807 | ||
807 | static int dn_confirm_accept(struct sock *sk, long *timeo, int allocation) | 808 | static int dn_confirm_accept(struct sock *sk, long *timeo, |
809 | unsigned int __nocast allocation) | ||
808 | { | 810 | { |
809 | struct dn_scp *scp = DN_SK(sk); | 811 | struct dn_scp *scp = DN_SK(sk); |
810 | DEFINE_WAIT(wait); | 812 | DEFINE_WAIT(wait); |
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index 53633d352868..cd08244aa10c 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c | |||
@@ -117,7 +117,8 @@ try_again: | |||
117 | * The eventual aim is for each socket to have a cached header size | 117 | * The eventual aim is for each socket to have a cached header size |
118 | * for its outgoing packets, and to set hdr from this when sk != NULL. | 118 | * for its outgoing packets, and to set hdr from this when sk != NULL. |
119 | */ | 119 | */ |
120 | struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri) | 120 | struct sk_buff *dn_alloc_skb(struct sock *sk, int size, |
121 | unsigned int __nocast pri) | ||
121 | { | 122 | { |
122 | struct sk_buff *skb; | 123 | struct sk_buff *skb; |
123 | int hdr = 64; | 124 | int hdr = 64; |
@@ -210,7 +211,8 @@ static void dn_nsp_rtt(struct sock *sk, long rtt) | |||
210 | * | 211 | * |
211 | * Returns: The number of times the packet has been sent previously | 212 | * Returns: The number of times the packet has been sent previously |
212 | */ | 213 | */ |
213 | static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, int gfp) | 214 | static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, |
215 | unsigned int __nocast gfp) | ||
214 | { | 216 | { |
215 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 217 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
216 | struct sk_buff *skb2; | 218 | struct sk_buff *skb2; |
@@ -350,7 +352,8 @@ static unsigned short *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *sk | |||
350 | return ptr; | 352 | return ptr; |
351 | } | 353 | } |
352 | 354 | ||
353 | void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oth) | 355 | void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, |
356 | unsigned int __nocast gfp, int oth) | ||
354 | { | 357 | { |
355 | struct dn_scp *scp = DN_SK(sk); | 358 | struct dn_scp *scp = DN_SK(sk); |
356 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 359 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
@@ -517,7 +520,7 @@ static int dn_nsp_retrans_conn_conf(struct sock *sk) | |||
517 | return 0; | 520 | return 0; |
518 | } | 521 | } |
519 | 522 | ||
520 | void dn_send_conn_conf(struct sock *sk, int gfp) | 523 | void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp) |
521 | { | 524 | { |
522 | struct dn_scp *scp = DN_SK(sk); | 525 | struct dn_scp *scp = DN_SK(sk); |
523 | struct sk_buff *skb = NULL; | 526 | struct sk_buff *skb = NULL; |
@@ -549,7 +552,8 @@ void dn_send_conn_conf(struct sock *sk, int gfp) | |||
549 | 552 | ||
550 | 553 | ||
551 | static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, | 554 | static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, |
552 | unsigned short reason, int gfp, struct dst_entry *dst, | 555 | unsigned short reason, unsigned int __nocast gfp, |
556 | struct dst_entry *dst, | ||
553 | int ddl, unsigned char *dd, __u16 rem, __u16 loc) | 557 | int ddl, unsigned char *dd, __u16 rem, __u16 loc) |
554 | { | 558 | { |
555 | struct sk_buff *skb = NULL; | 559 | struct sk_buff *skb = NULL; |
@@ -591,7 +595,7 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, | |||
591 | 595 | ||
592 | 596 | ||
593 | void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, | 597 | void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, |
594 | unsigned short reason, int gfp) | 598 | unsigned short reason, unsigned int __nocast gfp) |
595 | { | 599 | { |
596 | struct dn_scp *scp = DN_SK(sk); | 600 | struct dn_scp *scp = DN_SK(sk); |
597 | int ddl = 0; | 601 | int ddl = 0; |
@@ -612,7 +616,7 @@ void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg, | |||
612 | { | 616 | { |
613 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 617 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
614 | int ddl = 0; | 618 | int ddl = 0; |
615 | int gfp = GFP_ATOMIC; | 619 | unsigned int __nocast gfp = GFP_ATOMIC; |
616 | 620 | ||
617 | dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, | 621 | dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, |
618 | NULL, cb->src_port, cb->dst_port); | 622 | NULL, cb->src_port, cb->dst_port); |
@@ -624,7 +628,7 @@ void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval) | |||
624 | struct dn_scp *scp = DN_SK(sk); | 628 | struct dn_scp *scp = DN_SK(sk); |
625 | struct sk_buff *skb; | 629 | struct sk_buff *skb; |
626 | unsigned char *ptr; | 630 | unsigned char *ptr; |
627 | int gfp = GFP_ATOMIC; | 631 | unsigned int __nocast gfp = GFP_ATOMIC; |
628 | 632 | ||
629 | if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) | 633 | if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) |
630 | return; | 634 | return; |
@@ -659,7 +663,8 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg) | |||
659 | unsigned char menuver; | 663 | unsigned char menuver; |
660 | struct dn_skb_cb *cb; | 664 | struct dn_skb_cb *cb; |
661 | unsigned char type = 1; | 665 | unsigned char type = 1; |
662 | int allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; | 666 | unsigned int __nocast allocation = |
667 | (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; | ||
663 | struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); | 668 | struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); |
664 | 669 | ||
665 | if (!skb) | 670 | if (!skb) |
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index c9aaff3fea1e..ecdf9f7a538f 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c | |||
@@ -207,7 +207,7 @@ void ieee80211_txb_free(struct ieee80211_txb *txb) | |||
207 | } | 207 | } |
208 | 208 | ||
209 | static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, | 209 | static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, |
210 | int gfp_mask) | 210 | unsigned int __nocast gfp_mask) |
211 | { | 211 | { |
212 | struct ieee80211_txb *txb; | 212 | struct ieee80211_txb *txb; |
213 | int i; | 213 | int i; |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 50c0519cd70d..0093ea08c7f5 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -286,6 +286,8 @@ static inline void check_tnode(const struct tnode *tn) | |||
286 | 286 | ||
287 | static int halve_threshold = 25; | 287 | static int halve_threshold = 25; |
288 | static int inflate_threshold = 50; | 288 | static int inflate_threshold = 50; |
289 | static int halve_threshold_root = 15; | ||
290 | static int inflate_threshold_root = 25; | ||
289 | 291 | ||
290 | 292 | ||
291 | static void __alias_free_mem(struct rcu_head *head) | 293 | static void __alias_free_mem(struct rcu_head *head) |
@@ -449,6 +451,8 @@ static struct node *resize(struct trie *t, struct tnode *tn) | |||
449 | int i; | 451 | int i; |
450 | int err = 0; | 452 | int err = 0; |
451 | struct tnode *old_tn; | 453 | struct tnode *old_tn; |
454 | int inflate_threshold_use; | ||
455 | int halve_threshold_use; | ||
452 | 456 | ||
453 | if (!tn) | 457 | if (!tn) |
454 | return NULL; | 458 | return NULL; |
@@ -541,10 +545,17 @@ static struct node *resize(struct trie *t, struct tnode *tn) | |||
541 | 545 | ||
542 | check_tnode(tn); | 546 | check_tnode(tn); |
543 | 547 | ||
548 | /* Keep root node larger */ | ||
549 | |||
550 | if(!tn->parent) | ||
551 | inflate_threshold_use = inflate_threshold_root; | ||
552 | else | ||
553 | inflate_threshold_use = inflate_threshold; | ||
554 | |||
544 | err = 0; | 555 | err = 0; |
545 | while ((tn->full_children > 0 && | 556 | while ((tn->full_children > 0 && |
546 | 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >= | 557 | 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >= |
547 | inflate_threshold * tnode_child_length(tn))) { | 558 | inflate_threshold_use * tnode_child_length(tn))) { |
548 | 559 | ||
549 | old_tn = tn; | 560 | old_tn = tn; |
550 | tn = inflate(t, tn); | 561 | tn = inflate(t, tn); |
@@ -564,10 +575,18 @@ static struct node *resize(struct trie *t, struct tnode *tn) | |||
564 | * node is above threshold. | 575 | * node is above threshold. |
565 | */ | 576 | */ |
566 | 577 | ||
578 | |||
579 | /* Keep root node larger */ | ||
580 | |||
581 | if(!tn->parent) | ||
582 | halve_threshold_use = halve_threshold_root; | ||
583 | else | ||
584 | halve_threshold_use = halve_threshold; | ||
585 | |||
567 | err = 0; | 586 | err = 0; |
568 | while (tn->bits > 1 && | 587 | while (tn->bits > 1 && |
569 | 100 * (tnode_child_length(tn) - tn->empty_children) < | 588 | 100 * (tnode_child_length(tn) - tn->empty_children) < |
570 | halve_threshold * tnode_child_length(tn)) { | 589 | halve_threshold_use * tnode_child_length(tn)) { |
571 | 590 | ||
572 | old_tn = tn; | 591 | old_tn = tn; |
573 | tn = halve(t, tn); | 592 | tn = halve(t, tn); |
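
The fib_trie hunks give the root tnode its own, lower inflate/halve thresholds, so the root is resized more aggressively and stays wider than interior nodes (a wide root means fewer levels to walk on the lookup path). A standalone model of the threshold selection and the two resize tests, using a simplified tnode and illustrative names (toy_tnode, should_inflate, should_halve) rather than the kernel's structures:

#include <stdbool.h>
#include <stdio.h>

/* Thresholds as in the hunk above: the root gets more permissive values. */
static int inflate_threshold      = 50;
static int halve_threshold        = 25;
static int inflate_threshold_root = 25;
static int halve_threshold_root   = 15;

/* Simplified stand-in for the kernel's tnode. */
struct toy_tnode {
	struct toy_tnode *parent;	/* NULL for the root */
	int bits;			/* child array holds 1 << bits slots */
	int full_children;
	int empty_children;
};

static int child_length(const struct toy_tnode *tn)
{
	return 1 << tn->bits;
}

/* Same tests as the two while loops in resize(), with the threshold
 * picked per node: the root uses the *_root values. */
static bool should_inflate(const struct toy_tnode *tn)
{
	int thr = tn->parent ? inflate_threshold : inflate_threshold_root;

	return tn->full_children > 0 &&
	       50 * (tn->full_children + child_length(tn) - tn->empty_children) >=
	       thr * child_length(tn);
}

static bool should_halve(const struct toy_tnode *tn)
{
	int thr = tn->parent ? halve_threshold : halve_threshold_root;

	return tn->bits > 1 &&
	       100 * (child_length(tn) - tn->empty_children) <
	       thr * child_length(tn);
}

int main(void)
{
	/* A root with 16 slots, 5 full children, 2 empty slots. */
	struct toy_tnode root = { NULL, 4, 5, 2 };

	printf("inflate root? %d  halve root? %d\n",
	       should_inflate(&root), should_halve(&root));
	return 0;
}
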
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c index 6e092dadb388..b942ff3c8860 100644 --- a/net/ipv4/ipvs/ip_vs_app.c +++ b/net/ipv4/ipvs/ip_vs_app.c | |||
@@ -604,7 +604,7 @@ static struct file_operations ip_vs_app_fops = { | |||
604 | /* | 604 | /* |
605 | * Replace a segment of data with a new segment | 605 | * Replace a segment of data with a new segment |
606 | */ | 606 | */ |
607 | int ip_vs_skb_replace(struct sk_buff *skb, int pri, | 607 | int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri, |
608 | char *o_buf, int o_len, char *n_buf, int n_len) | 608 | char *o_buf, int o_len, char *n_buf, int n_len) |
609 | { | 609 | { |
610 | struct iphdr *iph; | 610 | struct iphdr *iph; |
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 2cd7e7d1ac90..a7659728e7a0 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig | |||
@@ -141,7 +141,7 @@ config IP_NF_PPTP | |||
141 | tristate 'PPTP protocol support' | 141 | tristate 'PPTP protocol support' |
142 | help | 142 | help |
143 | This module adds support for PPTP (Point to Point Tunnelling | 143 | This module adds support for PPTP (Point to Point Tunnelling |
144 | Protocol, RFC2637) conncection tracking and NAT. | 144 | Protocol, RFC2637) connection tracking and NAT. |
145 | 145 | ||
146 | If you are running PPTP sessions over a stateful firewall or NAT | 146 | If you are running PPTP sessions over a stateful firewall or NAT |
147 | box, you may want to enable this feature. | 147 | box, you may want to enable this feature. |
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index b940346de4e7..6d80e063c187 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c | |||
@@ -136,7 +136,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | |||
136 | else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1)) | 136 | else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1)) |
137 | /* slow start */ | 137 | /* slow start */ |
138 | ca->cnt = (cwnd * (BICTCP_B-1)) | 138 | ca->cnt = (cwnd * (BICTCP_B-1)) |
139 | / cwnd-ca->last_max_cwnd; | 139 | / (cwnd - ca->last_max_cwnd); |
140 | else | 140 | else |
141 | /* linear increase */ | 141 | /* linear increase */ |
142 | ca->cnt = cwnd / max_increment; | 142 | ca->cnt = cwnd / max_increment; |
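
The tcp_bic hunk is a pure operator-precedence fix: "/" binds tighter than "-", so the old expression computed (cwnd * (BICTCP_B-1)) / cwnd and then subtracted last_max_cwnd, instead of dividing by the difference; in unsigned arithmetic the subtraction wraps and leaves a huge ca->cnt. A tiny standalone demonstration with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int cwnd = 100, last_max_cwnd = 80;
	const unsigned int BICTCP_B = 4;	/* value used by tcp_bic.c */

	/* Old reading: (cwnd * (B-1)) / cwnd, minus last_max_cwnd, which
	 * wraps in unsigned arithmetic whenever last_max_cwnd > B-1. */
	unsigned int buggy = cwnd * (BICTCP_B - 1) / cwnd - last_max_cwnd;

	/* Fixed reading: divide by the distance to the last maximum. */
	unsigned int fixed = cwnd * (BICTCP_B - 1) / (cwnd - last_max_cwnd);

	printf("buggy cnt: %u\n", buggy);	/* 3 - 80, wrapped: ~4.29e9 */
	printf("fixed cnt: %u\n", fixed);	/* 300 / 20 = 15 */
	return 0;
}
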
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 519899fb11d5..39a96c768102 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -1393,7 +1393,7 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1393 | 1393 | ||
1394 | static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) | 1394 | static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) |
1395 | { | 1395 | { |
1396 | return sizeof(struct mld2_grec) + 4*mld_scount(pmc,type,gdel,sdel); | 1396 | return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel); |
1397 | } | 1397 | } |
1398 | 1398 | ||
1399 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, | 1399 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, |
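
The grec_size() change reflects that an MLDv2 group record carries 16-byte IPv6 source addresses; the old per-source figure of 4 bytes matches the IPv4/IGMPv3 sizing, so records with sources were under-counted when reply buffers were sized. A rough userspace model of the calculation (toy_mld2_grec and toy_grec_size are approximations, not the kernel's definitions):

#include <stdio.h>
#include <netinet/in.h>

/* Approximate layout of an MLDv2 group record header; the real struct is
 * mld2_grec in the kernel headers. */
struct toy_mld2_grec {
	unsigned char   grec_type;
	unsigned char   grec_auxwords;
	unsigned short  grec_nsrcs;
	struct in6_addr grec_mca;	/* the multicast address reported */
	/* followed by grec_nsrcs IPv6 source addresses, 16 bytes each */
};

static size_t toy_grec_size(int nsrcs)
{
	/* 16 bytes per source, as in the fixed grec_size() above;
	 * the old code added only 4 per source. */
	return sizeof(struct toy_mld2_grec) + sizeof(struct in6_addr) * nsrcs;
}

int main(void)
{
	printf("record with 3 sources: %zu bytes\n", toy_grec_size(3));
	return 0;
}
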
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 555a31347eda..305d9ee6d7db 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -1450,7 +1450,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1450 | 1450 | ||
1451 | static void pndisc_redo(struct sk_buff *skb) | 1451 | static void pndisc_redo(struct sk_buff *skb) |
1452 | { | 1452 | { |
1453 | ndisc_rcv(skb); | 1453 | ndisc_recv_ns(skb); |
1454 | kfree_skb(skb); | 1454 | kfree_skb(skb); |
1455 | } | 1455 | } |
1456 | 1456 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index e4cad11f284a..bf9519341fd3 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -99,7 +99,7 @@ static int udp_v6_get_port(struct sock *sk, unsigned short snum) | |||
99 | next:; | 99 | next:; |
100 | } | 100 | } |
101 | result = best; | 101 | result = best; |
102 | for(;; result += UDP_HTABLE_SIZE) { | 102 | for(i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) { |
103 | if (result > sysctl_local_port_range[1]) | 103 | if (result > sysctl_local_port_range[1]) |
104 | result = sysctl_local_port_range[0] | 104 | result = sysctl_local_port_range[0] |
105 | + ((result - sysctl_local_port_range[0]) & | 105 | + ((result - sysctl_local_port_range[0]) & |
@@ -107,6 +107,8 @@ static int udp_v6_get_port(struct sock *sk, unsigned short snum) | |||
107 | if (!udp_lport_inuse(result)) | 107 | if (!udp_lport_inuse(result)) |
108 | break; | 108 | break; |
109 | } | 109 | } |
110 | if (i >= (1 << 16) / UDP_HTABLE_SIZE) | ||
111 | goto fail; | ||
110 | gotit: | 112 | gotit: |
111 | udp_port_rover = snum = result; | 113 | udp_port_rover = snum = result; |
112 | } else { | 114 | } else { |
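
The udp_v6_get_port hunk bounds the rover walk: previously, if every candidate port was in use, the for(;;) loop never terminated; now the scan covers the 16-bit port space at most once and the bind fails instead. A compilable model of the bounded search, where pick_port, port_in_use and the range handling are simplified stand-ins for the kernel's udp_lport_inuse() and sysctl_local_port_range logic:

#include <stdbool.h>
#include <stdio.h>

#define UDP_HTABLE_SIZE 128

/* Stand-in for udp_lport_inuse(): pretend every port is taken, so the new
 * failure path is the one exercised. */
static bool port_in_use(int port)
{
	(void)port;
	return true;
}

/* Rover walk mirroring the loop above: advance by UDP_HTABLE_SIZE, wrap
 * back into [range_lo, range_hi], and give up after
 * (1 << 16) / UDP_HTABLE_SIZE steps instead of spinning forever. */
static int pick_port(int range_lo, int range_hi, int start)
{
	int result = start;
	int i;

	for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) {
		if (result > range_hi)
			result = range_lo +
				((result - range_lo) & (UDP_HTABLE_SIZE - 1));
		if (!port_in_use(result))
			return result;
	}
	return -1;	/* whole space scanned, nothing free: fail the bind */
}

int main(void)
{
	printf("picked port: %d\n", pick_port(32768, 61000, 32768));
	return 0;
}
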
diff --git a/net/key/af_key.c b/net/key/af_key.c index 4879743b945a..bbf0f69181ba 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -185,7 +185,7 @@ static int pfkey_release(struct socket *sock) | |||
185 | } | 185 | } |
186 | 186 | ||
187 | static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, | 187 | static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, |
188 | int allocation, struct sock *sk) | 188 | unsigned int __nocast allocation, struct sock *sk) |
189 | { | 189 | { |
190 | int err = -ENOBUFS; | 190 | int err = -ENOBUFS; |
191 | 191 | ||
@@ -217,7 +217,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, | |||
217 | #define BROADCAST_ONE 1 | 217 | #define BROADCAST_ONE 1 |
218 | #define BROADCAST_REGISTERED 2 | 218 | #define BROADCAST_REGISTERED 2 |
219 | #define BROADCAST_PROMISC_ONLY 4 | 219 | #define BROADCAST_PROMISC_ONLY 4 |
220 | static int pfkey_broadcast(struct sk_buff *skb, int allocation, | 220 | static int pfkey_broadcast(struct sk_buff *skb, unsigned int __nocast allocation, |
221 | int broadcast_flags, struct sock *one_sk) | 221 | int broadcast_flags, struct sock *one_sk) |
222 | { | 222 | { |
223 | struct sock *sk; | 223 | struct sock *sk; |
@@ -1416,7 +1416,8 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, | |||
1416 | return 0; | 1416 | return 0; |
1417 | } | 1417 | } |
1418 | 1418 | ||
1419 | static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, int allocation) | 1419 | static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, |
1420 | unsigned int __nocast allocation) | ||
1420 | { | 1421 | { |
1421 | struct sk_buff *skb; | 1422 | struct sk_buff *skb; |
1422 | struct sadb_msg *hdr; | 1423 | struct sadb_msg *hdr; |
@@ -2153,6 +2154,7 @@ out: | |||
2153 | 2154 | ||
2154 | static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) | 2155 | static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) |
2155 | { | 2156 | { |
2157 | unsigned int dir; | ||
2156 | int err; | 2158 | int err; |
2157 | struct sadb_x_policy *pol; | 2159 | struct sadb_x_policy *pol; |
2158 | struct xfrm_policy *xp; | 2160 | struct xfrm_policy *xp; |
@@ -2161,7 +2163,11 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h | |||
2161 | if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL) | 2163 | if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL) |
2162 | return -EINVAL; | 2164 | return -EINVAL; |
2163 | 2165 | ||
2164 | xp = xfrm_policy_byid(0, pol->sadb_x_policy_id, | 2166 | dir = xfrm_policy_id2dir(pol->sadb_x_policy_id); |
2167 | if (dir >= XFRM_POLICY_MAX) | ||
2168 | return -EINVAL; | ||
2169 | |||
2170 | xp = xfrm_policy_byid(dir, pol->sadb_x_policy_id, | ||
2165 | hdr->sadb_msg_type == SADB_X_SPDDELETE2); | 2171 | hdr->sadb_msg_type == SADB_X_SPDDELETE2); |
2166 | if (xp == NULL) | 2172 | if (xp == NULL) |
2167 | return -ENOENT; | 2173 | return -ENOENT; |
@@ -2173,9 +2179,9 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h | |||
2173 | if (hdr->sadb_msg_type == SADB_X_SPDDELETE2) { | 2179 | if (hdr->sadb_msg_type == SADB_X_SPDDELETE2) { |
2174 | c.data.byid = 1; | 2180 | c.data.byid = 1; |
2175 | c.event = XFRM_MSG_DELPOLICY; | 2181 | c.event = XFRM_MSG_DELPOLICY; |
2176 | km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c); | 2182 | km_policy_notify(xp, dir, &c); |
2177 | } else { | 2183 | } else { |
2178 | err = key_pol_get_resp(sk, xp, hdr, pol->sadb_x_policy_dir-1); | 2184 | err = key_pol_get_resp(sk, xp, hdr, dir); |
2179 | } | 2185 | } |
2180 | 2186 | ||
2181 | xfrm_pol_put(xp); | 2187 | xfrm_pol_put(xp); |
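
In pfkey_spdget(), the direction is now derived from the policy id itself and checked against XFRM_POLICY_MAX before it is used, rather than passing 0 to the lookup and the unvalidated sadb_x_policy_dir-1 to km_policy_notify(); an out-of-range value is rejected with -EINVAL. A small standalone illustration, assuming the usual three-direction layout and using hypothetical names (policy_id2dir mirrors the open-coded "index & 7" visible in the xfrm_policy.c hunk below):

#include <stdio.h>

#define XFRM_POLICY_MAX 3	/* in, out, forward */

/* The direction lives in the low bits of the policy id. */
static unsigned int policy_id2dir(unsigned int index)
{
	return index & 7;
}

int main(void)
{
	unsigned int id = 42;	/* arbitrary example policy id */
	unsigned int dir = policy_id2dir(id);

	if (dir >= XFRM_POLICY_MAX)
		printf("id %u: direction %u out of range, reject (-EINVAL)\n", id, dir);
	else
		printf("id %u: direction %u\n", id, dir);
	return 0;
}
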
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 49a3900e3d32..34d671974a4d 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -195,7 +195,8 @@ nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys, | |||
195 | 195 | ||
196 | int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) | 196 | int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) |
197 | { | 197 | { |
198 | int allocation = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; | 198 | unsigned int __nocast allocation = |
199 | in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; | ||
199 | int err = 0; | 200 | int err = 0; |
200 | 201 | ||
201 | NETLINK_CB(skb).dst_group = group; | 202 | NETLINK_CB(skb).dst_group = group; |
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 4e66eef9a034..509afddae569 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c | |||
@@ -58,7 +58,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) | |||
58 | 58 | ||
59 | /* Spoof incoming device */ | 59 | /* Spoof incoming device */ |
60 | skb->dev = dev; | 60 | skb->dev = dev; |
61 | skb->h.raw = skb->data; | 61 | skb->mac.raw = skb->nh.raw; |
62 | skb->nh.raw = skb->data; | 62 | skb->nh.raw = skb->data; |
63 | skb->pkt_type = PACKET_HOST; | 63 | skb->pkt_type = PACKET_HOST; |
64 | 64 | ||
diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c index 5cfd4cadee42..86f777052633 100644 --- a/net/rxrpc/call.c +++ b/net/rxrpc/call.c | |||
@@ -1923,7 +1923,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call, | |||
1923 | size_t sioc, | 1923 | size_t sioc, |
1924 | struct kvec *siov, | 1924 | struct kvec *siov, |
1925 | u8 rxhdr_flags, | 1925 | u8 rxhdr_flags, |
1926 | int alloc_flags, | 1926 | unsigned int __nocast alloc_flags, |
1927 | int dup_data, | 1927 | int dup_data, |
1928 | size_t *size_sent) | 1928 | size_t *size_sent) |
1929 | { | 1929 | { |
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c index 61463c74f8cc..be4b2be58956 100644 --- a/net/rxrpc/connection.c +++ b/net/rxrpc/connection.c | |||
@@ -522,7 +522,7 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn, | |||
522 | uint8_t type, | 522 | uint8_t type, |
523 | int dcount, | 523 | int dcount, |
524 | struct kvec diov[], | 524 | struct kvec diov[], |
525 | int alloc_flags, | 525 | unsigned int __nocast alloc_flags, |
526 | struct rxrpc_message **_msg) | 526 | struct rxrpc_message **_msg) |
527 | { | 527 | { |
528 | struct rxrpc_message *msg; | 528 | struct rxrpc_message *msg; |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index f3104035e35d..ade730eaf401 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -719,7 +719,7 @@ static void rpc_async_schedule(void *arg) | |||
719 | void * | 719 | void * |
720 | rpc_malloc(struct rpc_task *task, size_t size) | 720 | rpc_malloc(struct rpc_task *task, size_t size) |
721 | { | 721 | { |
722 | int gfp; | 722 | unsigned int __nocast gfp; |
723 | 723 | ||
724 | if (task->tk_flags & RPC_TASK_SWAPPER) | 724 | if (task->tk_flags & RPC_TASK_SWAPPER) |
725 | gfp = GFP_ATOMIC; | 725 | gfp = GFP_ATOMIC; |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index fda737d77edc..061b44cc2451 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -163,7 +163,7 @@ static void xfrm_policy_timer(unsigned long data) | |||
163 | if (xp->dead) | 163 | if (xp->dead) |
164 | goto out; | 164 | goto out; |
165 | 165 | ||
166 | dir = xp->index & 7; | 166 | dir = xfrm_policy_id2dir(xp->index); |
167 | 167 | ||
168 | if (xp->lft.hard_add_expires_seconds) { | 168 | if (xp->lft.hard_add_expires_seconds) { |
169 | long tmo = xp->lft.hard_add_expires_seconds + | 169 | long tmo = xp->lft.hard_add_expires_seconds + |
@@ -225,7 +225,7 @@ expired: | |||
225 | * SPD calls. | 225 | * SPD calls. |
226 | */ | 226 | */ |
227 | 227 | ||
228 | struct xfrm_policy *xfrm_policy_alloc(int gfp) | 228 | struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp) |
229 | { | 229 | { |
230 | struct xfrm_policy *policy; | 230 | struct xfrm_policy *policy; |
231 | 231 | ||
@@ -417,7 +417,7 @@ struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete) | |||
417 | struct xfrm_policy *pol, **p; | 417 | struct xfrm_policy *pol, **p; |
418 | 418 | ||
419 | write_lock_bh(&xfrm_policy_lock); | 419 | write_lock_bh(&xfrm_policy_lock); |
420 | for (p = &xfrm_policy_list[id & 7]; (pol=*p)!=NULL; p = &pol->next) { | 420 | for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) { |
421 | if (pol->index == id) { | 421 | if (pol->index == id) { |
422 | xfrm_pol_hold(pol); | 422 | xfrm_pol_hold(pol); |
423 | if (delete) | 423 | if (delete) |
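
Tying the xfrm_policy.c hunks together: policies hang off per-direction lists, and xfrm_policy_byid() previously ignored its dir argument and re-derived the list index by masking the id; after the change the list is chosen by the direction the caller (pfkey_spdget() above) has already validated. A toy version of the by-id lookup over per-direction lists, with hypothetical names:

#include <stddef.h>

#define TOY_POLICY_MAX 3

struct toy_policy {
	unsigned int index;
	struct toy_policy *next;
};

/* One list per direction, as xfrm keeps them. */
static struct toy_policy *toy_policy_list[TOY_POLICY_MAX];

/* The list to scan is selected by the dir the caller validated, not by
 * masking the id inside the lookup as the old code did. */
static struct toy_policy *toy_policy_byid(int dir, unsigned int id)
{
	struct toy_policy *pol;

	for (pol = toy_policy_list[dir]; pol != NULL; pol = pol->next)
		if (pol->index == id)
			return pol;
	return NULL;
}

int main(void)
{
	/* Lists are empty here, so a lookup in direction 1 finds nothing. */
	return toy_policy_byid(1, 42) == NULL ? 0 : 1;
}
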