52 files changed, 784 insertions(+), 342 deletions(-)
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index c61d8b876fdb..4710845dbac4 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -19,6 +19,7 @@ Contents:
      - Control dependencies.
      - SMP barrier pairing.
      - Examples of memory barrier sequences.
+     - Read memory barriers vs load speculation.
 
  (*) Explicit kernel barriers.
 
@@ -248,7 +249,7 @@ And there are a number of things that _must_ or _must_not_ be assumed:
      we may get either of:
 
 	STORE *A = X; Y = LOAD *A;
-	STORE *A = Y;
+	STORE *A = Y = X;
 
 
 =========================
@@ -344,9 +345,12 @@ Memory barriers come in four basic varieties:
 
  (4) General memory barriers.
 
-     A general memory barrier is a combination of both a read memory barrier
-     and a write memory barrier.  It is a partial ordering over both loads and
-     stores.
+     A general memory barrier gives a guarantee that all the LOAD and STORE
+     operations specified before the barrier will appear to happen before all
+     the LOAD and STORE operations specified after the barrier with respect to
+     the other components of the system.
+
+     A general memory barrier is a partial ordering over both loads and stores.
 
      General memory barriers imply both read and write memory barriers, and so
      can substitute for either.
@@ -546,9 +550,9 @@ write barrier, though, again, a general barrier is viable:
 	===============	===============
 	a = 1;
 	<write barrier>
-	b = 2;		x = a;
+	b = 2;		x = b;
 			<read barrier>
-			y = b;
+			y = a;
 
 Or:
 
@@ -563,6 +567,18 @@ Or:
 Basically, the read barrier always has to be there, even though it can be of
 the "weaker" type.
 
+[!] Note that the stores before the write barrier would normally be expected to
+match the loads after the read barrier or data dependency barrier, and vice
+versa:
+
+	CPU 1                       CPU 2
+	===============             ===============
+	a = 1;           }----   --->{ v = c
+	b = 2;           }    \ /    { w = d
+	<write barrier>        \       <read barrier>
+	c = 3;           }    / \    { x = a;
+	d = 4;           }----   --->{ y = b;
+
 
 EXAMPLES OF MEMORY BARRIER SEQUENCES
 ------------------------------------
@@ -600,8 +616,8 @@ STORE B, STORE C } all occurring before the unordered set of { STORE D, STORE E
 	|       |       +------+
 	+-------+       :      :
 	                   |
-	                   | Sequence in which stores committed to memory system
-	                   | by CPU 1
+	                   | Sequence in which stores are committed to the
+	                   | memory system by CPU 1
 	                   V
 
 
@@ -683,14 +699,12 @@ then the following will occur:
 	|       :       :       |        |
 	|       :       :       | CPU 2  |
 	|       +-------+       |        |
-	 \      | X->9  |------>|        |
-	  \     +-------+       |        |
-	   ---->| B->2  |       |        |
-	        +-------+       |        |
-	Makes sure all effects --->  ddddddddddddddddd  |        |
-	prior to the store of C      +-------+          |        |
-	are perceptible to           | B->2  |--------->|        |
-	successive loads             +-------+          |        |
+	|       | X->9  |------>|        |
+	|       +-------+       |        |
+	Makes sure all effects --->  \  ddddddddddddddddd        |
+	prior to the store of C       \ +-------+       |        |
+	are perceptible to        ---->| B->2  |------->|        |
+	subsequent loads               +-------+        |        |
 	                        :       :       +-------+
 
 
@@ -699,73 +713,239 @@ following sequence of events:
 
 	CPU 1			CPU 2
 	=======================	=======================
+		{ A = 0, B = 9 }
 	STORE A=1
-	STORE B=2
-	STORE C=3
 	<write barrier>
-	STORE D=4
-	STORE E=5
-				LOAD A
+	STORE B=2
 				LOAD B
-				LOAD C
-				LOAD D
-				LOAD E
 
 Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
 some effectively random order, despite the write barrier issued by CPU 1:
 
-	+-------+       :      :
-	|       |       +------+
-	|       |------>| C=3  | }
-	|       |  :    +------+ }
-	|       |  :    | A=1  | }
-	|       |  :    +------+ }
-	| CPU 1 |  :    | B=2  | }---
-	|       |       +------+ }   \
-	|       |   wwwwwwwwwwwww}    \
-	|       |       +------+ }     \        :      :       +-------+
-	|       |  :    | E=5  | }      \       +-------+      |       |
-	|       |  :    +------+ }       \    { | C->3 |------>|       |
-	|       |------>| D=4  | }        \   { +-------+   :  |       |
-	|       |       +------+           \  { | E->5 |    :  |       |
-	+-------+       :      :            \ { +-------+   :  |       |
-	           Transfer           -->{     | A->1 |     :  | CPU 2 |
-	          from CPU 1             {     +-------+    :  |       |
-	           to CPU 2              {     | D->4 |     :  |       |
-	                                 {     +-------+    :  |       |
-	                                 {     | B->2 |------->|       |
-	                                       +-------+       |       |
-	                                       :       :       +-------+
-
-
-If, however, a read barrier were to be placed between the load of C and the
-load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
-perceived correctly by CPU 2.
+				LOAD A
+	+-------+       :      :                :       :
+	|       |       +------+                +-------+
+	|       |------>| A=1  |------      --->| A->0  |
+	|       |       +------+      \         +-------+
+	| CPU 1 |   wwwwwwwwwwwwwwww   \    --->| B->9  |
+	|       |       +------+        |       +-------+
+	|       |------>| B=2  |---     |       :      :
+	|       |       +------+   \    |       :      :       +-------+
+	+-------+       :      :    \   |       +-------+      |       |
+	                             ---------->| B->2  |----->|       |
+	                                |       +-------+      | CPU 2 |
+	                                |       | A->0  |----->|       |
+	                                |       +-------+      |       |
+	                                |       :      :       +-------+
+	                                 \      :      :
+	                                  \     +-------+
+	                                   ---->| A->1  |
+	                                        +-------+
+	                                        :      :
 
-	+-------+       :      :
-	|       |       +------+
-	|       |------>| C=3  | }
-	|       |  :    +------+ }
-	|       |  :    | A=1  | }---
-	|       |  :    +------+ }   \
-	| CPU 1 |  :    | B=2  | }    \
-	|       |       +------+       \
-	|       |   wwwwwwwwwwwwwwww    \
-	|       |       +------+         \      :      :       +-------+
-	|       |  :    | E=5  | }        \     +-------+      |       |
-	|       |  :    +------+ }---      \  { | C->3 |------>|       |
-	|       |------>| D=4  | }   \      \ { +-------+   :  |       |
-	|       |       +------+      \   -->{ | B->2 |     :  |       |
-	+-------+       :      :       \      { +-------+   :  |       |
-	                                \     { | A->1 |    :  | CPU 2 |
-	                                 \      +-------+       |      |
-	At this point the read ---->     \    rrrrrrrrrrrrrrrrr |      |
-	barrier causes all effects        \     +-------+       |      |
-	prior to the storage of C          \  { | E->5 |    :   |      |
-	to be perceptible to CPU 2       -->{   +-------+   :   |      |
-	                                    {   | D->4 |------->|      |
-	                                        +-------+       |      |
-	                                        :       :       +------+
+
+If, however, a read barrier were to be placed between the load of B and the
+load of A on CPU 2:
+
+	CPU 1			CPU 2
+	=======================	=======================
+		{ A = 0, B = 9 }
+	STORE A=1
+	<write barrier>
+	STORE B=2
+				LOAD B
+				<read barrier>
+				LOAD A
+
+then the partial ordering imposed by CPU 1 will be perceived correctly by CPU
+2:
+
+	+-------+       :      :                :       :
+	|       |       +------+                +-------+
+	|       |------>| A=1  |------      --->| A->0  |
+	|       |       +------+      \         +-------+
+	| CPU 1 |   wwwwwwwwwwwwwwww   \    --->| B->9  |
+	|       |       +------+        |       +-------+
+	|       |------>| B=2  |---     |       :      :
+	|       |       +------+   \    |       :      :       +-------+
+	+-------+       :      :    \   |       +-------+      |       |
+	                             ---------->| B->2  |----->|       |
+	                                |       +-------+      | CPU 2 |
+	                                |       :      :       |       |
+	                                |       :      :       |       |
+	At this point the read ---->     \     rrrrrrrrrrrrrrrrr       |
+	barrier causes all effects        \     +-------+      |       |
+	prior to the storage of B          ---->| A->1  |----->|       |
+	to be perceptible to CPU 2              +-------+      |       |
+	                                        :      :       +-------+
+
+
+To illustrate this more completely, consider what could happen if the code
+contained a load of A either side of the read barrier:
+
+	CPU 1			CPU 2
+	=======================	=======================
+		{ A = 0, B = 9 }
+	STORE A=1
+	<write barrier>
+	STORE B=2
+				LOAD B
+				LOAD A [first load of A]
+				<read barrier>
+				LOAD A [second load of A]
+
+Even though the two loads of A both occur after the load of B, they may both
+come up with different values:
+
+	+-------+       :      :                :       :
+	|       |       +------+                +-------+
+	|       |------>| A=1  |------      --->| A->0  |
+	|       |       +------+      \         +-------+
+	| CPU 1 |   wwwwwwwwwwwwwwww   \    --->| B->9  |
+	|       |       +------+        |       +-------+
+	|       |------>| B=2  |---     |       :      :
+	|       |       +------+   \    |       :      :       +-------+
+	+-------+       :      :    \   |       +-------+      |       |
+	                             ---------->| B->2  |----->|       |
+	                                |       +-------+      | CPU 2 |
+	                                |       :      :       |       |
+	                                |       :      :       |       |
+	                                |       +-------+      |       |
+	                                |       | A->0  |----->| 1st   |
+	                                |       +-------+      |       |
+	At this point the read ---->     \     rrrrrrrrrrrrrrrrr       |
+	barrier causes all effects        \     +-------+      |       |
+	prior to the storage of B          ---->| A->1  |----->| 2nd   |
+	to be perceptible to CPU 2              +-------+      |       |
+	                                        :      :       +-------+
+
+
+But it may be that the update to A from CPU 1 becomes perceptible to CPU 2
+before the read barrier completes anyway:
+
+	+-------+       :      :                :       :
+	|       |       +------+                +-------+
+	|       |------>| A=1  |------      --->| A->0  |
+	|       |       +------+      \         +-------+
+	| CPU 1 |   wwwwwwwwwwwwwwww   \    --->| B->9  |
+	|       |       +------+        |       +-------+
+	|       |------>| B=2  |---     |       :      :
+	|       |       +------+   \    |       :      :       +-------+
+	+-------+       :      :    \   |       +-------+      |       |
+	                             ---------->| B->2  |----->|       |
+	                                |       +-------+      | CPU 2 |
+	                                |       :      :       |       |
+	                                 \      :      :       |       |
+	                                  \     +-------+      |       |
+	                                   ---->| A->1  |----->| 1st   |
+	                                        +-------+      |       |
+	                                        rrrrrrrrrrrrrrrrr      |
+	                                        +-------+      |       |
+	                                        | A->1  |----->| 2nd   |
+	                                        +-------+      |       |
+	                                        :      :       +-------+
+
+
+The guarantee is that the second load will always come up with A == 1 if the
+load of B came up with B == 2.  No such guarantee exists for the first load of
+A; that may come up with either A == 0 or A == 1.
+
+
+READ MEMORY BARRIERS VS LOAD SPECULATION
+----------------------------------------
+
+Many CPUs speculate with loads: that is, they see that they will need to load
+an item from memory, and they find a time when they're not using the bus for
+any other loads, and so do the load in advance - even though they haven't
+actually got to that point in the instruction execution flow yet.  This permits
+the actual load instruction to potentially complete immediately because the CPU
+already has the value to hand.
+
+It may turn out that the CPU didn't actually need the value - perhaps because a
+branch circumvented the load - in which case it can discard the value or just
+cache it for later use.
+
+Consider:
+
+	CPU 1			CPU 2
+	=======================	=======================
+				LOAD B
+				DIVIDE		} Divide instructions generally
+				DIVIDE		} take a long time to perform
+				LOAD A
+
+Which might appear as this:
+
+	                                        :      :       +-------+
+	                                        +-------+      |       |
+	                                    --->| B->2  |----->|       |
+	                                        +-------+      | CPU 2 |
+	                                        :      :DIVIDE |       |
+	                                        +-------+      |       |
+	The CPU being busy doing a --->     --->| A->0  |~~~~  |       |
+	division speculates on the              +-------+   ~  |       |
+	LOAD of A                               :      :    ~  |       |
+	                                        :      :DIVIDE |       |
+	                                        :      :    ~  |       |
+	Once the divisions are complete -->     :      :    ~->|       |
+	the CPU can then perform the            :      :       |       |
+	LOAD with immediate effect              :      :       +-------+
+
+
+Placing a read barrier or a data dependency barrier just before the second
+load:
+
+	CPU 1			CPU 2
+	=======================	=======================
+				LOAD B
+				DIVIDE
+				DIVIDE
+				<read barrier>
+				LOAD A
+
+will force any value speculatively obtained to be reconsidered to an extent
+dependent on the type of barrier used.  If there was no change made to the
+speculated memory location, then the speculated value will just be used:
+
+	                                        :      :       +-------+
+	                                        +-------+      |       |
+	                                    --->| B->2  |----->|       |
+	                                        +-------+      | CPU 2 |
+	                                        :      :DIVIDE |       |
+	                                        +-------+      |       |
+	The CPU being busy doing a --->     --->| A->0  |~~~~  |       |
+	division speculates on the              +-------+   ~  |       |
+	LOAD of A                               :      :    ~  |       |
+	                                        :      :DIVIDE |       |
+	                                        :      :    ~  |       |
+	                                        :      :    ~  |       |
+	                                    rrrrrrrrrrrrrrrr~  |       |
+	                                        :      :    ~  |       |
+	                                        :      :    ~->|       |
+	                                        :      :       |       |
+	                                        :      :       +-------+
+
+
+but if there was an update or an invalidation from another CPU pending, then
+the speculation will be cancelled and the value reloaded:
+
+	                                        :      :       +-------+
+	                                        +-------+      |       |
+	                                    --->| B->2  |----->|       |
+	                                        +-------+      | CPU 2 |
+	                                        :      :DIVIDE |       |
+	                                        +-------+      |       |
+	The CPU being busy doing a --->     --->| A->0  |~~~~  |       |
+	division speculates on the              +-------+   ~  |       |
+	LOAD of A                               :      :    ~  |       |
+	                                        :      :DIVIDE |       |
+	                                        :      :    ~  |       |
+	                                        :      :    ~  |       |
+	                                    rrrrrrrrrrrrrrrrr  |       |
+	                                        +-------+      |       |
+	The speculation is discarded --->   --->| A->1  |----->|       |
+	and an updated value is                 +-------+      |       |
+	retrieved                               :      :       +-------+
 
 
 ========================
@@ -901,7 +1081,7 @@ IMPLICIT KERNEL MEMORY BARRIERS
 ===============================
 
 Some of the other functions in the linux kernel imply memory barriers, amongst
-which are locking, scheduling and memory allocation functions.
+which are locking and scheduling functions.
 
 This specification is a _minimum_ guarantee; any particular architecture may
 provide more substantial guarantees, but these may not be relied upon outside
@@ -966,6 +1146,20 @@ equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
 barriers is that the effects of instructions outside of a critical section may
 seep into the inside of the critical section.
 
+A LOCK followed by an UNLOCK may not be assumed to be a full memory barrier
+because it is possible for an access preceding the LOCK to happen after the
+LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
+two accesses can themselves then cross:
+
+	*A = a;
+	LOCK
+	UNLOCK
+	*B = b;
+
+may occur as:
+
+	LOCK, STORE *B, STORE *A, UNLOCK
+
 Locks and semaphores may not provide any guarantee of ordering on UP compiled
 systems, and so cannot be counted on in such a situation to actually achieve
 anything at all - especially with respect to I/O accesses - unless combined
@@ -1016,8 +1210,6 @@ Other functions that imply barriers:
 
  (*) schedule() and similar imply full memory barriers.
 
- (*) Memory allocation and release functions imply full memory barriers.
-
 
 =================================
 INTER-CPU LOCKING BARRIER EFFECTS
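The pairing rule the documentation patch spells out is easiest to see in code. The following is a minimal sketch of write-barrier/read-barrier pairing using the kernel's smp_wmb()/smp_rmb(); the producer/consumer functions and the flag/data variables are illustrative names, not taken from the patch:

	static int data;
	static int flag;

	/* Runs on CPU 1: commit the store to "data" before the store to
	 * "flag", so a reader that sees flag == 1 can rely on data too. */
	static void producer(void)
	{
		data = 42;
		smp_wmb();		/* pairs with smp_rmb() below */
		flag = 1;
	}

	/* Runs on CPU 2: the load of "flag" pairs with the store of "flag",
	 * and the read barrier orders it before the load of "data". */
	static int consumer(void)
	{
		if (flag) {
			smp_rmb();	/* pairs with smp_wmb() above */
			return data;	/* 42 whenever flag was seen as 1 */
		}
		return -1;		/* not published yet */
	}

Without the smp_rmb(), CPU 2 could speculate the load of data ahead of the load of flag and return a stale value, which is exactly the scenario the new "Read memory barriers vs load speculation" section illustrates.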
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 8290b69da202..213c7850d5fb 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -453,7 +453,7 @@ config ALPHA_IRONGATE
 
 config GENERIC_HWEIGHT
 	bool
-	default y if !ALPHA_EV6 && !ALPHA_EV67
+	default y if !ALPHA_EV67
 
 config ALPHA_AVANTI
 	bool
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 9be01b0c3f48..e24566b88a78 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -111,21 +111,21 @@ static void __init ts72xx_map_io(void)
 	}
 }
 
-static unsigned char ts72xx_rtc_readb(unsigned long addr)
+static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
 {
 	__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
 	return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
 }
 
-static void ts72xx_rtc_writeb(unsigned char value, unsigned long addr)
+static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr)
 {
 	__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
 	__raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
 }
 
 static struct m48t86_ops ts72xx_rtc_ops = {
-	.readb		= ts72xx_rtc_readb,
-	.writeb		= ts72xx_rtc_writeb,
+	.readbyte	= ts72xx_rtc_readbyte,
+	.writebyte	= ts72xx_rtc_writebyte,
 };
 
 static struct platform_device ts72xx_rtc_device = {
diff --git a/arch/arm/mach-imx/irq.c b/arch/arm/mach-imx/irq.c
index eeb8a6d4a399..a5de5f1da9f2 100644
--- a/arch/arm/mach-imx/irq.c
+++ b/arch/arm/mach-imx/irq.c
@@ -127,7 +127,7 @@ static void
 imx_gpio_ack_irq(unsigned int irq)
 {
 	DEBUG_IRQ("%s: irq %d\n", __FUNCTION__, irq);
-	ISR(IRQ_TO_REG(irq)) |= 1 << ((irq - IRQ_GPIOA(0)) % 32);
+	ISR(IRQ_TO_REG(irq)) = 1 << ((irq - IRQ_GPIOA(0)) % 32);
 }
 
 static void
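The one-character i.MX change is worth a note: GPIO interrupt status registers of this kind are write-one-to-clear, so a read-modify-write acknowledges every bit that happens to be pending, not just the one being handled. A minimal sketch of the difference, with a hypothetical register pointer rather than the i.MX accessor macros:

	/* status_reg points at a hypothetical write-one-to-clear register. */
	void ack_bit(volatile unsigned int *status_reg, unsigned int bit)
	{
		/* Wrong for W1C hardware: the read returns all pending bits,
		 * and writing them back clears every pending interrupt:
		 *
		 *	*status_reg |= 1u << bit;
		 */

		/* Right: write back only the bit being acknowledged. */
		*status_reg = 1u << bit;
	}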
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index a0724f2b24ce..9f55f5ae1044 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -232,8 +232,6 @@ static void __init intcp_init_irq(void)
 	for (i = IRQ_PIC_START; i <= IRQ_PIC_END; i++) {
 		if (i == 11)
 			i = 22;
-		if (i == IRQ_CP_CPPLDINT)
-			i++;
 		if (i == 29)
 			break;
 		set_irq_chip(i, &pic_chip);
@@ -259,8 +257,7 @@ static void __init intcp_init_irq(void)
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
 	}
 
-	set_irq_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
-	pic_unmask_irq(IRQ_CP_CPPLDINT);
+	set_irq_chained_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
 }
 
 /*
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 19b372df544a..44bcb8097c7a 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -371,6 +371,7 @@ static int spitz_ohci_init(struct device *dev)
 static struct pxaohci_platform_data spitz_ohci_platform_data = {
 	.port_mode	= PMM_NPS_MODE,
 	.init		= spitz_ohci_init,
+	.power_budget	= 150,
 };
 
 
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 9e02bc3712a0..af6d2775cf82 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -59,6 +59,14 @@ neponset_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *reg
 		if (irr & (IRR_ETHERNET | IRR_USAR)) {
 			desc->chip->mask(irq);
 
+			/*
+			 * Ack the interrupt now to prevent re-entering
+			 * this neponset handler.  Again, this is safe
+			 * since we'll check the IRR register prior to
+			 * leaving.
+			 */
+			desc->chip->ack(irq);
+
 			if (irr & IRR_ETHERNET) {
 				d = irq_desc + IRQ_NEPONSET_SMC9196;
 				desc_handle_irq(IRQ_NEPONSET_SMC9196, d, regs);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 799697d32dec..cebd48a3dae4 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -112,10 +112,9 @@ void __init versatile_init_irq(void)
 {
 	unsigned int i;
 
-	vic_init(VA_VIC_BASE, IRQ_VIC_START, ~(1 << 31));
+	vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0);
 
-	set_irq_handler(IRQ_VICSOURCE31, sic_handle_irq);
-	enable_irq(IRQ_VICSOURCE31);
+	set_irq_chained_handler(IRQ_VICSOURCE31, sic_handle_irq);
 
 	/* Do second interrupt controller */
 	writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
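Both ARM changes above replace set_irq_handler() plus a manual unmask with set_irq_chained_handler(), which marks the cascade input as a chained entry and unmasks it in one step. In outline, the cascade pattern looks like the sketch below, using the era's genirq signatures visible elsewhere in this patch; the IRQ numbers and the SIC status register name are hypothetical:

	/* Demultiplex the secondary controller's status and re-dispatch. */
	static void sic_demux(unsigned int irq, struct irqdesc *desc,
			      struct pt_regs *regs)
	{
		unsigned int status = readl(SIC_STATUS_REG);	/* hypothetical */

		while (status) {
			int bit = ffs(status) - 1;

			status &= ~(1u << bit);
			desc_handle_irq(IRQ_SIC_START + bit,
					irq_desc + IRQ_SIC_START + bit, regs);
		}
	}

	void __init board_init_irq(void)
	{
		/* One call: flags the cascade IRQ as chained, installs the
		 * demux handler, and unmasks the line. */
		set_irq_chained_handler(IRQ_CASCADE, sic_demux);
	}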
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index 2e3b643a4dc4..1649a175a206 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -5,17 +5,34 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
+
 #include <asm/pci-direct.h>
 #include <asm/acpi.h>
 #include <asm/apic.h>
 
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+	nvidia_hpet_detected = 1;
+	return 0;
+}
+#endif
+
 static int __init check_bridge(int vendor, int device)
 {
 #ifdef CONFIG_ACPI
-	/* According to Nvidia all timer overrides are bogus. Just ignore
-	   them all. */
+	/* According to Nvidia all timer overrides are bogus unless HPET
+	   is enabled. */
 	if (vendor == PCI_VENDOR_ID_NVIDIA) {
-		acpi_skip_timer_override = 1;
+		nvidia_hpet_detected = 0;
+		acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+		if (nvidia_hpet_detected == 0) {
+			acpi_skip_timer_override = 1;
+		}
 	}
 #endif
 	if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 846e1639ef7c..dd6b0e3386ce 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1547,15 +1547,18 @@ void __init setup_arch(char **cmdline_p)
 	if (efi_enabled)
 		efi_map_memmap();
 
-#ifdef CONFIG_X86_IO_APIC
-	check_acpi_pci();	/* Checks more than just ACPI actually */
-#endif
-
 #ifdef CONFIG_ACPI
 	/*
 	 * Parse the ACPI tables for possible boot-time SMP configuration.
 	 */
 	acpi_boot_table_init();
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+	check_acpi_pci();	/* Checks more than just ACPI actually */
+#endif
+
+#ifdef CONFIG_ACPI
 	acpi_boot_init();
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 41e9ab40cd54..f70bd090dacd 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -822,6 +822,7 @@ static void __init prom_send_capabilities(void)
 	/* try calling the ibm,client-architecture-support method */
 	if (call_prom_ret("call-method", 3, 2, &ret,
 			  ADDR("ibm,client-architecture-support"),
+			  root,
 			  ADDR(ibm_architecture_vec)) == 0) {
 		/* the call exists... */
 		if (ret)
@@ -1622,6 +1623,15 @@ static int __init prom_find_machine_type(void)
 			if (strstr(p, RELOC("Power Macintosh")) ||
 			    strstr(p, RELOC("MacRISC")))
 				return PLATFORM_POWERMAC;
+#ifdef CONFIG_PPC64
+			/* We must make sure we don't detect the IBM Cell
+			 * blades as pSeries due to some firmware issues,
+			 * so we do it here.
+			 */
+			if (strstr(p, RELOC("IBM,CBEA")) ||
+			    strstr(p, RELOC("IBM,CPBW-1.0")))
+				return PLATFORM_GENERIC;
+#endif /* CONFIG_PPC64 */
 			i += sl + 1;
 		}
 	}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 01e3c08cb550..8fdeca2d4597 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -803,10 +803,13 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
 		if (__get_user(cmcp, &ucp->uc_regs))
 			return -EFAULT;
 		mcp = (struct mcontext __user *)(u64)cmcp;
+		/* no need to check access_ok(mcp), since mcp < 4GB */
 	}
 #else
 	if (__get_user(mcp, &ucp->uc_regs))
 		return -EFAULT;
+	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
+		return -EFAULT;
 #endif
 	restore_sigmask(&set);
 	if (restore_user_regs(regs, mcp, sig))
@@ -908,13 +911,14 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 {
 	struct sig_dbg_op op;
 	int i;
+	unsigned char tmp;
 	unsigned long new_msr = regs->msr;
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 	unsigned long new_dbcr0 = current->thread.dbcr0;
 #endif
 
 	for (i=0; i<ndbg; i++) {
-		if (__copy_from_user(&op, dbg, sizeof(op)))
+		if (copy_from_user(&op, dbg + i, sizeof(op)))
 			return -EFAULT;
 		switch (op.dbg_type) {
 		case SIG_DBG_SINGLE_STEPPING:
@@ -959,6 +963,11 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 	current->thread.dbcr0 = new_dbcr0;
 #endif
 
+	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
+	    || __get_user(tmp, (u8 __user *) ctx)
+	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
+		return -EFAULT;
+
 	/*
 	 * If we get a fault copying the context into the kernel's
 	 * image of the user's registers, we can't just return -EFAULT
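The signal_32.c loop fixed two bugs at once: __copy_from_user() performs no access_ok() range check, and the source pointer was never advanced, so every iteration re-read element 0 of the user array. The same pattern in miniature, with a hypothetical element type:

	struct dbg_op { int type; unsigned long value; };	/* hypothetical */

	static int read_ops(struct dbg_op __user *uops, int n)
	{
		struct dbg_op op;
		int i;

		for (i = 0; i < n; i++) {
			/* Was: __copy_from_user(&op, uops, sizeof(op)) --
			 * no range check, and "uops" never moves. */
			if (copy_from_user(&op, uops + i, sizeof(op)))
				return -EFAULT;
			/* ... act on op ... */
		}
		return 0;
	}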
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 27f65b95184d..c2db642f4cdd 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -182,6 +182,8 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
 	if (err)
 		return err;
+	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
+		return -EFAULT;
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
 	if (v_regs != 0 && (msr & MSR_VEC) != 0)
 		err |= __copy_from_user(current->thread.vr, v_regs,
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 6574b22b3cf3..fd3e5609e3e0 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -125,14 +125,13 @@ static void __init cell_init_early(void)
 
 static int __init cell_probe(void)
 {
-	/* XXX This is temporary, the Cell maintainer will come up with
-	 * more appropriate detection logic
-	 */
 	unsigned long root = of_get_flat_dt_root();
-	if (!of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
-		return 0;
 
-	return 1;
+	if (of_flat_dt_is_compatible(root, "IBM,CBEA") ||
+	    of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+		return 1;
+
+	return 0;
 }
 
 /*
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 5f79f01c44f2..3ba87835757e 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -389,6 +389,7 @@ static int __init pSeries_probe_hypertas(unsigned long node,
 
 static int __init pSeries_probe(void)
 {
+	unsigned long root = of_get_flat_dt_root();
 	char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
 					  "device_type", NULL);
 	if (dtype == NULL)
@@ -396,6 +397,13 @@ static int __init pSeries_probe(void)
 	if (strcmp(dtype, "chrp"))
 		return 0;
 
+	/* Cell blades firmware claims to be chrp while it's not. Until this
+	 * is fixed, we need to avoid those here.
+	 */
+	if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
+	    of_flat_dt_is_compatible(root, "IBM,CBEA"))
+		return 0;
+
 	DBG("pSeries detected, looking for LPAR capability...\n");
 
 	/* Now try to figure out if we are running on LPAR */
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index a93f5da6855d..40b42c88e6a7 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -69,6 +69,17 @@ void __init smp_store_cpu_info(int id)
 						     "clock-frequency", 0);
 	cpu_data(id).prom_node = cpu_node;
 	cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+
+	/* this is required to tune the scheduler correctly */
+	/* is it possible to have CPUs with different cache sizes? */
+	if (id == boot_cpu_id) {
+		int cache_line, cache_nlines;
+		cache_line = 0x20;
+		cache_line = prom_getintdefault(cpu_node, "ecache-line-size", cache_line);
+		cache_nlines = 0x8000;
+		cache_nlines = prom_getintdefault(cpu_node, "ecache-nlines", cache_nlines);
+		max_cache_size = cache_line * cache_nlines;
+	}
 	if (cpu_data(id).mid < 0)
 		panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
 }
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 2b7a1f316a93..0c0895202970 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -599,18 +599,128 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
 
 /* SUN4V PCI configuration space accessors. */
 
-static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
+struct pdev_entry {
+	struct pdev_entry	*next;
+	u32			devhandle;
+	unsigned int		bus;
+	unsigned int		device;
+	unsigned int		func;
+};
+
+#define PDEV_HTAB_SIZE	16
+#define PDEV_HTAB_MASK	(PDEV_HTAB_SIZE - 1)
+static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
+
+static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
 {
-	if (bus == pbm->pci_first_busno) {
-		if (device == 0 && func == 0)
-			return 0;
-		return 1;
+	unsigned int val;
+
+	val = (devhandle ^ (devhandle >> 4));
+	val ^= bus;
+	val ^= device;
+	val ^= func;
+
+	return val & PDEV_HTAB_MASK;
+}
+
+static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+	struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
+	struct pdev_entry **slot;
+
+	if (!p)
+		return -ENOMEM;
+
+	slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+	p->next = *slot;
+	*slot = p;
+
+	p->devhandle = devhandle;
+	p->bus = bus;
+	p->device = device;
+	p->func = func;
+
+	return 0;
+}
+
+/* Recursively descend into the OBP device tree, rooted at toplevel_node,
+ * looking for a PCI device matching bus and devfn.
+ */
+static int obp_find(struct linux_prom_pci_registers *pregs, int toplevel_node, unsigned int bus, unsigned int devfn)
+{
+	toplevel_node = prom_getchild(toplevel_node);
+
+	while (toplevel_node != 0) {
+		int ret = obp_find(pregs, toplevel_node, bus, devfn);
+
+		if (ret != 0)
+			return ret;
+
+		ret = prom_getproperty(toplevel_node, "reg", (char *) pregs,
+				       sizeof(*pregs) * PROMREG_MAX);
+		if (ret == 0 || ret == -1)
+			goto next_sibling;
+
+		if (((pregs[0].phys_hi >> 16) & 0xff) == bus &&
+		    ((pregs[0].phys_hi >> 8) & 0xff) == devfn)
+			break;
+
+	next_sibling:
+		toplevel_node = prom_getsibling(toplevel_node);
+	}
+
+	return toplevel_node;
+}
+
+static int pdev_htab_populate(struct pci_pbm_info *pbm)
+{
+	struct linux_prom_pci_registers pr[PROMREG_MAX];
+	u32 devhandle = pbm->devhandle;
+	unsigned int bus;
+
+	for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
+		unsigned int devfn;
+
+		for (devfn = 0; devfn < 256; devfn++) {
+			unsigned int device = PCI_SLOT(devfn);
+			unsigned int func = PCI_FUNC(devfn);
+
+			if (obp_find(pr, pbm->prom_node, bus, devfn)) {
+				int err = pdev_htab_add(devhandle, bus,
+							device, func);
+				if (err)
+					return err;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+	struct pdev_entry *p;
+
+	p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+	while (p) {
+		if (p->devhandle == devhandle &&
+		    p->bus == bus &&
+		    p->device == device &&
+		    p->func == func)
+			break;
+
+		p = p->next;
 	}
 
+	return p;
+}
+
+static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
+{
 	if (bus < pbm->pci_first_busno ||
 	    bus > pbm->pci_last_busno)
 		return 1;
-	return 0;
+	return pdev_find(pbm->devhandle, bus, device, func) == NULL;
 }
 
 static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
@@ -1063,6 +1173,8 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32
 
 	pci_sun4v_get_bus_range(pbm);
 	pci_sun4v_iommu_init(pbm);
+
+	pdev_htab_populate(pbm);
 }
 
 void sun4v_pci_init(int node, char *model_name)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 4e8cd79156e0..f03d52d0b88d 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1287,6 +1287,40 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
+static void __init smp_tune_scheduling(void)
+{
+	int instance, node;
+	unsigned int def, smallest = ~0U;
+
+	def = ((tlb_type == hypervisor) ?
+	       (3 * 1024 * 1024) :
+	       (4 * 1024 * 1024));
+
+	instance = 0;
+	while (!cpu_find_by_instance(instance, &node, NULL)) {
+		unsigned int val;
+
+		val = prom_getintdefault(node, "ecache-size", def);
+		if (val < smallest)
+			smallest = val;
+
+		instance++;
+	}
+
+	/* Any value less than 256K is nonsense.  */
+	if (smallest < (256U * 1024U))
+		smallest = 256 * 1024;
+
+	max_cache_size = smallest;
+
+	if (smallest < 1U * 1024U * 1024U)
+		printk(KERN_INFO "Using max_cache_size of %uKB\n",
+		       smallest / 1024U);
+	else
+		printk(KERN_INFO "Using max_cache_size of %uMB\n",
+		       smallest / 1024U / 1024U);
+}
+
 /* Constrain the number of cpus to max_cpus.  */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
@@ -1322,6 +1356,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}
 
 	smp_store_cpu_info(boot_cpu_id);
+	smp_tune_scheduling();
 }
 
 /* Set this up early so that things like the scheduler can init
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 62d8a99271ea..38e569f786dd 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -297,7 +297,6 @@ EXPORT_SYMBOL(svr4_getcontext);
 EXPORT_SYMBOL(svr4_setcontext);
 EXPORT_SYMBOL(compat_sys_ioctl);
 EXPORT_SYMBOL(sparc32_open);
-EXPORT_SYMBOL(sys_close);
 #endif
 
 /* Special internal versions of library functions. */
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 2793a5d82380..563db528e031 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1797,7 +1797,9 @@ static const char *sun4v_err_type_to_str(u32 type)
 	};
 }
 
-static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+extern void __show_regs(struct pt_regs * regs);
+
+static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
 {
 	int cnt;
 
@@ -1830,6 +1832,8 @@ static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *
 	       pfx,
 	       ent->err_raddr, ent->err_size, ent->err_cpu);
 
+	__show_regs(regs);
+
 	if ((cnt = atomic_read(ocnt)) != 0) {
 		atomic_set(ocnt, 0);
 		wmb();
@@ -1862,7 +1866,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
 
 	put_cpu();
 
-	sun4v_log_error(&local_copy, cpu,
+	sun4v_log_error(regs, &local_copy, cpu,
 			KERN_ERR "RESUMABLE ERROR",
 			&sun4v_resum_oflow_cnt);
 }
@@ -1910,7 +1914,7 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
 	}
 #endif
 
-	sun4v_log_error(&local_copy, cpu,
+	sun4v_log_error(regs, &local_copy, cpu,
 			KERN_EMERG "NON-RESUMABLE ERROR",
 			&sun4v_nonresum_oflow_cnt);
 
@@ -2200,7 +2204,6 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 void die_if_kernel(char *str, struct pt_regs *regs)
 {
 	static int die_counter;
-	extern void __show_regs(struct pt_regs * regs);
 	extern void smp_report_regs(void);
 	int count = 0;
 
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 0de3ea938830..9cc7031b7151 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -271,6 +271,18 @@ __setup("enable_8254_timer", setup_enable_8254_timer);
 #include <linux/pci_ids.h>
 #include <linux/pci.h>
 
+
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+	nvidia_hpet_detected = 1;
+	return 0;
+}
+#endif
+
 /* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
    off. Check for an Nvidia or VIA PCI bridge and turn it off.
    Use pci direct infrastructure because this runs before the PCI subsystem.
@@ -317,11 +329,19 @@ void __init check_ioapic(void)
 				return;
 			case PCI_VENDOR_ID_NVIDIA:
 #ifdef CONFIG_ACPI
-				/* All timer overrides on Nvidia
-				   seem to be wrong. Skip them. */
-				acpi_skip_timer_override = 1;
-				printk(KERN_INFO
-	     "Nvidia board detected. Ignoring ACPI timer override.\n");
+				/*
+				 * All timer overrides on Nvidia are
+				 * wrong unless HPET is enabled.
+				 */
+				nvidia_hpet_detected = 0;
+				acpi_table_parse(ACPI_HPET,
+						 nvidia_hpet_check);
+				if (nvidia_hpet_detected == 0) {
+					acpi_skip_timer_override = 1;
+					printk(KERN_INFO "Nvidia board "
+					       "detected. Ignoring ACPI "
+					       "timer override.\n");
+				}
 #endif
 				/* RED-PEN skip them on mptables too? */
 				return;
diff --git a/block/as-iosched.c b/block/as-iosched.c
index e25a5d79ab27..a7caf35ca0c2 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1648,17 +1648,17 @@ static void as_exit_queue(elevator_t *e)
  * initialize elevator private data (as_data), and alloc a arq for
  * each request on the free lists
  */
-static int as_init_queue(request_queue_t *q, elevator_t *e)
+static void *as_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct as_data *ad;
 	int i;
 
 	if (!arq_pool)
-		return -ENOMEM;
+		return NULL;
 
 	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
 	if (!ad)
-		return -ENOMEM;
+		return NULL;
 	memset(ad, 0, sizeof(*ad));
 
 	ad->q = q; /* Identify what queue the data belongs to */
@@ -1667,7 +1667,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 					GFP_KERNEL, q->node);
 	if (!ad->hash) {
 		kfree(ad);
-		return -ENOMEM;
+		return NULL;
 	}
 
 	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -1675,7 +1675,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	if (!ad->arq_pool) {
 		kfree(ad->hash);
 		kfree(ad);
-		return -ENOMEM;
+		return NULL;
 	}
 
 	/* anticipatory scheduling helpers */
@@ -1696,14 +1696,13 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	ad->antic_expire = default_antic_expire;
 	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
 	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
-	e->elevator_data = ad;
 
 	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
 	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
 	if (ad->write_batch_count < 2)
 		ad->write_batch_count = 2;
 
-	return 0;
+	return ad;
 }
 
 /*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 8e9d84825e1c..a46d030e092a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2251,14 +2251,14 @@ static void cfq_exit_queue(elevator_t *e)
 	kfree(cfqd);
 }
 
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct cfq_data *cfqd;
 	int i;
 
 	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
 	if (!cfqd)
-		return -ENOMEM;
+		return NULL;
 
 	memset(cfqd, 0, sizeof(*cfqd));
 
@@ -2288,8 +2288,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
 		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
 
-	e->elevator_data = cfqd;
-
 	cfqd->queue = q;
 
 	cfqd->max_queued = q->nr_requests / 4;
@@ -2316,14 +2314,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 
-	return 0;
+	return cfqd;
 out_crqpool:
 	kfree(cfqd->cfq_hash);
 out_cfqhash:
 	kfree(cfqd->crq_hash);
 out_crqhash:
 	kfree(cfqd);
-	return -ENOMEM;
+	return NULL;
 }
 
 static void cfq_slab_kill(void)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index 399fa1e60e1f..3bd0415a9828 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c | |||
| @@ -613,24 +613,24 @@ static void deadline_exit_queue(elevator_t *e) | |||
| 613 | * initialize elevator private data (deadline_data), and alloc a drq for | 613 | * initialize elevator private data (deadline_data), and alloc a drq for |
| 614 | * each request on the free lists | 614 | * each request on the free lists |
| 615 | */ | 615 | */ |
| 616 | static int deadline_init_queue(request_queue_t *q, elevator_t *e) | 616 | static void *deadline_init_queue(request_queue_t *q, elevator_t *e) |
| 617 | { | 617 | { |
| 618 | struct deadline_data *dd; | 618 | struct deadline_data *dd; |
| 619 | int i; | 619 | int i; |
| 620 | 620 | ||
| 621 | if (!drq_pool) | 621 | if (!drq_pool) |
| 622 | return -ENOMEM; | 622 | return NULL; |
| 623 | 623 | ||
| 624 | dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node); | 624 | dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node); |
| 625 | if (!dd) | 625 | if (!dd) |
| 626 | return -ENOMEM; | 626 | return NULL; |
| 627 | memset(dd, 0, sizeof(*dd)); | 627 | memset(dd, 0, sizeof(*dd)); |
| 628 | 628 | ||
| 629 | dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES, | 629 | dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES, |
| 630 | GFP_KERNEL, q->node); | 630 | GFP_KERNEL, q->node); |
| 631 | if (!dd->hash) { | 631 | if (!dd->hash) { |
| 632 | kfree(dd); | 632 | kfree(dd); |
| 633 | return -ENOMEM; | 633 | return NULL; |
| 634 | } | 634 | } |
| 635 | 635 | ||
| 636 | dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, | 636 | dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, |
| @@ -638,7 +638,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e) | |||
| 638 | if (!dd->drq_pool) { | 638 | if (!dd->drq_pool) { |
| 639 | kfree(dd->hash); | 639 | kfree(dd->hash); |
| 640 | kfree(dd); | 640 | kfree(dd); |
| 641 | return -ENOMEM; | 641 | return NULL; |
| 642 | } | 642 | } |
| 643 | 643 | ||
| 644 | for (i = 0; i < DL_HASH_ENTRIES; i++) | 644 | for (i = 0; i < DL_HASH_ENTRIES; i++) |
| @@ -653,8 +653,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e) | |||
| 653 | dd->writes_starved = writes_starved; | 653 | dd->writes_starved = writes_starved; |
| 654 | dd->front_merges = 1; | 654 | dd->front_merges = 1; |
| 655 | dd->fifo_batch = fifo_batch; | 655 | dd->fifo_batch = fifo_batch; |
| 656 | e->elevator_data = dd; | 656 | return dd; |
| 657 | return 0; | ||
| 658 | } | 657 | } |
| 659 | 658 | ||
| 660 | static void deadline_put_request(request_queue_t *q, struct request *rq) | 659 | static void deadline_put_request(request_queue_t *q, struct request *rq) |
diff --git a/block/elevator.c b/block/elevator.c index 8768a367fdde..a0afdd317cef 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
| @@ -121,16 +121,16 @@ static struct elevator_type *elevator_get(const char *name) | |||
| 121 | return e; | 121 | return e; |
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | static int elevator_attach(request_queue_t *q, struct elevator_queue *eq) | 124 | static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq) |
| 125 | { | 125 | { |
| 126 | int ret = 0; | 126 | return eq->ops->elevator_init_fn(q, eq); |
| 127 | } | ||
| 127 | 128 | ||
| 129 | static void elevator_attach(request_queue_t *q, struct elevator_queue *eq, | ||
| 130 | void *data) | ||
| 131 | { | ||
| 128 | q->elevator = eq; | 132 | q->elevator = eq; |
| 129 | 133 | eq->elevator_data = data; | |
| 130 | if (eq->ops->elevator_init_fn) | ||
| 131 | ret = eq->ops->elevator_init_fn(q, eq); | ||
| 132 | |||
| 133 | return ret; | ||
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | static char chosen_elevator[16]; | 136 | static char chosen_elevator[16]; |
| @@ -181,6 +181,7 @@ int elevator_init(request_queue_t *q, char *name) | |||
| 181 | struct elevator_type *e = NULL; | 181 | struct elevator_type *e = NULL; |
| 182 | struct elevator_queue *eq; | 182 | struct elevator_queue *eq; |
| 183 | int ret = 0; | 183 | int ret = 0; |
| 184 | void *data; | ||
| 184 | 185 | ||
| 185 | INIT_LIST_HEAD(&q->queue_head); | 186 | INIT_LIST_HEAD(&q->queue_head); |
| 186 | q->last_merge = NULL; | 187 | q->last_merge = NULL; |
| @@ -202,10 +203,13 @@ int elevator_init(request_queue_t *q, char *name) | |||
| 202 | if (!eq) | 203 | if (!eq) |
| 203 | return -ENOMEM; | 204 | return -ENOMEM; |
| 204 | 205 | ||
| 205 | ret = elevator_attach(q, eq); | 206 | data = elevator_init_queue(q, eq); |
| 206 | if (ret) | 207 | if (!data) { |
| 207 | kobject_put(&eq->kobj); | 208 | kobject_put(&eq->kobj); |
| 209 | return -ENOMEM; | ||
| 210 | } | ||
| 208 | 211 | ||
| 212 | elevator_attach(q, eq, data); | ||
| 209 | return ret; | 213 | return ret; |
| 210 | } | 214 | } |
| 211 | 215 | ||
| @@ -722,13 +726,16 @@ int elv_register_queue(struct request_queue *q) | |||
| 722 | return error; | 726 | return error; |
| 723 | } | 727 | } |
| 724 | 728 | ||
| 729 | static void __elv_unregister_queue(elevator_t *e) | ||
| 730 | { | ||
| 731 | kobject_uevent(&e->kobj, KOBJ_REMOVE); | ||
| 732 | kobject_del(&e->kobj); | ||
| 733 | } | ||
| 734 | |||
| 725 | void elv_unregister_queue(struct request_queue *q) | 735 | void elv_unregister_queue(struct request_queue *q) |
| 726 | { | 736 | { |
| 727 | if (q) { | 737 | if (q) |
| 728 | elevator_t *e = q->elevator; | 738 | __elv_unregister_queue(q->elevator); |
| 729 | kobject_uevent(&e->kobj, KOBJ_REMOVE); | ||
| 730 | kobject_del(&e->kobj); | ||
| 731 | } | ||
| 732 | } | 739 | } |
| 733 | 740 | ||
| 734 | int elv_register(struct elevator_type *e) | 741 | int elv_register(struct elevator_type *e) |
| @@ -780,6 +787,7 @@ EXPORT_SYMBOL_GPL(elv_unregister); | |||
| 780 | static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) | 787 | static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) |
| 781 | { | 788 | { |
| 782 | elevator_t *old_elevator, *e; | 789 | elevator_t *old_elevator, *e; |
| 790 | void *data; | ||
| 783 | 791 | ||
| 784 | /* | 792 | /* |
| 785 | * Allocate new elevator | 793 | * Allocate new elevator |
| @@ -788,6 +796,12 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) | |||
| 788 | if (!e) | 796 | if (!e) |
| 789 | return 0; | 797 | return 0; |
| 790 | 798 | ||
| 799 | data = elevator_init_queue(q, e); | ||
| 800 | if (!data) { | ||
| 801 | kobject_put(&e->kobj); | ||
| 802 | return 0; | ||
| 803 | } | ||
| 804 | |||
| 791 | /* | 805 | /* |
| 792 | * Turn on BYPASS and drain all requests w/ elevator private data | 806 | * Turn on BYPASS and drain all requests w/ elevator private data |
| 793 | */ | 807 | */ |
| @@ -806,19 +820,19 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) | |||
| 806 | elv_drain_elevator(q); | 820 | elv_drain_elevator(q); |
| 807 | } | 821 | } |
| 808 | 822 | ||
| 809 | spin_unlock_irq(q->queue_lock); | ||
| 810 | |||
| 811 | /* | 823 | /* |
| 812 | * unregister old elevator data | 824 | * Remember old elevator. |
| 813 | */ | 825 | */ |
| 814 | elv_unregister_queue(q); | ||
| 815 | old_elevator = q->elevator; | 826 | old_elevator = q->elevator; |
| 816 | 827 | ||
| 817 | /* | 828 | /* |
| 818 | * attach and start new elevator | 829 | * attach and start new elevator |
| 819 | */ | 830 | */ |
| 820 | if (elevator_attach(q, e)) | 831 | elevator_attach(q, e, data); |
| 821 | goto fail; | 832 | |
| 833 | spin_unlock_irq(q->queue_lock); | ||
| 834 | |||
| 835 | __elv_unregister_queue(old_elevator); | ||
| 822 | 836 | ||
| 823 | if (elv_register_queue(q)) | 837 | if (elv_register_queue(q)) |
| 824 | goto fail_register; | 838 | goto fail_register; |
| @@ -837,7 +851,6 @@ fail_register: | |||
| 837 | */ | 851 | */ |
| 838 | elevator_exit(e); | 852 | elevator_exit(e); |
| 839 | e = NULL; | 853 | e = NULL; |
| 840 | fail: | ||
| 841 | q->elevator = old_elevator; | 854 | q->elevator = old_elevator; |
| 842 | elv_register_queue(q); | 855 | elv_register_queue(q); |
| 843 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 856 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
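
The payoff of that contract change is visible in elevator_switch() above: the new scheduler's private data is now created before the queue is bypassed, the attach under the queue lock is a pure pointer swap that cannot fail, and the old elevator's sysfs teardown (which may sleep) happens only after the lock is dropped. A condensed sketch of the resulting ordering, with the bypass flag and error paths elided:

    /* Not the verbatim kernel function -- a simplified ordering sketch. */
    static int elevator_switch_sketch(request_queue_t *q,
                                      struct elevator_type *new_e)
    {
            elevator_t *old_elevator, *e = elevator_alloc(new_e);
            void *data;

            if (!e)
                    return -ENOMEM;

            data = elevator_init_queue(q, e);   /* may sleep, no lock held */
            if (!data) {
                    kobject_put(&e->kobj);
                    return -ENOMEM;
            }

            spin_lock_irq(q->queue_lock);
            elv_drain_elevator(q);              /* flush old private data */
            old_elevator = q->elevator;
            elevator_attach(q, e, data);        /* cannot fail any more */
            spin_unlock_irq(q->queue_lock);

            __elv_unregister_queue(old_elevator);  /* sysfs, lock dropped */
            return elv_register_queue(q);
    }
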
diff --git a/block/noop-iosched.c b/block/noop-iosched.c index f370e4a7fe6d..56a7c620574f 100644 --- a/block/noop-iosched.c +++ b/block/noop-iosched.c | |||
| @@ -65,16 +65,15 @@ noop_latter_request(request_queue_t *q, struct request *rq) | |||
| 65 | return list_entry(rq->queuelist.next, struct request, queuelist); | 65 | return list_entry(rq->queuelist.next, struct request, queuelist); |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | static int noop_init_queue(request_queue_t *q, elevator_t *e) | 68 | static void *noop_init_queue(request_queue_t *q, elevator_t *e) |
| 69 | { | 69 | { |
| 70 | struct noop_data *nd; | 70 | struct noop_data *nd; |
| 71 | 71 | ||
| 72 | nd = kmalloc(sizeof(*nd), GFP_KERNEL); | 72 | nd = kmalloc(sizeof(*nd), GFP_KERNEL); |
| 73 | if (!nd) | 73 | if (!nd) |
| 74 | return -ENOMEM; | 74 | return NULL; |
| 75 | INIT_LIST_HEAD(&nd->queue); | 75 | INIT_LIST_HEAD(&nd->queue); |
| 76 | e->elevator_data = nd; | 76 | return nd; |
| 77 | return 0; | ||
| 78 | } | 77 | } |
| 79 | 78 | ||
| 80 | static void noop_exit_queue(elevator_t *e) | 79 | static void noop_exit_queue(elevator_t *e) |
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index abbdb37a7f5f..f36db22ce1ae 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
| @@ -577,6 +577,8 @@ acpi_processor_register_performance(struct acpi_processor_performance | |||
| 577 | return_VALUE(-EBUSY); | 577 | return_VALUE(-EBUSY); |
| 578 | } | 578 | } |
| 579 | 579 | ||
| 580 | WARN_ON(!performance); | ||
| 581 | |||
| 580 | pr->performance = performance; | 582 | pr->performance = performance; |
| 581 | 583 | ||
| 582 | if (acpi_processor_get_performance_info(pr)) { | 584 | if (acpi_processor_get_performance_info(pr)) { |
| @@ -609,7 +611,8 @@ acpi_processor_unregister_performance(struct acpi_processor_performance | |||
| 609 | return_VOID; | 611 | return_VOID; |
| 610 | } | 612 | } |
| 611 | 613 | ||
| 612 | kfree(pr->performance->states); | 614 | if (pr->performance) |
| 615 | kfree(pr->performance->states); | ||
| 613 | pr->performance = NULL; | 616 | pr->performance = NULL; |
| 614 | 617 | ||
| 615 | acpi_cpufreq_remove_file(pr); | 618 | acpi_cpufreq_remove_file(pr); |
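
The two ACPI hunks pair a loud registration-time check (WARN_ON on a NULL performance pointer) with a quiet unregister-time guard, so a buggy cpufreq driver gets one diagnostic instead of an oops later. The same defensive shape in plain C, with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for struct acpi_processor_performance. */
    struct perf {
            int *states;
    };

    static void unregister_perf(struct perf **slot)
    {
            if (*slot)              /* guard: slot may never have been set */
                    free((*slot)->states);
            *slot = NULL;
    }

    int main(void)
    {
            struct perf *p = NULL;

            unregister_perf(&p);    /* must be a harmless no-op */
            puts("ok");
            return 0;
    }
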
diff --git a/drivers/char/Makefile b/drivers/char/Makefile index f5b01c6d498e..fb919bfb2824 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile | |||
| @@ -41,9 +41,9 @@ obj-$(CONFIG_N_HDLC) += n_hdlc.o | |||
| 41 | obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o | 41 | obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o |
| 42 | obj-$(CONFIG_SX) += sx.o generic_serial.o | 42 | obj-$(CONFIG_SX) += sx.o generic_serial.o |
| 43 | obj-$(CONFIG_RIO) += rio/ generic_serial.o | 43 | obj-$(CONFIG_RIO) += rio/ generic_serial.o |
| 44 | obj-$(CONFIG_HVC_DRIVER) += hvc_console.o | ||
| 45 | obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o | 44 | obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o |
| 46 | obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o | 45 | obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o |
| 46 | obj-$(CONFIG_HVC_DRIVER) += hvc_console.o | ||
| 47 | obj-$(CONFIG_RAW_DRIVER) += raw.o | 47 | obj-$(CONFIG_RAW_DRIVER) += raw.o |
| 48 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o | 48 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o |
| 49 | obj-$(CONFIG_MMTIMER) += mmtimer.o | 49 | obj-$(CONFIG_MMTIMER) += mmtimer.o |
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index ede365d05387..b9371d5bf790 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c | |||
| @@ -1384,8 +1384,10 @@ do_it_again: | |||
| 1384 | * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode, | 1384 | * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode, |
| 1385 | * we won't get any more characters. | 1385 | * we won't get any more characters. |
| 1386 | */ | 1386 | */ |
| 1387 | if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) | 1387 | if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) { |
| 1388 | n_tty_set_room(tty); | ||
| 1388 | check_unthrottle(tty); | 1389 | check_unthrottle(tty); |
| 1390 | } | ||
| 1389 | 1391 | ||
| 1390 | if (b - buf >= minimum) | 1392 | if (b - buf >= minimum) |
| 1391 | break; | 1393 | break; |
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index f2a4d382ea19..3201de053943 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c | |||
| @@ -831,6 +831,7 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) | |||
| 831 | return rc; | 831 | return rc; |
| 832 | } | 832 | } |
| 833 | 833 | ||
| 834 | #ifdef CONFIG_PM | ||
| 834 | /* | 835 | /* |
| 835 | * spi module resume handler | 836 | * spi module resume handler |
| 836 | */ | 837 | */ |
| @@ -846,6 +847,7 @@ mptspi_resume(struct pci_dev *pdev) | |||
| 846 | 847 | ||
| 847 | return rc; | 848 | return rc; |
| 848 | } | 849 | } |
| 850 | #endif | ||
| 849 | 851 | ||
| 850 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 852 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
| 851 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 853 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c index 5ea133c59afb..7bd4d85d0b42 100644 --- a/drivers/message/i2o/exec-osm.c +++ b/drivers/message/i2o/exec-osm.c | |||
| @@ -55,6 +55,7 @@ struct i2o_exec_wait { | |||
| 55 | u32 m; /* message id */ | 55 | u32 m; /* message id */ |
| 56 | struct i2o_message *msg; /* pointer to the reply message */ | 56 | struct i2o_message *msg; /* pointer to the reply message */ |
| 57 | struct list_head list; /* node in global wait list */ | 57 | struct list_head list; /* node in global wait list */ |
| 58 | spinlock_t lock; /* lock before modifying */ | ||
| 58 | }; | 59 | }; |
| 59 | 60 | ||
| 60 | /* Work struct needed to handle LCT NOTIFY replies */ | 61 | /* Work struct needed to handle LCT NOTIFY replies */ |
| @@ -87,6 +88,7 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void) | |||
| 87 | return NULL; | 88 | return NULL; |
| 88 | 89 | ||
| 89 | INIT_LIST_HEAD(&wait->list); | 90 | INIT_LIST_HEAD(&wait->list); |
| 91 | spin_lock_init(&wait->lock); | ||
| 90 | 92 | ||
| 91 | return wait; | 93 | return wait; |
| 92 | }; | 94 | }; |
| @@ -125,6 +127,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg, | |||
| 125 | DECLARE_WAIT_QUEUE_HEAD(wq); | 127 | DECLARE_WAIT_QUEUE_HEAD(wq); |
| 126 | struct i2o_exec_wait *wait; | 128 | struct i2o_exec_wait *wait; |
| 127 | static u32 tcntxt = 0x80000000; | 129 | static u32 tcntxt = 0x80000000; |
| 130 | unsigned long flags; | ||
| 128 | int rc = 0; | 131 | int rc = 0; |
| 129 | 132 | ||
| 130 | wait = i2o_exec_wait_alloc(); | 133 | wait = i2o_exec_wait_alloc(); |
| @@ -146,33 +149,28 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg, | |||
| 146 | wait->tcntxt = tcntxt++; | 149 | wait->tcntxt = tcntxt++; |
| 147 | msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt); | 150 | msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt); |
| 148 | 151 | ||
| 152 | wait->wq = &wq; | ||
| 153 | /* | ||
| 154 | * we add elements to the head, because if an entry in the list will | ||
| 155 | * never be removed, we have to iterate over it every time | ||
| 156 | */ | ||
| 157 | list_add(&wait->list, &i2o_exec_wait_list); | ||
| 158 | |||
| 149 | /* | 159 | /* |
| 150 | * Post the message to the controller. At some point later it will | 160 | * Post the message to the controller. At some point later it will |
| 151 | * return. If we time out before it returns then complete will be zero. | 161 | * return. If we time out before it returns then complete will be zero. |
| 152 | */ | 162 | */ |
| 153 | i2o_msg_post(c, msg); | 163 | i2o_msg_post(c, msg); |
| 154 | 164 | ||
| 155 | if (!wait->complete) { | 165 | wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ); |
| 156 | wait->wq = &wq; | ||
| 157 | /* | ||
| 158 | * we add elements add the head, because if a entry in the list | ||
| 159 | * will never be removed, we have to iterate over it every time | ||
| 160 | */ | ||
| 161 | list_add(&wait->list, &i2o_exec_wait_list); | ||
| 162 | |||
| 163 | wait_event_interruptible_timeout(wq, wait->complete, | ||
| 164 | timeout * HZ); | ||
| 165 | 166 | ||
| 166 | wait->wq = NULL; | 167 | spin_lock_irqsave(&wait->lock, flags); |
| 167 | } | ||
| 168 | 168 | ||
| 169 | barrier(); | 169 | wait->wq = NULL; |
| 170 | 170 | ||
| 171 | if (wait->complete) { | 171 | if (wait->complete) |
| 172 | rc = le32_to_cpu(wait->msg->body[0]) >> 24; | 172 | rc = le32_to_cpu(wait->msg->body[0]) >> 24; |
| 173 | i2o_flush_reply(c, wait->m); | 173 | else { |
| 174 | i2o_exec_wait_free(wait); | ||
| 175 | } else { | ||
| 176 | /* | 174 | /* |
| 177 | * We cannot remove it now. This is important. When it does | 175 | * We cannot remove it now. This is important. When it does |
| 178 | * terminate (which it must do if the controller has not | 176 | * terminate (which it must do if the controller has not |
| @@ -186,6 +184,13 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg, | |||
| 186 | rc = -ETIMEDOUT; | 184 | rc = -ETIMEDOUT; |
| 187 | } | 185 | } |
| 188 | 186 | ||
| 187 | spin_unlock_irqrestore(&wait->lock, flags); | ||
| 188 | |||
| 189 | if (rc != -ETIMEDOUT) { | ||
| 190 | i2o_flush_reply(c, wait->m); | ||
| 191 | i2o_exec_wait_free(wait); | ||
| 192 | } | ||
| 193 | |||
| 189 | return rc; | 194 | return rc; |
| 190 | }; | 195 | }; |
| 191 | 196 | ||
| @@ -213,7 +218,6 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, | |||
| 213 | { | 218 | { |
| 214 | struct i2o_exec_wait *wait, *tmp; | 219 | struct i2o_exec_wait *wait, *tmp; |
| 215 | unsigned long flags; | 220 | unsigned long flags; |
| 216 | static spinlock_t lock = SPIN_LOCK_UNLOCKED; | ||
| 217 | int rc = 1; | 221 | int rc = 1; |
| 218 | 222 | ||
| 219 | /* | 223 | /* |
| @@ -223,23 +227,24 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, | |||
| 223 | * already expired. Not much we can do about that except log it for | 227 | * already expired. Not much we can do about that except log it for |
| 224 | * debug purposes, increase timeout, and recompile. | 228 | * debug purposes, increase timeout, and recompile. |
| 225 | */ | 229 | */ |
| 226 | spin_lock_irqsave(&lock, flags); | ||
| 227 | list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) { | 230 | list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) { |
| 228 | if (wait->tcntxt == context) { | 231 | if (wait->tcntxt == context) { |
| 229 | list_del(&wait->list); | 232 | spin_lock_irqsave(&wait->lock, flags); |
| 230 | 233 | ||
| 231 | spin_unlock_irqrestore(&lock, flags); | 234 | list_del(&wait->list); |
| 232 | 235 | ||
| 233 | wait->m = m; | 236 | wait->m = m; |
| 234 | wait->msg = msg; | 237 | wait->msg = msg; |
| 235 | wait->complete = 1; | 238 | wait->complete = 1; |
| 236 | 239 | ||
| 237 | barrier(); | 240 | if (wait->wq) |
| 238 | |||
| 239 | if (wait->wq) { | ||
| 240 | wake_up_interruptible(wait->wq); | ||
| 241 | rc = 0; | 241 | rc = 0; |
| 242 | } else { | 242 | else |
| 243 | rc = -1; | ||
| 244 | |||
| 245 | spin_unlock_irqrestore(&wait->lock, flags); | ||
| 246 | |||
| 247 | if (rc) { | ||
| 243 | struct device *dev; | 248 | struct device *dev; |
| 244 | 249 | ||
| 245 | dev = &c->pdev->dev; | 250 | dev = &c->pdev->dev; |
| @@ -248,15 +253,13 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, | |||
| 248 | c->name); | 253 | c->name); |
| 249 | i2o_dma_free(dev, &wait->dma); | 254 | i2o_dma_free(dev, &wait->dma); |
| 250 | i2o_exec_wait_free(wait); | 255 | i2o_exec_wait_free(wait); |
| 251 | rc = -1; | 256 | } else |
| 252 | } | 257 | wake_up_interruptible(wait->wq); |
| 253 | 258 | ||
| 254 | return rc; | 259 | return rc; |
| 255 | } | 260 | } |
| 256 | } | 261 | } |
| 257 | 262 | ||
| 258 | spin_unlock_irqrestore(&lock, flags); | ||
| 259 | |||
| 260 | osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name, | 263 | osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name, |
| 261 | context); | 264 | context); |
| 262 | 265 | ||
| @@ -322,14 +325,9 @@ static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL); | |||
| 322 | static int i2o_exec_probe(struct device *dev) | 325 | static int i2o_exec_probe(struct device *dev) |
| 323 | { | 326 | { |
| 324 | struct i2o_device *i2o_dev = to_i2o_device(dev); | 327 | struct i2o_device *i2o_dev = to_i2o_device(dev); |
| 325 | struct i2o_controller *c = i2o_dev->iop; | ||
| 326 | 328 | ||
| 327 | i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff); | 329 | i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff); |
| 328 | 330 | ||
| 329 | c->exec = i2o_dev; | ||
| 330 | |||
| 331 | i2o_exec_lct_notify(c, c->lct->change_ind + 1); | ||
| 332 | |||
| 333 | device_create_file(dev, &dev_attr_vendor_id); | 331 | device_create_file(dev, &dev_attr_vendor_id); |
| 334 | device_create_file(dev, &dev_attr_product_id); | 332 | device_create_file(dev, &dev_attr_product_id); |
| 335 | 333 | ||
| @@ -523,6 +521,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind) | |||
| 523 | struct device *dev; | 521 | struct device *dev; |
| 524 | struct i2o_message *msg; | 522 | struct i2o_message *msg; |
| 525 | 523 | ||
| 524 | down(&c->lct_lock); | ||
| 525 | |||
| 526 | dev = &c->pdev->dev; | 526 | dev = &c->pdev->dev; |
| 527 | 527 | ||
| 528 | if (i2o_dma_realloc | 528 | if (i2o_dma_realloc |
| @@ -545,6 +545,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind) | |||
| 545 | 545 | ||
| 546 | i2o_msg_post(c, msg); | 546 | i2o_msg_post(c, msg); |
| 547 | 547 | ||
| 548 | up(&c->lct_lock); | ||
| 549 | |||
| 548 | return 0; | 550 | return 0; |
| 549 | }; | 551 | }; |
| 550 | 552 | ||
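
The exec-osm rework replaces a single driver-global spinlock plus barrier() games with a per-wait lock, so the waiter's "I am giving up" transition and the reply handler's "I am completing you" transition are serialized on the object they actually race over. A simplified model of the two sides; the types and the cleanup helper are stand-ins, not the real i2o structures:

    /* Sketch, kernel-style: wait_obj models struct i2o_exec_wait. */
    struct wait_obj {
            spinlock_t lock;
            wait_queue_head_t *wq;  /* NULL once the waiter has given up */
            int complete;
    };

    /* Waiter, after wait_event_interruptible_timeout() returns. */
    static int waiter_check(struct wait_obj *w)
    {
            unsigned long flags;
            int done;

            spin_lock_irqsave(&w->lock, flags);
            w->wq = NULL;           /* completer may no longer wake us */
            done = w->complete;
            spin_unlock_irqrestore(&w->lock, flags);
            return done;            /* on timeout, completer owns cleanup */
    }

    /* Reply handler, when the matching context is found. */
    static void completer(struct wait_obj *w)
    {
            unsigned long flags;
            int orphaned;

            spin_lock_irqsave(&w->lock, flags);
            w->complete = 1;
            orphaned = (w->wq == NULL);
            if (!orphaned)
                    wake_up_interruptible(w->wq);
            spin_unlock_irqrestore(&w->lock, flags);

            if (orphaned)
                    cleanup_wait(w);        /* hypothetical: free the orphan */
    }
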
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c index 492167446936..febbdd4e0605 100644 --- a/drivers/message/i2o/iop.c +++ b/drivers/message/i2o/iop.c | |||
| @@ -804,8 +804,6 @@ void i2o_iop_remove(struct i2o_controller *c) | |||
| 804 | 804 | ||
| 805 | /* Ask the IOP to switch to RESET state */ | 805 | /* Ask the IOP to switch to RESET state */ |
| 806 | i2o_iop_reset(c); | 806 | i2o_iop_reset(c); |
| 807 | |||
| 808 | put_device(&c->device); | ||
| 809 | } | 807 | } |
| 810 | 808 | ||
| 811 | /** | 809 | /** |
| @@ -1059,7 +1057,7 @@ struct i2o_controller *i2o_iop_alloc(void) | |||
| 1059 | 1057 | ||
| 1060 | snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name); | 1058 | snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name); |
| 1061 | if (i2o_pool_alloc | 1059 | if (i2o_pool_alloc |
| 1062 | (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4, | 1060 | (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32), |
| 1063 | I2O_MSG_INPOOL_MIN)) { | 1061 | I2O_MSG_INPOOL_MIN)) { |
| 1064 | kfree(c); | 1062 | kfree(c); |
| 1065 | return ERR_PTR(-ENOMEM); | 1063 | return ERR_PTR(-ENOMEM); |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 959109609d85..97fe95666f3b 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
| @@ -187,12 +187,11 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) | |||
| 187 | return v; | 187 | return v; |
| 188 | } | 188 | } |
| 189 | 189 | ||
| 190 | static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | 190 | static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) |
| 191 | { | 191 | { |
| 192 | u16 power_control; | 192 | u16 power_control; |
| 193 | u32 reg1; | 193 | u32 reg1; |
| 194 | int vaux; | 194 | int vaux; |
| 195 | int ret = 0; | ||
| 196 | 195 | ||
| 197 | pr_debug("sky2_set_power_state %d\n", state); | 196 | pr_debug("sky2_set_power_state %d\n", state); |
| 198 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | 197 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); |
| @@ -275,12 +274,10 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
| 275 | break; | 274 | break; |
| 276 | default: | 275 | default: |
| 277 | printk(KERN_ERR PFX "Unknown power state %d\n", state); | 276 | printk(KERN_ERR PFX "Unknown power state %d\n", state); |
| 278 | ret = -1; | ||
| 279 | } | 277 | } |
| 280 | 278 | ||
| 281 | sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control); | 279 | sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control); |
| 282 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | 280 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); |
| 283 | return ret; | ||
| 284 | } | 281 | } |
| 285 | 282 | ||
| 286 | static void sky2_phy_reset(struct sky2_hw *hw, unsigned port) | 283 | static void sky2_phy_reset(struct sky2_hw *hw, unsigned port) |
| @@ -2164,6 +2161,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port, | |||
| 2164 | /* If idle then force a fake soft NAPI poll once a second | 2161 | /* If idle then force a fake soft NAPI poll once a second |
| 2165 | * to work around cases where an edge-triggered interrupt is shared. | 2162 | * to work around cases where an edge-triggered interrupt is shared. |
| 2166 | */ | 2163 | */ |
| 2164 | static inline void sky2_idle_start(struct sky2_hw *hw) | ||
| 2165 | { | ||
| 2166 | if (idle_timeout > 0) | ||
| 2167 | mod_timer(&hw->idle_timer, | ||
| 2168 | jiffies + msecs_to_jiffies(idle_timeout)); | ||
| 2169 | } | ||
| 2170 | |||
| 2167 | static void sky2_idle(unsigned long arg) | 2171 | static void sky2_idle(unsigned long arg) |
| 2168 | { | 2172 | { |
| 2169 | struct sky2_hw *hw = (struct sky2_hw *) arg; | 2173 | struct sky2_hw *hw = (struct sky2_hw *) arg; |
| @@ -2183,6 +2187,9 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
| 2183 | int work_done = 0; | 2187 | int work_done = 0; |
| 2184 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); | 2188 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); |
| 2185 | 2189 | ||
| 2190 | if (!~status) | ||
| 2191 | goto out; | ||
| 2192 | |||
| 2186 | if (status & Y2_IS_HW_ERR) | 2193 | if (status & Y2_IS_HW_ERR) |
| 2187 | sky2_hw_intr(hw); | 2194 | sky2_hw_intr(hw); |
| 2188 | 2195 | ||
| @@ -2219,7 +2226,7 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
| 2219 | 2226 | ||
| 2220 | if (sky2_more_work(hw)) | 2227 | if (sky2_more_work(hw)) |
| 2221 | return 1; | 2228 | return 1; |
| 2222 | 2229 | out: | |
| 2223 | netif_rx_complete(dev0); | 2230 | netif_rx_complete(dev0); |
| 2224 | 2231 | ||
| 2225 | sky2_read32(hw, B0_Y2_SP_LISR); | 2232 | sky2_read32(hw, B0_Y2_SP_LISR); |
| @@ -3350,9 +3357,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
| 3350 | sky2_write32(hw, B0_IMSK, Y2_IS_BASE); | 3357 | sky2_write32(hw, B0_IMSK, Y2_IS_BASE); |
| 3351 | 3358 | ||
| 3352 | setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw); | 3359 | setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw); |
| 3353 | if (idle_timeout > 0) | 3360 | sky2_idle_start(hw); |
| 3354 | mod_timer(&hw->idle_timer, | ||
| 3355 | jiffies + msecs_to_jiffies(idle_timeout)); | ||
| 3356 | 3361 | ||
| 3357 | pci_set_drvdata(pdev, hw); | 3362 | pci_set_drvdata(pdev, hw); |
| 3358 | 3363 | ||
| @@ -3425,8 +3430,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 3425 | { | 3430 | { |
| 3426 | struct sky2_hw *hw = pci_get_drvdata(pdev); | 3431 | struct sky2_hw *hw = pci_get_drvdata(pdev); |
| 3427 | int i; | 3432 | int i; |
| 3433 | pci_power_t pstate = pci_choose_state(pdev, state); | ||
| 3434 | |||
| 3435 | if (!(pstate == PCI_D3hot || pstate == PCI_D3cold)) | ||
| 3436 | return -EINVAL; | ||
| 3437 | |||
| 3438 | del_timer_sync(&hw->idle_timer); | ||
| 3428 | 3439 | ||
| 3429 | for (i = 0; i < 2; i++) { | 3440 | for (i = 0; i < hw->ports; i++) { |
| 3430 | struct net_device *dev = hw->dev[i]; | 3441 | struct net_device *dev = hw->dev[i]; |
| 3431 | 3442 | ||
| 3432 | if (dev) { | 3443 | if (dev) { |
| @@ -3438,7 +3449,10 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 3438 | } | 3449 | } |
| 3439 | } | 3450 | } |
| 3440 | 3451 | ||
| 3441 | return sky2_set_power_state(hw, pci_choose_state(pdev, state)); | 3452 | sky2_write32(hw, B0_IMSK, 0); |
| 3453 | pci_save_state(pdev); | ||
| 3454 | sky2_set_power_state(hw, pstate); | ||
| 3455 | return 0; | ||
| 3442 | } | 3456 | } |
| 3443 | 3457 | ||
| 3444 | static int sky2_resume(struct pci_dev *pdev) | 3458 | static int sky2_resume(struct pci_dev *pdev) |
| @@ -3448,15 +3462,15 @@ static int sky2_resume(struct pci_dev *pdev) | |||
| 3448 | 3462 | ||
| 3449 | pci_restore_state(pdev); | 3463 | pci_restore_state(pdev); |
| 3450 | pci_enable_wake(pdev, PCI_D0, 0); | 3464 | pci_enable_wake(pdev, PCI_D0, 0); |
| 3451 | err = sky2_set_power_state(hw, PCI_D0); | 3465 | sky2_set_power_state(hw, PCI_D0); |
| 3452 | if (err) | ||
| 3453 | goto out; | ||
| 3454 | 3466 | ||
| 3455 | err = sky2_reset(hw); | 3467 | err = sky2_reset(hw); |
| 3456 | if (err) | 3468 | if (err) |
| 3457 | goto out; | 3469 | goto out; |
| 3458 | 3470 | ||
| 3459 | for (i = 0; i < 2; i++) { | 3471 | sky2_write32(hw, B0_IMSK, Y2_IS_BASE); |
| 3472 | |||
| 3473 | for (i = 0; i < hw->ports; i++) { | ||
| 3460 | struct net_device *dev = hw->dev[i]; | 3474 | struct net_device *dev = hw->dev[i]; |
| 3461 | if (dev && netif_running(dev)) { | 3475 | if (dev && netif_running(dev)) { |
| 3462 | netif_device_attach(dev); | 3476 | netif_device_attach(dev); |
| @@ -3465,10 +3479,12 @@ static int sky2_resume(struct pci_dev *pdev) | |||
| 3465 | printk(KERN_ERR PFX "%s: could not up: %d\n", | 3479 | printk(KERN_ERR PFX "%s: could not up: %d\n", |
| 3466 | dev->name, err); | 3480 | dev->name, err); |
| 3467 | dev_close(dev); | 3481 | dev_close(dev); |
| 3468 | break; | 3482 | goto out; |
| 3469 | } | 3483 | } |
| 3470 | } | 3484 | } |
| 3471 | } | 3485 | } |
| 3486 | |||
| 3487 | sky2_idle_start(hw); | ||
| 3472 | out: | 3488 | out: |
| 3473 | return err; | 3489 | return err; |
| 3474 | } | 3490 | } |
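
Taken together, the sky2 power-management hunks impose a strict quiesce order on suspend: reject anything other than D3 up front, kill the idle-poll timer (which could otherwise rearm NAPI mid-suspend), bring down only the ports that exist, mask the interrupt source, and save config space before the power-state write. A condensed sketch of that order, assuming the driver's own helpers:

    /* Ordering sketch of the reworked sky2_suspend(); details elided. */
    static int sky2_suspend_sketch(struct pci_dev *pdev, pm_message_t state)
    {
            struct sky2_hw *hw = pci_get_drvdata(pdev);
            pci_power_t pstate = pci_choose_state(pdev, state);
            int i;

            if (pstate != PCI_D3hot && pstate != PCI_D3cold)
                    return -EINVAL;                 /* only D3 is supported */

            del_timer_sync(&hw->idle_timer);        /* stop the fake poll */

            for (i = 0; i < hw->ports; i++) {       /* real count, not "2" */
                    /* ... stop and detach hw->dev[i] if running ... */
            }

            sky2_write32(hw, B0_IMSK, 0);           /* mask chip interrupts */
            pci_save_state(pdev);                   /* snapshot, then... */
            sky2_set_power_state(hw, pstate);       /* ...cut the power */
            return 0;
    }

Resume mirrors it: restore state, power up, reset, unmask, re-open the ports, and only then restart the idle timer via sky2_idle_start().
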
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 49ad60b72657..862c226dbbe2 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
| @@ -69,8 +69,8 @@ | |||
| 69 | 69 | ||
| 70 | #define DRV_MODULE_NAME "tg3" | 70 | #define DRV_MODULE_NAME "tg3" |
| 71 | #define PFX DRV_MODULE_NAME ": " | 71 | #define PFX DRV_MODULE_NAME ": " |
| 72 | #define DRV_MODULE_VERSION "3.58" | 72 | #define DRV_MODULE_VERSION "3.59" |
| 73 | #define DRV_MODULE_RELDATE "May 22, 2006" | 73 | #define DRV_MODULE_RELDATE "June 8, 2006" |
| 74 | 74 | ||
| 75 | #define TG3_DEF_MAC_MODE 0 | 75 | #define TG3_DEF_MAC_MODE 0 |
| 76 | #define TG3_DEF_RX_MODE 0 | 76 | #define TG3_DEF_RX_MODE 0 |
| @@ -4485,9 +4485,8 @@ static void tg3_disable_nvram_access(struct tg3 *tp) | |||
| 4485 | /* tp->lock is held. */ | 4485 | /* tp->lock is held. */ |
| 4486 | static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) | 4486 | static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) |
| 4487 | { | 4487 | { |
| 4488 | if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) | 4488 | tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, |
| 4489 | tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, | 4489 | NIC_SRAM_FIRMWARE_MBOX_MAGIC1); |
| 4490 | NIC_SRAM_FIRMWARE_MBOX_MAGIC1); | ||
| 4491 | 4490 | ||
| 4492 | if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { | 4491 | if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { |
| 4493 | switch (kind) { | 4492 | switch (kind) { |
| @@ -4568,13 +4567,12 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
| 4568 | void (*write_op)(struct tg3 *, u32, u32); | 4567 | void (*write_op)(struct tg3 *, u32, u32); |
| 4569 | int i; | 4568 | int i; |
| 4570 | 4569 | ||
| 4571 | if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) { | 4570 | tg3_nvram_lock(tp); |
| 4572 | tg3_nvram_lock(tp); | 4571 | |
| 4573 | /* No matching tg3_nvram_unlock() after this because | 4572 | /* No matching tg3_nvram_unlock() after this because |
| 4574 | * chip reset below will undo the nvram lock. | 4573 | * chip reset below will undo the nvram lock. |
| 4575 | */ | 4574 | */ |
| 4576 | tp->nvram_lock_cnt = 0; | 4575 | tp->nvram_lock_cnt = 0; |
| 4577 | } | ||
| 4578 | 4576 | ||
| 4579 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || | 4577 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || |
| 4580 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 4578 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
| @@ -4727,20 +4725,25 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
| 4727 | tw32_f(MAC_MODE, 0); | 4725 | tw32_f(MAC_MODE, 0); |
| 4728 | udelay(40); | 4726 | udelay(40); |
| 4729 | 4727 | ||
| 4730 | if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) { | 4728 | /* Wait for firmware initialization to complete. */ |
| 4731 | /* Wait for firmware initialization to complete. */ | 4729 | for (i = 0; i < 100000; i++) { |
| 4732 | for (i = 0; i < 100000; i++) { | 4730 | tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); |
| 4733 | tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); | 4731 | if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) |
| 4734 | if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) | 4732 | break; |
| 4735 | break; | 4733 | udelay(10); |
| 4736 | udelay(10); | 4734 | } |
| 4737 | } | 4735 | |
| 4738 | if (i >= 100000) { | 4736 | /* Chip might not be fitted with firmware. Some Sun onboard |
| 4739 | printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, " | 4737 | * parts are configured like that. So don't signal the timeout |
| 4740 | "firmware will not restart magic=%08x\n", | 4738 | * of the above loop as an error, but do report the lack of |
| 4741 | tp->dev->name, val); | 4739 | * running firmware once. |
| 4742 | return -ENODEV; | 4740 | */ |
| 4743 | } | 4741 | if (i >= 100000 && |
| 4742 | !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { | ||
| 4743 | tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; | ||
| 4744 | |||
| 4745 | printk(KERN_INFO PFX "%s: No firmware running.\n", | ||
| 4746 | tp->dev->name); | ||
| 4744 | } | 4747 | } |
| 4745 | 4748 | ||
| 4746 | if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && | 4749 | if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && |
| @@ -9075,9 +9078,6 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) | |||
| 9075 | { | 9078 | { |
| 9076 | int j; | 9079 | int j; |
| 9077 | 9080 | ||
| 9078 | if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) | ||
| 9079 | return; | ||
| 9080 | |||
| 9081 | tw32_f(GRC_EEPROM_ADDR, | 9081 | tw32_f(GRC_EEPROM_ADDR, |
| 9082 | (EEPROM_ADDR_FSM_RESET | | 9082 | (EEPROM_ADDR_FSM_RESET | |
| 9083 | (EEPROM_DEFAULT_CLOCK_PERIOD << | 9083 | (EEPROM_DEFAULT_CLOCK_PERIOD << |
| @@ -9210,11 +9210,6 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) | |||
| 9210 | { | 9210 | { |
| 9211 | int ret; | 9211 | int ret; |
| 9212 | 9212 | ||
| 9213 | if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) { | ||
| 9214 | printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n"); | ||
| 9215 | return -EINVAL; | ||
| 9216 | } | ||
| 9217 | |||
| 9218 | if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) | 9213 | if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) |
| 9219 | return tg3_nvram_read_using_eeprom(tp, offset, val); | 9214 | return tg3_nvram_read_using_eeprom(tp, offset, val); |
| 9220 | 9215 | ||
| @@ -9447,11 +9442,6 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) | |||
| 9447 | { | 9442 | { |
| 9448 | int ret; | 9443 | int ret; |
| 9449 | 9444 | ||
| 9450 | if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) { | ||
| 9451 | printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n"); | ||
| 9452 | return -EINVAL; | ||
| 9453 | } | ||
| 9454 | |||
| 9455 | if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { | 9445 | if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { |
| 9456 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & | 9446 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & |
| 9457 | ~GRC_LCLCTRL_GPIO_OUTPUT1); | 9447 | ~GRC_LCLCTRL_GPIO_OUTPUT1); |
| @@ -9578,15 +9568,19 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | |||
| 9578 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, | 9568 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, |
| 9579 | tp->misc_host_ctrl); | 9569 | tp->misc_host_ctrl); |
| 9580 | 9570 | ||
| 9571 | /* The memory arbiter has to be enabled in order for SRAM accesses | ||
| 9572 | * to succeed. Normally on powerup the tg3 chip firmware will make | ||
| 9573 | * sure it is enabled, but other entities such as system netboot | ||
| 9574 | * code might disable it. | ||
| 9575 | */ | ||
| 9576 | val = tr32(MEMARB_MODE); | ||
| 9577 | tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); | ||
| 9578 | |||
| 9581 | tp->phy_id = PHY_ID_INVALID; | 9579 | tp->phy_id = PHY_ID_INVALID; |
| 9582 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 9580 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; |
| 9583 | 9581 | ||
| 9584 | /* Do not even try poking around in here on Sun parts. */ | 9582 | /* Assume an onboard device by default. */ |
| 9585 | if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) { | 9583 | tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; |
| 9586 | /* All SUN chips are built-in LOMs. */ | ||
| 9587 | tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; | ||
| 9588 | return; | ||
| 9589 | } | ||
| 9590 | 9584 | ||
| 9591 | tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); | 9585 | tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); |
| 9592 | if (val == NIC_SRAM_DATA_SIG_MAGIC) { | 9586 | if (val == NIC_SRAM_DATA_SIG_MAGIC) { |
| @@ -9686,6 +9680,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | |||
| 9686 | 9680 | ||
| 9687 | if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) | 9681 | if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) |
| 9688 | tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; | 9682 | tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; |
| 9683 | else | ||
| 9684 | tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; | ||
| 9689 | 9685 | ||
| 9690 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { | 9686 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { |
| 9691 | tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; | 9687 | tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; |
| @@ -9834,16 +9830,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp) | |||
| 9834 | int i; | 9830 | int i; |
| 9835 | u32 magic; | 9831 | u32 magic; |
| 9836 | 9832 | ||
| 9837 | if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) { | ||
| 9838 | /* Sun decided not to put the necessary bits in the | ||
| 9839 | * NVRAM of their onboard tg3 parts :( | ||
| 9840 | */ | ||
| 9841 | strcpy(tp->board_part_number, "Sun 570X"); | ||
| 9842 | return; | ||
| 9843 | } | ||
| 9844 | |||
| 9845 | if (tg3_nvram_read_swab(tp, 0x0, &magic)) | 9833 | if (tg3_nvram_read_swab(tp, 0x0, &magic)) |
| 9846 | return; | 9834 | goto out_not_found; |
| 9847 | 9835 | ||
| 9848 | if (magic == TG3_EEPROM_MAGIC) { | 9836 | if (magic == TG3_EEPROM_MAGIC) { |
| 9849 | for (i = 0; i < 256; i += 4) { | 9837 | for (i = 0; i < 256; i += 4) { |
| @@ -9874,6 +9862,9 @@ static void __devinit tg3_read_partno(struct tg3 *tp) | |||
| 9874 | break; | 9862 | break; |
| 9875 | msleep(1); | 9863 | msleep(1); |
| 9876 | } | 9864 | } |
| 9865 | if (!(tmp16 & 0x8000)) | ||
| 9866 | goto out_not_found; | ||
| 9867 | |||
| 9877 | pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, | 9868 | pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, |
| 9878 | &tmp); | 9869 | &tmp); |
| 9879 | tmp = cpu_to_le32(tmp); | 9870 | tmp = cpu_to_le32(tmp); |
| @@ -9965,37 +9956,6 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp) | |||
| 9965 | } | 9956 | } |
| 9966 | } | 9957 | } |
| 9967 | 9958 | ||
| 9968 | #ifdef CONFIG_SPARC64 | ||
| 9969 | static int __devinit tg3_is_sun_570X(struct tg3 *tp) | ||
| 9970 | { | ||
| 9971 | struct pci_dev *pdev = tp->pdev; | ||
| 9972 | struct pcidev_cookie *pcp = pdev->sysdata; | ||
| 9973 | |||
| 9974 | if (pcp != NULL) { | ||
| 9975 | int node = pcp->prom_node; | ||
| 9976 | u32 venid; | ||
| 9977 | int err; | ||
| 9978 | |||
| 9979 | err = prom_getproperty(node, "subsystem-vendor-id", | ||
| 9980 | (char *) &venid, sizeof(venid)); | ||
| 9981 | if (err == 0 || err == -1) | ||
| 9982 | return 0; | ||
| 9983 | if (venid == PCI_VENDOR_ID_SUN) | ||
| 9984 | return 1; | ||
| 9985 | |||
| 9986 | /* TG3 chips onboard the SunBlade-2500 don't have the | ||
| 9987 | * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they | ||
| 9988 | * are distinguishable from non-Sun variants by being | ||
| 9989 | * named "network" by the firmware. Non-Sun cards will | ||
| 9990 | * show up as being named "ethernet". | ||
| 9991 | */ | ||
| 9992 | if (!strcmp(pcp->prom_name, "network")) | ||
| 9993 | return 1; | ||
| 9994 | } | ||
| 9995 | return 0; | ||
| 9996 | } | ||
| 9997 | #endif | ||
| 9998 | |||
| 9999 | static int __devinit tg3_get_invariants(struct tg3 *tp) | 9959 | static int __devinit tg3_get_invariants(struct tg3 *tp) |
| 10000 | { | 9960 | { |
| 10001 | static struct pci_device_id write_reorder_chipsets[] = { | 9961 | static struct pci_device_id write_reorder_chipsets[] = { |
| @@ -10012,11 +9972,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
| 10012 | u16 pci_cmd; | 9972 | u16 pci_cmd; |
| 10013 | int err; | 9973 | int err; |
| 10014 | 9974 | ||
| 10015 | #ifdef CONFIG_SPARC64 | ||
| 10016 | if (tg3_is_sun_570X(tp)) | ||
| 10017 | tp->tg3_flags2 |= TG3_FLG2_SUN_570X; | ||
| 10018 | #endif | ||
| 10019 | |||
| 10020 | /* Force memory write invalidate off. If we leave it on, | 9975 | /* Force memory write invalidate off. If we leave it on, |
| 10021 | * then on 5700_BX chips we have to enable a workaround. | 9976 | * then on 5700_BX chips we have to enable a workaround. |
| 10022 | * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary | 9977 | * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary |
| @@ -10312,8 +10267,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
| 10312 | if (tp->write32 == tg3_write_indirect_reg32 || | 10267 | if (tp->write32 == tg3_write_indirect_reg32 || |
| 10313 | ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && | 10268 | ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && |
| 10314 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 10269 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
| 10315 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) || | 10270 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) |
| 10316 | (tp->tg3_flags2 & TG3_FLG2_SUN_570X)) | ||
| 10317 | tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; | 10271 | tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; |
| 10318 | 10272 | ||
| 10319 | /* Get eeprom hw config before calling tg3_set_power_state(). | 10273 | /* Get eeprom hw config before calling tg3_set_power_state(). |
| @@ -10594,8 +10548,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) | |||
| 10594 | #endif | 10548 | #endif |
| 10595 | 10549 | ||
| 10596 | mac_offset = 0x7c; | 10550 | mac_offset = 0x7c; |
| 10597 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && | 10551 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || |
| 10598 | !(tp->tg3_flags & TG3_FLG2_SUN_570X)) || | ||
| 10599 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 10552 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { |
| 10600 | if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) | 10553 | if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) |
| 10601 | mac_offset = 0xcc; | 10554 | mac_offset = 0xcc; |
| @@ -10622,8 +10575,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) | |||
| 10622 | } | 10575 | } |
| 10623 | if (!addr_ok) { | 10576 | if (!addr_ok) { |
| 10624 | /* Next, try NVRAM. */ | 10577 | /* Next, try NVRAM. */ |
| 10625 | if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) && | 10578 | if (!tg3_nvram_read(tp, mac_offset + 0, &hi) && |
| 10626 | !tg3_nvram_read(tp, mac_offset + 0, &hi) && | ||
| 10627 | !tg3_nvram_read(tp, mac_offset + 4, &lo)) { | 10579 | !tg3_nvram_read(tp, mac_offset + 4, &lo)) { |
| 10628 | dev->dev_addr[0] = ((hi >> 16) & 0xff); | 10580 | dev->dev_addr[0] = ((hi >> 16) & 0xff); |
| 10629 | dev->dev_addr[1] = ((hi >> 24) & 0xff); | 10581 | dev->dev_addr[1] = ((hi >> 24) & 0xff); |
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 0e29b885d449..ff0faab94bd5 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
| @@ -2184,7 +2184,7 @@ struct tg3 { | |||
| 2184 | #define TG3_FLAG_INIT_COMPLETE 0x80000000 | 2184 | #define TG3_FLAG_INIT_COMPLETE 0x80000000 |
| 2185 | u32 tg3_flags2; | 2185 | u32 tg3_flags2; |
| 2186 | #define TG3_FLG2_RESTART_TIMER 0x00000001 | 2186 | #define TG3_FLG2_RESTART_TIMER 0x00000001 |
| 2187 | #define TG3_FLG2_SUN_570X 0x00000002 | 2187 | /* 0x00000002 available */ |
| 2188 | #define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004 | 2188 | #define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004 |
| 2189 | #define TG3_FLG2_IS_5788 0x00000008 | 2189 | #define TG3_FLG2_IS_5788 0x00000008 |
| 2190 | #define TG3_FLG2_MAX_RXPEND_64 0x00000010 | 2190 | #define TG3_FLG2_MAX_RXPEND_64 0x00000010 |
| @@ -2216,6 +2216,7 @@ struct tg3 { | |||
| 2216 | #define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) | 2216 | #define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) |
| 2217 | #define TG3_FLG2_1SHOT_MSI 0x10000000 | 2217 | #define TG3_FLG2_1SHOT_MSI 0x10000000 |
| 2218 | #define TG3_FLG2_PHY_JITTER_BUG 0x20000000 | 2218 | #define TG3_FLG2_PHY_JITTER_BUG 0x20000000 |
| 2219 | #define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 | ||
| 2219 | 2220 | ||
| 2220 | u32 split_mode_max_reqs; | 2221 | u32 split_mode_max_reqs; |
| 2221 | #define SPLIT_MODE_5704_MAX_REQ 3 | 2222 | #define SPLIT_MODE_5704_MAX_REQ 3 |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 1456759936c5..10e1a905c144 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
| @@ -285,9 +285,9 @@ static int pci_device_suspend(struct device * dev, pm_message_t state) | |||
| 285 | * Default resume method for devices that have no driver provided resume, | 285 | * Default resume method for devices that have no driver provided resume, |
| 286 | * or not even a driver at all. | 286 | * or not even a driver at all. |
| 287 | */ | 287 | */ |
| 288 | static void pci_default_resume(struct pci_dev *pci_dev) | 288 | static int pci_default_resume(struct pci_dev *pci_dev) |
| 289 | { | 289 | { |
| 290 | int retval; | 290 | int retval = 0; |
| 291 | 291 | ||
| 292 | /* restore the PCI config space */ | 292 | /* restore the PCI config space */ |
| 293 | pci_restore_state(pci_dev); | 293 | pci_restore_state(pci_dev); |
| @@ -297,18 +297,21 @@ static void pci_default_resume(struct pci_dev *pci_dev) | |||
| 297 | /* if the device was busmaster before the suspend, make it busmaster again */ | 297 | /* if the device was busmaster before the suspend, make it busmaster again */ |
| 298 | if (pci_dev->is_busmaster) | 298 | if (pci_dev->is_busmaster) |
| 299 | pci_set_master(pci_dev); | 299 | pci_set_master(pci_dev); |
| 300 | |||
| 301 | return retval; | ||
| 300 | } | 302 | } |
| 301 | 303 | ||
| 302 | static int pci_device_resume(struct device * dev) | 304 | static int pci_device_resume(struct device * dev) |
| 303 | { | 305 | { |
| 306 | int error; | ||
| 304 | struct pci_dev * pci_dev = to_pci_dev(dev); | 307 | struct pci_dev * pci_dev = to_pci_dev(dev); |
| 305 | struct pci_driver * drv = pci_dev->driver; | 308 | struct pci_driver * drv = pci_dev->driver; |
| 306 | 309 | ||
| 307 | if (drv && drv->resume) | 310 | if (drv && drv->resume) |
| 308 | drv->resume(pci_dev); | 311 | error = drv->resume(pci_dev); |
| 309 | else | 312 | else |
| 310 | pci_default_resume(pci_dev); | 313 | error = pci_default_resume(pci_dev); |
| 311 | return 0; | 314 | return error; |
| 312 | } | 315 | } |
| 313 | 316 | ||
| 314 | static void pci_device_shutdown(struct device *dev) | 317 | static void pci_device_shutdown(struct device *dev) |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 8d107c6c2c70..fde41cc14734 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -460,9 +460,23 @@ int | |||
| 460 | pci_restore_state(struct pci_dev *dev) | 460 | pci_restore_state(struct pci_dev *dev) |
| 461 | { | 461 | { |
| 462 | int i; | 462 | int i; |
| 463 | int val; | ||
| 463 | 464 | ||
| 464 | for (i = 0; i < 16; i++) | 465 | /* |
| 465 | pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]); | 466 | * The Base Address register should be programmed before the command |
| 467 | * register(s) | ||
| 468 | */ | ||
| 469 | for (i = 15; i >= 0; i--) { | ||
| 470 | pci_read_config_dword(dev, i * 4, &val); | ||
| 471 | if (val != dev->saved_config_space[i]) { | ||
| 472 | printk(KERN_DEBUG "PM: Writing back config space on " | ||
| 473 | "device %s at offset %x (was %x, writing %x)\n", | ||
| 474 | pci_name(dev), i, | ||
| 475 | val, (int)dev->saved_config_space[i]); | ||
| 476 | pci_write_config_dword(dev,i * 4, | ||
| 477 | dev->saved_config_space[i]); | ||
| 478 | } | ||
| 479 | } | ||
| 466 | pci_restore_msi_state(dev); | 480 | pci_restore_msi_state(dev); |
| 467 | pci_restore_msix_state(dev); | 481 | pci_restore_msix_state(dev); |
| 468 | return 0; | 482 | return 0; |
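
Walking the saved config space backwards means the BARs (offsets 0x10 and up) are reprogrammed before PCI_COMMAND at offset 0x04 re-enables memory/I-O decoding, so the device never briefly decodes stale addresses; the read-and-compare additionally turns already-correct slots into no-ops. A small plain-C model of the idea:

    #include <stdio.h>
    #include <stdint.h>

    /* Toy model: slot 1 plays PCI_COMMAND, higher slots play BARs.
     * Restoring high-to-low guarantees "command" is written last. */
    static uint32_t live[16];
    static uint32_t saved[16];

    static void restore_reverse(void)
    {
            for (int i = 15; i >= 0; i--)
                    if (live[i] != saved[i]) {      /* skip no-op writes */
                            printf("slot %2d: %08x -> %08x\n",
                                   i, live[i], saved[i]);
                            live[i] = saved[i];
                    }
    }

    int main(void)
    {
            saved[4] = 0xfebf0000;  /* a BAR value */
            saved[1] = 0x00000006;  /* command: memory + bus-master */
            restore_reverse();      /* BAR line prints before command */
            return 0;
    }
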
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index 9b8bca1ac1f0..f16f92a6ec0f 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c | |||
| @@ -2035,6 +2035,7 @@ static void mv_phy_reset(struct ata_port *ap) | |||
| 2035 | static void mv_eng_timeout(struct ata_port *ap) | 2035 | static void mv_eng_timeout(struct ata_port *ap) |
| 2036 | { | 2036 | { |
| 2037 | struct ata_queued_cmd *qc; | 2037 | struct ata_queued_cmd *qc; |
| 2038 | unsigned long flags; | ||
| 2038 | 2039 | ||
| 2039 | printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); | 2040 | printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); |
| 2040 | DPRINTK("All regs @ start of eng_timeout\n"); | 2041 | DPRINTK("All regs @ start of eng_timeout\n"); |
| @@ -2046,8 +2047,10 @@ static void mv_eng_timeout(struct ata_port *ap) | |||
| 2046 | ap->host_set->mmio_base, ap, qc, qc->scsicmd, | 2047 | ap->host_set->mmio_base, ap, qc, qc->scsicmd, |
| 2047 | &qc->scsicmd->cmnd); | 2048 | &qc->scsicmd->cmnd); |
| 2048 | 2049 | ||
| 2050 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
| 2049 | mv_err_intr(ap, 0); | 2051 | mv_err_intr(ap, 0); |
| 2050 | mv_stop_and_reset(ap); | 2052 | mv_stop_and_reset(ap); |
| 2053 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
| 2051 | 2054 | ||
| 2052 | WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); | 2055 | WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); |
| 2053 | if (qc->flags & ATA_QCFLAG_ACTIVE) { | 2056 | if (qc->flags & ATA_QCFLAG_ACTIVE) { |
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index acde8868da21..fafe7c1265b3 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c | |||
| @@ -185,6 +185,9 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device | |||
| 185 | /* Select Power Management Mode */ | 185 | /* Select Power Management Mode */ |
| 186 | pxa27x_ohci_select_pmm(inf->port_mode); | 186 | pxa27x_ohci_select_pmm(inf->port_mode); |
| 187 | 187 | ||
| 188 | if (inf->power_budget) | ||
| 189 | hcd->power_budget = inf->power_budget; | ||
| 190 | |||
| 188 | ohci_hcd_init(hcd_to_ohci(hcd)); | 191 | ohci_hcd_init(hcd_to_ohci(hcd)); |
| 189 | 192 | ||
| 190 | retval = usb_add_hcd(hcd, pdev->resource[1].start, SA_INTERRUPT); | 193 | retval = usb_add_hcd(hcd, pdev->resource[1].start, SA_INTERRUPT); |
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 953eb8c171d6..47ba1a79adcd 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
| @@ -1745,7 +1745,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, | |||
| 1745 | fbcon_redraw_move(vc, p, 0, t, count); | 1745 | fbcon_redraw_move(vc, p, 0, t, count); |
| 1746 | ypan_up_redraw(vc, t, count); | 1746 | ypan_up_redraw(vc, t, count); |
| 1747 | if (vc->vc_rows - b > 0) | 1747 | if (vc->vc_rows - b > 0) |
| 1748 | fbcon_redraw_move(vc, p, b - count, | 1748 | fbcon_redraw_move(vc, p, b, |
| 1749 | vc->vc_rows - b, b); | 1749 | vc->vc_rows - b, b); |
| 1750 | } else | 1750 | } else |
| 1751 | fbcon_redraw_move(vc, p, t + count, b - t - count, t); | 1751 | fbcon_redraw_move(vc, p, t + count, b - t - count, t); |
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 85d166cdcae4..b55b4ea9a676 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c | |||
| @@ -67,12 +67,13 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d | |||
| 67 | static int debugfs_mknod(struct inode *dir, struct dentry *dentry, | 67 | static int debugfs_mknod(struct inode *dir, struct dentry *dentry, |
| 68 | int mode, dev_t dev) | 68 | int mode, dev_t dev) |
| 69 | { | 69 | { |
| 70 | struct inode *inode = debugfs_get_inode(dir->i_sb, mode, dev); | 70 | struct inode *inode; |
| 71 | int error = -EPERM; | 71 | int error = -EPERM; |
| 72 | 72 | ||
| 73 | if (dentry->d_inode) | 73 | if (dentry->d_inode) |
| 74 | return -EEXIST; | 74 | return -EEXIST; |
| 75 | 75 | ||
| 76 | inode = debugfs_get_inode(dir->i_sb, mode, dev); | ||
| 76 | if (inode) { | 77 | if (inode) { |
| 77 | d_instantiate(dentry, inode); | 78 | d_instantiate(dentry, inode); |
| 78 | dget(dentry); | 79 | dget(dentry); |
diff --git a/include/asm-arm/arch-pxa/ohci.h b/include/asm-arm/arch-pxa/ohci.h index 7da89569061e..e848a47128cd 100644 --- a/include/asm-arm/arch-pxa/ohci.h +++ b/include/asm-arm/arch-pxa/ohci.h | |||
| @@ -11,6 +11,8 @@ struct pxaohci_platform_data { | |||
| 11 | #define PMM_NPS_MODE 1 | 11 | #define PMM_NPS_MODE 1 |
| 12 | #define PMM_GLOBAL_MODE 2 | 12 | #define PMM_GLOBAL_MODE 2 |
| 13 | #define PMM_PERPORT_MODE 3 | 13 | #define PMM_PERPORT_MODE 3 |
| 14 | |||
| 15 | int power_budget; | ||
| 14 | }; | 16 | }; |
| 15 | 17 | ||
| 16 | extern void pxa_set_ohci_info(struct pxaohci_platform_data *info); | 18 | extern void pxa_set_ohci_info(struct pxaohci_platform_data *info); |
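
The new power_budget field lets board code tell the OHCI stack how much VBUS current is actually available; the probe above copies it into hcd->power_budget only when non-zero, so boards that don't set it keep the stack default. A hypothetical board file would wire it up like this (board name and mA figure invented):

    /* Sketch of board-support usage; "exampleboard" is not a real machine. */
    static struct pxaohci_platform_data exampleboard_ohci_info = {
            .port_mode    = PMM_PERPORT_MODE,
            .power_budget = 500,    /* mA the board can supply per port */
    };

    static void __init exampleboard_init(void)
    {
            pxa_set_ohci_info(&exampleboard_ohci_info);
    }
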
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h index 40c25e166a9b..1802775568b9 100644 --- a/include/asm-s390/futex.h +++ b/include/asm-s390/futex.h | |||
| @@ -11,23 +11,24 @@ | |||
| 11 | #define __futex_atomic_fixup \ | 11 | #define __futex_atomic_fixup \ |
| 12 | ".section __ex_table,\"a\"\n" \ | 12 | ".section __ex_table,\"a\"\n" \ |
| 13 | " .align 4\n" \ | 13 | " .align 4\n" \ |
| 14 | " .long 0b,2b,1b,2b\n" \ | 14 | " .long 0b,4b,2b,4b,3b,4b\n" \ |
| 15 | ".previous" | 15 | ".previous" |
| 16 | #else /* __s390x__ */ | 16 | #else /* __s390x__ */ |
| 17 | #define __futex_atomic_fixup \ | 17 | #define __futex_atomic_fixup \ |
| 18 | ".section __ex_table,\"a\"\n" \ | 18 | ".section __ex_table,\"a\"\n" \ |
| 19 | " .align 8\n" \ | 19 | " .align 8\n" \ |
| 20 | " .quad 0b,2b,1b,2b\n" \ | 20 | " .quad 0b,4b,2b,4b,3b,4b\n" \ |
| 21 | ".previous" | 21 | ".previous" |
| 22 | #endif /* __s390x__ */ | 22 | #endif /* __s390x__ */ |
| 23 | 23 | ||
| 24 | #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ | 24 | #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ |
| 25 | asm volatile(" l %1,0(%6)\n" \ | 25 | asm volatile(" sacf 256\n" \ |
| 26 | "0: " insn \ | 26 | "0: l %1,0(%6)\n" \ |
| 27 | " cs %1,%2,0(%6)\n" \ | 27 | "1: " insn \ |
| 28 | "1: jl 0b\n" \ | 28 | "2: cs %1,%2,0(%6)\n" \ |
| 29 | "3: jl 1b\n" \ | ||
| 29 | " lhi %0,0\n" \ | 30 | " lhi %0,0\n" \ |
| 30 | "2:\n" \ | 31 | "4: sacf 0\n" \ |
| 31 | __futex_atomic_fixup \ | 32 | __futex_atomic_fixup \ |
| 32 | : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ | 33 | : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ |
| 33 | "=m" (*uaddr) \ | 34 | "=m" (*uaddr) \ |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index ad133fcfb239..1713ace808bf 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
| @@ -21,7 +21,7 @@ typedef void (elevator_put_req_fn) (request_queue_t *, struct request *); | |||
| 21 | typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *); | 21 | typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *); |
| 22 | typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *); | 22 | typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *); |
| 23 | 23 | ||
| 24 | typedef int (elevator_init_fn) (request_queue_t *, elevator_t *); | 24 | typedef void *(elevator_init_fn) (request_queue_t *, elevator_t *); |
| 25 | typedef void (elevator_exit_fn) (elevator_t *); | 25 | typedef void (elevator_exit_fn) (elevator_t *); |
| 26 | 26 | ||
| 27 | struct elevator_ops | 27 | struct elevator_ops |
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index dd7d627bf66f..c115e9e840b4 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -1114,8 +1114,11 @@ static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
 
 	mmsg->mfa = readl(c->in_port);
 	if (unlikely(mmsg->mfa >= c->in_queue.len)) {
+		u32 mfa = mmsg->mfa;
+
 		mempool_free(mmsg, c->in_msg.mempool);
-		if(mmsg->mfa == I2O_QUEUE_EMPTY)
+
+		if (mfa == I2O_QUEUE_EMPTY)
 			return ERR_PTR(-EBUSY);
 		return ERR_PTR(-EFAULT);
 	}
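The hunk fixes a use-after-free: mmsg->mfa was read after mmsg had gone back
to the mempool. Caching the field first is the standard cure. A minimal,
self-contained illustration of the bug class, where free_msg() stands in for
mempool_free():

	struct msg { u32 mfa; };
	void free_msg(struct msg *m);

	static u32 broken(struct msg *m)
	{
		free_msg(m);
		return m->mfa;		/* reads freed memory */
	}

	static u32 fixed(struct msg *m)
	{
		u32 mfa = m->mfa;	/* copy while the object is alive */

		free_msg(m);
		return mfa;
	}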
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 6a7621b2b12b..f5fdca1d67e6 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -36,6 +36,7 @@
 #include <linux/nodemask.h>
 
 struct vm_area_struct;
+struct mm_struct;
 
 #ifdef CONFIG_NUMA
 
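The forward declaration suffices because this header only ever names
struct mm_struct through pointers; the full definition is needed only where
the type is dereferenced or sized. A minimal reminder of the rule (the
prototype is illustrative):

	struct mm_struct;			/* incomplete type is fine... */
	void takes_mm(struct mm_struct *mm);	/* ...for pointer parameters */
	/* mm->mmap or sizeof(struct mm_struct) would need the real
	 * definition, and hence a heavyweight include */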
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 4877e35ae202..936ef82ed76a 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -50,7 +50,7 @@
 extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
 extern acpi_status pci_osc_support_set(u32 flags);
 #else
-#if !defined(acpi_status)
+#if !defined(AE_ERROR)
 typedef u32 acpi_status;
 #define AE_ERROR (acpi_status) (0x0001)
 #endif
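The old guard could never work as intended: defined() tests preprocessor
macros, and acpi_status is a typedef, invisible to cpp, so the fallback was
emitted even when the real ACPI headers were present and clashed with them.
AE_ERROR is a genuine macro, so it is a usable sentinel. In miniature:

	typedef unsigned int acpi_status;	/* a typedef, not a macro... */
	#if !defined(acpi_status)		/* ...so this is ALWAYS true */
	typedef unsigned int acpi_status;	/* redefinition: compile error
						 * in pre-C11 C */
	#endif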
diff --git a/mm/shmem.c b/mm/shmem.c
index 4c5e68e4e9ae..1e43c8a865ba 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1780,6 +1780,7 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
 	if (!simple_empty(dentry))
 		return -ENOTEMPTY;
 
+	dentry->d_inode->i_nlink--;
 	dir->i_nlink--;
 	return shmem_unlink(dir, dentry);
 }
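The extra decrement fixes tmpfs directory link counts on rmdir: an empty
directory carries i_nlink == 2 ("." plus the entry in its parent), and the
"." self-link was never dropped, so removed directories kept a stale count.
The bookkeeping, sketched with illustrative helper names:

	static void mkdir_links(struct inode *parent, struct inode *child)
	{
		child->i_nlink = 2;	/* "." plus the entry in parent */
		parent->i_nlink++;	/* ".." inside the new directory */
	}

	static void rmdir_links(struct inode *parent, struct inode *child)
	{
		child->i_nlink--;	/* the new line: drop "." */
		parent->i_nlink--;	/* ".." disappears with the child */
		/* shmem_unlink() then drops the parent-entry link */
	}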
@@ -2102,6 +2103,7 @@ static int shmem_fill_super(struct super_block *sb,
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
 	sb->s_magic = TMPFS_MAGIC;
 	sb->s_op = &shmem_ops;
+	sb->s_time_gran = 1;
 
 	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
 	if (!inode)
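s_time_gran declares the filesystem's timestamp granularity in nanoseconds;
left unset, tmpfs inherited the coarse one-second default and inode times were
rounded accordingly. Setting it to 1 keeps full-resolution c/m/atime. The
generic helper applies it roughly like this, a simplified sketch assuming the
era's current_fs_time()/timespec_trunc() interfaces:

	struct timespec current_fs_time(struct super_block *sb)
	{
		struct timespec now = current_kernel_time();

		/* round down to the filesystem's granularity */
		return timespec_trunc(now, sb->s_time_gran);
	}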
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4649a63a8cb6..440a733fe2e9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1061,7 +1061,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
 loop_again:
 	total_scanned = 0;
 	nr_reclaimed = 0;
-	sc.may_writepage = !laptop_mode,
+	sc.may_writepage = !laptop_mode;
 	sc.nr_mapped = read_page_state(nr_mapped);
 
 	inc_page_state(pageoutrun);
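The fix swaps a stray comma operator for a semicolon. The code happened to
behave correctly, since "a = b, c = d;" is one well-formed expression
statement, but the comma silently changes meaning in other contexts and is a
trap for later edits. A reminder of the hazard:

	int x = (1, 2);	/* comma operator: the 1 is evaluated and
			 * discarded, x ends up as 2 */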
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index b5981e5f6b00..8c211c58893b 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -452,6 +452,7 @@ found:
 				      (unsigned long long)
 				      avr->dccpavr_ack_ackno);
 			dccp_ackvec_throw_record(av, avr);
+			break;
 		}
 		/*
 		 * If it wasn't received, continue scanning... we might
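Without the break, the walk continued past the matched record even though
dccp_ackvec_throw_record() had already disposed of it, so the iterator could
touch freed entries. A hedged sketch of the loop shape; the list and member
names are assumptions based on the surrounding code, not quoted from it:

	list_for_each_entry_safe(avr, next, &av->dccpav_records, dccpavr_node) {
		if (avr->dccpavr_ack_ackno == ackno) {
			dccp_ackvec_throw_record(av, avr); /* frees avr and
							    * older records */
			break;	/* new: do not advance through freed memory */
		}
	}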
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 0923add122b4..9f0bb529ab70 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -116,6 +116,7 @@ sr_failed:
 
 too_many_hops:
 	/* Tell the sender its packet died... */
+	IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
 	icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
 drop:
 	kfree_skb(skb);
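The added line makes hop-limit expiry visible in the IP MIB (InHdrErrors in
/proc/net/snmp) rather than only generating the ICMP error. The macro is
conventional per-CPU SNMP accounting, safe in softirq context; a sketch of
the era's definition, offered as an assumption rather than a quote:

	#define IP_INC_STATS_BH(field) \
		SNMP_INC_STATS_BH(ip_statistics, field)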
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4a538bc1683d..b5521a9d3dc1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1649,7 +1649,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
 	 * Hence, we can detect timed out packets during fast
 	 * retransmit without falling to slow start.
 	 */
-	if (tcp_head_timedout(sk, tp)) {
+	if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
 		struct sk_buff *skb;
 
 		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -1662,8 +1662,6 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 				tp->lost_out += tcp_skb_pcount(skb);
-				if (IsReno(tp))
-					tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);
 
 				/* clear xmit_retrans hint */
 				if (tp->retransmit_skb_hint &&
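Both hunks are one logical fix: NewReno carries no SACK blocks, so its
sacked_out is inferred, and letting the head-timeout heuristic tag packets
lost corrupted that accounting; the tcp_remove_reno_sacks() compensation then
becomes dead code once the !IsReno() guard keeps Reno out of the block
entirely. For reference, the predicate distinguishing the two accounting
modes looked roughly like this in that era (sketch, treat as an assumption):

	#define IsReno(tp)	((tp)->rx_opt.sack_ok == 0)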
