author		Ingo Molnar <mingo@elte.hu>	2010-04-02 13:37:50 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-04-02 13:38:10 -0400
commit		ec5e61aabeac58670691bd0613388d16697d0d81 (patch)
tree		59838509358f27334874b90756505785cde29b02
parent		75ec5a245c7763c397f31ec8964d0a46c54a7386 (diff)
parent		8bb39f9aa068262732fe44b965d7a6eb5a5a7d67 (diff)

Merge branch 'perf/urgent' into perf/core

Conflicts:
	arch/x86/kernel/cpu/perf_event.c

Merge reason: Resolve the conflict, pick up fixes

Signed-off-by: Ingo Molnar <mingo@elte.hu>

460 files changed, 6917 insertions, 4153 deletions
diff --git a/Documentation/PCI/PCI-DMA-mapping.txt b/Documentation/DMA-API-HOWTO.txt
index 52618ab069ad..52618ab069ad 100644
--- a/Documentation/PCI/PCI-DMA-mapping.txt
+++ b/Documentation/DMA-API-HOWTO.txt
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index f8bc802d70b9..3a6aecd078ba 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -340,7 +340,7 @@ Note:
 5.3 swappiness
 Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
 
-Following cgroups' swapiness can't be changed.
+Following cgroups' swappiness can't be changed.
 - root cgroup (uses /proc/sys/vm/swappiness).
 - a cgroup which uses hierarchy and it has child cgroup.
 - a cgroup which uses hierarchy and not the root of hierarchy.
diff --git a/Documentation/circular-buffers.txt b/Documentation/circular-buffers.txt
new file mode 100644
index 000000000000..8117e5bf6065
--- /dev/null
+++ b/Documentation/circular-buffers.txt
@@ -0,0 +1,234 @@
+================
+CIRCULAR BUFFERS
+================
+
+By: David Howells <dhowells@redhat.com>
+    Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+
+Linux provides a number of features that can be used to implement circular
+buffering. There are two sets of such features:
+
+ (1) Convenience functions for determining information about power-of-2 sized
+     buffers.
+
+ (2) Memory barriers for when the producer and the consumer of objects in the
+     buffer don't want to share a lock.
+
+To use these facilities, as discussed below, there needs to be just one
+producer and just one consumer. It is possible to handle multiple producers by
+serialising them, and to handle multiple consumers by serialising them.
+
+
+Contents:
+
+ (*) What is a circular buffer?
+
+ (*) Measuring power-of-2 buffers.
+
+ (*) Using memory barriers with circular buffers.
+     - The producer.
+     - The consumer.
+
+
+==========================
+WHAT IS A CIRCULAR BUFFER?
+==========================
+
+First of all, what is a circular buffer? A circular buffer is a buffer of
+fixed, finite size into which there are two indices:
+
+ (1) A 'head' index - the point at which the producer inserts items into the
+     buffer.
+
+ (2) A 'tail' index - the point at which the consumer finds the next item in
+     the buffer.
+
+Typically when the tail pointer is equal to the head pointer, the buffer is
+empty; and the buffer is full when the head pointer is one less than the tail
+pointer.
+
+The head index is incremented when items are added, and the tail index when
+items are removed. The tail index should never jump the head index, and both
+indices should be wrapped to 0 when they reach the end of the buffer, thus
+allowing an infinite amount of data to flow through the buffer.
+
+Typically, items will all be of the same unit size, but this isn't strictly
+required to use the techniques below. The indices can be increased by more
+than 1 if multiple items or variable-sized items are to be included in the
+buffer, provided that neither index overtakes the other. The implementer must
+be careful, however, as a region more than one unit in size may wrap the end of
+the buffer and be broken into two segments.
+
+
+============================
+MEASURING POWER-OF-2 BUFFERS
+============================
+
+Calculation of the occupancy or the remaining capacity of an arbitrarily sized
+circular buffer would normally be a slow operation, requiring the use of a
+modulus (divide) instruction. However, if the buffer is of a power-of-2 size,
+then a much quicker bitwise-AND instruction can be used instead.
+
+Linux provides a set of macros for handling power-of-2 circular buffers. These
+can be made use of by:
+
+	#include <linux/circ_buf.h>
+
+The macros are:
+
+ (*) Measure the remaining capacity of a buffer:
+
+	CIRC_SPACE(head_index, tail_index, buffer_size);
+
+     This returns the amount of space left in the buffer[1] into which items
+     can be inserted.
+
+
+ (*) Measure the maximum consecutive immediate space in a buffer:
+
+	CIRC_SPACE_TO_END(head_index, tail_index, buffer_size);
+
+     This returns the amount of consecutive space left in the buffer[1] into
+     which items can be immediately inserted without having to wrap back to the
+     beginning of the buffer.
+
+
+ (*) Measure the occupancy of a buffer:
+
+	CIRC_CNT(head_index, tail_index, buffer_size);
+
+     This returns the number of items currently occupying a buffer[2].
+
+
+ (*) Measure the non-wrapping occupancy of a buffer:
+
+	CIRC_CNT_TO_END(head_index, tail_index, buffer_size);
+
+     This returns the number of consecutive items[2] that can be extracted from
+     the buffer without having to wrap back to the beginning of the buffer.
+
+
+Each of these macros will nominally return a value between 0 and buffer_size-1,
+however:
+
+ [1] CIRC_SPACE*() are intended to be used in the producer. To the producer
+     they will return a lower bound as the producer controls the head index,
+     but the consumer may still be depleting the buffer on another CPU and
+     moving the tail index.
+
+     To the consumer it will show an upper bound as the producer may be busy
+     depleting the space.
+
+ [2] CIRC_CNT*() are intended to be used in the consumer. To the consumer they
+     will return a lower bound as the consumer controls the tail index, but the
+     producer may still be filling the buffer on another CPU and moving the
+     head index.
+
+     To the producer it will show an upper bound as the consumer may be busy
+     emptying the buffer.
+
+ [3] To a third party, the order in which the writes to the indices by the
+     producer and consumer become visible cannot be guaranteed as they are
+     independent and may be made on different CPUs - so the result in such a
+     situation will merely be a guess, and may even be negative.
+
+
+===========================================
+USING MEMORY BARRIERS WITH CIRCULAR BUFFERS
+===========================================
+
+By using memory barriers in conjunction with circular buffers, you can avoid
+the need to:
+
+ (1) use a single lock to govern access to both ends of the buffer, thus
+     allowing the buffer to be filled and emptied at the same time; and
+
+ (2) use atomic counter operations.
+
+There are two sides to this: the producer that fills the buffer, and the
+consumer that empties it. Only one thing should be filling a buffer at any one
+time, and only one thing should be emptying a buffer at any one time, but the
+two sides can operate simultaneously.
+
+
+THE PRODUCER
+------------
+
+The producer will look something like this:
+
+	spin_lock(&producer_lock);
+
+	unsigned long head = buffer->head;
+	unsigned long tail = ACCESS_ONCE(buffer->tail);
+
+	if (CIRC_SPACE(head, tail, buffer->size) >= 1) {
+		/* insert one item into the buffer */
+		struct item *item = buffer[head];
+
+		produce_item(item);
+
+		smp_wmb(); /* commit the item before incrementing the head */
+
+		buffer->head = (head + 1) & (buffer->size - 1);
+
+		/* wake_up() will make sure that the head is committed before
+		 * waking anyone up */
+		wake_up(consumer);
+	}
+
+	spin_unlock(&producer_lock);
+
+This will instruct the CPU that the contents of the new item must be written
+before the head index makes it available to the consumer and then instructs the
+CPU that the revised head index must be written before the consumer is woken.
+
+Note that wake_up() doesn't have to be the exact mechanism used, but whatever
+is used must guarantee a (write) memory barrier between the update of the head
+index and the change of state of the consumer, if a change of state occurs.
+
+
+THE CONSUMER
+------------
+
+The consumer will look something like this:
+
+	spin_lock(&consumer_lock);
+
+	unsigned long head = ACCESS_ONCE(buffer->head);
+	unsigned long tail = buffer->tail;
+
+	if (CIRC_CNT(head, tail, buffer->size) >= 1) {
+		/* read index before reading contents at that index */
+		smp_read_barrier_depends();
+
+		/* extract one item from the buffer */
+		struct item *item = buffer[tail];
+
+		consume_item(item);
+
+		smp_mb(); /* finish reading descriptor before incrementing tail */
+
+		buffer->tail = (tail + 1) & (buffer->size - 1);
+	}
+
+	spin_unlock(&consumer_lock);
+
+This will instruct the CPU to make sure the index is up to date before reading
+the new item, and then it shall make sure the CPU has finished reading the item
+before it writes the new tail pointer, which will erase the item.
+
+
+Note the use of ACCESS_ONCE() in both algorithms to read the opposition index.
+This prevents the compiler from discarding and reloading its cached value -
+which some compilers will do across smp_read_barrier_depends(). This isn't
+strictly needed if you can be sure that the opposition index will _only_ be
+used the once.
+
+
+===============
+FURTHER READING
+===============
+
+See also Documentation/memory-barriers.txt for a description of Linux's memory
+barrier facilities.
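
The power-of-2 arithmetic described in the file above is easy to try outside
the kernel. Below is a minimal userspace sketch, not part of the patch: the
CIRC_CNT()/CIRC_SPACE() definitions mirror what <linux/circ_buf.h> provides,
and the single-threaded fill/drain loop only exercises the bitwise-AND
wrap-around, not the memory-barrier usage shown in the producer/consumer
examples.

	/* Minimal single-threaded sketch of the power-of-2 index arithmetic;
	 * the two macros mirror the definitions in <linux/circ_buf.h>. */
	#include <stdio.h>

	#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
	#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

	#define BUF_SIZE 8			/* must be a power of 2 */

	int main(void)
	{
		int buf[BUF_SIZE];
		unsigned long head = 0, tail = 0;
		int i;

		/* producer side: insert while CIRC_SPACE() reports room */
		for (i = 0; i < 10; i++) {
			if (CIRC_SPACE(head, tail, BUF_SIZE) >= 1) {
				buf[head] = i;
				head = (head + 1) & (BUF_SIZE - 1);
			}
		}

		/* consumer side: drain while CIRC_CNT() reports occupancy */
		while (CIRC_CNT(head, tail, BUF_SIZE) >= 1) {
			printf("%d\n", buf[tail]);
			tail = (tail + 1) & (BUF_SIZE - 1);
		}
		return 0;
	}

Only seven of the eight slots ever get used: keeping one slot free is what lets
head == tail mean "empty" without a separate element count.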
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 3bae418c6ad3..4303614b5add 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -16,6 +16,8 @@ befs.txt
 	- information about the BeOS filesystem for Linux.
 bfs.txt
 	- info for the SCO UnixWare Boot Filesystem (BFS).
+ceph.txt
+	- info for the Ceph Distributed File System
 cifs.txt
 	- description of the CIFS filesystem.
 coda.txt
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
index 6e03917316bd..0660c9f5deef 100644
--- a/Documentation/filesystems/ceph.txt
+++ b/Documentation/filesystems/ceph.txt
@@ -8,7 +8,7 @@ Basic features include:
 
  * POSIX semantics
  * Seamless scaling from 1 to many thousands of nodes
- * High availability and reliability. No single points of failure.
+ * High availability and reliability. No single point of failure.
 * N-way replication of data across storage nodes
 * Fast recovery from node failures
 * Automatic rebalancing of data on node addition/removal
@@ -94,7 +94,7 @@ Mount Options
 
   wsize=X
 	Specify the maximum write size in bytes. By default there is no
-	maximu. Ceph will normally size writes based on the file stripe
+	maximum. Ceph will normally size writes based on the file stripe
 	size.
 
   rsize=X
@@ -115,7 +115,7 @@ Mount Options
 	number of entries in that directory.
 
   nocrc
-	Disable CRC32C calculation for data writes. If set, the OSD
+	Disable CRC32C calculation for data writes. If set, the storage node
 	must rely on TCP's error correction to detect data corruption
 	in the data payload.
 
@@ -133,7 +133,8 @@ For more information on Ceph, see the home page at
 	http://ceph.newdream.net/
 
 The Linux kernel client source tree is available at
-	git://ceph.newdream.net/linux-ceph-client.git
+	git://ceph.newdream.net/git/ceph-client.git
+	git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 
 and the source for the full system is at
-	git://ceph.newdream.net/ceph.git
+	git://ceph.newdream.net/git/ceph.git
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index 3015da0c6b2a..fe09a2cb1858 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -82,11 +82,13 @@ tmpfs has a mount option to set the NUMA memory allocation policy for
 all files in that instance (if CONFIG_NUMA is enabled) - which can be
 adjusted on the fly via 'mount -o remount ...'
 
-mpol=default             prefers to allocate memory from the local node
+mpol=default             use the process allocation policy
+                         (see set_mempolicy(2))
 mpol=prefer:Node         prefers to allocate memory from the given Node
 mpol=bind:NodeList       allocates memory only from nodes in NodeList
 mpol=interleave          prefers to allocate from each node in turn
 mpol=interleave:NodeList allocates from each node of NodeList in turn
+mpol=local               prefers to allocate memory from the local node
 
 NodeList format is a comma-separated list of decimal numbers and ranges,
 a range being two hyphen-separated decimal numbers, the smallest and
@@ -134,3 +136,5 @@ Author:
    Christoph Rohland <cr@sap.com>, 1.12.01
 Updated:
    Hugh Dickins, 4 June 2007
+Updated:
+   KOSAKI Motohiro, 16 Mar 2010
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 7f5809eddee6..631ad2f1b229 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -3,6 +3,7 @@
 ============================
 
 By: David Howells <dhowells@redhat.com>
+    Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
 Contents:
 
@@ -60,6 +61,10 @@ Contents:
 
      - And then there's the Alpha.
 
+ (*) Example uses.
+
+     - Circular buffers.
+
  (*) References.
 
 
@@ -2226,6 +2231,21 @@ The Alpha defines the Linux kernel's memory barrier model.
 See the subsection on "Cache Coherency" above.
 
 
+============
+EXAMPLE USES
+============
+
+CIRCULAR BUFFERS
+----------------
+
+Memory barriers can be used to implement circular buffering without the need
+of a lock to serialise the producer with the consumer. See:
+
+	Documentation/circular-buffers.txt
+
+for details.
+
+
 ==========
 REFERENCES
 ==========
diff --git a/Documentation/volatile-considered-harmful.txt b/Documentation/volatile-considered-harmful.txt
index 991c26a6ef64..db0cb228d64a 100644
--- a/Documentation/volatile-considered-harmful.txt
+++ b/Documentation/volatile-considered-harmful.txt
@@ -63,9 +63,9 @@ way to perform a busy wait is:
         cpu_relax();
 
 The cpu_relax() call can lower CPU power consumption or yield to a
-hyperthreaded twin processor; it also happens to serve as a memory barrier,
-so, once again, volatile is unnecessary. Of course, busy-waiting is
-generally an anti-social act to begin with.
+hyperthreaded twin processor; it also happens to serve as a compiler
+barrier, so, once again, volatile is unnecessary. Of course, busy-
+waiting is generally an anti-social act to begin with.
 
 There are still a few rare situations where volatile makes sense in the
 kernel:
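
The point of the hunk above is that cpu_relax() already acts as a compiler
barrier, so the flag being polled does not need to be volatile. A rough
userspace analogue, assumed here for illustration and not taken from the
document, is an empty asm with a "memory" clobber, which is how the kernel's
barrier() is built; it forces the flag to be re-read on every pass:

	/* Userspace sketch of a busy-wait without 'volatile': the empty asm
	 * with a "memory" clobber is a compiler barrier, so 'flag' must be
	 * reloaded from memory on each iteration (GCC/Clang). */
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	#define barrier() __asm__ __volatile__("" ::: "memory")

	static int flag;		/* set asynchronously by the signal handler */

	static void on_alarm(int sig)
	{
		(void)sig;
		flag = 1;
	}

	int main(void)
	{
		signal(SIGALRM, on_alarm);
		alarm(1);		/* fire SIGALRM after roughly one second */

		while (!flag)
			barrier();	/* compiler may not cache 'flag' in a register */

		printf("flag observed\n");
		return 0;
	}

Strict ISO C would still want the flag declared volatile sig_atomic_t when it
is set from a signal handler; the sketch leans on the barrier alone to mirror
the kernel pattern the document is describing.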
diff --git a/MAINTAINERS b/MAINTAINERS index 281505676444..6c858e89c7d0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -797,12 +797,12 @@ M: Michael Petchkovsky <mkpetch@internode.on.net> | |||
797 | S: Maintained | 797 | S: Maintained |
798 | 798 | ||
799 | ARM/NOMADIK ARCHITECTURE | 799 | ARM/NOMADIK ARCHITECTURE |
800 | M: Alessandro Rubini <rubini@unipv.it> | 800 | M: Alessandro Rubini <rubini@unipv.it> |
801 | M: STEricsson <STEricsson_nomadik_linux@list.st.com> | 801 | M: STEricsson <STEricsson_nomadik_linux@list.st.com> |
802 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 802 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
803 | S: Maintained | 803 | S: Maintained |
804 | F: arch/arm/mach-nomadik/ | 804 | F: arch/arm/mach-nomadik/ |
805 | F: arch/arm/plat-nomadik/ | 805 | F: arch/arm/plat-nomadik/ |
806 | 806 | ||
807 | ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT | 807 | ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT |
808 | M: Nelson Castillo <arhuaco@freaks-unidos.net> | 808 | M: Nelson Castillo <arhuaco@freaks-unidos.net> |
@@ -1443,7 +1443,7 @@ F: arch/powerpc/platforms/cell/ | |||
1443 | 1443 | ||
1444 | CEPH DISTRIBUTED FILE SYSTEM CLIENT | 1444 | CEPH DISTRIBUTED FILE SYSTEM CLIENT |
1445 | M: Sage Weil <sage@newdream.net> | 1445 | M: Sage Weil <sage@newdream.net> |
1446 | L: ceph-devel@lists.sourceforge.net | 1446 | L: ceph-devel@vger.kernel.org |
1447 | W: http://ceph.newdream.net/ | 1447 | W: http://ceph.newdream.net/ |
1448 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git | 1448 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git |
1449 | S: Supported | 1449 | S: Supported |
@@ -1926,17 +1926,17 @@ F: drivers/scsi/dpt* | |||
1926 | F: drivers/scsi/dpt/ | 1926 | F: drivers/scsi/dpt/ |
1927 | 1927 | ||
1928 | DRBD DRIVER | 1928 | DRBD DRIVER |
1929 | P: Philipp Reisner | 1929 | P: Philipp Reisner |
1930 | P: Lars Ellenberg | 1930 | P: Lars Ellenberg |
1931 | M: drbd-dev@lists.linbit.com | 1931 | M: drbd-dev@lists.linbit.com |
1932 | L: drbd-user@lists.linbit.com | 1932 | L: drbd-user@lists.linbit.com |
1933 | W: http://www.drbd.org | 1933 | W: http://www.drbd.org |
1934 | T: git git://git.drbd.org/linux-2.6-drbd.git drbd | 1934 | T: git git://git.drbd.org/linux-2.6-drbd.git drbd |
1935 | T: git git://git.drbd.org/drbd-8.3.git | 1935 | T: git git://git.drbd.org/drbd-8.3.git |
1936 | S: Supported | 1936 | S: Supported |
1937 | F: drivers/block/drbd/ | 1937 | F: drivers/block/drbd/ |
1938 | F: lib/lru_cache.c | 1938 | F: lib/lru_cache.c |
1939 | F: Documentation/blockdev/drbd/ | 1939 | F: Documentation/blockdev/drbd/ |
1940 | 1940 | ||
1941 | DRIVER CORE, KOBJECTS, AND SYSFS | 1941 | DRIVER CORE, KOBJECTS, AND SYSFS |
1942 | M: Greg Kroah-Hartman <gregkh@suse.de> | 1942 | M: Greg Kroah-Hartman <gregkh@suse.de> |
@@ -3083,6 +3083,7 @@ F: include/scsi/*iscsi* | |||
3083 | ISDN SUBSYSTEM | 3083 | ISDN SUBSYSTEM |
3084 | M: Karsten Keil <isdn@linux-pingi.de> | 3084 | M: Karsten Keil <isdn@linux-pingi.de> |
3085 | L: isdn4linux@listserv.isdn4linux.de (subscribers-only) | 3085 | L: isdn4linux@listserv.isdn4linux.de (subscribers-only) |
3086 | L: netdev@vger.kernel.org | ||
3086 | W: http://www.isdn4linux.de | 3087 | W: http://www.isdn4linux.de |
3087 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git | 3088 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git |
3088 | S: Maintained | 3089 | S: Maintained |
@@ -3269,6 +3270,16 @@ S: Maintained | |||
3269 | F: include/linux/kexec.h | 3270 | F: include/linux/kexec.h |
3270 | F: kernel/kexec.c | 3271 | F: kernel/kexec.c |
3271 | 3272 | ||
3273 | KEYS/KEYRINGS: | ||
3274 | M: David Howells <dhowells@redhat.com> | ||
3275 | L: keyrings@linux-nfs.org | ||
3276 | S: Maintained | ||
3277 | F: Documentation/keys.txt | ||
3278 | F: include/linux/key.h | ||
3279 | F: include/linux/key-type.h | ||
3280 | F: include/keys/ | ||
3281 | F: security/keys/ | ||
3282 | |||
3272 | KGDB | 3283 | KGDB |
3273 | M: Jason Wessel <jason.wessel@windriver.com> | 3284 | M: Jason Wessel <jason.wessel@windriver.com> |
3274 | L: kgdb-bugreport@lists.sourceforge.net | 3285 | L: kgdb-bugreport@lists.sourceforge.net |
@@ -3518,8 +3529,8 @@ F: drivers/scsi/sym53c8xx_2/ | |||
3518 | LTP (Linux Test Project) | 3529 | LTP (Linux Test Project) |
3519 | M: Rishikesh K Rajak <risrajak@linux.vnet.ibm.com> | 3530 | M: Rishikesh K Rajak <risrajak@linux.vnet.ibm.com> |
3520 | M: Garrett Cooper <yanegomi@gmail.com> | 3531 | M: Garrett Cooper <yanegomi@gmail.com> |
3521 | M: Mike Frysinger <vapier@gentoo.org> | 3532 | M: Mike Frysinger <vapier@gentoo.org> |
3522 | M: Subrata Modak <subrata@linux.vnet.ibm.com> | 3533 | M: Subrata Modak <subrata@linux.vnet.ibm.com> |
3523 | L: ltp-list@lists.sourceforge.net (subscribers-only) | 3534 | L: ltp-list@lists.sourceforge.net (subscribers-only) |
3524 | W: http://ltp.sourceforge.net/ | 3535 | W: http://ltp.sourceforge.net/ |
3525 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git | 3536 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git |
@@ -5423,7 +5434,6 @@ S: Maintained | |||
5423 | F: sound/soc/codecs/twl4030* | 5434 | F: sound/soc/codecs/twl4030* |
5424 | 5435 | ||
5425 | TIPC NETWORK LAYER | 5436 | TIPC NETWORK LAYER |
5426 | M: Per Liden <per.liden@ericsson.com> | ||
5427 | M: Jon Maloy <jon.maloy@ericsson.com> | 5437 | M: Jon Maloy <jon.maloy@ericsson.com> |
5428 | M: Allan Stephens <allan.stephens@windriver.com> | 5438 | M: Allan Stephens <allan.stephens@windriver.com> |
5429 | L: tipc-discussion@lists.sourceforge.net | 5439 | L: tipc-discussion@lists.sourceforge.net |
@@ -6201,7 +6211,7 @@ F: arch/x86/ | |||
6201 | X86 PLATFORM DRIVERS | 6211 | X86 PLATFORM DRIVERS |
6202 | M: Matthew Garrett <mjg@redhat.com> | 6212 | M: Matthew Garrett <mjg@redhat.com> |
6203 | L: platform-driver-x86@vger.kernel.org | 6213 | L: platform-driver-x86@vger.kernel.org |
6204 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git | 6214 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git |
6205 | S: Maintained | 6215 | S: Maintained |
6206 | F: drivers/platform/x86 | 6216 | F: drivers/platform/x86 |
6207 | 6217 | ||
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 34
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 90ae00b631c2..9dff07c80ddb 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -290,7 +290,7 @@ static int locomo_suspend(struct platform_device *dev, pm_message_t state)
 	save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */
 	locomo_writel(0x00, lchip->base + LOCOMO_GPO);
 	save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */
-	locomo_writel(0x40, lchip->base + LOCOMO_SPICT);
+	locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT);
 	save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */
 	locomo_writel(0x00, lchip->base + LOCOMO_GPE);
 	save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */
@@ -418,7 +418,7 @@ __locomo_probe(struct device *me, struct resource *mem, int irq)
 	/* Longtime timer */
 	locomo_writel(0, lchip->base + LOCOMO_LTINT);
 	/* SPI */
-	locomo_writel(0, lchip->base + LOCOMO_SPIIE);
+	locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE);
 
 	locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD);
 	r = locomo_readl(lchip->base + LOCOMO_ASD);
@@ -707,7 +707,7 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
 	udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
 	if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */
 		printk(KERN_WARNING "locomo: m62332_senddata Error 1\n");
-		return;
+		goto out;
 	}
 
 	/* Send Sub address (LSB is channel select) */
@@ -735,7 +735,7 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
 	udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
 	if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */
 		printk(KERN_WARNING "locomo: m62332_senddata Error 2\n");
-		return;
+		goto out;
 	}
 
 	/* Send DAC data */
@@ -760,9 +760,9 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
 	udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
 	if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */
 		printk(KERN_WARNING "locomo: m62332_senddata Error 3\n");
-		return;
 	}
 
+out:
 	/* stop */
 	r = locomo_readl(mapbase + LOCOMO_DAC);
 	r &= ~(LOCOMO_DAC_SCLOEB);
diff --git a/arch/arm/mach-ixp23xx/include/mach/memory.h b/arch/arm/mach-ixp23xx/include/mach/memory.h
index 94a3a86cfeb8..6ef65d813f16 100644
--- a/arch/arm/mach-ixp23xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp23xx/include/mach/memory.h
@@ -19,7 +19,7 @@
  */
 #define PHYS_OFFSET (0x00000000)
 
-#define IXP23XX_PCI_SDRAM_OFFSET (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0))
+#define IXP23XX_PCI_SDRAM_OFFSET (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0)
 
 #define __phys_to_bus(x) ((x) + (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET))
 #define __bus_to_phys(x) ((x) - (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET))
diff --git a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
index 0358f45766cb..5e6f711b1c67 100644
--- a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
+++ b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
@@ -74,9 +74,9 @@ static struct gpio_keys_button mv88f6281gtw_ge_button_pins[] = {
 		.desc = "SWR Button",
 		.active_low = 1,
 	}, {
-		.code = KEY_F1,
+		.code = KEY_WPS_BUTTON,
 		.gpio = 46,
-		.desc = "WPS Button(F1)",
+		.desc = "WPS Button",
 		.active_low = 1,
 	},
 };
diff --git a/arch/arm/mach-mmp/include/mach/uncompress.h b/arch/arm/mach-mmp/include/mach/uncompress.h
index a7dcc5307216..85bd8a2d84b5 100644
--- a/arch/arm/mach-mmp/include/mach/uncompress.h
+++ b/arch/arm/mach-mmp/include/mach/uncompress.h
@@ -14,7 +14,7 @@
 #define UART2_BASE (APB_PHYS_BASE + 0x17000)
 #define UART3_BASE (APB_PHYS_BASE + 0x18000)
 
-static volatile unsigned long *UART = (unsigned long *)UART2_BASE;
+static volatile unsigned long *UART;
 
 static inline void putc(char c)
 {
@@ -37,6 +37,9 @@ static inline void flush(void)
 
 static inline void arch_decomp_setup(void)
 {
+	/* default to UART2 */
+	UART = (unsigned long *)UART2_BASE;
+
 	if (machine_is_avengers_lite())
 		UART = (unsigned long *)UART3_BASE;
 }
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index cb0feca193d4..f9f222ebb7ed 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -77,7 +77,7 @@ static struct gpio_keys_button wrt350n_v2_buttons[] = {
 		.desc = "Reset Button",
 		.active_low = 1,
 	}, {
-		.code = KEY_WLAN,
+		.code = KEY_WPS_BUTTON,
 		.gpio = 2,
 		.desc = "WPS Button",
 		.active_low = 1,
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 38fbd0a0e402..5b6ee46fa7f6 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -272,7 +272,6 @@ config MACH_H5000
 config MACH_HIMALAYA
 	bool "HTC Himalaya Support"
 	select CPU_PXA26x
-	select FB_W100
 
 config MACH_MAGICIAN
 	bool "Enable HTC Magician Support"
@@ -454,6 +453,13 @@ config PXA_SHARPSL
 config SHARPSL_PM
 	bool
 	select APM_EMULATION
+	select SHARPSL_PM_MAX1111
+
+config SHARPSL_PM_MAX1111
+	bool
+	depends on !CORGI_SSP_DEPRECATED
+	select HWMON
+	select SENSORS_MAX1111
 
 config CORGI_SSP_DEPRECATED
 	bool
@@ -547,7 +553,6 @@ config MACH_E740
 	bool "Toshiba e740"
 	default y
 	depends on ARCH_PXA_ESERIES
-	select FB_W100
 	help
 	  Say Y here if you intend to run this kernel on a Toshiba
 	  e740 family PDA.
@@ -556,7 +561,6 @@ config MACH_E750
 	bool "Toshiba e750"
 	default y
 	depends on ARCH_PXA_ESERIES
-	select FB_W100
 	help
 	  Say Y here if you intend to run this kernel on a Toshiba
 	  e750 family PDA.
@@ -573,7 +577,6 @@ config MACH_E800
 	bool "Toshiba e800"
 	default y
 	depends on ARCH_PXA_ESERIES
-	select FB_W100
 	help
 	  Say Y here if you intend to run this kernel on a Toshiba
 	  e800 family PDA.
diff --git a/arch/arm/mach-pxa/imote2.c b/arch/arm/mach-pxa/imote2.c
index b2f878bd460b..5161dca8ccc0 100644
--- a/arch/arm/mach-pxa/imote2.c
+++ b/arch/arm/mach-pxa/imote2.c
@@ -559,10 +559,6 @@ static void __init imote2_init(void)
 	pxa_set_btuart_info(NULL);
 	pxa_set_stuart_info(NULL);
 
-	/* SPI chip select directions - all other directions should
-	 * be handled by drivers.*/
-	gpio_direction_output(37, 0);
-
 	platform_add_devices(imote2_devices, ARRAY_SIZE(imote2_devices));
 
 	pxa2xx_set_spi_info(1, &pxa_ssp_master_0_info);
diff --git a/arch/arm/mach-pxa/include/mach/uncompress.h b/arch/arm/mach-pxa/include/mach/uncompress.h
index 5ef91d9d17e4..759b851ec985 100644
--- a/arch/arm/mach-pxa/include/mach/uncompress.h
+++ b/arch/arm/mach-pxa/include/mach/uncompress.h
@@ -16,9 +16,9 @@
 #define BTUART_BASE (0x40200000)
 #define STUART_BASE (0x40700000)
 
-static unsigned long uart_base = FFUART_BASE;
-static unsigned int uart_shift = 2;
-static unsigned int uart_is_pxa = 1;
+static unsigned long uart_base;
+static unsigned int uart_shift;
+static unsigned int uart_is_pxa;
 
 static inline unsigned char uart_read(int offset)
 {
@@ -56,6 +56,11 @@ static inline void flush(void)
 
 static inline void arch_decomp_setup(void)
 {
+	/* initialize to default */
+	uart_base = FFUART_BASE;
+	uart_shift = 2;
+	uart_is_pxa = 1;
+
 	if (machine_is_littleton() || machine_is_intelmote2()
 	    || machine_is_csb726() || machine_is_stargate2()
 	    || machine_is_cm_x300() || machine_is_balloon3())
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 3184bdc14526..44bb675e47f1 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -37,8 +37,6 @@
 #include <linux/lis3lv02d.h>
 #include <linux/pda_power.h>
 #include <linux/power_supply.h>
-#include <linux/pda_power.h>
-#include <linux/power_supply.h>
 #include <linux/regulator/max8660.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/fixed.h>
@@ -444,7 +442,7 @@ static struct gpio_keys_button gpio_keys_button[] = {
 		.active_low = 0,
 		.wakeup = 0,
 		.debounce_interval = 5, /* ms */
-		.desc = "on/off button",
+		.desc = "on_off button",
 	},
 };
 
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index a98a434f0111..2041eb1d90ba 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -764,11 +764,6 @@ static void __init stargate2_init(void)
 	pxa_set_btuart_info(NULL);
 	pxa_set_stuart_info(NULL);
 
-	/* spi chip selects */
-	gpio_direction_output(37, 0);
-	gpio_direction_output(24, 0);
-	gpio_direction_output(39, 0);
-
 	platform_add_devices(ARRAY_AND_SIZE(stargate2_devices));
 
 	pxa2xx_set_spi_info(1, &pxa_ssp_master_0_info);
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 31c2f4c30a95..1536f1784cac 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types | |||
@@ -12,7 +12,7 @@ | |||
12 | # | 12 | # |
13 | # http://www.arm.linux.org.uk/developer/machines/?action=new | 13 | # http://www.arm.linux.org.uk/developer/machines/?action=new |
14 | # | 14 | # |
15 | # Last update: Sat Feb 20 14:16:15 2010 | 15 | # Last update: Sat Mar 20 15:35:41 2010 |
16 | # | 16 | # |
17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number | 17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number |
18 | # | 18 | # |
@@ -2663,7 +2663,7 @@ reb01 MACH_REB01 REB01 2675 | |||
2663 | aquila MACH_AQUILA AQUILA 2676 | 2663 | aquila MACH_AQUILA AQUILA 2676 |
2664 | spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677 | 2664 | spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677 |
2665 | sheeva_esata MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678 | 2665 | sheeva_esata MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678 |
2666 | surf7x30 MACH_SURF7X30 SURF7X30 2679 | 2666 | msm7x30_surf MACH_MSM7X30_SURF MSM7X30_SURF 2679 |
2667 | micro2440 MACH_MICRO2440 MICRO2440 2680 | 2667 | micro2440 MACH_MICRO2440 MICRO2440 2680 |
2668 | am2440 MACH_AM2440 AM2440 2681 | 2668 | am2440 MACH_AM2440 AM2440 2681 |
2669 | tq2440 MACH_TQ2440 TQ2440 2682 | 2669 | tq2440 MACH_TQ2440 TQ2440 2682 |
@@ -2678,3 +2678,74 @@ vc088x MACH_VC088X VC088X 2690 | |||
2678 | mioa702 MACH_MIOA702 MIOA702 2691 | 2678 | mioa702 MACH_MIOA702 MIOA702 2691 |
2679 | hpmin MACH_HPMIN HPMIN 2692 | 2679 | hpmin MACH_HPMIN HPMIN 2692 |
2680 | ak880xak MACH_AK880XAK AK880XAK 2693 | 2680 | ak880xak MACH_AK880XAK AK880XAK 2693 |
2681 | arm926tomap850 MACH_ARM926TOMAP850 ARM926TOMAP850 2694 | ||
2682 | lkevm MACH_LKEVM LKEVM 2695 | ||
2683 | mw6410 MACH_MW6410 MW6410 2696 | ||
2684 | terastation_wxl MACH_TERASTATION_WXL TERASTATION_WXL 2697 | ||
2685 | cpu8000e MACH_CPU8000E CPU8000E 2698 | ||
2686 | catania MACH_CATANIA CATANIA 2699 | ||
2687 | tokyo MACH_TOKYO TOKYO 2700 | ||
2688 | msm7201a_surf MACH_MSM7201A_SURF MSM7201A_SURF 2701 | ||
2689 | msm7201a_ffa MACH_MSM7201A_FFA MSM7201A_FFA 2702 | ||
2690 | msm7x25_surf MACH_MSM7X25_SURF MSM7X25_SURF 2703 | ||
2691 | msm7x25_ffa MACH_MSM7X25_FFA MSM7X25_FFA 2704 | ||
2692 | msm7x27_surf MACH_MSM7X27_SURF MSM7X27_SURF 2705 | ||
2693 | msm7x27_ffa MACH_MSM7X27_FFA MSM7X27_FFA 2706 | ||
2694 | msm7x30_ffa MACH_MSM7X30_FFA MSM7X30_FFA 2707 | ||
2695 | qsd8x50_surf MACH_QSD8X50_SURF QSD8X50_SURF 2708 | ||
2696 | qsd8x50_comet MACH_QSD8X50_COMET QSD8X50_COMET 2709 | ||
2697 | qsd8x50_ffa MACH_QSD8X50_FFA QSD8X50_FFA 2710 | ||
2698 | qsd8x50a_surf MACH_QSD8X50A_SURF QSD8X50A_SURF 2711 | ||
2699 | qsd8x50a_ffa MACH_QSD8X50A_FFA QSD8X50A_FFA 2712 | ||
2700 | adx_xgcp10 MACH_ADX_XGCP10 ADX_XGCP10 2713 | ||
2701 | mcgwumts2a MACH_MCGWUMTS2A MCGWUMTS2A 2714 | ||
2702 | mobikt MACH_MOBIKT MOBIKT 2715 | ||
2703 | mx53_evk MACH_MX53_EVK MX53_EVK 2716 | ||
2704 | igep0030 MACH_IGEP0030 IGEP0030 2717 | ||
2705 | axell_h40_h50_ctrl MACH_AXELL_H40_H50_CTRL AXELL_H40_H50_CTRL 2718 | ||
2706 | dtcommod MACH_DTCOMMOD DTCOMMOD 2719 | ||
2707 | gould MACH_GOULD GOULD 2720 | ||
2708 | siberia MACH_SIBERIA SIBERIA 2721 | ||
2709 | sbc3530 MACH_SBC3530 SBC3530 2722 | ||
2710 | qarm MACH_QARM QARM 2723 | ||
2711 | mips MACH_MIPS MIPS 2724 | ||
2712 | mx27grb MACH_MX27GRB MX27GRB 2725 | ||
2713 | sbc8100 MACH_SBC8100 SBC8100 2726 | ||
2714 | saarb MACH_SAARB SAARB 2727 | ||
2715 | omap3mini MACH_OMAP3MINI OMAP3MINI 2728 | ||
2716 | cnmbook7se MACH_CNMBOOK7SE CNMBOOK7SE 2729 | ||
2717 | catan MACH_CATAN CATAN 2730 | ||
2718 | harmony MACH_HARMONY HARMONY 2731 | ||
2719 | tonga MACH_TONGA TONGA 2732 | ||
2720 | cybook_orizon MACH_CYBOOK_ORIZON CYBOOK_ORIZON 2733 | ||
2721 | htcrhodiumcdma MACH_HTCRHODIUMCDMA HTCRHODIUMCDMA 2734 | ||
2722 | epc_g45 MACH_EPC_G45 EPC_G45 2735 | ||
2723 | epc_lpc3250 MACH_EPC_LPC3250 EPC_LPC3250 2736 | ||
2724 | mxc91341evb MACH_MXC91341EVB MXC91341EVB 2737 | ||
2725 | rtw1000 MACH_RTW1000 RTW1000 2738 | ||
2726 | bobcat MACH_BOBCAT BOBCAT 2739 | ||
2727 | trizeps6 MACH_TRIZEPS6 TRIZEPS6 2740 | ||
2728 | msm7x30_fluid MACH_MSM7X30_FLUID MSM7X30_FLUID 2741 | ||
2729 | nedap9263 MACH_NEDAP9263 NEDAP9263 2742 | ||
2730 | netgear_ms2110 MACH_NETGEAR_MS2110 NETGEAR_MS2110 2743 | ||
2731 | bmx MACH_BMX BMX 2744 | ||
2732 | netstream MACH_NETSTREAM NETSTREAM 2745 | ||
2733 | vpnext_rcu MACH_VPNEXT_RCU VPNEXT_RCU 2746 | ||
2734 | vpnext_mpu MACH_VPNEXT_MPU VPNEXT_MPU 2747 | ||
2735 | bcmring_tablet_v1 MACH_BCMRING_TABLET_V1 BCMRING_TABLET_V1 2748 | ||
2736 | sgarm10 MACH_SGARM10 SGARM10 2749 | ||
2737 | cm_t3517 MACH_CM_T3517 CM_T3517 2750 | ||
2738 | omap3_cps MACH_OMAP3_CPS OMAP3_CPS 2751 | ||
2739 | axar1500_receiver MACH_AXAR1500_RECEIVER AXAR1500_RECEIVER 2752 | ||
2740 | wbd222 MACH_WBD222 WBD222 2753 | ||
2741 | mt65xx MACH_MT65XX MT65XX 2754 | ||
2742 | msm8x60_surf MACH_MSM8X60_SURF MSM8X60_SURF 2755 | ||
2743 | msm8x60_sim MACH_MSM8X60_SIM MSM8X60_SIM 2756 | ||
2744 | vmc300 MACH_VMC300 VMC300 2757 | ||
2745 | tcc8000_sdk MACH_TCC8000_SDK TCC8000_SDK 2758 | ||
2746 | nanos MACH_NANOS NANOS 2759 | ||
2747 | stamp9g10 MACH_STAMP9G10 STAMP9G10 2760 | ||
2748 | stamp9g45 MACH_STAMP9G45 STAMP9G45 2761 | ||
2749 | h6053 MACH_H6053 H6053 2762 | ||
2750 | smint01 MACH_SMINT01 SMINT01 2763 | ||
2751 | prtlvt2 MACH_PRTLVT2 PRTLVT2 2764 | ||
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
index d4b9c36ddc0f..bc0cfdad1cbc 100644
--- a/arch/cris/arch-v32/drivers/pci/bios.c
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -50,7 +50,7 @@ pcibios_align_resource(void *data, const struct resource *res,
 	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
 		start = (start + 0x3ff) & ~0x3ff;
 
-	return start
+	return start;
 }
 
 int pcibios_enable_resources(struct pci_dev *dev, int mask)
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 1ed15d7fea20..6b4fb28e9f99 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -41,7 +41,7 @@ pcibios_align_resource(void *data, const struct resource *res,
 	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
 		start = (start + 0x3ff) & ~0x3ff;
 
-	return start
+	return start;
 }
 
 
@@ -94,8 +94,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
 			r = &dev->resource[idx];
 			if (!r->start)
 				continue;
-			if (pci_claim_resource(dev, idx) < 0)
-				printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev));
+			pci_claim_resource(dev, idx);
 		}
 	}
 	pcibios_allocate_bus_resources(&bus->children);
@@ -125,7 +124,6 @@ static void __init pcibios_allocate_resources(int pass)
 		DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
 		    r->start, r->end, r->flags, disabled, pass);
 		if (pci_claim_resource(dev, idx) < 0) {
-			printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, pci_name(dev));
 			/* We'll assign a new address later */
 			r->end -= r->start;
 			r->start = 0;
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 203ec61c6d4c..76818f926539 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -75,9 +75,6 @@ config LOCKDEP_SUPPORT
 config HAVE_LATENCYTOP_SUPPORT
 	def_bool y
 
-config PCI
-	def_bool n
-
 config DTC
 	def_bool y
 
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 836832dd9b26..72f6e8583746 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -84,7 +84,7 @@ define archhelp
   echo '* linux.bin - Create raw binary'
   echo ' linux.bin.gz - Create compressed raw binary'
   echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
-  echo ' - stripped elf with fdt blob
+  echo ' - stripped elf with fdt blob'
   echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
   echo ' *_defconfig - Select default config from arch/microblaze/configs'
   echo ''
@@ -94,3 +94,5 @@ define archhelp
   echo ' name of a dts file from the arch/microblaze/boot/dts/ directory'
   echo ' (minus the .dts extension).'
 endef
+
+MRPROPER_FILES += $(boot)/simpleImage.*
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 902cf9846c3c..57f50c2371c6 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -23,8 +23,6 @@ $(obj)/system.dtb: $(obj)/$(DTB).dtb
 endif
 
 $(obj)/linux.bin: vmlinux FORCE
-	[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
-	touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
 	$(call if_changed,objcopy)
 	$(call if_changed,uimage)
 	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@@ -62,6 +60,4 @@ quiet_cmd_dtc = DTC $@
 $(obj)/%.dtb: $(dtstree)/%.dts FORCE
 	$(call if_changed,dtc)
 
-clean-kernel += linux.bin linux.bin.gz simpleImage.*
-
-clean-files += *.dtb simpleImage.*.unstrip
+clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 563c6b9453f0..8eeb09211ece 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -14,7 +14,6 @@
 #include <asm/ptrace.h>
 #include <asm/setup.h>
 #include <asm/registers.h>
-#include <asm/segment.h>
 #include <asm/entry.h>
 #include <asm/current.h>
 
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h
deleted file mode 100644
index 0e7102c3fb11..000000000000
--- a/arch/microblaze/include/asm/segment.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2008-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SEGMENT_H
-#define _ASM_MICROBLAZE_SEGMENT_H
-
-# ifndef __ASSEMBLY__
-
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
-/*
- * On Microblaze the fs value is actually the top of the corresponding
- * address space.
- *
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- *
- * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
- */
-# define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-# ifndef CONFIG_MMU
-# define KERNEL_DS	MAKE_MM_SEG(0)
-# define USER_DS	KERNEL_DS
-# else
-# define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
-# define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
-# endif
-
-# define get_ds()	(KERNEL_DS)
-# define get_fs()	(current_thread_info()->addr_limit)
-# define set_fs(val)	(current_thread_info()->addr_limit = (val))
-
-# define segment_eq(a, b)	((a).seg == (b).seg)
-
-# endif /* __ASSEMBLY__ */
-#endif /* _ASM_MICROBLAZE_SEGMENT_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 6e92885d381a..b2ca80f64640 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -19,7 +19,6 @@
 #ifndef __ASSEMBLY__
 # include <linux/types.h>
 # include <asm/processor.h>
-# include <asm/segment.h>
 
 /*
  * low level task data that entry.S needs immediate access to
@@ -60,6 +59,10 @@ struct cpu_context {
 	__u32 fsr;
 };
 
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
+
 struct thread_info {
 	struct task_struct *task; /* main task structure */
 	struct exec_domain *exec_domain; /* execution domain */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index bcb8b41d55af..2e1353c2d18d 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -24,6 +24,7 @@ extern void _tlbie(unsigned long address);
 extern void _tlbia(void);
 
 #define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); }
+#define __tlbie(x) { _tlbie(x); }
 
 static inline void local_flush_tlb_all(void)
 	{ __tlbia(); }
@@ -31,7 +32,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	{ __tlbia(); }
 static inline void local_flush_tlb_page(struct vm_area_struct *vma,
 	unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
+	{ __tlbie(vmaddr); }
 static inline void local_flush_tlb_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 	{ __tlbia(); }
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 371bd6e56d9a..446bec29b142 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h | |||
@@ -22,101 +22,73 @@ | |||
22 | #include <asm/mmu.h> | 22 | #include <asm/mmu.h> |
23 | #include <asm/page.h> | 23 | #include <asm/page.h> |
24 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
25 | #include <asm/segment.h> | ||
26 | #include <linux/string.h> | 25 | #include <linux/string.h> |
27 | 26 | ||
28 | #define VERIFY_READ 0 | 27 | #define VERIFY_READ 0 |
29 | #define VERIFY_WRITE 1 | 28 | #define VERIFY_WRITE 1 |
30 | 29 | ||
31 | #define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0) | 30 | /* |
32 | 31 | * On Microblaze the fs value is actually the top of the corresponding | |
33 | #ifndef CONFIG_MMU | 32 | * address space. |
34 | 33 | * | |
35 | extern int ___range_ok(unsigned long addr, unsigned long size); | 34 | * The fs value determines whether argument validity checking should be |
36 | 35 | * performed or not. If get_fs() == USER_DS, checking is performed, with | |
37 | #define __range_ok(addr, size) \ | 36 | * get_fs() == KERNEL_DS, checking is bypassed. |
38 | ___range_ok((unsigned long)(addr), (unsigned long)(size)) | 37 | * |
39 | 38 | * For historical reasons, these macros are grossly misnamed. | |
40 | #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) | 39 | * |
41 | #define __access_ok(add, size) (__range_ok((addr), (size)) == 0) | 40 | * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal. |
42 | 41 | */ | |
43 | /* Undefined function to trigger linker error */ | 42 | # define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) |
44 | extern int bad_user_access_length(void); | ||
45 | |||
46 | /* FIXME this is function for optimalization -> memcpy */ | ||
47 | #define __get_user(var, ptr) \ | ||
48 | ({ \ | ||
49 | int __gu_err = 0; \ | ||
50 | switch (sizeof(*(ptr))) { \ | ||
51 | case 1: \ | ||
52 | case 2: \ | ||
53 | case 4: \ | ||
54 | (var) = *(ptr); \ | ||
55 | break; \ | ||
56 | case 8: \ | ||
57 | memcpy((void *) &(var), (ptr), 8); \ | ||
58 | break; \ | ||
59 | default: \ | ||
60 | (var) = 0; \ | ||
61 | __gu_err = __get_user_bad(); \ | ||
62 | break; \ | ||
63 | } \ | ||
64 | __gu_err; \ | ||
65 | }) | ||
66 | 43 | ||
67 | #define __get_user_bad() (bad_user_access_length(), (-EFAULT)) | 44 | # ifndef CONFIG_MMU |
45 | # define KERNEL_DS MAKE_MM_SEG(0) | ||
46 | # define USER_DS KERNEL_DS | ||
47 | # else | ||
48 | # define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) | ||
49 | # define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) | ||
50 | # endif | ||
68 | 51 | ||
69 | /* FIXME is not there defined __pu_val */ | 52 | # define get_ds() (KERNEL_DS) |
70 | #define __put_user(var, ptr) \ | 53 | # define get_fs() (current_thread_info()->addr_limit) |
71 | ({ \ | 54 | # define set_fs(val) (current_thread_info()->addr_limit = (val)) |
72 | int __pu_err = 0; \ | ||
73 | switch (sizeof(*(ptr))) { \ | ||
74 | case 1: \ | ||
75 | case 2: \ | ||
76 | case 4: \ | ||
77 | *(ptr) = (var); \ | ||
78 | break; \ | ||
79 | case 8: { \ | ||
80 | typeof(*(ptr)) __pu_val = (var); \ | ||
81 | memcpy(ptr, &__pu_val, sizeof(__pu_val)); \ | ||
82 | } \ | ||
83 | break; \ | ||
84 | default: \ | ||
85 | __pu_err = __put_user_bad(); \ | ||
86 | break; \ | ||
87 | } \ | ||
88 | __pu_err; \ | ||
89 | }) | ||
90 | 55 | ||
91 | #define __put_user_bad() (bad_user_access_length(), (-EFAULT)) | 56 | # define segment_eq(a, b) ((a).seg == (b).seg) |
92 | 57 | ||
93 | #define put_user(x, ptr) __put_user((x), (ptr)) | 58 | /* |
94 | #define get_user(x, ptr) __get_user((x), (ptr)) | 59 | * The exception table consists of pairs of addresses: the first is the |
60 | * address of an instruction that is allowed to fault, and the second is | ||
61 | * the address at which the program should continue. No registers are | ||
62 | * modified, so it is entirely up to the continuation code to figure out | ||
63 | * what to do. | ||
64 | * | ||
65 | * All the routines below use bits of fixup code that are out of line | ||
66 | * with the main instruction path. This means when everything is well, | ||
67 | * we don't even have to jump over them. Further, they do not intrude | ||
68 | * on our cache or tlb entries. | ||
69 | */ | ||
70 | struct exception_table_entry { | ||
71 | unsigned long insn, fixup; | ||
72 | }; | ||
95 | 73 | ||
96 | #define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0) | 74 | /* Returns 0 if exception not found and fixup otherwise. */ |
97 | #define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0) | 75 | extern unsigned long search_exception_table(unsigned long); |
98 | 76 | ||
99 | #define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n))) | 77 | #ifndef CONFIG_MMU |
100 | #define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n))) | ||
101 | #define __copy_to_user_inatomic(to, from, n) \ | ||
102 | (__copy_to_user((to), (from), (n))) | ||
103 | #define __copy_from_user_inatomic(to, from, n) \ | ||
104 | (__copy_from_user((to), (from), (n))) | ||
105 | 78 | ||
106 | static inline unsigned long clear_user(void *addr, unsigned long size) | 79 | /* Check against bounds of physical memory */ |
80 | static inline int ___range_ok(unsigned long addr, unsigned long size) | ||
107 | { | 81 | { |
108 | if (access_ok(VERIFY_WRITE, addr, size)) | 82 | return ((addr < memory_start) || |
109 | size = __clear_user(addr, size); | 83 | ((addr + size) > memory_end)); |
110 | return size; | ||
111 | } | 84 | } |
112 | 85 | ||
113 | /* Returns 0 if exception not found and fixup otherwise. */ | 86 | #define __range_ok(addr, size) \ |
114 | extern unsigned long search_exception_table(unsigned long); | 87 | ___range_ok((unsigned long)(addr), (unsigned long)(size)) |
115 | 88 | ||
116 | extern long strncpy_from_user(char *dst, const char *src, long count); | 89 | #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) |
117 | extern long strnlen_user(const char *src, long count); | ||
118 | 90 | ||
119 | #else /* CONFIG_MMU */ | 91 | #else |
120 | 92 | ||
121 | /* | 93 | /* |
122 | * Address is valid if: | 94 | * Address is valid if: |
@@ -129,24 +101,88 @@ extern long strnlen_user(const char *src, long count); | |||
129 | /* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n", | 101 | /* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n", |
130 | type?"WRITE":"READ",addr,size,get_fs().seg)) */ | 102 | type?"WRITE":"READ",addr,size,get_fs().seg)) */ |
131 | 103 | ||
132 | /* | 104 | #endif |
133 | * All the __XXX versions macros/functions below do not perform | ||
134 | * access checking. It is assumed that the necessary checks have been | ||
135 | * already performed before the finction (macro) is called. | ||
136 | */ | ||
137 | 105 | ||
138 | #define get_user(x, ptr) \ | 106 | #ifdef CONFIG_MMU |
139 | ({ \ | 107 | # define __FIXUP_SECTION ".section .fixup,\"ax\"\n" |
140 | access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \ | 108 | # define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n" |
141 | ? __get_user((x), (ptr)) : -EFAULT; \ | 109 | #else |
142 | }) | 110 | # define __FIXUP_SECTION ".section .discard,\"ax\"\n" |
111 | # define __EX_TABLE_SECTION ".section .discard,\"a\"\n" | ||
112 | #endif | ||
143 | 113 | ||
144 | #define put_user(x, ptr) \ | 114 | extern unsigned long __copy_tofrom_user(void __user *to, |
145 | ({ \ | 115 | const void __user *from, unsigned long size); |
146 | access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \ | 116 | |
147 | ? __put_user((x), (ptr)) : -EFAULT; \ | 117 | /* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */ |
118 | static inline unsigned long __must_check __clear_user(void __user *to, | ||
119 | unsigned long n) | ||
120 | { | ||
121 | /* normal memset with two words to __ex_table */ | ||
122 | __asm__ __volatile__ ( \ | ||
123 | "1: sb r0, %2, r0;" \ | ||
124 | " addik %0, %0, -1;" \ | ||
125 | " bneid %0, 1b;" \ | ||
126 | " addik %2, %2, 1;" \ | ||
127 | "2: " \ | ||
128 | __EX_TABLE_SECTION \ | ||
129 | ".word 1b,2b;" \ | ||
130 | ".previous;" \ | ||
131 | : "=r"(n) \ | ||
132 | : "0"(n), "r"(to) | ||
133 | ); | ||
134 | return n; | ||
135 | } | ||
136 | |||
137 | static inline unsigned long __must_check clear_user(void __user *to, | ||
138 | unsigned long n) | ||
139 | { | ||
140 | might_sleep(); | ||
141 | if (unlikely(!access_ok(VERIFY_WRITE, to, n))) | ||
142 | return n; | ||
143 | |||
144 | return __clear_user(to, n); | ||
145 | } | ||
146 | |||
147 | /* put_user and get_user macros */ | ||
148 | extern long __user_bad(void); | ||
149 | |||
150 | #define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ | ||
151 | ({ \ | ||
152 | __asm__ __volatile__ ( \ | ||
153 | "1:" insn " %1, %2, r0;" \ | ||
154 | " addk %0, r0, r0;" \ | ||
155 | "2: " \ | ||
156 | __FIXUP_SECTION \ | ||
157 | "3: brid 2b;" \ | ||
158 | " addik %0, r0, %3;" \ | ||
159 | ".previous;" \ | ||
160 | __EX_TABLE_SECTION \ | ||
161 | ".word 1b,3b;" \ | ||
162 | ".previous;" \ | ||
163 | : "=&r"(__gu_err), "=r"(__gu_val) \ | ||
164 | : "r"(__gu_ptr), "i"(-EFAULT) \ | ||
165 | ); \ | ||
148 | }) | 166 | }) |
149 | 167 | ||
168 | /** | ||
169 | * get_user: - Get a simple variable from user space. | ||
170 | * @x: Variable to store result. | ||
171 | * @ptr: Source address, in user space. | ||
172 | * | ||
173 | * Context: User context only. This function may sleep. | ||
174 | * | ||
175 | * This macro copies a single simple variable from user space to kernel | ||
176 | * space. It supports simple types like char and int, but not larger | ||
177 | * data types like structures or arrays. | ||
178 | * | ||
179 | * @ptr must have pointer-to-simple-variable type, and the result of | ||
180 | * dereferencing @ptr must be assignable to @x without a cast. | ||
181 | * | ||
182 | * Returns zero on success, or -EFAULT on error. | ||
183 | * On error, the variable @x is set to zero. | ||
184 | */ | ||
185 | |||
150 | #define __get_user(x, ptr) \ | 186 | #define __get_user(x, ptr) \ |
151 | ({ \ | 187 | ({ \ |
152 | unsigned long __gu_val; \ | 188 | unsigned long __gu_val; \ |
@@ -163,30 +199,74 @@ extern long strnlen_user(const char *src, long count); | |||
163 | __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ | 199 | __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ |
164 | break; \ | 200 | break; \ |
165 | default: \ | 201 | default: \ |
166 | __gu_val = 0; __gu_err = -EINVAL; \ | 202 | /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\ |
167 | } \ | 203 | } \ |
168 | x = (__typeof__(*(ptr))) __gu_val; \ | 204 | x = (__typeof__(*(ptr))) __gu_val; \ |
169 | __gu_err; \ | 205 | __gu_err; \ |
170 | }) | 206 | }) |
171 | 207 | ||
172 | #define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ | 208 | |
209 | #define get_user(x, ptr) \ | ||
173 | ({ \ | 210 | ({ \ |
174 | __asm__ __volatile__ ( \ | 211 | access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \ |
175 | "1:" insn " %1, %2, r0; \ | 212 | ? __get_user((x), (ptr)) : -EFAULT; \ |
176 | addk %0, r0, r0; \ | 213 | }) |
177 | 2: \ | 214 | |
178 | .section .fixup,\"ax\"; \ | 215 | #define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ |
179 | 3: brid 2b; \ | 216 | ({ \ |
180 | addik %0, r0, %3; \ | 217 | __asm__ __volatile__ ( \ |
181 | .previous; \ | 218 | "1:" insn " %1, %2, r0;" \ |
182 | .section __ex_table,\"a\"; \ | 219 | " addk %0, r0, r0;" \ |
183 | .word 1b,3b; \ | 220 | "2: " \ |
184 | .previous;" \ | 221 | __FIXUP_SECTION \ |
185 | : "=r"(__gu_err), "=r"(__gu_val) \ | 222 | "3: brid 2b;" \ |
186 | : "r"(__gu_ptr), "i"(-EFAULT) \ | 223 | " addik %0, r0, %3;" \ |
187 | ); \ | 224 | ".previous;" \ |
225 | __EX_TABLE_SECTION \ | ||
226 | ".word 1b,3b;" \ | ||
227 | ".previous;" \ | ||
228 | : "=&r"(__gu_err) \ | ||
229 | : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \ | ||
230 | ); \ | ||
188 | }) | 231 | }) |
189 | 232 | ||
233 | #define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \ | ||
234 | ({ \ | ||
235 | __asm__ __volatile__ (" lwi %0, %1, 0;" \ | ||
236 | "1: swi %0, %2, 0;" \ | ||
237 | " lwi %0, %1, 4;" \ | ||
238 | "2: swi %0, %2, 4;" \ | ||
239 | " addk %0, r0, r0;" \ | ||
240 | "3: " \ | ||
241 | __FIXUP_SECTION \ | ||
242 | "4: brid 3b;" \ | ||
243 | " addik %0, r0, %3;" \ | ||
244 | ".previous;" \ | ||
245 | __EX_TABLE_SECTION \ | ||
246 | ".word 1b,4b,2b,4b;" \ | ||
247 | ".previous;" \ | ||
248 | : "=&r"(__gu_err) \ | ||
249 | : "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \ | ||
250 | ); \ | ||
251 | }) | ||
252 | |||
253 | /** | ||
254 | * put_user: - Write a simple value into user space. | ||
255 | * @x: Value to copy to user space. | ||
256 | * @ptr: Destination address, in user space. | ||
257 | * | ||
258 | * Context: User context only. This function may sleep. | ||
259 | * | ||
260 | * This macro copies a single simple value from kernel space to user | ||
261 | * space. It supports simple types like char and int, but not larger | ||
262 | * data types like structures or arrays. | ||
263 | * | ||
264 | * @ptr must have pointer-to-simple-variable type, and @x must be assignable | ||
265 | * to the result of dereferencing @ptr. | ||
266 | * | ||
267 | * Returns zero on success, or -EFAULT on error. | ||
268 | */ | ||
269 | |||
190 | #define __put_user(x, ptr) \ | 270 | #define __put_user(x, ptr) \ |
191 | ({ \ | 271 | ({ \ |
192 | __typeof__(*(ptr)) volatile __gu_val = (x); \ | 272 | __typeof__(*(ptr)) volatile __gu_val = (x); \ |
@@ -195,7 +275,7 @@ extern long strnlen_user(const char *src, long count); | |||
195 | case 1: \ | 275 | case 1: \ |
196 | __put_user_asm("sb", (ptr), __gu_val, __gu_err); \ | 276 | __put_user_asm("sb", (ptr), __gu_val, __gu_err); \ |
197 | break; \ | 277 | break; \ |
198 | case 2: \ | 278 | case 2: \ |
199 | __put_user_asm("sh", (ptr), __gu_val, __gu_err); \ | 279 | __put_user_asm("sh", (ptr), __gu_val, __gu_err); \ |
200 | break; \ | 280 | break; \ |
201 | case 4: \ | 281 | case 4: \ |
@@ -205,121 +285,82 @@ extern long strnlen_user(const char *src, long count); | |||
205 | __put_user_asm_8((ptr), __gu_val, __gu_err); \ | 285 | __put_user_asm_8((ptr), __gu_val, __gu_err); \ |
206 | break; \ | 286 | break; \ |
207 | default: \ | 287 | default: \ |
208 | __gu_err = -EINVAL; \ | 288 | /*__gu_err = -EINVAL;*/ __gu_err = __user_bad(); \ |
209 | } \ | 289 | } \ |
210 | __gu_err; \ | 290 | __gu_err; \ |
211 | }) | 291 | }) |
212 | 292 | ||
213 | #define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \ | 293 | #ifndef CONFIG_MMU |
214 | ({ \ | ||
215 | __asm__ __volatile__ (" lwi %0, %1, 0; \ | ||
216 | 1: swi %0, %2, 0; \ | ||
217 | lwi %0, %1, 4; \ | ||
218 | 2: swi %0, %2, 4; \ | ||
219 | addk %0,r0,r0; \ | ||
220 | 3: \ | ||
221 | .section .fixup,\"ax\"; \ | ||
222 | 4: brid 3b; \ | ||
223 | addik %0, r0, %3; \ | ||
224 | .previous; \ | ||
225 | .section __ex_table,\"a\"; \ | ||
226 | .word 1b,4b,2b,4b; \ | ||
227 | .previous;" \ | ||
228 | : "=&r"(__gu_err) \ | ||
229 | : "r"(&__gu_val), \ | ||
230 | "r"(__gu_ptr), "i"(-EFAULT) \ | ||
231 | ); \ | ||
232 | }) | ||
233 | 294 | ||
234 | #define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ | 295 | #define put_user(x, ptr) __put_user((x), (ptr)) |
235 | ({ \ | ||
236 | __asm__ __volatile__ ( \ | ||
237 | "1:" insn " %1, %2, r0; \ | ||
238 | addk %0, r0, r0; \ | ||
239 | 2: \ | ||
240 | .section .fixup,\"ax\"; \ | ||
241 | 3: brid 2b; \ | ||
242 | addik %0, r0, %3; \ | ||
243 | .previous; \ | ||
244 | .section __ex_table,\"a\"; \ | ||
245 | .word 1b,3b; \ | ||
246 | .previous;" \ | ||
247 | : "=r"(__gu_err) \ | ||
248 | : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \ | ||
249 | ); \ | ||
250 | }) | ||
251 | 296 | ||
252 | /* | 297 | #else /* CONFIG_MMU */ |
253 | * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. | ||
254 | */ | ||
255 | static inline int clear_user(char *to, int size) | ||
256 | { | ||
257 | if (size && access_ok(VERIFY_WRITE, to, size)) { | ||
258 | __asm__ __volatile__ (" \ | ||
259 | 1: \ | ||
260 | sb r0, %2, r0; \ | ||
261 | addik %0, %0, -1; \ | ||
262 | bneid %0, 1b; \ | ||
263 | addik %2, %2, 1; \ | ||
264 | 2: \ | ||
265 | .section __ex_table,\"a\"; \ | ||
266 | .word 1b,2b; \ | ||
267 | .section .text;" \ | ||
268 | : "=r"(size) \ | ||
269 | : "0"(size), "r"(to) | ||
270 | ); | ||
271 | } | ||
272 | return size; | ||
273 | } | ||
274 | 298 | ||
275 | #define __copy_from_user(to, from, n) copy_from_user((to), (from), (n)) | 299 | #define put_user(x, ptr) \ |
300 | ({ \ | ||
301 | access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \ | ||
302 | ? __put_user((x), (ptr)) : -EFAULT; \ | ||
303 | }) | ||
304 | #endif /* CONFIG_MMU */ | ||
305 | |||
306 | /* copy_to_from_user */ | ||
307 | #define __copy_from_user(to, from, n) \ | ||
308 | __copy_tofrom_user((__force void __user *)(to), \ | ||
309 | (void __user *)(from), (n)) | ||
276 | #define __copy_from_user_inatomic(to, from, n) \ | 310 | #define __copy_from_user_inatomic(to, from, n) \ |
277 | copy_from_user((to), (from), (n)) | 311 | copy_from_user((to), (from), (n)) |
278 | 312 | ||
279 | #define copy_to_user(to, from, n) \ | 313 | static inline long copy_from_user(void *to, |
280 | (access_ok(VERIFY_WRITE, (to), (n)) ? \ | 314 | const void __user *from, unsigned long n) |
281 | __copy_tofrom_user((void __user *)(to), \ | 315 | { |
282 | (__force const void __user *)(from), (n)) \ | 316 | might_sleep(); |
283 | : -EFAULT) | 317 | if (access_ok(VERIFY_READ, from, n)) |
318 | return __copy_from_user(to, from, n); | ||
319 | return n; | ||
320 | } | ||
284 | 321 | ||
285 | #define __copy_to_user(to, from, n) copy_to_user((to), (from), (n)) | 322 | #define __copy_to_user(to, from, n) \ |
323 | __copy_tofrom_user((void __user *)(to), \ | ||
324 | (__force const void __user *)(from), (n)) | ||
286 | #define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n)) | 325 | #define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n)) |
287 | 326 | ||
288 | #define copy_from_user(to, from, n) \ | 327 | static inline long copy_to_user(void __user *to, |
289 | (access_ok(VERIFY_READ, (from), (n)) ? \ | 328 | const void *from, unsigned long n) |
290 | __copy_tofrom_user((__force void __user *)(to), \ | 329 | { |
291 | (void __user *)(from), (n)) \ | 330 | might_sleep(); |
292 | : -EFAULT) | 331 | if (access_ok(VERIFY_WRITE, to, n)) |
332 | return __copy_to_user(to, from, n); | ||
333 | return n; | ||
334 | } | ||
293 | 335 | ||
336 | /* | ||
337 | * Copy a null terminated string from userspace. | ||
338 | */ | ||
294 | extern int __strncpy_user(char *to, const char __user *from, int len); | 339 | extern int __strncpy_user(char *to, const char __user *from, int len); |
295 | extern int __strnlen_user(const char __user *sstr, int len); | ||
296 | 340 | ||
297 | #define strncpy_from_user(to, from, len) \ | 341 | #define __strncpy_from_user __strncpy_user |
298 | (access_ok(VERIFY_READ, from, 1) ? \ | ||
299 | __strncpy_user(to, from, len) : -EFAULT) | ||
300 | #define strnlen_user(str, len) \ | ||
301 | (access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0) | ||
302 | 342 | ||
303 | #endif /* CONFIG_MMU */ | 343 | static inline long |
304 | 344 | strncpy_from_user(char *dst, const char __user *src, long count) | |
305 | extern unsigned long __copy_tofrom_user(void __user *to, | 345 | { |
306 | const void __user *from, unsigned long size); | 346 | if (!access_ok(VERIFY_READ, src, 1)) |
347 | return -EFAULT; | ||
348 | return __strncpy_from_user(dst, src, count); | ||
349 | } | ||
307 | 350 | ||
308 | /* | 351 | /* |
309 | * The exception table consists of pairs of addresses: the first is the | 352 | * Return the size of a string (including the ending 0) |
310 | * address of an instruction that is allowed to fault, and the second is | ||
311 | * the address at which the program should continue. No registers are | ||
312 | * modified, so it is entirely up to the continuation code to figure out | ||
313 | * what to do. | ||
314 | * | 353 | * |
315 | * All the routines below use bits of fixup code that are out of line | 354 | * Return 0 on exception, a value greater than N if too long |
316 | * with the main instruction path. This means when everything is well, | ||
317 | * we don't even have to jump over them. Further, they do not intrude | ||
318 | * on our cache or tlb entries. | ||
319 | */ | 355 | */ |
320 | struct exception_table_entry { | 356 | extern int __strnlen_user(const char __user *sstr, int len); |
321 | unsigned long insn, fixup; | 357 | |
322 | }; | 358 | static inline long strnlen_user(const char __user *src, long n) |
359 | { | ||
360 | if (!access_ok(VERIFY_READ, src, 1)) | ||
361 | return 0; | ||
362 | return __strnlen_user(src, n); | ||
363 | } | ||
323 | 364 | ||
324 | #endif /* __ASSEMBLY__ */ | 365 | #endif /* __ASSEMBLY__ */ |
325 | #endif /* __KERNEL__ */ | 366 | #endif /* __KERNEL__ */ |
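The exception-table comment carried into uaccess.h above explains the recovery mechanism behind __get_user_asm(), __put_user_asm() and __clear_user(): each ".word" pair emitted into __EX_TABLE_SECTION names a faulting instruction and its fixup, and search_exception_table() (declared above, documented to return 0 when no entry matches) hands the fault handler the resume address; the fixup code then loads -EFAULT (or the remaining count) and branches back. A rough C sketch of that lookup, purely illustrative and simplified to a linear scan:

	struct exception_table_entry {
		unsigned long insn, fixup;
	};

	/* Sketch: on a fault at 'pc' inside a user-access helper, resume
	 * at the recorded fixup instead of treating it as a fatal fault. */
	static unsigned long sketch_search_extable(const struct exception_table_entry *tbl,
						   int entries, unsigned long pc)
	{
		int i;

		for (i = 0; i < entries; i++)
			if (tbl[i].insn == pc)
				return tbl[i].fixup;	/* resume address */
		return 0;				/* no fixup: real fault */
	}
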
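The reworked helpers keep the calling convention spelled out in the kerneldoc above: the checked variants (get_user/put_user, copy_from_user/copy_to_user) verify the range with access_ok() and return 0 on success, -EFAULT or the number of uncopied bytes on failure, while the double-underscore variants skip the check. A minimal usage sketch under those rules (hypothetical handler, not taken from this commit):

	/* Hypothetical example of the documented return conventions. */
	static long example_handler(void __user *uarg)
	{
		struct { int a, b; } req;
		int answer;

		if (copy_from_user(&req, uarg, sizeof(req)))
			return -EFAULT;		/* non-zero = bytes not copied */

		answer = req.a + req.b;

		if (put_user(answer, (int __user *)uarg))
			return -EFAULT;		/* put_user() returns 0 or -EFAULT */

		return 0;
	}
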
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index b1084974fccd..4d5b0311601b 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c | |||
@@ -37,7 +37,7 @@ static inline void __dma_sync_page(unsigned long paddr, unsigned long offset, | |||
37 | 37 | ||
38 | static unsigned long get_dma_direct_offset(struct device *dev) | 38 | static unsigned long get_dma_direct_offset(struct device *dev) |
39 | { | 39 | { |
40 | if (dev) | 40 | if (likely(dev)) |
41 | return (unsigned long)dev->archdata.dma_data; | 41 | return (unsigned long)dev->archdata.dma_data; |
42 | 42 | ||
43 | return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */ | 43 | return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */ |
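Several hunks in this merge only add likely()/unlikely() hints, here around the dev check and later in default_idle(), memcpy(), memset() and do_page_fault(). For reference, these are the standard branch-prediction macros from <linux/compiler.h> (not changed by this commit):

	/* GCC builds: tell the compiler which branch to treat as hot. */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)
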
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index cb7815cfe5ab..da6a5f5dc766 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S | |||
@@ -51,6 +51,12 @@ swapper_pg_dir: | |||
51 | 51 | ||
52 | .text | 52 | .text |
53 | ENTRY(_start) | 53 | ENTRY(_start) |
54 | #if CONFIG_KERNEL_BASE_ADDR == 0 | ||
55 | brai TOPHYS(real_start) | ||
56 | .org 0x100 | ||
57 | real_start: | ||
58 | #endif | ||
59 | |||
54 | mfs r1, rmsr | 60 | mfs r1, rmsr |
55 | andi r1, r1, ~2 | 61 | andi r1, r1, ~2 |
56 | mts rmsr, r1 | 62 | mts rmsr, r1 |
@@ -99,8 +105,8 @@ no_fdt_arg: | |||
99 | tophys(r4,r4) /* convert to phys address */ | 105 | tophys(r4,r4) /* convert to phys address */ |
100 | ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ | 106 | ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ |
101 | _copy_command_line: | 107 | _copy_command_line: |
102 | lbu r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */ | 108 | lbu r2, r5, r6 /* r2=r5+r6 - r5 contain pointer to command line */ |
103 | sb r2, r4, r6 /* addr[r4+r6]= r7*/ | 109 | sb r2, r4, r6 /* addr[r4+r6]= r2*/ |
104 | addik r6, r6, 1 /* increment counting */ | 110 | addik r6, r6, 1 /* increment counting */ |
105 | bgtid r3, _copy_command_line /* loop for all entries */ | 111 | bgtid r3, _copy_command_line /* loop for all entries */ |
106 | addik r3, r3, -1 /* descrement loop */ | 112 | addik r3, r3, -1 /* descrement loop */ |
@@ -128,7 +134,7 @@ _copy_bram: | |||
128 | * virtual to physical. | 134 | * virtual to physical. |
129 | */ | 135 | */ |
130 | nop | 136 | nop |
131 | addik r3, r0, 63 /* Invalidate all TLB entries */ | 137 | addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */ |
132 | _invalidate: | 138 | _invalidate: |
133 | mts rtlbx, r3 | 139 | mts rtlbx, r3 |
134 | mts rtlbhi, r0 /* flush: ensure V is clear */ | 140 | mts rtlbhi, r0 /* flush: ensure V is clear */ |
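The comment corrections in _copy_command_line only fix which register the comments name (the byte actually moves through r2); the loop itself is a straightforward bounded byte copy, roughly the following C sketch using the same COMMAND_LINE_SIZE bound, and the TLB-invalidation count now comes from MICROBLAZE_TLB_SIZE rather than the literal 63:

	/* C-level sketch of _copy_command_line: r5 = source command line,
	 * r4 = physical destination. */
	static void copy_command_line_sketch(char *dst, const char *src)
	{
		unsigned int i;

		for (i = 0; i < COMMAND_LINE_SIZE; i++)
			dst[i] = src[i];
	}
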
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index 2b86c03aa841..995a2123635b 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S | |||
@@ -313,13 +313,13 @@ _hw_exception_handler: | |||
313 | mfs r5, rmsr; | 313 | mfs r5, rmsr; |
314 | nop | 314 | nop |
315 | swi r5, r1, 0; | 315 | swi r5, r1, 0; |
316 | mfs r3, resr | 316 | mfs r4, resr |
317 | nop | 317 | nop |
318 | mfs r4, rear; | 318 | mfs r3, rear; |
319 | nop | 319 | nop |
320 | 320 | ||
321 | #ifndef CONFIG_MMU | 321 | #ifndef CONFIG_MMU |
322 | andi r5, r3, 0x1000; /* Check ESR[DS] */ | 322 | andi r5, r4, 0x1000; /* Check ESR[DS] */ |
323 | beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ | 323 | beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ |
324 | mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ | 324 | mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ |
325 | nop | 325 | nop |
@@ -327,13 +327,14 @@ not_in_delay_slot: | |||
327 | swi r17, r1, PT_R17 | 327 | swi r17, r1, PT_R17 |
328 | #endif | 328 | #endif |
329 | 329 | ||
330 | andi r5, r3, 0x1F; /* Extract ESR[EXC] */ | 330 | andi r5, r4, 0x1F; /* Extract ESR[EXC] */ |
331 | 331 | ||
332 | #ifdef CONFIG_MMU | 332 | #ifdef CONFIG_MMU |
333 | /* Calculate exception vector offset = r5 << 2 */ | 333 | /* Calculate exception vector offset = r5 << 2 */ |
334 | addk r6, r5, r5; /* << 1 */ | 334 | addk r6, r5, r5; /* << 1 */ |
335 | addk r6, r6, r6; /* << 2 */ | 335 | addk r6, r6, r6; /* << 2 */ |
336 | 336 | ||
337 | #ifdef DEBUG | ||
337 | /* counting which exception happen */ | 338 | /* counting which exception happen */ |
338 | lwi r5, r0, 0x200 + TOPHYS(r0_ram) | 339 | lwi r5, r0, 0x200 + TOPHYS(r0_ram) |
339 | addi r5, r5, 1 | 340 | addi r5, r5, 1 |
@@ -341,6 +342,7 @@ not_in_delay_slot: | |||
341 | lwi r5, r6, 0x200 + TOPHYS(r0_ram) | 342 | lwi r5, r6, 0x200 + TOPHYS(r0_ram) |
342 | addi r5, r5, 1 | 343 | addi r5, r5, 1 |
343 | swi r5, r6, 0x200 + TOPHYS(r0_ram) | 344 | swi r5, r6, 0x200 + TOPHYS(r0_ram) |
345 | #endif | ||
344 | /* end */ | 346 | /* end */ |
345 | /* Load the HW Exception vector */ | 347 | /* Load the HW Exception vector */ |
346 | lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) | 348 | lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) |
@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */ | |||
376 | swi r18, r1, PT_R18 | 378 | swi r18, r1, PT_R18 |
377 | 379 | ||
378 | or r5, r1, r0 | 380 | or r5, r1, r0 |
379 | andi r6, r3, 0x1F; /* Load ESR[EC] */ | 381 | andi r6, r4, 0x1F; /* Load ESR[EC] */ |
380 | lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ | 382 | lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ |
381 | swi r7, r1, PT_MODE | 383 | swi r7, r1, PT_MODE |
382 | mfs r7, rfsr | 384 | mfs r7, rfsr |
@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */ | |||
426 | */ | 428 | */ |
427 | handle_unaligned_ex: | 429 | handle_unaligned_ex: |
428 | /* Working registers already saved: R3, R4, R5, R6 | 430 | /* Working registers already saved: R3, R4, R5, R6 |
429 | * R3 = ESR | 431 | * R4 = ESR |
430 | * R4 = EAR | 432 | * R3 = EAR |
431 | */ | 433 | */ |
432 | #ifdef CONFIG_MMU | 434 | #ifdef CONFIG_MMU |
433 | andi r6, r3, 0x1000 /* Check ESR[DS] */ | 435 | andi r6, r4, 0x1000 /* Check ESR[DS] */ |
434 | beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ | 436 | beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ |
435 | mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ | 437 | mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ |
436 | nop | 438 | nop |
@@ -439,7 +441,7 @@ _no_delayslot: | |||
439 | RESTORE_STATE; | 441 | RESTORE_STATE; |
440 | bri unaligned_data_trap | 442 | bri unaligned_data_trap |
441 | #endif | 443 | #endif |
442 | andi r6, r3, 0x3E0; /* Mask and extract the register operand */ | 444 | andi r6, r4, 0x3E0; /* Mask and extract the register operand */ |
443 | srl r6, r6; /* r6 >> 5 */ | 445 | srl r6, r6; /* r6 >> 5 */ |
444 | srl r6, r6; | 446 | srl r6, r6; |
445 | srl r6, r6; | 447 | srl r6, r6; |
@@ -448,33 +450,33 @@ _no_delayslot: | |||
448 | /* Store the register operand in a temporary location */ | 450 | /* Store the register operand in a temporary location */ |
449 | sbi r6, r0, TOPHYS(ex_reg_op); | 451 | sbi r6, r0, TOPHYS(ex_reg_op); |
450 | 452 | ||
451 | andi r6, r3, 0x400; /* Extract ESR[S] */ | 453 | andi r6, r4, 0x400; /* Extract ESR[S] */ |
452 | bnei r6, ex_sw; | 454 | bnei r6, ex_sw; |
453 | ex_lw: | 455 | ex_lw: |
454 | andi r6, r3, 0x800; /* Extract ESR[W] */ | 456 | andi r6, r4, 0x800; /* Extract ESR[W] */ |
455 | beqi r6, ex_lhw; | 457 | beqi r6, ex_lhw; |
456 | lbui r5, r4, 0; /* Exception address in r4 */ | 458 | lbui r5, r3, 0; /* Exception address in r3 */ |
457 | /* Load a word, byte-by-byte from destination address | 459 | /* Load a word, byte-by-byte from destination address |
458 | and save it in tmp space */ | 460 | and save it in tmp space */ |
459 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); | 461 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); |
460 | lbui r5, r4, 1; | 462 | lbui r5, r3, 1; |
461 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); | 463 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); |
462 | lbui r5, r4, 2; | 464 | lbui r5, r3, 2; |
463 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); | 465 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); |
464 | lbui r5, r4, 3; | 466 | lbui r5, r3, 3; |
465 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); | 467 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); |
466 | /* Get the destination register value into r3 */ | 468 | /* Get the destination register value into r4 */ |
467 | lwi r3, r0, TOPHYS(ex_tmp_data_loc_0); | 469 | lwi r4, r0, TOPHYS(ex_tmp_data_loc_0); |
468 | bri ex_lw_tail; | 470 | bri ex_lw_tail; |
469 | ex_lhw: | 471 | ex_lhw: |
470 | lbui r5, r4, 0; /* Exception address in r4 */ | 472 | lbui r5, r3, 0; /* Exception address in r3 */ |
471 | /* Load a half-word, byte-by-byte from destination | 473 | /* Load a half-word, byte-by-byte from destination |
472 | address and save it in tmp space */ | 474 | address and save it in tmp space */ |
473 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); | 475 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); |
474 | lbui r5, r4, 1; | 476 | lbui r5, r3, 1; |
475 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); | 477 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); |
476 | /* Get the destination register value into r3 */ | 478 | /* Get the destination register value into r4 */ |
477 | lhui r3, r0, TOPHYS(ex_tmp_data_loc_0); | 479 | lhui r4, r0, TOPHYS(ex_tmp_data_loc_0); |
478 | ex_lw_tail: | 480 | ex_lw_tail: |
479 | /* Get the destination register number into r5 */ | 481 | /* Get the destination register number into r5 */ |
480 | lbui r5, r0, TOPHYS(ex_reg_op); | 482 | lbui r5, r0, TOPHYS(ex_reg_op); |
@@ -502,25 +504,25 @@ ex_sw_tail: | |||
502 | andi r6, r6, 0x800; /* Extract ESR[W] */ | 504 | andi r6, r6, 0x800; /* Extract ESR[W] */ |
503 | beqi r6, ex_shw; | 505 | beqi r6, ex_shw; |
504 | /* Get the word - delay slot */ | 506 | /* Get the word - delay slot */ |
505 | swi r3, r0, TOPHYS(ex_tmp_data_loc_0); | 507 | swi r4, r0, TOPHYS(ex_tmp_data_loc_0); |
506 | /* Store the word, byte-by-byte into destination address */ | 508 | /* Store the word, byte-by-byte into destination address */ |
507 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_0); | 509 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_0); |
508 | sbi r3, r4, 0; | 510 | sbi r4, r3, 0; |
509 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_1); | 511 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_1); |
510 | sbi r3, r4, 1; | 512 | sbi r4, r3, 1; |
511 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); | 513 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_2); |
512 | sbi r3, r4, 2; | 514 | sbi r4, r3, 2; |
513 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); | 515 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_3); |
514 | sbi r3, r4, 3; | 516 | sbi r4, r3, 3; |
515 | bri ex_handler_done; | 517 | bri ex_handler_done; |
516 | 518 | ||
517 | ex_shw: | 519 | ex_shw: |
518 | /* Store the lower half-word, byte-by-byte into destination address */ | 520 | /* Store the lower half-word, byte-by-byte into destination address */ |
519 | swi r3, r0, TOPHYS(ex_tmp_data_loc_0); | 521 | swi r4, r0, TOPHYS(ex_tmp_data_loc_0); |
520 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); | 522 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_2); |
521 | sbi r3, r4, 0; | 523 | sbi r4, r3, 0; |
522 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); | 524 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_3); |
523 | sbi r3, r4, 1; | 525 | sbi r4, r3, 1; |
524 | ex_sw_end: /* Exception handling of store word, ends. */ | 526 | ex_sw_end: /* Exception handling of store word, ends. */ |
525 | 527 | ||
526 | ex_handler_done: | 528 | ex_handler_done: |
@@ -560,21 +562,16 @@ ex_handler_done: | |||
560 | */ | 562 | */ |
561 | mfs r11, rpid | 563 | mfs r11, rpid |
562 | nop | 564 | nop |
563 | bri 4 | ||
564 | mfs r3, rear /* Get faulting address */ | ||
565 | nop | ||
566 | /* If we are faulting a kernel address, we have to use the | 565 | /* If we are faulting a kernel address, we have to use the |
567 | * kernel page tables. | 566 | * kernel page tables. |
568 | */ | 567 | */ |
569 | ori r4, r0, CONFIG_KERNEL_START | 568 | ori r5, r0, CONFIG_KERNEL_START |
570 | cmpu r4, r3, r4 | 569 | cmpu r5, r3, r5 |
571 | bgti r4, ex3 | 570 | bgti r5, ex3 |
572 | /* First, check if it was a zone fault (which means a user | 571 | /* First, check if it was a zone fault (which means a user |
573 | * tried to access a kernel or read-protected page - always | 572 | * tried to access a kernel or read-protected page - always |
574 | * a SEGV). All other faults here must be stores, so no | 573 | * a SEGV). All other faults here must be stores, so no |
575 | * need to check ESR_S as well. */ | 574 | * need to check ESR_S as well. */ |
576 | mfs r4, resr | ||
577 | nop | ||
578 | andi r4, r4, 0x800 /* ESR_Z - zone protection */ | 575 | andi r4, r4, 0x800 /* ESR_Z - zone protection */ |
579 | bnei r4, ex2 | 576 | bnei r4, ex2 |
580 | 577 | ||
@@ -589,8 +586,6 @@ ex_handler_done: | |||
589 | * tried to access a kernel or read-protected page - always | 586 | * tried to access a kernel or read-protected page - always |
590 | * a SEGV). All other faults here must be stores, so no | 587 | * a SEGV). All other faults here must be stores, so no |
591 | * need to check ESR_S as well. */ | 588 | * need to check ESR_S as well. */ |
592 | mfs r4, resr | ||
593 | nop | ||
594 | andi r4, r4, 0x800 /* ESR_Z */ | 589 | andi r4, r4, 0x800 /* ESR_Z */ |
595 | bnei r4, ex2 | 590 | bnei r4, ex2 |
596 | /* get current task address */ | 591 | /* get current task address */ |
@@ -665,8 +660,6 @@ ex_handler_done: | |||
665 | * R3 = ESR | 660 | * R3 = ESR |
666 | */ | 661 | */ |
667 | 662 | ||
668 | mfs r3, rear /* Get faulting address */ | ||
669 | nop | ||
670 | RESTORE_STATE; | 663 | RESTORE_STATE; |
671 | bri page_fault_instr_trap | 664 | bri page_fault_instr_trap |
672 | 665 | ||
@@ -677,18 +670,15 @@ ex_handler_done: | |||
677 | */ | 670 | */ |
678 | handle_data_tlb_miss_exception: | 671 | handle_data_tlb_miss_exception: |
679 | /* Working registers already saved: R3, R4, R5, R6 | 672 | /* Working registers already saved: R3, R4, R5, R6 |
680 | * R3 = ESR | 673 | * R3 = EAR, R4 = ESR |
681 | */ | 674 | */ |
682 | mfs r11, rpid | 675 | mfs r11, rpid |
683 | nop | 676 | nop |
684 | bri 4 | ||
685 | mfs r3, rear /* Get faulting address */ | ||
686 | nop | ||
687 | 677 | ||
688 | /* If we are faulting a kernel address, we have to use the | 678 | /* If we are faulting a kernel address, we have to use the |
689 | * kernel page tables. */ | 679 | * kernel page tables. */ |
690 | ori r4, r0, CONFIG_KERNEL_START | 680 | ori r6, r0, CONFIG_KERNEL_START |
691 | cmpu r4, r3, r4 | 681 | cmpu r4, r3, r6 |
692 | bgti r4, ex5 | 682 | bgti r4, ex5 |
693 | ori r4, r0, swapper_pg_dir | 683 | ori r4, r0, swapper_pg_dir |
694 | mts rpid, r0 /* TLB will have 0 TID */ | 684 | mts rpid, r0 /* TLB will have 0 TID */ |
@@ -731,9 +721,8 @@ ex_handler_done: | |||
731 | * Many of these bits are software only. Bits we don't set | 721 | * Many of these bits are software only. Bits we don't set |
732 | * here we (properly should) assume have the appropriate value. | 722 | * here we (properly should) assume have the appropriate value. |
733 | */ | 723 | */ |
724 | brid finish_tlb_load | ||
734 | andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ | 725 | andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ |
735 | |||
736 | bri finish_tlb_load | ||
737 | ex7: | 726 | ex7: |
738 | /* The bailout. Restore registers to pre-exception conditions | 727 | /* The bailout. Restore registers to pre-exception conditions |
739 | * and call the heavyweights to help us out. | 728 | * and call the heavyweights to help us out. |
@@ -754,9 +743,6 @@ ex_handler_done: | |||
754 | */ | 743 | */ |
755 | mfs r11, rpid | 744 | mfs r11, rpid |
756 | nop | 745 | nop |
757 | bri 4 | ||
758 | mfs r3, rear /* Get faulting address */ | ||
759 | nop | ||
760 | 746 | ||
761 | /* If we are faulting a kernel address, we have to use the | 747 | /* If we are faulting a kernel address, we have to use the |
762 | * kernel page tables. | 748 | * kernel page tables. |
@@ -792,7 +778,7 @@ ex_handler_done: | |||
792 | lwi r4, r5, 0 /* Get Linux PTE */ | 778 | lwi r4, r5, 0 /* Get Linux PTE */ |
793 | 779 | ||
794 | andi r6, r4, _PAGE_PRESENT | 780 | andi r6, r4, _PAGE_PRESENT |
795 | beqi r6, ex7 | 781 | beqi r6, ex10 |
796 | 782 | ||
797 | ori r4, r4, _PAGE_ACCESSED | 783 | ori r4, r4, _PAGE_ACCESSED |
798 | swi r4, r5, 0 | 784 | swi r4, r5, 0 |
@@ -805,9 +791,8 @@ ex_handler_done: | |||
805 | * Many of these bits are software only. Bits we don't set | 791 | * Many of these bits are software only. Bits we don't set |
806 | * here we (properly should) assume have the appropriate value. | 792 | * here we (properly should) assume have the appropriate value. |
807 | */ | 793 | */ |
794 | brid finish_tlb_load | ||
808 | andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ | 795 | andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ |
809 | |||
810 | bri finish_tlb_load | ||
811 | ex10: | 796 | ex10: |
812 | /* The bailout. Restore registers to pre-exception conditions | 797 | /* The bailout. Restore registers to pre-exception conditions |
813 | * and call the heavyweights to help us out. | 798 | * and call the heavyweights to help us out. |
@@ -837,9 +822,9 @@ ex_handler_done: | |||
837 | andi r5, r5, (MICROBLAZE_TLB_SIZE-1) | 822 | andi r5, r5, (MICROBLAZE_TLB_SIZE-1) |
838 | ori r6, r0, 1 | 823 | ori r6, r0, 1 |
839 | cmp r31, r5, r6 | 824 | cmp r31, r5, r6 |
840 | blti r31, sem | 825 | blti r31, ex12 |
841 | addik r5, r6, 1 | 826 | addik r5, r6, 1 |
842 | sem: | 827 | ex12: |
843 | /* MS: save back current TLB index */ | 828 | /* MS: save back current TLB index */ |
844 | swi r5, r0, TOPHYS(tlb_index) | 829 | swi r5, r0, TOPHYS(tlb_index) |
845 | 830 | ||
@@ -859,7 +844,6 @@ ex_handler_done: | |||
859 | nop | 844 | nop |
860 | 845 | ||
861 | /* Done...restore registers and get out of here. */ | 846 | /* Done...restore registers and get out of here. */ |
862 | ex12: | ||
863 | mts rpid, r11 | 847 | mts rpid, r11 |
864 | nop | 848 | nop |
865 | bri 4 | 849 | bri 4 |
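The register swap through the unaligned-access path (ESR now carried in r4, EAR in r3) leaves the algorithm described by the comments untouched: the faulting access is emulated byte by byte through a temporary word. A simplified C sketch of the load-word case (ex_lw), with placeholder names for the faulting address and the reassembled value:

	/* Sketch of ex_lw: emulate an unaligned 32-bit load at 'ear' by
	 * gathering the word one byte at a time, as the handler does via
	 * ex_tmp_data_loc_0..3. */
	static unsigned int emulate_unaligned_lw(unsigned long ear)
	{
		unsigned char tmp[4];
		unsigned int value;

		tmp[0] = *(unsigned char *)(ear + 0);
		tmp[1] = *(unsigned char *)(ear + 1);
		tmp[2] = *(unsigned char *)(ear + 2);
		tmp[3] = *(unsigned char *)(ear + 3);
		memcpy(&value, tmp, 4);	/* reassembled word */
		return value;		/* handler then patches it into the
					   saved destination register */
	}
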
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S index df16c6287a8e..7cf86498326c 100644 --- a/arch/microblaze/kernel/misc.S +++ b/arch/microblaze/kernel/misc.S | |||
@@ -26,9 +26,10 @@ | |||
26 | * We avoid flushing the pinned 0, 1 and possibly 2 entries. | 26 | * We avoid flushing the pinned 0, 1 and possibly 2 entries. |
27 | */ | 27 | */ |
28 | .globl _tlbia; | 28 | .globl _tlbia; |
29 | .type _tlbia, @function | ||
29 | .align 4; | 30 | .align 4; |
30 | _tlbia: | 31 | _tlbia: |
31 | addik r12, r0, 63 /* flush all entries (63 - 3) */ | 32 | addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */ |
32 | /* isync */ | 33 | /* isync */ |
33 | _tlbia_1: | 34 | _tlbia_1: |
34 | mts rtlbx, r12 | 35 | mts rtlbx, r12 |
@@ -41,11 +42,13 @@ _tlbia_1: | |||
41 | /* sync */ | 42 | /* sync */ |
42 | rtsd r15, 8 | 43 | rtsd r15, 8 |
43 | nop | 44 | nop |
45 | .size _tlbia, . - _tlbia | ||
44 | 46 | ||
45 | /* | 47 | /* |
46 | * Flush MMU TLB for a particular address (in r5) | 48 | * Flush MMU TLB for a particular address (in r5) |
47 | */ | 49 | */ |
48 | .globl _tlbie; | 50 | .globl _tlbie; |
51 | .type _tlbie, @function | ||
49 | .align 4; | 52 | .align 4; |
50 | _tlbie: | 53 | _tlbie: |
51 | mts rtlbsx, r5 /* look up the address in TLB */ | 54 | mts rtlbsx, r5 /* look up the address in TLB */ |
@@ -59,17 +62,20 @@ _tlbie_1: | |||
59 | rtsd r15, 8 | 62 | rtsd r15, 8 |
60 | nop | 63 | nop |
61 | 64 | ||
65 | .size _tlbie, . - _tlbie | ||
66 | |||
62 | /* | 67 | /* |
63 | * Allocate TLB entry for early console | 68 | * Allocate TLB entry for early console |
64 | */ | 69 | */ |
65 | .globl early_console_reg_tlb_alloc; | 70 | .globl early_console_reg_tlb_alloc; |
71 | .type early_console_reg_tlb_alloc, @function | ||
66 | .align 4; | 72 | .align 4; |
67 | early_console_reg_tlb_alloc: | 73 | early_console_reg_tlb_alloc: |
68 | /* | 74 | /* |
69 | * Load a TLB entry for the UART, so that microblaze_progress() can use | 75 | * Load a TLB entry for the UART, so that microblaze_progress() can use |
70 | * the UARTs nice and early. We use a 4k real==virtual mapping. | 76 | * the UARTs nice and early. We use a 4k real==virtual mapping. |
71 | */ | 77 | */ |
72 | ori r4, r0, 63 | 78 | ori r4, r0, MICROBLAZE_TLB_SIZE - 1 |
73 | mts rtlbx, r4 /* TLB slot 2 */ | 79 | mts rtlbx, r4 /* TLB slot 2 */ |
74 | 80 | ||
75 | or r4,r5,r0 | 81 | or r4,r5,r0 |
@@ -86,6 +92,8 @@ early_console_reg_tlb_alloc: | |||
86 | rtsd r15, 8 | 92 | rtsd r15, 8 |
87 | nop | 93 | nop |
88 | 94 | ||
95 | .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc | ||
96 | |||
89 | /* | 97 | /* |
90 | * Copy a whole page (4096 bytes). | 98 | * Copy a whole page (4096 bytes). |
91 | */ | 99 | */ |
@@ -104,6 +112,7 @@ early_console_reg_tlb_alloc: | |||
104 | #define DCACHE_LINE_BYTES (4 * 4) | 112 | #define DCACHE_LINE_BYTES (4 * 4) |
105 | 113 | ||
106 | .globl copy_page; | 114 | .globl copy_page; |
115 | .type copy_page, @function | ||
107 | .align 4; | 116 | .align 4; |
108 | copy_page: | 117 | copy_page: |
109 | ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1 | 118 | ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1 |
@@ -118,3 +127,5 @@ _copy_page_loop: | |||
118 | addik r11, r11, -1 | 127 | addik r11, r11, -1 |
119 | rtsd r15, 8 | 128 | rtsd r15, 8 |
120 | nop | 129 | nop |
130 | |||
131 | .size copy_page, . - copy_page | ||
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 812f1bf06c9e..09bed44dfcd3 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/bitops.h> | 15 | #include <linux/bitops.h> |
16 | #include <asm/system.h> | 16 | #include <asm/system.h> |
17 | #include <asm/pgalloc.h> | 17 | #include <asm/pgalloc.h> |
18 | #include <asm/uaccess.h> /* for USER_DS macros */ | ||
18 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
19 | 20 | ||
20 | void show_regs(struct pt_regs *regs) | 21 | void show_regs(struct pt_regs *regs) |
@@ -74,7 +75,10 @@ __setup("hlt", hlt_setup); | |||
74 | 75 | ||
75 | void default_idle(void) | 76 | void default_idle(void) |
76 | { | 77 | { |
77 | if (!hlt_counter) { | 78 | if (likely(hlt_counter)) { |
79 | while (!need_resched()) | ||
80 | cpu_relax(); | ||
81 | } else { | ||
78 | clear_thread_flag(TIF_POLLING_NRFLAG); | 82 | clear_thread_flag(TIF_POLLING_NRFLAG); |
79 | smp_mb__after_clear_bit(); | 83 | smp_mb__after_clear_bit(); |
80 | local_irq_disable(); | 84 | local_irq_disable(); |
@@ -82,9 +86,7 @@ void default_idle(void) | |||
82 | cpu_sleep(); | 86 | cpu_sleep(); |
83 | local_irq_enable(); | 87 | local_irq_enable(); |
84 | set_thread_flag(TIF_POLLING_NRFLAG); | 88 | set_thread_flag(TIF_POLLING_NRFLAG); |
85 | } else | 89 | } |
86 | while (!need_resched()) | ||
87 | cpu_relax(); | ||
88 | } | 90 | } |
89 | 91 | ||
90 | void cpu_idle(void) | 92 | void cpu_idle(void) |
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index f974ec7aa357..17c98dbcec88 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c | |||
@@ -92,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr) | |||
92 | } | 92 | } |
93 | #endif /* CONFIG_MTD_UCLINUX_EBSS */ | 93 | #endif /* CONFIG_MTD_UCLINUX_EBSS */ |
94 | 94 | ||
95 | #if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE) | ||
96 | #define eprintk early_printk | ||
97 | #else | ||
98 | #define eprintk printk | ||
99 | #endif | ||
100 | |||
95 | void __init machine_early_init(const char *cmdline, unsigned int ram, | 101 | void __init machine_early_init(const char *cmdline, unsigned int ram, |
96 | unsigned int fdt, unsigned int msr) | 102 | unsigned int fdt, unsigned int msr) |
97 | { | 103 | { |
@@ -139,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, | |||
139 | setup_early_printk(NULL); | 145 | setup_early_printk(NULL); |
140 | #endif | 146 | #endif |
141 | 147 | ||
142 | early_printk("Ramdisk addr 0x%08x, ", ram); | 148 | eprintk("Ramdisk addr 0x%08x, ", ram); |
143 | if (fdt) | 149 | if (fdt) |
144 | early_printk("FDT at 0x%08x\n", fdt); | 150 | eprintk("FDT at 0x%08x\n", fdt); |
145 | else | 151 | else |
146 | early_printk("Compiled-in FDT at 0x%08x\n", | 152 | eprintk("Compiled-in FDT at 0x%08x\n", |
147 | (unsigned int)_fdt_start); | 153 | (unsigned int)_fdt_start); |
148 | 154 | ||
149 | #ifdef CONFIG_MTD_UCLINUX | 155 | #ifdef CONFIG_MTD_UCLINUX |
150 | early_printk("Found romfs @ 0x%08x (0x%08x)\n", | 156 | eprintk("Found romfs @ 0x%08x (0x%08x)\n", |
151 | romfs_base, romfs_size); | 157 | romfs_base, romfs_size); |
152 | early_printk("#### klimit %p ####\n", old_klimit); | 158 | eprintk("#### klimit %p ####\n", old_klimit); |
153 | BUG_ON(romfs_size < 0); /* What else can we do? */ | 159 | BUG_ON(romfs_size < 0); /* What else can we do? */ |
154 | 160 | ||
155 | early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", | 161 | eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", |
156 | romfs_size, romfs_base, (unsigned)&_ebss); | 162 | romfs_size, romfs_base, (unsigned)&_ebss); |
157 | 163 | ||
158 | early_printk("New klimit: 0x%08x\n", (unsigned)klimit); | 164 | eprintk("New klimit: 0x%08x\n", (unsigned)klimit); |
159 | #endif | 165 | #endif |
160 | 166 | ||
161 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | 167 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR |
162 | if (msr) | 168 | if (msr) |
163 | early_printk("!!!Your kernel has setup MSR instruction but " | 169 | eprintk("!!!Your kernel has setup MSR instruction but " |
164 | "CPU don't have it %d\n", msr); | 170 | "CPU don't have it %d\n", msr); |
165 | #else | 171 | #else |
166 | if (!msr) | 172 | if (!msr) |
167 | early_printk("!!!Your kernel not setup MSR instruction but " | 173 | eprintk("!!!Your kernel not setup MSR instruction but " |
168 | "CPU have it %d\n", msr); | 174 | "CPU have it %d\n", msr); |
169 | #endif | 175 | #endif |
170 | 176 | ||
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index eaaaf805f31b..5e4570ef515c 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c | |||
@@ -22,13 +22,11 @@ void trap_init(void) | |||
22 | __enable_hw_exceptions(); | 22 | __enable_hw_exceptions(); |
23 | } | 23 | } |
24 | 24 | ||
25 | static int kstack_depth_to_print = 24; | 25 | static unsigned long kstack_depth_to_print = 24; |
26 | 26 | ||
27 | static int __init kstack_setup(char *s) | 27 | static int __init kstack_setup(char *s) |
28 | { | 28 | { |
29 | kstack_depth_to_print = strict_strtoul(s, 0, NULL); | 29 | return !strict_strtoul(s, 0, &kstack_depth_to_print); |
30 | |||
31 | return 1; | ||
32 | } | 30 | } |
33 | __setup("kstack=", kstack_setup); | 31 | __setup("kstack=", kstack_setup); |
34 | 32 | ||
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile index b579db068c06..4dfe47d3cd91 100644 --- a/arch/microblaze/lib/Makefile +++ b/arch/microblaze/lib/Makefile | |||
@@ -10,5 +10,4 @@ else | |||
10 | lib-y += memcpy.o memmove.o | 10 | lib-y += memcpy.o memmove.o |
11 | endif | 11 | endif |
12 | 12 | ||
13 | lib-$(CONFIG_NO_MMU) += uaccess.o | 13 | lib-y += uaccess_old.o |
14 | lib-$(CONFIG_MMU) += uaccess_old.o | ||
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S index 02e3ab4eddf3..fdc48bb065d8 100644 --- a/arch/microblaze/lib/fastcopy.S +++ b/arch/microblaze/lib/fastcopy.S | |||
@@ -30,8 +30,9 @@ | |||
30 | */ | 30 | */ |
31 | 31 | ||
32 | #include <linux/linkage.h> | 32 | #include <linux/linkage.h> |
33 | 33 | .text | |
34 | .globl memcpy | 34 | .globl memcpy |
35 | .type memcpy, @function | ||
35 | .ent memcpy | 36 | .ent memcpy |
36 | 37 | ||
37 | memcpy: | 38 | memcpy: |
@@ -345,9 +346,11 @@ a_done: | |||
345 | rtsd r15, 8 | 346 | rtsd r15, 8 |
346 | nop | 347 | nop |
347 | 348 | ||
349 | .size memcpy, . - memcpy | ||
348 | .end memcpy | 350 | .end memcpy |
349 | /*----------------------------------------------------------------------------*/ | 351 | /*----------------------------------------------------------------------------*/ |
350 | .globl memmove | 352 | .globl memmove |
353 | .type memmove, @function | ||
351 | .ent memmove | 354 | .ent memmove |
352 | 355 | ||
353 | memmove: | 356 | memmove: |
@@ -659,4 +662,5 @@ d_done: | |||
659 | rtsd r15, 8 | 662 | rtsd r15, 8 |
660 | nop | 663 | nop |
661 | 664 | ||
665 | .size memmove, . - memmove | ||
662 | .end memmove | 666 | .end memmove |
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c index cc2108b6b260..014bac92bdff 100644 --- a/arch/microblaze/lib/memcpy.c +++ b/arch/microblaze/lib/memcpy.c | |||
@@ -53,7 +53,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) | |||
53 | const uint32_t *i_src; | 53 | const uint32_t *i_src; |
54 | uint32_t *i_dst; | 54 | uint32_t *i_dst; |
55 | 55 | ||
56 | if (c >= 4) { | 56 | if (likely(c >= 4)) { |
57 | unsigned value, buf_hold; | 57 | unsigned value, buf_hold; |
58 | 58 | ||
59 | /* Align the dstination to a word boundry. */ | 59 | /* Align the dstination to a word boundry. */ |
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c index 4df851d41a29..ecfb663e1fc1 100644 --- a/arch/microblaze/lib/memset.c +++ b/arch/microblaze/lib/memset.c | |||
@@ -33,22 +33,23 @@ | |||
33 | #ifdef __HAVE_ARCH_MEMSET | 33 | #ifdef __HAVE_ARCH_MEMSET |
34 | void *memset(void *v_src, int c, __kernel_size_t n) | 34 | void *memset(void *v_src, int c, __kernel_size_t n) |
35 | { | 35 | { |
36 | |||
37 | char *src = v_src; | 36 | char *src = v_src; |
38 | #ifdef CONFIG_OPT_LIB_FUNCTION | 37 | #ifdef CONFIG_OPT_LIB_FUNCTION |
39 | uint32_t *i_src; | 38 | uint32_t *i_src; |
40 | uint32_t w32; | 39 | uint32_t w32 = 0; |
41 | #endif | 40 | #endif |
42 | /* Truncate c to 8 bits */ | 41 | /* Truncate c to 8 bits */ |
43 | c = (c & 0xFF); | 42 | c = (c & 0xFF); |
44 | 43 | ||
45 | #ifdef CONFIG_OPT_LIB_FUNCTION | 44 | #ifdef CONFIG_OPT_LIB_FUNCTION |
46 | /* Make a repeating word out of it */ | 45 | if (unlikely(c)) { |
47 | w32 = c; | 46 | /* Make a repeating word out of it */ |
48 | w32 |= w32 << 8; | 47 | w32 = c; |
49 | w32 |= w32 << 16; | 48 | w32 |= w32 << 8; |
49 | w32 |= w32 << 16; | ||
50 | } | ||
50 | 51 | ||
51 | if (n >= 4) { | 52 | if (likely(n >= 4)) { |
52 | /* Align the destination to a word boundary */ | 53 | /* Align the destination to a word boundary */ |
53 | /* This is done in an endian independant manner */ | 54 | /* This is done in an endian independant manner */ |
54 | switch ((unsigned) src & 3) { | 55 | switch ((unsigned) src & 3) { |
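The memset() change zero-initialises w32 and only builds the repeating word when the fill byte is non-zero, since a zero fill can use the zero-initialised word as-is. The construction itself is unchanged; for a fill byte of 0xAB it proceeds as in this short sketch:

	/* Replicate the (truncated) fill byte into all four byte lanes. */
	uint32_t w32 = c & 0xFF;	/* 0x000000AB */
	w32 |= w32 << 8;		/* 0x0000ABAB */
	w32 |= w32 << 16;		/* 0xABABABAB */
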
diff --git a/arch/microblaze/lib/uaccess.c b/arch/microblaze/lib/uaccess.c deleted file mode 100644 index a853fe089c44..000000000000 --- a/arch/microblaze/lib/uaccess.c +++ /dev/null | |||
@@ -1,48 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | ||
8 | |||
9 | #include <linux/string.h> | ||
10 | #include <asm/uaccess.h> | ||
11 | |||
12 | #include <asm/bug.h> | ||
13 | |||
14 | long strnlen_user(const char __user *src, long count) | ||
15 | { | ||
16 | return strlen(src) + 1; | ||
17 | } | ||
18 | |||
19 | #define __do_strncpy_from_user(dst, src, count, res) \ | ||
20 | do { \ | ||
21 | char *tmp; \ | ||
22 | strncpy(dst, src, count); \ | ||
23 | for (tmp = dst; *tmp && count > 0; tmp++, count--) \ | ||
24 | ; \ | ||
25 | res = (tmp - dst); \ | ||
26 | } while (0) | ||
27 | |||
28 | long __strncpy_from_user(char *dst, const char __user *src, long count) | ||
29 | { | ||
30 | long res; | ||
31 | __do_strncpy_from_user(dst, src, count, res); | ||
32 | return res; | ||
33 | } | ||
34 | |||
35 | long strncpy_from_user(char *dst, const char __user *src, long count) | ||
36 | { | ||
37 | long res = -EFAULT; | ||
38 | if (access_ok(VERIFY_READ, src, 1)) | ||
39 | __do_strncpy_from_user(dst, src, count, res); | ||
40 | return res; | ||
41 | } | ||
42 | |||
43 | unsigned long __copy_tofrom_user(void __user *to, | ||
44 | const void __user *from, unsigned long size) | ||
45 | { | ||
46 | memcpy(to, from, size); | ||
47 | return 0; | ||
48 | } | ||
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S index 67f991c14b8a..5810cec54a7a 100644 --- a/arch/microblaze/lib/uaccess_old.S +++ b/arch/microblaze/lib/uaccess_old.S | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | .text | 23 | .text |
24 | .globl __strncpy_user; | 24 | .globl __strncpy_user; |
25 | .type __strncpy_user, @function | ||
25 | .align 4; | 26 | .align 4; |
26 | __strncpy_user: | 27 | __strncpy_user: |
27 | 28 | ||
@@ -50,7 +51,7 @@ __strncpy_user: | |||
50 | 3: | 51 | 3: |
51 | rtsd r15,8 | 52 | rtsd r15,8 |
52 | nop | 53 | nop |
53 | 54 | .size __strncpy_user, . - __strncpy_user | |
54 | 55 | ||
55 | .section .fixup, "ax" | 56 | .section .fixup, "ax" |
56 | .align 2 | 57 | .align 2 |
@@ -72,6 +73,7 @@ __strncpy_user: | |||
72 | 73 | ||
73 | .text | 74 | .text |
74 | .globl __strnlen_user; | 75 | .globl __strnlen_user; |
76 | .type __strnlen_user, @function | ||
75 | .align 4; | 77 | .align 4; |
76 | __strnlen_user: | 78 | __strnlen_user: |
77 | addik r3,r6,0 | 79 | addik r3,r6,0 |
@@ -90,7 +92,7 @@ __strnlen_user: | |||
90 | 3: | 92 | 3: |
91 | rtsd r15,8 | 93 | rtsd r15,8 |
92 | nop | 94 | nop |
93 | 95 | .size __strnlen_user, . - __strnlen_user | |
94 | 96 | ||
95 | .section .fixup,"ax" | 97 | .section .fixup,"ax" |
96 | 4: | 98 | 4: |
@@ -108,6 +110,7 @@ __strnlen_user: | |||
108 | */ | 110 | */ |
109 | .text | 111 | .text |
110 | .globl __copy_tofrom_user; | 112 | .globl __copy_tofrom_user; |
113 | .type __copy_tofrom_user, @function | ||
111 | .align 4; | 114 | .align 4; |
112 | __copy_tofrom_user: | 115 | __copy_tofrom_user: |
113 | /* | 116 | /* |
@@ -116,20 +119,34 @@ __copy_tofrom_user: | |||
116 | * r7, r3 - count | 119 | * r7, r3 - count |
117 | * r4 - tempval | 120 | * r4 - tempval |
118 | */ | 121 | */ |
119 | addik r3,r7,0 | 122 | beqid r7, 3f /* zero size is not likely */ |
120 | beqi r3,3f | 123 | andi r3, r7, 0x3 /* filter add count */ |
121 | 1: | 124 | bneid r3, 4f /* if is odd value then byte copying */ |
122 | lbu r4,r6,r0 | 125 | or r3, r5, r6 /* find if is any to/from unaligned */ |
123 | addik r6,r6,1 | 126 | andi r3, r3, 0x3 /* mask unaligned */ |
124 | 2: | 127 | bneid r3, 1f /* it is unaligned -> then jump */ |
125 | sb r4,r5,r0 | 128 | or r3, r0, r0 |
126 | addik r3,r3,-1 | 129 | |
127 | bneid r3,1b | 130 | /* at least one 4 byte copy */ |
128 | addik r5,r5,1 /* delay slot */ | 131 | 5: lw r4, r6, r3 |
132 | 6: sw r4, r5, r3 | ||
133 | addik r7, r7, -4 | ||
134 | bneid r7, 5b | ||
135 | addik r3, r3, 4 | ||
136 | addik r3, r7, 0 | ||
137 | rtsd r15, 8 | ||
138 | nop | ||
139 | 4: or r3, r0, r0 | ||
140 | 1: lbu r4,r6,r3 | ||
141 | 2: sb r4,r5,r3 | ||
142 | addik r7,r7,-1 | ||
143 | bneid r7,1b | ||
144 | addik r3,r3,1 /* delay slot */ | ||
129 | 3: | 145 | 3: |
146 | addik r3,r7,0 | ||
130 | rtsd r15,8 | 147 | rtsd r15,8 |
131 | nop | 148 | nop |
132 | 149 | .size __copy_tofrom_user, . - __copy_tofrom_user | |
133 | 150 | ||
134 | .section __ex_table,"a" | 151 | .section __ex_table,"a" |
135 | .word 1b,3b,2b,3b | 152 | .word 1b,3b,2b,3b,5b,3b,6b,3b |
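The rewritten __copy_tofrom_user gains a word-at-a-time fast path: when the count is a multiple of four and both pointers are word aligned it moves 32 bits per iteration, otherwise it falls back to the original byte loop, and the extended __ex_table line covers the new faulting loads and stores so a fault still yields the number of bytes left uncopied. The dispatch corresponds roughly to this C sketch (names hypothetical, __user annotations omitted):

	/* Sketch of the new dispatch in __copy_tofrom_user(). */
	static unsigned long copy_tofrom_sketch(void *to, const void *from,
						unsigned long size)
	{
		unsigned char *d = to;
		const unsigned char *s = from;

		if (size && !(size & 3) &&
		    !(((unsigned long)d | (unsigned long)s) & 3)) {
			/* aligned, whole-word fast path */
			for (; size; size -= 4, d += 4, s += 4)
				*(unsigned int *)d = *(const unsigned int *)s;
		} else {
			/* odd size or unaligned pointers: byte loop as before */
			for (; size; size--)
				*d++ = *s++;
		}
		return size;	/* 0 on success; a fault fixes up to the remainder */
	}
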
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index d9d249a66ff2..7af87f4b2c2c 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c | |||
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, | |||
106 | regs->esr = error_code; | 106 | regs->esr = error_code; |
107 | 107 | ||
108 | /* On a kernel SLB miss we can only check for a valid exception entry */ | 108 | /* On a kernel SLB miss we can only check for a valid exception entry */ |
109 | if (kernel_mode(regs) && (address >= TASK_SIZE)) { | 109 | if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) { |
110 | printk(KERN_WARNING "kernel task_size exceed"); | 110 | printk(KERN_WARNING "kernel task_size exceed"); |
111 | _exception(SIGSEGV, regs, code, address); | 111 | _exception(SIGSEGV, regs, code, address); |
112 | } | 112 | } |
@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, | |||
122 | } | 122 | } |
123 | #endif /* CONFIG_KGDB */ | 123 | #endif /* CONFIG_KGDB */ |
124 | 124 | ||
125 | if (in_atomic() || !mm) { | 125 | if (unlikely(in_atomic() || !mm)) { |
126 | if (kernel_mode(regs)) | 126 | if (kernel_mode(regs)) |
127 | goto bad_area_nosemaphore; | 127 | goto bad_area_nosemaphore; |
128 | 128 | ||
@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, | |||
150 | * source. If this is invalid we can skip the address space check, | 150 | * source. If this is invalid we can skip the address space check, |
151 | * thus avoiding the deadlock. | 151 | * thus avoiding the deadlock. |
152 | */ | 152 | */ |
153 | if (!down_read_trylock(&mm->mmap_sem)) { | 153 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) { |
154 | if (kernel_mode(regs) && !search_exception_tables(regs->pc)) | 154 | if (kernel_mode(regs) && !search_exception_tables(regs->pc)) |
155 | goto bad_area_nosemaphore; | 155 | goto bad_area_nosemaphore; |
156 | 156 | ||
@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, | |||
158 | } | 158 | } |
159 | 159 | ||
160 | vma = find_vma(mm, address); | 160 | vma = find_vma(mm, address); |
161 | if (!vma) | 161 | if (unlikely(!vma)) |
162 | goto bad_area; | 162 | goto bad_area; |
163 | 163 | ||
164 | if (vma->vm_start <= address) | 164 | if (vma->vm_start <= address) |
165 | goto good_area; | 165 | goto good_area; |
166 | 166 | ||
167 | if (!(vma->vm_flags & VM_GROWSDOWN)) | 167 | if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) |
168 | goto bad_area; | 168 | goto bad_area; |
169 | 169 | ||
170 | if (!is_write) | 170 | if (unlikely(!is_write)) |
171 | goto bad_area; | 171 | goto bad_area; |
172 | 172 | ||
173 | /* | 173 | /* |
@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, | |||
179 | * before setting the user r1. Thus we allow the stack to | 179 | * before setting the user r1. Thus we allow the stack to |
180 | * expand to 1MB without further checks. | 180 | * expand to 1MB without further checks. |
181 | */ | 181 | */ |
182 | if (address + 0x100000 < vma->vm_end) { | 182 | if (unlikely(address + 0x100000 < vma->vm_end)) { |
183 | 183 | ||
184 | /* get user regs even if this fault is in kernel mode */ | 184 | /* get user regs even if this fault is in kernel mode */ |
185 | struct pt_regs *uregs = current->thread.regs; | 185 | struct pt_regs *uregs = current->thread.regs; |
@@ -209,15 +209,15 @@ good_area: | |||
209 | code = SEGV_ACCERR; | 209 | code = SEGV_ACCERR; |
210 | 210 | ||
211 | /* a write */ | 211 | /* a write */ |
212 | if (is_write) { | 212 | if (unlikely(is_write)) { |
213 | if (!(vma->vm_flags & VM_WRITE)) | 213 | if (unlikely(!(vma->vm_flags & VM_WRITE))) |
214 | goto bad_area; | 214 | goto bad_area; |
215 | /* a read */ | 215 | /* a read */ |
216 | } else { | 216 | } else { |
217 | /* protection fault */ | 217 | /* protection fault */ |
218 | if (error_code & 0x08000000) | 218 | if (unlikely(error_code & 0x08000000)) |
219 | goto bad_area; | 219 | goto bad_area; |
220 | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) | 220 | if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC)))) |
221 | goto bad_area; | 221 | goto bad_area; |
222 | } | 222 | } |
223 | 223 | ||
@@ -235,7 +235,7 @@ survive: | |||
235 | goto do_sigbus; | 235 | goto do_sigbus; |
236 | BUG(); | 236 | BUG(); |
237 | } | 237 | } |
238 | if (fault & VM_FAULT_MAJOR) | 238 | if (unlikely(fault & VM_FAULT_MAJOR)) |
239 | current->maj_flt++; | 239 | current->maj_flt++; |
240 | else | 240 | else |
241 | current->min_flt++; | 241 | current->min_flt++; |
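
The do_page_fault() hunk above only adds unlikely() around the error paths, which tells GCC to lay out the no-fault case as the straight-line branch. Assuming the usual __builtin_expect-based definitions, a small userspace sketch of the same hinting:

    #include <stdio.h>

    /* Same idea as the kernel's branch-prediction helpers (sketch, not the real header). */
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    static int handle_fault(int in_atomic, int have_mm)
    {
        /* the error path is marked as the cold branch, as in do_page_fault() */
        if (unlikely(in_atomic || !have_mm))
            return -1;      /* rare: bail out */
        return 0;           /* common case stays straight-line */
    }

    int main(void)
    {
        printf("%d %d\n", handle_fault(0, 1), handle_fault(1, 1));
        return 0;
    }
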
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 1608e2e1a44a..40bc10ede097 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c | |||
@@ -165,7 +165,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
165 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | 165 | for (addr = begin; addr < end; addr += PAGE_SIZE) { |
166 | ClearPageReserved(virt_to_page(addr)); | 166 | ClearPageReserved(virt_to_page(addr)); |
167 | init_page_count(virt_to_page(addr)); | 167 | init_page_count(virt_to_page(addr)); |
168 | memset((void *)addr, 0xcc, PAGE_SIZE); | ||
169 | free_page(addr); | 168 | free_page(addr); |
170 | totalram_pages++; | 169 | totalram_pages++; |
171 | } | 170 | } |
@@ -208,14 +207,6 @@ void __init mem_init(void) | |||
208 | } | 207 | } |
209 | 208 | ||
210 | #ifndef CONFIG_MMU | 209 | #ifndef CONFIG_MMU |
211 | /* Check against bounds of physical memory */ | ||
212 | int ___range_ok(unsigned long addr, unsigned long size) | ||
213 | { | ||
214 | return ((addr < memory_start) || | ||
215 | ((addr + size) > memory_end)); | ||
216 | } | ||
217 | EXPORT_SYMBOL(___range_ok); | ||
218 | |||
219 | int page_is_ram(unsigned long pfn) | 210 | int page_is_ram(unsigned long pfn) |
220 | { | 211 | { |
221 | return __range_ok(pfn, 0); | 212 | return __range_ok(pfn, 0); |
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index 63a6fd07c48f..d31312cde6ea 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c | |||
@@ -154,7 +154,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags) | |||
154 | err = 0; | 154 | err = 0; |
155 | set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, | 155 | set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, |
156 | __pgprot(flags))); | 156 | __pgprot(flags))); |
157 | if (mem_init_done) | 157 | if (unlikely(mem_init_done)) |
158 | flush_HPTE(0, va, pmd_val(*pd)); | 158 | flush_HPTE(0, va, pmd_val(*pd)); |
159 | /* flush_HPTE(0, va, pg); */ | 159 | /* flush_HPTE(0, va, pg); */ |
160 | } | 160 | } |
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h index c1b475a941eb..a9b91ed3d4b9 100644 --- a/arch/powerpc/include/asm/asm-compat.h +++ b/arch/powerpc/include/asm/asm-compat.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh) | 28 | #define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh) |
29 | #define PPC_STLCX stringify_in_c(stdcx.) | 29 | #define PPC_STLCX stringify_in_c(stdcx.) |
30 | #define PPC_CNTLZL stringify_in_c(cntlzd) | 30 | #define PPC_CNTLZL stringify_in_c(cntlzd) |
31 | #define PPC_LR_STKOFF 16 | ||
31 | 32 | ||
32 | /* Move to CR, single-entry optimized version. Only available | 33 | /* Move to CR, single-entry optimized version. Only available |
33 | * on POWER4 and later. | 34 | * on POWER4 and later. |
@@ -51,6 +52,7 @@ | |||
51 | #define PPC_STLCX stringify_in_c(stwcx.) | 52 | #define PPC_STLCX stringify_in_c(stwcx.) |
52 | #define PPC_CNTLZL stringify_in_c(cntlzw) | 53 | #define PPC_CNTLZL stringify_in_c(cntlzw) |
53 | #define PPC_MTOCRF stringify_in_c(mtcrf) | 54 | #define PPC_MTOCRF stringify_in_c(mtcrf) |
55 | #define PPC_LR_STKOFF 4 | ||
54 | 56 | ||
55 | #endif | 57 | #endif |
56 | 58 | ||
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S index 2d29752cbe16..b485a87c94e1 100644 --- a/arch/powerpc/kernel/misc.S +++ b/arch/powerpc/kernel/misc.S | |||
@@ -127,3 +127,31 @@ _GLOBAL(__setup_cpu_power7) | |||
127 | _GLOBAL(__restore_cpu_power7) | 127 | _GLOBAL(__restore_cpu_power7) |
128 | /* place holder */ | 128 | /* place holder */ |
129 | blr | 129 | blr |
130 | |||
131 | #ifdef CONFIG_EVENT_TRACING | ||
132 | /* | ||
133 | * Get a minimal set of registers for our caller's nth caller. | ||
134 | * r3 = regs pointer, r5 = n. | ||
135 | * | ||
136 | * We only get R1 (stack pointer), NIP (next instruction pointer) | ||
137 | * and LR (link register). These are all we can get in the | ||
138 | * general case without doing complicated stack unwinding, but | ||
139 | * fortunately they are enough to do a stack backtrace, which | ||
140 | * is all we need them for. | ||
141 | */ | ||
142 | _GLOBAL(perf_arch_fetch_caller_regs) | ||
143 | mr r6,r1 | ||
144 | cmpwi r5,0 | ||
145 | mflr r4 | ||
146 | ble 2f | ||
147 | mtctr r5 | ||
148 | 1: PPC_LL r6,0(r6) | ||
149 | bdnz 1b | ||
150 | PPC_LL r4,PPC_LR_STKOFF(r6) | ||
151 | 2: PPC_LL r7,0(r6) | ||
152 | PPC_LL r7,PPC_LR_STKOFF(r7) | ||
153 | PPC_STL r6,GPR1-STACK_FRAME_OVERHEAD(r3) | ||
154 | PPC_STL r4,_NIP-STACK_FRAME_OVERHEAD(r3) | ||
155 | PPC_STL r7,_LINK-STACK_FRAME_OVERHEAD(r3) | ||
156 | blr | ||
157 | #endif /* CONFIG_EVENT_TRACING */ | ||
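
perf_arch_fetch_caller_regs above follows the stack back-chain n times and then picks up the saved LR at PPC_LR_STKOFF, which is all a backtrace needs. A hedged C model of that walk; struct frame, mini_regs and the field names are illustrative stand-ins, not the real ABI layout:

    #include <stdio.h>

    /* Toy model of a back-chained stack frame: each frame stores a pointer
     * to the previous frame and the saved link register (return address). */
    struct frame {
        struct frame *back;      /* what "PPC_LL r6,0(r6)" follows           */
        unsigned long saved_lr;  /* what "PPC_LL r4,PPC_LR_STKOFF(r6)" reads */
    };

    struct mini_regs { unsigned long sp, nip, link; };

    static void fetch_caller_regs(struct mini_regs *regs,
                                  const struct frame *cur, int skip)
    {
        unsigned long nip = 0;   /* the real code seeds this with mflr for skip == 0 */

        for (int i = 0; i < skip; i++)    /* walk up "skip" frames */
            cur = cur->back;
        if (skip > 0)
            nip = cur->saved_lr;          /* NIP: saved LR of the frame we landed on */

        regs->sp   = (unsigned long)cur;
        regs->nip  = nip;
        regs->link = cur->back->saved_lr; /* LR: return address one level further up */
    }

    int main(void)
    {
        struct frame f2 = { NULL, 0x300 }, f1 = { &f2, 0x200 }, f0 = { &f1, 0x100 };
        struct mini_regs r;

        fetch_caller_regs(&r, &f0, 1);
        printf("sp=%p nip=%lx link=%lx\n", (void *)r.sp, r.nip, r.link);
        return 0;
    }
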
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index a97d69525829..14e0479d3888 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c | |||
@@ -24,8 +24,8 @@ | |||
24 | /* Symbols defined by linker scripts */ | 24 | /* Symbols defined by linker scripts */ |
25 | extern char input_data[]; | 25 | extern char input_data[]; |
26 | extern int input_len; | 26 | extern int input_len; |
27 | extern int _text; | 27 | extern char _text, _end; |
28 | extern int _end; | 28 | extern char _bss, _ebss; |
29 | 29 | ||
30 | static void error(char *m); | 30 | static void error(char *m); |
31 | 31 | ||
@@ -129,12 +129,12 @@ unsigned long decompress_kernel(void) | |||
129 | unsigned long output_addr; | 129 | unsigned long output_addr; |
130 | unsigned char *output; | 130 | unsigned char *output; |
131 | 131 | ||
132 | check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start); | ||
133 | memset(&_bss, 0, &_ebss - &_bss); | ||
132 | free_mem_ptr = (unsigned long)&_end; | 134 | free_mem_ptr = (unsigned long)&_end; |
133 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; | 135 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; |
134 | output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); | 136 | output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); |
135 | 137 | ||
136 | check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start); | ||
137 | |||
138 | #ifdef CONFIG_BLK_DEV_INITRD | 138 | #ifdef CONFIG_BLK_DEV_INITRD |
139 | /* | 139 | /* |
140 | * Move the initrd right behind the end of the decompressed | 140 | * Move the initrd right behind the end of the decompressed |
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 67ee6c3c6bb3..1741c1556a4e 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h | |||
@@ -110,6 +110,7 @@ extern void pfault_fini(void); | |||
110 | #endif /* CONFIG_PFAULT */ | 110 | #endif /* CONFIG_PFAULT */ |
111 | 111 | ||
112 | extern void cmma_init(void); | 112 | extern void cmma_init(void); |
113 | extern int memcpy_real(void *, void *, size_t); | ||
113 | 114 | ||
114 | #define finish_arch_switch(prev) do { \ | 115 | #define finish_arch_switch(prev) do { \ |
115 | set_fs(current->thread.mm_segment); \ | 116 | set_fs(current->thread.mm_segment); \ |
@@ -218,8 +219,8 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | |||
218 | " l %0,%2\n" | 219 | " l %0,%2\n" |
219 | "0: nr %0,%5\n" | 220 | "0: nr %0,%5\n" |
220 | " lr %1,%0\n" | 221 | " lr %1,%0\n" |
221 | " or %0,%2\n" | 222 | " or %0,%3\n" |
222 | " or %1,%3\n" | 223 | " or %1,%4\n" |
223 | " cs %0,%1,%2\n" | 224 | " cs %0,%1,%2\n" |
224 | " jnl 1f\n" | 225 | " jnl 1f\n" |
225 | " xr %1,%0\n" | 226 | " xr %1,%0\n" |
@@ -239,8 +240,8 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | |||
239 | " l %0,%2\n" | 240 | " l %0,%2\n" |
240 | "0: nr %0,%5\n" | 241 | "0: nr %0,%5\n" |
241 | " lr %1,%0\n" | 242 | " lr %1,%0\n" |
242 | " or %0,%2\n" | 243 | " or %0,%3\n" |
243 | " or %1,%3\n" | 244 | " or %1,%4\n" |
244 | " cs %0,%1,%2\n" | 245 | " cs %0,%1,%2\n" |
245 | " jnl 1f\n" | 246 | " jnl 1f\n" |
246 | " xr %1,%0\n" | 247 | " xr %1,%0\n" |
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index ca4a62bd862f..9d1f76702d47 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -517,7 +517,10 @@ startup: | |||
517 | lhi %r1,2 # mode 2 = esame (dump) | 517 | lhi %r1,2 # mode 2 = esame (dump) |
518 | sigp %r1,%r0,0x12 # switch to esame mode | 518 | sigp %r1,%r0,0x12 # switch to esame mode |
519 | sam64 # switch to 64 bit mode | 519 | sam64 # switch to 64 bit mode |
520 | larl %r13,4f | ||
521 | lmh %r0,%r15,0(%r13) # clear high-order half | ||
520 | jg startup_continue | 522 | jg startup_continue |
523 | 4: .fill 16,4,0x0 | ||
521 | #else | 524 | #else |
522 | mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) | 525 | mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) |
523 | l %r13,4f-.LPG0(%r13) | 526 | l %r13,4f-.LPG0(%r13) |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 39580e768658..1f70970de0aa 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -21,7 +21,6 @@ startup_continue: | |||
21 | larl %r1,sched_clock_base_cc | 21 | larl %r1,sched_clock_base_cc |
22 | mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK | 22 | mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK |
23 | larl %r13,.LPG1 # get base | 23 | larl %r13,.LPG1 # get base |
24 | lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half | ||
25 | lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers | 24 | lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers |
26 | lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area | 25 | lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area |
27 | # move IPL device to lowcore | 26 | # move IPL device to lowcore |
@@ -67,7 +66,6 @@ startup_continue: | |||
67 | .L4malign:.quad 0xffffffffffc00000 | 66 | .L4malign:.quad 0xffffffffffc00000 |
68 | .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 | 67 | .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 |
69 | .Lnop: .long 0x07000700 | 68 | .Lnop: .long 0x07000700 |
70 | .Lzero64:.fill 16,4,0x0 | ||
71 | .Lparmaddr: | 69 | .Lparmaddr: |
72 | .quad PARMAREA | 70 | .quad PARMAREA |
73 | .align 64 | 71 | .align 64 |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 77a63ae419f0..ba363d99de43 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -401,7 +401,7 @@ setup_lowcore(void) | |||
401 | * Setup lowcore for boot cpu | 401 | * Setup lowcore for boot cpu |
402 | */ | 402 | */ |
403 | BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); | 403 | BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); |
404 | lc = __alloc_bootmem(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); | 404 | lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); |
405 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 405 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; |
406 | lc->restart_psw.addr = | 406 | lc->restart_psw.addr = |
407 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | 407 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; |
@@ -433,7 +433,7 @@ setup_lowcore(void) | |||
433 | #ifndef CONFIG_64BIT | 433 | #ifndef CONFIG_64BIT |
434 | if (MACHINE_HAS_IEEE) { | 434 | if (MACHINE_HAS_IEEE) { |
435 | lc->extended_save_area_addr = (__u32) | 435 | lc->extended_save_area_addr = (__u32) |
436 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); | 436 | __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0); |
437 | /* enable extended save area */ | 437 | /* enable extended save area */ |
438 | __ctl_set_bit(14, 29); | 438 | __ctl_set_bit(14, 29); |
439 | } | 439 | } |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 29f65bce55e1..d7d24fc3d6b7 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -292,9 +292,9 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | |||
292 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); | 292 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); |
293 | while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) | 293 | while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) |
294 | cpu_relax(); | 294 | cpu_relax(); |
295 | memcpy(zfcpdump_save_areas[cpu], | 295 | memcpy_real(zfcpdump_save_areas[cpu], |
296 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, | 296 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, |
297 | sizeof(struct save_area)); | 297 | sizeof(struct save_area)); |
298 | } | 298 | } |
299 | 299 | ||
300 | struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; | 300 | struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; |
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 81756271dc44..a8c2af8c650f 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c | |||
@@ -59,3 +59,29 @@ long probe_kernel_write(void *dst, void *src, size_t size) | |||
59 | } | 59 | } |
60 | return copied < 0 ? -EFAULT : 0; | 60 | return copied < 0 ? -EFAULT : 0; |
61 | } | 61 | } |
62 | |||
63 | int memcpy_real(void *dest, void *src, size_t count) | ||
64 | { | ||
65 | register unsigned long _dest asm("2") = (unsigned long) dest; | ||
66 | register unsigned long _len1 asm("3") = (unsigned long) count; | ||
67 | register unsigned long _src asm("4") = (unsigned long) src; | ||
68 | register unsigned long _len2 asm("5") = (unsigned long) count; | ||
69 | unsigned long flags; | ||
70 | int rc = -EFAULT; | ||
71 | |||
72 | if (!count) | ||
73 | return 0; | ||
74 | flags = __raw_local_irq_stnsm(0xf8UL); | ||
75 | asm volatile ( | ||
76 | "0: mvcle %1,%2,0x0\n" | ||
77 | "1: jo 0b\n" | ||
78 | " lhi %0,0x0\n" | ||
79 | "2:\n" | ||
80 | EX_TABLE(1b,2b) | ||
81 | : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1), | ||
82 | "+d" (_len2), "=m" (*((long *) dest)) | ||
83 | : "m" (*((long *) src)) | ||
84 | : "cc", "memory"); | ||
85 | __raw_local_irq_ssm(flags); | ||
86 | return rc; | ||
87 | } | ||
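
memcpy_real() above copies with dynamic address translation disabled and loops on the "jo 0b" branch because MVCLE may stop before the whole range has been moved. A plain C analogue of that reissue-until-done pattern, with a bounded memcpy chunk standing in for one MVCLE step and no real fault handling or DAT manipulation:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Keep reissuing the "instruction" until nothing is left to move. */
    static int copy_until_done(void *dest, const void *src, size_t count)
    {
        const size_t step = 256;   /* pretend one step moves at most this much */
        char *d = dest;
        const char *s = src;

        while (count) {            /* the "jo 0b" retry loop */
            size_t n = count < step ? count : step;

            memcpy(d, s, n);
            d += n;
            s += n;
            count -= n;
        }
        return 0;                  /* the real helper returns -EFAULT on a fault */
    }

    int main(void)
    {
        char src[1000], dst[1000];

        memset(src, 'x', sizeof(src));
        copy_until_done(dst, src, sizeof(dst));
        printf("%c %c\n", dst[0], dst[999]);
        return 0;
    }
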
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 39ed8722d11a..6c13b92742e8 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c | |||
@@ -836,6 +836,8 @@ static void __init sh_eth_init(struct sh_eth_plat_data *pd) | |||
836 | pd->mac_addr[i] = mac_read(a, 0x10 + i); | 836 | pd->mac_addr[i] = mac_read(a, 0x10 + i); |
837 | msleep(10); | 837 | msleep(10); |
838 | } | 838 | } |
839 | |||
840 | i2c_put_adapter(a); | ||
839 | } | 841 | } |
840 | #else | 842 | #else |
841 | static void __init sh_eth_init(struct sh_eth_plat_data *pd) | 843 | static void __init sh_eth_init(struct sh_eth_plat_data *pd) |
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 66cdbc3c7af9..ccaa290e9aba 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c | |||
@@ -52,6 +52,13 @@ | |||
52 | * and change SW41 to use 720p | 52 | * and change SW41 to use 720p |
53 | */ | 53 | */ |
54 | 54 | ||
55 | /* | ||
56 | * about sound | ||
57 | * | ||
58 | * This setup.c supports FSI slave mode. | ||
59 | * Please change J20, J21, J22 pin to 1-2 connection. | ||
60 | */ | ||
61 | |||
55 | /* Heartbeat */ | 62 | /* Heartbeat */ |
56 | static struct resource heartbeat_resource = { | 63 | static struct resource heartbeat_resource = { |
57 | .start = PA_LED, | 64 | .start = PA_LED, |
@@ -276,6 +283,7 @@ static struct clk fsimcka_clk = { | |||
276 | .rate = 0, /* unknown */ | 283 | .rate = 0, /* unknown */ |
277 | }; | 284 | }; |
278 | 285 | ||
286 | /* change J20, J21, J22 pin to 1-2 connection to use slave mode */ | ||
279 | struct sh_fsi_platform_info fsi_info = { | 287 | struct sh_fsi_platform_info fsi_info = { |
280 | .porta_flags = SH_FSI_BRS_INV | | 288 | .porta_flags = SH_FSI_BRS_INV | |
281 | SH_FSI_OUT_SLAVE_MODE | | 289 | SH_FSI_OUT_SLAVE_MODE | |
diff --git a/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/arch/sh/include/cpu-sh4/cpu/mmu_context.h index 03ea75c5315d..310ec92f2759 100644 --- a/arch/sh/include/cpu-sh4/cpu/mmu_context.h +++ b/arch/sh/include/cpu-sh4/cpu/mmu_context.h | |||
@@ -19,6 +19,8 @@ | |||
19 | 19 | ||
20 | #define MMUCR 0xFF000010 /* MMU Control Register */ | 20 | #define MMUCR 0xFF000010 /* MMU Control Register */ |
21 | 21 | ||
22 | #define MMU_ITLB_ADDRESS_ARRAY 0xF2000000 | ||
23 | #define MMU_ITLB_ADDRESS_ARRAY2 0xF2800000 | ||
22 | #define MMU_UTLB_ADDRESS_ARRAY 0xF6000000 | 24 | #define MMU_UTLB_ADDRESS_ARRAY 0xF6000000 |
23 | #define MMU_UTLB_ADDRESS_ARRAY2 0xF6800000 | 25 | #define MMU_UTLB_ADDRESS_ARRAY2 0xF6800000 |
24 | #define MMU_PAGE_ASSOC_BIT 0x80 | 26 | #define MMU_PAGE_ASSOC_BIT 0x80 |
diff --git a/arch/sh/include/cpu-sh4/cpu/watchdog.h b/arch/sh/include/cpu-sh4/cpu/watchdog.h index 7672301d0c70..7f62b9380938 100644 --- a/arch/sh/include/cpu-sh4/cpu/watchdog.h +++ b/arch/sh/include/cpu-sh4/cpu/watchdog.h | |||
@@ -21,6 +21,12 @@ | |||
21 | #define WTCNT 0xffcc0000 /*WDTST*/ | 21 | #define WTCNT 0xffcc0000 /*WDTST*/ |
22 | #define WTST WTCNT | 22 | #define WTST WTCNT |
23 | #define WTBST 0xffcc0008 /*WDTBST*/ | 23 | #define WTBST 0xffcc0008 /*WDTBST*/ |
24 | /* Register definitions */ | ||
25 | #elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \ | ||
26 | defined(CONFIG_CPU_SUBTYPE_SH7723) || \ | ||
27 | defined(CONFIG_CPU_SUBTYPE_SH7724) | ||
28 | #define WTCNT 0xa4520000 | ||
29 | #define WTCSR 0xa4520004 | ||
24 | #else | 30 | #else |
25 | /* Register definitions */ | 31 | /* Register definitions */ |
26 | #define WTCNT 0xffc00008 | 32 | #define WTCNT 0xffc00008 |
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index bd1c497280a6..94739ee7aa74 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c | |||
@@ -727,7 +727,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len, | |||
727 | unsigned char *end, struct module *mod) | 727 | unsigned char *end, struct module *mod) |
728 | { | 728 | { |
729 | struct rb_node **rb_node = &cie_root.rb_node; | 729 | struct rb_node **rb_node = &cie_root.rb_node; |
730 | struct rb_node *parent; | 730 | struct rb_node *parent = *rb_node; |
731 | struct dwarf_cie *cie; | 731 | struct dwarf_cie *cie; |
732 | unsigned long flags; | 732 | unsigned long flags; |
733 | int count; | 733 | int count; |
@@ -856,7 +856,7 @@ static int dwarf_parse_fde(void *entry, u32 entry_type, | |||
856 | unsigned char *end, struct module *mod) | 856 | unsigned char *end, struct module *mod) |
857 | { | 857 | { |
858 | struct rb_node **rb_node = &fde_root.rb_node; | 858 | struct rb_node **rb_node = &fde_root.rb_node; |
859 | struct rb_node *parent; | 859 | struct rb_node *parent = *rb_node; |
860 | struct dwarf_fde *fde; | 860 | struct dwarf_fde *fde; |
861 | struct dwarf_cie *cie; | 861 | struct dwarf_cie *cie; |
862 | unsigned long flags; | 862 | unsigned long flags; |
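
The dwarf.c hunks above initialise "parent" from the root slot before the rb-tree descent, so the later link step sees NULL for an empty tree instead of a stale pointer. The same requirement shows up in any parent-tracking tree insert; a generic, non-red-black sketch:

    #include <stdio.h>
    #include <stdlib.h>

    /* "parent" must track the last node actually visited, so it is seeded from
     * the root slot before the descent rather than left uninitialized. */
    struct node {
        long key;
        struct node *left, *right, *parent;
    };

    static struct node *insert(struct node **slot, long key)
    {
        struct node *parent = *slot;    /* the equivalent of parent = *rb_node */
        struct node *n;

        while (*slot) {
            parent = *slot;             /* remember where we came from */
            slot = key < (*slot)->key ? &(*slot)->left : &(*slot)->right;
        }

        n = calloc(1, sizeof(*n));
        n->key = key;
        n->parent = parent;             /* what rb_link_node() does with "parent" */
        *slot = n;                      /* NULL parent only for the empty-tree case */
        return n;
    }

    int main(void)
    {
        struct node *root = NULL;

        insert(&root, 2);
        insert(&root, 1);
        insert(&root, 3);
        printf("%ld %ld %ld (parent of 1 is %ld)\n",
               root->left->key, root->key, root->right->key,
               root->left->parent->key);
        return 0;
    }
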
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 0fd7b41f0a22..273f890b17ae 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c | |||
@@ -112,7 +112,7 @@ void cpu_idle(void) | |||
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | void __cpuinit select_idle_routine(void) | 115 | void __init select_idle_routine(void) |
116 | { | 116 | { |
117 | /* | 117 | /* |
118 | * If a platform has set its own idle routine, leave it alone. | 118 | * If a platform has set its own idle routine, leave it alone. |
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 9f253e9cce01..81b6de41ae5d 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c | |||
@@ -315,7 +315,7 @@ void hw_perf_disable(void) | |||
315 | sh_pmu->disable_all(); | 315 | sh_pmu->disable_all(); |
316 | } | 316 | } |
317 | 317 | ||
318 | int register_sh_pmu(struct sh_pmu *pmu) | 318 | int __cpuinit register_sh_pmu(struct sh_pmu *pmu) |
319 | { | 319 | { |
320 | if (sh_pmu) | 320 | if (sh_pmu) |
321 | return -EBUSY; | 321 | return -EBUSY; |
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index c90957a459ac..c0d40f671ecd 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c | |||
@@ -504,13 +504,6 @@ out: | |||
504 | return error; | 504 | return error; |
505 | } | 505 | } |
506 | 506 | ||
507 | /* | ||
508 | * These bracket the sleeping functions.. | ||
509 | */ | ||
510 | extern void interruptible_sleep_on(wait_queue_head_t *q); | ||
511 | |||
512 | #define mid_sched ((unsigned long) interruptible_sleep_on) | ||
513 | |||
514 | #ifdef CONFIG_FRAME_POINTER | 507 | #ifdef CONFIG_FRAME_POINTER |
515 | static int in_sh64_switch_to(unsigned long pc) | 508 | static int in_sh64_switch_to(unsigned long pc) |
516 | { | 509 | { |
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index a4662e2782c3..3cc21933063b 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c | |||
@@ -323,6 +323,7 @@ static void __clear_pmb_entry(struct pmb_entry *pmbe) | |||
323 | writel_uncached(data_val & ~PMB_V, data); | 323 | writel_uncached(data_val & ~PMB_V, data); |
324 | } | 324 | } |
325 | 325 | ||
326 | #ifdef CONFIG_PM | ||
326 | static void set_pmb_entry(struct pmb_entry *pmbe) | 327 | static void set_pmb_entry(struct pmb_entry *pmbe) |
327 | { | 328 | { |
328 | unsigned long flags; | 329 | unsigned long flags; |
@@ -331,6 +332,7 @@ static void set_pmb_entry(struct pmb_entry *pmbe) | |||
331 | __set_pmb_entry(pmbe); | 332 | __set_pmb_entry(pmbe); |
332 | spin_unlock_irqrestore(&pmbe->lock, flags); | 333 | spin_unlock_irqrestore(&pmbe->lock, flags); |
333 | } | 334 | } |
335 | #endif /* CONFIG_PM */ | ||
334 | 336 | ||
335 | int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, | 337 | int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, |
336 | unsigned long size, pgprot_t prot) | 338 | unsigned long size, pgprot_t prot) |
@@ -802,7 +804,7 @@ void __init pmb_init(void) | |||
802 | writel_uncached(0, PMB_IRMCR); | 804 | writel_uncached(0, PMB_IRMCR); |
803 | 805 | ||
804 | /* Flush out the TLB */ | 806 | /* Flush out the TLB */ |
805 | __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); | 807 | local_flush_tlb_all(); |
806 | ctrl_barrier(); | 808 | ctrl_barrier(); |
807 | } | 809 | } |
808 | 810 | ||
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c index 32dc674c550c..bdd0982b56ee 100644 --- a/arch/sh/mm/tlb-pteaex.c +++ b/arch/sh/mm/tlb-pteaex.c | |||
@@ -73,5 +73,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) | |||
73 | jump_to_uncached(); | 73 | jump_to_uncached(); |
74 | __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT); | 74 | __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT); |
75 | __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); | 75 | __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); |
76 | __raw_writel(page, MMU_ITLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT); | ||
77 | __raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); | ||
76 | back_to_cached(); | 78 | back_to_cached(); |
77 | } | 79 | } |
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c index 004bb3f25b5f..77dc5efa7127 100644 --- a/arch/sh/mm/tlbflush_32.c +++ b/arch/sh/mm/tlbflush_32.c | |||
@@ -123,18 +123,27 @@ void local_flush_tlb_mm(struct mm_struct *mm) | |||
123 | void local_flush_tlb_all(void) | 123 | void local_flush_tlb_all(void) |
124 | { | 124 | { |
125 | unsigned long flags, status; | 125 | unsigned long flags, status; |
126 | int i; | ||
126 | 127 | ||
127 | /* | 128 | /* |
128 | * Flush all the TLB. | 129 | * Flush all the TLB. |
129 | * | ||
130 | * Write to the MMU control register's bit: | ||
131 | * TF-bit for SH-3, TI-bit for SH-4. | ||
132 | * It's same position, bit #2. | ||
133 | */ | 130 | */ |
134 | local_irq_save(flags); | 131 | local_irq_save(flags); |
132 | jump_to_uncached(); | ||
133 | |||
135 | status = __raw_readl(MMUCR); | 134 | status = __raw_readl(MMUCR); |
136 | status |= 0x04; | 135 | status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT); |
137 | __raw_writel(status, MMUCR); | 136 | |
137 | if (status == 0) | ||
138 | status = MMUCR_URB_NENTRIES; | ||
139 | |||
140 | for (i = 0; i < status; i++) | ||
141 | __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8)); | ||
142 | |||
143 | for (i = 0; i < 4; i++) | ||
144 | __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8)); | ||
145 | |||
146 | back_to_cached(); | ||
138 | ctrl_barrier(); | 147 | ctrl_barrier(); |
139 | local_irq_restore(flags); | 148 | local_irq_restore(flags); |
140 | } | 149 | } |
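
The new local_flush_tlb_all() above reads the URB field of MMUCR to learn how many UTLB slots to touch and then clears every UTLB and ITLB address-array entry through the uncached mapping, instead of poking the single TI/TF invalidate bit. A mocked-up C sketch of that loop; the arrays, sizes and URB handling are illustrative, with plain array stores standing in for the __raw_writel() MMIO accesses:

    #include <stdio.h>

    #define UTLB_NENTRIES 64
    #define ITLB_NENTRIES 4

    static unsigned long utlb[UTLB_NENTRIES];
    static unsigned long itlb[ITLB_NENTRIES];

    static void mock_flush_tlb_all(unsigned int urb)
    {
        unsigned int n = urb ? urb : UTLB_NENTRIES;  /* URB == 0: flush the full array */
        unsigned int i;

        for (i = 0; i < n; i++)
            utlb[i] = 0;       /* __raw_writel(0, MMU_UTLB_ADDRESS_ARRAY | (i << 8)) */
        for (i = 0; i < ITLB_NENTRIES; i++)
            itlb[i] = 0;       /* __raw_writel(0, MMU_ITLB_ADDRESS_ARRAY | (i << 8)) */
    }

    int main(void)
    {
        utlb[3] = itlb[1] = 0xdeadbeef;
        mock_flush_tlb_all(0);
        printf("%lx %lx\n", utlb[3], itlb[1]);
        return 0;
    }
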
diff --git a/arch/sparc/include/asm/stat.h b/arch/sparc/include/asm/stat.h index 39327d6a57eb..a232e9e1f4e5 100644 --- a/arch/sparc/include/asm/stat.h +++ b/arch/sparc/include/asm/stat.h | |||
@@ -53,8 +53,8 @@ struct stat { | |||
53 | ino_t st_ino; | 53 | ino_t st_ino; |
54 | mode_t st_mode; | 54 | mode_t st_mode; |
55 | short st_nlink; | 55 | short st_nlink; |
56 | uid16_t st_uid; | 56 | unsigned short st_uid; |
57 | gid16_t st_gid; | 57 | unsigned short st_gid; |
58 | unsigned short st_rdev; | 58 | unsigned short st_rdev; |
59 | off_t st_size; | 59 | off_t st_size; |
60 | time_t st_atime; | 60 | time_t st_atime; |
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 68cb9b42088f..e2771939341d 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
@@ -1337,7 +1337,7 @@ static void perf_callchain_user_32(struct pt_regs *regs, | |||
1337 | callchain_store(entry, PERF_CONTEXT_USER); | 1337 | callchain_store(entry, PERF_CONTEXT_USER); |
1338 | callchain_store(entry, regs->tpc); | 1338 | callchain_store(entry, regs->tpc); |
1339 | 1339 | ||
1340 | ufp = regs->u_regs[UREG_I6]; | 1340 | ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; |
1341 | do { | 1341 | do { |
1342 | struct sparc_stackf32 *usf, sf; | 1342 | struct sparc_stackf32 *usf, sf; |
1343 | unsigned long pc; | 1343 | unsigned long pc; |
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c index ca39c606fe8e..1eb8b00aed75 100644 --- a/arch/sparc/kernel/sysfs.c +++ b/arch/sparc/kernel/sysfs.c | |||
@@ -107,12 +107,12 @@ static unsigned long run_on_cpu(unsigned long cpu, | |||
107 | unsigned long ret; | 107 | unsigned long ret; |
108 | 108 | ||
109 | /* should return -EINVAL to userspace */ | 109 | /* should return -EINVAL to userspace */ |
110 | if (set_cpus_allowed(current, cpumask_of_cpu(cpu))) | 110 | if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) |
111 | return 0; | 111 | return 0; |
112 | 112 | ||
113 | ret = func(arg); | 113 | ret = func(arg); |
114 | 114 | ||
115 | set_cpus_allowed(current, old_affinity); | 115 | set_cpus_allowed_ptr(current, &old_affinity); |
116 | 116 | ||
117 | return ret; | 117 | return ret; |
118 | } | 118 | } |
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c index 791c15138f3a..8f982b76c712 100644 --- a/arch/sparc/kernel/us2e_cpufreq.c +++ b/arch/sparc/kernel/us2e_cpufreq.c | |||
@@ -238,12 +238,12 @@ static unsigned int us2e_freq_get(unsigned int cpu) | |||
238 | return 0; | 238 | return 0; |
239 | 239 | ||
240 | cpus_allowed = current->cpus_allowed; | 240 | cpus_allowed = current->cpus_allowed; |
241 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | 241 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
242 | 242 | ||
243 | clock_tick = sparc64_get_clock_tick(cpu) / 1000; | 243 | clock_tick = sparc64_get_clock_tick(cpu) / 1000; |
244 | estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); | 244 | estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); |
245 | 245 | ||
246 | set_cpus_allowed(current, cpus_allowed); | 246 | set_cpus_allowed_ptr(current, &cpus_allowed); |
247 | 247 | ||
248 | return clock_tick / estar_to_divisor(estar); | 248 | return clock_tick / estar_to_divisor(estar); |
249 | } | 249 | } |
@@ -259,7 +259,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) | |||
259 | return; | 259 | return; |
260 | 260 | ||
261 | cpus_allowed = current->cpus_allowed; | 261 | cpus_allowed = current->cpus_allowed; |
262 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | 262 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
263 | 263 | ||
264 | new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; | 264 | new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; |
265 | new_bits = index_to_estar_mode(index); | 265 | new_bits = index_to_estar_mode(index); |
@@ -281,7 +281,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) | |||
281 | 281 | ||
282 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 282 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
283 | 283 | ||
284 | set_cpus_allowed(current, cpus_allowed); | 284 | set_cpus_allowed_ptr(current, &cpus_allowed); |
285 | } | 285 | } |
286 | 286 | ||
287 | static int us2e_freq_target(struct cpufreq_policy *policy, | 287 | static int us2e_freq_target(struct cpufreq_policy *policy, |
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c index 365b6464e2ce..f35d1e794548 100644 --- a/arch/sparc/kernel/us3_cpufreq.c +++ b/arch/sparc/kernel/us3_cpufreq.c | |||
@@ -86,12 +86,12 @@ static unsigned int us3_freq_get(unsigned int cpu) | |||
86 | return 0; | 86 | return 0; |
87 | 87 | ||
88 | cpus_allowed = current->cpus_allowed; | 88 | cpus_allowed = current->cpus_allowed; |
89 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | 89 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
90 | 90 | ||
91 | reg = read_safari_cfg(); | 91 | reg = read_safari_cfg(); |
92 | ret = get_current_freq(cpu, reg); | 92 | ret = get_current_freq(cpu, reg); |
93 | 93 | ||
94 | set_cpus_allowed(current, cpus_allowed); | 94 | set_cpus_allowed_ptr(current, &cpus_allowed); |
95 | 95 | ||
96 | return ret; | 96 | return ret; |
97 | } | 97 | } |
@@ -106,7 +106,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) | |||
106 | return; | 106 | return; |
107 | 107 | ||
108 | cpus_allowed = current->cpus_allowed; | 108 | cpus_allowed = current->cpus_allowed; |
109 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | 109 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
110 | 110 | ||
111 | new_freq = sparc64_get_clock_tick(cpu) / 1000; | 111 | new_freq = sparc64_get_clock_tick(cpu) / 1000; |
112 | switch (index) { | 112 | switch (index) { |
@@ -140,7 +140,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) | |||
140 | 140 | ||
141 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 141 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
142 | 142 | ||
143 | set_cpus_allowed(current, cpus_allowed); | 143 | set_cpus_allowed_ptr(current, &cpus_allowed); |
144 | } | 144 | } |
145 | 145 | ||
146 | static int us3_freq_target(struct cpufreq_policy *policy, | 146 | static int us3_freq_target(struct cpufreq_policy *policy, |
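
The sysfs and cpufreq hunks above all follow the same pattern: save the caller's CPU mask, pin to the target CPU so the per-CPU register access happens there, do the work, then restore the mask; the change is only the move from the by-value cpumask API to set_cpus_allowed_ptr(). A userspace analogue of that save/pin/restore dance, using sched_setaffinity() in place of the kernel helpers:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* sched_getaffinity()/sched_setaffinity() stand in for set_cpus_allowed_ptr(). */
    static long read_on_cpu(int cpu, long (*fn)(void))
    {
        cpu_set_t old, target;
        long ret;

        if (sched_getaffinity(0, sizeof(old), &old))        /* save the current mask */
            return -1;

        CPU_ZERO(&target);
        CPU_SET(cpu, &target);
        if (sched_setaffinity(0, sizeof(target), &target))  /* pin to the target CPU */
            return -1;

        ret = fn();                                         /* do the per-CPU work there */

        sched_setaffinity(0, sizeof(old), &old);            /* restore the caller's mask */
        return ret;
    }

    static long whoami(void)
    {
        return sched_getcpu();
    }

    int main(void)
    {
        printf("ran on cpu %ld\n", read_on_cpu(0, whoami));
        return 0;
    }
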
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 635f03bb4995..d07b44f7d1dc 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h | |||
@@ -82,6 +82,9 @@ enum fixed_addresses { | |||
82 | #endif | 82 | #endif |
83 | FIX_DBGP_BASE, | 83 | FIX_DBGP_BASE, |
84 | FIX_EARLYCON_MEM_BASE, | 84 | FIX_EARLYCON_MEM_BASE, |
85 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | ||
86 | FIX_OHCI1394_BASE, | ||
87 | #endif | ||
85 | #ifdef CONFIG_X86_LOCAL_APIC | 88 | #ifdef CONFIG_X86_LOCAL_APIC |
86 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ | 89 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ |
87 | #endif | 90 | #endif |
@@ -132,9 +135,6 @@ enum fixed_addresses { | |||
132 | (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1)) | 135 | (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1)) |
133 | : __end_of_permanent_fixed_addresses, | 136 | : __end_of_permanent_fixed_addresses, |
134 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, | 137 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, |
135 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | ||
136 | FIX_OHCI1394_BASE, | ||
137 | #endif | ||
138 | #ifdef CONFIG_X86_32 | 138 | #ifdef CONFIG_X86_32 |
139 | FIX_WP_TEST, | 139 | FIX_WP_TEST, |
140 | #endif | 140 | #endif |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index a929c9ede33d..46c0fe05f230 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -133,6 +133,7 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); | |||
133 | 133 | ||
134 | typedef int vector_irq_t[NR_VECTORS]; | 134 | typedef int vector_irq_t[NR_VECTORS]; |
135 | DECLARE_PER_CPU(vector_irq_t, vector_irq); | 135 | DECLARE_PER_CPU(vector_irq_t, vector_irq); |
136 | extern void setup_vector_irq(int cpu); | ||
136 | 137 | ||
137 | #ifdef CONFIG_X86_IO_APIC | 138 | #ifdef CONFIG_X86_IO_APIC |
138 | extern void lock_vector_lock(void); | 139 | extern void lock_vector_lock(void); |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 06e4cf0d3846..bc473acfa7f9 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -108,6 +108,8 @@ | |||
108 | #define MSR_AMD64_PATCH_LEVEL 0x0000008b | 108 | #define MSR_AMD64_PATCH_LEVEL 0x0000008b |
109 | #define MSR_AMD64_NB_CFG 0xc001001f | 109 | #define MSR_AMD64_NB_CFG 0xc001001f |
110 | #define MSR_AMD64_PATCH_LOADER 0xc0010020 | 110 | #define MSR_AMD64_PATCH_LOADER 0xc0010020 |
111 | #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 | ||
112 | #define MSR_AMD64_OSVW_STATUS 0xc0010141 | ||
111 | #define MSR_AMD64_IBSFETCHCTL 0xc0011030 | 113 | #define MSR_AMD64_IBSFETCHCTL 0xc0011030 |
112 | #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 | 114 | #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 |
113 | #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 | 115 | #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index e4e0ddcb1546..463de9a858ad 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -1268,6 +1268,14 @@ void __setup_vector_irq(int cpu) | |||
1268 | /* Mark the inuse vectors */ | 1268 | /* Mark the inuse vectors */ |
1269 | for_each_irq_desc(irq, desc) { | 1269 | for_each_irq_desc(irq, desc) { |
1270 | cfg = desc->chip_data; | 1270 | cfg = desc->chip_data; |
1271 | |||
1272 | /* | ||
1273 | * If it is a legacy IRQ handled by the legacy PIC, this cpu | ||
1274 | * will be part of the irq_cfg's domain. | ||
1275 | */ | ||
1276 | if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq)) | ||
1277 | cpumask_set_cpu(cpu, cfg->domain); | ||
1278 | |||
1271 | if (!cpumask_test_cpu(cpu, cfg->domain)) | 1279 | if (!cpumask_test_cpu(cpu, cfg->domain)) |
1272 | continue; | 1280 | continue; |
1273 | vector = cfg->vector; | 1281 | vector = cfg->vector; |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 6f66d4a845ff..b53435661813 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/apic.h> | 28 | #include <asm/apic.h> |
29 | #include <asm/stacktrace.h> | 29 | #include <asm/stacktrace.h> |
30 | #include <asm/nmi.h> | 30 | #include <asm/nmi.h> |
31 | #include <asm/compat.h> | ||
31 | 32 | ||
32 | #if 0 | 33 | #if 0 |
33 | #undef wrmsrl | 34 | #undef wrmsrl |
@@ -209,7 +210,7 @@ struct x86_pmu { | |||
209 | struct event_constraint *event_constraints; | 210 | struct event_constraint *event_constraints; |
210 | void (*quirks)(void); | 211 | void (*quirks)(void); |
211 | 212 | ||
212 | void (*cpu_prepare)(int cpu); | 213 | int (*cpu_prepare)(int cpu); |
213 | void (*cpu_starting)(int cpu); | 214 | void (*cpu_starting)(int cpu); |
214 | void (*cpu_dying)(int cpu); | 215 | void (*cpu_dying)(int cpu); |
215 | void (*cpu_dead)(int cpu); | 216 | void (*cpu_dead)(int cpu); |
@@ -1330,11 +1331,12 @@ static int __cpuinit | |||
1330 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | 1331 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
1331 | { | 1332 | { |
1332 | unsigned int cpu = (long)hcpu; | 1333 | unsigned int cpu = (long)hcpu; |
1334 | int ret = NOTIFY_OK; | ||
1333 | 1335 | ||
1334 | switch (action & ~CPU_TASKS_FROZEN) { | 1336 | switch (action & ~CPU_TASKS_FROZEN) { |
1335 | case CPU_UP_PREPARE: | 1337 | case CPU_UP_PREPARE: |
1336 | if (x86_pmu.cpu_prepare) | 1338 | if (x86_pmu.cpu_prepare) |
1337 | x86_pmu.cpu_prepare(cpu); | 1339 | ret = x86_pmu.cpu_prepare(cpu); |
1338 | break; | 1340 | break; |
1339 | 1341 | ||
1340 | case CPU_STARTING: | 1342 | case CPU_STARTING: |
@@ -1347,6 +1349,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | |||
1347 | x86_pmu.cpu_dying(cpu); | 1349 | x86_pmu.cpu_dying(cpu); |
1348 | break; | 1350 | break; |
1349 | 1351 | ||
1352 | case CPU_UP_CANCELED: | ||
1350 | case CPU_DEAD: | 1353 | case CPU_DEAD: |
1351 | if (x86_pmu.cpu_dead) | 1354 | if (x86_pmu.cpu_dead) |
1352 | x86_pmu.cpu_dead(cpu); | 1355 | x86_pmu.cpu_dead(cpu); |
@@ -1356,7 +1359,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | |||
1356 | break; | 1359 | break; |
1357 | } | 1360 | } |
1358 | 1361 | ||
1359 | return NOTIFY_OK; | 1362 | return ret; |
1360 | } | 1363 | } |
1361 | 1364 | ||
1362 | static void __init pmu_check_apic(void) | 1365 | static void __init pmu_check_apic(void) |
@@ -1620,14 +1623,42 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) | |||
1620 | dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry); | 1623 | dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry); |
1621 | } | 1624 | } |
1622 | 1625 | ||
1623 | static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) | 1626 | #ifdef CONFIG_COMPAT |
1627 | static inline int | ||
1628 | perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) | ||
1624 | { | 1629 | { |
1625 | unsigned long bytes; | 1630 | /* 32-bit process in 64-bit kernel. */ |
1631 | struct stack_frame_ia32 frame; | ||
1632 | const void __user *fp; | ||
1626 | 1633 | ||
1627 | bytes = copy_from_user_nmi(frame, fp, sizeof(*frame)); | 1634 | if (!test_thread_flag(TIF_IA32)) |
1635 | return 0; | ||
1636 | |||
1637 | fp = compat_ptr(regs->bp); | ||
1638 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | ||
1639 | unsigned long bytes; | ||
1640 | frame.next_frame = 0; | ||
1641 | frame.return_address = 0; | ||
1642 | |||
1643 | bytes = copy_from_user_nmi(&frame, fp, sizeof(frame)); | ||
1644 | if (bytes != sizeof(frame)) | ||
1645 | break; | ||
1646 | |||
1647 | if (fp < compat_ptr(regs->sp)) | ||
1648 | break; | ||
1628 | 1649 | ||
1629 | return bytes == sizeof(*frame); | 1650 | callchain_store(entry, frame.return_address); |
1651 | fp = compat_ptr(frame.next_frame); | ||
1652 | } | ||
1653 | return 1; | ||
1630 | } | 1654 | } |
1655 | #else | ||
1656 | static inline int | ||
1657 | perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) | ||
1658 | { | ||
1659 | return 0; | ||
1660 | } | ||
1661 | #endif | ||
1631 | 1662 | ||
1632 | static void | 1663 | static void |
1633 | perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) | 1664 | perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) |
@@ -1643,11 +1674,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) | |||
1643 | callchain_store(entry, PERF_CONTEXT_USER); | 1674 | callchain_store(entry, PERF_CONTEXT_USER); |
1644 | callchain_store(entry, regs->ip); | 1675 | callchain_store(entry, regs->ip); |
1645 | 1676 | ||
1677 | if (perf_callchain_user32(regs, entry)) | ||
1678 | return; | ||
1679 | |||
1646 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | 1680 | while (entry->nr < PERF_MAX_STACK_DEPTH) { |
1681 | unsigned long bytes; | ||
1647 | frame.next_frame = NULL; | 1682 | frame.next_frame = NULL; |
1648 | frame.return_address = 0; | 1683 | frame.return_address = 0; |
1649 | 1684 | ||
1650 | if (!copy_stack_frame(fp, &frame)) | 1685 | bytes = copy_from_user_nmi(&frame, fp, sizeof(frame)); |
1686 | if (bytes != sizeof(frame)) | ||
1651 | break; | 1687 | break; |
1652 | 1688 | ||
1653 | if ((unsigned long)fp < regs->sp) | 1689 | if ((unsigned long)fp < regs->sp) |
@@ -1694,7 +1730,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
1694 | return entry; | 1730 | return entry; |
1695 | } | 1731 | } |
1696 | 1732 | ||
1697 | #ifdef CONFIG_EVENT_TRACING | ||
1698 | void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) | 1733 | void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) |
1699 | { | 1734 | { |
1700 | regs->ip = ip; | 1735 | regs->ip = ip; |
@@ -1706,4 +1741,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski | |||
1706 | regs->cs = __KERNEL_CS; | 1741 | regs->cs = __KERNEL_CS; |
1707 | local_save_flags(regs->flags); | 1742 | local_save_flags(regs->flags); |
1708 | } | 1743 | } |
1709 | #endif | ||
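
perf_callchain_user32() above walks a 32-bit task's user stack with 4-byte frame pointers, stopping on a short copy or when the frame pointer drops below the stack pointer. A self-contained sketch of that walk; the "user stack" is a local buffer and fake_copy_from_user() is an invented stand-in for copy_from_user_nmi(), but the stack_frame_ia32 layout matches the hunk:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct stack_frame_ia32 {
        uint32_t next_frame;
        uint32_t return_address;
    };

    static uint8_t user_stack[64];

    static size_t fake_copy_from_user(void *dst, uint32_t uaddr, size_t len)
    {
        if (uaddr + len > sizeof(user_stack))
            return 0;                     /* "fault": nothing copied */
        memcpy(dst, user_stack + uaddr, len);
        return len;
    }

    static void walk_callchain32(uint32_t bp, uint32_t sp, int max_depth)
    {
        for (int depth = 0; depth < max_depth; depth++) {
            struct stack_frame_ia32 frame = { 0, 0 };

            if (fake_copy_from_user(&frame, bp, sizeof(frame)) != sizeof(frame))
                break;                    /* truncated read: unreadable frame */
            if (bp < sp)
                break;                    /* frame pointer ran below the stack pointer */

            printf("return address %#x\n", (unsigned)frame.return_address);
            bp = frame.next_frame;        /* follow the 32-bit link */
        }
    }

    int main(void)
    {
        struct stack_frame_ia32 f1 = { 0, 0x1004 }, f0 = { 32, 0x2008 };

        memcpy(user_stack + 8, &f0, sizeof(f0));    /* frame at offset 8 links to offset 32 */
        memcpy(user_stack + 32, &f1, sizeof(f1));   /* top frame ends the walk */
        walk_callchain32(8, 8, 16);
        return 0;
    }
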
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 358a8e3d05f8..285623bc3cc8 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc) | |||
137 | return (hwc->config & 0xe0) == 0xe0; | 137 | return (hwc->config & 0xe0) == 0xe0; |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline int amd_has_nb(struct cpu_hw_events *cpuc) | ||
141 | { | ||
142 | struct amd_nb *nb = cpuc->amd_nb; | ||
143 | |||
144 | return nb && nb->nb_id != -1; | ||
145 | } | ||
146 | |||
140 | static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | 147 | static void amd_put_event_constraints(struct cpu_hw_events *cpuc, |
141 | struct perf_event *event) | 148 | struct perf_event *event) |
142 | { | 149 | { |
@@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | |||
147 | /* | 154 | /* |
148 | * only care about NB events | 155 | * only care about NB events |
149 | */ | 156 | */ |
150 | if (!(nb && amd_is_nb_event(hwc))) | 157 | if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) |
151 | return; | 158 | return; |
152 | 159 | ||
153 | /* | 160 | /* |
@@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | |||
214 | /* | 221 | /* |
215 | * if not NB event or no NB, then no constraints | 222 | * if not NB event or no NB, then no constraints |
216 | */ | 223 | */ |
217 | if (!(nb && amd_is_nb_event(hwc))) | 224 | if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) |
218 | return &unconstrained; | 225 | return &unconstrained; |
219 | 226 | ||
220 | /* | 227 | /* |
@@ -293,51 +300,55 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) | |||
293 | return nb; | 300 | return nb; |
294 | } | 301 | } |
295 | 302 | ||
296 | static void amd_pmu_cpu_online(int cpu) | 303 | static int amd_pmu_cpu_prepare(int cpu) |
304 | { | ||
305 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | ||
306 | |||
307 | WARN_ON_ONCE(cpuc->amd_nb); | ||
308 | |||
309 | if (boot_cpu_data.x86_max_cores < 2) | ||
310 | return NOTIFY_OK; | ||
311 | |||
312 | cpuc->amd_nb = amd_alloc_nb(cpu, -1); | ||
313 | if (!cpuc->amd_nb) | ||
314 | return NOTIFY_BAD; | ||
315 | |||
316 | return NOTIFY_OK; | ||
317 | } | ||
318 | |||
319 | static void amd_pmu_cpu_starting(int cpu) | ||
297 | { | 320 | { |
298 | struct cpu_hw_events *cpu1, *cpu2; | 321 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
299 | struct amd_nb *nb = NULL; | 322 | struct amd_nb *nb; |
300 | int i, nb_id; | 323 | int i, nb_id; |
301 | 324 | ||
302 | if (boot_cpu_data.x86_max_cores < 2) | 325 | if (boot_cpu_data.x86_max_cores < 2) |
303 | return; | 326 | return; |
304 | 327 | ||
305 | /* | ||
306 | * function may be called too early in the | ||
307 | * boot process, in which case nb_id is bogus | ||
308 | */ | ||
309 | nb_id = amd_get_nb_id(cpu); | 328 | nb_id = amd_get_nb_id(cpu); |
310 | if (nb_id == BAD_APICID) | 329 | WARN_ON_ONCE(nb_id == BAD_APICID); |
311 | return; | ||
312 | |||
313 | cpu1 = &per_cpu(cpu_hw_events, cpu); | ||
314 | cpu1->amd_nb = NULL; | ||
315 | 330 | ||
316 | raw_spin_lock(&amd_nb_lock); | 331 | raw_spin_lock(&amd_nb_lock); |
317 | 332 | ||
318 | for_each_online_cpu(i) { | 333 | for_each_online_cpu(i) { |
319 | cpu2 = &per_cpu(cpu_hw_events, i); | 334 | nb = per_cpu(cpu_hw_events, i).amd_nb; |
320 | nb = cpu2->amd_nb; | 335 | if (WARN_ON_ONCE(!nb)) |
321 | if (!nb) | ||
322 | continue; | 336 | continue; |
323 | if (nb->nb_id == nb_id) | ||
324 | goto found; | ||
325 | } | ||
326 | 337 | ||
327 | nb = amd_alloc_nb(cpu, nb_id); | 338 | if (nb->nb_id == nb_id) { |
328 | if (!nb) { | 339 | kfree(cpuc->amd_nb); |
329 | pr_err("perf_events: failed NB allocation for CPU%d\n", cpu); | 340 | cpuc->amd_nb = nb; |
330 | raw_spin_unlock(&amd_nb_lock); | 341 | break; |
331 | return; | 342 | } |
332 | } | 343 | } |
333 | found: | 344 | |
334 | nb->refcnt++; | 345 | cpuc->amd_nb->nb_id = nb_id; |
335 | cpu1->amd_nb = nb; | 346 | cpuc->amd_nb->refcnt++; |
336 | 347 | ||
337 | raw_spin_unlock(&amd_nb_lock); | 348 | raw_spin_unlock(&amd_nb_lock); |
338 | } | 349 | } |
339 | 350 | ||
340 | static void amd_pmu_cpu_offline(int cpu) | 351 | static void amd_pmu_cpu_dead(int cpu) |
341 | { | 352 | { |
342 | struct cpu_hw_events *cpuhw; | 353 | struct cpu_hw_events *cpuhw; |
343 | 354 | ||
@@ -348,10 +359,14 @@ static void amd_pmu_cpu_offline(int cpu) | |||
348 | 359 | ||
349 | raw_spin_lock(&amd_nb_lock); | 360 | raw_spin_lock(&amd_nb_lock); |
350 | 361 | ||
351 | if (--cpuhw->amd_nb->refcnt == 0) | 362 | if (cpuhw->amd_nb) { |
352 | kfree(cpuhw->amd_nb); | 363 | struct amd_nb *nb = cpuhw->amd_nb; |
364 | |||
365 | if (nb->nb_id == -1 || --nb->refcnt == 0) | ||
366 | kfree(nb); | ||
353 | 367 | ||
354 | cpuhw->amd_nb = NULL; | 368 | cpuhw->amd_nb = NULL; |
369 | } | ||
355 | 370 | ||
356 | raw_spin_unlock(&amd_nb_lock); | 371 | raw_spin_unlock(&amd_nb_lock); |
357 | } | 372 | } |
@@ -379,8 +394,9 @@ static __initconst struct x86_pmu amd_pmu = { | |||
379 | .get_event_constraints = amd_get_event_constraints, | 394 | .get_event_constraints = amd_get_event_constraints, |
380 | .put_event_constraints = amd_put_event_constraints, | 395 | .put_event_constraints = amd_put_event_constraints, |
381 | 396 | ||
382 | .cpu_prepare = amd_pmu_cpu_online, | 397 | .cpu_prepare = amd_pmu_cpu_prepare, |
383 | .cpu_dead = amd_pmu_cpu_offline, | 398 | .cpu_starting = amd_pmu_cpu_starting, |
399 | .cpu_dead = amd_pmu_cpu_dead, | ||
384 | }; | 400 | }; |
385 | 401 | ||
386 | static __init int amd_pmu_init(void) | 402 | static __init int amd_pmu_init(void) |
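
The AMD north-bridge rework above splits the bookkeeping across hotplug stages: allocate a private structure at CPU_UP_PREPARE (where NOTIFY_BAD can still veto the CPU), share or publish the per-node structure and take a reference at CPU_STARTING, and drop it at CPU_DEAD. A toy single-threaded C sketch of that life cycle; all names are invented and the serialisation that amd_nb_lock provides is omitted:

    #include <stdio.h>
    #include <stdlib.h>

    #define NCPUS 4

    struct nb { int nb_id; int refcnt; };
    static struct nb *per_cpu_nb[NCPUS];

    static int cpu_prepare(int cpu)
    {
        per_cpu_nb[cpu] = calloc(1, sizeof(struct nb));
        if (!per_cpu_nb[cpu])
            return -1;                   /* the real callback returns NOTIFY_BAD here */
        per_cpu_nb[cpu]->nb_id = -1;     /* not attached to a node yet */
        return 0;
    }

    static void cpu_starting(int cpu, int nb_id)
    {
        for (int i = 0; i < NCPUS; i++) {
            struct nb *nb = per_cpu_nb[i];

            if (i != cpu && nb && nb->nb_id == nb_id) {
                free(per_cpu_nb[cpu]);   /* this node is already published: share it */
                per_cpu_nb[cpu] = nb;
                break;
            }
        }
        per_cpu_nb[cpu]->nb_id = nb_id;
        per_cpu_nb[cpu]->refcnt++;
    }

    static void cpu_dead(int cpu)
    {
        struct nb *nb = per_cpu_nb[cpu];

        if (nb && (nb->nb_id == -1 || --nb->refcnt == 0))
            free(nb);
        per_cpu_nb[cpu] = NULL;
    }

    int main(void)
    {
        for (int cpu = 0; cpu < NCPUS; cpu++) {
            cpu_prepare(cpu);
            cpu_starting(cpu, cpu / 2);   /* pretend two cpus share one node */
        }
        printf("node 0 refcnt = %d\n", per_cpu_nb[0]->refcnt);
        for (int cpu = 0; cpu < NCPUS; cpu++)
            cpu_dead(cpu);
        return 0;
    }
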
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h index 29e5f7c845b2..e39e77168a37 100644 --- a/arch/x86/kernel/dumpstack.h +++ b/arch/x86/kernel/dumpstack.h | |||
@@ -30,6 +30,11 @@ struct stack_frame { | |||
30 | unsigned long return_address; | 30 | unsigned long return_address; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | struct stack_frame_ia32 { | ||
34 | u32 next_frame; | ||
35 | u32 return_address; | ||
36 | }; | ||
37 | |||
33 | static inline unsigned long rewind_frame_pointer(int n) | 38 | static inline unsigned long rewind_frame_pointer(int n) |
34 | { | 39 | { |
35 | struct stack_frame *frame; | 40 | struct stack_frame *frame; |
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index adedeef1dedc..b2e246037392 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/start_kernel.h> | 9 | #include <linux/start_kernel.h> |
10 | #include <linux/mm.h> | ||
10 | 11 | ||
11 | #include <asm/setup.h> | 12 | #include <asm/setup.h> |
12 | #include <asm/sections.h> | 13 | #include <asm/sections.h> |
@@ -44,9 +45,10 @@ void __init i386_start_kernel(void) | |||
44 | #ifdef CONFIG_BLK_DEV_INITRD | 45 | #ifdef CONFIG_BLK_DEV_INITRD |
45 | /* Reserve INITRD */ | 46 | /* Reserve INITRD */ |
46 | if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { | 47 | if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { |
48 | /* Assume only end is not page aligned */ | ||
47 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; | 49 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; |
48 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; | 50 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; |
49 | u64 ramdisk_end = ramdisk_image + ramdisk_size; | 51 | u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
50 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); | 52 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); |
51 | } | 53 | } |
52 | #endif | 54 | #endif |
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index b5a9896ca1e7..7147143fd614 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -103,9 +103,10 @@ void __init x86_64_start_reservations(char *real_mode_data) | |||
103 | #ifdef CONFIG_BLK_DEV_INITRD | 103 | #ifdef CONFIG_BLK_DEV_INITRD |
104 | /* Reserve INITRD */ | 104 | /* Reserve INITRD */ |
105 | if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { | 105 | if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { |
106 | /* Assume only end is not page aligned */ | ||
106 | unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; | 107 | unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; |
107 | unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; | 108 | unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; |
108 | unsigned long ramdisk_end = ramdisk_image + ramdisk_size; | 109 | unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
109 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); | 110 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); |
110 | } | 111 | } |
111 | #endif | 112 | #endif |
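
The head32/head64 hunks above assume only the end of the initrd can be unaligned, so the early reservation is rounded up with PAGE_ALIGN before being recorded. A small sketch of that rounding, with PAGE_ALIGN spelled out the way the usual kernel macro expands:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE      4096ULL
    #define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t ramdisk_image = 0x1000000;   /* page-aligned start */
        uint64_t ramdisk_size  = 123456;      /* arbitrary, not a multiple of the page size */
        uint64_t ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

        printf("reserve [%#llx, %#llx)\n",
               (unsigned long long)ramdisk_image,
               (unsigned long long)ramdisk_end);
        return 0;
    }
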
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index ef257fc2921b..f01d390f9c5b 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -141,6 +141,28 @@ void __init init_IRQ(void) | |||
141 | x86_init.irqs.intr_init(); | 141 | x86_init.irqs.intr_init(); |
142 | } | 142 | } |
143 | 143 | ||
144 | /* | ||
145 | * Setup the vector to irq mappings. | ||
146 | */ | ||
147 | void setup_vector_irq(int cpu) | ||
148 | { | ||
149 | #ifndef CONFIG_X86_IO_APIC | ||
150 | int irq; | ||
151 | |||
152 | /* | ||
153 | * On most of the platforms, legacy PIC delivers the interrupts on the | ||
154 | * boot cpu. But there are certain platforms where PIC interrupts are | ||
155 | * delivered to multiple cpu's. If the legacy IRQ is handled by the | ||
156 | * legacy PIC, for the new cpu that is coming online, setup the static | ||
157 | * legacy vector to irq mapping: | ||
158 | */ | ||
159 | for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++) | ||
160 | per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq; | ||
161 | #endif | ||
162 | |||
163 | __setup_vector_irq(cpu); | ||
164 | } | ||
165 | |||
144 | static void __init smp_intr_init(void) | 166 | static void __init smp_intr_init(void) |
145 | { | 167 | { |
146 | #ifdef CONFIG_SMP | 168 | #ifdef CONFIG_SMP |
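
setup_vector_irq() above pre-loads a CPU's vector_irq[] table with the static legacy mapping (vector IRQ0_VECTOR + n resolves to IRQ n) before the IO-APIC code takes over, so PIC interrupts delivered to a freshly onlined CPU still find their IRQ. A sketch of filling such a table; the vector numbers below are the conventional x86 values and are assumptions of this sketch:

    #include <stdio.h>

    #define NR_VECTORS     256
    #define IRQ0_VECTOR    0x30
    #define NR_LEGACY_IRQS 16

    static int vector_irq[NR_VECTORS];

    static void setup_vector_irq_sketch(void)
    {
        int v, irq;

        for (v = 0; v < NR_VECTORS; v++)
            vector_irq[v] = -1;                    /* -1 marks an unused vector */
        for (irq = 0; irq < NR_LEGACY_IRQS; irq++)
            vector_irq[IRQ0_VECTOR + irq] = irq;   /* static legacy PIC lines */
        /* the real function then calls __setup_vector_irq() for IO-APIC irqs */
    }

    int main(void)
    {
        setup_vector_irq_sketch();
        printf("vector 0x31 -> irq %d\n", vector_irq[0x31]);
        return 0;
    }
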
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index bfba6019d762..b2258ca91003 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -618,8 +618,8 @@ int kgdb_arch_init(void) | |||
618 | * portion of kgdb because this operation requires mutexs to | 618 | * portion of kgdb because this operation requires mutexs to |
619 | * complete. | 619 | * complete. |
620 | */ | 620 | */ |
621 | hw_breakpoint_init(&attr); | ||
621 | attr.bp_addr = (unsigned long)kgdb_arch_init; | 622 | attr.bp_addr = (unsigned long)kgdb_arch_init; |
622 | attr.type = PERF_TYPE_BREAKPOINT; | ||
623 | attr.bp_len = HW_BREAKPOINT_LEN_1; | 623 | attr.bp_len = HW_BREAKPOINT_LEN_1; |
624 | attr.bp_type = HW_BREAKPOINT_W; | 624 | attr.bp_type = HW_BREAKPOINT_W; |
625 | attr.disabled = 1; | 625 | attr.disabled = 1; |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 8328009416d7..eccdb57094e3 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -528,21 +528,37 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) | |||
528 | } | 528 | } |
529 | 529 | ||
530 | /* | 530 | /* |
531 | * Check for AMD CPUs, which have potentially C1E support | 531 | * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e. |
532 | * For more information see | ||
533 | * - Erratum #400 for NPT family 0xf and family 0x10 CPUs | ||
534 | * - Erratum #365 for family 0x11 (not affected because C1e not in use) | ||
532 | */ | 535 | */ |
533 | static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) | 536 | static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) |
534 | { | 537 | { |
538 | u64 val; | ||
535 | if (c->x86_vendor != X86_VENDOR_AMD) | 539 | if (c->x86_vendor != X86_VENDOR_AMD) |
536 | return 0; | 540 | goto no_c1e_idle; |
537 | |||
538 | if (c->x86 < 0x0F) | ||
539 | return 0; | ||
540 | 541 | ||
541 | /* Family 0x0f models < rev F do not have C1E */ | 542 | /* Family 0x0f models < rev F do not have C1E */ |
542 | if (c->x86 == 0x0f && c->x86_model < 0x40) | 543 | if (c->x86 == 0x0F && c->x86_model >= 0x40) |
543 | return 0; | 544 | return 1; |
544 | 545 | ||
545 | return 1; | 546 | if (c->x86 == 0x10) { |
547 | /* | ||
548 | * check OSVW bit for CPUs that are not affected | ||
549 | * by erratum #400 | ||
550 | */ | ||
551 | rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val); | ||
552 | if (val >= 2) { | ||
553 | rdmsrl(MSR_AMD64_OSVW_STATUS, val); | ||
554 | if (!(val & BIT(1))) | ||
555 | goto no_c1e_idle; | ||
556 | } | ||
557 | return 1; | ||
558 | } | ||
559 | |||
560 | no_c1e_idle: | ||
561 | return 0; | ||
546 | } | 562 | } |
547 | 563 | ||
548 | static cpumask_var_t c1e_mask; | 564 | static cpumask_var_t c1e_mask; |
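The family 0x10 branch above consults the OS Visible Workaround registers before assuming erratum #400 applies. A hedged userspace sketch of just that decision; the two arguments stand in for MSR_AMD64_OSVW_ID_LENGTH and MSR_AMD64_OSVW_STATUS, which the real code reads with rdmsrl():

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define BIT(n) (1ULL << (n))

/*
 * Mirror of the check in check_c1e_idle(): if the OSVW ID length covers
 * entry 1, trust the status bit; otherwise conservatively assume the
 * erratum applies.
 */
static bool c1e_erratum_applies(uint64_t osvw_id_length, uint64_t osvw_status)
{
        if (osvw_id_length >= 2)
                return osvw_status & BIT(1);
        return true;
}

int main(void)
{
        /* Hypothetical register contents, for illustration only. */
        printf("len=3 status=0x2 -> %d\n", c1e_erratum_applies(3, 0x2)); /* 1 */
        printf("len=3 status=0x0 -> %d\n", c1e_erratum_applies(3, 0x0)); /* 0 */
        printf("len=1 status=0x0 -> %d\n", c1e_erratum_applies(1, 0x0)); /* 1 */
        return 0;
}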
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 5d7ba1a449bd..d76e18570c60 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -314,16 +314,17 @@ static void __init reserve_brk(void) | |||
314 | #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) | 314 | #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) |
315 | static void __init relocate_initrd(void) | 315 | static void __init relocate_initrd(void) |
316 | { | 316 | { |
317 | 317 | /* Assume only end is not page aligned */ | |
318 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; | 318 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; |
319 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; | 319 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; |
320 | u64 area_size = PAGE_ALIGN(ramdisk_size); | ||
320 | u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; | 321 | u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; |
321 | u64 ramdisk_here; | 322 | u64 ramdisk_here; |
322 | unsigned long slop, clen, mapaddr; | 323 | unsigned long slop, clen, mapaddr; |
323 | char *p, *q; | 324 | char *p, *q; |
324 | 325 | ||
325 | /* We need to move the initrd down into lowmem */ | 326 | /* We need to move the initrd down into lowmem */ |
326 | ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size, | 327 | ramdisk_here = find_e820_area(0, end_of_lowmem, area_size, |
327 | PAGE_SIZE); | 328 | PAGE_SIZE); |
328 | 329 | ||
329 | if (ramdisk_here == -1ULL) | 330 | if (ramdisk_here == -1ULL) |
@@ -332,7 +333,7 @@ static void __init relocate_initrd(void) | |||
332 | 333 | ||
333 | /* Note: this includes all the lowmem currently occupied by | 334 | /* Note: this includes all the lowmem currently occupied by |
334 | the initrd, we rely on that fact to keep the data intact. */ | 335 | the initrd, we rely on that fact to keep the data intact. */ |
335 | reserve_early(ramdisk_here, ramdisk_here + ramdisk_size, | 336 | reserve_early(ramdisk_here, ramdisk_here + area_size, |
336 | "NEW RAMDISK"); | 337 | "NEW RAMDISK"); |
337 | initrd_start = ramdisk_here + PAGE_OFFSET; | 338 | initrd_start = ramdisk_here + PAGE_OFFSET; |
338 | initrd_end = initrd_start + ramdisk_size; | 339 | initrd_end = initrd_start + ramdisk_size; |
@@ -376,9 +377,10 @@ static void __init relocate_initrd(void) | |||
376 | 377 | ||
377 | static void __init reserve_initrd(void) | 378 | static void __init reserve_initrd(void) |
378 | { | 379 | { |
380 | /* Assume only end is not page aligned */ | ||
379 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; | 381 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; |
380 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; | 382 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; |
381 | u64 ramdisk_end = ramdisk_image + ramdisk_size; | 383 | u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
382 | u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; | 384 | u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; |
383 | 385 | ||
384 | if (!boot_params.hdr.type_of_loader || | 386 | if (!boot_params.hdr.type_of_loader || |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a02e80c3c54b..6808b934d6c0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -242,12 +242,10 @@ static void __cpuinit smp_callin(void) | |||
242 | end_local_APIC_setup(); | 242 | end_local_APIC_setup(); |
243 | map_cpu_to_logical_apicid(); | 243 | map_cpu_to_logical_apicid(); |
244 | 244 | ||
245 | notify_cpu_starting(cpuid); | ||
246 | |||
247 | /* | 245 | /* |
248 | * Need to setup vector mappings before we enable interrupts. | 246 | * Need to setup vector mappings before we enable interrupts. |
249 | */ | 247 | */ |
250 | __setup_vector_irq(smp_processor_id()); | 248 | setup_vector_irq(smp_processor_id()); |
251 | /* | 249 | /* |
252 | * Get our bogomips. | 250 | * Get our bogomips. |
253 | * | 251 | * |
@@ -264,6 +262,8 @@ static void __cpuinit smp_callin(void) | |||
264 | */ | 262 | */ |
265 | smp_store_cpu_info(cpuid); | 263 | smp_store_cpu_info(cpuid); |
266 | 264 | ||
265 | notify_cpu_starting(cpuid); | ||
266 | |||
267 | /* | 267 | /* |
268 | * Allow the master to continue. | 268 | * Allow the master to continue. |
269 | */ | 269 | */ |
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 44879df55696..2cc249718c46 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -291,8 +291,8 @@ SECTIONS | |||
291 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { | 291 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { |
292 | __smp_locks = .; | 292 | __smp_locks = .; |
293 | *(.smp_locks) | 293 | *(.smp_locks) |
294 | __smp_locks_end = .; | ||
295 | . = ALIGN(PAGE_SIZE); | 294 | . = ALIGN(PAGE_SIZE); |
295 | __smp_locks_end = .; | ||
296 | } | 296 | } |
297 | 297 | ||
298 | #ifdef CONFIG_X86_64 | 298 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index e71c5cbc8f35..452ee5b8f309 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -331,11 +331,23 @@ int devmem_is_allowed(unsigned long pagenr) | |||
331 | 331 | ||
332 | void free_init_pages(char *what, unsigned long begin, unsigned long end) | 332 | void free_init_pages(char *what, unsigned long begin, unsigned long end) |
333 | { | 333 | { |
334 | unsigned long addr = begin; | 334 | unsigned long addr; |
335 | unsigned long begin_aligned, end_aligned; | ||
335 | 336 | ||
336 | if (addr >= end) | 337 | /* Make sure boundaries are page aligned */ |
338 | begin_aligned = PAGE_ALIGN(begin); | ||
339 | end_aligned = end & PAGE_MASK; | ||
340 | |||
341 | if (WARN_ON(begin_aligned != begin || end_aligned != end)) { | ||
342 | begin = begin_aligned; | ||
343 | end = end_aligned; | ||
344 | } | ||
345 | |||
346 | if (begin >= end) | ||
337 | return; | 347 | return; |
338 | 348 | ||
349 | addr = begin; | ||
350 | |||
339 | /* | 351 | /* |
340 | * If debugging page accesses then do not free this memory but | 352 | * If debugging page accesses then do not free this memory but |
341 | * mark them not present - any buggy init-section access will | 353 | * mark them not present - any buggy init-section access will |
@@ -343,7 +355,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
343 | */ | 355 | */ |
344 | #ifdef CONFIG_DEBUG_PAGEALLOC | 356 | #ifdef CONFIG_DEBUG_PAGEALLOC |
345 | printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", | 357 | printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", |
346 | begin, PAGE_ALIGN(end)); | 358 | begin, end); |
347 | set_memory_np(begin, (end - begin) >> PAGE_SHIFT); | 359 | set_memory_np(begin, (end - begin) >> PAGE_SHIFT); |
348 | #else | 360 | #else |
349 | /* | 361 | /* |
@@ -358,8 +370,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
358 | for (; addr < end; addr += PAGE_SIZE) { | 370 | for (; addr < end; addr += PAGE_SIZE) { |
359 | ClearPageReserved(virt_to_page(addr)); | 371 | ClearPageReserved(virt_to_page(addr)); |
360 | init_page_count(virt_to_page(addr)); | 372 | init_page_count(virt_to_page(addr)); |
361 | memset((void *)(addr & ~(PAGE_SIZE-1)), | 373 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); |
362 | POISON_FREE_INITMEM, PAGE_SIZE); | ||
363 | free_page(addr); | 374 | free_page(addr); |
364 | totalram_pages++; | 375 | totalram_pages++; |
365 | } | 376 | } |
@@ -376,6 +387,15 @@ void free_initmem(void) | |||
376 | #ifdef CONFIG_BLK_DEV_INITRD | 387 | #ifdef CONFIG_BLK_DEV_INITRD |
377 | void free_initrd_mem(unsigned long start, unsigned long end) | 388 | void free_initrd_mem(unsigned long start, unsigned long end) |
378 | { | 389 | { |
379 | free_init_pages("initrd memory", start, end); | 390 | /* |
391 | * end may not be page aligned, and we cannot align it here: the | ||
392 | * decompressor could be confused by an aligned initrd_end. | ||
393 | * We already reserved the trailing partial page in | ||
394 | * - i386_start_kernel() | ||
395 | * - x86_64_start_kernel() | ||
396 | * - relocate_initrd() | ||
397 | * so here PAGE_ALIGN() is safe and lets that partial page be freed. | ||
398 | */ | ||
399 | free_init_pages("initrd memory", start, PAGE_ALIGN(end)); | ||
380 | } | 400 | } |
381 | #endif | 401 | #endif |
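free_init_pages() now normalizes its boundaries before freeing anything. A self-contained sketch of that normalization, with the page macros defined locally and WARN_ON() replaced by a plain warning (illustration only, not the kernel routine):

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* How many whole pages lie between begin and end after normalization? */
static unsigned long freeable_pages(unsigned long begin, unsigned long end)
{
        unsigned long begin_aligned = PAGE_ALIGN(begin); /* round begin up */
        unsigned long end_aligned   = end & PAGE_MASK;   /* round end down */

        if (begin_aligned != begin || end_aligned != end)
                fprintf(stderr, "warning: unaligned range %#lx..%#lx\n",
                        begin, end);

        if (begin_aligned >= end_aligned)
                return 0;

        return (end_aligned - begin_aligned) / PAGE_SIZE;
}

int main(void)
{
        printf("%lu\n", freeable_pages(0x1000, 0x5000)); /* 4 full pages        */
        printf("%lu\n", freeable_pages(0x1234, 0x5000)); /* 3, begin rounded up */
        return 0;
}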
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 6e22454bfaa6..e31160216efb 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -122,8 +122,8 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
122 | struct acpi_resource_address64 addr; | 122 | struct acpi_resource_address64 addr; |
123 | acpi_status status; | 123 | acpi_status status; |
124 | unsigned long flags; | 124 | unsigned long flags; |
125 | struct resource *root; | 125 | struct resource *root, *conflict; |
126 | u64 start, end; | 126 | u64 start, end, max_len; |
127 | 127 | ||
128 | status = resource_to_addr(acpi_res, &addr); | 128 | status = resource_to_addr(acpi_res, &addr); |
129 | if (!ACPI_SUCCESS(status)) | 129 | if (!ACPI_SUCCESS(status)) |
@@ -140,6 +140,17 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
140 | } else | 140 | } else |
141 | return AE_OK; | 141 | return AE_OK; |
142 | 142 | ||
143 | max_len = addr.maximum - addr.minimum + 1; | ||
144 | if (addr.address_length > max_len) { | ||
145 | dev_printk(KERN_DEBUG, &info->bridge->dev, | ||
146 | "host bridge window length %#llx doesn't fit in " | ||
147 | "%#llx-%#llx, trimming\n", | ||
148 | (unsigned long long) addr.address_length, | ||
149 | (unsigned long long) addr.minimum, | ||
150 | (unsigned long long) addr.maximum); | ||
151 | addr.address_length = max_len; | ||
152 | } | ||
153 | |||
143 | start = addr.minimum + addr.translation_offset; | 154 | start = addr.minimum + addr.translation_offset; |
144 | end = start + addr.address_length - 1; | 155 | end = start + addr.address_length - 1; |
145 | 156 | ||
@@ -157,9 +168,12 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
157 | return AE_OK; | 168 | return AE_OK; |
158 | } | 169 | } |
159 | 170 | ||
160 | if (insert_resource(root, res)) { | 171 | conflict = insert_resource_conflict(root, res); |
172 | if (conflict) { | ||
161 | dev_err(&info->bridge->dev, | 173 | dev_err(&info->bridge->dev, |
162 | "can't allocate host bridge window %pR\n", res); | 174 | "address space collision: host bridge window %pR " |
175 | "conflicts with %s %pR\n", | ||
176 | res, conflict->name, conflict); | ||
163 | } else { | 177 | } else { |
164 | pci_bus_add_resource(info->bus, res, 0); | 178 | pci_bus_add_resource(info->bus, res, 0); |
165 | info->res_num++; | 179 | info->res_num++; |
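The new check above clamps a host bridge window's length to what actually fits between its minimum and maximum addresses. A standalone sketch of that clamp; struct window and its fields are local stand-ins for the few acpi_resource_address64 members involved:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the ACPI address descriptor used above. */
struct window {
        uint64_t minimum;
        uint64_t maximum;
        uint64_t address_length;
};

static void trim_window(struct window *w)
{
        uint64_t max_len = w->maximum - w->minimum + 1;

        if (w->address_length > max_len) {
                printf("window length %#llx doesn't fit in %#llx-%#llx, trimming\n",
                       (unsigned long long)w->address_length,
                       (unsigned long long)w->minimum,
                       (unsigned long long)w->maximum);
                w->address_length = max_len;
        }
}

int main(void)
{
        /* Claims 256K inside a 128K window; gets trimmed to 128K. */
        struct window w = { 0xa0000, 0xbffff, 0x40000 };

        trim_window(&w);
        printf("final length: %#llx\n", (unsigned long long)w.address_length);
        return 0;
}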
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index dece3eb9c906..46fd43f79103 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -127,9 +127,6 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) | |||
127 | continue; | 127 | continue; |
128 | if (!r->start || | 128 | if (!r->start || |
129 | pci_claim_resource(dev, idx) < 0) { | 129 | pci_claim_resource(dev, idx) < 0) { |
130 | dev_info(&dev->dev, | ||
131 | "can't reserve window %pR\n", | ||
132 | r); | ||
133 | /* | 130 | /* |
134 | * Something is wrong with the region. | 131 | * Something is wrong with the region. |
135 | * Invalidate the resource to prevent | 132 | * Invalidate the resource to prevent |
@@ -181,8 +178,6 @@ static void __init pcibios_allocate_resources(int pass) | |||
181 | "BAR %d: reserving %pr (d=%d, p=%d)\n", | 178 | "BAR %d: reserving %pr (d=%d, p=%d)\n", |
182 | idx, r, disabled, pass); | 179 | idx, r, disabled, pass); |
183 | if (pci_claim_resource(dev, idx) < 0) { | 180 | if (pci_claim_resource(dev, idx) < 0) { |
184 | dev_info(&dev->dev, | ||
185 | "can't reserve %pR\n", r); | ||
186 | /* We'll assign a new address later */ | 181 | /* We'll assign a new address later */ |
187 | r->end -= r->start; | 182 | r->end -= r->start; |
188 | r->start = 0; | 183 | r->start = 0; |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index fb7fc24fe727..189cbc2585fa 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/acpi.h> | 8 | #include <linux/acpi.h> |
9 | #include <linux/signal.h> | 9 | #include <linux/signal.h> |
10 | #include <linux/kthread.h> | 10 | #include <linux/kthread.h> |
11 | #include <linux/dmi.h> | ||
11 | 12 | ||
12 | #include <acpi/acpi_drivers.h> | 13 | #include <acpi/acpi_drivers.h> |
13 | 14 | ||
@@ -1032,6 +1033,41 @@ static void acpi_add_id(struct acpi_device *device, const char *dev_id) | |||
1032 | list_add_tail(&id->list, &device->pnp.ids); | 1033 | list_add_tail(&id->list, &device->pnp.ids); |
1033 | } | 1034 | } |
1034 | 1035 | ||
1036 | /* | ||
1037 | * Old IBM workstations have a DSDT bug wherein the SMBus object | ||
1038 | * lacks the SMBUS01 HID and the methods do not have the necessary "_" | ||
1039 | * prefix. Work around this. | ||
1040 | */ | ||
1041 | static int acpi_ibm_smbus_match(struct acpi_device *device) | ||
1042 | { | ||
1043 | acpi_handle h_dummy; | ||
1044 | struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
1045 | int result; | ||
1046 | |||
1047 | if (!dmi_name_in_vendors("IBM")) | ||
1048 | return -ENODEV; | ||
1049 | |||
1050 | /* Look for SMBS object */ | ||
1051 | result = acpi_get_name(device->handle, ACPI_SINGLE_NAME, &path); | ||
1052 | if (result) | ||
1053 | return result; | ||
1054 | |||
1055 | if (strcmp("SMBS", path.pointer)) { | ||
1056 | result = -ENODEV; | ||
1057 | goto out; | ||
1058 | } | ||
1059 | |||
1060 | /* Does it have the necessary (but misnamed) methods? */ | ||
1061 | result = -ENODEV; | ||
1062 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "SBI", &h_dummy)) && | ||
1063 | ACPI_SUCCESS(acpi_get_handle(device->handle, "SBR", &h_dummy)) && | ||
1064 | ACPI_SUCCESS(acpi_get_handle(device->handle, "SBW", &h_dummy))) | ||
1065 | result = 0; | ||
1066 | out: | ||
1067 | kfree(path.pointer); | ||
1068 | return result; | ||
1069 | } | ||
1070 | |||
1035 | static void acpi_device_set_id(struct acpi_device *device) | 1071 | static void acpi_device_set_id(struct acpi_device *device) |
1036 | { | 1072 | { |
1037 | acpi_status status; | 1073 | acpi_status status; |
@@ -1082,6 +1118,8 @@ static void acpi_device_set_id(struct acpi_device *device) | |||
1082 | acpi_add_id(device, ACPI_BAY_HID); | 1118 | acpi_add_id(device, ACPI_BAY_HID); |
1083 | else if (ACPI_SUCCESS(acpi_dock_match(device))) | 1119 | else if (ACPI_SUCCESS(acpi_dock_match(device))) |
1084 | acpi_add_id(device, ACPI_DOCK_HID); | 1120 | acpi_add_id(device, ACPI_DOCK_HID); |
1121 | else if (!acpi_ibm_smbus_match(device)) | ||
1122 | acpi_add_id(device, ACPI_SMBUS_IBM_HID); | ||
1085 | 1123 | ||
1086 | break; | 1124 | break; |
1087 | case ACPI_BUS_TYPE_POWER: | 1125 | case ACPI_BUS_TYPE_POWER: |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 561dec2481cb..277477251a86 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -1667,6 +1667,7 @@ unsigned int ata_sff_host_intr(struct ata_port *ap, | |||
1667 | { | 1667 | { |
1668 | struct ata_eh_info *ehi = &ap->link.eh_info; | 1668 | struct ata_eh_info *ehi = &ap->link.eh_info; |
1669 | u8 status, host_stat = 0; | 1669 | u8 status, host_stat = 0; |
1670 | bool bmdma_stopped = false; | ||
1670 | 1671 | ||
1671 | VPRINTK("ata%u: protocol %d task_state %d\n", | 1672 | VPRINTK("ata%u: protocol %d task_state %d\n", |
1672 | ap->print_id, qc->tf.protocol, ap->hsm_task_state); | 1673 | ap->print_id, qc->tf.protocol, ap->hsm_task_state); |
@@ -1699,6 +1700,7 @@ unsigned int ata_sff_host_intr(struct ata_port *ap, | |||
1699 | 1700 | ||
1700 | /* before we do anything else, clear DMA-Start bit */ | 1701 | /* before we do anything else, clear DMA-Start bit */ |
1701 | ap->ops->bmdma_stop(qc); | 1702 | ap->ops->bmdma_stop(qc); |
1703 | bmdma_stopped = true; | ||
1702 | 1704 | ||
1703 | if (unlikely(host_stat & ATA_DMA_ERR)) { | 1705 | if (unlikely(host_stat & ATA_DMA_ERR)) { |
1704 | /* error when transferring data to/from memory */ | 1706 | /* error when transferring data to/from memory */
@@ -1716,8 +1718,14 @@ unsigned int ata_sff_host_intr(struct ata_port *ap, | |||
1716 | 1718 | ||
1717 | /* check main status, clearing INTRQ if needed */ | 1719 | /* check main status, clearing INTRQ if needed */ |
1718 | status = ata_sff_irq_status(ap); | 1720 | status = ata_sff_irq_status(ap); |
1719 | if (status & ATA_BUSY) | 1721 | if (status & ATA_BUSY) { |
1720 | goto idle_irq; | 1722 | if (bmdma_stopped) { |
1723 | /* BMDMA engine is already stopped, we're screwed */ | ||
1724 | qc->err_mask |= AC_ERR_HSM; | ||
1725 | ap->hsm_task_state = HSM_ST_ERR; | ||
1726 | } else | ||
1727 | goto idle_irq; | ||
1728 | } | ||
1721 | 1729 | ||
1722 | /* ack bmdma irq events */ | 1730 | /* ack bmdma irq events */ |
1723 | ap->ops->sff_irq_clear(ap); | 1731 | ap->ops->sff_irq_clear(ap); |
@@ -1762,13 +1770,16 @@ EXPORT_SYMBOL_GPL(ata_sff_host_intr); | |||
1762 | irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) | 1770 | irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) |
1763 | { | 1771 | { |
1764 | struct ata_host *host = dev_instance; | 1772 | struct ata_host *host = dev_instance; |
1773 | bool retried = false; | ||
1765 | unsigned int i; | 1774 | unsigned int i; |
1766 | unsigned int handled = 0, polling = 0; | 1775 | unsigned int handled, idle, polling; |
1767 | unsigned long flags; | 1776 | unsigned long flags; |
1768 | 1777 | ||
1769 | /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ | 1778 | /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ |
1770 | spin_lock_irqsave(&host->lock, flags); | 1779 | spin_lock_irqsave(&host->lock, flags); |
1771 | 1780 | ||
1781 | retry: | ||
1782 | handled = idle = polling = 0; | ||
1772 | for (i = 0; i < host->n_ports; i++) { | 1783 | for (i = 0; i < host->n_ports; i++) { |
1773 | struct ata_port *ap = host->ports[i]; | 1784 | struct ata_port *ap = host->ports[i]; |
1774 | struct ata_queued_cmd *qc; | 1785 | struct ata_queued_cmd *qc; |
@@ -1782,7 +1793,8 @@ irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) | |||
1782 | handled |= ata_sff_host_intr(ap, qc); | 1793 | handled |= ata_sff_host_intr(ap, qc); |
1783 | else | 1794 | else |
1784 | polling |= 1 << i; | 1795 | polling |= 1 << i; |
1785 | } | 1796 | } else |
1797 | idle |= 1 << i; | ||
1786 | } | 1798 | } |
1787 | 1799 | ||
1788 | /* | 1800 | /* |
@@ -1790,7 +1802,9 @@ irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) | |||
1790 | * asserting IRQ line, nobody cared will ensue. Check IRQ | 1802 | * asserting IRQ line, nobody cared will ensue. Check IRQ |
1791 | * pending status if available and clear spurious IRQ. | 1803 | * pending status if available and clear spurious IRQ. |
1792 | */ | 1804 | */ |
1793 | if (!handled) { | 1805 | if (!handled && !retried) { |
1806 | bool retry = false; | ||
1807 | |||
1794 | for (i = 0; i < host->n_ports; i++) { | 1808 | for (i = 0; i < host->n_ports; i++) { |
1795 | struct ata_port *ap = host->ports[i]; | 1809 | struct ata_port *ap = host->ports[i]; |
1796 | 1810 | ||
@@ -1805,8 +1819,23 @@ irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) | |||
1805 | ata_port_printk(ap, KERN_INFO, | 1819 | ata_port_printk(ap, KERN_INFO, |
1806 | "clearing spurious IRQ\n"); | 1820 | "clearing spurious IRQ\n"); |
1807 | 1821 | ||
1808 | ap->ops->sff_check_status(ap); | 1822 | if (idle & (1 << i)) { |
1809 | ap->ops->sff_irq_clear(ap); | 1823 | ap->ops->sff_check_status(ap); |
1824 | ap->ops->sff_irq_clear(ap); | ||
1825 | } else { | ||
1826 | /* clear INTRQ and check if BUSY cleared */ | ||
1827 | if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) | ||
1828 | retry |= true; | ||
1829 | /* | ||
1830 | * With command in flight, we can't do | ||
1831 | * sff_irq_clear() w/o racing with completion. | ||
1832 | */ | ||
1833 | } | ||
1834 | } | ||
1835 | |||
1836 | if (retry) { | ||
1837 | retried = true; | ||
1838 | goto retry; | ||
1810 | } | 1839 | } |
1811 | } | 1840 | } |
1812 | 1841 | ||
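The interrupt handler above now records per-port state in bitmasks and re-runs the scan at most once when nothing was handled. A toy sketch of that one-shot retry pattern; port_has_work() is an invented stand-in for the per-port polling, not a libata call:

#include <stdio.h>
#include <stdbool.h>

#define NR_PORTS 4

/* Pretend port 2 only reports work on the second pass. */
static bool port_has_work(int port, int pass)
{
        return port == 2 && pass == 1;
}

int main(void)
{
        bool retried = false;
        unsigned int handled, idle;
        int pass = 0;
        int i;

retry:
        handled = idle = 0;
        for (i = 0; i < NR_PORTS; i++) {
                if (port_has_work(i, pass))
                        handled |= 1 << i;
                else
                        idle |= 1 << i;
        }

        /* Nothing handled: clear status once more and re-scan, but only once. */
        if (!handled && !retried) {
                retried = true;
                pass++;
                goto retry;
        }

        printf("handled=%#x idle=%#x retried=%d\n", handled, idle, retried);
        return 0;
}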
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 3059ec017de3..c59b40710fbd 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c | |||
@@ -576,6 +576,10 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
576 | u8 rev = isa->revision; | 576 | u8 rev = isa->revision; |
577 | pci_dev_put(isa); | 577 | pci_dev_put(isa); |
578 | 578 | ||
579 | if ((id->device == 0x0415 || id->device == 0x3164) && | ||
580 | (config->id != id->device)) | ||
581 | continue; | ||
582 | |||
579 | if (rev >= config->rev_min && rev <= config->rev_max) | 583 | if (rev >= config->rev_min && rev <= config->rev_max) |
580 | break; | 584 | break; |
581 | } | 585 | } |
@@ -677,6 +681,7 @@ static const struct pci_device_id via[] = { | |||
677 | { PCI_VDEVICE(VIA, 0x3164), }, | 681 | { PCI_VDEVICE(VIA, 0x3164), }, |
678 | { PCI_VDEVICE(VIA, 0x5324), }, | 682 | { PCI_VDEVICE(VIA, 0x5324), }, |
679 | { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE }, | 683 | { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE }, |
684 | { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE }, | ||
680 | 685 | ||
681 | { }, | 686 | { }, |
682 | }; | 687 | }; |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index d477f4dc5e51..941fcb87e52a 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -439,8 +439,23 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
439 | if (dev->bus && dev->bus->pm) { | 439 | if (dev->bus && dev->bus->pm) { |
440 | pm_dev_dbg(dev, state, "EARLY "); | 440 | pm_dev_dbg(dev, state, "EARLY "); |
441 | error = pm_noirq_op(dev, dev->bus->pm, state); | 441 | error = pm_noirq_op(dev, dev->bus->pm, state); |
442 | if (error) | ||
443 | goto End; | ||
442 | } | 444 | } |
443 | 445 | ||
446 | if (dev->type && dev->type->pm) { | ||
447 | pm_dev_dbg(dev, state, "EARLY type "); | ||
448 | error = pm_noirq_op(dev, dev->type->pm, state); | ||
449 | if (error) | ||
450 | goto End; | ||
451 | } | ||
452 | |||
453 | if (dev->class && dev->class->pm) { | ||
454 | pm_dev_dbg(dev, state, "EARLY class "); | ||
455 | error = pm_noirq_op(dev, dev->class->pm, state); | ||
456 | } | ||
457 | |||
458 | End: | ||
444 | TRACE_RESUME(error); | 459 | TRACE_RESUME(error); |
445 | return error; | 460 | return error; |
446 | } | 461 | } |
@@ -735,10 +750,26 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
735 | { | 750 | { |
736 | int error = 0; | 751 | int error = 0; |
737 | 752 | ||
753 | if (dev->class && dev->class->pm) { | ||
754 | pm_dev_dbg(dev, state, "LATE class "); | ||
755 | error = pm_noirq_op(dev, dev->class->pm, state); | ||
756 | if (error) | ||
757 | goto End; | ||
758 | } | ||
759 | |||
760 | if (dev->type && dev->type->pm) { | ||
761 | pm_dev_dbg(dev, state, "LATE type "); | ||
762 | error = pm_noirq_op(dev, dev->type->pm, state); | ||
763 | if (error) | ||
764 | goto End; | ||
765 | } | ||
766 | |||
738 | if (dev->bus && dev->bus->pm) { | 767 | if (dev->bus && dev->bus->pm) { |
739 | pm_dev_dbg(dev, state, "LATE "); | 768 | pm_dev_dbg(dev, state, "LATE "); |
740 | error = pm_noirq_op(dev, dev->bus->pm, state); | 769 | error = pm_noirq_op(dev, dev->bus->pm, state); |
741 | } | 770 | } |
771 | |||
772 | End: | ||
742 | return error; | 773 | return error; |
743 | } | 774 | } |
744 | 775 | ||
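The noirq paths above now walk the class, type and bus PM ops in mirror-image order on suspend versus resume, stopping at the first error. A toy sketch of that ordering; the callback names are invented for illustration and simply print which layer ran:

#include <stdio.h>

typedef int (*pm_cb)(void);

static int bus_op(void)   { printf("  bus\n");   return 0; }
static int type_op(void)  { printf("  type\n");  return 0; }
static int class_op(void) { printf("  class\n"); return 0; }

/* Run callbacks in order, bailing out on the first error, like the callers of pm_noirq_op(). */
static int run(const pm_cb *cbs, int n)
{
        for (int i = 0; i < n; i++) {
                int error = cbs[i] ? cbs[i]() : 0;

                if (error)
                        return error;
        }
        return 0;
}

int main(void)
{
        const pm_cb resume_order[]  = { bus_op, type_op, class_op }; /* EARLY resume */
        const pm_cb suspend_order[] = { class_op, type_op, bus_op }; /* LATE suspend */

        printf("resume_noirq:\n");
        run(resume_order, 3);
        printf("suspend_noirq:\n");
        run(suspend_order, 3);
        return 0;
}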
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index a3e10dc7cc25..b78d5c381efe 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -97,6 +97,9 @@ EXPORT_SYMBOL(intel_agp_enabled); | |||
97 | #define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | 97 | #define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ |
98 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | 98 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) |
99 | 99 | ||
100 | #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ | ||
101 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
102 | |||
100 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ | 103 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ |
101 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ | 104 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ |
102 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ | 105 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ |
@@ -107,8 +110,7 @@ EXPORT_SYMBOL(intel_agp_enabled); | |||
107 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ | 110 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ |
108 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ | 111 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ |
109 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ | 112 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ |
110 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ | 113 | IS_SNB) |
111 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
112 | 114 | ||
113 | extern int agp_memory_reserved; | 115 | extern int agp_memory_reserved; |
114 | 116 | ||
@@ -175,6 +177,10 @@ extern int agp_memory_reserved; | |||
175 | #define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) | 177 | #define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) |
176 | #define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) | 178 | #define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) |
177 | #define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) | 179 | #define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) |
180 | #define SNB_GTT_SIZE_0M (0 << 8) | ||
181 | #define SNB_GTT_SIZE_1M (1 << 8) | ||
182 | #define SNB_GTT_SIZE_2M (2 << 8) | ||
183 | #define SNB_GTT_SIZE_MASK (3 << 8) | ||
178 | 184 | ||
179 | static const struct aper_size_info_fixed intel_i810_sizes[] = | 185 | static const struct aper_size_info_fixed intel_i810_sizes[] = |
180 | { | 186 | { |
@@ -1200,6 +1206,9 @@ static void intel_i9xx_setup_flush(void) | |||
1200 | if (intel_private.ifp_resource.start) | 1206 | if (intel_private.ifp_resource.start) |
1201 | return; | 1207 | return; |
1202 | 1208 | ||
1209 | if (IS_SNB) | ||
1210 | return; | ||
1211 | |||
1203 | /* setup a resource for this object */ | 1212 | /* setup a resource for this object */ |
1204 | intel_private.ifp_resource.name = "Intel Flush Page"; | 1213 | intel_private.ifp_resource.name = "Intel Flush Page"; |
1205 | intel_private.ifp_resource.flags = IORESOURCE_MEM; | 1214 | intel_private.ifp_resource.flags = IORESOURCE_MEM; |
@@ -1438,6 +1447,8 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, | |||
1438 | 1447 | ||
1439 | static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | 1448 | static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) |
1440 | { | 1449 | { |
1450 | u16 snb_gmch_ctl; | ||
1451 | |||
1441 | switch (agp_bridge->dev->device) { | 1452 | switch (agp_bridge->dev->device) { |
1442 | case PCI_DEVICE_ID_INTEL_GM45_HB: | 1453 | case PCI_DEVICE_ID_INTEL_GM45_HB: |
1443 | case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: | 1454 | case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: |
@@ -1449,9 +1460,26 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | |||
1449 | case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: | 1460 | case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: |
1450 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: | 1461 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: |
1451 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: | 1462 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: |
1463 | *gtt_offset = *gtt_size = MB(2); | ||
1464 | break; | ||
1452 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: | 1465 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: |
1453 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: | 1466 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: |
1454 | *gtt_offset = *gtt_size = MB(2); | 1467 | *gtt_offset = MB(2); |
1468 | |||
1469 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); | ||
1470 | switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { | ||
1471 | default: | ||
1472 | case SNB_GTT_SIZE_0M: | ||
1473 | printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); | ||
1474 | *gtt_size = MB(0); | ||
1475 | break; | ||
1476 | case SNB_GTT_SIZE_1M: | ||
1477 | *gtt_size = MB(1); | ||
1478 | break; | ||
1479 | case SNB_GTT_SIZE_2M: | ||
1480 | *gtt_size = MB(2); | ||
1481 | break; | ||
1482 | } | ||
1455 | break; | 1483 | break; |
1456 | default: | 1484 | default: |
1457 | *gtt_offset = *gtt_size = KB(512); | 1485 | *gtt_offset = *gtt_size = KB(512); |
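On Sandybridge the GTT size is now decoded from bits 9:8 of the GMCH control word instead of being assumed. A standalone sketch of that decode; the register values below are hard-coded stand-ins for what the real code reads with pci_read_config_word():

#include <stdio.h>
#include <stdint.h>

#define MB(x) ((x) * 1024 * 1024)

#define SNB_GTT_SIZE_1M   (1 << 8)
#define SNB_GTT_SIZE_2M   (2 << 8)
#define SNB_GTT_SIZE_MASK (3 << 8)

static int snb_gtt_size(uint16_t snb_gmch_ctl)
{
        switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
        case SNB_GTT_SIZE_1M:
                return MB(1);
        case SNB_GTT_SIZE_2M:
                return MB(2);
        default:
                fprintf(stderr, "Bad GTT size mask: 0x%04x\n", snb_gmch_ctl);
                return 0;
        }
}

int main(void)
{
        /* Hypothetical GMCH control values, for illustration only. */
        printf("%d bytes\n", snb_gtt_size(0x0100)); /* 1 MB GTT      */
        printf("%d bytes\n", snb_gtt_size(0x0200)); /* 2 MB GTT      */
        printf("%d bytes\n", snb_gtt_size(0x0000)); /* bad mask -> 0 */
        return 0;
}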
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index f404ccfc9c20..44288ce0cb45 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -681,6 +681,10 @@ static void resize_console(struct port *port) | |||
681 | struct virtio_device *vdev; | 681 | struct virtio_device *vdev; |
682 | struct winsize ws; | 682 | struct winsize ws; |
683 | 683 | ||
684 | /* The port could have been hot-unplugged */ | ||
685 | if (!port) | ||
686 | return; | ||
687 | |||
684 | vdev = port->portdev->vdev; | 688 | vdev = port->portdev->vdev; |
685 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { | 689 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { |
686 | vdev->config->get(vdev, | 690 | vdev->config->get(vdev, |
@@ -947,11 +951,18 @@ static void handle_control_message(struct ports_device *portdev, | |||
947 | */ | 951 | */ |
948 | err = sysfs_create_group(&port->dev->kobj, | 952 | err = sysfs_create_group(&port->dev->kobj, |
949 | &port_attribute_group); | 953 | &port_attribute_group); |
950 | if (err) | 954 | if (err) { |
951 | dev_err(port->dev, | 955 | dev_err(port->dev, |
952 | "Error %d creating sysfs device attributes\n", | 956 | "Error %d creating sysfs device attributes\n", |
953 | err); | 957 | err); |
954 | 958 | } else { | |
959 | /* | ||
960 | * Generate a udev event so that appropriate | ||
961 | * symlinks can be created based on udev | ||
962 | * rules. | ||
963 | */ | ||
964 | kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); | ||
965 | } | ||
955 | break; | 966 | break; |
956 | case VIRTIO_CONSOLE_PORT_REMOVE: | 967 | case VIRTIO_CONSOLE_PORT_REMOVE: |
957 | /* | 968 | /* |
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c index 8fc91a019620..f5b6d9fe4def 100644 --- a/drivers/edac/edac_mce_amd.c +++ b/drivers/edac/edac_mce_amd.c | |||
@@ -316,7 +316,12 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors) | |||
316 | if (regs->nbsh & K8_NBSH_ERR_CPU_VAL) | 316 | if (regs->nbsh & K8_NBSH_ERR_CPU_VAL) |
317 | pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf)); | 317 | pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf)); |
318 | } else { | 318 | } else { |
319 | pr_cont(", core: %d\n", fls((regs->nbsh & 0xf) - 1)); | 319 | u8 assoc_cpus = regs->nbsh & 0xf; |
320 | |||
321 | if (assoc_cpus > 0) | ||
322 | pr_cont(", core: %d", fls(assoc_cpus) - 1); | ||
323 | |||
324 | pr_cont("\n"); | ||
320 | } | 325 | } |
321 | 326 | ||
322 | pr_emerg("%s.\n", EXT_ERR_MSG(xec)); | 327 | pr_emerg("%s.\n", EXT_ERR_MSG(xec)); |
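The decode fix above prints a core number only when the association mask is non-zero; the old fls(mask - 1) form yields a bogus value for an empty mask. A small sketch with a local fls() built on the GCC/Clang __builtin_clz (the kernel provides its own fls()):

#include <stdio.h>

/* One-based index of the most significant set bit, 0 if none, as in the kernel. */
static int fls(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

static void print_core(unsigned int nbsh)
{
        unsigned int assoc_cpus = nbsh & 0xf;

        if (assoc_cpus > 0)
                printf(", core: %d", fls(assoc_cpus) - 1);
        printf("\n");
}

int main(void)
{
        print_core(0x4); /* bit 2 set    -> core: 2      */
        print_core(0x9); /* bits 0 and 3 -> core: 3      */
        print_core(0x0); /* empty mask   -> no core line */
        return 0;
}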
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 5db0518c66da..882472d1e144 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c | |||
@@ -126,97 +126,74 @@ int fw_csr_string(const u32 *directory, int key, char *buf, size_t size) | |||
126 | } | 126 | } |
127 | EXPORT_SYMBOL(fw_csr_string); | 127 | EXPORT_SYMBOL(fw_csr_string); |
128 | 128 | ||
129 | static bool is_fw_unit(struct device *dev); | 129 | static void get_ids(const u32 *directory, int *id) |
130 | |||
131 | static int match_unit_directory(const u32 *directory, u32 match_flags, | ||
132 | const struct ieee1394_device_id *id) | ||
133 | { | 130 | { |
134 | struct fw_csr_iterator ci; | 131 | struct fw_csr_iterator ci; |
135 | int key, value, match; | 132 | int key, value; |
136 | 133 | ||
137 | match = 0; | ||
138 | fw_csr_iterator_init(&ci, directory); | 134 | fw_csr_iterator_init(&ci, directory); |
139 | while (fw_csr_iterator_next(&ci, &key, &value)) { | 135 | while (fw_csr_iterator_next(&ci, &key, &value)) { |
140 | if (key == CSR_VENDOR && value == id->vendor_id) | 136 | switch (key) { |
141 | match |= IEEE1394_MATCH_VENDOR_ID; | 137 | case CSR_VENDOR: id[0] = value; break; |
142 | if (key == CSR_MODEL && value == id->model_id) | 138 | case CSR_MODEL: id[1] = value; break; |
143 | match |= IEEE1394_MATCH_MODEL_ID; | 139 | case CSR_SPECIFIER_ID: id[2] = value; break; |
144 | if (key == CSR_SPECIFIER_ID && value == id->specifier_id) | 140 | case CSR_VERSION: id[3] = value; break; |
145 | match |= IEEE1394_MATCH_SPECIFIER_ID; | 141 | } |
146 | if (key == CSR_VERSION && value == id->version) | ||
147 | match |= IEEE1394_MATCH_VERSION; | ||
148 | } | 142 | } |
143 | } | ||
144 | |||
145 | static void get_modalias_ids(struct fw_unit *unit, int *id) | ||
146 | { | ||
147 | get_ids(&fw_parent_device(unit)->config_rom[5], id); | ||
148 | get_ids(unit->directory, id); | ||
149 | } | ||
150 | |||
151 | static bool match_ids(const struct ieee1394_device_id *id_table, int *id) | ||
152 | { | ||
153 | int match = 0; | ||
154 | |||
155 | if (id[0] == id_table->vendor_id) | ||
156 | match |= IEEE1394_MATCH_VENDOR_ID; | ||
157 | if (id[1] == id_table->model_id) | ||
158 | match |= IEEE1394_MATCH_MODEL_ID; | ||
159 | if (id[2] == id_table->specifier_id) | ||
160 | match |= IEEE1394_MATCH_SPECIFIER_ID; | ||
161 | if (id[3] == id_table->version) | ||
162 | match |= IEEE1394_MATCH_VERSION; | ||
149 | 163 | ||
150 | return (match & match_flags) == match_flags; | 164 | return (match & id_table->match_flags) == id_table->match_flags; |
151 | } | 165 | } |
152 | 166 | ||
167 | static bool is_fw_unit(struct device *dev); | ||
168 | |||
153 | static int fw_unit_match(struct device *dev, struct device_driver *drv) | 169 | static int fw_unit_match(struct device *dev, struct device_driver *drv) |
154 | { | 170 | { |
155 | struct fw_unit *unit = fw_unit(dev); | 171 | const struct ieee1394_device_id *id_table = |
156 | struct fw_device *device; | 172 | container_of(drv, struct fw_driver, driver)->id_table; |
157 | const struct ieee1394_device_id *id; | 173 | int id[] = {0, 0, 0, 0}; |
158 | 174 | ||
159 | /* We only allow binding to fw_units. */ | 175 | /* We only allow binding to fw_units. */ |
160 | if (!is_fw_unit(dev)) | 176 | if (!is_fw_unit(dev)) |
161 | return 0; | 177 | return 0; |
162 | 178 | ||
163 | device = fw_parent_device(unit); | 179 | get_modalias_ids(fw_unit(dev), id); |
164 | id = container_of(drv, struct fw_driver, driver)->id_table; | ||
165 | 180 | ||
166 | for (; id->match_flags != 0; id++) { | 181 | for (; id_table->match_flags != 0; id_table++) |
167 | if (match_unit_directory(unit->directory, id->match_flags, id)) | 182 | if (match_ids(id_table, id)) |
168 | return 1; | 183 | return 1; |
169 | 184 | ||
170 | /* Also check vendor ID in the root directory. */ | ||
171 | if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) && | ||
172 | match_unit_directory(&device->config_rom[5], | ||
173 | IEEE1394_MATCH_VENDOR_ID, id) && | ||
174 | match_unit_directory(unit->directory, id->match_flags | ||
175 | & ~IEEE1394_MATCH_VENDOR_ID, id)) | ||
176 | return 1; | ||
177 | } | ||
178 | |||
179 | return 0; | 185 | return 0; |
180 | } | 186 | } |
181 | 187 | ||
182 | static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) | 188 | static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) |
183 | { | 189 | { |
184 | struct fw_device *device = fw_parent_device(unit); | 190 | int id[] = {0, 0, 0, 0}; |
185 | struct fw_csr_iterator ci; | ||
186 | 191 | ||
187 | int key, value; | 192 | get_modalias_ids(unit, id); |
188 | int vendor = 0; | ||
189 | int model = 0; | ||
190 | int specifier_id = 0; | ||
191 | int version = 0; | ||
192 | |||
193 | fw_csr_iterator_init(&ci, &device->config_rom[5]); | ||
194 | while (fw_csr_iterator_next(&ci, &key, &value)) { | ||
195 | switch (key) { | ||
196 | case CSR_VENDOR: | ||
197 | vendor = value; | ||
198 | break; | ||
199 | case CSR_MODEL: | ||
200 | model = value; | ||
201 | break; | ||
202 | } | ||
203 | } | ||
204 | |||
205 | fw_csr_iterator_init(&ci, unit->directory); | ||
206 | while (fw_csr_iterator_next(&ci, &key, &value)) { | ||
207 | switch (key) { | ||
208 | case CSR_SPECIFIER_ID: | ||
209 | specifier_id = value; | ||
210 | break; | ||
211 | case CSR_VERSION: | ||
212 | version = value; | ||
213 | break; | ||
214 | } | ||
215 | } | ||
216 | 193 | ||
217 | return snprintf(buffer, buffer_size, | 194 | return snprintf(buffer, buffer_size, |
218 | "ieee1394:ven%08Xmo%08Xsp%08Xver%08X", | 195 | "ieee1394:ven%08Xmo%08Xsp%08Xver%08X", |
219 | vendor, model, specifier_id, version); | 196 | id[0], id[1], id[2], id[3]); |
220 | } | 197 | } |
221 | 198 | ||
222 | static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) | 199 | static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) |
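The refactor above gathers the four CSR values into a single id[] array and compares them against a table entry under its match_flags in one helper. A compact userspace sketch of that flag-masked comparison; the struct and flag names are simplified stand-ins for the ieee1394_device_id definitions:

#include <stdio.h>
#include <stdbool.h>

#define MATCH_VENDOR_ID    0x0001
#define MATCH_MODEL_ID     0x0002
#define MATCH_SPECIFIER_ID 0x0004
#define MATCH_VERSION      0x0008

struct id_entry {
        unsigned int match_flags;
        int vendor_id, model_id, specifier_id, version;
};

/* id[] holds vendor, model, specifier and version gathered from the CSR. */
static bool match_ids(const struct id_entry *e, const int *id)
{
        unsigned int match = 0;

        if (id[0] == e->vendor_id)
                match |= MATCH_VENDOR_ID;
        if (id[1] == e->model_id)
                match |= MATCH_MODEL_ID;
        if (id[2] == e->specifier_id)
                match |= MATCH_SPECIFIER_ID;
        if (id[3] == e->version)
                match |= MATCH_VERSION;

        /* Only the bits this entry cares about have to match. */
        return (match & e->match_flags) == e->match_flags;
}

int main(void)
{
        struct id_entry entry = {
                .match_flags  = MATCH_SPECIFIER_ID | MATCH_VERSION,
                .specifier_id = 0x00609e,
                .version      = 0x010483,
        };
        int dev_id[4] = { 0x001234, 0x000042, 0x00609e, 0x010483 };

        printf("matches: %d\n", match_ids(&entry, dev_id));
        return 0;
}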
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 1c0b504a42f3..99c20f1b613a 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c | |||
@@ -331,8 +331,9 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, | |||
331 | if (ret < 0) | 331 | if (ret < 0) |
332 | *bandwidth = 0; | 332 | *bandwidth = 0; |
333 | 333 | ||
334 | if (allocate && ret < 0 && c >= 0) { | 334 | if (allocate && ret < 0) { |
335 | deallocate_channel(card, irm_id, generation, c, buffer); | 335 | if (c >= 0) |
336 | deallocate_channel(card, irm_id, generation, c, buffer); | ||
336 | *channel = ret; | 337 | *channel = ret; |
337 | } | 338 | } |
338 | } | 339 | } |
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 75dc6988cffd..e33917bf97d2 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -231,6 +231,8 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) | |||
231 | 231 | ||
232 | static char ohci_driver_name[] = KBUILD_MODNAME; | 232 | static char ohci_driver_name[] = KBUILD_MODNAME; |
233 | 233 | ||
234 | #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 | ||
235 | |||
234 | #define QUIRK_CYCLE_TIMER 1 | 236 | #define QUIRK_CYCLE_TIMER 1 |
235 | #define QUIRK_RESET_PACKET 2 | 237 | #define QUIRK_RESET_PACKET 2 |
236 | #define QUIRK_BE_HEADERS 4 | 238 | #define QUIRK_BE_HEADERS 4 |
@@ -239,6 +241,8 @@ static char ohci_driver_name[] = KBUILD_MODNAME; | |||
239 | static const struct { | 241 | static const struct { |
240 | unsigned short vendor, device, flags; | 242 | unsigned short vendor, device, flags; |
241 | } ohci_quirks[] = { | 243 | } ohci_quirks[] = { |
244 | {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER | | ||
245 | QUIRK_RESET_PACKET}, | ||
242 | {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, | 246 | {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, |
243 | {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 247 | {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, |
244 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 248 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, |
diff --git a/drivers/gpio/max730x.c b/drivers/gpio/max730x.c index c9bced55f82b..4a7d662ff9b7 100644 --- a/drivers/gpio/max730x.c +++ b/drivers/gpio/max730x.c | |||
@@ -242,3 +242,7 @@ int __devexit __max730x_remove(struct device *dev) | |||
242 | return ret; | 242 | return ret; |
243 | } | 243 | } |
244 | EXPORT_SYMBOL_GPL(__max730x_remove); | 244 | EXPORT_SYMBOL_GPL(__max730x_remove); |
245 | |||
246 | MODULE_AUTHOR("Juergen Beisert, Wolfram Sang"); | ||
247 | MODULE_LICENSE("GPL v2"); | ||
248 | MODULE_DESCRIPTION("MAX730x GPIO-Expanders, generic parts"); | ||
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index f2aaf39be398..51103aa469f8 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
104 | if (connector->status == connector_status_disconnected) { | 104 | if (connector->status == connector_status_disconnected) { |
105 | DRM_DEBUG_KMS("%s is disconnected\n", | 105 | DRM_DEBUG_KMS("%s is disconnected\n", |
106 | drm_get_connector_name(connector)); | 106 | drm_get_connector_name(connector)); |
107 | drm_mode_connector_update_edid_property(connector, NULL); | ||
107 | goto prune; | 108 | goto prune; |
108 | } | 109 | } |
109 | 110 | ||
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index f97e7c42ac8e..7e608f4a0df9 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -707,15 +707,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
707 | mode->vsync_end = mode->vsync_start + vsync_pulse_width; | 707 | mode->vsync_end = mode->vsync_start + vsync_pulse_width; |
708 | mode->vtotal = mode->vdisplay + vblank; | 708 | mode->vtotal = mode->vdisplay + vblank; |
709 | 709 | ||
710 | /* perform the basic check for the detailed timing */ | ||
711 | if (mode->hsync_end > mode->htotal || | ||
712 | mode->vsync_end > mode->vtotal) { | ||
713 | drm_mode_destroy(dev, mode); | ||
714 | DRM_DEBUG_KMS("Incorrect detailed timing. " | ||
715 | "Sync is beyond the blank.\n"); | ||
716 | return NULL; | ||
717 | } | ||
718 | |||
719 | /* Some EDIDs have bogus h/vtotal values */ | 710 | /* Some EDIDs have bogus h/vtotal values */ |
720 | if (mode->hsync_end > mode->htotal) | 711 | if (mode->hsync_end > mode->htotal) |
721 | mode->htotal = mode->hsync_end + 1; | 712 | mode->htotal = mode->hsync_end + 1; |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 50549703584f..99487237111d 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -283,6 +283,8 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { | |||
283 | .help_msg = "force-fb(V)", | 283 | .help_msg = "force-fb(V)", |
284 | .action_msg = "Restore framebuffer console", | 284 | .action_msg = "Restore framebuffer console", |
285 | }; | 285 | }; |
286 | #else | ||
287 | static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; | ||
286 | #endif | 288 | #endif |
287 | 289 | ||
288 | static void drm_fb_helper_on(struct fb_info *info) | 290 | static void drm_fb_helper_on(struct fb_info *info) |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 08d14df3bb42..4804872f8b19 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp) | |||
140 | spin_unlock(&dev->count_lock); | 140 | spin_unlock(&dev->count_lock); |
141 | } | 141 | } |
142 | out: | 142 | out: |
143 | mutex_lock(&dev->struct_mutex); | 143 | if (!retcode) { |
144 | if (minor->type == DRM_MINOR_LEGACY) { | 144 | mutex_lock(&dev->struct_mutex); |
145 | BUG_ON((dev->dev_mapping != NULL) && | 145 | if (minor->type == DRM_MINOR_LEGACY) { |
146 | (dev->dev_mapping != inode->i_mapping)); | 146 | if (dev->dev_mapping == NULL) |
147 | if (dev->dev_mapping == NULL) | 147 | dev->dev_mapping = inode->i_mapping; |
148 | dev->dev_mapping = inode->i_mapping; | 148 | else if (dev->dev_mapping != inode->i_mapping) |
149 | retcode = -ENODEV; | ||
150 | } | ||
151 | mutex_unlock(&dev->struct_mutex); | ||
149 | } | 152 | } |
150 | mutex_unlock(&dev->struct_mutex); | ||
151 | 153 | ||
152 | return retcode; | 154 | return retcode; |
153 | } | 155 | } |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 8bfc0bbf13e6..a9f8589490cf 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1881,29 +1881,29 @@ struct drm_ioctl_desc i915_ioctls[] = { | |||
1881 | DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), | 1881 | DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), |
1882 | DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), | 1882 | DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), |
1883 | DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1883 | DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1884 | DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1884 | DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1885 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), | 1885 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), |
1886 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH), | 1886 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), |
1887 | DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | 1887 | DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1888 | DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | 1888 | DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1889 | DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), | 1889 | DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), |
1890 | DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH), | 1890 | DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), |
1891 | DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1891 | DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1892 | DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1892 | DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1893 | DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0), | 1893 | DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), |
1894 | DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), | 1894 | DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), |
1895 | DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), | 1895 | DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), |
1896 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), | 1896 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), |
1897 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0), | 1897 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), |
1898 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), | 1898 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), |
1899 | DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), | 1899 | DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), |
1900 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), | 1900 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), |
1901 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), | 1901 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), |
1902 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), | 1902 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), |
1903 | DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), | 1903 | DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), |
1904 | DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), | 1904 | DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), |
1905 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), | 1905 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
1906 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), | 1906 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
1907 | }; | 1907 | }; |
1908 | 1908 | ||
1909 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | 1909 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1b2e95455c05..4b26919abdb2 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -139,12 +139,12 @@ const static struct intel_device_info intel_ironlake_m_info = { | |||
139 | 139 | ||
140 | const static struct intel_device_info intel_sandybridge_d_info = { | 140 | const static struct intel_device_info intel_sandybridge_d_info = { |
141 | .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 141 | .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, |
142 | .has_hotplug = 1, | 142 | .has_hotplug = 1, .is_gen6 = 1, |
143 | }; | 143 | }; |
144 | 144 | ||
145 | const static struct intel_device_info intel_sandybridge_m_info = { | 145 | const static struct intel_device_info intel_sandybridge_m_info = { |
146 | .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 146 | .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, |
147 | .has_hotplug = 1, | 147 | .has_hotplug = 1, .is_gen6 = 1, |
148 | }; | 148 | }; |
149 | 149 | ||
150 | const static struct pci_device_id pciidlist[] = { | 150 | const static struct pci_device_id pciidlist[] = { |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 979439cfb827..aba8260fbc5e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -205,6 +205,7 @@ struct intel_device_info { | |||
205 | u8 is_g4x : 1; | 205 | u8 is_g4x : 1; |
206 | u8 is_pineview : 1; | 206 | u8 is_pineview : 1; |
207 | u8 is_ironlake : 1; | 207 | u8 is_ironlake : 1; |
208 | u8 is_gen6 : 1; | ||
208 | u8 has_fbc : 1; | 209 | u8 has_fbc : 1; |
209 | u8 has_rc6 : 1; | 210 | u8 has_rc6 : 1; |
210 | u8 has_pipe_cxsr : 1; | 211 | u8 has_pipe_cxsr : 1; |
@@ -1084,6 +1085,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
1084 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) | 1085 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) |
1085 | #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) | 1086 | #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) |
1086 | #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) | 1087 | #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) |
1088 | #define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6) | ||
1087 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) | 1089 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
1088 | 1090 | ||
1089 | #define IS_GEN3(dev) (IS_I915G(dev) || \ | 1091 | #define IS_GEN3(dev) (IS_I915G(dev) || \ |
@@ -1107,8 +1109,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
1107 | 1109 | ||
1108 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | 1110 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
1109 | 1111 | ||
1110 | #define IS_GEN6(dev) ((dev)->pci_device == 0x0102) | ||
1111 | |||
1112 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | 1112 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
1113 | * rows, which changed the alignment requirements and fence programming. | 1113 | * rows, which changed the alignment requirements and fence programming. |
1114 | */ | 1114 | */ |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fba37e9f775d..933e865a8929 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1466,9 +1466,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) | |||
1466 | obj_priv->dirty = 0; | 1466 | obj_priv->dirty = 0; |
1467 | 1467 | ||
1468 | for (i = 0; i < page_count; i++) { | 1468 | for (i = 0; i < page_count; i++) { |
1469 | if (obj_priv->pages[i] == NULL) | ||
1470 | break; | ||
1471 | |||
1472 | if (obj_priv->dirty) | 1469 | if (obj_priv->dirty) |
1473 | set_page_dirty(obj_priv->pages[i]); | 1470 | set_page_dirty(obj_priv->pages[i]); |
1474 | 1471 | ||
@@ -2227,11 +2224,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
2227 | seqno = i915_add_request(dev, NULL, obj->write_domain); | 2224 | seqno = i915_add_request(dev, NULL, obj->write_domain); |
2228 | if (seqno == 0) | 2225 | if (seqno == 0) |
2229 | return -ENOMEM; | 2226 | return -ENOMEM; |
2230 | |||
2231 | ret = i915_wait_request(dev, seqno); | ||
2232 | if (ret) | ||
2233 | return ret; | ||
2234 | |||
2235 | continue; | 2227 | continue; |
2236 | } | 2228 | } |
2237 | } | 2229 | } |
@@ -2256,7 +2248,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, | |||
2256 | struct address_space *mapping; | 2248 | struct address_space *mapping; |
2257 | struct inode *inode; | 2249 | struct inode *inode; |
2258 | struct page *page; | 2250 | struct page *page; |
2259 | int ret; | ||
2260 | 2251 | ||
2261 | if (obj_priv->pages_refcount++ != 0) | 2252 | if (obj_priv->pages_refcount++ != 0) |
2262 | return 0; | 2253 | return 0; |
@@ -2279,11 +2270,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, | |||
2279 | mapping_gfp_mask (mapping) | | 2270 | mapping_gfp_mask (mapping) | |
2280 | __GFP_COLD | | 2271 | __GFP_COLD | |
2281 | gfpmask); | 2272 | gfpmask); |
2282 | if (IS_ERR(page)) { | 2273 | if (IS_ERR(page)) |
2283 | ret = PTR_ERR(page); | 2274 | goto err_pages; |
2284 | i915_gem_object_put_pages(obj); | 2275 | |
2285 | return ret; | ||
2286 | } | ||
2287 | obj_priv->pages[i] = page; | 2276 | obj_priv->pages[i] = page; |
2288 | } | 2277 | } |
2289 | 2278 | ||
@@ -2291,6 +2280,15 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, | |||
2291 | i915_gem_object_do_bit_17_swizzle(obj); | 2280 | i915_gem_object_do_bit_17_swizzle(obj); |
2292 | 2281 | ||
2293 | return 0; | 2282 | return 0; |
2283 | |||
2284 | err_pages: | ||
2285 | while (i--) | ||
2286 | page_cache_release(obj_priv->pages[i]); | ||
2287 | |||
2288 | drm_free_large(obj_priv->pages); | ||
2289 | obj_priv->pages = NULL; | ||
2290 | obj_priv->pages_refcount--; | ||
2291 | return PTR_ERR(page); | ||
2294 | } | 2292 | } |
2295 | 2293 | ||
2296 | static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) | 2294 | static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) |
@@ -4730,6 +4728,11 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
4730 | ring->space += ring->Size; | 4728 | ring->space += ring->Size; |
4731 | } | 4729 | } |
4732 | 4730 | ||
4731 | if (IS_I9XX(dev) && !IS_GEN3(dev)) { | ||
4732 | I915_WRITE(MI_MODE, | ||
4733 | (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); | ||
4734 | } | ||
4735 | |||
4733 | return 0; | 4736 | return 0; |
4734 | } | 4737 | } |
4735 | 4738 | ||
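The err_pages path above releases exactly the pages pinned so far with a while (i--) loop before dropping the array and the refcount. A generic sketch of that unwind pattern, using malloc()/free() as stand-ins for page pinning and release:

#include <stdio.h>
#include <stdlib.h>

/* Allocate n buffers, releasing everything already acquired if one fails. */
static void **get_buffers(int n, size_t size)
{
        void **pages = calloc(n, sizeof(*pages));
        int i;

        if (!pages)
                return NULL;

        for (i = 0; i < n; i++) {
                pages[i] = malloc(size);
                if (!pages[i])
                        goto err_pages;
        }
        return pages;

err_pages:
        /* Unwind only what was acquired: indices i-1 down to 0. */
        while (i--)
                free(pages[i]);
        free(pages);
        return NULL;
}

int main(void)
{
        void **bufs = get_buffers(8, 4096);

        if (bufs) {
                for (int i = 0; i < 8; i++)
                        free(bufs[i]);
                free(bufs);
                puts("allocated and released 8 buffers");
        }
        return 0;
}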
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index b5c55d88ff76..c01c878e51ba 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -325,9 +325,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
325 | * need to ensure that any fence register is cleared. | 325 | * need to ensure that any fence register is cleared. |
326 | */ | 326 | */ |
327 | if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) | 327 | if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) |
328 | ret = i915_gem_object_unbind(obj); | 328 | ret = i915_gem_object_unbind(obj); |
329 | else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
330 | ret = i915_gem_object_put_fence_reg(obj); | ||
329 | else | 331 | else |
330 | ret = i915_gem_object_put_fence_reg(obj); | 332 | i915_gem_release_mmap(obj); |
333 | |||
331 | if (ret != 0) { | 334 | if (ret != 0) { |
332 | WARN(ret != -ERESTARTSYS, | 335 | WARN(ret != -ERESTARTSYS, |
333 | "failed to reset object for tiling switch"); | 336 | "failed to reset object for tiling switch"); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3d59862c7ccd..cbbf59f56dfa 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -298,6 +298,10 @@ | |||
298 | #define INSTDONE 0x02090 | 298 | #define INSTDONE 0x02090 |
299 | #define NOPID 0x02094 | 299 | #define NOPID 0x02094 |
300 | #define HWSTAM 0x02098 | 300 | #define HWSTAM 0x02098 |
301 | |||
302 | #define MI_MODE 0x0209c | ||
303 | # define VS_TIMER_DISPATCH (1 << 6) | ||
304 | |||
301 | #define SCPD0 0x0209c /* 915+ only */ | 305 | #define SCPD0 0x0209c /* 915+ only */ |
302 | #define IER 0x020a0 | 306 | #define IER 0x020a0 |
303 | #define IIR 0x020a4 | 307 | #define IIR 0x020a4 |
@@ -366,7 +370,7 @@ | |||
366 | #define FBC_CTL_PERIODIC (1<<30) | 370 | #define FBC_CTL_PERIODIC (1<<30) |
367 | #define FBC_CTL_INTERVAL_SHIFT (16) | 371 | #define FBC_CTL_INTERVAL_SHIFT (16) |
368 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) | 372 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) |
369 | #define FBC_C3_IDLE (1<<13) | 373 | #define FBC_CTL_C3_IDLE (1<<13) |
370 | #define FBC_CTL_STRIDE_SHIFT (5) | 374 | #define FBC_CTL_STRIDE_SHIFT (5) |
371 | #define FBC_CTL_FENCENO (1<<0) | 375 | #define FBC_CTL_FENCENO (1<<0) |
372 | #define FBC_COMMAND 0x0320c | 376 | #define FBC_COMMAND 0x0320c |
@@ -2172,6 +2176,14 @@ | |||
2172 | #define DISPLAY_PORT_PLL_BIOS_1 0x46010 | 2176 | #define DISPLAY_PORT_PLL_BIOS_1 0x46010 |
2173 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 | 2177 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 |
2174 | 2178 | ||
2179 | #define PCH_DSPCLK_GATE_D 0x42020 | ||
2180 | # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) | ||
2181 | # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) | ||
2182 | |||
2183 | #define PCH_3DCGDIS0 0x46020 | ||
2184 | # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) | ||
2185 | # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) | ||
2186 | |||
2175 | #define FDI_PLL_FREQ_CTL 0x46030 | 2187 | #define FDI_PLL_FREQ_CTL 0x46030 |
2176 | #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) | 2188 | #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) |
2177 | #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 | 2189 | #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 |
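
The new MI_MODE/VS_TIMER_DISPATCH definitions pair with the ring-buffer init hunk earlier, which writes (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH. That shape suggests a register whose upper 16 bits act as a write-enable mask for the lower 16; the sketch below models that convention in plain C. fake_reg and write_masked() are illustrative stand-ins, and the masked-write interpretation is an assumption, not something the patch states.

    #include <stdio.h>
    #include <stdint.h>

    #define VS_TIMER_DISPATCH (1 << 6)

    /* Model of a register whose top halfword is a write-enable mask
     * for the bottom halfword (assumed MI_MODE-style behaviour). */
    static uint16_t fake_reg;

    static void write_masked(uint32_t val)
    {
        uint16_t mask = val >> 16;

        fake_reg = (fake_reg & ~mask) | (val & mask);
    }

    int main(void)
    {
        write_masked(VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH); /* set bit 6 */
        printf("reg = 0x%04x\n", (unsigned)fake_reg);
        write_masked(VS_TIMER_DISPATCH << 16);                     /* clear bit 6 */
        printf("reg = 0x%04x\n", (unsigned)fake_reg);
        return 0;
    }
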
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 70c9d4ba7042..f9ba452f0cbf 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -417,8 +417,9 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | |||
417 | edp = find_section(bdb, BDB_EDP); | 417 | edp = find_section(bdb, BDB_EDP); |
418 | if (!edp) { | 418 | if (!edp) { |
419 | if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) { | 419 | if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) { |
420 | DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\ | 420 | DRM_DEBUG_KMS("No eDP BDB found but eDP panel " |
421 | assume 18bpp panel color depth.\n"); | 421 | "supported, assume 18bpp panel color " |
422 | "depth.\n"); | ||
422 | dev_priv->edp_bpp = 18; | 423 | dev_priv->edp_bpp = 18; |
423 | } | 424 | } |
424 | return; | 425 | return; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9cd6de5f9906..58fc7fa0eb1d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1032,7 +1032,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1032 | /* enable it... */ | 1032 | /* enable it... */ |
1033 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; | 1033 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; |
1034 | if (IS_I945GM(dev)) | 1034 | if (IS_I945GM(dev)) |
1035 | fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */ | 1035 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
1036 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 1036 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
1037 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | 1037 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; |
1038 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1038 | if (obj_priv->tiling_mode != I915_TILING_NONE) |
@@ -4717,6 +4717,20 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4717 | * specs, but enable as much else as we can. | 4717 | * specs, but enable as much else as we can. |
4718 | */ | 4718 | */ |
4719 | if (HAS_PCH_SPLIT(dev)) { | 4719 | if (HAS_PCH_SPLIT(dev)) { |
4720 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||
4721 | |||
4722 | if (IS_IRONLAKE(dev)) { | ||
4723 | /* Required for FBC */ | ||
4724 | dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; | ||
4725 | /* Required for CxSR */ | ||
4726 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; | ||
4727 | |||
4728 | I915_WRITE(PCH_3DCGDIS0, | ||
4729 | MARIUNIT_CLOCK_GATE_DISABLE | | ||
4730 | SVSMUNIT_CLOCK_GATE_DISABLE); | ||
4731 | } | ||
4732 | |||
4733 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||
4720 | return; | 4734 | return; |
4721 | } else if (IS_G4X(dev)) { | 4735 | } else if (IS_G4X(dev)) { |
4722 | uint32_t dspclk_gate; | 4736 | uint32_t dspclk_gate; |
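
In the i8xx_enable_fbc hunk, FBC_CTL is assembled by masking each field and shifting it into place with the FBC_CTL_*_SHIFT constants from i915_reg.h. A small worked example of that packing, using made-up pitch/interval values and an assumed FBC_CTL_EN bit (bit 31), purely to show how the control word is composed:

    #include <stdio.h>
    #include <stdint.h>

    #define FBC_CTL_EN              (1u << 31)   /* assumed value */
    #define FBC_CTL_PERIODIC        (1u << 30)
    #define FBC_CTL_INTERVAL_SHIFT  16
    #define FBC_CTL_C3_IDLE         (1u << 13)
    #define FBC_CTL_STRIDE_SHIFT    5

    int main(void)
    {
        uint32_t cfb_pitch = 0x40;      /* example values, not from real hw */
        uint32_t interval  = 1000;
        uint32_t fbc_ctl   = FBC_CTL_EN | FBC_CTL_PERIODIC | FBC_CTL_C3_IDLE;

        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;

        printf("FBC_CTL = 0x%08x\n", (unsigned)fbc_ctl);
        return 0;
    }
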
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 14e516fdc2dd..2b3fa7a3c028 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -607,53 +607,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
607 | I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); | 607 | I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); |
608 | } | 608 | } |
609 | 609 | ||
610 | /* Some lid devices report incorrect lid status, assume they're connected */ | ||
611 | static const struct dmi_system_id bad_lid_status[] = { | ||
612 | { | ||
613 | .ident = "Compaq nx9020", | ||
614 | .matches = { | ||
615 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
616 | DMI_MATCH(DMI_BOARD_NAME, "3084"), | ||
617 | }, | ||
618 | }, | ||
619 | { | ||
620 | .ident = "Samsung SX20S", | ||
621 | .matches = { | ||
622 | DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), | ||
623 | DMI_MATCH(DMI_BOARD_NAME, "SX20S"), | ||
624 | }, | ||
625 | }, | ||
626 | { | ||
627 | .ident = "Aspire One", | ||
628 | .matches = { | ||
629 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
630 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), | ||
631 | }, | ||
632 | }, | ||
633 | { | ||
634 | .ident = "Aspire 1810T", | ||
635 | .matches = { | ||
636 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
637 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), | ||
638 | }, | ||
639 | }, | ||
640 | { | ||
641 | .ident = "PC-81005", | ||
642 | .matches = { | ||
643 | DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), | ||
644 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), | ||
645 | }, | ||
646 | }, | ||
647 | { | ||
648 | .ident = "Clevo M5x0N", | ||
649 | .matches = { | ||
650 | DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), | ||
651 | DMI_MATCH(DMI_BOARD_NAME, "M5x0N"), | ||
652 | }, | ||
653 | }, | ||
654 | { } | ||
655 | }; | ||
656 | |||
657 | /** | 610 | /** |
658 | * Detect the LVDS connection. | 611 | * Detect the LVDS connection. |
659 | * | 612 | * |
@@ -669,12 +622,9 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect | |||
669 | /* ACPI lid methods were generally unreliable in this generation, so | 622 | /* ACPI lid methods were generally unreliable in this generation, so |
670 | * don't even bother. | 623 | * don't even bother. |
671 | */ | 624 | */ |
672 | if (IS_GEN2(dev)) | 625 | if (IS_GEN2(dev) || IS_GEN3(dev)) |
673 | return connector_status_connected; | 626 | return connector_status_connected; |
674 | 627 | ||
675 | if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) | ||
676 | status = connector_status_disconnected; | ||
677 | |||
678 | return status; | 628 | return status; |
679 | } | 629 | } |
680 | 630 | ||
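
The LVDS hunk drops the bad_lid_status DMI quirk table in favour of treating all gen2/gen3 hardware as connected. For reference, such a quirk list is simply a sentinel-terminated array matched entry by entry; the sketch below is a simplified userspace version that uses exact string comparison only, unlike the kernel's dmi_check_system matching:

    #include <stdio.h>
    #include <string.h>

    struct lid_quirk {
        const char *ident;
        const char *sys_vendor;
        const char *board_name;
    };

    /* Sentinel-terminated table, same shape as a dmi_system_id list. */
    static const struct lid_quirk bad_lid_status[] = {
        { "Compaq nx9020", "Hewlett-Packard",     "3084"  },
        { "Samsung SX20S", "Samsung Electronics", "SX20S" },
        { NULL, NULL, NULL }
    };

    static const struct lid_quirk *match_quirk(const char *vendor,
                                               const char *board)
    {
        const struct lid_quirk *q;

        for (q = bad_lid_status; q->ident; q++)
            if (!strcmp(q->sys_vendor, vendor) && !strcmp(q->board_name, board))
                return q;
        return NULL;
    }

    int main(void)
    {
        const struct lid_quirk *q = match_quirk("Hewlett-Packard", "3084");

        printf("%s\n", q ? q->ident : "no quirk");
        return 0;
    }
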
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d355d1d527e7..60595fc26fdd 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -1068,14 +1068,18 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1068 | 1068 | ||
1069 | drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, | 1069 | drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, |
1070 | DRM_MODE_OBJECT_CRTC); | 1070 | DRM_MODE_OBJECT_CRTC); |
1071 | if (!drmmode_obj) | 1071 | if (!drmmode_obj) { |
1072 | return -ENOENT; | 1072 | ret = -ENOENT; |
1073 | goto out_free; | ||
1074 | } | ||
1073 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); | 1075 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); |
1074 | 1076 | ||
1075 | new_bo = drm_gem_object_lookup(dev, file_priv, | 1077 | new_bo = drm_gem_object_lookup(dev, file_priv, |
1076 | put_image_rec->bo_handle); | 1078 | put_image_rec->bo_handle); |
1077 | if (!new_bo) | 1079 | if (!new_bo) { |
1078 | return -ENOENT; | 1080 | ret = -ENOENT; |
1081 | goto out_free; | ||
1082 | } | ||
1079 | 1083 | ||
1080 | mutex_lock(&dev->mode_config.mutex); | 1084 | mutex_lock(&dev->mode_config.mutex); |
1081 | mutex_lock(&dev->struct_mutex); | 1085 | mutex_lock(&dev->struct_mutex); |
@@ -1165,6 +1169,7 @@ out_unlock: | |||
1165 | mutex_unlock(&dev->struct_mutex); | 1169 | mutex_unlock(&dev->struct_mutex); |
1166 | mutex_unlock(&dev->mode_config.mutex); | 1170 | mutex_unlock(&dev->mode_config.mutex); |
1167 | drm_gem_object_unreference_unlocked(new_bo); | 1171 | drm_gem_object_unreference_unlocked(new_bo); |
1172 | out_free: | ||
1168 | kfree(params); | 1173 | kfree(params); |
1169 | 1174 | ||
1170 | return ret; | 1175 | return ret; |
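
The overlay fix converts two early return -ENOENT statements into jumps to a new out_free label so that the params allocation made earlier in the function is always released. A minimal sketch of that single-exit pattern, with hypothetical lookup_object()/put_image() helpers and plain integers standing in for the errno constants:

    #include <stdio.h>
    #include <stdlib.h>

    static void *lookup_object(int ok) { return ok ? malloc(1) : NULL; }

    static int put_image(int have_crtc)
    {
        int ret = 0;
        void *params = malloc(64);      /* allocated before any lookups */
        void *obj;

        if (!params)
            return -12;                 /* -ENOMEM: nothing to unwind yet */

        obj = lookup_object(have_crtc);
        if (!obj) {
            ret = -2;                   /* -ENOENT */
            goto out_free;              /* don't leak params */
        }

        /* ... normal path would use obj and params here ... */
        free(obj);

    out_free:
        free(params);
        return ret;
    }

    int main(void)
    {
        printf("missing crtc: %d\n", put_image(0));
        printf("ok: %d\n", put_image(1));
        return 0;
    }
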
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 32db806f3b5a..7f0d807a0d0d 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile | |||
@@ -12,7 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ | |||
12 | nouveau_dp.o nouveau_grctx.o \ | 12 | nouveau_dp.o nouveau_grctx.o \ |
13 | nv04_timer.o \ | 13 | nv04_timer.o \ |
14 | nv04_mc.o nv40_mc.o nv50_mc.o \ | 14 | nv04_mc.o nv40_mc.o nv50_mc.o \ |
15 | nv04_fb.o nv10_fb.o nv40_fb.o \ | 15 | nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \ |
16 | nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ | 16 | nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ |
17 | nv04_graph.o nv10_graph.o nv20_graph.o \ | 17 | nv04_graph.o nv10_graph.o nv20_graph.o \ |
18 | nv40_graph.o nv50_graph.o \ | 18 | nv40_graph.o nv50_graph.o \ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 75bceee76044..b5a9336a2e88 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -5211,6 +5211,21 @@ divine_connector_type(struct nvbios *bios, int index) | |||
5211 | } | 5211 | } |
5212 | 5212 | ||
5213 | static void | 5213 | static void |
5214 | apply_dcb_connector_quirks(struct nvbios *bios, int idx) | ||
5215 | { | ||
5216 | struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx]; | ||
5217 | struct drm_device *dev = bios->dev; | ||
5218 | |||
5219 | /* Gigabyte NX85T */ | ||
5220 | if ((dev->pdev->device == 0x0421) && | ||
5221 | (dev->pdev->subsystem_vendor == 0x1458) && | ||
5222 | (dev->pdev->subsystem_device == 0x344c)) { | ||
5223 | if (cte->type == DCB_CONNECTOR_HDMI_1) | ||
5224 | cte->type = DCB_CONNECTOR_DVI_I; | ||
5225 | } | ||
5226 | } | ||
5227 | |||
5228 | static void | ||
5214 | parse_dcb_connector_table(struct nvbios *bios) | 5229 | parse_dcb_connector_table(struct nvbios *bios) |
5215 | { | 5230 | { |
5216 | struct drm_device *dev = bios->dev; | 5231 | struct drm_device *dev = bios->dev; |
@@ -5238,13 +5253,14 @@ parse_dcb_connector_table(struct nvbios *bios) | |||
5238 | entry = conntab + conntab[1]; | 5253 | entry = conntab + conntab[1]; |
5239 | cte = &ct->entry[0]; | 5254 | cte = &ct->entry[0]; |
5240 | for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) { | 5255 | for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) { |
5256 | cte->index = i; | ||
5241 | if (conntab[3] == 2) | 5257 | if (conntab[3] == 2) |
5242 | cte->entry = ROM16(entry[0]); | 5258 | cte->entry = ROM16(entry[0]); |
5243 | else | 5259 | else |
5244 | cte->entry = ROM32(entry[0]); | 5260 | cte->entry = ROM32(entry[0]); |
5245 | 5261 | ||
5246 | cte->type = (cte->entry & 0x000000ff) >> 0; | 5262 | cte->type = (cte->entry & 0x000000ff) >> 0; |
5247 | cte->index = (cte->entry & 0x00000f00) >> 8; | 5263 | cte->index2 = (cte->entry & 0x00000f00) >> 8; |
5248 | switch (cte->entry & 0x00033000) { | 5264 | switch (cte->entry & 0x00033000) { |
5249 | case 0x00001000: | 5265 | case 0x00001000: |
5250 | cte->gpio_tag = 0x07; | 5266 | cte->gpio_tag = 0x07; |
@@ -5266,6 +5282,8 @@ parse_dcb_connector_table(struct nvbios *bios) | |||
5266 | if (cte->type == 0xff) | 5282 | if (cte->type == 0xff) |
5267 | continue; | 5283 | continue; |
5268 | 5284 | ||
5285 | apply_dcb_connector_quirks(bios, i); | ||
5286 | |||
5269 | NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", | 5287 | NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", |
5270 | i, cte->entry, cte->type, cte->index, cte->gpio_tag); | 5288 | i, cte->entry, cte->type, cte->index, cte->gpio_tag); |
5271 | 5289 | ||
@@ -5287,10 +5305,16 @@ parse_dcb_connector_table(struct nvbios *bios) | |||
5287 | break; | 5305 | break; |
5288 | default: | 5306 | default: |
5289 | cte->type = divine_connector_type(bios, cte->index); | 5307 | cte->type = divine_connector_type(bios, cte->index); |
5290 | NV_WARN(dev, "unknown type, using 0x%02x", cte->type); | 5308 | NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type); |
5291 | break; | 5309 | break; |
5292 | } | 5310 | } |
5293 | 5311 | ||
5312 | if (nouveau_override_conntype) { | ||
5313 | int type = divine_connector_type(bios, cte->index); | ||
5314 | if (type != cte->type) | ||
5315 | NV_WARN(dev, " -> type 0x%02x\n", cte->type); | ||
5316 | } | ||
5317 | |||
5294 | } | 5318 | } |
5295 | } | 5319 | } |
5296 | 5320 | ||
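
The connector-table parser above pulls the connector type, a secondary index and a GPIO tag out of one packed 32-bit DCB entry. The sketch below decodes a made-up entry the same way; only the one GPIO case visible in the hunk is handled, and 0xff as the "no tag" default is an assumption:

    #include <stdio.h>
    #include <stdint.h>

    struct dcb_conn {
        uint32_t entry;
        uint8_t type;
        uint8_t index2;
        uint8_t gpio_tag;
    };

    /* Decode one raw DCB connector entry: type in bits 7:0, secondary
     * index in bits 11:8, GPIO tag picked from bits 17:16/13:12. */
    static void decode_entry(struct dcb_conn *c, uint32_t entry)
    {
        c->entry  = entry;
        c->type   = (entry & 0x000000ff) >> 0;
        c->index2 = (entry & 0x00000f00) >> 8;

        switch (entry & 0x00033000) {
        case 0x00001000:
            c->gpio_tag = 0x07;
            break;
        default:
            c->gpio_tag = 0xff;     /* assumed "no tag" marker */
            break;
        }
    }

    int main(void)
    {
        struct dcb_conn c;

        decode_entry(&c, 0x00001246);   /* made-up raw entry */
        printf("type 0x%02x idx %d tag 0x%02x\n", c.type, c.index2, c.gpio_tag);
        return 0;
    }
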
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 9f688aa9a655..4f88e6924d27 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h | |||
@@ -72,9 +72,10 @@ enum dcb_connector_type { | |||
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct dcb_connector_table_entry { | 74 | struct dcb_connector_table_entry { |
75 | uint8_t index; | ||
75 | uint32_t entry; | 76 | uint32_t entry; |
76 | enum dcb_connector_type type; | 77 | enum dcb_connector_type type; |
77 | uint8_t index; | 78 | uint8_t index2; |
78 | uint8_t gpio_tag; | 79 | uint8_t gpio_tag; |
79 | }; | 80 | }; |
80 | 81 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 028719fddf76..026612471c92 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -439,8 +439,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) | |||
439 | 439 | ||
440 | switch (bo->mem.mem_type) { | 440 | switch (bo->mem.mem_type) { |
441 | case TTM_PL_VRAM: | 441 | case TTM_PL_VRAM: |
442 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT | | 442 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT); |
443 | TTM_PL_FLAG_SYSTEM); | ||
444 | break; | 443 | break; |
445 | default: | 444 | default: |
446 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); | 445 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 24327f468c4b..14afe1e47e57 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -302,7 +302,7 @@ nouveau_connector_detect(struct drm_connector *connector) | |||
302 | 302 | ||
303 | detect_analog: | 303 | detect_analog: |
304 | nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); | 304 | nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); |
305 | if (!nv_encoder) | 305 | if (!nv_encoder && !nouveau_tv_disable) |
306 | nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); | 306 | nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); |
307 | if (nv_encoder) { | 307 | if (nv_encoder) { |
308 | struct drm_encoder *encoder = to_drm_encoder(nv_encoder); | 308 | struct drm_encoder *encoder = to_drm_encoder(nv_encoder); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index c8482a108a78..65c441a1999f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
@@ -190,6 +190,11 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, | |||
190 | nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); | 190 | nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); |
191 | 191 | ||
192 | chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; | 192 | chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; |
193 | |||
194 | DRM_MEMORYBARRIER(); | ||
195 | /* Flush writes. */ | ||
196 | nouveau_bo_rd32(pb, 0); | ||
197 | |||
193 | nvchan_wr32(chan, 0x8c, chan->dma.ib_put); | 198 | nvchan_wr32(chan, 0x8c, chan->dma.ib_put); |
194 | chan->dma.ib_free--; | 199 | chan->dma.ib_free--; |
195 | } | 200 | } |
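
The nv50_dma_push change inserts DRM_MEMORYBARRIER() plus a dummy read of the pushbuffer before the ib_put doorbell write, so the hardware can never see the new put pointer ahead of the entry it points at. The userspace sketch below shows the same producer-side ordering with C11 atomics; the flushing read-back of a write-combined mapping has no direct analogue here, so a release store stands in for the barrier-plus-doorbell sequence.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdint.h>

    #define RING_SIZE 16              /* power of two */

    static uint64_t entries[RING_SIZE];
    static atomic_uint put;           /* the "doorbell" a consumer polls */

    static void push(uint64_t val)
    {
        unsigned int p = atomic_load_explicit(&put, memory_order_relaxed);

        entries[p & (RING_SIZE - 1)] = val;     /* fill the entry first */

        /* Publish the new put pointer only after the entry is visible,
         * mirroring the barrier + flushing read added in the hunk. */
        atomic_store_explicit(&put, p + 1, memory_order_release);
    }

    int main(void)
    {
        push(0xdeadbeef);
        printf("put=%u entry=0x%llx\n",
               atomic_load(&put),
               (unsigned long long)entries[0]);
        return 0;
    }
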
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 30cc09e8a709..1de974acbc65 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
@@ -83,6 +83,14 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); | |||
83 | int nouveau_nofbaccel = 0; | 83 | int nouveau_nofbaccel = 0; |
84 | module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); | 84 | module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); |
85 | 85 | ||
86 | MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type"); | ||
87 | int nouveau_override_conntype = 0; | ||
88 | module_param_named(override_conntype, nouveau_override_conntype, int, 0400); | ||
89 | |||
90 | MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n"); | ||
91 | int nouveau_tv_disable = 0; | ||
92 | module_param_named(tv_disable, nouveau_tv_disable, int, 0400); | ||
93 | |||
86 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" | 94 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" |
87 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" | 95 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" |
88 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" | 96 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" |
@@ -154,9 +162,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
154 | if (pm_state.event == PM_EVENT_PRETHAW) | 162 | if (pm_state.event == PM_EVENT_PRETHAW) |
155 | return 0; | 163 | return 0; |
156 | 164 | ||
165 | NV_INFO(dev, "Disabling fbcon acceleration...\n"); | ||
157 | fbdev_flags = dev_priv->fbdev_info->flags; | 166 | fbdev_flags = dev_priv->fbdev_info->flags; |
158 | dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; | 167 | dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; |
159 | 168 | ||
169 | NV_INFO(dev, "Unpinning framebuffer(s)...\n"); | ||
160 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 170 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
161 | struct nouveau_framebuffer *nouveau_fb; | 171 | struct nouveau_framebuffer *nouveau_fb; |
162 | 172 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 4b9aaf2a8d0f..d8b559011777 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -681,6 +681,7 @@ extern int nouveau_uscript_tmds; | |||
681 | extern int nouveau_vram_pushbuf; | 681 | extern int nouveau_vram_pushbuf; |
682 | extern int nouveau_vram_notify; | 682 | extern int nouveau_vram_notify; |
683 | extern int nouveau_fbpercrtc; | 683 | extern int nouveau_fbpercrtc; |
684 | extern int nouveau_tv_disable; | ||
684 | extern char *nouveau_tv_norm; | 685 | extern char *nouveau_tv_norm; |
685 | extern int nouveau_reg_debug; | 686 | extern int nouveau_reg_debug; |
686 | extern char *nouveau_vbios; | 687 | extern char *nouveau_vbios; |
@@ -688,6 +689,7 @@ extern int nouveau_ctxfw; | |||
688 | extern int nouveau_ignorelid; | 689 | extern int nouveau_ignorelid; |
689 | extern int nouveau_nofbaccel; | 690 | extern int nouveau_nofbaccel; |
690 | extern int nouveau_noaccel; | 691 | extern int nouveau_noaccel; |
692 | extern int nouveau_override_conntype; | ||
691 | 693 | ||
692 | extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); | 694 | extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); |
693 | extern int nouveau_pci_resume(struct pci_dev *pdev); | 695 | extern int nouveau_pci_resume(struct pci_dev *pdev); |
@@ -926,6 +928,10 @@ extern void nv40_fb_takedown(struct drm_device *); | |||
926 | extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, | 928 | extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, |
927 | uint32_t, uint32_t); | 929 | uint32_t, uint32_t); |
928 | 930 | ||
931 | /* nv50_fb.c */ | ||
932 | extern int nv50_fb_init(struct drm_device *); | ||
933 | extern void nv50_fb_takedown(struct drm_device *); | ||
934 | |||
929 | /* nv04_fifo.c */ | 935 | /* nv04_fifo.c */ |
930 | extern int nv04_fifo_init(struct drm_device *); | 936 | extern int nv04_fifo_init(struct drm_device *); |
931 | extern void nv04_fifo_disable(struct drm_device *); | 937 | extern void nv04_fifo_disable(struct drm_device *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 95220ddebb45..2bd59a92fee5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c | |||
@@ -311,6 +311,31 @@ nouveau_print_bitfield_names_(uint32_t value, | |||
311 | #define nouveau_print_bitfield_names(val, namelist) \ | 311 | #define nouveau_print_bitfield_names(val, namelist) \ |
312 | nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) | 312 | nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) |
313 | 313 | ||
314 | struct nouveau_enum_names { | ||
315 | uint32_t value; | ||
316 | const char *name; | ||
317 | }; | ||
318 | |||
319 | static void | ||
320 | nouveau_print_enum_names_(uint32_t value, | ||
321 | const struct nouveau_enum_names *namelist, | ||
322 | const int namelist_len) | ||
323 | { | ||
324 | /* | ||
325 | * Caller must have already printed the KERN_* log level for us. | ||
326 | * Also the caller is responsible for adding the newline. | ||
327 | */ | ||
328 | int i; | ||
329 | for (i = 0; i < namelist_len; ++i) { | ||
330 | if (value == namelist[i].value) { | ||
331 | printk("%s", namelist[i].name); | ||
332 | return; | ||
333 | } | ||
334 | } | ||
335 | printk("unknown value 0x%08x", value); | ||
336 | } | ||
337 | #define nouveau_print_enum_names(val, namelist) \ | ||
338 | nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist)) | ||
314 | 339 | ||
315 | static int | 340 | static int |
316 | nouveau_graph_chid_from_grctx(struct drm_device *dev) | 341 | nouveau_graph_chid_from_grctx(struct drm_device *dev) |
@@ -427,14 +452,16 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, | |||
427 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 452 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
428 | uint32_t nsource = trap->nsource, nstatus = trap->nstatus; | 453 | uint32_t nsource = trap->nsource, nstatus = trap->nstatus; |
429 | 454 | ||
430 | NV_INFO(dev, "%s - nSource:", id); | 455 | if (dev_priv->card_type < NV_50) { |
431 | nouveau_print_bitfield_names(nsource, nsource_names); | 456 | NV_INFO(dev, "%s - nSource:", id); |
432 | printk(", nStatus:"); | 457 | nouveau_print_bitfield_names(nsource, nsource_names); |
433 | if (dev_priv->card_type < NV_10) | 458 | printk(", nStatus:"); |
434 | nouveau_print_bitfield_names(nstatus, nstatus_names); | 459 | if (dev_priv->card_type < NV_10) |
435 | else | 460 | nouveau_print_bitfield_names(nstatus, nstatus_names); |
436 | nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); | 461 | else |
437 | printk("\n"); | 462 | nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); |
463 | printk("\n"); | ||
464 | } | ||
438 | 465 | ||
439 | NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " | 466 | NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " |
440 | "Data 0x%08x:0x%08x\n", | 467 | "Data 0x%08x:0x%08x\n", |
@@ -578,27 +605,502 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) | |||
578 | } | 605 | } |
579 | 606 | ||
580 | static void | 607 | static void |
608 | nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name) | ||
609 | { | ||
610 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
611 | uint32_t trap[6]; | ||
612 | int i, ch; | ||
613 | uint32_t idx = nv_rd32(dev, 0x100c90); | ||
614 | if (idx & 0x80000000) { | ||
615 | idx &= 0xffffff; | ||
616 | if (display) { | ||
617 | for (i = 0; i < 6; i++) { | ||
618 | nv_wr32(dev, 0x100c90, idx | i << 24); | ||
619 | trap[i] = nv_rd32(dev, 0x100c94); | ||
620 | } | ||
621 | for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { | ||
622 | struct nouveau_channel *chan = dev_priv->fifos[ch]; | ||
623 | |||
624 | if (!chan || !chan->ramin) | ||
625 | continue; | ||
626 | |||
627 | if (trap[1] == chan->ramin->instance >> 12) | ||
628 | break; | ||
629 | } | ||
630 | NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n", | ||
631 | name, (trap[5]&0x100?"read":"write"), | ||
632 | trap[5]&0xff, trap[4]&0xffff, | ||
633 | trap[3]&0xffff, trap[0], trap[2], ch); | ||
634 | } | ||
635 | nv_wr32(dev, 0x100c90, idx | 0x80000000); | ||
636 | } else if (display) { | ||
637 | NV_INFO(dev, "%s - no VM fault?\n", name); | ||
638 | } | ||
639 | } | ||
640 | |||
641 | static struct nouveau_enum_names nv50_mp_exec_error_names[] = | ||
642 | { | ||
643 | { 3, "STACK_UNDERFLOW" }, | ||
644 | { 4, "QUADON_ACTIVE" }, | ||
645 | { 8, "TIMEOUT" }, | ||
646 | { 0x10, "INVALID_OPCODE" }, | ||
647 | { 0x40, "BREAKPOINT" }, | ||
648 | }; | ||
649 | |||
650 | static void | ||
651 | nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) | ||
652 | { | ||
653 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
654 | uint32_t units = nv_rd32(dev, 0x1540); | ||
655 | uint32_t addr, mp10, status, pc, oplow, ophigh; | ||
656 | int i; | ||
657 | int mps = 0; | ||
658 | for (i = 0; i < 4; i++) { | ||
659 | if (!(units & 1 << (i+24))) | ||
660 | continue; | ||
661 | if (dev_priv->chipset < 0xa0) | ||
662 | addr = 0x408200 + (tpid << 12) + (i << 7); | ||
663 | else | ||
664 | addr = 0x408100 + (tpid << 11) + (i << 7); | ||
665 | mp10 = nv_rd32(dev, addr + 0x10); | ||
666 | status = nv_rd32(dev, addr + 0x14); | ||
667 | if (!status) | ||
668 | continue; | ||
669 | if (display) { | ||
670 | nv_rd32(dev, addr + 0x20); | ||
671 | pc = nv_rd32(dev, addr + 0x24); | ||
672 | oplow = nv_rd32(dev, addr + 0x70); | ||
673 | ophigh= nv_rd32(dev, addr + 0x74); | ||
674 | NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " | ||
675 | "TP %d MP %d: ", tpid, i); | ||
676 | nouveau_print_enum_names(status, | ||
677 | nv50_mp_exec_error_names); | ||
678 | printk(" at %06x warp %d, opcode %08x %08x\n", | ||
679 | pc&0xffffff, pc >> 24, | ||
680 | oplow, ophigh); | ||
681 | } | ||
682 | nv_wr32(dev, addr + 0x10, mp10); | ||
683 | nv_wr32(dev, addr + 0x14, 0); | ||
684 | mps++; | ||
685 | } | ||
686 | if (!mps && display) | ||
687 | NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " | ||
688 | "No MPs claiming errors?\n", tpid); | ||
689 | } | ||
690 | |||
691 | static void | ||
692 | nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, | ||
693 | uint32_t ustatus_new, int display, const char *name) | ||
694 | { | ||
695 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
696 | int tps = 0; | ||
697 | uint32_t units = nv_rd32(dev, 0x1540); | ||
698 | int i, r; | ||
699 | uint32_t ustatus_addr, ustatus; | ||
700 | for (i = 0; i < 16; i++) { | ||
701 | if (!(units & (1 << i))) | ||
702 | continue; | ||
703 | if (dev_priv->chipset < 0xa0) | ||
704 | ustatus_addr = ustatus_old + (i << 12); | ||
705 | else | ||
706 | ustatus_addr = ustatus_new + (i << 11); | ||
707 | ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; | ||
708 | if (!ustatus) | ||
709 | continue; | ||
710 | tps++; | ||
711 | switch (type) { | ||
712 | case 6: /* texture error... unknown for now */ | ||
713 | nv50_pfb_vm_trap(dev, display, name); | ||
714 | if (display) { | ||
715 | NV_ERROR(dev, "magic set %d:\n", i); | ||
716 | for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) | ||
717 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
718 | nv_rd32(dev, r)); | ||
719 | } | ||
720 | break; | ||
721 | case 7: /* MP error */ | ||
722 | if (ustatus & 0x00010000) { | ||
723 | nv50_pgraph_mp_trap(dev, i, display); | ||
724 | ustatus &= ~0x00010000; | ||
725 | } | ||
726 | break; | ||
727 | case 8: /* TPDMA error */ | ||
728 | { | ||
729 | uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); | ||
730 | uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); | ||
731 | uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); | ||
732 | uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); | ||
733 | uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); | ||
734 | uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); | ||
735 | uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); | ||
736 | nv50_pfb_vm_trap(dev, display, name); | ||
737 | /* 2d engine destination */ | ||
738 | if (ustatus & 0x00000010) { | ||
739 | if (display) { | ||
740 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", | ||
741 | i, e14, e10); | ||
742 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
743 | i, e0c, e18, e1c, e20, e24); | ||
744 | } | ||
745 | ustatus &= ~0x00000010; | ||
746 | } | ||
747 | /* Render target */ | ||
748 | if (ustatus & 0x00000040) { | ||
749 | if (display) { | ||
750 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", | ||
751 | i, e14, e10); | ||
752 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
753 | i, e0c, e18, e1c, e20, e24); | ||
754 | } | ||
755 | ustatus &= ~0x00000040; | ||
756 | } | ||
757 | /* CUDA memory: l[], g[] or stack. */ | ||
758 | if (ustatus & 0x00000080) { | ||
759 | if (display) { | ||
760 | if (e18 & 0x80000000) { | ||
761 | /* g[] read fault? */ | ||
762 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", | ||
763 | i, e14, e10 | ((e18 >> 24) & 0x1f)); | ||
764 | e18 &= ~0x1f000000; | ||
765 | } else if (e18 & 0xc) { | ||
766 | /* g[] write fault? */ | ||
767 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", | ||
768 | i, e14, e10 | ((e18 >> 7) & 0x1f)); | ||
769 | e18 &= ~0x00000f80; | ||
770 | } else { | ||
771 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", | ||
772 | i, e14, e10); | ||
773 | } | ||
774 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
775 | i, e0c, e18, e1c, e20, e24); | ||
776 | } | ||
777 | ustatus &= ~0x00000080; | ||
778 | } | ||
779 | } | ||
780 | break; | ||
781 | } | ||
782 | if (ustatus) { | ||
783 | if (display) | ||
784 | NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); | ||
785 | } | ||
786 | nv_wr32(dev, ustatus_addr, 0xc0000000); | ||
787 | } | ||
788 | |||
789 | if (!tps && display) | ||
790 | NV_INFO(dev, "%s - No TPs claiming errors?\n", name); | ||
791 | } | ||
792 | |||
793 | static void | ||
794 | nv50_pgraph_trap_handler(struct drm_device *dev) | ||
795 | { | ||
796 | struct nouveau_pgraph_trap trap; | ||
797 | uint32_t status = nv_rd32(dev, 0x400108); | ||
798 | uint32_t ustatus; | ||
799 | int display = nouveau_ratelimit(); | ||
800 | |||
801 | |||
802 | if (!status && display) { | ||
803 | nouveau_graph_trap_info(dev, &trap); | ||
804 | nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap); | ||
805 | NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n"); | ||
806 | } | ||
807 | |||
808 | /* DISPATCH: Relays commands to other units and handles NOTIFY, | ||
809 | * COND, QUERY. If you get a trap from it, the command is still stuck | ||
810 | * in DISPATCH and you need to do something about it. */ | ||
811 | if (status & 0x001) { | ||
812 | ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; | ||
813 | if (!ustatus && display) { | ||
814 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); | ||
815 | } | ||
816 | |||
817 | /* Known to be triggered by screwed up NOTIFY and COND... */ | ||
818 | if (ustatus & 0x00000001) { | ||
819 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); | ||
820 | nv_wr32(dev, 0x400500, 0); | ||
821 | if (nv_rd32(dev, 0x400808) & 0x80000000) { | ||
822 | if (display) { | ||
823 | if (nouveau_graph_trapped_channel(dev, &trap.channel)) | ||
824 | trap.channel = -1; | ||
825 | trap.class = nv_rd32(dev, 0x400814); | ||
826 | trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc; | ||
827 | trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7; | ||
828 | trap.data = nv_rd32(dev, 0x40080c); | ||
829 | trap.data2 = nv_rd32(dev, 0x400810); | ||
830 | nouveau_graph_dump_trap_info(dev, | ||
831 | "PGRAPH_TRAP_DISPATCH_FAULT", &trap); | ||
832 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808)); | ||
833 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848)); | ||
834 | } | ||
835 | nv_wr32(dev, 0x400808, 0); | ||
836 | } else if (display) { | ||
837 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n"); | ||
838 | } | ||
839 | nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); | ||
840 | nv_wr32(dev, 0x400848, 0); | ||
841 | ustatus &= ~0x00000001; | ||
842 | } | ||
843 | if (ustatus & 0x00000002) { | ||
844 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); | ||
845 | nv_wr32(dev, 0x400500, 0); | ||
846 | if (nv_rd32(dev, 0x40084c) & 0x80000000) { | ||
847 | if (display) { | ||
848 | if (nouveau_graph_trapped_channel(dev, &trap.channel)) | ||
849 | trap.channel = -1; | ||
850 | trap.class = nv_rd32(dev, 0x400814); | ||
851 | trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc; | ||
852 | trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7; | ||
853 | trap.data = nv_rd32(dev, 0x40085c); | ||
854 | trap.data2 = 0; | ||
855 | nouveau_graph_dump_trap_info(dev, | ||
856 | "PGRAPH_TRAP_DISPATCH_QUERY", &trap); | ||
857 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c)); | ||
858 | } | ||
859 | nv_wr32(dev, 0x40084c, 0); | ||
860 | } else if (display) { | ||
861 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n"); | ||
862 | } | ||
863 | ustatus &= ~0x00000002; | ||
864 | } | ||
865 | if (ustatus && display) | ||
866 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus); | ||
867 | nv_wr32(dev, 0x400804, 0xc0000000); | ||
868 | nv_wr32(dev, 0x400108, 0x001); | ||
869 | status &= ~0x001; | ||
870 | } | ||
871 | |||
872 | /* TRAPs other than dispatch use the "normal" trap regs. */ | ||
873 | if (status && display) { | ||
874 | nouveau_graph_trap_info(dev, &trap); | ||
875 | nouveau_graph_dump_trap_info(dev, | ||
876 | "PGRAPH_TRAP", &trap); | ||
877 | } | ||
878 | |||
879 | /* M2MF: Memory to memory copy engine. */ | ||
880 | if (status & 0x002) { | ||
881 | ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; | ||
882 | if (!ustatus && display) { | ||
883 | NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n"); | ||
884 | } | ||
885 | if (ustatus & 0x00000001) { | ||
886 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); | ||
887 | ustatus &= ~0x00000001; | ||
888 | } | ||
889 | if (ustatus & 0x00000002) { | ||
890 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); | ||
891 | ustatus &= ~0x00000002; | ||
892 | } | ||
893 | if (ustatus & 0x00000004) { | ||
894 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); | ||
895 | ustatus &= ~0x00000004; | ||
896 | } | ||
897 | NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n", | ||
898 | nv_rd32(dev, 0x406804), | ||
899 | nv_rd32(dev, 0x406808), | ||
900 | nv_rd32(dev, 0x40680c), | ||
901 | nv_rd32(dev, 0x406810)); | ||
902 | if (ustatus && display) | ||
903 | NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus); | ||
904 | /* No sane way found yet -- just reset the bugger. */ | ||
905 | nv_wr32(dev, 0x400040, 2); | ||
906 | nv_wr32(dev, 0x400040, 0); | ||
907 | nv_wr32(dev, 0x406800, 0xc0000000); | ||
908 | nv_wr32(dev, 0x400108, 0x002); | ||
909 | status &= ~0x002; | ||
910 | } | ||
911 | |||
912 | /* VFETCH: Fetches data from vertex buffers. */ | ||
913 | if (status & 0x004) { | ||
914 | ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; | ||
915 | if (!ustatus && display) { | ||
916 | NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n"); | ||
917 | } | ||
918 | if (ustatus & 0x00000001) { | ||
919 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); | ||
920 | NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n", | ||
921 | nv_rd32(dev, 0x400c00), | ||
922 | nv_rd32(dev, 0x400c08), | ||
923 | nv_rd32(dev, 0x400c0c), | ||
924 | nv_rd32(dev, 0x400c10)); | ||
925 | ustatus &= ~0x00000001; | ||
926 | } | ||
927 | if (ustatus && display) | ||
928 | NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus); | ||
929 | nv_wr32(dev, 0x400c04, 0xc0000000); | ||
930 | nv_wr32(dev, 0x400108, 0x004); | ||
931 | status &= ~0x004; | ||
932 | } | ||
933 | |||
934 | /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ | ||
935 | if (status & 0x008) { | ||
936 | ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; | ||
937 | if (!ustatus && display) { | ||
938 | NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n"); | ||
939 | } | ||
940 | if (ustatus & 0x00000001) { | ||
941 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); | ||
942 | NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n", | ||
943 | nv_rd32(dev, 0x401804), | ||
944 | nv_rd32(dev, 0x401808), | ||
945 | nv_rd32(dev, 0x40180c), | ||
946 | nv_rd32(dev, 0x401810)); | ||
947 | ustatus &= ~0x00000001; | ||
948 | } | ||
949 | if (ustatus && display) | ||
950 | NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus); | ||
951 | /* No sane way found yet -- just reset the bugger. */ | ||
952 | nv_wr32(dev, 0x400040, 0x80); | ||
953 | nv_wr32(dev, 0x400040, 0); | ||
954 | nv_wr32(dev, 0x401800, 0xc0000000); | ||
955 | nv_wr32(dev, 0x400108, 0x008); | ||
956 | status &= ~0x008; | ||
957 | } | ||
958 | |||
959 | /* CCACHE: Handles code and c[] caches and fills them. */ | ||
960 | if (status & 0x010) { | ||
961 | ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; | ||
962 | if (!ustatus && display) { | ||
963 | NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n"); | ||
964 | } | ||
965 | if (ustatus & 0x00000001) { | ||
966 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); | ||
967 | NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n", | ||
968 | nv_rd32(dev, 0x405800), | ||
969 | nv_rd32(dev, 0x405804), | ||
970 | nv_rd32(dev, 0x405808), | ||
971 | nv_rd32(dev, 0x40580c), | ||
972 | nv_rd32(dev, 0x405810), | ||
973 | nv_rd32(dev, 0x405814), | ||
974 | nv_rd32(dev, 0x40581c)); | ||
975 | ustatus &= ~0x00000001; | ||
976 | } | ||
977 | if (ustatus && display) | ||
978 | NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus); | ||
979 | nv_wr32(dev, 0x405018, 0xc0000000); | ||
980 | nv_wr32(dev, 0x400108, 0x010); | ||
981 | status &= ~0x010; | ||
982 | } | ||
983 | |||
984 | /* Unknown, not seen yet... 0x402000 is the only trap status reg | ||
985 | * remaining, so try to handle it anyway. Perhaps related to that | ||
986 | * unknown DMA slot on tesla? */ | ||
987 | if (status & 0x20) { | ||
988 | nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); | ||
989 | ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; | ||
990 | if (display) | ||
991 | NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus); | ||
992 | nv_wr32(dev, 0x402000, 0xc0000000); | ||
993 | /* no status modification on purpose */ | ||
994 | } | ||
995 | |||
996 | /* TEXTURE: CUDA texturing units */ | ||
997 | if (status & 0x040) { | ||
998 | nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display, | ||
999 | "PGRAPH_TRAP_TEXTURE"); | ||
1000 | nv_wr32(dev, 0x400108, 0x040); | ||
1001 | status &= ~0x040; | ||
1002 | } | ||
1003 | |||
1004 | /* MP: CUDA execution engines. */ | ||
1005 | if (status & 0x080) { | ||
1006 | nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display, | ||
1007 | "PGRAPH_TRAP_MP"); | ||
1008 | nv_wr32(dev, 0x400108, 0x080); | ||
1009 | status &= ~0x080; | ||
1010 | } | ||
1011 | |||
1012 | /* TPDMA: Handles TP-initiated uncached memory accesses: | ||
1013 | * l[], g[], stack, 2d surfaces, render targets. */ | ||
1014 | if (status & 0x100) { | ||
1015 | nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display, | ||
1016 | "PGRAPH_TRAP_TPDMA"); | ||
1017 | nv_wr32(dev, 0x400108, 0x100); | ||
1018 | status &= ~0x100; | ||
1019 | } | ||
1020 | |||
1021 | if (status) { | ||
1022 | if (display) | ||
1023 | NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n", | ||
1024 | status); | ||
1025 | nv_wr32(dev, 0x400108, status); | ||
1026 | } | ||
1027 | } | ||
1028 | |||
1029 | /* There must be a *lot* of these. Will take some time to gather them up. */ | ||
1030 | static struct nouveau_enum_names nv50_data_error_names[] = | ||
1031 | { | ||
1032 | { 4, "INVALID_VALUE" }, | ||
1033 | { 5, "INVALID_ENUM" }, | ||
1034 | { 8, "INVALID_OBJECT" }, | ||
1035 | { 0xc, "INVALID_BITFIELD" }, | ||
1036 | { 0x28, "MP_NO_REG_SPACE" }, | ||
1037 | { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, | ||
1038 | }; | ||
1039 | |||
1040 | static void | ||
581 | nv50_pgraph_irq_handler(struct drm_device *dev) | 1041 | nv50_pgraph_irq_handler(struct drm_device *dev) |
582 | { | 1042 | { |
1043 | struct nouveau_pgraph_trap trap; | ||
1044 | int unhandled = 0; | ||
583 | uint32_t status; | 1045 | uint32_t status; |
584 | 1046 | ||
585 | while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { | 1047 | while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { |
586 | uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); | 1048 | /* NOTIFY: You've set a NOTIFY on a command and it's done. */ |
587 | |||
588 | if (status & 0x00000001) { | 1049 | if (status & 0x00000001) { |
589 | nouveau_pgraph_intr_notify(dev, nsource); | 1050 | nouveau_graph_trap_info(dev, &trap); |
1051 | if (nouveau_ratelimit()) | ||
1052 | nouveau_graph_dump_trap_info(dev, | ||
1053 | "PGRAPH_NOTIFY", &trap); | ||
590 | status &= ~0x00000001; | 1054 | status &= ~0x00000001; |
591 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); | 1055 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); |
592 | } | 1056 | } |
593 | 1057 | ||
594 | if (status & 0x00000010) { | 1058 | /* COMPUTE_QUERY: Purpose and exact cause unknown, happens |
595 | nouveau_pgraph_intr_error(dev, nsource | | 1059 | * when you write 0x200 to 0x50c0 method 0x31c. */ |
596 | NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); | 1060 | if (status & 0x00000002) { |
1061 | nouveau_graph_trap_info(dev, &trap); | ||
1062 | if (nouveau_ratelimit()) | ||
1063 | nouveau_graph_dump_trap_info(dev, | ||
1064 | "PGRAPH_COMPUTE_QUERY", &trap); | ||
1065 | status &= ~0x00000002; | ||
1066 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002); | ||
1067 | } | ||
597 | 1068 | ||
1069 | /* Unknown, never seen: 0x4 */ | ||
1070 | |||
1071 | /* ILLEGAL_MTHD: You used a wrong method for this class. */ | ||
1072 | if (status & 0x00000010) { | ||
1073 | nouveau_graph_trap_info(dev, &trap); | ||
1074 | if (nouveau_pgraph_intr_swmthd(dev, &trap)) | ||
1075 | unhandled = 1; | ||
1076 | if (unhandled && nouveau_ratelimit()) | ||
1077 | nouveau_graph_dump_trap_info(dev, | ||
1078 | "PGRAPH_ILLEGAL_MTHD", &trap); | ||
598 | status &= ~0x00000010; | 1079 | status &= ~0x00000010; |
599 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); | 1080 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); |
600 | } | 1081 | } |
601 | 1082 | ||
1083 | /* ILLEGAL_CLASS: You used a wrong class. */ | ||
1084 | if (status & 0x00000020) { | ||
1085 | nouveau_graph_trap_info(dev, &trap); | ||
1086 | if (nouveau_ratelimit()) | ||
1087 | nouveau_graph_dump_trap_info(dev, | ||
1088 | "PGRAPH_ILLEGAL_CLASS", &trap); | ||
1089 | status &= ~0x00000020; | ||
1090 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020); | ||
1091 | } | ||
1092 | |||
1093 | /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */ | ||
1094 | if (status & 0x00000040) { | ||
1095 | nouveau_graph_trap_info(dev, &trap); | ||
1096 | if (nouveau_ratelimit()) | ||
1097 | nouveau_graph_dump_trap_info(dev, | ||
1098 | "PGRAPH_DOUBLE_NOTIFY", &trap); | ||
1099 | status &= ~0x00000040; | ||
1100 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040); | ||
1101 | } | ||
1102 | |||
1103 | /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */ | ||
602 | if (status & 0x00001000) { | 1104 | if (status & 0x00001000) { |
603 | nv_wr32(dev, 0x400500, 0x00000000); | 1105 | nv_wr32(dev, 0x400500, 0x00000000); |
604 | nv_wr32(dev, NV03_PGRAPH_INTR, | 1106 | nv_wr32(dev, NV03_PGRAPH_INTR, |
@@ -613,49 +1115,59 @@ nv50_pgraph_irq_handler(struct drm_device *dev) | |||
613 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; | 1115 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; |
614 | } | 1116 | } |
615 | 1117 | ||
616 | if (status & 0x00100000) { | 1118 | /* BUFFER_NOTIFY: Your m2mf transfer finished */ |
617 | nouveau_pgraph_intr_error(dev, nsource | | 1119 | if (status & 0x00010000) { |
618 | NV03_PGRAPH_NSOURCE_DATA_ERROR); | 1120 | nouveau_graph_trap_info(dev, &trap); |
1121 | if (nouveau_ratelimit()) | ||
1122 | nouveau_graph_dump_trap_info(dev, | ||
1123 | "PGRAPH_BUFFER_NOTIFY", &trap); | ||
1124 | status &= ~0x00010000; | ||
1125 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000); | ||
1126 | } | ||
619 | 1127 | ||
1128 | /* DATA_ERROR: Invalid value for this method, or invalid | ||
1129 | * state in current PGRAPH context for this operation */ | ||
1130 | if (status & 0x00100000) { | ||
1131 | nouveau_graph_trap_info(dev, &trap); | ||
1132 | if (nouveau_ratelimit()) { | ||
1133 | nouveau_graph_dump_trap_info(dev, | ||
1134 | "PGRAPH_DATA_ERROR", &trap); | ||
1135 | NV_INFO (dev, "PGRAPH_DATA_ERROR - "); | ||
1136 | nouveau_print_enum_names(nv_rd32(dev, 0x400110), | ||
1137 | nv50_data_error_names); | ||
1138 | printk("\n"); | ||
1139 | } | ||
620 | status &= ~0x00100000; | 1140 | status &= ~0x00100000; |
621 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); | 1141 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); |
622 | } | 1142 | } |
623 | 1143 | ||
1144 | /* TRAP: Something bad happened in the middle of command | ||
1145 | * execution. Has a billion types, subtypes, and even | ||
1146 | * subsubtypes. */ | ||
624 | if (status & 0x00200000) { | 1147 | if (status & 0x00200000) { |
625 | int r; | 1148 | nv50_pgraph_trap_handler(dev); |
626 | |||
627 | nouveau_pgraph_intr_error(dev, nsource | | ||
628 | NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); | ||
629 | |||
630 | NV_ERROR(dev, "magic set 1:\n"); | ||
631 | for (r = 0x408900; r <= 0x408910; r += 4) | ||
632 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
633 | nv_rd32(dev, r)); | ||
634 | nv_wr32(dev, 0x408900, | ||
635 | nv_rd32(dev, 0x408904) | 0xc0000000); | ||
636 | for (r = 0x408e08; r <= 0x408e24; r += 4) | ||
637 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
638 | nv_rd32(dev, r)); | ||
639 | nv_wr32(dev, 0x408e08, | ||
640 | nv_rd32(dev, 0x408e08) | 0xc0000000); | ||
641 | |||
642 | NV_ERROR(dev, "magic set 2:\n"); | ||
643 | for (r = 0x409900; r <= 0x409910; r += 4) | ||
644 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
645 | nv_rd32(dev, r)); | ||
646 | nv_wr32(dev, 0x409900, | ||
647 | nv_rd32(dev, 0x409904) | 0xc0000000); | ||
648 | for (r = 0x409e08; r <= 0x409e24; r += 4) | ||
649 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
650 | nv_rd32(dev, r)); | ||
651 | nv_wr32(dev, 0x409e08, | ||
652 | nv_rd32(dev, 0x409e08) | 0xc0000000); | ||
653 | |||
654 | status &= ~0x00200000; | 1149 | status &= ~0x00200000; |
655 | nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); | ||
656 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); | 1150 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); |
657 | } | 1151 | } |
658 | 1152 | ||
1153 | /* Unknown, never seen: 0x00400000 */ | ||
1154 | |||
1155 | /* SINGLE_STEP: Happens on every method if you turned on | ||
1156 | * single stepping in 40008c */ | ||
1157 | if (status & 0x01000000) { | ||
1158 | nouveau_graph_trap_info(dev, &trap); | ||
1159 | if (nouveau_ratelimit()) | ||
1160 | nouveau_graph_dump_trap_info(dev, | ||
1161 | "PGRAPH_SINGLE_STEP", &trap); | ||
1162 | status &= ~0x01000000; | ||
1163 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000); | ||
1164 | } | ||
1165 | |||
1166 | /* 0x02000000 happens when you pause a ctxprog... | ||
1167 | * but the only way this can happen that I know is by | ||
1168 | * poking the relevant MMIO register, and we don't | ||
1169 | * do that. */ | ||
1170 | |||
659 | if (status) { | 1171 | if (status) { |
660 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", | 1172 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", |
661 | status); | 1173 | status); |
@@ -672,7 +1184,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev) | |||
672 | } | 1184 | } |
673 | 1185 | ||
674 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); | 1186 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); |
675 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | 1187 | if (nv_rd32(dev, 0x400824) & (1 << 31)) |
1188 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | ||
676 | } | 1189 | } |
677 | 1190 | ||
678 | static void | 1191 | static void |
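
nouveau_print_enum_names_() added above is a straight linear search of a {value, name} table, falling back to printing the raw value. A standalone version using the nv50_mp_exec_error_names entries, runnable as-is:

    #include <stdio.h>
    #include <stdint.h>

    struct enum_name {
        uint32_t value;
        const char *name;
    };

    /* Same shape as the nv50_mp_exec_error_names table added above. */
    static const struct enum_name mp_exec_errors[] = {
        { 3,    "STACK_UNDERFLOW" },
        { 4,    "QUADON_ACTIVE" },
        { 8,    "TIMEOUT" },
        { 0x10, "INVALID_OPCODE" },
        { 0x40, "BREAKPOINT" },
    };

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static void print_enum_name(uint32_t value,
                                const struct enum_name *list, int len)
    {
        int i;

        for (i = 0; i < len; i++) {
            if (list[i].value == value) {
                printf("%s", list[i].name);
                return;
            }
        }
        printf("unknown value 0x%08x", value);
    }

    int main(void)
    {
        print_enum_name(0x10, mp_exec_errors, ARRAY_SIZE(mp_exec_errors));
        printf("\n");
        print_enum_name(0x99, mp_exec_errors, ARRAY_SIZE(mp_exec_errors));
        printf("\n");
        return 0;
    }
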
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index eb8f084d5f53..58b46807de23 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include "nouveau_drm.h" | 35 | #include "nouveau_drm.h" |
36 | #include "nv50_display.h" | 36 | #include "nv50_display.h" |
37 | 37 | ||
38 | static int nouveau_stub_init(struct drm_device *dev) { return 0; } | ||
39 | static void nouveau_stub_takedown(struct drm_device *dev) {} | 38 | static void nouveau_stub_takedown(struct drm_device *dev) {} |
40 | 39 | ||
41 | static int nouveau_init_engine_ptrs(struct drm_device *dev) | 40 | static int nouveau_init_engine_ptrs(struct drm_device *dev) |
@@ -277,8 +276,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
277 | engine->timer.init = nv04_timer_init; | 276 | engine->timer.init = nv04_timer_init; |
278 | engine->timer.read = nv04_timer_read; | 277 | engine->timer.read = nv04_timer_read; |
279 | engine->timer.takedown = nv04_timer_takedown; | 278 | engine->timer.takedown = nv04_timer_takedown; |
280 | engine->fb.init = nouveau_stub_init; | 279 | engine->fb.init = nv50_fb_init; |
281 | engine->fb.takedown = nouveau_stub_takedown; | 280 | engine->fb.takedown = nv50_fb_takedown; |
282 | engine->graph.grclass = nv50_graph_grclass; | 281 | engine->graph.grclass = nv50_graph_grclass; |
283 | engine->graph.init = nv50_graph_init; | 282 | engine->graph.init = nv50_graph_init; |
284 | engine->graph.takedown = nv50_graph_takedown; | 283 | engine->graph.takedown = nv50_graph_takedown; |
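
The nouveau_state.c hunk replaces the stub fb hooks with nv50_fb_init()/nv50_fb_takedown() in the per-card engine table. The pattern is a plain ops structure filled in once, based on the card generation, and called through afterwards; a minimal sketch with hypothetical names:

    #include <stdio.h>

    struct fb_engine {
        int  (*init)(void);
        void (*takedown)(void);
    };

    static int  stub_init(void)     { return 0; }
    static void stub_takedown(void) { }
    static int  nv50_init(void)     { printf("nv50 fb init\n"); return 0; }
    static void nv50_takedown(void) { printf("nv50 fb takedown\n"); }

    /* Pick the implementation once per card; the rest of the driver
     * only ever calls through the struct. */
    static void init_engine_ptrs(struct fb_engine *fb, int card_type)
    {
        if (card_type >= 0x50) {
            fb->init = nv50_init;
            fb->takedown = nv50_takedown;
        } else {
            fb->init = stub_init;
            fb->takedown = stub_takedown;
        }
    }

    int main(void)
    {
        struct fb_engine fb;

        init_engine_ptrs(&fb, 0x50);
        fb.init();
        fb.takedown();
        return 0;
    }
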
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index a1d1ebb073d9..eba687f1099e 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c | |||
@@ -230,9 +230,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
230 | struct drm_framebuffer *fb = crtc->fb; | 230 | struct drm_framebuffer *fb = crtc->fb; |
231 | 231 | ||
232 | /* Calculate our timings */ | 232 | /* Calculate our timings */ |
233 | int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; | 233 | int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; |
234 | int horizStart = (mode->crtc_hsync_start >> 3) - 1; | 234 | int horizStart = (mode->crtc_hsync_start >> 3) + 1; |
235 | int horizEnd = (mode->crtc_hsync_end >> 3) - 1; | 235 | int horizEnd = (mode->crtc_hsync_end >> 3) + 1; |
236 | int horizTotal = (mode->crtc_htotal >> 3) - 5; | 236 | int horizTotal = (mode->crtc_htotal >> 3) - 5; |
237 | int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1; | 237 | int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1; |
238 | int horizBlankEnd = (mode->crtc_htotal >> 3) - 1; | 238 | int horizBlankEnd = (mode->crtc_htotal >> 3) - 1; |
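
The nv04_crtc.c fix moves horizStart/horizEnd from one character clock before the sync positions to one after (all values are in units of 8 pixels). A worked example with standard 1024x768@60 horizontal timings, which are illustrative numbers and not taken from the patch:

    #include <stdio.h>

    int main(void)
    {
        /* VESA 1024x768@60 horizontal timings, in pixels (example only). */
        int hdisplay = 1024, hsync_start = 1048, hsync_end = 1184, htotal = 1344;

        int horizDisplay    = (hdisplay >> 3) - 1;    /* 127 */
        int horizStart      = (hsync_start >> 3) + 1; /* 132 (was 130 with -1) */
        int horizEnd        = (hsync_end >> 3) + 1;   /* 149 (was 147 with -1) */
        int horizTotal      = (htotal >> 3) - 5;      /* 163 */
        int horizBlankStart = (hdisplay >> 3) - 1;    /* 127 */
        int horizBlankEnd   = (htotal >> 3) - 1;      /* 167 */

        printf("disp %d start %d end %d total %d blank %d..%d\n",
               horizDisplay, horizStart, horizEnd, horizTotal,
               horizBlankStart, horizBlankEnd);
        return 0;
    }
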
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 3da90c2c4e63..813b25cec726 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
@@ -118,8 +118,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
118 | return; | 118 | return; |
119 | } | 119 | } |
120 | 120 | ||
121 | width = ALIGN(image->width, 32); | 121 | width = ALIGN(image->width, 8); |
122 | dsize = (width * image->height) >> 5; | 122 | dsize = ALIGN(width * image->height, 32) >> 5; |
123 | 123 | ||
124 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 124 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
125 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 125 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
@@ -136,8 +136,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
136 | ((image->dx + image->width) & 0xffff)); | 136 | ((image->dx + image->width) & 0xffff)); |
137 | OUT_RING(chan, bg); | 137 | OUT_RING(chan, bg); |
138 | OUT_RING(chan, fg); | 138 | OUT_RING(chan, fg); |
139 | OUT_RING(chan, (image->height << 16) | image->width); | ||
140 | OUT_RING(chan, (image->height << 16) | width); | 139 | OUT_RING(chan, (image->height << 16) | width); |
140 | OUT_RING(chan, (image->height << 16) | image->width); | ||
141 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); | 141 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); |
142 | 142 | ||
143 | while (dsize) { | 143 | while (dsize) { |
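
The nv04_fbcon_imageblit fix pads the glyph width to 8 pixels and then rounds the total 1bpp bitmap size up to whole 32-bit words before shifting by 5 to get a dword count. A worked example with a made-up glyph size:

    #include <stdio.h>

    /* Round x up to a multiple of a (a must be a power of two). */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        int width = 12, height = 9;               /* example 1bpp glyph */
        int w = ALIGN(width, 8);                  /* 16 pixels per row */
        int dsize = ALIGN(w * height, 32) >> 5;   /* 144 bits -> 5 dwords */

        printf("padded width %d, %d dwords of bitmap data\n", w, dsize);
        return 0;
    }
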
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 61a89f2dc553..fac6c88a2b1f 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -522,8 +522,8 @@ int nv50_display_create(struct drm_device *dev) | |||
522 | } | 522 | } |
523 | 523 | ||
524 | for (i = 0 ; i < dcb->connector.entries; i++) { | 524 | for (i = 0 ; i < dcb->connector.entries; i++) { |
525 | if (i != 0 && dcb->connector.entry[i].index == | 525 | if (i != 0 && dcb->connector.entry[i].index2 == |
526 | dcb->connector.entry[i - 1].index) | 526 | dcb->connector.entry[i - 1].index2) |
527 | continue; | 527 | continue; |
528 | nouveau_connector_create(dev, &dcb->connector.entry[i]); | 528 | nouveau_connector_create(dev, &dcb->connector.entry[i]); |
529 | } | 529 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c new file mode 100644 index 000000000000..a95e6941ba88 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_fb.c | |||
@@ -0,0 +1,32 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_drm.h" | ||
5 | |||
6 | int | ||
7 | nv50_fb_init(struct drm_device *dev) | ||
8 | { | ||
9 | /* This is needed to get meaningful information from 100c90 | ||
10 | * on traps. No idea what these values mean exactly. */ | ||
11 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
12 | |||
13 | switch (dev_priv->chipset) { | ||
14 | case 0x50: | ||
15 | nv_wr32(dev, 0x100c90, 0x0707ff); | ||
16 | break; | ||
17 | case 0xa5: | ||
18 | case 0xa8: | ||
19 | nv_wr32(dev, 0x100c90, 0x0d0fff); | ||
20 | break; | ||
21 | default: | ||
22 | nv_wr32(dev, 0x100c90, 0x1d07ff); | ||
23 | break; | ||
24 | } | ||
25 | |||
26 | return 0; | ||
27 | } | ||
28 | |||
29 | void | ||
30 | nv50_fb_takedown(struct drm_device *dev) | ||
31 | { | ||
32 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 993c7126fbde..25a3cd8794f9 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
@@ -233,7 +233,7 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
233 | BEGIN_RING(chan, NvSub2D, 0x0808, 3); | 233 | BEGIN_RING(chan, NvSub2D, 0x0808, 3); |
234 | OUT_RING(chan, 0); | 234 | OUT_RING(chan, 0); |
235 | OUT_RING(chan, 0); | 235 | OUT_RING(chan, 0); |
236 | OUT_RING(chan, 0); | 236 | OUT_RING(chan, 1); |
237 | BEGIN_RING(chan, NvSub2D, 0x081c, 1); | 237 | BEGIN_RING(chan, NvSub2D, 0x081c, 1); |
238 | OUT_RING(chan, 1); | 238 | OUT_RING(chan, 1); |
239 | BEGIN_RING(chan, NvSub2D, 0x0840, 4); | 239 | BEGIN_RING(chan, NvSub2D, 0x0840, 4); |
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 857a09671a39..c62b33a02f88 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -56,6 +56,10 @@ nv50_graph_init_intr(struct drm_device *dev) | |||
56 | static void | 56 | static void |
57 | nv50_graph_init_regs__nv(struct drm_device *dev) | 57 | nv50_graph_init_regs__nv(struct drm_device *dev) |
58 | { | 58 | { |
59 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
60 | uint32_t units = nv_rd32(dev, 0x1540); | ||
61 | int i; | ||
62 | |||
59 | NV_DEBUG(dev, "\n"); | 63 | NV_DEBUG(dev, "\n"); |
60 | 64 | ||
61 | nv_wr32(dev, 0x400804, 0xc0000000); | 65 | nv_wr32(dev, 0x400804, 0xc0000000); |
@@ -65,6 +69,20 @@ nv50_graph_init_regs__nv(struct drm_device *dev) | |||
65 | nv_wr32(dev, 0x405018, 0xc0000000); | 69 | nv_wr32(dev, 0x405018, 0xc0000000); |
66 | nv_wr32(dev, 0x402000, 0xc0000000); | 70 | nv_wr32(dev, 0x402000, 0xc0000000); |
67 | 71 | ||
72 | for (i = 0; i < 16; i++) { | ||
73 | if (units & 1 << i) { | ||
74 | if (dev_priv->chipset < 0xa0) { | ||
75 | nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000); | ||
76 | nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000); | ||
77 | nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000); | ||
78 | } else { | ||
79 | nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000); | ||
80 | nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000); | ||
81 | nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000); | ||
82 | } | ||
83 | } | ||
84 | } | ||
85 | |||
68 | nv_wr32(dev, 0x400108, 0xffffffff); | 86 | nv_wr32(dev, 0x400108, 0xffffffff); |
69 | 87 | ||
70 | nv_wr32(dev, 0x400824, 0x00004000); | 88 | nv_wr32(dev, 0x400824, 0x00004000); |
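
The new nv50_graph_init_regs__nv loop above reads a unit-presence bitmask from register 0x1540 and, for every populated unit, writes 0xc0000000 into three per-unit registers; the per-unit block stride differs by generation (i << 12 before NVA0, i << 11 from NVA0 on). A stripped-down sketch of the same mask-walk pattern, with illustrative names, a single register per unit, and 'base'/'stride' standing in for the per-generation values:

/* Walk a 16-bit presence mask and write one register per populated unit.
 * Sketch only; the driver writes three registers with chipset-dependent
 * offsets as shown in the hunk above.
 */
static void init_per_unit_regs(struct drm_device *dev, uint32_t units,
                               uint32_t base, uint32_t stride)
{
        int i;

        for (i = 0; i < 16; i++) {
                if (!(units & (1 << i)))
                        continue;               /* unit fused off / absent */
                nv_wr32(dev, base + i * stride, 0xc0000000);
        }
}
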
@@ -229,10 +247,6 @@ nv50_graph_create_context(struct nouveau_channel *chan) | |||
229 | nouveau_grctx_vals_load(dev, ctx); | 247 | nouveau_grctx_vals_load(dev, ctx); |
230 | } | 248 | } |
231 | nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); | 249 | nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); |
232 | if ((dev_priv->chipset & 0xf0) == 0xa0) | ||
233 | nv_wo32(dev, ctx, 0x00004/4, 0x00000000); | ||
234 | else | ||
235 | nv_wo32(dev, ctx, 0x0011c/4, 0x00000000); | ||
236 | dev_priv->engine.instmem.finish_access(dev); | 250 | dev_priv->engine.instmem.finish_access(dev); |
237 | 251 | ||
238 | return 0; | 252 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c index d105fcd42ca0..546b31949a30 100644 --- a/drivers/gpu/drm/nouveau/nv50_grctx.c +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c | |||
@@ -64,6 +64,9 @@ | |||
64 | #define CP_FLAG_ALWAYS ((2 * 32) + 13) | 64 | #define CP_FLAG_ALWAYS ((2 * 32) + 13) |
65 | #define CP_FLAG_ALWAYS_FALSE 0 | 65 | #define CP_FLAG_ALWAYS_FALSE 0 |
66 | #define CP_FLAG_ALWAYS_TRUE 1 | 66 | #define CP_FLAG_ALWAYS_TRUE 1 |
67 | #define CP_FLAG_INTR ((2 * 32) + 15) | ||
68 | #define CP_FLAG_INTR_NOT_PENDING 0 | ||
69 | #define CP_FLAG_INTR_PENDING 1 | ||
67 | 70 | ||
68 | #define CP_CTX 0x00100000 | 71 | #define CP_CTX 0x00100000 |
69 | #define CP_CTX_COUNT 0x000f0000 | 72 | #define CP_CTX_COUNT 0x000f0000 |
@@ -214,6 +217,8 @@ nv50_grctx_init(struct nouveau_grctx *ctx) | |||
214 | cp_name(ctx, cp_setup_save); | 217 | cp_name(ctx, cp_setup_save); |
215 | cp_set (ctx, UNK1D, SET); | 218 | cp_set (ctx, UNK1D, SET); |
216 | cp_wait(ctx, STATUS, BUSY); | 219 | cp_wait(ctx, STATUS, BUSY); |
220 | cp_wait(ctx, INTR, PENDING); | ||
221 | cp_bra (ctx, STATUS, BUSY, cp_setup_save); | ||
217 | cp_set (ctx, UNK01, SET); | 222 | cp_set (ctx, UNK01, SET); |
218 | cp_set (ctx, SWAP_DIRECTION, SAVE); | 223 | cp_set (ctx, SWAP_DIRECTION, SAVE); |
219 | 224 | ||
@@ -269,7 +274,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
269 | int offset, base; | 274 | int offset, base; |
270 | uint32_t units = nv_rd32 (ctx->dev, 0x1540); | 275 | uint32_t units = nv_rd32 (ctx->dev, 0x1540); |
271 | 276 | ||
272 | /* 0800 */ | 277 | /* 0800: DISPATCH */ |
273 | cp_ctx(ctx, 0x400808, 7); | 278 | cp_ctx(ctx, 0x400808, 7); |
274 | gr_def(ctx, 0x400814, 0x00000030); | 279 | gr_def(ctx, 0x400814, 0x00000030); |
275 | cp_ctx(ctx, 0x400834, 0x32); | 280 | cp_ctx(ctx, 0x400834, 0x32); |
@@ -300,7 +305,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
300 | gr_def(ctx, 0x400b20, 0x0001629d); | 305 | gr_def(ctx, 0x400b20, 0x0001629d); |
301 | } | 306 | } |
302 | 307 | ||
303 | /* 0C00 */ | 308 | /* 0C00: VFETCH */ |
304 | cp_ctx(ctx, 0x400c08, 0x2); | 309 | cp_ctx(ctx, 0x400c08, 0x2); |
305 | gr_def(ctx, 0x400c08, 0x0000fe0c); | 310 | gr_def(ctx, 0x400c08, 0x0000fe0c); |
306 | 311 | ||
@@ -326,7 +331,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
326 | cp_ctx(ctx, 0x401540, 0x5); | 331 | cp_ctx(ctx, 0x401540, 0x5); |
327 | gr_def(ctx, 0x401550, 0x00001018); | 332 | gr_def(ctx, 0x401550, 0x00001018); |
328 | 333 | ||
329 | /* 1800 */ | 334 | /* 1800: STREAMOUT */ |
330 | cp_ctx(ctx, 0x401814, 0x1); | 335 | cp_ctx(ctx, 0x401814, 0x1); |
331 | gr_def(ctx, 0x401814, 0x000000ff); | 336 | gr_def(ctx, 0x401814, 0x000000ff); |
332 | if (dev_priv->chipset == 0x50) { | 337 | if (dev_priv->chipset == 0x50) { |
@@ -641,7 +646,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
641 | if (dev_priv->chipset == 0x50) | 646 | if (dev_priv->chipset == 0x50) |
642 | cp_ctx(ctx, 0x4063e0, 0x1); | 647 | cp_ctx(ctx, 0x4063e0, 0x1); |
643 | 648 | ||
644 | /* 6800 */ | 649 | /* 6800: M2MF */ |
645 | if (dev_priv->chipset < 0x90) { | 650 | if (dev_priv->chipset < 0x90) { |
646 | cp_ctx(ctx, 0x406814, 0x2b); | 651 | cp_ctx(ctx, 0x406814, 0x2b); |
647 | gr_def(ctx, 0x406818, 0x00000f80); | 652 | gr_def(ctx, 0x406818, 0x00000f80); |
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index ed38262d9985..3c91312dea9a 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -50,7 +50,7 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h | |||
50 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ | 50 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ |
51 | radeon_irq.o r300_cmdbuf.o r600_cp.o | 51 | radeon_irq.o r300_cmdbuf.o r600_cp.o |
52 | # add KMS driver | 52 | # add KMS driver |
53 | radeon-y += radeon_device.o radeon_kms.o \ | 53 | radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ |
54 | radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \ | 54 | radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \ |
55 | atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \ | 55 | atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \ |
56 | radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \ | 56 | radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \ |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index d75788feac6c..247f8ee7e940 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -52,15 +52,17 @@ | |||
52 | 52 | ||
53 | typedef struct { | 53 | typedef struct { |
54 | struct atom_context *ctx; | 54 | struct atom_context *ctx; |
55 | |||
56 | uint32_t *ps, *ws; | 55 | uint32_t *ps, *ws; |
57 | int ps_shift; | 56 | int ps_shift; |
58 | uint16_t start; | 57 | uint16_t start; |
58 | unsigned last_jump; | ||
59 | unsigned long last_jump_jiffies; | ||
60 | bool abort; | ||
59 | } atom_exec_context; | 61 | } atom_exec_context; |
60 | 62 | ||
61 | int atom_debug = 0; | 63 | int atom_debug = 0; |
62 | static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); | 64 | static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); |
63 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); | 65 | int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); |
64 | 66 | ||
65 | static uint32_t atom_arg_mask[8] = | 67 | static uint32_t atom_arg_mask[8] = |
66 | { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, | 68 | { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, |
@@ -604,12 +606,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) | |||
604 | static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) | 606 | static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) |
605 | { | 607 | { |
606 | int idx = U8((*ptr)++); | 608 | int idx = U8((*ptr)++); |
609 | int r = 0; | ||
610 | |||
607 | if (idx < ATOM_TABLE_NAMES_CNT) | 611 | if (idx < ATOM_TABLE_NAMES_CNT) |
608 | SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); | 612 | SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); |
609 | else | 613 | else |
610 | SDEBUG(" table: %d\n", idx); | 614 | SDEBUG(" table: %d\n", idx); |
611 | if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) | 615 | if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) |
612 | atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); | 616 | r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); |
617 | if (r) { | ||
618 | ctx->abort = true; | ||
619 | } | ||
613 | } | 620 | } |
614 | 621 | ||
615 | static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) | 622 | static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) |
@@ -673,6 +680,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) | |||
673 | static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) | 680 | static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) |
674 | { | 681 | { |
675 | int execute = 0, target = U16(*ptr); | 682 | int execute = 0, target = U16(*ptr); |
683 | unsigned long cjiffies; | ||
684 | |||
676 | (*ptr) += 2; | 685 | (*ptr) += 2; |
677 | switch (arg) { | 686 | switch (arg) { |
678 | case ATOM_COND_ABOVE: | 687 | case ATOM_COND_ABOVE: |
@@ -700,8 +709,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) | |||
700 | if (arg != ATOM_COND_ALWAYS) | 709 | if (arg != ATOM_COND_ALWAYS) |
701 | SDEBUG(" taken: %s\n", execute ? "yes" : "no"); | 710 | SDEBUG(" taken: %s\n", execute ? "yes" : "no"); |
702 | SDEBUG(" target: 0x%04X\n", target); | 711 | SDEBUG(" target: 0x%04X\n", target); |
703 | if (execute) | 712 | if (execute) { |
713 | if (ctx->last_jump == (ctx->start + target)) { | ||
714 | cjiffies = jiffies; | ||
715 | if (time_after(cjiffies, ctx->last_jump_jiffies)) { | ||
716 | cjiffies -= ctx->last_jump_jiffies; | ||
717 | if ((jiffies_to_msecs(cjiffies) > 1000)) { | ||
718 | DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n"); | ||
719 | ctx->abort = true; | ||
720 | } | ||
721 | } else { | ||
722 | /* jiffies wrap around we will just wait a little longer */ | ||
723 | ctx->last_jump_jiffies = jiffies; | ||
724 | } | ||
725 | } else { | ||
726 | ctx->last_jump = ctx->start + target; | ||
727 | ctx->last_jump_jiffies = jiffies; | ||
728 | } | ||
704 | *ptr = ctx->start + target; | 729 | *ptr = ctx->start + target; |
730 | } | ||
705 | } | 731 | } |
706 | 732 | ||
707 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) | 733 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) |
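
The atom_op_jump change above records the last branch target and the jiffies value when it was first taken; if the interpreter keeps jumping to the same target for more than a second, ctx->abort is set so a stuck AtomBIOS script cannot hang the kernel. The same watchdog idiom in isolation, as a hedged sketch (hypothetical struct, kernel jiffies helpers assumed):

#include <linux/jiffies.h>

struct loop_watchdog {
        unsigned long target;   /* last jump destination seen */
        unsigned long stamp;    /* jiffies when that target was first taken */
};

/* Returns true once the same branch target has been taken repeatedly for
 * more than a second; restarts the clock whenever the target changes.
 */
static bool loop_watchdog_expired(struct loop_watchdog *wd, unsigned long target)
{
        if (wd->target != target) {
                wd->target = target;
                wd->stamp = jiffies;
                return false;
        }
        return time_after(jiffies, wd->stamp) &&
               jiffies_to_msecs(jiffies - wd->stamp) > 1000;
}
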
@@ -1104,7 +1130,7 @@ static struct { | |||
1104 | atom_op_shr, ATOM_ARG_MC}, { | 1130 | atom_op_shr, ATOM_ARG_MC}, { |
1105 | atom_op_debug, 0},}; | 1131 | atom_op_debug, 0},}; |
1106 | 1132 | ||
1107 | static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) | 1133 | static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) |
1108 | { | 1134 | { |
1109 | int base = CU16(ctx->cmd_table + 4 + 2 * index); | 1135 | int base = CU16(ctx->cmd_table + 4 + 2 * index); |
1110 | int len, ws, ps, ptr; | 1136 | int len, ws, ps, ptr; |
@@ -1112,7 +1138,7 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1112 | atom_exec_context ectx; | 1138 | atom_exec_context ectx; |
1113 | 1139 | ||
1114 | if (!base) | 1140 | if (!base) |
1115 | return; | 1141 | return -EINVAL; |
1116 | 1142 | ||
1117 | len = CU16(base + ATOM_CT_SIZE_PTR); | 1143 | len = CU16(base + ATOM_CT_SIZE_PTR); |
1118 | ws = CU8(base + ATOM_CT_WS_PTR); | 1144 | ws = CU8(base + ATOM_CT_WS_PTR); |
@@ -1125,6 +1151,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1125 | ectx.ps_shift = ps / 4; | 1151 | ectx.ps_shift = ps / 4; |
1126 | ectx.start = base; | 1152 | ectx.start = base; |
1127 | ectx.ps = params; | 1153 | ectx.ps = params; |
1154 | ectx.abort = false; | ||
1155 | ectx.last_jump = 0; | ||
1128 | if (ws) | 1156 | if (ws) |
1129 | ectx.ws = kzalloc(4 * ws, GFP_KERNEL); | 1157 | ectx.ws = kzalloc(4 * ws, GFP_KERNEL); |
1130 | else | 1158 | else |
@@ -1137,6 +1165,11 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1137 | SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); | 1165 | SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); |
1138 | else | 1166 | else |
1139 | SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); | 1167 | SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); |
1168 | if (ectx.abort) { | ||
1169 | DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", | ||
1170 | base, len, ws, ps, ptr - 1); | ||
1171 | return -EINVAL; | ||
1172 | } | ||
1140 | 1173 | ||
1141 | if (op < ATOM_OP_CNT && op > 0) | 1174 | if (op < ATOM_OP_CNT && op > 0) |
1142 | opcode_table[op].func(&ectx, &ptr, | 1175 | opcode_table[op].func(&ectx, &ptr, |
@@ -1152,10 +1185,13 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1152 | 1185 | ||
1153 | if (ws) | 1186 | if (ws) |
1154 | kfree(ectx.ws); | 1187 | kfree(ectx.ws); |
1188 | return 0; | ||
1155 | } | 1189 | } |
1156 | 1190 | ||
1157 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | 1191 | int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
1158 | { | 1192 | { |
1193 | int r; | ||
1194 | |||
1159 | mutex_lock(&ctx->mutex); | 1195 | mutex_lock(&ctx->mutex); |
1160 | /* reset reg block */ | 1196 | /* reset reg block */ |
1161 | ctx->reg_block = 0; | 1197 | ctx->reg_block = 0; |
@@ -1163,8 +1199,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | |||
1163 | ctx->fb_base = 0; | 1199 | ctx->fb_base = 0; |
1164 | /* reset io mode */ | 1200 | /* reset io mode */ |
1165 | ctx->io_mode = ATOM_IO_MM; | 1201 | ctx->io_mode = ATOM_IO_MM; |
1166 | atom_execute_table_locked(ctx, index, params); | 1202 | r = atom_execute_table_locked(ctx, index, params); |
1167 | mutex_unlock(&ctx->mutex); | 1203 | mutex_unlock(&ctx->mutex); |
1204 | return r; | ||
1168 | } | 1205 | } |
1169 | 1206 | ||
1170 | static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; | 1207 | static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; |
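
With atom_execute_table_locked() and atom_execute_table() now returning an int, an aborted script or a missing table propagates -EINVAL to the caller instead of failing silently. A hypothetical wrapper showing how the new return value can be consumed; 'index' and 'args' would be prepared as at the existing call sites:

static int exec_atom_table_checked(struct radeon_device *rdev, int index,
                                   void *args)
{
        int r = atom_execute_table(rdev->mode_info.atom_context, index,
                                   (uint32_t *)args);

        if (r)
                DRM_ERROR("AtomBIOS command table %d failed: %d\n", index, r);
        return r;
}
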
@@ -1248,9 +1285,7 @@ int atom_asic_init(struct atom_context *ctx) | |||
1248 | 1285 | ||
1249 | if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) | 1286 | if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) |
1250 | return 1; | 1287 | return 1; |
1251 | atom_execute_table(ctx, ATOM_CMD_INIT, ps); | 1288 | return atom_execute_table(ctx, ATOM_CMD_INIT, ps); |
1252 | |||
1253 | return 0; | ||
1254 | } | 1289 | } |
1255 | 1290 | ||
1256 | void atom_destroy(struct atom_context *ctx) | 1291 | void atom_destroy(struct atom_context *ctx) |
@@ -1260,12 +1295,16 @@ void atom_destroy(struct atom_context *ctx) | |||
1260 | kfree(ctx); | 1295 | kfree(ctx); |
1261 | } | 1296 | } |
1262 | 1297 | ||
1263 | void atom_parse_data_header(struct atom_context *ctx, int index, | 1298 | bool atom_parse_data_header(struct atom_context *ctx, int index, |
1264 | uint16_t * size, uint8_t * frev, uint8_t * crev, | 1299 | uint16_t * size, uint8_t * frev, uint8_t * crev, |
1265 | uint16_t * data_start) | 1300 | uint16_t * data_start) |
1266 | { | 1301 | { |
1267 | int offset = index * 2 + 4; | 1302 | int offset = index * 2 + 4; |
1268 | int idx = CU16(ctx->data_table + offset); | 1303 | int idx = CU16(ctx->data_table + offset); |
1304 | u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4); | ||
1305 | |||
1306 | if (!mdt[index]) | ||
1307 | return false; | ||
1269 | 1308 | ||
1270 | if (size) | 1309 | if (size) |
1271 | *size = CU16(idx); | 1310 | *size = CU16(idx); |
@@ -1274,38 +1313,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index, | |||
1274 | if (crev) | 1313 | if (crev) |
1275 | *crev = CU8(idx + 3); | 1314 | *crev = CU8(idx + 3); |
1276 | *data_start = idx; | 1315 | *data_start = idx; |
1277 | return; | 1316 | return true; |
1278 | } | 1317 | } |
1279 | 1318 | ||
1280 | void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, | 1319 | bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, |
1281 | uint8_t * crev) | 1320 | uint8_t * crev) |
1282 | { | 1321 | { |
1283 | int offset = index * 2 + 4; | 1322 | int offset = index * 2 + 4; |
1284 | int idx = CU16(ctx->cmd_table + offset); | 1323 | int idx = CU16(ctx->cmd_table + offset); |
1324 | u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4); | ||
1325 | |||
1326 | if (!mct[index]) | ||
1327 | return false; | ||
1285 | 1328 | ||
1286 | if (frev) | 1329 | if (frev) |
1287 | *frev = CU8(idx + 2); | 1330 | *frev = CU8(idx + 2); |
1288 | if (crev) | 1331 | if (crev) |
1289 | *crev = CU8(idx + 3); | 1332 | *crev = CU8(idx + 3); |
1290 | return; | 1333 | return true; |
1291 | } | 1334 | } |
1292 | 1335 | ||
1293 | int atom_allocate_fb_scratch(struct atom_context *ctx) | 1336 | int atom_allocate_fb_scratch(struct atom_context *ctx) |
1294 | { | 1337 | { |
1295 | int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); | 1338 | int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); |
1296 | uint16_t data_offset; | 1339 | uint16_t data_offset; |
1297 | int usage_bytes; | 1340 | int usage_bytes = 0; |
1298 | struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; | 1341 | struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; |
1299 | 1342 | ||
1300 | atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); | 1343 | if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { |
1344 | firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); | ||
1301 | 1345 | ||
1302 | firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); | 1346 | DRM_DEBUG("atom firmware requested %08x %dkb\n", |
1347 | firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, | ||
1348 | firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); | ||
1303 | 1349 | ||
1304 | DRM_DEBUG("atom firmware requested %08x %dkb\n", | 1350 | usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; |
1305 | firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, | 1351 | } |
1306 | firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); | ||
1307 | |||
1308 | usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; | ||
1309 | if (usage_bytes == 0) | 1352 | if (usage_bytes == 0) |
1310 | usage_bytes = 20 * 1024; | 1353 | usage_bytes = 20 * 1024; |
1311 | /* allocate some scratch memory */ | 1354 | /* allocate some scratch memory */ |
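
atom_parse_data_header() and atom_parse_cmd_header() now return false when the requested table is absent from the BIOS image (its offset in the master table is zero); atom_allocate_fb_scratch() above shows the intended caller pattern. The same guard in its negative form, as a fragment with the surrounding function context assumed:

        uint16_t size, data_offset;
        uint8_t frev, crev;

        if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
                return -EINVAL;         /* table not present in this BIOS image */
        /* ctx->bios + data_offset is safe to dereference from here on */
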
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index bc73781423a1..cd1b64ab5ca7 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h | |||
@@ -140,11 +140,13 @@ struct atom_context { | |||
140 | extern int atom_debug; | 140 | extern int atom_debug; |
141 | 141 | ||
142 | struct atom_context *atom_parse(struct card_info *, void *); | 142 | struct atom_context *atom_parse(struct card_info *, void *); |
143 | void atom_execute_table(struct atom_context *, int, uint32_t *); | 143 | int atom_execute_table(struct atom_context *, int, uint32_t *); |
144 | int atom_asic_init(struct atom_context *); | 144 | int atom_asic_init(struct atom_context *); |
145 | void atom_destroy(struct atom_context *); | 145 | void atom_destroy(struct atom_context *); |
146 | void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); | 146 | bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, |
147 | void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev); | 147 | uint8_t *frev, uint8_t *crev, uint16_t *data_start); |
148 | bool atom_parse_cmd_header(struct atom_context *ctx, int index, | ||
149 | uint8_t *frev, uint8_t *crev); | ||
148 | int atom_allocate_fb_scratch(struct atom_context *ctx); | 150 | int atom_allocate_fb_scratch(struct atom_context *ctx); |
149 | #include "atom-types.h" | 151 | #include "atom-types.h" |
150 | #include "atombios.h" | 152 | #include "atombios.h" |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index dd9fdf560611..fd4ef6d18849 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -353,12 +353,55 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, | |||
353 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 353 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
354 | } | 354 | } |
355 | 355 | ||
356 | static void atombios_disable_ss(struct drm_crtc *crtc) | ||
357 | { | ||
358 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
359 | struct drm_device *dev = crtc->dev; | ||
360 | struct radeon_device *rdev = dev->dev_private; | ||
361 | u32 ss_cntl; | ||
362 | |||
363 | if (ASIC_IS_DCE4(rdev)) { | ||
364 | switch (radeon_crtc->pll_id) { | ||
365 | case ATOM_PPLL1: | ||
366 | ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL); | ||
367 | ss_cntl &= ~EVERGREEN_PxPLL_SS_EN; | ||
368 | WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl); | ||
369 | break; | ||
370 | case ATOM_PPLL2: | ||
371 | ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL); | ||
372 | ss_cntl &= ~EVERGREEN_PxPLL_SS_EN; | ||
373 | WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl); | ||
374 | break; | ||
375 | case ATOM_DCPLL: | ||
376 | case ATOM_PPLL_INVALID: | ||
377 | return; | ||
378 | } | ||
379 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
380 | switch (radeon_crtc->pll_id) { | ||
381 | case ATOM_PPLL1: | ||
382 | ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL); | ||
383 | ss_cntl &= ~1; | ||
384 | WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl); | ||
385 | break; | ||
386 | case ATOM_PPLL2: | ||
387 | ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL); | ||
388 | ss_cntl &= ~1; | ||
389 | WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl); | ||
390 | break; | ||
391 | case ATOM_DCPLL: | ||
392 | case ATOM_PPLL_INVALID: | ||
393 | return; | ||
394 | } | ||
395 | } | ||
396 | } | ||
397 | |||
398 | |||
356 | union atom_enable_ss { | 399 | union atom_enable_ss { |
357 | ENABLE_LVDS_SS_PARAMETERS legacy; | 400 | ENABLE_LVDS_SS_PARAMETERS legacy; |
358 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; | 401 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; |
359 | }; | 402 | }; |
360 | 403 | ||
361 | static void atombios_set_ss(struct drm_crtc *crtc, int enable) | 404 | static void atombios_enable_ss(struct drm_crtc *crtc) |
362 | { | 405 | { |
363 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 406 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
364 | struct drm_device *dev = crtc->dev; | 407 | struct drm_device *dev = crtc->dev; |
@@ -387,9 +430,9 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) | |||
387 | step = dig->ss->step; | 430 | step = dig->ss->step; |
388 | delay = dig->ss->delay; | 431 | delay = dig->ss->delay; |
389 | range = dig->ss->range; | 432 | range = dig->ss->range; |
390 | } else if (enable) | 433 | } else |
391 | return; | 434 | return; |
392 | } else if (enable) | 435 | } else |
393 | return; | 436 | return; |
394 | break; | 437 | break; |
395 | } | 438 | } |
@@ -406,13 +449,13 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) | |||
406 | args.v1.ucSpreadSpectrumDelay = delay; | 449 | args.v1.ucSpreadSpectrumDelay = delay; |
407 | args.v1.ucSpreadSpectrumRange = range; | 450 | args.v1.ucSpreadSpectrumRange = range; |
408 | args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; | 451 | args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; |
409 | args.v1.ucEnable = enable; | 452 | args.v1.ucEnable = ATOM_ENABLE; |
410 | } else { | 453 | } else { |
411 | args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); | 454 | args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); |
412 | args.legacy.ucSpreadSpectrumType = type; | 455 | args.legacy.ucSpreadSpectrumType = type; |
413 | args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; | 456 | args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; |
414 | args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; | 457 | args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; |
415 | args.legacy.ucEnable = enable; | 458 | args.legacy.ucEnable = ATOM_ENABLE; |
416 | } | 459 | } |
417 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 460 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
418 | } | 461 | } |
@@ -478,11 +521,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
478 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 521 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
479 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 522 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
480 | adjusted_clock = mode->clock * 2; | 523 | adjusted_clock = mode->clock * 2; |
481 | /* LVDS PLL quirks */ | ||
482 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { | ||
483 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
484 | pll->algo = dig->pll_algo; | ||
485 | } | ||
486 | } else { | 524 | } else { |
487 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 525 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
488 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 526 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
@@ -503,8 +541,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
503 | int index; | 541 | int index; |
504 | 542 | ||
505 | index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); | 543 | index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); |
506 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, | 544 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, |
507 | &crev); | 545 | &crev)) |
546 | return adjusted_clock; | ||
508 | 547 | ||
509 | memset(&args, 0, sizeof(args)); | 548 | memset(&args, 0, sizeof(args)); |
510 | 549 | ||
@@ -542,11 +581,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
542 | } | 581 | } |
543 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 582 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
544 | /* may want to enable SS on DP/eDP eventually */ | 583 | /* may want to enable SS on DP/eDP eventually */ |
545 | args.v3.sInput.ucDispPllConfig |= | 584 | /*args.v3.sInput.ucDispPllConfig |= |
546 | DISPPLL_CONFIG_SS_ENABLE; | 585 | DISPPLL_CONFIG_SS_ENABLE;*/ |
547 | if (mode->clock > 165000) | 586 | if (encoder_mode == ATOM_ENCODER_MODE_DP) |
548 | args.v3.sInput.ucDispPllConfig |= | 587 | args.v3.sInput.ucDispPllConfig |= |
549 | DISPPLL_CONFIG_DUAL_LINK; | 588 | DISPPLL_CONFIG_COHERENT_MODE; |
589 | else { | ||
590 | if (mode->clock > 165000) | ||
591 | args.v3.sInput.ucDispPllConfig |= | ||
592 | DISPPLL_CONFIG_DUAL_LINK; | ||
593 | } | ||
550 | } | 594 | } |
551 | atom_execute_table(rdev->mode_info.atom_context, | 595 | atom_execute_table(rdev->mode_info.atom_context, |
552 | index, (uint32_t *)&args); | 596 | index, (uint32_t *)&args); |
@@ -592,8 +636,9 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) | |||
592 | memset(&args, 0, sizeof(args)); | 636 | memset(&args, 0, sizeof(args)); |
593 | 637 | ||
594 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); | 638 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); |
595 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, | 639 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, |
596 | &crev); | 640 | &crev)) |
641 | return; | ||
597 | 642 | ||
598 | switch (frev) { | 643 | switch (frev) { |
599 | case 1: | 644 | case 1: |
@@ -667,8 +712,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
667 | &ref_div, &post_div); | 712 | &ref_div, &post_div); |
668 | 713 | ||
669 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); | 714 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); |
670 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, | 715 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, |
671 | &crev); | 716 | &crev)) |
717 | return; | ||
672 | 718 | ||
673 | switch (frev) { | 719 | switch (frev) { |
674 | case 1: | 720 | case 1: |
@@ -1083,15 +1129,12 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
1083 | 1129 | ||
1084 | /* TODO color tiling */ | 1130 | /* TODO color tiling */ |
1085 | 1131 | ||
1086 | /* pick pll */ | 1132 | atombios_disable_ss(crtc); |
1087 | radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); | ||
1088 | |||
1089 | atombios_set_ss(crtc, 0); | ||
1090 | /* always set DCPLL */ | 1133 | /* always set DCPLL */ |
1091 | if (ASIC_IS_DCE4(rdev)) | 1134 | if (ASIC_IS_DCE4(rdev)) |
1092 | atombios_crtc_set_dcpll(crtc); | 1135 | atombios_crtc_set_dcpll(crtc); |
1093 | atombios_crtc_set_pll(crtc, adjusted_mode); | 1136 | atombios_crtc_set_pll(crtc, adjusted_mode); |
1094 | atombios_set_ss(crtc, 1); | 1137 | atombios_enable_ss(crtc); |
1095 | 1138 | ||
1096 | if (ASIC_IS_DCE4(rdev)) | 1139 | if (ASIC_IS_DCE4(rdev)) |
1097 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 1140 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
@@ -1120,6 +1163,11 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, | |||
1120 | 1163 | ||
1121 | static void atombios_crtc_prepare(struct drm_crtc *crtc) | 1164 | static void atombios_crtc_prepare(struct drm_crtc *crtc) |
1122 | { | 1165 | { |
1166 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
1167 | |||
1168 | /* pick pll */ | ||
1169 | radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); | ||
1170 | |||
1123 | atombios_lock_crtc(crtc, ATOM_ENABLE); | 1171 | atombios_lock_crtc(crtc, ATOM_ENABLE); |
1124 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | 1172 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); |
1125 | } | 1173 | } |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 8a133bda00a2..28b31c64f48d 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -745,14 +745,14 @@ void dp_link_train(struct drm_encoder *encoder, | |||
745 | >> DP_TRAIN_PRE_EMPHASIS_SHIFT); | 745 | >> DP_TRAIN_PRE_EMPHASIS_SHIFT); |
746 | 746 | ||
747 | /* disable the training pattern on the sink */ | 747 | /* disable the training pattern on the sink */ |
748 | dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); | ||
749 | |||
750 | /* disable the training pattern on the source */ | ||
748 | if (ASIC_IS_DCE4(rdev)) | 751 | if (ASIC_IS_DCE4(rdev)) |
749 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE); | 752 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE); |
750 | else | 753 | else |
751 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, | 754 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, |
752 | dig_connector->dp_clock, enc_id, 0); | 755 | dig_connector->dp_clock, enc_id, 0); |
753 | |||
754 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, | ||
755 | dig_connector->dp_clock, enc_id, 0); | ||
756 | } | 756 | } |
757 | 757 | ||
758 | int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | 758 | int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index bd2e7aa85c1d..647a0efdc353 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include "drmP.h" | 26 | #include "drmP.h" |
27 | #include "radeon.h" | 27 | #include "radeon.h" |
28 | #include "radeon_asic.h" | ||
28 | #include "radeon_drm.h" | 29 | #include "radeon_drm.h" |
29 | #include "rv770d.h" | 30 | #include "rv770d.h" |
30 | #include "atom.h" | 31 | #include "atom.h" |
@@ -436,7 +437,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
436 | 437 | ||
437 | int evergreen_mc_init(struct radeon_device *rdev) | 438 | int evergreen_mc_init(struct radeon_device *rdev) |
438 | { | 439 | { |
439 | fixed20_12 a; | ||
440 | u32 tmp; | 440 | u32 tmp; |
441 | int chansize, numchan; | 441 | int chansize, numchan; |
442 | 442 | ||
@@ -481,12 +481,8 @@ int evergreen_mc_init(struct radeon_device *rdev) | |||
481 | rdev->mc.real_vram_size = rdev->mc.aper_size; | 481 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
482 | } | 482 | } |
483 | r600_vram_gtt_location(rdev, &rdev->mc); | 483 | r600_vram_gtt_location(rdev, &rdev->mc); |
484 | /* FIXME: we should enforce default clock in case GPU is not in | 484 | radeon_update_bandwidth_info(rdev); |
485 | * default setup | 485 | |
486 | */ | ||
487 | a.full = rfixed_const(100); | ||
488 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
489 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
490 | return 0; | 486 | return 0; |
491 | } | 487 | } |
492 | 488 | ||
@@ -746,6 +742,7 @@ int evergreen_init(struct radeon_device *rdev) | |||
746 | 742 | ||
747 | void evergreen_fini(struct radeon_device *rdev) | 743 | void evergreen_fini(struct radeon_device *rdev) |
748 | { | 744 | { |
745 | radeon_pm_fini(rdev); | ||
749 | evergreen_suspend(rdev); | 746 | evergreen_suspend(rdev); |
750 | #if 0 | 747 | #if 0 |
751 | r600_blit_fini(rdev); | 748 | r600_blit_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 91eb762eb3f9..3ae51ada1abf 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "radeon_drm.h" | 31 | #include "radeon_drm.h" |
32 | #include "radeon_reg.h" | 32 | #include "radeon_reg.h" |
33 | #include "radeon.h" | 33 | #include "radeon.h" |
34 | #include "radeon_asic.h" | ||
34 | #include "r100d.h" | 35 | #include "r100d.h" |
35 | #include "rs100d.h" | 36 | #include "rs100d.h" |
36 | #include "rv200d.h" | 37 | #include "rv200d.h" |
@@ -235,9 +236,9 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
235 | 236 | ||
236 | void r100_pci_gart_fini(struct radeon_device *rdev) | 237 | void r100_pci_gart_fini(struct radeon_device *rdev) |
237 | { | 238 | { |
239 | radeon_gart_fini(rdev); | ||
238 | r100_pci_gart_disable(rdev); | 240 | r100_pci_gart_disable(rdev); |
239 | radeon_gart_table_ram_free(rdev); | 241 | radeon_gart_table_ram_free(rdev); |
240 | radeon_gart_fini(rdev); | ||
241 | } | 242 | } |
242 | 243 | ||
243 | int r100_irq_set(struct radeon_device *rdev) | 244 | int r100_irq_set(struct radeon_device *rdev) |
@@ -312,10 +313,12 @@ int r100_irq_process(struct radeon_device *rdev) | |||
312 | /* Vertical blank interrupts */ | 313 | /* Vertical blank interrupts */ |
313 | if (status & RADEON_CRTC_VBLANK_STAT) { | 314 | if (status & RADEON_CRTC_VBLANK_STAT) { |
314 | drm_handle_vblank(rdev->ddev, 0); | 315 | drm_handle_vblank(rdev->ddev, 0); |
316 | rdev->pm.vblank_sync = true; | ||
315 | wake_up(&rdev->irq.vblank_queue); | 317 | wake_up(&rdev->irq.vblank_queue); |
316 | } | 318 | } |
317 | if (status & RADEON_CRTC2_VBLANK_STAT) { | 319 | if (status & RADEON_CRTC2_VBLANK_STAT) { |
318 | drm_handle_vblank(rdev->ddev, 1); | 320 | drm_handle_vblank(rdev->ddev, 1); |
321 | rdev->pm.vblank_sync = true; | ||
319 | wake_up(&rdev->irq.vblank_queue); | 322 | wake_up(&rdev->irq.vblank_queue); |
320 | } | 323 | } |
321 | if (status & RADEON_FP_DETECT_STAT) { | 324 | if (status & RADEON_FP_DETECT_STAT) { |
@@ -741,6 +744,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
741 | udelay(10); | 744 | udelay(10); |
742 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); | 745 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); |
743 | rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); | 746 | rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); |
747 | /* protect against crazy HW on resume */ | ||
748 | rdev->cp.wptr &= rdev->cp.ptr_mask; | ||
744 | /* Set cp mode to bus mastering & enable cp*/ | 749 | /* Set cp mode to bus mastering & enable cp*/ |
745 | WREG32(RADEON_CP_CSQ_MODE, | 750 | WREG32(RADEON_CP_CSQ_MODE, |
746 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | | 751 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | |
@@ -1804,6 +1809,7 @@ void r100_set_common_regs(struct radeon_device *rdev) | |||
1804 | { | 1809 | { |
1805 | struct drm_device *dev = rdev->ddev; | 1810 | struct drm_device *dev = rdev->ddev; |
1806 | bool force_dac2 = false; | 1811 | bool force_dac2 = false; |
1812 | u32 tmp; | ||
1807 | 1813 | ||
1808 | /* set these so they don't interfere with anything */ | 1814 | /* set these so they don't interfere with anything */ |
1809 | WREG32(RADEON_OV0_SCALE_CNTL, 0); | 1815 | WREG32(RADEON_OV0_SCALE_CNTL, 0); |
@@ -1875,6 +1881,12 @@ void r100_set_common_regs(struct radeon_device *rdev) | |||
1875 | WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); | 1881 | WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); |
1876 | WREG32(RADEON_DAC_CNTL2, dac2_cntl); | 1882 | WREG32(RADEON_DAC_CNTL2, dac2_cntl); |
1877 | } | 1883 | } |
1884 | |||
1885 | /* switch PM block to ACPI mode */ | ||
1886 | tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); | ||
1887 | tmp &= ~RADEON_PM_MODE_SEL; | ||
1888 | WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); | ||
1889 | |||
1878 | } | 1890 | } |
1879 | 1891 | ||
1880 | /* | 1892 | /* |
@@ -2022,6 +2034,7 @@ void r100_mc_init(struct radeon_device *rdev) | |||
2022 | radeon_vram_location(rdev, &rdev->mc, base); | 2034 | radeon_vram_location(rdev, &rdev->mc, base); |
2023 | if (!(rdev->flags & RADEON_IS_AGP)) | 2035 | if (!(rdev->flags & RADEON_IS_AGP)) |
2024 | radeon_gtt_location(rdev, &rdev->mc); | 2036 | radeon_gtt_location(rdev, &rdev->mc); |
2037 | radeon_update_bandwidth_info(rdev); | ||
2025 | } | 2038 | } |
2026 | 2039 | ||
2027 | 2040 | ||
@@ -2385,6 +2398,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
2385 | uint32_t pixel_bytes1 = 0; | 2398 | uint32_t pixel_bytes1 = 0; |
2386 | uint32_t pixel_bytes2 = 0; | 2399 | uint32_t pixel_bytes2 = 0; |
2387 | 2400 | ||
2401 | radeon_update_display_priority(rdev); | ||
2402 | |||
2388 | if (rdev->mode_info.crtcs[0]->base.enabled) { | 2403 | if (rdev->mode_info.crtcs[0]->base.enabled) { |
2389 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; | 2404 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; |
2390 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; | 2405 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; |
@@ -2413,11 +2428,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
2413 | /* | 2428 | /* |
2414 | * determine is there is enough bw for current mode | 2429 | * determine is there is enough bw for current mode |
2415 | */ | 2430 | */ |
2416 | mclk_ff.full = rfixed_const(rdev->clock.default_mclk); | 2431 | sclk_ff = rdev->pm.sclk; |
2417 | temp_ff.full = rfixed_const(100); | 2432 | mclk_ff = rdev->pm.mclk; |
2418 | mclk_ff.full = rfixed_div(mclk_ff, temp_ff); | ||
2419 | sclk_ff.full = rfixed_const(rdev->clock.default_sclk); | ||
2420 | sclk_ff.full = rfixed_div(sclk_ff, temp_ff); | ||
2421 | 2433 | ||
2422 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | 2434 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); |
2423 | temp_ff.full = rfixed_const(temp); | 2435 | temp_ff.full = rfixed_const(temp); |
@@ -3440,6 +3452,7 @@ int r100_suspend(struct radeon_device *rdev) | |||
3440 | 3452 | ||
3441 | void r100_fini(struct radeon_device *rdev) | 3453 | void r100_fini(struct radeon_device *rdev) |
3442 | { | 3454 | { |
3455 | radeon_pm_fini(rdev); | ||
3443 | r100_cp_fini(rdev); | 3456 | r100_cp_fini(rdev); |
3444 | r100_wb_fini(rdev); | 3457 | r100_wb_fini(rdev); |
3445 | r100_ib_fini(rdev); | 3458 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 1146c9909c2c..85617c311212 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "radeon_drm.h" | 30 | #include "radeon_drm.h" |
31 | #include "radeon_reg.h" | 31 | #include "radeon_reg.h" |
32 | #include "radeon.h" | 32 | #include "radeon.h" |
33 | #include "radeon_asic.h" | ||
33 | 34 | ||
34 | #include "r100d.h" | 35 | #include "r100d.h" |
35 | #include "r200_reg_safe.h" | 36 | #include "r200_reg_safe.h" |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 4cef90cd74e5..1023eeb65872 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "radeon_reg.h" | 31 | #include "radeon_reg.h" |
32 | #include "radeon.h" | 32 | #include "radeon.h" |
33 | #include "radeon_asic.h" | ||
33 | #include "radeon_drm.h" | 34 | #include "radeon_drm.h" |
34 | #include "r100_track.h" | 35 | #include "r100_track.h" |
35 | #include "r300d.h" | 36 | #include "r300d.h" |
@@ -164,9 +165,9 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev) | |||
164 | 165 | ||
165 | void rv370_pcie_gart_fini(struct radeon_device *rdev) | 166 | void rv370_pcie_gart_fini(struct radeon_device *rdev) |
166 | { | 167 | { |
168 | radeon_gart_fini(rdev); | ||
167 | rv370_pcie_gart_disable(rdev); | 169 | rv370_pcie_gart_disable(rdev); |
168 | radeon_gart_table_vram_free(rdev); | 170 | radeon_gart_table_vram_free(rdev); |
169 | radeon_gart_fini(rdev); | ||
170 | } | 171 | } |
171 | 172 | ||
172 | void r300_fence_ring_emit(struct radeon_device *rdev, | 173 | void r300_fence_ring_emit(struct radeon_device *rdev, |
@@ -481,6 +482,7 @@ void r300_mc_init(struct radeon_device *rdev) | |||
481 | radeon_vram_location(rdev, &rdev->mc, base); | 482 | radeon_vram_location(rdev, &rdev->mc, base); |
482 | if (!(rdev->flags & RADEON_IS_AGP)) | 483 | if (!(rdev->flags & RADEON_IS_AGP)) |
483 | radeon_gtt_location(rdev, &rdev->mc); | 484 | radeon_gtt_location(rdev, &rdev->mc); |
485 | radeon_update_bandwidth_info(rdev); | ||
484 | } | 486 | } |
485 | 487 | ||
486 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) | 488 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) |
@@ -1334,6 +1336,7 @@ int r300_suspend(struct radeon_device *rdev) | |||
1334 | 1336 | ||
1335 | void r300_fini(struct radeon_device *rdev) | 1337 | void r300_fini(struct radeon_device *rdev) |
1336 | { | 1338 | { |
1339 | radeon_pm_fini(rdev); | ||
1337 | r100_cp_fini(rdev); | 1340 | r100_cp_fini(rdev); |
1338 | r100_wb_fini(rdev); | 1341 | r100_wb_fini(rdev); |
1339 | r100_ib_fini(rdev); | 1342 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index c7593b8f58ee..0b8603ca6974 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "radeon_reg.h" | 30 | #include "radeon_reg.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_asic.h" | ||
32 | #include "atom.h" | 33 | #include "atom.h" |
33 | #include "r100d.h" | 34 | #include "r100d.h" |
34 | #include "r420d.h" | 35 | #include "r420d.h" |
@@ -266,6 +267,7 @@ int r420_suspend(struct radeon_device *rdev) | |||
266 | 267 | ||
267 | void r420_fini(struct radeon_device *rdev) | 268 | void r420_fini(struct radeon_device *rdev) |
268 | { | 269 | { |
270 | radeon_pm_fini(rdev); | ||
269 | r100_cp_fini(rdev); | 271 | r100_cp_fini(rdev); |
270 | r100_wb_fini(rdev); | 272 | r100_wb_fini(rdev); |
271 | r100_ib_fini(rdev); | 273 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 2b8a5dd13516..3c44b8d39318 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -27,6 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "radeon_asic.h" | ||
30 | #include "atom.h" | 31 | #include "atom.h" |
31 | #include "r520d.h" | 32 | #include "r520d.h" |
32 | 33 | ||
@@ -121,19 +122,13 @@ static void r520_vram_get_type(struct radeon_device *rdev) | |||
121 | 122 | ||
122 | void r520_mc_init(struct radeon_device *rdev) | 123 | void r520_mc_init(struct radeon_device *rdev) |
123 | { | 124 | { |
124 | fixed20_12 a; | ||
125 | 125 | ||
126 | r520_vram_get_type(rdev); | 126 | r520_vram_get_type(rdev); |
127 | r100_vram_init_sizes(rdev); | 127 | r100_vram_init_sizes(rdev); |
128 | radeon_vram_location(rdev, &rdev->mc, 0); | 128 | radeon_vram_location(rdev, &rdev->mc, 0); |
129 | if (!(rdev->flags & RADEON_IS_AGP)) | 129 | if (!(rdev->flags & RADEON_IS_AGP)) |
130 | radeon_gtt_location(rdev, &rdev->mc); | 130 | radeon_gtt_location(rdev, &rdev->mc); |
131 | /* FIXME: we should enforce default clock in case GPU is not in | 131 | radeon_update_bandwidth_info(rdev); |
132 | * default setup | ||
133 | */ | ||
134 | a.full = rfixed_const(100); | ||
135 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
136 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
137 | } | 132 | } |
138 | 133 | ||
139 | void r520_mc_program(struct radeon_device *rdev) | 134 | void r520_mc_program(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c52290197292..5509354c7c89 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "drmP.h" | 31 | #include "drmP.h" |
32 | #include "radeon_drm.h" | 32 | #include "radeon_drm.h" |
33 | #include "radeon.h" | 33 | #include "radeon.h" |
34 | #include "radeon_asic.h" | ||
34 | #include "radeon_mode.h" | 35 | #include "radeon_mode.h" |
35 | #include "r600d.h" | 36 | #include "r600d.h" |
36 | #include "atom.h" | 37 | #include "atom.h" |
@@ -491,9 +492,9 @@ void r600_pcie_gart_disable(struct radeon_device *rdev) | |||
491 | 492 | ||
492 | void r600_pcie_gart_fini(struct radeon_device *rdev) | 493 | void r600_pcie_gart_fini(struct radeon_device *rdev) |
493 | { | 494 | { |
495 | radeon_gart_fini(rdev); | ||
494 | r600_pcie_gart_disable(rdev); | 496 | r600_pcie_gart_disable(rdev); |
495 | radeon_gart_table_vram_free(rdev); | 497 | radeon_gart_table_vram_free(rdev); |
496 | radeon_gart_fini(rdev); | ||
497 | } | 498 | } |
498 | 499 | ||
499 | void r600_agp_enable(struct radeon_device *rdev) | 500 | void r600_agp_enable(struct radeon_device *rdev) |
@@ -675,7 +676,6 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |||
675 | 676 | ||
676 | int r600_mc_init(struct radeon_device *rdev) | 677 | int r600_mc_init(struct radeon_device *rdev) |
677 | { | 678 | { |
678 | fixed20_12 a; | ||
679 | u32 tmp; | 679 | u32 tmp; |
680 | int chansize, numchan; | 680 | int chansize, numchan; |
681 | 681 | ||
@@ -719,14 +719,10 @@ int r600_mc_init(struct radeon_device *rdev) | |||
719 | rdev->mc.real_vram_size = rdev->mc.aper_size; | 719 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
720 | } | 720 | } |
721 | r600_vram_gtt_location(rdev, &rdev->mc); | 721 | r600_vram_gtt_location(rdev, &rdev->mc); |
722 | /* FIXME: we should enforce default clock in case GPU is not in | 722 | |
723 | * default setup | ||
724 | */ | ||
725 | a.full = rfixed_const(100); | ||
726 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
727 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
728 | if (rdev->flags & RADEON_IS_IGP) | 723 | if (rdev->flags & RADEON_IS_IGP) |
729 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 724 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
725 | radeon_update_bandwidth_info(rdev); | ||
730 | return 0; | 726 | return 0; |
731 | } | 727 | } |
732 | 728 | ||
@@ -1132,6 +1128,7 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
1132 | /* Setup pipes */ | 1128 | /* Setup pipes */ |
1133 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1129 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
1134 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 1130 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
1131 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | ||
1135 | 1132 | ||
1136 | tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); | 1133 | tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); |
1137 | WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); | 1134 | WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); |
@@ -2119,6 +2116,7 @@ int r600_init(struct radeon_device *rdev) | |||
2119 | 2116 | ||
2120 | void r600_fini(struct radeon_device *rdev) | 2117 | void r600_fini(struct radeon_device *rdev) |
2121 | { | 2118 | { |
2119 | radeon_pm_fini(rdev); | ||
2122 | r600_audio_fini(rdev); | 2120 | r600_audio_fini(rdev); |
2123 | r600_blit_fini(rdev); | 2121 | r600_blit_fini(rdev); |
2124 | r600_cp_fini(rdev); | 2122 | r600_cp_fini(rdev); |
@@ -2398,19 +2396,19 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) | |||
2398 | WREG32(DC_HPD4_INT_CONTROL, tmp); | 2396 | WREG32(DC_HPD4_INT_CONTROL, tmp); |
2399 | if (ASIC_IS_DCE32(rdev)) { | 2397 | if (ASIC_IS_DCE32(rdev)) { |
2400 | tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; | 2398 | tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; |
2401 | WREG32(DC_HPD5_INT_CONTROL, 0); | 2399 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
2402 | tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; | 2400 | tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; |
2403 | WREG32(DC_HPD6_INT_CONTROL, 0); | 2401 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
2404 | } | 2402 | } |
2405 | } else { | 2403 | } else { |
2406 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); | 2404 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); |
2407 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); | 2405 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); |
2408 | tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | 2406 | tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; |
2409 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0); | 2407 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); |
2410 | tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | 2408 | tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; |
2411 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0); | 2409 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); |
2412 | tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | 2410 | tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; |
2413 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0); | 2411 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); |
2414 | } | 2412 | } |
2415 | } | 2413 | } |
2416 | 2414 | ||
@@ -2765,6 +2763,7 @@ restart_ih: | |||
2765 | case 0: /* D1 vblank */ | 2763 | case 0: /* D1 vblank */ |
2766 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | 2764 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { |
2767 | drm_handle_vblank(rdev->ddev, 0); | 2765 | drm_handle_vblank(rdev->ddev, 0); |
2766 | rdev->pm.vblank_sync = true; | ||
2768 | wake_up(&rdev->irq.vblank_queue); | 2767 | wake_up(&rdev->irq.vblank_queue); |
2769 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | 2768 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; |
2770 | DRM_DEBUG("IH: D1 vblank\n"); | 2769 | DRM_DEBUG("IH: D1 vblank\n"); |
@@ -2786,6 +2785,7 @@ restart_ih: | |||
2786 | case 0: /* D2 vblank */ | 2785 | case 0: /* D2 vblank */ |
2787 | if (disp_int & LB_D2_VBLANK_INTERRUPT) { | 2786 | if (disp_int & LB_D2_VBLANK_INTERRUPT) { |
2788 | drm_handle_vblank(rdev->ddev, 1); | 2787 | drm_handle_vblank(rdev->ddev, 1); |
2788 | rdev->pm.vblank_sync = true; | ||
2789 | wake_up(&rdev->irq.vblank_queue); | 2789 | wake_up(&rdev->irq.vblank_queue); |
2790 | disp_int &= ~LB_D2_VBLANK_INTERRUPT; | 2790 | disp_int &= ~LB_D2_VBLANK_INTERRUPT; |
2791 | DRM_DEBUG("IH: D2 vblank\n"); | 2791 | DRM_DEBUG("IH: D2 vblank\n"); |
@@ -2834,14 +2834,14 @@ restart_ih: | |||
2834 | break; | 2834 | break; |
2835 | case 10: | 2835 | case 10: |
2836 | if (disp_int_cont2 & DC_HPD5_INTERRUPT) { | 2836 | if (disp_int_cont2 & DC_HPD5_INTERRUPT) { |
2837 | disp_int_cont &= ~DC_HPD5_INTERRUPT; | 2837 | disp_int_cont2 &= ~DC_HPD5_INTERRUPT; |
2838 | queue_hotplug = true; | 2838 | queue_hotplug = true; |
2839 | DRM_DEBUG("IH: HPD5\n"); | 2839 | DRM_DEBUG("IH: HPD5\n"); |
2840 | } | 2840 | } |
2841 | break; | 2841 | break; |
2842 | case 12: | 2842 | case 12: |
2843 | if (disp_int_cont2 & DC_HPD6_INTERRUPT) { | 2843 | if (disp_int_cont2 & DC_HPD6_INTERRUPT) { |
2844 | disp_int_cont &= ~DC_HPD6_INTERRUPT; | 2844 | disp_int_cont2 &= ~DC_HPD6_INTERRUPT; |
2845 | queue_hotplug = true; | 2845 | queue_hotplug = true; |
2846 | DRM_DEBUG("IH: HPD6\n"); | 2846 | DRM_DEBUG("IH: HPD6\n"); |
2847 | } | 2847 | } |
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index db928016d034..dac7042b797e 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
@@ -182,41 +182,6 @@ int r600_audio_init(struct radeon_device *rdev) | |||
182 | } | 182 | } |
183 | 183 | ||
184 | /* | 184 | /* |
185 | * determin how the encoders and audio interface is wired together | ||
186 | */ | ||
187 | int r600_audio_tmds_index(struct drm_encoder *encoder) | ||
188 | { | ||
189 | struct drm_device *dev = encoder->dev; | ||
190 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
191 | struct drm_encoder *other; | ||
192 | |||
193 | switch (radeon_encoder->encoder_id) { | ||
194 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
195 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
196 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
197 | return 0; | ||
198 | |||
199 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
200 | /* special case check if an TMDS1 is present */ | ||
201 | list_for_each_entry(other, &dev->mode_config.encoder_list, head) { | ||
202 | if (to_radeon_encoder(other)->encoder_id == | ||
203 | ENCODER_OBJECT_ID_INTERNAL_TMDS1) | ||
204 | return 1; | ||
205 | } | ||
206 | return 0; | ||
207 | |||
208 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
209 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
210 | return 1; | ||
211 | |||
212 | default: | ||
213 | DRM_ERROR("Unsupported encoder type 0x%02X\n", | ||
214 | radeon_encoder->encoder_id); | ||
215 | return -1; | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * atach the audio codec to the clock source of the encoder | 185 | * atach the audio codec to the clock source of the encoder |
221 | */ | 186 | */ |
222 | void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | 187 | void r600_audio_set_clock(struct drm_encoder *encoder, int clock) |
@@ -224,6 +189,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | |||
224 | struct drm_device *dev = encoder->dev; | 189 | struct drm_device *dev = encoder->dev; |
225 | struct radeon_device *rdev = dev->dev_private; | 190 | struct radeon_device *rdev = dev->dev_private; |
226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 191 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
192 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
227 | int base_rate = 48000; | 193 | int base_rate = 48000; |
228 | 194 | ||
229 | switch (radeon_encoder->encoder_id) { | 195 | switch (radeon_encoder->encoder_id) { |
@@ -231,32 +197,34 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | |||
231 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | 197 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: |
232 | WREG32_P(R600_AUDIO_TIMING, 0, ~0x301); | 198 | WREG32_P(R600_AUDIO_TIMING, 0, ~0x301); |
233 | break; | 199 | break; |
234 | |||
235 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 200 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
236 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 201 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
237 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 202 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
238 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 203 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
239 | WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301); | 204 | WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301); |
240 | break; | 205 | break; |
241 | |||
242 | default: | 206 | default: |
243 | DRM_ERROR("Unsupported encoder type 0x%02X\n", | 207 | DRM_ERROR("Unsupported encoder type 0x%02X\n", |
244 | radeon_encoder->encoder_id); | 208 | radeon_encoder->encoder_id); |
245 | return; | 209 | return; |
246 | } | 210 | } |
247 | 211 | ||
248 | switch (r600_audio_tmds_index(encoder)) { | 212 | switch (dig->dig_encoder) { |
249 | case 0: | 213 | case 0: |
250 | WREG32(R600_AUDIO_PLL1_MUL, base_rate*50); | 214 | WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50); |
251 | WREG32(R600_AUDIO_PLL1_DIV, clock*100); | 215 | WREG32(R600_AUDIO_PLL1_DIV, clock * 100); |
252 | WREG32(R600_AUDIO_CLK_SRCSEL, 0); | 216 | WREG32(R600_AUDIO_CLK_SRCSEL, 0); |
253 | break; | 217 | break; |
254 | 218 | ||
255 | case 1: | 219 | case 1: |
256 | WREG32(R600_AUDIO_PLL2_MUL, base_rate*50); | 220 | WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50); |
257 | WREG32(R600_AUDIO_PLL2_DIV, clock*100); | 221 | WREG32(R600_AUDIO_PLL2_DIV, clock * 100); |
258 | WREG32(R600_AUDIO_CLK_SRCSEL, 1); | 222 | WREG32(R600_AUDIO_CLK_SRCSEL, 1); |
259 | break; | 223 | break; |
224 | default: | ||
225 | dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n", | ||
226 | radeon_encoder->encoder_id); | ||
227 | return; | ||
260 | } | 228 | } |
261 | } | 229 | } |
262 | 230 | ||
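The r600_audio_set_clock() hunk above drops the removed r600_audio_tmds_index() helper and instead keys the audio PLL programming off dig->dig_encoder from the encoder's private data. A small sketch of that selection and of the MUL/DIV values visible in the diff; write_reg() and set_audio_clock() are stand-ins for the driver's WREG32() and the real function, and the sample clock value in main() is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for WREG32(): just print the register write */
    static void write_reg(const char *name, uint32_t val)
    {
            printf("%-24s <- 0x%08x\n", name, val);
    }

    /* dig_encoder selects the PLL pair: 0 -> PLL1, 1 -> PLL2 */
    static void set_audio_clock(int dig_encoder, uint32_t clock)
    {
            const uint32_t base_rate = 48000;       /* 48 kHz audio base rate */

            switch (dig_encoder) {
            case 0:
                    write_reg("R600_AUDIO_PLL1_MUL", base_rate * 50);
                    write_reg("R600_AUDIO_PLL1_DIV", clock * 100);
                    write_reg("R600_AUDIO_CLK_SRCSEL", 0);
                    break;
            case 1:
                    write_reg("R600_AUDIO_PLL2_MUL", base_rate * 50);
                    write_reg("R600_AUDIO_PLL2_DIV", clock * 100);
                    write_reg("R600_AUDIO_CLK_SRCSEL", 1);
                    break;
            default:
                    fprintf(stderr, "unsupported DIG %d\n", dig_encoder);
                    break;
            }
    }

    int main(void)
    {
            set_audio_clock(1, 74250);      /* made-up dig_encoder/clock pair */
            return 0;
    }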
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c index a112c59f9d82..0271b53fa2dd 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.c +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c | |||
@@ -1,7 +1,42 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Alex Deucher <alexander.deucher@amd.com> | ||
25 | */ | ||
1 | 26 | ||
2 | #include <linux/types.h> | 27 | #include <linux/types.h> |
3 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
4 | 29 | ||
30 | /* | ||
31 | * R6xx+ cards need to use the 3D engine to blit data which requires | ||
32 | * quite a bit of hw state setup. Rather than pull the whole 3D driver | ||
33 | * (which normally generates the 3D state) into the DRM, we opt to use | ||
34 | statically generated state tables. The register state and shaders | ||
35 | * were hand generated to support blitting functionality. See the 3D | ||
36 | * driver or documentation for descriptions of the registers and | ||
37 | * shader instructions. | ||
38 | */ | ||
39 | |||
5 | const u32 r6xx_default_state[] = | 40 | const u32 r6xx_default_state[] = |
6 | { | 41 | { |
7 | 0xc0002400, | 42 | 0xc0002400, |
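The new header comment in r600_blit_shaders.c explains why the blit support carries canned tables such as r6xx_default_state[]: replaying pre-generated register state avoids pulling a full 3D state generator into the kernel. A toy illustration of that approach, with a made-up table (only the first dword is taken from the hunk above) and a stand-in emit_dword() in place of real command-submission code:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* tiny stand-in for a pre-generated table like r6xx_default_state[] */
    static const uint32_t default_state[] = {
            0xc0002400, 0x00000000,
            0xc0004600, 0x00000016,
    };

    /* stand-in for writing one dword into the command stream */
    static void emit_dword(uint32_t dw)
    {
            printf("emit 0x%08x\n", dw);
    }

    /* replaying canned state is a plain copy into the command stream,
     * which is why no 3D state-generation code is needed here */
    static void emit_state_table(const uint32_t *tbl, size_t ndwords)
    {
            for (size_t i = 0; i < ndwords; i++)
                    emit_dword(tbl[i]);
    }

    int main(void)
    {
            emit_state_table(default_state,
                             sizeof(default_state) / sizeof(default_state[0]));
            return 0;
    }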
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 40416c068d9f..68e6f4349309 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c | |||
@@ -1548,10 +1548,13 @@ static void r700_gfx_init(struct drm_device *dev, | |||
1548 | 1548 | ||
1549 | RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1549 | RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
1550 | RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 1550 | RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
1551 | RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | ||
1551 | 1552 | ||
1552 | RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1553 | RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
1553 | RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); | 1554 | RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); |
1554 | RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); | 1555 | RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); |
1556 | RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0); | ||
1557 | RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0); | ||
1555 | 1558 | ||
1556 | num_qd_pipes = | 1559 | num_qd_pipes = |
1557 | R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8); | 1560 | R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8); |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index cd2c63bce501..c39c1bc13016 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -45,6 +45,7 @@ struct r600_cs_track { | |||
45 | u32 nbanks; | 45 | u32 nbanks; |
46 | u32 npipes; | 46 | u32 npipes; |
47 | /* value we track */ | 47 | /* value we track */ |
48 | u32 sq_config; | ||
48 | u32 nsamples; | 49 | u32 nsamples; |
49 | u32 cb_color_base_last[8]; | 50 | u32 cb_color_base_last[8]; |
50 | struct radeon_bo *cb_color_bo[8]; | 51 | struct radeon_bo *cb_color_bo[8]; |
@@ -141,6 +142,8 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
141 | { | 142 | { |
142 | int i; | 143 | int i; |
143 | 144 | ||
145 | /* assume DX9 mode */ | ||
146 | track->sq_config = DX9_CONSTS; | ||
144 | for (i = 0; i < 8; i++) { | 147 | for (i = 0; i < 8; i++) { |
145 | track->cb_color_base_last[i] = 0; | 148 | track->cb_color_base_last[i] = 0; |
146 | track->cb_color_size[i] = 0; | 149 | track->cb_color_size[i] = 0; |
@@ -715,6 +718,9 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
715 | tmp =radeon_get_ib_value(p, idx); | 718 | tmp =radeon_get_ib_value(p, idx); |
716 | ib[idx] = 0; | 719 | ib[idx] = 0; |
717 | break; | 720 | break; |
721 | case SQ_CONFIG: | ||
722 | track->sq_config = radeon_get_ib_value(p, idx); | ||
723 | break; | ||
718 | case R_028800_DB_DEPTH_CONTROL: | 724 | case R_028800_DB_DEPTH_CONTROL: |
719 | track->db_depth_control = radeon_get_ib_value(p, idx); | 725 | track->db_depth_control = radeon_get_ib_value(p, idx); |
720 | break; | 726 | break; |
@@ -869,6 +875,54 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
869 | case SQ_PGM_START_VS: | 875 | case SQ_PGM_START_VS: |
870 | case SQ_PGM_START_GS: | 876 | case SQ_PGM_START_GS: |
871 | case SQ_PGM_START_PS: | 877 | case SQ_PGM_START_PS: |
878 | case SQ_ALU_CONST_CACHE_GS_0: | ||
879 | case SQ_ALU_CONST_CACHE_GS_1: | ||
880 | case SQ_ALU_CONST_CACHE_GS_2: | ||
881 | case SQ_ALU_CONST_CACHE_GS_3: | ||
882 | case SQ_ALU_CONST_CACHE_GS_4: | ||
883 | case SQ_ALU_CONST_CACHE_GS_5: | ||
884 | case SQ_ALU_CONST_CACHE_GS_6: | ||
885 | case SQ_ALU_CONST_CACHE_GS_7: | ||
886 | case SQ_ALU_CONST_CACHE_GS_8: | ||
887 | case SQ_ALU_CONST_CACHE_GS_9: | ||
888 | case SQ_ALU_CONST_CACHE_GS_10: | ||
889 | case SQ_ALU_CONST_CACHE_GS_11: | ||
890 | case SQ_ALU_CONST_CACHE_GS_12: | ||
891 | case SQ_ALU_CONST_CACHE_GS_13: | ||
892 | case SQ_ALU_CONST_CACHE_GS_14: | ||
893 | case SQ_ALU_CONST_CACHE_GS_15: | ||
894 | case SQ_ALU_CONST_CACHE_PS_0: | ||
895 | case SQ_ALU_CONST_CACHE_PS_1: | ||
896 | case SQ_ALU_CONST_CACHE_PS_2: | ||
897 | case SQ_ALU_CONST_CACHE_PS_3: | ||
898 | case SQ_ALU_CONST_CACHE_PS_4: | ||
899 | case SQ_ALU_CONST_CACHE_PS_5: | ||
900 | case SQ_ALU_CONST_CACHE_PS_6: | ||
901 | case SQ_ALU_CONST_CACHE_PS_7: | ||
902 | case SQ_ALU_CONST_CACHE_PS_8: | ||
903 | case SQ_ALU_CONST_CACHE_PS_9: | ||
904 | case SQ_ALU_CONST_CACHE_PS_10: | ||
905 | case SQ_ALU_CONST_CACHE_PS_11: | ||
906 | case SQ_ALU_CONST_CACHE_PS_12: | ||
907 | case SQ_ALU_CONST_CACHE_PS_13: | ||
908 | case SQ_ALU_CONST_CACHE_PS_14: | ||
909 | case SQ_ALU_CONST_CACHE_PS_15: | ||
910 | case SQ_ALU_CONST_CACHE_VS_0: | ||
911 | case SQ_ALU_CONST_CACHE_VS_1: | ||
912 | case SQ_ALU_CONST_CACHE_VS_2: | ||
913 | case SQ_ALU_CONST_CACHE_VS_3: | ||
914 | case SQ_ALU_CONST_CACHE_VS_4: | ||
915 | case SQ_ALU_CONST_CACHE_VS_5: | ||
916 | case SQ_ALU_CONST_CACHE_VS_6: | ||
917 | case SQ_ALU_CONST_CACHE_VS_7: | ||
918 | case SQ_ALU_CONST_CACHE_VS_8: | ||
919 | case SQ_ALU_CONST_CACHE_VS_9: | ||
920 | case SQ_ALU_CONST_CACHE_VS_10: | ||
921 | case SQ_ALU_CONST_CACHE_VS_11: | ||
922 | case SQ_ALU_CONST_CACHE_VS_12: | ||
923 | case SQ_ALU_CONST_CACHE_VS_13: | ||
924 | case SQ_ALU_CONST_CACHE_VS_14: | ||
925 | case SQ_ALU_CONST_CACHE_VS_15: | ||
872 | r = r600_cs_packet_next_reloc(p, &reloc); | 926 | r = r600_cs_packet_next_reloc(p, &reloc); |
873 | if (r) { | 927 | if (r) { |
874 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | 928 | dev_warn(p->dev, "bad SET_CONTEXT_REG " |
@@ -1226,13 +1280,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1226 | } | 1280 | } |
1227 | break; | 1281 | break; |
1228 | case PACKET3_SET_ALU_CONST: | 1282 | case PACKET3_SET_ALU_CONST: |
1229 | start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; | 1283 | if (track->sq_config & DX9_CONSTS) { |
1230 | end_reg = 4 * pkt->count + start_reg - 4; | 1284 | start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; |
1231 | if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || | 1285 | end_reg = 4 * pkt->count + start_reg - 4; |
1232 | (start_reg >= PACKET3_SET_ALU_CONST_END) || | 1286 | if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || |
1233 | (end_reg >= PACKET3_SET_ALU_CONST_END)) { | 1287 | (start_reg >= PACKET3_SET_ALU_CONST_END) || |
1234 | DRM_ERROR("bad SET_ALU_CONST\n"); | 1288 | (end_reg >= PACKET3_SET_ALU_CONST_END)) { |
1235 | return -EINVAL; | 1289 | DRM_ERROR("bad SET_ALU_CONST\n"); |
1290 | return -EINVAL; | ||
1291 | } | ||
1236 | } | 1292 | } |
1237 | break; | 1293 | break; |
1238 | case PACKET3_SET_BOOL_CONST: | 1294 | case PACKET3_SET_BOOL_CONST: |
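The r600_cs.c hunks track SQ_CONFIG in the parser state and only apply the PACKET3_SET_ALU_CONST range check when DX9-style constants are in use; with DX10-style constants the data lives in buffers addressed through the SQ_ALU_CONST_CACHE_* registers added to the relocation path above. A standalone sketch of the bounds arithmetic used by that check (the offset and end values are illustrative stand-ins for the driver's PACKET3_SET_ALU_CONST_OFFSET/END):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* illustrative stand-ins for PACKET3_SET_ALU_CONST_OFFSET/END */
    #define ALU_CONST_OFFSET 0x00030000u
    #define ALU_CONST_END    0x00032000u

    /* same bounds arithmetic as the SET_ALU_CONST case in r600_cs.c:
     * idx_value selects the first constant, count is the packet length */
    static bool alu_const_range_ok(uint32_t idx_value, uint32_t count)
    {
            uint32_t start_reg = (idx_value << 2) + ALU_CONST_OFFSET;
            uint32_t end_reg = 4 * count + start_reg - 4;

            if ((start_reg < ALU_CONST_OFFSET) ||
                (start_reg >= ALU_CONST_END) ||
                (end_reg >= ALU_CONST_END))
                    return false;
            return true;
    }

    int main(void)
    {
            printf("in range: %d\n", alu_const_range_ok(0x10, 8));
            printf("too far:  %d\n", alu_const_range_ok(0x1000, 8));
            return 0;
    }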
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index fcc949df0e5d..029fa1406d1d 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -42,13 +42,13 @@ enum r600_hdmi_color_format { | |||
42 | */ | 42 | */ |
43 | enum r600_hdmi_iec_status_bits { | 43 | enum r600_hdmi_iec_status_bits { |
44 | AUDIO_STATUS_DIG_ENABLE = 0x01, | 44 | AUDIO_STATUS_DIG_ENABLE = 0x01, |
45 | AUDIO_STATUS_V = 0x02, | 45 | AUDIO_STATUS_V = 0x02, |
46 | AUDIO_STATUS_VCFG = 0x04, | 46 | AUDIO_STATUS_VCFG = 0x04, |
47 | AUDIO_STATUS_EMPHASIS = 0x08, | 47 | AUDIO_STATUS_EMPHASIS = 0x08, |
48 | AUDIO_STATUS_COPYRIGHT = 0x10, | 48 | AUDIO_STATUS_COPYRIGHT = 0x10, |
49 | AUDIO_STATUS_NONAUDIO = 0x20, | 49 | AUDIO_STATUS_NONAUDIO = 0x20, |
50 | AUDIO_STATUS_PROFESSIONAL = 0x40, | 50 | AUDIO_STATUS_PROFESSIONAL = 0x40, |
51 | AUDIO_STATUS_LEVEL = 0x80 | 51 | AUDIO_STATUS_LEVEL = 0x80 |
52 | }; | 52 | }; |
53 | 53 | ||
54 | struct { | 54 | struct { |
@@ -85,7 +85,7 @@ struct { | |||
85 | static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq) | 85 | static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq) |
86 | { | 86 | { |
87 | if (*CTS == 0) | 87 | if (*CTS == 0) |
88 | *CTS = clock*N/(128*freq)*1000; | 88 | *CTS = clock * N / (128 * freq) * 1000; |
89 | DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", | 89 | DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", |
90 | N, *CTS, freq); | 90 | N, *CTS, freq); |
91 | } | 91 | } |
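The hunk above only reflows the fallback CTS computation, but the arithmetic is worth spelling out: when no fixed CTS is supplied, r600_hdmi_calc_CTS() derives it as clock * N / (128 * freq) * 1000 in integer math. A small sketch that reproduces the expression as written; the example inputs and their units are assumptions made purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* same fallback arithmetic as r600_hdmi_calc_CTS(); note that the
     * integer division happens before the final *1000 scaling */
    static void calc_cts(uint32_t clock, int *cts, int n, int freq)
    {
            if (*cts == 0)
                    *cts = clock * n / (128 * freq) * 1000;
            printf("N=%d CTS=%d for frequency %d\n", n, *cts, freq);
    }

    int main(void)
    {
            int cts = 0;    /* zero means "no fixed CTS supplied" */

            /* example inputs are assumptions; the hunk does not spell
             * out the units the caller uses */
            calc_cts(74250, &cts, 6144, 48000);
            return 0;
    }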
@@ -131,11 +131,11 @@ static void r600_hdmi_infoframe_checksum(uint8_t packetType, | |||
131 | uint8_t length, | 131 | uint8_t length, |
132 | uint8_t *frame) | 132 | uint8_t *frame) |
133 | { | 133 | { |
134 | int i; | 134 | int i; |
135 | frame[0] = packetType + versionNumber + length; | 135 | frame[0] = packetType + versionNumber + length; |
136 | for (i = 1; i <= length; i++) | 136 | for (i = 1; i <= length; i++) |
137 | frame[0] += frame[i]; | 137 | frame[0] += frame[i]; |
138 | frame[0] = 0x100 - frame[0]; | 138 | frame[0] = 0x100 - frame[0]; |
139 | } | 139 | } |
140 | 140 | ||
141 | /* | 141 | /* |
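r600_hdmi_infoframe_checksum() implements the usual infoframe checksum: byte 0 is chosen so that the header bytes plus the payload sum to zero modulo 256. A self-contained sketch of the same scheme with a made-up payload:

    #include <stdint.h>
    #include <stdio.h>

    /* same scheme as r600_hdmi_infoframe_checksum(): byte 0 is set so the
     * header bytes plus payload sum to zero modulo 256 */
    static void infoframe_checksum(uint8_t type, uint8_t version,
                                   uint8_t length, uint8_t *frame)
    {
            uint8_t sum = type + version + length;
            int i;

            for (i = 1; i <= length; i++)
                    sum += frame[i];
            frame[0] = (uint8_t)(0x100 - sum);
    }

    int main(void)
    {
            /* made-up 4-byte payload purely for illustration */
            uint8_t frame[5] = { 0, 0x11, 0x22, 0x33, 0x44 };

            infoframe_checksum(0x82, 0x02, 4, frame);
            printf("checksum byte = 0x%02x\n", frame[0]);
            return 0;
    }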
@@ -417,90 +417,141 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, | |||
417 | WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000); | 417 | WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000); |
418 | } | 418 | } |
419 | 419 | ||
420 | /* | 420 | static int r600_hdmi_find_free_block(struct drm_device *dev) |
421 | * enable/disable the HDMI engine | 421 | { |
422 | */ | 422 | struct radeon_device *rdev = dev->dev_private; |
423 | void r600_hdmi_enable(struct drm_encoder *encoder, int enable) | 423 | struct drm_encoder *encoder; |
424 | struct radeon_encoder *radeon_encoder; | ||
425 | bool free_blocks[3] = { true, true, true }; | ||
426 | |||
427 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
428 | radeon_encoder = to_radeon_encoder(encoder); | ||
429 | switch (radeon_encoder->hdmi_offset) { | ||
430 | case R600_HDMI_BLOCK1: | ||
431 | free_blocks[0] = false; | ||
432 | break; | ||
433 | case R600_HDMI_BLOCK2: | ||
434 | free_blocks[1] = false; | ||
435 | break; | ||
436 | case R600_HDMI_BLOCK3: | ||
437 | free_blocks[2] = false; | ||
438 | break; | ||
439 | } | ||
440 | } | ||
441 | |||
442 | if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) { | ||
443 | return free_blocks[0] ? R600_HDMI_BLOCK1 : 0; | ||
444 | } else if (rdev->family >= CHIP_R600) { | ||
445 | if (free_blocks[0]) | ||
446 | return R600_HDMI_BLOCK1; | ||
447 | else if (free_blocks[1]) | ||
448 | return R600_HDMI_BLOCK2; | ||
449 | } | ||
450 | return 0; | ||
451 | } | ||
452 | |||
453 | static void r600_hdmi_assign_block(struct drm_encoder *encoder) | ||
424 | { | 454 | { |
425 | struct drm_device *dev = encoder->dev; | 455 | struct drm_device *dev = encoder->dev; |
426 | struct radeon_device *rdev = dev->dev_private; | 456 | struct radeon_device *rdev = dev->dev_private; |
427 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 457 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
428 | uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; | 458 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
429 | 459 | ||
430 | if (!offset) | 460 | if (!dig) { |
461 | dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n"); | ||
431 | return; | 462 | return; |
463 | } | ||
432 | 464 | ||
433 | DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset); | 465 | if (ASIC_IS_DCE4(rdev)) { |
434 | 466 | /* TODO */ | |
435 | /* some version of atombios ignore the enable HDMI flag | 467 | } else if (ASIC_IS_DCE3(rdev)) { |
436 | * so enabling/disabling HDMI was moved here for TMDS1+2 */ | 468 | radeon_encoder->hdmi_offset = dig->dig_encoder ? |
437 | switch (radeon_encoder->encoder_id) { | 469 | R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1; |
438 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | 470 | if (ASIC_IS_DCE32(rdev)) |
439 | WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4); | 471 | radeon_encoder->hdmi_config_offset = dig->dig_encoder ? |
440 | WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0); | 472 | R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1; |
441 | break; | 473 | } else if (rdev->family >= CHIP_R600) { |
442 | 474 | radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev); | |
443 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
444 | WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4); | ||
445 | WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0); | ||
446 | break; | ||
447 | |||
448 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
449 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
450 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
451 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
452 | /* This part is doubtful in my opinion */ | ||
453 | WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0); | ||
454 | break; | ||
455 | |||
456 | default: | ||
457 | DRM_ERROR("unknown HDMI output type\n"); | ||
458 | break; | ||
459 | } | 475 | } |
460 | } | 476 | } |
461 | 477 | ||
462 | /* | 478 | /* |
463 | * determine at which register offset the HDMI encoder is | 479 | * enable the HDMI engine |
464 | */ | 480 | */ |
465 | void r600_hdmi_init(struct drm_encoder *encoder) | 481 | void r600_hdmi_enable(struct drm_encoder *encoder) |
466 | { | 482 | { |
483 | struct drm_device *dev = encoder->dev; | ||
484 | struct radeon_device *rdev = dev->dev_private; | ||
467 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 485 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
468 | 486 | ||
469 | switch (radeon_encoder->encoder_id) { | 487 | if (!radeon_encoder->hdmi_offset) { |
470 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | 488 | r600_hdmi_assign_block(encoder); |
471 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 489 | if (!radeon_encoder->hdmi_offset) { |
472 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 490 | dev_warn(rdev->dev, "Could not find HDMI block for " |
473 | radeon_encoder->hdmi_offset = R600_HDMI_TMDS1; | 491 | "0x%x encoder\n", radeon_encoder->encoder_id); |
474 | break; | 492 | return; |
475 | 493 | } | |
476 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | 494 | } |
477 | switch (r600_audio_tmds_index(encoder)) { | 495 | |
478 | case 0: | 496 | if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { |
479 | radeon_encoder->hdmi_offset = R600_HDMI_TMDS1; | 497 | WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1); |
498 | } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { | ||
499 | int offset = radeon_encoder->hdmi_offset; | ||
500 | switch (radeon_encoder->encoder_id) { | ||
501 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
502 | WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4); | ||
503 | WREG32(offset + R600_HDMI_ENABLE, 0x101); | ||
480 | break; | 504 | break; |
481 | case 1: | 505 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: |
482 | radeon_encoder->hdmi_offset = R600_HDMI_TMDS2; | 506 | WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4); |
507 | WREG32(offset + R600_HDMI_ENABLE, 0x105); | ||
483 | break; | 508 | break; |
484 | default: | 509 | default: |
485 | radeon_encoder->hdmi_offset = 0; | 510 | dev_err(rdev->dev, "Unknown HDMI output type\n"); |
486 | break; | 511 | break; |
487 | } | 512 | } |
488 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 513 | } |
489 | radeon_encoder->hdmi_offset = R600_HDMI_TMDS2; | ||
490 | break; | ||
491 | 514 | ||
492 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 515 | DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n", |
493 | radeon_encoder->hdmi_offset = R600_HDMI_DIG; | 516 | radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); |
494 | break; | 517 | } |
495 | 518 | ||
496 | default: | 519 | /* |
497 | radeon_encoder->hdmi_offset = 0; | 520 | * disable the HDMI engine |
498 | break; | 521 | */ |
522 | void r600_hdmi_disable(struct drm_encoder *encoder) | ||
523 | { | ||
524 | struct drm_device *dev = encoder->dev; | ||
525 | struct radeon_device *rdev = dev->dev_private; | ||
526 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
527 | |||
528 | if (!radeon_encoder->hdmi_offset) { | ||
529 | dev_err(rdev->dev, "Disabling not enabled HDMI\n"); | ||
530 | return; | ||
499 | } | 531 | } |
500 | 532 | ||
501 | DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n", | 533 | DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n", |
502 | radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); | 534 | radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); |
535 | |||
536 | if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { | ||
537 | WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1); | ||
538 | } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { | ||
539 | int offset = radeon_encoder->hdmi_offset; | ||
540 | switch (radeon_encoder->encoder_id) { | ||
541 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
542 | WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4); | ||
543 | WREG32(offset + R600_HDMI_ENABLE, 0); | ||
544 | break; | ||
545 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
546 | WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4); | ||
547 | WREG32(offset + R600_HDMI_ENABLE, 0); | ||
548 | break; | ||
549 | default: | ||
550 | dev_err(rdev->dev, "Unknown HDMI output type\n"); | ||
551 | break; | ||
552 | } | ||
553 | } | ||
503 | 554 | ||
504 | /* TODO: make this configurable */ | 555 | radeon_encoder->hdmi_offset = 0; |
505 | radeon_encoder->hdmi_audio_workaround = 0; | 556 | radeon_encoder->hdmi_config_offset = 0; |
506 | } | 557 | } |
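The rewritten enable/disable paths above stop deriving the HDMI register offset from a TMDS index and instead assign one of three HDMI blocks per encoder via r600_hdmi_find_free_block() and r600_hdmi_assign_block(). A simplified sketch of the claim-the-first-free-block idea; the two-encoder bookkeeping and the nblocks=2 limit (mirroring the pre-DCE3 R600 path) are made up for the example, while the block offsets come from the r600_reg.h hunk below:

    #include <stdbool.h>
    #include <stdio.h>

    /* block base offsets as defined in the r600_reg.h hunk below */
    static const int hdmi_block[3] = { 0x7400, 0x7700, 0x7800 };

    /* per-encoder claimed offset, 0 meaning "none yet"; the two-encoder
     * array is made up for the example */
    static int hdmi_offset[2];

    /* same idea as r600_hdmi_find_free_block(): mark blocks already taken
     * by other encoders, then hand out the first one still free */
    static int find_free_block(int nencoders, int nblocks)
    {
            bool free_blocks[3] = { true, true, true };
            int e, b;

            for (e = 0; e < nencoders; e++)
                    for (b = 0; b < nblocks; b++)
                            if (hdmi_offset[e] == hdmi_block[b])
                                    free_blocks[b] = false;

            for (b = 0; b < nblocks; b++)
                    if (free_blocks[b])
                            return hdmi_block[b];
            return 0;       /* nothing left */
    }

    int main(void)
    {
            /* pre-DCE3 R600 only hands out the first two blocks, hence nblocks=2 */
            hdmi_offset[0] = find_free_block(2, 2);
            hdmi_offset[1] = find_free_block(2, 2);
            printf("encoder0 @ 0x%04x, encoder1 @ 0x%04x\n",
                   hdmi_offset[0], hdmi_offset[1]);
            return 0;
    }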
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h index d0e28ffdeda9..7b1d22370f6e 100644 --- a/drivers/gpu/drm/radeon/r600_reg.h +++ b/drivers/gpu/drm/radeon/r600_reg.h | |||
@@ -152,9 +152,9 @@ | |||
152 | #define R600_AUDIO_STATUS_BITS 0x73d8 | 152 | #define R600_AUDIO_STATUS_BITS 0x73d8 |
153 | 153 | ||
154 | /* HDMI base register addresses */ | 154 | /* HDMI base register addresses */ |
155 | #define R600_HDMI_TMDS1 0x7400 | 155 | #define R600_HDMI_BLOCK1 0x7400 |
156 | #define R600_HDMI_TMDS2 0x7700 | 156 | #define R600_HDMI_BLOCK2 0x7700 |
157 | #define R600_HDMI_DIG 0x7800 | 157 | #define R600_HDMI_BLOCK3 0x7800 |
158 | 158 | ||
159 | /* HDMI registers */ | 159 | /* HDMI registers */ |
160 | #define R600_HDMI_ENABLE 0x00 | 160 | #define R600_HDMI_ENABLE 0x00 |
@@ -185,4 +185,8 @@ | |||
185 | #define R600_HDMI_AUDIO_DEBUG_2 0xe8 | 185 | #define R600_HDMI_AUDIO_DEBUG_2 0xe8 |
186 | #define R600_HDMI_AUDIO_DEBUG_3 0xec | 186 | #define R600_HDMI_AUDIO_DEBUG_3 0xec |
187 | 187 | ||
188 | /* HDMI additional config base register addresses */ | ||
189 | #define R600_HDMI_CONFIG1 0x7600 | ||
190 | #define R600_HDMI_CONFIG2 0x7a00 | ||
191 | |||
188 | #endif | 192 | #endif |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 5b2e4d442823..59c1f8793e60 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -77,6 +77,55 @@ | |||
77 | #define CB_COLOR0_FRAG 0x280e0 | 77 | #define CB_COLOR0_FRAG 0x280e0 |
78 | #define CB_COLOR0_MASK 0x28100 | 78 | #define CB_COLOR0_MASK 0x28100 |
79 | 79 | ||
80 | #define SQ_ALU_CONST_CACHE_PS_0 0x28940 | ||
81 | #define SQ_ALU_CONST_CACHE_PS_1 0x28944 | ||
82 | #define SQ_ALU_CONST_CACHE_PS_2 0x28948 | ||
83 | #define SQ_ALU_CONST_CACHE_PS_3 0x2894c | ||
84 | #define SQ_ALU_CONST_CACHE_PS_4 0x28950 | ||
85 | #define SQ_ALU_CONST_CACHE_PS_5 0x28954 | ||
86 | #define SQ_ALU_CONST_CACHE_PS_6 0x28958 | ||
87 | #define SQ_ALU_CONST_CACHE_PS_7 0x2895c | ||
88 | #define SQ_ALU_CONST_CACHE_PS_8 0x28960 | ||
89 | #define SQ_ALU_CONST_CACHE_PS_9 0x28964 | ||
90 | #define SQ_ALU_CONST_CACHE_PS_10 0x28968 | ||
91 | #define SQ_ALU_CONST_CACHE_PS_11 0x2896c | ||
92 | #define SQ_ALU_CONST_CACHE_PS_12 0x28970 | ||
93 | #define SQ_ALU_CONST_CACHE_PS_13 0x28974 | ||
94 | #define SQ_ALU_CONST_CACHE_PS_14 0x28978 | ||
95 | #define SQ_ALU_CONST_CACHE_PS_15 0x2897c | ||
96 | #define SQ_ALU_CONST_CACHE_VS_0 0x28980 | ||
97 | #define SQ_ALU_CONST_CACHE_VS_1 0x28984 | ||
98 | #define SQ_ALU_CONST_CACHE_VS_2 0x28988 | ||
99 | #define SQ_ALU_CONST_CACHE_VS_3 0x2898c | ||
100 | #define SQ_ALU_CONST_CACHE_VS_4 0x28990 | ||
101 | #define SQ_ALU_CONST_CACHE_VS_5 0x28994 | ||
102 | #define SQ_ALU_CONST_CACHE_VS_6 0x28998 | ||
103 | #define SQ_ALU_CONST_CACHE_VS_7 0x2899c | ||
104 | #define SQ_ALU_CONST_CACHE_VS_8 0x289a0 | ||
105 | #define SQ_ALU_CONST_CACHE_VS_9 0x289a4 | ||
106 | #define SQ_ALU_CONST_CACHE_VS_10 0x289a8 | ||
107 | #define SQ_ALU_CONST_CACHE_VS_11 0x289ac | ||
108 | #define SQ_ALU_CONST_CACHE_VS_12 0x289b0 | ||
109 | #define SQ_ALU_CONST_CACHE_VS_13 0x289b4 | ||
110 | #define SQ_ALU_CONST_CACHE_VS_14 0x289b8 | ||
111 | #define SQ_ALU_CONST_CACHE_VS_15 0x289bc | ||
112 | #define SQ_ALU_CONST_CACHE_GS_0 0x289c0 | ||
113 | #define SQ_ALU_CONST_CACHE_GS_1 0x289c4 | ||
114 | #define SQ_ALU_CONST_CACHE_GS_2 0x289c8 | ||
115 | #define SQ_ALU_CONST_CACHE_GS_3 0x289cc | ||
116 | #define SQ_ALU_CONST_CACHE_GS_4 0x289d0 | ||
117 | #define SQ_ALU_CONST_CACHE_GS_5 0x289d4 | ||
118 | #define SQ_ALU_CONST_CACHE_GS_6 0x289d8 | ||
119 | #define SQ_ALU_CONST_CACHE_GS_7 0x289dc | ||
120 | #define SQ_ALU_CONST_CACHE_GS_8 0x289e0 | ||
121 | #define SQ_ALU_CONST_CACHE_GS_9 0x289e4 | ||
122 | #define SQ_ALU_CONST_CACHE_GS_10 0x289e8 | ||
123 | #define SQ_ALU_CONST_CACHE_GS_11 0x289ec | ||
124 | #define SQ_ALU_CONST_CACHE_GS_12 0x289f0 | ||
125 | #define SQ_ALU_CONST_CACHE_GS_13 0x289f4 | ||
126 | #define SQ_ALU_CONST_CACHE_GS_14 0x289f8 | ||
127 | #define SQ_ALU_CONST_CACHE_GS_15 0x289fc | ||
128 | |||
80 | #define CONFIG_MEMSIZE 0x5428 | 129 | #define CONFIG_MEMSIZE 0x5428 |
81 | #define CONFIG_CNTL 0x5424 | 130 | #define CONFIG_CNTL 0x5424 |
82 | #define CP_STAT 0x8680 | 131 | #define CP_STAT 0x8680 |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 829e26e8a4bb..034218c3dbbb 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -91,6 +91,8 @@ extern int radeon_tv; | |||
91 | extern int radeon_new_pll; | 91 | extern int radeon_new_pll; |
92 | extern int radeon_dynpm; | 92 | extern int radeon_dynpm; |
93 | extern int radeon_audio; | 93 | extern int radeon_audio; |
94 | extern int radeon_disp_priority; | ||
95 | extern int radeon_hw_i2c; | ||
94 | 96 | ||
95 | /* | 97 | /* |
96 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 98 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
@@ -168,6 +170,7 @@ struct radeon_clock { | |||
168 | * Power management | 170 | * Power management |
169 | */ | 171 | */ |
170 | int radeon_pm_init(struct radeon_device *rdev); | 172 | int radeon_pm_init(struct radeon_device *rdev); |
173 | void radeon_pm_fini(struct radeon_device *rdev); | ||
171 | void radeon_pm_compute_clocks(struct radeon_device *rdev); | 174 | void radeon_pm_compute_clocks(struct radeon_device *rdev); |
172 | void radeon_combios_get_power_modes(struct radeon_device *rdev); | 175 | void radeon_combios_get_power_modes(struct radeon_device *rdev); |
173 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 176 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
@@ -687,6 +690,7 @@ struct radeon_pm { | |||
687 | bool downclocked; | 690 | bool downclocked; |
688 | int active_crtcs; | 691 | int active_crtcs; |
689 | int req_vblank; | 692 | int req_vblank; |
693 | bool vblank_sync; | ||
690 | fixed20_12 max_bandwidth; | 694 | fixed20_12 max_bandwidth; |
691 | fixed20_12 igp_sideport_mclk; | 695 | fixed20_12 igp_sideport_mclk; |
692 | fixed20_12 igp_system_mclk; | 696 | fixed20_12 igp_system_mclk; |
@@ -697,6 +701,7 @@ struct radeon_pm { | |||
697 | fixed20_12 ht_bandwidth; | 701 | fixed20_12 ht_bandwidth; |
698 | fixed20_12 core_bandwidth; | 702 | fixed20_12 core_bandwidth; |
699 | fixed20_12 sclk; | 703 | fixed20_12 sclk; |
704 | fixed20_12 mclk; | ||
700 | fixed20_12 needed_bandwidth; | 705 | fixed20_12 needed_bandwidth; |
701 | /* XXX: use a define for num power modes */ | 706 | /* XXX: use a define for num power modes */ |
702 | struct radeon_power_state power_state[8]; | 707 | struct radeon_power_state power_state[8]; |
@@ -707,6 +712,7 @@ struct radeon_pm { | |||
707 | struct radeon_power_state *requested_power_state; | 712 | struct radeon_power_state *requested_power_state; |
708 | struct radeon_pm_clock_info *requested_clock_mode; | 713 | struct radeon_pm_clock_info *requested_clock_mode; |
709 | struct radeon_power_state *default_power_state; | 714 | struct radeon_power_state *default_power_state; |
715 | struct radeon_i2c_chan *i2c_bus; | ||
710 | }; | 716 | }; |
711 | 717 | ||
712 | 718 | ||
@@ -729,8 +735,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev, | |||
729 | struct drm_info_list *files, | 735 | struct drm_info_list *files, |
730 | unsigned nfiles); | 736 | unsigned nfiles); |
731 | int radeon_debugfs_fence_init(struct radeon_device *rdev); | 737 | int radeon_debugfs_fence_init(struct radeon_device *rdev); |
732 | int r100_debugfs_rbbm_init(struct radeon_device *rdev); | ||
733 | int r100_debugfs_cp_init(struct radeon_device *rdev); | ||
734 | 738 | ||
735 | 739 | ||
736 | /* | 740 | /* |
@@ -782,7 +786,7 @@ struct radeon_asic { | |||
782 | int (*set_surface_reg)(struct radeon_device *rdev, int reg, | 786 | int (*set_surface_reg)(struct radeon_device *rdev, int reg, |
783 | uint32_t tiling_flags, uint32_t pitch, | 787 | uint32_t tiling_flags, uint32_t pitch, |
784 | uint32_t offset, uint32_t obj_size); | 788 | uint32_t offset, uint32_t obj_size); |
785 | int (*clear_surface_reg)(struct radeon_device *rdev, int reg); | 789 | void (*clear_surface_reg)(struct radeon_device *rdev, int reg); |
786 | void (*bandwidth_update)(struct radeon_device *rdev); | 790 | void (*bandwidth_update)(struct radeon_device *rdev); |
787 | void (*hpd_init)(struct radeon_device *rdev); | 791 | void (*hpd_init)(struct radeon_device *rdev); |
788 | void (*hpd_fini)(struct radeon_device *rdev); | 792 | void (*hpd_fini)(struct radeon_device *rdev); |
@@ -862,6 +866,12 @@ union radeon_asic_config { | |||
862 | struct rv770_asic rv770; | 866 | struct rv770_asic rv770; |
863 | }; | 867 | }; |
864 | 868 | ||
869 | /* | ||
870 | * asic initialization from radeon_asic.c | ||
871 | */ | ||
872 | void radeon_agp_disable(struct radeon_device *rdev); | ||
873 | int radeon_asic_init(struct radeon_device *rdev); | ||
874 | |||
865 | 875 | ||
866 | /* | 876 | /* |
867 | * IOCTL. | 877 | * IOCTL. |
@@ -1172,6 +1182,8 @@ extern void radeon_gart_restore(struct radeon_device *rdev); | |||
1172 | extern int radeon_modeset_init(struct radeon_device *rdev); | 1182 | extern int radeon_modeset_init(struct radeon_device *rdev); |
1173 | extern void radeon_modeset_fini(struct radeon_device *rdev); | 1183 | extern void radeon_modeset_fini(struct radeon_device *rdev); |
1174 | extern bool radeon_card_posted(struct radeon_device *rdev); | 1184 | extern bool radeon_card_posted(struct radeon_device *rdev); |
1185 | extern void radeon_update_bandwidth_info(struct radeon_device *rdev); | ||
1186 | extern void radeon_update_display_priority(struct radeon_device *rdev); | ||
1175 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); | 1187 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); |
1176 | extern int radeon_clocks_init(struct radeon_device *rdev); | 1188 | extern int radeon_clocks_init(struct radeon_device *rdev); |
1177 | extern void radeon_clocks_fini(struct radeon_device *rdev); | 1189 | extern void radeon_clocks_fini(struct radeon_device *rdev); |
@@ -1188,51 +1200,6 @@ extern int radeon_resume_kms(struct drm_device *dev); | |||
1188 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); | 1200 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); |
1189 | 1201 | ||
1190 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ | 1202 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ |
1191 | struct r100_mc_save { | ||
1192 | u32 GENMO_WT; | ||
1193 | u32 CRTC_EXT_CNTL; | ||
1194 | u32 CRTC_GEN_CNTL; | ||
1195 | u32 CRTC2_GEN_CNTL; | ||
1196 | u32 CUR_OFFSET; | ||
1197 | u32 CUR2_OFFSET; | ||
1198 | }; | ||
1199 | extern void r100_cp_disable(struct radeon_device *rdev); | ||
1200 | extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); | ||
1201 | extern void r100_cp_fini(struct radeon_device *rdev); | ||
1202 | extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev); | ||
1203 | extern int r100_pci_gart_init(struct radeon_device *rdev); | ||
1204 | extern void r100_pci_gart_fini(struct radeon_device *rdev); | ||
1205 | extern int r100_pci_gart_enable(struct radeon_device *rdev); | ||
1206 | extern void r100_pci_gart_disable(struct radeon_device *rdev); | ||
1207 | extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | ||
1208 | extern int r100_debugfs_mc_info_init(struct radeon_device *rdev); | ||
1209 | extern int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
1210 | extern void r100_ib_fini(struct radeon_device *rdev); | ||
1211 | extern int r100_ib_init(struct radeon_device *rdev); | ||
1212 | extern void r100_irq_disable(struct radeon_device *rdev); | ||
1213 | extern int r100_irq_set(struct radeon_device *rdev); | ||
1214 | extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); | ||
1215 | extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); | ||
1216 | extern void r100_vram_init_sizes(struct radeon_device *rdev); | ||
1217 | extern void r100_wb_disable(struct radeon_device *rdev); | ||
1218 | extern void r100_wb_fini(struct radeon_device *rdev); | ||
1219 | extern int r100_wb_init(struct radeon_device *rdev); | ||
1220 | extern void r100_hdp_reset(struct radeon_device *rdev); | ||
1221 | extern int r100_rb2d_reset(struct radeon_device *rdev); | ||
1222 | extern int r100_cp_reset(struct radeon_device *rdev); | ||
1223 | extern void r100_vga_render_disable(struct radeon_device *rdev); | ||
1224 | extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | ||
1225 | struct radeon_cs_packet *pkt, | ||
1226 | struct radeon_bo *robj); | ||
1227 | extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, | ||
1228 | struct radeon_cs_packet *pkt, | ||
1229 | const unsigned *auth, unsigned n, | ||
1230 | radeon_packet0_check_t check); | ||
1231 | extern int r100_cs_packet_parse(struct radeon_cs_parser *p, | ||
1232 | struct radeon_cs_packet *pkt, | ||
1233 | unsigned idx); | ||
1234 | extern void r100_enable_bm(struct radeon_device *rdev); | ||
1235 | extern void r100_set_common_regs(struct radeon_device *rdev); | ||
1236 | 1203 | ||
1237 | /* rv200,rv250,rv280 */ | 1204 | /* rv200,rv250,rv280 */ |
1238 | extern void r200_set_safe_registers(struct radeon_device *rdev); | 1205 | extern void r200_set_safe_registers(struct radeon_device *rdev); |
@@ -1322,7 +1289,8 @@ extern int r600_audio_tmds_index(struct drm_encoder *encoder); | |||
1322 | extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); | 1289 | extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); |
1323 | extern void r600_audio_fini(struct radeon_device *rdev); | 1290 | extern void r600_audio_fini(struct radeon_device *rdev); |
1324 | extern void r600_hdmi_init(struct drm_encoder *encoder); | 1291 | extern void r600_hdmi_init(struct drm_encoder *encoder); |
1325 | extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable); | 1292 | extern void r600_hdmi_enable(struct drm_encoder *encoder); |
1293 | extern void r600_hdmi_disable(struct drm_encoder *encoder); | ||
1326 | extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); | 1294 | extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); |
1327 | extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); | 1295 | extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); |
1328 | extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, | 1296 | extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c new file mode 100644 index 000000000000..a4b4bc9fa322 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -0,0 +1,772 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | |||
29 | #include <linux/console.h> | ||
30 | #include <drm/drmP.h> | ||
31 | #include <drm/drm_crtc_helper.h> | ||
32 | #include <drm/radeon_drm.h> | ||
33 | #include <linux/vgaarb.h> | ||
34 | #include <linux/vga_switcheroo.h> | ||
35 | #include "radeon_reg.h" | ||
36 | #include "radeon.h" | ||
37 | #include "radeon_asic.h" | ||
38 | #include "atom.h" | ||
39 | |||
40 | /* | ||
41 | * Register accessor functions. | ||
42 | */ | ||
43 | static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg) | ||
44 | { | ||
45 | DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); | ||
46 | BUG_ON(1); | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | ||
51 | { | ||
52 | DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", | ||
53 | reg, v); | ||
54 | BUG_ON(1); | ||
55 | } | ||
56 | |||
57 | static void radeon_register_accessor_init(struct radeon_device *rdev) | ||
58 | { | ||
59 | rdev->mc_rreg = &radeon_invalid_rreg; | ||
60 | rdev->mc_wreg = &radeon_invalid_wreg; | ||
61 | rdev->pll_rreg = &radeon_invalid_rreg; | ||
62 | rdev->pll_wreg = &radeon_invalid_wreg; | ||
63 | rdev->pciep_rreg = &radeon_invalid_rreg; | ||
64 | rdev->pciep_wreg = &radeon_invalid_wreg; | ||
65 | |||
66 | /* Don't change order as we are overriding accessors. */ | ||
67 | if (rdev->family < CHIP_RV515) { | ||
68 | rdev->pcie_reg_mask = 0xff; | ||
69 | } else { | ||
70 | rdev->pcie_reg_mask = 0x7ff; | ||
71 | } | ||
72 | /* FIXME: not sure here */ | ||
73 | if (rdev->family <= CHIP_R580) { | ||
74 | rdev->pll_rreg = &r100_pll_rreg; | ||
75 | rdev->pll_wreg = &r100_pll_wreg; | ||
76 | } | ||
77 | if (rdev->family >= CHIP_R420) { | ||
78 | rdev->mc_rreg = &r420_mc_rreg; | ||
79 | rdev->mc_wreg = &r420_mc_wreg; | ||
80 | } | ||
81 | if (rdev->family >= CHIP_RV515) { | ||
82 | rdev->mc_rreg = &rv515_mc_rreg; | ||
83 | rdev->mc_wreg = &rv515_mc_wreg; | ||
84 | } | ||
85 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { | ||
86 | rdev->mc_rreg = &rs400_mc_rreg; | ||
87 | rdev->mc_wreg = &rs400_mc_wreg; | ||
88 | } | ||
89 | if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { | ||
90 | rdev->mc_rreg = &rs690_mc_rreg; | ||
91 | rdev->mc_wreg = &rs690_mc_wreg; | ||
92 | } | ||
93 | if (rdev->family == CHIP_RS600) { | ||
94 | rdev->mc_rreg = &rs600_mc_rreg; | ||
95 | rdev->mc_wreg = &rs600_mc_wreg; | ||
96 | } | ||
97 | if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { | ||
98 | rdev->pciep_rreg = &r600_pciep_rreg; | ||
99 | rdev->pciep_wreg = &r600_pciep_wreg; | ||
100 | } | ||
101 | } | ||
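radeon_register_accessor_init() starts every register-accessor pointer at a stub that complains and BUGs, then overrides the pointers per chip family, so a missing override fails loudly instead of silently touching the wrong block. A compact userspace sketch of that defaulting pattern (struct device, family_a_rreg() and the family cutoff are illustrative, not driver code):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef uint32_t (*rreg_t)(uint32_t reg);

    /* default accessor: refuse loudly, like radeon_invalid_rreg() */
    static uint32_t invalid_rreg(uint32_t reg)
    {
            fprintf(stderr, "invalid read of register 0x%04x\n", reg);
            abort();
            return 0;       /* not reached */
    }

    /* family-specific accessor; the body stands in for real MMIO */
    static uint32_t family_a_rreg(uint32_t reg)
    {
            return 0xdead0000u | reg;
    }

    struct device {
            int family;
            rreg_t mc_rreg;
    };

    /* mirror of the init pattern: start from the invalid stub, then
     * override only for families that actually have the block */
    static void accessor_init(struct device *dev)
    {
            dev->mc_rreg = invalid_rreg;
            if (dev->family >= 1)           /* made-up family cutoff */
                    dev->mc_rreg = family_a_rreg;
    }

    int main(void)
    {
            struct device dev = { .family = 1 };

            accessor_init(&dev);
            printf("0x%08x\n", dev.mc_rreg(0x10));
            return 0;
    }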
102 | |||
103 | |||
104 | /* helper to disable agp */ | ||
105 | void radeon_agp_disable(struct radeon_device *rdev) | ||
106 | { | ||
107 | rdev->flags &= ~RADEON_IS_AGP; | ||
108 | if (rdev->family >= CHIP_R600) { | ||
109 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
110 | rdev->flags |= RADEON_IS_PCIE; | ||
111 | } else if (rdev->family >= CHIP_RV515 || | ||
112 | rdev->family == CHIP_RV380 || | ||
113 | rdev->family == CHIP_RV410 || | ||
114 | rdev->family == CHIP_R423) { | ||
115 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
116 | rdev->flags |= RADEON_IS_PCIE; | ||
117 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | ||
118 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | ||
119 | } else { | ||
120 | DRM_INFO("Forcing AGP to PCI mode\n"); | ||
121 | rdev->flags |= RADEON_IS_PCI; | ||
122 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | ||
123 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | ||
124 | } | ||
125 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * ASIC | ||
130 | */ | ||
131 | static struct radeon_asic r100_asic = { | ||
132 | .init = &r100_init, | ||
133 | .fini = &r100_fini, | ||
134 | .suspend = &r100_suspend, | ||
135 | .resume = &r100_resume, | ||
136 | .vga_set_state = &r100_vga_set_state, | ||
137 | .gpu_reset = &r100_gpu_reset, | ||
138 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | ||
139 | .gart_set_page = &r100_pci_gart_set_page, | ||
140 | .cp_commit = &r100_cp_commit, | ||
141 | .ring_start = &r100_ring_start, | ||
142 | .ring_test = &r100_ring_test, | ||
143 | .ring_ib_execute = &r100_ring_ib_execute, | ||
144 | .irq_set = &r100_irq_set, | ||
145 | .irq_process = &r100_irq_process, | ||
146 | .get_vblank_counter = &r100_get_vblank_counter, | ||
147 | .fence_ring_emit = &r100_fence_ring_emit, | ||
148 | .cs_parse = &r100_cs_parse, | ||
149 | .copy_blit = &r100_copy_blit, | ||
150 | .copy_dma = NULL, | ||
151 | .copy = &r100_copy_blit, | ||
152 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
153 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
154 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
155 | .set_memory_clock = NULL, | ||
156 | .get_pcie_lanes = NULL, | ||
157 | .set_pcie_lanes = NULL, | ||
158 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
159 | .set_surface_reg = r100_set_surface_reg, | ||
160 | .clear_surface_reg = r100_clear_surface_reg, | ||
161 | .bandwidth_update = &r100_bandwidth_update, | ||
162 | .hpd_init = &r100_hpd_init, | ||
163 | .hpd_fini = &r100_hpd_fini, | ||
164 | .hpd_sense = &r100_hpd_sense, | ||
165 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
166 | .ioctl_wait_idle = NULL, | ||
167 | }; | ||
168 | |||
169 | static struct radeon_asic r200_asic = { | ||
170 | .init = &r100_init, | ||
171 | .fini = &r100_fini, | ||
172 | .suspend = &r100_suspend, | ||
173 | .resume = &r100_resume, | ||
174 | .vga_set_state = &r100_vga_set_state, | ||
175 | .gpu_reset = &r100_gpu_reset, | ||
176 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | ||
177 | .gart_set_page = &r100_pci_gart_set_page, | ||
178 | .cp_commit = &r100_cp_commit, | ||
179 | .ring_start = &r100_ring_start, | ||
180 | .ring_test = &r100_ring_test, | ||
181 | .ring_ib_execute = &r100_ring_ib_execute, | ||
182 | .irq_set = &r100_irq_set, | ||
183 | .irq_process = &r100_irq_process, | ||
184 | .get_vblank_counter = &r100_get_vblank_counter, | ||
185 | .fence_ring_emit = &r100_fence_ring_emit, | ||
186 | .cs_parse = &r100_cs_parse, | ||
187 | .copy_blit = &r100_copy_blit, | ||
188 | .copy_dma = &r200_copy_dma, | ||
189 | .copy = &r100_copy_blit, | ||
190 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
191 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
192 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
193 | .set_memory_clock = NULL, | ||
194 | .set_pcie_lanes = NULL, | ||
195 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
196 | .set_surface_reg = r100_set_surface_reg, | ||
197 | .clear_surface_reg = r100_clear_surface_reg, | ||
198 | .bandwidth_update = &r100_bandwidth_update, | ||
199 | .hpd_init = &r100_hpd_init, | ||
200 | .hpd_fini = &r100_hpd_fini, | ||
201 | .hpd_sense = &r100_hpd_sense, | ||
202 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
203 | .ioctl_wait_idle = NULL, | ||
204 | }; | ||
205 | |||
206 | static struct radeon_asic r300_asic = { | ||
207 | .init = &r300_init, | ||
208 | .fini = &r300_fini, | ||
209 | .suspend = &r300_suspend, | ||
210 | .resume = &r300_resume, | ||
211 | .vga_set_state = &r100_vga_set_state, | ||
212 | .gpu_reset = &r300_gpu_reset, | ||
213 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | ||
214 | .gart_set_page = &r100_pci_gart_set_page, | ||
215 | .cp_commit = &r100_cp_commit, | ||
216 | .ring_start = &r300_ring_start, | ||
217 | .ring_test = &r100_ring_test, | ||
218 | .ring_ib_execute = &r100_ring_ib_execute, | ||
219 | .irq_set = &r100_irq_set, | ||
220 | .irq_process = &r100_irq_process, | ||
221 | .get_vblank_counter = &r100_get_vblank_counter, | ||
222 | .fence_ring_emit = &r300_fence_ring_emit, | ||
223 | .cs_parse = &r300_cs_parse, | ||
224 | .copy_blit = &r100_copy_blit, | ||
225 | .copy_dma = &r200_copy_dma, | ||
226 | .copy = &r100_copy_blit, | ||
227 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
228 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
229 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
230 | .set_memory_clock = NULL, | ||
231 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
232 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
233 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
234 | .set_surface_reg = r100_set_surface_reg, | ||
235 | .clear_surface_reg = r100_clear_surface_reg, | ||
236 | .bandwidth_update = &r100_bandwidth_update, | ||
237 | .hpd_init = &r100_hpd_init, | ||
238 | .hpd_fini = &r100_hpd_fini, | ||
239 | .hpd_sense = &r100_hpd_sense, | ||
240 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
241 | .ioctl_wait_idle = NULL, | ||
242 | }; | ||
243 | |||
244 | static struct radeon_asic r300_asic_pcie = { | ||
245 | .init = &r300_init, | ||
246 | .fini = &r300_fini, | ||
247 | .suspend = &r300_suspend, | ||
248 | .resume = &r300_resume, | ||
249 | .vga_set_state = &r100_vga_set_state, | ||
250 | .gpu_reset = &r300_gpu_reset, | ||
251 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
252 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
253 | .cp_commit = &r100_cp_commit, | ||
254 | .ring_start = &r300_ring_start, | ||
255 | .ring_test = &r100_ring_test, | ||
256 | .ring_ib_execute = &r100_ring_ib_execute, | ||
257 | .irq_set = &r100_irq_set, | ||
258 | .irq_process = &r100_irq_process, | ||
259 | .get_vblank_counter = &r100_get_vblank_counter, | ||
260 | .fence_ring_emit = &r300_fence_ring_emit, | ||
261 | .cs_parse = &r300_cs_parse, | ||
262 | .copy_blit = &r100_copy_blit, | ||
263 | .copy_dma = &r200_copy_dma, | ||
264 | .copy = &r100_copy_blit, | ||
265 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
266 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
267 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
268 | .set_memory_clock = NULL, | ||
269 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
270 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
271 | .set_surface_reg = r100_set_surface_reg, | ||
272 | .clear_surface_reg = r100_clear_surface_reg, | ||
273 | .bandwidth_update = &r100_bandwidth_update, | ||
274 | .hpd_init = &r100_hpd_init, | ||
275 | .hpd_fini = &r100_hpd_fini, | ||
276 | .hpd_sense = &r100_hpd_sense, | ||
277 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
278 | .ioctl_wait_idle = NULL, | ||
279 | }; | ||
280 | |||
281 | static struct radeon_asic r420_asic = { | ||
282 | .init = &r420_init, | ||
283 | .fini = &r420_fini, | ||
284 | .suspend = &r420_suspend, | ||
285 | .resume = &r420_resume, | ||
286 | .vga_set_state = &r100_vga_set_state, | ||
287 | .gpu_reset = &r300_gpu_reset, | ||
288 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
289 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
290 | .cp_commit = &r100_cp_commit, | ||
291 | .ring_start = &r300_ring_start, | ||
292 | .ring_test = &r100_ring_test, | ||
293 | .ring_ib_execute = &r100_ring_ib_execute, | ||
294 | .irq_set = &r100_irq_set, | ||
295 | .irq_process = &r100_irq_process, | ||
296 | .get_vblank_counter = &r100_get_vblank_counter, | ||
297 | .fence_ring_emit = &r300_fence_ring_emit, | ||
298 | .cs_parse = &r300_cs_parse, | ||
299 | .copy_blit = &r100_copy_blit, | ||
300 | .copy_dma = &r200_copy_dma, | ||
301 | .copy = &r100_copy_blit, | ||
302 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
303 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
304 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
305 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
306 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
307 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
308 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
309 | .set_surface_reg = r100_set_surface_reg, | ||
310 | .clear_surface_reg = r100_clear_surface_reg, | ||
311 | .bandwidth_update = &r100_bandwidth_update, | ||
312 | .hpd_init = &r100_hpd_init, | ||
313 | .hpd_fini = &r100_hpd_fini, | ||
314 | .hpd_sense = &r100_hpd_sense, | ||
315 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
316 | .ioctl_wait_idle = NULL, | ||
317 | }; | ||
318 | |||
319 | static struct radeon_asic rs400_asic = { | ||
320 | .init = &rs400_init, | ||
321 | .fini = &rs400_fini, | ||
322 | .suspend = &rs400_suspend, | ||
323 | .resume = &rs400_resume, | ||
324 | .vga_set_state = &r100_vga_set_state, | ||
325 | .gpu_reset = &r300_gpu_reset, | ||
326 | .gart_tlb_flush = &rs400_gart_tlb_flush, | ||
327 | .gart_set_page = &rs400_gart_set_page, | ||
328 | .cp_commit = &r100_cp_commit, | ||
329 | .ring_start = &r300_ring_start, | ||
330 | .ring_test = &r100_ring_test, | ||
331 | .ring_ib_execute = &r100_ring_ib_execute, | ||
332 | .irq_set = &r100_irq_set, | ||
333 | .irq_process = &r100_irq_process, | ||
334 | .get_vblank_counter = &r100_get_vblank_counter, | ||
335 | .fence_ring_emit = &r300_fence_ring_emit, | ||
336 | .cs_parse = &r300_cs_parse, | ||
337 | .copy_blit = &r100_copy_blit, | ||
338 | .copy_dma = &r200_copy_dma, | ||
339 | .copy = &r100_copy_blit, | ||
340 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
341 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
342 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
343 | .set_memory_clock = NULL, | ||
344 | .get_pcie_lanes = NULL, | ||
345 | .set_pcie_lanes = NULL, | ||
346 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
347 | .set_surface_reg = r100_set_surface_reg, | ||
348 | .clear_surface_reg = r100_clear_surface_reg, | ||
349 | .bandwidth_update = &r100_bandwidth_update, | ||
350 | .hpd_init = &r100_hpd_init, | ||
351 | .hpd_fini = &r100_hpd_fini, | ||
352 | .hpd_sense = &r100_hpd_sense, | ||
353 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
354 | .ioctl_wait_idle = NULL, | ||
355 | }; | ||
356 | |||
357 | static struct radeon_asic rs600_asic = { | ||
358 | .init = &rs600_init, | ||
359 | .fini = &rs600_fini, | ||
360 | .suspend = &rs600_suspend, | ||
361 | .resume = &rs600_resume, | ||
362 | .vga_set_state = &r100_vga_set_state, | ||
363 | .gpu_reset = &r300_gpu_reset, | ||
364 | .gart_tlb_flush = &rs600_gart_tlb_flush, | ||
365 | .gart_set_page = &rs600_gart_set_page, | ||
366 | .cp_commit = &r100_cp_commit, | ||
367 | .ring_start = &r300_ring_start, | ||
368 | .ring_test = &r100_ring_test, | ||
369 | .ring_ib_execute = &r100_ring_ib_execute, | ||
370 | .irq_set = &rs600_irq_set, | ||
371 | .irq_process = &rs600_irq_process, | ||
372 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
373 | .fence_ring_emit = &r300_fence_ring_emit, | ||
374 | .cs_parse = &r300_cs_parse, | ||
375 | .copy_blit = &r100_copy_blit, | ||
376 | .copy_dma = &r200_copy_dma, | ||
377 | .copy = &r100_copy_blit, | ||
378 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
379 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
380 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
381 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
382 | .get_pcie_lanes = NULL, | ||
383 | .set_pcie_lanes = NULL, | ||
384 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
385 | .set_surface_reg = r100_set_surface_reg, | ||
386 | .clear_surface_reg = r100_clear_surface_reg, | ||
387 | .bandwidth_update = &rs600_bandwidth_update, | ||
388 | .hpd_init = &rs600_hpd_init, | ||
389 | .hpd_fini = &rs600_hpd_fini, | ||
390 | .hpd_sense = &rs600_hpd_sense, | ||
391 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
392 | .ioctl_wait_idle = NULL, | ||
393 | }; | ||
394 | |||
395 | static struct radeon_asic rs690_asic = { | ||
396 | .init = &rs690_init, | ||
397 | .fini = &rs690_fini, | ||
398 | .suspend = &rs690_suspend, | ||
399 | .resume = &rs690_resume, | ||
400 | .vga_set_state = &r100_vga_set_state, | ||
401 | .gpu_reset = &r300_gpu_reset, | ||
402 | .gart_tlb_flush = &rs400_gart_tlb_flush, | ||
403 | .gart_set_page = &rs400_gart_set_page, | ||
404 | .cp_commit = &r100_cp_commit, | ||
405 | .ring_start = &r300_ring_start, | ||
406 | .ring_test = &r100_ring_test, | ||
407 | .ring_ib_execute = &r100_ring_ib_execute, | ||
408 | .irq_set = &rs600_irq_set, | ||
409 | .irq_process = &rs600_irq_process, | ||
410 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
411 | .fence_ring_emit = &r300_fence_ring_emit, | ||
412 | .cs_parse = &r300_cs_parse, | ||
413 | .copy_blit = &r100_copy_blit, | ||
414 | .copy_dma = &r200_copy_dma, | ||
415 | .copy = &r200_copy_dma, | ||
416 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
417 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
418 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
419 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
420 | .get_pcie_lanes = NULL, | ||
421 | .set_pcie_lanes = NULL, | ||
422 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
423 | .set_surface_reg = r100_set_surface_reg, | ||
424 | .clear_surface_reg = r100_clear_surface_reg, | ||
425 | .bandwidth_update = &rs690_bandwidth_update, | ||
426 | .hpd_init = &rs600_hpd_init, | ||
427 | .hpd_fini = &rs600_hpd_fini, | ||
428 | .hpd_sense = &rs600_hpd_sense, | ||
429 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
430 | .ioctl_wait_idle = NULL, | ||
431 | }; | ||
432 | |||
433 | static struct radeon_asic rv515_asic = { | ||
434 | .init = &rv515_init, | ||
435 | .fini = &rv515_fini, | ||
436 | .suspend = &rv515_suspend, | ||
437 | .resume = &rv515_resume, | ||
438 | .vga_set_state = &r100_vga_set_state, | ||
439 | .gpu_reset = &rv515_gpu_reset, | ||
440 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
441 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
442 | .cp_commit = &r100_cp_commit, | ||
443 | .ring_start = &rv515_ring_start, | ||
444 | .ring_test = &r100_ring_test, | ||
445 | .ring_ib_execute = &r100_ring_ib_execute, | ||
446 | .irq_set = &rs600_irq_set, | ||
447 | .irq_process = &rs600_irq_process, | ||
448 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
449 | .fence_ring_emit = &r300_fence_ring_emit, | ||
450 | .cs_parse = &r300_cs_parse, | ||
451 | .copy_blit = &r100_copy_blit, | ||
452 | .copy_dma = &r200_copy_dma, | ||
453 | .copy = &r100_copy_blit, | ||
454 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
455 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
456 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
457 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
458 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
459 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
460 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
461 | .set_surface_reg = r100_set_surface_reg, | ||
462 | .clear_surface_reg = r100_clear_surface_reg, | ||
463 | .bandwidth_update = &rv515_bandwidth_update, | ||
464 | .hpd_init = &rs600_hpd_init, | ||
465 | .hpd_fini = &rs600_hpd_fini, | ||
466 | .hpd_sense = &rs600_hpd_sense, | ||
467 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
468 | .ioctl_wait_idle = NULL, | ||
469 | }; | ||
470 | |||
471 | static struct radeon_asic r520_asic = { | ||
472 | .init = &r520_init, | ||
473 | .fini = &rv515_fini, | ||
474 | .suspend = &rv515_suspend, | ||
475 | .resume = &r520_resume, | ||
476 | .vga_set_state = &r100_vga_set_state, | ||
477 | .gpu_reset = &rv515_gpu_reset, | ||
478 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
479 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
480 | .cp_commit = &r100_cp_commit, | ||
481 | .ring_start = &rv515_ring_start, | ||
482 | .ring_test = &r100_ring_test, | ||
483 | .ring_ib_execute = &r100_ring_ib_execute, | ||
484 | .irq_set = &rs600_irq_set, | ||
485 | .irq_process = &rs600_irq_process, | ||
486 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
487 | .fence_ring_emit = &r300_fence_ring_emit, | ||
488 | .cs_parse = &r300_cs_parse, | ||
489 | .copy_blit = &r100_copy_blit, | ||
490 | .copy_dma = &r200_copy_dma, | ||
491 | .copy = &r100_copy_blit, | ||
492 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
493 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
494 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
495 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
496 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
497 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
498 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
499 | .set_surface_reg = r100_set_surface_reg, | ||
500 | .clear_surface_reg = r100_clear_surface_reg, | ||
501 | .bandwidth_update = &rv515_bandwidth_update, | ||
502 | .hpd_init = &rs600_hpd_init, | ||
503 | .hpd_fini = &rs600_hpd_fini, | ||
504 | .hpd_sense = &rs600_hpd_sense, | ||
505 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
506 | .ioctl_wait_idle = NULL, | ||
507 | }; | ||
508 | |||
509 | static struct radeon_asic r600_asic = { | ||
510 | .init = &r600_init, | ||
511 | .fini = &r600_fini, | ||
512 | .suspend = &r600_suspend, | ||
513 | .resume = &r600_resume, | ||
514 | .cp_commit = &r600_cp_commit, | ||
515 | .vga_set_state = &r600_vga_set_state, | ||
516 | .gpu_reset = &r600_gpu_reset, | ||
517 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
518 | .gart_set_page = &rs600_gart_set_page, | ||
519 | .ring_test = &r600_ring_test, | ||
520 | .ring_ib_execute = &r600_ring_ib_execute, | ||
521 | .irq_set = &r600_irq_set, | ||
522 | .irq_process = &r600_irq_process, | ||
523 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
524 | .fence_ring_emit = &r600_fence_ring_emit, | ||
525 | .cs_parse = &r600_cs_parse, | ||
526 | .copy_blit = &r600_copy_blit, | ||
527 | .copy_dma = &r600_copy_blit, | ||
528 | .copy = &r600_copy_blit, | ||
529 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
530 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
531 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
532 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
533 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
534 | .set_pcie_lanes = NULL, | ||
535 | .set_clock_gating = NULL, | ||
536 | .set_surface_reg = r600_set_surface_reg, | ||
537 | .clear_surface_reg = r600_clear_surface_reg, | ||
538 | .bandwidth_update = &rv515_bandwidth_update, | ||
539 | .hpd_init = &r600_hpd_init, | ||
540 | .hpd_fini = &r600_hpd_fini, | ||
541 | .hpd_sense = &r600_hpd_sense, | ||
542 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
543 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
544 | }; | ||
545 | |||
546 | static struct radeon_asic rs780_asic = { | ||
547 | .init = &r600_init, | ||
548 | .fini = &r600_fini, | ||
549 | .suspend = &r600_suspend, | ||
550 | .resume = &r600_resume, | ||
551 | .cp_commit = &r600_cp_commit, | ||
552 | .vga_set_state = &r600_vga_set_state, | ||
553 | .gpu_reset = &r600_gpu_reset, | ||
554 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
555 | .gart_set_page = &rs600_gart_set_page, | ||
556 | .ring_test = &r600_ring_test, | ||
557 | .ring_ib_execute = &r600_ring_ib_execute, | ||
558 | .irq_set = &r600_irq_set, | ||
559 | .irq_process = &r600_irq_process, | ||
560 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
561 | .fence_ring_emit = &r600_fence_ring_emit, | ||
562 | .cs_parse = &r600_cs_parse, | ||
563 | .copy_blit = &r600_copy_blit, | ||
564 | .copy_dma = &r600_copy_blit, | ||
565 | .copy = &r600_copy_blit, | ||
566 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
567 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
568 | .get_memory_clock = NULL, | ||
569 | .set_memory_clock = NULL, | ||
570 | .get_pcie_lanes = NULL, | ||
571 | .set_pcie_lanes = NULL, | ||
572 | .set_clock_gating = NULL, | ||
573 | .set_surface_reg = r600_set_surface_reg, | ||
574 | .clear_surface_reg = r600_clear_surface_reg, | ||
575 | .bandwidth_update = &rs690_bandwidth_update, | ||
576 | .hpd_init = &r600_hpd_init, | ||
577 | .hpd_fini = &r600_hpd_fini, | ||
578 | .hpd_sense = &r600_hpd_sense, | ||
579 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
580 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
581 | }; | ||
582 | |||
583 | static struct radeon_asic rv770_asic = { | ||
584 | .init = &rv770_init, | ||
585 | .fini = &rv770_fini, | ||
586 | .suspend = &rv770_suspend, | ||
587 | .resume = &rv770_resume, | ||
588 | .cp_commit = &r600_cp_commit, | ||
589 | .gpu_reset = &rv770_gpu_reset, | ||
590 | .vga_set_state = &r600_vga_set_state, | ||
591 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
592 | .gart_set_page = &rs600_gart_set_page, | ||
593 | .ring_test = &r600_ring_test, | ||
594 | .ring_ib_execute = &r600_ring_ib_execute, | ||
595 | .irq_set = &r600_irq_set, | ||
596 | .irq_process = &r600_irq_process, | ||
597 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
598 | .fence_ring_emit = &r600_fence_ring_emit, | ||
599 | .cs_parse = &r600_cs_parse, | ||
600 | .copy_blit = &r600_copy_blit, | ||
601 | .copy_dma = &r600_copy_blit, | ||
602 | .copy = &r600_copy_blit, | ||
603 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
604 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
605 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
606 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
607 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
608 | .set_pcie_lanes = NULL, | ||
609 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
610 | .set_surface_reg = r600_set_surface_reg, | ||
611 | .clear_surface_reg = r600_clear_surface_reg, | ||
612 | .bandwidth_update = &rv515_bandwidth_update, | ||
613 | .hpd_init = &r600_hpd_init, | ||
614 | .hpd_fini = &r600_hpd_fini, | ||
615 | .hpd_sense = &r600_hpd_sense, | ||
616 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
617 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
618 | }; | ||
619 | |||
620 | static struct radeon_asic evergreen_asic = { | ||
621 | .init = &evergreen_init, | ||
622 | .fini = &evergreen_fini, | ||
623 | .suspend = &evergreen_suspend, | ||
624 | .resume = &evergreen_resume, | ||
625 | .cp_commit = NULL, | ||
626 | .gpu_reset = &evergreen_gpu_reset, | ||
627 | .vga_set_state = &r600_vga_set_state, | ||
628 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
629 | .gart_set_page = &rs600_gart_set_page, | ||
630 | .ring_test = NULL, | ||
631 | .ring_ib_execute = NULL, | ||
632 | .irq_set = NULL, | ||
633 | .irq_process = NULL, | ||
634 | .get_vblank_counter = NULL, | ||
635 | .fence_ring_emit = NULL, | ||
636 | .cs_parse = NULL, | ||
637 | .copy_blit = NULL, | ||
638 | .copy_dma = NULL, | ||
639 | .copy = NULL, | ||
640 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
641 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
642 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
643 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
644 | .set_pcie_lanes = NULL, | ||
645 | .set_clock_gating = NULL, | ||
646 | .set_surface_reg = r600_set_surface_reg, | ||
647 | .clear_surface_reg = r600_clear_surface_reg, | ||
648 | .bandwidth_update = &evergreen_bandwidth_update, | ||
649 | .hpd_init = &evergreen_hpd_init, | ||
650 | .hpd_fini = &evergreen_hpd_fini, | ||
651 | .hpd_sense = &evergreen_hpd_sense, | ||
652 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | ||
653 | }; | ||
654 | |||
655 | int radeon_asic_init(struct radeon_device *rdev) | ||
656 | { | ||
657 | radeon_register_accessor_init(rdev); | ||
658 | switch (rdev->family) { | ||
659 | case CHIP_R100: | ||
660 | case CHIP_RV100: | ||
661 | case CHIP_RS100: | ||
662 | case CHIP_RV200: | ||
663 | case CHIP_RS200: | ||
664 | rdev->asic = &r100_asic; | ||
665 | break; | ||
666 | case CHIP_R200: | ||
667 | case CHIP_RV250: | ||
668 | case CHIP_RS300: | ||
669 | case CHIP_RV280: | ||
670 | rdev->asic = &r200_asic; | ||
671 | break; | ||
672 | case CHIP_R300: | ||
673 | case CHIP_R350: | ||
674 | case CHIP_RV350: | ||
675 | case CHIP_RV380: | ||
676 | if (rdev->flags & RADEON_IS_PCIE) | ||
677 | rdev->asic = &r300_asic_pcie; | ||
678 | else | ||
679 | rdev->asic = &r300_asic; | ||
680 | break; | ||
681 | case CHIP_R420: | ||
682 | case CHIP_R423: | ||
683 | case CHIP_RV410: | ||
684 | rdev->asic = &r420_asic; | ||
685 | break; | ||
686 | case CHIP_RS400: | ||
687 | case CHIP_RS480: | ||
688 | rdev->asic = &rs400_asic; | ||
689 | break; | ||
690 | case CHIP_RS600: | ||
691 | rdev->asic = &rs600_asic; | ||
692 | break; | ||
693 | case CHIP_RS690: | ||
694 | case CHIP_RS740: | ||
695 | rdev->asic = &rs690_asic; | ||
696 | break; | ||
697 | case CHIP_RV515: | ||
698 | rdev->asic = &rv515_asic; | ||
699 | break; | ||
700 | case CHIP_R520: | ||
701 | case CHIP_RV530: | ||
702 | case CHIP_RV560: | ||
703 | case CHIP_RV570: | ||
704 | case CHIP_R580: | ||
705 | rdev->asic = &r520_asic; | ||
706 | break; | ||
707 | case CHIP_R600: | ||
708 | case CHIP_RV610: | ||
709 | case CHIP_RV630: | ||
710 | case CHIP_RV620: | ||
711 | case CHIP_RV635: | ||
712 | case CHIP_RV670: | ||
713 | rdev->asic = &r600_asic; | ||
714 | break; | ||
715 | case CHIP_RS780: | ||
716 | case CHIP_RS880: | ||
717 | rdev->asic = &rs780_asic; | ||
718 | break; | ||
719 | case CHIP_RV770: | ||
720 | case CHIP_RV730: | ||
721 | case CHIP_RV710: | ||
722 | case CHIP_RV740: | ||
723 | rdev->asic = &rv770_asic; | ||
724 | break; | ||
725 | case CHIP_CEDAR: | ||
726 | case CHIP_REDWOOD: | ||
727 | case CHIP_JUNIPER: | ||
728 | case CHIP_CYPRESS: | ||
729 | case CHIP_HEMLOCK: | ||
730 | rdev->asic = &evergreen_asic; | ||
731 | break; | ||
732 | default: | ||
733 | /* FIXME: not supported yet */ | ||
734 | return -EINVAL; | ||
735 | } | ||
736 | |||
737 | if (rdev->flags & RADEON_IS_IGP) { | ||
738 | rdev->asic->get_memory_clock = NULL; | ||
739 | rdev->asic->set_memory_clock = NULL; | ||
740 | } | ||
741 | |||
742 | /* set the number of crtcs */ | ||
743 | if (rdev->flags & RADEON_SINGLE_CRTC) | ||
744 | rdev->num_crtc = 1; | ||
745 | else { | ||
746 | if (ASIC_IS_DCE4(rdev)) | ||
747 | rdev->num_crtc = 6; | ||
748 | else | ||
749 | rdev->num_crtc = 2; | ||
750 | } | ||
751 | |||
752 | return 0; | ||
753 | } | ||
754 | |||
755 | /* | ||
756 | * Wrapper around modesetting bits. Move to radeon_clocks.c? | ||
757 | */ | ||
758 | int radeon_clocks_init(struct radeon_device *rdev) | ||
759 | { | ||
760 | int r; | ||
761 | |||
762 | r = radeon_static_clocks_init(rdev->ddev); | ||
763 | if (r) { | ||
764 | return r; | ||
765 | } | ||
766 | DRM_INFO("Clocks initialized !\n"); | ||
767 | return 0; | ||
768 | } | ||
769 | |||
770 | void radeon_clocks_fini(struct radeon_device *rdev) | ||
771 | { | ||
772 | } | ||
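The tables above are a classic function-pointer dispatch pattern: one static struct radeon_asic per chip family, selected exactly once in radeon_asic_init() and called through indirection everywhere else, so family-specific knowledge never leaks past table selection. The following stand-alone C sketch shows only the shape of that pattern; every identifier in it (struct gpu_ops, gpu_ops_select(), CHIP_A and so on) is invented for illustration and is not part of the radeon driver.

    #include <stdio.h>

    /* Hypothetical per-family operation table, mirroring the shape of
     * struct radeon_asic: one function pointer per hardware hook. */
    struct gpu_ops {
        int  (*init)(void);
        void (*ring_start)(void);
    };

    static int  family_a_init(void)       { puts("family A: init"); return 0; }
    static void family_a_ring_start(void) { puts("family A: ring start"); }
    static int  family_b_init(void)       { puts("family B: init"); return 0; }
    static void family_b_ring_start(void) { puts("family B: ring start"); }

    static const struct gpu_ops family_a_ops = {
        .init       = family_a_init,
        .ring_start = family_a_ring_start,
    };

    static const struct gpu_ops family_b_ops = {
        .init       = family_b_init,
        .ring_start = family_b_ring_start,
    };

    enum chip_family { CHIP_A, CHIP_B };

    /* Analogue of radeon_asic_init(): the switch on the family is the only
     * place that needs to know which table applies. */
    static const struct gpu_ops *gpu_ops_select(enum chip_family family)
    {
        switch (family) {
        case CHIP_A: return &family_a_ops;
        case CHIP_B: return &family_b_ops;
        default:     return NULL;
        }
    }

    int main(void)
    {
        const struct gpu_ops *ops = gpu_ops_select(CHIP_B);

        if (!ops || ops->init())
            return 1;
        ops->ring_start();  /* callers never re-test the chip family */
        return 0;
    }

Compare radeon_asic_init() above: the switch on rdev->family is the single place that cares which chip is present; once it assigns rdev->asic, the rest of the driver simply calls through the table.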
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index d3a157b2bcb7..a0b8280663d1 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -45,10 +45,18 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); | |||
45 | /* | 45 | /* |
46 | * r100,rv100,rs100,rv200,rs200 | 46 | * r100,rv100,rs100,rv200,rs200 |
47 | */ | 47 | */ |
48 | extern int r100_init(struct radeon_device *rdev); | 48 | struct r100_mc_save { |
49 | extern void r100_fini(struct radeon_device *rdev); | 49 | u32 GENMO_WT; |
50 | extern int r100_suspend(struct radeon_device *rdev); | 50 | u32 CRTC_EXT_CNTL; |
51 | extern int r100_resume(struct radeon_device *rdev); | 51 | u32 CRTC_GEN_CNTL; |
52 | u32 CRTC2_GEN_CNTL; | ||
53 | u32 CUR_OFFSET; | ||
54 | u32 CUR2_OFFSET; | ||
55 | }; | ||
56 | int r100_init(struct radeon_device *rdev); | ||
57 | void r100_fini(struct radeon_device *rdev); | ||
58 | int r100_suspend(struct radeon_device *rdev); | ||
59 | int r100_resume(struct radeon_device *rdev); | ||
52 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); | 60 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); |
53 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 61 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
54 | void r100_vga_set_state(struct radeon_device *rdev, bool state); | 62 | void r100_vga_set_state(struct radeon_device *rdev, bool state); |
@@ -73,7 +81,7 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
73 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, | 81 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, |
74 | uint32_t tiling_flags, uint32_t pitch, | 82 | uint32_t tiling_flags, uint32_t pitch, |
75 | uint32_t offset, uint32_t obj_size); | 83 | uint32_t offset, uint32_t obj_size); |
76 | int r100_clear_surface_reg(struct radeon_device *rdev, int reg); | 84 | void r100_clear_surface_reg(struct radeon_device *rdev, int reg); |
77 | void r100_bandwidth_update(struct radeon_device *rdev); | 85 | void r100_bandwidth_update(struct radeon_device *rdev); |
78 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 86 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
79 | int r100_ring_test(struct radeon_device *rdev); | 87 | int r100_ring_test(struct radeon_device *rdev); |
@@ -82,44 +90,42 @@ void r100_hpd_fini(struct radeon_device *rdev); | |||
82 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 90 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
83 | void r100_hpd_set_polarity(struct radeon_device *rdev, | 91 | void r100_hpd_set_polarity(struct radeon_device *rdev, |
84 | enum radeon_hpd_id hpd); | 92 | enum radeon_hpd_id hpd); |
85 | 93 | int r100_debugfs_rbbm_init(struct radeon_device *rdev); | |
86 | static struct radeon_asic r100_asic = { | 94 | int r100_debugfs_cp_init(struct radeon_device *rdev); |
87 | .init = &r100_init, | 95 | void r100_cp_disable(struct radeon_device *rdev); |
88 | .fini = &r100_fini, | 96 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); |
89 | .suspend = &r100_suspend, | 97 | void r100_cp_fini(struct radeon_device *rdev); |
90 | .resume = &r100_resume, | 98 | int r100_pci_gart_init(struct radeon_device *rdev); |
91 | .vga_set_state = &r100_vga_set_state, | 99 | void r100_pci_gart_fini(struct radeon_device *rdev); |
92 | .gpu_reset = &r100_gpu_reset, | 100 | int r100_pci_gart_enable(struct radeon_device *rdev); |
93 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | 101 | void r100_pci_gart_disable(struct radeon_device *rdev); |
94 | .gart_set_page = &r100_pci_gart_set_page, | 102 | int r100_debugfs_mc_info_init(struct radeon_device *rdev); |
95 | .cp_commit = &r100_cp_commit, | 103 | int r100_gui_wait_for_idle(struct radeon_device *rdev); |
96 | .ring_start = &r100_ring_start, | 104 | void r100_ib_fini(struct radeon_device *rdev); |
97 | .ring_test = &r100_ring_test, | 105 | int r100_ib_init(struct radeon_device *rdev); |
98 | .ring_ib_execute = &r100_ring_ib_execute, | 106 | void r100_irq_disable(struct radeon_device *rdev); |
99 | .irq_set = &r100_irq_set, | 107 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); |
100 | .irq_process = &r100_irq_process, | 108 | void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); |
101 | .get_vblank_counter = &r100_get_vblank_counter, | 109 | void r100_vram_init_sizes(struct radeon_device *rdev); |
102 | .fence_ring_emit = &r100_fence_ring_emit, | 110 | void r100_wb_disable(struct radeon_device *rdev); |
103 | .cs_parse = &r100_cs_parse, | 111 | void r100_wb_fini(struct radeon_device *rdev); |
104 | .copy_blit = &r100_copy_blit, | 112 | int r100_wb_init(struct radeon_device *rdev); |
105 | .copy_dma = NULL, | 113 | void r100_hdp_reset(struct radeon_device *rdev); |
106 | .copy = &r100_copy_blit, | 114 | int r100_rb2d_reset(struct radeon_device *rdev); |
107 | .get_engine_clock = &radeon_legacy_get_engine_clock, | 115 | int r100_cp_reset(struct radeon_device *rdev); |
108 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 116 | void r100_vga_render_disable(struct radeon_device *rdev); |
109 | .get_memory_clock = &radeon_legacy_get_memory_clock, | 117 | int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, |
110 | .set_memory_clock = NULL, | 118 | struct radeon_cs_packet *pkt, |
111 | .get_pcie_lanes = NULL, | 119 | struct radeon_bo *robj); |
112 | .set_pcie_lanes = NULL, | 120 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, |
113 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 121 | struct radeon_cs_packet *pkt, |
114 | .set_surface_reg = r100_set_surface_reg, | 122 | const unsigned *auth, unsigned n, |
115 | .clear_surface_reg = r100_clear_surface_reg, | 123 | radeon_packet0_check_t check); |
116 | .bandwidth_update = &r100_bandwidth_update, | 124 | int r100_cs_packet_parse(struct radeon_cs_parser *p, |
117 | .hpd_init = &r100_hpd_init, | 125 | struct radeon_cs_packet *pkt, |
118 | .hpd_fini = &r100_hpd_fini, | 126 | unsigned idx); |
119 | .hpd_sense = &r100_hpd_sense, | 127 | void r100_enable_bm(struct radeon_device *rdev); |
120 | .hpd_set_polarity = &r100_hpd_set_polarity, | 128 | void r100_set_common_regs(struct radeon_device *rdev); |
121 | .ioctl_wait_idle = NULL, | ||
122 | }; | ||
123 | 129 | ||
124 | /* | 130 | /* |
125 | * r200,rv250,rs300,rv280 | 131 | * r200,rv250,rs300,rv280 |
@@ -129,43 +135,6 @@ extern int r200_copy_dma(struct radeon_device *rdev, | |||
129 | uint64_t dst_offset, | 135 | uint64_t dst_offset, |
130 | unsigned num_pages, | 136 | unsigned num_pages, |
131 | struct radeon_fence *fence); | 137 | struct radeon_fence *fence); |
132 | static struct radeon_asic r200_asic = { | ||
133 | .init = &r100_init, | ||
134 | .fini = &r100_fini, | ||
135 | .suspend = &r100_suspend, | ||
136 | .resume = &r100_resume, | ||
137 | .vga_set_state = &r100_vga_set_state, | ||
138 | .gpu_reset = &r100_gpu_reset, | ||
139 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | ||
140 | .gart_set_page = &r100_pci_gart_set_page, | ||
141 | .cp_commit = &r100_cp_commit, | ||
142 | .ring_start = &r100_ring_start, | ||
143 | .ring_test = &r100_ring_test, | ||
144 | .ring_ib_execute = &r100_ring_ib_execute, | ||
145 | .irq_set = &r100_irq_set, | ||
146 | .irq_process = &r100_irq_process, | ||
147 | .get_vblank_counter = &r100_get_vblank_counter, | ||
148 | .fence_ring_emit = &r100_fence_ring_emit, | ||
149 | .cs_parse = &r100_cs_parse, | ||
150 | .copy_blit = &r100_copy_blit, | ||
151 | .copy_dma = &r200_copy_dma, | ||
152 | .copy = &r100_copy_blit, | ||
153 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
154 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
155 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
156 | .set_memory_clock = NULL, | ||
157 | .set_pcie_lanes = NULL, | ||
158 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
159 | .set_surface_reg = r100_set_surface_reg, | ||
160 | .clear_surface_reg = r100_clear_surface_reg, | ||
161 | .bandwidth_update = &r100_bandwidth_update, | ||
162 | .hpd_init = &r100_hpd_init, | ||
163 | .hpd_fini = &r100_hpd_fini, | ||
164 | .hpd_sense = &r100_hpd_sense, | ||
165 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
166 | .ioctl_wait_idle = NULL, | ||
167 | }; | ||
168 | |||
169 | 138 | ||
170 | /* | 139 | /* |
171 | * r300,r350,rv350,rv380 | 140 | * r300,r350,rv350,rv380 |
@@ -186,82 +155,6 @@ extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v | |||
186 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); | 155 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); |
187 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); | 156 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); |
188 | 157 | ||
189 | static struct radeon_asic r300_asic = { | ||
190 | .init = &r300_init, | ||
191 | .fini = &r300_fini, | ||
192 | .suspend = &r300_suspend, | ||
193 | .resume = &r300_resume, | ||
194 | .vga_set_state = &r100_vga_set_state, | ||
195 | .gpu_reset = &r300_gpu_reset, | ||
196 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | ||
197 | .gart_set_page = &r100_pci_gart_set_page, | ||
198 | .cp_commit = &r100_cp_commit, | ||
199 | .ring_start = &r300_ring_start, | ||
200 | .ring_test = &r100_ring_test, | ||
201 | .ring_ib_execute = &r100_ring_ib_execute, | ||
202 | .irq_set = &r100_irq_set, | ||
203 | .irq_process = &r100_irq_process, | ||
204 | .get_vblank_counter = &r100_get_vblank_counter, | ||
205 | .fence_ring_emit = &r300_fence_ring_emit, | ||
206 | .cs_parse = &r300_cs_parse, | ||
207 | .copy_blit = &r100_copy_blit, | ||
208 | .copy_dma = &r200_copy_dma, | ||
209 | .copy = &r100_copy_blit, | ||
210 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
211 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
212 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
213 | .set_memory_clock = NULL, | ||
214 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
215 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
216 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
217 | .set_surface_reg = r100_set_surface_reg, | ||
218 | .clear_surface_reg = r100_clear_surface_reg, | ||
219 | .bandwidth_update = &r100_bandwidth_update, | ||
220 | .hpd_init = &r100_hpd_init, | ||
221 | .hpd_fini = &r100_hpd_fini, | ||
222 | .hpd_sense = &r100_hpd_sense, | ||
223 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
224 | .ioctl_wait_idle = NULL, | ||
225 | }; | ||
226 | |||
227 | |||
228 | static struct radeon_asic r300_asic_pcie = { | ||
229 | .init = &r300_init, | ||
230 | .fini = &r300_fini, | ||
231 | .suspend = &r300_suspend, | ||
232 | .resume = &r300_resume, | ||
233 | .vga_set_state = &r100_vga_set_state, | ||
234 | .gpu_reset = &r300_gpu_reset, | ||
235 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
236 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
237 | .cp_commit = &r100_cp_commit, | ||
238 | .ring_start = &r300_ring_start, | ||
239 | .ring_test = &r100_ring_test, | ||
240 | .ring_ib_execute = &r100_ring_ib_execute, | ||
241 | .irq_set = &r100_irq_set, | ||
242 | .irq_process = &r100_irq_process, | ||
243 | .get_vblank_counter = &r100_get_vblank_counter, | ||
244 | .fence_ring_emit = &r300_fence_ring_emit, | ||
245 | .cs_parse = &r300_cs_parse, | ||
246 | .copy_blit = &r100_copy_blit, | ||
247 | .copy_dma = &r200_copy_dma, | ||
248 | .copy = &r100_copy_blit, | ||
249 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
250 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
251 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
252 | .set_memory_clock = NULL, | ||
253 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
254 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
255 | .set_surface_reg = r100_set_surface_reg, | ||
256 | .clear_surface_reg = r100_clear_surface_reg, | ||
257 | .bandwidth_update = &r100_bandwidth_update, | ||
258 | .hpd_init = &r100_hpd_init, | ||
259 | .hpd_fini = &r100_hpd_fini, | ||
260 | .hpd_sense = &r100_hpd_sense, | ||
261 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
262 | .ioctl_wait_idle = NULL, | ||
263 | }; | ||
264 | |||
265 | /* | 158 | /* |
266 | * r420,r423,rv410 | 159 | * r420,r423,rv410 |
267 | */ | 160 | */ |
@@ -269,44 +162,6 @@ extern int r420_init(struct radeon_device *rdev); | |||
269 | extern void r420_fini(struct radeon_device *rdev); | 162 | extern void r420_fini(struct radeon_device *rdev); |
270 | extern int r420_suspend(struct radeon_device *rdev); | 163 | extern int r420_suspend(struct radeon_device *rdev); |
271 | extern int r420_resume(struct radeon_device *rdev); | 164 | extern int r420_resume(struct radeon_device *rdev); |
272 | static struct radeon_asic r420_asic = { | ||
273 | .init = &r420_init, | ||
274 | .fini = &r420_fini, | ||
275 | .suspend = &r420_suspend, | ||
276 | .resume = &r420_resume, | ||
277 | .vga_set_state = &r100_vga_set_state, | ||
278 | .gpu_reset = &r300_gpu_reset, | ||
279 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
280 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
281 | .cp_commit = &r100_cp_commit, | ||
282 | .ring_start = &r300_ring_start, | ||
283 | .ring_test = &r100_ring_test, | ||
284 | .ring_ib_execute = &r100_ring_ib_execute, | ||
285 | .irq_set = &r100_irq_set, | ||
286 | .irq_process = &r100_irq_process, | ||
287 | .get_vblank_counter = &r100_get_vblank_counter, | ||
288 | .fence_ring_emit = &r300_fence_ring_emit, | ||
289 | .cs_parse = &r300_cs_parse, | ||
290 | .copy_blit = &r100_copy_blit, | ||
291 | .copy_dma = &r200_copy_dma, | ||
292 | .copy = &r100_copy_blit, | ||
293 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
294 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
295 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
296 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
297 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
298 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
299 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
300 | .set_surface_reg = r100_set_surface_reg, | ||
301 | .clear_surface_reg = r100_clear_surface_reg, | ||
302 | .bandwidth_update = &r100_bandwidth_update, | ||
303 | .hpd_init = &r100_hpd_init, | ||
304 | .hpd_fini = &r100_hpd_fini, | ||
305 | .hpd_sense = &r100_hpd_sense, | ||
306 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
307 | .ioctl_wait_idle = NULL, | ||
308 | }; | ||
309 | |||
310 | 165 | ||
311 | /* | 166 | /* |
312 | * rs400,rs480 | 167 | * rs400,rs480 |
@@ -319,44 +174,6 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev); | |||
319 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 174 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
320 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 175 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
321 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 176 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
322 | static struct radeon_asic rs400_asic = { | ||
323 | .init = &rs400_init, | ||
324 | .fini = &rs400_fini, | ||
325 | .suspend = &rs400_suspend, | ||
326 | .resume = &rs400_resume, | ||
327 | .vga_set_state = &r100_vga_set_state, | ||
328 | .gpu_reset = &r300_gpu_reset, | ||
329 | .gart_tlb_flush = &rs400_gart_tlb_flush, | ||
330 | .gart_set_page = &rs400_gart_set_page, | ||
331 | .cp_commit = &r100_cp_commit, | ||
332 | .ring_start = &r300_ring_start, | ||
333 | .ring_test = &r100_ring_test, | ||
334 | .ring_ib_execute = &r100_ring_ib_execute, | ||
335 | .irq_set = &r100_irq_set, | ||
336 | .irq_process = &r100_irq_process, | ||
337 | .get_vblank_counter = &r100_get_vblank_counter, | ||
338 | .fence_ring_emit = &r300_fence_ring_emit, | ||
339 | .cs_parse = &r300_cs_parse, | ||
340 | .copy_blit = &r100_copy_blit, | ||
341 | .copy_dma = &r200_copy_dma, | ||
342 | .copy = &r100_copy_blit, | ||
343 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
344 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
345 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
346 | .set_memory_clock = NULL, | ||
347 | .get_pcie_lanes = NULL, | ||
348 | .set_pcie_lanes = NULL, | ||
349 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
350 | .set_surface_reg = r100_set_surface_reg, | ||
351 | .clear_surface_reg = r100_clear_surface_reg, | ||
352 | .bandwidth_update = &r100_bandwidth_update, | ||
353 | .hpd_init = &r100_hpd_init, | ||
354 | .hpd_fini = &r100_hpd_fini, | ||
355 | .hpd_sense = &r100_hpd_sense, | ||
356 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
357 | .ioctl_wait_idle = NULL, | ||
358 | }; | ||
359 | |||
360 | 177 | ||
361 | /* | 178 | /* |
362 | * rs600. | 179 | * rs600. |
@@ -379,45 +196,6 @@ bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | |||
379 | void rs600_hpd_set_polarity(struct radeon_device *rdev, | 196 | void rs600_hpd_set_polarity(struct radeon_device *rdev, |
380 | enum radeon_hpd_id hpd); | 197 | enum radeon_hpd_id hpd); |
381 | 198 | ||
382 | static struct radeon_asic rs600_asic = { | ||
383 | .init = &rs600_init, | ||
384 | .fini = &rs600_fini, | ||
385 | .suspend = &rs600_suspend, | ||
386 | .resume = &rs600_resume, | ||
387 | .vga_set_state = &r100_vga_set_state, | ||
388 | .gpu_reset = &r300_gpu_reset, | ||
389 | .gart_tlb_flush = &rs600_gart_tlb_flush, | ||
390 | .gart_set_page = &rs600_gart_set_page, | ||
391 | .cp_commit = &r100_cp_commit, | ||
392 | .ring_start = &r300_ring_start, | ||
393 | .ring_test = &r100_ring_test, | ||
394 | .ring_ib_execute = &r100_ring_ib_execute, | ||
395 | .irq_set = &rs600_irq_set, | ||
396 | .irq_process = &rs600_irq_process, | ||
397 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
398 | .fence_ring_emit = &r300_fence_ring_emit, | ||
399 | .cs_parse = &r300_cs_parse, | ||
400 | .copy_blit = &r100_copy_blit, | ||
401 | .copy_dma = &r200_copy_dma, | ||
402 | .copy = &r100_copy_blit, | ||
403 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
404 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
405 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
406 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
407 | .get_pcie_lanes = NULL, | ||
408 | .set_pcie_lanes = NULL, | ||
409 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
410 | .set_surface_reg = r100_set_surface_reg, | ||
411 | .clear_surface_reg = r100_clear_surface_reg, | ||
412 | .bandwidth_update = &rs600_bandwidth_update, | ||
413 | .hpd_init = &rs600_hpd_init, | ||
414 | .hpd_fini = &rs600_hpd_fini, | ||
415 | .hpd_sense = &rs600_hpd_sense, | ||
416 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
417 | .ioctl_wait_idle = NULL, | ||
418 | }; | ||
419 | |||
420 | |||
421 | /* | 199 | /* |
422 | * rs690,rs740 | 200 | * rs690,rs740 |
423 | */ | 201 | */ |
@@ -428,44 +206,6 @@ int rs690_suspend(struct radeon_device *rdev); | |||
428 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 206 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
429 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 207 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
430 | void rs690_bandwidth_update(struct radeon_device *rdev); | 208 | void rs690_bandwidth_update(struct radeon_device *rdev); |
431 | static struct radeon_asic rs690_asic = { | ||
432 | .init = &rs690_init, | ||
433 | .fini = &rs690_fini, | ||
434 | .suspend = &rs690_suspend, | ||
435 | .resume = &rs690_resume, | ||
436 | .vga_set_state = &r100_vga_set_state, | ||
437 | .gpu_reset = &r300_gpu_reset, | ||
438 | .gart_tlb_flush = &rs400_gart_tlb_flush, | ||
439 | .gart_set_page = &rs400_gart_set_page, | ||
440 | .cp_commit = &r100_cp_commit, | ||
441 | .ring_start = &r300_ring_start, | ||
442 | .ring_test = &r100_ring_test, | ||
443 | .ring_ib_execute = &r100_ring_ib_execute, | ||
444 | .irq_set = &rs600_irq_set, | ||
445 | .irq_process = &rs600_irq_process, | ||
446 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
447 | .fence_ring_emit = &r300_fence_ring_emit, | ||
448 | .cs_parse = &r300_cs_parse, | ||
449 | .copy_blit = &r100_copy_blit, | ||
450 | .copy_dma = &r200_copy_dma, | ||
451 | .copy = &r200_copy_dma, | ||
452 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
453 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
454 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
455 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
456 | .get_pcie_lanes = NULL, | ||
457 | .set_pcie_lanes = NULL, | ||
458 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
459 | .set_surface_reg = r100_set_surface_reg, | ||
460 | .clear_surface_reg = r100_clear_surface_reg, | ||
461 | .bandwidth_update = &rs690_bandwidth_update, | ||
462 | .hpd_init = &rs600_hpd_init, | ||
463 | .hpd_fini = &rs600_hpd_fini, | ||
464 | .hpd_sense = &rs600_hpd_sense, | ||
465 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
466 | .ioctl_wait_idle = NULL, | ||
467 | }; | ||
468 | |||
469 | 209 | ||
470 | /* | 210 | /* |
471 | * rv515 | 211 | * rv515 |
@@ -481,87 +221,12 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | |||
481 | void rv515_bandwidth_update(struct radeon_device *rdev); | 221 | void rv515_bandwidth_update(struct radeon_device *rdev); |
482 | int rv515_resume(struct radeon_device *rdev); | 222 | int rv515_resume(struct radeon_device *rdev); |
483 | int rv515_suspend(struct radeon_device *rdev); | 223 | int rv515_suspend(struct radeon_device *rdev); |
484 | static struct radeon_asic rv515_asic = { | ||
485 | .init = &rv515_init, | ||
486 | .fini = &rv515_fini, | ||
487 | .suspend = &rv515_suspend, | ||
488 | .resume = &rv515_resume, | ||
489 | .vga_set_state = &r100_vga_set_state, | ||
490 | .gpu_reset = &rv515_gpu_reset, | ||
491 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
492 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
493 | .cp_commit = &r100_cp_commit, | ||
494 | .ring_start = &rv515_ring_start, | ||
495 | .ring_test = &r100_ring_test, | ||
496 | .ring_ib_execute = &r100_ring_ib_execute, | ||
497 | .irq_set = &rs600_irq_set, | ||
498 | .irq_process = &rs600_irq_process, | ||
499 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
500 | .fence_ring_emit = &r300_fence_ring_emit, | ||
501 | .cs_parse = &r300_cs_parse, | ||
502 | .copy_blit = &r100_copy_blit, | ||
503 | .copy_dma = &r200_copy_dma, | ||
504 | .copy = &r100_copy_blit, | ||
505 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
506 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
507 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
508 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
509 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
510 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
511 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
512 | .set_surface_reg = r100_set_surface_reg, | ||
513 | .clear_surface_reg = r100_clear_surface_reg, | ||
514 | .bandwidth_update = &rv515_bandwidth_update, | ||
515 | .hpd_init = &rs600_hpd_init, | ||
516 | .hpd_fini = &rs600_hpd_fini, | ||
517 | .hpd_sense = &rs600_hpd_sense, | ||
518 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
519 | .ioctl_wait_idle = NULL, | ||
520 | }; | ||
521 | |||
522 | 224 | ||
523 | /* | 225 | /* |
524 | * r520,rv530,rv560,rv570,r580 | 226 | * r520,rv530,rv560,rv570,r580 |
525 | */ | 227 | */ |
526 | int r520_init(struct radeon_device *rdev); | 228 | int r520_init(struct radeon_device *rdev); |
527 | int r520_resume(struct radeon_device *rdev); | 229 | int r520_resume(struct radeon_device *rdev); |
528 | static struct radeon_asic r520_asic = { | ||
529 | .init = &r520_init, | ||
530 | .fini = &rv515_fini, | ||
531 | .suspend = &rv515_suspend, | ||
532 | .resume = &r520_resume, | ||
533 | .vga_set_state = &r100_vga_set_state, | ||
534 | .gpu_reset = &rv515_gpu_reset, | ||
535 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
536 | .gart_set_page = &rv370_pcie_gart_set_page, | ||
537 | .cp_commit = &r100_cp_commit, | ||
538 | .ring_start = &rv515_ring_start, | ||
539 | .ring_test = &r100_ring_test, | ||
540 | .ring_ib_execute = &r100_ring_ib_execute, | ||
541 | .irq_set = &rs600_irq_set, | ||
542 | .irq_process = &rs600_irq_process, | ||
543 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
544 | .fence_ring_emit = &r300_fence_ring_emit, | ||
545 | .cs_parse = &r300_cs_parse, | ||
546 | .copy_blit = &r100_copy_blit, | ||
547 | .copy_dma = &r200_copy_dma, | ||
548 | .copy = &r100_copy_blit, | ||
549 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
550 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
551 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
552 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
553 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
554 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
555 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
556 | .set_surface_reg = r100_set_surface_reg, | ||
557 | .clear_surface_reg = r100_clear_surface_reg, | ||
558 | .bandwidth_update = &rv515_bandwidth_update, | ||
559 | .hpd_init = &rs600_hpd_init, | ||
560 | .hpd_fini = &rs600_hpd_fini, | ||
561 | .hpd_sense = &rs600_hpd_sense, | ||
562 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
563 | .ioctl_wait_idle = NULL, | ||
564 | }; | ||
565 | 230 | ||
566 | /* | 231 | /* |
567 | * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880 | 232 | * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880 |
@@ -591,7 +256,7 @@ int r600_gpu_reset(struct radeon_device *rdev); | |||
591 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, | 256 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
592 | uint32_t tiling_flags, uint32_t pitch, | 257 | uint32_t tiling_flags, uint32_t pitch, |
593 | uint32_t offset, uint32_t obj_size); | 258 | uint32_t offset, uint32_t obj_size); |
594 | int r600_clear_surface_reg(struct radeon_device *rdev, int reg); | 259 | void r600_clear_surface_reg(struct radeon_device *rdev, int reg); |
595 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 260 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
596 | int r600_ring_test(struct radeon_device *rdev); | 261 | int r600_ring_test(struct radeon_device *rdev); |
597 | int r600_copy_blit(struct radeon_device *rdev, | 262 | int r600_copy_blit(struct radeon_device *rdev, |
@@ -604,43 +269,6 @@ void r600_hpd_set_polarity(struct radeon_device *rdev, | |||
604 | enum radeon_hpd_id hpd); | 269 | enum radeon_hpd_id hpd); |
605 | extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); | 270 | extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); |
606 | 271 | ||
607 | static struct radeon_asic r600_asic = { | ||
608 | .init = &r600_init, | ||
609 | .fini = &r600_fini, | ||
610 | .suspend = &r600_suspend, | ||
611 | .resume = &r600_resume, | ||
612 | .cp_commit = &r600_cp_commit, | ||
613 | .vga_set_state = &r600_vga_set_state, | ||
614 | .gpu_reset = &r600_gpu_reset, | ||
615 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
616 | .gart_set_page = &rs600_gart_set_page, | ||
617 | .ring_test = &r600_ring_test, | ||
618 | .ring_ib_execute = &r600_ring_ib_execute, | ||
619 | .irq_set = &r600_irq_set, | ||
620 | .irq_process = &r600_irq_process, | ||
621 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
622 | .fence_ring_emit = &r600_fence_ring_emit, | ||
623 | .cs_parse = &r600_cs_parse, | ||
624 | .copy_blit = &r600_copy_blit, | ||
625 | .copy_dma = &r600_copy_blit, | ||
626 | .copy = &r600_copy_blit, | ||
627 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
628 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
629 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
630 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
631 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
632 | .set_pcie_lanes = NULL, | ||
633 | .set_clock_gating = NULL, | ||
634 | .set_surface_reg = r600_set_surface_reg, | ||
635 | .clear_surface_reg = r600_clear_surface_reg, | ||
636 | .bandwidth_update = &rv515_bandwidth_update, | ||
637 | .hpd_init = &r600_hpd_init, | ||
638 | .hpd_fini = &r600_hpd_fini, | ||
639 | .hpd_sense = &r600_hpd_sense, | ||
640 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
641 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
642 | }; | ||
643 | |||
644 | /* | 272 | /* |
645 | * rv770,rv730,rv710,rv740 | 273 | * rv770,rv730,rv710,rv740 |
646 | */ | 274 | */ |
@@ -650,43 +278,6 @@ int rv770_suspend(struct radeon_device *rdev); | |||
650 | int rv770_resume(struct radeon_device *rdev); | 278 | int rv770_resume(struct radeon_device *rdev); |
651 | int rv770_gpu_reset(struct radeon_device *rdev); | 279 | int rv770_gpu_reset(struct radeon_device *rdev); |
652 | 280 | ||
653 | static struct radeon_asic rv770_asic = { | ||
654 | .init = &rv770_init, | ||
655 | .fini = &rv770_fini, | ||
656 | .suspend = &rv770_suspend, | ||
657 | .resume = &rv770_resume, | ||
658 | .cp_commit = &r600_cp_commit, | ||
659 | .gpu_reset = &rv770_gpu_reset, | ||
660 | .vga_set_state = &r600_vga_set_state, | ||
661 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
662 | .gart_set_page = &rs600_gart_set_page, | ||
663 | .ring_test = &r600_ring_test, | ||
664 | .ring_ib_execute = &r600_ring_ib_execute, | ||
665 | .irq_set = &r600_irq_set, | ||
666 | .irq_process = &r600_irq_process, | ||
667 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
668 | .fence_ring_emit = &r600_fence_ring_emit, | ||
669 | .cs_parse = &r600_cs_parse, | ||
670 | .copy_blit = &r600_copy_blit, | ||
671 | .copy_dma = &r600_copy_blit, | ||
672 | .copy = &r600_copy_blit, | ||
673 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
674 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
675 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
676 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
677 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
678 | .set_pcie_lanes = NULL, | ||
679 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
680 | .set_surface_reg = r600_set_surface_reg, | ||
681 | .clear_surface_reg = r600_clear_surface_reg, | ||
682 | .bandwidth_update = &rv515_bandwidth_update, | ||
683 | .hpd_init = &r600_hpd_init, | ||
684 | .hpd_fini = &r600_hpd_fini, | ||
685 | .hpd_sense = &r600_hpd_sense, | ||
686 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
687 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
688 | }; | ||
689 | |||
690 | /* | 281 | /* |
691 | * evergreen | 282 | * evergreen |
692 | */ | 283 | */ |
@@ -701,40 +292,4 @@ void evergreen_hpd_fini(struct radeon_device *rdev); | |||
701 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 292 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
702 | void evergreen_hpd_set_polarity(struct radeon_device *rdev, | 293 | void evergreen_hpd_set_polarity(struct radeon_device *rdev, |
703 | enum radeon_hpd_id hpd); | 294 | enum radeon_hpd_id hpd); |
704 | |||
705 | static struct radeon_asic evergreen_asic = { | ||
706 | .init = &evergreen_init, | ||
707 | .fini = &evergreen_fini, | ||
708 | .suspend = &evergreen_suspend, | ||
709 | .resume = &evergreen_resume, | ||
710 | .cp_commit = NULL, | ||
711 | .gpu_reset = &evergreen_gpu_reset, | ||
712 | .vga_set_state = &r600_vga_set_state, | ||
713 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | ||
714 | .gart_set_page = &rs600_gart_set_page, | ||
715 | .ring_test = NULL, | ||
716 | .ring_ib_execute = NULL, | ||
717 | .irq_set = NULL, | ||
718 | .irq_process = NULL, | ||
719 | .get_vblank_counter = NULL, | ||
720 | .fence_ring_emit = NULL, | ||
721 | .cs_parse = NULL, | ||
722 | .copy_blit = NULL, | ||
723 | .copy_dma = NULL, | ||
724 | .copy = NULL, | ||
725 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
726 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
727 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
728 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
729 | .set_pcie_lanes = NULL, | ||
730 | .set_clock_gating = NULL, | ||
731 | .set_surface_reg = r600_set_surface_reg, | ||
732 | .clear_surface_reg = r600_clear_surface_reg, | ||
733 | .bandwidth_update = &evergreen_bandwidth_update, | ||
734 | .hpd_init = &evergreen_hpd_init, | ||
735 | .hpd_fini = &evergreen_hpd_fini, | ||
736 | .hpd_sense = &evergreen_hpd_sense, | ||
737 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | ||
738 | }; | ||
739 | |||
740 | #endif | 295 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 93783b15c81d..1fff95505cf5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -75,46 +75,45 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
75 | memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); | 75 | memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); |
76 | i2c.valid = false; | 76 | i2c.valid = false; |
77 | 77 | ||
78 | atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); | 78 | if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { |
79 | 79 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); | |
80 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); | 80 | |
81 | 81 | for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { | |
82 | 82 | gpio = &i2c_info->asGPIO_Info[i]; | |
83 | for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { | 83 | |
84 | gpio = &i2c_info->asGPIO_Info[i]; | 84 | if (gpio->sucI2cId.ucAccess == id) { |
85 | 85 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | |
86 | if (gpio->sucI2cId.ucAccess == id) { | 86 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
87 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 87 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; |
88 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 88 | i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; |
89 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; | 89 | i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; |
90 | i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; | 90 | i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; |
91 | i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; | 91 | i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; |
92 | i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; | 92 | i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; |
93 | i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; | 93 | i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); |
94 | i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; | 94 | i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); |
95 | i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); | 95 | i2c.en_clk_mask = (1 << gpio->ucClkEnShift); |
96 | i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); | 96 | i2c.en_data_mask = (1 << gpio->ucDataEnShift); |
97 | i2c.en_clk_mask = (1 << gpio->ucClkEnShift); | 97 | i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); |
98 | i2c.en_data_mask = (1 << gpio->ucDataEnShift); | 98 | i2c.y_data_mask = (1 << gpio->ucDataY_Shift); |
99 | i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); | 99 | i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); |
100 | i2c.y_data_mask = (1 << gpio->ucDataY_Shift); | 100 | i2c.a_data_mask = (1 << gpio->ucDataA_Shift); |
101 | i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); | 101 | |
102 | i2c.a_data_mask = (1 << gpio->ucDataA_Shift); | 102 | if (gpio->sucI2cId.sbfAccess.bfHW_Capable) |
103 | 103 | i2c.hw_capable = true; | |
104 | if (gpio->sucI2cId.sbfAccess.bfHW_Capable) | 104 | else |
105 | i2c.hw_capable = true; | 105 | i2c.hw_capable = false; |
106 | else | 106 | |
107 | i2c.hw_capable = false; | 107 | if (gpio->sucI2cId.ucAccess == 0xa0) |
108 | 108 | i2c.mm_i2c = true; | |
109 | if (gpio->sucI2cId.ucAccess == 0xa0) | 109 | else |
110 | i2c.mm_i2c = true; | 110 | i2c.mm_i2c = false; |
111 | else | 111 | |
112 | i2c.mm_i2c = false; | 112 | i2c.i2c_id = gpio->sucI2cId.ucAccess; |
113 | 113 | ||
114 | i2c.i2c_id = gpio->sucI2cId.ucAccess; | 114 | i2c.valid = true; |
115 | 115 | break; | |
116 | i2c.valid = true; | 116 | } |
117 | break; | ||
118 | } | 117 | } |
119 | } | 118 | } |
120 | 119 | ||
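Every radeon_atombios.c hunk in this patch applies the same defensive change: the return value of atom_parse_data_header() is now checked before the returned data_offset is used to form a pointer into the BIOS image, instead of dereferencing an offset that may never have been written. The sketch below illustrates only that before/after pattern; parse_table_header(), bios_image and the table layout are made up for the example and are not radeon or AtomBIOS APIs.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for a BIOS data table: a 2-byte size field, then payload. */
    static const uint8_t bios_image[] = { 0x04, 0x00, 0xaa, 0xbb };

    /* Hypothetical parser with the contract the patch relies on: on success it
     * fills *data_offset and returns true; on failure it returns false and
     * leaves *data_offset untouched. */
    static bool parse_table_header(const uint8_t *bios, size_t len,
                                   uint16_t *data_offset)
    {
        if (bios == NULL || len < 2)
            return false;
        *data_offset = 2;   /* payload starts right after the size field */
        return true;
    }

    int main(void)
    {
        uint16_t data_offset;   /* deliberately uninitialized, as in the driver */

        /* Old pattern: compute bios_image + data_offset unconditionally and
         * read through a garbage offset whenever parsing failed.
         * New pattern: the pointer is only formed inside the success branch. */
        if (parse_table_header(bios_image, sizeof(bios_image), &data_offset))
            printf("payload starts with 0x%02x\n", bios_image[data_offset]);
        else
            puts("table not present, falling back to defaults");

        return 0;
    }

The same guard is what lets the later hunks fall back cleanly (for example to supported_devices_connector_object_id_convert[]) when a table is absent, rather than parsing through an unset offset.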
@@ -135,20 +134,21 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd | |||
135 | memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); | 134 | memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); |
136 | gpio.valid = false; | 135 | gpio.valid = false; |
137 | 136 | ||
138 | atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset); | 137 | if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { |
138 | gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); | ||
139 | 139 | ||
140 | gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); | 140 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / |
141 | sizeof(ATOM_GPIO_PIN_ASSIGNMENT); | ||
141 | 142 | ||
142 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); | 143 | for (i = 0; i < num_indices; i++) { |
143 | 144 | pin = &gpio_info->asGPIO_Pin[i]; | |
144 | for (i = 0; i < num_indices; i++) { | 145 | if (id == pin->ucGPIO_ID) { |
145 | pin = &gpio_info->asGPIO_Pin[i]; | 146 | gpio.id = pin->ucGPIO_ID; |
146 | if (id == pin->ucGPIO_ID) { | 147 | gpio.reg = pin->usGpioPin_AIndex * 4; |
147 | gpio.id = pin->ucGPIO_ID; | 148 | gpio.mask = (1 << pin->ucGpioPinBitShift); |
148 | gpio.reg = pin->usGpioPin_AIndex * 4; | 149 | gpio.valid = true; |
149 | gpio.mask = (1 << pin->ucGpioPinBitShift); | 150 | break; |
150 | gpio.valid = true; | 151 | } |
151 | break; | ||
152 | } | 152 | } |
153 | } | 153 | } |
154 | 154 | ||
@@ -264,6 +264,8 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
264 | if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) || | 264 | if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) || |
265 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | 265 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) |
266 | return false; | 266 | return false; |
267 | if (supported_device == ATOM_DEVICE_CRT2_SUPPORT) | ||
268 | *line_mux = 0x90; | ||
267 | } | 269 | } |
268 | 270 | ||
269 | /* ASUS HD 3600 XT board lists the DVI port as HDMI */ | 271 | /* ASUS HD 3600 XT board lists the DVI port as HDMI */ |
@@ -395,9 +397,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
395 | struct radeon_gpio_rec gpio; | 397 | struct radeon_gpio_rec gpio; |
396 | struct radeon_hpd hpd; | 398 | struct radeon_hpd hpd; |
397 | 399 | ||
398 | atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); | 400 | if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) |
399 | |||
400 | if (data_offset == 0) | ||
401 | return false; | 401 | return false; |
402 | 402 | ||
403 | if (crev < 2) | 403 | if (crev < 2) |
@@ -449,37 +449,43 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
449 | GetIndexIntoMasterTable(DATA, | 449 | GetIndexIntoMasterTable(DATA, |
450 | IntegratedSystemInfo); | 450 | IntegratedSystemInfo); |
451 | 451 | ||
452 | atom_parse_data_header(ctx, index, &size, &frev, | 452 | if (atom_parse_data_header(ctx, index, &size, &frev, |
453 | &crev, &igp_offset); | 453 | &crev, &igp_offset)) { |
454 | 454 | ||
455 | if (crev >= 2) { | 455 | if (crev >= 2) { |
456 | igp_obj = | 456 | igp_obj = |
457 | (ATOM_INTEGRATED_SYSTEM_INFO_V2 | 457 | (ATOM_INTEGRATED_SYSTEM_INFO_V2 |
458 | *) (ctx->bios + igp_offset); | 458 | *) (ctx->bios + igp_offset); |
459 | 459 | ||
460 | if (igp_obj) { | 460 | if (igp_obj) { |
461 | uint32_t slot_config, ct; | 461 | uint32_t slot_config, ct; |
462 | 462 | ||
463 | if (con_obj_num == 1) | 463 | if (con_obj_num == 1) |
464 | slot_config = | 464 | slot_config = |
465 | igp_obj-> | 465 | igp_obj-> |
466 | ulDDISlot1Config; | 466 | ulDDISlot1Config; |
467 | else | 467 | else |
468 | slot_config = | 468 | slot_config = |
469 | igp_obj-> | 469 | igp_obj-> |
470 | ulDDISlot2Config; | 470 | ulDDISlot2Config; |
471 | 471 | ||
472 | ct = (slot_config >> 16) & 0xff; | 472 | ct = (slot_config >> 16) & 0xff; |
473 | connector_type = | 473 | connector_type = |
474 | object_connector_convert | 474 | object_connector_convert |
475 | [ct]; | 475 | [ct]; |
476 | connector_object_id = ct; | 476 | connector_object_id = ct; |
477 | igp_lane_info = | 477 | igp_lane_info = |
478 | slot_config & 0xffff; | 478 | slot_config & 0xffff; |
479 | } else | ||
480 | continue; | ||
479 | } else | 481 | } else |
480 | continue; | 482 | continue; |
481 | } else | 483 | } else { |
482 | continue; | 484 | igp_lane_info = 0; |
485 | connector_type = | ||
486 | object_connector_convert[con_obj_id]; | ||
487 | connector_object_id = con_obj_id; | ||
488 | } | ||
483 | } else { | 489 | } else { |
484 | igp_lane_info = 0; | 490 | igp_lane_info = 0; |
485 | connector_type = | 491 | connector_type = |
@@ -627,20 +633,23 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev, | |||
627 | uint8_t frev, crev; | 633 | uint8_t frev, crev; |
628 | ATOM_XTMDS_INFO *xtmds; | 634 | ATOM_XTMDS_INFO *xtmds; |
629 | 635 | ||
630 | atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); | 636 | if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) { |
631 | xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset); | 637 | xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset); |
632 | 638 | ||
633 | if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) { | 639 | if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) { |
634 | if (connector_type == DRM_MODE_CONNECTOR_DVII) | 640 | if (connector_type == DRM_MODE_CONNECTOR_DVII) |
635 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; | 641 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; |
636 | else | 642 | else |
637 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; | 643 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; |
638 | } else { | 644 | } else { |
639 | if (connector_type == DRM_MODE_CONNECTOR_DVII) | 645 | if (connector_type == DRM_MODE_CONNECTOR_DVII) |
640 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; | 646 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; |
641 | else | 647 | else |
642 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; | 648 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; |
643 | } | 649 | } |
650 | } else | ||
651 | return supported_devices_connector_object_id_convert | ||
652 | [connector_type]; | ||
644 | } else { | 653 | } else { |
645 | return supported_devices_connector_object_id_convert | 654 | return supported_devices_connector_object_id_convert |
646 | [connector_type]; | 655 | [connector_type]; |
@@ -672,7 +681,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
672 | int i, j, max_device; | 681 | int i, j, max_device; |
673 | struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; | 682 | struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; |
674 | 683 | ||
675 | atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); | 684 | if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) |
685 | return false; | ||
676 | 686 | ||
677 | supported_devices = | 687 | supported_devices = |
678 | (union atom_supported_devices *)(ctx->bios + data_offset); | 688 | (union atom_supported_devices *)(ctx->bios + data_offset); |
@@ -865,14 +875,11 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
865 | struct radeon_pll *mpll = &rdev->clock.mpll; | 875 | struct radeon_pll *mpll = &rdev->clock.mpll; |
866 | uint16_t data_offset; | 876 | uint16_t data_offset; |
867 | 877 | ||
868 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | 878 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
869 | &crev, &data_offset); | 879 | &frev, &crev, &data_offset)) { |
870 | 880 | firmware_info = | |
871 | firmware_info = | 881 | (union firmware_info *)(mode_info->atom_context->bios + |
872 | (union firmware_info *)(mode_info->atom_context->bios + | 882 | data_offset); |
873 | data_offset); | ||
874 | |||
875 | if (firmware_info) { | ||
876 | /* pixel clocks */ | 883 | /* pixel clocks */ |
877 | p1pll->reference_freq = | 884 | p1pll->reference_freq = |
878 | le16_to_cpu(firmware_info->info.usReferenceClock); | 885 | le16_to_cpu(firmware_info->info.usReferenceClock); |
@@ -887,6 +894,20 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
887 | p1pll->pll_out_max = | 894 | p1pll->pll_out_max = |
888 | le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); | 895 | le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); |
889 | 896 | ||
897 | if (crev >= 4) { | ||
898 | p1pll->lcd_pll_out_min = | ||
899 | le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; | ||
900 | if (p1pll->lcd_pll_out_min == 0) | ||
901 | p1pll->lcd_pll_out_min = p1pll->pll_out_min; | ||
902 | p1pll->lcd_pll_out_max = | ||
903 | le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; | ||
904 | if (p1pll->lcd_pll_out_max == 0) | ||
905 | p1pll->lcd_pll_out_max = p1pll->pll_out_max; | ||
906 | } else { | ||
907 | p1pll->lcd_pll_out_min = p1pll->pll_out_min; | ||
908 | p1pll->lcd_pll_out_max = p1pll->pll_out_max; | ||
909 | } | ||
910 | |||
890 | if (p1pll->pll_out_min == 0) { | 911 | if (p1pll->pll_out_min == 0) { |
891 | if (ASIC_IS_AVIVO(rdev)) | 912 | if (ASIC_IS_AVIVO(rdev)) |
892 | p1pll->pll_out_min = 64800; | 913 | p1pll->pll_out_min = 64800; |
@@ -992,13 +1013,10 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) | |||
992 | u8 frev, crev; | 1013 | u8 frev, crev; |
993 | u16 data_offset; | 1014 | u16 data_offset; |
994 | 1015 | ||
995 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | 1016 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
996 | &crev, &data_offset); | 1017 | &frev, &crev, &data_offset)) { |
997 | 1018 | igp_info = (union igp_info *)(mode_info->atom_context->bios + | |
998 | igp_info = (union igp_info *)(mode_info->atom_context->bios + | ||
999 | data_offset); | 1019 | data_offset); |
1000 | |||
1001 | if (igp_info) { | ||
1002 | switch (crev) { | 1020 | switch (crev) { |
1003 | case 1: | 1021 | case 1: |
1004 | if (igp_info->info.ucMemoryType & 0xf0) | 1022 | if (igp_info->info.ucMemoryType & 0xf0) |
@@ -1029,14 +1047,12 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, | |||
1029 | uint16_t maxfreq; | 1047 | uint16_t maxfreq; |
1030 | int i; | 1048 | int i; |
1031 | 1049 | ||
1032 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | 1050 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1033 | &crev, &data_offset); | 1051 | &frev, &crev, &data_offset)) { |
1034 | 1052 | tmds_info = | |
1035 | tmds_info = | 1053 | (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios + |
1036 | (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios + | 1054 | data_offset); |
1037 | data_offset); | ||
1038 | 1055 | ||
1039 | if (tmds_info) { | ||
1040 | maxfreq = le16_to_cpu(tmds_info->usMaxFrequency); | 1056 | maxfreq = le16_to_cpu(tmds_info->usMaxFrequency); |
1041 | for (i = 0; i < 4; i++) { | 1057 | for (i = 0; i < 4; i++) { |
1042 | tmds->tmds_pll[i].freq = | 1058 | tmds->tmds_pll[i].freq = |
@@ -1085,13 +1101,11 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct | |||
1085 | if (id > ATOM_MAX_SS_ENTRY) | 1101 | if (id > ATOM_MAX_SS_ENTRY) |
1086 | return NULL; | 1102 | return NULL; |
1087 | 1103 | ||
1088 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | 1104 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1089 | &crev, &data_offset); | 1105 | &frev, &crev, &data_offset)) { |
1106 | ss_info = | ||
1107 | (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); | ||
1090 | 1108 | ||
1091 | ss_info = | ||
1092 | (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); | ||
1093 | |||
1094 | if (ss_info) { | ||
1095 | ss = | 1109 | ss = |
1096 | kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); | 1110 | kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); |
1097 | 1111 | ||
@@ -1114,30 +1128,6 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct | |||
1114 | return ss; | 1128 | return ss; |
1115 | } | 1129 | } |
1116 | 1130 | ||
1117 | static void radeon_atom_apply_lvds_quirks(struct drm_device *dev, | ||
1118 | struct radeon_encoder_atom_dig *lvds) | ||
1119 | { | ||
1120 | |||
1121 | /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */ | ||
1122 | if ((dev->pdev->device == 0x95c4) && | ||
1123 | (dev->pdev->subsystem_vendor == 0x1179) && | ||
1124 | (dev->pdev->subsystem_device == 0xff50)) { | ||
1125 | if ((lvds->native_mode.hdisplay == 1280) && | ||
1126 | (lvds->native_mode.vdisplay == 800)) | ||
1127 | lvds->pll_algo = PLL_ALGO_LEGACY; | ||
1128 | } | ||
1129 | |||
1130 | /* Dell Studio 15 laptop panel doesn't like new pll divider algo */ | ||
1131 | if ((dev->pdev->device == 0x95c4) && | ||
1132 | (dev->pdev->subsystem_vendor == 0x1028) && | ||
1133 | (dev->pdev->subsystem_device == 0x029f)) { | ||
1134 | if ((lvds->native_mode.hdisplay == 1280) && | ||
1135 | (lvds->native_mode.vdisplay == 800)) | ||
1136 | lvds->pll_algo = PLL_ALGO_LEGACY; | ||
1137 | } | ||
1138 | |||
1139 | } | ||
1140 | |||
1141 | union lvds_info { | 1131 | union lvds_info { |
1142 | struct _ATOM_LVDS_INFO info; | 1132 | struct _ATOM_LVDS_INFO info; |
1143 | struct _ATOM_LVDS_INFO_V12 info_12; | 1133 | struct _ATOM_LVDS_INFO_V12 info_12; |
@@ -1156,13 +1146,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1156 | uint8_t frev, crev; | 1146 | uint8_t frev, crev; |
1157 | struct radeon_encoder_atom_dig *lvds = NULL; | 1147 | struct radeon_encoder_atom_dig *lvds = NULL; |
1158 | 1148 | ||
1159 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | 1149 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1160 | &crev, &data_offset); | 1150 | &frev, &crev, &data_offset)) { |
1161 | 1151 | lvds_info = | |
1162 | lvds_info = | 1152 | (union lvds_info *)(mode_info->atom_context->bios + data_offset); |
1163 | (union lvds_info *)(mode_info->atom_context->bios + data_offset); | ||
1164 | |||
1165 | if (lvds_info) { | ||
1166 | lvds = | 1153 | lvds = |
1167 | kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); | 1154 | kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); |
1168 | 1155 | ||
@@ -1220,9 +1207,6 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1220 | lvds->pll_algo = PLL_ALGO_LEGACY; | 1207 | lvds->pll_algo = PLL_ALGO_LEGACY; |
1221 | } | 1208 | } |
1222 | 1209 | ||
1223 | /* LVDS quirks */ | ||
1224 | radeon_atom_apply_lvds_quirks(dev, lvds); | ||
1225 | |||
1226 | encoder->native_mode = lvds->native_mode; | 1210 | encoder->native_mode = lvds->native_mode; |
1227 | } | 1211 | } |
1228 | return lvds; | 1212 | return lvds; |
@@ -1241,11 +1225,11 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder) | |||
1241 | uint8_t bg, dac; | 1225 | uint8_t bg, dac; |
1242 | struct radeon_encoder_primary_dac *p_dac = NULL; | 1226 | struct radeon_encoder_primary_dac *p_dac = NULL; |
1243 | 1227 | ||
1244 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); | 1228 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1245 | 1229 | &frev, &crev, &data_offset)) { | |
1246 | dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset); | 1230 | dac_info = (struct _COMPASSIONATE_DATA *) |
1231 | (mode_info->atom_context->bios + data_offset); | ||
1247 | 1232 | ||
1248 | if (dac_info) { | ||
1249 | p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL); | 1233 | p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL); |
1250 | 1234 | ||
1251 | if (!p_dac) | 1235 | if (!p_dac) |
@@ -1270,7 +1254,9 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | |||
1270 | u8 frev, crev; | 1254 | u8 frev, crev; |
1271 | u16 data_offset, misc; | 1255 | u16 data_offset, misc; |
1272 | 1256 | ||
1273 | atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset); | 1257 | if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL, |
1258 | &frev, &crev, &data_offset)) | ||
1259 | return false; | ||
1274 | 1260 | ||
1275 | switch (crev) { | 1261 | switch (crev) { |
1276 | case 1: | 1262 | case 1: |
@@ -1362,47 +1348,50 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev) | |||
1362 | struct _ATOM_ANALOG_TV_INFO *tv_info; | 1348 | struct _ATOM_ANALOG_TV_INFO *tv_info; |
1363 | enum radeon_tv_std tv_std = TV_STD_NTSC; | 1349 | enum radeon_tv_std tv_std = TV_STD_NTSC; |
1364 | 1350 | ||
1365 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); | 1351 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1352 | &frev, &crev, &data_offset)) { | ||
1366 | 1353 | ||
1367 | tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); | 1354 | tv_info = (struct _ATOM_ANALOG_TV_INFO *) |
1355 | (mode_info->atom_context->bios + data_offset); | ||
1368 | 1356 | ||
1369 | switch (tv_info->ucTV_BootUpDefaultStandard) { | 1357 | switch (tv_info->ucTV_BootUpDefaultStandard) { |
1370 | case ATOM_TV_NTSC: | 1358 | case ATOM_TV_NTSC: |
1371 | tv_std = TV_STD_NTSC; | 1359 | tv_std = TV_STD_NTSC; |
1372 | DRM_INFO("Default TV standard: NTSC\n"); | 1360 | DRM_INFO("Default TV standard: NTSC\n"); |
1373 | break; | 1361 | break; |
1374 | case ATOM_TV_NTSCJ: | 1362 | case ATOM_TV_NTSCJ: |
1375 | tv_std = TV_STD_NTSC_J; | 1363 | tv_std = TV_STD_NTSC_J; |
1376 | DRM_INFO("Default TV standard: NTSC-J\n"); | 1364 | DRM_INFO("Default TV standard: NTSC-J\n"); |
1377 | break; | 1365 | break; |
1378 | case ATOM_TV_PAL: | 1366 | case ATOM_TV_PAL: |
1379 | tv_std = TV_STD_PAL; | 1367 | tv_std = TV_STD_PAL; |
1380 | DRM_INFO("Default TV standard: PAL\n"); | 1368 | DRM_INFO("Default TV standard: PAL\n"); |
1381 | break; | 1369 | break; |
1382 | case ATOM_TV_PALM: | 1370 | case ATOM_TV_PALM: |
1383 | tv_std = TV_STD_PAL_M; | 1371 | tv_std = TV_STD_PAL_M; |
1384 | DRM_INFO("Default TV standard: PAL-M\n"); | 1372 | DRM_INFO("Default TV standard: PAL-M\n"); |
1385 | break; | 1373 | break; |
1386 | case ATOM_TV_PALN: | 1374 | case ATOM_TV_PALN: |
1387 | tv_std = TV_STD_PAL_N; | 1375 | tv_std = TV_STD_PAL_N; |
1388 | DRM_INFO("Default TV standard: PAL-N\n"); | 1376 | DRM_INFO("Default TV standard: PAL-N\n"); |
1389 | break; | 1377 | break; |
1390 | case ATOM_TV_PALCN: | 1378 | case ATOM_TV_PALCN: |
1391 | tv_std = TV_STD_PAL_CN; | 1379 | tv_std = TV_STD_PAL_CN; |
1392 | DRM_INFO("Default TV standard: PAL-CN\n"); | 1380 | DRM_INFO("Default TV standard: PAL-CN\n"); |
1393 | break; | 1381 | break; |
1394 | case ATOM_TV_PAL60: | 1382 | case ATOM_TV_PAL60: |
1395 | tv_std = TV_STD_PAL_60; | 1383 | tv_std = TV_STD_PAL_60; |
1396 | DRM_INFO("Default TV standard: PAL-60\n"); | 1384 | DRM_INFO("Default TV standard: PAL-60\n"); |
1397 | break; | 1385 | break; |
1398 | case ATOM_TV_SECAM: | 1386 | case ATOM_TV_SECAM: |
1399 | tv_std = TV_STD_SECAM; | 1387 | tv_std = TV_STD_SECAM; |
1400 | DRM_INFO("Default TV standard: SECAM\n"); | 1388 | DRM_INFO("Default TV standard: SECAM\n"); |
1401 | break; | 1389 | break; |
1402 | default: | 1390 | default: |
1403 | tv_std = TV_STD_NTSC; | 1391 | tv_std = TV_STD_NTSC; |
1404 | DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); | 1392 | DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); |
1405 | break; | 1393 | break; |
1394 | } | ||
1406 | } | 1395 | } |
1407 | return tv_std; | 1396 | return tv_std; |
1408 | } | 1397 | } |
@@ -1420,11 +1409,12 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) | |||
1420 | uint8_t bg, dac; | 1409 | uint8_t bg, dac; |
1421 | struct radeon_encoder_tv_dac *tv_dac = NULL; | 1410 | struct radeon_encoder_tv_dac *tv_dac = NULL; |
1422 | 1411 | ||
1423 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); | 1412 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1413 | &frev, &crev, &data_offset)) { | ||
1424 | 1414 | ||
1425 | dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset); | 1415 | dac_info = (struct _COMPASSIONATE_DATA *) |
1416 | (mode_info->atom_context->bios + data_offset); | ||
1426 | 1417 | ||
1427 | if (dac_info) { | ||
1428 | tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); | 1418 | tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); |
1429 | 1419 | ||
1430 | if (!tv_dac) | 1420 | if (!tv_dac) |
@@ -1447,6 +1437,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) | |||
1447 | return tv_dac; | 1437 | return tv_dac; |
1448 | } | 1438 | } |
1449 | 1439 | ||
1440 | static const char *thermal_controller_names[] = { | ||
1441 | "NONE", | ||
1442 | "LM63", | ||
1443 | "ADM1032", | ||
1444 | "ADM1030", | ||
1445 | "MUA6649", | ||
1446 | "LM64", | ||
1447 | "F75375", | ||
1448 | "ASC7512", | ||
1449 | }; | ||
1450 | |||
1451 | static const char *pp_lib_thermal_controller_names[] = { | ||
1452 | "NONE", | ||
1453 | "LM63", | ||
1454 | "ADM1032", | ||
1455 | "ADM1030", | ||
1456 | "MUA6649", | ||
1457 | "LM64", | ||
1458 | "F75375", | ||
1459 | "RV6xx", | ||
1460 | "RV770", | ||
1461 | "ADT7473", | ||
1462 | }; | ||
1463 | |||
1450 | union power_info { | 1464 | union power_info { |
1451 | struct _ATOM_POWERPLAY_INFO info; | 1465 | struct _ATOM_POWERPLAY_INFO info; |
1452 | struct _ATOM_POWERPLAY_INFO_V2 info_2; | 1466 | struct _ATOM_POWERPLAY_INFO_V2 info_2; |
@@ -1466,15 +1480,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
1466 | struct _ATOM_PPLIB_STATE *power_state; | 1480 | struct _ATOM_PPLIB_STATE *power_state; |
1467 | int num_modes = 0, i, j; | 1481 | int num_modes = 0, i, j; |
1468 | int state_index = 0, mode_index = 0; | 1482 | int state_index = 0, mode_index = 0; |
1469 | 1483 | struct radeon_i2c_bus_rec i2c_bus; | |
1470 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); | ||
1471 | |||
1472 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
1473 | 1484 | ||
1474 | rdev->pm.default_power_state = NULL; | 1485 | rdev->pm.default_power_state = NULL; |
1475 | 1486 | ||
1476 | if (power_info) { | 1487 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1488 | &frev, &crev, &data_offset)) { | ||
1489 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
1477 | if (frev < 4) { | 1490 | if (frev < 4) { |
1491 | /* add the i2c bus for thermal/fan chip */ | ||
1492 | if (power_info->info.ucOverdriveThermalController > 0) { | ||
1493 | DRM_INFO("Possible %s thermal controller at 0x%02x\n", | ||
1494 | thermal_controller_names[power_info->info.ucOverdriveThermalController], | ||
1495 | power_info->info.ucOverdriveControllerAddress >> 1); | ||
1496 | i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); | ||
1497 | rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); | ||
1498 | } | ||
1478 | num_modes = power_info->info.ucNumOfPowerModeEntries; | 1499 | num_modes = power_info->info.ucNumOfPowerModeEntries; |
1479 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) | 1500 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) |
1480 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; | 1501 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; |
@@ -1684,6 +1705,24 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
1684 | } | 1705 | } |
1685 | } | 1706 | } |
1686 | } else if (frev == 4) { | 1707 | } else if (frev == 4) { |
1708 | /* add the i2c bus for thermal/fan chip */ | ||
1709 | /* no support for internal controller yet */ | ||
1710 | if (power_info->info_4.sThermalController.ucType > 0) { | ||
1711 | if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || | ||
1712 | (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) { | ||
1713 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
1714 | (power_info->info_4.sThermalController.ucFanParameters & | ||
1715 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
1716 | } else { | ||
1717 | DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", | ||
1718 | pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType], | ||
1719 | power_info->info_4.sThermalController.ucI2cAddress >> 1, | ||
1720 | (power_info->info_4.sThermalController.ucFanParameters & | ||
1721 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
1722 | i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine); | ||
1723 | rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); | ||
1724 | } | ||
1725 | } | ||
1687 | for (i = 0; i < power_info->info_4.ucNumStates; i++) { | 1726 | for (i = 0; i < power_info->info_4.ucNumStates; i++) { |
1688 | mode_index = 0; | 1727 | mode_index = 0; |
1689 | power_state = (struct _ATOM_PPLIB_STATE *) | 1728 | power_state = (struct _ATOM_PPLIB_STATE *) |
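The radeon_atombios.c hunks above share one pattern: the return value of atom_parse_data_header() is now checked before the table at bios + data_offset is used, replacing the old NULL test on a derived pointer that could never actually be NULL. A minimal, self-contained sketch of that control-flow change (the names below are hypothetical stand-ins, not the driver's real types or functions):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for atom_parse_data_header(): returns true and
	 * fills *data_offset only when the requested data table exists. */
	static bool parse_data_header(const uint8_t *bios, size_t bios_len,
				      uint16_t *data_offset)
	{
		if (bios_len < 0x101)
			return false;
		*data_offset = 0x100;	/* pretend the table index pointed here */
		return true;
	}

	static bool read_table(const uint8_t *bios, size_t bios_len)
	{
		uint16_t data_offset;

		/* New pattern: skip the body unless the header parse succeeded,
		 * instead of computing bios + data_offset first and then testing
		 * the resulting (never-NULL) pointer. */
		if (!parse_data_header(bios, bios_len, &data_offset))
			return false;

		printf("table at 0x%x, first byte 0x%02x\n",
		       (unsigned)data_offset, (unsigned)bios[data_offset]);
		return true;
	}

	int main(void)
	{
		static uint8_t bios[0x200] = { [0x100] = 0x42 };

		return read_table(bios, sizeof(bios)) ? 0 : 1;
	}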
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index e9ea38ece375..2becdeda68a3 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -531,10 +531,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
531 | case CHIP_RS300: | 531 | case CHIP_RS300: |
532 | switch (ddc_line) { | 532 | switch (ddc_line) { |
533 | case RADEON_GPIO_DVI_DDC: | 533 | case RADEON_GPIO_DVI_DDC: |
534 | /* in theory this should be hw capable, | 534 | i2c.hw_capable = true; |
535 | * but it doesn't seem to work | ||
536 | */ | ||
537 | i2c.hw_capable = false; | ||
538 | break; | 535 | break; |
539 | default: | 536 | default: |
540 | i2c.hw_capable = false; | 537 | i2c.hw_capable = false; |
@@ -633,6 +630,8 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) | |||
633 | p1pll->reference_div = RBIOS16(pll_info + 0x10); | 630 | p1pll->reference_div = RBIOS16(pll_info + 0x10); |
634 | p1pll->pll_out_min = RBIOS32(pll_info + 0x12); | 631 | p1pll->pll_out_min = RBIOS32(pll_info + 0x12); |
635 | p1pll->pll_out_max = RBIOS32(pll_info + 0x16); | 632 | p1pll->pll_out_max = RBIOS32(pll_info + 0x16); |
633 | p1pll->lcd_pll_out_min = p1pll->pll_out_min; | ||
634 | p1pll->lcd_pll_out_max = p1pll->pll_out_max; | ||
636 | 635 | ||
637 | if (rev > 9) { | 636 | if (rev > 9) { |
638 | p1pll->pll_in_min = RBIOS32(pll_info + 0x36); | 637 | p1pll->pll_in_min = RBIOS32(pll_info + 0x36); |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index ee0083f982d8..60d59816b94f 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -940,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector) | |||
940 | if (radeon_connector->edid) | 940 | if (radeon_connector->edid) |
941 | kfree(radeon_connector->edid); | 941 | kfree(radeon_connector->edid); |
942 | if (radeon_dig_connector->dp_i2c_bus) | 942 | if (radeon_dig_connector->dp_i2c_bus) |
943 | radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus); | 943 | radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus); |
944 | kfree(radeon_connector->con_priv); | 944 | kfree(radeon_connector->con_priv); |
945 | drm_sysfs_connector_remove(connector); | 945 | drm_sysfs_connector_remove(connector); |
946 | drm_connector_cleanup(connector); | 946 | drm_connector_cleanup(connector); |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 70ba02ed7723..f9b0fe002c0a 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -193,9 +193,11 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
193 | radeon_bo_list_fence(&parser->validated, parser->ib->fence); | 193 | radeon_bo_list_fence(&parser->validated, parser->ib->fence); |
194 | } | 194 | } |
195 | radeon_bo_list_unreserve(&parser->validated); | 195 | radeon_bo_list_unreserve(&parser->validated); |
196 | for (i = 0; i < parser->nrelocs; i++) { | 196 | if (parser->relocs != NULL) { |
197 | if (parser->relocs[i].gobj) | 197 | for (i = 0; i < parser->nrelocs; i++) { |
198 | drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); | 198 | if (parser->relocs[i].gobj) |
199 | drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); | ||
200 | } | ||
199 | } | 201 | } |
200 | kfree(parser->track); | 202 | kfree(parser->track); |
201 | kfree(parser->relocs); | 203 | kfree(parser->relocs); |
@@ -243,7 +245,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
243 | } | 245 | } |
244 | r = radeon_cs_parser_relocs(&parser); | 246 | r = radeon_cs_parser_relocs(&parser); |
245 | if (r) { | 247 | if (r) { |
246 | DRM_ERROR("Failed to parse relocation !\n"); | 248 | if (r != -ERESTARTSYS) |
249 | DRM_ERROR("Failed to parse relocation %d!\n", r); | ||
247 | radeon_cs_parser_fini(&parser, r); | 250 | radeon_cs_parser_fini(&parser, r); |
248 | mutex_unlock(&rdev->cs_mutex); | 251 | mutex_unlock(&rdev->cs_mutex); |
249 | return r; | 252 | return r; |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e28e4ed5f720..60ec47b71642 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/vga_switcheroo.h> | 33 | #include <linux/vga_switcheroo.h> |
34 | #include "radeon_reg.h" | 34 | #include "radeon_reg.h" |
35 | #include "radeon.h" | 35 | #include "radeon.h" |
36 | #include "radeon_asic.h" | ||
37 | #include "atom.h" | 36 | #include "atom.h" |
38 | 37 | ||
39 | /* | 38 | /* |
@@ -242,6 +241,36 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
242 | 241 | ||
243 | } | 242 | } |
244 | 243 | ||
244 | void radeon_update_bandwidth_info(struct radeon_device *rdev) | ||
245 | { | ||
246 | fixed20_12 a; | ||
247 | u32 sclk, mclk; | ||
248 | |||
249 | if (rdev->flags & RADEON_IS_IGP) { | ||
250 | sclk = radeon_get_engine_clock(rdev); | ||
251 | mclk = rdev->clock.default_mclk; | ||
252 | |||
253 | a.full = rfixed_const(100); | ||
254 | rdev->pm.sclk.full = rfixed_const(sclk); | ||
255 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
256 | rdev->pm.mclk.full = rfixed_const(mclk); | ||
257 | rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); | ||
258 | |||
259 | a.full = rfixed_const(16); | ||
260 | /* core_bandwidth = sclk (MHz) * 16 */ | ||
261 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); | ||
262 | } else { | ||
263 | sclk = radeon_get_engine_clock(rdev); | ||
264 | mclk = radeon_get_memory_clock(rdev); | ||
265 | |||
266 | a.full = rfixed_const(100); | ||
267 | rdev->pm.sclk.full = rfixed_const(sclk); | ||
268 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
269 | rdev->pm.mclk.full = rfixed_const(mclk); | ||
270 | rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); | ||
271 | } | ||
272 | } | ||
273 | |||
245 | bool radeon_boot_test_post_card(struct radeon_device *rdev) | 274 | bool radeon_boot_test_post_card(struct radeon_device *rdev) |
246 | { | 275 | { |
247 | if (radeon_card_posted(rdev)) | 276 | if (radeon_card_posted(rdev)) |
@@ -288,181 +317,6 @@ void radeon_dummy_page_fini(struct radeon_device *rdev) | |||
288 | } | 317 | } |
289 | 318 | ||
290 | 319 | ||
291 | /* | ||
292 | * Registers accessors functions. | ||
293 | */ | ||
294 | uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg) | ||
295 | { | ||
296 | DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); | ||
297 | BUG_ON(1); | ||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | ||
302 | { | ||
303 | DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", | ||
304 | reg, v); | ||
305 | BUG_ON(1); | ||
306 | } | ||
307 | |||
308 | void radeon_register_accessor_init(struct radeon_device *rdev) | ||
309 | { | ||
310 | rdev->mc_rreg = &radeon_invalid_rreg; | ||
311 | rdev->mc_wreg = &radeon_invalid_wreg; | ||
312 | rdev->pll_rreg = &radeon_invalid_rreg; | ||
313 | rdev->pll_wreg = &radeon_invalid_wreg; | ||
314 | rdev->pciep_rreg = &radeon_invalid_rreg; | ||
315 | rdev->pciep_wreg = &radeon_invalid_wreg; | ||
316 | |||
317 | /* Don't change order as we are overriding accessors. */ | ||
318 | if (rdev->family < CHIP_RV515) { | ||
319 | rdev->pcie_reg_mask = 0xff; | ||
320 | } else { | ||
321 | rdev->pcie_reg_mask = 0x7ff; | ||
322 | } | ||
323 | /* FIXME: not sure here */ | ||
324 | if (rdev->family <= CHIP_R580) { | ||
325 | rdev->pll_rreg = &r100_pll_rreg; | ||
326 | rdev->pll_wreg = &r100_pll_wreg; | ||
327 | } | ||
328 | if (rdev->family >= CHIP_R420) { | ||
329 | rdev->mc_rreg = &r420_mc_rreg; | ||
330 | rdev->mc_wreg = &r420_mc_wreg; | ||
331 | } | ||
332 | if (rdev->family >= CHIP_RV515) { | ||
333 | rdev->mc_rreg = &rv515_mc_rreg; | ||
334 | rdev->mc_wreg = &rv515_mc_wreg; | ||
335 | } | ||
336 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { | ||
337 | rdev->mc_rreg = &rs400_mc_rreg; | ||
338 | rdev->mc_wreg = &rs400_mc_wreg; | ||
339 | } | ||
340 | if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { | ||
341 | rdev->mc_rreg = &rs690_mc_rreg; | ||
342 | rdev->mc_wreg = &rs690_mc_wreg; | ||
343 | } | ||
344 | if (rdev->family == CHIP_RS600) { | ||
345 | rdev->mc_rreg = &rs600_mc_rreg; | ||
346 | rdev->mc_wreg = &rs600_mc_wreg; | ||
347 | } | ||
348 | if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { | ||
349 | rdev->pciep_rreg = &r600_pciep_rreg; | ||
350 | rdev->pciep_wreg = &r600_pciep_wreg; | ||
351 | } | ||
352 | } | ||
353 | |||
354 | |||
355 | /* | ||
356 | * ASIC | ||
357 | */ | ||
358 | int radeon_asic_init(struct radeon_device *rdev) | ||
359 | { | ||
360 | radeon_register_accessor_init(rdev); | ||
361 | switch (rdev->family) { | ||
362 | case CHIP_R100: | ||
363 | case CHIP_RV100: | ||
364 | case CHIP_RS100: | ||
365 | case CHIP_RV200: | ||
366 | case CHIP_RS200: | ||
367 | rdev->asic = &r100_asic; | ||
368 | break; | ||
369 | case CHIP_R200: | ||
370 | case CHIP_RV250: | ||
371 | case CHIP_RS300: | ||
372 | case CHIP_RV280: | ||
373 | rdev->asic = &r200_asic; | ||
374 | break; | ||
375 | case CHIP_R300: | ||
376 | case CHIP_R350: | ||
377 | case CHIP_RV350: | ||
378 | case CHIP_RV380: | ||
379 | if (rdev->flags & RADEON_IS_PCIE) | ||
380 | rdev->asic = &r300_asic_pcie; | ||
381 | else | ||
382 | rdev->asic = &r300_asic; | ||
383 | break; | ||
384 | case CHIP_R420: | ||
385 | case CHIP_R423: | ||
386 | case CHIP_RV410: | ||
387 | rdev->asic = &r420_asic; | ||
388 | break; | ||
389 | case CHIP_RS400: | ||
390 | case CHIP_RS480: | ||
391 | rdev->asic = &rs400_asic; | ||
392 | break; | ||
393 | case CHIP_RS600: | ||
394 | rdev->asic = &rs600_asic; | ||
395 | break; | ||
396 | case CHIP_RS690: | ||
397 | case CHIP_RS740: | ||
398 | rdev->asic = &rs690_asic; | ||
399 | break; | ||
400 | case CHIP_RV515: | ||
401 | rdev->asic = &rv515_asic; | ||
402 | break; | ||
403 | case CHIP_R520: | ||
404 | case CHIP_RV530: | ||
405 | case CHIP_RV560: | ||
406 | case CHIP_RV570: | ||
407 | case CHIP_R580: | ||
408 | rdev->asic = &r520_asic; | ||
409 | break; | ||
410 | case CHIP_R600: | ||
411 | case CHIP_RV610: | ||
412 | case CHIP_RV630: | ||
413 | case CHIP_RV620: | ||
414 | case CHIP_RV635: | ||
415 | case CHIP_RV670: | ||
416 | case CHIP_RS780: | ||
417 | case CHIP_RS880: | ||
418 | rdev->asic = &r600_asic; | ||
419 | break; | ||
420 | case CHIP_RV770: | ||
421 | case CHIP_RV730: | ||
422 | case CHIP_RV710: | ||
423 | case CHIP_RV740: | ||
424 | rdev->asic = &rv770_asic; | ||
425 | break; | ||
426 | case CHIP_CEDAR: | ||
427 | case CHIP_REDWOOD: | ||
428 | case CHIP_JUNIPER: | ||
429 | case CHIP_CYPRESS: | ||
430 | case CHIP_HEMLOCK: | ||
431 | rdev->asic = &evergreen_asic; | ||
432 | break; | ||
433 | default: | ||
434 | /* FIXME: not supported yet */ | ||
435 | return -EINVAL; | ||
436 | } | ||
437 | |||
438 | if (rdev->flags & RADEON_IS_IGP) { | ||
439 | rdev->asic->get_memory_clock = NULL; | ||
440 | rdev->asic->set_memory_clock = NULL; | ||
441 | } | ||
442 | |||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | |||
447 | /* | ||
448 | * Wrapper around modesetting bits. | ||
449 | */ | ||
450 | int radeon_clocks_init(struct radeon_device *rdev) | ||
451 | { | ||
452 | int r; | ||
453 | |||
454 | r = radeon_static_clocks_init(rdev->ddev); | ||
455 | if (r) { | ||
456 | return r; | ||
457 | } | ||
458 | DRM_INFO("Clocks initialized !\n"); | ||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | void radeon_clocks_fini(struct radeon_device *rdev) | ||
463 | { | ||
464 | } | ||
465 | |||
466 | /* ATOM accessor methods */ | 320 | /* ATOM accessor methods */ |
467 | static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) | 321 | static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) |
468 | { | 322 | { |
@@ -567,29 +421,6 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) | |||
567 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 421 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
568 | } | 422 | } |
569 | 423 | ||
570 | void radeon_agp_disable(struct radeon_device *rdev) | ||
571 | { | ||
572 | rdev->flags &= ~RADEON_IS_AGP; | ||
573 | if (rdev->family >= CHIP_R600) { | ||
574 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
575 | rdev->flags |= RADEON_IS_PCIE; | ||
576 | } else if (rdev->family >= CHIP_RV515 || | ||
577 | rdev->family == CHIP_RV380 || | ||
578 | rdev->family == CHIP_RV410 || | ||
579 | rdev->family == CHIP_R423) { | ||
580 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
581 | rdev->flags |= RADEON_IS_PCIE; | ||
582 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | ||
583 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | ||
584 | } else { | ||
585 | DRM_INFO("Forcing AGP to PCI mode\n"); | ||
586 | rdev->flags |= RADEON_IS_PCI; | ||
587 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | ||
588 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | ||
589 | } | ||
590 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
591 | } | ||
592 | |||
593 | void radeon_check_arguments(struct radeon_device *rdev) | 424 | void radeon_check_arguments(struct radeon_device *rdev) |
594 | { | 425 | { |
595 | /* vramlimit must be a power of two */ | 426 | /* vramlimit must be a power of two */ |
@@ -731,6 +562,14 @@ int radeon_device_init(struct radeon_device *rdev, | |||
731 | return r; | 562 | return r; |
732 | radeon_check_arguments(rdev); | 563 | radeon_check_arguments(rdev); |
733 | 564 | ||
565 | /* All of the newer IGP chips have an internal GART. | ||
566 | * However, some rs4xx parts report as AGP, so clear that flag here. | ||
567 | */ | ||
568 | if ((rdev->family >= CHIP_RS400) && | ||
569 | (rdev->flags & RADEON_IS_IGP)) { | ||
570 | rdev->flags &= ~RADEON_IS_AGP; | ||
571 | } | ||
572 | |||
734 | if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { | 573 | if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { |
735 | radeon_agp_disable(rdev); | 574 | radeon_agp_disable(rdev); |
736 | } | 575 | } |
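The radeon_update_bandwidth_info() helper added in the radeon_device.c section above turns the reported engine/memory clocks into fixed20_12 values, dividing by 100 (the adjacent "sclk (MHz)" comment suggests the raw clocks are in 10 kHz units) and deriving a core bandwidth figure from sclk. A rough userspace model of that 20.12 fixed-point arithmetic, assuming the usual layout where a value x is stored as x * 4096 — an illustration only, not the kernel's rfixed implementation:

	#include <stdint.h>
	#include <stdio.h>

	/* Toy 20.12 fixed-point type: real value x stored as the integer x << 12. */
	typedef struct { uint32_t full; } fixed20_12;

	static fixed20_12 fx_const(uint32_t v)
	{
		fixed20_12 r = { v << 12 };
		return r;
	}

	static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
	{
		/* widen before shifting so the intermediate does not overflow */
		fixed20_12 r = { (uint32_t)(((uint64_t)a.full << 12) / b.full) };
		return r;
	}

	static double fx_to_double(fixed20_12 a)
	{
		return a.full / 4096.0;
	}

	int main(void)
	{
		/* e.g. an engine clock reported as 60000 (10 kHz units = 600 MHz) */
		fixed20_12 sclk = fx_const(60000);

		sclk = fx_div(sclk, fx_const(100));	/* -> 600.0 */
		fixed20_12 bw = fx_div(sclk, fx_const(16));

		printf("sclk = %.2f MHz, derived bandwidth term = %.2f\n",
		       fx_to_double(sclk), fx_to_double(bw));
		return 0;
	}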
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ba8d806dcf39..b8d672828246 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -368,10 +368,9 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) | |||
368 | 368 | ||
369 | if (rdev->bios) { | 369 | if (rdev->bios) { |
370 | if (rdev->is_atom_bios) { | 370 | if (rdev->is_atom_bios) { |
371 | if (rdev->family >= CHIP_R600) | 371 | ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); |
372 | if (ret == false) | ||
372 | ret = radeon_get_atom_connector_info_from_object_table(dev); | 373 | ret = radeon_get_atom_connector_info_from_object_table(dev); |
373 | else | ||
374 | ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); | ||
375 | } else { | 374 | } else { |
376 | ret = radeon_get_legacy_connector_info_from_bios(dev); | 375 | ret = radeon_get_legacy_connector_info_from_bios(dev); |
377 | if (ret == false) | 376 | if (ret == false) |
@@ -469,10 +468,19 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
469 | uint32_t best_error = 0xffffffff; | 468 | uint32_t best_error = 0xffffffff; |
470 | uint32_t best_vco_diff = 1; | 469 | uint32_t best_vco_diff = 1; |
471 | uint32_t post_div; | 470 | uint32_t post_div; |
471 | u32 pll_out_min, pll_out_max; | ||
472 | 472 | ||
473 | DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); | 473 | DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); |
474 | freq = freq * 1000; | 474 | freq = freq * 1000; |
475 | 475 | ||
476 | if (pll->flags & RADEON_PLL_IS_LCD) { | ||
477 | pll_out_min = pll->lcd_pll_out_min; | ||
478 | pll_out_max = pll->lcd_pll_out_max; | ||
479 | } else { | ||
480 | pll_out_min = pll->pll_out_min; | ||
481 | pll_out_max = pll->pll_out_max; | ||
482 | } | ||
483 | |||
476 | if (pll->flags & RADEON_PLL_USE_REF_DIV) | 484 | if (pll->flags & RADEON_PLL_USE_REF_DIV) |
477 | min_ref_div = max_ref_div = pll->reference_div; | 485 | min_ref_div = max_ref_div = pll->reference_div; |
478 | else { | 486 | else { |
@@ -536,10 +544,10 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
536 | tmp = (uint64_t)pll->reference_freq * feedback_div; | 544 | tmp = (uint64_t)pll->reference_freq * feedback_div; |
537 | vco = radeon_div(tmp, ref_div); | 545 | vco = radeon_div(tmp, ref_div); |
538 | 546 | ||
539 | if (vco < pll->pll_out_min) { | 547 | if (vco < pll_out_min) { |
540 | min_feed_div = feedback_div + 1; | 548 | min_feed_div = feedback_div + 1; |
541 | continue; | 549 | continue; |
542 | } else if (vco > pll->pll_out_max) { | 550 | } else if (vco > pll_out_max) { |
543 | max_feed_div = feedback_div; | 551 | max_feed_div = feedback_div; |
544 | continue; | 552 | continue; |
545 | } | 553 | } |
@@ -675,6 +683,15 @@ calc_fb_ref_div(struct radeon_pll *pll, | |||
675 | { | 683 | { |
676 | fixed20_12 ffreq, max_error, error, pll_out, a; | 684 | fixed20_12 ffreq, max_error, error, pll_out, a; |
677 | u32 vco; | 685 | u32 vco; |
686 | u32 pll_out_min, pll_out_max; | ||
687 | |||
688 | if (pll->flags & RADEON_PLL_IS_LCD) { | ||
689 | pll_out_min = pll->lcd_pll_out_min; | ||
690 | pll_out_max = pll->lcd_pll_out_max; | ||
691 | } else { | ||
692 | pll_out_min = pll->pll_out_min; | ||
693 | pll_out_max = pll->pll_out_max; | ||
694 | } | ||
678 | 695 | ||
679 | ffreq.full = rfixed_const(freq); | 696 | ffreq.full = rfixed_const(freq); |
680 | /* max_error = ffreq * 0.0025; */ | 697 | /* max_error = ffreq * 0.0025; */ |
@@ -686,7 +703,7 @@ calc_fb_ref_div(struct radeon_pll *pll, | |||
686 | vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); | 703 | vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); |
687 | vco = vco / ((*ref_div) * 10); | 704 | vco = vco / ((*ref_div) * 10); |
688 | 705 | ||
689 | if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max)) | 706 | if ((vco < pll_out_min) || (vco > pll_out_max)) |
690 | continue; | 707 | continue; |
691 | 708 | ||
692 | /* pll_out = vco / post_div; */ | 709 | /* pll_out = vco / post_div; */ |
@@ -714,6 +731,15 @@ static void radeon_compute_pll_new(struct radeon_pll *pll, | |||
714 | { | 731 | { |
715 | u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; | 732 | u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; |
716 | u32 best_freq = 0, vco_frequency; | 733 | u32 best_freq = 0, vco_frequency; |
734 | u32 pll_out_min, pll_out_max; | ||
735 | |||
736 | if (pll->flags & RADEON_PLL_IS_LCD) { | ||
737 | pll_out_min = pll->lcd_pll_out_min; | ||
738 | pll_out_max = pll->lcd_pll_out_max; | ||
739 | } else { | ||
740 | pll_out_min = pll->pll_out_min; | ||
741 | pll_out_max = pll->pll_out_max; | ||
742 | } | ||
717 | 743 | ||
718 | /* freq = freq / 10; */ | 744 | /* freq = freq / 10; */ |
719 | do_div(freq, 10); | 745 | do_div(freq, 10); |
@@ -724,7 +750,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll, | |||
724 | goto done; | 750 | goto done; |
725 | 751 | ||
726 | vco_frequency = freq * post_div; | 752 | vco_frequency = freq * post_div; |
727 | if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) | 753 | if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) |
728 | goto done; | 754 | goto done; |
729 | 755 | ||
730 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { | 756 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { |
@@ -749,7 +775,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll, | |||
749 | continue; | 775 | continue; |
750 | 776 | ||
751 | vco_frequency = freq * post_div; | 777 | vco_frequency = freq * post_div; |
752 | if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) | 778 | if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) |
753 | continue; | 779 | continue; |
754 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { | 780 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { |
755 | ref_div = pll->reference_div; | 781 | ref_div = pll->reference_div; |
@@ -945,6 +971,23 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) | |||
945 | return 0; | 971 | return 0; |
946 | } | 972 | } |
947 | 973 | ||
974 | void radeon_update_display_priority(struct radeon_device *rdev) | ||
975 | { | ||
976 | /* adjustment options for the display watermarks */ | ||
977 | if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) { | ||
978 | /* set display priority to high for r3xx, rv515 chips; | ||
979 | * this avoids flickering due to underflow in the | ||
980 | * display controllers during heavy acceleration. | ||
981 | */ | ||
982 | if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) | ||
983 | rdev->disp_priority = 2; | ||
984 | else | ||
985 | rdev->disp_priority = 0; | ||
986 | } else | ||
987 | rdev->disp_priority = radeon_disp_priority; | ||
988 | |||
989 | } | ||
990 | |||
948 | int radeon_modeset_init(struct radeon_device *rdev) | 991 | int radeon_modeset_init(struct radeon_device *rdev) |
949 | { | 992 | { |
950 | int i; | 993 | int i; |
@@ -976,15 +1019,6 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
976 | radeon_combios_check_hardcoded_edid(rdev); | 1019 | radeon_combios_check_hardcoded_edid(rdev); |
977 | } | 1020 | } |
978 | 1021 | ||
979 | if (rdev->flags & RADEON_SINGLE_CRTC) | ||
980 | rdev->num_crtc = 1; | ||
981 | else { | ||
982 | if (ASIC_IS_DCE4(rdev)) | ||
983 | rdev->num_crtc = 6; | ||
984 | else | ||
985 | rdev->num_crtc = 2; | ||
986 | } | ||
987 | |||
988 | /* allocate crtcs */ | 1022 | /* allocate crtcs */ |
989 | for (i = 0; i < rdev->num_crtc; i++) { | 1023 | for (i = 0; i < rdev->num_crtc; i++) { |
990 | radeon_crtc_init(rdev->ddev, i); | 1024 | radeon_crtc_init(rdev->ddev, i); |
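The radeon_display.c hunks above make each PLL computation path take its VCO limits from lcd_pll_out_min/max when RADEON_PLL_IS_LCD is set and from pll_out_min/max otherwise; the radeon_combios.c change earlier simply seeds the LCD limits from the generic ones so legacy BIOS paths behave as before. A small stand-alone sketch of that selection plus the range check, using simplified, hypothetical struct fields:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PLL_IS_LCD (1u << 0)	/* stand-in for RADEON_PLL_IS_LCD */

	struct pll_limits {
		uint32_t flags;
		uint32_t pll_out_min, pll_out_max;	   /* generic VCO range */
		uint32_t lcd_pll_out_min, lcd_pll_out_max; /* panel-specific range */
	};

	/* Pick the VCO range the way the patched compute paths do, then test a
	 * candidate VCO frequency against it. */
	static bool vco_in_range(const struct pll_limits *pll, uint32_t vco)
	{
		uint32_t out_min, out_max;

		if (pll->flags & PLL_IS_LCD) {
			out_min = pll->lcd_pll_out_min;
			out_max = pll->lcd_pll_out_max;
		} else {
			out_min = pll->pll_out_min;
			out_max = pll->pll_out_max;
		}
		return vco >= out_min && vco <= out_max;
	}

	int main(void)
	{
		struct pll_limits pll = {
			.flags = PLL_IS_LCD,
			.pll_out_min = 64800, .pll_out_max = 1000000,
			/* a panel with a narrower usable VCO window */
			.lcd_pll_out_min = 200000, .lcd_pll_out_max = 600000,
		};

		printf("vco 150000 ok? %d\n", vco_in_range(&pll, 150000)); /* 0 */
		printf("vco 300000 ok? %d\n", vco_in_range(&pll, 300000)); /* 1 */
		return 0;
	}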
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 6eec0ece6a6c..055a51732dcb 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -42,9 +42,10 @@ | |||
42 | * KMS wrapper. | 42 | * KMS wrapper. |
43 | * - 2.0.0 - initial interface | 43 | * - 2.0.0 - initial interface |
44 | * - 2.1.0 - add square tiling interface | 44 | * - 2.1.0 - add square tiling interface |
45 | * - 2.2.0 - add r6xx/r7xx const buffer support | ||
45 | */ | 46 | */ |
46 | #define KMS_DRIVER_MAJOR 2 | 47 | #define KMS_DRIVER_MAJOR 2 |
47 | #define KMS_DRIVER_MINOR 1 | 48 | #define KMS_DRIVER_MINOR 2 |
48 | #define KMS_DRIVER_PATCHLEVEL 0 | 49 | #define KMS_DRIVER_PATCHLEVEL 0 |
49 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 50 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
50 | int radeon_driver_unload_kms(struct drm_device *dev); | 51 | int radeon_driver_unload_kms(struct drm_device *dev); |
@@ -91,6 +92,8 @@ int radeon_tv = 1; | |||
91 | int radeon_new_pll = -1; | 92 | int radeon_new_pll = -1; |
92 | int radeon_dynpm = -1; | 93 | int radeon_dynpm = -1; |
93 | int radeon_audio = 1; | 94 | int radeon_audio = 1; |
95 | int radeon_disp_priority = 0; | ||
96 | int radeon_hw_i2c = 0; | ||
94 | 97 | ||
95 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 98 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
96 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 99 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
@@ -134,6 +137,12 @@ module_param_named(dynpm, radeon_dynpm, int, 0444); | |||
134 | MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); | 137 | MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); |
135 | module_param_named(audio, radeon_audio, int, 0444); | 138 | module_param_named(audio, radeon_audio, int, 0444); |
136 | 139 | ||
140 | MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)"); | ||
141 | module_param_named(disp_priority, radeon_disp_priority, int, 0444); | ||
142 | |||
143 | MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); | ||
144 | module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); | ||
145 | |||
137 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) | 146 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) |
138 | { | 147 | { |
139 | drm_radeon_private_t *dev_priv = dev->dev_private; | 148 | drm_radeon_private_t *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index ec55f2b23c22..448eba89d1e6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -107,9 +107,10 @@ | |||
107 | * 1.30- Add support for occlusion queries | 107 | * 1.30- Add support for occlusion queries |
108 | * 1.31- Add support for num Z pipes from GET_PARAM | 108 | * 1.31- Add support for num Z pipes from GET_PARAM |
109 | * 1.32- fixes for rv740 setup | 109 | * 1.32- fixes for rv740 setup |
110 | * 1.33- Add r6xx/r7xx const buffer support | ||
110 | */ | 111 | */ |
111 | #define DRIVER_MAJOR 1 | 112 | #define DRIVER_MAJOR 1 |
112 | #define DRIVER_MINOR 32 | 113 | #define DRIVER_MINOR 33 |
113 | #define DRIVER_PATCHLEVEL 0 | 114 | #define DRIVER_PATCHLEVEL 0 |
114 | 115 | ||
115 | enum radeon_cp_microcode_version { | 116 | enum radeon_cp_microcode_version { |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index bc926ea0a530..52d6f96f274b 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -302,7 +302,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
302 | } | 302 | } |
303 | 303 | ||
304 | if (ASIC_IS_DCE3(rdev) && | 304 | if (ASIC_IS_DCE3(rdev) && |
305 | (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) { | 305 | (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) { |
306 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 306 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
307 | radeon_dp_set_link_config(connector, mode); | 307 | radeon_dp_set_link_config(connector, mode); |
308 | } | 308 | } |
@@ -519,7 +519,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
519 | break; | 519 | break; |
520 | } | 520 | } |
521 | 521 | ||
522 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 522 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
523 | return; | ||
523 | 524 | ||
524 | switch (frev) { | 525 | switch (frev) { |
525 | case 1: | 526 | case 1: |
@@ -593,7 +594,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
593 | } | 594 | } |
594 | 595 | ||
595 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 596 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
596 | r600_hdmi_enable(encoder, hdmi_detected); | ||
597 | } | 597 | } |
598 | 598 | ||
599 | int | 599 | int |
@@ -708,7 +708,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
708 | struct radeon_connector_atom_dig *dig_connector = | 708 | struct radeon_connector_atom_dig *dig_connector = |
709 | radeon_get_atom_connector_priv_from_encoder(encoder); | 709 | radeon_get_atom_connector_priv_from_encoder(encoder); |
710 | union dig_encoder_control args; | 710 | union dig_encoder_control args; |
711 | int index = 0, num = 0; | 711 | int index = 0; |
712 | uint8_t frev, crev; | 712 | uint8_t frev, crev; |
713 | 713 | ||
714 | if (!dig || !dig_connector) | 714 | if (!dig || !dig_connector) |
@@ -724,9 +724,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
724 | else | 724 | else |
725 | index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); | 725 | index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); |
726 | } | 726 | } |
727 | num = dig->dig_encoder + 1; | ||
728 | 727 | ||
729 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 728 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
729 | return; | ||
730 | 730 | ||
731 | args.v1.ucAction = action; | 731 | args.v1.ucAction = action; |
732 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 732 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
@@ -785,7 +785,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
785 | struct drm_connector *connector; | 785 | struct drm_connector *connector; |
786 | struct radeon_connector *radeon_connector; | 786 | struct radeon_connector *radeon_connector; |
787 | union dig_transmitter_control args; | 787 | union dig_transmitter_control args; |
788 | int index = 0, num = 0; | 788 | int index = 0; |
789 | uint8_t frev, crev; | 789 | uint8_t frev, crev; |
790 | bool is_dp = false; | 790 | bool is_dp = false; |
791 | int pll_id = 0; | 791 | int pll_id = 0; |
@@ -814,7 +814,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
814 | } | 814 | } |
815 | } | 815 | } |
816 | 816 | ||
817 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 817 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
818 | return; | ||
818 | 819 | ||
819 | args.v1.ucAction = action; | 820 | args.v1.ucAction = action; |
820 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | 821 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { |
@@ -860,15 +861,12 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
860 | switch (radeon_encoder->encoder_id) { | 861 | switch (radeon_encoder->encoder_id) { |
861 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 862 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
862 | args.v3.acConfig.ucTransmitterSel = 0; | 863 | args.v3.acConfig.ucTransmitterSel = 0; |
863 | num = 0; | ||
864 | break; | 864 | break; |
865 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 865 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
866 | args.v3.acConfig.ucTransmitterSel = 1; | 866 | args.v3.acConfig.ucTransmitterSel = 1; |
867 | num = 1; | ||
868 | break; | 867 | break; |
869 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 868 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
870 | args.v3.acConfig.ucTransmitterSel = 2; | 869 | args.v3.acConfig.ucTransmitterSel = 2; |
871 | num = 2; | ||
872 | break; | 870 | break; |
873 | } | 871 | } |
874 | 872 | ||
@@ -879,23 +877,19 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
879 | args.v3.acConfig.fCoherentMode = 1; | 877 | args.v3.acConfig.fCoherentMode = 1; |
880 | } | 878 | } |
881 | } else if (ASIC_IS_DCE32(rdev)) { | 879 | } else if (ASIC_IS_DCE32(rdev)) { |
882 | if (dig->dig_encoder == 1) | 880 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; |
883 | args.v2.acConfig.ucEncoderSel = 1; | ||
884 | if (dig_connector->linkb) | 881 | if (dig_connector->linkb) |
885 | args.v2.acConfig.ucLinkSel = 1; | 882 | args.v2.acConfig.ucLinkSel = 1; |
886 | 883 | ||
887 | switch (radeon_encoder->encoder_id) { | 884 | switch (radeon_encoder->encoder_id) { |
888 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 885 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
889 | args.v2.acConfig.ucTransmitterSel = 0; | 886 | args.v2.acConfig.ucTransmitterSel = 0; |
890 | num = 0; | ||
891 | break; | 887 | break; |
892 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 888 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
893 | args.v2.acConfig.ucTransmitterSel = 1; | 889 | args.v2.acConfig.ucTransmitterSel = 1; |
894 | num = 1; | ||
895 | break; | 890 | break; |
896 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 891 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
897 | args.v2.acConfig.ucTransmitterSel = 2; | 892 | args.v2.acConfig.ucTransmitterSel = 2; |
898 | num = 2; | ||
899 | break; | 893 | break; |
900 | } | 894 | } |
901 | 895 | ||
@@ -913,31 +907,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
913 | else | 907 | else |
914 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; | 908 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; |
915 | 909 | ||
916 | switch (radeon_encoder->encoder_id) { | 910 | if ((rdev->flags & RADEON_IS_IGP) && |
917 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 911 | (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { |
918 | if (rdev->flags & RADEON_IS_IGP) { | 912 | if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { |
919 | if (radeon_encoder->pixel_clock > 165000) { | 913 | if (dig_connector->igp_lane_info & 0x1) |
920 | if (dig_connector->igp_lane_info & 0x3) | 914 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; |
921 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; | 915 | else if (dig_connector->igp_lane_info & 0x2) |
922 | else if (dig_connector->igp_lane_info & 0xc) | 916 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; |
923 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; | 917 | else if (dig_connector->igp_lane_info & 0x4) |
924 | } else { | 918 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; |
925 | if (dig_connector->igp_lane_info & 0x1) | 919 | else if (dig_connector->igp_lane_info & 0x8) |
926 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; | 920 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; |
927 | else if (dig_connector->igp_lane_info & 0x2) | 921 | } else { |
928 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; | 922 | if (dig_connector->igp_lane_info & 0x3) |
929 | else if (dig_connector->igp_lane_info & 0x4) | 923 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; |
930 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; | 924 | else if (dig_connector->igp_lane_info & 0xc) |
931 | else if (dig_connector->igp_lane_info & 0x8) | 925 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; |
932 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; | ||
933 | } | ||
934 | } | 926 | } |
935 | break; | ||
936 | } | 927 | } |
937 | 928 | ||
938 | if (radeon_encoder->pixel_clock > 165000) | ||
939 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; | ||
940 | |||
941 | if (dig_connector->linkb) | 929 | if (dig_connector->linkb) |
942 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; | 930 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; |
943 | else | 931 | else |
@@ -948,6 +936,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
948 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 936 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
949 | if (dig->coherent_mode) | 937 | if (dig->coherent_mode) |
950 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; | 938 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; |
939 | if (radeon_encoder->pixel_clock > 165000) | ||
940 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; | ||
951 | } | 941 | } |
952 | } | 942 | } |
953 | 943 | ||
@@ -1054,16 +1044,25 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1054 | if (is_dig) { | 1044 | if (is_dig) { |
1055 | switch (mode) { | 1045 | switch (mode) { |
1056 | case DRM_MODE_DPMS_ON: | 1046 | case DRM_MODE_DPMS_ON: |
1057 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | 1047 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
1058 | { | ||
1059 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1048 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
1049 | |||
1060 | dp_link_train(encoder, connector); | 1050 | dp_link_train(encoder, connector); |
1051 | if (ASIC_IS_DCE4(rdev)) | ||
1052 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); | ||
1061 | } | 1053 | } |
1054 | if (!ASIC_IS_DCE4(rdev)) | ||
1055 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
1062 | break; | 1056 | break; |
1063 | case DRM_MODE_DPMS_STANDBY: | 1057 | case DRM_MODE_DPMS_STANDBY: |
1064 | case DRM_MODE_DPMS_SUSPEND: | 1058 | case DRM_MODE_DPMS_SUSPEND: |
1065 | case DRM_MODE_DPMS_OFF: | 1059 | case DRM_MODE_DPMS_OFF: |
1066 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); | 1060 | if (!ASIC_IS_DCE4(rdev)) |
1061 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); | ||
1062 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | ||
1063 | if (ASIC_IS_DCE4(rdev)) | ||
1064 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); | ||
1065 | } | ||
1067 | break; | 1066 | break; |
1068 | } | 1067 | } |
1069 | } else { | 1068 | } else { |
@@ -1104,7 +1103,8 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
1104 | 1103 | ||
1105 | memset(&args, 0, sizeof(args)); | 1104 | memset(&args, 0, sizeof(args)); |
1106 | 1105 | ||
1107 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 1106 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
1107 | return; | ||
1108 | 1108 | ||
1109 | switch (frev) { | 1109 | switch (frev) { |
1110 | case 1: | 1110 | case 1: |
@@ -1216,6 +1216,9 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
1216 | } | 1216 | } |
1217 | 1217 | ||
1218 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1218 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
1219 | |||
1220 | /* update scratch regs with new routing */ | ||
1221 | radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | ||
1219 | } | 1222 | } |
1220 | 1223 | ||
1221 | static void | 1224 | static void |
@@ -1326,19 +1329,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1326 | struct drm_device *dev = encoder->dev; | 1329 | struct drm_device *dev = encoder->dev; |
1327 | struct radeon_device *rdev = dev->dev_private; | 1330 | struct radeon_device *rdev = dev->dev_private; |
1328 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1331 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1329 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1330 | 1332 | ||
1331 | if (radeon_encoder->active_device & | ||
1332 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { | ||
1333 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
1334 | if (dig) | ||
1335 | dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); | ||
1336 | } | ||
1337 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1333 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1338 | 1334 | ||
1339 | radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | ||
1340 | atombios_set_encoder_crtc_source(encoder); | ||
1341 | |||
1342 | if (ASIC_IS_AVIVO(rdev)) { | 1335 | if (ASIC_IS_AVIVO(rdev)) { |
1343 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) | 1336 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) |
1344 | atombios_yuv_setup(encoder, true); | 1337 | atombios_yuv_setup(encoder, true); |
@@ -1396,9 +1389,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1396 | } | 1389 | } |
1397 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 1390 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
1398 | 1391 | ||
1399 | /* XXX */ | 1392 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { |
1400 | if (!ASIC_IS_DCE4(rdev)) | 1393 | r600_hdmi_enable(encoder); |
1401 | r600_hdmi_setmode(encoder, adjusted_mode); | 1394 | r600_hdmi_setmode(encoder, adjusted_mode); |
1395 | } | ||
1402 | } | 1396 | } |
1403 | 1397 | ||
1404 | static bool | 1398 | static bool |
@@ -1418,7 +1412,8 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn | |||
1418 | 1412 | ||
1419 | memset(&args, 0, sizeof(args)); | 1413 | memset(&args, 0, sizeof(args)); |
1420 | 1414 | ||
1421 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 1415 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
1416 | return false; | ||
1422 | 1417 | ||
1423 | args.sDacload.ucMisc = 0; | 1418 | args.sDacload.ucMisc = 0; |
1424 | 1419 | ||
@@ -1492,8 +1487,20 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec | |||
1492 | 1487 | ||
1493 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | 1488 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) |
1494 | { | 1489 | { |
1490 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1491 | |||
1492 | if (radeon_encoder->active_device & | ||
1493 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { | ||
1494 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
1495 | if (dig) | ||
1496 | dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); | ||
1497 | } | ||
1498 | |||
1495 | radeon_atom_output_lock(encoder, true); | 1499 | radeon_atom_output_lock(encoder, true); |
1496 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1500 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1501 | |||
1502 | /* this is needed for the pll/ss setup to work correctly in some cases */ | ||
1503 | atombios_set_encoder_crtc_source(encoder); | ||
1497 | } | 1504 | } |
1498 | 1505 | ||
1499 | static void radeon_atom_encoder_commit(struct drm_encoder *encoder) | 1506 | static void radeon_atom_encoder_commit(struct drm_encoder *encoder) |
@@ -1509,6 +1516,8 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1509 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1516 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1510 | 1517 | ||
1511 | if (radeon_encoder_is_digital(encoder)) { | 1518 | if (radeon_encoder_is_digital(encoder)) { |
1519 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | ||
1520 | r600_hdmi_disable(encoder); | ||
1512 | dig = radeon_encoder->enc_priv; | 1521 | dig = radeon_encoder->enc_priv; |
1513 | dig->dig_encoder = -1; | 1522 | dig->dig_encoder = -1; |
1514 | } | 1523 | } |
@@ -1659,6 +1668,4 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
1659 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); | 1668 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); |
1660 | break; | 1669 | break; |
1661 | } | 1670 | } |
1662 | |||
1663 | r600_hdmi_init(encoder); | ||
1664 | } | 1671 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 4ae50c19589f..5def6f5dff38 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -59,6 +59,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | |||
59 | return false; | 59 | return false; |
60 | } | 60 | } |
61 | 61 | ||
62 | /* bit banging i2c */ | ||
62 | 63 | ||
63 | static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) | 64 | static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) |
64 | { | 65 | { |
@@ -181,13 +182,30 @@ static void set_data(void *i2c_priv, int data) | |||
181 | WREG32(rec->en_data_reg, val); | 182 | WREG32(rec->en_data_reg, val); |
182 | } | 183 | } |
183 | 184 | ||
185 | static int pre_xfer(struct i2c_adapter *i2c_adap) | ||
186 | { | ||
187 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
188 | |||
189 | radeon_i2c_do_lock(i2c, 1); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | static void post_xfer(struct i2c_adapter *i2c_adap) | ||
195 | { | ||
196 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
197 | |||
198 | radeon_i2c_do_lock(i2c, 0); | ||
199 | } | ||
200 | |||
201 | /* hw i2c */ | ||
202 | |||
184 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | 203 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) |
185 | { | 204 | { |
186 | struct radeon_pll *spll = &rdev->clock.spll; | ||
187 | u32 sclk = radeon_get_engine_clock(rdev); | 205 | u32 sclk = radeon_get_engine_clock(rdev); |
188 | u32 prescale = 0; | 206 | u32 prescale = 0; |
189 | u32 n, m; | 207 | u32 nm; |
190 | u8 loop; | 208 | u8 n, m, loop; |
191 | int i2c_clock; | 209 | int i2c_clock; |
192 | 210 | ||
193 | switch (rdev->family) { | 211 | switch (rdev->family) { |
@@ -203,13 +221,15 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | |||
203 | case CHIP_R300: | 221 | case CHIP_R300: |
204 | case CHIP_R350: | 222 | case CHIP_R350: |
205 | case CHIP_RV350: | 223 | case CHIP_RV350: |
206 | n = (spll->reference_freq) / (4 * 6); | 224 | i2c_clock = 60; |
225 | nm = (sclk * 10) / (i2c_clock * 4); | ||
207 | for (loop = 1; loop < 255; loop++) { | 226 | for (loop = 1; loop < 255; loop++) { |
208 | if ((loop * (loop - 1)) > n) | 227 | if ((nm / loop) < loop) |
209 | break; | 228 | break; |
210 | } | 229 | } |
211 | m = loop - 1; | 230 | n = loop - 1; |
212 | prescale = m | (loop << 8); | 231 | m = loop - 2; |
232 | prescale = m | (n << 8); | ||
213 | break; | 233 | break; |
214 | case CHIP_RV380: | 234 | case CHIP_RV380: |
215 | case CHIP_RS400: | 235 | case CHIP_RS400: |
@@ -217,7 +237,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | |||
217 | case CHIP_R420: | 237 | case CHIP_R420: |
218 | case CHIP_R423: | 238 | case CHIP_R423: |
219 | case CHIP_RV410: | 239 | case CHIP_RV410: |
220 | sclk = radeon_get_engine_clock(rdev); | ||
221 | prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128; | 240 | prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128; |
222 | break; | 241 | break; |
223 | case CHIP_RS600: | 242 | case CHIP_RS600: |
@@ -232,7 +251,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | |||
232 | case CHIP_RV570: | 251 | case CHIP_RV570: |
233 | case CHIP_R580: | 252 | case CHIP_R580: |
234 | i2c_clock = 50; | 253 | i2c_clock = 50; |
235 | sclk = radeon_get_engine_clock(rdev); | ||
236 | if (rdev->family == CHIP_R520) | 254 | if (rdev->family == CHIP_R520) |
237 | prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock)); | 255 | prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock)); |
238 | else | 256 | else |
@@ -291,6 +309,7 @@ static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
291 | prescale = radeon_get_i2c_prescale(rdev); | 309 | prescale = radeon_get_i2c_prescale(rdev); |
292 | 310 | ||
293 | reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) | | 311 | reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) | |
312 | RADEON_I2C_DRIVE_EN | | ||
294 | RADEON_I2C_START | | 313 | RADEON_I2C_START | |
295 | RADEON_I2C_STOP | | 314 | RADEON_I2C_STOP | |
296 | RADEON_I2C_GO); | 315 | RADEON_I2C_GO); |
@@ -757,26 +776,13 @@ done: | |||
757 | return ret; | 776 | return ret; |
758 | } | 777 | } |
759 | 778 | ||
760 | static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap, | 779 | static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap, |
761 | struct i2c_msg *msgs, int num) | 780 | struct i2c_msg *msgs, int num) |
762 | { | 781 | { |
763 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | 782 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); |
764 | int ret; | ||
765 | |||
766 | radeon_i2c_do_lock(i2c, 1); | ||
767 | ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num); | ||
768 | radeon_i2c_do_lock(i2c, 0); | ||
769 | |||
770 | return ret; | ||
771 | } | ||
772 | |||
773 | static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | ||
774 | struct i2c_msg *msgs, int num) | ||
775 | { | ||
776 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
777 | struct radeon_device *rdev = i2c->dev->dev_private; | 783 | struct radeon_device *rdev = i2c->dev->dev_private; |
778 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | 784 | struct radeon_i2c_bus_rec *rec = &i2c->rec; |
779 | int ret; | 785 | int ret = 0; |
780 | 786 | ||
781 | switch (rdev->family) { | 787 | switch (rdev->family) { |
782 | case CHIP_R100: | 788 | case CHIP_R100: |
@@ -797,16 +803,12 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
797 | case CHIP_RV410: | 803 | case CHIP_RV410: |
798 | case CHIP_RS400: | 804 | case CHIP_RS400: |
799 | case CHIP_RS480: | 805 | case CHIP_RS480: |
800 | if (rec->hw_capable) | 806 | ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); |
801 | ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); | ||
802 | else | ||
803 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
804 | break; | 807 | break; |
805 | case CHIP_RS600: | 808 | case CHIP_RS600: |
806 | case CHIP_RS690: | 809 | case CHIP_RS690: |
807 | case CHIP_RS740: | 810 | case CHIP_RS740: |
808 | /* XXX fill in hw i2c implementation */ | 811 | /* XXX fill in hw i2c implementation */ |
809 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
810 | break; | 812 | break; |
811 | case CHIP_RV515: | 813 | case CHIP_RV515: |
812 | case CHIP_R520: | 814 | case CHIP_R520: |
@@ -814,20 +816,16 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
814 | case CHIP_RV560: | 816 | case CHIP_RV560: |
815 | case CHIP_RV570: | 817 | case CHIP_RV570: |
816 | case CHIP_R580: | 818 | case CHIP_R580: |
817 | if (rec->hw_capable) { | 819 | if (rec->mm_i2c) |
818 | if (rec->mm_i2c) | 820 | ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); |
819 | ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); | 821 | else |
820 | else | 822 | ret = r500_hw_i2c_xfer(i2c_adap, msgs, num); |
821 | ret = r500_hw_i2c_xfer(i2c_adap, msgs, num); | ||
822 | } else | ||
823 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
824 | break; | 823 | break; |
825 | case CHIP_R600: | 824 | case CHIP_R600: |
826 | case CHIP_RV610: | 825 | case CHIP_RV610: |
827 | case CHIP_RV630: | 826 | case CHIP_RV630: |
828 | case CHIP_RV670: | 827 | case CHIP_RV670: |
829 | /* XXX fill in hw i2c implementation */ | 828 | /* XXX fill in hw i2c implementation */ |
830 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
831 | break; | 829 | break; |
832 | case CHIP_RV620: | 830 | case CHIP_RV620: |
833 | case CHIP_RV635: | 831 | case CHIP_RV635: |
@@ -838,7 +836,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
838 | case CHIP_RV710: | 836 | case CHIP_RV710: |
839 | case CHIP_RV740: | 837 | case CHIP_RV740: |
840 | /* XXX fill in hw i2c implementation */ | 838 | /* XXX fill in hw i2c implementation */ |
841 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
842 | break; | 839 | break; |
843 | case CHIP_CEDAR: | 840 | case CHIP_CEDAR: |
844 | case CHIP_REDWOOD: | 841 | case CHIP_REDWOOD: |
@@ -846,7 +843,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
846 | case CHIP_CYPRESS: | 843 | case CHIP_CYPRESS: |
847 | case CHIP_HEMLOCK: | 844 | case CHIP_HEMLOCK: |
848 | /* XXX fill in hw i2c implementation */ | 845 | /* XXX fill in hw i2c implementation */ |
849 | ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); | ||
850 | break; | 846 | break; |
851 | default: | 847 | default: |
852 | DRM_ERROR("i2c: unhandled radeon chip\n"); | 848 | DRM_ERROR("i2c: unhandled radeon chip\n"); |
@@ -857,20 +853,21 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
857 | return ret; | 853 | return ret; |
858 | } | 854 | } |
859 | 855 | ||
860 | static u32 radeon_i2c_func(struct i2c_adapter *adap) | 856 | static u32 radeon_hw_i2c_func(struct i2c_adapter *adap) |
861 | { | 857 | { |
862 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; | 858 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; |
863 | } | 859 | } |
864 | 860 | ||
865 | static const struct i2c_algorithm radeon_i2c_algo = { | 861 | static const struct i2c_algorithm radeon_i2c_algo = { |
866 | .master_xfer = radeon_i2c_xfer, | 862 | .master_xfer = radeon_hw_i2c_xfer, |
867 | .functionality = radeon_i2c_func, | 863 | .functionality = radeon_hw_i2c_func, |
868 | }; | 864 | }; |
869 | 865 | ||
870 | struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | 866 | struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, |
871 | struct radeon_i2c_bus_rec *rec, | 867 | struct radeon_i2c_bus_rec *rec, |
872 | const char *name) | 868 | const char *name) |
873 | { | 869 | { |
870 | struct radeon_device *rdev = dev->dev_private; | ||
874 | struct radeon_i2c_chan *i2c; | 871 | struct radeon_i2c_chan *i2c; |
875 | int ret; | 872 | int ret; |
876 | 873 | ||
@@ -878,37 +875,43 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
878 | if (i2c == NULL) | 875 | if (i2c == NULL) |
879 | return NULL; | 876 | return NULL; |
880 | 877 | ||
881 | /* set the internal bit adapter */ | ||
882 | i2c->algo.radeon.bit_adapter.owner = THIS_MODULE; | ||
883 | i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c); | ||
884 | sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name); | ||
885 | i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data; | ||
886 | i2c->algo.radeon.bit_data.setsda = set_data; | ||
887 | i2c->algo.radeon.bit_data.setscl = set_clock; | ||
888 | i2c->algo.radeon.bit_data.getsda = get_data; | ||
889 | i2c->algo.radeon.bit_data.getscl = get_clock; | ||
890 | i2c->algo.radeon.bit_data.udelay = 20; | ||
891 | /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always | ||
892 | * make this, 2 jiffies is a lot more reliable */ | ||
893 | i2c->algo.radeon.bit_data.timeout = 2; | ||
894 | i2c->algo.radeon.bit_data.data = i2c; | ||
895 | ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter); | ||
896 | if (ret) { | ||
897 | DRM_ERROR("Failed to register internal bit i2c %s\n", name); | ||
898 | goto out_free; | ||
899 | } | ||
900 | /* set the radeon i2c adapter */ | ||
901 | i2c->dev = dev; | ||
902 | i2c->rec = *rec; | 878 | i2c->rec = *rec; |
903 | i2c->adapter.owner = THIS_MODULE; | 879 | i2c->adapter.owner = THIS_MODULE; |
880 | i2c->dev = dev; | ||
904 | i2c_set_adapdata(&i2c->adapter, i2c); | 881 | i2c_set_adapdata(&i2c->adapter, i2c); |
905 | sprintf(i2c->adapter.name, "Radeon i2c %s", name); | 882 | if (rec->mm_i2c || |
906 | i2c->adapter.algo_data = &i2c->algo.radeon; | 883 | (rec->hw_capable && |
907 | i2c->adapter.algo = &radeon_i2c_algo; | 884 | radeon_hw_i2c && |
908 | ret = i2c_add_adapter(&i2c->adapter); | 885 | ((rdev->family <= CHIP_RS480) || |
909 | if (ret) { | 886 | ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { |
910 | DRM_ERROR("Failed to register i2c %s\n", name); | 887 | /* set the radeon hw i2c adapter */ |
911 | goto out_free; | 888 | sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name); |
889 | i2c->adapter.algo = &radeon_i2c_algo; | ||
890 | ret = i2c_add_adapter(&i2c->adapter); | ||
891 | if (ret) { | ||
892 | DRM_ERROR("Failed to register hw i2c %s\n", name); | ||
893 | goto out_free; | ||
894 | } | ||
895 | } else { | ||
896 | /* set the radeon bit adapter */ | ||
897 | sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name); | ||
898 | i2c->adapter.algo_data = &i2c->algo.bit; | ||
899 | i2c->algo.bit.pre_xfer = pre_xfer; | ||
900 | i2c->algo.bit.post_xfer = post_xfer; | ||
901 | i2c->algo.bit.setsda = set_data; | ||
902 | i2c->algo.bit.setscl = set_clock; | ||
903 | i2c->algo.bit.getsda = get_data; | ||
904 | i2c->algo.bit.getscl = get_clock; | ||
905 | i2c->algo.bit.udelay = 20; | ||
906 | /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always | ||
907 | * make this, 2 jiffies is a lot more reliable */ | ||
908 | i2c->algo.bit.timeout = 2; | ||
909 | i2c->algo.bit.data = i2c; | ||
910 | ret = i2c_bit_add_bus(&i2c->adapter); | ||
911 | if (ret) { | ||
912 | DRM_ERROR("Failed to register bit i2c %s\n", name); | ||
913 | goto out_free; | ||
914 | } | ||
912 | } | 915 | } |
913 | 916 | ||
914 | return i2c; | 917 | return i2c; |
@@ -953,16 +956,6 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) | |||
953 | { | 956 | { |
954 | if (!i2c) | 957 | if (!i2c) |
955 | return; | 958 | return; |
956 | i2c_del_adapter(&i2c->algo.radeon.bit_adapter); | ||
957 | i2c_del_adapter(&i2c->adapter); | ||
958 | kfree(i2c); | ||
959 | } | ||
960 | |||
961 | void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c) | ||
962 | { | ||
963 | if (!i2c) | ||
964 | return; | ||
965 | |||
966 | i2c_del_adapter(&i2c->adapter); | 959 | i2c_del_adapter(&i2c->adapter); |
967 | kfree(i2c); | 960 | kfree(i2c); |
968 | } | 961 | } |
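The rework above collapses the old nested bit_adapter into a single i2c adapter per bus: hardware-capable buses on the whitelisted families get radeon_hw_i2c_xfer(), everything else goes through the i2c-algo-bit path, with pre_xfer()/post_xfer() now doing the pad locking that radeon_sw_i2c_xfer() used to wrap around i2c_transfer(). A minimal sketch of a caller driving the resulting adapter follows; the 0x50 slave address and one-byte register layout are hypothetical, not taken from this commit:

    /* Illustrative only: a write-then-read transfer against the adapter
     * registered by radeon_i2c_create().  For the bit-banged path,
     * pre_xfer()/post_xfer() grab and release the GPIO pads around the
     * call; for the hw path, radeon_hw_i2c_xfer() services it directly. */
    #include <linux/i2c.h>

    static int example_read_byte(struct i2c_adapter *adap, u8 reg, u8 *val)
    {
            struct i2c_msg msgs[2] = {
                    { .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
                    { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
            };

            /* i2c_transfer() returns the number of messages completed */
            return i2c_transfer(adap, msgs, 2) == 2 ? 0 : -EIO;
    }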
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 3cfd60fd0083..a212041e8b0b 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -67,9 +67,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) | |||
67 | 67 | ||
68 | /* Disable *all* interrupts */ | 68 | /* Disable *all* interrupts */ |
69 | rdev->irq.sw_int = false; | 69 | rdev->irq.sw_int = false; |
70 | for (i = 0; i < 2; i++) { | 70 | for (i = 0; i < rdev->num_crtc; i++) |
71 | rdev->irq.crtc_vblank_int[i] = false; | 71 | rdev->irq.crtc_vblank_int[i] = false; |
72 | } | 72 | for (i = 0; i < 6; i++) |
73 | rdev->irq.hpd[i] = false; | ||
73 | radeon_irq_set(rdev); | 74 | radeon_irq_set(rdev); |
74 | /* Clear bits */ | 75 | /* Clear bits */ |
75 | radeon_irq_process(rdev); | 76 | radeon_irq_process(rdev); |
@@ -95,34 +96,29 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
95 | } | 96 | } |
96 | /* Disable *all* interrupts */ | 97 | /* Disable *all* interrupts */ |
97 | rdev->irq.sw_int = false; | 98 | rdev->irq.sw_int = false; |
98 | for (i = 0; i < 2; i++) { | 99 | for (i = 0; i < rdev->num_crtc; i++) |
99 | rdev->irq.crtc_vblank_int[i] = false; | 100 | rdev->irq.crtc_vblank_int[i] = false; |
101 | for (i = 0; i < 6; i++) | ||
100 | rdev->irq.hpd[i] = false; | 102 | rdev->irq.hpd[i] = false; |
101 | } | ||
102 | radeon_irq_set(rdev); | 103 | radeon_irq_set(rdev); |
103 | } | 104 | } |
104 | 105 | ||
105 | int radeon_irq_kms_init(struct radeon_device *rdev) | 106 | int radeon_irq_kms_init(struct radeon_device *rdev) |
106 | { | 107 | { |
107 | int r = 0; | 108 | int r = 0; |
108 | int num_crtc = 2; | ||
109 | 109 | ||
110 | if (rdev->flags & RADEON_SINGLE_CRTC) | ||
111 | num_crtc = 1; | ||
112 | spin_lock_init(&rdev->irq.sw_lock); | 110 | spin_lock_init(&rdev->irq.sw_lock); |
113 | r = drm_vblank_init(rdev->ddev, num_crtc); | 111 | r = drm_vblank_init(rdev->ddev, rdev->num_crtc); |
114 | if (r) { | 112 | if (r) { |
115 | return r; | 113 | return r; |
116 | } | 114 | } |
117 | /* enable msi */ | 115 | /* enable msi */ |
118 | rdev->msi_enabled = 0; | 116 | rdev->msi_enabled = 0; |
119 | /* MSIs don't seem to work on my rs780; | 117 | /* MSIs don't seem to work reliably on all IGP |
120 | * not sure about rs880 or other rs780s. | 118 | * chips. Disable MSI on them for now. |
121 | * Needs more investigation. | ||
122 | */ | 119 | */ |
123 | if ((rdev->family >= CHIP_RV380) && | 120 | if ((rdev->family >= CHIP_RV380) && |
124 | (rdev->family != CHIP_RS780) && | 121 | (!(rdev->flags & RADEON_IS_IGP))) { |
125 | (rdev->family != CHIP_RS880)) { | ||
126 | int ret = pci_enable_msi(rdev->pdev); | 122 | int ret = pci_enable_msi(rdev->pdev); |
127 | if (!ret) { | 123 | if (!ret) { |
128 | rdev->msi_enabled = 1; | 124 | rdev->msi_enabled = 1; |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index df23d6a01d02..88865e38fe30 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -603,6 +603,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod | |||
603 | ? RADEON_CRTC2_INTERLACE_EN | 603 | ? RADEON_CRTC2_INTERLACE_EN |
604 | : 0)); | 604 | : 0)); |
605 | 605 | ||
606 | /* rs4xx chips seem to like to have the crtc enabled when the timing is set */ | ||
607 | if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480)) | ||
608 | crtc2_gen_cntl |= RADEON_CRTC2_EN; | ||
609 | |||
606 | disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); | 610 | disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); |
607 | disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; | 611 | disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; |
608 | 612 | ||
@@ -630,6 +634,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod | |||
630 | ? RADEON_CRTC_INTERLACE_EN | 634 | ? RADEON_CRTC_INTERLACE_EN |
631 | : 0)); | 635 | : 0)); |
632 | 636 | ||
637 | /* rs4xx chips seem to like to have the crtc enabled when the timing is set */ | ||
638 | if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480)) | ||
639 | crtc_gen_cntl |= RADEON_CRTC_EN; | ||
640 | |||
633 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); | 641 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); |
634 | crtc_ext_cntl |= (RADEON_XCRT_CNT_EN | | 642 | crtc_ext_cntl |= (RADEON_XCRT_CNT_EN | |
635 | RADEON_CRTC_VSYNC_DIS | | 643 | RADEON_CRTC_VSYNC_DIS | |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index 417684daef4c..f2ed27c8055b 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c | |||
@@ -57,6 +57,10 @@ | |||
57 | #define NTSC_TV_PLL_N_14 693 | 57 | #define NTSC_TV_PLL_N_14 693 |
58 | #define NTSC_TV_PLL_P_14 7 | 58 | #define NTSC_TV_PLL_P_14 7 |
59 | 59 | ||
60 | #define PAL_TV_PLL_M_14 19 | ||
61 | #define PAL_TV_PLL_N_14 353 | ||
62 | #define PAL_TV_PLL_P_14 5 | ||
63 | |||
60 | #define VERT_LEAD_IN_LINES 2 | 64 | #define VERT_LEAD_IN_LINES 2 |
61 | #define FRAC_BITS 0xe | 65 | #define FRAC_BITS 0xe |
62 | #define FRAC_MASK 0x3fff | 66 | #define FRAC_MASK 0x3fff |
@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = { | |||
205 | 630627, /* defRestart */ | 209 | 630627, /* defRestart */ |
206 | 347, /* crtcPLL_N */ | 210 | 347, /* crtcPLL_N */ |
207 | 14, /* crtcPLL_M */ | 211 | 14, /* crtcPLL_M */ |
208 | 8, /* crtcPLL_postDiv */ | 212 | 8, /* crtcPLL_postDiv */ |
209 | 1022, /* pixToTV */ | 213 | 1022, /* pixToTV */ |
210 | }, | 214 | }, |
215 | { /* PAL timing for 14 Mhz ref clk */ | ||
216 | 800, /* horResolution */ | ||
217 | 600, /* verResolution */ | ||
218 | TV_STD_PAL, /* standard */ | ||
219 | 1131, /* horTotal */ | ||
220 | 742, /* verTotal */ | ||
221 | 813, /* horStart */ | ||
222 | 840, /* horSyncStart */ | ||
223 | 633, /* verSyncStart */ | ||
224 | 708369, /* defRestart */ | ||
225 | 211, /* crtcPLL_N */ | ||
226 | 9, /* crtcPLL_M */ | ||
227 | 8, /* crtcPLL_postDiv */ | ||
228 | 759, /* pixToTV */ | ||
229 | }, | ||
211 | }; | 230 | }; |
212 | 231 | ||
213 | #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes) | 232 | #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes) |
@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru | |||
242 | if (pll->reference_freq == 2700) | 261 | if (pll->reference_freq == 2700) |
243 | const_ptr = &available_tv_modes[1]; | 262 | const_ptr = &available_tv_modes[1]; |
244 | else | 263 | else |
245 | const_ptr = &available_tv_modes[1]; /* FIX ME */ | 264 | const_ptr = &available_tv_modes[3]; |
246 | } | 265 | } |
247 | return const_ptr; | 266 | return const_ptr; |
248 | } | 267 | } |
@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, | |||
685 | n = PAL_TV_PLL_N_27; | 704 | n = PAL_TV_PLL_N_27; |
686 | p = PAL_TV_PLL_P_27; | 705 | p = PAL_TV_PLL_P_27; |
687 | } else { | 706 | } else { |
688 | m = PAL_TV_PLL_M_27; | 707 | m = PAL_TV_PLL_M_14; |
689 | n = PAL_TV_PLL_N_27; | 708 | n = PAL_TV_PLL_N_14; |
690 | p = PAL_TV_PLL_P_27; | 709 | p = PAL_TV_PLL_P_14; |
691 | } | 710 | } |
692 | } | 711 | } |
693 | 712 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 1702b820aa4d..0b8e32776b10 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -129,6 +129,7 @@ struct radeon_tmds_pll { | |||
129 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) | 129 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) |
130 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) | 130 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) |
131 | #define RADEON_PLL_USE_POST_DIV (1 << 12) | 131 | #define RADEON_PLL_USE_POST_DIV (1 << 12) |
132 | #define RADEON_PLL_IS_LCD (1 << 13) | ||
132 | 133 | ||
133 | /* pll algo */ | 134 | /* pll algo */ |
134 | enum radeon_pll_algo { | 135 | enum radeon_pll_algo { |
@@ -149,6 +150,8 @@ struct radeon_pll { | |||
149 | uint32_t pll_in_max; | 150 | uint32_t pll_in_max; |
150 | uint32_t pll_out_min; | 151 | uint32_t pll_out_min; |
151 | uint32_t pll_out_max; | 152 | uint32_t pll_out_max; |
153 | uint32_t lcd_pll_out_min; | ||
154 | uint32_t lcd_pll_out_max; | ||
152 | uint32_t best_vco; | 155 | uint32_t best_vco; |
153 | 156 | ||
154 | /* divider limits */ | 157 | /* divider limits */ |
@@ -170,17 +173,12 @@ struct radeon_pll { | |||
170 | enum radeon_pll_algo algo; | 173 | enum radeon_pll_algo algo; |
171 | }; | 174 | }; |
172 | 175 | ||
173 | struct i2c_algo_radeon_data { | ||
174 | struct i2c_adapter bit_adapter; | ||
175 | struct i2c_algo_bit_data bit_data; | ||
176 | }; | ||
177 | |||
178 | struct radeon_i2c_chan { | 176 | struct radeon_i2c_chan { |
179 | struct i2c_adapter adapter; | 177 | struct i2c_adapter adapter; |
180 | struct drm_device *dev; | 178 | struct drm_device *dev; |
181 | union { | 179 | union { |
180 | struct i2c_algo_bit_data bit; | ||
182 | struct i2c_algo_dp_aux_data dp; | 181 | struct i2c_algo_dp_aux_data dp; |
183 | struct i2c_algo_radeon_data radeon; | ||
184 | } algo; | 182 | } algo; |
185 | struct radeon_i2c_bus_rec rec; | 183 | struct radeon_i2c_bus_rec rec; |
186 | }; | 184 | }; |
@@ -342,6 +340,7 @@ struct radeon_encoder { | |||
342 | struct drm_display_mode native_mode; | 340 | struct drm_display_mode native_mode; |
343 | void *enc_priv; | 341 | void *enc_priv; |
344 | int hdmi_offset; | 342 | int hdmi_offset; |
343 | int hdmi_config_offset; | ||
345 | int hdmi_audio_workaround; | 344 | int hdmi_audio_workaround; |
346 | int hdmi_buffer_status; | 345 | int hdmi_buffer_status; |
347 | }; | 346 | }; |
@@ -431,7 +430,6 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
431 | struct radeon_i2c_bus_rec *rec, | 430 | struct radeon_i2c_bus_rec *rec, |
432 | const char *name); | 431 | const char *name); |
433 | extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); | 432 | extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); |
434 | extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c); | ||
435 | extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, | 433 | extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, |
436 | u8 slave_addr, | 434 | u8 slave_addr, |
437 | u8 addr, | 435 | u8 addr, |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index fc9d00ac6b15..dc7e3f449138 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -185,8 +185,10 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) | |||
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
187 | radeon_ttm_placement_from_domain(bo, domain); | 187 | radeon_ttm_placement_from_domain(bo, domain); |
188 | /* force to pin into visible video ram */ | 188 | if (domain == RADEON_GEM_DOMAIN_VRAM) { |
189 | bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; | 189 | /* force to pin into visible video ram */ |
190 | bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; | ||
191 | } | ||
190 | for (i = 0; i < bo->placement.num_placement; i++) | 192 | for (i = 0; i < bo->placement.num_placement; i++) |
191 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; | 193 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; |
192 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 194 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index d4d1c39a0e99..a4b57493aa78 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #define RADEON_RECLOCK_DELAY_MS 200 | 28 | #define RADEON_RECLOCK_DELAY_MS 200 |
29 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 | 29 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 |
30 | 30 | ||
31 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); | ||
31 | static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); | 32 | static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); |
32 | static void radeon_pm_set_clocks(struct radeon_device *rdev); | 33 | static void radeon_pm_set_clocks(struct radeon_device *rdev); |
33 | static void radeon_pm_idle_work_handler(struct work_struct *work); | 34 | static void radeon_pm_idle_work_handler(struct work_struct *work); |
@@ -179,6 +180,16 @@ static void radeon_get_power_state(struct radeon_device *rdev, | |||
179 | rdev->pm.requested_power_state->non_clock_info.pcie_lanes); | 180 | rdev->pm.requested_power_state->non_clock_info.pcie_lanes); |
180 | } | 181 | } |
181 | 182 | ||
183 | static inline void radeon_sync_with_vblank(struct radeon_device *rdev) | ||
184 | { | ||
185 | if (rdev->pm.active_crtcs) { | ||
186 | rdev->pm.vblank_sync = false; | ||
187 | wait_event_timeout( | ||
188 | rdev->irq.vblank_queue, rdev->pm.vblank_sync, | ||
189 | msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); | ||
190 | } | ||
191 | } | ||
192 | |||
182 | static void radeon_set_power_state(struct radeon_device *rdev) | 193 | static void radeon_set_power_state(struct radeon_device *rdev) |
183 | { | 194 | { |
184 | /* if *_clock_mode are the same, *_power_state are as well */ | 195 | /* if *_clock_mode are the same, *_power_state are as well */ |
@@ -189,11 +200,28 @@ static void radeon_set_power_state(struct radeon_device *rdev) | |||
189 | rdev->pm.requested_clock_mode->sclk, | 200 | rdev->pm.requested_clock_mode->sclk, |
190 | rdev->pm.requested_clock_mode->mclk, | 201 | rdev->pm.requested_clock_mode->mclk, |
191 | rdev->pm.requested_power_state->non_clock_info.pcie_lanes); | 202 | rdev->pm.requested_power_state->non_clock_info.pcie_lanes); |
203 | |||
192 | /* set pcie lanes */ | 204 | /* set pcie lanes */ |
205 | /* TODO */ | ||
206 | |||
193 | /* set voltage */ | 207 | /* set voltage */ |
208 | /* TODO */ | ||
209 | |||
194 | /* set engine clock */ | 210 | /* set engine clock */ |
211 | radeon_sync_with_vblank(rdev); | ||
212 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
195 | radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); | 213 | radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); |
214 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
215 | |||
216 | #if 0 | ||
196 | /* set memory clock */ | 217 | /* set memory clock */ |
218 | if (rdev->asic->set_memory_clock) { | ||
219 | radeon_sync_with_vblank(rdev); | ||
220 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
221 | radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk); | ||
222 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
223 | } | ||
224 | #endif | ||
197 | 225 | ||
198 | rdev->pm.current_power_state = rdev->pm.requested_power_state; | 226 | rdev->pm.current_power_state = rdev->pm.requested_power_state; |
199 | rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; | 227 | rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; |
@@ -229,6 +257,12 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
229 | return 0; | 257 | return 0; |
230 | } | 258 | } |
231 | 259 | ||
260 | void radeon_pm_fini(struct radeon_device *rdev) | ||
261 | { | ||
262 | if (rdev->pm.i2c_bus) | ||
263 | radeon_i2c_destroy(rdev->pm.i2c_bus); | ||
264 | } | ||
265 | |||
232 | void radeon_pm_compute_clocks(struct radeon_device *rdev) | 266 | void radeon_pm_compute_clocks(struct radeon_device *rdev) |
233 | { | 267 | { |
234 | struct drm_device *ddev = rdev->ddev; | 268 | struct drm_device *ddev = rdev->ddev; |
@@ -245,7 +279,8 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
245 | list_for_each_entry(connector, | 279 | list_for_each_entry(connector, |
246 | &ddev->mode_config.connector_list, head) { | 280 | &ddev->mode_config.connector_list, head) { |
247 | if (connector->encoder && | 281 | if (connector->encoder && |
248 | connector->dpms != DRM_MODE_DPMS_OFF) { | 282 | connector->encoder->crtc && |
283 | connector->dpms != DRM_MODE_DPMS_OFF) { | ||
249 | radeon_crtc = to_radeon_crtc(connector->encoder->crtc); | 284 | radeon_crtc = to_radeon_crtc(connector->encoder->crtc); |
250 | rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); | 285 | rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); |
251 | ++count; | 286 | ++count; |
@@ -333,10 +368,7 @@ static void radeon_pm_set_clocks_locked(struct radeon_device *rdev) | |||
333 | break; | 368 | break; |
334 | } | 369 | } |
335 | 370 | ||
336 | /* check if we are in vblank */ | ||
337 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
338 | radeon_set_power_state(rdev); | 371 | radeon_set_power_state(rdev); |
339 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
340 | rdev->pm.planned_action = PM_ACTION_NONE; | 372 | rdev->pm.planned_action = PM_ACTION_NONE; |
341 | } | 373 | } |
342 | 374 | ||
@@ -353,10 +385,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) | |||
353 | rdev->pm.req_vblank |= (1 << 1); | 385 | rdev->pm.req_vblank |= (1 << 1); |
354 | drm_vblank_get(rdev->ddev, 1); | 386 | drm_vblank_get(rdev->ddev, 1); |
355 | } | 387 | } |
356 | if (rdev->pm.active_crtcs) | 388 | radeon_pm_set_clocks_locked(rdev); |
357 | wait_event_interruptible_timeout( | ||
358 | rdev->irq.vblank_queue, 0, | ||
359 | msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); | ||
360 | if (rdev->pm.req_vblank & (1 << 0)) { | 389 | if (rdev->pm.req_vblank & (1 << 0)) { |
361 | rdev->pm.req_vblank &= ~(1 << 0); | 390 | rdev->pm.req_vblank &= ~(1 << 0); |
362 | drm_vblank_put(rdev->ddev, 0); | 391 | drm_vblank_put(rdev->ddev, 0); |
@@ -366,7 +395,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) | |||
366 | drm_vblank_put(rdev->ddev, 1); | 395 | drm_vblank_put(rdev->ddev, 1); |
367 | } | 396 | } |
368 | 397 | ||
369 | radeon_pm_set_clocks_locked(rdev); | ||
370 | mutex_unlock(&rdev->cp.mutex); | 398 | mutex_unlock(&rdev->cp.mutex); |
371 | } | 399 | } |
372 | 400 | ||
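The radeon_sync_with_vblank() helper added above pairs with the interrupt-side change made in rs600.c later in this commit, which sets rdev->pm.vblank_sync and wakes rdev->irq.vblank_queue. A condensed sketch of the two halves of that handshake, with the field names taken from the hunks and all other driver context stripped:

    /* reclock path: ask for a vblank-aligned window, bounded by a timeout */
    rdev->pm.vblank_sync = false;
    wait_event_timeout(rdev->irq.vblank_queue, rdev->pm.vblank_sync,
                       msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));

    /* vblank interrupt handler: publish the flag, then wake the waiter */
    rdev->pm.vblank_sync = true;
    wake_up(&rdev->irq.vblank_queue);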
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 5c0dc082d330..eabbc9cf30a7 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -346,6 +346,7 @@ | |||
346 | # define RADEON_TVPLL_PWRMGT_OFF (1 << 30) | 346 | # define RADEON_TVPLL_PWRMGT_OFF (1 << 30) |
347 | # define RADEON_TVCLK_TURNOFF (1 << 31) | 347 | # define RADEON_TVCLK_TURNOFF (1 << 31) |
348 | #define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */ | 348 | #define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */ |
349 | # define RADEON_PM_MODE_SEL (1 << 13) | ||
349 | # define RADEON_TCL_BYPASS_DISABLE (1 << 20) | 350 | # define RADEON_TCL_BYPASS_DISABLE (1 << 20) |
350 | #define RADEON_CLR_CMP_CLR_3D 0x1a24 | 351 | #define RADEON_CLR_CMP_CLR_3D 0x1a24 |
351 | #define RADEON_CLR_CMP_CLR_DST 0x15c8 | 352 | #define RADEON_CLR_CMP_CLR_DST 0x15c8 |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index 8f414a5f520f..af0da4ae3f55 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 | |||
@@ -26,20 +26,16 @@ r600 0x9400 | |||
26 | 0x00028408 VGT_INDX_OFFSET | 26 | 0x00028408 VGT_INDX_OFFSET |
27 | 0x00028AA0 VGT_INSTANCE_STEP_RATE_0 | 27 | 0x00028AA0 VGT_INSTANCE_STEP_RATE_0 |
28 | 0x00028AA4 VGT_INSTANCE_STEP_RATE_1 | 28 | 0x00028AA4 VGT_INSTANCE_STEP_RATE_1 |
29 | 0x000088C0 VGT_LAST_COPY_STATE | ||
30 | 0x00028400 VGT_MAX_VTX_INDX | 29 | 0x00028400 VGT_MAX_VTX_INDX |
31 | 0x000088D8 VGT_MC_LAT_CNTL | ||
32 | 0x00028404 VGT_MIN_VTX_INDX | 30 | 0x00028404 VGT_MIN_VTX_INDX |
33 | 0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN | 31 | 0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN |
34 | 0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX | 32 | 0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX |
35 | 0x00008970 VGT_NUM_INDICES | 33 | 0x00008970 VGT_NUM_INDICES |
36 | 0x00008974 VGT_NUM_INSTANCES | 34 | 0x00008974 VGT_NUM_INSTANCES |
37 | 0x00028A10 VGT_OUTPUT_PATH_CNTL | 35 | 0x00028A10 VGT_OUTPUT_PATH_CNTL |
38 | 0x00028C5C VGT_OUT_DEALLOC_CNTL | ||
39 | 0x00028A84 VGT_PRIMITIVEID_EN | 36 | 0x00028A84 VGT_PRIMITIVEID_EN |
40 | 0x00008958 VGT_PRIMITIVE_TYPE | 37 | 0x00008958 VGT_PRIMITIVE_TYPE |
41 | 0x00028AB4 VGT_REUSE_OFF | 38 | 0x00028AB4 VGT_REUSE_OFF |
42 | 0x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL | ||
43 | 0x00028AB8 VGT_VTX_CNT_EN | 39 | 0x00028AB8 VGT_VTX_CNT_EN |
44 | 0x000088B0 VGT_VTX_VECT_EJECT_REG | 40 | 0x000088B0 VGT_VTX_VECT_EJECT_REG |
45 | 0x00028810 PA_CL_CLIP_CNTL | 41 | 0x00028810 PA_CL_CLIP_CNTL |
@@ -280,7 +276,6 @@ r600 0x9400 | |||
280 | 0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE | 276 | 0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE |
281 | 0x00028814 PA_SU_SC_MODE_CNTL | 277 | 0x00028814 PA_SU_SC_MODE_CNTL |
282 | 0x00028C08 PA_SU_VTX_CNTL | 278 | 0x00028C08 PA_SU_VTX_CNTL |
283 | 0x00008C00 SQ_CONFIG | ||
284 | 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 | 279 | 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 |
285 | 0x00008C08 SQ_GPR_RESOURCE_MGMT_2 | 280 | 0x00008C08 SQ_GPR_RESOURCE_MGMT_2 |
286 | 0x00008C10 SQ_STACK_RESOURCE_MGMT_1 | 281 | 0x00008C10 SQ_STACK_RESOURCE_MGMT_1 |
@@ -320,18 +315,6 @@ r600 0x9400 | |||
320 | 0x000283FC SQ_VTX_SEMANTIC_31 | 315 | 0x000283FC SQ_VTX_SEMANTIC_31 |
321 | 0x000288E0 SQ_VTX_SEMANTIC_CLEAR | 316 | 0x000288E0 SQ_VTX_SEMANTIC_CLEAR |
322 | 0x0003CFF4 SQ_VTX_START_INST_LOC | 317 | 0x0003CFF4 SQ_VTX_START_INST_LOC |
323 | 0x0003C000 SQ_TEX_SAMPLER_WORD0_0 | ||
324 | 0x0003C004 SQ_TEX_SAMPLER_WORD1_0 | ||
325 | 0x0003C008 SQ_TEX_SAMPLER_WORD2_0 | ||
326 | 0x00030000 SQ_ALU_CONSTANT0_0 | ||
327 | 0x00030004 SQ_ALU_CONSTANT1_0 | ||
328 | 0x00030008 SQ_ALU_CONSTANT2_0 | ||
329 | 0x0003000C SQ_ALU_CONSTANT3_0 | ||
330 | 0x0003E380 SQ_BOOL_CONST_0 | ||
331 | 0x0003E384 SQ_BOOL_CONST_1 | ||
332 | 0x0003E388 SQ_BOOL_CONST_2 | ||
333 | 0x0003E200 SQ_LOOP_CONST_0 | ||
334 | 0x0003E200 SQ_LOOP_CONST_DX10_0 | ||
335 | 0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 | 318 | 0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 |
336 | 0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 | 319 | 0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 |
337 | 0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 | 320 | 0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 |
@@ -380,54 +363,6 @@ r600 0x9400 | |||
380 | 0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 | 363 | 0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 |
381 | 0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 | 364 | 0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 |
382 | 0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 | 365 | 0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 |
383 | 0x000289C0 SQ_ALU_CONST_CACHE_GS_0 | ||
384 | 0x000289C4 SQ_ALU_CONST_CACHE_GS_1 | ||
385 | 0x000289C8 SQ_ALU_CONST_CACHE_GS_2 | ||
386 | 0x000289CC SQ_ALU_CONST_CACHE_GS_3 | ||
387 | 0x000289D0 SQ_ALU_CONST_CACHE_GS_4 | ||
388 | 0x000289D4 SQ_ALU_CONST_CACHE_GS_5 | ||
389 | 0x000289D8 SQ_ALU_CONST_CACHE_GS_6 | ||
390 | 0x000289DC SQ_ALU_CONST_CACHE_GS_7 | ||
391 | 0x000289E0 SQ_ALU_CONST_CACHE_GS_8 | ||
392 | 0x000289E4 SQ_ALU_CONST_CACHE_GS_9 | ||
393 | 0x000289E8 SQ_ALU_CONST_CACHE_GS_10 | ||
394 | 0x000289EC SQ_ALU_CONST_CACHE_GS_11 | ||
395 | 0x000289F0 SQ_ALU_CONST_CACHE_GS_12 | ||
396 | 0x000289F4 SQ_ALU_CONST_CACHE_GS_13 | ||
397 | 0x000289F8 SQ_ALU_CONST_CACHE_GS_14 | ||
398 | 0x000289FC SQ_ALU_CONST_CACHE_GS_15 | ||
399 | 0x00028940 SQ_ALU_CONST_CACHE_PS_0 | ||
400 | 0x00028944 SQ_ALU_CONST_CACHE_PS_1 | ||
401 | 0x00028948 SQ_ALU_CONST_CACHE_PS_2 | ||
402 | 0x0002894C SQ_ALU_CONST_CACHE_PS_3 | ||
403 | 0x00028950 SQ_ALU_CONST_CACHE_PS_4 | ||
404 | 0x00028954 SQ_ALU_CONST_CACHE_PS_5 | ||
405 | 0x00028958 SQ_ALU_CONST_CACHE_PS_6 | ||
406 | 0x0002895C SQ_ALU_CONST_CACHE_PS_7 | ||
407 | 0x00028960 SQ_ALU_CONST_CACHE_PS_8 | ||
408 | 0x00028964 SQ_ALU_CONST_CACHE_PS_9 | ||
409 | 0x00028968 SQ_ALU_CONST_CACHE_PS_10 | ||
410 | 0x0002896C SQ_ALU_CONST_CACHE_PS_11 | ||
411 | 0x00028970 SQ_ALU_CONST_CACHE_PS_12 | ||
412 | 0x00028974 SQ_ALU_CONST_CACHE_PS_13 | ||
413 | 0x00028978 SQ_ALU_CONST_CACHE_PS_14 | ||
414 | 0x0002897C SQ_ALU_CONST_CACHE_PS_15 | ||
415 | 0x00028980 SQ_ALU_CONST_CACHE_VS_0 | ||
416 | 0x00028984 SQ_ALU_CONST_CACHE_VS_1 | ||
417 | 0x00028988 SQ_ALU_CONST_CACHE_VS_2 | ||
418 | 0x0002898C SQ_ALU_CONST_CACHE_VS_3 | ||
419 | 0x00028990 SQ_ALU_CONST_CACHE_VS_4 | ||
420 | 0x00028994 SQ_ALU_CONST_CACHE_VS_5 | ||
421 | 0x00028998 SQ_ALU_CONST_CACHE_VS_6 | ||
422 | 0x0002899C SQ_ALU_CONST_CACHE_VS_7 | ||
423 | 0x000289A0 SQ_ALU_CONST_CACHE_VS_8 | ||
424 | 0x000289A4 SQ_ALU_CONST_CACHE_VS_9 | ||
425 | 0x000289A8 SQ_ALU_CONST_CACHE_VS_10 | ||
426 | 0x000289AC SQ_ALU_CONST_CACHE_VS_11 | ||
427 | 0x000289B0 SQ_ALU_CONST_CACHE_VS_12 | ||
428 | 0x000289B4 SQ_ALU_CONST_CACHE_VS_13 | ||
429 | 0x000289B8 SQ_ALU_CONST_CACHE_VS_14 | ||
430 | 0x000289BC SQ_ALU_CONST_CACHE_VS_15 | ||
431 | 0x000288D8 SQ_PGM_CF_OFFSET_ES | 366 | 0x000288D8 SQ_PGM_CF_OFFSET_ES |
432 | 0x000288DC SQ_PGM_CF_OFFSET_FS | 367 | 0x000288DC SQ_PGM_CF_OFFSET_FS |
433 | 0x000288D4 SQ_PGM_CF_OFFSET_GS | 368 | 0x000288D4 SQ_PGM_CF_OFFSET_GS |
@@ -494,12 +429,7 @@ r600 0x9400 | |||
494 | 0x00028438 SX_ALPHA_REF | 429 | 0x00028438 SX_ALPHA_REF |
495 | 0x00028410 SX_ALPHA_TEST_CONTROL | 430 | 0x00028410 SX_ALPHA_TEST_CONTROL |
496 | 0x00028350 SX_MISC | 431 | 0x00028350 SX_MISC |
497 | 0x0000A020 SMX_DC_CTL0 | ||
498 | 0x0000A024 SMX_DC_CTL1 | ||
499 | 0x0000A028 SMX_DC_CTL2 | ||
500 | 0x00009608 TC_CNTL | ||
501 | 0x00009604 TC_INVALIDATE | 432 | 0x00009604 TC_INVALIDATE |
502 | 0x00009490 TD_CNTL | ||
503 | 0x00009400 TD_FILTER4 | 433 | 0x00009400 TD_FILTER4 |
504 | 0x00009404 TD_FILTER4_1 | 434 | 0x00009404 TD_FILTER4_1 |
505 | 0x00009408 TD_FILTER4_2 | 435 | 0x00009408 TD_FILTER4_2 |
@@ -824,14 +754,9 @@ r600 0x9400 | |||
824 | 0x00028428 CB_FOG_GREEN | 754 | 0x00028428 CB_FOG_GREEN |
825 | 0x00028424 CB_FOG_RED | 755 | 0x00028424 CB_FOG_RED |
826 | 0x00008040 WAIT_UNTIL | 756 | 0x00008040 WAIT_UNTIL |
827 | 0x00008950 CC_GC_SHADER_PIPE_CONFIG | ||
828 | 0x00008954 GC_USER_SHADER_PIPE_CONFIG | ||
829 | 0x00009714 VC_ENHANCE | 757 | 0x00009714 VC_ENHANCE |
830 | 0x00009830 DB_DEBUG | 758 | 0x00009830 DB_DEBUG |
831 | 0x00009838 DB_WATERMARKS | 759 | 0x00009838 DB_WATERMARKS |
832 | 0x00028D28 DB_SRESULTS_COMPARE_STATE0 | 760 | 0x00028D28 DB_SRESULTS_COMPARE_STATE0 |
833 | 0x00028D44 DB_ALPHA_TO_MASK | 761 | 0x00028D44 DB_ALPHA_TO_MASK |
834 | 0x00009504 TA_CNTL | ||
835 | 0x00009700 VC_CNTL | 762 | 0x00009700 VC_CNTL |
836 | 0x00009718 VC_CONFIG | ||
837 | 0x0000A02C SMX_DC_MC_INTF_CTL | ||
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 626d51891ee9..626aaf082b1a 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "radeon_asic.h" | ||
31 | #include "rs400d.h" | 32 | #include "rs400d.h" |
32 | 33 | ||
33 | /* This files gather functions specifics to : rs400,rs480 */ | 34 | /* This files gather functions specifics to : rs400,rs480 */ |
@@ -202,9 +203,9 @@ void rs400_gart_disable(struct radeon_device *rdev) | |||
202 | 203 | ||
203 | void rs400_gart_fini(struct radeon_device *rdev) | 204 | void rs400_gart_fini(struct radeon_device *rdev) |
204 | { | 205 | { |
206 | radeon_gart_fini(rdev); | ||
205 | rs400_gart_disable(rdev); | 207 | rs400_gart_disable(rdev); |
206 | radeon_gart_table_ram_free(rdev); | 208 | radeon_gart_table_ram_free(rdev); |
207 | radeon_gart_fini(rdev); | ||
208 | } | 209 | } |
209 | 210 | ||
210 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 211 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
@@ -264,6 +265,7 @@ void rs400_mc_init(struct radeon_device *rdev) | |||
264 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; | 265 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
265 | radeon_vram_location(rdev, &rdev->mc, base); | 266 | radeon_vram_location(rdev, &rdev->mc, base); |
266 | radeon_gtt_location(rdev, &rdev->mc); | 267 | radeon_gtt_location(rdev, &rdev->mc); |
268 | radeon_update_bandwidth_info(rdev); | ||
267 | } | 269 | } |
268 | 270 | ||
269 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 271 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
@@ -388,6 +390,8 @@ static int rs400_startup(struct radeon_device *rdev) | |||
388 | { | 390 | { |
389 | int r; | 391 | int r; |
390 | 392 | ||
393 | r100_set_common_regs(rdev); | ||
394 | |||
391 | rs400_mc_program(rdev); | 395 | rs400_mc_program(rdev); |
392 | /* Resume clock */ | 396 | /* Resume clock */ |
393 | r300_clock_startup(rdev); | 397 | r300_clock_startup(rdev); |
@@ -453,6 +457,7 @@ int rs400_suspend(struct radeon_device *rdev) | |||
453 | 457 | ||
454 | void rs400_fini(struct radeon_device *rdev) | 458 | void rs400_fini(struct radeon_device *rdev) |
455 | { | 459 | { |
460 | radeon_pm_fini(rdev); | ||
456 | r100_cp_fini(rdev); | 461 | r100_cp_fini(rdev); |
457 | r100_wb_fini(rdev); | 462 | r100_wb_fini(rdev); |
458 | r100_ib_fini(rdev); | 463 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 47f046b78c6b..abf824c2123d 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -37,6 +37,7 @@ | |||
37 | */ | 37 | */ |
38 | #include "drmP.h" | 38 | #include "drmP.h" |
39 | #include "radeon.h" | 39 | #include "radeon.h" |
40 | #include "radeon_asic.h" | ||
40 | #include "atom.h" | 41 | #include "atom.h" |
41 | #include "rs600d.h" | 42 | #include "rs600d.h" |
42 | 43 | ||
@@ -267,9 +268,9 @@ void rs600_gart_disable(struct radeon_device *rdev) | |||
267 | 268 | ||
268 | void rs600_gart_fini(struct radeon_device *rdev) | 269 | void rs600_gart_fini(struct radeon_device *rdev) |
269 | { | 270 | { |
271 | radeon_gart_fini(rdev); | ||
270 | rs600_gart_disable(rdev); | 272 | rs600_gart_disable(rdev); |
271 | radeon_gart_table_vram_free(rdev); | 273 | radeon_gart_table_vram_free(rdev); |
272 | radeon_gart_fini(rdev); | ||
273 | } | 274 | } |
274 | 275 | ||
275 | #define R600_PTE_VALID (1 << 0) | 276 | #define R600_PTE_VALID (1 << 0) |
@@ -392,10 +393,12 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
392 | /* Vertical blank interrupts */ | 393 | /* Vertical blank interrupts */ |
393 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { | 394 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { |
394 | drm_handle_vblank(rdev->ddev, 0); | 395 | drm_handle_vblank(rdev->ddev, 0); |
396 | rdev->pm.vblank_sync = true; | ||
395 | wake_up(&rdev->irq.vblank_queue); | 397 | wake_up(&rdev->irq.vblank_queue); |
396 | } | 398 | } |
397 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { | 399 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { |
398 | drm_handle_vblank(rdev->ddev, 1); | 400 | drm_handle_vblank(rdev->ddev, 1); |
401 | rdev->pm.vblank_sync = true; | ||
399 | wake_up(&rdev->irq.vblank_queue); | 402 | wake_up(&rdev->irq.vblank_queue); |
400 | } | 403 | } |
401 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { | 404 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { |
@@ -472,13 +475,38 @@ void rs600_mc_init(struct radeon_device *rdev) | |||
472 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 475 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
473 | base = RREG32_MC(R_000004_MC_FB_LOCATION); | 476 | base = RREG32_MC(R_000004_MC_FB_LOCATION); |
474 | base = G_000004_MC_FB_START(base) << 16; | 477 | base = G_000004_MC_FB_START(base) << 16; |
478 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | ||
475 | radeon_vram_location(rdev, &rdev->mc, base); | 479 | radeon_vram_location(rdev, &rdev->mc, base); |
476 | radeon_gtt_location(rdev, &rdev->mc); | 480 | radeon_gtt_location(rdev, &rdev->mc); |
481 | radeon_update_bandwidth_info(rdev); | ||
477 | } | 482 | } |
478 | 483 | ||
479 | void rs600_bandwidth_update(struct radeon_device *rdev) | 484 | void rs600_bandwidth_update(struct radeon_device *rdev) |
480 | { | 485 | { |
481 | /* FIXME: implement, should this be like rs690 ? */ | 486 | struct drm_display_mode *mode0 = NULL; |
487 | struct drm_display_mode *mode1 = NULL; | ||
488 | u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; | ||
489 | /* FIXME: implement full support */ | ||
490 | |||
491 | radeon_update_display_priority(rdev); | ||
492 | |||
493 | if (rdev->mode_info.crtcs[0]->base.enabled) | ||
494 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | ||
495 | if (rdev->mode_info.crtcs[1]->base.enabled) | ||
496 | mode1 = &rdev->mode_info.crtcs[1]->base.mode; | ||
497 | |||
498 | rs690_line_buffer_adjust(rdev, mode0, mode1); | ||
499 | |||
500 | if (rdev->disp_priority == 2) { | ||
501 | d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT); | ||
502 | d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT); | ||
503 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); | ||
504 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); | ||
505 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | ||
506 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); | ||
507 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); | ||
508 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); | ||
509 | } | ||
482 | } | 510 | } |
483 | 511 | ||
484 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 512 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
@@ -598,6 +626,7 @@ int rs600_suspend(struct radeon_device *rdev) | |||
598 | 626 | ||
599 | void rs600_fini(struct radeon_device *rdev) | 627 | void rs600_fini(struct radeon_device *rdev) |
600 | { | 628 | { |
629 | radeon_pm_fini(rdev); | ||
601 | r100_cp_fini(rdev); | 630 | r100_cp_fini(rdev); |
602 | r100_wb_fini(rdev); | 631 | r100_wb_fini(rdev); |
603 | r100_ib_fini(rdev); | 632 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h index c1c8f5885cbb..e52d2695510b 100644 --- a/drivers/gpu/drm/radeon/rs600d.h +++ b/drivers/gpu/drm/radeon/rs600d.h | |||
@@ -535,4 +535,57 @@ | |||
535 | #define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) | 535 | #define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) |
536 | #define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF | 536 | #define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF |
537 | 537 | ||
538 | #define R_006548_D1MODE_PRIORITY_A_CNT 0x006548 | ||
539 | #define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) | ||
540 | #define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) | ||
541 | #define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000 | ||
542 | #define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | ||
543 | #define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | ||
544 | #define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF | ||
545 | #define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
546 | #define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
547 | #define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF | ||
548 | #define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
549 | #define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
550 | #define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | ||
551 | #define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C | ||
552 | #define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) | ||
553 | #define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) | ||
554 | #define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000 | ||
555 | #define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) | ||
556 | #define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) | ||
557 | #define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF | ||
558 | #define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
559 | #define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
560 | #define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF | ||
561 | #define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
562 | #define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
563 | #define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF | ||
564 | #define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48 | ||
565 | #define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) | ||
566 | #define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) | ||
567 | #define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000 | ||
568 | #define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | ||
569 | #define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | ||
570 | #define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF | ||
571 | #define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
572 | #define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
573 | #define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF | ||
574 | #define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
575 | #define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
576 | #define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | ||
577 | #define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C | ||
578 | #define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) | ||
579 | #define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) | ||
580 | #define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000 | ||
581 | #define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) | ||
582 | #define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) | ||
583 | #define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF | ||
584 | #define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
585 | #define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
586 | #define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF | ||
587 | #define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
588 | #define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
589 | #define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF | ||
590 | |||
538 | #endif | 591 | #endif |
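The new D1MODE/D2MODE priority definitions follow the file's usual triplet per field: S_ shifts a value into place, G_ extracts it, and C_ is the AND-mask that clears it. A small standalone illustration of the read-modify-write idiom rs600_bandwidth_update() uses above, with one triplet copied verbatim and a plain variable standing in for the RREG32/WREG32 register access:

    #include <stdio.h>
    #include <stdint.h>

    #define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)   (((x) & 0x1) << 20)
    #define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)   (((x) >> 20) & 0x1)
    #define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON      0xFFEFFFFF

    int main(void)
    {
            uint32_t reg = 0x00001234;  /* stands in for RREG32(R_006548_...) */

            /* clear the field, then OR in the new value, as the driver does */
            reg &= C_006548_D1MODE_PRIORITY_A_ALWAYS_ON;
            reg |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);

            printf("reg = 0x%08X, always_on = %u\n", reg,
                   G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(reg));
            return 0;   /* prints reg = 0x00101234, always_on = 1 */
    }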
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 83b9174f76f2..bbf3da790fd5 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -27,6 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "radeon_asic.h" | ||
30 | #include "atom.h" | 31 | #include "atom.h" |
31 | #include "rs690d.h" | 32 | #include "rs690d.h" |
32 | 33 | ||
@@ -57,42 +58,57 @@ static void rs690_gpu_init(struct radeon_device *rdev) | |||
57 | } | 58 | } |
58 | } | 59 | } |
59 | 60 | ||
61 | union igp_info { | ||
62 | struct _ATOM_INTEGRATED_SYSTEM_INFO info; | ||
63 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2; | ||
64 | }; | ||
65 | |||
60 | void rs690_pm_info(struct radeon_device *rdev) | 66 | void rs690_pm_info(struct radeon_device *rdev) |
61 | { | 67 | { |
62 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | 68 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); |
63 | struct _ATOM_INTEGRATED_SYSTEM_INFO *info; | 69 | union igp_info *info; |
64 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2; | ||
65 | void *ptr; | ||
66 | uint16_t data_offset; | 70 | uint16_t data_offset; |
67 | uint8_t frev, crev; | 71 | uint8_t frev, crev; |
68 | fixed20_12 tmp; | 72 | fixed20_12 tmp; |
69 | 73 | ||
70 | atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, | 74 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, |
71 | &frev, &crev, &data_offset); | 75 | &frev, &crev, &data_offset)) { |
72 | ptr = rdev->mode_info.atom_context->bios + data_offset; | 76 | info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset); |
73 | info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr; | 77 | |
74 | info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr; | 78 | /* Get various system informations from bios */ |
75 | /* Get various system informations from bios */ | 79 | switch (crev) { |
76 | switch (crev) { | 80 | case 1: |
77 | case 1: | 81 | tmp.full = rfixed_const(100); |
78 | tmp.full = rfixed_const(100); | 82 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock); |
79 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock); | 83 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
80 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 84 | rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); |
81 | rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock)); | 85 | rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock)); |
82 | rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock)); | 86 | rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth); |
83 | rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth); | 87 | break; |
84 | break; | 88 | case 2: |
85 | case 2: | 89 | tmp.full = rfixed_const(100); |
86 | tmp.full = rfixed_const(100); | 90 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock); |
87 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock); | 91 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
88 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 92 | rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock); |
89 | rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock); | 93 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); |
90 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | 94 | rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq); |
91 | rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq); | 95 | rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
92 | rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); | 96 | rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); |
93 | rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth)); | 97 | break; |
94 | break; | 98 | default: |
95 | default: | 99 | tmp.full = rfixed_const(100); |
100 | /* We assume the slower possible clock ie worst case */ | ||
101 | /* DDR 333Mhz */ | ||
102 | rdev->pm.igp_sideport_mclk.full = rfixed_const(333); | ||
103 | /* FIXME: system clock ? */ | ||
104 | rdev->pm.igp_system_mclk.full = rfixed_const(100); | ||
105 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
106 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); | ||
107 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); | ||
108 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | ||
109 | break; | ||
110 | } | ||
111 | } else { | ||
96 | tmp.full = rfixed_const(100); | 112 | tmp.full = rfixed_const(100); |
97 | /* We assume the slower possible clock ie worst case */ | 113 | /* We assume the slower possible clock ie worst case */ |
98 | /* DDR 333Mhz */ | 114 | /* DDR 333Mhz */ |
@@ -103,7 +119,6 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
103 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); | 119 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); |
104 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); | 120 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); |
105 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 121 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
106 | break; | ||
107 | } | 122 | } |
108 | /* Compute various bandwidth */ | 123 | /* Compute various bandwidth */ |
109 | /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ | 124 | /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ |
@@ -131,7 +146,6 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
131 | 146 | ||
132 | void rs690_mc_init(struct radeon_device *rdev) | 147 | void rs690_mc_init(struct radeon_device *rdev) |
133 | { | 148 | { |
134 | fixed20_12 a; | ||
135 | u64 base; | 149 | u64 base; |
136 | 150 | ||
137 | rs400_gart_adjust_size(rdev); | 151 | rs400_gart_adjust_size(rdev); |
@@ -145,18 +159,10 @@ void rs690_mc_init(struct radeon_device *rdev) | |||
145 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); | 159 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); |
146 | base = G_000100_MC_FB_START(base) << 16; | 160 | base = G_000100_MC_FB_START(base) << 16; |
147 | rs690_pm_info(rdev); | 161 | rs690_pm_info(rdev); |
148 | /* FIXME: we should enforce default clock in case GPU is not in | ||
149 | * default setup | ||
150 | */ | ||
151 | a.full = rfixed_const(100); | ||
152 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
153 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
154 | a.full = rfixed_const(16); | ||
155 | /* core_bandwidth = sclk(Mhz) * 16 */ | ||
156 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); | ||
157 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 162 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
158 | radeon_vram_location(rdev, &rdev->mc, base); | 163 | radeon_vram_location(rdev, &rdev->mc, base); |
159 | radeon_gtt_location(rdev, &rdev->mc); | 164 | radeon_gtt_location(rdev, &rdev->mc); |
165 | radeon_update_bandwidth_info(rdev); | ||
160 | } | 166 | } |
161 | 167 | ||
162 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | 168 | void rs690_line_buffer_adjust(struct radeon_device *rdev, |
@@ -394,10 +400,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
394 | struct drm_display_mode *mode1 = NULL; | 400 | struct drm_display_mode *mode1 = NULL; |
395 | struct rs690_watermark wm0; | 401 | struct rs690_watermark wm0; |
396 | struct rs690_watermark wm1; | 402 | struct rs690_watermark wm1; |
397 | u32 tmp; | 403 | u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt; |
398 | fixed20_12 priority_mark02, priority_mark12, fill_rate; | 404 | fixed20_12 priority_mark02, priority_mark12, fill_rate; |
399 | fixed20_12 a, b; | 405 | fixed20_12 a, b; |
400 | 406 | ||
407 | radeon_update_display_priority(rdev); | ||
408 | |||
401 | if (rdev->mode_info.crtcs[0]->base.enabled) | 409 | if (rdev->mode_info.crtcs[0]->base.enabled) |
402 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | 410 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; |
403 | if (rdev->mode_info.crtcs[1]->base.enabled) | 411 | if (rdev->mode_info.crtcs[1]->base.enabled) |
@@ -407,7 +415,8 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
407 | * modes if the user specifies HIGH for displaypriority | 415 | * modes if the user specifies HIGH for displaypriority |
408 | * option. | 416 | * option. |
409 | */ | 417 | */ |
410 | if (rdev->disp_priority == 2) { | 418 | if ((rdev->disp_priority == 2) && |
419 | ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) { | ||
411 | tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); | 420 | tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); |
412 | tmp &= C_000104_MC_DISP0R_INIT_LAT; | 421 | tmp &= C_000104_MC_DISP0R_INIT_LAT; |
413 | tmp &= C_000104_MC_DISP1R_INIT_LAT; | 422 | tmp &= C_000104_MC_DISP1R_INIT_LAT; |
@@ -482,10 +491,16 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
482 | priority_mark12.full = 0; | 491 | priority_mark12.full = 0; |
483 | if (wm1.priority_mark_max.full > priority_mark12.full) | 492 | if (wm1.priority_mark_max.full > priority_mark12.full) |
484 | priority_mark12.full = wm1.priority_mark_max.full; | 493 | priority_mark12.full = wm1.priority_mark_max.full; |
485 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 494 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); |
486 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 495 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); |
487 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 496 | if (rdev->disp_priority == 2) { |
488 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 497 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); |
498 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); | ||
499 | } | ||
500 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | ||
501 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); | ||
502 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); | ||
503 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); | ||
489 | } else if (mode0) { | 504 | } else if (mode0) { |
490 | if (rfixed_trunc(wm0.dbpp) > 64) | 505 | if (rfixed_trunc(wm0.dbpp) > 64) |
491 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | 506 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); |
@@ -512,8 +527,11 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
512 | priority_mark02.full = 0; | 527 | priority_mark02.full = 0; |
513 | if (wm0.priority_mark_max.full > priority_mark02.full) | 528 | if (wm0.priority_mark_max.full > priority_mark02.full) |
514 | priority_mark02.full = wm0.priority_mark_max.full; | 529 | priority_mark02.full = wm0.priority_mark_max.full; |
515 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 530 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); |
516 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 531 | if (rdev->disp_priority == 2) |
532 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); | ||
533 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | ||
534 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); | ||
517 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, | 535 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, |
518 | S_006D48_D2MODE_PRIORITY_A_OFF(1)); | 536 | S_006D48_D2MODE_PRIORITY_A_OFF(1)); |
519 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, | 537 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, |
@@ -544,12 +562,15 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
544 | priority_mark12.full = 0; | 562 | priority_mark12.full = 0; |
545 | if (wm1.priority_mark_max.full > priority_mark12.full) | 563 | if (wm1.priority_mark_max.full > priority_mark12.full) |
546 | priority_mark12.full = wm1.priority_mark_max.full; | 564 | priority_mark12.full = wm1.priority_mark_max.full; |
565 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | ||
566 | if (rdev->disp_priority == 2) | ||
567 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); | ||
547 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, | 568 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, |
548 | S_006548_D1MODE_PRIORITY_A_OFF(1)); | 569 | S_006548_D1MODE_PRIORITY_A_OFF(1)); |
549 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, | 570 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, |
550 | S_00654C_D1MODE_PRIORITY_B_OFF(1)); | 571 | S_00654C_D1MODE_PRIORITY_B_OFF(1)); |
551 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 572 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); |
552 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 573 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); |
553 | } | 574 | } |
554 | } | 575 | } |
555 | 576 | ||
@@ -657,6 +678,7 @@ int rs690_suspend(struct radeon_device *rdev) | |||
657 | 678 | ||
658 | void rs690_fini(struct radeon_device *rdev) | 679 | void rs690_fini(struct radeon_device *rdev) |
659 | { | 680 | { |
681 | radeon_pm_fini(rdev); | ||
660 | r100_cp_fini(rdev); | 682 | r100_cp_fini(rdev); |
661 | r100_wb_fini(rdev); | 683 | r100_wb_fini(rdev); |
662 | r100_ib_fini(rdev); | 684 | r100_ib_fini(rdev); |
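
The rs690_pm_info() rework above replaces two separately cast pointers with a single union overlaying the v1 and v2 ATOM_INTEGRATED_SYSTEM_INFO layouts, and only dereferences the table when atom_parse_data_header() says it is present, falling back to conservative defaults otherwise. A reduced sketch of the same pattern; the structure names, fields and defaults below are placeholders, not the real ATOM definitions:

    /* Placeholder layouts standing in for the two ATOM table revisions */
    struct info_v1 { unsigned long boot_mclk; };
    struct info_v2 { unsigned long boot_sideport_clk; };

    union igp_info_example {
            struct info_v1 v1;
            struct info_v2 v2;
    };

    static unsigned long example_get_mclk(const void *bios, unsigned int offset,
                                          int table_present, unsigned char crev)
    {
            const union igp_info_example *info;

            if (!table_present)
                    return 333;     /* safe default, as in the patch */

            info = (const union igp_info_example *)((const char *)bios + offset);
            switch (crev) {
            case 1:
                    return info->v1.boot_mclk;
            case 2:
                    return info->v2.boot_sideport_clk;
            default:
                    return 333;     /* unknown revision: same safe default */
            }
    }
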
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h index 62d31e7a897f..36e6398a98ae 100644 --- a/drivers/gpu/drm/radeon/rs690d.h +++ b/drivers/gpu/drm/radeon/rs690d.h | |||
@@ -182,6 +182,9 @@ | |||
182 | #define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | 182 | #define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) |
183 | #define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | 183 | #define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) |
184 | #define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF | 184 | #define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF |
185 | #define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
186 | #define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
187 | #define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF | ||
185 | #define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | 188 | #define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) |
186 | #define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | 189 | #define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) |
187 | #define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | 190 | #define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index bea747da123f..1cf233f7e516 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "rv515d.h" | 30 | #include "rv515d.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_asic.h" | ||
32 | #include "atom.h" | 33 | #include "atom.h" |
33 | #include "rv515_reg_safe.h" | 34 | #include "rv515_reg_safe.h" |
34 | 35 | ||
@@ -279,19 +280,13 @@ static void rv515_vram_get_type(struct radeon_device *rdev) | |||
279 | 280 | ||
280 | void rv515_mc_init(struct radeon_device *rdev) | 281 | void rv515_mc_init(struct radeon_device *rdev) |
281 | { | 282 | { |
282 | fixed20_12 a; | ||
283 | 283 | ||
284 | rv515_vram_get_type(rdev); | 284 | rv515_vram_get_type(rdev); |
285 | r100_vram_init_sizes(rdev); | 285 | r100_vram_init_sizes(rdev); |
286 | radeon_vram_location(rdev, &rdev->mc, 0); | 286 | radeon_vram_location(rdev, &rdev->mc, 0); |
287 | if (!(rdev->flags & RADEON_IS_AGP)) | 287 | if (!(rdev->flags & RADEON_IS_AGP)) |
288 | radeon_gtt_location(rdev, &rdev->mc); | 288 | radeon_gtt_location(rdev, &rdev->mc); |
289 | /* FIXME: we should enforce default clock in case GPU is not in | 289 | radeon_update_bandwidth_info(rdev); |
290 | * default setup | ||
291 | */ | ||
292 | a.full = rfixed_const(100); | ||
293 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
294 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
295 | } | 290 | } |
296 | 291 | ||
297 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 292 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
@@ -539,6 +534,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) | |||
539 | 534 | ||
540 | void rv515_fini(struct radeon_device *rdev) | 535 | void rv515_fini(struct radeon_device *rdev) |
541 | { | 536 | { |
537 | radeon_pm_fini(rdev); | ||
542 | r100_cp_fini(rdev); | 538 | r100_cp_fini(rdev); |
543 | r100_wb_fini(rdev); | 539 | r100_wb_fini(rdev); |
544 | r100_ib_fini(rdev); | 540 | r100_ib_fini(rdev); |
@@ -1020,7 +1016,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
1020 | struct drm_display_mode *mode1 = NULL; | 1016 | struct drm_display_mode *mode1 = NULL; |
1021 | struct rv515_watermark wm0; | 1017 | struct rv515_watermark wm0; |
1022 | struct rv515_watermark wm1; | 1018 | struct rv515_watermark wm1; |
1023 | u32 tmp; | 1019 | u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt; |
1024 | fixed20_12 priority_mark02, priority_mark12, fill_rate; | 1020 | fixed20_12 priority_mark02, priority_mark12, fill_rate; |
1025 | fixed20_12 a, b; | 1021 | fixed20_12 a, b; |
1026 | 1022 | ||
@@ -1088,10 +1084,16 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
1088 | priority_mark12.full = 0; | 1084 | priority_mark12.full = 0; |
1089 | if (wm1.priority_mark_max.full > priority_mark12.full) | 1085 | if (wm1.priority_mark_max.full > priority_mark12.full) |
1090 | priority_mark12.full = wm1.priority_mark_max.full; | 1086 | priority_mark12.full = wm1.priority_mark_max.full; |
1091 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 1087 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); |
1092 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 1088 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); |
1093 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 1089 | if (rdev->disp_priority == 2) { |
1094 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 1090 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
1091 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | ||
1092 | } | ||
1093 | WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | ||
1094 | WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); | ||
1095 | WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); | ||
1096 | WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); | ||
1095 | } else if (mode0) { | 1097 | } else if (mode0) { |
1096 | if (rfixed_trunc(wm0.dbpp) > 64) | 1098 | if (rfixed_trunc(wm0.dbpp) > 64) |
1097 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | 1099 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); |
@@ -1118,8 +1120,11 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
1118 | priority_mark02.full = 0; | 1120 | priority_mark02.full = 0; |
1119 | if (wm0.priority_mark_max.full > priority_mark02.full) | 1121 | if (wm0.priority_mark_max.full > priority_mark02.full) |
1120 | priority_mark02.full = wm0.priority_mark_max.full; | 1122 | priority_mark02.full = wm0.priority_mark_max.full; |
1121 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 1123 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); |
1122 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 1124 | if (rdev->disp_priority == 2) |
1125 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | ||
1126 | WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | ||
1127 | WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); | ||
1123 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 1128 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
1124 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 1129 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); |
1125 | } else { | 1130 | } else { |
@@ -1148,10 +1153,13 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
1148 | priority_mark12.full = 0; | 1153 | priority_mark12.full = 0; |
1149 | if (wm1.priority_mark_max.full > priority_mark12.full) | 1154 | if (wm1.priority_mark_max.full > priority_mark12.full) |
1150 | priority_mark12.full = wm1.priority_mark_max.full; | 1155 | priority_mark12.full = wm1.priority_mark_max.full; |
1156 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | ||
1157 | if (rdev->disp_priority == 2) | ||
1158 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | ||
1151 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 1159 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
1152 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 1160 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); |
1153 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 1161 | WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); |
1154 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 1162 | WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); |
1155 | } | 1163 | } |
1156 | } | 1164 | } |
1157 | 1165 | ||
@@ -1161,6 +1169,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev) | |||
1161 | struct drm_display_mode *mode0 = NULL; | 1169 | struct drm_display_mode *mode0 = NULL; |
1162 | struct drm_display_mode *mode1 = NULL; | 1170 | struct drm_display_mode *mode1 = NULL; |
1163 | 1171 | ||
1172 | radeon_update_display_priority(rdev); | ||
1173 | |||
1164 | if (rdev->mode_info.crtcs[0]->base.enabled) | 1174 | if (rdev->mode_info.crtcs[0]->base.enabled) |
1165 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | 1175 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; |
1166 | if (rdev->mode_info.crtcs[1]->base.enabled) | 1176 | if (rdev->mode_info.crtcs[1]->base.enabled) |
@@ -1170,7 +1180,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev) | |||
1170 | * modes if the user specifies HIGH for displaypriority | 1180 | * modes if the user specifies HIGH for displaypriority |
1171 | * option. | 1181 | * option. |
1172 | */ | 1182 | */ |
1173 | if (rdev->disp_priority == 2) { | 1183 | if ((rdev->disp_priority == 2) && |
1184 | (rdev->family == CHIP_RV515)) { | ||
1174 | tmp = RREG32_MC(MC_MISC_LAT_TIMER); | 1185 | tmp = RREG32_MC(MC_MISC_LAT_TIMER); |
1175 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; | 1186 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; |
1176 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; | 1187 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; |
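
As in rs690_mc_init() above, the open-coded sclk setup removed from rv515_mc_init() converted clock.default_sclk into a 20.12 fixed-point megahertz value by dividing by 100; that work now happens in radeon_update_bandwidth_info() instead of being duplicated per ASIC. A self-contained sketch of the arithmetic, using stand-in helpers rather than the driver's radeon_fixed.h ones, and assuming default_sclk is reported in 10 kHz units:

    #include <stdint.h>

    /* Minimal 20.12 fixed-point helpers, for illustration only */
    typedef struct { uint32_t full; } fix20_12;

    static fix20_12 fix_const(uint32_t v)
    {
            fix20_12 r = { v << 12 };
            return r;
    }

    static fix20_12 fix_div(fix20_12 a, fix20_12 b)
    {
            fix20_12 r = { (uint32_t)(((uint64_t)a.full << 12) / b.full) };
            return r;
    }

    /* sclk in MHz as 20.12, mirroring "sclk = default_sclk / 100" above */
    static fix20_12 sclk_mhz(uint32_t default_sclk)
    {
            return fix_div(fix_const(default_sclk), fix_const(100));
    }
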
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 37887dee12af..9f37d2efb0a9 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/platform_device.h> | 29 | #include <linux/platform_device.h> |
30 | #include "drmP.h" | 30 | #include "drmP.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_asic.h" | ||
32 | #include "radeon_drm.h" | 33 | #include "radeon_drm.h" |
33 | #include "rv770d.h" | 34 | #include "rv770d.h" |
34 | #include "atom.h" | 35 | #include "atom.h" |
@@ -125,9 +126,9 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev) | |||
125 | 126 | ||
126 | void rv770_pcie_gart_fini(struct radeon_device *rdev) | 127 | void rv770_pcie_gart_fini(struct radeon_device *rdev) |
127 | { | 128 | { |
129 | radeon_gart_fini(rdev); | ||
128 | rv770_pcie_gart_disable(rdev); | 130 | rv770_pcie_gart_disable(rdev); |
129 | radeon_gart_table_vram_free(rdev); | 131 | radeon_gart_table_vram_free(rdev); |
130 | radeon_gart_fini(rdev); | ||
131 | } | 132 | } |
132 | 133 | ||
133 | 134 | ||
@@ -647,10 +648,13 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
647 | 648 | ||
648 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 649 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
649 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 650 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
651 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | ||
650 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 652 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
651 | 653 | ||
652 | WREG32(CGTS_SYS_TCC_DISABLE, 0); | 654 | WREG32(CGTS_SYS_TCC_DISABLE, 0); |
653 | WREG32(CGTS_TCC_DISABLE, 0); | 655 | WREG32(CGTS_TCC_DISABLE, 0); |
656 | WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); | ||
657 | WREG32(CGTS_USER_TCC_DISABLE, 0); | ||
654 | 658 | ||
655 | num_qd_pipes = | 659 | num_qd_pipes = |
656 | R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); | 660 | R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); |
@@ -864,7 +868,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
864 | 868 | ||
865 | int rv770_mc_init(struct radeon_device *rdev) | 869 | int rv770_mc_init(struct radeon_device *rdev) |
866 | { | 870 | { |
867 | fixed20_12 a; | ||
868 | u32 tmp; | 871 | u32 tmp; |
869 | int chansize, numchan; | 872 | int chansize, numchan; |
870 | 873 | ||
@@ -908,12 +911,8 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
908 | rdev->mc.real_vram_size = rdev->mc.aper_size; | 911 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
909 | } | 912 | } |
910 | r600_vram_gtt_location(rdev, &rdev->mc); | 913 | r600_vram_gtt_location(rdev, &rdev->mc); |
911 | /* FIXME: we should enforce default clock in case GPU is not in | 914 | radeon_update_bandwidth_info(rdev); |
912 | * default setup | 915 | |
913 | */ | ||
914 | a.full = rfixed_const(100); | ||
915 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
916 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
917 | return 0; | 916 | return 0; |
918 | } | 917 | } |
919 | 918 | ||
@@ -1013,6 +1012,13 @@ int rv770_resume(struct radeon_device *rdev) | |||
1013 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1012 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
1014 | return r; | 1013 | return r; |
1015 | } | 1014 | } |
1015 | |||
1016 | r = r600_audio_init(rdev); | ||
1017 | if (r) { | ||
1018 | dev_err(rdev->dev, "radeon: audio init failed\n"); | ||
1019 | return r; | ||
1020 | } | ||
1021 | |||
1016 | return r; | 1022 | return r; |
1017 | 1023 | ||
1018 | } | 1024 | } |
@@ -1021,6 +1027,7 @@ int rv770_suspend(struct radeon_device *rdev) | |||
1021 | { | 1027 | { |
1022 | int r; | 1028 | int r; |
1023 | 1029 | ||
1030 | r600_audio_fini(rdev); | ||
1024 | /* FIXME: we should wait for ring to be empty */ | 1031 | /* FIXME: we should wait for ring to be empty */ |
1025 | r700_cp_stop(rdev); | 1032 | r700_cp_stop(rdev); |
1026 | rdev->cp.ready = false; | 1033 | rdev->cp.ready = false; |
@@ -1144,11 +1151,19 @@ int rv770_init(struct radeon_device *rdev) | |||
1144 | } | 1151 | } |
1145 | } | 1152 | } |
1146 | } | 1153 | } |
1154 | |||
1155 | r = r600_audio_init(rdev); | ||
1156 | if (r) { | ||
1157 | dev_err(rdev->dev, "radeon: audio init failed\n"); | ||
1158 | return r; | ||
1159 | } | ||
1160 | |||
1147 | return 0; | 1161 | return 0; |
1148 | } | 1162 | } |
1149 | 1163 | ||
1150 | void rv770_fini(struct radeon_device *rdev) | 1164 | void rv770_fini(struct radeon_device *rdev) |
1151 | { | 1165 | { |
1166 | radeon_pm_fini(rdev); | ||
1152 | r600_blit_fini(rdev); | 1167 | r600_blit_fini(rdev); |
1153 | r600_cp_fini(rdev); | 1168 | r600_cp_fini(rdev); |
1154 | r600_wb_fini(rdev); | 1169 | r600_wb_fini(rdev); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 89c38c49066f..dd47b2a9a791 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -1425,8 +1425,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref) | |||
1425 | 1425 | ||
1426 | atomic_set(&glob->bo_count, 0); | 1426 | atomic_set(&glob->bo_count, 0); |
1427 | 1427 | ||
1428 | kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type); | 1428 | ret = kobject_init_and_add( |
1429 | ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects"); | 1429 | &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects"); |
1430 | if (unlikely(ret != 0)) | 1430 | if (unlikely(ret != 0)) |
1431 | kobject_put(&glob->kobj); | 1431 | kobject_put(&glob->kobj); |
1432 | return ret; | 1432 | return ret; |
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index eb143e04d402..c40e5f48e9a1 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c | |||
@@ -260,8 +260,8 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, | |||
260 | zone->used_mem = 0; | 260 | zone->used_mem = 0; |
261 | zone->glob = glob; | 261 | zone->glob = glob; |
262 | glob->zone_kernel = zone; | 262 | glob->zone_kernel = zone; |
263 | kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); | 263 | ret = kobject_init_and_add( |
264 | ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); | 264 | &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); |
265 | if (unlikely(ret != 0)) { | 265 | if (unlikely(ret != 0)) { |
266 | kobject_put(&zone->kobj); | 266 | kobject_put(&zone->kobj); |
267 | return ret; | 267 | return ret; |
@@ -296,8 +296,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, | |||
296 | zone->used_mem = 0; | 296 | zone->used_mem = 0; |
297 | zone->glob = glob; | 297 | zone->glob = glob; |
298 | glob->zone_highmem = zone; | 298 | glob->zone_highmem = zone; |
299 | kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); | 299 | ret = kobject_init_and_add( |
300 | ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); | 300 | &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); |
301 | if (unlikely(ret != 0)) { | 301 | if (unlikely(ret != 0)) { |
302 | kobject_put(&zone->kobj); | 302 | kobject_put(&zone->kobj); |
303 | return ret; | 303 | return ret; |
@@ -343,8 +343,8 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, | |||
343 | zone->used_mem = 0; | 343 | zone->used_mem = 0; |
344 | zone->glob = glob; | 344 | zone->glob = glob; |
345 | glob->zone_dma32 = zone; | 345 | glob->zone_dma32 = zone; |
346 | kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); | 346 | ret = kobject_init_and_add( |
347 | ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); | 347 | &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); |
348 | if (unlikely(ret != 0)) { | 348 | if (unlikely(ret != 0)) { |
349 | kobject_put(&zone->kobj); | 349 | kobject_put(&zone->kobj); |
350 | return ret; | 350 | return ret; |
@@ -365,10 +365,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) | |||
365 | glob->swap_queue = create_singlethread_workqueue("ttm_swap"); | 365 | glob->swap_queue = create_singlethread_workqueue("ttm_swap"); |
366 | INIT_WORK(&glob->work, ttm_shrink_work); | 366 | INIT_WORK(&glob->work, ttm_shrink_work); |
367 | init_waitqueue_head(&glob->queue); | 367 | init_waitqueue_head(&glob->queue); |
368 | kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type); | 368 | ret = kobject_init_and_add( |
369 | ret = kobject_add(&glob->kobj, | 369 | &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting"); |
370 | ttm_get_kobj(), | ||
371 | "memory_accounting"); | ||
372 | if (unlikely(ret != 0)) { | 370 | if (unlikely(ret != 0)) { |
373 | kobject_put(&glob->kobj); | 371 | kobject_put(&glob->kobj); |
374 | return ret; | 372 | return ret; |
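
The ttm_bo.c and ttm_memory.c hunks above all make the same substitution: the two-step kobject_init() plus kobject_add() becomes a single kobject_init_and_add(), which initialises the kobject and registers it under the given parent in one call, while the existing kobject_put() on failure is kept to drop the initial reference. A minimal sketch of the idiom; the containing structure and ktype here are invented, not TTM's:

    #include <linux/kobject.h>

    struct example_obj {
            struct kobject kobj;
    };

    static struct kobj_type example_ktype;  /* .release and sysfs ops elided */

    static int example_register(struct example_obj *obj, struct kobject *parent)
    {
            int ret;

            ret = kobject_init_and_add(&obj->kobj, &example_ktype, parent,
                                       "example_object");
            if (ret)
                    kobject_put(&obj->kobj);  /* drop the reference taken by init */
            return ret;
    }
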
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index a759170763bb..bab6cd8d8a1e 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -28,13 +28,13 @@ | |||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/vmalloc.h> | ||
32 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
33 | #include <linux/highmem.h> | 32 | #include <linux/highmem.h> |
34 | #include <linux/pagemap.h> | 33 | #include <linux/pagemap.h> |
35 | #include <linux/file.h> | 34 | #include <linux/file.h> |
36 | #include <linux/swap.h> | 35 | #include <linux/swap.h> |
37 | #include "drm_cache.h" | 36 | #include "drm_cache.h" |
37 | #include "drm_mem_util.h" | ||
38 | #include "ttm/ttm_module.h" | 38 | #include "ttm/ttm_module.h" |
39 | #include "ttm/ttm_bo_driver.h" | 39 | #include "ttm/ttm_bo_driver.h" |
40 | #include "ttm/ttm_placement.h" | 40 | #include "ttm/ttm_placement.h" |
@@ -43,32 +43,15 @@ static int ttm_tt_swapin(struct ttm_tt *ttm); | |||
43 | 43 | ||
44 | /** | 44 | /** |
45 | * Allocates storage for pointers to the pages that back the ttm. | 45 | * Allocates storage for pointers to the pages that back the ttm. |
46 | * | ||
47 | * Uses kmalloc if possible. Otherwise falls back to vmalloc. | ||
48 | */ | 46 | */ |
49 | static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) | 47 | static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) |
50 | { | 48 | { |
51 | unsigned long size = ttm->num_pages * sizeof(*ttm->pages); | 49 | ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages)); |
52 | ttm->pages = NULL; | ||
53 | |||
54 | if (size <= PAGE_SIZE) | ||
55 | ttm->pages = kzalloc(size, GFP_KERNEL); | ||
56 | |||
57 | if (!ttm->pages) { | ||
58 | ttm->pages = vmalloc_user(size); | ||
59 | if (ttm->pages) | ||
60 | ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC; | ||
61 | } | ||
62 | } | 50 | } |
63 | 51 | ||
64 | static void ttm_tt_free_page_directory(struct ttm_tt *ttm) | 52 | static void ttm_tt_free_page_directory(struct ttm_tt *ttm) |
65 | { | 53 | { |
66 | if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) { | 54 | drm_free_large(ttm->pages); |
67 | vfree(ttm->pages); | ||
68 | ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC; | ||
69 | } else { | ||
70 | kfree(ttm->pages); | ||
71 | } | ||
72 | ttm->pages = NULL; | 55 | ttm->pages = NULL; |
73 | } | 56 | } |
74 | 57 | ||
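
The ttm_tt.c hunk drops the hand-rolled kzalloc/vmalloc_user fallback, and the TTM_PAGE_FLAG_VMALLOC bookkeeping that remembered which allocator was used, in favour of drm_calloc_large()/drm_free_large() from the newly included drm_mem_util.h, which make that decision internally based on the request size. A hedged usage sketch with invented function names:

    #include <linux/mm.h>          /* assumed for struct page */
    #include "drm_mem_util.h"

    static struct page **alloc_page_directory(unsigned long num_pages)
    {
            /* zeroed array of page pointers; backed by kmalloc or vmalloc */
            return drm_calloc_large(num_pages, sizeof(struct page *));
    }

    static void free_page_directory(struct page **pages)
    {
            drm_free_large(pages);  /* matches whichever allocator was chosen */
    }
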
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index f20b8bcbef39..30ad13344f7b 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config DRM_VMWGFX | 1 | config DRM_VMWGFX |
2 | tristate "DRM driver for VMware Virtual GPU" | 2 | tristate "DRM driver for VMware Virtual GPU" |
3 | depends on DRM && PCI | 3 | depends on DRM && PCI && FB |
4 | select FB_DEFERRED_IO | 4 | select FB_DEFERRED_IO |
5 | select FB_CFB_FILLRECT | 5 | select FB_CFB_FILLRECT |
6 | select FB_CFB_COPYAREA | 6 | select FB_CFB_COPYAREA |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index e4595e6147b4..9be8e1754a0b 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -217,8 +217,8 @@ config SENSORS_ASC7621 | |||
217 | depends on HWMON && I2C | 217 | depends on HWMON && I2C |
218 | help | 218 | help |
219 | If you say yes here you get support for the aSC7621 | 219 | If you say yes here you get support for the aSC7621 |
220 | family of SMBus sensors chip found on most Intel X48, X38, 975, | 220 | family of SMBus sensors chip found on most Intel X38, X48, X58, |
221 | 965 and 945 desktop boards. Currently supported chips: | 221 | 945, 965 and 975 desktop boards. Currently supported chips: |
222 | aSC7621 | 222 | aSC7621 |
223 | aSC7621a | 223 | aSC7621a |
224 | 224 | ||
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 2d7bceeed0bc..e9b7fbc5a447 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
@@ -228,7 +228,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device * | |||
228 | if (err) { | 228 | if (err) { |
229 | dev_warn(dev, | 229 | dev_warn(dev, |
230 | "Unable to access MSR 0xEE, for Tjmax, left" | 230 | "Unable to access MSR 0xEE, for Tjmax, left" |
231 | " at default"); | 231 | " at default\n"); |
232 | } else if (eax & 0x40000000) { | 232 | } else if (eax & 0x40000000) { |
233 | tjmax = tjmax_ee; | 233 | tjmax = tjmax_ee; |
234 | } | 234 | } |
@@ -466,7 +466,7 @@ static int __init coretemp_init(void) | |||
466 | family 6 CPU */ | 466 | family 6 CPU */ |
467 | if ((c->x86 == 0x6) && (c->x86_model > 0xf)) | 467 | if ((c->x86 == 0x6) && (c->x86_model > 0xf)) |
468 | printk(KERN_WARNING DRVNAME ": Unknown CPU " | 468 | printk(KERN_WARNING DRVNAME ": Unknown CPU " |
469 | "model %x\n", c->x86_model); | 469 | "model 0x%x\n", c->x86_model); |
470 | continue; | 470 | continue; |
471 | } | 471 | } |
472 | 472 | ||
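
Both coretemp changes are message-format fixes: kernel log lines should end with "\n" so they are emitted as complete lines rather than merged with the next message, and printing the model with an explicit "0x%x" avoids the hex value being misread as decimal. A trivial stand-alone illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int model = 0x1e;

            /* explicit 0x prefix and terminating newline, as in the fix */
            printf("Unknown CPU model 0x%x\n", model);
            return 0;
    }
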
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index 9de81a4c15a2..612807d97155 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c | |||
@@ -1294,7 +1294,7 @@ static int watchdog_close(struct inode *inode, struct file *filp) | |||
1294 | static ssize_t watchdog_write(struct file *filp, const char __user *buf, | 1294 | static ssize_t watchdog_write(struct file *filp, const char __user *buf, |
1295 | size_t count, loff_t *offset) | 1295 | size_t count, loff_t *offset) |
1296 | { | 1296 | { |
1297 | size_t ret; | 1297 | ssize_t ret; |
1298 | struct w83793_data *data = filp->private_data; | 1298 | struct w83793_data *data = filp->private_data; |
1299 | 1299 | ||
1300 | if (count) { | 1300 | if (count) { |
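
The single-character w83793 change matters because watchdog_write() can return a negative errno; stored in an unsigned size_t, a value such as -EFAULT would be reported to the caller as an enormous positive byte count instead of an error. A small stand-alone illustration, unrelated to the driver itself:

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            size_t  wrong = -14;    /* -EFAULT stored in an unsigned type  */
            ssize_t right = -14;    /* signed, as a write handler must use */

            printf("size_t:  %zu\n", wrong);   /* prints a huge positive number */
            printf("ssize_t: %zd\n", right);   /* prints -14 */
            return 0;
    }
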
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index 365e0becaf12..388cbdc96db7 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c | |||
@@ -33,6 +33,7 @@ struct acpi_smbus_cmi { | |||
33 | u8 cap_info:1; | 33 | u8 cap_info:1; |
34 | u8 cap_read:1; | 34 | u8 cap_read:1; |
35 | u8 cap_write:1; | 35 | u8 cap_write:1; |
36 | struct smbus_methods_t *methods; | ||
36 | }; | 37 | }; |
37 | 38 | ||
38 | static const struct smbus_methods_t smbus_methods = { | 39 | static const struct smbus_methods_t smbus_methods = { |
@@ -41,10 +42,19 @@ static const struct smbus_methods_t smbus_methods = { | |||
41 | .mt_sbw = "_SBW", | 42 | .mt_sbw = "_SBW", |
42 | }; | 43 | }; |
43 | 44 | ||
45 | /* Some IBM BIOSes omit the leading underscore */ | ||
46 | static const struct smbus_methods_t ibm_smbus_methods = { | ||
47 | .mt_info = "SBI_", | ||
48 | .mt_sbr = "SBR_", | ||
49 | .mt_sbw = "SBW_", | ||
50 | }; | ||
51 | |||
44 | static const struct acpi_device_id acpi_smbus_cmi_ids[] = { | 52 | static const struct acpi_device_id acpi_smbus_cmi_ids[] = { |
45 | {"SMBUS01", 0}, | 53 | {"SMBUS01", (kernel_ulong_t)&smbus_methods}, |
54 | {ACPI_SMBUS_IBM_HID, (kernel_ulong_t)&ibm_smbus_methods}, | ||
46 | {"", 0} | 55 | {"", 0} |
47 | }; | 56 | }; |
57 | MODULE_DEVICE_TABLE(acpi, acpi_smbus_cmi_ids); | ||
48 | 58 | ||
49 | #define ACPI_SMBUS_STATUS_OK 0x00 | 59 | #define ACPI_SMBUS_STATUS_OK 0x00 |
50 | #define ACPI_SMBUS_STATUS_FAIL 0x07 | 60 | #define ACPI_SMBUS_STATUS_FAIL 0x07 |
@@ -150,11 +160,11 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, | |||
150 | 160 | ||
151 | if (read_write == I2C_SMBUS_READ) { | 161 | if (read_write == I2C_SMBUS_READ) { |
152 | protocol |= ACPI_SMBUS_PRTCL_READ; | 162 | protocol |= ACPI_SMBUS_PRTCL_READ; |
153 | method = smbus_methods.mt_sbr; | 163 | method = smbus_cmi->methods->mt_sbr; |
154 | input.count = 3; | 164 | input.count = 3; |
155 | } else { | 165 | } else { |
156 | protocol |= ACPI_SMBUS_PRTCL_WRITE; | 166 | protocol |= ACPI_SMBUS_PRTCL_WRITE; |
157 | method = smbus_methods.mt_sbw; | 167 | method = smbus_cmi->methods->mt_sbw; |
158 | input.count = 5; | 168 | input.count = 5; |
159 | } | 169 | } |
160 | 170 | ||
@@ -290,13 +300,13 @@ static int acpi_smbus_cmi_add_cap(struct acpi_smbus_cmi *smbus_cmi, | |||
290 | union acpi_object *obj; | 300 | union acpi_object *obj; |
291 | acpi_status status; | 301 | acpi_status status; |
292 | 302 | ||
293 | if (!strcmp(name, smbus_methods.mt_info)) { | 303 | if (!strcmp(name, smbus_cmi->methods->mt_info)) { |
294 | status = acpi_evaluate_object(smbus_cmi->handle, | 304 | status = acpi_evaluate_object(smbus_cmi->handle, |
295 | smbus_methods.mt_info, | 305 | smbus_cmi->methods->mt_info, |
296 | NULL, &buffer); | 306 | NULL, &buffer); |
297 | if (ACPI_FAILURE(status)) { | 307 | if (ACPI_FAILURE(status)) { |
298 | ACPI_ERROR((AE_INFO, "Evaluating %s: %i", | 308 | ACPI_ERROR((AE_INFO, "Evaluating %s: %i", |
299 | smbus_methods.mt_info, status)); | 309 | smbus_cmi->methods->mt_info, status)); |
300 | return -EIO; | 310 | return -EIO; |
301 | } | 311 | } |
302 | 312 | ||
@@ -319,9 +329,9 @@ static int acpi_smbus_cmi_add_cap(struct acpi_smbus_cmi *smbus_cmi, | |||
319 | 329 | ||
320 | kfree(buffer.pointer); | 330 | kfree(buffer.pointer); |
321 | smbus_cmi->cap_info = 1; | 331 | smbus_cmi->cap_info = 1; |
322 | } else if (!strcmp(name, smbus_methods.mt_sbr)) | 332 | } else if (!strcmp(name, smbus_cmi->methods->mt_sbr)) |
323 | smbus_cmi->cap_read = 1; | 333 | smbus_cmi->cap_read = 1; |
324 | else if (!strcmp(name, smbus_methods.mt_sbw)) | 334 | else if (!strcmp(name, smbus_cmi->methods->mt_sbw)) |
325 | smbus_cmi->cap_write = 1; | 335 | smbus_cmi->cap_write = 1; |
326 | else | 336 | else |
327 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported CMI method: %s\n", | 337 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported CMI method: %s\n", |
@@ -349,6 +359,7 @@ static acpi_status acpi_smbus_cmi_query_methods(acpi_handle handle, u32 level, | |||
349 | static int acpi_smbus_cmi_add(struct acpi_device *device) | 359 | static int acpi_smbus_cmi_add(struct acpi_device *device) |
350 | { | 360 | { |
351 | struct acpi_smbus_cmi *smbus_cmi; | 361 | struct acpi_smbus_cmi *smbus_cmi; |
362 | const struct acpi_device_id *id; | ||
352 | 363 | ||
353 | smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL); | 364 | smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL); |
354 | if (!smbus_cmi) | 365 | if (!smbus_cmi) |
@@ -362,6 +373,11 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) | |||
362 | smbus_cmi->cap_read = 0; | 373 | smbus_cmi->cap_read = 0; |
363 | smbus_cmi->cap_write = 0; | 374 | smbus_cmi->cap_write = 0; |
364 | 375 | ||
376 | for (id = acpi_smbus_cmi_ids; id->id[0]; id++) | ||
377 | if (!strcmp(id->id, acpi_device_hid(device))) | ||
378 | smbus_cmi->methods = | ||
379 | (struct smbus_methods_t *) id->driver_data; | ||
380 | |||
365 | acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1, | 381 | acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1, |
366 | acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL); | 382 | acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL); |
367 | 383 | ||
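
The i2c-scmi change hangs a per-vendor method table off each ACPI ID via the driver_data field and, at probe time, walks the ID table comparing HIDs, so BIOSes that spell the control methods "SBI_"/"SBR_"/"SBW_" (trailing underscore) get their own table while everything else keeps the standard "_SBI"/"_SBR"/"_SBW" names. A reduced sketch of that lookup; the structure, table entries and second HID string below are placeholders rather than the driver's own:

    #include <linux/acpi.h>
    #include <linux/mod_devicetable.h>
    #include <linux/string.h>

    struct example_methods {
            const char *info, *read, *write;
    };

    static const struct example_methods std_methods = { "_SBI", "_SBR", "_SBW" };
    static const struct example_methods alt_methods = { "SBI_", "SBR_", "SBW_" };

    static const struct acpi_device_id example_ids[] = {
            { "SMBUS01",  (kernel_ulong_t)&std_methods },
            { "EXAMPLE0", (kernel_ulong_t)&alt_methods },   /* placeholder HID */
            { "", 0 },
    };

    static const struct example_methods *
    example_lookup(struct acpi_device *device)
    {
            const struct acpi_device_id *id;

            for (id = example_ids; id->id[0]; id++)
                    if (!strcmp(id->id, acpi_device_hid(device)))
                            return (const struct example_methods *)id->driver_data;
            return &std_methods;    /* fall back to the standard names */
    }
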
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index fbedd35feb44..4c3d1bfec0c5 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -695,14 +695,8 @@ static int ide_probe_port(ide_hwif_t *hwif) | |||
695 | if (irqd) | 695 | if (irqd) |
696 | disable_irq(hwif->irq); | 696 | disable_irq(hwif->irq); |
697 | 697 | ||
698 | rc = ide_port_wait_ready(hwif); | 698 | if (ide_port_wait_ready(hwif) == -EBUSY) |
699 | if (rc == -ENODEV) { | 699 | printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name); |
700 | printk(KERN_INFO "%s: no devices on the port\n", hwif->name); | ||
701 | goto out; | ||
702 | } else if (rc == -EBUSY) | ||
703 | printk(KERN_ERR "%s: not ready before the probe\n", hwif->name); | ||
704 | else | ||
705 | rc = -ENODEV; | ||
706 | 700 | ||
707 | /* | 701 | /* |
708 | * Second drive should only exist if first drive was found, | 702 | * Second drive should only exist if first drive was found, |
@@ -713,7 +707,7 @@ static int ide_probe_port(ide_hwif_t *hwif) | |||
713 | if (drive->dev_flags & IDE_DFLAG_PRESENT) | 707 | if (drive->dev_flags & IDE_DFLAG_PRESENT) |
714 | rc = 0; | 708 | rc = 0; |
715 | } | 709 | } |
716 | out: | 710 | |
717 | /* | 711 | /* |
718 | * Use cached IRQ number. It might be (and is...) changed by probe | 712 | * Use cached IRQ number. It might be (and is...) changed by probe |
719 | * code above | 713 | * code above |
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c index e65d010b708d..48fd4efc90ad 100644 --- a/drivers/ide/via82cxxx.c +++ b/drivers/ide/via82cxxx.c | |||
@@ -110,7 +110,6 @@ struct via82cxxx_dev | |||
110 | { | 110 | { |
111 | struct via_isa_bridge *via_config; | 111 | struct via_isa_bridge *via_config; |
112 | unsigned int via_80w; | 112 | unsigned int via_80w; |
113 | u8 cached_device[2]; | ||
114 | }; | 113 | }; |
115 | 114 | ||
116 | /** | 115 | /** |
@@ -403,66 +402,10 @@ static const struct ide_port_ops via_port_ops = { | |||
403 | .cable_detect = via82cxxx_cable_detect, | 402 | .cable_detect = via82cxxx_cable_detect, |
404 | }; | 403 | }; |
405 | 404 | ||
406 | static void via_write_devctl(ide_hwif_t *hwif, u8 ctl) | ||
407 | { | ||
408 | struct via82cxxx_dev *vdev = hwif->host->host_priv; | ||
409 | |||
410 | outb(ctl, hwif->io_ports.ctl_addr); | ||
411 | outb(vdev->cached_device[hwif->channel], hwif->io_ports.device_addr); | ||
412 | } | ||
413 | |||
414 | static void __via_dev_select(ide_drive_t *drive, u8 select) | ||
415 | { | ||
416 | ide_hwif_t *hwif = drive->hwif; | ||
417 | struct via82cxxx_dev *vdev = hwif->host->host_priv; | ||
418 | |||
419 | outb(select, hwif->io_ports.device_addr); | ||
420 | vdev->cached_device[hwif->channel] = select; | ||
421 | } | ||
422 | |||
423 | static void via_dev_select(ide_drive_t *drive) | ||
424 | { | ||
425 | __via_dev_select(drive, drive->select | ATA_DEVICE_OBS); | ||
426 | } | ||
427 | |||
428 | static void via_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) | ||
429 | { | ||
430 | ide_hwif_t *hwif = drive->hwif; | ||
431 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
432 | |||
433 | if (valid & IDE_VALID_FEATURE) | ||
434 | outb(tf->feature, io_ports->feature_addr); | ||
435 | if (valid & IDE_VALID_NSECT) | ||
436 | outb(tf->nsect, io_ports->nsect_addr); | ||
437 | if (valid & IDE_VALID_LBAL) | ||
438 | outb(tf->lbal, io_ports->lbal_addr); | ||
439 | if (valid & IDE_VALID_LBAM) | ||
440 | outb(tf->lbam, io_ports->lbam_addr); | ||
441 | if (valid & IDE_VALID_LBAH) | ||
442 | outb(tf->lbah, io_ports->lbah_addr); | ||
443 | if (valid & IDE_VALID_DEVICE) | ||
444 | __via_dev_select(drive, tf->device); | ||
445 | } | ||
446 | |||
447 | const struct ide_tp_ops via_tp_ops = { | ||
448 | .exec_command = ide_exec_command, | ||
449 | .read_status = ide_read_status, | ||
450 | .read_altstatus = ide_read_altstatus, | ||
451 | .write_devctl = via_write_devctl, | ||
452 | |||
453 | .dev_select = via_dev_select, | ||
454 | .tf_load = via_tf_load, | ||
455 | .tf_read = ide_tf_read, | ||
456 | |||
457 | .input_data = ide_input_data, | ||
458 | .output_data = ide_output_data, | ||
459 | }; | ||
460 | |||
461 | static const struct ide_port_info via82cxxx_chipset __devinitdata = { | 405 | static const struct ide_port_info via82cxxx_chipset __devinitdata = { |
462 | .name = DRV_NAME, | 406 | .name = DRV_NAME, |
463 | .init_chipset = init_chipset_via82cxxx, | 407 | .init_chipset = init_chipset_via82cxxx, |
464 | .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, | 408 | .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, |
465 | .tp_ops = &via_tp_ops, | ||
466 | .port_ops = &via_port_ops, | 409 | .port_ops = &via_port_ops, |
467 | .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | | 410 | .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | |
468 | IDE_HFLAG_POST_SET_MODE | | 411 | IDE_HFLAG_POST_SET_MODE | |
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c index e5deb15cf40c..8d1d63a02b34 100644 --- a/drivers/isdn/hisax/avma1_cs.c +++ b/drivers/isdn/hisax/avma1_cs.c | |||
@@ -50,7 +50,7 @@ module_param(isdnprot, int, 0); | |||
50 | handler. | 50 | handler. |
51 | */ | 51 | */ |
52 | 52 | ||
53 | static int avma1cs_config(struct pcmcia_device *link); | 53 | static int avma1cs_config(struct pcmcia_device *link) __devinit ; |
54 | static void avma1cs_release(struct pcmcia_device *link); | 54 | static void avma1cs_release(struct pcmcia_device *link); |
55 | 55 | ||
56 | /* | 56 | /* |
@@ -59,7 +59,7 @@ static void avma1cs_release(struct pcmcia_device *link); | |||
59 | needed to manage one actual PCMCIA card. | 59 | needed to manage one actual PCMCIA card. |
60 | */ | 60 | */ |
61 | 61 | ||
62 | static void avma1cs_detach(struct pcmcia_device *p_dev); | 62 | static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ; |
63 | 63 | ||
64 | 64 | ||
65 | /* | 65 | /* |
@@ -99,7 +99,7 @@ typedef struct local_info_t { | |||
99 | 99 | ||
100 | ======================================================================*/ | 100 | ======================================================================*/ |
101 | 101 | ||
102 | static int avma1cs_probe(struct pcmcia_device *p_dev) | 102 | static int __devinit avma1cs_probe(struct pcmcia_device *p_dev) |
103 | { | 103 | { |
104 | local_info_t *local; | 104 | local_info_t *local; |
105 | 105 | ||
@@ -140,7 +140,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev) | |||
140 | 140 | ||
141 | ======================================================================*/ | 141 | ======================================================================*/ |
142 | 142 | ||
143 | static void avma1cs_detach(struct pcmcia_device *link) | 143 | static void __devexit avma1cs_detach(struct pcmcia_device *link) |
144 | { | 144 | { |
145 | dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link); | 145 | dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link); |
146 | avma1cs_release(link); | 146 | avma1cs_release(link); |
@@ -174,7 +174,7 @@ static int avma1cs_configcheck(struct pcmcia_device *p_dev, | |||
174 | } | 174 | } |
175 | 175 | ||
176 | 176 | ||
177 | static int avma1cs_config(struct pcmcia_device *link) | 177 | static int __devinit avma1cs_config(struct pcmcia_device *link) |
178 | { | 178 | { |
179 | local_info_t *dev; | 179 | local_info_t *dev; |
180 | int i; | 180 | int i; |
@@ -282,7 +282,7 @@ static struct pcmcia_driver avma1cs_driver = { | |||
282 | .name = "avma1_cs", | 282 | .name = "avma1_cs", |
283 | }, | 283 | }, |
284 | .probe = avma1cs_probe, | 284 | .probe = avma1cs_probe, |
285 | .remove = avma1cs_detach, | 285 | .remove = __devexit_p(avma1cs_detach), |
286 | .id_table = avma1cs_ids, | 286 | .id_table = avma1cs_ids, |
287 | }; | 287 | }; |
288 | 288 | ||
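
This hunk and the three hisax hunks that follow (elsa_cs, sedlbauer_cs, teles_cs) apply the same treatment: the probe/config paths are marked __devinit, the detach paths __devexit, and the .remove pointer is wrapped in __devexit_p() so the reference compiles to NULL when hotplug-removal code is discarded. The shape of the annotation reduced to a skeleton; the driver name and header choice below are assumptions for illustration:

    #include <linux/init.h>
    #include <pcmcia/ds.h>          /* assumed location of struct pcmcia_device */

    static int __devinit example_probe(struct pcmcia_device *link)
    {
            return 0;               /* real setup elided */
    }

    static void __devexit example_detach(struct pcmcia_device *link)
    {
            /* real teardown elided */
    }

    static struct pcmcia_driver example_driver = {
            .probe  = example_probe,
            /* becomes NULL if __devexit functions are not kept in the image */
            .remove = __devexit_p(example_detach),
            /* .owner, .drv.name, .id_table etc. elided */
    };
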
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c index c9a30b1c9237..c9f2279e21f5 100644 --- a/drivers/isdn/hisax/elsa_cs.c +++ b/drivers/isdn/hisax/elsa_cs.c | |||
@@ -76,7 +76,7 @@ module_param(protocol, int, 0); | |||
76 | handler. | 76 | handler. |
77 | */ | 77 | */ |
78 | 78 | ||
79 | static int elsa_cs_config(struct pcmcia_device *link); | 79 | static int elsa_cs_config(struct pcmcia_device *link) __devinit ; |
80 | static void elsa_cs_release(struct pcmcia_device *link); | 80 | static void elsa_cs_release(struct pcmcia_device *link); |
81 | 81 | ||
82 | /* | 82 | /* |
@@ -85,7 +85,7 @@ static void elsa_cs_release(struct pcmcia_device *link); | |||
85 | needed to manage one actual PCMCIA card. | 85 | needed to manage one actual PCMCIA card. |
86 | */ | 86 | */ |
87 | 87 | ||
88 | static void elsa_cs_detach(struct pcmcia_device *p_dev); | 88 | static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit; |
89 | 89 | ||
90 | /* | 90 | /* |
91 | A driver needs to provide a dev_node_t structure for each device | 91 | A driver needs to provide a dev_node_t structure for each device |
@@ -121,7 +121,7 @@ typedef struct local_info_t { | |||
121 | 121 | ||
122 | ======================================================================*/ | 122 | ======================================================================*/ |
123 | 123 | ||
124 | static int elsa_cs_probe(struct pcmcia_device *link) | 124 | static int __devinit elsa_cs_probe(struct pcmcia_device *link) |
125 | { | 125 | { |
126 | local_info_t *local; | 126 | local_info_t *local; |
127 | 127 | ||
@@ -166,7 +166,7 @@ static int elsa_cs_probe(struct pcmcia_device *link) | |||
166 | 166 | ||
167 | ======================================================================*/ | 167 | ======================================================================*/ |
168 | 168 | ||
169 | static void elsa_cs_detach(struct pcmcia_device *link) | 169 | static void __devexit elsa_cs_detach(struct pcmcia_device *link) |
170 | { | 170 | { |
171 | local_info_t *info = link->priv; | 171 | local_info_t *info = link->priv; |
172 | 172 | ||
@@ -210,7 +210,7 @@ static int elsa_cs_configcheck(struct pcmcia_device *p_dev, | |||
210 | return -ENODEV; | 210 | return -ENODEV; |
211 | } | 211 | } |
212 | 212 | ||
213 | static int elsa_cs_config(struct pcmcia_device *link) | 213 | static int __devinit elsa_cs_config(struct pcmcia_device *link) |
214 | { | 214 | { |
215 | local_info_t *dev; | 215 | local_info_t *dev; |
216 | int i; | 216 | int i; |
@@ -327,7 +327,7 @@ static struct pcmcia_driver elsa_cs_driver = { | |||
327 | .name = "elsa_cs", | 327 | .name = "elsa_cs", |
328 | }, | 328 | }, |
329 | .probe = elsa_cs_probe, | 329 | .probe = elsa_cs_probe, |
330 | .remove = elsa_cs_detach, | 330 | .remove = __devexit_p(elsa_cs_detach), |
331 | .id_table = elsa_ids, | 331 | .id_table = elsa_ids, |
332 | .suspend = elsa_suspend, | 332 | .suspend = elsa_suspend, |
333 | .resume = elsa_resume, | 333 | .resume = elsa_resume, |
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c index 7836ec3c7f86..71b3ddef03bb 100644 --- a/drivers/isdn/hisax/sedlbauer_cs.c +++ b/drivers/isdn/hisax/sedlbauer_cs.c | |||
@@ -76,7 +76,7 @@ module_param(protocol, int, 0); | |||
76 | event handler. | 76 | event handler. |
77 | */ | 77 | */ |
78 | 78 | ||
79 | static int sedlbauer_config(struct pcmcia_device *link); | 79 | static int sedlbauer_config(struct pcmcia_device *link) __devinit ; |
80 | static void sedlbauer_release(struct pcmcia_device *link); | 80 | static void sedlbauer_release(struct pcmcia_device *link); |
81 | 81 | ||
82 | /* | 82 | /* |
@@ -85,7 +85,7 @@ static void sedlbauer_release(struct pcmcia_device *link); | |||
85 | needed to manage one actual PCMCIA card. | 85 | needed to manage one actual PCMCIA card. |
86 | */ | 86 | */ |
87 | 87 | ||
88 | static void sedlbauer_detach(struct pcmcia_device *p_dev); | 88 | static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit; |
89 | 89 | ||
90 | /* | 90 | /* |
91 | You'll also need to prototype all the functions that will actually | 91 | You'll also need to prototype all the functions that will actually |
@@ -129,7 +129,7 @@ typedef struct local_info_t { | |||
129 | 129 | ||
130 | ======================================================================*/ | 130 | ======================================================================*/ |
131 | 131 | ||
132 | static int sedlbauer_probe(struct pcmcia_device *link) | 132 | static int __devinit sedlbauer_probe(struct pcmcia_device *link) |
133 | { | 133 | { |
134 | local_info_t *local; | 134 | local_info_t *local; |
135 | 135 | ||
@@ -177,7 +177,7 @@ static int sedlbauer_probe(struct pcmcia_device *link) | |||
177 | 177 | ||
178 | ======================================================================*/ | 178 | ======================================================================*/ |
179 | 179 | ||
180 | static void sedlbauer_detach(struct pcmcia_device *link) | 180 | static void __devexit sedlbauer_detach(struct pcmcia_device *link) |
181 | { | 181 | { |
182 | dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link); | 182 | dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link); |
183 | 183 | ||
@@ -283,7 +283,7 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev, | |||
283 | 283 | ||
284 | 284 | ||
285 | 285 | ||
286 | static int sedlbauer_config(struct pcmcia_device *link) | 286 | static int __devinit sedlbauer_config(struct pcmcia_device *link) |
287 | { | 287 | { |
288 | local_info_t *dev = link->priv; | 288 | local_info_t *dev = link->priv; |
289 | win_req_t *req; | 289 | win_req_t *req; |
@@ -441,7 +441,7 @@ static struct pcmcia_driver sedlbauer_driver = { | |||
441 | .name = "sedlbauer_cs", | 441 | .name = "sedlbauer_cs", |
442 | }, | 442 | }, |
443 | .probe = sedlbauer_probe, | 443 | .probe = sedlbauer_probe, |
444 | .remove = sedlbauer_detach, | 444 | .remove = __devexit_p(sedlbauer_detach), |
445 | .id_table = sedlbauer_ids, | 445 | .id_table = sedlbauer_ids, |
446 | .suspend = sedlbauer_suspend, | 446 | .suspend = sedlbauer_suspend, |
447 | .resume = sedlbauer_resume, | 447 | .resume = sedlbauer_resume, |
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c index b0c5976cbdb3..d010a0da8e19 100644 --- a/drivers/isdn/hisax/teles_cs.c +++ b/drivers/isdn/hisax/teles_cs.c | |||
@@ -57,7 +57,7 @@ module_param(protocol, int, 0); | |||
57 | handler. | 57 | handler. |
58 | */ | 58 | */ |
59 | 59 | ||
60 | static int teles_cs_config(struct pcmcia_device *link); | 60 | static int teles_cs_config(struct pcmcia_device *link) __devinit ; |
61 | static void teles_cs_release(struct pcmcia_device *link); | 61 | static void teles_cs_release(struct pcmcia_device *link); |
62 | 62 | ||
63 | /* | 63 | /* |
@@ -66,7 +66,7 @@ static void teles_cs_release(struct pcmcia_device *link); | |||
66 | needed to manage one actual PCMCIA card. | 66 | needed to manage one actual PCMCIA card. |
67 | */ | 67 | */ |
68 | 68 | ||
69 | static void teles_detach(struct pcmcia_device *p_dev); | 69 | static void teles_detach(struct pcmcia_device *p_dev) __devexit ; |
70 | 70 | ||
71 | /* | 71 | /* |
72 | A linked list of "instances" of the teles_cs device. Each actual | 72 | A linked list of "instances" of the teles_cs device. Each actual |
@@ -112,7 +112,7 @@ typedef struct local_info_t { | |||
112 | 112 | ||
113 | ======================================================================*/ | 113 | ======================================================================*/ |
114 | 114 | ||
115 | static int teles_probe(struct pcmcia_device *link) | 115 | static int __devinit teles_probe(struct pcmcia_device *link) |
116 | { | 116 | { |
117 | local_info_t *local; | 117 | local_info_t *local; |
118 | 118 | ||
@@ -156,7 +156,7 @@ static int teles_probe(struct pcmcia_device *link) | |||
156 | 156 | ||
157 | ======================================================================*/ | 157 | ======================================================================*/ |
158 | 158 | ||
159 | static void teles_detach(struct pcmcia_device *link) | 159 | static void __devexit teles_detach(struct pcmcia_device *link) |
160 | { | 160 | { |
161 | local_info_t *info = link->priv; | 161 | local_info_t *info = link->priv; |
162 | 162 | ||
@@ -200,7 +200,7 @@ static int teles_cs_configcheck(struct pcmcia_device *p_dev, | |||
200 | return -ENODEV; | 200 | return -ENODEV; |
201 | } | 201 | } |
202 | 202 | ||
203 | static int teles_cs_config(struct pcmcia_device *link) | 203 | static int __devinit teles_cs_config(struct pcmcia_device *link) |
204 | { | 204 | { |
205 | local_info_t *dev; | 205 | local_info_t *dev; |
206 | int i; | 206 | int i; |
@@ -319,7 +319,7 @@ static struct pcmcia_driver teles_cs_driver = { | |||
319 | .name = "teles_cs", | 319 | .name = "teles_cs", |
320 | }, | 320 | }, |
321 | .probe = teles_probe, | 321 | .probe = teles_probe, |
322 | .remove = teles_detach, | 322 | .remove = __devexit_p(teles_detach), |
323 | .id_table = teles_ids, | 323 | .id_table = teles_ids, |
324 | .suspend = teles_suspend, | 324 | .suspend = teles_suspend, |
325 | .resume = teles_resume, | 325 | .resume = teles_resume, |
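The sedlbauer_cs and teles_cs hunks above apply the standard PCMCIA hotplug annotations: the probe/config paths gain __devinit, the detach path gains __devexit, and the .remove pointer is wrapped in __devexit_p() so the reference disappears when the section is discarded. A minimal sketch of the same pattern for a hypothetical driver (the foo_cs names are illustrative, not from this patch):

```c
#include <linux/init.h>
#include <linux/module.h>
#include <pcmcia/ds.h>

static int __devinit foo_cs_probe(struct pcmcia_device *link)
{
	/* allocate per-card state, then configure the socket */
	return 0;
}

static void __devexit foo_cs_detach(struct pcmcia_device *link)
{
	/* release the configuration and free per-card state */
}

static struct pcmcia_driver foo_cs_driver = {
	.owner	= THIS_MODULE,
	.drv	= {
		.name	= "foo_cs",
	},
	.probe	= foo_cs_probe,
	/* __devexit_p() becomes NULL for built-in drivers without CONFIG_HOTPLUG */
	.remove	= __devexit_p(foo_cs_detach),
};
```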
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c index b5346b4db91a..b7a85f46a6c2 100644 --- a/drivers/misc/c2port/core.c +++ b/drivers/misc/c2port/core.c | |||
@@ -912,8 +912,8 @@ struct c2port_device *c2port_device_register(char *name, | |||
912 | 912 | ||
913 | c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, | 913 | c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, |
914 | "c2port%d", id); | 914 | "c2port%d", id); |
915 | if (unlikely(!c2dev->dev)) { | 915 | if (unlikely(IS_ERR(c2dev->dev))) { |
916 | ret = -ENOMEM; | 916 | ret = PTR_ERR(c2dev->dev); |
917 | goto error_device_create; | 917 | goto error_device_create; |
918 | } | 918 | } |
919 | dev_set_drvdata(c2dev->dev, c2dev); | 919 | dev_set_drvdata(c2dev->dev, c2dev); |
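The c2port fix above matters because device_create() reports failure through an ERR_PTR-encoded pointer, never NULL, so the old !c2dev->dev test could never fire. A hedged sketch of the checking pattern (demo_class and the device name are placeholders, assumed to come from an earlier class_create()):

```c
#include <linux/device.h>
#include <linux/err.h>

static struct class *demo_class;	/* assumed: set up elsewhere via class_create() */

static int demo_register(void *priv, int id)
{
	struct device *dev;

	dev = device_create(demo_class, NULL, 0, priv, "demo%d", id);
	if (IS_ERR(dev))		/* failure is ERR_PTR(-E...), not NULL */
		return PTR_ERR(dev);	/* propagate the encoded errno */

	dev_set_drvdata(dev, priv);
	return 0;
}
```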
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 0eac6c814904..e041c003db22 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -225,7 +225,7 @@ static int mmc_read_ext_csd(struct mmc_card *card) | |||
225 | mmc_card_set_blockaddr(card); | 225 | mmc_card_set_blockaddr(card); |
226 | } | 226 | } |
227 | 227 | ||
228 | switch (ext_csd[EXT_CSD_CARD_TYPE]) { | 228 | switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { |
229 | case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: | 229 | case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: |
230 | card->ext_csd.hs_max_dtr = 52000000; | 230 | card->ext_csd.hs_max_dtr = 52000000; |
231 | break; | 231 | break; |
@@ -237,7 +237,6 @@ static int mmc_read_ext_csd(struct mmc_card *card) | |||
237 | printk(KERN_WARNING "%s: card is mmc v4 but doesn't " | 237 | printk(KERN_WARNING "%s: card is mmc v4 but doesn't " |
238 | "support any high-speed modes.\n", | 238 | "support any high-speed modes.\n", |
239 | mmc_hostname(card->host)); | 239 | mmc_hostname(card->host)); |
240 | goto out; | ||
241 | } | 240 | } |
242 | 241 | ||
243 | if (card->ext_csd.rev >= 3) { | 242 | if (card->ext_csd.rev >= 3) { |
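The mmc hunk above switches on only the defined high-speed bits of EXT_CSD_CARD_TYPE instead of the raw byte, so a card that sets reserved bits no longer falls into the "no high-speed modes" branch, and the now-pointless goto out is dropped so the rest of the EXT_CSD is still parsed. The idea, sketched with illustrative constants (the real mask and bit values live in the MMC core headers):

```c
/* Only the bits the driver understands take part in the decision;
 * these values are illustrative, not the kernel's EXT_CSD_* defines. */
#define CARD_TYPE_26	(1 << 0)
#define CARD_TYPE_52	(1 << 1)
#define CARD_TYPE_MASK	(CARD_TYPE_26 | CARD_TYPE_52)

static unsigned int hs_max_dtr_from_card_type(unsigned char card_type)
{
	switch (card_type & CARD_TYPE_MASK) {
	case CARD_TYPE_52 | CARD_TYPE_26:
		return 52000000;
	case CARD_TYPE_26:
		return 26000000;
	default:
		/* unknown/reserved bits alone no longer abort parsing */
		return 0;
	}
}
```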
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c index a1d4188c430b..e7810b74f396 100644 --- a/drivers/net/arm/ks8695net.c +++ b/drivers/net/arm/ks8695net.c | |||
@@ -449,11 +449,10 @@ ks8695_rx_irq(int irq, void *dev_id) | |||
449 | } | 449 | } |
450 | 450 | ||
451 | /** | 451 | /** |
452 | * ks8695_rx - Receive packets called by NAPI poll method | 452 | * ks8695_rx - Receive packets called by NAPI poll method |
453 | * @ksp: Private data for the KS8695 Ethernet | 453 | * @ksp: Private data for the KS8695 Ethernet |
454 | * @budget: The max packets would be receive | 454 | * @budget: Number of packets allowed to process |
455 | */ | 455 | */ |
456 | |||
457 | static int ks8695_rx(struct ks8695_priv *ksp, int budget) | 456 | static int ks8695_rx(struct ks8695_priv *ksp, int budget) |
458 | { | 457 | { |
459 | struct net_device *ndev = ksp->ndev; | 458 | struct net_device *ndev = ksp->ndev; |
@@ -461,7 +460,6 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget) | |||
461 | int buff_n; | 460 | int buff_n; |
462 | u32 flags; | 461 | u32 flags; |
463 | int pktlen; | 462 | int pktlen; |
464 | int last_rx_processed = -1; | ||
465 | int received = 0; | 463 | int received = 0; |
466 | 464 | ||
467 | buff_n = ksp->next_rx_desc_read; | 465 | buff_n = ksp->next_rx_desc_read; |
@@ -471,6 +469,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget) | |||
471 | cpu_to_le32(RDES_OWN)))) { | 469 | cpu_to_le32(RDES_OWN)))) { |
472 | rmb(); | 470 | rmb(); |
473 | flags = le32_to_cpu(ksp->rx_ring[buff_n].status); | 471 | flags = le32_to_cpu(ksp->rx_ring[buff_n].status); |
472 | |||
474 | /* Found an SKB which we own, this means we | 473 | /* Found an SKB which we own, this means we |
475 | * received a packet | 474 | * received a packet |
476 | */ | 475 | */ |
@@ -533,23 +532,18 @@ rx_failure: | |||
533 | ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); | 532 | ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); |
534 | rx_finished: | 533 | rx_finished: |
535 | received++; | 534 | received++; |
536 | /* And note this as processed so we can start | ||
537 | * from here next time | ||
538 | */ | ||
539 | last_rx_processed = buff_n; | ||
540 | buff_n = (buff_n + 1) & MAX_RX_DESC_MASK; | 535 | buff_n = (buff_n + 1) & MAX_RX_DESC_MASK; |
541 | /*And note which RX descriptor we last did */ | ||
542 | if (likely(last_rx_processed != -1)) | ||
543 | ksp->next_rx_desc_read = | ||
544 | (last_rx_processed + 1) & | ||
545 | MAX_RX_DESC_MASK; | ||
546 | } | 536 | } |
537 | |||
538 | /* And note which RX descriptor we last did */ | ||
539 | ksp->next_rx_desc_read = buff_n; | ||
540 | |||
547 | /* And refill the buffers */ | 541 | /* And refill the buffers */ |
548 | ks8695_refill_rxbuffers(ksp); | 542 | ks8695_refill_rxbuffers(ksp); |
549 | 543 | ||
550 | /* Kick the RX DMA engine, in case it became | 544 | /* Kick the RX DMA engine, in case it became suspended */ |
551 | * suspended */ | ||
552 | ks8695_writereg(ksp, KS8695_DRSC, 0); | 545 | ks8695_writereg(ksp, KS8695_DRSC, 0); |
546 | |||
553 | return received; | 547 | return received; |
554 | } | 548 | } |
555 | 549 | ||
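The ks8695 hunks above simplify receive-ring bookkeeping: rather than carrying a last_rx_processed marker that was updated and written back to next_rx_desc_read inside the loop, the ring index advances as packets are consumed and is stored once after the loop. A generic sketch of that control flow (the ring type and callbacks are made up for illustration):

```c
/* Sketch of the post-patch loop shape; not the driver's real types. */
struct demo_rx_ring {
	unsigned int next_rx;	/* next descriptor to examine */
	unsigned int mask;	/* ring size - 1 (power of two) */
};

static int demo_rx_poll(struct demo_rx_ring *ring, int budget,
			int (*desc_ready)(unsigned int),
			void (*consume)(unsigned int))
{
	unsigned int idx = ring->next_rx;
	int received = 0;

	while (received < budget && desc_ready(idx)) {
		consume(idx);			/* hand the packet to the stack */
		received++;
		idx = (idx + 1) & ring->mask;	/* advance inside the loop...   */
	}

	ring->next_rx = idx;			/* ...record the position once  */
	return received;
}
```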
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 9ba547069db3..0ebd8208f606 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c | |||
@@ -84,7 +84,7 @@ | |||
84 | 84 | ||
85 | #define ATLX_DRIVER_VERSION "2.1.3" | 85 | #define ATLX_DRIVER_VERSION "2.1.3" |
86 | MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ | 86 | MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ |
87 | Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); | 87 | Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); |
88 | MODULE_LICENSE("GPL"); | 88 | MODULE_LICENSE("GPL"); |
89 | MODULE_VERSION(ATLX_DRIVER_VERSION); | 89 | MODULE_VERSION(ATLX_DRIVER_VERSION); |
90 | 90 | ||
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 9560d48944ab..51e1065e7897 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c | |||
@@ -490,7 +490,7 @@ be_test_ddr_dma(struct be_adapter *adapter) | |||
490 | { | 490 | { |
491 | int ret, i; | 491 | int ret, i; |
492 | struct be_dma_mem ddrdma_cmd; | 492 | struct be_dma_mem ddrdma_cmd; |
493 | u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5}; | 493 | u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL}; |
494 | 494 | ||
495 | ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); | 495 | ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); |
496 | ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, | 496 | ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, |
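The benet change above is purely about integer constant types: on a 32-bit build an unsuffixed 0x5a5a5a5a5a5a5a5a does not fit in long, which in the kernel's gnu89 dialect draws an "integer constant is too large" warning; the ULL suffix pins the constants to unsigned long long everywhere. Minimal illustration:

```c
#include <linux/types.h>

/* The ULL suffix makes the 64-bit test patterns unsigned long long on
 * both 32-bit and 64-bit architectures, silencing the constant-size
 * warning without changing the values. */
static const u64 ddr_dma_patterns[2] = {
	0x5a5a5a5a5a5a5a5aULL,
	0xa5a5a5a5a5a5a5a5ULL,
};
```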
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 381887ba677c..a257babd1bb4 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -246,6 +246,8 @@ static const struct flash_spec flash_5709 = { | |||
246 | 246 | ||
247 | MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); | 247 | MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); |
248 | 248 | ||
249 | static void bnx2_init_napi(struct bnx2 *bp); | ||
250 | |||
249 | static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) | 251 | static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) |
250 | { | 252 | { |
251 | u32 diff; | 253 | u32 diff; |
@@ -6197,6 +6199,7 @@ bnx2_open(struct net_device *dev) | |||
6197 | bnx2_disable_int(bp); | 6199 | bnx2_disable_int(bp); |
6198 | 6200 | ||
6199 | bnx2_setup_int_mode(bp, disable_msi); | 6201 | bnx2_setup_int_mode(bp, disable_msi); |
6202 | bnx2_init_napi(bp); | ||
6200 | bnx2_napi_enable(bp); | 6203 | bnx2_napi_enable(bp); |
6201 | rc = bnx2_alloc_mem(bp); | 6204 | rc = bnx2_alloc_mem(bp); |
6202 | if (rc) | 6205 | if (rc) |
@@ -7643,9 +7646,11 @@ poll_bnx2(struct net_device *dev) | |||
7643 | int i; | 7646 | int i; |
7644 | 7647 | ||
7645 | for (i = 0; i < bp->irq_nvecs; i++) { | 7648 | for (i = 0; i < bp->irq_nvecs; i++) { |
7646 | disable_irq(bp->irq_tbl[i].vector); | 7649 | struct bnx2_irq *irq = &bp->irq_tbl[i]; |
7647 | bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]); | 7650 | |
7648 | enable_irq(bp->irq_tbl[i].vector); | 7651 | disable_irq(irq->vector); |
7652 | irq->handler(irq->vector, &bp->bnx2_napi[i]); | ||
7653 | enable_irq(irq->vector); | ||
7649 | } | 7654 | } |
7650 | } | 7655 | } |
7651 | #endif | 7656 | #endif |
@@ -8207,7 +8212,7 @@ bnx2_init_napi(struct bnx2 *bp) | |||
8207 | { | 8212 | { |
8208 | int i; | 8213 | int i; |
8209 | 8214 | ||
8210 | for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { | 8215 | for (i = 0; i < bp->irq_nvecs; i++) { |
8211 | struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; | 8216 | struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; |
8212 | int (*poll)(struct napi_struct *, int); | 8217 | int (*poll)(struct napi_struct *, int); |
8213 | 8218 | ||
@@ -8276,7 +8281,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
8276 | dev->ethtool_ops = &bnx2_ethtool_ops; | 8281 | dev->ethtool_ops = &bnx2_ethtool_ops; |
8277 | 8282 | ||
8278 | bp = netdev_priv(dev); | 8283 | bp = netdev_priv(dev); |
8279 | bnx2_init_napi(bp); | ||
8280 | 8284 | ||
8281 | pci_set_drvdata(pdev, dev); | 8285 | pci_set_drvdata(pdev, dev); |
8282 | 8286 | ||
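The bnx2 hunks above register NAPI contexts only for the interrupt vectors actually allocated, and do so from open() after bnx2_setup_int_mode() has decided how many vectors exist, instead of once at probe time; the netpoll path likewise invokes each vector's own handler rather than assuming bnx2_interrupt. The per-vector registration, reduced to a sketch with placeholder names:

```c
#include <linux/netdevice.h>

/* Add one NAPI context per vector in use; called from ndo_open() once
 * the interrupt mode (and hence the vector count) is known. */
static void demo_init_napi(struct net_device *dev, struct napi_struct *napi,
			   int irq_nvecs,
			   int (*poll)(struct napi_struct *, int))
{
	int i;

	for (i = 0; i < irq_nvecs; i++)
		netif_napi_add(dev, &napi[i], poll, 64);
}
```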
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 430c02267d7e..5b92fbff431d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1235,6 +1235,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) | |||
1235 | write_lock_bh(&bond->curr_slave_lock); | 1235 | write_lock_bh(&bond->curr_slave_lock); |
1236 | } | 1236 | } |
1237 | } | 1237 | } |
1238 | |||
1239 | /* resend IGMP joins since all were sent on curr_active_slave */ | ||
1240 | if (bond->params.mode == BOND_MODE_ROUNDROBIN) { | ||
1241 | bond_resend_igmp_join_requests(bond); | ||
1242 | } | ||
1238 | } | 1243 | } |
1239 | 1244 | ||
1240 | /** | 1245 | /** |
@@ -4138,22 +4143,41 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
4138 | struct bonding *bond = netdev_priv(bond_dev); | 4143 | struct bonding *bond = netdev_priv(bond_dev); |
4139 | struct slave *slave, *start_at; | 4144 | struct slave *slave, *start_at; |
4140 | int i, slave_no, res = 1; | 4145 | int i, slave_no, res = 1; |
4146 | struct iphdr *iph = ip_hdr(skb); | ||
4141 | 4147 | ||
4142 | read_lock(&bond->lock); | 4148 | read_lock(&bond->lock); |
4143 | 4149 | ||
4144 | if (!BOND_IS_OK(bond)) | 4150 | if (!BOND_IS_OK(bond)) |
4145 | goto out; | 4151 | goto out; |
4146 | |||
4147 | /* | 4152 | /* |
4148 | * Concurrent TX may collide on rr_tx_counter; we accept that | 4153 | * Start with the curr_active_slave that joined the bond as the |
4149 | * as being rare enough not to justify using an atomic op here | 4154 | * default for sending IGMP traffic. For failover purposes one |
4155 | * needs to maintain some consistency for the interface that will | ||
4156 | * send the join/membership reports. The curr_active_slave found | ||
4157 | * will send all of this type of traffic. | ||
4150 | */ | 4158 | */ |
4151 | slave_no = bond->rr_tx_counter++ % bond->slave_cnt; | 4159 | if ((iph->protocol == htons(IPPROTO_IGMP)) && |
4160 | (skb->protocol == htons(ETH_P_IP))) { | ||
4152 | 4161 | ||
4153 | bond_for_each_slave(bond, slave, i) { | 4162 | read_lock(&bond->curr_slave_lock); |
4154 | slave_no--; | 4163 | slave = bond->curr_active_slave; |
4155 | if (slave_no < 0) | 4164 | read_unlock(&bond->curr_slave_lock); |
4156 | break; | 4165 | |
4166 | if (!slave) | ||
4167 | goto out; | ||
4168 | } else { | ||
4169 | /* | ||
4170 | * Concurrent TX may collide on rr_tx_counter; we accept | ||
4171 | * that as being rare enough not to justify using an | ||
4172 | * atomic op here. | ||
4173 | */ | ||
4174 | slave_no = bond->rr_tx_counter++ % bond->slave_cnt; | ||
4175 | |||
4176 | bond_for_each_slave(bond, slave, i) { | ||
4177 | slave_no--; | ||
4178 | if (slave_no < 0) | ||
4179 | break; | ||
4180 | } | ||
4157 | } | 4181 | } |
4158 | 4182 | ||
4159 | start_at = slave; | 4183 | start_at = slave; |
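The bonding hunks above pin IGMP traffic in round-robin mode to curr_active_slave, so membership reports consistently leave on one interface, and re-send the join requests whenever the active slave changes; all other traffic keeps rotating over rr_tx_counter. A sketch of the classification the transmit path is aiming for (locking and the actual slave selection omitted):

```c
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/types.h>

/* True for IGMP carried over IPv4; such frames are steered to the
 * current active slave instead of the round-robin rotation. */
static bool demo_skb_is_igmp(const struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) &&
	       ip_hdr(skb)->protocol == IPPROTO_IGMP;
}
```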
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index 866905fa4119..03489864376d 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/can/dev.h> | 22 | #include <linux/can/dev.h> |
23 | #include <linux/can/error.h> | 23 | #include <linux/can/error.h> |
24 | 24 | ||
25 | #include <asm/bfin_can.h> | ||
25 | #include <asm/portmux.h> | 26 | #include <asm/portmux.h> |
26 | 27 | ||
27 | #define DRV_NAME "bfin_can" | 28 | #define DRV_NAME "bfin_can" |
@@ -29,90 +30,6 @@ | |||
29 | #define TX_ECHO_SKB_MAX 1 | 30 | #define TX_ECHO_SKB_MAX 1 |
30 | 31 | ||
31 | /* | 32 | /* |
32 | * transmit and receive channels | ||
33 | */ | ||
34 | #define TRANSMIT_CHL 24 | ||
35 | #define RECEIVE_STD_CHL 0 | ||
36 | #define RECEIVE_EXT_CHL 4 | ||
37 | #define RECEIVE_RTR_CHL 8 | ||
38 | #define RECEIVE_EXT_RTR_CHL 12 | ||
39 | #define MAX_CHL_NUMBER 32 | ||
40 | |||
41 | /* | ||
42 | * bfin can registers layout | ||
43 | */ | ||
44 | struct bfin_can_mask_regs { | ||
45 | u16 aml; | ||
46 | u16 dummy1; | ||
47 | u16 amh; | ||
48 | u16 dummy2; | ||
49 | }; | ||
50 | |||
51 | struct bfin_can_channel_regs { | ||
52 | u16 data[8]; | ||
53 | u16 dlc; | ||
54 | u16 dummy1; | ||
55 | u16 tsv; | ||
56 | u16 dummy2; | ||
57 | u16 id0; | ||
58 | u16 dummy3; | ||
59 | u16 id1; | ||
60 | u16 dummy4; | ||
61 | }; | ||
62 | |||
63 | struct bfin_can_regs { | ||
64 | /* | ||
65 | * global control and status registers | ||
66 | */ | ||
67 | u16 mc1; /* offset 0 */ | ||
68 | u16 dummy1; | ||
69 | u16 md1; /* offset 4 */ | ||
70 | u16 rsv1[13]; | ||
71 | u16 mbtif1; /* offset 0x20 */ | ||
72 | u16 dummy2; | ||
73 | u16 mbrif1; /* offset 0x24 */ | ||
74 | u16 dummy3; | ||
75 | u16 mbim1; /* offset 0x28 */ | ||
76 | u16 rsv2[11]; | ||
77 | u16 mc2; /* offset 0x40 */ | ||
78 | u16 dummy4; | ||
79 | u16 md2; /* offset 0x44 */ | ||
80 | u16 dummy5; | ||
81 | u16 trs2; /* offset 0x48 */ | ||
82 | u16 rsv3[11]; | ||
83 | u16 mbtif2; /* offset 0x60 */ | ||
84 | u16 dummy6; | ||
85 | u16 mbrif2; /* offset 0x64 */ | ||
86 | u16 dummy7; | ||
87 | u16 mbim2; /* offset 0x68 */ | ||
88 | u16 rsv4[11]; | ||
89 | u16 clk; /* offset 0x80 */ | ||
90 | u16 dummy8; | ||
91 | u16 timing; /* offset 0x84 */ | ||
92 | u16 rsv5[3]; | ||
93 | u16 status; /* offset 0x8c */ | ||
94 | u16 dummy9; | ||
95 | u16 cec; /* offset 0x90 */ | ||
96 | u16 dummy10; | ||
97 | u16 gis; /* offset 0x94 */ | ||
98 | u16 dummy11; | ||
99 | u16 gim; /* offset 0x98 */ | ||
100 | u16 rsv6[3]; | ||
101 | u16 ctrl; /* offset 0xa0 */ | ||
102 | u16 dummy12; | ||
103 | u16 intr; /* offset 0xa4 */ | ||
104 | u16 rsv7[7]; | ||
105 | u16 esr; /* offset 0xb4 */ | ||
106 | u16 rsv8[37]; | ||
107 | |||
108 | /* | ||
109 | * channel(mailbox) mask and message registers | ||
110 | */ | ||
111 | struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */ | ||
112 | struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */ | ||
113 | }; | ||
114 | |||
115 | /* | ||
116 | * bfin can private data | 33 | * bfin can private data |
117 | */ | 34 | */ |
118 | struct bfin_can_priv { | 35 | struct bfin_can_priv { |
@@ -163,7 +80,7 @@ static int bfin_can_set_bittiming(struct net_device *dev) | |||
163 | if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) | 80 | if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) |
164 | timing |= SAM; | 81 | timing |= SAM; |
165 | 82 | ||
166 | bfin_write16(®->clk, clk); | 83 | bfin_write16(®->clock, clk); |
167 | bfin_write16(®->timing, timing); | 84 | bfin_write16(®->timing, timing); |
168 | 85 | ||
169 | dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n", | 86 | dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n", |
@@ -185,11 +102,11 @@ static void bfin_can_set_reset_mode(struct net_device *dev) | |||
185 | bfin_write16(®->gim, 0); | 102 | bfin_write16(®->gim, 0); |
186 | 103 | ||
187 | /* reset can and enter configuration mode */ | 104 | /* reset can and enter configuration mode */ |
188 | bfin_write16(®->ctrl, SRS | CCR); | 105 | bfin_write16(®->control, SRS | CCR); |
189 | SSYNC(); | 106 | SSYNC(); |
190 | bfin_write16(®->ctrl, CCR); | 107 | bfin_write16(®->control, CCR); |
191 | SSYNC(); | 108 | SSYNC(); |
192 | while (!(bfin_read16(®->ctrl) & CCA)) { | 109 | while (!(bfin_read16(®->control) & CCA)) { |
193 | udelay(10); | 110 | udelay(10); |
194 | if (--timeout == 0) { | 111 | if (--timeout == 0) { |
195 | dev_err(dev->dev.parent, | 112 | dev_err(dev->dev.parent, |
@@ -244,7 +161,7 @@ static void bfin_can_set_normal_mode(struct net_device *dev) | |||
244 | /* | 161 | /* |
245 | * leave configuration mode | 162 | * leave configuration mode |
246 | */ | 163 | */ |
247 | bfin_write16(®->ctrl, bfin_read16(®->ctrl) & ~CCR); | 164 | bfin_write16(®->control, bfin_read16(®->control) & ~CCR); |
248 | 165 | ||
249 | while (bfin_read16(®->status) & CCA) { | 166 | while (bfin_read16(®->status) & CCA) { |
250 | udelay(10); | 167 | udelay(10); |
@@ -726,7 +643,7 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg) | |||
726 | 643 | ||
727 | if (netif_running(dev)) { | 644 | if (netif_running(dev)) { |
728 | /* enter sleep mode */ | 645 | /* enter sleep mode */ |
729 | bfin_write16(®->ctrl, bfin_read16(®->ctrl) | SMR); | 646 | bfin_write16(®->control, bfin_read16(®->control) | SMR); |
730 | SSYNC(); | 647 | SSYNC(); |
731 | while (!(bfin_read16(®->intr) & SMACK)) { | 648 | while (!(bfin_read16(®->intr) & SMACK)) { |
732 | udelay(10); | 649 | udelay(10); |
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 9902b33b7160..2f29c2131851 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -261,7 +261,6 @@ struct e1000_adapter { | |||
261 | /* TX */ | 261 | /* TX */ |
262 | struct e1000_tx_ring *tx_ring; /* One per active queue */ | 262 | struct e1000_tx_ring *tx_ring; /* One per active queue */ |
263 | unsigned int restart_queue; | 263 | unsigned int restart_queue; |
264 | unsigned long tx_queue_len; | ||
265 | u32 txd_cmd; | 264 | u32 txd_cmd; |
266 | u32 tx_int_delay; | 265 | u32 tx_int_delay; |
267 | u32 tx_abs_int_delay; | 266 | u32 tx_abs_int_delay; |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 8be6faee43e6..b15ece26ed84 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -383,8 +383,6 @@ static void e1000_configure(struct e1000_adapter *adapter) | |||
383 | adapter->alloc_rx_buf(adapter, ring, | 383 | adapter->alloc_rx_buf(adapter, ring, |
384 | E1000_DESC_UNUSED(ring)); | 384 | E1000_DESC_UNUSED(ring)); |
385 | } | 385 | } |
386 | |||
387 | adapter->tx_queue_len = netdev->tx_queue_len; | ||
388 | } | 386 | } |
389 | 387 | ||
390 | int e1000_up(struct e1000_adapter *adapter) | 388 | int e1000_up(struct e1000_adapter *adapter) |
@@ -503,7 +501,6 @@ void e1000_down(struct e1000_adapter *adapter) | |||
503 | del_timer_sync(&adapter->watchdog_timer); | 501 | del_timer_sync(&adapter->watchdog_timer); |
504 | del_timer_sync(&adapter->phy_info_timer); | 502 | del_timer_sync(&adapter->phy_info_timer); |
505 | 503 | ||
506 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
507 | adapter->link_speed = 0; | 504 | adapter->link_speed = 0; |
508 | adapter->link_duplex = 0; | 505 | adapter->link_duplex = 0; |
509 | netif_carrier_off(netdev); | 506 | netif_carrier_off(netdev); |
@@ -2316,19 +2313,15 @@ static void e1000_watchdog(unsigned long data) | |||
2316 | E1000_CTRL_RFCE) ? "RX" : ((ctrl & | 2313 | E1000_CTRL_RFCE) ? "RX" : ((ctrl & |
2317 | E1000_CTRL_TFCE) ? "TX" : "None" ))); | 2314 | E1000_CTRL_TFCE) ? "TX" : "None" ))); |
2318 | 2315 | ||
2319 | /* tweak tx_queue_len according to speed/duplex | 2316 | /* adjust timeout factor according to speed/duplex */ |
2320 | * and adjust the timeout factor */ | ||
2321 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
2322 | adapter->tx_timeout_factor = 1; | 2317 | adapter->tx_timeout_factor = 1; |
2323 | switch (adapter->link_speed) { | 2318 | switch (adapter->link_speed) { |
2324 | case SPEED_10: | 2319 | case SPEED_10: |
2325 | txb2b = false; | 2320 | txb2b = false; |
2326 | netdev->tx_queue_len = 10; | ||
2327 | adapter->tx_timeout_factor = 16; | 2321 | adapter->tx_timeout_factor = 16; |
2328 | break; | 2322 | break; |
2329 | case SPEED_100: | 2323 | case SPEED_100: |
2330 | txb2b = false; | 2324 | txb2b = false; |
2331 | netdev->tx_queue_len = 100; | ||
2332 | /* maybe add some timeout factor ? */ | 2325 | /* maybe add some timeout factor ? */ |
2333 | break; | 2326 | break; |
2334 | } | 2327 | } |
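The same cleanup repeats across e1000 above and e1000e, igb and igbvf below: the drivers stop saving and rewriting netdev->tx_queue_len on every link change, which fought with administrator-set qdisc lengths, and keep only the per-speed tx_timeout_factor. What survives reduces to roughly this (a sketch, not any one driver's exact code or factors):

```c
#include <linux/ethtool.h>	/* SPEED_10, SPEED_100 */

struct demo_adapter {		/* stand-in for the Intel drivers' adapter structs */
	u16 link_speed;
	int tx_timeout_factor;
};

/* Scale the transmit-hang timeout by link speed; tx_queue_len is left
 * entirely under the stack's and the administrator's control. */
static void demo_adjust_timeout_factor(struct demo_adapter *adapter)
{
	adapter->tx_timeout_factor = 1;

	switch (adapter->link_speed) {
	case SPEED_10:
		adapter->tx_timeout_factor = 16;
		break;
	case SPEED_100:
		/* maybe add some timeout factor ? */
		break;
	}
}
```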
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index c2ec095d2163..118bdf483593 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -279,7 +279,6 @@ struct e1000_adapter { | |||
279 | 279 | ||
280 | struct napi_struct napi; | 280 | struct napi_struct napi; |
281 | 281 | ||
282 | unsigned long tx_queue_len; | ||
283 | unsigned int restart_queue; | 282 | unsigned int restart_queue; |
284 | u32 txd_cmd; | 283 | u32 txd_cmd; |
285 | 284 | ||
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 88d54d3efcef..e1cceb606576 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -2289,8 +2289,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
2289 | ew32(TCTL, tctl); | 2289 | ew32(TCTL, tctl); |
2290 | 2290 | ||
2291 | e1000e_config_collision_dist(hw); | 2291 | e1000e_config_collision_dist(hw); |
2292 | |||
2293 | adapter->tx_queue_len = adapter->netdev->tx_queue_len; | ||
2294 | } | 2292 | } |
2295 | 2293 | ||
2296 | /** | 2294 | /** |
@@ -2877,7 +2875,6 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
2877 | del_timer_sync(&adapter->watchdog_timer); | 2875 | del_timer_sync(&adapter->watchdog_timer); |
2878 | del_timer_sync(&adapter->phy_info_timer); | 2876 | del_timer_sync(&adapter->phy_info_timer); |
2879 | 2877 | ||
2880 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
2881 | netif_carrier_off(netdev); | 2878 | netif_carrier_off(netdev); |
2882 | adapter->link_speed = 0; | 2879 | adapter->link_speed = 0; |
2883 | adapter->link_duplex = 0; | 2880 | adapter->link_duplex = 0; |
@@ -3588,21 +3585,15 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
3588 | "link gets many collisions.\n"); | 3585 | "link gets many collisions.\n"); |
3589 | } | 3586 | } |
3590 | 3587 | ||
3591 | /* | 3588 | /* adjust timeout factor according to speed/duplex */ |
3592 | * tweak tx_queue_len according to speed/duplex | ||
3593 | * and adjust the timeout factor | ||
3594 | */ | ||
3595 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
3596 | adapter->tx_timeout_factor = 1; | 3589 | adapter->tx_timeout_factor = 1; |
3597 | switch (adapter->link_speed) { | 3590 | switch (adapter->link_speed) { |
3598 | case SPEED_10: | 3591 | case SPEED_10: |
3599 | txb2b = 0; | 3592 | txb2b = 0; |
3600 | netdev->tx_queue_len = 10; | ||
3601 | adapter->tx_timeout_factor = 16; | 3593 | adapter->tx_timeout_factor = 16; |
3602 | break; | 3594 | break; |
3603 | case SPEED_100: | 3595 | case SPEED_100: |
3604 | txb2b = 0; | 3596 | txb2b = 0; |
3605 | netdev->tx_queue_len = 100; | ||
3606 | adapter->tx_timeout_factor = 10; | 3597 | adapter->tx_timeout_factor = 10; |
3607 | break; | 3598 | break; |
3608 | } | 3599 | } |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index b6715553cf17..669de028d44f 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -2393,6 +2393,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev) | |||
2393 | * as many bytes as needed to align the data properly | 2393 | * as many bytes as needed to align the data properly |
2394 | */ | 2394 | */ |
2395 | skb_reserve(skb, alignamount); | 2395 | skb_reserve(skb, alignamount); |
2396 | GFAR_CB(skb)->alignamount = alignamount; | ||
2396 | 2397 | ||
2397 | return skb; | 2398 | return skb; |
2398 | } | 2399 | } |
@@ -2533,13 +2534,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
2533 | newskb = skb; | 2534 | newskb = skb; |
2534 | else if (skb) { | 2535 | else if (skb) { |
2535 | /* | 2536 | /* |
2536 | * We need to reset ->data to what it | 2537 | * We need to un-reserve() the skb to what it |
2537 | * was before gfar_new_skb() re-aligned | 2538 | * was before gfar_new_skb() re-aligned |
2538 | * it to an RXBUF_ALIGNMENT boundary | 2539 | * it to an RXBUF_ALIGNMENT boundary |
2539 | * before we put the skb back on the | 2540 | * before we put the skb back on the |
2540 | * recycle list. | 2541 | * recycle list. |
2541 | */ | 2542 | */ |
2542 | skb->data = skb->head + NET_SKB_PAD; | 2543 | skb_reserve(skb, -GFAR_CB(skb)->alignamount); |
2543 | __skb_queue_head(&priv->rx_recycle, skb); | 2544 | __skb_queue_head(&priv->rx_recycle, skb); |
2544 | } | 2545 | } |
2545 | } else { | 2546 | } else { |
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 3d72dc43dca5..17d25e714236 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h | |||
@@ -566,6 +566,12 @@ struct rxfcb { | |||
566 | u16 vlctl; /* VLAN control word */ | 566 | u16 vlctl; /* VLAN control word */ |
567 | }; | 567 | }; |
568 | 568 | ||
569 | struct gianfar_skb_cb { | ||
570 | int alignamount; | ||
571 | }; | ||
572 | |||
573 | #define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb)) | ||
574 | |||
569 | struct rmon_mib | 575 | struct rmon_mib |
570 | { | 576 | { |
571 | u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */ | 577 | u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */ |
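The gianfar fix above stops guessing how much headroom gfar_new_skb() reserved (the old skb->head + NET_SKB_PAD arithmetic) and instead records the alignment amount in the skb control buffer so it can be undone exactly before the buffer goes back on the recycle list. The control-buffer pattern, sketched generically:

```c
#include <linux/skbuff.h>

/* Per-skb scratch space lives in skb->cb (48 bytes, owned by whichever
 * layer currently holds the skb); the struct must fit inside it. */
struct demo_skb_cb {
	int alignamount;
};

#define DEMO_CB(skb) ((struct demo_skb_cb *)((skb)->cb))

static void demo_align_rx_skb(struct sk_buff *skb, int align)
{
	skb_reserve(skb, align);		/* move data up to the boundary */
	DEMO_CB(skb)->alignamount = align;	/* remember how far it moved    */
}

static void demo_unalign_rx_skb(struct sk_buff *skb)
{
	/* a negative reserve moves skb->data back by the recorded amount */
	skb_reserve(skb, -DEMO_CB(skb)->alignamount);
}
```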
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index 9d7fa2fb85ea..0bc990ec4a8e 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c | |||
@@ -94,6 +94,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) | |||
94 | case E1000_DEV_ID_82576_FIBER: | 94 | case E1000_DEV_ID_82576_FIBER: |
95 | case E1000_DEV_ID_82576_SERDES: | 95 | case E1000_DEV_ID_82576_SERDES: |
96 | case E1000_DEV_ID_82576_QUAD_COPPER: | 96 | case E1000_DEV_ID_82576_QUAD_COPPER: |
97 | case E1000_DEV_ID_82576_QUAD_COPPER_ET2: | ||
97 | case E1000_DEV_ID_82576_SERDES_QUAD: | 98 | case E1000_DEV_ID_82576_SERDES_QUAD: |
98 | mac->type = e1000_82576; | 99 | mac->type = e1000_82576; |
99 | break; | 100 | break; |
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h index 448005276b26..82a533f5192a 100644 --- a/drivers/net/igb/e1000_hw.h +++ b/drivers/net/igb/e1000_hw.h | |||
@@ -41,6 +41,7 @@ struct e1000_hw; | |||
41 | #define E1000_DEV_ID_82576_FIBER 0x10E6 | 41 | #define E1000_DEV_ID_82576_FIBER 0x10E6 |
42 | #define E1000_DEV_ID_82576_SERDES 0x10E7 | 42 | #define E1000_DEV_ID_82576_SERDES 0x10E7 |
43 | #define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 | 43 | #define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 |
44 | #define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 | ||
44 | #define E1000_DEV_ID_82576_NS 0x150A | 45 | #define E1000_DEV_ID_82576_NS 0x150A |
45 | #define E1000_DEV_ID_82576_NS_SERDES 0x1518 | 46 | #define E1000_DEV_ID_82576_NS_SERDES 0x1518 |
46 | #define E1000_DEV_ID_82576_SERDES_QUAD 0x150D | 47 | #define E1000_DEV_ID_82576_SERDES_QUAD 0x150D |
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index 2a8a886b37eb..be8d010e4021 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c | |||
@@ -1367,7 +1367,8 @@ out: | |||
1367 | * igb_enable_mng_pass_thru - Enable processing of ARP's | 1367 | * igb_enable_mng_pass_thru - Enable processing of ARP's |
1368 | * @hw: pointer to the HW structure | 1368 | * @hw: pointer to the HW structure |
1369 | * | 1369 | * |
1370 | * Verifies the hardware needs to allow ARPs to be processed by the host. | 1370 | * Verifies the hardware needs to leave interface enabled so that frames can |
1371 | * be directed to and from the management interface. | ||
1371 | **/ | 1372 | **/ |
1372 | bool igb_enable_mng_pass_thru(struct e1000_hw *hw) | 1373 | bool igb_enable_mng_pass_thru(struct e1000_hw *hw) |
1373 | { | 1374 | { |
@@ -1380,8 +1381,7 @@ bool igb_enable_mng_pass_thru(struct e1000_hw *hw) | |||
1380 | 1381 | ||
1381 | manc = rd32(E1000_MANC); | 1382 | manc = rd32(E1000_MANC); |
1382 | 1383 | ||
1383 | if (!(manc & E1000_MANC_RCV_TCO_EN) || | 1384 | if (!(manc & E1000_MANC_RCV_TCO_EN)) |
1384 | !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) | ||
1385 | goto out; | 1385 | goto out; |
1386 | 1386 | ||
1387 | if (hw->mac.arc_subsystem_valid) { | 1387 | if (hw->mac.arc_subsystem_valid) { |
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index a1775705b24c..3b772b822a5d 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h | |||
@@ -267,7 +267,6 @@ struct igb_adapter { | |||
267 | 267 | ||
268 | /* TX */ | 268 | /* TX */ |
269 | struct igb_ring *tx_ring[16]; | 269 | struct igb_ring *tx_ring[16]; |
270 | unsigned long tx_queue_len; | ||
271 | u32 tx_timeout_count; | 270 | u32 tx_timeout_count; |
272 | 271 | ||
273 | /* RX */ | 272 | /* RX */ |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 0ed25f059a00..01c65c7447e1 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -72,6 +72,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { | |||
72 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, | 72 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, |
73 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, | 73 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, |
74 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, | 74 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, |
75 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, | ||
75 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, | 76 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, |
76 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, | 77 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, |
77 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, | 78 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, |
@@ -1104,9 +1105,6 @@ static void igb_configure(struct igb_adapter *adapter) | |||
1104 | struct igb_ring *ring = adapter->rx_ring[i]; | 1105 | struct igb_ring *ring = adapter->rx_ring[i]; |
1105 | igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); | 1106 | igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); |
1106 | } | 1107 | } |
1107 | |||
1108 | |||
1109 | adapter->tx_queue_len = netdev->tx_queue_len; | ||
1110 | } | 1108 | } |
1111 | 1109 | ||
1112 | /** | 1110 | /** |
@@ -1212,7 +1210,6 @@ void igb_down(struct igb_adapter *adapter) | |||
1212 | del_timer_sync(&adapter->watchdog_timer); | 1210 | del_timer_sync(&adapter->watchdog_timer); |
1213 | del_timer_sync(&adapter->phy_info_timer); | 1211 | del_timer_sync(&adapter->phy_info_timer); |
1214 | 1212 | ||
1215 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
1216 | netif_carrier_off(netdev); | 1213 | netif_carrier_off(netdev); |
1217 | 1214 | ||
1218 | /* record the stats before reset*/ | 1215 | /* record the stats before reset*/ |
@@ -3105,17 +3102,13 @@ static void igb_watchdog_task(struct work_struct *work) | |||
3105 | ((ctrl & E1000_CTRL_RFCE) ? "RX" : | 3102 | ((ctrl & E1000_CTRL_RFCE) ? "RX" : |
3106 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); | 3103 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); |
3107 | 3104 | ||
3108 | /* tweak tx_queue_len according to speed/duplex and | 3105 | /* adjust timeout factor according to speed/duplex */ |
3109 | * adjust the timeout factor */ | ||
3110 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
3111 | adapter->tx_timeout_factor = 1; | 3106 | adapter->tx_timeout_factor = 1; |
3112 | switch (adapter->link_speed) { | 3107 | switch (adapter->link_speed) { |
3113 | case SPEED_10: | 3108 | case SPEED_10: |
3114 | netdev->tx_queue_len = 10; | ||
3115 | adapter->tx_timeout_factor = 14; | 3109 | adapter->tx_timeout_factor = 14; |
3116 | break; | 3110 | break; |
3117 | case SPEED_100: | 3111 | case SPEED_100: |
3118 | netdev->tx_queue_len = 100; | ||
3119 | /* maybe add some timeout factor ? */ | 3112 | /* maybe add some timeout factor ? */ |
3120 | break; | 3113 | break; |
3121 | } | 3114 | } |
@@ -3962,7 +3955,7 @@ void igb_update_stats(struct igb_adapter *adapter) | |||
3962 | struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); | 3955 | struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); |
3963 | struct e1000_hw *hw = &adapter->hw; | 3956 | struct e1000_hw *hw = &adapter->hw; |
3964 | struct pci_dev *pdev = adapter->pdev; | 3957 | struct pci_dev *pdev = adapter->pdev; |
3965 | u32 rnbc, reg; | 3958 | u32 reg, mpc; |
3966 | u16 phy_tmp; | 3959 | u16 phy_tmp; |
3967 | int i; | 3960 | int i; |
3968 | u64 bytes, packets; | 3961 | u64 bytes, packets; |
@@ -4020,7 +4013,9 @@ void igb_update_stats(struct igb_adapter *adapter) | |||
4020 | adapter->stats.symerrs += rd32(E1000_SYMERRS); | 4013 | adapter->stats.symerrs += rd32(E1000_SYMERRS); |
4021 | adapter->stats.sec += rd32(E1000_SEC); | 4014 | adapter->stats.sec += rd32(E1000_SEC); |
4022 | 4015 | ||
4023 | adapter->stats.mpc += rd32(E1000_MPC); | 4016 | mpc = rd32(E1000_MPC); |
4017 | adapter->stats.mpc += mpc; | ||
4018 | net_stats->rx_fifo_errors += mpc; | ||
4024 | adapter->stats.scc += rd32(E1000_SCC); | 4019 | adapter->stats.scc += rd32(E1000_SCC); |
4025 | adapter->stats.ecol += rd32(E1000_ECOL); | 4020 | adapter->stats.ecol += rd32(E1000_ECOL); |
4026 | adapter->stats.mcc += rd32(E1000_MCC); | 4021 | adapter->stats.mcc += rd32(E1000_MCC); |
@@ -4035,9 +4030,7 @@ void igb_update_stats(struct igb_adapter *adapter) | |||
4035 | adapter->stats.gptc += rd32(E1000_GPTC); | 4030 | adapter->stats.gptc += rd32(E1000_GPTC); |
4036 | adapter->stats.gotc += rd32(E1000_GOTCL); | 4031 | adapter->stats.gotc += rd32(E1000_GOTCL); |
4037 | rd32(E1000_GOTCH); /* clear GOTCL */ | 4032 | rd32(E1000_GOTCH); /* clear GOTCL */ |
4038 | rnbc = rd32(E1000_RNBC); | 4033 | adapter->stats.rnbc += rd32(E1000_RNBC); |
4039 | adapter->stats.rnbc += rnbc; | ||
4040 | net_stats->rx_fifo_errors += rnbc; | ||
4041 | adapter->stats.ruc += rd32(E1000_RUC); | 4034 | adapter->stats.ruc += rd32(E1000_RUC); |
4042 | adapter->stats.rfc += rd32(E1000_RFC); | 4035 | adapter->stats.rfc += rd32(E1000_RFC); |
4043 | adapter->stats.rjc += rd32(E1000_RJC); | 4036 | adapter->stats.rjc += rd32(E1000_RJC); |
@@ -5109,7 +5102,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector, | |||
5109 | { | 5102 | { |
5110 | struct igb_adapter *adapter = q_vector->adapter; | 5103 | struct igb_adapter *adapter = q_vector->adapter; |
5111 | 5104 | ||
5112 | if (vlan_tag) | 5105 | if (vlan_tag && adapter->vlgrp) |
5113 | vlan_gro_receive(&q_vector->napi, adapter->vlgrp, | 5106 | vlan_gro_receive(&q_vector->napi, adapter->vlgrp, |
5114 | vlan_tag, skb); | 5107 | vlan_tag, skb); |
5115 | else | 5108 | else |
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h index a1774b29d222..debeee2dc717 100644 --- a/drivers/net/igbvf/igbvf.h +++ b/drivers/net/igbvf/igbvf.h | |||
@@ -198,7 +198,6 @@ struct igbvf_adapter { | |||
198 | struct igbvf_ring *tx_ring /* One per active queue */ | 198 | struct igbvf_ring *tx_ring /* One per active queue */ |
199 | ____cacheline_aligned_in_smp; | 199 | ____cacheline_aligned_in_smp; |
200 | 200 | ||
201 | unsigned long tx_queue_len; | ||
202 | unsigned int restart_queue; | 201 | unsigned int restart_queue; |
203 | u32 txd_cmd; | 202 | u32 txd_cmd; |
204 | 203 | ||
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index a77afd8a14bb..b41037ed8083 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c | |||
@@ -1304,8 +1304,6 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter) | |||
1304 | 1304 | ||
1305 | /* enable Report Status bit */ | 1305 | /* enable Report Status bit */ |
1306 | adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; | 1306 | adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; |
1307 | |||
1308 | adapter->tx_queue_len = adapter->netdev->tx_queue_len; | ||
1309 | } | 1307 | } |
1310 | 1308 | ||
1311 | /** | 1309 | /** |
@@ -1524,7 +1522,6 @@ void igbvf_down(struct igbvf_adapter *adapter) | |||
1524 | 1522 | ||
1525 | del_timer_sync(&adapter->watchdog_timer); | 1523 | del_timer_sync(&adapter->watchdog_timer); |
1526 | 1524 | ||
1527 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
1528 | netif_carrier_off(netdev); | 1525 | netif_carrier_off(netdev); |
1529 | 1526 | ||
1530 | /* record the stats before reset*/ | 1527 | /* record the stats before reset*/ |
@@ -1857,21 +1854,15 @@ static void igbvf_watchdog_task(struct work_struct *work) | |||
1857 | &adapter->link_duplex); | 1854 | &adapter->link_duplex); |
1858 | igbvf_print_link_info(adapter); | 1855 | igbvf_print_link_info(adapter); |
1859 | 1856 | ||
1860 | /* | 1857 | /* adjust timeout factor according to speed/duplex */ |
1861 | * tweak tx_queue_len according to speed/duplex | ||
1862 | * and adjust the timeout factor | ||
1863 | */ | ||
1864 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
1865 | adapter->tx_timeout_factor = 1; | 1858 | adapter->tx_timeout_factor = 1; |
1866 | switch (adapter->link_speed) { | 1859 | switch (adapter->link_speed) { |
1867 | case SPEED_10: | 1860 | case SPEED_10: |
1868 | txb2b = 0; | 1861 | txb2b = 0; |
1869 | netdev->tx_queue_len = 10; | ||
1870 | adapter->tx_timeout_factor = 16; | 1862 | adapter->tx_timeout_factor = 16; |
1871 | break; | 1863 | break; |
1872 | case SPEED_100: | 1864 | case SPEED_100: |
1873 | txb2b = 0; | 1865 | txb2b = 0; |
1874 | netdev->tx_queue_len = 100; | ||
1875 | /* maybe add some timeout factor ? */ | 1866 | /* maybe add some timeout factor ? */ |
1876 | break; | 1867 | break; |
1877 | } | 1868 | } |
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 19e94ee155a2..79c35ae3718c 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h | |||
@@ -204,14 +204,17 @@ enum ixgbe_ring_f_enum { | |||
204 | #define IXGBE_MAX_FDIR_INDICES 64 | 204 | #define IXGBE_MAX_FDIR_INDICES 64 |
205 | #ifdef IXGBE_FCOE | 205 | #ifdef IXGBE_FCOE |
206 | #define IXGBE_MAX_FCOE_INDICES 8 | 206 | #define IXGBE_MAX_FCOE_INDICES 8 |
207 | #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) | ||
208 | #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) | ||
209 | #else | ||
210 | #define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES | ||
211 | #define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES | ||
207 | #endif /* IXGBE_FCOE */ | 212 | #endif /* IXGBE_FCOE */ |
208 | struct ixgbe_ring_feature { | 213 | struct ixgbe_ring_feature { |
209 | int indices; | 214 | int indices; |
210 | int mask; | 215 | int mask; |
211 | } ____cacheline_internodealigned_in_smp; | 216 | } ____cacheline_internodealigned_in_smp; |
212 | 217 | ||
213 | #define MAX_RX_QUEUES 128 | ||
214 | #define MAX_TX_QUEUES 128 | ||
215 | 218 | ||
216 | #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ | 219 | #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ |
217 | ? 8 : 1) | 220 | ? 8 : 1) |
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 1f30e163bd9c..b405a00817c6 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #define IXGBE_82599_MC_TBL_SIZE 128 | 39 | #define IXGBE_82599_MC_TBL_SIZE 128 |
40 | #define IXGBE_82599_VFT_TBL_SIZE 128 | 40 | #define IXGBE_82599_VFT_TBL_SIZE 128 |
41 | 41 | ||
42 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | ||
42 | s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | 43 | s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, |
43 | ixgbe_link_speed speed, | 44 | ixgbe_link_speed speed, |
44 | bool autoneg, | 45 | bool autoneg, |
@@ -68,7 +69,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) | |||
68 | if (hw->phy.multispeed_fiber) { | 69 | if (hw->phy.multispeed_fiber) { |
69 | /* Set up dual speed SFP+ support */ | 70 | /* Set up dual speed SFP+ support */ |
70 | mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; | 71 | mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; |
72 | mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; | ||
71 | } else { | 73 | } else { |
74 | mac->ops.flap_tx_laser = NULL; | ||
72 | if ((mac->ops.get_media_type(hw) == | 75 | if ((mac->ops.get_media_type(hw) == |
73 | ixgbe_media_type_backplane) && | 76 | ixgbe_media_type_backplane) && |
74 | (hw->phy.smart_speed == ixgbe_smart_speed_auto || | 77 | (hw->phy.smart_speed == ixgbe_smart_speed_auto || |
@@ -413,6 +416,41 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, | |||
413 | } | 416 | } |
414 | 417 | ||
415 | /** | 418 | /** |
419 | * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser | ||
420 | * @hw: pointer to hardware structure | ||
421 | * | ||
422 | * When the driver changes the link speeds that it can support, | ||
423 | * it sets autotry_restart to true to indicate that we need to | ||
424 | * initiate a new autotry session with the link partner. To do | ||
425 | * so, we set the speed then disable and re-enable the tx laser, to | ||
426 | * alert the link partner that it also needs to restart autotry on its | ||
427 | * end. This is consistent with true clause 37 autoneg, which also | ||
428 | * involves a loss of signal. | ||
429 | **/ | ||
430 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | ||
431 | { | ||
432 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | ||
433 | |||
434 | hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); | ||
435 | |||
436 | if (hw->mac.autotry_restart) { | ||
437 | /* Disable tx laser; allow 100us to go dark per spec */ | ||
438 | esdp_reg |= IXGBE_ESDP_SDP3; | ||
439 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
440 | IXGBE_WRITE_FLUSH(hw); | ||
441 | udelay(100); | ||
442 | |||
443 | /* Enable tx laser; allow 100ms to light up */ | ||
444 | esdp_reg &= ~IXGBE_ESDP_SDP3; | ||
445 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
446 | IXGBE_WRITE_FLUSH(hw); | ||
447 | msleep(100); | ||
448 | |||
449 | hw->mac.autotry_restart = false; | ||
450 | } | ||
451 | } | ||
452 | |||
453 | /** | ||
416 | * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed | 454 | * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed |
417 | * @hw: pointer to hardware structure | 455 | * @hw: pointer to hardware structure |
418 | * @speed: new link speed | 456 | * @speed: new link speed |
@@ -440,16 +478,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | |||
440 | speed &= phy_link_speed; | 478 | speed &= phy_link_speed; |
441 | 479 | ||
442 | /* | 480 | /* |
443 | * When the driver changes the link speeds that it can support, | ||
444 | * it sets autotry_restart to true to indicate that we need to | ||
445 | * initiate a new autotry session with the link partner. To do | ||
446 | * so, we set the speed then disable and re-enable the tx laser, to | ||
447 | * alert the link partner that it also needs to restart autotry on its | ||
448 | * end. This is consistent with true clause 37 autoneg, which also | ||
449 | * involves a loss of signal. | ||
450 | */ | ||
451 | |||
452 | /* | ||
453 | * Try each speed one by one, highest priority first. We do this in | 481 | * Try each speed one by one, highest priority first. We do this in |
454 | * software because 10gb fiber doesn't support speed autonegotiation. | 482 | * software because 10gb fiber doesn't support speed autonegotiation. |
455 | */ | 483 | */ |
@@ -466,6 +494,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | |||
466 | /* Set the module link speed */ | 494 | /* Set the module link speed */ |
467 | esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); | 495 | esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); |
468 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | 496 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); |
497 | IXGBE_WRITE_FLUSH(hw); | ||
469 | 498 | ||
470 | /* Allow module to change analog characteristics (1G->10G) */ | 499 | /* Allow module to change analog characteristics (1G->10G) */ |
471 | msleep(40); | 500 | msleep(40); |
@@ -478,19 +507,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | |||
478 | return status; | 507 | return status; |
479 | 508 | ||
480 | /* Flap the tx laser if it has not already been done */ | 509 | /* Flap the tx laser if it has not already been done */ |
481 | if (hw->mac.autotry_restart) { | 510 | hw->mac.ops.flap_tx_laser(hw); |
482 | /* Disable tx laser; allow 100us to go dark per spec */ | ||
483 | esdp_reg |= IXGBE_ESDP_SDP3; | ||
484 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
485 | udelay(100); | ||
486 | |||
487 | /* Enable tx laser; allow 2ms to light up per spec */ | ||
488 | esdp_reg &= ~IXGBE_ESDP_SDP3; | ||
489 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
490 | msleep(2); | ||
491 | |||
492 | hw->mac.autotry_restart = false; | ||
493 | } | ||
494 | 511 | ||
495 | /* | 512 | /* |
496 | * Wait for the controller to acquire link. Per IEEE 802.3ap, | 513 | * Wait for the controller to acquire link. Per IEEE 802.3ap, |
@@ -525,6 +542,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | |||
525 | esdp_reg &= ~IXGBE_ESDP_SDP5; | 542 | esdp_reg &= ~IXGBE_ESDP_SDP5; |
526 | esdp_reg |= IXGBE_ESDP_SDP5_DIR; | 543 | esdp_reg |= IXGBE_ESDP_SDP5_DIR; |
527 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | 544 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); |
545 | IXGBE_WRITE_FLUSH(hw); | ||
528 | 546 | ||
529 | /* Allow module to change analog characteristics (10G->1G) */ | 547 | /* Allow module to change analog characteristics (10G->1G) */ |
530 | msleep(40); | 548 | msleep(40); |
@@ -537,19 +555,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | |||
537 | return status; | 555 | return status; |
538 | 556 | ||
539 | /* Flap the tx laser if it has not already been done */ | 557 | /* Flap the tx laser if it has not already been done */ |
540 | if (hw->mac.autotry_restart) { | 558 | hw->mac.ops.flap_tx_laser(hw); |
541 | /* Disable tx laser; allow 100us to go dark per spec */ | ||
542 | esdp_reg |= IXGBE_ESDP_SDP3; | ||
543 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
544 | udelay(100); | ||
545 | |||
546 | /* Enable tx laser; allow 2ms to light up per spec */ | ||
547 | esdp_reg &= ~IXGBE_ESDP_SDP3; | ||
548 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
549 | msleep(2); | ||
550 | |||
551 | hw->mac.autotry_restart = false; | ||
552 | } | ||
553 | 559 | ||
554 | /* Wait for the link partner to also set speed */ | 560 | /* Wait for the link partner to also set speed */ |
555 | msleep(100); | 561 | msleep(100); |
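The 82599 hunks above lift the duplicated "disable the Tx laser, wait, re-enable it" sequence into ixgbe_flap_tx_laser_multispeed_fiber() and publish it as a mac.ops callback that is installed only for multispeed-fiber parts (and left NULL otherwise), with IXGBE_WRITE_FLUSH added so the ESDP writes are posted before the delays. The ops-hook shape, reduced to an illustrative sketch:

```c
#include <linux/types.h>

struct demo_hw;

struct demo_mac_ops {
	/* optional hook: only multispeed-fiber MACs install it */
	void (*flap_tx_laser)(struct demo_hw *hw);
};

struct demo_hw {
	struct demo_mac_ops ops;
	bool autotry_restart;
};

/* Callers no longer open-code the laser flap in two places; they just
 * invoke the hook when a new autotry session should be signalled. */
static void demo_restart_autotry(struct demo_hw *hw)
{
	if (hw->ops.flap_tx_laser)
		hw->ops.flap_tx_laser(hw);
}
```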
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 7949a446e4c7..1959ef76c962 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -1853,6 +1853,26 @@ static void ixgbe_diag_test(struct net_device *netdev, | |||
1853 | if (ixgbe_link_test(adapter, &data[4])) | 1853 | if (ixgbe_link_test(adapter, &data[4])) |
1854 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1854 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1855 | 1855 | ||
1856 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | ||
1857 | int i; | ||
1858 | for (i = 0; i < adapter->num_vfs; i++) { | ||
1859 | if (adapter->vfinfo[i].clear_to_send) { | ||
1860 | netdev_warn(netdev, "%s", | ||
1861 | "offline diagnostic is not " | ||
1862 | "supported when VFs are " | ||
1863 | "present\n"); | ||
1864 | data[0] = 1; | ||
1865 | data[1] = 1; | ||
1866 | data[2] = 1; | ||
1867 | data[3] = 1; | ||
1868 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1869 | clear_bit(__IXGBE_TESTING, | ||
1870 | &adapter->state); | ||
1871 | goto skip_ol_tests; | ||
1872 | } | ||
1873 | } | ||
1874 | } | ||
1875 | |||
1856 | if (if_running) | 1876 | if (if_running) |
1857 | /* indicate we're in test mode */ | 1877 | /* indicate we're in test mode */ |
1858 | dev_close(netdev); | 1878 | dev_close(netdev); |
@@ -1908,6 +1928,7 @@ skip_loopback: | |||
1908 | 1928 | ||
1909 | clear_bit(__IXGBE_TESTING, &adapter->state); | 1929 | clear_bit(__IXGBE_TESTING, &adapter->state); |
1910 | } | 1930 | } |
1931 | skip_ol_tests: | ||
1911 | msleep_interruptible(4 * 1000); | 1932 | msleep_interruptible(4 * 1000); |
1912 | } | 1933 | } |
1913 | 1934 | ||
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 4123dec0dfb7..9276d5965b0d 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c | |||
@@ -202,6 +202,15 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
202 | addr = sg_dma_address(sg); | 202 | addr = sg_dma_address(sg); |
203 | len = sg_dma_len(sg); | 203 | len = sg_dma_len(sg); |
204 | while (len) { | 204 | while (len) { |
205 | /* max number of buffers allowed in one DDP context */ | ||
206 | if (j >= IXGBE_BUFFCNT_MAX) { | ||
207 | netif_err(adapter, drv, adapter->netdev, | ||
208 | "xid=%x:%d,%d,%d:addr=%llx " | ||
209 | "not enough descriptors\n", | ||
210 | xid, i, j, dmacount, (u64)addr); | ||
211 | goto out_noddp_free; | ||
212 | } | ||
213 | |||
205 | /* get the offset of length of current buffer */ | 214 | /* get the offset of length of current buffer */ |
206 | thisoff = addr & ((dma_addr_t)bufflen - 1); | 215 | thisoff = addr & ((dma_addr_t)bufflen - 1); |
207 | thislen = min((bufflen - thisoff), len); | 216 | thislen = min((bufflen - thisoff), len); |
@@ -227,20 +236,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
227 | len -= thislen; | 236 | len -= thislen; |
228 | addr += thislen; | 237 | addr += thislen; |
229 | j++; | 238 | j++; |
230 | /* max number of buffers allowed in one DDP context */ | ||
231 | if (j > IXGBE_BUFFCNT_MAX) { | ||
232 | DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx " | ||
233 | "not enough descriptors\n", | ||
234 | xid, i, j, dmacount, (u64)addr); | ||
235 | goto out_noddp_free; | ||
236 | } | ||
237 | } | 239 | } |
238 | } | 240 | } |
239 | /* only the last buffer may have non-full bufflen */ | 241 | /* only the last buffer may have non-full bufflen */ |
240 | lastsize = thisoff + thislen; | 242 | lastsize = thisoff + thislen; |
241 | 243 | ||
242 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); | 244 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); |
243 | fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT); | 245 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); |
244 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); | 246 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); |
245 | fcbuff |= (IXGBE_FCBUFF_VALID); | 247 | fcbuff |= (IXGBE_FCBUFF_VALID); |
246 | 248 | ||
@@ -520,6 +522,9 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
520 | /* Enable L2 eth type filter for FCoE */ | 522 | /* Enable L2 eth type filter for FCoE */ |
521 | IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), | 523 | IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), |
522 | (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); | 524 | (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); |
525 | /* Enable L2 eth type filter for FIP */ | ||
526 | IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), | ||
527 | (ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); | ||
523 | if (adapter->ring_feature[RING_F_FCOE].indices) { | 528 | if (adapter->ring_feature[RING_F_FCOE].indices) { |
524 | /* Use multiple rx queues for FCoE by redirection table */ | 529 | /* Use multiple rx queues for FCoE by redirection table */ |
525 | for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { | 530 | for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { |
@@ -530,6 +535,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
530 | } | 535 | } |
531 | IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); | 536 | IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); |
532 | IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); | 537 | IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); |
538 | fcoe_i = f->mask; | ||
539 | fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; | ||
540 | fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; | ||
541 | IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), | ||
542 | IXGBE_ETQS_QUEUE_EN | | ||
543 | (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); | ||
533 | } else { | 544 | } else { |
534 | /* Use single rx queue for FCoE */ | 545 | /* Use single rx queue for FCoE */ |
535 | fcoe_i = f->mask; | 546 | fcoe_i = f->mask; |
@@ -539,6 +550,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
539 | IXGBE_ETQS_QUEUE_EN | | 550 | IXGBE_ETQS_QUEUE_EN | |
540 | (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); | 551 | (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); |
541 | } | 552 | } |
553 | /* send FIP frames to the first FCoE queue */ | ||
554 | fcoe_i = f->mask; | ||
555 | fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; | ||
556 | IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), | ||
557 | IXGBE_ETQS_QUEUE_EN | | ||
558 | (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); | ||
542 | 559 | ||
543 | IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, | 560 | IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, |
544 | IXGBE_FCRXCTRL_FCOELLI | | 561 | IXGBE_FCRXCTRL_FCOELLI | |
@@ -614,9 +631,9 @@ int ixgbe_fcoe_enable(struct net_device *netdev) | |||
614 | netdev->vlan_features |= NETIF_F_FSO; | 631 | netdev->vlan_features |= NETIF_F_FSO; |
615 | netdev->vlan_features |= NETIF_F_FCOE_MTU; | 632 | netdev->vlan_features |= NETIF_F_FCOE_MTU; |
616 | netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; | 633 | netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; |
617 | netdev_features_change(netdev); | ||
618 | 634 | ||
619 | ixgbe_init_interrupt_scheme(adapter); | 635 | ixgbe_init_interrupt_scheme(adapter); |
636 | netdev_features_change(netdev); | ||
620 | 637 | ||
621 | if (netif_running(netdev)) | 638 | if (netif_running(netdev)) |
622 | netdev->netdev_ops->ndo_open(netdev); | 639 | netdev->netdev_ops->ndo_open(netdev); |
@@ -660,11 +677,11 @@ int ixgbe_fcoe_disable(struct net_device *netdev) | |||
660 | netdev->vlan_features &= ~NETIF_F_FSO; | 677 | netdev->vlan_features &= ~NETIF_F_FSO; |
661 | netdev->vlan_features &= ~NETIF_F_FCOE_MTU; | 678 | netdev->vlan_features &= ~NETIF_F_FCOE_MTU; |
662 | netdev->fcoe_ddp_xid = 0; | 679 | netdev->fcoe_ddp_xid = 0; |
663 | netdev_features_change(netdev); | ||
664 | 680 | ||
665 | ixgbe_cleanup_fcoe(adapter); | 681 | ixgbe_cleanup_fcoe(adapter); |
666 | |||
667 | ixgbe_init_interrupt_scheme(adapter); | 682 | ixgbe_init_interrupt_scheme(adapter); |
683 | netdev_features_change(netdev); | ||
684 | |||
668 | if (netif_running(netdev)) | 685 | if (netif_running(netdev)) |
669 | netdev->netdev_ops->ndo_open(netdev); | 686 | netdev->netdev_ops->ndo_open(netdev); |
670 | rc = 0; | 687 | rc = 0; |
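
The ixgbe_fcoe.c hunks above add a second L2 ethertype filter so that FIP (FCoE Initialization Protocol) frames are matched and steered to an FCoE receive queue, mirroring the existing ETH_P_FCOE filter. The sketch below only illustrates how such a filter/steering register pair is composed from an ethertype, an enable bit and a queue index; the bit positions and macro names are made-up placeholders, not the real ixgbe register layout (only ETH_P_FIP is the actual Linux ethertype value).

#include <stdint.h>
#include <stdio.h>

#define ETH_P_FIP           0x8914       /* FCoE Initialization Protocol ethertype */

/* Hypothetical bit layout, for illustration only (not the ixgbe registers). */
#define ETQF_FILTER_EN      (1u << 31)   /* ethertype filter enable  */
#define ETQS_QUEUE_EN       (1u << 31)   /* queue steering enable    */
#define ETQS_RX_QUEUE_SHIFT 16           /* rx queue index position  */

static uint32_t etqf_value(uint16_t ethertype)
{
	/* match this ethertype and enable the filter */
	return (uint32_t)ethertype | ETQF_FILTER_EN;
}

static uint32_t etqs_value(unsigned int rx_queue)
{
	/* steer matching frames to rx_queue */
	return ETQS_QUEUE_EN | ((uint32_t)rx_queue << ETQS_RX_QUEUE_SHIFT);
}

int main(void)
{
	printf("ETQF = 0x%08x, ETQS = 0x%08x\n",
	       (unsigned int)etqf_value(ETH_P_FIP),
	       (unsigned int)etqs_value(5));
	return 0;
}
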
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 684af371462d..0c553f6cb534 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -935,10 +935,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
935 | if (skb->prev) | 935 | if (skb->prev) |
936 | skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); | 936 | skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); |
937 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { | 937 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { |
938 | if (IXGBE_RSC_CB(skb)->dma) | 938 | if (IXGBE_RSC_CB(skb)->dma) { |
939 | pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma, | 939 | pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma, |
940 | rx_ring->rx_buf_len, | 940 | rx_ring->rx_buf_len, |
941 | PCI_DMA_FROMDEVICE); | 941 | PCI_DMA_FROMDEVICE); |
942 | IXGBE_RSC_CB(skb)->dma = 0; | ||
943 | } | ||
942 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) | 944 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) |
943 | rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; | 945 | rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; |
944 | else | 946 | else |
@@ -3054,6 +3056,14 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) | |||
3054 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) | 3056 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) |
3055 | msleep(1); | 3057 | msleep(1); |
3056 | ixgbe_down(adapter); | 3058 | ixgbe_down(adapter); |
3059 | /* | ||
3060 | * If SR-IOV enabled then wait a bit before bringing the adapter | ||
3061 | * back up to give the VFs time to respond to the reset. The | ||
3062 | * two second wait is based upon the watchdog timer cycle in | ||
3063 | * the VF driver. | ||
3064 | */ | ||
3065 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
3066 | msleep(2000); | ||
3057 | ixgbe_up(adapter); | 3067 | ixgbe_up(adapter); |
3058 | clear_bit(__IXGBE_RESETTING, &adapter->state); | 3068 | clear_bit(__IXGBE_RESETTING, &adapter->state); |
3059 | } | 3069 | } |
@@ -3126,10 +3136,12 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
3126 | rx_buffer_info->skb = NULL; | 3136 | rx_buffer_info->skb = NULL; |
3127 | do { | 3137 | do { |
3128 | struct sk_buff *this = skb; | 3138 | struct sk_buff *this = skb; |
3129 | if (IXGBE_RSC_CB(this)->dma) | 3139 | if (IXGBE_RSC_CB(this)->dma) { |
3130 | pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma, | 3140 | pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma, |
3131 | rx_ring->rx_buf_len, | 3141 | rx_ring->rx_buf_len, |
3132 | PCI_DMA_FROMDEVICE); | 3142 | PCI_DMA_FROMDEVICE); |
3143 | IXGBE_RSC_CB(this)->dma = 0; | ||
3144 | } | ||
3133 | skb = skb->prev; | 3145 | skb = skb->prev; |
3134 | dev_kfree_skb(this); | 3146 | dev_kfree_skb(this); |
3135 | } while (skb); | 3147 | } while (skb); |
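
Both RSC hunks in ixgbe_main.c apply the same pattern: once a stored DMA handle has been unmapped, zero it so that a later cleanup path cannot unmap it a second time. A generic sketch of that guard, with a made-up unmap_buffer() standing in for pci_unmap_single():

#include <stdint.h>
#include <stddef.h>

/* Stand-in for a driver-private per-buffer control block. */
struct rx_cb {
	uint64_t dma;            /* 0 means "nothing mapped" */
	size_t   len;
};

/* Hypothetical unmap primitive standing in for pci_unmap_single(). */
static void unmap_buffer(uint64_t dma, size_t len)
{
	(void)dma;
	(void)len;
	/* ... hand the mapping back to the DMA API ... */
}

static void release_rx_cb(struct rx_cb *cb)
{
	if (cb->dma) {
		unmap_buffer(cb->dma, cb->len);
		cb->dma = 0;     /* clear the handle so a second call is a no-op */
	}
}

int main(void)
{
	struct rx_cb cb = { .dma = 0x1000, .len = 2048 };

	release_rx_cb(&cb);      /* unmaps once */
	release_rx_cb(&cb);      /* safe: handle already cleared */
	return 0;
}
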
@@ -3232,13 +3244,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3232 | 3244 | ||
3233 | /* disable receive for all VFs and wait one second */ | 3245 | /* disable receive for all VFs and wait one second */ |
3234 | if (adapter->num_vfs) { | 3246 | if (adapter->num_vfs) { |
3235 | for (i = 0 ; i < adapter->num_vfs; i++) | ||
3236 | adapter->vfinfo[i].clear_to_send = 0; | ||
3237 | |||
3238 | /* ping all the active vfs to let them know we are going down */ | 3247 | /* ping all the active vfs to let them know we are going down */ |
3239 | ixgbe_ping_all_vfs(adapter); | 3248 | ixgbe_ping_all_vfs(adapter); |
3249 | |||
3240 | /* Disable all VFTE/VFRE TX/RX */ | 3250 | /* Disable all VFTE/VFRE TX/RX */ |
3241 | ixgbe_disable_tx_rx(adapter); | 3251 | ixgbe_disable_tx_rx(adapter); |
3252 | |||
3253 | /* Mark all the VFs as inactive */ | ||
3254 | for (i = 0 ; i < adapter->num_vfs; i++) | ||
3255 | adapter->vfinfo[i].clear_to_send = 0; | ||
3242 | } | 3256 | } |
3243 | 3257 | ||
3244 | /* disable receives */ | 3258 | /* disable receives */ |
@@ -5018,6 +5032,7 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work) | |||
5018 | autoneg = hw->phy.autoneg_advertised; | 5032 | autoneg = hw->phy.autoneg_advertised; |
5019 | if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) | 5033 | if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) |
5020 | hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); | 5034 | hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); |
5035 | hw->mac.autotry_restart = false; | ||
5021 | if (hw->mac.ops.setup_link) | 5036 | if (hw->mac.ops.setup_link) |
5022 | hw->mac.ops.setup_link(hw, autoneg, negotiation, true); | 5037 | hw->mac.ops.setup_link(hw, autoneg, negotiation, true); |
5023 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; | 5038 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; |
@@ -5633,7 +5648,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
5633 | 5648 | ||
5634 | #ifdef IXGBE_FCOE | 5649 | #ifdef IXGBE_FCOE |
5635 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | 5650 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && |
5636 | (skb->protocol == htons(ETH_P_FCOE))) { | 5651 | ((skb->protocol == htons(ETH_P_FCOE)) || |
5652 | (skb->protocol == htons(ETH_P_FIP)))) { | ||
5637 | txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); | 5653 | txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); |
5638 | txq += adapter->ring_feature[RING_F_FCOE].mask; | 5654 | txq += adapter->ring_feature[RING_F_FCOE].mask; |
5639 | return txq; | 5655 | return txq; |
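
The ixgbe_select_queue() hunk extends the FCoE transmit-queue selection to FIP frames. The arithmetic is a fold-and-offset: mask the hashed queue index into the (power-of-two) FCoE ring count, then add an offset that places it in the FCoE range. A minimal sketch of that calculation, with example ring numbers:

#include <stdio.h>

/* Assumed layout: fcoe_indices rings starting at fcoe_offset. */
static unsigned int fcoe_select_queue(unsigned int hashed_txq,
				      unsigned int fcoe_indices,   /* power of two */
				      unsigned int fcoe_offset)
{
	unsigned int txq = hashed_txq & (fcoe_indices - 1);  /* fold into ring count */
	return txq + fcoe_offset;                            /* shift into FCoE range */
}

int main(void)
{
	/* e.g. 8 FCoE rings starting at ring 16 */
	for (unsigned int h = 0; h < 4; h++)
		printf("hash %u -> queue %u\n", h, fcoe_select_queue(h, 8, 16));
	return 0;
}
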
@@ -5680,18 +5696,25 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
5680 | 5696 | ||
5681 | tx_ring = adapter->tx_ring[skb->queue_mapping]; | 5697 | tx_ring = adapter->tx_ring[skb->queue_mapping]; |
5682 | 5698 | ||
5683 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | ||
5684 | (skb->protocol == htons(ETH_P_FCOE))) { | ||
5685 | tx_flags |= IXGBE_TX_FLAGS_FCOE; | ||
5686 | #ifdef IXGBE_FCOE | 5699 | #ifdef IXGBE_FCOE |
5700 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
5687 | #ifdef CONFIG_IXGBE_DCB | 5701 | #ifdef CONFIG_IXGBE_DCB |
5688 | tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK | 5702 | /* for FCoE with DCB, we force the priority to what |
5689 | << IXGBE_TX_FLAGS_VLAN_SHIFT); | 5703 | * was specified by the switch */ |
5690 | tx_flags |= ((adapter->fcoe.up << 13) | 5704 | if ((skb->protocol == htons(ETH_P_FCOE)) || |
5691 | << IXGBE_TX_FLAGS_VLAN_SHIFT); | 5705 | (skb->protocol == htons(ETH_P_FIP))) { |
5692 | #endif | 5706 | tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK |
5707 | << IXGBE_TX_FLAGS_VLAN_SHIFT); | ||
5708 | tx_flags |= ((adapter->fcoe.up << 13) | ||
5709 | << IXGBE_TX_FLAGS_VLAN_SHIFT); | ||
5710 | } | ||
5693 | #endif | 5711 | #endif |
5712 | /* flag for FCoE offloads */ | ||
5713 | if (skb->protocol == htons(ETH_P_FCOE)) | ||
5714 | tx_flags |= IXGBE_TX_FLAGS_FCOE; | ||
5694 | } | 5715 | } |
5716 | #endif | ||
5717 | |||
5695 | /* four things can cause us to need a context descriptor */ | 5718 | /* four things can cause us to need a context descriptor */ |
5696 | if (skb_is_gso(skb) || | 5719 | if (skb_is_gso(skb) || |
5697 | (skb->ip_summed == CHECKSUM_PARTIAL) || | 5720 | (skb->ip_summed == CHECKSUM_PARTIAL) || |
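
For FCoE and FIP under DCB, the transmit path now overrides the 802.1p priority carried in tx_flags: it clears the priority bits of the embedded VLAN tag and inserts the priority negotiated with the switch. The user priority occupies bits 13-15 of the VLAN TCI, which is why the value is shifted left by 13. A sketch of that clear-and-insert step, using illustrative shift and mask values rather than the driver's own macros:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: a 16-bit VLAN TCI stored in the upper half of tx_flags. */
#define VLAN_SHIFT     16
#define VLAN_PRIO_MASK 0xe000u   /* 802.1p user priority: TCI bits 13-15 */

static uint32_t force_priority(uint32_t tx_flags, unsigned int up)
{
	tx_flags &= ~((uint32_t)VLAN_PRIO_MASK << VLAN_SHIFT);   /* drop old priority */
	tx_flags |= ((uint32_t)(up & 0x7) << 13) << VLAN_SHIFT;  /* insert new one    */
	return tx_flags;
}

int main(void)
{
	uint32_t flags = 0x60010000;   /* TCI 0x6001: priority 3, VLAN ID 1 */

	/* expected result: 0xa0010000, i.e. priority 5 with VLAN ID 1 kept */
	printf("0x%08x -> 0x%08x\n",
	       (unsigned int)flags, (unsigned int)force_priority(flags, 5));
	return 0;
}
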
@@ -6046,7 +6069,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6046 | indices += min_t(unsigned int, num_possible_cpus(), | 6069 | indices += min_t(unsigned int, num_possible_cpus(), |
6047 | IXGBE_MAX_FCOE_INDICES); | 6070 | IXGBE_MAX_FCOE_INDICES); |
6048 | #endif | 6071 | #endif |
6049 | indices = min_t(unsigned int, indices, MAX_TX_QUEUES); | ||
6050 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); | 6072 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); |
6051 | if (!netdev) { | 6073 | if (!netdev) { |
6052 | err = -ENOMEM; | 6074 | err = -ENOMEM; |
@@ -6245,9 +6267,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6245 | case IXGBE_DEV_ID_82599_KX4: | 6267 | case IXGBE_DEV_ID_82599_KX4: |
6246 | adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | | 6268 | adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | |
6247 | IXGBE_WUFC_MC | IXGBE_WUFC_BC); | 6269 | IXGBE_WUFC_MC | IXGBE_WUFC_BC); |
6248 | /* Enable ACPI wakeup in GRC */ | ||
6249 | IXGBE_WRITE_REG(hw, IXGBE_GRC, | ||
6250 | (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME)); | ||
6251 | break; | 6270 | break; |
6252 | default: | 6271 | default: |
6253 | adapter->wol = 0; | 6272 | adapter->wol = 0; |
@@ -6380,6 +6399,16 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
6380 | del_timer_sync(&adapter->sfp_timer); | 6399 | del_timer_sync(&adapter->sfp_timer); |
6381 | cancel_work_sync(&adapter->watchdog_task); | 6400 | cancel_work_sync(&adapter->watchdog_task); |
6382 | cancel_work_sync(&adapter->sfp_task); | 6401 | cancel_work_sync(&adapter->sfp_task); |
6402 | if (adapter->hw.phy.multispeed_fiber) { | ||
6403 | struct ixgbe_hw *hw = &adapter->hw; | ||
6404 | /* | ||
6405 | * Restart clause 37 autoneg, disable and re-enable | ||
6406 | * the tx laser, to clear & alert the link partner | ||
6407 | * that it needs to restart autotry | ||
6408 | */ | ||
6409 | hw->mac.autotry_restart = true; | ||
6410 | hw->mac.ops.flap_tx_laser(hw); | ||
6411 | } | ||
6383 | cancel_work_sync(&adapter->multispeed_fiber_task); | 6412 | cancel_work_sync(&adapter->multispeed_fiber_task); |
6384 | cancel_work_sync(&adapter->sfp_config_module_task); | 6413 | cancel_work_sync(&adapter->sfp_config_module_task); |
6385 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | 6414 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 2be907466593..4ec6dc1a5b75 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -1298,6 +1298,7 @@ | |||
1298 | #define IXGBE_ETQF_FILTER_BCN 1 | 1298 | #define IXGBE_ETQF_FILTER_BCN 1 |
1299 | #define IXGBE_ETQF_FILTER_FCOE 2 | 1299 | #define IXGBE_ETQF_FILTER_FCOE 2 |
1300 | #define IXGBE_ETQF_FILTER_1588 3 | 1300 | #define IXGBE_ETQF_FILTER_1588 3 |
1301 | #define IXGBE_ETQF_FILTER_FIP 4 | ||
1301 | /* VLAN Control Bit Masks */ | 1302 | /* VLAN Control Bit Masks */ |
1302 | #define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ | 1303 | #define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ |
1303 | #define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ | 1304 | #define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ |
@@ -2397,6 +2398,7 @@ struct ixgbe_mac_operations { | |||
2397 | s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); | 2398 | s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); |
2398 | 2399 | ||
2399 | /* Link */ | 2400 | /* Link */ |
2401 | void (*flap_tx_laser)(struct ixgbe_hw *); | ||
2400 | s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); | 2402 | s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); |
2401 | s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); | 2403 | s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); |
2402 | s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, | 2404 | s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, |
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c index 399be0c34c36..6fdd651abcd1 100644 --- a/drivers/net/ixgbevf/ethtool.c +++ b/drivers/net/ixgbevf/ethtool.c | |||
@@ -46,22 +46,32 @@ struct ixgbe_stats { | |||
46 | int sizeof_stat; | 46 | int sizeof_stat; |
47 | int stat_offset; | 47 | int stat_offset; |
48 | int base_stat_offset; | 48 | int base_stat_offset; |
49 | int saved_reset_offset; | ||
49 | }; | 50 | }; |
50 | 51 | ||
51 | #define IXGBEVF_STAT(m, b) sizeof(((struct ixgbevf_adapter *)0)->m), \ | 52 | #define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \ |
52 | offsetof(struct ixgbevf_adapter, m), \ | 53 | offsetof(struct ixgbevf_adapter, m), \ |
53 | offsetof(struct ixgbevf_adapter, b) | 54 | offsetof(struct ixgbevf_adapter, b), \ |
55 | offsetof(struct ixgbevf_adapter, r) | ||
54 | static struct ixgbe_stats ixgbe_gstrings_stats[] = { | 56 | static struct ixgbe_stats ixgbe_gstrings_stats[] = { |
55 | {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)}, | 57 | {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, |
56 | {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)}, | 58 | stats.saved_reset_vfgprc)}, |
57 | {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)}, | 59 | {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, |
58 | {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)}, | 60 | stats.saved_reset_vfgptc)}, |
59 | {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)}, | 61 | {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc, |
60 | {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)}, | 62 | stats.saved_reset_vfgorc)}, |
61 | {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)}, | 63 | {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, |
62 | {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)}, | 64 | stats.saved_reset_vfgotc)}, |
63 | {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)}, | 65 | {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)}, |
64 | {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)}, | 66 | {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, |
67 | stats.saved_reset_vfmprc)}, | ||
68 | {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base, | ||
69 | zero_base)}, | ||
70 | {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base, | ||
71 | zero_base)}, | ||
72 | {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base, | ||
73 | zero_base)}, | ||
74 | {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)}, | ||
65 | }; | 75 | }; |
66 | 76 | ||
67 | #define IXGBE_QUEUE_STATS_LEN 0 | 77 | #define IXGBE_QUEUE_STATS_LEN 0 |
@@ -455,10 +465,14 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, | |||
455 | ixgbe_gstrings_stats[i].stat_offset; | 465 | ixgbe_gstrings_stats[i].stat_offset; |
456 | char *b = (char *)adapter + | 466 | char *b = (char *)adapter + |
457 | ixgbe_gstrings_stats[i].base_stat_offset; | 467 | ixgbe_gstrings_stats[i].base_stat_offset; |
468 | char *r = (char *)adapter + | ||
469 | ixgbe_gstrings_stats[i].saved_reset_offset; | ||
458 | data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat == | 470 | data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat == |
459 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p) - | 471 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p) - |
460 | ((ixgbe_gstrings_stats[i].sizeof_stat == | 472 | ((ixgbe_gstrings_stats[i].sizeof_stat == |
461 | sizeof(u64)) ? *(u64 *)b : *(u32 *)b); | 473 | sizeof(u64)) ? *(u64 *)b : *(u32 *)b) + |
474 | ((ixgbe_gstrings_stats[i].sizeof_stat == | ||
475 | sizeof(u64)) ? *(u64 *)r : *(u32 *)r); | ||
462 | } | 476 | } |
463 | } | 477 | } |
464 | 478 | ||
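
The ixgbevf ethtool change extends each stat descriptor with a third offset, the counter value saved across the last reset, and ixgbevf_get_ethtool_stats() now reports current - base + saved. The technique of describing struct members by offsetof() and reading them generically is easy to try in isolation; a self-contained sketch with hypothetical counter names:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical adapter with running, base and saved-across-reset counters. */
struct adapter {
	uint64_t rx_packets, base_rx_packets, saved_rx_packets;
	uint64_t tx_packets, base_tx_packets, saved_tx_packets;
};

struct stat_desc {
	const char *name;
	size_t cur_off, base_off, saved_off;
};

#define STAT(n, cur, base, saved) \
	{ n, offsetof(struct adapter, cur), \
	     offsetof(struct adapter, base), \
	     offsetof(struct adapter, saved) }

static const struct stat_desc stats[] = {
	STAT("rx_packets", rx_packets, base_rx_packets, saved_rx_packets),
	STAT("tx_packets", tx_packets, base_tx_packets, saved_tx_packets),
};

int main(void)
{
	struct adapter a = {
		.rx_packets = 150, .base_rx_packets = 100, .saved_rx_packets = 40,
		.tx_packets =  90, .base_tx_packets =  80, .saved_tx_packets = 10,
	};

	for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		const char *p = (const char *)&a;
		uint64_t cur   = *(const uint64_t *)(p + stats[i].cur_off);
		uint64_t base  = *(const uint64_t *)(p + stats[i].base_off);
		uint64_t saved = *(const uint64_t *)(p + stats[i].saved_off);

		/* delta since the last reset, plus what was accumulated before it */
		printf("%s = %llu\n", stats[i].name,
		       (unsigned long long)(cur - base + saved));
	}
	return 0;
}
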
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index ca653c49b765..1bbbef3ee3f4 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c | |||
@@ -965,7 +965,7 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) | |||
965 | 965 | ||
966 | if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) | 966 | if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) |
967 | mod_timer(&adapter->watchdog_timer, | 967 | mod_timer(&adapter->watchdog_timer, |
968 | round_jiffies(jiffies + 10)); | 968 | round_jiffies(jiffies + 1)); |
969 | 969 | ||
970 | return IRQ_HANDLED; | 970 | return IRQ_HANDLED; |
971 | } | 971 | } |
@@ -1610,6 +1610,44 @@ static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, | |||
1610 | (adapter->rx_ring[rxr].count - 1)); | 1610 | (adapter->rx_ring[rxr].count - 1)); |
1611 | } | 1611 | } |
1612 | 1612 | ||
1613 | static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) | ||
1614 | { | ||
1615 | /* Only save pre-reset stats if there are some */ | ||
1616 | if (adapter->stats.vfgprc || adapter->stats.vfgptc) { | ||
1617 | adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - | ||
1618 | adapter->stats.base_vfgprc; | ||
1619 | adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - | ||
1620 | adapter->stats.base_vfgptc; | ||
1621 | adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - | ||
1622 | adapter->stats.base_vfgorc; | ||
1623 | adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - | ||
1624 | adapter->stats.base_vfgotc; | ||
1625 | adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - | ||
1626 | adapter->stats.base_vfmprc; | ||
1627 | } | ||
1628 | } | ||
1629 | |||
1630 | static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) | ||
1631 | { | ||
1632 | struct ixgbe_hw *hw = &adapter->hw; | ||
1633 | |||
1634 | adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); | ||
1635 | adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); | ||
1636 | adapter->stats.last_vfgorc |= | ||
1637 | (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); | ||
1638 | adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); | ||
1639 | adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); | ||
1640 | adapter->stats.last_vfgotc |= | ||
1641 | (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); | ||
1642 | adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); | ||
1643 | |||
1644 | adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; | ||
1645 | adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; | ||
1646 | adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; | ||
1647 | adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; | ||
1648 | adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; | ||
1649 | } | ||
1650 | |||
1613 | static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) | 1651 | static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) |
1614 | { | 1652 | { |
1615 | struct net_device *netdev = adapter->netdev; | 1653 | struct net_device *netdev = adapter->netdev; |
@@ -1656,6 +1694,9 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) | |||
1656 | /* enable transmits */ | 1694 | /* enable transmits */ |
1657 | netif_tx_start_all_queues(netdev); | 1695 | netif_tx_start_all_queues(netdev); |
1658 | 1696 | ||
1697 | ixgbevf_save_reset_stats(adapter); | ||
1698 | ixgbevf_init_last_counter_stats(adapter); | ||
1699 | |||
1659 | /* bring the link up in the watchdog, this could race with our first | 1700 | /* bring the link up in the watchdog, this could race with our first |
1660 | * link up interrupt but shouldn't be a problem */ | 1701 | * link up interrupt but shouldn't be a problem */ |
1661 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; | 1702 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; |
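
ixgbevf_save_reset_stats() and ixgbevf_init_last_counter_stats() together keep the reported counters monotonic across an adapter reset: the delta accumulated since the previous base is folded into a saved total, then the base is re-read from the freshly reset hardware. A compact sketch of that bookkeeping, with a fake read_hw_counter() standing in for the register reads:

#include <stdio.h>
#include <stdint.h>

static uint64_t hw_counter;                  /* pretend hardware register */

static uint64_t read_hw_counter(void) { return hw_counter; }

struct counter {
	uint64_t cur, base, saved;               /* running, post-reset base, pre-reset total */
};

static void save_reset(struct counter *c)
{
	c->saved += c->cur - c->base;            /* bank what this incarnation counted */
}

static void rebase(struct counter *c)
{
	c->cur = c->base = read_hw_counter();    /* hardware counter restarted */
}

static uint64_t reported(const struct counter *c)
{
	return c->cur - c->base + c->saved;      /* what ethtool would show */
}

int main(void)
{
	struct counter c = { 0 };

	rebase(&c);
	hw_counter = 1000; c.cur = read_hw_counter();
	printf("before reset: %llu\n", (unsigned long long)reported(&c));

	save_reset(&c);                          /* adapter reset: hardware goes back to 0 */
	hw_counter = 0;
	rebase(&c);
	hw_counter = 250; c.cur = read_hw_counter();
	printf("after reset:  %llu\n", (unsigned long long)reported(&c));  /* 1250 */
	return 0;
}
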
@@ -2228,27 +2269,6 @@ out: | |||
2228 | return err; | 2269 | return err; |
2229 | } | 2270 | } |
2230 | 2271 | ||
2231 | static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) | ||
2232 | { | ||
2233 | struct ixgbe_hw *hw = &adapter->hw; | ||
2234 | |||
2235 | adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); | ||
2236 | adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); | ||
2237 | adapter->stats.last_vfgorc |= | ||
2238 | (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); | ||
2239 | adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); | ||
2240 | adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); | ||
2241 | adapter->stats.last_vfgotc |= | ||
2242 | (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); | ||
2243 | adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); | ||
2244 | |||
2245 | adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; | ||
2246 | adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; | ||
2247 | adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; | ||
2248 | adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; | ||
2249 | adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; | ||
2250 | } | ||
2251 | |||
2252 | #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ | 2272 | #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ |
2253 | { \ | 2273 | { \ |
2254 | u32 current_counter = IXGBE_READ_REG(hw, reg); \ | 2274 | u32 current_counter = IXGBE_READ_REG(hw, reg); \ |
@@ -2399,7 +2419,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work) | |||
2399 | if (!netif_carrier_ok(netdev)) { | 2419 | if (!netif_carrier_ok(netdev)) { |
2400 | hw_dbg(&adapter->hw, "NIC Link is Up %s, ", | 2420 | hw_dbg(&adapter->hw, "NIC Link is Up %s, ", |
2401 | ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? | 2421 | ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? |
2402 | "10 Gbps" : "1 Gbps")); | 2422 | "10 Gbps\n" : "1 Gbps\n")); |
2403 | netif_carrier_on(netdev); | 2423 | netif_carrier_on(netdev); |
2404 | netif_tx_wake_all_queues(netdev); | 2424 | netif_tx_wake_all_queues(netdev); |
2405 | } else { | 2425 | } else { |
@@ -2416,9 +2436,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work) | |||
2416 | } | 2436 | } |
2417 | } | 2437 | } |
2418 | 2438 | ||
2419 | pf_has_reset: | ||
2420 | ixgbevf_update_stats(adapter); | 2439 | ixgbevf_update_stats(adapter); |
2421 | 2440 | ||
2441 | pf_has_reset: | ||
2422 | /* Force detection of hung controller every watchdog period */ | 2442 | /* Force detection of hung controller every watchdog period */ |
2423 | adapter->detect_tx_hung = true; | 2443 | adapter->detect_tx_hung = true; |
2424 | 2444 | ||
@@ -2675,7 +2695,7 @@ static int ixgbevf_open(struct net_device *netdev) | |||
2675 | if (hw->adapter_stopped) { | 2695 | if (hw->adapter_stopped) { |
2676 | err = IXGBE_ERR_MBX; | 2696 | err = IXGBE_ERR_MBX; |
2677 | printk(KERN_ERR "Unable to start - perhaps the PF" | 2697 | printk(KERN_ERR "Unable to start - perhaps the PF" |
2678 | "Driver isn't up yet\n"); | 2698 | " Driver isn't up yet\n"); |
2679 | goto err_setup_reset; | 2699 | goto err_setup_reset; |
2680 | } | 2700 | } |
2681 | } | 2701 | } |
@@ -2923,9 +2943,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, | |||
2923 | struct ixgbevf_tx_buffer *tx_buffer_info; | 2943 | struct ixgbevf_tx_buffer *tx_buffer_info; |
2924 | unsigned int len; | 2944 | unsigned int len; |
2925 | unsigned int total = skb->len; | 2945 | unsigned int total = skb->len; |
2926 | unsigned int offset = 0, size, count = 0, i; | 2946 | unsigned int offset = 0, size, count = 0; |
2927 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | 2947 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; |
2928 | unsigned int f; | 2948 | unsigned int f; |
2949 | int i; | ||
2929 | 2950 | ||
2930 | i = tx_ring->next_to_use; | 2951 | i = tx_ring->next_to_use; |
2931 | 2952 | ||
@@ -3390,8 +3411,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, | |||
3390 | /* setup the private structure */ | 3411 | /* setup the private structure */ |
3391 | err = ixgbevf_sw_init(adapter); | 3412 | err = ixgbevf_sw_init(adapter); |
3392 | 3413 | ||
3393 | ixgbevf_init_last_counter_stats(adapter); | ||
3394 | |||
3395 | #ifdef MAX_SKB_FRAGS | 3414 | #ifdef MAX_SKB_FRAGS |
3396 | netdev->features = NETIF_F_SG | | 3415 | netdev->features = NETIF_F_SG | |
3397 | NETIF_F_IP_CSUM | | 3416 | NETIF_F_IP_CSUM | |
@@ -3449,6 +3468,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, | |||
3449 | 3468 | ||
3450 | adapter->netdev_registered = true; | 3469 | adapter->netdev_registered = true; |
3451 | 3470 | ||
3471 | ixgbevf_init_last_counter_stats(adapter); | ||
3472 | |||
3452 | /* print the MAC address */ | 3473 | /* print the MAC address */ |
3453 | hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", | 3474 | hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", |
3454 | netdev->dev_addr[0], | 3475 | netdev->dev_addr[0], |
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h index 799600e92700..1f31b052d4b4 100644 --- a/drivers/net/ixgbevf/vf.h +++ b/drivers/net/ixgbevf/vf.h | |||
@@ -157,6 +157,12 @@ struct ixgbevf_hw_stats { | |||
157 | u64 vfgorc; | 157 | u64 vfgorc; |
158 | u64 vfgotc; | 158 | u64 vfgotc; |
159 | u64 vfmprc; | 159 | u64 vfmprc; |
160 | |||
161 | u64 saved_reset_vfgprc; | ||
162 | u64 saved_reset_vfgptc; | ||
163 | u64 saved_reset_vfgorc; | ||
164 | u64 saved_reset_vfgotc; | ||
165 | u64 saved_reset_vfmprc; | ||
160 | }; | 166 | }; |
161 | 167 | ||
162 | struct ixgbevf_info { | 168 | struct ixgbevf_info { |
diff --git a/drivers/net/jme.c b/drivers/net/jme.c index 0f31497833df..c0b59a555384 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c | |||
@@ -946,6 +946,8 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) | |||
946 | jme->jme_vlan_rx(skb, jme->vlgrp, | 946 | jme->jme_vlan_rx(skb, jme->vlgrp, |
947 | le16_to_cpu(rxdesc->descwb.vlan)); | 947 | le16_to_cpu(rxdesc->descwb.vlan)); |
948 | NET_STAT(jme).rx_bytes += 4; | 948 | NET_STAT(jme).rx_bytes += 4; |
949 | } else { | ||
950 | dev_kfree_skb(skb); | ||
949 | } | 951 | } |
950 | } else { | 952 | } else { |
951 | jme->jme_rx(skb); | 953 | jme->jme_rx(skb); |
@@ -2081,12 +2083,45 @@ jme_tx_timeout(struct net_device *netdev) | |||
2081 | jme_reset_link(jme); | 2083 | jme_reset_link(jme); |
2082 | } | 2084 | } |
2083 | 2085 | ||
2086 | static inline void jme_pause_rx(struct jme_adapter *jme) | ||
2087 | { | ||
2088 | atomic_dec(&jme->link_changing); | ||
2089 | |||
2090 | jme_set_rx_pcc(jme, PCC_OFF); | ||
2091 | if (test_bit(JME_FLAG_POLL, &jme->flags)) { | ||
2092 | JME_NAPI_DISABLE(jme); | ||
2093 | } else { | ||
2094 | tasklet_disable(&jme->rxclean_task); | ||
2095 | tasklet_disable(&jme->rxempty_task); | ||
2096 | } | ||
2097 | } | ||
2098 | |||
2099 | static inline void jme_resume_rx(struct jme_adapter *jme) | ||
2100 | { | ||
2101 | struct dynpcc_info *dpi = &(jme->dpi); | ||
2102 | |||
2103 | if (test_bit(JME_FLAG_POLL, &jme->flags)) { | ||
2104 | JME_NAPI_ENABLE(jme); | ||
2105 | } else { | ||
2106 | tasklet_hi_enable(&jme->rxclean_task); | ||
2107 | tasklet_hi_enable(&jme->rxempty_task); | ||
2108 | } | ||
2109 | dpi->cur = PCC_P1; | ||
2110 | dpi->attempt = PCC_P1; | ||
2111 | dpi->cnt = 0; | ||
2112 | jme_set_rx_pcc(jme, PCC_P1); | ||
2113 | |||
2114 | atomic_inc(&jme->link_changing); | ||
2115 | } | ||
2116 | |||
2084 | static void | 2117 | static void |
2085 | jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | 2118 | jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) |
2086 | { | 2119 | { |
2087 | struct jme_adapter *jme = netdev_priv(netdev); | 2120 | struct jme_adapter *jme = netdev_priv(netdev); |
2088 | 2121 | ||
2122 | jme_pause_rx(jme); | ||
2089 | jme->vlgrp = grp; | 2123 | jme->vlgrp = grp; |
2124 | jme_resume_rx(jme); | ||
2090 | } | 2125 | } |
2091 | 2126 | ||
2092 | static void | 2127 | static void |
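
jme_vlan_rx_register() now brackets the vlgrp update with jme_pause_rx()/jme_resume_rx(): the receive path (NAPI or the rx tasklets) is quiesced before a pointer it dereferences is swapped, and the coalescing state is restored afterwards. The shape of that pattern, reduced to a plain pthread sketch in which a hypothetical rx_lock stands in for the NAPI/tasklet disable calls:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;  /* stands in for NAPI/tasklet disable */
static void *vlan_group;                                     /* shared pointer the rx path reads   */

static void rx_path(void)
{
	pthread_mutex_lock(&rx_lock);     /* rx work runs with the lock held */
	if (vlan_group)
		printf("delivering through the VLAN group\n");
	pthread_mutex_unlock(&rx_lock);
}

static void set_vlan_group(void *grp)
{
	pthread_mutex_lock(&rx_lock);     /* "pause rx": no rx work can run now */
	vlan_group = grp;                 /* safe to swap the shared pointer    */
	pthread_mutex_unlock(&rx_lock);   /* "resume rx"                        */
}

int main(void)
{
	int dummy;

	set_vlan_group(&dummy);
	rx_path();
	return 0;
}
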
diff --git a/drivers/net/jme.h b/drivers/net/jme.h index c19db9146a2f..07ad3a457185 100644 --- a/drivers/net/jme.h +++ b/drivers/net/jme.h | |||
@@ -25,7 +25,7 @@ | |||
25 | #define __JME_H_INCLUDED__ | 25 | #define __JME_H_INCLUDED__ |
26 | 26 | ||
27 | #define DRV_NAME "jme" | 27 | #define DRV_NAME "jme" |
28 | #define DRV_VERSION "1.0.5" | 28 | #define DRV_VERSION "1.0.6" |
29 | #define PFX DRV_NAME ": " | 29 | #define PFX DRV_NAME ": " |
30 | 30 | ||
31 | #define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 | 31 | #define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 |
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index 0573e0bb4444..13cc1ca261d9 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c | |||
@@ -976,7 +976,6 @@ static void ks8851_set_rx_mode(struct net_device *dev) | |||
976 | crc >>= (32 - 6); /* get top six bits */ | 976 | crc >>= (32 - 6); /* get top six bits */ |
977 | 977 | ||
978 | rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf)); | 978 | rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf)); |
979 | mcptr = mcptr->next; | ||
980 | } | 979 | } |
981 | 980 | ||
982 | rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA; | 981 | rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA; |
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index 0f59099ee72f..6c5327af1bf9 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c | |||
@@ -6322,7 +6322,7 @@ static int netdev_set_eeprom(struct net_device *dev, | |||
6322 | int len; | 6322 | int len; |
6323 | 6323 | ||
6324 | if (eeprom->magic != EEPROM_MAGIC) | 6324 | if (eeprom->magic != EEPROM_MAGIC) |
6325 | return 1; | 6325 | return -EINVAL; |
6326 | 6326 | ||
6327 | len = (eeprom->offset + eeprom->len + 1) / 2; | 6327 | len = (eeprom->offset + eeprom->len + 1) / 2; |
6328 | for (i = eeprom->offset / 2; i < len; i++) | 6328 | for (i = eeprom->offset / 2; i < len; i++) |
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 144d2e880422..0f703838e21a 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -53,8 +53,8 @@ | |||
53 | 53 | ||
54 | #define _NETXEN_NIC_LINUX_MAJOR 4 | 54 | #define _NETXEN_NIC_LINUX_MAJOR 4 |
55 | #define _NETXEN_NIC_LINUX_MINOR 0 | 55 | #define _NETXEN_NIC_LINUX_MINOR 0 |
56 | #define _NETXEN_NIC_LINUX_SUBVERSION 72 | 56 | #define _NETXEN_NIC_LINUX_SUBVERSION 73 |
57 | #define NETXEN_NIC_LINUX_VERSIONID "4.0.72" | 57 | #define NETXEN_NIC_LINUX_VERSIONID "4.0.73" |
58 | 58 | ||
59 | #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) | 59 | #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) |
60 | #define _major(v) (((v) >> 24) & 0xff) | 60 | #define _major(v) (((v) >> 24) & 0xff) |
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c index 2a8ef5fc9663..f26e54716c88 100644 --- a/drivers/net/netxen/netxen_nic_ctx.c +++ b/drivers/net/netxen/netxen_nic_ctx.c | |||
@@ -669,13 +669,15 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter) | |||
669 | } | 669 | } |
670 | sds_ring->desc_head = (struct status_desc *)addr; | 670 | sds_ring->desc_head = (struct status_desc *)addr; |
671 | 671 | ||
672 | sds_ring->crb_sts_consumer = | 672 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { |
673 | netxen_get_ioaddr(adapter, | 673 | sds_ring->crb_sts_consumer = |
674 | recv_crb_registers[port].crb_sts_consumer[ring]); | 674 | netxen_get_ioaddr(adapter, |
675 | recv_crb_registers[port].crb_sts_consumer[ring]); | ||
675 | 676 | ||
676 | sds_ring->crb_intr_mask = | 677 | sds_ring->crb_intr_mask = |
677 | netxen_get_ioaddr(adapter, | 678 | netxen_get_ioaddr(adapter, |
678 | recv_crb_registers[port].sw_int_mask[ring]); | 679 | recv_crb_registers[port].sw_int_mask[ring]); |
680 | } | ||
679 | } | 681 | } |
680 | 682 | ||
681 | 683 | ||
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 1c63610ead42..7eb925a9f36e 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -761,7 +761,7 @@ nx_get_bios_version(struct netxen_adapter *adapter) | |||
761 | if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { | 761 | if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { |
762 | bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) | 762 | bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) |
763 | + NX_UNI_BIOS_VERSION_OFF)); | 763 | + NX_UNI_BIOS_VERSION_OFF)); |
764 | return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + | 764 | return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + |
765 | (bios_ver >> 24); | 765 | (bios_ver >> 24); |
766 | } else | 766 | } else |
767 | return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); | 767 | return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); |
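
The nx_get_bios_version() fix changes the first shift from 24 to 16 so that one byte of the packed version is no longer lost to 32-bit overflow. A standalone check of the corrected expression on an arbitrary sample value, purely to show how the bytes move:

#include <stdio.h>
#include <stdint.h>

static uint32_t reshuffle(uint32_t v)
{
	return (v << 16) + ((v >> 8) & 0xff00) + (v >> 24);
}

int main(void)
{
	uint32_t v = 0xAABBCCDDu;

	/* (v << 16)           = 0xCCDD0000
	 * ((v >> 8) & 0xff00) = 0x0000BB00
	 * (v >> 24)           = 0x000000AA
	 * sum                 = 0xCCDDBBAA
	 */
	printf("0x%08X -> 0x%08X\n", (unsigned int)v, (unsigned int)reshuffle(v));
	return 0;
}
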
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 08780ef1c1f8..01808b28d1b6 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -604,16 +604,14 @@ netxen_cleanup_pci_map(struct netxen_adapter *adapter) | |||
604 | static int | 604 | static int |
605 | netxen_setup_pci_map(struct netxen_adapter *adapter) | 605 | netxen_setup_pci_map(struct netxen_adapter *adapter) |
606 | { | 606 | { |
607 | void __iomem *mem_ptr0 = NULL; | ||
608 | void __iomem *mem_ptr1 = NULL; | ||
609 | void __iomem *mem_ptr2 = NULL; | ||
610 | void __iomem *db_ptr = NULL; | 607 | void __iomem *db_ptr = NULL; |
611 | 608 | ||
612 | resource_size_t mem_base, db_base; | 609 | resource_size_t mem_base, db_base; |
613 | unsigned long mem_len, db_len = 0, pci_len0 = 0; | 610 | unsigned long mem_len, db_len = 0; |
614 | 611 | ||
615 | struct pci_dev *pdev = adapter->pdev; | 612 | struct pci_dev *pdev = adapter->pdev; |
616 | int pci_func = adapter->ahw.pci_func; | 613 | int pci_func = adapter->ahw.pci_func; |
614 | struct netxen_hardware_context *ahw = &adapter->ahw; | ||
617 | 615 | ||
618 | int err = 0; | 616 | int err = 0; |
619 | 617 | ||
@@ -630,24 +628,40 @@ netxen_setup_pci_map(struct netxen_adapter *adapter) | |||
630 | 628 | ||
631 | /* 128 Meg of memory */ | 629 | /* 128 Meg of memory */ |
632 | if (mem_len == NETXEN_PCI_128MB_SIZE) { | 630 | if (mem_len == NETXEN_PCI_128MB_SIZE) { |
633 | mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); | 631 | |
634 | mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, | 632 | ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); |
633 | ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, | ||
635 | SECOND_PAGE_GROUP_SIZE); | 634 | SECOND_PAGE_GROUP_SIZE); |
636 | mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, | 635 | ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, |
637 | THIRD_PAGE_GROUP_SIZE); | 636 | THIRD_PAGE_GROUP_SIZE); |
638 | pci_len0 = FIRST_PAGE_GROUP_SIZE; | 637 | if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL || |
638 | ahw->pci_base2 == NULL) { | ||
639 | dev_err(&pdev->dev, "failed to map PCI bar 0\n"); | ||
640 | err = -EIO; | ||
641 | goto err_out; | ||
642 | } | ||
643 | |||
644 | ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE; | ||
645 | |||
639 | } else if (mem_len == NETXEN_PCI_32MB_SIZE) { | 646 | } else if (mem_len == NETXEN_PCI_32MB_SIZE) { |
640 | mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); | 647 | |
641 | mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - | 648 | ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); |
649 | ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - | ||
642 | SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); | 650 | SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); |
651 | if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { | ||
652 | dev_err(&pdev->dev, "failed to map PCI bar 0\n"); | ||
653 | err = -EIO; | ||
654 | goto err_out; | ||
655 | } | ||
656 | |||
643 | } else if (mem_len == NETXEN_PCI_2MB_SIZE) { | 657 | } else if (mem_len == NETXEN_PCI_2MB_SIZE) { |
644 | 658 | ||
645 | mem_ptr0 = pci_ioremap_bar(pdev, 0); | 659 | ahw->pci_base0 = pci_ioremap_bar(pdev, 0); |
646 | if (mem_ptr0 == NULL) { | 660 | if (ahw->pci_base0 == NULL) { |
647 | dev_err(&pdev->dev, "failed to map PCI bar 0\n"); | 661 | dev_err(&pdev->dev, "failed to map PCI bar 0\n"); |
648 | return -EIO; | 662 | return -EIO; |
649 | } | 663 | } |
650 | pci_len0 = mem_len; | 664 | ahw->pci_len0 = mem_len; |
651 | } else { | 665 | } else { |
652 | return -EIO; | 666 | return -EIO; |
653 | } | 667 | } |
@@ -656,11 +670,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter) | |||
656 | 670 | ||
657 | dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); | 671 | dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); |
658 | 672 | ||
659 | adapter->ahw.pci_base0 = mem_ptr0; | ||
660 | adapter->ahw.pci_len0 = pci_len0; | ||
661 | adapter->ahw.pci_base1 = mem_ptr1; | ||
662 | adapter->ahw.pci_base2 = mem_ptr2; | ||
663 | |||
664 | if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { | 673 | if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { |
665 | adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, | 674 | adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, |
666 | NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); | 675 | NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); |
@@ -1246,8 +1255,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1246 | int pci_func_id = PCI_FUNC(pdev->devfn); | 1255 | int pci_func_id = PCI_FUNC(pdev->devfn); |
1247 | uint8_t revision_id; | 1256 | uint8_t revision_id; |
1248 | 1257 | ||
1249 | if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) { | 1258 | if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { |
1250 | pr_warning("%s: chip revisions between 0x%x-0x%x" | 1259 | pr_warning("%s: chip revisions between 0x%x-0x%x " |
1251 | "will not be enabled.\n", | 1260 | "will not be enabled.\n", |
1252 | module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); | 1261 | module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); |
1253 | return -ENODEV; | 1262 | return -ENODEV; |
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 776cad2f5715..1028fcb91a28 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -1549,6 +1549,7 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
1549 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101), | 1549 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101), |
1550 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab), | 1550 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab), |
1551 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), | 1551 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), |
1552 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e), | ||
1552 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), | 1553 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), |
1553 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), | 1554 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), |
1554 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), | 1555 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), |
@@ -1740,7 +1741,7 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
1740 | PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), | 1741 | PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), |
1741 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), | 1742 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), |
1742 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), | 1743 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), |
1743 | PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), | 1744 | PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"), |
1744 | PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), | 1745 | PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), |
1745 | PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), | 1746 | PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), |
1746 | PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), | 1747 | PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 9d3ebf3e975e..96740051cdcc 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -186,8 +186,13 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { | |||
186 | 186 | ||
187 | MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); | 187 | MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); |
188 | 188 | ||
189 | static int rx_copybreak = 200; | 189 | /* |
190 | static int use_dac = -1; | 190 | * we set our copybreak very high so that we don't have |
191 | * to allocate 16k frames all the time (see note in | ||
192 | * rtl8169_open()) | ||
193 | */ | ||
194 | static int rx_copybreak = 16383; | ||
195 | static int use_dac; | ||
191 | static struct { | 196 | static struct { |
192 | u32 msg_enable; | 197 | u32 msg_enable; |
193 | } debug = { -1 }; | 198 | } debug = { -1 }; |
@@ -511,8 +516,7 @@ MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); | |||
511 | module_param(rx_copybreak, int, 0); | 516 | module_param(rx_copybreak, int, 0); |
512 | MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); | 517 | MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); |
513 | module_param(use_dac, int, 0); | 518 | module_param(use_dac, int, 0); |
514 | MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only." | 519 | MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); |
515 | " Unsafe on 32 bit PCI slot."); | ||
516 | module_param_named(debug, debug.msg_enable, int, 0); | 520 | module_param_named(debug, debug.msg_enable, int, 0); |
517 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); | 521 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); |
518 | MODULE_LICENSE("GPL"); | 522 | MODULE_LICENSE("GPL"); |
@@ -2821,8 +2825,8 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) | |||
2821 | spin_lock_irq(&tp->lock); | 2825 | spin_lock_irq(&tp->lock); |
2822 | 2826 | ||
2823 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 2827 | RTL_W8(Cfg9346, Cfg9346_Unlock); |
2824 | RTL_W32(MAC0, low); | ||
2825 | RTL_W32(MAC4, high); | 2828 | RTL_W32(MAC4, high); |
2829 | RTL_W32(MAC0, low); | ||
2826 | RTL_W8(Cfg9346, Cfg9346_Lock); | 2830 | RTL_W8(Cfg9346, Cfg9346_Lock); |
2827 | 2831 | ||
2828 | spin_unlock_irq(&tp->lock); | 2832 | spin_unlock_irq(&tp->lock); |
@@ -2974,7 +2978,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2974 | void __iomem *ioaddr; | 2978 | void __iomem *ioaddr; |
2975 | unsigned int i; | 2979 | unsigned int i; |
2976 | int rc; | 2980 | int rc; |
2977 | int this_use_dac = use_dac; | ||
2978 | 2981 | ||
2979 | if (netif_msg_drv(&debug)) { | 2982 | if (netif_msg_drv(&debug)) { |
2980 | printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", | 2983 | printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", |
@@ -3040,17 +3043,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3040 | 3043 | ||
3041 | tp->cp_cmd = PCIMulRW | RxChkSum; | 3044 | tp->cp_cmd = PCIMulRW | RxChkSum; |
3042 | 3045 | ||
3043 | tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
3044 | if (!tp->pcie_cap) | ||
3045 | netif_info(tp, probe, dev, "no PCI Express capability\n"); | ||
3046 | |||
3047 | if (this_use_dac < 0) | ||
3048 | this_use_dac = tp->pcie_cap != 0; | ||
3049 | |||
3050 | if ((sizeof(dma_addr_t) > 4) && | 3046 | if ((sizeof(dma_addr_t) > 4) && |
3051 | this_use_dac && | 3047 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { |
3052 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
3053 | netif_info(tp, probe, dev, "using 64-bit DMA\n"); | ||
3054 | tp->cp_cmd |= PCIDAC; | 3048 | tp->cp_cmd |= PCIDAC; |
3055 | dev->features |= NETIF_F_HIGHDMA; | 3049 | dev->features |= NETIF_F_HIGHDMA; |
3056 | } else { | 3050 | } else { |
@@ -3069,6 +3063,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3069 | goto err_out_free_res_4; | 3063 | goto err_out_free_res_4; |
3070 | } | 3064 | } |
3071 | 3065 | ||
3066 | tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
3067 | if (!tp->pcie_cap) | ||
3068 | netif_info(tp, probe, dev, "no PCI Express capability\n"); | ||
3069 | |||
3072 | RTL_W16(IntrMask, 0x0000); | 3070 | RTL_W16(IntrMask, 0x0000); |
3073 | 3071 | ||
3074 | /* Soft reset the chip. */ | 3072 | /* Soft reset the chip. */ |
@@ -3224,9 +3222,13 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) | |||
3224 | } | 3222 | } |
3225 | 3223 | ||
3226 | static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, | 3224 | static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, |
3227 | struct net_device *dev) | 3225 | unsigned int mtu) |
3228 | { | 3226 | { |
3229 | unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; | 3227 | unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; |
3228 | |||
3229 | if (max_frame != 16383) | ||
3230 | printk(KERN_WARNING "WARNING! Changing the MTU on this NIC" | ||
3231 | " may lead to frame reception errors!\n"); | ||
3230 | 3232 | ||
3231 | tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; | 3233 | tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; |
3232 | } | 3234 | } |
@@ -3238,7 +3240,17 @@ static int rtl8169_open(struct net_device *dev) | |||
3238 | int retval = -ENOMEM; | 3240 | int retval = -ENOMEM; |
3239 | 3241 | ||
3240 | 3242 | ||
3241 | rtl8169_set_rxbufsize(tp, dev); | 3243 | /* |
3244 | * Note that we use a magic value here; it's weird, I know. | ||
3245 | * It's done because some subset of rtl8169 hardware suffers from | ||
3246 | * a problem in which frames received that are longer than | ||
3247 | * the size set in RxMaxSize register return garbage sizes | ||
3248 | * when received. To avoid this we need to turn off filtering, | ||
3249 | * which is done by setting a value of 16383 in the RxMaxSize register | ||
3250 | * and allocating 16k frames to handle the largest possible rx value; | ||
3251 | * that's what the magic math below does. | ||
3252 | */ | ||
3253 | rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN); | ||
3242 | 3254 | ||
3243 | /* | 3255 | /* |
3244 | * Rx and Tx descriptors need 256 byte alignment. | 3256 | * Rx and Tx descriptors need 256 byte alignment. |
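
The rtl8169_open() hunk passes 16383 minus the VLAN header and FCS lengths into rtl8169_set_rxbufsize(), so that after the helper adds them back the buffer size lands exactly on the 16383 programmed into RxMaxSize. The arithmetic, assuming the usual kernel values VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4:

#include <stdio.h>

#define VLAN_ETH_HLEN 18   /* Ethernet header + 802.1Q tag */
#define ETH_FCS_LEN    4   /* frame check sequence */

int main(void)
{
	unsigned int mtu_arg   = 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN;  /* 16361 */
	unsigned int max_frame = mtu_arg + VLAN_ETH_HLEN + ETH_FCS_LEN;

	printf("argument %u -> max_frame %u\n", mtu_arg, max_frame);   /* 16383 */
	return 0;
}
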
@@ -3891,7 +3903,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) | |||
3891 | 3903 | ||
3892 | rtl8169_down(dev); | 3904 | rtl8169_down(dev); |
3893 | 3905 | ||
3894 | rtl8169_set_rxbufsize(tp, dev); | 3906 | rtl8169_set_rxbufsize(tp, dev->mtu); |
3895 | 3907 | ||
3896 | ret = rtl8169_init_ring(dev); | 3908 | ret = rtl8169_init_ring(dev); |
3897 | if (ret < 0) | 3909 | if (ret < 0) |
@@ -4754,8 +4766,8 @@ static void rtl_set_rx_mode(struct net_device *dev) | |||
4754 | mc_filter[1] = swab32(data); | 4766 | mc_filter[1] = swab32(data); |
4755 | } | 4767 | } |
4756 | 4768 | ||
4757 | RTL_W32(MAR0 + 0, mc_filter[0]); | ||
4758 | RTL_W32(MAR0 + 4, mc_filter[1]); | 4769 | RTL_W32(MAR0 + 4, mc_filter[1]); |
4770 | RTL_W32(MAR0 + 0, mc_filter[0]); | ||
4759 | 4771 | ||
4760 | RTL_W32(RxConfig, tmp); | 4772 | RTL_W32(RxConfig, tmp); |
4761 | 4773 | ||
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 0ab05af237e5..a4f09d490531 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c | |||
@@ -851,13 +851,15 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info | |||
851 | 851 | ||
852 | if ( !(rdes0 & 0x8000) || | 852 | if ( !(rdes0 & 0x8000) || |
853 | ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { | 853 | ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { |
854 | struct sk_buff *new_skb = NULL; | ||
855 | |||
854 | skb = rxptr->rx_skb_ptr; | 856 | skb = rxptr->rx_skb_ptr; |
855 | 857 | ||
856 | /* Good packet, send to upper layer */ | 858 | /* Good packet, send to upper layer */ |
857 | /* Short packet uses a new SKB */ | 859 | /* Short packet uses a new SKB */ |
858 | if ( (rxlen < RX_COPY_SIZE) && | 860 | if ((rxlen < RX_COPY_SIZE) && |
859 | ( (skb = dev_alloc_skb(rxlen + 2) ) | 861 | (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) { |
860 | != NULL) ) { | 862 | skb = new_skb; |
861 | /* size less than COPY_SIZE, allocate a rxlen SKB */ | 863 | /* size less than COPY_SIZE, allocate a rxlen SKB */ |
862 | skb_reserve(skb, 2); /* 16byte align */ | 864 | skb_reserve(skb, 2); /* 16byte align */ |
863 | memcpy(skb_put(skb, rxlen), | 865 | memcpy(skb_put(skb, rxlen), |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index d222d7e25273..73f9a31cf94d 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -1189,9 +1189,21 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, | |||
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | if (csum) { | 1191 | if (csum) { |
1192 | u32 csum_preamble = smsc95xx_calc_csum_preamble(skb); | 1192 | if (skb->len <= 45) { |
1193 | skb_push(skb, 4); | 1193 | /* workaround - hardware tx checksum does not work |
1194 | memcpy(skb->data, &csum_preamble, 4); | 1194 | * properly with extremely small packets */ |
1195 | long csstart = skb->csum_start - skb_headroom(skb); | ||
1196 | __wsum calc = csum_partial(skb->data + csstart, | ||
1197 | skb->len - csstart, 0); | ||
1198 | *((__sum16 *)(skb->data + csstart | ||
1199 | + skb->csum_offset)) = csum_fold(calc); | ||
1200 | |||
1201 | csum = false; | ||
1202 | } else { | ||
1203 | u32 csum_preamble = smsc95xx_calc_csum_preamble(skb); | ||
1204 | skb_push(skb, 4); | ||
1205 | memcpy(skb->data, &csum_preamble, 4); | ||
1206 | } | ||
1195 | } | 1207 | } |
1196 | 1208 | ||
1197 | skb_push(skb, 4); | 1209 | skb_push(skb, 4); |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 3a486f3bad3d..bc278d4ee89d 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -812,7 +812,7 @@ static void set_mii_flow_control(struct velocity_info *vptr) | |||
812 | 812 | ||
813 | case FLOW_CNTL_TX_RX: | 813 | case FLOW_CNTL_TX_RX: |
814 | MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); | 814 | MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); |
815 | MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); | 815 | MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); |
816 | break; | 816 | break; |
817 | 817 | ||
818 | case FLOW_CNTL_DISABLE: | 818 | case FLOW_CNTL_DISABLE: |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index b2c8207f7bc1..294b486bc3ed 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -1353,25 +1353,6 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) | |||
1353 | return htype; | 1353 | return htype; |
1354 | } | 1354 | } |
1355 | 1355 | ||
1356 | static bool is_pae(struct sk_buff *skb) | ||
1357 | { | ||
1358 | struct ieee80211_hdr *hdr; | ||
1359 | __le16 fc; | ||
1360 | |||
1361 | hdr = (struct ieee80211_hdr *)skb->data; | ||
1362 | fc = hdr->frame_control; | ||
1363 | |||
1364 | if (ieee80211_is_data(fc)) { | ||
1365 | if (ieee80211_is_nullfunc(fc) || | ||
1366 | /* Port Access Entity (IEEE 802.1X) */ | ||
1367 | (skb->protocol == cpu_to_be16(ETH_P_PAE))) { | ||
1368 | return true; | ||
1369 | } | ||
1370 | } | ||
1371 | |||
1372 | return false; | ||
1373 | } | ||
1374 | |||
1375 | static int get_hw_crypto_keytype(struct sk_buff *skb) | 1356 | static int get_hw_crypto_keytype(struct sk_buff *skb) |
1376 | { | 1357 | { |
1377 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | 1358 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
@@ -1696,7 +1677,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | |||
1696 | goto tx_done; | 1677 | goto tx_done; |
1697 | } | 1678 | } |
1698 | 1679 | ||
1699 | if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) { | 1680 | if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { |
1700 | /* | 1681 | /* |
1701 | * Try aggregation if it's a unicast data frame | 1682 | * Try aggregation if it's a unicast data frame |
1702 | * and the destination is HT capable. | 1683 | * and the destination is HT capable. |
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 1ed5206721ec..8c12311dbb0a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -124,7 +124,7 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv, | |||
124 | if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) | 124 | if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) |
125 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | 125 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; |
126 | else { | 126 | else { |
127 | IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n", | 127 | IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n", |
128 | priv->stations[sta_id].tid[tid].tfds_in_queue, | 128 | priv->stations[sta_id].tid[tid].tfds_in_queue, |
129 | freed); | 129 | freed); |
130 | priv->stations[sta_id].tid[tid].tfds_in_queue = 0; | 130 | priv->stations[sta_id].tid[tid].tfds_in_queue = 0; |
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c index 0ccba57fb9fb..05e4d68eb4cc 100644 --- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c +++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c | |||
@@ -466,7 +466,8 @@ out: | |||
466 | 466 | ||
467 | void wl1251_debugfs_reset(struct wl1251 *wl) | 467 | void wl1251_debugfs_reset(struct wl1251 *wl) |
468 | { | 468 | { |
469 | memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats)); | 469 | if (wl->stats.fw_stats != NULL) |
470 | memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats)); | ||
470 | wl->stats.retry_count = 0; | 471 | wl->stats.retry_count = 0; |
471 | wl->stats.excessive_retries = 0; | 472 | wl->stats.excessive_retries = 0; |
472 | } | 473 | } |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 40b48f569b1e..9665d6b17a2a 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -832,9 +832,8 @@ static inline void dbg_ctrl(struct controller *ctrl) | |||
832 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | 832 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
833 | if (!pci_resource_len(pdev, i)) | 833 | if (!pci_resource_len(pdev, i)) |
834 | continue; | 834 | continue; |
835 | ctrl_info(ctrl, " PCI resource [%d] : 0x%llx@0x%llx\n", | 835 | ctrl_info(ctrl, " PCI resource [%d] : %pR\n", |
836 | i, (unsigned long long)pci_resource_len(pdev, i), | 836 | i, &pdev->resource[i]); |
837 | (unsigned long long)pci_resource_start(pdev, i)); | ||
838 | } | 837 | } |
839 | ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); | 838 | ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); |
840 | ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl)); | 839 | ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl)); |
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c index 3e0d7b5dd1b9..fb9fdf4a42bf 100644 --- a/drivers/pci/ioapic.c +++ b/drivers/pci/ioapic.c | |||
@@ -31,9 +31,9 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
31 | acpi_status status; | 31 | acpi_status status; |
32 | unsigned long long gsb; | 32 | unsigned long long gsb; |
33 | struct ioapic *ioapic; | 33 | struct ioapic *ioapic; |
34 | u64 addr; | ||
35 | int ret; | 34 | int ret; |
36 | char *type; | 35 | char *type; |
36 | struct resource *res; | ||
37 | 37 | ||
38 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 38 | handle = DEVICE_ACPI_HANDLE(&dev->dev); |
39 | if (!handle) | 39 | if (!handle) |
@@ -69,13 +69,12 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
69 | if (pci_request_region(dev, 0, type)) | 69 | if (pci_request_region(dev, 0, type)) |
70 | goto exit_disable; | 70 | goto exit_disable; |
71 | 71 | ||
72 | addr = pci_resource_start(dev, 0); | 72 | res = &dev->resource[0]; |
73 | if (acpi_register_ioapic(ioapic->handle, addr, ioapic->gsi_base)) | 73 | if (acpi_register_ioapic(ioapic->handle, res->start, ioapic->gsi_base)) |
74 | goto exit_release; | 74 | goto exit_release; |
75 | 75 | ||
76 | pci_set_drvdata(dev, ioapic); | 76 | pci_set_drvdata(dev, ioapic); |
77 | dev_info(&dev->dev, "%s at %#llx, GSI %u\n", type, addr, | 77 | dev_info(&dev->dev, "%s at %pR, GSI %u\n", type, res, ioapic->gsi_base); |
78 | ioapic->gsi_base); | ||
79 | return 0; | 78 | return 0; |
80 | 79 | ||
81 | exit_release: | 80 | exit_release: |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index cb1dd5f4988c..1531f3a49879 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -2576,18 +2576,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function); | |||
2576 | */ | 2576 | */ |
2577 | int pcix_get_max_mmrbc(struct pci_dev *dev) | 2577 | int pcix_get_max_mmrbc(struct pci_dev *dev) |
2578 | { | 2578 | { |
2579 | int err, cap; | 2579 | int cap; |
2580 | u32 stat; | 2580 | u32 stat; |
2581 | 2581 | ||
2582 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 2582 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
2583 | if (!cap) | 2583 | if (!cap) |
2584 | return -EINVAL; | 2584 | return -EINVAL; |
2585 | 2585 | ||
2586 | err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat); | 2586 | if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) |
2587 | if (err) | ||
2588 | return -EINVAL; | 2587 | return -EINVAL; |
2589 | 2588 | ||
2590 | return (stat & PCI_X_STATUS_MAX_READ) >> 12; | 2589 | return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21); |
2591 | } | 2590 | } |
2592 | EXPORT_SYMBOL(pcix_get_max_mmrbc); | 2591 | EXPORT_SYMBOL(pcix_get_max_mmrbc); |
2593 | 2592 | ||
@@ -2600,18 +2599,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc); | |||
2600 | */ | 2599 | */ |
2601 | int pcix_get_mmrbc(struct pci_dev *dev) | 2600 | int pcix_get_mmrbc(struct pci_dev *dev) |
2602 | { | 2601 | { |
2603 | int ret, cap; | 2602 | int cap; |
2604 | u32 cmd; | 2603 | u16 cmd; |
2605 | 2604 | ||
2606 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 2605 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
2607 | if (!cap) | 2606 | if (!cap) |
2608 | return -EINVAL; | 2607 | return -EINVAL; |
2609 | 2608 | ||
2610 | ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); | 2609 | if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) |
2611 | if (!ret) | 2610 | return -EINVAL; |
2612 | ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); | ||
2613 | 2611 | ||
2614 | return ret; | 2612 | return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); |
2615 | } | 2613 | } |
2616 | EXPORT_SYMBOL(pcix_get_mmrbc); | 2614 | EXPORT_SYMBOL(pcix_get_mmrbc); |
2617 | 2615 | ||
@@ -2626,28 +2624,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc); | |||
2626 | */ | 2624 | */ |
2627 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) | 2625 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) |
2628 | { | 2626 | { |
2629 | int cap, err = -EINVAL; | 2627 | int cap; |
2630 | u32 stat, cmd, v, o; | 2628 | u32 stat, v, o; |
2629 | u16 cmd; | ||
2631 | 2630 | ||
2632 | if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) | 2631 | if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) |
2633 | goto out; | 2632 | return -EINVAL; |
2634 | 2633 | ||
2635 | v = ffs(mmrbc) - 10; | 2634 | v = ffs(mmrbc) - 10; |
2636 | 2635 | ||
2637 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 2636 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
2638 | if (!cap) | 2637 | if (!cap) |
2639 | goto out; | 2638 | return -EINVAL; |
2640 | 2639 | ||
2641 | err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat); | 2640 | if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) |
2642 | if (err) | 2641 | return -EINVAL; |
2643 | goto out; | ||
2644 | 2642 | ||
2645 | if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) | 2643 | if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) |
2646 | return -E2BIG; | 2644 | return -E2BIG; |
2647 | 2645 | ||
2648 | err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); | 2646 | if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) |
2649 | if (err) | 2647 | return -EINVAL; |
2650 | goto out; | ||
2651 | 2648 | ||
2652 | o = (cmd & PCI_X_CMD_MAX_READ) >> 2; | 2649 | o = (cmd & PCI_X_CMD_MAX_READ) >> 2; |
2653 | if (o != v) { | 2650 | if (o != v) { |
@@ -2657,10 +2654,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) | |||
2657 | 2654 | ||
2658 | cmd &= ~PCI_X_CMD_MAX_READ; | 2655 | cmd &= ~PCI_X_CMD_MAX_READ; |
2659 | cmd |= v << 2; | 2656 | cmd |= v << 2; |
2660 | err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd); | 2657 | if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd)) |
2658 | return -EIO; | ||
2661 | } | 2659 | } |
2662 | out: | 2660 | return 0; |
2663 | return err; | ||
2664 | } | 2661 | } |
2665 | EXPORT_SYMBOL(pcix_set_mmrbc); | 2662 | EXPORT_SYMBOL(pcix_set_mmrbc); |
2666 | 2663 | ||
@@ -3023,7 +3020,6 @@ EXPORT_SYMBOL(pcim_pin_device); | |||
3023 | EXPORT_SYMBOL(pci_disable_device); | 3020 | EXPORT_SYMBOL(pci_disable_device); |
3024 | EXPORT_SYMBOL(pci_find_capability); | 3021 | EXPORT_SYMBOL(pci_find_capability); |
3025 | EXPORT_SYMBOL(pci_bus_find_capability); | 3022 | EXPORT_SYMBOL(pci_bus_find_capability); |
3026 | EXPORT_SYMBOL(pci_register_set_vga_state); | ||
3027 | EXPORT_SYMBOL(pci_release_regions); | 3023 | EXPORT_SYMBOL(pci_release_regions); |
3028 | EXPORT_SYMBOL(pci_request_regions); | 3024 | EXPORT_SYMBOL(pci_request_regions); |
3029 | EXPORT_SYMBOL(pci_request_regions_exclusive); | 3025 | EXPORT_SYMBOL(pci_request_regions_exclusive); |
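
Two separate fixes are folded into the pci.c hunks: PCI_X_CMD is a 16-bit register, so it is now accessed with pci_read_config_word()/pci_write_config_word() instead of the dword helpers, and pcix_get_max_mmrbc() now returns the byte count (512 << field) rather than the raw bit field, matching pcix_get_mmrbc(). A sketch of the decode, mirroring the patched logic:

    #include <linux/pci.h>

    /* Return the maximum memory read byte count a PCI-X device supports,
     * or -EINVAL on any access problem (sketch of the patched
     * pcix_get_max_mmrbc() logic). */
    static int example_pcix_max_mmrbc(struct pci_dev *dev)
    {
            int cap;
            u32 stat;

            cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
            if (!cap)
                    return -EINVAL;

            if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
                    return -EINVAL;

            /* bits 23:21 encode 0..3, i.e. 512, 1024, 2048 or 4096 bytes */
            return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
    }
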
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 2a943090a3b7..882bd8d29fe3 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -174,14 +174,19 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
174 | pci_read_config_dword(dev, pos, &sz); | 174 | pci_read_config_dword(dev, pos, &sz); |
175 | pci_write_config_dword(dev, pos, l); | 175 | pci_write_config_dword(dev, pos, l); |
176 | 176 | ||
177 | if (!sz) | ||
178 | goto fail; /* BAR not implemented */ | ||
179 | |||
177 | /* | 180 | /* |
178 | * All bits set in sz means the device isn't working properly. | 181 | * All bits set in sz means the device isn't working properly. |
179 | * If the BAR isn't implemented, all bits must be 0. If it's a | 182 | * If it's a memory BAR or a ROM, bit 0 must be clear; if it's |
180 | * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit | 183 | * an io BAR, bit 1 must be clear. |
181 | * 1 must be clear. | ||
182 | */ | 184 | */ |
183 | if (!sz || sz == 0xffffffff) | 185 | if (sz == 0xffffffff) { |
186 | dev_err(&dev->dev, "reg %x: invalid size %#x; broken device?\n", | ||
187 | pos, sz); | ||
184 | goto fail; | 188 | goto fail; |
189 | } | ||
185 | 190 | ||
186 | /* | 191 | /* |
187 | * I don't know how l can have all bits set. Copied from old code. | 192 | * I don't know how l can have all bits set. Copied from old code. |
@@ -244,13 +249,17 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
244 | pos, res); | 249 | pos, res); |
245 | } | 250 | } |
246 | } else { | 251 | } else { |
247 | sz = pci_size(l, sz, mask); | 252 | u32 size = pci_size(l, sz, mask); |
248 | 253 | ||
249 | if (!sz) | 254 | if (!size) { |
255 | dev_err(&dev->dev, "reg %x: invalid size " | ||
256 | "(l %#x sz %#x mask %#x); broken device?", | ||
257 | pos, l, sz, mask); | ||
250 | goto fail; | 258 | goto fail; |
259 | } | ||
251 | 260 | ||
252 | res->start = l; | 261 | res->start = l; |
253 | res->end = l + sz; | 262 | res->end = l + size; |
254 | 263 | ||
255 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); | 264 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); |
256 | } | 265 | } |
@@ -312,7 +321,7 @@ static void __devinit pci_read_bridge_io(struct pci_bus *child) | |||
312 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 321 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
313 | } else { | 322 | } else { |
314 | dev_printk(KERN_DEBUG, &dev->dev, | 323 | dev_printk(KERN_DEBUG, &dev->dev, |
315 | " bridge window [io %04lx - %04lx] reg reading\n", | 324 | " bridge window [io %#06lx-%#06lx] (disabled)\n", |
316 | base, limit); | 325 | base, limit); |
317 | } | 326 | } |
318 | } | 327 | } |
@@ -336,7 +345,7 @@ static void __devinit pci_read_bridge_mmio(struct pci_bus *child) | |||
336 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 345 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
337 | } else { | 346 | } else { |
338 | dev_printk(KERN_DEBUG, &dev->dev, | 347 | dev_printk(KERN_DEBUG, &dev->dev, |
339 | " bridge window [mem 0x%08lx - 0x%08lx] reg reading\n", | 348 | " bridge window [mem %#010lx-%#010lx] (disabled)\n", |
340 | base, limit + 0xfffff); | 349 | base, limit + 0xfffff); |
341 | } | 350 | } |
342 | } | 351 | } |
@@ -387,7 +396,7 @@ static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child) | |||
387 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 396 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
388 | } else { | 397 | } else { |
389 | dev_printk(KERN_DEBUG, &dev->dev, | 398 | dev_printk(KERN_DEBUG, &dev->dev, |
390 | " bridge window [mem 0x%08lx - %08lx pref] reg reading\n", | 399 | " bridge window [mem %#010lx-%#010lx pref] (disabled)\n", |
391 | base, limit + 0xfffff); | 400 | base, limit + 0xfffff); |
392 | } | 401 | } |
393 | } | 402 | } |
@@ -673,16 +682,20 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, | |||
673 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); | 682 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); |
674 | u32 buses, i, j = 0; | 683 | u32 buses, i, j = 0; |
675 | u16 bctl; | 684 | u16 bctl; |
685 | u8 primary, secondary, subordinate; | ||
676 | int broken = 0; | 686 | int broken = 0; |
677 | 687 | ||
678 | pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); | 688 | pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); |
689 | primary = buses & 0xFF; | ||
690 | secondary = (buses >> 8) & 0xFF; | ||
691 | subordinate = (buses >> 16) & 0xFF; | ||
679 | 692 | ||
680 | dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n", | 693 | dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n", |
681 | buses & 0xffffff, pass); | 694 | secondary, subordinate, pass); |
682 | 695 | ||
683 | /* Check if setup is sensible at all */ | 696 | /* Check if setup is sensible at all */ |
684 | if (!pass && | 697 | if (!pass && |
685 | ((buses & 0xff) != bus->number || ((buses >> 8) & 0xff) <= bus->number)) { | 698 | (primary != bus->number || secondary <= bus->number)) { |
686 | dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n"); | 699 | dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n"); |
687 | broken = 1; | 700 | broken = 1; |
688 | } | 701 | } |
@@ -693,15 +706,15 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, | |||
693 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, | 706 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, |
694 | bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); | 707 | bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); |
695 | 708 | ||
696 | if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus && !broken) { | 709 | if ((secondary || subordinate) && !pcibios_assign_all_busses() && |
697 | unsigned int cmax, busnr; | 710 | !is_cardbus && !broken) { |
711 | unsigned int cmax; | ||
698 | /* | 712 | /* |
699 | * Bus already configured by firmware, process it in the first | 713 | * Bus already configured by firmware, process it in the first |
700 | * pass and just note the configuration. | 714 | * pass and just note the configuration. |
701 | */ | 715 | */ |
702 | if (pass) | 716 | if (pass) |
703 | goto out; | 717 | goto out; |
704 | busnr = (buses >> 8) & 0xFF; | ||
705 | 718 | ||
706 | /* | 719 | /* |
707 | * If we already got to this bus through a different bridge, | 720 | * If we already got to this bus through a different bridge, |
@@ -710,13 +723,13 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, | |||
710 | * However, we continue to descend down the hierarchy and | 723 | * However, we continue to descend down the hierarchy and |
711 | * scan remaining child buses. | 724 | * scan remaining child buses. |
712 | */ | 725 | */ |
713 | child = pci_find_bus(pci_domain_nr(bus), busnr); | 726 | child = pci_find_bus(pci_domain_nr(bus), secondary); |
714 | if (!child) { | 727 | if (!child) { |
715 | child = pci_add_new_bus(bus, dev, busnr); | 728 | child = pci_add_new_bus(bus, dev, secondary); |
716 | if (!child) | 729 | if (!child) |
717 | goto out; | 730 | goto out; |
718 | child->primary = buses & 0xFF; | 731 | child->primary = primary; |
719 | child->subordinate = (buses >> 16) & 0xFF; | 732 | child->subordinate = subordinate; |
720 | child->bridge_ctl = bctl; | 733 | child->bridge_ctl = bctl; |
721 | } | 734 | } |
722 | 735 | ||
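
Besides the more verbose BAR-sizing diagnostics, the pci_scan_bridge() hunks replace repeated masking of the PCI_PRIMARY_BUS dword with named primary/secondary/subordinate variables. A short sketch of that decode, assuming a bridge with a valid type-1 header:

    #include <linux/pci.h>

    static void example_read_bus_numbers(struct pci_dev *bridge)
    {
            u32 buses;
            u8 primary, secondary, subordinate;

            pci_read_config_dword(bridge, PCI_PRIMARY_BUS, &buses);
            primary     = buses & 0xff;          /* bus the bridge sits on */
            secondary   = (buses >> 8) & 0xff;   /* bus directly behind it */
            subordinate = (buses >> 16) & 0xff;  /* highest bus behind it  */

            dev_dbg(&bridge->dev, "scanning [bus %02x-%02x] behind bridge\n",
                    secondary, subordinate);
    }
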
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 81d19d5683ac..3ea0b29c0104 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -368,8 +368,9 @@ static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, | |||
368 | bus_region.end = res->end; | 368 | bus_region.end = res->end; |
369 | pcibios_bus_to_resource(dev, res, &bus_region); | 369 | pcibios_bus_to_resource(dev, res, &bus_region); |
370 | 370 | ||
371 | pci_claim_resource(dev, nr); | 371 | if (pci_claim_resource(dev, nr) == 0) |
372 | dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name); | 372 | dev_info(&dev->dev, "quirk: %pR claimed by %s\n", |
373 | res, name); | ||
373 | } | 374 | } |
374 | } | 375 | } |
375 | 376 | ||
@@ -1977,11 +1978,25 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) | |||
1977 | /* | 1978 | /* |
1978 | * Disable PCI Bus Parking and PCI Master read caching on CX700 | 1979 | * Disable PCI Bus Parking and PCI Master read caching on CX700 |
1979 | * which causes unspecified timing errors with a VT6212L on the PCI | 1980 | * which causes unspecified timing errors with a VT6212L on the PCI |
1980 | * bus leading to USB2.0 packet loss. The defaults are that these | 1981 | * bus leading to USB2.0 packet loss. |
1981 | * features are turned off but some BIOSes turn them on. | 1982 | * |
1983 | * This quirk is only enabled if a second (on the external PCI bus) | ||
1984 | * VT6212L is found -- the CX700 core itself also contains a USB | ||
1985 | * host controller with the same PCI ID as the VT6212L. | ||
1982 | */ | 1986 | */ |
1983 | 1987 | ||
1988 | /* Count VT6212L instances */ | ||
1989 | struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA, | ||
1990 | PCI_DEVICE_ID_VIA_8235_USB_2, NULL); | ||
1984 | uint8_t b; | 1991 | uint8_t b; |
1992 | |||
1993 | /* p should contain the first (internal) VT6212L -- see if we have | ||
1994 | an external one by searching again */ | ||
1995 | p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p); | ||
1996 | if (!p) | ||
1997 | return; | ||
1998 | pci_dev_put(p); | ||
1999 | |||
1985 | if (pci_read_config_byte(dev, 0x76, &b) == 0) { | 2000 | if (pci_read_config_byte(dev, 0x76, &b) == 0) { |
1986 | if (b & 0x40) { | 2001 | if (b & 0x40) { |
1987 | /* Turn off PCI Bus Parking */ | 2002 | /* Turn off PCI Bus Parking */ |
@@ -2008,7 +2023,7 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) | |||
2008 | } | 2023 | } |
2009 | } | 2024 | } |
2010 | } | 2025 | } |
2011 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching); | 2026 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching); |
2012 | 2027 | ||
2013 | /* | 2028 | /* |
2014 | * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the | 2029 | * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the |
@@ -2108,6 +2123,7 @@ static void __devinit quirk_disable_msi(struct pci_dev *dev) | |||
2108 | } | 2123 | } |
2109 | } | 2124 | } |
2110 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); | 2125 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); |
2126 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi); | ||
2111 | 2127 | ||
2112 | /* Go through the list of Hypertransport capabilities and | 2128 | /* Go through the list of Hypertransport capabilities and |
2113 | * return 1 if a HT MSI capability is found and enabled */ | 2129 | * return 1 if a HT MSI capability is found and enabled */ |
@@ -2479,6 +2495,39 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374, | |||
2479 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, | 2495 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, |
2480 | quirk_msi_intx_disable_bug); | 2496 | quirk_msi_intx_disable_bug); |
2481 | 2497 | ||
2498 | /* | ||
2499 | * MSI does not work with the AMD RS780/RS880 internal graphics and HDMI audio | ||
2500 | * devices unless the BIOS has initialized the nb_cntl.strap_msi_enable bit. | ||
2501 | */ | ||
2502 | static void __init rs780_int_gfx_disable_msi(struct pci_dev *int_gfx_bridge) | ||
2503 | { | ||
2504 | u32 nb_cntl; | ||
2505 | |||
2506 | if (!int_gfx_bridge->subordinate) | ||
2507 | return; | ||
2508 | |||
2509 | pci_bus_write_config_dword(int_gfx_bridge->bus, PCI_DEVFN(0, 0), | ||
2510 | 0x60, 0); | ||
2511 | pci_bus_read_config_dword(int_gfx_bridge->bus, PCI_DEVFN(0, 0), | ||
2512 | 0x64, &nb_cntl); | ||
2513 | |||
2514 | if (!(nb_cntl & BIT(10))) { | ||
2515 | dev_warn(&int_gfx_bridge->dev, | ||
2516 | FW_WARN "RS780: MSI for internal graphics disabled\n"); | ||
2517 | int_gfx_bridge->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; | ||
2518 | } | ||
2519 | } | ||
2520 | |||
2521 | #define PCI_DEVICE_ID_AMD_RS780_P2P_INT_GFX 0x9602 | ||
2522 | |||
2523 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, | ||
2524 | PCI_DEVICE_ID_AMD_RS780_P2P_INT_GFX, | ||
2525 | rs780_int_gfx_disable_msi); | ||
2526 | /* wrong vendor ID on M4A785TD motherboard: */ | ||
2527 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK, | ||
2528 | PCI_DEVICE_ID_AMD_RS780_P2P_INT_GFX, | ||
2529 | rs780_int_gfx_disable_msi); | ||
2530 | |||
2482 | #endif /* CONFIG_PCI_MSI */ | 2531 | #endif /* CONFIG_PCI_MSI */ |
2483 | 2532 | ||
2484 | #ifdef CONFIG_PCI_IOV | 2533 | #ifdef CONFIG_PCI_IOV |
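
The CX700 quirk is now applied at FINAL rather than EARLY fixup time, and only when a second VT6212L is found on the external PCI bus, which the patch detects by walking the device list with pci_get_device(). A minimal sketch of that "is there more than one?" idiom, reusing the VIA IDs from the hunk:

    #include <linux/pci.h>

    /* Return true if at least two VT6212L USB controllers are present.
     * pci_get_device() drops the reference on the device passed as its
     * 'from' argument and takes one on the device it returns. */
    static bool example_has_external_vt6212l(void)
    {
            struct pci_dev *p;

            p = pci_get_device(PCI_VENDOR_ID_VIA,
                               PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
            if (!p)
                    return false;           /* none at all */

            p = pci_get_device(PCI_VENDOR_ID_VIA,
                               PCI_DEVICE_ID_VIA_8235_USB_2, p);
            if (!p)
                    return false;           /* only the internal one */

            pci_dev_put(p);                 /* balance the reference */
            return true;
    }
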
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 7d678bb15ffb..17bed18d24ad 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -93,8 +93,7 @@ void pci_update_resource(struct pci_dev *dev, int resno) | |||
93 | int pci_claim_resource(struct pci_dev *dev, int resource) | 93 | int pci_claim_resource(struct pci_dev *dev, int resource) |
94 | { | 94 | { |
95 | struct resource *res = &dev->resource[resource]; | 95 | struct resource *res = &dev->resource[resource]; |
96 | struct resource *root; | 96 | struct resource *root, *conflict; |
97 | int err; | ||
98 | 97 | ||
99 | root = pci_find_parent_resource(dev, res); | 98 | root = pci_find_parent_resource(dev, res); |
100 | if (!root) { | 99 | if (!root) { |
@@ -103,12 +102,15 @@ int pci_claim_resource(struct pci_dev *dev, int resource) | |||
103 | return -EINVAL; | 102 | return -EINVAL; |
104 | } | 103 | } |
105 | 104 | ||
106 | err = request_resource(root, res); | 105 | conflict = request_resource_conflict(root, res); |
107 | if (err) | 106 | if (conflict) { |
108 | dev_err(&dev->dev, | 107 | dev_err(&dev->dev, |
109 | "address space collision: %pR already in use\n", res); | 108 | "address space collision: %pR conflicts with %s %pR\n", |
109 | res, conflict->name, conflict); | ||
110 | return -EBUSY; | ||
111 | } | ||
110 | 112 | ||
111 | return err; | 113 | return 0; |
112 | } | 114 | } |
113 | EXPORT_SYMBOL(pci_claim_resource); | 115 | EXPORT_SYMBOL(pci_claim_resource); |
114 | 116 | ||
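
pci_claim_resource() now calls request_resource_conflict(), which hands back the conflicting resource instead of a bare error code, so the failure message can name exactly what the region collided with. A rough sketch of the pattern with a made-up caller:

    #include <linux/ioport.h>
    #include <linux/device.h>
    #include <linux/errno.h>

    /* Try to insert 'res' under 'root' and report what it collided with.
     * 'dev', 'root' and 'res' are assumed to be supplied by the caller. */
    static int example_claim(struct device *dev, struct resource *root,
                             struct resource *res)
    {
            struct resource *conflict;

            conflict = request_resource_conflict(root, res);
            if (conflict) {
                    dev_err(dev, "%pR conflicts with %s %pR\n",
                            res, conflict->name, conflict);
                    return -EBUSY;
            }
            return 0;
    }
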
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c index 5d228071ec69..fb904f444d90 100644 --- a/drivers/pcmcia/at91_cf.c +++ b/drivers/pcmcia/at91_cf.c | |||
@@ -361,7 +361,6 @@ static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg) | |||
361 | struct at91_cf_socket *cf = platform_get_drvdata(pdev); | 361 | struct at91_cf_socket *cf = platform_get_drvdata(pdev); |
362 | struct at91_cf_data *board = cf->board; | 362 | struct at91_cf_data *board = cf->board; |
363 | 363 | ||
364 | pcmcia_socket_dev_suspend(&pdev->dev); | ||
365 | if (device_may_wakeup(&pdev->dev)) { | 364 | if (device_may_wakeup(&pdev->dev)) { |
366 | enable_irq_wake(board->det_pin); | 365 | enable_irq_wake(board->det_pin); |
367 | if (board->irq_pin) | 366 | if (board->irq_pin) |
@@ -381,7 +380,6 @@ static int at91_cf_resume(struct platform_device *pdev) | |||
381 | disable_irq_wake(board->irq_pin); | 380 | disable_irq_wake(board->irq_pin); |
382 | } | 381 | } |
383 | 382 | ||
384 | pcmcia_socket_dev_resume(&pdev->dev); | ||
385 | return 0; | 383 | return 0; |
386 | } | 384 | } |
387 | 385 | ||
diff --git a/drivers/pcmcia/au1000_generic.c b/drivers/pcmcia/au1000_generic.c index 171c8a654887..ac4d089430fd 100644 --- a/drivers/pcmcia/au1000_generic.c +++ b/drivers/pcmcia/au1000_generic.c | |||
@@ -510,17 +510,6 @@ static int au1x00_drv_pcmcia_probe(struct platform_device *dev) | |||
510 | return ret; | 510 | return ret; |
511 | } | 511 | } |
512 | 512 | ||
513 | static int au1x00_drv_pcmcia_suspend(struct platform_device *dev, | ||
514 | pm_message_t state) | ||
515 | { | ||
516 | return pcmcia_socket_dev_suspend(&dev->dev); | ||
517 | } | ||
518 | |||
519 | static int au1x00_drv_pcmcia_resume(struct platform_device *dev) | ||
520 | { | ||
521 | return pcmcia_socket_dev_resume(&dev->dev); | ||
522 | } | ||
523 | |||
524 | static struct platform_driver au1x00_pcmcia_driver = { | 513 | static struct platform_driver au1x00_pcmcia_driver = { |
525 | .driver = { | 514 | .driver = { |
526 | .name = "au1x00-pcmcia", | 515 | .name = "au1x00-pcmcia", |
@@ -528,8 +517,6 @@ static struct platform_driver au1x00_pcmcia_driver = { | |||
528 | }, | 517 | }, |
529 | .probe = au1x00_drv_pcmcia_probe, | 518 | .probe = au1x00_drv_pcmcia_probe, |
530 | .remove = au1x00_drv_pcmcia_remove, | 519 | .remove = au1x00_drv_pcmcia_remove, |
531 | .suspend = au1x00_drv_pcmcia_suspend, | ||
532 | .resume = au1x00_drv_pcmcia_resume, | ||
533 | }; | 520 | }; |
534 | 521 | ||
535 | 522 | ||
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c index 2482ce7ac6dc..93f9ddeb0c36 100644 --- a/drivers/pcmcia/bfin_cf_pcmcia.c +++ b/drivers/pcmcia/bfin_cf_pcmcia.c | |||
@@ -300,16 +300,6 @@ static int __devexit bfin_cf_remove(struct platform_device *pdev) | |||
300 | return 0; | 300 | return 0; |
301 | } | 301 | } |
302 | 302 | ||
303 | static int bfin_cf_suspend(struct platform_device *pdev, pm_message_t mesg) | ||
304 | { | ||
305 | return pcmcia_socket_dev_suspend(&pdev->dev); | ||
306 | } | ||
307 | |||
308 | static int bfin_cf_resume(struct platform_device *pdev) | ||
309 | { | ||
310 | return pcmcia_socket_dev_resume(&pdev->dev); | ||
311 | } | ||
312 | |||
313 | static struct platform_driver bfin_cf_driver = { | 303 | static struct platform_driver bfin_cf_driver = { |
314 | .driver = { | 304 | .driver = { |
315 | .name = (char *)driver_name, | 305 | .name = (char *)driver_name, |
@@ -317,8 +307,6 @@ static struct platform_driver bfin_cf_driver = { | |||
317 | }, | 307 | }, |
318 | .probe = bfin_cf_probe, | 308 | .probe = bfin_cf_probe, |
319 | .remove = __devexit_p(bfin_cf_remove), | 309 | .remove = __devexit_p(bfin_cf_remove), |
320 | .suspend = bfin_cf_suspend, | ||
321 | .resume = bfin_cf_resume, | ||
322 | }; | 310 | }; |
323 | 311 | ||
324 | static int __init bfin_cf_init(void) | 312 | static int __init bfin_cf_init(void) |
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index e679e708db63..75ed866e6953 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c | |||
@@ -76,65 +76,6 @@ DECLARE_RWSEM(pcmcia_socket_list_rwsem); | |||
76 | EXPORT_SYMBOL(pcmcia_socket_list_rwsem); | 76 | EXPORT_SYMBOL(pcmcia_socket_list_rwsem); |
77 | 77 | ||
78 | 78 | ||
79 | /* | ||
80 | * Low-level PCMCIA socket drivers need to register with the PCCard | ||
81 | * core using pcmcia_register_socket. | ||
82 | * | ||
83 | * socket drivers are expected to use the following callbacks in their | ||
84 | * .drv struct: | ||
85 | * - pcmcia_socket_dev_suspend | ||
86 | * - pcmcia_socket_dev_resume | ||
87 | * These functions check for the appropriate struct pcmcia_soket arrays, | ||
88 | * and pass them to the low-level functions pcmcia_{suspend,resume}_socket | ||
89 | */ | ||
90 | static int socket_early_resume(struct pcmcia_socket *skt); | ||
91 | static int socket_late_resume(struct pcmcia_socket *skt); | ||
92 | static int socket_resume(struct pcmcia_socket *skt); | ||
93 | static int socket_suspend(struct pcmcia_socket *skt); | ||
94 | |||
95 | static void pcmcia_socket_dev_run(struct device *dev, | ||
96 | int (*cb)(struct pcmcia_socket *)) | ||
97 | { | ||
98 | struct pcmcia_socket *socket; | ||
99 | |||
100 | down_read(&pcmcia_socket_list_rwsem); | ||
101 | list_for_each_entry(socket, &pcmcia_socket_list, socket_list) { | ||
102 | if (socket->dev.parent != dev) | ||
103 | continue; | ||
104 | mutex_lock(&socket->skt_mutex); | ||
105 | cb(socket); | ||
106 | mutex_unlock(&socket->skt_mutex); | ||
107 | } | ||
108 | up_read(&pcmcia_socket_list_rwsem); | ||
109 | } | ||
110 | |||
111 | int pcmcia_socket_dev_suspend(struct device *dev) | ||
112 | { | ||
113 | pcmcia_socket_dev_run(dev, socket_suspend); | ||
114 | return 0; | ||
115 | } | ||
116 | EXPORT_SYMBOL(pcmcia_socket_dev_suspend); | ||
117 | |||
118 | void pcmcia_socket_dev_early_resume(struct device *dev) | ||
119 | { | ||
120 | pcmcia_socket_dev_run(dev, socket_early_resume); | ||
121 | } | ||
122 | EXPORT_SYMBOL(pcmcia_socket_dev_early_resume); | ||
123 | |||
124 | void pcmcia_socket_dev_late_resume(struct device *dev) | ||
125 | { | ||
126 | pcmcia_socket_dev_run(dev, socket_late_resume); | ||
127 | } | ||
128 | EXPORT_SYMBOL(pcmcia_socket_dev_late_resume); | ||
129 | |||
130 | int pcmcia_socket_dev_resume(struct device *dev) | ||
131 | { | ||
132 | pcmcia_socket_dev_run(dev, socket_resume); | ||
133 | return 0; | ||
134 | } | ||
135 | EXPORT_SYMBOL(pcmcia_socket_dev_resume); | ||
136 | |||
137 | |||
138 | struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt) | 79 | struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt) |
139 | { | 80 | { |
140 | struct device *dev = get_device(&skt->dev); | 81 | struct device *dev = get_device(&skt->dev); |
@@ -578,12 +519,18 @@ static int socket_early_resume(struct pcmcia_socket *skt) | |||
578 | 519 | ||
579 | static int socket_late_resume(struct pcmcia_socket *skt) | 520 | static int socket_late_resume(struct pcmcia_socket *skt) |
580 | { | 521 | { |
522 | int ret; | ||
523 | |||
581 | mutex_lock(&skt->ops_mutex); | 524 | mutex_lock(&skt->ops_mutex); |
582 | skt->state &= ~SOCKET_SUSPEND; | 525 | skt->state &= ~SOCKET_SUSPEND; |
583 | mutex_unlock(&skt->ops_mutex); | 526 | mutex_unlock(&skt->ops_mutex); |
584 | 527 | ||
585 | if (!(skt->state & SOCKET_PRESENT)) | 528 | if (!(skt->state & SOCKET_PRESENT)) { |
586 | return socket_insert(skt); | 529 | ret = socket_insert(skt); |
530 | if (ret == -ENODEV) | ||
531 | ret = 0; | ||
532 | return ret; | ||
533 | } | ||
587 | 534 | ||
588 | if (skt->resume_status) { | 535 | if (skt->resume_status) { |
589 | socket_shutdown(skt); | 536 | socket_shutdown(skt); |
@@ -919,11 +866,66 @@ static void pcmcia_release_socket_class(struct class *data) | |||
919 | } | 866 | } |
920 | 867 | ||
921 | 868 | ||
869 | #ifdef CONFIG_PM | ||
870 | |||
871 | static int __pcmcia_pm_op(struct device *dev, | ||
872 | int (*callback) (struct pcmcia_socket *skt)) | ||
873 | { | ||
874 | struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev); | ||
875 | int ret; | ||
876 | |||
877 | mutex_lock(&s->skt_mutex); | ||
878 | ret = callback(s); | ||
879 | mutex_unlock(&s->skt_mutex); | ||
880 | |||
881 | return ret; | ||
882 | } | ||
883 | |||
884 | static int pcmcia_socket_dev_suspend_noirq(struct device *dev) | ||
885 | { | ||
886 | return __pcmcia_pm_op(dev, socket_suspend); | ||
887 | } | ||
888 | |||
889 | static int pcmcia_socket_dev_resume_noirq(struct device *dev) | ||
890 | { | ||
891 | return __pcmcia_pm_op(dev, socket_early_resume); | ||
892 | } | ||
893 | |||
894 | static int pcmcia_socket_dev_resume(struct device *dev) | ||
895 | { | ||
896 | return __pcmcia_pm_op(dev, socket_late_resume); | ||
897 | } | ||
898 | |||
899 | static const struct dev_pm_ops pcmcia_socket_pm_ops = { | ||
900 | /* dev_resume may be called with IRQs enabled */ | ||
901 | SET_SYSTEM_SLEEP_PM_OPS(NULL, | ||
902 | pcmcia_socket_dev_resume) | ||
903 | |||
904 | /* late suspend must be called with IRQs disabled */ | ||
905 | .suspend_noirq = pcmcia_socket_dev_suspend_noirq, | ||
906 | .freeze_noirq = pcmcia_socket_dev_suspend_noirq, | ||
907 | .poweroff_noirq = pcmcia_socket_dev_suspend_noirq, | ||
908 | |||
909 | /* early resume must be called with IRQs disabled */ | ||
910 | .resume_noirq = pcmcia_socket_dev_resume_noirq, | ||
911 | .thaw_noirq = pcmcia_socket_dev_resume_noirq, | ||
912 | .restore_noirq = pcmcia_socket_dev_resume_noirq, | ||
913 | }; | ||
914 | |||
915 | #define PCMCIA_SOCKET_CLASS_PM_OPS (&pcmcia_socket_pm_ops) | ||
916 | |||
917 | #else /* CONFIG_PM */ | ||
918 | |||
919 | #define PCMCIA_SOCKET_CLASS_PM_OPS NULL | ||
920 | |||
921 | #endif /* CONFIG_PM */ | ||
922 | |||
922 | struct class pcmcia_socket_class = { | 923 | struct class pcmcia_socket_class = { |
923 | .name = "pcmcia_socket", | 924 | .name = "pcmcia_socket", |
924 | .dev_uevent = pcmcia_socket_uevent, | 925 | .dev_uevent = pcmcia_socket_uevent, |
925 | .dev_release = pcmcia_release_socket, | 926 | .dev_release = pcmcia_release_socket, |
926 | .class_release = pcmcia_release_socket_class, | 927 | .class_release = pcmcia_release_socket_class, |
928 | .pm = PCMCIA_SOCKET_CLASS_PM_OPS, | ||
927 | }; | 929 | }; |
928 | EXPORT_SYMBOL(pcmcia_socket_class); | 930 | EXPORT_SYMBOL(pcmcia_socket_class); |
929 | 931 | ||
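
This cs.c change is what lets the long tail of host-driver hunks in this series (at91_cf, au1000, bfin_cf above and the rest below) drop their forwarding suspend/resume callbacks: the exported pcmcia_socket_dev_suspend()/_resume() helpers are gone and the socket core instead attaches a dev_pm_ops to pcmcia_socket_class, running socket suspend and early resume in the _noirq phases. A stripped-down sketch of class-level PM ops with hypothetical callback names:

    #include <linux/device.h>
    #include <linux/pm.h>

    static int example_suspend_noirq(struct device *dev) { return 0; }
    static int example_resume_noirq(struct device *dev)  { return 0; }
    static int example_resume(struct device *dev)        { return 0; }

    static const struct dev_pm_ops example_socket_pm_ops = {
            /* regular resume runs with IRQs enabled */
            SET_SYSTEM_SLEEP_PM_OPS(NULL, example_resume)

            /* suspend and early resume run with IRQs disabled */
            .suspend_noirq  = example_suspend_noirq,
            .freeze_noirq   = example_suspend_noirq,
            .poweroff_noirq = example_suspend_noirq,
            .resume_noirq   = example_resume_noirq,
            .thaw_noirq     = example_resume_noirq,
            .restore_noirq  = example_resume_noirq,
    };

    static struct class example_socket_class = {
            .name = "example_socket",
            .pm   = &example_socket_pm_ops,
    };
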
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 9254ab0b29b1..a520193b6453 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c | |||
@@ -558,37 +558,10 @@ static int __devexit db1x_pcmcia_socket_remove(struct platform_device *pdev) | |||
558 | return 0; | 558 | return 0; |
559 | } | 559 | } |
560 | 560 | ||
561 | #ifdef CONFIG_PM | ||
562 | static int db1x_pcmcia_suspend(struct device *dev) | ||
563 | { | ||
564 | return pcmcia_socket_dev_suspend(dev); | ||
565 | } | ||
566 | |||
567 | static int db1x_pcmcia_resume(struct device *dev) | ||
568 | { | ||
569 | return pcmcia_socket_dev_resume(dev); | ||
570 | } | ||
571 | |||
572 | static struct dev_pm_ops db1x_pcmcia_pmops = { | ||
573 | .resume = db1x_pcmcia_resume, | ||
574 | .suspend = db1x_pcmcia_suspend, | ||
575 | .thaw = db1x_pcmcia_resume, | ||
576 | .freeze = db1x_pcmcia_suspend, | ||
577 | }; | ||
578 | |||
579 | #define DB1XXX_SS_PMOPS &db1x_pcmcia_pmops | ||
580 | |||
581 | #else | ||
582 | |||
583 | #define DB1XXX_SS_PMOPS NULL | ||
584 | |||
585 | #endif | ||
586 | |||
587 | static struct platform_driver db1x_pcmcia_socket_driver = { | 561 | static struct platform_driver db1x_pcmcia_socket_driver = { |
588 | .driver = { | 562 | .driver = { |
589 | .name = "db1xxx_pcmcia", | 563 | .name = "db1xxx_pcmcia", |
590 | .owner = THIS_MODULE, | 564 | .owner = THIS_MODULE, |
591 | .pm = DB1XXX_SS_PMOPS | ||
592 | }, | 565 | }, |
593 | .probe = db1x_pcmcia_socket_probe, | 566 | .probe = db1x_pcmcia_socket_probe, |
594 | .remove = __devexit_p(db1x_pcmcia_socket_remove), | 567 | .remove = __devexit_p(db1x_pcmcia_socket_remove), |
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index ad93ebd7b2a2..52d33b2a5bc5 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c | |||
@@ -509,8 +509,12 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu | |||
509 | p_dev->device_no = (s->device_count++); | 509 | p_dev->device_no = (s->device_count++); |
510 | mutex_unlock(&s->ops_mutex); | 510 | mutex_unlock(&s->ops_mutex); |
511 | 511 | ||
512 | /* max of 2 devices per card */ | 512 | /* max of 2 PFC devices */ |
513 | if (p_dev->device_no >= 2) | 513 | if ((p_dev->device_no >= 2) && (function == 0)) |
514 | goto err_free; | ||
515 | |||
516 | /* max of 4 devices overall */ | ||
517 | if (p_dev->device_no >= 4) | ||
514 | goto err_free; | 518 | goto err_free; |
515 | 519 | ||
516 | p_dev->socket = s; | 520 | p_dev->socket = s; |
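
The device-number limit in pcmcia_device_add() is relaxed: up to four pseudo devices are now allowed per card, while the old "at most two" rule is kept only for function 0 (PFC) devices. A small sketch of that gate with hypothetical parameter names:

    #include <linux/types.h>

    /* Decide whether another pseudo device may be added to a socket.
     * device_no counts devices already created, func is the card function
     * being bound.  Sketch of the patched limits only. */
    static bool example_may_add_device(unsigned int device_no, unsigned int func)
    {
            if (device_no >= 2 && func == 0)    /* max of 2 PFC devices     */
                    return false;
            if (device_no >= 4)                 /* max of 4 devices overall */
                    return false;
            return true;
    }
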
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c index f5da62653313..3003bb3dfcc0 100644 --- a/drivers/pcmcia/i82092.c +++ b/drivers/pcmcia/i82092.c | |||
@@ -39,27 +39,11 @@ static struct pci_device_id i82092aa_pci_ids[] = { | |||
39 | }; | 39 | }; |
40 | MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids); | 40 | MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids); |
41 | 41 | ||
42 | #ifdef CONFIG_PM | ||
43 | static int i82092aa_socket_suspend (struct pci_dev *dev, pm_message_t state) | ||
44 | { | ||
45 | return pcmcia_socket_dev_suspend(&dev->dev); | ||
46 | } | ||
47 | |||
48 | static int i82092aa_socket_resume (struct pci_dev *dev) | ||
49 | { | ||
50 | return pcmcia_socket_dev_resume(&dev->dev); | ||
51 | } | ||
52 | #endif | ||
53 | |||
54 | static struct pci_driver i82092aa_pci_driver = { | 42 | static struct pci_driver i82092aa_pci_driver = { |
55 | .name = "i82092aa", | 43 | .name = "i82092aa", |
56 | .id_table = i82092aa_pci_ids, | 44 | .id_table = i82092aa_pci_ids, |
57 | .probe = i82092aa_pci_probe, | 45 | .probe = i82092aa_pci_probe, |
58 | .remove = __devexit_p(i82092aa_pci_remove), | 46 | .remove = __devexit_p(i82092aa_pci_remove), |
59 | #ifdef CONFIG_PM | ||
60 | .suspend = i82092aa_socket_suspend, | ||
61 | .resume = i82092aa_socket_resume, | ||
62 | #endif | ||
63 | }; | 47 | }; |
64 | 48 | ||
65 | 49 | ||
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c index c13fd9360511..d53d9b5659c7 100644 --- a/drivers/pcmcia/i82365.c +++ b/drivers/pcmcia/i82365.c | |||
@@ -1223,16 +1223,7 @@ static int pcic_init(struct pcmcia_socket *s) | |||
1223 | return 0; | 1223 | return 0; |
1224 | } | 1224 | } |
1225 | 1225 | ||
1226 | static int i82365_drv_pcmcia_suspend(struct platform_device *dev, | ||
1227 | pm_message_t state) | ||
1228 | { | ||
1229 | return pcmcia_socket_dev_suspend(&dev->dev); | ||
1230 | } | ||
1231 | 1226 | ||
1232 | static int i82365_drv_pcmcia_resume(struct platform_device *dev) | ||
1233 | { | ||
1234 | return pcmcia_socket_dev_resume(&dev->dev); | ||
1235 | } | ||
1236 | static struct pccard_operations pcic_operations = { | 1227 | static struct pccard_operations pcic_operations = { |
1237 | .init = pcic_init, | 1228 | .init = pcic_init, |
1238 | .get_status = pcic_get_status, | 1229 | .get_status = pcic_get_status, |
@@ -1248,8 +1239,6 @@ static struct platform_driver i82365_driver = { | |||
1248 | .name = "i82365", | 1239 | .name = "i82365", |
1249 | .owner = THIS_MODULE, | 1240 | .owner = THIS_MODULE, |
1250 | }, | 1241 | }, |
1251 | .suspend = i82365_drv_pcmcia_suspend, | ||
1252 | .resume = i82365_drv_pcmcia_resume, | ||
1253 | }; | 1242 | }; |
1254 | 1243 | ||
1255 | static struct platform_device *i82365_device; | 1244 | static struct platform_device *i82365_device; |
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c index 0ece2cd4a85e..ab21264468d6 100644 --- a/drivers/pcmcia/m32r_cfc.c +++ b/drivers/pcmcia/m32r_cfc.c | |||
@@ -685,16 +685,7 @@ static struct pccard_operations pcc_operations = { | |||
685 | .set_mem_map = pcc_set_mem_map, | 685 | .set_mem_map = pcc_set_mem_map, |
686 | }; | 686 | }; |
687 | 687 | ||
688 | static int cfc_drv_pcmcia_suspend(struct platform_device *dev, | ||
689 | pm_message_t state) | ||
690 | { | ||
691 | return pcmcia_socket_dev_suspend(&dev->dev); | ||
692 | } | ||
693 | 688 | ||
694 | static int cfc_drv_pcmcia_resume(struct platform_device *dev) | ||
695 | { | ||
696 | return pcmcia_socket_dev_resume(&dev->dev); | ||
697 | } | ||
698 | /*====================================================================*/ | 689 | /*====================================================================*/ |
699 | 690 | ||
700 | static struct platform_driver pcc_driver = { | 691 | static struct platform_driver pcc_driver = { |
@@ -702,8 +693,6 @@ static struct platform_driver pcc_driver = { | |||
702 | .name = "cfc", | 693 | .name = "cfc", |
703 | .owner = THIS_MODULE, | 694 | .owner = THIS_MODULE, |
704 | }, | 695 | }, |
705 | .suspend = cfc_drv_pcmcia_suspend, | ||
706 | .resume = cfc_drv_pcmcia_resume, | ||
707 | }; | 696 | }; |
708 | 697 | ||
709 | static struct platform_device pcc_device = { | 698 | static struct platform_device pcc_device = { |
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c index 72844c5a6d05..0caf3db7c700 100644 --- a/drivers/pcmcia/m32r_pcc.c +++ b/drivers/pcmcia/m32r_pcc.c | |||
@@ -663,16 +663,6 @@ static struct pccard_operations pcc_operations = { | |||
663 | .set_mem_map = pcc_set_mem_map, | 663 | .set_mem_map = pcc_set_mem_map, |
664 | }; | 664 | }; |
665 | 665 | ||
666 | static int pcc_drv_pcmcia_suspend(struct platform_device *dev, | ||
667 | pm_message_t state) | ||
668 | { | ||
669 | return pcmcia_socket_dev_suspend(&dev->dev); | ||
670 | } | ||
671 | |||
672 | static int pcc_drv_pcmcia_resume(struct platform_device *dev) | ||
673 | { | ||
674 | return pcmcia_socket_dev_resume(&dev->dev); | ||
675 | } | ||
676 | /*====================================================================*/ | 666 | /*====================================================================*/ |
677 | 667 | ||
678 | static struct platform_driver pcc_driver = { | 668 | static struct platform_driver pcc_driver = { |
@@ -680,8 +670,6 @@ static struct platform_driver pcc_driver = { | |||
680 | .name = "pcc", | 670 | .name = "pcc", |
681 | .owner = THIS_MODULE, | 671 | .owner = THIS_MODULE, |
682 | }, | 672 | }, |
683 | .suspend = pcc_drv_pcmcia_suspend, | ||
684 | .resume = pcc_drv_pcmcia_resume, | ||
685 | }; | 673 | }; |
686 | 674 | ||
687 | static struct platform_device pcc_device = { | 675 | static struct platform_device pcc_device = { |
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index 61c215918128..01ef7de15322 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c | |||
@@ -1288,21 +1288,6 @@ static int m8xx_remove(struct of_device *ofdev) | |||
1288 | return 0; | 1288 | return 0; |
1289 | } | 1289 | } |
1290 | 1290 | ||
1291 | #ifdef CONFIG_PM | ||
1292 | static int m8xx_suspend(struct platform_device *pdev, pm_message_t state) | ||
1293 | { | ||
1294 | return pcmcia_socket_dev_suspend(&pdev->dev); | ||
1295 | } | ||
1296 | |||
1297 | static int m8xx_resume(struct platform_device *pdev) | ||
1298 | { | ||
1299 | return pcmcia_socket_dev_resume(&pdev->dev); | ||
1300 | } | ||
1301 | #else | ||
1302 | #define m8xx_suspend NULL | ||
1303 | #define m8xx_resume NULL | ||
1304 | #endif | ||
1305 | |||
1306 | static const struct of_device_id m8xx_pcmcia_match[] = { | 1291 | static const struct of_device_id m8xx_pcmcia_match[] = { |
1307 | { | 1292 | { |
1308 | .type = "pcmcia", | 1293 | .type = "pcmcia", |
@@ -1318,8 +1303,6 @@ static struct of_platform_driver m8xx_pcmcia_driver = { | |||
1318 | .match_table = m8xx_pcmcia_match, | 1303 | .match_table = m8xx_pcmcia_match, |
1319 | .probe = m8xx_probe, | 1304 | .probe = m8xx_probe, |
1320 | .remove = m8xx_remove, | 1305 | .remove = m8xx_remove, |
1321 | .suspend = m8xx_suspend, | ||
1322 | .resume = m8xx_resume, | ||
1323 | }; | 1306 | }; |
1324 | 1307 | ||
1325 | static int __init m8xx_init(void) | 1308 | static int __init m8xx_init(void) |
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c index 3ef991552398..9edc396577b9 100644 --- a/drivers/pcmcia/omap_cf.c +++ b/drivers/pcmcia/omap_cf.c | |||
@@ -330,24 +330,12 @@ static int __exit omap_cf_remove(struct platform_device *pdev) | |||
330 | return 0; | 330 | return 0; |
331 | } | 331 | } |
332 | 332 | ||
333 | static int omap_cf_suspend(struct platform_device *pdev, pm_message_t mesg) | ||
334 | { | ||
335 | return pcmcia_socket_dev_suspend(&pdev->dev); | ||
336 | } | ||
337 | |||
338 | static int omap_cf_resume(struct platform_device *pdev) | ||
339 | { | ||
340 | return pcmcia_socket_dev_resume(&pdev->dev); | ||
341 | } | ||
342 | |||
343 | static struct platform_driver omap_cf_driver = { | 333 | static struct platform_driver omap_cf_driver = { |
344 | .driver = { | 334 | .driver = { |
345 | .name = (char *) driver_name, | 335 | .name = (char *) driver_name, |
346 | .owner = THIS_MODULE, | 336 | .owner = THIS_MODULE, |
347 | }, | 337 | }, |
348 | .remove = __exit_p(omap_cf_remove), | 338 | .remove = __exit_p(omap_cf_remove), |
349 | .suspend = omap_cf_suspend, | ||
350 | .resume = omap_cf_resume, | ||
351 | }; | 339 | }; |
352 | 340 | ||
353 | static int __init omap_cf_init(void) | 341 | static int __init omap_cf_init(void) |
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c index 7ba57a565cd7..4a34268cc512 100644 --- a/drivers/pcmcia/pd6729.c +++ b/drivers/pcmcia/pd6729.c | |||
@@ -14,13 +14,13 @@ | |||
14 | #include <linux/workqueue.h> | 14 | #include <linux/workqueue.h> |
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/io.h> | ||
17 | 18 | ||
18 | #include <pcmcia/cs_types.h> | 19 | #include <pcmcia/cs_types.h> |
19 | #include <pcmcia/ss.h> | 20 | #include <pcmcia/ss.h> |
20 | #include <pcmcia/cs.h> | 21 | #include <pcmcia/cs.h> |
21 | 22 | ||
22 | #include <asm/system.h> | 23 | #include <asm/system.h> |
23 | #include <asm/io.h> | ||
24 | 24 | ||
25 | #include "pd6729.h" | 25 | #include "pd6729.h" |
26 | #include "i82365.h" | 26 | #include "i82365.h" |
@@ -222,9 +222,9 @@ static irqreturn_t pd6729_interrupt(int irq, void *dev) | |||
222 | ? SS_READY : 0; | 222 | ? SS_READY : 0; |
223 | } | 223 | } |
224 | 224 | ||
225 | if (events) { | 225 | if (events) |
226 | pcmcia_parse_events(&socket[i].socket, events); | 226 | pcmcia_parse_events(&socket[i].socket, events); |
227 | } | 227 | |
228 | active |= events; | 228 | active |= events; |
229 | } | 229 | } |
230 | 230 | ||
@@ -256,9 +256,8 @@ static int pd6729_get_status(struct pcmcia_socket *sock, u_int *value) | |||
256 | status = indirect_read(socket, I365_STATUS); | 256 | status = indirect_read(socket, I365_STATUS); |
257 | *value = 0; | 257 | *value = 0; |
258 | 258 | ||
259 | if ((status & I365_CS_DETECT) == I365_CS_DETECT) { | 259 | if ((status & I365_CS_DETECT) == I365_CS_DETECT) |
260 | *value |= SS_DETECT; | 260 | *value |= SS_DETECT; |
261 | } | ||
262 | 261 | ||
263 | /* | 262 | /* |
264 | * IO cards have a different meaning of bits 0,1 | 263 | * IO cards have a different meaning of bits 0,1 |
@@ -308,7 +307,7 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state) | |||
308 | socket->card_irq = state->io_irq; | 307 | socket->card_irq = state->io_irq; |
309 | 308 | ||
310 | reg = 0; | 309 | reg = 0; |
311 | /* The reset bit has "inverse" logic */ | 310 | /* The reset bit has "inverse" logic */ |
312 | if (!(state->flags & SS_RESET)) | 311 | if (!(state->flags & SS_RESET)) |
313 | reg |= I365_PC_RESET; | 312 | reg |= I365_PC_RESET; |
314 | if (state->flags & SS_IOCARD) | 313 | if (state->flags & SS_IOCARD) |
@@ -380,7 +379,7 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state) | |||
380 | indirect_write(socket, I365_POWER, reg); | 379 | indirect_write(socket, I365_POWER, reg); |
381 | 380 | ||
382 | if (irq_mode == 1) { | 381 | if (irq_mode == 1) { |
383 | /* all interrupts are to be done as PCI interrupts */ | 382 | /* all interrupts are to be done as PCI interrupts */ |
384 | data = PD67_EC1_INV_MGMT_IRQ | PD67_EC1_INV_CARD_IRQ; | 383 | data = PD67_EC1_INV_MGMT_IRQ | PD67_EC1_INV_CARD_IRQ; |
385 | } else | 384 | } else |
386 | data = 0; | 385 | data = 0; |
@@ -391,9 +390,9 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state) | |||
391 | /* Enable specific interrupt events */ | 390 | /* Enable specific interrupt events */ |
392 | 391 | ||
393 | reg = 0x00; | 392 | reg = 0x00; |
394 | if (state->csc_mask & SS_DETECT) { | 393 | if (state->csc_mask & SS_DETECT) |
395 | reg |= I365_CSC_DETECT; | 394 | reg |= I365_CSC_DETECT; |
396 | } | 395 | |
397 | if (state->flags & SS_IOCARD) { | 396 | if (state->flags & SS_IOCARD) { |
398 | if (state->csc_mask & SS_STSCHG) | 397 | if (state->csc_mask & SS_STSCHG) |
399 | reg |= I365_CSC_STSCHG; | 398 | reg |= I365_CSC_STSCHG; |
@@ -450,9 +449,12 @@ static int pd6729_set_io_map(struct pcmcia_socket *sock, | |||
450 | 449 | ||
451 | ioctl = indirect_read(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map); | 450 | ioctl = indirect_read(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map); |
452 | 451 | ||
453 | if (io->flags & MAP_0WS) ioctl |= I365_IOCTL_0WS(map); | 452 | if (io->flags & MAP_0WS) |
454 | if (io->flags & MAP_16BIT) ioctl |= I365_IOCTL_16BIT(map); | 453 | ioctl |= I365_IOCTL_0WS(map); |
455 | if (io->flags & MAP_AUTOSZ) ioctl |= I365_IOCTL_IOCS16(map); | 454 | if (io->flags & MAP_16BIT) |
455 | ioctl |= I365_IOCTL_16BIT(map); | ||
456 | if (io->flags & MAP_AUTOSZ) | ||
457 | ioctl |= I365_IOCTL_IOCS16(map); | ||
456 | 458 | ||
457 | indirect_write(socket, I365_IOCTL, ioctl); | 459 | indirect_write(socket, I365_IOCTL, ioctl); |
458 | 460 | ||
@@ -497,7 +499,7 @@ static int pd6729_set_mem_map(struct pcmcia_socket *sock, | |||
497 | 499 | ||
498 | /* write the stop address */ | 500 | /* write the stop address */ |
499 | 501 | ||
500 | i= (mem->res->end >> 12) & 0x0fff; | 502 | i = (mem->res->end >> 12) & 0x0fff; |
501 | switch (to_cycles(mem->speed)) { | 503 | switch (to_cycles(mem->speed)) { |
502 | case 0: | 504 | case 0: |
503 | break; | 505 | break; |
@@ -563,7 +565,7 @@ static int pd6729_init(struct pcmcia_socket *sock) | |||
563 | 565 | ||
564 | /* the pccard structure and its functions */ | 566 | /* the pccard structure and its functions */ |
565 | static struct pccard_operations pd6729_operations = { | 567 | static struct pccard_operations pd6729_operations = { |
566 | .init = pd6729_init, | 568 | .init = pd6729_init, |
567 | .get_status = pd6729_get_status, | 569 | .get_status = pd6729_get_status, |
568 | .set_socket = pd6729_set_socket, | 570 | .set_socket = pd6729_set_socket, |
569 | .set_io_map = pd6729_set_io_map, | 571 | .set_io_map = pd6729_set_io_map, |
@@ -578,8 +580,13 @@ static irqreturn_t pd6729_test(int irq, void *dev) | |||
578 | 580 | ||
579 | static int pd6729_check_irq(int irq) | 581 | static int pd6729_check_irq(int irq) |
580 | { | 582 | { |
581 | if (request_irq(irq, pd6729_test, IRQF_PROBE_SHARED, "x", pd6729_test) | 583 | int ret; |
582 | != 0) return -1; | 584 | |
585 | ret = request_irq(irq, pd6729_test, IRQF_PROBE_SHARED, "x", | ||
586 | pd6729_test); | ||
587 | if (ret) | ||
588 | return -1; | ||
589 | |||
583 | free_irq(irq, pd6729_test); | 590 | free_irq(irq, pd6729_test); |
584 | return 0; | 591 | return 0; |
585 | } | 592 | } |
@@ -591,7 +598,7 @@ static u_int __devinit pd6729_isa_scan(void) | |||
591 | 598 | ||
592 | if (irq_mode == 1) { | 599 | if (irq_mode == 1) { |
593 | printk(KERN_INFO "pd6729: PCI card interrupts, " | 600 | printk(KERN_INFO "pd6729: PCI card interrupts, " |
594 | "PCI status changes\n"); | 601 | "PCI status changes\n"); |
595 | return 0; | 602 | return 0; |
596 | } | 603 | } |
597 | 604 | ||
@@ -607,9 +614,10 @@ static u_int __devinit pd6729_isa_scan(void) | |||
607 | if (mask & (1<<i)) | 614 | if (mask & (1<<i)) |
608 | printk("%s%d", ((mask & ((1<<i)-1)) ? "," : ""), i); | 615 | printk("%s%d", ((mask & ((1<<i)-1)) ? "," : ""), i); |
609 | 616 | ||
610 | if (mask == 0) printk("none!"); | 617 | if (mask == 0) |
611 | 618 | printk("none!"); | |
612 | printk(" polling status changes.\n"); | 619 | else |
620 | printk(" polling status changes.\n"); | ||
613 | 621 | ||
614 | return mask; | 622 | return mask; |
615 | } | 623 | } |
@@ -624,11 +632,16 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, | |||
624 | 632 | ||
625 | socket = kzalloc(sizeof(struct pd6729_socket) * MAX_SOCKETS, | 633 | socket = kzalloc(sizeof(struct pd6729_socket) * MAX_SOCKETS, |
626 | GFP_KERNEL); | 634 | GFP_KERNEL); |
627 | if (!socket) | 635 | if (!socket) { |
636 | dev_warn(&dev->dev, "failed to kzalloc socket.\n"); | ||
628 | return -ENOMEM; | 637 | return -ENOMEM; |
638 | } | ||
629 | 639 | ||
630 | if ((ret = pci_enable_device(dev))) | 640 | ret = pci_enable_device(dev); |
641 | if (ret) { | ||
642 | dev_warn(&dev->dev, "failed to enable pci_device.\n"); | ||
631 | goto err_out_free_mem; | 643 | goto err_out_free_mem; |
644 | } | ||
632 | 645 | ||
633 | if (!pci_resource_start(dev, 0)) { | 646 | if (!pci_resource_start(dev, 0)) { |
634 | dev_warn(&dev->dev, "refusing to load the driver as the " | 647 | dev_warn(&dev->dev, "refusing to load the driver as the " |
@@ -639,7 +652,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, | |||
639 | dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx " | 652 | dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx " |
640 | "on irq %d\n", | 653 | "on irq %d\n", |
641 | (unsigned long long)pci_resource_start(dev, 0), dev->irq); | 654 | (unsigned long long)pci_resource_start(dev, 0), dev->irq); |
642 | /* | 655 | /* |
643 | * Since we have no memory BARs some firmware may not | 656 | * Since we have no memory BARs some firmware may not |
644 | * have had PCI_COMMAND_MEMORY enabled, yet the device needs it. | 657 | * have had PCI_COMMAND_MEMORY enabled, yet the device needs it. |
645 | */ | 658 | */ |
@@ -685,8 +698,9 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, | |||
685 | pci_set_drvdata(dev, socket); | 698 | pci_set_drvdata(dev, socket); |
686 | if (irq_mode == 1) { | 699 | if (irq_mode == 1) { |
687 | /* Register the interrupt handler */ | 700 | /* Register the interrupt handler */ |
688 | if ((ret = request_irq(dev->irq, pd6729_interrupt, IRQF_SHARED, | 701 | ret = request_irq(dev->irq, pd6729_interrupt, IRQF_SHARED, |
689 | "pd6729", socket))) { | 702 | "pd6729", socket); |
703 | if (ret) { | ||
690 | dev_err(&dev->dev, "Failed to register irq %d\n", | 704 | dev_err(&dev->dev, "Failed to register irq %d\n", |
691 | dev->irq); | 705 | dev->irq); |
692 | goto err_out_free_res; | 706 | goto err_out_free_res; |
@@ -750,18 +764,6 @@ static void __devexit pd6729_pci_remove(struct pci_dev *dev) | |||
750 | kfree(socket); | 764 | kfree(socket); |
751 | } | 765 | } |
752 | 766 | ||
753 | #ifdef CONFIG_PM | ||
754 | static int pd6729_socket_suspend(struct pci_dev *dev, pm_message_t state) | ||
755 | { | ||
756 | return pcmcia_socket_dev_suspend(&dev->dev); | ||
757 | } | ||
758 | |||
759 | static int pd6729_socket_resume(struct pci_dev *dev) | ||
760 | { | ||
761 | return pcmcia_socket_dev_resume(&dev->dev); | ||
762 | } | ||
763 | #endif | ||
764 | |||
765 | static struct pci_device_id pd6729_pci_ids[] = { | 767 | static struct pci_device_id pd6729_pci_ids[] = { |
766 | { | 768 | { |
767 | .vendor = PCI_VENDOR_ID_CIRRUS, | 769 | .vendor = PCI_VENDOR_ID_CIRRUS, |
@@ -778,10 +780,6 @@ static struct pci_driver pd6729_pci_driver = { | |||
778 | .id_table = pd6729_pci_ids, | 780 | .id_table = pd6729_pci_ids, |
779 | .probe = pd6729_pci_probe, | 781 | .probe = pd6729_pci_probe, |
780 | .remove = __devexit_p(pd6729_pci_remove), | 782 | .remove = __devexit_p(pd6729_pci_remove), |
781 | #ifdef CONFIG_PM | ||
782 | .suspend = pd6729_socket_suspend, | ||
783 | .resume = pd6729_socket_resume, | ||
784 | #endif | ||
785 | }; | 783 | }; |
786 | 784 | ||
787 | static int pd6729_module_init(void) | 785 | static int pd6729_module_init(void) |
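
Most of the pd6729 hunks are style cleanups; the recurring one is splitting the assignment out of the if condition when checking request_irq() and pci_enable_device() return values. A tiny sketch of the preferred form, with a hypothetical handler and cookie, in the spirit of the probe-an-IRQ check above:

    #include <linux/interrupt.h>

    static irqreturn_t example_handler(int irq, void *cookie)
    {
            return IRQ_HANDLED;
    }

    static int example_probe_irq(int irq, void *cookie)
    {
            int ret;

            /* assign first, then test -- easier to read than
             * "if ((ret = request_irq(...)))" */
            ret = request_irq(irq, example_handler, IRQF_SHARED,
                              "example", cookie);
            if (ret)
                    return ret;

            /* interrupt is usable; release it again for this probe */
            free_irq(irq, cookie);
            return 0;
    }
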
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index 76e640bccde8..0a876fabfe44 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c | |||
@@ -325,19 +325,13 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev) | |||
325 | return 0; | 325 | return 0; |
326 | } | 326 | } |
327 | 327 | ||
328 | static int pxa2xx_drv_pcmcia_suspend(struct device *dev) | ||
329 | { | ||
330 | return pcmcia_socket_dev_suspend(dev); | ||
331 | } | ||
332 | |||
333 | static int pxa2xx_drv_pcmcia_resume(struct device *dev) | 328 | static int pxa2xx_drv_pcmcia_resume(struct device *dev) |
334 | { | 329 | { |
335 | pxa2xx_configure_sockets(dev); | 330 | pxa2xx_configure_sockets(dev); |
336 | return pcmcia_socket_dev_resume(dev); | 331 | return 0; |
337 | } | 332 | } |
338 | 333 | ||
339 | static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = { | 334 | static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = { |
340 | .suspend = pxa2xx_drv_pcmcia_suspend, | ||
341 | .resume = pxa2xx_drv_pcmcia_resume, | 335 | .resume = pxa2xx_drv_pcmcia_resume, |
342 | }; | 336 | }; |
343 | 337 | ||
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 4663b3fa9f96..2e47991eccf6 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c | |||
@@ -810,6 +810,13 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long | |||
810 | unsigned long size = end - start + 1; | 810 | unsigned long size = end - start + 1; |
811 | int ret = 0; | 811 | int ret = 0; |
812 | 812 | ||
813 | #if defined(CONFIG_X86) | ||
814 | /* on x86, avoid anything < 0x100 for it is often used for | ||
815 | * legacy platform devices */ | ||
816 | if (start < 0x100) | ||
817 | start = 0x100; | ||
818 | #endif | ||
819 | |||
813 | if (end < start) | 820 | if (end < start) |
814 | return -EINVAL; | 821 | return -EINVAL; |
815 | 822 | ||
@@ -867,10 +874,8 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s) | |||
867 | if (res == &ioport_resource) | 874 | if (res == &ioport_resource) |
868 | continue; | 875 | continue; |
869 | dev_printk(KERN_INFO, &s->cb_dev->dev, | 876 | dev_printk(KERN_INFO, &s->cb_dev->dev, |
870 | "pcmcia: parent PCI bridge I/O " | 877 | "pcmcia: parent PCI bridge window: %pR\n", |
871 | "window: 0x%llx - 0x%llx\n", | 878 | res); |
872 | (unsigned long long)res->start, | ||
873 | (unsigned long long)res->end); | ||
874 | if (!adjust_io(s, ADD_MANAGED_RESOURCE, res->start, res->end)) | 879 | if (!adjust_io(s, ADD_MANAGED_RESOURCE, res->start, res->end)) |
875 | done |= IORESOURCE_IO; | 880 | done |= IORESOURCE_IO; |
876 | 881 | ||
@@ -880,10 +885,8 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s) | |||
880 | if (res == &iomem_resource) | 885 | if (res == &iomem_resource) |
881 | continue; | 886 | continue; |
882 | dev_printk(KERN_INFO, &s->cb_dev->dev, | 887 | dev_printk(KERN_INFO, &s->cb_dev->dev, |
883 | "pcmcia: parent PCI bridge Memory " | 888 | "pcmcia: parent PCI bridge window: %pR\n", |
884 | "window: 0x%llx - 0x%llx\n", | 889 | res); |
885 | (unsigned long long)res->start, | ||
886 | (unsigned long long)res->end); | ||
887 | if (!adjust_memory(s, ADD_MANAGED_RESOURCE, res->start, res->end)) | 890 | if (!adjust_memory(s, ADD_MANAGED_RESOURCE, res->start, res->end)) |
888 | done |= IORESOURCE_MEM; | 891 | done |= IORESOURCE_MEM; |
889 | } | 892 | } |
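
adjust_io() now refuses to hand out I/O ports below 0x100 on x86, where that range is typically occupied by legacy platform devices; it simply bumps the start of the requested window. A one-function sketch of the clamp:

    #include <linux/kernel.h>

    /* Clamp a candidate I/O window so it starts at or above 0x100 on x86.
     * Returns the adjusted start; sketch of the #ifdef CONFIG_X86 hunk
     * above. */
    static unsigned long example_clamp_io_start(unsigned long start)
    {
    #if defined(CONFIG_X86)
            if (start < 0x100)
                    start = 0x100;
    #endif
            return start;
    }
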
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c index 8db86b90c200..518896241429 100644 --- a/drivers/pcmcia/sa1100_generic.c +++ b/drivers/pcmcia/sa1100_generic.c | |||
@@ -95,17 +95,6 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev) | |||
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
97 | 97 | ||
98 | static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev, | ||
99 | pm_message_t state) | ||
100 | { | ||
101 | return pcmcia_socket_dev_suspend(&dev->dev); | ||
102 | } | ||
103 | |||
104 | static int sa11x0_drv_pcmcia_resume(struct platform_device *dev) | ||
105 | { | ||
106 | return pcmcia_socket_dev_resume(&dev->dev); | ||
107 | } | ||
108 | |||
109 | static struct platform_driver sa11x0_pcmcia_driver = { | 98 | static struct platform_driver sa11x0_pcmcia_driver = { |
110 | .driver = { | 99 | .driver = { |
111 | .name = "sa11x0-pcmcia", | 100 | .name = "sa11x0-pcmcia", |
@@ -113,8 +102,6 @@ static struct platform_driver sa11x0_pcmcia_driver = { | |||
113 | }, | 102 | }, |
114 | .probe = sa11x0_drv_pcmcia_probe, | 103 | .probe = sa11x0_drv_pcmcia_probe, |
115 | .remove = sa11x0_drv_pcmcia_remove, | 104 | .remove = sa11x0_drv_pcmcia_remove, |
116 | .suspend = sa11x0_drv_pcmcia_suspend, | ||
117 | .resume = sa11x0_drv_pcmcia_resume, | ||
118 | }; | 105 | }; |
119 | 106 | ||
120 | /* sa11x0_pcmcia_init() | 107 | /* sa11x0_pcmcia_init() |
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c index db79ca61cf96..799e9793e49e 100644 --- a/drivers/pcmcia/sa1111_generic.c +++ b/drivers/pcmcia/sa1111_generic.c | |||
@@ -213,16 +213,6 @@ static int __devexit pcmcia_remove(struct sa1111_dev *dev) | |||
213 | return 0; | 213 | return 0; |
214 | } | 214 | } |
215 | 215 | ||
216 | static int pcmcia_suspend(struct sa1111_dev *dev, pm_message_t state) | ||
217 | { | ||
218 | return pcmcia_socket_dev_suspend(&dev->dev); | ||
219 | } | ||
220 | |||
221 | static int pcmcia_resume(struct sa1111_dev *dev) | ||
222 | { | ||
223 | return pcmcia_socket_dev_resume(&dev->dev); | ||
224 | } | ||
225 | |||
226 | static struct sa1111_driver pcmcia_driver = { | 216 | static struct sa1111_driver pcmcia_driver = { |
227 | .drv = { | 217 | .drv = { |
228 | .name = "sa1111-pcmcia", | 218 | .name = "sa1111-pcmcia", |
@@ -230,8 +220,6 @@ static struct sa1111_driver pcmcia_driver = { | |||
230 | .devid = SA1111_DEVID_PCMCIA, | 220 | .devid = SA1111_DEVID_PCMCIA, |
231 | .probe = pcmcia_probe, | 221 | .probe = pcmcia_probe, |
232 | .remove = __devexit_p(pcmcia_remove), | 222 | .remove = __devexit_p(pcmcia_remove), |
233 | .suspend = pcmcia_suspend, | ||
234 | .resume = pcmcia_resume, | ||
235 | }; | 223 | }; |
236 | 224 | ||
237 | static int __init sa1111_drv_pcmcia_init(void) | 225 | static int __init sa1111_drv_pcmcia_init(void) |
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c index 12c49ee135e1..bac85f3236bb 100644 --- a/drivers/pcmcia/tcic.c +++ b/drivers/pcmcia/tcic.c | |||
@@ -348,16 +348,6 @@ static int __init get_tcic_id(void) | |||
348 | return id; | 348 | return id; |
349 | } | 349 | } |
350 | 350 | ||
351 | static int tcic_drv_pcmcia_suspend(struct platform_device *dev, | ||
352 | pm_message_t state) | ||
353 | { | ||
354 | return pcmcia_socket_dev_suspend(&dev->dev); | ||
355 | } | ||
356 | |||
357 | static int tcic_drv_pcmcia_resume(struct platform_device *dev) | ||
358 | { | ||
359 | return pcmcia_socket_dev_resume(&dev->dev); | ||
360 | } | ||
361 | /*====================================================================*/ | 351 | /*====================================================================*/ |
362 | 352 | ||
363 | static struct platform_driver tcic_driver = { | 353 | static struct platform_driver tcic_driver = { |
@@ -365,8 +355,6 @@ static struct platform_driver tcic_driver = { | |||
365 | .name = "tcic-pcmcia", | 355 | .name = "tcic-pcmcia", |
366 | .owner = THIS_MODULE, | 356 | .owner = THIS_MODULE, |
367 | }, | 357 | }, |
368 | .suspend = tcic_drv_pcmcia_suspend, | ||
369 | .resume = tcic_drv_pcmcia_resume, | ||
370 | }; | 358 | }; |
371 | 359 | ||
372 | static struct platform_device tcic_device = { | 360 | static struct platform_device tcic_device = { |
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c index aaccdb9f4ba1..86e4a1a3c642 100644 --- a/drivers/pcmcia/vrc4171_card.c +++ b/drivers/pcmcia/vrc4171_card.c | |||
@@ -705,24 +705,11 @@ static int __devinit vrc4171_card_setup(char *options) | |||
705 | 705 | ||
706 | __setup("vrc4171_card=", vrc4171_card_setup); | 706 | __setup("vrc4171_card=", vrc4171_card_setup); |
707 | 707 | ||
708 | static int vrc4171_card_suspend(struct platform_device *dev, | ||
709 | pm_message_t state) | ||
710 | { | ||
711 | return pcmcia_socket_dev_suspend(&dev->dev); | ||
712 | } | ||
713 | |||
714 | static int vrc4171_card_resume(struct platform_device *dev) | ||
715 | { | ||
716 | return pcmcia_socket_dev_resume(&dev->dev); | ||
717 | } | ||
718 | |||
719 | static struct platform_driver vrc4171_card_driver = { | 708 | static struct platform_driver vrc4171_card_driver = { |
720 | .driver = { | 709 | .driver = { |
721 | .name = vrc4171_card_name, | 710 | .name = vrc4171_card_name, |
722 | .owner = THIS_MODULE, | 711 | .owner = THIS_MODULE, |
723 | }, | 712 | }, |
724 | .suspend = vrc4171_card_suspend, | ||
725 | .resume = vrc4171_card_resume, | ||
726 | }; | 713 | }; |
727 | 714 | ||
728 | static int __devinit vrc4171_card_init(void) | 715 | static int __devinit vrc4171_card_init(void) |
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 418988ab6edf..f19ad02374d9 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c | |||
@@ -1290,12 +1290,9 @@ static int yenta_dev_suspend_noirq(struct device *dev) | |||
1290 | { | 1290 | { |
1291 | struct pci_dev *pdev = to_pci_dev(dev); | 1291 | struct pci_dev *pdev = to_pci_dev(dev); |
1292 | struct yenta_socket *socket = pci_get_drvdata(pdev); | 1292 | struct yenta_socket *socket = pci_get_drvdata(pdev); |
1293 | int ret; | ||
1294 | |||
1295 | ret = pcmcia_socket_dev_suspend(dev); | ||
1296 | 1293 | ||
1297 | if (!socket) | 1294 | if (!socket) |
1298 | return ret; | 1295 | return 0; |
1299 | 1296 | ||
1300 | if (socket->type && socket->type->save_state) | 1297 | if (socket->type && socket->type->save_state) |
1301 | socket->type->save_state(socket); | 1298 | socket->type->save_state(socket); |
@@ -1312,7 +1309,7 @@ static int yenta_dev_suspend_noirq(struct device *dev) | |||
1312 | */ | 1309 | */ |
1313 | /* pci_set_power_state(dev, 3); */ | 1310 | /* pci_set_power_state(dev, 3); */ |
1314 | 1311 | ||
1315 | return ret; | 1312 | return 0; |
1316 | } | 1313 | } |
1317 | 1314 | ||
1318 | static int yenta_dev_resume_noirq(struct device *dev) | 1315 | static int yenta_dev_resume_noirq(struct device *dev) |
@@ -1336,26 +1333,16 @@ static int yenta_dev_resume_noirq(struct device *dev) | |||
1336 | if (socket->type && socket->type->restore_state) | 1333 | if (socket->type && socket->type->restore_state) |
1337 | socket->type->restore_state(socket); | 1334 | socket->type->restore_state(socket); |
1338 | 1335 | ||
1339 | pcmcia_socket_dev_early_resume(dev); | ||
1340 | return 0; | ||
1341 | } | ||
1342 | |||
1343 | static int yenta_dev_resume(struct device *dev) | ||
1344 | { | ||
1345 | pcmcia_socket_dev_late_resume(dev); | ||
1346 | return 0; | 1336 | return 0; |
1347 | } | 1337 | } |
1348 | 1338 | ||
1349 | static const struct dev_pm_ops yenta_pm_ops = { | 1339 | static const struct dev_pm_ops yenta_pm_ops = { |
1350 | .suspend_noirq = yenta_dev_suspend_noirq, | 1340 | .suspend_noirq = yenta_dev_suspend_noirq, |
1351 | .resume_noirq = yenta_dev_resume_noirq, | 1341 | .resume_noirq = yenta_dev_resume_noirq, |
1352 | .resume = yenta_dev_resume, | ||
1353 | .freeze_noirq = yenta_dev_suspend_noirq, | 1342 | .freeze_noirq = yenta_dev_suspend_noirq, |
1354 | .thaw_noirq = yenta_dev_resume_noirq, | 1343 | .thaw_noirq = yenta_dev_resume_noirq, |
1355 | .thaw = yenta_dev_resume, | ||
1356 | .poweroff_noirq = yenta_dev_suspend_noirq, | 1344 | .poweroff_noirq = yenta_dev_suspend_noirq, |
1357 | .restore_noirq = yenta_dev_resume_noirq, | 1345 | .restore_noirq = yenta_dev_resume_noirq, |
1358 | .restore = yenta_dev_resume, | ||
1359 | }; | 1346 | }; |
1360 | 1347 | ||
361 | #define YENTA_PM_OPS (&yenta_pm_ops) | 1348 | #define YENTA_PM_OPS (&yenta_pm_ops)
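
With the pcmcia_socket_dev_* hooks gone from yenta, its dev_pm_ops table is reduced to the noirq callbacks alone. As a rough, generic sketch of that shape (the foo_* names are placeholders, not the yenta functions):

        #include <linux/device.h>
        #include <linux/pm.h>

        static int foo_suspend_noirq(struct device *dev)
        {
                /* save hardware state; runs with device interrupts disabled */
                return 0;
        }

        static int foo_resume_noirq(struct device *dev)
        {
                /* restore hardware state before interrupts are re-enabled */
                return 0;
        }

        static const struct dev_pm_ops foo_pm_ops = {
                .suspend_noirq  = foo_suspend_noirq,
                .resume_noirq   = foo_resume_noirq,
                .freeze_noirq   = foo_suspend_noirq,
                .thaw_noirq     = foo_resume_noirq,
                .poweroff_noirq = foo_suspend_noirq,
                .restore_noirq  = foo_resume_noirq,
        };

A driver then points its .driver.pm field at such a table, which is what the YENTA_PM_OPS define above is used for.
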
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index e631dbeafd79..7bec4588c268 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -385,6 +385,16 @@ config EEEPC_LAPTOP | |||
385 | 385 | ||
386 | If you have an Eee PC laptop, say Y or M here. | 386 | If you have an Eee PC laptop, say Y or M here. |
387 | 387 | ||
388 | config EEEPC_WMI | ||
389 | tristate "Eee PC WMI Hotkey Driver (EXPERIMENTAL)" | ||
390 | depends on ACPI_WMI | ||
391 | depends on INPUT | ||
392 | depends on EXPERIMENTAL | ||
393 | ---help--- | ||
394 | Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. | ||
395 | |||
396 | To compile this driver as a module, choose M here: the module will | ||
397 | be called eeepc-wmi. | ||
388 | 398 | ||
389 | config ACPI_WMI | 399 | config ACPI_WMI |
390 | tristate "WMI" | 400 | tristate "WMI" |
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 9cd9fa0a27e6..a906490e3530 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | # | 4 | # |
5 | obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o | 5 | obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o |
6 | obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o | 6 | obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o |
7 | obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o | ||
7 | obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o | 8 | obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o |
8 | obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o | 9 | obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o |
9 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o | 10 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o |
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index db5f7db2ba33..475ab50732ab 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c | |||
@@ -139,7 +139,7 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " | |||
139 | 139 | ||
140 | /* Backlight */ | 140 | /* Backlight */ |
141 | static acpi_handle lcd_switch_handle; | 141 | static acpi_handle lcd_switch_handle; |
142 | static const char *lcd_switch_paths[] = { | 142 | static char *lcd_switch_paths[] = { |
143 | "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */ | 143 | "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */ |
144 | "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */ | 144 | "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */ |
145 | "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */ | 145 | "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */ |
@@ -153,7 +153,7 @@ static const char *lcd_switch_paths[] = { | |||
153 | #define METHOD_SWITCH_DISPLAY "SDSP" | 153 | #define METHOD_SWITCH_DISPLAY "SDSP" |
154 | 154 | ||
155 | static acpi_handle display_get_handle; | 155 | static acpi_handle display_get_handle; |
156 | static const char *display_get_paths[] = { | 156 | static char *display_get_paths[] = { |
157 | /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */ | 157 | /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */ |
158 | "\\_SB.PCI0.P0P1.VGA.GETD", | 158 | "\\_SB.PCI0.P0P1.VGA.GETD", |
159 | /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */ | 159 | /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */ |
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c new file mode 100644 index 000000000000..2466b7b7fb0e --- /dev/null +++ b/drivers/platform/x86/eeepc-wmi.c | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * Eee PC WMI hotkey driver | ||
3 | * | ||
4 | * Copyright(C) 2010 Intel Corporation. | ||
5 | * | ||
6 | * Portions based on wistron_btns.c: | ||
7 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> | ||
8 | * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org> | ||
9 | * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/types.h> | ||
30 | #include <linux/input.h> | ||
31 | #include <linux/input/sparse-keymap.h> | ||
32 | #include <acpi/acpi_bus.h> | ||
33 | #include <acpi/acpi_drivers.h> | ||
34 | |||
35 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | ||
36 | MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); | ||
37 | MODULE_LICENSE("GPL"); | ||
38 | |||
39 | #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" | ||
40 | |||
41 | MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); | ||
42 | |||
43 | #define NOTIFY_BRNUP_MIN 0x11 | ||
44 | #define NOTIFY_BRNUP_MAX 0x1f | ||
45 | #define NOTIFY_BRNDOWN_MIN 0x20 | ||
46 | #define NOTIFY_BRNDOWN_MAX 0x2e | ||
47 | |||
48 | static const struct key_entry eeepc_wmi_keymap[] = { | ||
49 | /* Sleep already handled via generic ACPI code */ | ||
50 | { KE_KEY, 0x5d, { KEY_WLAN } }, | ||
51 | { KE_KEY, 0x32, { KEY_MUTE } }, | ||
52 | { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, | ||
53 | { KE_KEY, 0x30, { KEY_VOLUMEUP } }, | ||
54 | { KE_IGNORE, NOTIFY_BRNDOWN_MIN, { KEY_BRIGHTNESSDOWN } }, | ||
55 | { KE_IGNORE, NOTIFY_BRNUP_MIN, { KEY_BRIGHTNESSUP } }, | ||
56 | { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, | ||
57 | { KE_END, 0}, | ||
58 | }; | ||
59 | |||
60 | static struct input_dev *eeepc_wmi_input_dev; | ||
61 | |||
62 | static void eeepc_wmi_notify(u32 value, void *context) | ||
63 | { | ||
64 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
65 | union acpi_object *obj; | ||
66 | acpi_status status; | ||
67 | int code; | ||
68 | |||
69 | status = wmi_get_event_data(value, &response); | ||
70 | if (status != AE_OK) { | ||
71 | pr_err("EEEPC WMI: bad event status 0x%x\n", status); | ||
72 | return; | ||
73 | } | ||
74 | |||
75 | obj = (union acpi_object *)response.pointer; | ||
76 | |||
77 | if (obj && obj->type == ACPI_TYPE_INTEGER) { | ||
78 | code = obj->integer.value; | ||
79 | |||
80 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
81 | code = NOTIFY_BRNUP_MIN; | ||
82 | else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) | ||
83 | code = NOTIFY_BRNDOWN_MIN; | ||
84 | |||
85 | if (!sparse_keymap_report_event(eeepc_wmi_input_dev, | ||
86 | code, 1, true)) | ||
87 | pr_info("EEEPC WMI: Unknown key %x pressed\n", code); | ||
88 | } | ||
89 | |||
90 | kfree(obj); | ||
91 | } | ||
92 | |||
93 | static int eeepc_wmi_input_setup(void) | ||
94 | { | ||
95 | int err; | ||
96 | |||
97 | eeepc_wmi_input_dev = input_allocate_device(); | ||
98 | if (!eeepc_wmi_input_dev) | ||
99 | return -ENOMEM; | ||
100 | |||
101 | eeepc_wmi_input_dev->name = "Eee PC WMI hotkeys"; | ||
102 | eeepc_wmi_input_dev->phys = "wmi/input0"; | ||
103 | eeepc_wmi_input_dev->id.bustype = BUS_HOST; | ||
104 | |||
105 | err = sparse_keymap_setup(eeepc_wmi_input_dev, eeepc_wmi_keymap, NULL); | ||
106 | if (err) | ||
107 | goto err_free_dev; | ||
108 | |||
109 | err = input_register_device(eeepc_wmi_input_dev); | ||
110 | if (err) | ||
111 | goto err_free_keymap; | ||
112 | |||
113 | return 0; | ||
114 | |||
115 | err_free_keymap: | ||
116 | sparse_keymap_free(eeepc_wmi_input_dev); | ||
117 | err_free_dev: | ||
118 | input_free_device(eeepc_wmi_input_dev); | ||
119 | return err; | ||
120 | } | ||
121 | |||
122 | static int __init eeepc_wmi_init(void) | ||
123 | { | ||
124 | int err; | ||
125 | acpi_status status; | ||
126 | |||
127 | if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID)) { | ||
128 | pr_warning("EEEPC WMI: No known WMI GUID found\n"); | ||
129 | return -ENODEV; | ||
130 | } | ||
131 | |||
132 | err = eeepc_wmi_input_setup(); | ||
133 | if (err) | ||
134 | return err; | ||
135 | |||
136 | status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID, | ||
137 | eeepc_wmi_notify, NULL); | ||
138 | if (ACPI_FAILURE(status)) { | ||
139 | sparse_keymap_free(eeepc_wmi_input_dev); | ||
140 | input_unregister_device(eeepc_wmi_input_dev); | ||
141 | pr_err("EEEPC WMI: Unable to register notify handler - %d\n", | ||
142 | status); | ||
143 | return -ENODEV; | ||
144 | } | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static void __exit eeepc_wmi_exit(void) | ||
150 | { | ||
151 | wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); | ||
152 | sparse_keymap_free(eeepc_wmi_input_dev); | ||
153 | input_unregister_device(eeepc_wmi_input_dev); | ||
154 | } | ||
155 | |||
156 | module_init(eeepc_wmi_init); | ||
157 | module_exit(eeepc_wmi_exit); | ||
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index c7bbe30010f7..5af16c2bb540 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -1038,6 +1038,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, | |||
1038 | goto overflow_err; | 1038 | goto overflow_err; |
1039 | 1039 | ||
1040 | regulator->dev = dev; | 1040 | regulator->dev = dev; |
1041 | sysfs_attr_init(&regulator->dev_attr.attr); | ||
1041 | regulator->dev_attr.attr.name = kstrdup(buf, GFP_KERNEL); | 1042 | regulator->dev_attr.attr.name = kstrdup(buf, GFP_KERNEL); |
1042 | if (regulator->dev_attr.attr.name == NULL) | 1043 | if (regulator->dev_attr.attr.name == NULL) |
1043 | goto attr_name_err; | 1044 | goto attr_name_err; |
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index f5532ed79272..b20b3e1d821a 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c | |||
@@ -45,7 +45,7 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val); | |||
45 | LP3971_BUCK2 -> 4 | 45 | LP3971_BUCK2 -> 4 |
46 | LP3971_BUCK3 -> 6 | 46 | LP3971_BUCK3 -> 6 |
47 | */ | 47 | */ |
48 | #define BUCK_VOL_CHANGE_SHIFT(x) (((1 << x) & ~0x01) << 1) | 48 | #define BUCK_VOL_CHANGE_SHIFT(x) (((!!x) << 2) | (x & ~0x01)) |
49 | #define BUCK_VOL_CHANGE_FLAG_GO 0x01 | 49 | #define BUCK_VOL_CHANGE_FLAG_GO 0x01 |
50 | #define BUCK_VOL_CHANGE_FLAG_TARGET 0x02 | 50 | #define BUCK_VOL_CHANGE_FLAG_TARGET 0x02 |
51 | #define BUCK_VOL_CHANGE_FLAG_MASK 0x03 | 51 | #define BUCK_VOL_CHANGE_FLAG_MASK 0x03 |
@@ -187,7 +187,8 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev, | |||
187 | return -EINVAL; | 187 | return -EINVAL; |
188 | 188 | ||
189 | return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo), | 189 | return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo), |
190 | LDO_VOL_CONTR_MASK << LDO_VOL_CONTR_SHIFT(ldo), val); | 190 | LDO_VOL_CONTR_MASK << LDO_VOL_CONTR_SHIFT(ldo), |
191 | val << LDO_VOL_CONTR_SHIFT(ldo)); | ||
191 | } | 192 | } |
192 | 193 | ||
193 | static struct regulator_ops lp3971_ldo_ops = { | 194 | static struct regulator_ops lp3971_ldo_ops = { |
@@ -439,6 +440,10 @@ static int __devinit setup_regulators(struct lp3971 *lp3971, | |||
439 | lp3971->num_regulators = pdata->num_regulators; | 440 | lp3971->num_regulators = pdata->num_regulators; |
440 | lp3971->rdev = kcalloc(pdata->num_regulators, | 441 | lp3971->rdev = kcalloc(pdata->num_regulators, |
441 | sizeof(struct regulator_dev *), GFP_KERNEL); | 442 | sizeof(struct regulator_dev *), GFP_KERNEL); |
443 | if (!lp3971->rdev) { | ||
444 | err = -ENOMEM; | ||
445 | goto err_nomem; | ||
446 | } | ||
442 | 447 | ||
443 | /* Instantiate the regulators */ | 448 | /* Instantiate the regulators */ |
444 | for (i = 0; i < pdata->num_regulators; i++) { | 449 | for (i = 0; i < pdata->num_regulators; i++) { |
@@ -461,6 +466,7 @@ error: | |||
461 | regulator_unregister(lp3971->rdev[i]); | 466 | regulator_unregister(lp3971->rdev[i]); |
462 | kfree(lp3971->rdev); | 467 | kfree(lp3971->rdev); |
463 | lp3971->rdev = NULL; | 468 | lp3971->rdev = NULL; |
469 | err_nomem: | ||
464 | return err; | 470 | return err; |
465 | } | 471 | } |
466 | 472 | ||
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c index a49fc952c9a9..c0b09e15edb6 100644 --- a/drivers/regulator/max1586.c +++ b/drivers/regulator/max1586.c | |||
@@ -243,8 +243,8 @@ static int __devexit max1586_pmic_remove(struct i2c_client *client) | |||
243 | for (i = 0; i <= MAX1586_V6; i++) | 243 | for (i = 0; i <= MAX1586_V6; i++) |
244 | if (rdev[i]) | 244 | if (rdev[i]) |
245 | regulator_unregister(rdev[i]); | 245 | regulator_unregister(rdev[i]); |
246 | kfree(rdev); | ||
247 | i2c_set_clientdata(client, NULL); | 246 | i2c_set_clientdata(client, NULL); |
247 | kfree(rdev); | ||
248 | 248 | ||
249 | return 0; | 249 | return 0; |
250 | } | 250 | } |
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index 3ebdf698c648..833aaedc7e64 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c | |||
@@ -356,6 +356,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client, | |||
356 | dev_info(info->dev, "Max8649 regulator device is detected.\n"); | 356 | dev_info(info->dev, "Max8649 regulator device is detected.\n"); |
357 | return 0; | 357 | return 0; |
358 | out: | 358 | out: |
359 | i2c_set_clientdata(client, NULL); | ||
359 | kfree(info); | 360 | kfree(info); |
360 | return ret; | 361 | return ret; |
361 | } | 362 | } |
@@ -367,9 +368,9 @@ static int __devexit max8649_regulator_remove(struct i2c_client *client) | |||
367 | if (info) { | 368 | if (info) { |
368 | if (info->regulator) | 369 | if (info->regulator) |
369 | regulator_unregister(info->regulator); | 370 | regulator_unregister(info->regulator); |
371 | i2c_set_clientdata(client, NULL); | ||
370 | kfree(info); | 372 | kfree(info); |
371 | } | 373 | } |
372 | i2c_set_clientdata(client, NULL); | ||
373 | 374 | ||
374 | return 0; | 375 | return 0; |
375 | } | 376 | } |
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c index f12f1bb62138..47f90b2fc290 100644 --- a/drivers/regulator/max8660.c +++ b/drivers/regulator/max8660.c | |||
@@ -470,8 +470,8 @@ static int __devexit max8660_remove(struct i2c_client *client) | |||
470 | for (i = 0; i < MAX8660_V_END; i++) | 470 | for (i = 0; i < MAX8660_V_END; i++) |
471 | if (rdev[i]) | 471 | if (rdev[i]) |
472 | regulator_unregister(rdev[i]); | 472 | regulator_unregister(rdev[i]); |
473 | kfree(rdev); | ||
474 | i2c_set_clientdata(client, NULL); | 473 | i2c_set_clientdata(client, NULL); |
474 | kfree(rdev); | ||
475 | 475 | ||
476 | return 0; | 476 | return 0; |
477 | } | 477 | } |
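
Several of the regulator remove() fixes above (max1586, max8649, max8660) reorder the teardown so that the i2c client's drvdata pointer is cleared before the memory it referenced is freed, rather than after, presumably so drvdata never points at freed memory even briefly. A minimal sketch of the resulting pattern, with a hypothetical foo_data/foo_remove pair:

        #include <linux/i2c.h>
        #include <linux/slab.h>

        struct foo_data {
                int id;
        };

        static int foo_remove(struct i2c_client *client)
        {
                struct foo_data *data = i2c_get_clientdata(client);

                /* detach the pointer first, then free what it pointed to */
                i2c_set_clientdata(client, NULL);
                kfree(data);
                return 0;
        }
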
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c index 67873f08ed40..b6218f11c957 100644 --- a/drivers/regulator/max8925-regulator.c +++ b/drivers/regulator/max8925-regulator.c | |||
@@ -230,7 +230,7 @@ static struct max8925_regulator_info max8925_regulator_info[] = { | |||
230 | MAX8925_LDO(20, 750, 3900, 50), | 230 | MAX8925_LDO(20, 750, 3900, 50), |
231 | }; | 231 | }; |
232 | 232 | ||
233 | static inline struct max8925_regulator_info *find_regulator_info(int id) | 233 | static struct max8925_regulator_info * __devinit find_regulator_info(int id) |
234 | { | 234 | { |
235 | struct max8925_regulator_info *ri; | 235 | struct max8925_regulator_info *ri; |
236 | int i; | 236 | int i; |
@@ -247,7 +247,7 @@ static int __devinit max8925_regulator_probe(struct platform_device *pdev) | |||
247 | { | 247 | { |
248 | struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); | 248 | struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); |
249 | struct max8925_platform_data *pdata = chip->dev->platform_data; | 249 | struct max8925_platform_data *pdata = chip->dev->platform_data; |
250 | struct max8925_regulator_info *ri = NULL; | 250 | struct max8925_regulator_info *ri; |
251 | struct regulator_dev *rdev; | 251 | struct regulator_dev *rdev; |
252 | 252 | ||
253 | ri = find_regulator_info(pdev->id); | 253 | ri = find_regulator_info(pdev->id); |
@@ -274,7 +274,9 @@ static int __devexit max8925_regulator_remove(struct platform_device *pdev) | |||
274 | { | 274 | { |
275 | struct regulator_dev *rdev = platform_get_drvdata(pdev); | 275 | struct regulator_dev *rdev = platform_get_drvdata(pdev); |
276 | 276 | ||
277 | platform_set_drvdata(pdev, NULL); | ||
277 | regulator_unregister(rdev); | 278 | regulator_unregister(rdev); |
279 | |||
278 | return 0; | 280 | return 0; |
279 | } | 281 | } |
280 | 282 | ||
diff --git a/drivers/rtc/rtc-mc13783.c b/drivers/rtc/rtc-mc13783.c index d60c81b7b693..1379c7faa448 100644 --- a/drivers/rtc/rtc-mc13783.c +++ b/drivers/rtc/rtc-mc13783.c | |||
@@ -319,35 +319,38 @@ static int __devinit mc13783_rtc_probe(struct platform_device *pdev) | |||
319 | { | 319 | { |
320 | int ret; | 320 | int ret; |
321 | struct mc13783_rtc *priv; | 321 | struct mc13783_rtc *priv; |
322 | struct mc13783 *mc13783; | ||
322 | int rtcrst_pending; | 323 | int rtcrst_pending; |
323 | 324 | ||
324 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 325 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
325 | if (!priv) | 326 | if (!priv) |
326 | return -ENOMEM; | 327 | return -ENOMEM; |
327 | 328 | ||
328 | priv->mc13783 = dev_get_drvdata(pdev->dev.parent); | 329 | mc13783 = dev_get_drvdata(pdev->dev.parent); |
330 | priv->mc13783 = mc13783; | ||
331 | |||
329 | platform_set_drvdata(pdev, priv); | 332 | platform_set_drvdata(pdev, priv); |
330 | 333 | ||
331 | mc13783_lock(priv->mc13783); | 334 | mc13783_lock(mc13783); |
332 | 335 | ||
333 | ret = mc13783_irq_request(priv->mc13783, MC13783_IRQ_RTCRST, | 336 | ret = mc13783_irq_request(mc13783, MC13783_IRQ_RTCRST, |
334 | mc13783_rtc_reset_handler, DRIVER_NAME, priv); | 337 | mc13783_rtc_reset_handler, DRIVER_NAME, priv); |
335 | if (ret) | 338 | if (ret) |
336 | goto err_reset_irq_request; | 339 | goto err_reset_irq_request; |
337 | 340 | ||
338 | ret = mc13783_irq_status(priv->mc13783, MC13783_IRQ_RTCRST, | 341 | ret = mc13783_irq_status(mc13783, MC13783_IRQ_RTCRST, |
339 | NULL, &rtcrst_pending); | 342 | NULL, &rtcrst_pending); |
340 | if (ret) | 343 | if (ret) |
341 | goto err_reset_irq_status; | 344 | goto err_reset_irq_status; |
342 | 345 | ||
343 | priv->valid = !rtcrst_pending; | 346 | priv->valid = !rtcrst_pending; |
344 | 347 | ||
345 | ret = mc13783_irq_request_nounmask(priv->mc13783, MC13783_IRQ_1HZ, | 348 | ret = mc13783_irq_request_nounmask(mc13783, MC13783_IRQ_1HZ, |
346 | mc13783_rtc_update_handler, DRIVER_NAME, priv); | 349 | mc13783_rtc_update_handler, DRIVER_NAME, priv); |
347 | if (ret) | 350 | if (ret) |
348 | goto err_update_irq_request; | 351 | goto err_update_irq_request; |
349 | 352 | ||
350 | ret = mc13783_irq_request_nounmask(priv->mc13783, MC13783_IRQ_TODA, | 353 | ret = mc13783_irq_request_nounmask(mc13783, MC13783_IRQ_TODA, |
351 | mc13783_rtc_alarm_handler, DRIVER_NAME, priv); | 354 | mc13783_rtc_alarm_handler, DRIVER_NAME, priv); |
352 | if (ret) | 355 | if (ret) |
353 | goto err_alarm_irq_request; | 356 | goto err_alarm_irq_request; |
@@ -357,22 +360,22 @@ static int __devinit mc13783_rtc_probe(struct platform_device *pdev) | |||
357 | if (IS_ERR(priv->rtc)) { | 360 | if (IS_ERR(priv->rtc)) { |
358 | ret = PTR_ERR(priv->rtc); | 361 | ret = PTR_ERR(priv->rtc); |
359 | 362 | ||
360 | mc13783_irq_free(priv->mc13783, MC13783_IRQ_TODA, priv); | 363 | mc13783_irq_free(mc13783, MC13783_IRQ_TODA, priv); |
361 | err_alarm_irq_request: | 364 | err_alarm_irq_request: |
362 | 365 | ||
363 | mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv); | 366 | mc13783_irq_free(mc13783, MC13783_IRQ_1HZ, priv); |
364 | err_update_irq_request: | 367 | err_update_irq_request: |
365 | 368 | ||
366 | err_reset_irq_status: | 369 | err_reset_irq_status: |
367 | 370 | ||
368 | mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv); | 371 | mc13783_irq_free(mc13783, MC13783_IRQ_RTCRST, priv); |
369 | err_reset_irq_request: | 372 | err_reset_irq_request: |
370 | 373 | ||
371 | platform_set_drvdata(pdev, NULL); | 374 | platform_set_drvdata(pdev, NULL); |
372 | kfree(priv); | 375 | kfree(priv); |
373 | } | 376 | } |
374 | 377 | ||
375 | mc13783_unlock(priv->mc13783); | 378 | mc13783_unlock(mc13783); |
376 | 379 | ||
377 | return ret; | 380 | return ret; |
378 | } | 381 | } |
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 51224f76b980..b3736b8aad39 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -2287,7 +2287,8 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) | |||
2287 | 2287 | ||
2288 | if (cqr->cpmode == 1) { | 2288 | if (cqr->cpmode == 1) { |
2289 | cplength = 0; | 2289 | cplength = 0; |
2290 | datasize = sizeof(struct tcw) + sizeof(struct tsb); | 2290 | /* TCW needs to be 64 byte aligned, so leave enough room */ |
2291 | datasize = 64 + sizeof(struct tcw) + sizeof(struct tsb); | ||
2291 | } else { | 2292 | } else { |
2292 | cplength = 2; | 2293 | cplength = 2; |
2293 | datasize = 0; | 2294 | datasize = 0; |
@@ -2316,8 +2317,8 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) | |||
2316 | if (cqr->cpmode == 1) { | 2317 | if (cqr->cpmode == 1) { |
2317 | /* make a shallow copy of the original tcw but set new tsb */ | 2318 | /* make a shallow copy of the original tcw but set new tsb */ |
2318 | erp->cpmode = 1; | 2319 | erp->cpmode = 1; |
2319 | erp->cpaddr = erp->data; | 2320 | erp->cpaddr = PTR_ALIGN(erp->data, 64); |
2320 | tcw = erp->data; | 2321 | tcw = erp->cpaddr; |
2321 | tsb = (struct tsb *) &tcw[1]; | 2322 | tsb = (struct tsb *) &tcw[1]; |
2322 | *tcw = *((struct tcw *)cqr->cpaddr); | 2323 | *tcw = *((struct tcw *)cqr->cpaddr); |
2323 | tcw->tsb = (long)tsb; | 2324 | tcw->tsb = (long)tsb; |
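
The erp fix above reserves an extra 64 bytes of request data and then uses PTR_ALIGN() so the copied TCW lands on a 64-byte boundary wherever the allocation happens to start; the original pointer is kept so the buffer can still be freed. A generic sketch of that over-allocate-and-align pattern (foo_req and foo_cb are made-up names):

        #include <linux/kernel.h>
        #include <linux/slab.h>

        #define CB_ALIGN 64

        struct foo_cb {
                char payload[32];
        };

        struct foo_req {
                void *data;             /* original allocation, used for kfree() */
                struct foo_cb *cb;      /* 64-byte aligned view inside ->data */
        };

        static int foo_req_init(struct foo_req *req, gfp_t gfp)
        {
                /* over-allocate by the alignment so PTR_ALIGN() always fits */
                req->data = kzalloc(CB_ALIGN + sizeof(struct foo_cb), gfp);
                if (!req->data)
                        return -ENOMEM;

                /* round up to the next 64-byte boundary inside the buffer */
                req->cb = PTR_ALIGN(req->data, CB_ALIGN);
                return 0;
        }
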
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 01f4e7a34aa8..0cb233116855 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -3155,11 +3155,11 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, | |||
3155 | 3155 | ||
3156 | tsb = NULL; | 3156 | tsb = NULL; |
3157 | sense = NULL; | 3157 | sense = NULL; |
3158 | if (irb->scsw.tm.tcw) | 3158 | if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs == 0x01)) |
3159 | tsb = tcw_get_tsb( | 3159 | tsb = tcw_get_tsb( |
3160 | (struct tcw *)(unsigned long)irb->scsw.tm.tcw); | 3160 | (struct tcw *)(unsigned long)irb->scsw.tm.tcw); |
3161 | 3161 | ||
3162 | if (tsb && (irb->scsw.tm.fcxs == 0x01)) { | 3162 | if (tsb) { |
3163 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 3163 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
3164 | " tsb->length %d\n", tsb->length); | 3164 | " tsb->length %d\n", tsb->length); |
3165 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 3165 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c index 740fe405c395..f449c696e503 100644 --- a/drivers/s390/char/sclp_async.c +++ b/drivers/s390/char/sclp_async.c | |||
@@ -84,6 +84,7 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write, | |||
84 | rc = copy_from_user(buf, buffer, sizeof(buf)); | 84 | rc = copy_from_user(buf, buffer, sizeof(buf)); |
85 | if (rc != 0) | 85 | if (rc != 0) |
86 | return -EFAULT; | 86 | return -EFAULT; |
87 | buf[len - 1] = '\0'; | ||
87 | if (strict_strtoul(buf, 0, &val) != 0) | 88 | if (strict_strtoul(buf, 0, &val) != 0) |
88 | return -EINVAL; | 89 | return -EINVAL; |
89 | if (val != 0 && val != 1) | 90 | if (val != 0 && val != 1) |
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index fc7ae05ce48a..4b60ede07f0e 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -308,6 +308,13 @@ struct assign_storage_sccb { | |||
308 | u16 rn; | 308 | u16 rn; |
309 | } __packed; | 309 | } __packed; |
310 | 310 | ||
311 | int arch_get_memory_phys_device(unsigned long start_pfn) | ||
312 | { | ||
313 | if (!rzm) | ||
314 | return 0; | ||
315 | return PFN_PHYS(start_pfn) >> ilog2(rzm); | ||
316 | } | ||
317 | |||
311 | static unsigned long long rn2addr(u16 rn) | 318 | static unsigned long long rn2addr(u16 rn) |
312 | { | 319 | { |
313 | return (unsigned long long) (rn - 1) * rzm; | 320 | return (unsigned long long) (rn - 1) * rzm; |
@@ -704,13 +711,6 @@ int sclp_chp_deconfigure(struct chp_id chpid) | |||
704 | return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8); | 711 | return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8); |
705 | } | 712 | } |
706 | 713 | ||
707 | int arch_get_memory_phys_device(unsigned long start_pfn) | ||
708 | { | ||
709 | if (!rzm) | ||
710 | return 0; | ||
711 | return PFN_PHYS(start_pfn) / rzm; | ||
712 | } | ||
713 | |||
714 | struct chp_info_sccb { | 714 | struct chp_info_sccb { |
715 | struct sccb_header header; | 715 | struct sccb_header header; |
716 | u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; | 716 | u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; |
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 3438658b66b7..3166d85914f2 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
@@ -141,33 +141,6 @@ static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count) | |||
141 | return memcpy_hsa(dest, src, count, TO_KERNEL); | 141 | return memcpy_hsa(dest, src, count, TO_KERNEL); |
142 | } | 142 | } |
143 | 143 | ||
144 | static int memcpy_real(void *dest, unsigned long src, size_t count) | ||
145 | { | ||
146 | unsigned long flags; | ||
147 | int rc = -EFAULT; | ||
148 | register unsigned long _dest asm("2") = (unsigned long) dest; | ||
149 | register unsigned long _len1 asm("3") = (unsigned long) count; | ||
150 | register unsigned long _src asm("4") = src; | ||
151 | register unsigned long _len2 asm("5") = (unsigned long) count; | ||
152 | |||
153 | if (count == 0) | ||
154 | return 0; | ||
155 | flags = __raw_local_irq_stnsm(0xf8UL); /* switch to real mode */ | ||
156 | asm volatile ( | ||
157 | "0: mvcle %1,%2,0x0\n" | ||
158 | "1: jo 0b\n" | ||
159 | " lhi %0,0x0\n" | ||
160 | "2:\n" | ||
161 | EX_TABLE(1b,2b) | ||
162 | : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1), | ||
163 | "+d" (_len2), "=m" (*((long*)dest)) | ||
164 | : "m" (*((long*)src)) | ||
165 | : "cc", "memory"); | ||
166 | __raw_local_irq_ssm(flags); | ||
167 | |||
168 | return rc; | ||
169 | } | ||
170 | |||
171 | static int memcpy_real_user(void __user *dest, unsigned long src, size_t count) | 144 | static int memcpy_real_user(void __user *dest, unsigned long src, size_t count) |
172 | { | 145 | { |
173 | static char buf[4096]; | 146 | static char buf[4096]; |
@@ -175,7 +148,7 @@ static int memcpy_real_user(void __user *dest, unsigned long src, size_t count) | |||
175 | 148 | ||
176 | while (offs < count) { | 149 | while (offs < count) { |
177 | size = min(sizeof(buf), count - offs); | 150 | size = min(sizeof(buf), count - offs); |
178 | if (memcpy_real(buf, src + offs, size)) | 151 | if (memcpy_real(buf, (void *) src + offs, size)) |
179 | return -EFAULT; | 152 | return -EFAULT; |
180 | if (copy_to_user(dest + offs, buf, size)) | 153 | if (copy_to_user(dest + offs, buf, size)) |
181 | return -EFAULT; | 154 | return -EFAULT; |
@@ -663,7 +636,7 @@ static int __init zcore_reipl_init(void) | |||
663 | if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE) | 636 | if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE) |
664 | rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); | 637 | rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); |
665 | else | 638 | else |
666 | rc = memcpy_real(ipl_block, ipib_info.ipib, PAGE_SIZE); | 639 | rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); |
667 | if (rc) { | 640 | if (rc) { |
668 | free_page((unsigned long) ipl_block); | 641 | free_page((unsigned long) ipl_block); |
669 | return rc; | 642 | return rc; |
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index e91db4b38012..175d202ab37e 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
@@ -745,6 +745,7 @@ static struct pcmcia_device_id serial_ids[] = { | |||
745 | PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29), | 745 | PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29), |
746 | PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719), | 746 | PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719), |
747 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), | 747 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), |
748 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e), | ||
748 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), | 749 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), |
749 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c), | 750 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c), |
750 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), | 751 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), |
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index f7b9aff88f4a..309de6be8204 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c | |||
@@ -779,10 +779,6 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | |||
779 | if ((ssr_status & SCxSR_BRK(port)) && err_enabled) | 779 | if ((ssr_status & SCxSR_BRK(port)) && err_enabled) |
780 | ret = sci_br_interrupt(irq, ptr); | 780 | ret = sci_br_interrupt(irq, ptr); |
781 | 781 | ||
782 | WARN_ONCE(ret == IRQ_NONE, | ||
783 | "%s: %d IRQ %d, status %x, control %x\n", __func__, | ||
784 | irq, port->line, ssr_status, scr_status); | ||
785 | |||
786 | return ret; | 782 | return ret; |
787 | } | 783 | } |
788 | 784 | ||
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index fad67d33b0bd..f70c49f915fa 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h | |||
@@ -31,7 +31,9 @@ | |||
31 | # define SCSCR_INIT(port) (port->mapbase == SCIF2) ? 0xF3 : 0xF0 | 31 | # define SCSCR_INIT(port) (port->mapbase == SCIF2) ? 0xF3 : 0xF0 |
32 | #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 32 | #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
33 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ | 33 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ |
34 | defined(CONFIG_ARCH_SHMOBILE) | 34 | defined(CONFIG_ARCH_SH7367) || \ |
35 | defined(CONFIG_ARCH_SH7377) || \ | ||
36 | defined(CONFIG_ARCH_SH7372) | ||
35 | # define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */ | 37 | # define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */ |
36 | # define PORT_PTCR 0xA405011EUL | 38 | # define PORT_PTCR 0xA405011EUL |
37 | # define PORT_PVCR 0xA4050122UL | 39 | # define PORT_PVCR 0xA4050122UL |
@@ -94,7 +96,9 @@ | |||
94 | # define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ | 96 | # define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ |
95 | #elif defined(CONFIG_CPU_SUBTYPE_SH7724) | 97 | #elif defined(CONFIG_CPU_SUBTYPE_SH7724) |
96 | # define SCIF_ORER 0x0001 /* overrun error bit */ | 98 | # define SCIF_ORER 0x0001 /* overrun error bit */ |
97 | # define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ | 99 | # define SCSCR_INIT(port) ((port)->type == PORT_SCIFA ? \ |
100 | 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ : \ | ||
101 | 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ ) | ||
98 | #elif defined(CONFIG_CPU_SUBTYPE_SH4_202) | 102 | #elif defined(CONFIG_CPU_SUBTYPE_SH4_202) |
99 | # define SCSPTR2 0xffe80020 /* 16 bit SCIF */ | 103 | # define SCSPTR2 0xffe80020 /* 16 bit SCIF */ |
100 | # define SCIF_ORER 0x0001 /* overrun error bit */ | 104 | # define SCIF_ORER 0x0001 /* overrun error bit */ |
@@ -197,6 +201,8 @@ | |||
197 | defined(CONFIG_CPU_SUBTYPE_SH7786) || \ | 201 | defined(CONFIG_CPU_SUBTYPE_SH7786) || \ |
198 | defined(CONFIG_CPU_SUBTYPE_SHX3) | 202 | defined(CONFIG_CPU_SUBTYPE_SHX3) |
199 | #define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */ | 203 | #define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */ |
204 | #elif defined(CONFIG_CPU_SUBTYPE_SH7724) | ||
205 | #define SCI_CTRL_FLAGS_REIE ((port)->type == PORT_SCIFA ? 0 : 8) | ||
200 | #else | 206 | #else |
201 | #define SCI_CTRL_FLAGS_REIE 0 | 207 | #define SCI_CTRL_FLAGS_REIE 0 |
202 | #endif | 208 | #endif |
@@ -230,7 +236,9 @@ | |||
230 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | 236 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ |
231 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 237 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
232 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ | 238 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ |
233 | defined(CONFIG_ARCH_SHMOBILE) | 239 | defined(CONFIG_ARCH_SH7367) || \ |
240 | defined(CONFIG_ARCH_SH7377) || \ | ||
241 | defined(CONFIG_ARCH_SH7372) | ||
234 | # define SCIF_ORER 0x0200 | 242 | # define SCIF_ORER 0x0200 |
235 | # define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER) | 243 | # define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER) |
236 | # define SCIF_RFDC_MASK 0x007f | 244 | # define SCIF_RFDC_MASK 0x007f |
@@ -264,7 +272,9 @@ | |||
264 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | 272 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ |
265 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 273 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
266 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ | 274 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ |
267 | defined(CONFIG_ARCH_SHMOBILE) | 275 | defined(CONFIG_ARCH_SH7367) || \ |
276 | defined(CONFIG_ARCH_SH7377) || \ | ||
277 | defined(CONFIG_ARCH_SH7372) | ||
268 | # define SCxSR_RDxF_CLEAR(port) (sci_in(port, SCxSR) & 0xfffc) | 278 | # define SCxSR_RDxF_CLEAR(port) (sci_in(port, SCxSR) & 0xfffc) |
269 | # define SCxSR_ERROR_CLEAR(port) (sci_in(port, SCxSR) & 0xfd73) | 279 | # define SCxSR_ERROR_CLEAR(port) (sci_in(port, SCxSR) & 0xfd73) |
270 | # define SCxSR_TDxE_CLEAR(port) (sci_in(port, SCxSR) & 0xffdf) | 280 | # define SCxSR_TDxE_CLEAR(port) (sci_in(port, SCxSR) & 0xffdf) |
@@ -359,7 +369,10 @@ | |||
359 | SCI_OUT(sci_size, sci_offset, value); \ | 369 | SCI_OUT(sci_size, sci_offset, value); \ |
360 | } | 370 | } |
361 | 371 | ||
362 | #if defined(CONFIG_CPU_SH3) || defined(CONFIG_ARCH_SHMOBILE) | 372 | #if defined(CONFIG_CPU_SH3) || \ |
373 | defined(CONFIG_ARCH_SH7367) || \ | ||
374 | defined(CONFIG_ARCH_SH7377) || \ | ||
375 | defined(CONFIG_ARCH_SH7372) | ||
363 | #if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) | 376 | #if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) |
364 | #define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ | 377 | #define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ |
365 | sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ | 378 | sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ |
@@ -370,7 +383,9 @@ | |||
370 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | 383 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ |
371 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 384 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
372 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ | 385 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ |
373 | defined(CONFIG_ARCH_SHMOBILE) | 386 | defined(CONFIG_ARCH_SH7367) || \ |
387 | defined(CONFIG_ARCH_SH7377) || \ | ||
388 | defined(CONFIG_ARCH_SH7372) | ||
374 | #define SCIF_FNS(name, scif_offset, scif_size) \ | 389 | #define SCIF_FNS(name, scif_offset, scif_size) \ |
375 | CPU_SCIF_FNS(name, scif_offset, scif_size) | 390 | CPU_SCIF_FNS(name, scif_offset, scif_size) |
376 | #else | 391 | #else |
@@ -406,7 +421,9 @@ | |||
406 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | 421 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ |
407 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 422 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
408 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ | 423 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ |
409 | defined(CONFIG_ARCH_SHMOBILE) | 424 | defined(CONFIG_ARCH_SH7367) || \ |
425 | defined(CONFIG_ARCH_SH7377) || \ | ||
426 | defined(CONFIG_ARCH_SH7372) | ||
410 | 427 | ||
411 | SCIF_FNS(SCSMR, 0x00, 16) | 428 | SCIF_FNS(SCSMR, 0x00, 16) |
412 | SCIF_FNS(SCBRR, 0x04, 8) | 429 | SCIF_FNS(SCBRR, 0x04, 8) |
@@ -589,7 +606,9 @@ static inline int sci_rxd_in(struct uart_port *port) | |||
589 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | 606 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ |
590 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 607 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
591 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ | 608 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ |
592 | defined(CONFIG_ARCH_SHMOBILE) | 609 | defined(CONFIG_ARCH_SH7367) || \ |
610 | defined(CONFIG_ARCH_SH7377) || \ | ||
611 | defined(CONFIG_ARCH_SH7372) | ||
593 | #define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1) | 612 | #define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1) |
594 | #elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\ | 613 | #elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\ |
595 | defined(CONFIG_CPU_SUBTYPE_SH7724) | 614 | defined(CONFIG_CPU_SUBTYPE_SH7724) |
diff --git a/drivers/staging/et131x/et1310_mac.c b/drivers/staging/et131x/et1310_mac.c index a292b1edc414..737a9f5401d1 100644 --- a/drivers/staging/et131x/et1310_mac.c +++ b/drivers/staging/et131x/et1310_mac.c | |||
@@ -226,7 +226,7 @@ void ConfigMACRegs2(struct et131x_adapter *etdev) | |||
226 | } | 226 | } |
227 | 227 | ||
228 | /* Enable TXMAC */ | 228 | /* Enable TXMAC */ |
229 | ctl |= 0x05; /* TX mac enable, FC disable */ | 229 | ctl |= 0x09; /* TX mac enable, FC disable */ |
230 | writel(ctl, &etdev->regs->txmac.ctl); | 230 | writel(ctl, &etdev->regs->txmac.ctl); |
231 | 231 | ||
232 | /* Ready to start the RXDMA/TXDMA engine */ | 232 | /* Ready to start the RXDMA/TXDMA engine */ |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index ad37da2b6cb5..a6a88dfd5029 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -125,7 +125,7 @@ static void handle_tx(struct vhost_net *net) | |||
125 | mutex_lock(&vq->mutex); | 125 | mutex_lock(&vq->mutex); |
126 | vhost_disable_notify(vq); | 126 | vhost_disable_notify(vq); |
127 | 127 | ||
128 | if (wmem < sock->sk->sk_sndbuf * 2) | 128 | if (wmem < sock->sk->sk_sndbuf / 2) |
129 | tx_poll_stop(net); | 129 | tx_poll_stop(net); |
130 | hdr_size = vq->hdr_size; | 130 | hdr_size = vq->hdr_size; |
131 | 131 | ||
@@ -508,12 +508,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |||
508 | /* Verify that ring has been setup correctly. */ | 508 | /* Verify that ring has been setup correctly. */ |
509 | if (!vhost_vq_access_ok(vq)) { | 509 | if (!vhost_vq_access_ok(vq)) { |
510 | r = -EFAULT; | 510 | r = -EFAULT; |
511 | goto err; | 511 | goto err_vq; |
512 | } | 512 | } |
513 | sock = get_socket(fd); | 513 | sock = get_socket(fd); |
514 | if (IS_ERR(sock)) { | 514 | if (IS_ERR(sock)) { |
515 | r = PTR_ERR(sock); | 515 | r = PTR_ERR(sock); |
516 | goto err; | 516 | goto err_vq; |
517 | } | 517 | } |
518 | 518 | ||
519 | /* start polling new socket */ | 519 | /* start polling new socket */ |
@@ -524,12 +524,14 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |||
524 | vhost_net_disable_vq(n, vq); | 524 | vhost_net_disable_vq(n, vq); |
525 | rcu_assign_pointer(vq->private_data, sock); | 525 | rcu_assign_pointer(vq->private_data, sock); |
526 | vhost_net_enable_vq(n, vq); | 526 | vhost_net_enable_vq(n, vq); |
527 | mutex_unlock(&vq->mutex); | ||
528 | done: | 527 | done: |
529 | if (oldsock) { | 528 | if (oldsock) { |
530 | vhost_net_flush_vq(n, index); | 529 | vhost_net_flush_vq(n, index); |
531 | fput(oldsock->file); | 530 | fput(oldsock->file); |
532 | } | 531 | } |
532 | |||
533 | err_vq: | ||
534 | mutex_unlock(&vq->mutex); | ||
533 | err: | 535 | err: |
534 | mutex_unlock(&n->dev.mutex); | 536 | mutex_unlock(&n->dev.mutex); |
535 | return r; | 537 | return r; |
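
The relabelled error exits above make sure vq->mutex, taken a few lines into the function, is released on the early-failure paths as well as on the normal one. The underlying shape is the usual ordered-goto unwind; an illustrative standalone version (the names and the vq_ok check are invented, not the vhost API):

        #include <linux/errno.h>
        #include <linux/mutex.h>
        #include <linux/types.h>

        static int set_backend(struct mutex *dev_lock, struct mutex *vq_lock,
                               bool vq_ok)
        {
                int r = 0;

                mutex_lock(dev_lock);
                mutex_lock(vq_lock);

                if (!vq_ok) {
                        r = -EFAULT;
                        goto err_vq;    /* vq_lock is held: unwind through its unlock */
                }

                /* ... swap in the new backend here ... */

        err_vq:
                mutex_unlock(vq_lock);
                mutex_unlock(dev_lock);
                return r;
        }

Each label undoes exactly the state acquired before the jump, so adding a new failure point only means picking the right label.
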
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 7cd55e078794..7bd7a1e4409d 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -476,8 +476,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) | |||
476 | if (r < 0) | 476 | if (r < 0) |
477 | break; | 477 | break; |
478 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); | 478 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); |
479 | if (IS_ERR(eventfp)) | 479 | if (IS_ERR(eventfp)) { |
480 | return PTR_ERR(eventfp); | 480 | r = PTR_ERR(eventfp); |
481 | break; | ||
482 | } | ||
481 | if (eventfp != vq->kick) { | 483 | if (eventfp != vq->kick) { |
482 | pollstop = filep = vq->kick; | 484 | pollstop = filep = vq->kick; |
483 | pollstart = vq->kick = eventfp; | 485 | pollstart = vq->kick = eventfp; |
@@ -489,8 +491,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) | |||
489 | if (r < 0) | 491 | if (r < 0) |
490 | break; | 492 | break; |
491 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); | 493 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); |
492 | if (IS_ERR(eventfp)) | 494 | if (IS_ERR(eventfp)) { |
493 | return PTR_ERR(eventfp); | 495 | r = PTR_ERR(eventfp); |
496 | break; | ||
497 | } | ||
494 | if (eventfp != vq->call) { | 498 | if (eventfp != vq->call) { |
495 | filep = vq->call; | 499 | filep = vq->call; |
496 | ctx = vq->call_ctx; | 500 | ctx = vq->call_ctx; |
@@ -505,8 +509,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) | |||
505 | if (r < 0) | 509 | if (r < 0) |
506 | break; | 510 | break; |
507 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); | 511 | eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); |
508 | if (IS_ERR(eventfp)) | 512 | if (IS_ERR(eventfp)) { |
509 | return PTR_ERR(eventfp); | 513 | r = PTR_ERR(eventfp); |
514 | break; | ||
515 | } | ||
510 | if (eventfp != vq->error) { | 516 | if (eventfp != vq->error) { |
511 | filep = vq->error; | 517 | filep = vq->error; |
512 | vq->error = eventfp; | 518 | vq->error = eventfp; |
diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h index cc781c00f75d..e4c4d89b7860 100644 --- a/drivers/video/geode/lxfb.h +++ b/drivers/video/geode/lxfb.h | |||
@@ -365,6 +365,8 @@ enum fp_registers { | |||
365 | FP_CRC, /* 0x458 */ | 365 | FP_CRC, /* 0x458 */ |
366 | }; | 366 | }; |
367 | 367 | ||
368 | #define FP_PT2_HSP (1 << 22) | ||
369 | #define FP_PT2_VSP (1 << 23) | ||
368 | #define FP_PT2_SCRC (1 << 27) /* shfclk free */ | 370 | #define FP_PT2_SCRC (1 << 27) /* shfclk free */ |
369 | 371 | ||
370 | #define FP_PM_P (1 << 24) /* panel power ctl */ | 372 | #define FP_PM_P (1 << 24) /* panel power ctl */ |
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c index 0e5d8c7c3eba..bc35a95e59d4 100644 --- a/drivers/video/geode/lxfb_ops.c +++ b/drivers/video/geode/lxfb_ops.c | |||
@@ -274,7 +274,15 @@ static void lx_graphics_enable(struct fb_info *info) | |||
274 | u32 msrlo, msrhi; | 274 | u32 msrlo, msrhi; |
275 | 275 | ||
276 | write_fp(par, FP_PT1, 0); | 276 | write_fp(par, FP_PT1, 0); |
277 | write_fp(par, FP_PT2, FP_PT2_SCRC); | 277 | temp = FP_PT2_SCRC; |
278 | |||
279 | if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) | ||
280 | temp |= FP_PT2_HSP; | ||
281 | |||
282 | if (info->var.sync & FB_SYNC_VERT_HIGH_ACT) | ||
283 | temp |= FP_PT2_VSP; | ||
284 | |||
285 | write_fp(par, FP_PT2, temp); | ||
278 | write_fp(par, FP_DFC, FP_DFC_BC); | 286 | write_fp(par, FP_DFC, FP_DFC_BC); |
279 | 287 | ||
280 | msrlo = MSR_LX_MSR_PADSEL_TFT_SEL_LOW; | 288 | msrlo = MSR_LX_MSR_PADSEL_TFT_SEL_LOW; |
diff --git a/drivers/video/omap2/displays/panel-generic.c b/drivers/video/omap2/displays/panel-generic.c index c59e4baed8b2..300eff5de1b4 100644 --- a/drivers/video/omap2/displays/panel-generic.c +++ b/drivers/video/omap2/displays/panel-generic.c | |||
@@ -116,6 +116,24 @@ static int generic_panel_resume(struct omap_dss_device *dssdev) | |||
116 | return 0; | 116 | return 0; |
117 | } | 117 | } |
118 | 118 | ||
119 | static void generic_panel_set_timings(struct omap_dss_device *dssdev, | ||
120 | struct omap_video_timings *timings) | ||
121 | { | ||
122 | dpi_set_timings(dssdev, timings); | ||
123 | } | ||
124 | |||
125 | static void generic_panel_get_timings(struct omap_dss_device *dssdev, | ||
126 | struct omap_video_timings *timings) | ||
127 | { | ||
128 | *timings = dssdev->panel.timings; | ||
129 | } | ||
130 | |||
131 | static int generic_panel_check_timings(struct omap_dss_device *dssdev, | ||
132 | struct omap_video_timings *timings) | ||
133 | { | ||
134 | return dpi_check_timings(dssdev, timings); | ||
135 | } | ||
136 | |||
119 | static struct omap_dss_driver generic_driver = { | 137 | static struct omap_dss_driver generic_driver = { |
120 | .probe = generic_panel_probe, | 138 | .probe = generic_panel_probe, |
121 | .remove = generic_panel_remove, | 139 | .remove = generic_panel_remove, |
@@ -125,6 +143,10 @@ static struct omap_dss_driver generic_driver = { | |||
125 | .suspend = generic_panel_suspend, | 143 | .suspend = generic_panel_suspend, |
126 | .resume = generic_panel_resume, | 144 | .resume = generic_panel_resume, |
127 | 145 | ||
146 | .set_timings = generic_panel_set_timings, | ||
147 | .get_timings = generic_panel_get_timings, | ||
148 | .check_timings = generic_panel_check_timings, | ||
149 | |||
128 | .driver = { | 150 | .driver = { |
129 | .name = "generic_panel", | 151 | .name = "generic_panel", |
130 | .owner = THIS_MODULE, | 152 | .owner = THIS_MODULE, |
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 8254a4232a53..54344184dd73 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c | |||
@@ -590,6 +590,9 @@ int dss_init(bool skip_init) | |||
590 | } | 590 | } |
591 | } | 591 | } |
592 | 592 | ||
593 | dss.dsi_clk_source = DSS_SRC_DSS1_ALWON_FCLK; | ||
594 | dss.dispc_clk_source = DSS_SRC_DSS1_ALWON_FCLK; | ||
595 | |||
593 | dss_save_context(); | 596 | dss_save_context(); |
594 | 597 | ||
595 | rev = dss_read_reg(DSS_REVISION); | 598 | rev = dss_read_reg(DSS_REVISION); |
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c index 55a4de5e5d10..b266ffae0bde 100644 --- a/drivers/video/omap2/vram.c +++ b/drivers/video/omap2/vram.c | |||
@@ -511,13 +511,14 @@ static u32 omap_vram_sdram_size __initdata; | |||
511 | static u32 omap_vram_def_sdram_size __initdata; | 511 | static u32 omap_vram_def_sdram_size __initdata; |
512 | static u32 omap_vram_def_sdram_start __initdata; | 512 | static u32 omap_vram_def_sdram_start __initdata; |
513 | 513 | ||
514 | static void __init omap_vram_early_vram(char **p) | 514 | static int __init omap_vram_early_vram(char *p) |
515 | { | 515 | { |
516 | omap_vram_def_sdram_size = memparse(*p, p); | 516 | omap_vram_def_sdram_size = memparse(p, &p); |
517 | if (**p == ',') | 517 | if (*p == ',') |
518 | omap_vram_def_sdram_start = simple_strtoul((*p) + 1, p, 16); | 518 | omap_vram_def_sdram_start = simple_strtoul(p + 1, &p, 16); |
519 | return 0; | ||
519 | } | 520 | } |
520 | __early_param("vram=", omap_vram_early_vram); | 521 | early_param("vram", omap_vram_early_vram); |
521 | 522 | ||
522 | /* | 523 | /* |
523 | * Called from map_io. We need to call to this early enough so that we | 524 | * Called from map_io. We need to call to this early enough so that we |
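
The vram hunk above converts the driver from the ARM-specific __early_param() helper, which received a char ** and returned void, to the generic early_param(), whose handler takes the option string directly and returns an int. A minimal handler for a hypothetical "mybuf=" boot option, assuming the same size[,start] syntax, could look like:

        #include <linux/init.h>
        #include <linux/kernel.h>

        static unsigned long mybuf_size __initdata;
        static unsigned long mybuf_start __initdata;

        /* parses e.g. "mybuf=4M" or "mybuf=4M,80000000" from the command line */
        static int __init mybuf_early_setup(char *p)
        {
                mybuf_size = memparse(p, &p);
                if (*p == ',')
                        mybuf_start = simple_strtoul(p + 1, &p, 16);
                return 0;
        }
        early_param("mybuf", mybuf_early_setup);

Note that early_param() takes the option name without the trailing '=', which is why the hunk also drops it from "vram".
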
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c index 75285d3f393c..c91a7f70f7b0 100644 --- a/drivers/video/pxa168fb.c +++ b/drivers/video/pxa168fb.c | |||
@@ -668,7 +668,7 @@ static int __init pxa168fb_probe(struct platform_device *pdev) | |||
668 | /* | 668 | /* |
669 | * Map LCD controller registers. | 669 | * Map LCD controller registers. |
670 | */ | 670 | */ |
671 | fbi->reg_base = ioremap_nocache(res->start, res->end - res->start); | 671 | fbi->reg_base = ioremap_nocache(res->start, resource_size(res)); |
672 | if (fbi->reg_base == NULL) { | 672 | if (fbi->reg_base == NULL) { |
673 | ret = -ENOMEM; | 673 | ret = -ENOMEM; |
674 | goto failed; | 674 | goto failed; |
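
resource_size() exists because struct resource ranges are inclusive: the length is end - start + 1, so the open-coded res->end - res->start in the old line mapped one byte too few. A small sketch of the corrected mapping (map_regs() is a made-up helper mirroring the probe code above):

        #include <linux/io.h>
        #include <linux/ioport.h>

        static void __iomem *map_regs(struct resource *res)
        {
                /* resource_size(res) == res->end - res->start + 1 */
                return ioremap_nocache(res->start, resource_size(res));
        }
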
diff --git a/fs/afs/security.c b/fs/afs/security.c index 3ef504370034..bb4ed144d0e4 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c | |||
@@ -189,8 +189,9 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, long acl_order) | |||
189 | if (!permits) | 189 | if (!permits) |
190 | goto out_unlock; | 190 | goto out_unlock; |
191 | 191 | ||
192 | memcpy(permits->permits, xpermits->permits, | 192 | if (xpermits) |
193 | count * sizeof(struct afs_permit)); | 193 | memcpy(permits->permits, xpermits->permits, |
194 | count * sizeof(struct afs_permit)); | ||
194 | 195 | ||
195 | _debug("key %x access %x", | 196 | _debug("key %x access %x", |
196 | key_serial(key), vnode->status.caller_access); | 197 | key_serial(key), vnode->status.caller_access); |
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index 15d80bb35d6f..9b6aef0f75e5 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c | |||
@@ -75,14 +75,16 @@ static int aout_core_dump(struct coredump_params *cprm) | |||
75 | struct file *file = cprm->file; | 75 | struct file *file = cprm->file; |
76 | mm_segment_t fs; | 76 | mm_segment_t fs; |
77 | int has_dumped = 0; | 77 | int has_dumped = 0; |
78 | unsigned long dump_start, dump_size; | 78 | void __user *dump_start; |
79 | int dump_size; | ||
79 | struct user dump; | 80 | struct user dump; |
80 | #ifdef __alpha__ | 81 | #ifdef __alpha__ |
81 | # define START_DATA(u) (u.start_data) | 82 | # define START_DATA(u) ((void __user *)u.start_data) |
82 | #else | 83 | #else |
83 | # define START_DATA(u) ((u.u_tsize << PAGE_SHIFT) + u.start_code) | 84 | # define START_DATA(u) ((void __user *)((u.u_tsize << PAGE_SHIFT) + \ |
85 | u.start_code)) | ||
84 | #endif | 86 | #endif |
85 | # define START_STACK(u) (u.start_stack) | 87 | # define START_STACK(u) ((void __user *)u.start_stack) |
86 | 88 | ||
87 | fs = get_fs(); | 89 | fs = get_fs(); |
88 | set_fs(KERNEL_DS); | 90 | set_fs(KERNEL_DS); |
@@ -104,9 +106,9 @@ static int aout_core_dump(struct coredump_params *cprm) | |||
104 | 106 | ||
105 | /* make sure we actually have a data and stack area to dump */ | 107 | /* make sure we actually have a data and stack area to dump */ |
106 | set_fs(USER_DS); | 108 | set_fs(USER_DS); |
107 | if (!access_ok(VERIFY_READ, (void __user *)START_DATA(dump), dump.u_dsize << PAGE_SHIFT)) | 109 | if (!access_ok(VERIFY_READ, START_DATA(dump), dump.u_dsize << PAGE_SHIFT)) |
108 | dump.u_dsize = 0; | 110 | dump.u_dsize = 0; |
109 | if (!access_ok(VERIFY_READ, (void __user *)START_STACK(dump), dump.u_ssize << PAGE_SHIFT)) | 111 | if (!access_ok(VERIFY_READ, START_STACK(dump), dump.u_ssize << PAGE_SHIFT)) |
110 | dump.u_ssize = 0; | 112 | dump.u_ssize = 0; |
111 | 113 | ||
112 | set_fs(KERNEL_DS); | 114 | set_fs(KERNEL_DS); |
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 2c32d00a6690..7ab23e006e4c 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c | |||
@@ -1590,7 +1590,7 @@ static size_t elf_core_vma_data_size(unsigned long mm_flags) | |||
1590 | struct vm_area_struct *vma; | 1590 | struct vm_area_struct *vma; |
1591 | size_t size = 0; | 1591 | size_t size = 0; |
1592 | 1592 | ||
1593 | for (vma = current->mm->mmap; vma; vma->vm_next) | 1593 | for (vma = current->mm->mmap; vma; vma = vma->vm_next) |
1594 | if (maydump(vma, mm_flags)) | 1594 | if (maydump(vma, mm_flags)) |
1595 | size += vma->vm_end - vma->vm_start; | 1595 | size += vma->vm_end - vma->vm_start; |
1596 | return size; | 1596 | return size; |
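
The whole bug in this hunk is a missing assignment: the loop's third clause evaluated vma->vm_next and discarded the result, so the walk never advanced past the first VMA. A minimal list walk showing the corrected increment (the types are simplified stand-ins for the kernel's VMA list):

#include <stdio.h>

struct vma {
	unsigned long start, end;
	struct vma *next;
};

static unsigned long total_size(const struct vma *head)
{
	unsigned long size = 0;
	const struct vma *v;

	/* Broken form: "for (v = head; v; v->next)" evaluates v->next and
	 * throws it away, so v never changes and the loop spins forever. */
	for (v = head; v; v = v->next)          /* fixed: assign back */
		size += v->end - v->start;
	return size;
}

int main(void)
{
	struct vma b = { 0x2000, 0x3000, NULL };
	struct vma a = { 0x0000, 0x1000, &b };

	printf("%lu\n", total_size(&a));        /* 8192 */
	return 0;
}
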
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 23bb0ceabe31..ce8ef6107727 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -919,6 +919,10 @@ static int context_is_writeable_or_written(struct inode *inode, | |||
919 | /* | 919 | /* |
920 | * We are only allowed to write into/dirty the page if the page is | 920 | * We are only allowed to write into/dirty the page if the page is |
921 | * clean, or already dirty within the same snap context. | 921 | * clean, or already dirty within the same snap context. |
922 | * | ||
923 | * called with page locked. | ||
924 | * return success with page locked, | ||
925 | * or any failure (incl -EAGAIN) with page unlocked. | ||
922 | */ | 926 | */ |
923 | static int ceph_update_writeable_page(struct file *file, | 927 | static int ceph_update_writeable_page(struct file *file, |
924 | loff_t pos, unsigned len, | 928 | loff_t pos, unsigned len, |
@@ -961,9 +965,11 @@ retry_locked: | |||
961 | snapc = ceph_get_snap_context((void *)page->private); | 965 | snapc = ceph_get_snap_context((void *)page->private); |
962 | unlock_page(page); | 966 | unlock_page(page); |
963 | ceph_queue_writeback(inode); | 967 | ceph_queue_writeback(inode); |
964 | wait_event_interruptible(ci->i_cap_wq, | 968 | r = wait_event_interruptible(ci->i_cap_wq, |
965 | context_is_writeable_or_written(inode, snapc)); | 969 | context_is_writeable_or_written(inode, snapc)); |
966 | ceph_put_snap_context(snapc); | 970 | ceph_put_snap_context(snapc); |
971 | if (r == -ERESTARTSYS) | ||
972 | return r; | ||
967 | return -EAGAIN; | 973 | return -EAGAIN; |
968 | } | 974 | } |
969 | 975 | ||
@@ -1035,7 +1041,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, | |||
1035 | int r; | 1041 | int r; |
1036 | 1042 | ||
1037 | do { | 1043 | do { |
1038 | /* get a page*/ | 1044 | /* get a page */ |
1039 | page = grab_cache_page_write_begin(mapping, index, 0); | 1045 | page = grab_cache_page_write_begin(mapping, index, 0); |
1040 | if (!page) | 1046 | if (!page) |
1041 | return -ENOMEM; | 1047 | return -ENOMEM; |
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c index f0318427b6da..8d8a84964763 100644 --- a/fs/ceph/auth_x.c +++ b/fs/ceph/auth_x.c | |||
@@ -28,6 +28,12 @@ static int ceph_x_is_authenticated(struct ceph_auth_client *ac) | |||
28 | return (ac->want_keys & xi->have_keys) == ac->want_keys; | 28 | return (ac->want_keys & xi->have_keys) == ac->want_keys; |
29 | } | 29 | } |
30 | 30 | ||
31 | static int ceph_x_encrypt_buflen(int ilen) | ||
32 | { | ||
33 | return sizeof(struct ceph_x_encrypt_header) + ilen + 16 + | ||
34 | sizeof(u32); | ||
35 | } | ||
36 | |||
31 | static int ceph_x_encrypt(struct ceph_crypto_key *secret, | 37 | static int ceph_x_encrypt(struct ceph_crypto_key *secret, |
32 | void *ibuf, int ilen, void *obuf, size_t olen) | 38 | void *ibuf, int ilen, void *obuf, size_t olen) |
33 | { | 39 | { |
@@ -150,6 +156,11 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, | |||
150 | struct timespec validity; | 156 | struct timespec validity; |
151 | struct ceph_crypto_key old_key; | 157 | struct ceph_crypto_key old_key; |
152 | void *tp, *tpend; | 158 | void *tp, *tpend; |
159 | struct ceph_timespec new_validity; | ||
160 | struct ceph_crypto_key new_session_key; | ||
161 | struct ceph_buffer *new_ticket_blob; | ||
162 | unsigned long new_expires, new_renew_after; | ||
163 | u64 new_secret_id; | ||
153 | 164 | ||
154 | ceph_decode_need(&p, end, sizeof(u32) + 1, bad); | 165 | ceph_decode_need(&p, end, sizeof(u32) + 1, bad); |
155 | 166 | ||
@@ -182,16 +193,16 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, | |||
182 | goto bad; | 193 | goto bad; |
183 | 194 | ||
184 | memcpy(&old_key, &th->session_key, sizeof(old_key)); | 195 | memcpy(&old_key, &th->session_key, sizeof(old_key)); |
185 | ret = ceph_crypto_key_decode(&th->session_key, &dp, dend); | 196 | ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); |
186 | if (ret) | 197 | if (ret) |
187 | goto out; | 198 | goto out; |
188 | 199 | ||
189 | ceph_decode_copy(&dp, &th->validity, sizeof(th->validity)); | 200 | ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); |
190 | ceph_decode_timespec(&validity, &th->validity); | 201 | ceph_decode_timespec(&validity, &new_validity); |
191 | th->expires = get_seconds() + validity.tv_sec; | 202 | new_expires = get_seconds() + validity.tv_sec; |
192 | th->renew_after = th->expires - (validity.tv_sec / 4); | 203 | new_renew_after = new_expires - (validity.tv_sec / 4); |
193 | dout(" expires=%lu renew_after=%lu\n", th->expires, | 204 | dout(" expires=%lu renew_after=%lu\n", new_expires, |
194 | th->renew_after); | 205 | new_renew_after); |
195 | 206 | ||
196 | /* ticket blob for service */ | 207 | /* ticket blob for service */ |
197 | ceph_decode_8_safe(&p, end, is_enc, bad); | 208 | ceph_decode_8_safe(&p, end, is_enc, bad); |
@@ -216,10 +227,21 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, | |||
216 | dout(" ticket blob is %d bytes\n", dlen); | 227 | dout(" ticket blob is %d bytes\n", dlen); |
217 | ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); | 228 | ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); |
218 | struct_v = ceph_decode_8(&tp); | 229 | struct_v = ceph_decode_8(&tp); |
219 | th->secret_id = ceph_decode_64(&tp); | 230 | new_secret_id = ceph_decode_64(&tp); |
220 | ret = ceph_decode_buffer(&th->ticket_blob, &tp, tpend); | 231 | ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); |
221 | if (ret) | 232 | if (ret) |
222 | goto out; | 233 | goto out; |
234 | |||
235 | /* all is well, update our ticket */ | ||
236 | ceph_crypto_key_destroy(&th->session_key); | ||
237 | if (th->ticket_blob) | ||
238 | ceph_buffer_put(th->ticket_blob); | ||
239 | th->session_key = new_session_key; | ||
240 | th->ticket_blob = new_ticket_blob; | ||
241 | th->validity = new_validity; | ||
242 | th->secret_id = new_secret_id; | ||
243 | th->expires = new_expires; | ||
244 | th->renew_after = new_renew_after; | ||
223 | dout(" got ticket service %d (%s) secret_id %lld len %d\n", | 245 | dout(" got ticket service %d (%s) secret_id %lld len %d\n", |
224 | type, ceph_entity_type_name(type), th->secret_id, | 246 | type, ceph_entity_type_name(type), th->secret_id, |
225 | (int)th->ticket_blob->vec.iov_len); | 247 | (int)th->ticket_blob->vec.iov_len); |
@@ -242,7 +264,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac, | |||
242 | struct ceph_x_ticket_handler *th, | 264 | struct ceph_x_ticket_handler *th, |
243 | struct ceph_x_authorizer *au) | 265 | struct ceph_x_authorizer *au) |
244 | { | 266 | { |
245 | int len; | 267 | int maxlen; |
246 | struct ceph_x_authorize_a *msg_a; | 268 | struct ceph_x_authorize_a *msg_a; |
247 | struct ceph_x_authorize_b msg_b; | 269 | struct ceph_x_authorize_b msg_b; |
248 | void *p, *end; | 270 | void *p, *end; |
@@ -253,15 +275,15 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac, | |||
253 | dout("build_authorizer for %s %p\n", | 275 | dout("build_authorizer for %s %p\n", |
254 | ceph_entity_type_name(th->service), au); | 276 | ceph_entity_type_name(th->service), au); |
255 | 277 | ||
256 | len = sizeof(*msg_a) + sizeof(msg_b) + sizeof(u32) + | 278 | maxlen = sizeof(*msg_a) + sizeof(msg_b) + |
257 | ticket_blob_len + 16; | 279 | ceph_x_encrypt_buflen(ticket_blob_len); |
258 | dout(" need len %d\n", len); | 280 | dout(" need len %d\n", maxlen); |
259 | if (au->buf && au->buf->alloc_len < len) { | 281 | if (au->buf && au->buf->alloc_len < maxlen) { |
260 | ceph_buffer_put(au->buf); | 282 | ceph_buffer_put(au->buf); |
261 | au->buf = NULL; | 283 | au->buf = NULL; |
262 | } | 284 | } |
263 | if (!au->buf) { | 285 | if (!au->buf) { |
264 | au->buf = ceph_buffer_new(len, GFP_NOFS); | 286 | au->buf = ceph_buffer_new(maxlen, GFP_NOFS); |
265 | if (!au->buf) | 287 | if (!au->buf) |
266 | return -ENOMEM; | 288 | return -ENOMEM; |
267 | } | 289 | } |
@@ -296,6 +318,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac, | |||
296 | au->buf->vec.iov_len = p - au->buf->vec.iov_base; | 318 | au->buf->vec.iov_len = p - au->buf->vec.iov_base; |
297 | dout(" built authorizer nonce %llx len %d\n", au->nonce, | 319 | dout(" built authorizer nonce %llx len %d\n", au->nonce, |
298 | (int)au->buf->vec.iov_len); | 320 | (int)au->buf->vec.iov_len); |
321 | BUG_ON(au->buf->vec.iov_len > maxlen); | ||
299 | return 0; | 322 | return 0; |
300 | 323 | ||
301 | out_buf: | 324 | out_buf: |
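
The ticket-reply rework above follows a decode-then-commit pattern: every field of the new ticket is decoded into local temporaries first, and only after all decoding succeeds are the old session key and ticket blob torn down and the new values installed, so a decode failure can no longer leave the handler half-updated. (The authorizer path gains a matching sizing helper, ceph_x_encrypt_buflen(), in place of the bare "+ 16".) A stripped-down sketch of that shape, with an invented fixed-size wire format rather than the real ceph encoding:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ticket {
	char key[16];
	long expires;
};

/* Pretend decoder: 0 on success, -1 on a short buffer. */
static int decode_ticket(const char *buf, size_t len, char key[16], long *expires)
{
	if (len < 16 + sizeof(long))
		return -1;
	memcpy(key, buf, 16);
	memcpy(expires, buf + 16, sizeof(long));
	return 0;
}

static int update_ticket(struct ticket *t, const char *buf, size_t len)
{
	char new_key[16];
	long new_expires;
	int ret;

	/* 1. Decode into temporaries only. */
	ret = decode_ticket(buf, len, new_key, &new_expires);
	if (ret)
		return ret;             /* the old ticket is still intact */

	/* 2. All is well: commit the new state in one step. */
	memcpy(t->key, new_key, sizeof(t->key));
	t->expires = new_expires;
	return 0;
}

int main(void)
{
	struct ticket t = { "old-key", 100 };
	char wire[16 + sizeof(long)] = "new-key";

	if (update_ticket(&t, wire, sizeof(wire)) == 0)
		printf("key=%s expires=%ld\n", t.key, t.expires);
	return 0;
}
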
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index db122bb357b8..7d0a0d0adc18 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -1407,6 +1407,7 @@ static int try_nonblocking_invalidate(struct inode *inode) | |||
1407 | */ | 1407 | */ |
1408 | void ceph_check_caps(struct ceph_inode_info *ci, int flags, | 1408 | void ceph_check_caps(struct ceph_inode_info *ci, int flags, |
1409 | struct ceph_mds_session *session) | 1409 | struct ceph_mds_session *session) |
1410 | __releases(session->s_mutex) | ||
1410 | { | 1411 | { |
1411 | struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode); | 1412 | struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode); |
1412 | struct ceph_mds_client *mdsc = &client->mdsc; | 1413 | struct ceph_mds_client *mdsc = &client->mdsc; |
@@ -1414,7 +1415,6 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags, | |||
1414 | struct ceph_cap *cap; | 1415 | struct ceph_cap *cap; |
1415 | int file_wanted, used; | 1416 | int file_wanted, used; |
1416 | int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */ | 1417 | int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */ |
1417 | int drop_session_lock = session ? 0 : 1; | ||
1418 | int issued, implemented, want, retain, revoking, flushing = 0; | 1418 | int issued, implemented, want, retain, revoking, flushing = 0; |
1419 | int mds = -1; /* keep track of how far we've gone through i_caps list | 1419 | int mds = -1; /* keep track of how far we've gone through i_caps list |
1420 | to avoid an infinite loop on retry */ | 1420 | to avoid an infinite loop on retry */ |
@@ -1639,7 +1639,7 @@ ack: | |||
1639 | if (queue_invalidate) | 1639 | if (queue_invalidate) |
1640 | ceph_queue_invalidate(inode); | 1640 | ceph_queue_invalidate(inode); |
1641 | 1641 | ||
1642 | if (session && drop_session_lock) | 1642 | if (session) |
1643 | mutex_unlock(&session->s_mutex); | 1643 | mutex_unlock(&session->s_mutex); |
1644 | if (took_snap_rwsem) | 1644 | if (took_snap_rwsem) |
1645 | up_read(&mdsc->snap_rwsem); | 1645 | up_read(&mdsc->snap_rwsem); |
@@ -2195,18 +2195,19 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, | |||
2195 | * Handle a cap GRANT message from the MDS. (Note that a GRANT may | 2195 | * Handle a cap GRANT message from the MDS. (Note that a GRANT may |
2196 | * actually be a revocation if it specifies a smaller cap set.) | 2196 | * actually be a revocation if it specifies a smaller cap set.) |
2197 | * | 2197 | * |
2198 | * caller holds s_mutex. | 2198 | * caller holds s_mutex and i_lock, we drop both. |
2199 | * | ||
2199 | * return value: | 2200 | * return value: |
2200 | * 0 - ok | 2201 | * 0 - ok |
2201 | * 1 - check_caps on auth cap only (writeback) | 2202 | * 1 - check_caps on auth cap only (writeback) |
2202 | * 2 - check_caps (ack revoke) | 2203 | * 2 - check_caps (ack revoke) |
2203 | */ | 2204 | */ |
2204 | static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | 2205 | static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, |
2205 | struct ceph_mds_session *session, | 2206 | struct ceph_mds_session *session, |
2206 | struct ceph_cap *cap, | 2207 | struct ceph_cap *cap, |
2207 | struct ceph_buffer *xattr_buf) | 2208 | struct ceph_buffer *xattr_buf) |
2208 | __releases(inode->i_lock) | 2209 | __releases(inode->i_lock) |
2209 | 2210 | __releases(session->s_mutex) | |
2210 | { | 2211 | { |
2211 | struct ceph_inode_info *ci = ceph_inode(inode); | 2212 | struct ceph_inode_info *ci = ceph_inode(inode); |
2212 | int mds = session->s_mds; | 2213 | int mds = session->s_mds; |
@@ -2216,7 +2217,7 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
2216 | u64 size = le64_to_cpu(grant->size); | 2217 | u64 size = le64_to_cpu(grant->size); |
2217 | u64 max_size = le64_to_cpu(grant->max_size); | 2218 | u64 max_size = le64_to_cpu(grant->max_size); |
2218 | struct timespec mtime, atime, ctime; | 2219 | struct timespec mtime, atime, ctime; |
2219 | int reply = 0; | 2220 | int check_caps = 0; |
2220 | int wake = 0; | 2221 | int wake = 0; |
2221 | int writeback = 0; | 2222 | int writeback = 0; |
2222 | int revoked_rdcache = 0; | 2223 | int revoked_rdcache = 0; |
@@ -2329,11 +2330,12 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
2329 | if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER) | 2330 | if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER) |
2330 | writeback = 1; /* will delay ack */ | 2331 | writeback = 1; /* will delay ack */ |
2331 | else if (dirty & ~newcaps) | 2332 | else if (dirty & ~newcaps) |
2332 | reply = 1; /* initiate writeback in check_caps */ | 2333 | check_caps = 1; /* initiate writeback in check_caps */ |
2333 | else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 || | 2334 | else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 || |
2334 | revoked_rdcache) | 2335 | revoked_rdcache) |
2335 | reply = 2; /* send revoke ack in check_caps */ | 2336 | check_caps = 2; /* send revoke ack in check_caps */ |
2336 | cap->issued = newcaps; | 2337 | cap->issued = newcaps; |
2338 | cap->implemented |= newcaps; | ||
2337 | } else if (cap->issued == newcaps) { | 2339 | } else if (cap->issued == newcaps) { |
2338 | dout("caps unchanged: %s -> %s\n", | 2340 | dout("caps unchanged: %s -> %s\n", |
2339 | ceph_cap_string(cap->issued), ceph_cap_string(newcaps)); | 2341 | ceph_cap_string(cap->issued), ceph_cap_string(newcaps)); |
@@ -2346,6 +2348,7 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
2346 | * pending revocation */ | 2348 | * pending revocation */ |
2347 | wake = 1; | 2349 | wake = 1; |
2348 | } | 2350 | } |
2351 | BUG_ON(cap->issued & ~cap->implemented); | ||
2349 | 2352 | ||
2350 | spin_unlock(&inode->i_lock); | 2353 | spin_unlock(&inode->i_lock); |
2351 | if (writeback) | 2354 | if (writeback) |
@@ -2359,7 +2362,14 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
2359 | ceph_queue_invalidate(inode); | 2362 | ceph_queue_invalidate(inode); |
2360 | if (wake) | 2363 | if (wake) |
2361 | wake_up(&ci->i_cap_wq); | 2364 | wake_up(&ci->i_cap_wq); |
2362 | return reply; | 2365 | |
2366 | if (check_caps == 1) | ||
2367 | ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY, | ||
2368 | session); | ||
2369 | else if (check_caps == 2) | ||
2370 | ceph_check_caps(ci, CHECK_CAPS_NODELAY, session); | ||
2371 | else | ||
2372 | mutex_unlock(&session->s_mutex); | ||
2363 | } | 2373 | } |
2364 | 2374 | ||
2365 | /* | 2375 | /* |
@@ -2548,9 +2558,8 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex, | |||
2548 | ci->i_cap_exporting_issued = cap->issued; | 2558 | ci->i_cap_exporting_issued = cap->issued; |
2549 | } | 2559 | } |
2550 | __ceph_remove_cap(cap); | 2560 | __ceph_remove_cap(cap); |
2551 | } else { | ||
2552 | WARN_ON(!cap); | ||
2553 | } | 2561 | } |
2562 | /* else, we already released it */ | ||
2554 | 2563 | ||
2555 | spin_unlock(&inode->i_lock); | 2564 | spin_unlock(&inode->i_lock); |
2556 | } | 2565 | } |
@@ -2621,9 +2630,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
2621 | u64 cap_id; | 2630 | u64 cap_id; |
2622 | u64 size, max_size; | 2631 | u64 size, max_size; |
2623 | u64 tid; | 2632 | u64 tid; |
2624 | int check_caps = 0; | ||
2625 | void *snaptrace; | 2633 | void *snaptrace; |
2626 | int r; | ||
2627 | 2634 | ||
2628 | dout("handle_caps from mds%d\n", mds); | 2635 | dout("handle_caps from mds%d\n", mds); |
2629 | 2636 | ||
@@ -2668,8 +2675,9 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
2668 | case CEPH_CAP_OP_IMPORT: | 2675 | case CEPH_CAP_OP_IMPORT: |
2669 | handle_cap_import(mdsc, inode, h, session, | 2676 | handle_cap_import(mdsc, inode, h, session, |
2670 | snaptrace, le32_to_cpu(h->snap_trace_len)); | 2677 | snaptrace, le32_to_cpu(h->snap_trace_len)); |
2671 | check_caps = 1; /* we may have sent a RELEASE to the old auth */ | 2678 | ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY, |
2672 | goto done; | 2679 | session); |
2680 | goto done_unlocked; | ||
2673 | } | 2681 | } |
2674 | 2682 | ||
2675 | /* the rest require a cap */ | 2683 | /* the rest require a cap */ |
@@ -2686,16 +2694,8 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
2686 | switch (op) { | 2694 | switch (op) { |
2687 | case CEPH_CAP_OP_REVOKE: | 2695 | case CEPH_CAP_OP_REVOKE: |
2688 | case CEPH_CAP_OP_GRANT: | 2696 | case CEPH_CAP_OP_GRANT: |
2689 | r = handle_cap_grant(inode, h, session, cap, msg->middle); | 2697 | handle_cap_grant(inode, h, session, cap, msg->middle); |
2690 | if (r == 1) | 2698 | goto done_unlocked; |
2691 | ceph_check_caps(ceph_inode(inode), | ||
2692 | CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY, | ||
2693 | session); | ||
2694 | else if (r == 2) | ||
2695 | ceph_check_caps(ceph_inode(inode), | ||
2696 | CHECK_CAPS_NODELAY, | ||
2697 | session); | ||
2698 | break; | ||
2699 | 2699 | ||
2700 | case CEPH_CAP_OP_FLUSH_ACK: | 2700 | case CEPH_CAP_OP_FLUSH_ACK: |
2701 | handle_cap_flush_ack(inode, tid, h, session, cap); | 2701 | handle_cap_flush_ack(inode, tid, h, session, cap); |
@@ -2713,9 +2713,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
2713 | 2713 | ||
2714 | done: | 2714 | done: |
2715 | mutex_unlock(&session->s_mutex); | 2715 | mutex_unlock(&session->s_mutex); |
2716 | 2716 | done_unlocked: | |
2717 | if (check_caps) | ||
2718 | ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY, NULL); | ||
2719 | if (inode) | 2717 | if (inode) |
2720 | iput(inode); | 2718 | iput(inode); |
2721 | return; | 2719 | return; |
@@ -2838,11 +2836,18 @@ int ceph_encode_inode_release(void **p, struct inode *inode, | |||
2838 | struct ceph_cap *cap; | 2836 | struct ceph_cap *cap; |
2839 | struct ceph_mds_request_release *rel = *p; | 2837 | struct ceph_mds_request_release *rel = *p; |
2840 | int ret = 0; | 2838 | int ret = 0; |
2841 | 2839 | int used = 0; | |
2842 | dout("encode_inode_release %p mds%d drop %s unless %s\n", inode, | ||
2843 | mds, ceph_cap_string(drop), ceph_cap_string(unless)); | ||
2844 | 2840 | ||
2845 | spin_lock(&inode->i_lock); | 2841 | spin_lock(&inode->i_lock); |
2842 | used = __ceph_caps_used(ci); | ||
2843 | |||
2844 | dout("encode_inode_release %p mds%d used %s drop %s unless %s\n", inode, | ||
2845 | mds, ceph_cap_string(used), ceph_cap_string(drop), | ||
2846 | ceph_cap_string(unless)); | ||
2847 | |||
2848 | /* only drop unused caps */ | ||
2849 | drop &= ~used; | ||
2850 | |||
2846 | cap = __get_cap_for_mds(ci, mds); | 2851 | cap = __get_cap_for_mds(ci, mds); |
2847 | if (cap && __cap_is_valid(cap)) { | 2852 | if (cap && __cap_is_valid(cap)) { |
2848 | if (force || | 2853 | if (force || |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 5107384ee029..8a9116e15b70 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -288,8 +288,10 @@ more: | |||
288 | CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR; | 288 | CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR; |
289 | 289 | ||
290 | /* discard old result, if any */ | 290 | /* discard old result, if any */ |
291 | if (fi->last_readdir) | 291 | if (fi->last_readdir) { |
292 | ceph_mdsc_put_request(fi->last_readdir); | 292 | ceph_mdsc_put_request(fi->last_readdir); |
293 | fi->last_readdir = NULL; | ||
294 | } | ||
293 | 295 | ||
294 | /* requery frag tree, as the frag topology may have changed */ | 296 | /* requery frag tree, as the frag topology may have changed */ |
295 | frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL); | 297 | frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL); |
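
Clearing fi->last_readdir right after dropping the reference means a later error or retry path cannot put the same request twice. A small sketch of the pattern with an invented refcounted request type:

#include <stdio.h>
#include <stdlib.h>

struct request {
	int refs;
};

static void put_request(struct request *req)
{
	if (--req->refs == 0)
		free(req);
}

struct dir_file_info {
	struct request *last_readdir;      /* cached result of the last readdir */
};

static void discard_old_result(struct dir_file_info *fi)
{
	if (fi->last_readdir) {
		put_request(fi->last_readdir);
		fi->last_readdir = NULL;   /* a later error path can't drop it again */
	}
}

int main(void)
{
	struct dir_file_info fi = { malloc(sizeof(struct request)) };

	if (!fi.last_readdir)
		return 1;
	fi.last_readdir->refs = 1;
	discard_old_result(&fi);
	discard_old_result(&fi);           /* harmless; a double put without the NULL */
	printf("ok\n");
	return 0;
}
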
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 7abe1aed819b..aca82d55cc53 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -378,6 +378,22 @@ void ceph_destroy_inode(struct inode *inode) | |||
378 | 378 | ||
379 | ceph_queue_caps_release(inode); | 379 | ceph_queue_caps_release(inode); |
380 | 380 | ||
381 | /* | ||
382 | * we may still have a snap_realm reference if there are stray | ||
383 | * caps in i_cap_exporting_issued or i_snap_caps. | ||
384 | */ | ||
385 | if (ci->i_snap_realm) { | ||
386 | struct ceph_mds_client *mdsc = | ||
387 | &ceph_client(ci->vfs_inode.i_sb)->mdsc; | ||
388 | struct ceph_snap_realm *realm = ci->i_snap_realm; | ||
389 | |||
390 | dout(" dropping residual ref to snap realm %p\n", realm); | ||
391 | spin_lock(&realm->inodes_with_caps_lock); | ||
392 | list_del_init(&ci->i_snap_realm_item); | ||
393 | spin_unlock(&realm->inodes_with_caps_lock); | ||
394 | ceph_put_snap_realm(mdsc, realm); | ||
395 | } | ||
396 | |||
381 | kfree(ci->i_symlink); | 397 | kfree(ci->i_symlink); |
382 | while ((n = rb_first(&ci->i_fragtree)) != NULL) { | 398 | while ((n = rb_first(&ci->i_fragtree)) != NULL) { |
383 | frag = rb_entry(n, struct ceph_inode_frag, node); | 399 | frag = rb_entry(n, struct ceph_inode_frag, node); |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index a2600101ec22..5c7920be6420 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -328,6 +328,8 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, | |||
328 | struct ceph_mds_session *s; | 328 | struct ceph_mds_session *s; |
329 | 329 | ||
330 | s = kzalloc(sizeof(*s), GFP_NOFS); | 330 | s = kzalloc(sizeof(*s), GFP_NOFS); |
331 | if (!s) | ||
332 | return ERR_PTR(-ENOMEM); | ||
331 | s->s_mdsc = mdsc; | 333 | s->s_mdsc = mdsc; |
332 | s->s_mds = mds; | 334 | s->s_mds = mds; |
333 | s->s_state = CEPH_MDS_SESSION_NEW; | 335 | s->s_state = CEPH_MDS_SESSION_NEW; |
@@ -529,7 +531,7 @@ static void __unregister_request(struct ceph_mds_client *mdsc, | |||
529 | { | 531 | { |
530 | dout("__unregister_request %p tid %lld\n", req, req->r_tid); | 532 | dout("__unregister_request %p tid %lld\n", req, req->r_tid); |
531 | rb_erase(&req->r_node, &mdsc->request_tree); | 533 | rb_erase(&req->r_node, &mdsc->request_tree); |
532 | ceph_mdsc_put_request(req); | 534 | RB_CLEAR_NODE(&req->r_node); |
533 | 535 | ||
534 | if (req->r_unsafe_dir) { | 536 | if (req->r_unsafe_dir) { |
535 | struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); | 537 | struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); |
@@ -538,6 +540,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc, | |||
538 | list_del_init(&req->r_unsafe_dir_item); | 540 | list_del_init(&req->r_unsafe_dir_item); |
539 | spin_unlock(&ci->i_unsafe_lock); | 541 | spin_unlock(&ci->i_unsafe_lock); |
540 | } | 542 | } |
543 | |||
544 | ceph_mdsc_put_request(req); | ||
541 | } | 545 | } |
542 | 546 | ||
543 | /* | 547 | /* |
@@ -862,6 +866,7 @@ static int send_renew_caps(struct ceph_mds_client *mdsc, | |||
862 | if (time_after_eq(jiffies, session->s_cap_ttl) && | 866 | if (time_after_eq(jiffies, session->s_cap_ttl) && |
863 | time_after_eq(session->s_cap_ttl, session->s_renew_requested)) | 867 | time_after_eq(session->s_cap_ttl, session->s_renew_requested)) |
864 | pr_info("mds%d caps stale\n", session->s_mds); | 868 | pr_info("mds%d caps stale\n", session->s_mds); |
869 | session->s_renew_requested = jiffies; | ||
865 | 870 | ||
866 | /* do not try to renew caps until a recovering mds has reconnected | 871 | /* do not try to renew caps until a recovering mds has reconnected |
867 | * with its clients. */ | 872 | * with its clients. */ |
@@ -874,7 +879,6 @@ static int send_renew_caps(struct ceph_mds_client *mdsc, | |||
874 | 879 | ||
875 | dout("send_renew_caps to mds%d (%s)\n", session->s_mds, | 880 | dout("send_renew_caps to mds%d (%s)\n", session->s_mds, |
876 | ceph_mds_state_name(state)); | 881 | ceph_mds_state_name(state)); |
877 | session->s_renew_requested = jiffies; | ||
878 | msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, | 882 | msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, |
879 | ++session->s_renew_seq); | 883 | ++session->s_renew_seq); |
880 | if (IS_ERR(msg)) | 884 | if (IS_ERR(msg)) |
@@ -1566,8 +1570,13 @@ static int __do_request(struct ceph_mds_client *mdsc, | |||
1566 | 1570 | ||
1567 | /* get, open session */ | 1571 | /* get, open session */ |
1568 | session = __ceph_lookup_mds_session(mdsc, mds); | 1572 | session = __ceph_lookup_mds_session(mdsc, mds); |
1569 | if (!session) | 1573 | if (!session) { |
1570 | session = register_session(mdsc, mds); | 1574 | session = register_session(mdsc, mds); |
1575 | if (IS_ERR(session)) { | ||
1576 | err = PTR_ERR(session); | ||
1577 | goto finish; | ||
1578 | } | ||
1579 | } | ||
1571 | dout("do_request mds%d session %p state %s\n", mds, session, | 1580 | dout("do_request mds%d session %p state %s\n", mds, session, |
1572 | session_state_name(session->s_state)); | 1581 | session_state_name(session->s_state)); |
1573 | if (session->s_state != CEPH_MDS_SESSION_OPEN && | 1582 | if (session->s_state != CEPH_MDS_SESSION_OPEN && |
@@ -1770,7 +1779,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |||
1770 | dout("handle_reply %p\n", req); | 1779 | dout("handle_reply %p\n", req); |
1771 | 1780 | ||
1772 | /* correct session? */ | 1781 | /* correct session? */ |
1773 | if (!req->r_session && req->r_session != session) { | 1782 | if (req->r_session != session) { |
1774 | pr_err("mdsc_handle_reply got %llu on session mds%d" | 1783 | pr_err("mdsc_handle_reply got %llu on session mds%d" |
1775 | " not mds%d\n", tid, session->s_mds, | 1784 | " not mds%d\n", tid, session->s_mds, |
1776 | req->r_session ? req->r_session->s_mds : -1); | 1785 | req->r_session ? req->r_session->s_mds : -1); |
@@ -2682,29 +2691,41 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) | |||
2682 | */ | 2691 | */ |
2683 | static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) | 2692 | static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) |
2684 | { | 2693 | { |
2685 | struct ceph_mds_request *req = NULL; | 2694 | struct ceph_mds_request *req = NULL, *nextreq; |
2686 | struct rb_node *n; | 2695 | struct rb_node *n; |
2687 | 2696 | ||
2688 | mutex_lock(&mdsc->mutex); | 2697 | mutex_lock(&mdsc->mutex); |
2689 | dout("wait_unsafe_requests want %lld\n", want_tid); | 2698 | dout("wait_unsafe_requests want %lld\n", want_tid); |
2699 | restart: | ||
2690 | req = __get_oldest_req(mdsc); | 2700 | req = __get_oldest_req(mdsc); |
2691 | while (req && req->r_tid <= want_tid) { | 2701 | while (req && req->r_tid <= want_tid) { |
2702 | /* find next request */ | ||
2703 | n = rb_next(&req->r_node); | ||
2704 | if (n) | ||
2705 | nextreq = rb_entry(n, struct ceph_mds_request, r_node); | ||
2706 | else | ||
2707 | nextreq = NULL; | ||
2692 | if ((req->r_op & CEPH_MDS_OP_WRITE)) { | 2708 | if ((req->r_op & CEPH_MDS_OP_WRITE)) { |
2693 | /* write op */ | 2709 | /* write op */ |
2694 | ceph_mdsc_get_request(req); | 2710 | ceph_mdsc_get_request(req); |
2711 | if (nextreq) | ||
2712 | ceph_mdsc_get_request(nextreq); | ||
2695 | mutex_unlock(&mdsc->mutex); | 2713 | mutex_unlock(&mdsc->mutex); |
2696 | dout("wait_unsafe_requests wait on %llu (want %llu)\n", | 2714 | dout("wait_unsafe_requests wait on %llu (want %llu)\n", |
2697 | req->r_tid, want_tid); | 2715 | req->r_tid, want_tid); |
2698 | wait_for_completion(&req->r_safe_completion); | 2716 | wait_for_completion(&req->r_safe_completion); |
2699 | mutex_lock(&mdsc->mutex); | 2717 | mutex_lock(&mdsc->mutex); |
2700 | n = rb_next(&req->r_node); | ||
2701 | ceph_mdsc_put_request(req); | 2718 | ceph_mdsc_put_request(req); |
2702 | } else { | 2719 | if (!nextreq) |
2703 | n = rb_next(&req->r_node); | 2720 | break; /* next dne before, so we're done! */ |
2721 | if (RB_EMPTY_NODE(&nextreq->r_node)) { | ||
2722 | /* next request was removed from tree */ | ||
2723 | ceph_mdsc_put_request(nextreq); | ||
2724 | goto restart; | ||
2725 | } | ||
2726 | ceph_mdsc_put_request(nextreq); /* won't go away */ | ||
2704 | } | 2727 | } |
2705 | if (!n) | 2728 | req = nextreq; |
2706 | break; | ||
2707 | req = rb_entry(n, struct ceph_mds_request, r_node); | ||
2708 | } | 2729 | } |
2709 | mutex_unlock(&mdsc->mutex); | 2730 | mutex_unlock(&mdsc->mutex); |
2710 | dout("wait_unsafe_requests done\n"); | 2731 | dout("wait_unsafe_requests done\n"); |
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 781656a49bf8..a32f0f896d9f 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c | |||
@@ -366,6 +366,14 @@ void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr) | |||
366 | } | 366 | } |
367 | 367 | ||
368 | /* | 368 | /* |
369 | * return true if this connection ever successfully opened | ||
370 | */ | ||
371 | bool ceph_con_opened(struct ceph_connection *con) | ||
372 | { | ||
373 | return con->connect_seq > 0; | ||
374 | } | ||
375 | |||
376 | /* | ||
369 | * generic get/put | 377 | * generic get/put |
370 | */ | 378 | */ |
371 | struct ceph_connection *ceph_con_get(struct ceph_connection *con) | 379 | struct ceph_connection *ceph_con_get(struct ceph_connection *con) |
@@ -830,13 +838,6 @@ static void prepare_read_connect(struct ceph_connection *con) | |||
830 | con->in_base_pos = 0; | 838 | con->in_base_pos = 0; |
831 | } | 839 | } |
832 | 840 | ||
833 | static void prepare_read_connect_retry(struct ceph_connection *con) | ||
834 | { | ||
835 | dout("prepare_read_connect_retry %p\n", con); | ||
836 | con->in_base_pos = strlen(CEPH_BANNER) + sizeof(con->actual_peer_addr) | ||
837 | + sizeof(con->peer_addr_for_me); | ||
838 | } | ||
839 | |||
840 | static void prepare_read_ack(struct ceph_connection *con) | 841 | static void prepare_read_ack(struct ceph_connection *con) |
841 | { | 842 | { |
842 | dout("prepare_read_ack %p\n", con); | 843 | dout("prepare_read_ack %p\n", con); |
@@ -1146,7 +1147,7 @@ static int process_connect(struct ceph_connection *con) | |||
1146 | } | 1147 | } |
1147 | con->auth_retry = 1; | 1148 | con->auth_retry = 1; |
1148 | prepare_write_connect(con->msgr, con, 0); | 1149 | prepare_write_connect(con->msgr, con, 0); |
1149 | prepare_read_connect_retry(con); | 1150 | prepare_read_connect(con); |
1150 | break; | 1151 | break; |
1151 | 1152 | ||
1152 | case CEPH_MSGR_TAG_RESETSESSION: | 1153 | case CEPH_MSGR_TAG_RESETSESSION: |
@@ -1843,8 +1844,6 @@ static void ceph_fault(struct ceph_connection *con) | |||
1843 | goto out; | 1844 | goto out; |
1844 | } | 1845 | } |
1845 | 1846 | ||
1846 | clear_bit(BUSY, &con->state); /* to avoid an improbable race */ | ||
1847 | |||
1848 | mutex_lock(&con->mutex); | 1847 | mutex_lock(&con->mutex); |
1849 | if (test_bit(CLOSED, &con->state)) | 1848 | if (test_bit(CLOSED, &con->state)) |
1850 | goto out_unlock; | 1849 | goto out_unlock; |
diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index 4caaa5911110..a343dae73cdc 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h | |||
@@ -223,6 +223,7 @@ extern void ceph_con_init(struct ceph_messenger *msgr, | |||
223 | struct ceph_connection *con); | 223 | struct ceph_connection *con); |
224 | extern void ceph_con_open(struct ceph_connection *con, | 224 | extern void ceph_con_open(struct ceph_connection *con, |
225 | struct ceph_entity_addr *addr); | 225 | struct ceph_entity_addr *addr); |
226 | extern bool ceph_con_opened(struct ceph_connection *con); | ||
226 | extern void ceph_con_close(struct ceph_connection *con); | 227 | extern void ceph_con_close(struct ceph_connection *con); |
227 | extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg); | 228 | extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg); |
228 | extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg); | 229 | extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg); |
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index dbe63db9762f..c7b4dedaace6 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c | |||
@@ -413,11 +413,22 @@ static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all) | |||
413 | */ | 413 | */ |
414 | static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) | 414 | static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) |
415 | { | 415 | { |
416 | struct ceph_osd_request *req; | ||
416 | int ret = 0; | 417 | int ret = 0; |
417 | 418 | ||
418 | dout("__reset_osd %p osd%d\n", osd, osd->o_osd); | 419 | dout("__reset_osd %p osd%d\n", osd, osd->o_osd); |
419 | if (list_empty(&osd->o_requests)) { | 420 | if (list_empty(&osd->o_requests)) { |
420 | __remove_osd(osdc, osd); | 421 | __remove_osd(osdc, osd); |
422 | } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd], | ||
423 | &osd->o_con.peer_addr, | ||
424 | sizeof(osd->o_con.peer_addr)) == 0 && | ||
425 | !ceph_con_opened(&osd->o_con)) { | ||
426 | dout(" osd addr hasn't changed and connection never opened," | ||
427 | " letting msgr retry"); | ||
428 | /* touch each r_stamp for handle_timeout()'s benefit */ | ||
429 | list_for_each_entry(req, &osd->o_requests, r_osd_item) | ||
430 | req->r_stamp = jiffies; | ||
431 | ret = -EAGAIN; | ||
421 | } else { | 432 | } else { |
422 | ceph_con_close(&osd->o_con); | 433 | ceph_con_close(&osd->o_con); |
423 | ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]); | 434 | ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]); |
@@ -633,7 +644,7 @@ static int __send_request(struct ceph_osd_client *osdc, | |||
633 | reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */ | 644 | reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */ |
634 | reqhead->reassert_version = req->r_reassert_version; | 645 | reqhead->reassert_version = req->r_reassert_version; |
635 | 646 | ||
636 | req->r_sent_stamp = jiffies; | 647 | req->r_stamp = jiffies; |
637 | list_move_tail(&osdc->req_lru, &req->r_req_lru_item); | 648 | list_move_tail(&osdc->req_lru, &req->r_req_lru_item); |
638 | 649 | ||
639 | ceph_msg_get(req->r_request); /* send consumes a ref */ | 650 | ceph_msg_get(req->r_request); /* send consumes a ref */ |
@@ -660,7 +671,7 @@ static void handle_timeout(struct work_struct *work) | |||
660 | unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ; | 671 | unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ; |
661 | unsigned long keepalive = | 672 | unsigned long keepalive = |
662 | osdc->client->mount_args->osd_keepalive_timeout * HZ; | 673 | osdc->client->mount_args->osd_keepalive_timeout * HZ; |
663 | unsigned long last_sent = 0; | 674 | unsigned long last_stamp = 0; |
664 | struct rb_node *p; | 675 | struct rb_node *p; |
665 | struct list_head slow_osds; | 676 | struct list_head slow_osds; |
666 | 677 | ||
@@ -697,12 +708,12 @@ static void handle_timeout(struct work_struct *work) | |||
697 | req = list_entry(osdc->req_lru.next, struct ceph_osd_request, | 708 | req = list_entry(osdc->req_lru.next, struct ceph_osd_request, |
698 | r_req_lru_item); | 709 | r_req_lru_item); |
699 | 710 | ||
700 | if (time_before(jiffies, req->r_sent_stamp + timeout)) | 711 | if (time_before(jiffies, req->r_stamp + timeout)) |
701 | break; | 712 | break; |
702 | 713 | ||
703 | BUG_ON(req == last_req && req->r_sent_stamp == last_sent); | 714 | BUG_ON(req == last_req && req->r_stamp == last_stamp); |
704 | last_req = req; | 715 | last_req = req; |
705 | last_sent = req->r_sent_stamp; | 716 | last_stamp = req->r_stamp; |
706 | 717 | ||
707 | osd = req->r_osd; | 718 | osd = req->r_osd; |
708 | BUG_ON(!osd); | 719 | BUG_ON(!osd); |
@@ -718,7 +729,7 @@ static void handle_timeout(struct work_struct *work) | |||
718 | */ | 729 | */ |
719 | INIT_LIST_HEAD(&slow_osds); | 730 | INIT_LIST_HEAD(&slow_osds); |
720 | list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) { | 731 | list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) { |
721 | if (time_before(jiffies, req->r_sent_stamp + keepalive)) | 732 | if (time_before(jiffies, req->r_stamp + keepalive)) |
722 | break; | 733 | break; |
723 | 734 | ||
724 | osd = req->r_osd; | 735 | osd = req->r_osd; |
@@ -862,7 +873,9 @@ static int __kick_requests(struct ceph_osd_client *osdc, | |||
862 | 873 | ||
863 | dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1); | 874 | dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1); |
864 | if (kickosd) { | 875 | if (kickosd) { |
865 | __reset_osd(osdc, kickosd); | 876 | err = __reset_osd(osdc, kickosd); |
877 | if (err == -EAGAIN) | ||
878 | return 1; | ||
866 | } else { | 879 | } else { |
867 | for (p = rb_first(&osdc->osds); p; p = n) { | 880 | for (p = rb_first(&osdc->osds); p; p = n) { |
868 | struct ceph_osd *osd = | 881 | struct ceph_osd *osd = |
@@ -913,7 +926,7 @@ static int __kick_requests(struct ceph_osd_client *osdc, | |||
913 | 926 | ||
914 | kick: | 927 | kick: |
915 | dout("kicking %p tid %llu osd%d\n", req, req->r_tid, | 928 | dout("kicking %p tid %llu osd%d\n", req, req->r_tid, |
916 | req->r_osd->o_osd); | 929 | req->r_osd ? req->r_osd->o_osd : -1); |
917 | req->r_flags |= CEPH_OSD_FLAG_RETRY; | 930 | req->r_flags |= CEPH_OSD_FLAG_RETRY; |
918 | err = __send_request(osdc, req); | 931 | err = __send_request(osdc, req); |
919 | if (err) { | 932 | if (err) { |
diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index 1b1a3ca43afc..b0759911e7c3 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h | |||
@@ -70,7 +70,7 @@ struct ceph_osd_request { | |||
70 | 70 | ||
71 | char r_oid[40]; /* object name */ | 71 | char r_oid[40]; /* object name */ |
72 | int r_oid_len; | 72 | int r_oid_len; |
73 | unsigned long r_sent_stamp; | 73 | unsigned long r_stamp; /* send OR check time */ |
74 | bool r_resend; /* msg send failed, needs retry */ | 74 | bool r_resend; /* msg send failed, needs retry */ |
75 | 75 | ||
76 | struct ceph_file_layout r_file_layout; | 76 | struct ceph_file_layout r_file_layout; |
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index b83f2692b835..d82fe87c2a6e 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c | |||
@@ -480,6 +480,14 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id) | |||
480 | return NULL; | 480 | return NULL; |
481 | } | 481 | } |
482 | 482 | ||
483 | void __decode_pool(void **p, struct ceph_pg_pool_info *pi) | ||
484 | { | ||
485 | ceph_decode_copy(p, &pi->v, sizeof(pi->v)); | ||
486 | calc_pg_masks(pi); | ||
487 | *p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64); | ||
488 | *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; | ||
489 | } | ||
490 | |||
483 | /* | 491 | /* |
484 | * decode a full map. | 492 | * decode a full map. |
485 | */ | 493 | */ |
@@ -526,12 +534,8 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
526 | ev, CEPH_PG_POOL_VERSION); | 534 | ev, CEPH_PG_POOL_VERSION); |
527 | goto bad; | 535 | goto bad; |
528 | } | 536 | } |
529 | ceph_decode_copy(p, &pi->v, sizeof(pi->v)); | 537 | __decode_pool(p, pi); |
530 | __insert_pg_pool(&map->pg_pools, pi); | 538 | __insert_pg_pool(&map->pg_pools, pi); |
531 | calc_pg_masks(pi); | ||
532 | *p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64); | ||
533 | *p += le32_to_cpu(pi->v.num_removed_snap_intervals) | ||
534 | * sizeof(u64) * 2; | ||
535 | } | 539 | } |
536 | ceph_decode_32_safe(p, end, map->pool_max, bad); | 540 | ceph_decode_32_safe(p, end, map->pool_max, bad); |
537 | 541 | ||
@@ -714,8 +718,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
714 | pi->id = pool; | 718 | pi->id = pool; |
715 | __insert_pg_pool(&map->pg_pools, pi); | 719 | __insert_pg_pool(&map->pg_pools, pi); |
716 | } | 720 | } |
717 | ceph_decode_copy(p, &pi->v, sizeof(pi->v)); | 721 | __decode_pool(p, pi); |
718 | calc_pg_masks(pi); | ||
719 | } | 722 | } |
720 | 723 | ||
721 | /* old_pool */ | 724 | /* old_pool */ |
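
Factoring __decode_pool() out means the incremental-map path now advances the decode cursor past each pool's snapshot arrays exactly as the full-map path always did; forgetting to skip a variable-length trailer shifts every later field. A toy cursor-style decoder showing why the skip matters (invented two-field layout, host endianness):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct pool { uint32_t id; uint32_t num_snaps; };

/* Every field read must advance *p, including trailers we ignore,
 * or the next record is parsed from the wrong offset. */
static void decode_pool(const uint8_t **p, struct pool *pi)
{
	memcpy(&pi->id, *p, 4);        *p += 4;
	memcpy(&pi->num_snaps, *p, 4); *p += 4;
	*p += (size_t)pi->num_snaps * 8;   /* skip the snap ids we don't use */
}

int main(void)
{
	/* Two pools back to back; pool 1 carries two 8-byte snap ids. */
	uint8_t buf[8 + 16 + 8] = { 0 };
	uint32_t v;

	v = 1; memcpy(buf, &v, 4);      v = 2; memcpy(buf + 4, &v, 4);
	v = 7; memcpy(buf + 24, &v, 4); v = 0; memcpy(buf + 28, &v, 4);

	const uint8_t *p = buf;
	struct pool a, b;

	decode_pool(&p, &a);    /* advances past pool 1 and its snaps */
	decode_pool(&p, &b);    /* lands exactly on pool 2 */
	printf("a.id=%u b.id=%u\n", a.id, b.id);   /* 1 and 7 */
	return 0;
}
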
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index bf2a5f3846a4..df04e210a055 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
@@ -314,9 +314,9 @@ static int build_snap_context(struct ceph_snap_realm *realm) | |||
314 | because we rebuild_snap_realms() works _downward_ in | 314 | because we rebuild_snap_realms() works _downward_ in |
315 | hierarchy after each update.) */ | 315 | hierarchy after each update.) */ |
316 | if (realm->cached_context && | 316 | if (realm->cached_context && |
317 | realm->cached_context->seq <= realm->seq && | 317 | realm->cached_context->seq == realm->seq && |
318 | (!parent || | 318 | (!parent || |
319 | realm->cached_context->seq <= parent->cached_context->seq)) { | 319 | realm->cached_context->seq >= parent->cached_context->seq)) { |
320 | dout("build_snap_context %llx %p: %p seq %lld (%d snaps)" | 320 | dout("build_snap_context %llx %p: %p seq %lld (%d snaps)" |
321 | " (unchanged)\n", | 321 | " (unchanged)\n", |
322 | realm->ino, realm, realm->cached_context, | 322 | realm->ino, realm, realm->cached_context, |
@@ -818,7 +818,9 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, | |||
818 | * queued (again) by ceph_update_snap_trace() | 818 | * queued (again) by ceph_update_snap_trace() |
819 | * below. Queue it _now_, under the old context. | 819 | * below. Queue it _now_, under the old context. |
820 | */ | 820 | */ |
821 | spin_lock(&realm->inodes_with_caps_lock); | ||
821 | list_del_init(&ci->i_snap_realm_item); | 822 | list_del_init(&ci->i_snap_realm_item); |
823 | spin_unlock(&realm->inodes_with_caps_lock); | ||
822 | spin_unlock(&inode->i_lock); | 824 | spin_unlock(&inode->i_lock); |
823 | 825 | ||
824 | ceph_queue_cap_snap(ci, | 826 | ceph_queue_cap_snap(ci, |
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c index ef9008b885b5..0d0e97ed3ff6 100644 --- a/fs/ext3/ialloc.c +++ b/fs/ext3/ialloc.c | |||
@@ -582,7 +582,9 @@ got: | |||
582 | inode->i_generation = sbi->s_next_generation++; | 582 | inode->i_generation = sbi->s_next_generation++; |
583 | spin_unlock(&sbi->s_next_gen_lock); | 583 | spin_unlock(&sbi->s_next_gen_lock); |
584 | 584 | ||
585 | ei->i_state = EXT3_STATE_NEW; | 585 | ei->i_state_flags = 0; |
586 | ext3_set_inode_state(inode, EXT3_STATE_NEW); | ||
587 | |||
586 | ei->i_extra_isize = | 588 | ei->i_extra_isize = |
587 | (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ? | 589 | (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ? |
588 | sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0; | 590 | sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0; |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 7f920b7263a4..ea33bdf0a300 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
@@ -2811,7 +2811,7 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino) | |||
2811 | inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime); | 2811 | inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime); |
2812 | inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0; | 2812 | inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0; |
2813 | 2813 | ||
2814 | ei->i_state = 0; | 2814 | ei->i_state_flags = 0; |
2815 | ei->i_dir_start_lookup = 0; | 2815 | ei->i_dir_start_lookup = 0; |
2816 | ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); | 2816 | ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); |
2817 | /* We now have enough fields to check if the inode was active or not. | 2817 | /* We now have enough fields to check if the inode was active or not. |
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 361c0b9962a8..57f6eef6ccd6 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
@@ -263,7 +263,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) | |||
263 | ext4_group_t f; | 263 | ext4_group_t f; |
264 | 264 | ||
265 | f = ext4_flex_group(sbi, block_group); | 265 | f = ext4_flex_group(sbi, block_group); |
266 | atomic_dec(&sbi->s_flex_groups[f].free_inodes); | 266 | atomic_dec(&sbi->s_flex_groups[f].used_dirs); |
267 | } | 267 | } |
268 | 268 | ||
269 | } | 269 | } |
@@ -773,7 +773,7 @@ static int ext4_claim_inode(struct super_block *sb, | |||
773 | if (sbi->s_log_groups_per_flex) { | 773 | if (sbi->s_log_groups_per_flex) { |
774 | ext4_group_t f = ext4_flex_group(sbi, group); | 774 | ext4_group_t f = ext4_flex_group(sbi, group); |
775 | 775 | ||
776 | atomic_inc(&sbi->s_flex_groups[f].free_inodes); | 776 | atomic_inc(&sbi->s_flex_groups[f].used_dirs); |
777 | } | 777 | } |
778 | } | 778 | } |
779 | gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); | 779 | gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 986120f30066..11119e07233b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -1035,7 +1035,7 @@ static int ext4_indirect_calc_metadata_amount(struct inode *inode, | |||
1035 | sector_t lblock) | 1035 | sector_t lblock) |
1036 | { | 1036 | { |
1037 | struct ext4_inode_info *ei = EXT4_I(inode); | 1037 | struct ext4_inode_info *ei = EXT4_I(inode); |
1038 | int dind_mask = EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1; | 1038 | sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1); |
1039 | int blk_bits; | 1039 | int blk_bits; |
1040 | 1040 | ||
1041 | if (lblock < EXT4_NDIR_BLOCKS) | 1041 | if (lblock < EXT4_NDIR_BLOCKS) |
@@ -1050,7 +1050,7 @@ static int ext4_indirect_calc_metadata_amount(struct inode *inode, | |||
1050 | } | 1050 | } |
1051 | ei->i_da_metadata_calc_last_lblock = lblock & dind_mask; | 1051 | ei->i_da_metadata_calc_last_lblock = lblock & dind_mask; |
1052 | ei->i_da_metadata_calc_len = 1; | 1052 | ei->i_da_metadata_calc_len = 1; |
1053 | blk_bits = roundup_pow_of_two(lblock + 1); | 1053 | blk_bits = order_base_2(lblock); |
1054 | return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1; | 1054 | return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1; |
1055 | } | 1055 | } |
1056 | 1056 | ||
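
Two independent fixes sit in this hunk: dind_mask was the keep-low-bits form (ADDR_PER_BLOCK - 1) in a 32-bit int, so lblock & dind_mask kept the offset within an indirect block instead of rounding lblock down to the block boundary, and roundup_pow_of_two() yields the power-of-two value itself where the formula needs a bit count, which order_base_2() (the rounded-up base-2 logarithm) provides. A quick stand-alone illustration of both, using 1024 entries per indirect block:

#include <stdio.h>

int main(void)
{
	unsigned long addr_per_block = 1024;   /* entries per indirect block */
	unsigned long lblock = 5000;

	/* Masking: keep-low-bits vs. round-down-to-boundary. */
	unsigned long wrong = lblock & (addr_per_block - 1);    /* 904  */
	unsigned long right = lblock & ~(addr_per_block - 1);   /* 4096 */

	/* A power-of-two value vs. its bit count (rounded-up log2). */
	unsigned long pow2 = 1, bits = 0;
	while (pow2 < lblock) {
		pow2 <<= 1;
		bits++;
	}

	printf("mask: wrong=%lu right=%lu\n", wrong, right);
	printf("roundup_pow_of_two-like value=%lu, order_base_2-like bits=%lu\n",
	       pow2, bits);
	return 0;
}
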
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index ba191dae8730..e14d22c170d5 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -68,7 +68,21 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); | |||
68 | static int ext4_unfreeze(struct super_block *sb); | 68 | static int ext4_unfreeze(struct super_block *sb); |
69 | static void ext4_write_super(struct super_block *sb); | 69 | static void ext4_write_super(struct super_block *sb); |
70 | static int ext4_freeze(struct super_block *sb); | 70 | static int ext4_freeze(struct super_block *sb); |
71 | static int ext4_get_sb(struct file_system_type *fs_type, int flags, | ||
72 | const char *dev_name, void *data, struct vfsmount *mnt); | ||
71 | 73 | ||
74 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) | ||
75 | static struct file_system_type ext3_fs_type = { | ||
76 | .owner = THIS_MODULE, | ||
77 | .name = "ext3", | ||
78 | .get_sb = ext4_get_sb, | ||
79 | .kill_sb = kill_block_super, | ||
80 | .fs_flags = FS_REQUIRES_DEV, | ||
81 | }; | ||
82 | #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type) | ||
83 | #else | ||
84 | #define IS_EXT3_SB(sb) (0) | ||
85 | #endif | ||
72 | 86 | ||
73 | ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, | 87 | ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, |
74 | struct ext4_group_desc *bg) | 88 | struct ext4_group_desc *bg) |
@@ -2539,7 +2553,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
2539 | * enable delayed allocation by default | 2553 | * enable delayed allocation by default |
2540 | * Use -o nodelalloc to turn it off | 2554 | * Use -o nodelalloc to turn it off |
2541 | */ | 2555 | */ |
2542 | set_opt(sbi->s_mount_opt, DELALLOC); | 2556 | if (!IS_EXT3_SB(sb)) |
2557 | set_opt(sbi->s_mount_opt, DELALLOC); | ||
2543 | 2558 | ||
2544 | if (!parse_options((char *) data, sb, &journal_devnum, | 2559 | if (!parse_options((char *) data, sb, &journal_devnum, |
2545 | &journal_ioprio, NULL, 0)) | 2560 | &journal_ioprio, NULL, 0)) |
@@ -4068,7 +4083,7 @@ static int ext4_get_sb(struct file_system_type *fs_type, int flags, | |||
4068 | return get_sb_bdev(fs_type, flags, dev_name, data, ext4_fill_super,mnt); | 4083 | return get_sb_bdev(fs_type, flags, dev_name, data, ext4_fill_super,mnt); |
4069 | } | 4084 | } |
4070 | 4085 | ||
4071 | #if !defined(CONTIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) | 4086 | #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) |
4072 | static struct file_system_type ext2_fs_type = { | 4087 | static struct file_system_type ext2_fs_type = { |
4073 | .owner = THIS_MODULE, | 4088 | .owner = THIS_MODULE, |
4074 | .name = "ext2", | 4089 | .name = "ext2", |
@@ -4095,15 +4110,7 @@ static inline void register_as_ext2(void) { } | |||
4095 | static inline void unregister_as_ext2(void) { } | 4110 | static inline void unregister_as_ext2(void) { } |
4096 | #endif | 4111 | #endif |
4097 | 4112 | ||
4098 | #if !defined(CONTIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) | 4113 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) |
4099 | static struct file_system_type ext3_fs_type = { | ||
4100 | .owner = THIS_MODULE, | ||
4101 | .name = "ext3", | ||
4102 | .get_sb = ext4_get_sb, | ||
4103 | .kill_sb = kill_block_super, | ||
4104 | .fs_flags = FS_REQUIRES_DEV, | ||
4105 | }; | ||
4106 | |||
4107 | static inline void register_as_ext3(void) | 4114 | static inline void register_as_ext3(void) |
4108 | { | 4115 | { |
4109 | int err = register_filesystem(&ext3_fs_type); | 4116 | int err = register_filesystem(&ext3_fs_type); |
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index c1ef50154868..6fcc7e71fbaa 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c | |||
@@ -309,7 +309,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls, | |||
309 | { | 309 | { |
310 | struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options; | 310 | struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options; |
311 | wchar_t *ip, *ext_start, *end, *name_start; | 311 | wchar_t *ip, *ext_start, *end, *name_start; |
312 | unsigned char base[9], ext[4], buf[8], *p; | 312 | unsigned char base[9], ext[4], buf[5], *p; |
313 | unsigned char charbuf[NLS_MAX_CHARSET_SIZE]; | 313 | unsigned char charbuf[NLS_MAX_CHARSET_SIZE]; |
314 | int chl, chi; | 314 | int chl, chi; |
315 | int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen; | 315 | int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen; |
@@ -467,7 +467,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls, | |||
467 | return 0; | 467 | return 0; |
468 | } | 468 | } |
469 | 469 | ||
470 | i = jiffies & 0xffff; | 470 | i = jiffies; |
471 | sz = (jiffies >> 16) & 0x7; | 471 | sz = (jiffies >> 16) & 0x7; |
472 | if (baselen > 2) { | 472 | if (baselen > 2) { |
473 | baselen = numtail2_baselen; | 473 | baselen = numtail2_baselen; |
@@ -476,7 +476,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls, | |||
476 | name_res[baselen + 4] = '~'; | 476 | name_res[baselen + 4] = '~'; |
477 | name_res[baselen + 5] = '1' + sz; | 477 | name_res[baselen + 5] = '1' + sz; |
478 | while (1) { | 478 | while (1) { |
479 | sprintf(buf, "%04X", i); | 479 | snprintf(buf, sizeof(buf), "%04X", i & 0xffff); |
480 | memcpy(&name_res[baselen], buf, 4); | 480 | memcpy(&name_res[baselen], buf, 4); |
481 | if (vfat_find_form(dir, name_res) < 0) | 481 | if (vfat_find_form(dir, name_res) < 0) |
482 | break; | 482 | break; |
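
"%04X" pads to at least four hex digits but never truncates, so formatting an unmasked value can emit up to eight; masking to 16 bits and switching to snprintf() with the real buffer size makes the four-character tail fit buf[5] by construction. A small demonstration:

#include <stdio.h>

int main(void)
{
	char buf[5];                       /* 4 hex digits + NUL */
	unsigned int i = 0x1234abcd;

	/* sprintf(buf, "%04X", i) would write 8 digits plus NUL here. */
	snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
	printf("%s\n", buf);               /* ABCD */
	return 0;
}
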
diff --git a/fs/fscache/object.c b/fs/fscache/object.c index e513ac599c8e..0b589a9b4ffc 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c | |||
@@ -53,7 +53,7 @@ const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = { | |||
53 | static void fscache_object_slow_work_put_ref(struct slow_work *); | 53 | static void fscache_object_slow_work_put_ref(struct slow_work *); |
54 | static int fscache_object_slow_work_get_ref(struct slow_work *); | 54 | static int fscache_object_slow_work_get_ref(struct slow_work *); |
55 | static void fscache_object_slow_work_execute(struct slow_work *); | 55 | static void fscache_object_slow_work_execute(struct slow_work *); |
56 | #ifdef CONFIG_SLOW_WORK_PROC | 56 | #ifdef CONFIG_SLOW_WORK_DEBUG |
57 | static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *); | 57 | static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *); |
58 | #endif | 58 | #endif |
59 | static void fscache_initialise_object(struct fscache_object *); | 59 | static void fscache_initialise_object(struct fscache_object *); |
@@ -69,7 +69,7 @@ const struct slow_work_ops fscache_object_slow_work_ops = { | |||
69 | .get_ref = fscache_object_slow_work_get_ref, | 69 | .get_ref = fscache_object_slow_work_get_ref, |
70 | .put_ref = fscache_object_slow_work_put_ref, | 70 | .put_ref = fscache_object_slow_work_put_ref, |
71 | .execute = fscache_object_slow_work_execute, | 71 | .execute = fscache_object_slow_work_execute, |
72 | #ifdef CONFIG_SLOW_WORK_PROC | 72 | #ifdef CONFIG_SLOW_WORK_DEBUG |
73 | .desc = fscache_object_slow_work_desc, | 73 | .desc = fscache_object_slow_work_desc, |
74 | #endif | 74 | #endif |
75 | }; | 75 | }; |
@@ -364,7 +364,7 @@ static void fscache_object_slow_work_execute(struct slow_work *work) | |||
364 | /* | 364 | /* |
365 | * describe an object for slow-work debugging | 365 | * describe an object for slow-work debugging |
366 | */ | 366 | */ |
367 | #ifdef CONFIG_SLOW_WORK_PROC | 367 | #ifdef CONFIG_SLOW_WORK_DEBUG |
368 | static void fscache_object_slow_work_desc(struct slow_work *work, | 368 | static void fscache_object_slow_work_desc(struct slow_work *work, |
369 | struct seq_file *m) | 369 | struct seq_file *m) |
370 | { | 370 | { |
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index 313e79a14266..9f6c928d4586 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c | |||
@@ -500,7 +500,7 @@ static void fscache_op_execute(struct slow_work *work) | |||
500 | /* | 500 | /* |
501 | * describe an operation for slow-work debugging | 501 | * describe an operation for slow-work debugging |
502 | */ | 502 | */ |
503 | #ifdef CONFIG_SLOW_WORK_PROC | 503 | #ifdef CONFIG_SLOW_WORK_DEBUG |
504 | static void fscache_op_desc(struct slow_work *work, struct seq_file *m) | 504 | static void fscache_op_desc(struct slow_work *work, struct seq_file *m) |
505 | { | 505 | { |
506 | struct fscache_operation *op = | 506 | struct fscache_operation *op = |
@@ -517,7 +517,7 @@ const struct slow_work_ops fscache_op_slow_work_ops = { | |||
517 | .get_ref = fscache_op_get_ref, | 517 | .get_ref = fscache_op_get_ref, |
518 | .put_ref = fscache_op_put_ref, | 518 | .put_ref = fscache_op_put_ref, |
519 | .execute = fscache_op_execute, | 519 | .execute = fscache_op_execute, |
520 | #ifdef CONFIG_SLOW_WORK_PROC | 520 | #ifdef CONFIG_SLOW_WORK_DEBUG |
521 | .desc = fscache_op_desc, | 521 | .desc = fscache_op_desc, |
522 | #endif | 522 | #endif |
523 | }; | 523 | }; |
diff --git a/fs/fscache/page.c b/fs/fscache/page.c index c598ea4c4e7d..69809024d71d 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c | |||
@@ -881,6 +881,7 @@ submit_failed: | |||
881 | goto nobufs; | 881 | goto nobufs; |
882 | 882 | ||
883 | nobufs_unlock_obj: | 883 | nobufs_unlock_obj: |
884 | spin_unlock(&cookie->stores_lock); | ||
884 | spin_unlock(&object->lock); | 885 | spin_unlock(&object->lock); |
885 | nobufs: | 886 | nobufs: |
886 | spin_unlock(&cookie->lock); | 887 | spin_unlock(&cookie->lock); |
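
The extra spin_unlock(&cookie->stores_lock) above closes an error path that previously jumped to nobufs_unlock_obj while still holding the innermost lock. A minimal user-space sketch of the pattern, with pthread mutexes and invented names standing in for the fscache spinlocks (this is not the fscache code itself):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cookie_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t stores_lock = PTHREAD_MUTEX_INITIALIZER;

static int try_store(int have_buffers)
{
	int ret = 0;

	pthread_mutex_lock(&cookie_lock);
	pthread_mutex_lock(&object_lock);
	pthread_mutex_lock(&stores_lock);

	if (!have_buffers) {
		ret = -1;
		goto nobufs_unlock_obj;
	}

	/* ... normal store path would run here ... */
	pthread_mutex_unlock(&stores_lock);
	pthread_mutex_unlock(&object_lock);
	pthread_mutex_unlock(&cookie_lock);
	return 0;

nobufs_unlock_obj:
	pthread_mutex_unlock(&stores_lock);	/* innermost lock dropped first */
	pthread_mutex_unlock(&object_lock);
	pthread_mutex_unlock(&cookie_lock);
	return ret;
}

int main(void)
{
	printf("error path returned %d\n", try_store(0));
	return 0;
}

The point is only the back-out order: every lock taken before the failure is released, in reverse acquisition order, before returning.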
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index 9718c22f186d..a5d0c56d3ebc 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c | |||
@@ -80,6 +80,7 @@ static void writeseg_end_io(struct bio *bio, int err) | |||
80 | prefetchw(&bvec->bv_page->flags); | 80 | prefetchw(&bvec->bv_page->flags); |
81 | 81 | ||
82 | end_page_writeback(page); | 82 | end_page_writeback(page); |
83 | page_cache_release(page); | ||
83 | } while (bvec >= bio->bi_io_vec); | 84 | } while (bvec >= bio->bi_io_vec); |
84 | bio_put(bio); | 85 | bio_put(bio); |
85 | if (atomic_dec_and_test(&super->s_pending_writes)) | 86 | if (atomic_dec_and_test(&super->s_pending_writes)) |
@@ -97,8 +98,10 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, | |||
97 | unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); | 98 | unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); |
98 | int i; | 99 | int i; |
99 | 100 | ||
101 | if (max_pages > BIO_MAX_PAGES) | ||
102 | max_pages = BIO_MAX_PAGES; | ||
100 | bio = bio_alloc(GFP_NOFS, max_pages); | 103 | bio = bio_alloc(GFP_NOFS, max_pages); |
101 | BUG_ON(!bio); /* FIXME: handle this */ | 104 | BUG_ON(!bio); |
102 | 105 | ||
103 | for (i = 0; i < nr_pages; i++) { | 106 | for (i = 0; i < nr_pages; i++) { |
104 | if (i >= max_pages) { | 107 | if (i >= max_pages) { |
@@ -191,8 +194,10 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, | |||
191 | unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); | 194 | unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); |
192 | int i; | 195 | int i; |
193 | 196 | ||
197 | if (max_pages > BIO_MAX_PAGES) | ||
198 | max_pages = BIO_MAX_PAGES; | ||
194 | bio = bio_alloc(GFP_NOFS, max_pages); | 199 | bio = bio_alloc(GFP_NOFS, max_pages); |
195 | BUG_ON(!bio); /* FIXME: handle this */ | 200 | BUG_ON(!bio); |
196 | 201 | ||
197 | for (i = 0; i < nr_pages; i++) { | 202 | for (i = 0; i < nr_pages; i++) { |
198 | if (i >= max_pages) { | 203 | if (i >= max_pages) { |
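
Both logfs writeout paths above now clamp the per-bio page count before calling bio_alloc(), since a queue may advertise more sectors per request than one bio can describe. A tiny sketch of the clamp, with an illustrative constant standing in for BIO_MAX_PAGES:

#include <stdio.h>

#define BIO_MAX_PAGES_SKETCH 256	/* illustrative upper bound, not the kernel's */

static unsigned int clamp_bio_pages(unsigned int queue_max_pages)
{
	if (queue_max_pages > BIO_MAX_PAGES_SKETCH)
		queue_max_pages = BIO_MAX_PAGES_SKETCH;
	return queue_max_pages;
}

int main(void)
{
	/* A queue advertising 1024 pages per request still gets a bio
	 * sized to the bound the allocator can actually honour. */
	printf("%u\n", clamp_bio_pages(1024));
	printf("%u\n", clamp_bio_pages(64));
	return 0;
}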
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 56a8bfbb0120..c76b4b5c7ff6 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c | |||
@@ -303,12 +303,12 @@ static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir) | |||
303 | (filler_t *)logfs_readpage, NULL); | 303 | (filler_t *)logfs_readpage, NULL); |
304 | if (IS_ERR(page)) | 304 | if (IS_ERR(page)) |
305 | return PTR_ERR(page); | 305 | return PTR_ERR(page); |
306 | dd = kmap_atomic(page, KM_USER0); | 306 | dd = kmap(page); |
307 | BUG_ON(dd->namelen == 0); | 307 | BUG_ON(dd->namelen == 0); |
308 | 308 | ||
309 | full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen), | 309 | full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen), |
310 | pos, be64_to_cpu(dd->ino), dd->type); | 310 | pos, be64_to_cpu(dd->ino), dd->type); |
311 | kunmap_atomic(dd, KM_USER0); | 311 | kunmap(page); |
312 | page_cache_release(page); | 312 | page_cache_release(page); |
313 | if (full) | 313 | if (full) |
314 | break; | 314 | break; |
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c index 6ad30a4c9052..d57c7b07b60b 100644 --- a/fs/logfs/journal.c +++ b/fs/logfs/journal.c | |||
@@ -800,6 +800,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb) | |||
800 | { | 800 | { |
801 | struct logfs_super *super = logfs_super(sb); | 801 | struct logfs_super *super = logfs_super(sb); |
802 | struct logfs_area *area = super->s_journal_area; | 802 | struct logfs_area *area = super->s_journal_area; |
803 | struct btree_head32 *head = &super->s_reserved_segments; | ||
803 | u32 segno, ec; | 804 | u32 segno, ec; |
804 | int i, err; | 805 | int i, err; |
805 | 806 | ||
@@ -807,6 +808,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb) | |||
807 | /* Drop old segments */ | 808 | /* Drop old segments */ |
808 | journal_for_each(i) | 809 | journal_for_each(i) |
809 | if (super->s_journal_seg[i]) { | 810 | if (super->s_journal_seg[i]) { |
811 | btree_remove32(head, super->s_journal_seg[i]); | ||
810 | logfs_set_segment_unreserved(sb, | 812 | logfs_set_segment_unreserved(sb, |
811 | super->s_journal_seg[i], | 813 | super->s_journal_seg[i], |
812 | super->s_journal_ec[i]); | 814 | super->s_journal_ec[i]); |
@@ -819,8 +821,13 @@ void do_logfs_journal_wl_pass(struct super_block *sb) | |||
819 | super->s_journal_seg[i] = segno; | 821 | super->s_journal_seg[i] = segno; |
820 | super->s_journal_ec[i] = ec; | 822 | super->s_journal_ec[i] = ec; |
821 | logfs_set_segment_reserved(sb, segno); | 823 | logfs_set_segment_reserved(sb, segno); |
824 | err = btree_insert32(head, segno, (void *)1, GFP_KERNEL); | ||
825 | BUG_ON(err); /* mempool should prevent this */ | ||
826 | err = logfs_erase_segment(sb, segno, 1); | ||
827 | BUG_ON(err); /* FIXME: remount-ro would be nicer */ | ||
822 | } | 828 | } |
823 | /* Manually move journal_area */ | 829 | /* Manually move journal_area */ |
830 | freeseg(sb, area->a_segno); | ||
824 | area->a_segno = super->s_journal_seg[0]; | 831 | area->a_segno = super->s_journal_seg[0]; |
825 | area->a_is_open = 0; | 832 | area->a_is_open = 0; |
826 | area->a_used_bytes = 0; | 833 | area->a_used_bytes = 0; |
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h index 129779431373..b84b0eec6024 100644 --- a/fs/logfs/logfs.h +++ b/fs/logfs/logfs.h | |||
@@ -587,6 +587,7 @@ void move_page_to_btree(struct page *page); | |||
587 | int logfs_init_mapping(struct super_block *sb); | 587 | int logfs_init_mapping(struct super_block *sb); |
588 | void logfs_sync_area(struct logfs_area *area); | 588 | void logfs_sync_area(struct logfs_area *area); |
589 | void logfs_sync_segments(struct super_block *sb); | 589 | void logfs_sync_segments(struct super_block *sb); |
590 | void freeseg(struct super_block *sb, u32 segno); | ||
590 | 591 | ||
591 | /* area handling */ | 592 | /* area handling */ |
592 | int logfs_init_areas(struct super_block *sb); | 593 | int logfs_init_areas(struct super_block *sb); |
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index 7a23b3e7c0a7..c3a3a6814b84 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c | |||
@@ -1594,7 +1594,6 @@ int logfs_delete(struct inode *inode, pgoff_t index, | |||
1594 | return ret; | 1594 | return ret; |
1595 | } | 1595 | } |
1596 | 1596 | ||
1597 | /* Rewrite cannot mark the inode dirty but has to write it immediatly. */ | ||

1598 | int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs, | 1597 | int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs, |
1599 | gc_level_t gc_level, long flags) | 1598 | gc_level_t gc_level, long flags) |
1600 | { | 1599 | { |
@@ -1611,6 +1610,18 @@ int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs, | |||
1611 | if (level != 0) | 1610 | if (level != 0) |
1612 | alloc_indirect_block(inode, page, 0); | 1611 | alloc_indirect_block(inode, page, 0); |
1613 | err = logfs_write_buf(inode, page, flags); | 1612 | err = logfs_write_buf(inode, page, flags); |
1613 | if (!err && shrink_level(gc_level) == 0) { | ||
1614 | /* Rewrite cannot mark the inode dirty but has to | ||
1615 | * write it immediately. | ||
1616 | * Q: Can't we just create an alias for the inode | ||
1617 | * instead? And if not, why not? | ||
1618 | */ | ||
1619 | if (inode->i_ino == LOGFS_INO_MASTER) | ||
1620 | logfs_write_anchor(inode->i_sb); | ||
1621 | else { | ||
1622 | err = __logfs_write_inode(inode, flags); | ||
1623 | } | ||
1624 | } | ||
1614 | } | 1625 | } |
1615 | logfs_put_write_page(page); | 1626 | logfs_put_write_page(page); |
1616 | return err; | 1627 | return err; |
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index 1a14f9910d55..0ecd8f07c11e 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c | |||
@@ -93,50 +93,58 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len, | |||
93 | } while (len); | 93 | } while (len); |
94 | } | 94 | } |
95 | 95 | ||
96 | /* | 96 | static void pad_partial_page(struct logfs_area *area) |
97 | * bdev_writeseg will write full pages. Memset the tail to prevent data leaks. | ||
98 | */ | ||
99 | static void pad_wbuf(struct logfs_area *area, int final) | ||
100 | { | 97 | { |
101 | struct super_block *sb = area->a_sb; | 98 | struct super_block *sb = area->a_sb; |
102 | struct logfs_super *super = logfs_super(sb); | ||
103 | struct page *page; | 99 | struct page *page; |
104 | u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes); | 100 | u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes); |
105 | pgoff_t index = ofs >> PAGE_SHIFT; | 101 | pgoff_t index = ofs >> PAGE_SHIFT; |
106 | long offset = ofs & (PAGE_SIZE-1); | 102 | long offset = ofs & (PAGE_SIZE-1); |
107 | u32 len = PAGE_SIZE - offset; | 103 | u32 len = PAGE_SIZE - offset; |
108 | 104 | ||
109 | if (len == PAGE_SIZE) { | 105 | if (len % PAGE_SIZE) { |
110 | /* The math in this function can surely use some love */ | 106 | page = get_mapping_page(sb, index, 0); |
111 | len = 0; | ||
112 | } | ||
113 | if (len) { | ||
114 | BUG_ON(area->a_used_bytes >= super->s_segsize); | ||
115 | |||
116 | page = get_mapping_page(area->a_sb, index, 0); | ||
117 | BUG_ON(!page); /* FIXME: reserve a pool */ | 107 | BUG_ON(!page); /* FIXME: reserve a pool */ |
118 | memset(page_address(page) + offset, 0xff, len); | 108 | memset(page_address(page) + offset, 0xff, len); |
119 | SetPagePrivate(page); | 109 | SetPagePrivate(page); |
120 | page_cache_release(page); | 110 | page_cache_release(page); |
121 | } | 111 | } |
112 | } | ||
122 | 113 | ||
123 | if (!final) | 114 | static void pad_full_pages(struct logfs_area *area) |
124 | return; | 115 | { |
116 | struct super_block *sb = area->a_sb; | ||
117 | struct logfs_super *super = logfs_super(sb); | ||
118 | u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes); | ||
119 | u32 len = super->s_segsize - area->a_used_bytes; | ||
120 | pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT; | ||
121 | pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT; | ||
122 | struct page *page; | ||
125 | 123 | ||
126 | area->a_used_bytes += len; | 124 | while (no_indizes) { |
127 | for ( ; area->a_used_bytes < super->s_segsize; | 125 | page = get_mapping_page(sb, index, 0); |
128 | area->a_used_bytes += PAGE_SIZE) { | ||
129 | /* Memset another page */ | ||
130 | index++; | ||
131 | page = get_mapping_page(area->a_sb, index, 0); | ||
132 | BUG_ON(!page); /* FIXME: reserve a pool */ | 126 | BUG_ON(!page); /* FIXME: reserve a pool */ |
133 | memset(page_address(page), 0xff, PAGE_SIZE); | 127 | SetPageUptodate(page); |
128 | memset(page_address(page), 0xff, PAGE_CACHE_SIZE); | ||
134 | SetPagePrivate(page); | 129 | SetPagePrivate(page); |
135 | page_cache_release(page); | 130 | page_cache_release(page); |
131 | index++; | ||
132 | no_indizes--; | ||
136 | } | 133 | } |
137 | } | 134 | } |
138 | 135 | ||
139 | /* | 136 | /* |
137 | * bdev_writeseg will write full pages. Memset the tail to prevent data leaks. | ||
138 | * Also make sure we allocate (and memset) all pages for final writeout. | ||
139 | */ | ||
140 | static void pad_wbuf(struct logfs_area *area, int final) | ||
141 | { | ||
142 | pad_partial_page(area); | ||
143 | if (final) | ||
144 | pad_full_pages(area); | ||
145 | } | ||
146 | |||
147 | /* | ||
140 | * We have to be careful with the alias tree. Since lookup is done by bix, | 148 | * We have to be careful with the alias tree. Since lookup is done by bix, |
141 | * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with | 149 | * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with |
142 | * indirect blocks. So always use it through accessor functions. | 150 | * indirect blocks. So always use it through accessor functions. |
@@ -683,7 +691,7 @@ int logfs_segment_delete(struct inode *inode, struct logfs_shadow *shadow) | |||
683 | return 0; | 691 | return 0; |
684 | } | 692 | } |
685 | 693 | ||
686 | static void freeseg(struct super_block *sb, u32 segno) | 694 | void freeseg(struct super_block *sb, u32 segno) |
687 | { | 695 | { |
688 | struct logfs_super *super = logfs_super(sb); | 696 | struct logfs_super *super = logfs_super(sb); |
689 | struct address_space *mapping = super->s_mapping_inode->i_mapping; | 697 | struct address_space *mapping = super->s_mapping_inode->i_mapping; |
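
The pad_wbuf() rework above splits padding into two helpers: fill the tail of the partially used page, then (only on final writeout) fill every remaining whole page of the segment. A flat-buffer user-space model of that split, with made-up segment and page sizes; the real code works on page-cache pages via get_mapping_page():

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define PG_SIZE  16
#define SEG_SIZE 64

static void pad_partial_page(unsigned char *seg, size_t used)
{
	size_t offset = used % PG_SIZE;

	if (offset)	/* only pad when the last page is partially filled */
		memset(seg + used, 0xff, PG_SIZE - offset);
}

static void pad_full_pages(unsigned char *seg, size_t used)
{
	size_t aligned = (used + PG_SIZE - 1) / PG_SIZE * PG_SIZE;

	memset(seg + aligned, 0xff, SEG_SIZE - aligned);
}

static void pad_wbuf(unsigned char *seg, size_t used, int final)
{
	pad_partial_page(seg, used);
	if (final)
		pad_full_pages(seg, used);
}

int main(void)
{
	unsigned char seg[SEG_SIZE];

	memset(seg, 0, sizeof(seg));
	memset(seg, 0xab, 37);			/* 37 bytes of real data */
	pad_wbuf(seg, 37, 1);

	assert(seg[36] == 0xab);		/* data untouched */
	assert(seg[37] == 0xff);		/* partial-page tail padded */
	assert(seg[SEG_SIZE - 1] == 0xff);	/* trailing full pages padded */
	printf("segment padded after byte 37\n");
	return 0;
}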
diff --git a/fs/logfs/super.c b/fs/logfs/super.c index c66beab78dee..9d856c49afc5 100644 --- a/fs/logfs/super.c +++ b/fs/logfs/super.c | |||
@@ -277,7 +277,7 @@ static int logfs_recover_sb(struct super_block *sb) | |||
277 | } | 277 | } |
278 | if (valid0 && valid1 && ds_cmp(ds0, ds1)) { | 278 | if (valid0 && valid1 && ds_cmp(ds0, ds1)) { |
279 | printk(KERN_INFO"Superblocks don't match - fixing.\n"); | 279 | printk(KERN_INFO"Superblocks don't match - fixing.\n"); |
280 | return write_one_sb(sb, super->s_devops->find_last_sb); | 280 | return logfs_write_sb(sb); |
281 | } | 281 | } |
282 | /* If neither is valid now, something's wrong. Didn't we properly | 282 | /* If neither is valid now, something's wrong. Didn't we properly |
283 | * check them before?!? */ | 283 | * check them before?!? */ |
@@ -289,6 +289,10 @@ static int logfs_make_writeable(struct super_block *sb) | |||
289 | { | 289 | { |
290 | int err; | 290 | int err; |
291 | 291 | ||
292 | err = logfs_open_segfile(sb); | ||
293 | if (err) | ||
294 | return err; | ||
295 | |||
292 | /* Repair any broken superblock copies */ | 296 | /* Repair any broken superblock copies */ |
293 | err = logfs_recover_sb(sb); | 297 | err = logfs_recover_sb(sb); |
294 | if (err) | 298 | if (err) |
@@ -299,10 +303,6 @@ static int logfs_make_writeable(struct super_block *sb) | |||
299 | if (err) | 303 | if (err) |
300 | return err; | 304 | return err; |
301 | 305 | ||
302 | err = logfs_open_segfile(sb); | ||
303 | if (err) | ||
304 | return err; | ||
305 | |||
306 | /* Do one GC pass before any data gets dirtied */ | 306 | /* Do one GC pass before any data gets dirtied */ |
307 | logfs_gc_pass(sb); | 307 | logfs_gc_pass(sb); |
308 | 308 | ||
@@ -328,7 +328,7 @@ static int logfs_get_sb_final(struct super_block *sb, struct vfsmount *mnt) | |||
328 | 328 | ||
329 | sb->s_root = d_alloc_root(rootdir); | 329 | sb->s_root = d_alloc_root(rootdir); |
330 | if (!sb->s_root) | 330 | if (!sb->s_root) |
331 | goto fail; | 331 | goto fail2; |
332 | 332 | ||
333 | super->s_erase_page = alloc_pages(GFP_KERNEL, 0); | 333 | super->s_erase_page = alloc_pages(GFP_KERNEL, 0); |
334 | if (!super->s_erase_page) | 334 | if (!super->s_erase_page) |
@@ -572,8 +572,7 @@ int logfs_get_sb_device(struct file_system_type *type, int flags, | |||
572 | return 0; | 572 | return 0; |
573 | 573 | ||
574 | err1: | 574 | err1: |
575 | up_write(&sb->s_umount); | 575 | deactivate_locked_super(sb); |
576 | deactivate_super(sb); | ||
577 | return err; | 576 | return err; |
578 | err0: | 577 | err0: |
579 | kfree(super); | 578 | kfree(super); |
diff --git a/fs/namei.c b/fs/namei.c index 1c0fca6e899e..a7dce91a7e42 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1610,8 +1610,7 @@ exit: | |||
1610 | 1610 | ||
1611 | static struct file *do_last(struct nameidata *nd, struct path *path, | 1611 | static struct file *do_last(struct nameidata *nd, struct path *path, |
1612 | int open_flag, int acc_mode, | 1612 | int open_flag, int acc_mode, |
1613 | int mode, const char *pathname, | 1613 | int mode, const char *pathname) |
1614 | int *want_dir) | ||
1615 | { | 1614 | { |
1616 | struct dentry *dir = nd->path.dentry; | 1615 | struct dentry *dir = nd->path.dentry; |
1617 | struct file *filp; | 1616 | struct file *filp; |
@@ -1642,7 +1641,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path, | |||
1642 | if (nd->last.name[nd->last.len]) { | 1641 | if (nd->last.name[nd->last.len]) { |
1643 | if (open_flag & O_CREAT) | 1642 | if (open_flag & O_CREAT) |
1644 | goto exit; | 1643 | goto exit; |
1645 | *want_dir = 1; | 1644 | nd->flags |= LOOKUP_DIRECTORY; |
1646 | } | 1645 | } |
1647 | 1646 | ||
1648 | /* just plain open? */ | 1647 | /* just plain open? */ |
@@ -1656,8 +1655,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path, | |||
1656 | if (path->dentry->d_inode->i_op->follow_link) | 1655 | if (path->dentry->d_inode->i_op->follow_link) |
1657 | return NULL; | 1656 | return NULL; |
1658 | error = -ENOTDIR; | 1657 | error = -ENOTDIR; |
1659 | if (*want_dir && !path->dentry->d_inode->i_op->lookup) | 1658 | if (nd->flags & LOOKUP_DIRECTORY) { |
1660 | goto exit_dput; | 1659 | if (!path->dentry->d_inode->i_op->lookup) |
1660 | goto exit_dput; | ||
1661 | } | ||
1661 | path_to_nameidata(path, nd); | 1662 | path_to_nameidata(path, nd); |
1662 | audit_inode(pathname, nd->path.dentry); | 1663 | audit_inode(pathname, nd->path.dentry); |
1663 | goto ok; | 1664 | goto ok; |
@@ -1766,7 +1767,6 @@ struct file *do_filp_open(int dfd, const char *pathname, | |||
1766 | int count = 0; | 1767 | int count = 0; |
1767 | int flag = open_to_namei_flags(open_flag); | 1768 | int flag = open_to_namei_flags(open_flag); |
1768 | int force_reval = 0; | 1769 | int force_reval = 0; |
1769 | int want_dir = open_flag & O_DIRECTORY; | ||
1770 | 1770 | ||
1771 | if (!(open_flag & O_CREAT)) | 1771 | if (!(open_flag & O_CREAT)) |
1772 | mode = 0; | 1772 | mode = 0; |
@@ -1828,7 +1828,9 @@ reval: | |||
1828 | if (open_flag & O_EXCL) | 1828 | if (open_flag & O_EXCL) |
1829 | nd.flags |= LOOKUP_EXCL; | 1829 | nd.flags |= LOOKUP_EXCL; |
1830 | } | 1830 | } |
1831 | filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir); | 1831 | if (open_flag & O_DIRECTORY) |
1832 | nd.flags |= LOOKUP_DIRECTORY; | ||
1833 | filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); | ||
1832 | while (unlikely(!filp)) { /* trailing symlink */ | 1834 | while (unlikely(!filp)) { /* trailing symlink */ |
1833 | struct path holder; | 1835 | struct path holder; |
1834 | struct inode *inode = path.dentry->d_inode; | 1836 | struct inode *inode = path.dentry->d_inode; |
@@ -1866,7 +1868,7 @@ reval: | |||
1866 | } | 1868 | } |
1867 | holder = path; | 1869 | holder = path; |
1868 | nd.flags &= ~LOOKUP_PARENT; | 1870 | nd.flags &= ~LOOKUP_PARENT; |
1869 | filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir); | 1871 | filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); |
1870 | if (inode->i_op->put_link) | 1872 | if (inode->i_op->put_link) |
1871 | inode->i_op->put_link(holder.dentry, &nd, cookie); | 1873 | inode->i_op->put_link(holder.dentry, &nd, cookie); |
1872 | path_put(&holder); | 1874 | path_put(&holder); |
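
The do_last() change above replaces the extra want_dir out-parameter with a bit in nd->flags that the callee already receives. A rough sketch of that interface shape, with invented flag names and values rather than the VFS constants:

#include <stdio.h>

#define SK_O_DIRECTORY	0x1	/* caller asked for a directory (open flag) */
#define SK_LOOKUP_DIR	0x2	/* lookup must resolve to a directory */

struct sk_nameidata {
	unsigned int flags;
};

static int sk_do_last(struct sk_nameidata *nd, int is_dir)
{
	/* The trailing-slash case can also set the bit internally. */
	if ((nd->flags & SK_LOOKUP_DIR) && !is_dir)
		return -1;	/* -ENOTDIR in the real code */
	return 0;
}

static int sk_open(unsigned int open_flags, int is_dir)
{
	struct sk_nameidata nd = { .flags = 0 };

	if (open_flags & SK_O_DIRECTORY)
		nd.flags |= SK_LOOKUP_DIR;
	return sk_do_last(&nd, is_dir);
}

int main(void)
{
	printf("O_DIRECTORY on a file: %d\n", sk_open(SK_O_DIRECTORY, 0));
	printf("O_DIRECTORY on a dir : %d\n", sk_open(SK_O_DIRECTORY, 1));
	return 0;
}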
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index ae8d02294e46..ae0d92736531 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -491,7 +491,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp) | |||
491 | { | 491 | { |
492 | dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); | 492 | dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); |
493 | 493 | ||
494 | if (gfp & __GFP_WAIT) | 494 | /* Only do I/O if gfp is a superset of GFP_KERNEL */ |
495 | if ((gfp & GFP_KERNEL) == GFP_KERNEL) | ||
495 | nfs_wb_page(page->mapping->host, page); | 496 | nfs_wb_page(page->mapping->host, page); |
496 | /* If PagePrivate() is set, then the page is not freeable */ | 497 | /* If PagePrivate() is set, then the page is not freeable */ |
497 | if (PagePrivate(page)) | 498 | if (PagePrivate(page)) |
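
The new test in nfs_release_page() requires the allocation mask to be a superset of GFP_KERNEL instead of merely having a wait bit set, so restricted contexts do not trigger writeback. A sketch of the superset check with illustrative flag values, not the kernel's gfp constants:

#include <stdio.h>

#define SK_GFP_WAIT	0x01
#define SK_GFP_IO	0x02
#define SK_GFP_FS	0x04
#define SK_GFP_KERNEL	(SK_GFP_WAIT | SK_GFP_IO | SK_GFP_FS)

static int may_do_writeback(unsigned int gfp)
{
	/* Only do I/O if gfp is a superset of GFP_KERNEL. */
	return (gfp & SK_GFP_KERNEL) == SK_GFP_KERNEL;
}

int main(void)
{
	/* A GFP_NOFS-like mask may wait and do I/O, but must not recurse
	 * into the filesystem, so writeback is refused. */
	printf("%d\n", may_do_writeback(SK_GFP_WAIT | SK_GFP_IO));
	printf("%d\n", may_do_writeback(SK_GFP_KERNEL));
	return 0;
}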
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 4d338be492cb..dd17713413a5 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -5552,6 +5552,8 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nf | |||
5552 | if (status != 0) | 5552 | if (status != 0) |
5553 | goto out; | 5553 | goto out; |
5554 | status = decode_delegreturn(&xdr); | 5554 | status = decode_delegreturn(&xdr); |
5555 | if (status != 0) | ||
5556 | goto out; | ||
5555 | decode_getfattr(&xdr, res->fattr, res->server, | 5557 | decode_getfattr(&xdr, res->fattr, res->server, |
5556 | !RPC_IS_ASYNC(rqstp->rq_task)); | 5558 | !RPC_IS_ASYNC(rqstp->rq_task)); |
5557 | out: | 5559 | out: |
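
The two added lines above stop the delegreturn decoder from running decode_getfattr() once an earlier decode step has failed. A trivial sketch of that early-out shape, with stand-in decoders rather than the XDR helpers:

#include <stdio.h>

static int decode_a(int fail) { return fail ? -5 : 0; }
static int decode_b(void)     { return 0; }

static int decode_reply(int fail_first)
{
	int status;

	status = decode_a(fail_first);
	if (status != 0)
		goto out;	/* previously fell through to decode_b() */
	status = decode_b();
out:
	return status;
}

int main(void)
{
	printf("ok path : %d\n", decode_reply(0));
	printf("err path: %d\n", decode_reply(1));
	return 0;
}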
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 636eaafd6ea2..6129a431aa34 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c | |||
@@ -323,14 +323,14 @@ int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs) | |||
323 | int nilfs_wait_on_logs(struct list_head *logs) | 323 | int nilfs_wait_on_logs(struct list_head *logs) |
324 | { | 324 | { |
325 | struct nilfs_segment_buffer *segbuf; | 325 | struct nilfs_segment_buffer *segbuf; |
326 | int err; | 326 | int err, ret = 0; |
327 | 327 | ||
328 | list_for_each_entry(segbuf, logs, sb_list) { | 328 | list_for_each_entry(segbuf, logs, sb_list) { |
329 | err = nilfs_segbuf_wait(segbuf); | 329 | err = nilfs_segbuf_wait(segbuf); |
330 | if (err) | 330 | if (err && !ret) |
331 | return err; | 331 | ret = err; |
332 | } | 332 | } |
333 | return 0; | 333 | return ret; |
334 | } | 334 | } |
335 | 335 | ||
336 | /* | 336 | /* |
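
nilfs_wait_on_logs() now waits on every buffer and reports the first failure, rather than returning at the first error and leaving later buffers unwaited. A sketch of that error-collection pattern, with the list simplified to an array and wait results faked:

#include <stdio.h>

static int wait_on_all(const int *results, int n)
{
	int i, err, ret = 0;

	for (i = 0; i < n; i++) {
		err = results[i];	/* stands in for nilfs_segbuf_wait() */
		if (err && !ret)
			ret = err;	/* remember only the first error */
	}
	return ret;
}

int main(void)
{
	int waits[] = { 0, -5, 0, -7 };

	/* Every entry is waited for; the first error (-5) is returned. */
	printf("%d\n", wait_on_all(waits, 4));
	return 0;
}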
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 69576a95e13f..c161d89061b5 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -1510,6 +1510,12 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci, | |||
1510 | if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE) | 1510 | if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE) |
1511 | break; | 1511 | break; |
1512 | 1512 | ||
1513 | nilfs_clear_logs(&sci->sc_segbufs); | ||
1514 | |||
1515 | err = nilfs_segctor_extend_segments(sci, nilfs, nadd); | ||
1516 | if (unlikely(err)) | ||
1517 | return err; | ||
1518 | |||
1513 | if (sci->sc_stage.flags & NILFS_CF_SUFREED) { | 1519 | if (sci->sc_stage.flags & NILFS_CF_SUFREED) { |
1514 | err = nilfs_sufile_cancel_freev(nilfs->ns_sufile, | 1520 | err = nilfs_sufile_cancel_freev(nilfs->ns_sufile, |
1515 | sci->sc_freesegs, | 1521 | sci->sc_freesegs, |
@@ -1517,12 +1523,6 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci, | |||
1517 | NULL); | 1523 | NULL); |
1518 | WARN_ON(err); /* do not happen */ | 1524 | WARN_ON(err); /* do not happen */ |
1519 | } | 1525 | } |
1520 | nilfs_clear_logs(&sci->sc_segbufs); | ||
1521 | |||
1522 | err = nilfs_segctor_extend_segments(sci, nilfs, nadd); | ||
1523 | if (unlikely(err)) | ||
1524 | return err; | ||
1525 | |||
1526 | nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA); | 1526 | nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA); |
1527 | sci->sc_stage = prev_stage; | 1527 | sci->sc_stage = prev_stage; |
1528 | } | 1528 | } |
@@ -1897,8 +1897,7 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci, | |||
1897 | 1897 | ||
1898 | list_splice_tail_init(&sci->sc_write_logs, &logs); | 1898 | list_splice_tail_init(&sci->sc_write_logs, &logs); |
1899 | ret = nilfs_wait_on_logs(&logs); | 1899 | ret = nilfs_wait_on_logs(&logs); |
1900 | if (ret) | 1900 | nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret ? : err); |
1901 | nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret); | ||
1902 | 1901 | ||
1903 | list_splice_tail_init(&sci->sc_segbufs, &logs); | 1902 | list_splice_tail_init(&sci->sc_segbufs, &logs); |
1904 | nilfs_cancel_segusage(&logs, nilfs->ns_sufile); | 1903 | nilfs_cancel_segusage(&logs, nilfs->ns_sufile); |
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 0501974bedd0..8ccf0f8c9cc8 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include "alloc.h" | 30 | #include "alloc.h" |
31 | #include "dlmglue.h" | 31 | #include "dlmglue.h" |
32 | #include "file.h" | 32 | #include "file.h" |
33 | #include "inode.h" | ||
34 | #include "journal.h" | ||
33 | #include "ocfs2_fs.h" | 35 | #include "ocfs2_fs.h" |
34 | 36 | ||
35 | #include "xattr.h" | 37 | #include "xattr.h" |
@@ -166,6 +168,60 @@ static struct posix_acl *ocfs2_get_acl(struct inode *inode, int type) | |||
166 | } | 168 | } |
167 | 169 | ||
168 | /* | 170 | /* |
171 | * Helper function to set i_mode in memory and on disk. Some call paths | ||
172 | * will not have di_bh or a journal handle to pass, in which case it | ||
173 | * will create its own. | ||
174 | */ | ||
175 | static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh, | ||
176 | handle_t *handle, umode_t new_mode) | ||
177 | { | ||
178 | int ret, commit_handle = 0; | ||
179 | struct ocfs2_dinode *di; | ||
180 | |||
181 | if (di_bh == NULL) { | ||
182 | ret = ocfs2_read_inode_block(inode, &di_bh); | ||
183 | if (ret) { | ||
184 | mlog_errno(ret); | ||
185 | goto out; | ||
186 | } | ||
187 | } else | ||
188 | get_bh(di_bh); | ||
189 | |||
190 | if (handle == NULL) { | ||
191 | handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb), | ||
192 | OCFS2_INODE_UPDATE_CREDITS); | ||
193 | if (IS_ERR(handle)) { | ||
194 | ret = PTR_ERR(handle); | ||
195 | mlog_errno(ret); | ||
196 | goto out_brelse; | ||
197 | } | ||
198 | |||
199 | commit_handle = 1; | ||
200 | } | ||
201 | |||
202 | di = (struct ocfs2_dinode *)di_bh->b_data; | ||
203 | ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, | ||
204 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
205 | if (ret) { | ||
206 | mlog_errno(ret); | ||
207 | goto out_commit; | ||
208 | } | ||
209 | |||
210 | inode->i_mode = new_mode; | ||
211 | di->i_mode = cpu_to_le16(inode->i_mode); | ||
212 | |||
213 | ocfs2_journal_dirty(handle, di_bh); | ||
214 | |||
215 | out_commit: | ||
216 | if (commit_handle) | ||
217 | ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); | ||
218 | out_brelse: | ||
219 | brelse(di_bh); | ||
220 | out: | ||
221 | return ret; | ||
222 | } | ||
223 | |||
224 | /* | ||
169 | * Set the access or default ACL of an inode. | 225 | * Set the access or default ACL of an inode. |
170 | */ | 226 | */ |
171 | static int ocfs2_set_acl(handle_t *handle, | 227 | static int ocfs2_set_acl(handle_t *handle, |
@@ -193,9 +249,14 @@ static int ocfs2_set_acl(handle_t *handle, | |||
193 | if (ret < 0) | 249 | if (ret < 0) |
194 | return ret; | 250 | return ret; |
195 | else { | 251 | else { |
196 | inode->i_mode = mode; | ||
197 | if (ret == 0) | 252 | if (ret == 0) |
198 | acl = NULL; | 253 | acl = NULL; |
254 | |||
255 | ret = ocfs2_acl_set_mode(inode, di_bh, | ||
256 | handle, mode); | ||
257 | if (ret) | ||
258 | return ret; | ||
259 | |||
199 | } | 260 | } |
200 | } | 261 | } |
201 | break; | 262 | break; |
@@ -283,6 +344,7 @@ int ocfs2_init_acl(handle_t *handle, | |||
283 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 344 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
284 | struct posix_acl *acl = NULL; | 345 | struct posix_acl *acl = NULL; |
285 | int ret = 0; | 346 | int ret = 0; |
347 | mode_t mode; | ||
286 | 348 | ||
287 | if (!S_ISLNK(inode->i_mode)) { | 349 | if (!S_ISLNK(inode->i_mode)) { |
288 | if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { | 350 | if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { |
@@ -291,12 +353,17 @@ int ocfs2_init_acl(handle_t *handle, | |||
291 | if (IS_ERR(acl)) | 353 | if (IS_ERR(acl)) |
292 | return PTR_ERR(acl); | 354 | return PTR_ERR(acl); |
293 | } | 355 | } |
294 | if (!acl) | 356 | if (!acl) { |
295 | inode->i_mode &= ~current_umask(); | 357 | mode = inode->i_mode & ~current_umask(); |
358 | ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode); | ||
359 | if (ret) { | ||
360 | mlog_errno(ret); | ||
361 | goto cleanup; | ||
362 | } | ||
363 | } | ||
296 | } | 364 | } |
297 | if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) { | 365 | if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) { |
298 | struct posix_acl *clone; | 366 | struct posix_acl *clone; |
299 | mode_t mode; | ||
300 | 367 | ||
301 | if (S_ISDIR(inode->i_mode)) { | 368 | if (S_ISDIR(inode->i_mode)) { |
302 | ret = ocfs2_set_acl(handle, inode, di_bh, | 369 | ret = ocfs2_set_acl(handle, inode, di_bh, |
@@ -313,7 +380,7 @@ int ocfs2_init_acl(handle_t *handle, | |||
313 | mode = inode->i_mode; | 380 | mode = inode->i_mode; |
314 | ret = posix_acl_create_masq(clone, &mode); | 381 | ret = posix_acl_create_masq(clone, &mode); |
315 | if (ret >= 0) { | 382 | if (ret >= 0) { |
316 | inode->i_mode = mode; | 383 | ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode); |
317 | if (ret > 0) { | 384 | if (ret > 0) { |
318 | ret = ocfs2_set_acl(handle, inode, | 385 | ret = ocfs2_set_acl(handle, inode, |
319 | di_bh, ACL_TYPE_ACCESS, | 386 | di_bh, ACL_TYPE_ACCESS, |
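
The new ocfs2_acl_set_mode() helper either borrows the caller's inode buffer and journal handle or creates its own and commits it, tracking ownership with a flag. A user-space sketch of that "use the caller's resource or own one" pattern, with a FILE* standing in for the journal handle and all names illustrative:

#include <stdio.h>

static int set_mode_record(FILE *log, unsigned int new_mode)
{
	int ret = 0, close_log = 0;

	if (log == NULL) {
		log = fopen("/dev/null", "w");	/* illustrative stand-in */
		if (!log)
			return -1;
		close_log = 1;			/* we opened it, we close it */
	}

	if (fprintf(log, "mode=%o\n", new_mode) < 0)
		ret = -1;

	if (close_log)
		fclose(log);
	return ret;
}

int main(void)
{
	/* Caller without a handle: the helper opens and closes its own. */
	printf("own handle   : %d\n", set_mode_record(NULL, 0644));

	/* Caller with a handle: the helper only uses it. */
	printf("caller handle: %d\n", set_mode_record(stdout, 0755));
	return 0;
}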
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index a659606dcb95..9289b4357d27 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -1875,7 +1875,6 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, | |||
1875 | ok: | 1875 | ok: |
1876 | spin_unlock(&res->spinlock); | 1876 | spin_unlock(&res->spinlock); |
1877 | } | 1877 | } |
1878 | spin_unlock(&dlm->spinlock); | ||
1879 | 1878 | ||
1880 | // mlog(0, "woo! got an assert_master from node %u!\n", | 1879 | // mlog(0, "woo! got an assert_master from node %u!\n", |
1881 | // assert->node_idx); | 1880 | // assert->node_idx); |
@@ -1926,7 +1925,6 @@ ok: | |||
1926 | /* master is known, detach if not already detached. | 1925 | /* master is known, detach if not already detached. |
1927 | * ensures that only one assert_master call will happen | 1926 | * ensures that only one assert_master call will happen |
1928 | * on this mle. */ | 1927 | * on this mle. */ |
1929 | spin_lock(&dlm->spinlock); | ||
1930 | spin_lock(&dlm->master_lock); | 1928 | spin_lock(&dlm->master_lock); |
1931 | 1929 | ||
1932 | rr = atomic_read(&mle->mle_refs.refcount); | 1930 | rr = atomic_read(&mle->mle_refs.refcount); |
@@ -1959,7 +1957,6 @@ ok: | |||
1959 | __dlm_put_mle(mle); | 1957 | __dlm_put_mle(mle); |
1960 | } | 1958 | } |
1961 | spin_unlock(&dlm->master_lock); | 1959 | spin_unlock(&dlm->master_lock); |
1962 | spin_unlock(&dlm->spinlock); | ||
1963 | } else if (res) { | 1960 | } else if (res) { |
1964 | if (res->owner != assert->node_idx) { | 1961 | if (res->owner != assert->node_idx) { |
1965 | mlog(0, "assert_master from %u, but current " | 1962 | mlog(0, "assert_master from %u, but current " |
@@ -1967,6 +1964,7 @@ ok: | |||
1967 | res->owner, namelen, name); | 1964 | res->owner, namelen, name); |
1968 | } | 1965 | } |
1969 | } | 1966 | } |
1967 | spin_unlock(&dlm->spinlock); | ||
1970 | 1968 | ||
1971 | done: | 1969 | done: |
1972 | ret = 0; | 1970 | ret = 0; |
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 278a223aae14..ab207901d32a 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c | |||
@@ -891,6 +891,21 @@ static int ocfs2_query_inode_wipe(struct inode *inode, | |||
891 | /* Do some basic inode verification... */ | 891 | /* Do some basic inode verification... */ |
892 | di = (struct ocfs2_dinode *) di_bh->b_data; | 892 | di = (struct ocfs2_dinode *) di_bh->b_data; |
893 | if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL))) { | 893 | if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL))) { |
894 | /* | ||
895 | * Inodes in the orphan dir must have ORPHANED_FL. The only | ||
896 | * inodes that come back out of the orphan dir are reflink | ||
897 | * targets. A reflink target may be moved out of the orphan | ||
898 | * dir between the time we scan the directory and the time we | ||
899 | * process it. This would lead to HAS_REFCOUNT_FL being set but | ||
900 | * ORPHANED_FL not. | ||
901 | */ | ||
902 | if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) { | ||
903 | mlog(0, "Reflinked inode %llu is no longer orphaned. " | ||
904 | "it shouldn't be deleted\n", | ||
905 | (unsigned long long)oi->ip_blkno); | ||
906 | goto bail; | ||
907 | } | ||
908 | |||
894 | /* for lack of a better error? */ | 909 | /* for lack of a better error? */ |
895 | status = -EEXIST; | 910 | status = -EEXIST; |
896 | mlog(ML_ERROR, | 911 | mlog(ML_ERROR, |
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index ca992d91f511..c983715d8d8c 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c | |||
@@ -872,8 +872,10 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb, | |||
872 | (unsigned long long)la_start_blk, | 872 | (unsigned long long)la_start_blk, |
873 | (unsigned long long)blkno); | 873 | (unsigned long long)blkno); |
874 | 874 | ||
875 | status = ocfs2_free_clusters(handle, main_bm_inode, | 875 | status = ocfs2_release_clusters(handle, |
876 | main_bm_bh, blkno, count); | 876 | main_bm_inode, |
877 | main_bm_bh, blkno, | ||
878 | count); | ||
877 | if (status < 0) { | 879 | if (status < 0) { |
878 | mlog_errno(status); | 880 | mlog_errno(status); |
879 | goto bail; | 881 | goto bail; |
@@ -984,8 +986,7 @@ static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb, | |||
984 | } | 986 | } |
985 | 987 | ||
986 | retry_enospc: | 988 | retry_enospc: |
987 | (*ac)->ac_bits_wanted = osb->local_alloc_bits; | 989 | (*ac)->ac_bits_wanted = osb->local_alloc_default_bits; |
988 | |||
989 | status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac); | 990 | status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac); |
990 | if (status == -ENOSPC) { | 991 | if (status == -ENOSPC) { |
991 | if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) == | 992 | if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) == |
@@ -1061,6 +1062,7 @@ retry_enospc: | |||
1061 | OCFS2_LA_DISABLED) | 1062 | OCFS2_LA_DISABLED) |
1062 | goto bail; | 1063 | goto bail; |
1063 | 1064 | ||
1065 | ac->ac_bits_wanted = osb->local_alloc_default_bits; | ||
1064 | status = ocfs2_claim_clusters(osb, handle, ac, | 1066 | status = ocfs2_claim_clusters(osb, handle, ac, |
1065 | osb->local_alloc_bits, | 1067 | osb->local_alloc_bits, |
1066 | &cluster_off, | 1068 | &cluster_off, |
diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c index 544ac6245175..b5cb3ede9408 100644 --- a/fs/ocfs2/locks.c +++ b/fs/ocfs2/locks.c | |||
@@ -133,7 +133,7 @@ int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl) | |||
133 | 133 | ||
134 | if (!(fl->fl_flags & FL_POSIX)) | 134 | if (!(fl->fl_flags & FL_POSIX)) |
135 | return -ENOLCK; | 135 | return -ENOLCK; |
136 | if (__mandatory_lock(inode)) | 136 | if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) |
137 | return -ENOLCK; | 137 | return -ENOLCK; |
138 | 138 | ||
139 | return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl); | 139 | return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl); |
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index d9cd4e373a53..b1eb50ae4097 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
@@ -84,7 +84,7 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, | |||
84 | static int ocfs2_orphan_add(struct ocfs2_super *osb, | 84 | static int ocfs2_orphan_add(struct ocfs2_super *osb, |
85 | handle_t *handle, | 85 | handle_t *handle, |
86 | struct inode *inode, | 86 | struct inode *inode, |
87 | struct ocfs2_dinode *fe, | 87 | struct buffer_head *fe_bh, |
88 | char *name, | 88 | char *name, |
89 | struct ocfs2_dir_lookup_result *lookup, | 89 | struct ocfs2_dir_lookup_result *lookup, |
90 | struct inode *orphan_dir_inode); | 90 | struct inode *orphan_dir_inode); |
@@ -879,7 +879,7 @@ static int ocfs2_unlink(struct inode *dir, | |||
879 | fe = (struct ocfs2_dinode *) fe_bh->b_data; | 879 | fe = (struct ocfs2_dinode *) fe_bh->b_data; |
880 | 880 | ||
881 | if (inode_is_unlinkable(inode)) { | 881 | if (inode_is_unlinkable(inode)) { |
882 | status = ocfs2_orphan_add(osb, handle, inode, fe, orphan_name, | 882 | status = ocfs2_orphan_add(osb, handle, inode, fe_bh, orphan_name, |
883 | &orphan_insert, orphan_dir); | 883 | &orphan_insert, orphan_dir); |
884 | if (status < 0) { | 884 | if (status < 0) { |
885 | mlog_errno(status); | 885 | mlog_errno(status); |
@@ -1300,7 +1300,7 @@ static int ocfs2_rename(struct inode *old_dir, | |||
1300 | if (S_ISDIR(new_inode->i_mode) || | 1300 | if (S_ISDIR(new_inode->i_mode) || |
1301 | (ocfs2_read_links_count(newfe) == 1)) { | 1301 | (ocfs2_read_links_count(newfe) == 1)) { |
1302 | status = ocfs2_orphan_add(osb, handle, new_inode, | 1302 | status = ocfs2_orphan_add(osb, handle, new_inode, |
1303 | newfe, orphan_name, | 1303 | newfe_bh, orphan_name, |
1304 | &orphan_insert, orphan_dir); | 1304 | &orphan_insert, orphan_dir); |
1305 | if (status < 0) { | 1305 | if (status < 0) { |
1306 | mlog_errno(status); | 1306 | mlog_errno(status); |
@@ -1911,7 +1911,7 @@ leave: | |||
1911 | static int ocfs2_orphan_add(struct ocfs2_super *osb, | 1911 | static int ocfs2_orphan_add(struct ocfs2_super *osb, |
1912 | handle_t *handle, | 1912 | handle_t *handle, |
1913 | struct inode *inode, | 1913 | struct inode *inode, |
1914 | struct ocfs2_dinode *fe, | 1914 | struct buffer_head *fe_bh, |
1915 | char *name, | 1915 | char *name, |
1916 | struct ocfs2_dir_lookup_result *lookup, | 1916 | struct ocfs2_dir_lookup_result *lookup, |
1917 | struct inode *orphan_dir_inode) | 1917 | struct inode *orphan_dir_inode) |
@@ -1919,6 +1919,7 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, | |||
1919 | struct buffer_head *orphan_dir_bh = NULL; | 1919 | struct buffer_head *orphan_dir_bh = NULL; |
1920 | int status = 0; | 1920 | int status = 0; |
1921 | struct ocfs2_dinode *orphan_fe; | 1921 | struct ocfs2_dinode *orphan_fe; |
1922 | struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data; | ||
1922 | 1923 | ||
1923 | mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); | 1924 | mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); |
1924 | 1925 | ||
@@ -1959,6 +1960,21 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, | |||
1959 | goto leave; | 1960 | goto leave; |
1960 | } | 1961 | } |
1961 | 1962 | ||
1963 | /* | ||
1964 | * We're going to journal the change of i_flags and i_orphaned_slot. | ||
1965 | * It's safe anyway, though some callers may duplicate the journaling. | ||
1966 | * Journaling within the func just make the logic look more | ||
1967 | * straightforward. | ||
1968 | */ | ||
1969 | status = ocfs2_journal_access_di(handle, | ||
1970 | INODE_CACHE(inode), | ||
1971 | fe_bh, | ||
1972 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
1973 | if (status < 0) { | ||
1974 | mlog_errno(status); | ||
1975 | goto leave; | ||
1976 | } | ||
1977 | |||
1962 | le32_add_cpu(&fe->i_flags, OCFS2_ORPHANED_FL); | 1978 | le32_add_cpu(&fe->i_flags, OCFS2_ORPHANED_FL); |
1963 | 1979 | ||
1964 | /* Record which orphan dir our inode now resides | 1980 | /* Record which orphan dir our inode now resides |
@@ -1966,6 +1982,8 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, | |||
1966 | * dir to lock. */ | 1982 | * dir to lock. */ |
1967 | fe->i_orphaned_slot = cpu_to_le16(osb->slot_num); | 1983 | fe->i_orphaned_slot = cpu_to_le16(osb->slot_num); |
1968 | 1984 | ||
1985 | ocfs2_journal_dirty(handle, fe_bh); | ||
1986 | |||
1969 | mlog(0, "Inode %llu orphaned in slot %d\n", | 1987 | mlog(0, "Inode %llu orphaned in slot %d\n", |
1970 | (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num); | 1988 | (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num); |
1971 | 1989 | ||
@@ -2123,7 +2141,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
2123 | } | 2141 | } |
2124 | 2142 | ||
2125 | di = (struct ocfs2_dinode *)new_di_bh->b_data; | 2143 | di = (struct ocfs2_dinode *)new_di_bh->b_data; |
2126 | status = ocfs2_orphan_add(osb, handle, inode, di, orphan_name, | 2144 | status = ocfs2_orphan_add(osb, handle, inode, new_di_bh, orphan_name, |
2127 | &orphan_insert, orphan_dir); | 2145 | &orphan_insert, orphan_dir); |
2128 | if (status < 0) { | 2146 | if (status < 0) { |
2129 | mlog_errno(status); | 2147 | mlog_errno(status); |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 1238b491db90..adf5e2ebc2c4 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -763,8 +763,18 @@ static inline unsigned int ocfs2_megabytes_to_clusters(struct super_block *sb, | |||
763 | return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits); | 763 | return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits); |
764 | } | 764 | } |
765 | 765 | ||
766 | #define ocfs2_set_bit ext2_set_bit | 766 | static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap) |
767 | #define ocfs2_clear_bit ext2_clear_bit | 767 | { |
768 | ext2_set_bit(bit, bitmap); | ||
769 | } | ||
770 | #define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr)) | ||
771 | |||
772 | static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap) | ||
773 | { | ||
774 | ext2_clear_bit(bit, bitmap); | ||
775 | } | ||
776 | #define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr)) | ||
777 | |||
768 | #define ocfs2_test_bit ext2_test_bit | 778 | #define ocfs2_test_bit ext2_test_bit |
769 | #define ocfs2_find_next_zero_bit ext2_find_next_zero_bit | 779 | #define ocfs2_find_next_zero_bit ext2_find_next_zero_bit |
770 | #define ocfs2_find_next_bit ext2_find_next_bit | 780 | #define ocfs2_find_next_bit ext2_find_next_bit |
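
Turning ocfs2_set_bit/ocfs2_clear_bit from bare macro aliases into static inline wrappers gives the compiler a real prototype to check and one place to apply the unsigned long * cast for the differently typed on-disk bitmap fields. A sketch of the same idea with invented names and a plain bit-set implementation:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG_SK (sizeof(unsigned long) * CHAR_BIT)

static inline void sk_set_bit(unsigned int bit, unsigned long *bitmap)
{
	bitmap[bit / BITS_PER_LONG_SK] |= 1UL << (bit % BITS_PER_LONG_SK);
}

/* Callers pass variously typed bitmap fields, so a thin macro
 * centralises the cast, mirroring the ocfs2 change. */
#define sk_set_bit_any(bit, addr) sk_set_bit((bit), (unsigned long *)(addr))

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	sk_set_bit_any(3, map);
	sk_set_bit_any(BITS_PER_LONG_SK + 1, map);
	printf("%lx %lx\n", map[0], map[1]);	/* prints: 8 2 */
	return 0;
}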
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 9e96921dffda..29405f2ff616 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -4075,6 +4075,7 @@ static int ocfs2_complete_reflink(struct inode *s_inode, | |||
4075 | OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features; | 4075 | OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features; |
4076 | spin_unlock(&OCFS2_I(t_inode)->ip_lock); | 4076 | spin_unlock(&OCFS2_I(t_inode)->ip_lock); |
4077 | i_size_write(t_inode, size); | 4077 | i_size_write(t_inode, size); |
4078 | t_inode->i_blocks = s_inode->i_blocks; | ||
4078 | 4079 | ||
4079 | di->i_xattr_inline_size = s_di->i_xattr_inline_size; | 4080 | di->i_xattr_inline_size = s_di->i_xattr_inline_size; |
4080 | di->i_clusters = s_di->i_clusters; | 4081 | di->i_clusters = s_di->i_clusters; |
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index c3c60bc3e072..19ba00f28547 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
@@ -95,13 +95,6 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle, | |||
95 | struct buffer_head *group_bh, | 95 | struct buffer_head *group_bh, |
96 | unsigned int bit_off, | 96 | unsigned int bit_off, |
97 | unsigned int num_bits); | 97 | unsigned int num_bits); |
98 | static inline int ocfs2_block_group_clear_bits(handle_t *handle, | ||
99 | struct inode *alloc_inode, | ||
100 | struct ocfs2_group_desc *bg, | ||
101 | struct buffer_head *group_bh, | ||
102 | unsigned int bit_off, | ||
103 | unsigned int num_bits); | ||
104 | |||
105 | static int ocfs2_relink_block_group(handle_t *handle, | 98 | static int ocfs2_relink_block_group(handle_t *handle, |
106 | struct inode *alloc_inode, | 99 | struct inode *alloc_inode, |
107 | struct buffer_head *fe_bh, | 100 | struct buffer_head *fe_bh, |
@@ -152,7 +145,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) | |||
152 | 145 | ||
153 | #define do_error(fmt, ...) \ | 146 | #define do_error(fmt, ...) \ |
154 | do{ \ | 147 | do{ \ |
155 | if (clean_error) \ | 148 | if (resize) \ |
156 | mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \ | 149 | mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \ |
157 | else \ | 150 | else \ |
158 | ocfs2_error(sb, fmt, ##__VA_ARGS__); \ | 151 | ocfs2_error(sb, fmt, ##__VA_ARGS__); \ |
@@ -160,7 +153,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) | |||
160 | 153 | ||
161 | static int ocfs2_validate_gd_self(struct super_block *sb, | 154 | static int ocfs2_validate_gd_self(struct super_block *sb, |
162 | struct buffer_head *bh, | 155 | struct buffer_head *bh, |
163 | int clean_error) | 156 | int resize) |
164 | { | 157 | { |
165 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; | 158 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; |
166 | 159 | ||
@@ -211,7 +204,7 @@ static int ocfs2_validate_gd_self(struct super_block *sb, | |||
211 | static int ocfs2_validate_gd_parent(struct super_block *sb, | 204 | static int ocfs2_validate_gd_parent(struct super_block *sb, |
212 | struct ocfs2_dinode *di, | 205 | struct ocfs2_dinode *di, |
213 | struct buffer_head *bh, | 206 | struct buffer_head *bh, |
214 | int clean_error) | 207 | int resize) |
215 | { | 208 | { |
216 | unsigned int max_bits; | 209 | unsigned int max_bits; |
217 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; | 210 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; |
@@ -233,8 +226,11 @@ static int ocfs2_validate_gd_parent(struct super_block *sb, | |||
233 | return -EINVAL; | 226 | return -EINVAL; |
234 | } | 227 | } |
235 | 228 | ||
236 | if (le16_to_cpu(gd->bg_chain) >= | 229 | /* In resize, we may meet the case bg_chain == cl_next_free_rec. */ |
237 | le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) { | 230 | if ((le16_to_cpu(gd->bg_chain) > |
231 | le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) || | ||
232 | ((le16_to_cpu(gd->bg_chain) == | ||
233 | le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) { | ||
238 | do_error("Group descriptor #%llu has bad chain %u", | 234 | do_error("Group descriptor #%llu has bad chain %u", |
239 | (unsigned long long)bh->b_blocknr, | 235 | (unsigned long long)bh->b_blocknr, |
240 | le16_to_cpu(gd->bg_chain)); | 236 | le16_to_cpu(gd->bg_chain)); |
@@ -1975,18 +1971,18 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb, | |||
1975 | bits_wanted, cluster_start, num_clusters); | 1971 | bits_wanted, cluster_start, num_clusters); |
1976 | } | 1972 | } |
1977 | 1973 | ||
1978 | static inline int ocfs2_block_group_clear_bits(handle_t *handle, | 1974 | static int ocfs2_block_group_clear_bits(handle_t *handle, |
1979 | struct inode *alloc_inode, | 1975 | struct inode *alloc_inode, |
1980 | struct ocfs2_group_desc *bg, | 1976 | struct ocfs2_group_desc *bg, |
1981 | struct buffer_head *group_bh, | 1977 | struct buffer_head *group_bh, |
1982 | unsigned int bit_off, | 1978 | unsigned int bit_off, |
1983 | unsigned int num_bits) | 1979 | unsigned int num_bits, |
1980 | void (*undo_fn)(unsigned int bit, | ||
1981 | unsigned long *bmap)) | ||
1984 | { | 1982 | { |
1985 | int status; | 1983 | int status; |
1986 | unsigned int tmp; | 1984 | unsigned int tmp; |
1987 | int journal_type = OCFS2_JOURNAL_ACCESS_WRITE; | ||
1988 | struct ocfs2_group_desc *undo_bg = NULL; | 1985 | struct ocfs2_group_desc *undo_bg = NULL; |
1989 | int cluster_bitmap = 0; | ||
1990 | 1986 | ||
1991 | mlog_entry_void(); | 1987 | mlog_entry_void(); |
1992 | 1988 | ||
@@ -1996,20 +1992,18 @@ static inline int ocfs2_block_group_clear_bits(handle_t *handle, | |||
1996 | 1992 | ||
1997 | mlog(0, "off = %u, num = %u\n", bit_off, num_bits); | 1993 | mlog(0, "off = %u, num = %u\n", bit_off, num_bits); |
1998 | 1994 | ||
1999 | if (ocfs2_is_cluster_bitmap(alloc_inode)) | 1995 | BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode)); |
2000 | journal_type = OCFS2_JOURNAL_ACCESS_UNDO; | ||
2001 | |||
2002 | status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), | 1996 | status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), |
2003 | group_bh, journal_type); | 1997 | group_bh, |
1998 | undo_fn ? | ||
1999 | OCFS2_JOURNAL_ACCESS_UNDO : | ||
2000 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
2004 | if (status < 0) { | 2001 | if (status < 0) { |
2005 | mlog_errno(status); | 2002 | mlog_errno(status); |
2006 | goto bail; | 2003 | goto bail; |
2007 | } | 2004 | } |
2008 | 2005 | ||
2009 | if (ocfs2_is_cluster_bitmap(alloc_inode)) | 2006 | if (undo_fn) { |
2010 | cluster_bitmap = 1; | ||
2011 | |||
2012 | if (cluster_bitmap) { | ||
2013 | jbd_lock_bh_state(group_bh); | 2007 | jbd_lock_bh_state(group_bh); |
2014 | undo_bg = (struct ocfs2_group_desc *) | 2008 | undo_bg = (struct ocfs2_group_desc *) |
2015 | bh2jh(group_bh)->b_committed_data; | 2009 | bh2jh(group_bh)->b_committed_data; |
@@ -2020,13 +2014,13 @@ static inline int ocfs2_block_group_clear_bits(handle_t *handle, | |||
2020 | while(tmp--) { | 2014 | while(tmp--) { |
2021 | ocfs2_clear_bit((bit_off + tmp), | 2015 | ocfs2_clear_bit((bit_off + tmp), |
2022 | (unsigned long *) bg->bg_bitmap); | 2016 | (unsigned long *) bg->bg_bitmap); |
2023 | if (cluster_bitmap) | 2017 | if (undo_fn) |
2024 | ocfs2_set_bit(bit_off + tmp, | 2018 | undo_fn(bit_off + tmp, |
2025 | (unsigned long *) undo_bg->bg_bitmap); | 2019 | (unsigned long *) undo_bg->bg_bitmap); |
2026 | } | 2020 | } |
2027 | le16_add_cpu(&bg->bg_free_bits_count, num_bits); | 2021 | le16_add_cpu(&bg->bg_free_bits_count, num_bits); |
2028 | 2022 | ||
2029 | if (cluster_bitmap) | 2023 | if (undo_fn) |
2030 | jbd_unlock_bh_state(group_bh); | 2024 | jbd_unlock_bh_state(group_bh); |
2031 | 2025 | ||
2032 | status = ocfs2_journal_dirty(handle, group_bh); | 2026 | status = ocfs2_journal_dirty(handle, group_bh); |
@@ -2039,12 +2033,14 @@ bail: | |||
2039 | /* | 2033 | /* |
2040 | * expects the suballoc inode to already be locked. | 2034 | * expects the suballoc inode to already be locked. |
2041 | */ | 2035 | */ |
2042 | int ocfs2_free_suballoc_bits(handle_t *handle, | 2036 | static int _ocfs2_free_suballoc_bits(handle_t *handle, |
2043 | struct inode *alloc_inode, | 2037 | struct inode *alloc_inode, |
2044 | struct buffer_head *alloc_bh, | 2038 | struct buffer_head *alloc_bh, |
2045 | unsigned int start_bit, | 2039 | unsigned int start_bit, |
2046 | u64 bg_blkno, | 2040 | u64 bg_blkno, |
2047 | unsigned int count) | 2041 | unsigned int count, |
2042 | void (*undo_fn)(unsigned int bit, | ||
2043 | unsigned long *bitmap)) | ||
2048 | { | 2044 | { |
2049 | int status = 0; | 2045 | int status = 0; |
2050 | u32 tmp_used; | 2046 | u32 tmp_used; |
@@ -2079,7 +2075,7 @@ int ocfs2_free_suballoc_bits(handle_t *handle, | |||
2079 | 2075 | ||
2080 | status = ocfs2_block_group_clear_bits(handle, alloc_inode, | 2076 | status = ocfs2_block_group_clear_bits(handle, alloc_inode, |
2081 | group, group_bh, | 2077 | group, group_bh, |
2082 | start_bit, count); | 2078 | start_bit, count, undo_fn); |
2083 | if (status < 0) { | 2079 | if (status < 0) { |
2084 | mlog_errno(status); | 2080 | mlog_errno(status); |
2085 | goto bail; | 2081 | goto bail; |
@@ -2110,6 +2106,17 @@ bail: | |||
2110 | return status; | 2106 | return status; |
2111 | } | 2107 | } |
2112 | 2108 | ||
2109 | int ocfs2_free_suballoc_bits(handle_t *handle, | ||
2110 | struct inode *alloc_inode, | ||
2111 | struct buffer_head *alloc_bh, | ||
2112 | unsigned int start_bit, | ||
2113 | u64 bg_blkno, | ||
2114 | unsigned int count) | ||
2115 | { | ||
2116 | return _ocfs2_free_suballoc_bits(handle, alloc_inode, alloc_bh, | ||
2117 | start_bit, bg_blkno, count, NULL); | ||
2118 | } | ||
2119 | |||
2113 | int ocfs2_free_dinode(handle_t *handle, | 2120 | int ocfs2_free_dinode(handle_t *handle, |
2114 | struct inode *inode_alloc_inode, | 2121 | struct inode *inode_alloc_inode, |
2115 | struct buffer_head *inode_alloc_bh, | 2122 | struct buffer_head *inode_alloc_bh, |
@@ -2123,11 +2130,13 @@ int ocfs2_free_dinode(handle_t *handle, | |||
2123 | inode_alloc_bh, bit, bg_blkno, 1); | 2130 | inode_alloc_bh, bit, bg_blkno, 1); |
2124 | } | 2131 | } |
2125 | 2132 | ||
2126 | int ocfs2_free_clusters(handle_t *handle, | 2133 | static int _ocfs2_free_clusters(handle_t *handle, |
2127 | struct inode *bitmap_inode, | 2134 | struct inode *bitmap_inode, |
2128 | struct buffer_head *bitmap_bh, | 2135 | struct buffer_head *bitmap_bh, |
2129 | u64 start_blk, | 2136 | u64 start_blk, |
2130 | unsigned int num_clusters) | 2137 | unsigned int num_clusters, |
2138 | void (*undo_fn)(unsigned int bit, | ||
2139 | unsigned long *bitmap)) | ||
2131 | { | 2140 | { |
2132 | int status; | 2141 | int status; |
2133 | u16 bg_start_bit; | 2142 | u16 bg_start_bit; |
@@ -2154,9 +2163,9 @@ int ocfs2_free_clusters(handle_t *handle, | |||
2154 | mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n", | 2163 | mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n", |
2155 | (unsigned long long)bg_blkno, bg_start_bit); | 2164 | (unsigned long long)bg_blkno, bg_start_bit); |
2156 | 2165 | ||
2157 | status = ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh, | 2166 | status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh, |
2158 | bg_start_bit, bg_blkno, | 2167 | bg_start_bit, bg_blkno, |
2159 | num_clusters); | 2168 | num_clusters, undo_fn); |
2160 | if (status < 0) { | 2169 | if (status < 0) { |
2161 | mlog_errno(status); | 2170 | mlog_errno(status); |
2162 | goto out; | 2171 | goto out; |
@@ -2170,6 +2179,32 @@ out: | |||
2170 | return status; | 2179 | return status; |
2171 | } | 2180 | } |
2172 | 2181 | ||
2182 | int ocfs2_free_clusters(handle_t *handle, | ||
2183 | struct inode *bitmap_inode, | ||
2184 | struct buffer_head *bitmap_bh, | ||
2185 | u64 start_blk, | ||
2186 | unsigned int num_clusters) | ||
2187 | { | ||
2188 | return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh, | ||
2189 | start_blk, num_clusters, | ||
2190 | _ocfs2_set_bit); | ||
2191 | } | ||
2192 | |||
2193 | /* | ||
2194 | * Give never-used clusters back to the global bitmap. We don't need | ||
2195 | * to protect these bits in the undo buffer. | ||
2196 | */ | ||
2197 | int ocfs2_release_clusters(handle_t *handle, | ||
2198 | struct inode *bitmap_inode, | ||
2199 | struct buffer_head *bitmap_bh, | ||
2200 | u64 start_blk, | ||
2201 | unsigned int num_clusters) | ||
2202 | { | ||
2203 | return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh, | ||
2204 | start_blk, num_clusters, | ||
2205 | _ocfs2_clear_bit); | ||
2206 | } | ||
2207 | |||
2173 | static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg) | 2208 | static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg) |
2174 | { | 2209 | { |
2175 | printk("Block Group:\n"); | 2210 | printk("Block Group:\n"); |
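
The suballoc refactor above threads an optional undo_fn callback through the shared clear-bits path, so freeing clusters (set the bit in the undo copy) and releasing never-used clusters (clear it) reuse one implementation. A simplified sketch with byte-per-bit arrays instead of real bitmaps and journal undo buffers:

#include <stdio.h>

static void bit_set(unsigned int bit, unsigned char *bmap)   { bmap[bit] = 1; }
static void bit_clear(unsigned int bit, unsigned char *bmap) { bmap[bit] = 0; }

static void clear_bits(unsigned char *bmap, unsigned char *undo,
		       unsigned int off, unsigned int count,
		       void (*undo_fn)(unsigned int, unsigned char *))
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		bmap[off + i] = 0;		/* always clear the live bit */
		if (undo_fn)			/* mirror into the undo copy */
			undo_fn(off + i, undo);
	}
}

int main(void)
{
	unsigned char live[8] = {1,1,1,1,1,1,1,1};
	unsigned char undo[8] = {1,1,1,1,1,1,1,1};

	clear_bits(live, undo, 0, 2, bit_set);	  /* free: keep undo protected */
	clear_bits(live, undo, 4, 2, bit_clear);  /* release: give back at once */

	printf("undo: %d%d%d%d%d%d%d%d\n", undo[0], undo[1], undo[2],
	       undo[3], undo[4], undo[5], undo[6], undo[7]);
	return 0;
}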
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index fa60723c43e8..e0f46df357e6 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h | |||
@@ -127,6 +127,11 @@ int ocfs2_free_clusters(handle_t *handle, | |||
127 | struct buffer_head *bitmap_bh, | 127 | struct buffer_head *bitmap_bh, |
128 | u64 start_blk, | 128 | u64 start_blk, |
129 | unsigned int num_clusters); | 129 | unsigned int num_clusters); |
130 | int ocfs2_release_clusters(handle_t *handle, | ||
131 | struct inode *bitmap_inode, | ||
132 | struct buffer_head *bitmap_bh, | ||
133 | u64 start_blk, | ||
134 | unsigned int num_clusters); | ||
130 | 135 | ||
131 | static inline u64 ocfs2_which_suballoc_group(u64 block, unsigned int bit) | 136 | static inline u64 ocfs2_which_suballoc_group(u64 block, unsigned int bit) |
132 | { | 137 | { |
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index d1b0d386f6d1..3e7773089b96 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
@@ -1622,7 +1622,7 @@ static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc) | |||
1622 | /* Now tell xh->xh_entries about it */ | 1622 | /* Now tell xh->xh_entries about it */ |
1623 | for (i = 0; i < count; i++) { | 1623 | for (i = 0; i < count; i++) { |
1624 | offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset); | 1624 | offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset); |
1625 | if (offset < namevalue_offset) | 1625 | if (offset <= namevalue_offset) |
1626 | le16_add_cpu(&xh->xh_entries[i].xe_name_offset, | 1626 | le16_add_cpu(&xh->xh_entries[i].xe_name_offset, |
1627 | namevalue_size); | 1627 | namevalue_size); |
1628 | } | 1628 | } |
@@ -6528,13 +6528,11 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode, | |||
6528 | int indexed) | 6528 | int indexed) |
6529 | { | 6529 | { |
6530 | int ret; | 6530 | int ret; |
6531 | struct ocfs2_alloc_context *meta_ac; | ||
6532 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 6531 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
6533 | struct ocfs2_xattr_set_ctxt ctxt = { | 6532 | struct ocfs2_xattr_set_ctxt ctxt; |
6534 | .meta_ac = meta_ac, | ||
6535 | }; | ||
6536 | 6533 | ||
6537 | ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); | 6534 | memset(&ctxt, 0, sizeof(ctxt)); |
6535 | ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &ctxt.meta_ac); | ||
6538 | if (ret < 0) { | 6536 | if (ret < 0) { |
6539 | mlog_errno(ret); | 6537 | mlog_errno(ret); |
6540 | return ret; | 6538 | return ret; |
@@ -6556,7 +6554,7 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode, | |||
6556 | 6554 | ||
6557 | ocfs2_commit_trans(osb, ctxt.handle); | 6555 | ocfs2_commit_trans(osb, ctxt.handle); |
6558 | out: | 6556 | out: |
6559 | ocfs2_free_alloc_context(meta_ac); | 6557 | ocfs2_free_alloc_context(ctxt.meta_ac); |
6560 | return ret; | 6558 | return ret; |
6561 | } | 6559 | } |
6562 | 6560 | ||
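
The xattr change above fixes a classic C pitfall: a designated initializer that copies another local (".meta_ac = meta_ac") reads that local before it has ever been assigned, which is undefined behaviour; zeroing the struct and letting the reservation routine fill the field in place avoids the stale read. A sketch of the bug class with illustrative types:

#include <stdio.h>
#include <string.h>

struct sk_alloc_context { int reserved; };
struct sk_set_ctxt      { struct sk_alloc_context *meta_ac; };

static int sk_reserve(struct sk_alloc_context **out)
{
	static struct sk_alloc_context ac;	/* stands in for a real allocation */

	ac.reserved = 1;
	*out = &ac;
	return 0;
}

int main(void)
{
	struct sk_set_ctxt ctxt;

	/* Buggy shape (do not do this):
	 *   struct sk_alloc_context *meta_ac;          // never initialised
	 *   struct sk_set_ctxt ctxt = { .meta_ac = meta_ac };
	 *   sk_reserve(&meta_ac);                      // ctxt.meta_ac stays stale
	 */
	memset(&ctxt, 0, sizeof(ctxt));
	if (sk_reserve(&ctxt.meta_ac) == 0)
		printf("reserved=%d\n", ctxt.meta_ac->reserved);
	return 0;
}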
diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c index 0028d2ef0662..90be97f1f5a8 100644 --- a/fs/partitions/msdos.c +++ b/fs/partitions/msdos.c | |||
@@ -31,14 +31,17 @@ | |||
31 | */ | 31 | */ |
32 | #include <asm/unaligned.h> | 32 | #include <asm/unaligned.h> |
33 | 33 | ||
34 | #define SYS_IND(p) (get_unaligned(&p->sys_ind)) | 34 | #define SYS_IND(p) get_unaligned(&p->sys_ind) |
35 | #define NR_SECTS(p) ({ __le32 __a = get_unaligned(&p->nr_sects); \ | ||
36 | le32_to_cpu(__a); \ | ||
37 | }) | ||
38 | 35 | ||
39 | #define START_SECT(p) ({ __le32 __a = get_unaligned(&p->start_sect); \ | 36 | static inline sector_t nr_sects(struct partition *p) |
40 | le32_to_cpu(__a); \ | 37 | { |
41 | }) | 38 | return (sector_t)get_unaligned_le32(&p->nr_sects); |
39 | } | ||
40 | |||
41 | static inline sector_t start_sect(struct partition *p) | ||
42 | { | ||
43 | return (sector_t)get_unaligned_le32(&p->start_sect); | ||
44 | } | ||
42 | 45 | ||
43 | static inline int is_extended_partition(struct partition *p) | 46 | static inline int is_extended_partition(struct partition *p) |
44 | { | 47 | { |
@@ -104,13 +107,13 @@ static int aix_magic_present(unsigned char *p, struct block_device *bdev) | |||
104 | 107 | ||
105 | static void | 108 | static void |
106 | parse_extended(struct parsed_partitions *state, struct block_device *bdev, | 109 | parse_extended(struct parsed_partitions *state, struct block_device *bdev, |
107 | u32 first_sector, u32 first_size) | 110 | sector_t first_sector, sector_t first_size) |
108 | { | 111 | { |
109 | struct partition *p; | 112 | struct partition *p; |
110 | Sector sect; | 113 | Sector sect; |
111 | unsigned char *data; | 114 | unsigned char *data; |
112 | u32 this_sector, this_size; | 115 | sector_t this_sector, this_size; |
113 | int sector_size = bdev_logical_block_size(bdev) / 512; | 116 | sector_t sector_size = bdev_logical_block_size(bdev) / 512; |
114 | int loopct = 0; /* number of links followed | 117 | int loopct = 0; /* number of links followed |
115 | without finding a data partition */ | 118 | without finding a data partition */ |
116 | int i; | 119 | int i; |
@@ -145,14 +148,14 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev, | |||
145 | * First process the data partition(s) | 148 | * First process the data partition(s) |
146 | */ | 149 | */ |
147 | for (i=0; i<4; i++, p++) { | 150 | for (i=0; i<4; i++, p++) { |
148 | u32 offs, size, next; | 151 | sector_t offs, size, next; |
149 | if (!NR_SECTS(p) || is_extended_partition(p)) | 152 | if (!nr_sects(p) || is_extended_partition(p)) |
150 | continue; | 153 | continue; |
151 | 154 | ||
152 | /* Check the 3rd and 4th entries - | 155 | /* Check the 3rd and 4th entries - |
153 | these sometimes contain random garbage */ | 156 | these sometimes contain random garbage */ |
154 | offs = START_SECT(p)*sector_size; | 157 | offs = start_sect(p)*sector_size; |
155 | size = NR_SECTS(p)*sector_size; | 158 | size = nr_sects(p)*sector_size; |
156 | next = this_sector + offs; | 159 | next = this_sector + offs; |
157 | if (i >= 2) { | 160 | if (i >= 2) { |
158 | if (offs + size > this_size) | 161 | if (offs + size > this_size) |
@@ -179,13 +182,13 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev, | |||
179 | */ | 182 | */ |
180 | p -= 4; | 183 | p -= 4; |
181 | for (i=0; i<4; i++, p++) | 184 | for (i=0; i<4; i++, p++) |
182 | if (NR_SECTS(p) && is_extended_partition(p)) | 185 | if (nr_sects(p) && is_extended_partition(p)) |
183 | break; | 186 | break; |
184 | if (i == 4) | 187 | if (i == 4) |
185 | goto done; /* nothing left to do */ | 188 | goto done; /* nothing left to do */ |
186 | 189 | ||
187 | this_sector = first_sector + START_SECT(p) * sector_size; | 190 | this_sector = first_sector + start_sect(p) * sector_size; |
188 | this_size = NR_SECTS(p) * sector_size; | 191 | this_size = nr_sects(p) * sector_size; |
189 | put_dev_sector(sect); | 192 | put_dev_sector(sect); |
190 | } | 193 | } |
191 | done: | 194 | done: |
@@ -197,7 +200,7 @@ done: | |||
197 | 200 | ||
198 | static void | 201 | static void |
199 | parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev, | 202 | parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev, |
200 | u32 offset, u32 size, int origin) | 203 | sector_t offset, sector_t size, int origin) |
201 | { | 204 | { |
202 | #ifdef CONFIG_SOLARIS_X86_PARTITION | 205 | #ifdef CONFIG_SOLARIS_X86_PARTITION |
203 | Sector sect; | 206 | Sector sect; |
@@ -244,7 +247,7 @@ parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev, | |||
244 | */ | 247 | */ |
245 | static void | 248 | static void |
246 | parse_bsd(struct parsed_partitions *state, struct block_device *bdev, | 249 | parse_bsd(struct parsed_partitions *state, struct block_device *bdev, |
247 | u32 offset, u32 size, int origin, char *flavour, | 250 | sector_t offset, sector_t size, int origin, char *flavour, |
248 | int max_partitions) | 251 | int max_partitions) |
249 | { | 252 | { |
250 | Sector sect; | 253 | Sector sect; |
@@ -263,7 +266,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev, | |||
263 | if (le16_to_cpu(l->d_npartitions) < max_partitions) | 266 | if (le16_to_cpu(l->d_npartitions) < max_partitions) |
264 | max_partitions = le16_to_cpu(l->d_npartitions); | 267 | max_partitions = le16_to_cpu(l->d_npartitions); |
265 | for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) { | 268 | for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) { |
266 | u32 bsd_start, bsd_size; | 269 | sector_t bsd_start, bsd_size; |
267 | 270 | ||
268 | if (state->next == state->limit) | 271 | if (state->next == state->limit) |
269 | break; | 272 | break; |
@@ -290,7 +293,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev, | |||
290 | 293 | ||
291 | static void | 294 | static void |
292 | parse_freebsd(struct parsed_partitions *state, struct block_device *bdev, | 295 | parse_freebsd(struct parsed_partitions *state, struct block_device *bdev, |
293 | u32 offset, u32 size, int origin) | 296 | sector_t offset, sector_t size, int origin) |
294 | { | 297 | { |
295 | #ifdef CONFIG_BSD_DISKLABEL | 298 | #ifdef CONFIG_BSD_DISKLABEL |
296 | parse_bsd(state, bdev, offset, size, origin, | 299 | parse_bsd(state, bdev, offset, size, origin, |
@@ -300,7 +303,7 @@ parse_freebsd(struct parsed_partitions *state, struct block_device *bdev, | |||
300 | 303 | ||
301 | static void | 304 | static void |
302 | parse_netbsd(struct parsed_partitions *state, struct block_device *bdev, | 305 | parse_netbsd(struct parsed_partitions *state, struct block_device *bdev, |
303 | u32 offset, u32 size, int origin) | 306 | sector_t offset, sector_t size, int origin) |
304 | { | 307 | { |
305 | #ifdef CONFIG_BSD_DISKLABEL | 308 | #ifdef CONFIG_BSD_DISKLABEL |
306 | parse_bsd(state, bdev, offset, size, origin, | 309 | parse_bsd(state, bdev, offset, size, origin, |
@@ -310,7 +313,7 @@ parse_netbsd(struct parsed_partitions *state, struct block_device *bdev, | |||
310 | 313 | ||
311 | static void | 314 | static void |
312 | parse_openbsd(struct parsed_partitions *state, struct block_device *bdev, | 315 | parse_openbsd(struct parsed_partitions *state, struct block_device *bdev, |
313 | u32 offset, u32 size, int origin) | 316 | sector_t offset, sector_t size, int origin) |
314 | { | 317 | { |
315 | #ifdef CONFIG_BSD_DISKLABEL | 318 | #ifdef CONFIG_BSD_DISKLABEL |
316 | parse_bsd(state, bdev, offset, size, origin, | 319 | parse_bsd(state, bdev, offset, size, origin, |
@@ -324,7 +327,7 @@ parse_openbsd(struct parsed_partitions *state, struct block_device *bdev, | |||
324 | */ | 327 | */ |
325 | static void | 328 | static void |
326 | parse_unixware(struct parsed_partitions *state, struct block_device *bdev, | 329 | parse_unixware(struct parsed_partitions *state, struct block_device *bdev, |
327 | u32 offset, u32 size, int origin) | 330 | sector_t offset, sector_t size, int origin) |
328 | { | 331 | { |
329 | #ifdef CONFIG_UNIXWARE_DISKLABEL | 332 | #ifdef CONFIG_UNIXWARE_DISKLABEL |
330 | Sector sect; | 333 | Sector sect; |
@@ -348,7 +351,8 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev, | |||
348 | 351 | ||
349 | if (p->s_label != UNIXWARE_FS_UNUSED) | 352 | if (p->s_label != UNIXWARE_FS_UNUSED) |
350 | put_partition(state, state->next++, | 353 | put_partition(state, state->next++, |
351 | START_SECT(p), NR_SECTS(p)); | 354 | le32_to_cpu(p->start_sect), |
355 | le32_to_cpu(p->nr_sects)); | ||
352 | p++; | 356 | p++; |
353 | } | 357 | } |
354 | put_dev_sector(sect); | 358 | put_dev_sector(sect); |
@@ -363,7 +367,7 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev, | |||
363 | */ | 367 | */ |
364 | static void | 368 | static void |
365 | parse_minix(struct parsed_partitions *state, struct block_device *bdev, | 369 | parse_minix(struct parsed_partitions *state, struct block_device *bdev, |
366 | u32 offset, u32 size, int origin) | 370 | sector_t offset, sector_t size, int origin) |
367 | { | 371 | { |
368 | #ifdef CONFIG_MINIX_SUBPARTITION | 372 | #ifdef CONFIG_MINIX_SUBPARTITION |
369 | Sector sect; | 373 | Sector sect; |
@@ -390,7 +394,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev, | |||
390 | /* add each partition in use */ | 394 | /* add each partition in use */ |
391 | if (SYS_IND(p) == MINIX_PARTITION) | 395 | if (SYS_IND(p) == MINIX_PARTITION) |
392 | put_partition(state, state->next++, | 396 | put_partition(state, state->next++, |
393 | START_SECT(p), NR_SECTS(p)); | 397 | start_sect(p), nr_sects(p)); |
394 | } | 398 | } |
395 | printk(" >\n"); | 399 | printk(" >\n"); |
396 | } | 400 | } |
@@ -401,7 +405,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev, | |||
401 | static struct { | 405 | static struct { |
402 | unsigned char id; | 406 | unsigned char id; |
403 | void (*parse)(struct parsed_partitions *, struct block_device *, | 407 | void (*parse)(struct parsed_partitions *, struct block_device *, |
404 | u32, u32, int); | 408 | sector_t, sector_t, int); |
405 | } subtypes[] = { | 409 | } subtypes[] = { |
406 | {FREEBSD_PARTITION, parse_freebsd}, | 410 | {FREEBSD_PARTITION, parse_freebsd}, |
407 | {NETBSD_PARTITION, parse_netbsd}, | 411 | {NETBSD_PARTITION, parse_netbsd}, |
@@ -415,7 +419,7 @@ static struct { | |||
415 | 419 | ||
416 | int msdos_partition(struct parsed_partitions *state, struct block_device *bdev) | 420 | int msdos_partition(struct parsed_partitions *state, struct block_device *bdev) |
417 | { | 421 | { |
418 | int sector_size = bdev_logical_block_size(bdev) / 512; | 422 | sector_t sector_size = bdev_logical_block_size(bdev) / 512; |
419 | Sector sect; | 423 | Sector sect; |
420 | unsigned char *data; | 424 | unsigned char *data; |
421 | struct partition *p; | 425 | struct partition *p; |
@@ -483,14 +487,21 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev) | |||
483 | 487 | ||
484 | state->next = 5; | 488 | state->next = 5; |
485 | for (slot = 1 ; slot <= 4 ; slot++, p++) { | 489 | for (slot = 1 ; slot <= 4 ; slot++, p++) { |
486 | u32 start = START_SECT(p)*sector_size; | 490 | sector_t start = start_sect(p)*sector_size; |
487 | u32 size = NR_SECTS(p)*sector_size; | 491 | sector_t size = nr_sects(p)*sector_size; |
488 | if (!size) | 492 | if (!size) |
489 | continue; | 493 | continue; |
490 | if (is_extended_partition(p)) { | 494 | if (is_extended_partition(p)) { |
491 | /* prevent someone doing mkfs or mkswap on an | 495 | /* |
492 | extended partition, but leave room for LILO */ | 496 | * prevent someone doing mkfs or mkswap on an |
493 | put_partition(state, slot, start, size == 1 ? 1 : 2); | 497 | * extended partition, but leave room for LILO |
498 | * FIXME: this uses one logical sector for > 512b | ||
499 | * sector, although it may not be enough/proper. | ||
500 | */ | ||
501 | sector_t n = 2; | ||
502 | n = min(size, max(sector_size, n)); | ||
503 | put_partition(state, slot, start, n); | ||
504 | |||
494 | printk(" <"); | 505 | printk(" <"); |
495 | parse_extended(state, bdev, start, size); | 506 | parse_extended(state, bdev, start, size); |
496 | printk(" >"); | 507 | printk(" >"); |
@@ -513,7 +524,7 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev) | |||
513 | unsigned char id = SYS_IND(p); | 524 | unsigned char id = SYS_IND(p); |
514 | int n; | 525 | int n; |
515 | 526 | ||
516 | if (!NR_SECTS(p)) | 527 | if (!nr_sects(p)) |
517 | continue; | 528 | continue; |
518 | 529 | ||
519 | for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++) | 530 | for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++) |
@@ -521,8 +532,8 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev) | |||
521 | 532 | ||
522 | if (!subtypes[n].parse) | 533 | if (!subtypes[n].parse) |
523 | continue; | 534 | continue; |
524 | subtypes[n].parse(state, bdev, START_SECT(p)*sector_size, | 535 | subtypes[n].parse(state, bdev, start_sect(p)*sector_size, |
525 | NR_SECTS(p)*sector_size, slot); | 536 | nr_sects(p)*sector_size, slot); |
526 | } | 537 | } |
527 | put_dev_sector(sect); | 538 | put_dev_sector(sect); |
528 | return 1; | 539 | return 1; |
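The msdos.c conversion replaces the START_SECT()/NR_SECTS() macros with inline start_sect()/nr_sects() helpers that return sector_t, so the unaligned on-disk fields are read with get_unaligned_le32() and the start/size arithmetic happens in 64 bits instead of wrapping in u32. A userspace sketch of the same idea - read_le32() stands in for get_unaligned_le32(), sector_t is assumed to be 64 bits wide, and the partition-entry offsets follow the standard MBR layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t sector_t;	/* assumed 64-bit, as with CONFIG_LBDAF */

/* Portable equivalent of get_unaligned_le32(): no alignment or endianness traps. */
static uint32_t read_le32(const unsigned char *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Widen to sector_t *before* scaling, mirroring start_sect()/nr_sects(). */
static sector_t start_sect(const unsigned char *entry)
{
	return (sector_t)read_le32(entry + 8);	/* start_sect at offset 8 */
}

static sector_t nr_sects(const unsigned char *entry)
{
	return (sector_t)read_le32(entry + 12);	/* nr_sects at offset 12 */
}

int main(void)
{
	/* A fabricated 16-byte MBR partition entry with large start/size. */
	unsigned char entry[16] = { 0 };
	memcpy(entry + 8,  "\x00\x00\x00\x80", 4);	/* start = 0x80000000 */
	memcpy(entry + 12, "\x00\x00\x00\x70", 4);	/* size  = 0x70000000 */

	sector_t sector_size = 8;	/* 4KiB logical blocks / 512 */
	sector_t start = start_sect(entry) * sector_size;
	sector_t size  = nr_sects(entry) * sector_size;

	/* With u32 arithmetic both products would have wrapped around. */
	printf("start=%llu size=%llu\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}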
diff --git a/fs/proc/base.c b/fs/proc/base.c index a7310841c831..b1f6e62773d3 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -442,12 +442,13 @@ static const struct file_operations proc_lstats_operations = { | |||
442 | unsigned long badness(struct task_struct *p, unsigned long uptime); | 442 | unsigned long badness(struct task_struct *p, unsigned long uptime); |
443 | static int proc_oom_score(struct task_struct *task, char *buffer) | 443 | static int proc_oom_score(struct task_struct *task, char *buffer) |
444 | { | 444 | { |
445 | unsigned long points; | 445 | unsigned long points = 0; |
446 | struct timespec uptime; | 446 | struct timespec uptime; |
447 | 447 | ||
448 | do_posix_clock_monotonic_gettime(&uptime); | 448 | do_posix_clock_monotonic_gettime(&uptime); |
449 | read_lock(&tasklist_lock); | 449 | read_lock(&tasklist_lock); |
450 | points = badness(task->group_leader, uptime.tv_sec); | 450 | if (pid_alive(task)) |
451 | points = badness(task, uptime.tv_sec); | ||
451 | read_unlock(&tasklist_lock); | 452 | read_unlock(&tasklist_lock); |
452 | return sprintf(buffer, "%lu\n", points); | 453 | return sprintf(buffer, "%lu\n", points); |
453 | } | 454 | } |
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index a44a7897fd4d..b442dac8f5f9 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c | |||
@@ -490,7 +490,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) | |||
490 | } | 490 | } |
491 | read_unlock(&kclist_lock); | 491 | read_unlock(&kclist_lock); |
492 | 492 | ||
493 | if (m == NULL) { | 493 | if (&m->list == &kclist_head) { |
494 | if (clear_user(buffer, tsz)) | 494 | if (clear_user(buffer, tsz)) |
495 | return -EFAULT; | 495 | return -EFAULT; |
496 | } else if (is_vmalloc_or_module_addr((void *)start)) { | 496 | } else if (is_vmalloc_or_module_addr((void *)start)) { |
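The kcore.c fix works around a common list_for_each_entry() pitfall: when the loop terminates without hitting a break, the cursor is not NULL but a bogus entry computed from the list head, so "not found" must be detected by comparing &m->list against the head. A minimal userspace re-creation of that loop shape (the list_head, kclist and container_of() here are simplified stand-ins for the kernel versions):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next; };

struct kclist {
	struct list_head list;
	unsigned long addr;
	size_t size;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct kclist a = { .addr = 0x1000, .size = 0x100 };
	struct kclist b = { .addr = 0x3000, .size = 0x100 };
	struct list_head head = { .next = &a.list };
	a.list.next = &b.list;
	b.list.next = &head;		/* circular, like the kernel list */

	unsigned long start = 0x5000;	/* address we are looking for */
	struct kclist *m;

	/* The loop shape of list_for_each_entry(): m itself is the cursor. */
	for (m = container_of(head.next, struct kclist, list);
	     &m->list != &head;
	     m = container_of(m->list.next, struct kclist, list)) {
		if (start >= m->addr && start < m->addr + m->size)
			break;
	}

	/*
	 * When nothing matched, m is container_of(&head, ...) - bogus but not
	 * NULL.  Hence the fix in read_kcore(): test &m->list == &kclist_head
	 * rather than m == NULL.
	 */
	if (&m->list == &head)
		printf("not found (m is %p, not NULL)\n", (void *)m);
	else
		printf("found at %#lx\n", m->addr);
	return 0;
}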
diff --git a/fs/read_write.c b/fs/read_write.c index b7f4a1f94d48..113386d6fd2d 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
@@ -258,6 +258,7 @@ ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *pp | |||
258 | init_sync_kiocb(&kiocb, filp); | 258 | init_sync_kiocb(&kiocb, filp); |
259 | kiocb.ki_pos = *ppos; | 259 | kiocb.ki_pos = *ppos; |
260 | kiocb.ki_left = len; | 260 | kiocb.ki_left = len; |
261 | kiocb.ki_nbytes = len; | ||
261 | 262 | ||
262 | for (;;) { | 263 | for (;;) { |
263 | ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos); | 264 | ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos); |
@@ -313,6 +314,7 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof | |||
313 | init_sync_kiocb(&kiocb, filp); | 314 | init_sync_kiocb(&kiocb, filp); |
314 | kiocb.ki_pos = *ppos; | 315 | kiocb.ki_pos = *ppos; |
315 | kiocb.ki_left = len; | 316 | kiocb.ki_left = len; |
317 | kiocb.ki_nbytes = len; | ||
316 | 318 | ||
317 | for (;;) { | 319 | for (;;) { |
318 | ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos); | 320 | ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos); |
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index ba98546fabbd..f3de5e8a2ae8 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -2217,6 +2217,15 @@ static int journal_read_transaction(struct super_block *sb, | |||
2217 | brelse(d_bh); | 2217 | brelse(d_bh); |
2218 | return 1; | 2218 | return 1; |
2219 | } | 2219 | } |
2220 | |||
2221 | if (bdev_read_only(sb->s_bdev)) { | ||
2222 | reiserfs_warning(sb, "clm-2076", | ||
2223 | "device is readonly, unable to replay log"); | ||
2224 | brelse(c_bh); | ||
2225 | brelse(d_bh); | ||
2226 | return -EROFS; | ||
2227 | } | ||
2228 | |||
2220 | trans_id = get_desc_trans_id(desc); | 2229 | trans_id = get_desc_trans_id(desc); |
2221 | /* now we know we've got a good transaction, and it was inside the valid time ranges */ | 2230 | /* now we know we've got a good transaction, and it was inside the valid time ranges */ |
2222 | log_blocks = kmalloc(get_desc_trans_len(desc) * | 2231 | log_blocks = kmalloc(get_desc_trans_len(desc) * |
@@ -2459,12 +2468,6 @@ static int journal_read(struct super_block *sb) | |||
2459 | goto start_log_replay; | 2468 | goto start_log_replay; |
2460 | } | 2469 | } |
2461 | 2470 | ||
2462 | if (continue_replay && bdev_read_only(sb->s_bdev)) { | ||
2463 | reiserfs_warning(sb, "clm-2076", | ||
2464 | "device is readonly, unable to replay log"); | ||
2465 | return -1; | ||
2466 | } | ||
2467 | |||
2468 | /* ok, there are transactions that need to be replayed. start with the first log block, find | 2471 | /* ok, there are transactions that need to be replayed. start with the first log block, find |
2469 | ** all the valid transactions, and pick out the oldest. | 2472 | ** all the valid transactions, and pick out the oldest. |
2470 | */ | 2473 | */ |
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c index d8b5bfcbdd30..de1fcffd906b 100644 --- a/fs/reiserfs/xattr_security.c +++ b/fs/reiserfs/xattr_security.c | |||
@@ -76,7 +76,7 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode, | |||
76 | return error; | 76 | return error; |
77 | } | 77 | } |
78 | 78 | ||
79 | if (sec->length) { | 79 | if (sec->length && reiserfs_xattrs_initialized(inode->i_sb)) { |
80 | blocks = reiserfs_xattr_jcreate_nblocks(inode) + | 80 | blocks = reiserfs_xattr_jcreate_nblocks(inode) + |
81 | reiserfs_xattr_nblocks(inode, sec->length); | 81 | reiserfs_xattr_nblocks(inode, sec->length); |
82 | /* We don't want to count the directories twice if we have | 82 | /* We don't want to count the directories twice if we have |
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index 3a4767c01c5f..4f7b44866b76 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h | |||
@@ -65,6 +65,8 @@ | |||
65 | #define ACPI_VIDEO_HID "LNXVIDEO" | 65 | #define ACPI_VIDEO_HID "LNXVIDEO" |
66 | #define ACPI_BAY_HID "LNXIOBAY" | 66 | #define ACPI_BAY_HID "LNXIOBAY" |
67 | #define ACPI_DOCK_HID "LNXDOCK" | 67 | #define ACPI_DOCK_HID "LNXDOCK" |
68 | /* Quirk for broken IBM BIOSes */ | ||
69 | #define ACPI_SMBUS_IBM_HID "SMBUSIBM" | ||
68 | 70 | ||
69 | /* | 71 | /* |
70 | * For fixed hardware buttons, we fabricate acpi_devices with HID | 72 | * For fixed hardware buttons, we fabricate acpi_devices with HID |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 4a3c4e441027..de2f82efb15f 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -1545,39 +1545,7 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map) | |||
1545 | { | 1545 | { |
1546 | } | 1546 | } |
1547 | 1547 | ||
1548 | 1548 | #include "drm_mem_util.h" | |
1549 | static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) | ||
1550 | { | ||
1551 | if (size != 0 && nmemb > ULONG_MAX / size) | ||
1552 | return NULL; | ||
1553 | |||
1554 | if (size * nmemb <= PAGE_SIZE) | ||
1555 | return kcalloc(nmemb, size, GFP_KERNEL); | ||
1556 | |||
1557 | return __vmalloc(size * nmemb, | ||
1558 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); | ||
1559 | } | ||
1560 | |||
1561 | /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */ | ||
1562 | static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) | ||
1563 | { | ||
1564 | if (size != 0 && nmemb > ULONG_MAX / size) | ||
1565 | return NULL; | ||
1566 | |||
1567 | if (size * nmemb <= PAGE_SIZE) | ||
1568 | return kmalloc(nmemb * size, GFP_KERNEL); | ||
1569 | |||
1570 | return __vmalloc(size * nmemb, | ||
1571 | GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); | ||
1572 | } | ||
1573 | |||
1574 | static __inline void drm_free_large(void *ptr) | ||
1575 | { | ||
1576 | if (!is_vmalloc_addr(ptr)) | ||
1577 | return kfree(ptr); | ||
1578 | |||
1579 | vfree(ptr); | ||
1580 | } | ||
1581 | /*@}*/ | 1549 | /*@}*/ |
1582 | 1550 | ||
1583 | #endif /* __KERNEL__ */ | 1551 | #endif /* __KERNEL__ */ |
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h new file mode 100644 index 000000000000..6bd325fedc87 --- /dev/null +++ b/include/drm/drm_mem_util.h | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * Copyright © 2008 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Jesse Barnes <jbarnes@virtuousgeek.org> | ||
25 | * | ||
26 | */ | ||
27 | #ifndef _DRM_MEM_UTIL_H_ | ||
28 | #define _DRM_MEM_UTIL_H_ | ||
29 | |||
30 | #include <linux/vmalloc.h> | ||
31 | |||
32 | static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) | ||
33 | { | ||
34 | if (size != 0 && nmemb > ULONG_MAX / size) | ||
35 | return NULL; | ||
36 | |||
37 | if (size * nmemb <= PAGE_SIZE) | ||
38 | return kcalloc(nmemb, size, GFP_KERNEL); | ||
39 | |||
40 | return __vmalloc(size * nmemb, | ||
41 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); | ||
42 | } | ||
43 | |||
44 | /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */ | ||
45 | static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) | ||
46 | { | ||
47 | if (size != 0 && nmemb > ULONG_MAX / size) | ||
48 | return NULL; | ||
49 | |||
50 | if (size * nmemb <= PAGE_SIZE) | ||
51 | return kmalloc(nmemb * size, GFP_KERNEL); | ||
52 | |||
53 | return __vmalloc(size * nmemb, | ||
54 | GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); | ||
55 | } | ||
56 | |||
57 | static __inline void drm_free_large(void *ptr) | ||
58 | { | ||
59 | if (!is_vmalloc_addr(ptr)) | ||
60 | return kfree(ptr); | ||
61 | |||
62 | vfree(ptr); | ||
63 | } | ||
64 | |||
65 | #endif | ||
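The new drm_mem_util.h keeps the same three helpers that drmP.h just dropped: an overflow check on nmemb * size, kcalloc()/kmalloc() for requests that fit in a page, and __vmalloc() for anything larger. The overflow guard is the reusable part; a userspace equivalent, where malloc_array() is a made-up name:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace re-creation of the guard in drm_calloc_large()/drm_malloc_ab():
 * refuse nmemb * size when the product cannot be represented.
 */
static void *malloc_array(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;	/* nmemb * size would overflow */
	return malloc(nmemb * size);
}

int main(void)
{
	/* A plausible request: 1024 elements of 64 bytes. */
	void *ok = malloc_array(1024, 64);

	/* A hostile request that would silently wrap without the guard. */
	void *bad = malloc_array(SIZE_MAX / 8 + 2, 8);

	printf("ok=%p bad=%p\n", ok, bad);	/* bad is NULL */
	free(ok);
	return 0;
}

The page-size threshold in the kernel helpers reflects that large physically contiguous allocations are increasingly likely to fail, so big arrays go to vmalloc space instead.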
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 676104b7818c..04a6ebc27b96 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -410,6 +410,7 @@ | |||
410 | {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 410 | {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
411 | {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 411 | {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
412 | {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 412 | {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
413 | {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
413 | {0, 0, 0} | 414 | {0, 0, 0} |
414 | 415 | ||
415 | #define r128_PCI_IDS \ | 416 | #define r128_PCI_IDS \ |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index e3f1b4a4b601..e929c27ede22 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -115,7 +115,6 @@ struct ttm_backend { | |||
115 | struct ttm_backend_func *func; | 115 | struct ttm_backend_func *func; |
116 | }; | 116 | }; |
117 | 117 | ||
118 | #define TTM_PAGE_FLAG_VMALLOC (1 << 0) | ||
119 | #define TTM_PAGE_FLAG_USER (1 << 1) | 118 | #define TTM_PAGE_FLAG_USER (1 << 1) |
120 | #define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) | 119 | #define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) |
121 | #define TTM_PAGE_FLAG_WRITE (1 << 3) | 120 | #define TTM_PAGE_FLAG_WRITE (1 << 3) |
diff --git a/include/linux/circ_buf.h b/include/linux/circ_buf.h index a2ed0591fb19..90f2471dc6f2 100644 --- a/include/linux/circ_buf.h +++ b/include/linux/circ_buf.h | |||
@@ -1,3 +1,7 @@ | |||
1 | /* | ||
2 | * See Documentation/circular-buffers.txt for more information. | ||
3 | */ | ||
4 | |||
1 | #ifndef _LINUX_CIRC_BUF_H | 5 | #ifndef _LINUX_CIRC_BUF_H |
2 | #define _LINUX_CIRC_BUF_H 1 | 6 | #define _LINUX_CIRC_BUF_H 1 |
3 | 7 | ||
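The new comment in circ_buf.h points readers at Documentation/circular-buffers.txt; the header itself is just the power-of-2 index arithmetic behind CIRC_CNT() and CIRC_SPACE(). A single-threaded userspace sketch of that arithmetic follows - the lockless producer/consumer version additionally needs the memory barriers the referenced document describes, which are deliberately omitted here:

#include <stdio.h>

#define BUF_SIZE 16	/* must be a power of two */

static unsigned int circ_cnt(unsigned int head, unsigned int tail)
{
	return (head - tail) & (BUF_SIZE - 1);		/* items available to read */
}

static unsigned int circ_space(unsigned int head, unsigned int tail)
{
	return (tail - head - 1) & (BUF_SIZE - 1);	/* room left to write */
}

int main(void)
{
	unsigned int head = 0, tail = 0;
	char buf[BUF_SIZE];

	/* Producer side: check space, store the item, then publish the new head. */
	if (circ_space(head, tail) >= 1) {
		buf[head] = 'x';
		head = (head + 1) & (BUF_SIZE - 1);
	}

	/* Consumer side: check count, read the item, then publish the new tail. */
	if (circ_cnt(head, tail) >= 1) {
		char c = buf[tail];
		tail = (tail + 1) & (BUF_SIZE - 1);
		printf("read %c, %u item(s) left\n", c, circ_cnt(head, tail));
	}
	return 0;
}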
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 0cf725bdd2a1..fc53492b6ad7 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h | |||
@@ -73,6 +73,7 @@ enum clock_event_nofitiers { | |||
73 | * @list: list head for the management code | 73 | * @list: list head for the management code |
74 | * @mode: operating mode assigned by the management code | 74 | * @mode: operating mode assigned by the management code |
75 | * @next_event: local storage for the next event in oneshot mode | 75 | * @next_event: local storage for the next event in oneshot mode |
76 | * @retries: number of forced programming retries | ||
76 | */ | 77 | */ |
77 | struct clock_event_device { | 78 | struct clock_event_device { |
78 | const char *name; | 79 | const char *name; |
@@ -93,6 +94,7 @@ struct clock_event_device { | |||
93 | struct list_head list; | 94 | struct list_head list; |
94 | enum clock_event_mode mode; | 95 | enum clock_event_mode mode; |
95 | ktime_t next_event; | 96 | ktime_t next_event; |
97 | unsigned long retries; | ||
96 | }; | 98 | }; |
97 | 99 | ||
98 | /* | 100 | /* |
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index cac84b006667..5f494b465097 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h | |||
@@ -565,17 +565,17 @@ enum { | |||
565 | 565 | ||
566 | static inline int ext3_test_inode_state(struct inode *inode, int bit) | 566 | static inline int ext3_test_inode_state(struct inode *inode, int bit) |
567 | { | 567 | { |
568 | return test_bit(bit, &EXT3_I(inode)->i_state); | 568 | return test_bit(bit, &EXT3_I(inode)->i_state_flags); |
569 | } | 569 | } |
570 | 570 | ||
571 | static inline void ext3_set_inode_state(struct inode *inode, int bit) | 571 | static inline void ext3_set_inode_state(struct inode *inode, int bit) |
572 | { | 572 | { |
573 | set_bit(bit, &EXT3_I(inode)->i_state); | 573 | set_bit(bit, &EXT3_I(inode)->i_state_flags); |
574 | } | 574 | } |
575 | 575 | ||
576 | static inline void ext3_clear_inode_state(struct inode *inode, int bit) | 576 | static inline void ext3_clear_inode_state(struct inode *inode, int bit) |
577 | { | 577 | { |
578 | clear_bit(bit, &EXT3_I(inode)->i_state); | 578 | clear_bit(bit, &EXT3_I(inode)->i_state_flags); |
579 | } | 579 | } |
580 | #else | 580 | #else |
581 | /* Assume that user mode programs are passing in an ext3fs superblock, not | 581 | /* Assume that user mode programs are passing in an ext3fs superblock, not |
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h index 7679acdb519a..f42c098aed8d 100644 --- a/include/linux/ext3_fs_i.h +++ b/include/linux/ext3_fs_i.h | |||
@@ -87,7 +87,7 @@ struct ext3_inode_info { | |||
87 | * near to their parent directory's inode. | 87 | * near to their parent directory's inode. |
88 | */ | 88 | */ |
89 | __u32 i_block_group; | 89 | __u32 i_block_group; |
90 | unsigned long i_state; /* Dynamic state flags for ext3 */ | 90 | unsigned long i_state_flags; /* Dynamic state flags for ext3 */ |
91 | 91 | ||
92 | /* block reservation info */ | 92 | /* block reservation info */ |
93 | struct ext3_block_alloc_info *i_block_alloc_info; | 93 | struct ext3_block_alloc_info *i_block_alloc_info; |
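The ext3 hunks rename the private i_state field to i_state_flags; because every user goes through the ext3_test/set/clear_inode_state() accessors, only those three inlines and the struct field need touching. A hypothetical sketch of the same accessor pattern - plain bit arithmetic here, whereas the kernel versions use the atomic test_bit()/set_bit()/clear_bit() helpers:

#include <stdio.h>

/* Hypothetical inode-info structure; the flag word gets a distinctive name. */
struct demo_inode_info {
	unsigned long i_state_flags;	/* dynamic state bits */
};

/* All users go through these accessors, so a rename touches one place. */
static inline int demo_test_state(struct demo_inode_info *ei, int bit)
{
	return (ei->i_state_flags >> bit) & 1UL;
}

static inline void demo_set_state(struct demo_inode_info *ei, int bit)
{
	ei->i_state_flags |= 1UL << bit;
}

static inline void demo_clear_state(struct demo_inode_info *ei, int bit)
{
	ei->i_state_flags &= ~(1UL << bit);
}

int main(void)
{
	struct demo_inode_info ei = { 0 };

	demo_set_state(&ei, 2);
	printf("bit 2 set: %d\n", demo_test_state(&ei, 2));
	demo_clear_state(&ei, 2);
	printf("bit 2 set: %d\n", demo_test_state(&ei, 2));
	return 0;
}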
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 7be0c6fbe880..c57db27ac861 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h | |||
@@ -105,7 +105,7 @@ struct fscache_operation { | |||
105 | /* operation releaser */ | 105 | /* operation releaser */ |
106 | fscache_operation_release_t release; | 106 | fscache_operation_release_t release; |
107 | 107 | ||
108 | #ifdef CONFIG_SLOW_WORK_PROC | 108 | #ifdef CONFIG_SLOW_WORK_DEBUG |
109 | const char *name; /* operation name */ | 109 | const char *name; /* operation name */ |
110 | const char *state; /* operation state */ | 110 | const char *state; /* operation state */ |
111 | #define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0) | 111 | #define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0) |
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h index 1822d635be6b..16b92d008bed 100644 --- a/include/linux/if_tunnel.h +++ b/include/linux/if_tunnel.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _IF_TUNNEL_H_ | 2 | #define _IF_TUNNEL_H_ |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/byteorder.h> | ||
5 | 6 | ||
6 | #ifdef __KERNEL__ | 7 | #ifdef __KERNEL__ |
7 | #include <linux/ip.h> | 8 | #include <linux/ip.h> |
diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 71ab79da7e7f..26fad187d661 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h | |||
@@ -112,12 +112,14 @@ struct resource_list { | |||
112 | extern struct resource ioport_resource; | 112 | extern struct resource ioport_resource; |
113 | extern struct resource iomem_resource; | 113 | extern struct resource iomem_resource; |
114 | 114 | ||
115 | extern struct resource *request_resource_conflict(struct resource *root, struct resource *new); | ||
115 | extern int request_resource(struct resource *root, struct resource *new); | 116 | extern int request_resource(struct resource *root, struct resource *new); |
116 | extern int release_resource(struct resource *new); | 117 | extern int release_resource(struct resource *new); |
117 | void release_child_resources(struct resource *new); | 118 | void release_child_resources(struct resource *new); |
118 | extern void reserve_region_with_split(struct resource *root, | 119 | extern void reserve_region_with_split(struct resource *root, |
119 | resource_size_t start, resource_size_t end, | 120 | resource_size_t start, resource_size_t end, |
120 | const char *name); | 121 | const char *name); |
122 | extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new); | ||
121 | extern int insert_resource(struct resource *parent, struct resource *new); | 123 | extern int insert_resource(struct resource *parent, struct resource *new); |
122 | extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); | 124 | extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); |
123 | extern int allocate_resource(struct resource *root, struct resource *new, | 125 | extern int allocate_resource(struct resource *root, struct resource *new, |
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index bc0fc795bd35..ece0b1c33816 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h | |||
@@ -102,8 +102,6 @@ union { \ | |||
102 | unsigned char name##kfifo_buffer[size]; \ | 102 | unsigned char name##kfifo_buffer[size]; \ |
103 | struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer) | 103 | struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer) |
104 | 104 | ||
105 | #undef __kfifo_initializer | ||
106 | |||
107 | extern void kfifo_init(struct kfifo *fifo, void *buffer, | 105 | extern void kfifo_init(struct kfifo *fifo, void *buffer, |
108 | unsigned int size); | 106 | unsigned int size); |
109 | extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size, | 107 | extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size, |
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index c02c8db73701..8a49cbf0376d 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h | |||
@@ -268,6 +268,7 @@ struct _mmc_csd { | |||
268 | 268 | ||
269 | #define EXT_CSD_CARD_TYPE_26 (1<<0) /* Card can run at 26MHz */ | 269 | #define EXT_CSD_CARD_TYPE_26 (1<<0) /* Card can run at 26MHz */ |
270 | #define EXT_CSD_CARD_TYPE_52 (1<<1) /* Card can run at 52MHz */ | 270 | #define EXT_CSD_CARD_TYPE_52 (1<<1) /* Card can run at 52MHz */ |
271 | #define EXT_CSD_CARD_TYPE_MASK 0x3 /* Mask out reserved and DDR bits */ | ||
271 | 272 | ||
272 | #define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ | 273 | #define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ |
273 | #define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ | 274 | #define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c79a88be7c33..fa8b47637997 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -2059,12 +2059,12 @@ static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, | |||
2059 | * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and | 2059 | * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and |
2060 | * ARP on active-backup slaves with arp_validate enabled. | 2060 | * ARP on active-backup slaves with arp_validate enabled. |
2061 | */ | 2061 | */ |
2062 | static inline int skb_bond_should_drop(struct sk_buff *skb) | 2062 | static inline int skb_bond_should_drop(struct sk_buff *skb, |
2063 | struct net_device *master) | ||
2063 | { | 2064 | { |
2064 | struct net_device *dev = skb->dev; | ||
2065 | struct net_device *master = dev->master; | ||
2066 | |||
2067 | if (master) { | 2065 | if (master) { |
2066 | struct net_device *dev = skb->dev; | ||
2067 | |||
2068 | if (master->priv_flags & IFF_MASTER_ARPMON) | 2068 | if (master->priv_flags & IFF_MASTER_ARPMON) |
2069 | dev->last_rx = jiffies; | 2069 | dev->last_rx = jiffies; |
2070 | 2070 | ||
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 53923868c9bd..361d6b5630ee 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h | |||
@@ -76,7 +76,7 @@ extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); | |||
76 | extern int nfnetlink_has_listeners(struct net *net, unsigned int group); | 76 | extern int nfnetlink_has_listeners(struct net *net, unsigned int group); |
77 | extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, | 77 | extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, |
78 | int echo, gfp_t flags); | 78 | int echo, gfp_t flags); |
79 | extern void nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error); | 79 | extern int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error); |
80 | extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags); | 80 | extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags); |
81 | 81 | ||
82 | extern void nfnl_lock(void); | 82 | extern void nfnl_lock(void); |
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index d654873aa25a..1f7e300094cd 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h | |||
@@ -59,6 +59,7 @@ | |||
59 | enum nf_ip6_hook_priorities { | 59 | enum nf_ip6_hook_priorities { |
60 | NF_IP6_PRI_FIRST = INT_MIN, | 60 | NF_IP6_PRI_FIRST = INT_MIN, |
61 | NF_IP6_PRI_CONNTRACK_DEFRAG = -400, | 61 | NF_IP6_PRI_CONNTRACK_DEFRAG = -400, |
62 | NF_IP6_PRI_RAW = -300, | ||
62 | NF_IP6_PRI_SELINUX_FIRST = -225, | 63 | NF_IP6_PRI_SELINUX_FIRST = -225, |
63 | NF_IP6_PRI_CONNTRACK = -200, | 64 | NF_IP6_PRI_CONNTRACK = -200, |
64 | NF_IP6_PRI_MANGLE = -150, | 65 | NF_IP6_PRI_MANGLE = -150, |
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index fde27c017326..6eaca5e1e8ca 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
@@ -188,7 +188,7 @@ extern int netlink_has_listeners(struct sock *sk, unsigned int group); | |||
188 | extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); | 188 | extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); |
189 | extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, | 189 | extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, |
190 | __u32 group, gfp_t allocation); | 190 | __u32 group, gfp_t allocation); |
191 | extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code); | 191 | extern int netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code); |
192 | extern int netlink_register_notifier(struct notifier_block *nb); | 192 | extern int netlink_register_notifier(struct notifier_block *nb); |
193 | extern int netlink_unregister_notifier(struct notifier_block *nb); | 193 | extern int netlink_unregister_notifier(struct notifier_block *nb); |
194 | 194 | ||
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2bccb7b9da2d..6e96cc8225d4 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -860,13 +860,6 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | |||
860 | 860 | ||
861 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); | 861 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); |
862 | 862 | ||
863 | static inline void | ||
864 | perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | ||
865 | { | ||
866 | if (atomic_read(&perf_swevent_enabled[event_id])) | ||
867 | __perf_sw_event(event_id, nr, nmi, regs, addr); | ||
868 | } | ||
869 | |||
870 | extern void | 863 | extern void |
871 | perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); | 864 | perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); |
872 | 865 | ||
@@ -905,6 +898,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip) | |||
905 | return perf_arch_fetch_caller_regs(regs, ip, skip); | 898 | return perf_arch_fetch_caller_regs(regs, ip, skip); |
906 | } | 899 | } |
907 | 900 | ||
901 | static inline void | ||
902 | perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | ||
903 | { | ||
904 | if (atomic_read(&perf_swevent_enabled[event_id])) { | ||
905 | struct pt_regs hot_regs; | ||
906 | |||
907 | if (!regs) { | ||
908 | perf_fetch_caller_regs(&hot_regs, 1); | ||
909 | regs = &hot_regs; | ||
910 | } | ||
911 | __perf_sw_event(event_id, nr, nmi, regs, addr); | ||
912 | } | ||
913 | } | ||
914 | |||
908 | extern void __perf_event_mmap(struct vm_area_struct *vma); | 915 | extern void __perf_event_mmap(struct vm_area_struct *vma); |
909 | 916 | ||
910 | static inline void perf_event_mmap(struct vm_area_struct *vma) | 917 | static inline void perf_event_mmap(struct vm_area_struct *vma) |
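The relocated perf_sw_event() now sits below perf_fetch_caller_regs() because it calls it: when the event is enabled and the caller passed no pt_regs, it captures a snapshot into a local hot_regs and uses that. The shape is "cheap check first, expensive capture only when needed"; a userspace sketch with made-up names (event_enabled, capture_regs, emit_event):

#include <stdio.h>

struct regs { unsigned long ip; };	/* hypothetical captured context */

static int event_enabled;		/* stands in for perf_swevent_enabled[] */

static void capture_regs(struct regs *r)
{
	r->ip = 0xdeadbeef;		/* pretend to snapshot the caller state */
}

static void emit_event(unsigned long ip)
{
	printf("event at %#lx\n", ip);
}

/*
 * Mirror of the new perf_sw_event() shape: bail out cheaply when the event is
 * off, and only capture a register snapshot when the caller supplied none.
 */
static void sw_event(struct regs *regs)
{
	if (event_enabled) {
		struct regs hot_regs;

		if (!regs) {
			capture_regs(&hot_regs);
			regs = &hot_regs;
		}
		emit_event(regs->ip);
	}
}

int main(void)
{
	sw_event(NULL);			/* disabled: nothing captured or emitted */
	event_enabled = 1;
	sw_event(NULL);			/* enabled, no regs supplied: capture lazily */
	return 0;
}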
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 3024050c82a1..872a98e13d6a 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -123,22 +123,11 @@ static inline int rcu_read_lock_held(void) | |||
123 | return lock_is_held(&rcu_lock_map); | 123 | return lock_is_held(&rcu_lock_map); |
124 | } | 124 | } |
125 | 125 | ||
126 | /** | 126 | /* |
127 | * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? | 127 | * rcu_read_lock_bh_held() is defined out of line to avoid #include-file |
128 | * | 128 | * hell. |
129 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in | ||
130 | * an RCU-bh read-side critical section. In absence of CONFIG_PROVE_LOCKING, | ||
131 | * this assumes we are in an RCU-bh read-side critical section unless it can | ||
132 | * prove otherwise. | ||
133 | * | ||
134 | * Check rcu_scheduler_active to prevent false positives during boot. | ||
135 | */ | 129 | */ |
136 | static inline int rcu_read_lock_bh_held(void) | 130 | extern int rcu_read_lock_bh_held(void); |
137 | { | ||
138 | if (!debug_lockdep_rcu_enabled()) | ||
139 | return 1; | ||
140 | return lock_is_held(&rcu_bh_lock_map); | ||
141 | } | ||
142 | 131 | ||
143 | /** | 132 | /** |
144 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? | 133 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? |
@@ -160,7 +149,7 @@ static inline int rcu_read_lock_sched_held(void) | |||
160 | return 1; | 149 | return 1; |
161 | if (debug_locks) | 150 | if (debug_locks) |
162 | lockdep_opinion = lock_is_held(&rcu_sched_lock_map); | 151 | lockdep_opinion = lock_is_held(&rcu_sched_lock_map); |
163 | return lockdep_opinion || preempt_count() != 0; | 152 | return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); |
164 | } | 153 | } |
165 | #else /* #ifdef CONFIG_PREEMPT */ | 154 | #else /* #ifdef CONFIG_PREEMPT */ |
166 | static inline int rcu_read_lock_sched_held(void) | 155 | static inline int rcu_read_lock_sched_held(void) |
@@ -191,7 +180,7 @@ static inline int rcu_read_lock_bh_held(void) | |||
191 | #ifdef CONFIG_PREEMPT | 180 | #ifdef CONFIG_PREEMPT |
192 | static inline int rcu_read_lock_sched_held(void) | 181 | static inline int rcu_read_lock_sched_held(void) |
193 | { | 182 | { |
194 | return !rcu_scheduler_active || preempt_count() != 0; | 183 | return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled(); |
195 | } | 184 | } |
196 | #else /* #ifdef CONFIG_PREEMPT */ | 185 | #else /* #ifdef CONFIG_PREEMPT */ |
197 | static inline int rcu_read_lock_sched_held(void) | 186 | static inline int rcu_read_lock_sched_held(void) |
diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h index 99928dce37ea..7fa02b4af838 100644 --- a/include/linux/reiserfs_xattr.h +++ b/include/linux/reiserfs_xattr.h | |||
@@ -70,6 +70,11 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th, | |||
70 | void reiserfs_security_free(struct reiserfs_security_handle *sec); | 70 | void reiserfs_security_free(struct reiserfs_security_handle *sec); |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | static inline int reiserfs_xattrs_initialized(struct super_block *sb) | ||
74 | { | ||
75 | return REISERFS_SB(sb)->priv_root != NULL; | ||
76 | } | ||
77 | |||
73 | #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header)) | 78 | #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header)) |
74 | static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size) | 79 | static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size) |
75 | { | 80 | { |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 03f816a9b659..124f90cd5a38 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -190,9 +190,6 @@ struct skb_shared_info { | |||
190 | atomic_t dataref; | 190 | atomic_t dataref; |
191 | unsigned short nr_frags; | 191 | unsigned short nr_frags; |
192 | unsigned short gso_size; | 192 | unsigned short gso_size; |
193 | #ifdef CONFIG_HAS_DMA | ||
194 | dma_addr_t dma_head; | ||
195 | #endif | ||
196 | /* Warning: this field is not always filled in (UFO)! */ | 193 | /* Warning: this field is not always filled in (UFO)! */ |
197 | unsigned short gso_segs; | 194 | unsigned short gso_segs; |
198 | unsigned short gso_type; | 195 | unsigned short gso_type; |
@@ -201,9 +198,6 @@ struct skb_shared_info { | |||
201 | struct sk_buff *frag_list; | 198 | struct sk_buff *frag_list; |
202 | struct skb_shared_hwtstamps hwtstamps; | 199 | struct skb_shared_hwtstamps hwtstamps; |
203 | skb_frag_t frags[MAX_SKB_FRAGS]; | 200 | skb_frag_t frags[MAX_SKB_FRAGS]; |
204 | #ifdef CONFIG_HAS_DMA | ||
205 | dma_addr_t dma_maps[MAX_SKB_FRAGS]; | ||
206 | #endif | ||
207 | /* Intermediate layers must ensure that destructor_arg | 201 | /* Intermediate layers must ensure that destructor_arg |
208 | * remains valid until skb destructor */ | 202 | * remains valid until skb destructor */ |
209 | void * destructor_arg; | 203 | void * destructor_arg; |
diff --git a/include/linux/socket.h b/include/linux/socket.h index 7b3aae2052a6..354cc5617f8b 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
@@ -255,6 +255,7 @@ struct ucred { | |||
255 | #define MSG_ERRQUEUE 0x2000 /* Fetch message from error queue */ | 255 | #define MSG_ERRQUEUE 0x2000 /* Fetch message from error queue */ |
256 | #define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */ | 256 | #define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */ |
257 | #define MSG_MORE 0x8000 /* Sender will send more */ | 257 | #define MSG_MORE 0x8000 /* Sender will send more */ |
258 | #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ | ||
258 | 259 | ||
259 | #define MSG_EOF MSG_FIN | 260 | #define MSG_EOF MSG_FIN |
260 | 261 | ||
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index d7152b451e21..7c91260c44a9 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h | |||
@@ -36,7 +36,6 @@ struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt); | |||
36 | void xprt_free_bc_request(struct rpc_rqst *req); | 36 | void xprt_free_bc_request(struct rpc_rqst *req); |
37 | int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); | 37 | int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); |
38 | void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs); | 38 | void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs); |
39 | void bc_release_request(struct rpc_task *); | ||
40 | int bc_send(struct rpc_rqst *req); | 39 | int bc_send(struct rpc_rqst *req); |
41 | 40 | ||
42 | /* | 41 | /* |
@@ -59,6 +58,10 @@ static inline int svc_is_backchannel(const struct svc_rqst *rqstp) | |||
59 | { | 58 | { |
60 | return 0; | 59 | return 0; |
61 | } | 60 | } |
61 | |||
62 | static inline void xprt_free_bc_request(struct rpc_rqst *req) | ||
63 | { | ||
64 | } | ||
62 | #endif /* CONFIG_NFS_V4_1 */ | 65 | #endif /* CONFIG_NFS_V4_1 */ |
63 | #endif /* _LINUX_SUNRPC_BC_XPRT_H */ | 66 | #endif /* _LINUX_SUNRPC_BC_XPRT_H */ |
64 | 67 | ||
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index f994ae58a002..057929b0a651 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -688,7 +688,7 @@ asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg); | |||
688 | asmlinkage long sys_shmget(key_t key, size_t size, int flag); | 688 | asmlinkage long sys_shmget(key_t key, size_t size, int flag); |
689 | asmlinkage long sys_shmdt(char __user *shmaddr); | 689 | asmlinkage long sys_shmdt(char __user *shmaddr); |
690 | asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); | 690 | asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); |
691 | asmlinkage long sys_ipc(unsigned int call, int first, int second, | 691 | asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, |
692 | unsigned long third, void __user *ptr, long fifth); | 692 | unsigned long third, void __user *ptr, long fifth); |
693 | 693 | ||
694 | asmlinkage long sys_mq_open(const char __user *name, int oflag, mode_t mode, struct mq_attr __user *attr); | 694 | asmlinkage long sys_mq_open(const char __user *name, int oflag, mode_t mode, struct mq_attr __user *attr); |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index f59604ed0ec6..78b4bd3be496 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -49,7 +49,7 @@ struct tracepoint { | |||
49 | void **it_func; \ | 49 | void **it_func; \ |
50 | \ | 50 | \ |
51 | rcu_read_lock_sched_notrace(); \ | 51 | rcu_read_lock_sched_notrace(); \ |
52 | it_func = rcu_dereference((tp)->funcs); \ | 52 | it_func = rcu_dereference_sched((tp)->funcs); \ |
53 | if (it_func) { \ | 53 | if (it_func) { \ |
54 | do { \ | 54 | do { \ |
55 | ((void(*)(proto))(*it_func))(args); \ | 55 | ((void(*)(proto))(*it_func))(args); \ |
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 04a6908e38d2..ff77e8f882f1 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h | |||
@@ -176,6 +176,6 @@ extern void hci_sock_cleanup(void); | |||
176 | extern int bt_sysfs_init(void); | 176 | extern int bt_sysfs_init(void); |
177 | extern void bt_sysfs_cleanup(void); | 177 | extern void bt_sysfs_cleanup(void); |
178 | 178 | ||
179 | extern struct class *bt_class; | 179 | extern struct dentry *bt_debugfs; |
180 | 180 | ||
181 | #endif /* __BLUETOOTH_H */ | 181 | #endif /* __BLUETOOTH_H */ |
diff --git a/include/net/netlink.h b/include/net/netlink.h index f82e463c875a..4fc05b58503e 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h | |||
@@ -945,7 +945,11 @@ static inline u64 nla_get_u64(const struct nlattr *nla) | |||
945 | */ | 945 | */ |
946 | static inline __be64 nla_get_be64(const struct nlattr *nla) | 946 | static inline __be64 nla_get_be64(const struct nlattr *nla) |
947 | { | 947 | { |
948 | return *(__be64 *) nla_data(nla); | 948 | __be64 tmp; |
949 | |||
950 | nla_memcpy(&tmp, nla, sizeof(tmp)); | ||
951 | |||
952 | return tmp; | ||
949 | } | 953 | } |
950 | 954 | ||
951 | /** | 955 | /** |
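nla_get_be64() now copies the attribute payload into a local via nla_memcpy() instead of dereferencing a cast pointer, because netlink payloads are only guaranteed 4-byte alignment and a direct 64-bit load can fault on strict-alignment machines. A userspace illustration of the safe pattern:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char msg[16] = { 0 };
	unsigned char *payload = msg + 4;	/* 4-byte aligned, 8-byte misaligned */
	uint64_t value = 0x1122334455667788ULL;

	memcpy(payload, &value, sizeof(value));	/* build a fake attribute payload */

	/*
	 * Risky: *(uint64_t *)payload is an unaligned load that can trap on
	 * strict-alignment architectures.  Safe: memcpy into a properly
	 * aligned temporary, which is what nla_memcpy() does for the new
	 * nla_get_be64().
	 */
	uint64_t tmp;
	memcpy(&tmp, payload, sizeof(tmp));
	printf("value = %#llx\n", (unsigned long long)tmp);
	return 0;
}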
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h index 32896a773910..2e488b60bc76 100644 --- a/include/pcmcia/ss.h +++ b/include/pcmcia/ss.h | |||
@@ -277,12 +277,6 @@ extern struct pccard_resource_ops pccard_nonstatic_ops; | |||
277 | #endif | 277 | #endif |
278 | 278 | ||
279 | 279 | ||
280 | /* socket drivers are expected to use these callbacks in their .drv struct */ | ||
281 | extern int pcmcia_socket_dev_suspend(struct device *dev); | ||
282 | extern void pcmcia_socket_dev_early_resume(struct device *dev); | ||
283 | extern void pcmcia_socket_dev_late_resume(struct device *dev); | ||
284 | extern int pcmcia_socket_dev_resume(struct device *dev); | ||
285 | |||
286 | /* socket drivers use this callback in their IRQ handler */ | 280 | /* socket drivers use this callback in their IRQ handler */ |
287 | extern void pcmcia_parse_events(struct pcmcia_socket *socket, | 281 | extern void pcmcia_parse_events(struct pcmcia_socket *socket, |
288 | unsigned int events); | 282 | unsigned int events); |
diff --git a/init/main.c b/init/main.c index a1ab78ceb4b6..cbead27caefc 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -858,7 +858,7 @@ static int __init kernel_init(void * unused) | |||
858 | /* | 858 | /* |
859 | * init can allocate pages on any node | 859 | * init can allocate pages on any node |
860 | */ | 860 | */ |
861 | set_mems_allowed(node_possible_map); | 861 | set_mems_allowed(node_states[N_HIGH_MEMORY]); |
862 | /* | 862 | /* |
863 | * init can run on any cpu. | 863 | * init can run on any cpu. |
864 | */ | 864 | */ |
diff --git a/ipc/syscall.c b/ipc/syscall.c index 355a3da9ec73..1d6f53f6b562 100644 --- a/ipc/syscall.c +++ b/ipc/syscall.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/syscalls.h> | 13 | #include <linux/syscalls.h> |
14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
15 | 15 | ||
16 | SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, int, second, | 16 | SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second, |
17 | unsigned long, third, void __user *, ptr, long, fifth) | 17 | unsigned long, third, void __user *, ptr, long, fifth) |
18 | { | 18 | { |
19 | int version, ret; | 19 | int version, ret; |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index ef909a329750..e2769e13980c 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -27,7 +27,6 @@ | |||
27 | */ | 27 | */ |
28 | 28 | ||
29 | #include <linux/cgroup.h> | 29 | #include <linux/cgroup.h> |
30 | #include <linux/module.h> | ||
31 | #include <linux/ctype.h> | 30 | #include <linux/ctype.h> |
32 | #include <linux/errno.h> | 31 | #include <linux/errno.h> |
33 | #include <linux/fs.h> | 32 | #include <linux/fs.h> |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index ba401fab459f..d10946748ec2 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -920,9 +920,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, | |||
920 | * call to guarantee_online_mems(), as we know no one is changing | 920 | * call to guarantee_online_mems(), as we know no one is changing |
921 | * our task's cpuset. | 921 | * our task's cpuset. |
922 | * | 922 | * |
923 | * Hold callback_mutex around the two modifications of our tasks | ||
924 | * mems_allowed to synchronize with cpuset_mems_allowed(). | ||
925 | * | ||
926 | * While the mm_struct we are migrating is typically from some | 923 | * While the mm_struct we are migrating is typically from some |
927 | * other task, the task_struct mems_allowed that we are hacking | 924 | * other task, the task_struct mems_allowed that we are hacking |
928 | * is for our current task, which must allocate new pages for that | 925 | * is for our current task, which must allocate new pages for that |
@@ -973,15 +970,20 @@ static void cpuset_change_nodemask(struct task_struct *p, | |||
973 | struct cpuset *cs; | 970 | struct cpuset *cs; |
974 | int migrate; | 971 | int migrate; |
975 | const nodemask_t *oldmem = scan->data; | 972 | const nodemask_t *oldmem = scan->data; |
976 | nodemask_t newmems; | 973 | NODEMASK_ALLOC(nodemask_t, newmems, GFP_KERNEL); |
974 | |||
975 | if (!newmems) | ||
976 | return; | ||
977 | 977 | ||
978 | cs = cgroup_cs(scan->cg); | 978 | cs = cgroup_cs(scan->cg); |
979 | guarantee_online_mems(cs, &newmems); | 979 | guarantee_online_mems(cs, newmems); |
980 | 980 | ||
981 | task_lock(p); | 981 | task_lock(p); |
982 | cpuset_change_task_nodemask(p, &newmems); | 982 | cpuset_change_task_nodemask(p, newmems); |
983 | task_unlock(p); | 983 | task_unlock(p); |
984 | 984 | ||
985 | NODEMASK_FREE(newmems); | ||
986 | |||
985 | mm = get_task_mm(p); | 987 | mm = get_task_mm(p); |
986 | if (!mm) | 988 | if (!mm) |
987 | return; | 989 | return; |
@@ -1051,16 +1053,21 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem, | |||
1051 | static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, | 1053 | static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, |
1052 | const char *buf) | 1054 | const char *buf) |
1053 | { | 1055 | { |
1054 | nodemask_t oldmem; | 1056 | NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL); |
1055 | int retval; | 1057 | int retval; |
1056 | struct ptr_heap heap; | 1058 | struct ptr_heap heap; |
1057 | 1059 | ||
1060 | if (!oldmem) | ||
1061 | return -ENOMEM; | ||
1062 | |||
1058 | /* | 1063 | /* |
1059 | * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY]; | 1064 | * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY]; |
1060 | * it's read-only | 1065 | * it's read-only |
1061 | */ | 1066 | */ |
1062 | if (cs == &top_cpuset) | 1067 | if (cs == &top_cpuset) { |
1063 | return -EACCES; | 1068 | retval = -EACCES; |
1069 | goto done; | ||
1070 | } | ||
1064 | 1071 | ||
1065 | /* | 1072 | /* |
1066 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. | 1073 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. |
@@ -1076,11 +1083,13 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, | |||
1076 | goto done; | 1083 | goto done; |
1077 | 1084 | ||
1078 | if (!nodes_subset(trialcs->mems_allowed, | 1085 | if (!nodes_subset(trialcs->mems_allowed, |
1079 | node_states[N_HIGH_MEMORY])) | 1086 | node_states[N_HIGH_MEMORY])) { |
1080 | return -EINVAL; | 1087 | retval = -EINVAL; |
1088 | goto done; | ||
1089 | } | ||
1081 | } | 1090 | } |
1082 | oldmem = cs->mems_allowed; | 1091 | *oldmem = cs->mems_allowed; |
1083 | if (nodes_equal(oldmem, trialcs->mems_allowed)) { | 1092 | if (nodes_equal(*oldmem, trialcs->mems_allowed)) { |
1084 | retval = 0; /* Too easy - nothing to do */ | 1093 | retval = 0; /* Too easy - nothing to do */ |
1085 | goto done; | 1094 | goto done; |
1086 | } | 1095 | } |
@@ -1096,10 +1105,11 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, | |||
1096 | cs->mems_allowed = trialcs->mems_allowed; | 1105 | cs->mems_allowed = trialcs->mems_allowed; |
1097 | mutex_unlock(&callback_mutex); | 1106 | mutex_unlock(&callback_mutex); |
1098 | 1107 | ||
1099 | update_tasks_nodemask(cs, &oldmem, &heap); | 1108 | update_tasks_nodemask(cs, oldmem, &heap); |
1100 | 1109 | ||
1101 | heap_free(&heap); | 1110 | heap_free(&heap); |
1102 | done: | 1111 | done: |
1112 | NODEMASK_FREE(oldmem); | ||
1103 | return retval; | 1113 | return retval; |
1104 | } | 1114 | } |
1105 | 1115 | ||
@@ -1384,40 +1394,47 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont, | |||
1384 | struct cgroup *oldcont, struct task_struct *tsk, | 1394 | struct cgroup *oldcont, struct task_struct *tsk, |
1385 | bool threadgroup) | 1395 | bool threadgroup) |
1386 | { | 1396 | { |
1387 | nodemask_t from, to; | ||
1388 | struct mm_struct *mm; | 1397 | struct mm_struct *mm; |
1389 | struct cpuset *cs = cgroup_cs(cont); | 1398 | struct cpuset *cs = cgroup_cs(cont); |
1390 | struct cpuset *oldcs = cgroup_cs(oldcont); | 1399 | struct cpuset *oldcs = cgroup_cs(oldcont); |
1400 | NODEMASK_ALLOC(nodemask_t, from, GFP_KERNEL); | ||
1401 | NODEMASK_ALLOC(nodemask_t, to, GFP_KERNEL); | ||
1402 | |||
1403 | if (from == NULL || to == NULL) | ||
1404 | goto alloc_fail; | ||
1391 | 1405 | ||
1392 | if (cs == &top_cpuset) { | 1406 | if (cs == &top_cpuset) { |
1393 | cpumask_copy(cpus_attach, cpu_possible_mask); | 1407 | cpumask_copy(cpus_attach, cpu_possible_mask); |
1394 | to = node_possible_map; | ||
1395 | } else { | 1408 | } else { |
1396 | guarantee_online_cpus(cs, cpus_attach); | 1409 | guarantee_online_cpus(cs, cpus_attach); |
1397 | guarantee_online_mems(cs, &to); | ||
1398 | } | 1410 | } |
1411 | guarantee_online_mems(cs, to); | ||
1399 | 1412 | ||
1400 | /* do per-task migration stuff possibly for each in the threadgroup */ | 1413 | /* do per-task migration stuff possibly for each in the threadgroup */ |
1401 | cpuset_attach_task(tsk, &to, cs); | 1414 | cpuset_attach_task(tsk, to, cs); |
1402 | if (threadgroup) { | 1415 | if (threadgroup) { |
1403 | struct task_struct *c; | 1416 | struct task_struct *c; |
1404 | rcu_read_lock(); | 1417 | rcu_read_lock(); |
1405 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { | 1418 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { |
1406 | cpuset_attach_task(c, &to, cs); | 1419 | cpuset_attach_task(c, to, cs); |
1407 | } | 1420 | } |
1408 | rcu_read_unlock(); | 1421 | rcu_read_unlock(); |
1409 | } | 1422 | } |
1410 | 1423 | ||
1411 | /* change mm; only needs to be done once even if threadgroup */ | 1424 | /* change mm; only needs to be done once even if threadgroup */ |
1412 | from = oldcs->mems_allowed; | 1425 | *from = oldcs->mems_allowed; |
1413 | to = cs->mems_allowed; | 1426 | *to = cs->mems_allowed; |
1414 | mm = get_task_mm(tsk); | 1427 | mm = get_task_mm(tsk); |
1415 | if (mm) { | 1428 | if (mm) { |
1416 | mpol_rebind_mm(mm, &to); | 1429 | mpol_rebind_mm(mm, to); |
1417 | if (is_memory_migrate(cs)) | 1430 | if (is_memory_migrate(cs)) |
1418 | cpuset_migrate_mm(mm, &from, &to); | 1431 | cpuset_migrate_mm(mm, from, to); |
1419 | mmput(mm); | 1432 | mmput(mm); |
1420 | } | 1433 | } |
1434 | |||
1435 | alloc_fail: | ||
1436 | NODEMASK_FREE(from); | ||
1437 | NODEMASK_FREE(to); | ||
1421 | } | 1438 | } |
1422 | 1439 | ||
1423 | /* The various types of files and directories in a cpuset file system */ | 1440 | /* The various types of files and directories in a cpuset file system */ |
@@ -1562,13 +1579,21 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) | |||
1562 | 1579 | ||
1563 | static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) | 1580 | static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) |
1564 | { | 1581 | { |
1565 | nodemask_t mask; | 1582 | NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL); |
1583 | int retval; | ||
1584 | |||
1585 | if (mask == NULL) | ||
1586 | return -ENOMEM; | ||
1566 | 1587 | ||
1567 | mutex_lock(&callback_mutex); | 1588 | mutex_lock(&callback_mutex); |
1568 | mask = cs->mems_allowed; | 1589 | *mask = cs->mems_allowed; |
1569 | mutex_unlock(&callback_mutex); | 1590 | mutex_unlock(&callback_mutex); |
1570 | 1591 | ||
1571 | return nodelist_scnprintf(page, PAGE_SIZE, mask); | 1592 | retval = nodelist_scnprintf(page, PAGE_SIZE, *mask); |
1593 | |||
1594 | NODEMASK_FREE(mask); | ||
1595 | |||
1596 | return retval; | ||
1572 | } | 1597 | } |
1573 | 1598 | ||
1574 | static ssize_t cpuset_common_file_read(struct cgroup *cont, | 1599 | static ssize_t cpuset_common_file_read(struct cgroup *cont, |
@@ -1997,7 +2022,10 @@ static void scan_for_empty_cpusets(struct cpuset *root) | |||
1997 | struct cpuset *cp; /* scans cpusets being updated */ | 2022 | struct cpuset *cp; /* scans cpusets being updated */ |
1998 | struct cpuset *child; /* scans child cpusets of cp */ | 2023 | struct cpuset *child; /* scans child cpusets of cp */ |
1999 | struct cgroup *cont; | 2024 | struct cgroup *cont; |
2000 | nodemask_t oldmems; | 2025 | NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL); |
2026 | |||
2027 | if (oldmems == NULL) | ||
2028 | return; | ||
2001 | 2029 | ||
2002 | list_add_tail((struct list_head *)&root->stack_list, &queue); | 2030 | list_add_tail((struct list_head *)&root->stack_list, &queue); |
2003 | 2031 | ||
@@ -2014,7 +2042,7 @@ static void scan_for_empty_cpusets(struct cpuset *root) | |||
2014 | nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) | 2042 | nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) |
2015 | continue; | 2043 | continue; |
2016 | 2044 | ||
2017 | oldmems = cp->mems_allowed; | 2045 | *oldmems = cp->mems_allowed; |
2018 | 2046 | ||
2019 | /* Remove offline cpus and mems from this cpuset. */ | 2047 | /* Remove offline cpus and mems from this cpuset. */ |
2020 | mutex_lock(&callback_mutex); | 2048 | mutex_lock(&callback_mutex); |
@@ -2030,9 +2058,10 @@ static void scan_for_empty_cpusets(struct cpuset *root) | |||
2030 | remove_tasks_in_empty_cpuset(cp); | 2058 | remove_tasks_in_empty_cpuset(cp); |
2031 | else { | 2059 | else { |
2032 | update_tasks_cpumask(cp, NULL); | 2060 | update_tasks_cpumask(cp, NULL); |
2033 | update_tasks_nodemask(cp, &oldmems, NULL); | 2061 | update_tasks_nodemask(cp, oldmems, NULL); |
2034 | } | 2062 | } |
2035 | } | 2063 | } |
2064 | NODEMASK_FREE(oldmems); | ||
2036 | } | 2065 | } |
2037 | 2066 | ||
2038 | /* | 2067 | /* |
@@ -2090,20 +2119,33 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb, | |||
2090 | static int cpuset_track_online_nodes(struct notifier_block *self, | 2119 | static int cpuset_track_online_nodes(struct notifier_block *self, |
2091 | unsigned long action, void *arg) | 2120 | unsigned long action, void *arg) |
2092 | { | 2121 | { |
2122 | NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL); | ||
2123 | |||
2124 | if (oldmems == NULL) | ||
2125 | return NOTIFY_DONE; | ||
2126 | |||
2093 | cgroup_lock(); | 2127 | cgroup_lock(); |
2094 | switch (action) { | 2128 | switch (action) { |
2095 | case MEM_ONLINE: | 2129 | case MEM_ONLINE: |
2096 | case MEM_OFFLINE: | 2130 | *oldmems = top_cpuset.mems_allowed; |
2097 | mutex_lock(&callback_mutex); | 2131 | mutex_lock(&callback_mutex); |
2098 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | 2132 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; |
2099 | mutex_unlock(&callback_mutex); | 2133 | mutex_unlock(&callback_mutex); |
2100 | if (action == MEM_OFFLINE) | 2134 | update_tasks_nodemask(&top_cpuset, oldmems, NULL); |
2101 | scan_for_empty_cpusets(&top_cpuset); | 2135 | break; |
2136 | case MEM_OFFLINE: | ||
2137 | /* | ||
2138 | * needn't update top_cpuset.mems_allowed explicitly because | ||
2139 | * scan_for_empty_cpusets() will update it. | ||
2140 | */ | ||
2141 | scan_for_empty_cpusets(&top_cpuset); | ||
2102 | break; | 2142 | break; |
2103 | default: | 2143 | default: |
2104 | break; | 2144 | break; |
2105 | } | 2145 | } |
2106 | cgroup_unlock(); | 2146 | cgroup_unlock(); |
2147 | |||
2148 | NODEMASK_FREE(oldmems); | ||
2107 | return NOTIFY_OK; | 2149 | return NOTIFY_OK; |
2108 | } | 2150 | } |
2109 | #endif | 2151 | #endif |
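
The cpuset.c hunks above replace on-stack nodemask_t temporaries with NODEMASK_ALLOC()/NODEMASK_FREE() heap allocations: with a large CONFIG_NODES_SHIFT a nodemask is too big for the kernel stack, so each function now allocates the mask and routes every early return through a label that frees it. A minimal userspace sketch of that allocate/single-exit/free shape follows; the nodemask_t layout, ILLUSTRATIVE_NODES and update_nodemask_like() are demo assumptions, not kernel definitions.

	/* Userspace sketch (not kernel code) of the NODEMASK_ALLOC pattern:
	 * keep a potentially large mask off the stack and make every error
	 * path fall through one cleanup label so it is always freed. */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define ILLUSTRATIVE_NODES 1024		/* assumed maximum node count */

	typedef struct {
		unsigned long bits[ILLUSTRATIVE_NODES / (8 * sizeof(unsigned long))];
	} nodemask_t;

	static int update_nodemask_like(const nodemask_t *requested)
	{
		nodemask_t *oldmem = calloc(1, sizeof(*oldmem));	/* NODEMASK_ALLOC() */
		int retval;

		if (!oldmem)
			return -ENOMEM;

		if (!requested) {		/* stand-in for the -EACCES check */
			retval = -EACCES;
			goto done;		/* no bare return: oldmem must be freed */
		}

		memcpy(oldmem, requested, sizeof(*oldmem));	/* like *oldmem = cs->mems_allowed */
		/* ... compute and apply the new mask, using *oldmem as the snapshot ... */
		retval = 0;
	done:
		free(oldmem);			/* NODEMASK_FREE() */
		return retval;
	}

	int main(void)
	{
		nodemask_t req = { { 0 } };

		printf("update: %d\n", update_nodemask_like(&req));
		printf("update(NULL): %d\n", update_nodemask_like(NULL));
		return 0;
	}
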
diff --git a/kernel/cred.c b/kernel/cred.c index 1ed8ca18790c..1b1129d0cce8 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
@@ -364,7 +364,7 @@ struct cred *prepare_usermodehelper_creds(void) | |||
364 | 364 | ||
365 | new = kmem_cache_alloc(cred_jar, GFP_ATOMIC); | 365 | new = kmem_cache_alloc(cred_jar, GFP_ATOMIC); |
366 | if (!new) | 366 | if (!new) |
367 | return NULL; | 367 | goto free_tgcred; |
368 | 368 | ||
369 | kdebug("prepare_usermodehelper_creds() alloc %p", new); | 369 | kdebug("prepare_usermodehelper_creds() alloc %p", new); |
370 | 370 | ||
@@ -397,6 +397,10 @@ struct cred *prepare_usermodehelper_creds(void) | |||
397 | 397 | ||
398 | error: | 398 | error: |
399 | put_cred(new); | 399 | put_cred(new); |
400 | free_tgcred: | ||
401 | #ifdef CONFIG_KEYS | ||
402 | kfree(tgcred); | ||
403 | #endif | ||
400 | return NULL; | 404 | return NULL; |
401 | } | 405 | } |
402 | 406 | ||
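
The kernel/cred.c fix reroutes the kmem_cache_alloc() failure in prepare_usermodehelper_creds() through a free_tgcred label, so the tgcred buffer allocated earlier is no longer leaked when the second allocation fails. The shape of that fix as a standalone userspace sketch (struct creds/struct tgcred and prepare_creds_like() are placeholders, not the kernel's types):

	#include <stdio.h>
	#include <stdlib.h>

	struct tgcred { int placeholder; };
	struct creds { struct tgcred *tgcred; };

	static struct creds *prepare_creds_like(void)
	{
		struct tgcred *tgcred = calloc(1, sizeof(*tgcred));
		struct creds *new;

		if (!tgcred)
			return NULL;

		new = calloc(1, sizeof(*new));
		if (!new)
			goto free_tgcred;	/* the old code returned NULL here and leaked tgcred */

		new->tgcred = tgcred;		/* on success, ownership moves into new */
		return new;

	free_tgcred:
		free(tgcred);
		return NULL;
	}

	int main(void)
	{
		struct creds *c = prepare_creds_like();

		printf("prepared: %s\n", c ? "yes" : "no");
		if (c) {
			free(c->tgcred);
			free(c);
		}
		return 0;
	}
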
diff --git a/kernel/early_res.c b/kernel/early_res.c index 3cb2c661bb78..31aa9332ef3f 100644 --- a/kernel/early_res.c +++ b/kernel/early_res.c | |||
@@ -333,6 +333,12 @@ void __init free_early_partial(u64 start, u64 end) | |||
333 | struct early_res *r; | 333 | struct early_res *r; |
334 | int i; | 334 | int i; |
335 | 335 | ||
336 | if (start == end) | ||
337 | return; | ||
338 | |||
339 | if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end)) | ||
340 | return; | ||
341 | |||
336 | try_next: | 342 | try_next: |
337 | i = find_overlapped_early(start, end); | 343 | i = find_overlapped_early(start, end); |
338 | if (i >= max_early_res) | 344 | if (i >= max_early_res) |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 42ec11b2af8a..b7091d5ca2f8 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -359,6 +359,23 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq) | |||
359 | if (desc->chip->ack) | 359 | if (desc->chip->ack) |
360 | desc->chip->ack(irq); | 360 | desc->chip->ack(irq); |
361 | } | 361 | } |
362 | desc->status |= IRQ_MASKED; | ||
363 | } | ||
364 | |||
365 | static inline void mask_irq(struct irq_desc *desc, int irq) | ||
366 | { | ||
367 | if (desc->chip->mask) { | ||
368 | desc->chip->mask(irq); | ||
369 | desc->status |= IRQ_MASKED; | ||
370 | } | ||
371 | } | ||
372 | |||
373 | static inline void unmask_irq(struct irq_desc *desc, int irq) | ||
374 | { | ||
375 | if (desc->chip->unmask) { | ||
376 | desc->chip->unmask(irq); | ||
377 | desc->status &= ~IRQ_MASKED; | ||
378 | } | ||
362 | } | 379 | } |
363 | 380 | ||
364 | /* | 381 | /* |
@@ -484,10 +501,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) | |||
484 | raw_spin_lock(&desc->lock); | 501 | raw_spin_lock(&desc->lock); |
485 | desc->status &= ~IRQ_INPROGRESS; | 502 | desc->status &= ~IRQ_INPROGRESS; |
486 | 503 | ||
487 | if (unlikely(desc->status & IRQ_ONESHOT)) | 504 | if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) |
488 | desc->status |= IRQ_MASKED; | 505 | unmask_irq(desc, irq); |
489 | else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) | ||
490 | desc->chip->unmask(irq); | ||
491 | out_unlock: | 506 | out_unlock: |
492 | raw_spin_unlock(&desc->lock); | 507 | raw_spin_unlock(&desc->lock); |
493 | } | 508 | } |
@@ -524,8 +539,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
524 | action = desc->action; | 539 | action = desc->action; |
525 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) { | 540 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) { |
526 | desc->status |= IRQ_PENDING; | 541 | desc->status |= IRQ_PENDING; |
527 | if (desc->chip->mask) | 542 | mask_irq(desc, irq); |
528 | desc->chip->mask(irq); | ||
529 | goto out; | 543 | goto out; |
530 | } | 544 | } |
531 | 545 | ||
@@ -593,7 +607,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
593 | irqreturn_t action_ret; | 607 | irqreturn_t action_ret; |
594 | 608 | ||
595 | if (unlikely(!action)) { | 609 | if (unlikely(!action)) { |
596 | desc->chip->mask(irq); | 610 | mask_irq(desc, irq); |
597 | goto out_unlock; | 611 | goto out_unlock; |
598 | } | 612 | } |
599 | 613 | ||
@@ -605,8 +619,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
605 | if (unlikely((desc->status & | 619 | if (unlikely((desc->status & |
606 | (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == | 620 | (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == |
607 | (IRQ_PENDING | IRQ_MASKED))) { | 621 | (IRQ_PENDING | IRQ_MASKED))) { |
608 | desc->chip->unmask(irq); | 622 | unmask_irq(desc, irq); |
609 | desc->status &= ~IRQ_MASKED; | ||
610 | } | 623 | } |
611 | 624 | ||
612 | desc->status &= ~IRQ_PENDING; | 625 | desc->status &= ~IRQ_PENDING; |
@@ -716,7 +729,7 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | |||
716 | __set_irq_handler(irq, handle, 0, name); | 729 | __set_irq_handler(irq, handle, 0, name); |
717 | } | 730 | } |
718 | 731 | ||
719 | void __init set_irq_noprobe(unsigned int irq) | 732 | void set_irq_noprobe(unsigned int irq) |
720 | { | 733 | { |
721 | struct irq_desc *desc = irq_to_desc(irq); | 734 | struct irq_desc *desc = irq_to_desc(irq); |
722 | unsigned long flags; | 735 | unsigned long flags; |
@@ -731,7 +744,7 @@ void __init set_irq_noprobe(unsigned int irq) | |||
731 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 744 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
732 | } | 745 | } |
733 | 746 | ||
734 | void __init set_irq_probe(unsigned int irq) | 747 | void set_irq_probe(unsigned int irq) |
735 | { | 748 | { |
736 | struct irq_desc *desc = irq_to_desc(irq); | 749 | struct irq_desc *desc = irq_to_desc(irq); |
737 | unsigned long flags; | 750 | unsigned long flags; |
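
The kernel/irq/chip.c hunk folds the repeated "call the chip hook, then fix up IRQ_MASKED" sequences into mask_irq()/unmask_irq() helpers, so the software flag can no longer drift from what the hardware was actually told. A compilable toy version of that helper pattern (fake_chip/fake_desc are stand-ins, not the kernel's irq_chip/irq_desc):

	#include <stdio.h>

	#define IRQ_MASKED 0x1

	struct fake_chip {
		void (*mask)(int irq);
		void (*unmask)(int irq);
	};

	struct fake_desc {
		unsigned int status;
		struct fake_chip *chip;
	};

	static void mask_irq(struct fake_desc *desc, int irq)
	{
		if (desc->chip->mask) {
			desc->chip->mask(irq);
			desc->status |= IRQ_MASKED;	/* flag updated together with hw */
		}
	}

	static void unmask_irq(struct fake_desc *desc, int irq)
	{
		if (desc->chip->unmask) {
			desc->chip->unmask(irq);
			desc->status &= ~IRQ_MASKED;
		}
	}

	static void hw_mask(int irq)   { printf("hw: mask %d\n", irq); }
	static void hw_unmask(int irq) { printf("hw: unmask %d\n", irq); }

	int main(void)
	{
		struct fake_chip chip = { hw_mask, hw_unmask };
		struct fake_desc desc = { 0, &chip };

		mask_irq(&desc, 9);
		printf("masked=%d\n", !!(desc.status & IRQ_MASKED));
		unmask_irq(&desc, 9);
		printf("masked=%d\n", !!(desc.status & IRQ_MASKED));
		return 0;
	}
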
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index eb6078ca60c7..398fda155f6e 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -382,6 +382,7 @@ int can_request_irq(unsigned int irq, unsigned long irqflags) | |||
382 | { | 382 | { |
383 | struct irq_desc *desc = irq_to_desc(irq); | 383 | struct irq_desc *desc = irq_to_desc(irq); |
384 | struct irqaction *action; | 384 | struct irqaction *action; |
385 | unsigned long flags; | ||
385 | 386 | ||
386 | if (!desc) | 387 | if (!desc) |
387 | return 0; | 388 | return 0; |
@@ -389,11 +390,14 @@ int can_request_irq(unsigned int irq, unsigned long irqflags) | |||
389 | if (desc->status & IRQ_NOREQUEST) | 390 | if (desc->status & IRQ_NOREQUEST) |
390 | return 0; | 391 | return 0; |
391 | 392 | ||
393 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
392 | action = desc->action; | 394 | action = desc->action; |
393 | if (action) | 395 | if (action) |
394 | if (irqflags & action->flags & IRQF_SHARED) | 396 | if (irqflags & action->flags & IRQF_SHARED) |
395 | action = NULL; | 397 | action = NULL; |
396 | 398 | ||
399 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
400 | |||
397 | return !action; | 401 | return !action; |
398 | } | 402 | } |
399 | 403 | ||
@@ -483,8 +487,26 @@ static int irq_wait_for_interrupt(struct irqaction *action) | |||
483 | */ | 487 | */ |
484 | static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) | 488 | static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) |
485 | { | 489 | { |
490 | again: | ||
486 | chip_bus_lock(irq, desc); | 491 | chip_bus_lock(irq, desc); |
487 | raw_spin_lock_irq(&desc->lock); | 492 | raw_spin_lock_irq(&desc->lock); |
493 | |||
494 | /* | ||
495 | * Implausible though it may be we need to protect us against | ||
496 | * the following scenario: | ||
497 | * | ||
498 | * The thread is faster done than the hard interrupt handler | ||
499 | * on the other CPU. If we unmask the irq line then the | ||
500 | * interrupt can come in again and masks the line, leaves due | ||
501 | * to IRQ_INPROGRESS and the irq line is masked forever. | ||
502 | */ | ||
503 | if (unlikely(desc->status & IRQ_INPROGRESS)) { | ||
504 | raw_spin_unlock_irq(&desc->lock); | ||
505 | chip_bus_sync_unlock(irq, desc); | ||
506 | cpu_relax(); | ||
507 | goto again; | ||
508 | } | ||
509 | |||
488 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { | 510 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { |
489 | desc->status &= ~IRQ_MASKED; | 511 | desc->status &= ~IRQ_MASKED; |
490 | desc->chip->unmask(irq); | 512 | desc->chip->unmask(irq); |
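
The kernel/irq/manage.c hunk adds a retry loop to irq_finalize_oneshot(): if the hard handler is still flagged IRQ_INPROGRESS on another CPU, the IRQ thread drops its locks, relaxes and tries again rather than unmasking underneath it (which could leave the line masked forever). A userspace sketch of the same drop-the-lock-and-retry idiom, using a pthread mutex and plain flags in place of desc->lock and the status bits (build with -pthread; all names and delays are illustrative):

	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>
	#include <time.h>

	static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;
	static int in_progress;		/* analogue of IRQ_INPROGRESS */
	static int masked = 1;		/* analogue of IRQ_MASKED */

	static void short_sleep(long ns)
	{
		struct timespec ts = { 0, ns };

		nanosleep(&ts, NULL);
	}

	static void *hard_handler(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&desc_lock);
		in_progress = 1;
		pthread_mutex_unlock(&desc_lock);

		short_sleep(1000000);		/* pretend the handler runs for 1 ms */

		pthread_mutex_lock(&desc_lock);
		in_progress = 0;
		pthread_mutex_unlock(&desc_lock);
		return NULL;
	}

	static void finalize_oneshot(void)
	{
	again:
		pthread_mutex_lock(&desc_lock);
		if (in_progress) {
			/* like the kernel: drop the lock, relax, try again */
			pthread_mutex_unlock(&desc_lock);
			sched_yield();
			goto again;
		}
		if (masked) {
			masked = 0;		/* safe: the hard handler has finished */
			printf("unmasked after the hard handler finished\n");
		}
		pthread_mutex_unlock(&desc_lock);
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, hard_handler, NULL);
		short_sleep(100000);		/* give the handler a head start */
		finalize_oneshot();
		pthread_join(&t, NULL);
		return 0;
	}
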
diff --git a/kernel/kthread.c b/kernel/kthread.c index 82ed0ea15194..83911c780175 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -219,7 +219,7 @@ int kthreadd(void *unused) | |||
219 | set_task_comm(tsk, "kthreadd"); | 219 | set_task_comm(tsk, "kthreadd"); |
220 | ignore_signals(tsk); | 220 | ignore_signals(tsk); |
221 | set_cpus_allowed_ptr(tsk, cpu_all_mask); | 221 | set_cpus_allowed_ptr(tsk, cpu_all_mask); |
222 | set_mems_allowed(node_possible_map); | 222 | set_mems_allowed(node_states[N_HIGH_MEMORY]); |
223 | 223 | ||
224 | current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; | 224 | current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; |
225 | 225 | ||
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 455393e71cab..074ba2e036c8 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -1164,11 +1164,9 @@ void perf_event_task_sched_out(struct task_struct *task, | |||
1164 | struct perf_event_context *ctx = task->perf_event_ctxp; | 1164 | struct perf_event_context *ctx = task->perf_event_ctxp; |
1165 | struct perf_event_context *next_ctx; | 1165 | struct perf_event_context *next_ctx; |
1166 | struct perf_event_context *parent; | 1166 | struct perf_event_context *parent; |
1167 | struct pt_regs *regs; | ||
1168 | int do_switch = 1; | 1167 | int do_switch = 1; |
1169 | 1168 | ||
1170 | regs = task_pt_regs(task); | 1169 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); |
1171 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); | ||
1172 | 1170 | ||
1173 | if (likely(!ctx || !cpuctx->task_ctx)) | 1171 | if (likely(!ctx || !cpuctx->task_ctx)) |
1174 | return; | 1172 | return; |
@@ -3382,15 +3380,23 @@ static void perf_event_task_output(struct perf_event *event, | |||
3382 | struct perf_task_event *task_event) | 3380 | struct perf_task_event *task_event) |
3383 | { | 3381 | { |
3384 | struct perf_output_handle handle; | 3382 | struct perf_output_handle handle; |
3385 | int size; | ||
3386 | struct task_struct *task = task_event->task; | 3383 | struct task_struct *task = task_event->task; |
3387 | int ret; | 3384 | unsigned long flags; |
3385 | int size, ret; | ||
3386 | |||
3387 | /* | ||
3388 | * If this CPU attempts to acquire an rq lock held by a CPU spinning | ||
3389 | * in perf_output_lock() from interrupt context, it's game over. | ||
3390 | */ | ||
3391 | local_irq_save(flags); | ||
3388 | 3392 | ||
3389 | size = task_event->event_id.header.size; | 3393 | size = task_event->event_id.header.size; |
3390 | ret = perf_output_begin(&handle, event, size, 0, 0); | 3394 | ret = perf_output_begin(&handle, event, size, 0, 0); |
3391 | 3395 | ||
3392 | if (ret) | 3396 | if (ret) { |
3397 | local_irq_restore(flags); | ||
3393 | return; | 3398 | return; |
3399 | } | ||
3394 | 3400 | ||
3395 | task_event->event_id.pid = perf_event_pid(event, task); | 3401 | task_event->event_id.pid = perf_event_pid(event, task); |
3396 | task_event->event_id.ppid = perf_event_pid(event, current); | 3402 | task_event->event_id.ppid = perf_event_pid(event, current); |
@@ -3401,6 +3407,7 @@ static void perf_event_task_output(struct perf_event *event, | |||
3401 | perf_output_put(&handle, task_event->event_id); | 3407 | perf_output_put(&handle, task_event->event_id); |
3402 | 3408 | ||
3403 | perf_output_end(&handle); | 3409 | perf_output_end(&handle); |
3410 | local_irq_restore(flags); | ||
3404 | } | 3411 | } |
3405 | 3412 | ||
3406 | static int perf_event_task_match(struct perf_event *event) | 3413 | static int perf_event_task_match(struct perf_event *event) |
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 1a22dfd42df9..bc7704b3a443 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -1061,9 +1061,9 @@ static void check_thread_timers(struct task_struct *tsk, | |||
1061 | } | 1061 | } |
1062 | } | 1062 | } |
1063 | 1063 | ||
1064 | static void stop_process_timers(struct task_struct *tsk) | 1064 | static void stop_process_timers(struct signal_struct *sig) |
1065 | { | 1065 | { |
1066 | struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; | 1066 | struct thread_group_cputimer *cputimer = &sig->cputimer; |
1067 | unsigned long flags; | 1067 | unsigned long flags; |
1068 | 1068 | ||
1069 | if (!cputimer->running) | 1069 | if (!cputimer->running) |
@@ -1072,6 +1072,10 @@ static void stop_process_timers(struct task_struct *tsk) | |||
1072 | spin_lock_irqsave(&cputimer->lock, flags); | 1072 | spin_lock_irqsave(&cputimer->lock, flags); |
1073 | cputimer->running = 0; | 1073 | cputimer->running = 0; |
1074 | spin_unlock_irqrestore(&cputimer->lock, flags); | 1074 | spin_unlock_irqrestore(&cputimer->lock, flags); |
1075 | |||
1076 | sig->cputime_expires.prof_exp = cputime_zero; | ||
1077 | sig->cputime_expires.virt_exp = cputime_zero; | ||
1078 | sig->cputime_expires.sched_exp = 0; | ||
1075 | } | 1079 | } |
1076 | 1080 | ||
1077 | static u32 onecputick; | 1081 | static u32 onecputick; |
@@ -1133,7 +1137,7 @@ static void check_process_timers(struct task_struct *tsk, | |||
1133 | list_empty(&timers[CPUCLOCK_VIRT]) && | 1137 | list_empty(&timers[CPUCLOCK_VIRT]) && |
1134 | cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) && | 1138 | cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) && |
1135 | list_empty(&timers[CPUCLOCK_SCHED])) { | 1139 | list_empty(&timers[CPUCLOCK_SCHED])) { |
1136 | stop_process_timers(tsk); | 1140 | stop_process_timers(sig); |
1137 | return; | 1141 | return; |
1138 | } | 1142 | } |
1139 | 1143 | ||
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index f1125c1a6321..63fe25433980 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
47 | #include <linux/kernel_stat.h> | 47 | #include <linux/kernel_stat.h> |
48 | #include <linux/hardirq.h> | ||
48 | 49 | ||
49 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 50 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
50 | static struct lock_class_key rcu_lock_key; | 51 | static struct lock_class_key rcu_lock_key; |
@@ -66,6 +67,28 @@ EXPORT_SYMBOL_GPL(rcu_sched_lock_map); | |||
66 | int rcu_scheduler_active __read_mostly; | 67 | int rcu_scheduler_active __read_mostly; |
67 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | 68 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); |
68 | 69 | ||
70 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
71 | |||
72 | /** | ||
73 | * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? | ||
74 | * | ||
75 | * Check for bottom half being disabled, which covers both the | ||
76 | * CONFIG_PROVE_RCU and not cases. Note that if someone uses | ||
77 | * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled) | ||
78 | * will show the situation. | ||
79 | * | ||
80 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot. | ||
81 | */ | ||
82 | int rcu_read_lock_bh_held(void) | ||
83 | { | ||
84 | if (!debug_lockdep_rcu_enabled()) | ||
85 | return 1; | ||
86 | return in_softirq(); | ||
87 | } | ||
88 | EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); | ||
89 | |||
90 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
91 | |||
69 | /* | 92 | /* |
70 | * This function is invoked towards the end of the scheduler's initialization | 93 | * This function is invoked towards the end of the scheduler's initialization |
71 | * process. Before this is called, the idle task might contain | 94 | * process. Before this is called, the idle task might contain |
diff --git a/kernel/resource.c b/kernel/resource.c index 2d5be5d9bf5f..9c358e263534 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -219,19 +219,34 @@ void release_child_resources(struct resource *r) | |||
219 | } | 219 | } |
220 | 220 | ||
221 | /** | 221 | /** |
222 | * request_resource - request and reserve an I/O or memory resource | 222 | * request_resource_conflict - request and reserve an I/O or memory resource |
223 | * @root: root resource descriptor | 223 | * @root: root resource descriptor |
224 | * @new: resource descriptor desired by caller | 224 | * @new: resource descriptor desired by caller |
225 | * | 225 | * |
226 | * Returns 0 for success, negative error code on error. | 226 | * Returns 0 for success, conflict resource on error. |
227 | */ | 227 | */ |
228 | int request_resource(struct resource *root, struct resource *new) | 228 | struct resource *request_resource_conflict(struct resource *root, struct resource *new) |
229 | { | 229 | { |
230 | struct resource *conflict; | 230 | struct resource *conflict; |
231 | 231 | ||
232 | write_lock(&resource_lock); | 232 | write_lock(&resource_lock); |
233 | conflict = __request_resource(root, new); | 233 | conflict = __request_resource(root, new); |
234 | write_unlock(&resource_lock); | 234 | write_unlock(&resource_lock); |
235 | return conflict; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * request_resource - request and reserve an I/O or memory resource | ||
240 | * @root: root resource descriptor | ||
241 | * @new: resource descriptor desired by caller | ||
242 | * | ||
243 | * Returns 0 for success, negative error code on error. | ||
244 | */ | ||
245 | int request_resource(struct resource *root, struct resource *new) | ||
246 | { | ||
247 | struct resource *conflict; | ||
248 | |||
249 | conflict = request_resource_conflict(root, new); | ||
235 | return conflict ? -EBUSY : 0; | 250 | return conflict ? -EBUSY : 0; |
236 | } | 251 | } |
237 | 252 | ||
@@ -474,25 +489,40 @@ static struct resource * __insert_resource(struct resource *parent, struct resou | |||
474 | } | 489 | } |
475 | 490 | ||
476 | /** | 491 | /** |
477 | * insert_resource - Inserts a resource in the resource tree | 492 | * insert_resource_conflict - Inserts resource in the resource tree |
478 | * @parent: parent of the new resource | 493 | * @parent: parent of the new resource |
479 | * @new: new resource to insert | 494 | * @new: new resource to insert |
480 | * | 495 | * |
481 | * Returns 0 on success, -EBUSY if the resource can't be inserted. | 496 | * Returns 0 on success, conflict resource if the resource can't be inserted. |
482 | * | 497 | * |
483 | * This function is equivalent to request_resource when no conflict | 498 | * This function is equivalent to request_resource_conflict when no conflict |
484 | * happens. If a conflict happens, and the conflicting resources | 499 | * happens. If a conflict happens, and the conflicting resources |
485 | * entirely fit within the range of the new resource, then the new | 500 | * entirely fit within the range of the new resource, then the new |
486 | * resource is inserted and the conflicting resources become children of | 501 | * resource is inserted and the conflicting resources become children of |
487 | * the new resource. | 502 | * the new resource. |
488 | */ | 503 | */ |
489 | int insert_resource(struct resource *parent, struct resource *new) | 504 | struct resource *insert_resource_conflict(struct resource *parent, struct resource *new) |
490 | { | 505 | { |
491 | struct resource *conflict; | 506 | struct resource *conflict; |
492 | 507 | ||
493 | write_lock(&resource_lock); | 508 | write_lock(&resource_lock); |
494 | conflict = __insert_resource(parent, new); | 509 | conflict = __insert_resource(parent, new); |
495 | write_unlock(&resource_lock); | 510 | write_unlock(&resource_lock); |
511 | return conflict; | ||
512 | } | ||
513 | |||
514 | /** | ||
515 | * insert_resource - Inserts a resource in the resource tree | ||
516 | * @parent: parent of the new resource | ||
517 | * @new: new resource to insert | ||
518 | * | ||
519 | * Returns 0 on success, -EBUSY if the resource can't be inserted. | ||
520 | */ | ||
521 | int insert_resource(struct resource *parent, struct resource *new) | ||
522 | { | ||
523 | struct resource *conflict; | ||
524 | |||
525 | conflict = insert_resource_conflict(parent, new); | ||
496 | return conflict ? -EBUSY : 0; | 526 | return conflict ? -EBUSY : 0; |
497 | } | 527 | } |
498 | 528 | ||
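
kernel/resource.c now exports request_resource_conflict()/insert_resource_conflict(), which hand back the conflicting resource itself, while the original request_resource()/insert_resource() become thin wrappers that keep their old 0/-EBUSY contract. The split looks roughly like this outside the kernel; claim_region*() and struct region are invented for the demo:

	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>

	struct region { long start, end; const char *name; };

	static struct region busy = { 100, 200, "already-claimed" };

	/* returns NULL on success, or the region we collided with */
	static struct region *claim_region_conflict(struct region *new)
	{
		if (new->start <= busy.end && new->end >= busy.start)
			return &busy;
		return NULL;		/* no overlap: claimed */
	}

	/* old-style wrapper: callers that only care about success keep working */
	static int claim_region(struct region *new)
	{
		struct region *conflict = claim_region_conflict(new);

		return conflict ? -EBUSY : 0;
	}

	int main(void)
	{
		struct region a = { 300, 400, "a" };
		struct region b = { 150, 160, "b" };
		struct region *c;

		printf("claim a: %d\n", claim_region(&a));	/* 0 */
		c = claim_region_conflict(&b);
		if (c)
			printf("claim b conflicts with '%s'\n", c->name);
		return 0;
	}
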
diff --git a/kernel/sched.c b/kernel/sched.c index 117b7cad31b3..1038ca163890 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -2607,7 +2607,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
2607 | { | 2607 | { |
2608 | unsigned long flags; | 2608 | unsigned long flags; |
2609 | struct rq *rq; | 2609 | struct rq *rq; |
2610 | int cpu = get_cpu(); | 2610 | int cpu __maybe_unused = get_cpu(); |
2611 | 2611 | ||
2612 | #ifdef CONFIG_SMP | 2612 | #ifdef CONFIG_SMP |
2613 | /* | 2613 | /* |
@@ -4859,7 +4859,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | |||
4859 | int ret; | 4859 | int ret; |
4860 | cpumask_var_t mask; | 4860 | cpumask_var_t mask; |
4861 | 4861 | ||
4862 | if (len < cpumask_size()) | 4862 | if (len < nr_cpu_ids) |
4863 | return -EINVAL; | ||
4864 | if (len & (sizeof(unsigned long)-1)) | ||
4863 | return -EINVAL; | 4865 | return -EINVAL; |
4864 | 4866 | ||
4865 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) | 4867 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
@@ -4867,10 +4869,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | |||
4867 | 4869 | ||
4868 | ret = sched_getaffinity(pid, mask); | 4870 | ret = sched_getaffinity(pid, mask); |
4869 | if (ret == 0) { | 4871 | if (ret == 0) { |
4870 | if (copy_to_user(user_mask_ptr, mask, cpumask_size())) | 4872 | size_t retlen = min_t(size_t, len, cpumask_size()); |
4873 | |||
4874 | if (copy_to_user(user_mask_ptr, mask, retlen)) | ||
4871 | ret = -EFAULT; | 4875 | ret = -EFAULT; |
4872 | else | 4876 | else |
4873 | ret = cpumask_size(); | 4877 | ret = retlen; |
4874 | } | 4878 | } |
4875 | free_cpumask_var(mask); | 4879 | free_cpumask_var(mask); |
4876 | 4880 | ||
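
The kernel/sched.c hunk loosens sched_getaffinity(2)'s length check (the user buffer must cover nr_cpu_ids bits and be a multiple of sizeof(unsigned long)) and copies back at most min(len, cpumask_size()) bytes. From userspace the call is reached through the glibc wrapper; a small caller that exercises the syscall touched above (Linux-specific):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		cpu_set_t mask;
		int cpu, count = 0;

		CPU_ZERO(&mask);
		/* pid 0 means the calling thread; the kernel side is the
		 * sys_sched_getaffinity() path changed in the hunk above */
		if (sched_getaffinity(0, sizeof(mask), &mask) != 0) {
			perror("sched_getaffinity");
			return 1;
		}
		for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
			if (CPU_ISSET(cpu, &mask))
				count++;
		printf("runnable on %d CPU(s)\n", count);
		return 0;
	}
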
diff --git a/kernel/slow-work.c b/kernel/slow-work.c index 7494bbf5a270..7d3f4fa9ef4f 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c | |||
@@ -637,7 +637,7 @@ int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, | |||
637 | goto cancelled; | 637 | goto cancelled; |
638 | 638 | ||
639 | /* the timer holds a reference whilst it is pending */ | 639 | /* the timer holds a reference whilst it is pending */ |
640 | ret = work->ops->get_ref(work); | 640 | ret = slow_work_get_ref(work); |
641 | if (ret < 0) | 641 | if (ret < 0) |
642 | goto cant_get_ref; | 642 | goto cant_get_ref; |
643 | 643 | ||
diff --git a/kernel/slow-work.h b/kernel/slow-work.h index 321f3c59d732..a29ebd1ef41d 100644 --- a/kernel/slow-work.h +++ b/kernel/slow-work.h | |||
@@ -43,28 +43,28 @@ extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *); | |||
43 | */ | 43 | */ |
44 | static inline void slow_work_set_thread_pid(int id, pid_t pid) | 44 | static inline void slow_work_set_thread_pid(int id, pid_t pid) |
45 | { | 45 | { |
46 | #ifdef CONFIG_SLOW_WORK_PROC | 46 | #ifdef CONFIG_SLOW_WORK_DEBUG |
47 | slow_work_pids[id] = pid; | 47 | slow_work_pids[id] = pid; |
48 | #endif | 48 | #endif |
49 | } | 49 | } |
50 | 50 | ||
51 | static inline void slow_work_mark_time(struct slow_work *work) | 51 | static inline void slow_work_mark_time(struct slow_work *work) |
52 | { | 52 | { |
53 | #ifdef CONFIG_SLOW_WORK_PROC | 53 | #ifdef CONFIG_SLOW_WORK_DEBUG |
54 | work->mark = CURRENT_TIME; | 54 | work->mark = CURRENT_TIME; |
55 | #endif | 55 | #endif |
56 | } | 56 | } |
57 | 57 | ||
58 | static inline void slow_work_begin_exec(int id, struct slow_work *work) | 58 | static inline void slow_work_begin_exec(int id, struct slow_work *work) |
59 | { | 59 | { |
60 | #ifdef CONFIG_SLOW_WORK_PROC | 60 | #ifdef CONFIG_SLOW_WORK_DEBUG |
61 | slow_work_execs[id] = work; | 61 | slow_work_execs[id] = work; |
62 | #endif | 62 | #endif |
63 | } | 63 | } |
64 | 64 | ||
65 | static inline void slow_work_end_exec(int id, struct slow_work *work) | 65 | static inline void slow_work_end_exec(int id, struct slow_work *work) |
66 | { | 66 | { |
67 | #ifdef CONFIG_SLOW_WORK_PROC | 67 | #ifdef CONFIG_SLOW_WORK_DEBUG |
68 | write_lock(&slow_work_execs_lock); | 68 | write_lock(&slow_work_execs_lock); |
69 | slow_work_execs[id] = NULL; | 69 | slow_work_execs[id] = NULL; |
70 | write_unlock(&slow_work_execs_lock); | 70 | write_unlock(&slow_work_execs_lock); |
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 0d4c7898ab80..4b493f67dcb5 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
@@ -155,11 +155,11 @@ void softlockup_tick(void) | |||
155 | * Wake up the high-prio watchdog task twice per | 155 | * Wake up the high-prio watchdog task twice per |
156 | * threshold timespan. | 156 | * threshold timespan. |
157 | */ | 157 | */ |
158 | if (now > touch_ts + softlockup_thresh/2) | 158 | if (time_after(now - softlockup_thresh/2, touch_ts)) |
159 | wake_up_process(per_cpu(softlockup_watchdog, this_cpu)); | 159 | wake_up_process(per_cpu(softlockup_watchdog, this_cpu)); |
160 | 160 | ||
161 | /* Warn about unreasonable delays: */ | 161 | /* Warn about unreasonable delays: */ |
162 | if (now <= (touch_ts + softlockup_thresh)) | 162 | if (time_before_eq(now - softlockup_thresh, touch_ts)) |
163 | return; | 163 | return; |
164 | 164 | ||
165 | per_cpu(softlockup_print_ts, this_cpu) = touch_ts; | 165 | per_cpu(softlockup_print_ts, this_cpu) = touch_ts; |
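
kernel/softlockup.c switches its timestamp checks to time_after()/time_before_eq(), which stay correct across a counter wrap by looking at the signed difference instead of comparing the raw values. The idiom on its own, in plain C (my_time_after mirrors the kernel macro and, like it, relies on two's-complement wraparound):

	#include <limits.h>
	#include <stdio.h>

	/* wrap-safe "a is after b": subtract first, then test the sign */
	#define my_time_after(a, b)  ((long)((b) - (a)) < 0)

	int main(void)
	{
		unsigned long before_wrap = ULONG_MAX - 5;
		unsigned long after_wrap  = 10;	/* counter wrapped, but is "later" */

		/* naive comparison gets this wrong: after_wrap < before_wrap */
		printf("naive: %d\n", after_wrap > before_wrap);
		/* wrap-safe version still reports that after_wrap is later */
		printf("time_after: %d\n", my_time_after(after_wrap, before_wrap));
		return 0;
	}
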
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index 0a8a213016f0..aada0e52680a 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c | |||
@@ -22,6 +22,29 @@ | |||
22 | 22 | ||
23 | #include "tick-internal.h" | 23 | #include "tick-internal.h" |
24 | 24 | ||
25 | /* Limit min_delta to a jiffie */ | ||
26 | #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ) | ||
27 | |||
28 | static int tick_increase_min_delta(struct clock_event_device *dev) | ||
29 | { | ||
30 | /* Nothing to do if we already reached the limit */ | ||
31 | if (dev->min_delta_ns >= MIN_DELTA_LIMIT) | ||
32 | return -ETIME; | ||
33 | |||
34 | if (dev->min_delta_ns < 5000) | ||
35 | dev->min_delta_ns = 5000; | ||
36 | else | ||
37 | dev->min_delta_ns += dev->min_delta_ns >> 1; | ||
38 | |||
39 | if (dev->min_delta_ns > MIN_DELTA_LIMIT) | ||
40 | dev->min_delta_ns = MIN_DELTA_LIMIT; | ||
41 | |||
42 | printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n", | ||
43 | dev->name ? dev->name : "?", | ||
44 | (unsigned long long) dev->min_delta_ns); | ||
45 | return 0; | ||
46 | } | ||
47 | |||
25 | /** | 48 | /** |
26 | * tick_program_event internal worker function | 49 | * tick_program_event internal worker function |
27 | */ | 50 | */ |
@@ -37,23 +60,28 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires, | |||
37 | if (!ret || !force) | 60 | if (!ret || !force) |
38 | return ret; | 61 | return ret; |
39 | 62 | ||
63 | dev->retries++; | ||
40 | /* | 64 | /* |
41 | * We tried 2 times to program the device with the given | 65 | * We tried 3 times to program the device with the given |
42 | * min_delta_ns. If that's not working then we double it | 66 | * min_delta_ns. If that's not working then we increase it |
43 | * and emit a warning. | 67 | * and emit a warning. |
44 | */ | 68 | */ |
45 | if (++i > 2) { | 69 | if (++i > 2) { |
46 | /* Increase the min. delta and try again */ | 70 | /* Increase the min. delta and try again */ |
47 | if (!dev->min_delta_ns) | 71 | if (tick_increase_min_delta(dev)) { |
48 | dev->min_delta_ns = 5000; | 72 | /* |
49 | else | 73 | * Get out of the loop if min_delta_ns |
50 | dev->min_delta_ns += dev->min_delta_ns >> 1; | 74 | * hit the limit already. That's |
51 | 75 | * better than staying here forever. | |
52 | printk(KERN_WARNING | 76 | * |
53 | "CE: %s increasing min_delta_ns to %llu nsec\n", | 77 | * We clear next_event so we have a |
54 | dev->name ? dev->name : "?", | 78 | * chance that the box survives. |
55 | (unsigned long long) dev->min_delta_ns << 1); | 79 | */ |
56 | 80 | printk(KERN_WARNING | |
81 | "CE: Reprogramming failure. Giving up\n"); | ||
82 | dev->next_event.tv64 = KTIME_MAX; | ||
83 | return -ETIME; | ||
84 | } | ||
57 | i = 0; | 85 | i = 0; |
58 | } | 86 | } |
59 | 87 | ||
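
tick_increase_min_delta() bounds the old behaviour: min_delta_ns starts at 5000 ns, grows by 50% per programming failure and is clamped at one jiffy, after which tick_dev_program_event() gives up with -ETIME instead of looping forever. The back-off schedule is plain arithmetic and can be reproduced standalone (HZ below is an assumed example value):

	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000ULL
	#define HZ		250ULL			/* assumption for the demo */
	#define MIN_DELTA_LIMIT	(NSEC_PER_SEC / HZ)	/* one jiffy in ns */

	int main(void)
	{
		unsigned long long min_delta_ns = 0;
		int failures = 0;

		while (min_delta_ns < MIN_DELTA_LIMIT) {
			if (min_delta_ns < 5000)
				min_delta_ns = 5000;
			else
				min_delta_ns += min_delta_ns >> 1;

			if (min_delta_ns > MIN_DELTA_LIMIT)
				min_delta_ns = MIN_DELTA_LIMIT;

			printf("failure %2d -> min_delta_ns = %llu\n",
			       ++failures, min_delta_ns);
		}
		printf("limit reached after %d increases; next failure returns -ETIME\n",
		       failures);
		return 0;
	}
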
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 16736379a9ca..39f6177fafac 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -818,7 +818,8 @@ void update_wall_time(void) | |||
818 | shift = min(shift, maxshift); | 818 | shift = min(shift, maxshift); |
819 | while (offset >= timekeeper.cycle_interval) { | 819 | while (offset >= timekeeper.cycle_interval) { |
820 | offset = logarithmic_accumulation(offset, shift); | 820 | offset = logarithmic_accumulation(offset, shift); |
821 | shift--; | 821 | if(offset < timekeeper.cycle_interval<<shift) |
822 | shift--; | ||
822 | } | 823 | } |
823 | 824 | ||
824 | /* correct the clock when NTP error is too big */ | 825 | /* correct the clock when NTP error is too big */ |
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index bdfb8dd1050c..1a4a7dd78777 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c | |||
@@ -228,6 +228,7 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) | |||
228 | SEQ_printf(m, " event_handler: "); | 228 | SEQ_printf(m, " event_handler: "); |
229 | print_name_offset(m, dev->event_handler); | 229 | print_name_offset(m, dev->event_handler); |
230 | SEQ_printf(m, "\n"); | 230 | SEQ_printf(m, "\n"); |
231 | SEQ_printf(m, " retries: %lu\n", dev->retries); | ||
231 | } | 232 | } |
232 | 233 | ||
233 | static void timer_list_show_tickdevices(struct seq_file *m) | 234 | static void timer_list_show_tickdevices(struct seq_file *m) |
@@ -257,7 +258,7 @@ static int timer_list_show(struct seq_file *m, void *v) | |||
257 | u64 now = ktime_to_ns(ktime_get()); | 258 | u64 now = ktime_to_ns(ktime_get()); |
258 | int cpu; | 259 | int cpu; |
259 | 260 | ||
260 | SEQ_printf(m, "Timer List Version: v0.5\n"); | 261 | SEQ_printf(m, "Timer List Version: v0.6\n"); |
261 | SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES); | 262 | SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES); |
262 | SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now); | 263 | SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now); |
263 | 264 | ||
diff --git a/kernel/timer.c b/kernel/timer.c index c61a7949387f..fc965eae0e87 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -880,6 +880,7 @@ int try_to_del_timer_sync(struct timer_list *timer) | |||
880 | if (base->running_timer == timer) | 880 | if (base->running_timer == timer) |
881 | goto out; | 881 | goto out; |
882 | 882 | ||
883 | timer_stats_timer_clear_start_info(timer); | ||
883 | ret = 0; | 884 | ret = 0; |
884 | if (timer_pending(timer)) { | 885 | if (timer_pending(timer)) { |
885 | detach_timer(timer, 1); | 886 | detach_timer(timer, 1); |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 05a9f83b8819..d1187ef20caf 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -207,6 +207,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on); | |||
207 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) | 207 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) |
208 | #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ | 208 | #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ |
209 | 209 | ||
210 | #if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
211 | # define RB_FORCE_8BYTE_ALIGNMENT 0 | ||
212 | # define RB_ARCH_ALIGNMENT RB_ALIGNMENT | ||
213 | #else | ||
214 | # define RB_FORCE_8BYTE_ALIGNMENT 1 | ||
215 | # define RB_ARCH_ALIGNMENT 8U | ||
216 | #endif | ||
217 | |||
210 | /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ | 218 | /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ |
211 | #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX | 219 | #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX |
212 | 220 | ||
@@ -1547,7 +1555,7 @@ rb_update_event(struct ring_buffer_event *event, | |||
1547 | 1555 | ||
1548 | case 0: | 1556 | case 0: |
1549 | length -= RB_EVNT_HDR_SIZE; | 1557 | length -= RB_EVNT_HDR_SIZE; |
1550 | if (length > RB_MAX_SMALL_DATA) | 1558 | if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) |
1551 | event->array[0] = length; | 1559 | event->array[0] = length; |
1552 | else | 1560 | else |
1553 | event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); | 1561 | event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); |
@@ -1722,11 +1730,11 @@ static unsigned rb_calculate_event_length(unsigned length) | |||
1722 | if (!length) | 1730 | if (!length) |
1723 | length = 1; | 1731 | length = 1; |
1724 | 1732 | ||
1725 | if (length > RB_MAX_SMALL_DATA) | 1733 | if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) |
1726 | length += sizeof(event.array[0]); | 1734 | length += sizeof(event.array[0]); |
1727 | 1735 | ||
1728 | length += RB_EVNT_HDR_SIZE; | 1736 | length += RB_EVNT_HDR_SIZE; |
1729 | length = ALIGN(length, RB_ALIGNMENT); | 1737 | length = ALIGN(length, RB_ARCH_ALIGNMENT); |
1730 | 1738 | ||
1731 | return length; | 1739 | return length; |
1732 | } | 1740 | } |
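
The ring-buffer hunk adds RB_ARCH_ALIGNMENT/RB_FORCE_8BYTE_ALIGNMENT: on 64-bit architectures without efficient unaligned access, event lengths are rounded to 8-byte instead of 4-byte boundaries and always carry an explicit length word. The rounding itself is the usual align-to-power-of-two computation (ALIGN_POW2 below mirrors the kernel's ALIGN()):

	#include <stdio.h>

	/* round len up to the next multiple of a power-of-two alignment */
	#define ALIGN_POW2(len, a)  (((len) + (a) - 1) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		unsigned long len;

		for (len = 1; len <= 12; len++)
			printf("len %2lu -> 4-byte: %2lu  8-byte: %2lu\n",
			       len, ALIGN_POW2(len, 4UL), ALIGN_POW2(len, 8UL));
		return 0;
	}
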
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 81f691eb3a30..0565bb42566f 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); | |||
17 | static char *perf_trace_buf; | 17 | static char *perf_trace_buf; |
18 | static char *perf_trace_buf_nmi; | 18 | static char *perf_trace_buf_nmi; |
19 | 19 | ||
20 | typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ; | 20 | /* |
21 | * Force it to be aligned to unsigned long to avoid misaligned accesses | ||
22 | * surprises | ||

23 | */ | ||
24 | typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)]) | ||
25 | perf_trace_t; | ||
21 | 26 | ||
22 | /* Count the events in use (per event id, not per instance) */ | 27 | /* Count the events in use (per event id, not per instance) */ |
23 | static int total_ref_count; | 28 | static int total_ref_count; |
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, | |||
130 | char *trace_buf, *raw_data; | 135 | char *trace_buf, *raw_data; |
131 | int pc, cpu; | 136 | int pc, cpu; |
132 | 137 | ||
138 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); | ||
139 | |||
133 | pc = preempt_count(); | 140 | pc = preempt_count(); |
134 | 141 | ||
135 | /* Protect the per cpu buffer, begin the rcu read side */ | 142 | /* Protect the per cpu buffer, begin the rcu read side */ |
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, | |||
152 | raw_data = per_cpu_ptr(trace_buf, cpu); | 159 | raw_data = per_cpu_ptr(trace_buf, cpu); |
153 | 160 | ||
154 | /* zero the dead bytes from align to not leak stack to user */ | 161 | /* zero the dead bytes from align to not leak stack to user */ |
155 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; | 162 | memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64)); |
156 | 163 | ||
157 | entry = (struct trace_entry *)raw_data; | 164 | entry = (struct trace_entry *)raw_data; |
158 | tracing_generic_entry_update(entry, *irq_flags, pc); | 165 | tracing_generic_entry_update(entry, *irq_flags, pc); |
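
trace_event_perf.c retypes the scratch buffer from a char array to an equally sized array of unsigned long, so the buffer is naturally aligned and the trailing padding can be cleared with memset() instead of a possibly misaligned u64 store; a BUILD_BUG_ON enforces that the size divides evenly. The typedef trick in miniature (DEMO_TRACE_SIZE and demo_trace_t are demo names):

	#include <stdalign.h>
	#include <stdio.h>

	#define DEMO_TRACE_SIZE 64	/* must be a multiple of sizeof(unsigned long) */

	/* same byte size as a char buffer, but aligned like unsigned long */
	typedef unsigned long demo_trace_t[DEMO_TRACE_SIZE / sizeof(unsigned long)];

	int main(void)
	{
		_Static_assert(DEMO_TRACE_SIZE % sizeof(unsigned long) == 0,
			       "size must divide evenly, like the BUILD_BUG_ON above");

		printf("bytes: %zu, alignment: %zu\n",
		       sizeof(demo_trace_t), alignof(demo_trace_t));
		return 0;
	}
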
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8e5ec5e1ab91..1fafb4b99c9b 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -103,7 +103,8 @@ config HEADERS_CHECK | |||
103 | 103 | ||
104 | config DEBUG_SECTION_MISMATCH | 104 | config DEBUG_SECTION_MISMATCH |
105 | bool "Enable full Section mismatch analysis" | 105 | bool "Enable full Section mismatch analysis" |
106 | depends on UNDEFINED | 106 | depends on UNDEFINED || (BLACKFIN) |
107 | default y | ||
107 | # This option is on purpose disabled for now. | 108 | # This option is on purpose disabled for now. |
108 | # It will be enabled when we are down to a reasonable number | 109 | # It will be enabled when we are down to a reasonable number |
109 | # of section mismatch warnings (< 10 for an allyesconfig build) | 110 | # of section mismatch warnings (< 10 for an allyesconfig build) |
diff --git a/mm/bootmem.c b/mm/bootmem.c index d7c791ef0036..9b134460b016 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
@@ -180,19 +180,12 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end) | |||
180 | end_aligned = end & ~(BITS_PER_LONG - 1); | 180 | end_aligned = end & ~(BITS_PER_LONG - 1); |
181 | 181 | ||
182 | if (end_aligned <= start_aligned) { | 182 | if (end_aligned <= start_aligned) { |
183 | #if 1 | ||
184 | printk(KERN_DEBUG " %lx - %lx\n", start, end); | ||
185 | #endif | ||
186 | for (i = start; i < end; i++) | 183 | for (i = start; i < end; i++) |
187 | __free_pages_bootmem(pfn_to_page(i), 0); | 184 | __free_pages_bootmem(pfn_to_page(i), 0); |
188 | 185 | ||
189 | return; | 186 | return; |
190 | } | 187 | } |
191 | 188 | ||
192 | #if 1 | ||
193 | printk(KERN_DEBUG " %lx %lx - %lx %lx\n", | ||
194 | start, start_aligned, end_aligned, end); | ||
195 | #endif | ||
196 | for (i = start; i < start_aligned; i++) | 189 | for (i = start; i < start_aligned; i++) |
197 | __free_pages_bootmem(pfn_to_page(i), 0); | 190 | __free_pages_bootmem(pfn_to_page(i), 0); |
198 | 191 | ||
@@ -428,9 +421,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, | |||
428 | { | 421 | { |
429 | #ifdef CONFIG_NO_BOOTMEM | 422 | #ifdef CONFIG_NO_BOOTMEM |
430 | free_early(physaddr, physaddr + size); | 423 | free_early(physaddr, physaddr + size); |
431 | #if 0 | ||
432 | printk(KERN_DEBUG "free %lx %lx\n", physaddr, size); | ||
433 | #endif | ||
434 | #else | 424 | #else |
435 | unsigned long start, end; | 425 | unsigned long start, end; |
436 | 426 | ||
@@ -456,9 +446,6 @@ void __init free_bootmem(unsigned long addr, unsigned long size) | |||
456 | { | 446 | { |
457 | #ifdef CONFIG_NO_BOOTMEM | 447 | #ifdef CONFIG_NO_BOOTMEM |
458 | free_early(addr, addr + size); | 448 | free_early(addr, addr + size); |
459 | #if 0 | ||
460 | printk(KERN_DEBUG "free %lx %lx\n", addr, size); | ||
461 | #endif | ||
462 | #else | 449 | #else |
463 | unsigned long start, end; | 450 | unsigned long start, end; |
464 | 451 | ||
diff --git a/mm/ksm.c b/mm/ksm.c --- a/mm/ksm.c +++ b/mm/ksm.c | |||
@@ -751,7 +751,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
751 | * page | 751 | * page |
752 | */ | 752 | */ |
753 | if (page_mapcount(page) + 1 + swapped != page_count(page)) { | 753 | if (page_mapcount(page) + 1 + swapped != page_count(page)) { |
754 | set_pte_at_notify(mm, addr, ptep, entry); | 754 | set_pte_at(mm, addr, ptep, entry); |
755 | goto out_unlock; | 755 | goto out_unlock; |
756 | } | 756 | } |
757 | entry = pte_wrprotect(entry); | 757 | entry = pte_wrprotect(entry); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7973b5221fb8..9ed760dc7448 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -3691,8 +3691,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void) | |||
3691 | else | 3691 | else |
3692 | mem = vmalloc(size); | 3692 | mem = vmalloc(size); |
3693 | 3693 | ||
3694 | if (mem) | 3694 | if (!mem) |
3695 | memset(mem, 0, size); | 3695 | return NULL; |
3696 | |||
3697 | memset(mem, 0, size); | ||
3696 | mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); | 3698 | mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); |
3697 | if (!mem->stat) { | 3699 | if (!mem->stat) { |
3698 | if (size < PAGE_SIZE) | 3700 | if (size < PAGE_SIZE) |
@@ -3946,28 +3948,6 @@ one_by_one: | |||
3946 | } | 3948 | } |
3947 | return ret; | 3949 | return ret; |
3948 | } | 3950 | } |
3949 | #else /* !CONFIG_MMU */ | ||
3950 | static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | ||
3951 | struct cgroup *cgroup, | ||
3952 | struct task_struct *p, | ||
3953 | bool threadgroup) | ||
3954 | { | ||
3955 | return 0; | ||
3956 | } | ||
3957 | static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, | ||
3958 | struct cgroup *cgroup, | ||
3959 | struct task_struct *p, | ||
3960 | bool threadgroup) | ||
3961 | { | ||
3962 | } | ||
3963 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, | ||
3964 | struct cgroup *cont, | ||
3965 | struct cgroup *old_cont, | ||
3966 | struct task_struct *p, | ||
3967 | bool threadgroup) | ||
3968 | { | ||
3969 | } | ||
3970 | #endif | ||
3971 | 3951 | ||
3972 | /** | 3952 | /** |
3973 | * is_target_pte_for_mc - check a pte whether it is valid for move charge | 3953 | * is_target_pte_for_mc - check a pte whether it is valid for move charge |
@@ -4330,6 +4310,28 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, | |||
4330 | } | 4310 | } |
4331 | mem_cgroup_clear_mc(); | 4311 | mem_cgroup_clear_mc(); |
4332 | } | 4312 | } |
4313 | #else /* !CONFIG_MMU */ | ||
4314 | static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | ||
4315 | struct cgroup *cgroup, | ||
4316 | struct task_struct *p, | ||
4317 | bool threadgroup) | ||
4318 | { | ||
4319 | return 0; | ||
4320 | } | ||
4321 | static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, | ||
4322 | struct cgroup *cgroup, | ||
4323 | struct task_struct *p, | ||
4324 | bool threadgroup) | ||
4325 | { | ||
4326 | } | ||
4327 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, | ||
4328 | struct cgroup *cont, | ||
4329 | struct cgroup *old_cont, | ||
4330 | struct task_struct *p, | ||
4331 | bool threadgroup) | ||
4332 | { | ||
4333 | } | ||
4334 | #endif | ||
4333 | 4335 | ||
4334 | struct cgroup_subsys mem_cgroup_subsys = { | 4336 | struct cgroup_subsys mem_cgroup_subsys = { |
4335 | .name = "memory", | 4337 | .name = "memory", |
diff --git a/mm/memory.c b/mm/memory.c index 5b7f2002e54b..bc9ba5a1f5b9 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -130,6 +130,7 @@ void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm) | |||
130 | 130 | ||
131 | for (i = 0; i < NR_MM_COUNTERS; i++) { | 131 | for (i = 0; i < NR_MM_COUNTERS; i++) { |
132 | if (task->rss_stat.count[i]) { | 132 | if (task->rss_stat.count[i]) { |
133 | BUG_ON(!mm); | ||
133 | add_mm_counter(mm, i, task->rss_stat.count[i]); | 134 | add_mm_counter(mm, i, task->rss_stat.count[i]); |
134 | task->rss_stat.count[i] = 0; | 135 | task->rss_stat.count[i] = 0; |
135 | } | 136 | } |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 643f66e10187..8034abd3a135 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -806,9 +806,13 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, | |||
806 | 806 | ||
807 | err = 0; | 807 | err = 0; |
808 | if (nmask) { | 808 | if (nmask) { |
809 | task_lock(current); | 809 | if (mpol_store_user_nodemask(pol)) { |
810 | get_policy_nodemask(pol, nmask); | 810 | *nmask = pol->w.user_nodemask; |
811 | task_unlock(current); | 811 | } else { |
812 | task_lock(current); | ||
813 | get_policy_nodemask(pol, nmask); | ||
814 | task_unlock(current); | ||
815 | } | ||
812 | } | 816 | } |
813 | 817 | ||
814 | out: | 818 | out: |
@@ -2195,8 +2199,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) | |||
2195 | char *rest = nodelist; | 2199 | char *rest = nodelist; |
2196 | while (isdigit(*rest)) | 2200 | while (isdigit(*rest)) |
2197 | rest++; | 2201 | rest++; |
2198 | if (!*rest) | 2202 | if (*rest) |
2199 | err = 0; | 2203 | goto out; |
2200 | } | 2204 | } |
2201 | break; | 2205 | break; |
2202 | case MPOL_INTERLEAVE: | 2206 | case MPOL_INTERLEAVE: |
@@ -2205,7 +2209,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) | |||
2205 | */ | 2209 | */ |
2206 | if (!nodelist) | 2210 | if (!nodelist) |
2207 | nodes = node_states[N_HIGH_MEMORY]; | 2211 | nodes = node_states[N_HIGH_MEMORY]; |
2208 | err = 0; | ||
2209 | break; | 2212 | break; |
2210 | case MPOL_LOCAL: | 2213 | case MPOL_LOCAL: |
2211 | /* | 2214 | /* |
@@ -2215,11 +2218,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) | |||
2215 | goto out; | 2218 | goto out; |
2216 | mode = MPOL_PREFERRED; | 2219 | mode = MPOL_PREFERRED; |
2217 | break; | 2220 | break; |
2218 | 2221 | case MPOL_DEFAULT: | |
2219 | /* | 2222 | /* |
2220 | * case MPOL_BIND: mpol_new() enforces non-empty nodemask. | 2223 | * Insist on an empty nodelist |
2221 | * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags. | 2224 | */ |
2222 | */ | 2225 | if (!nodelist) |
2226 | err = 0; | ||
2227 | goto out; | ||
2228 | case MPOL_BIND: | ||
2229 | /* | ||
2230 | * Insist on a nodelist | ||
2231 | */ | ||
2232 | if (!nodelist) | ||
2233 | goto out; | ||
2223 | } | 2234 | } |
2224 | 2235 | ||
2225 | mode_flags = 0; | 2236 | mode_flags = 0; |
@@ -2233,13 +2244,14 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) | |||
2233 | else if (!strcmp(flags, "relative")) | 2244 | else if (!strcmp(flags, "relative")) |
2234 | mode_flags |= MPOL_F_RELATIVE_NODES; | 2245 | mode_flags |= MPOL_F_RELATIVE_NODES; |
2235 | else | 2246 | else |
2236 | err = 1; | 2247 | goto out; |
2237 | } | 2248 | } |
2238 | 2249 | ||
2239 | new = mpol_new(mode, mode_flags, &nodes); | 2250 | new = mpol_new(mode, mode_flags, &nodes); |
2240 | if (IS_ERR(new)) | 2251 | if (IS_ERR(new)) |
2241 | err = 1; | 2252 | goto out; |
2242 | else { | 2253 | |
2254 | { | ||
2243 | int ret; | 2255 | int ret; |
2244 | NODEMASK_SCRATCH(scratch); | 2256 | NODEMASK_SCRATCH(scratch); |
2245 | if (scratch) { | 2257 | if (scratch) { |
@@ -2250,13 +2262,15 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) | |||
2250 | ret = -ENOMEM; | 2262 | ret = -ENOMEM; |
2251 | NODEMASK_SCRATCH_FREE(scratch); | 2263 | NODEMASK_SCRATCH_FREE(scratch); |
2252 | if (ret) { | 2264 | if (ret) { |
2253 | err = 1; | ||
2254 | mpol_put(new); | 2265 | mpol_put(new); |
2255 | } else if (no_context) { | 2266 | goto out; |
2256 | /* save for contextualization */ | ||
2257 | new->w.user_nodemask = nodes; | ||
2258 | } | 2267 | } |
2259 | } | 2268 | } |
2269 | err = 0; | ||
2270 | if (no_context) { | ||
2271 | /* save for contextualization */ | ||
2272 | new->w.user_nodemask = nodes; | ||
2273 | } | ||
2260 | 2274 | ||
2261 | out: | 2275 | out: |
2262 | /* Restore string for error message */ | 2276 | /* Restore string for error message */ |
diff --git a/mm/mmu_context.c b/mm/mmu_context.c index 0777654147c9..9e82e937000e 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c | |||
@@ -53,6 +53,7 @@ void unuse_mm(struct mm_struct *mm) | |||
53 | struct task_struct *tsk = current; | 53 | struct task_struct *tsk = current; |
54 | 54 | ||
55 | task_lock(tsk); | 55 | task_lock(tsk); |
56 | sync_mm_rss(tsk, mm); | ||
56 | tsk->mm = NULL; | 57 | tsk->mm = NULL; |
57 | /* active_mm is still 'mm' */ | 58 | /* active_mm is still 'mm' */ |
58 | enter_lazy_tlb(mm, tsk); | 59 | enter_lazy_tlb(mm, tsk); |
diff --git a/mm/nommu.c b/mm/nommu.c index 605ace8982a8..63fa17d121f0 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -146,7 +146,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
146 | (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); | 146 | (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); |
147 | 147 | ||
148 | for (i = 0; i < nr_pages; i++) { | 148 | for (i = 0; i < nr_pages; i++) { |
149 | vma = find_extend_vma(mm, start); | 149 | vma = find_vma(mm, start); |
150 | if (!vma) | 150 | if (!vma) |
151 | goto finish_or_fault; | 151 | goto finish_or_fault; |
152 | 152 | ||
@@ -162,7 +162,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
162 | } | 162 | } |
163 | if (vmas) | 163 | if (vmas) |
164 | vmas[i] = vma; | 164 | vmas[i] = vma; |
165 | start += PAGE_SIZE; | 165 | start = (start + PAGE_SIZE) & PAGE_MASK; |
166 | } | 166 | } |
167 | 167 | ||
168 | return i; | 168 | return i; |
@@ -764,7 +764,7 @@ EXPORT_SYMBOL(find_vma); | |||
764 | */ | 764 | */ |
765 | struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) | 765 | struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) |
766 | { | 766 | { |
767 | return find_vma(mm, addr & PAGE_MASK); | 767 | return find_vma(mm, addr); |
768 | } | 768 | } |
769 | 769 | ||
770 | /* | 770 | /* |
@@ -1040,10 +1040,9 @@ static int do_mmap_shared_file(struct vm_area_struct *vma) | |||
1040 | if (ret != -ENOSYS) | 1040 | if (ret != -ENOSYS) |
1041 | return ret; | 1041 | return ret; |
1042 | 1042 | ||
1043 | /* getting an ENOSYS error indicates that direct mmap isn't | 1043 | /* getting -ENOSYS indicates that direct mmap isn't possible (as |
1044 | * possible (as opposed to tried but failed) so we'll fall | 1044 | * opposed to tried but failed) so we can only give a suitable error as |
1045 | * through to making a private copy of the data and mapping | 1045 | * it's not possible to make a private copy if MAP_SHARED was given */ |
1046 | * that if we can */ | ||
1047 | return -ENODEV; | 1046 | return -ENODEV; |
1048 | } | 1047 | } |
1049 | 1048 | ||
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 453512266ea1..db783d7af5a3 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -378,6 +378,8 @@ static void vlan_transfer_features(struct net_device *dev, | |||
378 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | 378 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) |
379 | vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; | 379 | vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; |
380 | #endif | 380 | #endif |
381 | vlandev->real_num_tx_queues = dev->real_num_tx_queues; | ||
382 | BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues); | ||
381 | 383 | ||
382 | if (old_features != vlandev->features) | 384 | if (old_features != vlandev->features) |
383 | netdev_features_change(vlandev); | 385 | netdev_features_change(vlandev); |
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index c0316e0ca6e8..c584a0af77d3 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
@@ -11,7 +11,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | |||
11 | if (netpoll_rx(skb)) | 11 | if (netpoll_rx(skb)) |
12 | return NET_RX_DROP; | 12 | return NET_RX_DROP; |
13 | 13 | ||
14 | if (skb_bond_should_drop(skb)) | 14 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) |
15 | goto drop; | 15 | goto drop; |
16 | 16 | ||
17 | skb->skb_iif = skb->dev->ifindex; | 17 | skb->skb_iif = skb->dev->ifindex; |
@@ -83,7 +83,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, | |||
83 | { | 83 | { |
84 | struct sk_buff *p; | 84 | struct sk_buff *p; |
85 | 85 | ||
86 | if (skb_bond_should_drop(skb)) | 86 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) |
87 | goto drop; | 87 | goto drop; |
88 | 88 | ||
89 | skb->skb_iif = skb->dev->ifindex; | 89 | skb->skb_iif = skb->dev->ifindex; |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 9e83272fc5b0..2fd057c81bbf 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -361,6 +361,14 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, | |||
361 | return ret; | 361 | return ret; |
362 | } | 362 | } |
363 | 363 | ||
364 | static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
365 | { | ||
366 | struct net_device *rdev = vlan_dev_info(dev)->real_dev; | ||
367 | const struct net_device_ops *ops = rdev->netdev_ops; | ||
368 | |||
369 | return ops->ndo_select_queue(rdev, skb); | ||
370 | } | ||
371 | |||
364 | static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) | 372 | static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) |
365 | { | 373 | { |
366 | /* TODO: gotta make sure the underlying layer can handle it, | 374 | /* TODO: gotta make sure the underlying layer can handle it, |
@@ -688,7 +696,8 @@ static const struct header_ops vlan_header_ops = { | |||
688 | .parse = eth_header_parse, | 696 | .parse = eth_header_parse, |
689 | }; | 697 | }; |
690 | 698 | ||
691 | static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops; | 699 | static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops, |
700 | vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq; | ||
692 | 701 | ||
693 | static int vlan_dev_init(struct net_device *dev) | 702 | static int vlan_dev_init(struct net_device *dev) |
694 | { | 703 | { |
@@ -722,11 +731,17 @@ static int vlan_dev_init(struct net_device *dev) | |||
722 | if (real_dev->features & NETIF_F_HW_VLAN_TX) { | 731 | if (real_dev->features & NETIF_F_HW_VLAN_TX) { |
723 | dev->header_ops = real_dev->header_ops; | 732 | dev->header_ops = real_dev->header_ops; |
724 | dev->hard_header_len = real_dev->hard_header_len; | 733 | dev->hard_header_len = real_dev->hard_header_len; |
725 | dev->netdev_ops = &vlan_netdev_accel_ops; | 734 | if (real_dev->netdev_ops->ndo_select_queue) |
735 | dev->netdev_ops = &vlan_netdev_accel_ops_sq; | ||
736 | else | ||
737 | dev->netdev_ops = &vlan_netdev_accel_ops; | ||
726 | } else { | 738 | } else { |
727 | dev->header_ops = &vlan_header_ops; | 739 | dev->header_ops = &vlan_header_ops; |
728 | dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; | 740 | dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; |
729 | dev->netdev_ops = &vlan_netdev_ops; | 741 | if (real_dev->netdev_ops->ndo_select_queue) |
742 | dev->netdev_ops = &vlan_netdev_ops_sq; | ||
743 | else | ||
744 | dev->netdev_ops = &vlan_netdev_ops; | ||
730 | } | 745 | } |
731 | 746 | ||
732 | if (is_vlan_dev(real_dev)) | 747 | if (is_vlan_dev(real_dev)) |
@@ -865,6 +880,56 @@ static const struct net_device_ops vlan_netdev_accel_ops = { | |||
865 | #endif | 880 | #endif |
866 | }; | 881 | }; |
867 | 882 | ||
883 | static const struct net_device_ops vlan_netdev_ops_sq = { | ||
884 | .ndo_select_queue = vlan_dev_select_queue, | ||
885 | .ndo_change_mtu = vlan_dev_change_mtu, | ||
886 | .ndo_init = vlan_dev_init, | ||
887 | .ndo_uninit = vlan_dev_uninit, | ||
888 | .ndo_open = vlan_dev_open, | ||
889 | .ndo_stop = vlan_dev_stop, | ||
890 | .ndo_start_xmit = vlan_dev_hard_start_xmit, | ||
891 | .ndo_validate_addr = eth_validate_addr, | ||
892 | .ndo_set_mac_address = vlan_dev_set_mac_address, | ||
893 | .ndo_set_rx_mode = vlan_dev_set_rx_mode, | ||
894 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, | ||
895 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | ||
896 | .ndo_do_ioctl = vlan_dev_ioctl, | ||
897 | .ndo_neigh_setup = vlan_dev_neigh_setup, | ||
898 | .ndo_get_stats = vlan_dev_get_stats, | ||
899 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | ||
900 | .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, | ||
901 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, | ||
902 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, | ||
903 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, | ||
904 | .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, | ||
905 | #endif | ||
906 | }; | ||
907 | |||
908 | static const struct net_device_ops vlan_netdev_accel_ops_sq = { | ||
909 | .ndo_select_queue = vlan_dev_select_queue, | ||
910 | .ndo_change_mtu = vlan_dev_change_mtu, | ||
911 | .ndo_init = vlan_dev_init, | ||
912 | .ndo_uninit = vlan_dev_uninit, | ||
913 | .ndo_open = vlan_dev_open, | ||
914 | .ndo_stop = vlan_dev_stop, | ||
915 | .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit, | ||
916 | .ndo_validate_addr = eth_validate_addr, | ||
917 | .ndo_set_mac_address = vlan_dev_set_mac_address, | ||
918 | .ndo_set_rx_mode = vlan_dev_set_rx_mode, | ||
919 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, | ||
920 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | ||
921 | .ndo_do_ioctl = vlan_dev_ioctl, | ||
922 | .ndo_neigh_setup = vlan_dev_neigh_setup, | ||
923 | .ndo_get_stats = vlan_dev_get_stats, | ||
924 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | ||
925 | .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, | ||
926 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, | ||
927 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, | ||
928 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, | ||
929 | .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, | ||
930 | #endif | ||
931 | }; | ||
932 | |||
868 | void vlan_setup(struct net_device *dev) | 933 | void vlan_setup(struct net_device *dev) |
869 | { | 934 | { |
870 | ether_setup(dev); | 935 | ether_setup(dev); |
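The two extra ops tables added above exist because the core transmit path consults ndo_select_queue only when the pointer is non-NULL; a VLAN device therefore advertises the hook only when its real device provides one, so queue selection can be delegated instead of hashed locally. A minimal sketch of that dispatch follows, using a made-up example_pick_tx_queue() helper rather than the actual core routine:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: how a non-NULL ndo_select_queue changes which
 * tx queue is picked.  Simplified, not the literal net/core code. */
static u16 example_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_select_queue)
		/* e.g. vlan_dev_select_queue(), which forwards to real_dev */
		return ops->ndo_select_queue(dev, skb);

	/* otherwise fall back to hashing over the device's own queues */
	return skb_tx_hash(dev, skb);
}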
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index cafb55b0cea5..05fd125f74fe 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -8,8 +8,7 @@ | |||
8 | #include <net/bluetooth/bluetooth.h> | 8 | #include <net/bluetooth/bluetooth.h> |
9 | #include <net/bluetooth/hci_core.h> | 9 | #include <net/bluetooth/hci_core.h> |
10 | 10 | ||
11 | struct class *bt_class = NULL; | 11 | static struct class *bt_class; |
12 | EXPORT_SYMBOL_GPL(bt_class); | ||
13 | 12 | ||
14 | struct dentry *bt_debugfs = NULL; | 13 | struct dentry *bt_debugfs = NULL; |
15 | EXPORT_SYMBOL_GPL(bt_debugfs); | 14 | EXPORT_SYMBOL_GPL(bt_debugfs); |
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 4db7ae2fe07d..7794a2e2adce 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -40,6 +40,8 @@ | |||
40 | #include <linux/skbuff.h> | 40 | #include <linux/skbuff.h> |
41 | #include <linux/list.h> | 41 | #include <linux/list.h> |
42 | #include <linux/device.h> | 42 | #include <linux/device.h> |
43 | #include <linux/debugfs.h> | ||
44 | #include <linux/seq_file.h> | ||
43 | #include <linux/uaccess.h> | 45 | #include <linux/uaccess.h> |
44 | #include <linux/crc16.h> | 46 | #include <linux/crc16.h> |
45 | #include <net/sock.h> | 47 | #include <net/sock.h> |
@@ -2830,6 +2832,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2830 | int len = cmd->len - sizeof(*rsp); | 2832 | int len = cmd->len - sizeof(*rsp); |
2831 | char req[64]; | 2833 | char req[64]; |
2832 | 2834 | ||
2835 | if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { | ||
2836 | l2cap_send_disconn_req(conn, sk); | ||
2837 | goto done; | ||
2838 | } | ||
2839 | |||
2833 | /* throw out any old stored conf requests */ | 2840 | /* throw out any old stored conf requests */ |
2834 | result = L2CAP_CONF_SUCCESS; | 2841 | result = L2CAP_CONF_SUCCESS; |
2835 | len = l2cap_parse_conf_rsp(sk, rsp->data, | 2842 | len = l2cap_parse_conf_rsp(sk, rsp->data, |
@@ -3937,31 +3944,42 @@ drop: | |||
3937 | return 0; | 3944 | return 0; |
3938 | } | 3945 | } |
3939 | 3946 | ||
3940 | static ssize_t l2cap_sysfs_show(struct class *dev, | 3947 | static int l2cap_debugfs_show(struct seq_file *f, void *p) |
3941 | struct class_attribute *attr, | ||
3942 | char *buf) | ||
3943 | { | 3948 | { |
3944 | struct sock *sk; | 3949 | struct sock *sk; |
3945 | struct hlist_node *node; | 3950 | struct hlist_node *node; |
3946 | char *str = buf; | ||
3947 | 3951 | ||
3948 | read_lock_bh(&l2cap_sk_list.lock); | 3952 | read_lock_bh(&l2cap_sk_list.lock); |
3949 | 3953 | ||
3950 | sk_for_each(sk, node, &l2cap_sk_list.head) { | 3954 | sk_for_each(sk, node, &l2cap_sk_list.head) { |
3951 | struct l2cap_pinfo *pi = l2cap_pi(sk); | 3955 | struct l2cap_pinfo *pi = l2cap_pi(sk); |
3952 | 3956 | ||
3953 | str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", | 3957 | seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", |
3954 | batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), | 3958 | batostr(&bt_sk(sk)->src), |
3955 | sk->sk_state, __le16_to_cpu(pi->psm), pi->scid, | 3959 | batostr(&bt_sk(sk)->dst), |
3956 | pi->dcid, pi->imtu, pi->omtu, pi->sec_level); | 3960 | sk->sk_state, __le16_to_cpu(pi->psm), |
3961 | pi->scid, pi->dcid, | ||
3962 | pi->imtu, pi->omtu, pi->sec_level); | ||
3957 | } | 3963 | } |
3958 | 3964 | ||
3959 | read_unlock_bh(&l2cap_sk_list.lock); | 3965 | read_unlock_bh(&l2cap_sk_list.lock); |
3960 | 3966 | ||
3961 | return str - buf; | 3967 | return 0; |
3962 | } | 3968 | } |
3963 | 3969 | ||
3964 | static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL); | 3970 | static int l2cap_debugfs_open(struct inode *inode, struct file *file) |
3971 | { | ||
3972 | return single_open(file, l2cap_debugfs_show, inode->i_private); | ||
3973 | } | ||
3974 | |||
3975 | static const struct file_operations l2cap_debugfs_fops = { | ||
3976 | .open = l2cap_debugfs_open, | ||
3977 | .read = seq_read, | ||
3978 | .llseek = seq_lseek, | ||
3979 | .release = single_release, | ||
3980 | }; | ||
3981 | |||
3982 | static struct dentry *l2cap_debugfs; | ||
3965 | 3983 | ||
3966 | static const struct proto_ops l2cap_sock_ops = { | 3984 | static const struct proto_ops l2cap_sock_ops = { |
3967 | .family = PF_BLUETOOTH, | 3985 | .family = PF_BLUETOOTH, |
@@ -4021,8 +4039,12 @@ static int __init l2cap_init(void) | |||
4021 | goto error; | 4039 | goto error; |
4022 | } | 4040 | } |
4023 | 4041 | ||
4024 | if (class_create_file(bt_class, &class_attr_l2cap) < 0) | 4042 | if (bt_debugfs) { |
4025 | BT_ERR("Failed to create L2CAP info file"); | 4043 | l2cap_debugfs = debugfs_create_file("l2cap", 0444, |
4044 | bt_debugfs, NULL, &l2cap_debugfs_fops); | ||
4045 | if (!l2cap_debugfs) | ||
4046 | BT_ERR("Failed to create L2CAP debug file"); | ||
4047 | } | ||
4026 | 4048 | ||
4027 | BT_INFO("L2CAP ver %s", VERSION); | 4049 | BT_INFO("L2CAP ver %s", VERSION); |
4028 | BT_INFO("L2CAP socket layer initialized"); | 4050 | BT_INFO("L2CAP socket layer initialized"); |
@@ -4036,7 +4058,7 @@ error: | |||
4036 | 4058 | ||
4037 | static void __exit l2cap_exit(void) | 4059 | static void __exit l2cap_exit(void) |
4038 | { | 4060 | { |
4039 | class_remove_file(bt_class, &class_attr_l2cap); | 4061 | debugfs_remove(l2cap_debugfs); |
4040 | 4062 | ||
4041 | if (bt_sock_unregister(BTPROTO_L2CAP) < 0) | 4063 | if (bt_sock_unregister(BTPROTO_L2CAP) < 0) |
4042 | BT_ERR("L2CAP socket unregistration failed"); | 4064 | BT_ERR("L2CAP socket unregistration failed"); |
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index db8a68e1a5ba..13f114e8b0f9 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <linux/init.h> | 33 | #include <linux/init.h> |
34 | #include <linux/wait.h> | 34 | #include <linux/wait.h> |
35 | #include <linux/device.h> | 35 | #include <linux/device.h> |
36 | #include <linux/debugfs.h> | ||
37 | #include <linux/seq_file.h> | ||
36 | #include <linux/net.h> | 38 | #include <linux/net.h> |
37 | #include <linux/mutex.h> | 39 | #include <linux/mutex.h> |
38 | #include <linux/kthread.h> | 40 | #include <linux/kthread.h> |
@@ -2098,13 +2100,10 @@ static struct hci_cb rfcomm_cb = { | |||
2098 | .security_cfm = rfcomm_security_cfm | 2100 | .security_cfm = rfcomm_security_cfm |
2099 | }; | 2101 | }; |
2100 | 2102 | ||
2101 | static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, | 2103 | static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x) |
2102 | struct class_attribute *attr, | ||
2103 | char *buf) | ||
2104 | { | 2104 | { |
2105 | struct rfcomm_session *s; | 2105 | struct rfcomm_session *s; |
2106 | struct list_head *pp, *p; | 2106 | struct list_head *pp, *p; |
2107 | char *str = buf; | ||
2108 | 2107 | ||
2109 | rfcomm_lock(); | 2108 | rfcomm_lock(); |
2110 | 2109 | ||
@@ -2114,18 +2113,32 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, | |||
2114 | struct sock *sk = s->sock->sk; | 2113 | struct sock *sk = s->sock->sk; |
2115 | struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); | 2114 | struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); |
2116 | 2115 | ||
2117 | str += sprintf(str, "%s %s %ld %d %d %d %d\n", | 2116 | seq_printf(f, "%s %s %ld %d %d %d %d\n", |
2118 | batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), | 2117 | batostr(&bt_sk(sk)->src), |
2119 | d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits); | 2118 | batostr(&bt_sk(sk)->dst), |
2119 | d->state, d->dlci, d->mtu, | ||
2120 | d->rx_credits, d->tx_credits); | ||
2120 | } | 2121 | } |
2121 | } | 2122 | } |
2122 | 2123 | ||
2123 | rfcomm_unlock(); | 2124 | rfcomm_unlock(); |
2124 | 2125 | ||
2125 | return (str - buf); | 2126 | return 0; |
2126 | } | 2127 | } |
2127 | 2128 | ||
2128 | static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL); | 2129 | static int rfcomm_dlc_debugfs_open(struct inode *inode, struct file *file) |
2130 | { | ||
2131 | return single_open(file, rfcomm_dlc_debugfs_show, inode->i_private); | ||
2132 | } | ||
2133 | |||
2134 | static const struct file_operations rfcomm_dlc_debugfs_fops = { | ||
2135 | .open = rfcomm_dlc_debugfs_open, | ||
2136 | .read = seq_read, | ||
2137 | .llseek = seq_lseek, | ||
2138 | .release = single_release, | ||
2139 | }; | ||
2140 | |||
2141 | static struct dentry *rfcomm_dlc_debugfs; | ||
2129 | 2142 | ||
2130 | /* ---- Initialization ---- */ | 2143 | /* ---- Initialization ---- */ |
2131 | static int __init rfcomm_init(void) | 2144 | static int __init rfcomm_init(void) |
@@ -2142,8 +2155,12 @@ static int __init rfcomm_init(void) | |||
2142 | goto unregister; | 2155 | goto unregister; |
2143 | } | 2156 | } |
2144 | 2157 | ||
2145 | if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) | 2158 | if (bt_debugfs) { |
2146 | BT_ERR("Failed to create RFCOMM info file"); | 2159 | rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444, |
2160 | bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops); | ||
2161 | if (!rfcomm_dlc_debugfs) | ||
2162 | BT_ERR("Failed to create RFCOMM debug file"); | ||
2163 | } | ||
2147 | 2164 | ||
2148 | err = rfcomm_init_ttys(); | 2165 | err = rfcomm_init_ttys(); |
2149 | if (err < 0) | 2166 | if (err < 0) |
@@ -2171,7 +2188,7 @@ unregister: | |||
2171 | 2188 | ||
2172 | static void __exit rfcomm_exit(void) | 2189 | static void __exit rfcomm_exit(void) |
2173 | { | 2190 | { |
2174 | class_remove_file(bt_class, &class_attr_rfcomm_dlc); | 2191 | debugfs_remove(rfcomm_dlc_debugfs); |
2175 | 2192 | ||
2176 | hci_unregister_cb(&rfcomm_cb); | 2193 | hci_unregister_cb(&rfcomm_cb); |
2177 | 2194 | ||
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index ca87d6ac6a20..7f439765403d 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -40,6 +40,8 @@ | |||
40 | #include <linux/skbuff.h> | 40 | #include <linux/skbuff.h> |
41 | #include <linux/list.h> | 41 | #include <linux/list.h> |
42 | #include <linux/device.h> | 42 | #include <linux/device.h> |
43 | #include <linux/debugfs.h> | ||
44 | #include <linux/seq_file.h> | ||
43 | #include <net/sock.h> | 45 | #include <net/sock.h> |
44 | 46 | ||
45 | #include <asm/system.h> | 47 | #include <asm/system.h> |
@@ -1061,28 +1063,38 @@ done: | |||
1061 | return result; | 1063 | return result; |
1062 | } | 1064 | } |
1063 | 1065 | ||
1064 | static ssize_t rfcomm_sock_sysfs_show(struct class *dev, | 1066 | static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) |
1065 | struct class_attribute *attr, | ||
1066 | char *buf) | ||
1067 | { | 1067 | { |
1068 | struct sock *sk; | 1068 | struct sock *sk; |
1069 | struct hlist_node *node; | 1069 | struct hlist_node *node; |
1070 | char *str = buf; | ||
1071 | 1070 | ||
1072 | read_lock_bh(&rfcomm_sk_list.lock); | 1071 | read_lock_bh(&rfcomm_sk_list.lock); |
1073 | 1072 | ||
1074 | sk_for_each(sk, node, &rfcomm_sk_list.head) { | 1073 | sk_for_each(sk, node, &rfcomm_sk_list.head) { |
1075 | str += sprintf(str, "%s %s %d %d\n", | 1074 | seq_printf(f, "%s %s %d %d\n", |
1076 | batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), | 1075 | batostr(&bt_sk(sk)->src), |
1076 | batostr(&bt_sk(sk)->dst), | ||
1077 | sk->sk_state, rfcomm_pi(sk)->channel); | 1077 | sk->sk_state, rfcomm_pi(sk)->channel); |
1078 | } | 1078 | } |
1079 | 1079 | ||
1080 | read_unlock_bh(&rfcomm_sk_list.lock); | 1080 | read_unlock_bh(&rfcomm_sk_list.lock); |
1081 | 1081 | ||
1082 | return (str - buf); | 1082 | return 0; |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | static CLASS_ATTR(rfcomm, S_IRUGO, rfcomm_sock_sysfs_show, NULL); | 1085 | static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file) |
1086 | { | ||
1087 | return single_open(file, rfcomm_sock_debugfs_show, inode->i_private); | ||
1088 | } | ||
1089 | |||
1090 | static const struct file_operations rfcomm_sock_debugfs_fops = { | ||
1091 | .open = rfcomm_sock_debugfs_open, | ||
1092 | .read = seq_read, | ||
1093 | .llseek = seq_lseek, | ||
1094 | .release = single_release, | ||
1095 | }; | ||
1096 | |||
1097 | static struct dentry *rfcomm_sock_debugfs; | ||
1086 | 1098 | ||
1087 | static const struct proto_ops rfcomm_sock_ops = { | 1099 | static const struct proto_ops rfcomm_sock_ops = { |
1088 | .family = PF_BLUETOOTH, | 1100 | .family = PF_BLUETOOTH, |
@@ -1122,8 +1134,12 @@ int __init rfcomm_init_sockets(void) | |||
1122 | if (err < 0) | 1134 | if (err < 0) |
1123 | goto error; | 1135 | goto error; |
1124 | 1136 | ||
1125 | if (class_create_file(bt_class, &class_attr_rfcomm) < 0) | 1137 | if (bt_debugfs) { |
1126 | BT_ERR("Failed to create RFCOMM info file"); | 1138 | rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, |
1139 | bt_debugfs, NULL, &rfcomm_sock_debugfs_fops); | ||
1140 | if (!rfcomm_sock_debugfs) | ||
1141 | BT_ERR("Failed to create RFCOMM debug file"); | ||
1142 | } | ||
1127 | 1143 | ||
1128 | BT_INFO("RFCOMM socket layer initialized"); | 1144 | BT_INFO("RFCOMM socket layer initialized"); |
1129 | 1145 | ||
@@ -1137,7 +1153,7 @@ error: | |||
1137 | 1153 | ||
1138 | void rfcomm_cleanup_sockets(void) | 1154 | void rfcomm_cleanup_sockets(void) |
1139 | { | 1155 | { |
1140 | class_remove_file(bt_class, &class_attr_rfcomm); | 1156 | debugfs_remove(rfcomm_sock_debugfs); |
1141 | 1157 | ||
1142 | if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) | 1158 | if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) |
1143 | BT_ERR("RFCOMM socket layer unregistration failed"); | 1159 | BT_ERR("RFCOMM socket layer unregistration failed"); |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index f93b939539bc..e5b16b76b22e 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -38,6 +38,8 @@ | |||
38 | #include <linux/socket.h> | 38 | #include <linux/socket.h> |
39 | #include <linux/skbuff.h> | 39 | #include <linux/skbuff.h> |
40 | #include <linux/device.h> | 40 | #include <linux/device.h> |
41 | #include <linux/debugfs.h> | ||
42 | #include <linux/seq_file.h> | ||
41 | #include <linux/list.h> | 43 | #include <linux/list.h> |
42 | #include <net/sock.h> | 44 | #include <net/sock.h> |
43 | 45 | ||
@@ -953,28 +955,36 @@ drop: | |||
953 | return 0; | 955 | return 0; |
954 | } | 956 | } |
955 | 957 | ||
956 | static ssize_t sco_sysfs_show(struct class *dev, | 958 | static int sco_debugfs_show(struct seq_file *f, void *p) |
957 | struct class_attribute *attr, | ||
958 | char *buf) | ||
959 | { | 959 | { |
960 | struct sock *sk; | 960 | struct sock *sk; |
961 | struct hlist_node *node; | 961 | struct hlist_node *node; |
962 | char *str = buf; | ||
963 | 962 | ||
964 | read_lock_bh(&sco_sk_list.lock); | 963 | read_lock_bh(&sco_sk_list.lock); |
965 | 964 | ||
966 | sk_for_each(sk, node, &sco_sk_list.head) { | 965 | sk_for_each(sk, node, &sco_sk_list.head) { |
967 | str += sprintf(str, "%s %s %d\n", | 966 | seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), |
968 | batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), | 967 | batostr(&bt_sk(sk)->dst), sk->sk_state); |
969 | sk->sk_state); | ||
970 | } | 968 | } |
971 | 969 | ||
972 | read_unlock_bh(&sco_sk_list.lock); | 970 | read_unlock_bh(&sco_sk_list.lock); |
973 | 971 | ||
974 | return (str - buf); | 972 | return 0; |
975 | } | 973 | } |
976 | 974 | ||
977 | static CLASS_ATTR(sco, S_IRUGO, sco_sysfs_show, NULL); | 975 | static int sco_debugfs_open(struct inode *inode, struct file *file) |
976 | { | ||
977 | return single_open(file, sco_debugfs_show, inode->i_private); | ||
978 | } | ||
979 | |||
980 | static const struct file_operations sco_debugfs_fops = { | ||
981 | .open = sco_debugfs_open, | ||
982 | .read = seq_read, | ||
983 | .llseek = seq_lseek, | ||
984 | .release = single_release, | ||
985 | }; | ||
986 | |||
987 | static struct dentry *sco_debugfs; | ||
978 | 988 | ||
979 | static const struct proto_ops sco_sock_ops = { | 989 | static const struct proto_ops sco_sock_ops = { |
980 | .family = PF_BLUETOOTH, | 990 | .family = PF_BLUETOOTH, |
@@ -1032,8 +1042,12 @@ static int __init sco_init(void) | |||
1032 | goto error; | 1042 | goto error; |
1033 | } | 1043 | } |
1034 | 1044 | ||
1035 | if (class_create_file(bt_class, &class_attr_sco) < 0) | 1045 | if (bt_debugfs) { |
1036 | BT_ERR("Failed to create SCO info file"); | 1046 | sco_debugfs = debugfs_create_file("sco", 0444, |
1047 | bt_debugfs, NULL, &sco_debugfs_fops); | ||
1048 | if (!sco_debugfs) | ||
1049 | BT_ERR("Failed to create SCO debug file"); | ||
1050 | } | ||
1037 | 1051 | ||
1038 | BT_INFO("SCO (Voice Link) ver %s", VERSION); | 1052 | BT_INFO("SCO (Voice Link) ver %s", VERSION); |
1039 | BT_INFO("SCO socket layer initialized"); | 1053 | BT_INFO("SCO socket layer initialized"); |
@@ -1047,7 +1061,7 @@ error: | |||
1047 | 1061 | ||
1048 | static void __exit sco_exit(void) | 1062 | static void __exit sco_exit(void) |
1049 | { | 1063 | { |
1050 | class_remove_file(bt_class, &class_attr_sco); | 1064 | debugfs_remove(sco_debugfs); |
1051 | 1065 | ||
1052 | if (bt_sock_unregister(BTPROTO_SCO) < 0) | 1066 | if (bt_sock_unregister(BTPROTO_SCO) < 0) |
1053 | BT_ERR("SCO socket unregistration failed"); | 1067 | BT_ERR("SCO socket unregistration failed"); |
diff --git a/net/core/dev.c b/net/core/dev.c index bcc490cc9452..59d4394d2ce8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2483,6 +2483,7 @@ int netif_receive_skb(struct sk_buff *skb) | |||
2483 | { | 2483 | { |
2484 | struct packet_type *ptype, *pt_prev; | 2484 | struct packet_type *ptype, *pt_prev; |
2485 | struct net_device *orig_dev; | 2485 | struct net_device *orig_dev; |
2486 | struct net_device *master; | ||
2486 | struct net_device *null_or_orig; | 2487 | struct net_device *null_or_orig; |
2487 | struct net_device *null_or_bond; | 2488 | struct net_device *null_or_bond; |
2488 | int ret = NET_RX_DROP; | 2489 | int ret = NET_RX_DROP; |
@@ -2503,11 +2504,12 @@ int netif_receive_skb(struct sk_buff *skb) | |||
2503 | 2504 | ||
2504 | null_or_orig = NULL; | 2505 | null_or_orig = NULL; |
2505 | orig_dev = skb->dev; | 2506 | orig_dev = skb->dev; |
2506 | if (orig_dev->master) { | 2507 | master = ACCESS_ONCE(orig_dev->master); |
2507 | if (skb_bond_should_drop(skb)) | 2508 | if (master) { |
2509 | if (skb_bond_should_drop(skb, master)) | ||
2508 | null_or_orig = orig_dev; /* deliver only exact match */ | 2510 | null_or_orig = orig_dev; /* deliver only exact match */ |
2509 | else | 2511 | else |
2510 | skb->dev = orig_dev->master; | 2512 | skb->dev = master; |
2511 | } | 2513 | } |
2512 | 2514 | ||
2513 | __get_cpu_var(netdev_rx_stat).total++; | 2515 | __get_cpu_var(netdev_rx_stat).total++; |
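The ACCESS_ONCE() introduced in netif_receive_skb() above matters because dev->master can be cleared by the bonding driver while this receive path runs under RCU rather than RTNL; a plain read could legally be reloaded by the compiler between the NULL test and the later uses. A hedged illustration with made-up names:

#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustration only: snapshot the master pointer once and use the
 * snapshot for both the test and the assignment, instead of letting
 * the compiler re-read skb->dev->master and possibly see NULL. */
static void example_rx_on_slave(struct sk_buff *skb)
{
	struct net_device *master = ACCESS_ONCE(skb->dev->master);

	if (!master)
		return;		/* not enslaved at this instant */

	skb->dev = master;	/* same snapshot, cannot have changed */
}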
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index d4ec38fa64e6..6f9206b36dc2 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -614,7 +614,7 @@ void netpoll_print_options(struct netpoll *np) | |||
614 | np->name, np->local_port); | 614 | np->name, np->local_port); |
615 | printk(KERN_INFO "%s: local IP %pI4\n", | 615 | printk(KERN_INFO "%s: local IP %pI4\n", |
616 | np->name, &np->local_ip); | 616 | np->name, &np->local_ip); |
617 | printk(KERN_INFO "%s: interface %s\n", | 617 | printk(KERN_INFO "%s: interface '%s'\n", |
618 | np->name, np->dev_name); | 618 | np->name, np->dev_name); |
619 | printk(KERN_INFO "%s: remote port %d\n", | 619 | printk(KERN_INFO "%s: remote port %d\n", |
620 | np->name, np->remote_port); | 620 | np->name, np->remote_port); |
@@ -661,6 +661,9 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
661 | if ((delim = strchr(cur, '@')) == NULL) | 661 | if ((delim = strchr(cur, '@')) == NULL) |
662 | goto parse_failed; | 662 | goto parse_failed; |
663 | *delim = 0; | 663 | *delim = 0; |
664 | if (*cur == ' ' || *cur == '\t') | ||
665 | printk(KERN_INFO "%s: warning: whitespace " | ||
666 | "is not allowed\n", np->name); | ||
664 | np->remote_port = simple_strtol(cur, NULL, 10); | 667 | np->remote_port = simple_strtol(cur, NULL, 10); |
665 | cur = delim; | 668 | cur = delim; |
666 | } | 669 | } |
@@ -708,7 +711,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
708 | return 0; | 711 | return 0; |
709 | 712 | ||
710 | parse_failed: | 713 | parse_failed: |
711 | printk(KERN_INFO "%s: couldn't parse config at %s!\n", | 714 | printk(KERN_INFO "%s: couldn't parse config at '%s'!\n", |
712 | np->name, cur); | 715 | np->name, cur); |
713 | return -1; | 716 | return -1; |
714 | } | 717 | } |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 51ca946e3392..3feb2b390308 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1194,7 +1194,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) | |||
1194 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { | 1194 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { |
1195 | if (idx < s_idx) | 1195 | if (idx < s_idx) |
1196 | goto cont; | 1196 | goto cont; |
1197 | if (idx > s_idx) | 1197 | if (h > s_h || idx > s_idx) |
1198 | s_ip_idx = 0; | 1198 | s_ip_idx = 0; |
1199 | in_dev = __in_dev_get_rcu(dev); | 1199 | in_dev = __in_dev_get_rcu(dev); |
1200 | if (!in_dev) | 1200 | if (!in_dev) |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index af5d89792860..01ef8ba9025c 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -961,7 +961,9 @@ fib_find_node(struct trie *t, u32 key) | |||
961 | struct node *n; | 961 | struct node *n; |
962 | 962 | ||
963 | pos = 0; | 963 | pos = 0; |
964 | n = rcu_dereference(t->trie); | 964 | n = rcu_dereference_check(t->trie, |
965 | rcu_read_lock_held() || | ||
966 | lockdep_rtnl_is_held()); | ||
965 | 967 | ||
966 | while (n != NULL && NODE_TYPE(n) == T_TNODE) { | 968 | while (n != NULL && NODE_TYPE(n) == T_TNODE) { |
967 | tn = (struct tnode *) n; | 969 | tn = (struct tnode *) n; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index f47c9f76754b..f78402d097b3 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -810,11 +810,13 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
810 | tunnel->err_count = 0; | 810 | tunnel->err_count = 0; |
811 | } | 811 | } |
812 | 812 | ||
813 | max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen; | 813 | max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len; |
814 | 814 | ||
815 | if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| | 815 | if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| |
816 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { | 816 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { |
817 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 817 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
818 | if (max_headroom > dev->needed_headroom) | ||
819 | dev->needed_headroom = max_headroom; | ||
818 | if (!new_skb) { | 820 | if (!new_skb) { |
819 | ip_rt_put(rt); | 821 | ip_rt_put(rt); |
820 | txq->tx_dropped++; | 822 | txq->tx_dropped++; |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 8582e12e4a62..d0a6092a67be 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -802,6 +802,9 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) | |||
802 | int line; | 802 | int line; |
803 | struct mfc_cache *uc, *c, **cp; | 803 | struct mfc_cache *uc, *c, **cp; |
804 | 804 | ||
805 | if (mfc->mfcc_parent >= MAXVIFS) | ||
806 | return -ENFILE; | ||
807 | |||
805 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); | 808 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); |
806 | 809 | ||
807 | for (cp = &net->ipv4.mfc_cache_array[line]; | 810 | for (cp = &net->ipv4.mfc_cache_array[line]; |
@@ -1613,17 +1616,20 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) | |||
1613 | int ct; | 1616 | int ct; |
1614 | struct rtnexthop *nhp; | 1617 | struct rtnexthop *nhp; |
1615 | struct net *net = mfc_net(c); | 1618 | struct net *net = mfc_net(c); |
1616 | struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev; | ||
1617 | u8 *b = skb_tail_pointer(skb); | 1619 | u8 *b = skb_tail_pointer(skb); |
1618 | struct rtattr *mp_head; | 1620 | struct rtattr *mp_head; |
1619 | 1621 | ||
1620 | if (dev) | 1622 | /* If cache is unresolved, don't try to parse IIF and OIF */ |
1621 | RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); | 1623 | if (c->mfc_parent > MAXVIFS) |
1624 | return -ENOENT; | ||
1625 | |||
1626 | if (VIF_EXISTS(net, c->mfc_parent)) | ||
1627 | RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex); | ||
1622 | 1628 | ||
1623 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); | 1629 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); |
1624 | 1630 | ||
1625 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { | 1631 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { |
1626 | if (c->mfc_un.res.ttls[ct] < 255) { | 1632 | if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { |
1627 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) | 1633 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) |
1628 | goto rtattr_failure; | 1634 | goto rtattr_failure; |
1629 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); | 1635 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a770df2493d2..d413b57be9b3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1097,7 +1097,7 @@ static int slow_chain_length(const struct rtable *head) | |||
1097 | } | 1097 | } |
1098 | 1098 | ||
1099 | static int rt_intern_hash(unsigned hash, struct rtable *rt, | 1099 | static int rt_intern_hash(unsigned hash, struct rtable *rt, |
1100 | struct rtable **rp, struct sk_buff *skb) | 1100 | struct rtable **rp, struct sk_buff *skb, int ifindex) |
1101 | { | 1101 | { |
1102 | struct rtable *rth, **rthp; | 1102 | struct rtable *rth, **rthp; |
1103 | unsigned long now; | 1103 | unsigned long now; |
@@ -1212,11 +1212,16 @@ restart: | |||
1212 | slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { | 1212 | slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { |
1213 | struct net *net = dev_net(rt->u.dst.dev); | 1213 | struct net *net = dev_net(rt->u.dst.dev); |
1214 | int num = ++net->ipv4.current_rt_cache_rebuild_count; | 1214 | int num = ++net->ipv4.current_rt_cache_rebuild_count; |
1215 | if (!rt_caching(dev_net(rt->u.dst.dev))) { | 1215 | if (!rt_caching(net)) { |
1216 | printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", | 1216 | printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", |
1217 | rt->u.dst.dev->name, num); | 1217 | rt->u.dst.dev->name, num); |
1218 | } | 1218 | } |
1219 | rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev)); | 1219 | rt_emergency_hash_rebuild(net); |
1220 | spin_unlock_bh(rt_hash_lock_addr(hash)); | ||
1221 | |||
1222 | hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, | ||
1223 | ifindex, rt_genid(net)); | ||
1224 | goto restart; | ||
1220 | } | 1225 | } |
1221 | } | 1226 | } |
1222 | 1227 | ||
@@ -1441,7 +1446,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1441 | dev_hold(rt->u.dst.dev); | 1446 | dev_hold(rt->u.dst.dev); |
1442 | if (rt->idev) | 1447 | if (rt->idev) |
1443 | in_dev_hold(rt->idev); | 1448 | in_dev_hold(rt->idev); |
1444 | rt->u.dst.obsolete = 0; | 1449 | rt->u.dst.obsolete = -1; |
1445 | rt->u.dst.lastuse = jiffies; | 1450 | rt->u.dst.lastuse = jiffies; |
1446 | rt->u.dst.path = &rt->u.dst; | 1451 | rt->u.dst.path = &rt->u.dst; |
1447 | rt->u.dst.neighbour = NULL; | 1452 | rt->u.dst.neighbour = NULL; |
@@ -1477,7 +1482,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1477 | &netevent); | 1482 | &netevent); |
1478 | 1483 | ||
1479 | rt_del(hash, rth); | 1484 | rt_del(hash, rth); |
1480 | if (!rt_intern_hash(hash, rt, &rt, NULL)) | 1485 | if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif)) |
1481 | ip_rt_put(rt); | 1486 | ip_rt_put(rt); |
1482 | goto do_next; | 1487 | goto do_next; |
1483 | } | 1488 | } |
@@ -1506,11 +1511,12 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) | |||
1506 | struct dst_entry *ret = dst; | 1511 | struct dst_entry *ret = dst; |
1507 | 1512 | ||
1508 | if (rt) { | 1513 | if (rt) { |
1509 | if (dst->obsolete) { | 1514 | if (dst->obsolete > 0) { |
1510 | ip_rt_put(rt); | 1515 | ip_rt_put(rt); |
1511 | ret = NULL; | 1516 | ret = NULL; |
1512 | } else if ((rt->rt_flags & RTCF_REDIRECTED) || | 1517 | } else if ((rt->rt_flags & RTCF_REDIRECTED) || |
1513 | rt->u.dst.expires) { | 1518 | (rt->u.dst.expires && |
1519 | time_after_eq(jiffies, rt->u.dst.expires))) { | ||
1514 | unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, | 1520 | unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, |
1515 | rt->fl.oif, | 1521 | rt->fl.oif, |
1516 | rt_genid(dev_net(dst->dev))); | 1522 | rt_genid(dev_net(dst->dev))); |
@@ -1726,7 +1732,9 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu) | |||
1726 | 1732 | ||
1727 | static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) | 1733 | static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) |
1728 | { | 1734 | { |
1729 | return NULL; | 1735 | if (rt_is_expired((struct rtable *)dst)) |
1736 | return NULL; | ||
1737 | return dst; | ||
1730 | } | 1738 | } |
1731 | 1739 | ||
1732 | static void ipv4_dst_destroy(struct dst_entry *dst) | 1740 | static void ipv4_dst_destroy(struct dst_entry *dst) |
@@ -1888,7 +1896,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1888 | if (!rth) | 1896 | if (!rth) |
1889 | goto e_nobufs; | 1897 | goto e_nobufs; |
1890 | 1898 | ||
1891 | rth->u.dst.output= ip_rt_bug; | 1899 | rth->u.dst.output = ip_rt_bug; |
1900 | rth->u.dst.obsolete = -1; | ||
1892 | 1901 | ||
1893 | atomic_set(&rth->u.dst.__refcnt, 1); | 1902 | atomic_set(&rth->u.dst.__refcnt, 1); |
1894 | rth->u.dst.flags= DST_HOST; | 1903 | rth->u.dst.flags= DST_HOST; |
@@ -1927,7 +1936,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1927 | 1936 | ||
1928 | in_dev_put(in_dev); | 1937 | in_dev_put(in_dev); |
1929 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); | 1938 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); |
1930 | return rt_intern_hash(hash, rth, NULL, skb); | 1939 | return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex); |
1931 | 1940 | ||
1932 | e_nobufs: | 1941 | e_nobufs: |
1933 | in_dev_put(in_dev); | 1942 | in_dev_put(in_dev); |
@@ -2054,6 +2063,7 @@ static int __mkroute_input(struct sk_buff *skb, | |||
2054 | rth->fl.oif = 0; | 2063 | rth->fl.oif = 0; |
2055 | rth->rt_spec_dst= spec_dst; | 2064 | rth->rt_spec_dst= spec_dst; |
2056 | 2065 | ||
2066 | rth->u.dst.obsolete = -1; | ||
2057 | rth->u.dst.input = ip_forward; | 2067 | rth->u.dst.input = ip_forward; |
2058 | rth->u.dst.output = ip_output; | 2068 | rth->u.dst.output = ip_output; |
2059 | rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); | 2069 | rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); |
@@ -2093,7 +2103,7 @@ static int ip_mkroute_input(struct sk_buff *skb, | |||
2093 | /* put it into the cache */ | 2103 | /* put it into the cache */ |
2094 | hash = rt_hash(daddr, saddr, fl->iif, | 2104 | hash = rt_hash(daddr, saddr, fl->iif, |
2095 | rt_genid(dev_net(rth->u.dst.dev))); | 2105 | rt_genid(dev_net(rth->u.dst.dev))); |
2096 | return rt_intern_hash(hash, rth, NULL, skb); | 2106 | return rt_intern_hash(hash, rth, NULL, skb, fl->iif); |
2097 | } | 2107 | } |
2098 | 2108 | ||
2099 | /* | 2109 | /* |
@@ -2218,6 +2228,7 @@ local_input: | |||
2218 | goto e_nobufs; | 2228 | goto e_nobufs; |
2219 | 2229 | ||
2220 | rth->u.dst.output= ip_rt_bug; | 2230 | rth->u.dst.output= ip_rt_bug; |
2231 | rth->u.dst.obsolete = -1; | ||
2221 | rth->rt_genid = rt_genid(net); | 2232 | rth->rt_genid = rt_genid(net); |
2222 | 2233 | ||
2223 | atomic_set(&rth->u.dst.__refcnt, 1); | 2234 | atomic_set(&rth->u.dst.__refcnt, 1); |
@@ -2249,7 +2260,7 @@ local_input: | |||
2249 | } | 2260 | } |
2250 | rth->rt_type = res.type; | 2261 | rth->rt_type = res.type; |
2251 | hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); | 2262 | hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); |
2252 | err = rt_intern_hash(hash, rth, NULL, skb); | 2263 | err = rt_intern_hash(hash, rth, NULL, skb, fl.iif); |
2253 | goto done; | 2264 | goto done; |
2254 | 2265 | ||
2255 | no_route: | 2266 | no_route: |
@@ -2444,6 +2455,7 @@ static int __mkroute_output(struct rtable **result, | |||
2444 | rth->rt_spec_dst= fl->fl4_src; | 2455 | rth->rt_spec_dst= fl->fl4_src; |
2445 | 2456 | ||
2446 | rth->u.dst.output=ip_output; | 2457 | rth->u.dst.output=ip_output; |
2458 | rth->u.dst.obsolete = -1; | ||
2447 | rth->rt_genid = rt_genid(dev_net(dev_out)); | 2459 | rth->rt_genid = rt_genid(dev_net(dev_out)); |
2448 | 2460 | ||
2449 | RT_CACHE_STAT_INC(out_slow_tot); | 2461 | RT_CACHE_STAT_INC(out_slow_tot); |
@@ -2495,7 +2507,7 @@ static int ip_mkroute_output(struct rtable **rp, | |||
2495 | if (err == 0) { | 2507 | if (err == 0) { |
2496 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, | 2508 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, |
2497 | rt_genid(dev_net(dev_out))); | 2509 | rt_genid(dev_net(dev_out))); |
2498 | err = rt_intern_hash(hash, rth, rp, NULL); | 2510 | err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif); |
2499 | } | 2511 | } |
2500 | 2512 | ||
2501 | return err; | 2513 | return err; |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5901010fad55..6afb6d8662b2 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -429,7 +429,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
429 | if (tp->urg_seq == tp->copied_seq && | 429 | if (tp->urg_seq == tp->copied_seq && |
430 | !sock_flag(sk, SOCK_URGINLINE) && | 430 | !sock_flag(sk, SOCK_URGINLINE) && |
431 | tp->urg_data) | 431 | tp->urg_data) |
432 | target--; | 432 | target++; |
433 | 433 | ||
434 | /* Potential race condition. If read of tp below will | 434 | /* Potential race condition. If read of tp below will |
435 | * escape above sk->sk_state, we can be illegally awaken | 435 | * escape above sk->sk_state, we can be illegally awaken |
@@ -1254,6 +1254,39 @@ static void tcp_prequeue_process(struct sock *sk) | |||
1254 | tp->ucopy.memory = 0; | 1254 | tp->ucopy.memory = 0; |
1255 | } | 1255 | } |
1256 | 1256 | ||
1257 | #ifdef CONFIG_NET_DMA | ||
1258 | static void tcp_service_net_dma(struct sock *sk, bool wait) | ||
1259 | { | ||
1260 | dma_cookie_t done, used; | ||
1261 | dma_cookie_t last_issued; | ||
1262 | struct tcp_sock *tp = tcp_sk(sk); | ||
1263 | |||
1264 | if (!tp->ucopy.dma_chan) | ||
1265 | return; | ||
1266 | |||
1267 | last_issued = tp->ucopy.dma_cookie; | ||
1268 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | ||
1269 | |||
1270 | do { | ||
1271 | if (dma_async_memcpy_complete(tp->ucopy.dma_chan, | ||
1272 | last_issued, &done, | ||
1273 | &used) == DMA_SUCCESS) { | ||
1274 | /* Safe to free early-copied skbs now */ | ||
1275 | __skb_queue_purge(&sk->sk_async_wait_queue); | ||
1276 | break; | ||
1277 | } else { | ||
1278 | struct sk_buff *skb; | ||
1279 | while ((skb = skb_peek(&sk->sk_async_wait_queue)) && | ||
1280 | (dma_async_is_complete(skb->dma_cookie, done, | ||
1281 | used) == DMA_SUCCESS)) { | ||
1282 | __skb_dequeue(&sk->sk_async_wait_queue); | ||
1283 | kfree_skb(skb); | ||
1284 | } | ||
1285 | } | ||
1286 | } while (wait); | ||
1287 | } | ||
1288 | #endif | ||
1289 | |||
1257 | static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) | 1290 | static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) |
1258 | { | 1291 | { |
1259 | struct sk_buff *skb; | 1292 | struct sk_buff *skb; |
@@ -1546,6 +1579,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1546 | /* __ Set realtime policy in scheduler __ */ | 1579 | /* __ Set realtime policy in scheduler __ */ |
1547 | } | 1580 | } |
1548 | 1581 | ||
1582 | #ifdef CONFIG_NET_DMA | ||
1583 | if (tp->ucopy.dma_chan) | ||
1584 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | ||
1585 | #endif | ||
1549 | if (copied >= target) { | 1586 | if (copied >= target) { |
1550 | /* Do not sleep, just process backlog. */ | 1587 | /* Do not sleep, just process backlog. */ |
1551 | release_sock(sk); | 1588 | release_sock(sk); |
@@ -1554,6 +1591,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1554 | sk_wait_data(sk, &timeo); | 1591 | sk_wait_data(sk, &timeo); |
1555 | 1592 | ||
1556 | #ifdef CONFIG_NET_DMA | 1593 | #ifdef CONFIG_NET_DMA |
1594 | tcp_service_net_dma(sk, false); /* Don't block */ | ||
1557 | tp->ucopy.wakeup = 0; | 1595 | tp->ucopy.wakeup = 0; |
1558 | #endif | 1596 | #endif |
1559 | 1597 | ||
@@ -1633,6 +1671,9 @@ do_prequeue: | |||
1633 | copied = -EFAULT; | 1671 | copied = -EFAULT; |
1634 | break; | 1672 | break; |
1635 | } | 1673 | } |
1674 | |||
1675 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | ||
1676 | |||
1636 | if ((offset + used) == skb->len) | 1677 | if ((offset + used) == skb->len) |
1637 | copied_early = 1; | 1678 | copied_early = 1; |
1638 | 1679 | ||
@@ -1702,27 +1743,9 @@ skip_copy: | |||
1702 | } | 1743 | } |
1703 | 1744 | ||
1704 | #ifdef CONFIG_NET_DMA | 1745 | #ifdef CONFIG_NET_DMA |
1705 | if (tp->ucopy.dma_chan) { | 1746 | tcp_service_net_dma(sk, true); /* Wait for queue to drain */ |
1706 | dma_cookie_t done, used; | 1747 | tp->ucopy.dma_chan = NULL; |
1707 | |||
1708 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | ||
1709 | |||
1710 | while (dma_async_memcpy_complete(tp->ucopy.dma_chan, | ||
1711 | tp->ucopy.dma_cookie, &done, | ||
1712 | &used) == DMA_IN_PROGRESS) { | ||
1713 | /* do partial cleanup of sk_async_wait_queue */ | ||
1714 | while ((skb = skb_peek(&sk->sk_async_wait_queue)) && | ||
1715 | (dma_async_is_complete(skb->dma_cookie, done, | ||
1716 | used) == DMA_SUCCESS)) { | ||
1717 | __skb_dequeue(&sk->sk_async_wait_queue); | ||
1718 | kfree_skb(skb); | ||
1719 | } | ||
1720 | } | ||
1721 | 1748 | ||
1722 | /* Safe to free early-copied skbs now */ | ||
1723 | __skb_queue_purge(&sk->sk_async_wait_queue); | ||
1724 | tp->ucopy.dma_chan = NULL; | ||
1725 | } | ||
1726 | if (tp->ucopy.pinned_list) { | 1749 | if (tp->ucopy.pinned_list) { |
1727 | dma_unpin_iovec_pages(tp->ucopy.pinned_list); | 1750 | dma_unpin_iovec_pages(tp->ucopy.pinned_list); |
1728 | tp->ucopy.pinned_list = NULL; | 1751 | tp->ucopy.pinned_list = NULL; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 788851ca8c5d..c096a4218b8f 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2511,6 +2511,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets) | |||
2511 | int err; | 2511 | int err; |
2512 | unsigned int mss; | 2512 | unsigned int mss; |
2513 | 2513 | ||
2514 | if (packets == 0) | ||
2515 | return; | ||
2516 | |||
2514 | WARN_ON(packets > tp->packets_out); | 2517 | WARN_ON(packets > tp->packets_out); |
2515 | if (tp->lost_skb_hint) { | 2518 | if (tp->lost_skb_hint) { |
2516 | skb = tp->lost_skb_hint; | 2519 | skb = tp->lost_skb_hint; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 70df40980a87..f4df5f931f36 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -370,6 +370,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
370 | if (sk->sk_state == TCP_CLOSE) | 370 | if (sk->sk_state == TCP_CLOSE) |
371 | goto out; | 371 | goto out; |
372 | 372 | ||
373 | if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { | ||
374 | NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); | ||
375 | goto out; | ||
376 | } | ||
377 | |||
373 | icsk = inet_csk(sk); | 378 | icsk = inet_csk(sk); |
374 | tp = tcp_sk(sk); | 379 | tp = tcp_sk(sk); |
375 | seq = ntohl(th->seq); | 380 | seq = ntohl(th->seq); |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3381b4317c27..7e567ae5eaab 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -3610,7 +3610,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, | |||
3610 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { | 3610 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { |
3611 | if (idx < s_idx) | 3611 | if (idx < s_idx) |
3612 | goto cont; | 3612 | goto cont; |
3613 | if (idx > s_idx) | 3613 | if (h > s_h || idx > s_idx) |
3614 | s_ip_idx = 0; | 3614 | s_ip_idx = 0; |
3615 | ip_idx = 0; | 3615 | ip_idx = 0; |
3616 | if ((idev = __in6_dev_get(dev)) == NULL) | 3616 | if ((idev = __in6_dev_get(dev)) == NULL) |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 52e0f74fdfe0..27acfb58650a 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -1113,6 +1113,9 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | |||
1113 | unsigned char ttls[MAXMIFS]; | 1113 | unsigned char ttls[MAXMIFS]; |
1114 | int i; | 1114 | int i; |
1115 | 1115 | ||
1116 | if (mfc->mf6cc_parent >= MAXMIFS) | ||
1117 | return -ENFILE; | ||
1118 | |||
1116 | memset(ttls, 255, MAXMIFS); | 1119 | memset(ttls, 255, MAXMIFS); |
1117 | for (i = 0; i < MAXMIFS; i++) { | 1120 | for (i = 0; i < MAXMIFS; i++) { |
1118 | if (IF_ISSET(i, &mfc->mf6cc_ifset)) | 1121 | if (IF_ISSET(i, &mfc->mf6cc_ifset)) |
@@ -1692,17 +1695,20 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm) | |||
1692 | int ct; | 1695 | int ct; |
1693 | struct rtnexthop *nhp; | 1696 | struct rtnexthop *nhp; |
1694 | struct net *net = mfc6_net(c); | 1697 | struct net *net = mfc6_net(c); |
1695 | struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev; | ||
1696 | u8 *b = skb_tail_pointer(skb); | 1698 | u8 *b = skb_tail_pointer(skb); |
1697 | struct rtattr *mp_head; | 1699 | struct rtattr *mp_head; |
1698 | 1700 | ||
1699 | if (dev) | 1701 | /* If cache is unresolved, don't try to parse IIF and OIF */ |
1700 | RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); | 1702 | if (c->mf6c_parent > MAXMIFS) |
1703 | return -ENOENT; | ||
1704 | |||
1705 | if (MIF_EXISTS(net, c->mf6c_parent)) | ||
1706 | RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex); | ||
1701 | 1707 | ||
1702 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); | 1708 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); |
1703 | 1709 | ||
1704 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { | 1710 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { |
1705 | if (c->mfc_un.res.ttls[ct] < 255) { | 1711 | if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { |
1706 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) | 1712 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) |
1707 | goto rtattr_failure; | 1713 | goto rtattr_failure; |
1708 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); | 1714 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); |
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c index aef31a29de9e..b9cf7cd61923 100644 --- a/net/ipv6/netfilter/ip6table_raw.c +++ b/net/ipv6/netfilter/ip6table_raw.c | |||
@@ -13,7 +13,7 @@ static const struct xt_table packet_raw = { | |||
13 | .valid_hooks = RAW_VALID_HOOKS, | 13 | .valid_hooks = RAW_VALID_HOOKS, |
14 | .me = THIS_MODULE, | 14 | .me = THIS_MODULE, |
15 | .af = NFPROTO_IPV6, | 15 | .af = NFPROTO_IPV6, |
16 | .priority = NF_IP6_PRI_FIRST, | 16 | .priority = NF_IP6_PRI_RAW, |
17 | }; | 17 | }; |
18 | 18 | ||
19 | /* The work comes in here from netfilter.c. */ | 19 | /* The work comes in here from netfilter.c. */ |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 52cd3eff31dc..0d7713c5c206 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -879,7 +879,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) | |||
879 | 879 | ||
880 | rt = (struct rt6_info *) dst; | 880 | rt = (struct rt6_info *) dst; |
881 | 881 | ||
882 | if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) | 882 | if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) |
883 | return dst; | 883 | return dst; |
884 | 884 | ||
885 | return NULL; | 885 | return NULL; |
@@ -890,12 +890,17 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) | |||
890 | struct rt6_info *rt = (struct rt6_info *) dst; | 890 | struct rt6_info *rt = (struct rt6_info *) dst; |
891 | 891 | ||
892 | if (rt) { | 892 | if (rt) { |
893 | if (rt->rt6i_flags & RTF_CACHE) | 893 | if (rt->rt6i_flags & RTF_CACHE) { |
894 | ip6_del_rt(rt); | 894 | if (rt6_check_expired(rt)) { |
895 | else | 895 | ip6_del_rt(rt); |
896 | dst = NULL; | ||
897 | } | ||
898 | } else { | ||
896 | dst_release(dst); | 899 | dst_release(dst); |
900 | dst = NULL; | ||
901 | } | ||
897 | } | 902 | } |
898 | return NULL; | 903 | return dst; |
899 | } | 904 | } |
900 | 905 | ||
901 | static void ip6_link_failure(struct sk_buff *skb) | 906 | static void ip6_link_failure(struct sk_buff *skb) |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 368707882647..344145f23c34 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -2129,10 +2129,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c | |||
2129 | int err; | 2129 | int err; |
2130 | 2130 | ||
2131 | out_skb = pfkey_xfrm_policy2msg_prep(xp); | 2131 | out_skb = pfkey_xfrm_policy2msg_prep(xp); |
2132 | if (IS_ERR(out_skb)) { | 2132 | if (IS_ERR(out_skb)) |
2133 | err = PTR_ERR(out_skb); | 2133 | return PTR_ERR(out_skb); |
2134 | goto out; | 2134 | |
2135 | } | ||
2136 | err = pfkey_xfrm_policy2msg(out_skb, xp, dir); | 2135 | err = pfkey_xfrm_policy2msg(out_skb, xp, dir); |
2137 | if (err < 0) | 2136 | if (err < 0) |
2138 | return err; | 2137 | return err; |
@@ -2148,7 +2147,6 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c | |||
2148 | out_hdr->sadb_msg_seq = c->seq; | 2147 | out_hdr->sadb_msg_seq = c->seq; |
2149 | out_hdr->sadb_msg_pid = c->pid; | 2148 | out_hdr->sadb_msg_pid = c->pid; |
2150 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); | 2149 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); |
2151 | out: | ||
2152 | return 0; | 2150 | return 0; |
2153 | 2151 | ||
2154 | } | 2152 | } |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 2b2af631d2b8..569410a85953 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -582,7 +582,9 @@ nla_put_failure: | |||
582 | nlmsg_failure: | 582 | nlmsg_failure: |
583 | kfree_skb(skb); | 583 | kfree_skb(skb); |
584 | errout: | 584 | errout: |
585 | nfnetlink_set_err(net, 0, group, -ENOBUFS); | 585 | if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0) |
586 | return -ENOBUFS; | ||
587 | |||
586 | return 0; | 588 | return 0; |
587 | } | 589 | } |
588 | #endif /* CONFIG_NF_CONNTRACK_EVENTS */ | 590 | #endif /* CONFIG_NF_CONNTRACK_EVENTS */ |
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 8eb0cc23ada3..6afa3d52ea5f 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -113,9 +113,9 @@ int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, | |||
113 | } | 113 | } |
114 | EXPORT_SYMBOL_GPL(nfnetlink_send); | 114 | EXPORT_SYMBOL_GPL(nfnetlink_send); |
115 | 115 | ||
116 | void nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error) | 116 | int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error) |
117 | { | 117 | { |
118 | netlink_set_err(net->nfnl, pid, group, error); | 118 | return netlink_set_err(net->nfnl, pid, group, error); |
119 | } | 119 | } |
120 | EXPORT_SYMBOL_GPL(nfnetlink_set_err); | 120 | EXPORT_SYMBOL_GPL(nfnetlink_set_err); |
121 | 121 | ||
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 9e9c48963942..215a64835de8 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -493,6 +493,7 @@ static void hashlimit_ipv6_mask(__be32 *i, unsigned int p) | |||
493 | case 64 ... 95: | 493 | case 64 ... 95: |
494 | i[2] = maskl(i[2], p - 64); | 494 | i[2] = maskl(i[2], p - 64); |
495 | i[3] = 0; | 495 | i[3] = 0; |
496 | break; | ||
496 | case 96 ... 127: | 497 | case 96 ... 127: |
497 | i[3] = maskl(i[3], p - 96); | 498 | i[3] = maskl(i[3], p - 96); |
498 | break; | 499 | break; |
@@ -879,7 +880,8 @@ static void dl_seq_stop(struct seq_file *s, void *v) | |||
879 | struct xt_hashlimit_htable *htable = s->private; | 880 | struct xt_hashlimit_htable *htable = s->private; |
880 | unsigned int *bucket = (unsigned int *)v; | 881 | unsigned int *bucket = (unsigned int *)v; |
881 | 882 | ||
882 | kfree(bucket); | 883 | if (!IS_ERR(bucket)) |
884 | kfree(bucket); | ||
883 | spin_unlock_bh(&htable->lock); | 885 | spin_unlock_bh(&htable->lock); |
884 | } | 886 | } |
885 | 887 | ||
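Two small fixes in xt_hashlimit: the 64-95 arm of the IPv6 prefix-mask switch was missing a break and fell through into the 96-127 arm, and dl_seq_stop() could pass an ERR_PTR value coming from the seq_file start routine to kfree(). The fall-through class of bug in isolation (illustrative sketch, not the module source):

        switch (prefix_len) {
        case 64 ... 95:
                addr[2] = maskl(addr[2], prefix_len - 64);
                addr[3] = 0;
                break;                     /* without this, control falls into the next arm */
        case 96 ... 127:
                addr[3] = maskl(addr[3], prefix_len - 96);
                break;
        }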
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 7073dbb8100c..971d172afece 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c | |||
@@ -267,7 +267,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
267 | for (i = 0; i < e->nstamps; i++) { | 267 | for (i = 0; i < e->nstamps; i++) { |
268 | if (info->seconds && time_after(time, e->stamps[i])) | 268 | if (info->seconds && time_after(time, e->stamps[i])) |
269 | continue; | 269 | continue; |
270 | if (info->hit_count && ++hits >= info->hit_count) { | 270 | if (!info->hit_count || ++hits >= info->hit_count) { |
271 | ret = !ret; | 271 | ret = !ret; |
272 | break; | 272 | break; |
273 | } | 273 | } |
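The xt_recent condition was wrong for the case where no --hitcount was given: with info->hit_count == 0 the old test `info->hit_count && ++hits >= info->hit_count` could never become true, so the entry never matched even when a timestamp fell inside the --seconds window. Treating a zero hit_count as "threshold already met" restores the intended behaviour:

        /* Mirrors the fixed condition: match when no threshold is set,
         * or once enough qualifying timestamps have been counted. */
        if (!info->hit_count || ++hits >= info->hit_count) {
                ret = !ret;
                break;
        }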
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 320d0423a240..acbbae1e89b5 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1093,6 +1093,7 @@ static inline int do_one_set_err(struct sock *sk, | |||
1093 | struct netlink_set_err_data *p) | 1093 | struct netlink_set_err_data *p) |
1094 | { | 1094 | { |
1095 | struct netlink_sock *nlk = nlk_sk(sk); | 1095 | struct netlink_sock *nlk = nlk_sk(sk); |
1096 | int ret = 0; | ||
1096 | 1097 | ||
1097 | if (sk == p->exclude_sk) | 1098 | if (sk == p->exclude_sk) |
1098 | goto out; | 1099 | goto out; |
@@ -1104,10 +1105,15 @@ static inline int do_one_set_err(struct sock *sk, | |||
1104 | !test_bit(p->group - 1, nlk->groups)) | 1105 | !test_bit(p->group - 1, nlk->groups)) |
1105 | goto out; | 1106 | goto out; |
1106 | 1107 | ||
1108 | if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) { | ||
1109 | ret = 1; | ||
1110 | goto out; | ||
1111 | } | ||
1112 | |||
1107 | sk->sk_err = p->code; | 1113 | sk->sk_err = p->code; |
1108 | sk->sk_error_report(sk); | 1114 | sk->sk_error_report(sk); |
1109 | out: | 1115 | out: |
1110 | return 0; | 1116 | return ret; |
1111 | } | 1117 | } |
1112 | 1118 | ||
1113 | /** | 1119 | /** |
@@ -1116,12 +1122,16 @@ out: | |||
1116 | * @pid: the PID of a process that we want to skip (if any) | 1122 | * @pid: the PID of a process that we want to skip (if any) |
1117 | * @groups: the broadcast group that will notice the error | 1123 | * @groups: the broadcast group that will notice the error |
1118 | * @code: error code, must be negative (as usual in kernelspace) | 1124 | * @code: error code, must be negative (as usual in kernelspace) |
1125 | * | ||
1126 | * This function returns the number of broadcast listeners that have set the | ||
1127 | * NETLINK_RECV_NO_ENOBUFS socket option. | ||
1119 | */ | 1128 | */ |
1120 | void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) | 1129 | int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) |
1121 | { | 1130 | { |
1122 | struct netlink_set_err_data info; | 1131 | struct netlink_set_err_data info; |
1123 | struct hlist_node *node; | 1132 | struct hlist_node *node; |
1124 | struct sock *sk; | 1133 | struct sock *sk; |
1134 | int ret = 0; | ||
1125 | 1135 | ||
1126 | info.exclude_sk = ssk; | 1136 | info.exclude_sk = ssk; |
1127 | info.pid = pid; | 1137 | info.pid = pid; |
@@ -1132,9 +1142,10 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) | |||
1132 | read_lock(&nl_table_lock); | 1142 | read_lock(&nl_table_lock); |
1133 | 1143 | ||
1134 | sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) | 1144 | sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) |
1135 | do_one_set_err(sk, &info); | 1145 | ret += do_one_set_err(sk, &info); |
1136 | 1146 | ||
1137 | read_unlock(&nl_table_lock); | 1147 | read_unlock(&nl_table_lock); |
1148 | return ret; | ||
1138 | } | 1149 | } |
1139 | EXPORT_SYMBOL(netlink_set_err); | 1150 | EXPORT_SYMBOL(netlink_set_err); |
1140 | 1151 | ||
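The three netfilter/netlink hunks above work together: do_one_set_err() now skips sockets that set NETLINK_RECV_NO_ENOBUFS (returning 1 for each one skipped), netlink_set_err() sums and returns that count, and nfnetlink_set_err() forwards it, so a caller can tell whether any listener deliberately opted out of ENOBUFS socket errors. ctnetlink uses it exactly that way, reporting the overrun through its own return value:

        /* From the nf_conntrack_netlink hunk above: signal the lost event
         * via the return code when at least one listener asked not to
         * receive ENOBUFS as a socket error. */
        if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
                return -ENOBUFS;
        return 0;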
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c index 77228f28fa36..2d744f22a9a1 100644 --- a/net/rxrpc/ar-accept.c +++ b/net/rxrpc/ar-accept.c | |||
@@ -88,6 +88,11 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, | |||
88 | 88 | ||
89 | /* get a notification message to send to the server app */ | 89 | /* get a notification message to send to the server app */ |
90 | notification = alloc_skb(0, GFP_NOFS); | 90 | notification = alloc_skb(0, GFP_NOFS); |
91 | if (!notification) { | ||
92 | _debug("no memory"); | ||
93 | ret = -ENOMEM; | ||
94 | goto error_nofree; | ||
95 | } | ||
91 | rxrpc_new_skb(notification); | 96 | rxrpc_new_skb(notification); |
92 | notification->mark = RXRPC_SKB_MARK_NEW_CALL; | 97 | notification->mark = RXRPC_SKB_MARK_NEW_CALL; |
93 | 98 | ||
@@ -189,6 +194,7 @@ invalid_service: | |||
189 | ret = -ECONNREFUSED; | 194 | ret = -ECONNREFUSED; |
190 | error: | 195 | error: |
191 | rxrpc_free_skb(notification); | 196 | rxrpc_free_skb(notification); |
197 | error_nofree: | ||
192 | _leave(" = %d", ret); | 198 | _leave(" = %d", ret); |
193 | return ret; | 199 | return ret; |
194 | } | 200 | } |
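rxrpc_accept_incoming_call() used the notification skb without checking that alloc_skb() had succeeded. The fix returns -ENOMEM through a new error_nofree label that sits after the rxrpc_free_skb() call, so the failure path never tries to release an skb that was never allocated. Condensed shape of the fix:

        notification = alloc_skb(0, GFP_NOFS);
        if (!notification) {
                ret = -ENOMEM;
                goto error_nofree;         /* nothing to free yet */
        }
        /* ... normal processing and the other error paths ... */
error:
        rxrpc_free_skb(notification);
error_nofree:
        return ret;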
diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 21f9c7678aa3..2f691fb180d1 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig | |||
@@ -328,13 +328,16 @@ config NET_CLS_FLOW | |||
328 | module will be called cls_flow. | 328 | module will be called cls_flow. |
329 | 329 | ||
330 | config NET_CLS_CGROUP | 330 | config NET_CLS_CGROUP |
331 | bool "Control Group Classifier" | 331 | tristate "Control Group Classifier" |
332 | select NET_CLS | 332 | select NET_CLS |
333 | depends on CGROUPS | 333 | depends on CGROUPS |
334 | ---help--- | 334 | ---help--- |
335 | Say Y here if you want to classify packets based on the control | 335 | Say Y here if you want to classify packets based on the control |
336 | cgroup of their process. | 336 | cgroup of their process. |
337 | 337 | ||
338 | To compile this code as a module, choose M here: the | ||
339 | module will be called cls_cgroup. | ||
340 | |||
338 | config NET_EMATCH | 341 | config NET_EMATCH |
339 | bool "Extended Matches" | 342 | bool "Extended Matches" |
340 | select NET_CLS | 343 | select NET_CLS |
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index e4877ca6727c..7f27d2c15e08 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c | |||
@@ -24,6 +24,25 @@ struct cgroup_cls_state | |||
24 | u32 classid; | 24 | u32 classid; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, | ||
28 | struct cgroup *cgrp); | ||
29 | static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp); | ||
30 | static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp); | ||
31 | |||
32 | struct cgroup_subsys net_cls_subsys = { | ||
33 | .name = "net_cls", | ||
34 | .create = cgrp_create, | ||
35 | .destroy = cgrp_destroy, | ||
36 | .populate = cgrp_populate, | ||
37 | #ifdef CONFIG_NET_CLS_CGROUP | ||
38 | .subsys_id = net_cls_subsys_id, | ||
39 | #else | ||
40 | #define net_cls_subsys_id net_cls_subsys.subsys_id | ||
41 | #endif | ||
42 | .module = THIS_MODULE, | ||
43 | }; | ||
44 | |||
45 | |||
27 | static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) | 46 | static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) |
28 | { | 47 | { |
29 | return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), | 48 | return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), |
@@ -79,14 +98,6 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
79 | return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); | 98 | return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); |
80 | } | 99 | } |
81 | 100 | ||
82 | struct cgroup_subsys net_cls_subsys = { | ||
83 | .name = "net_cls", | ||
84 | .create = cgrp_create, | ||
85 | .destroy = cgrp_destroy, | ||
86 | .populate = cgrp_populate, | ||
87 | .subsys_id = net_cls_subsys_id, | ||
88 | }; | ||
89 | |||
90 | struct cls_cgroup_head | 101 | struct cls_cgroup_head |
91 | { | 102 | { |
92 | u32 handle; | 103 | u32 handle; |
@@ -277,12 +288,19 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = { | |||
277 | 288 | ||
278 | static int __init init_cgroup_cls(void) | 289 | static int __init init_cgroup_cls(void) |
279 | { | 290 | { |
280 | return register_tcf_proto_ops(&cls_cgroup_ops); | 291 | int ret = register_tcf_proto_ops(&cls_cgroup_ops); |
292 | if (ret) | ||
293 | return ret; | ||
294 | ret = cgroup_load_subsys(&net_cls_subsys); | ||
295 | if (ret) | ||
296 | unregister_tcf_proto_ops(&cls_cgroup_ops); | ||
297 | return ret; | ||
281 | } | 298 | } |
282 | 299 | ||
283 | static void __exit exit_cgroup_cls(void) | 300 | static void __exit exit_cgroup_cls(void) |
284 | { | 301 | { |
285 | unregister_tcf_proto_ops(&cls_cgroup_ops); | 302 | unregister_tcf_proto_ops(&cls_cgroup_ops); |
303 | cgroup_unload_subsys(&net_cls_subsys); | ||
286 | } | 304 | } |
287 | 305 | ||
288 | module_init(init_cgroup_cls); | 306 | module_init(init_cgroup_cls); |
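Together with the Kconfig change that turns NET_CLS_CGROUP into a tristate option, cls_cgroup can now be built as cls_cgroup.ko: the net_cls subsystem definition gains .module = THIS_MODULE and is registered at module load through cgroup_load_subsys(), with the module init unwinding the already-registered classifier ops if that load fails, and the exit path undoing both. The same register-then-unwind pattern in isolation (hypothetical names):

        static int __init my_init(void)
        {
                int err = register_first();        /* e.g. the tcf proto ops */
                if (err)
                        return err;
                err = register_second();           /* e.g. the cgroup subsystem */
                if (err)
                        unregister_first();        /* undo step one on failure */
                return err;
        }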
diff --git a/net/socket.c b/net/socket.c index 769c386bd428..f55ffe9f8c87 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -2135,6 +2135,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, | |||
2135 | break; | 2135 | break; |
2136 | ++datagrams; | 2136 | ++datagrams; |
2137 | 2137 | ||
2138 | /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ | ||
2139 | if (flags & MSG_WAITFORONE) | ||
2140 | flags |= MSG_DONTWAIT; | ||
2141 | |||
2138 | if (timeout) { | 2142 | if (timeout) { |
2139 | ktime_get_ts(timeout); | 2143 | ktime_get_ts(timeout); |
2140 | *timeout = timespec_sub(end_time, *timeout); | 2144 | *timeout = timespec_sub(end_time, *timeout); |
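The __sys_recvmmsg() hunk implements MSG_WAITFORONE: after the first datagram has been copied out, MSG_DONTWAIT is OR-ed into the flags, so the call blocks for at most one packet and then only drains whatever is already queued. A hypothetical user-space use (the flag is new in this series; it needs _GNU_SOURCE and a libc that already exposes recvmmsg()):

        #define _GNU_SOURCE
        #include <sys/socket.h>

        struct mmsghdr msgs[8];            /* each msg_hdr pointing at an iovec */
        /* ... initialise msgs ... */
        int n = recvmmsg(sockfd, msgs, 8, MSG_WAITFORONE, NULL);
        /* n >= 1 on success: only the first datagram could have blocked */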
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 0cfccc2a0297..c389ccf6437d 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -1280,9 +1280,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp) | |||
1280 | rqstp->rq_release_snd_buf = priv_release_snd_buf; | 1280 | rqstp->rq_release_snd_buf = priv_release_snd_buf; |
1281 | return 0; | 1281 | return 0; |
1282 | out_free: | 1282 | out_free: |
1283 | for (i--; i >= 0; i--) { | 1283 | rqstp->rq_enc_pages_num = i; |
1284 | __free_page(rqstp->rq_enc_pages[i]); | 1284 | priv_release_snd_buf(rqstp); |
1285 | } | ||
1286 | out: | 1285 | out: |
1287 | return -EAGAIN; | 1286 | return -EAGAIN; |
1288 | } | 1287 | } |
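alloc_enc_pages() previously unwound a partial page allocation with an open-coded loop; the fix records how many pages were actually allocated in rq_enc_pages_num and reuses priv_release_snd_buf(), the same helper that frees a fully built buffer, so the two free paths cannot drift apart. Generic sketch of the pattern (hypothetical names):

        for (i = 0; i < npages; i++) {
                pages[i] = alloc_page(GFP_NOFS);
                if (!pages[i])
                        goto out_free;
        }
        return 0;
out_free:
        nr_pages = i;                      /* only the first i pages exist */
        release_enc_buf(rqst);             /* hypothetical helper freeing pages[0..nr_pages-1] */
        return -EAGAIN;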
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c index 13f214f53120..f0c05d3311c1 100644 --- a/net/sunrpc/bc_svc.c +++ b/net/sunrpc/bc_svc.c | |||
@@ -37,21 +37,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
37 | 37 | ||
38 | #define RPCDBG_FACILITY RPCDBG_SVCDSP | 38 | #define RPCDBG_FACILITY RPCDBG_SVCDSP |
39 | 39 | ||
40 | void bc_release_request(struct rpc_task *task) | ||
41 | { | ||
42 | struct rpc_rqst *req = task->tk_rqstp; | ||
43 | |||
44 | dprintk("RPC: bc_release_request: task= %p\n", task); | ||
45 | |||
46 | /* | ||
47 | * Release this request only if it's a backchannel | ||
48 | * preallocated request | ||
49 | */ | ||
50 | if (!bc_prealloc(req)) | ||
51 | return; | ||
52 | xprt_free_bc_request(req); | ||
53 | } | ||
54 | |||
55 | /* Empty callback ops */ | 40 | /* Empty callback ops */ |
56 | static const struct rpc_call_ops nfs41_callback_ops = { | 41 | static const struct rpc_call_ops nfs41_callback_ops = { |
57 | }; | 42 | }; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 154034b675bd..19c9983d5360 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -659,6 +659,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, | |||
659 | task = rpc_new_task(&task_setup_data); | 659 | task = rpc_new_task(&task_setup_data); |
660 | if (!task) { | 660 | if (!task) { |
661 | xprt_free_bc_request(req); | 661 | xprt_free_bc_request(req); |
662 | task = ERR_PTR(-ENOMEM); | ||
662 | goto out; | 663 | goto out; |
663 | } | 664 | } |
664 | task->tk_rqstp = req; | 665 | task->tk_rqstp = req; |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 8d63f8fd29b7..20e30c6f8355 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -587,6 +587,8 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent, | |||
587 | struct dentry *dentry; | 587 | struct dentry *dentry; |
588 | 588 | ||
589 | dentry = __rpc_lookup_create(parent, name); | 589 | dentry = __rpc_lookup_create(parent, name); |
590 | if (IS_ERR(dentry)) | ||
591 | return dentry; | ||
590 | if (dentry->d_inode == NULL) | 592 | if (dentry->d_inode == NULL) |
591 | return dentry; | 593 | return dentry; |
592 | dput(dentry); | 594 | dput(dentry); |
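__rpc_lookup_create() can hand back an ERR_PTR, and __rpc_lookup_create_exclusive() went straight on to dereference dentry->d_inode; the fix propagates the error before touching the pointer. The general rule for ERR_PTR-returning helpers (hypothetical helper name):

        dentry = lookup_helper(parent, name);  /* returns a dentry or ERR_PTR */
        if (IS_ERR(dentry))
                return dentry;                 /* propagate; never dereference an ERR_PTR */
        if (dentry->d_inode == NULL)
                return dentry;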
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 469de292c23c..42f09ade0044 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -46,6 +46,7 @@ | |||
46 | 46 | ||
47 | #include <linux/sunrpc/clnt.h> | 47 | #include <linux/sunrpc/clnt.h> |
48 | #include <linux/sunrpc/metrics.h> | 48 | #include <linux/sunrpc/metrics.h> |
49 | #include <linux/sunrpc/bc_xprt.h> | ||
49 | 50 | ||
50 | #include "sunrpc.h" | 51 | #include "sunrpc.h" |
51 | 52 | ||
@@ -1032,21 +1033,16 @@ void xprt_release(struct rpc_task *task) | |||
1032 | if (req->rq_release_snd_buf) | 1033 | if (req->rq_release_snd_buf) |
1033 | req->rq_release_snd_buf(req); | 1034 | req->rq_release_snd_buf(req); |
1034 | 1035 | ||
1035 | /* | ||
1036 | * Early exit if this is a backchannel preallocated request. | ||
1037 | * There is no need to have it added to the RPC slot list. | ||
1038 | */ | ||
1039 | if (is_bc_request) | ||
1040 | return; | ||
1041 | |||
1042 | memset(req, 0, sizeof(*req)); /* mark unused */ | ||
1043 | |||
1044 | dprintk("RPC: %5u release request %p\n", task->tk_pid, req); | 1036 | dprintk("RPC: %5u release request %p\n", task->tk_pid, req); |
1037 | if (likely(!is_bc_request)) { | ||
1038 | memset(req, 0, sizeof(*req)); /* mark unused */ | ||
1045 | 1039 | ||
1046 | spin_lock(&xprt->reserve_lock); | 1040 | spin_lock(&xprt->reserve_lock); |
1047 | list_add(&req->rq_list, &xprt->free); | 1041 | list_add(&req->rq_list, &xprt->free); |
1048 | rpc_wake_up_next(&xprt->backlog); | 1042 | rpc_wake_up_next(&xprt->backlog); |
1049 | spin_unlock(&xprt->reserve_lock); | 1043 | spin_unlock(&xprt->reserve_lock); |
1044 | } else | ||
1045 | xprt_free_bc_request(req); | ||
1050 | } | 1046 | } |
1051 | 1047 | ||
1052 | /** | 1048 | /** |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index e4839c07c913..9847c30b5001 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -2251,9 +2251,6 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
2251 | .buf_free = rpc_free, | 2251 | .buf_free = rpc_free, |
2252 | .send_request = xs_tcp_send_request, | 2252 | .send_request = xs_tcp_send_request, |
2253 | .set_retrans_timeout = xprt_set_retrans_timeout_def, | 2253 | .set_retrans_timeout = xprt_set_retrans_timeout_def, |
2254 | #if defined(CONFIG_NFS_V4_1) | ||
2255 | .release_request = bc_release_request, | ||
2256 | #endif /* CONFIG_NFS_V4_1 */ | ||
2257 | .close = xs_tcp_close, | 2254 | .close = xs_tcp_close, |
2258 | .destroy = xs_destroy, | 2255 | .destroy = xs_destroy, |
2259 | .print_stats = xs_tcp_print_stats, | 2256 | .print_stats = xs_tcp_print_stats, |
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index f76f3d13276d..6f97a13bcee4 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl | |||
@@ -284,7 +284,7 @@ foreach my $file (@ARGV) { | |||
284 | my $file_cnt = @files; | 284 | my $file_cnt = @files; |
285 | my $lastfile; | 285 | my $lastfile; |
286 | 286 | ||
287 | open(my $patch, '<', $file) | 287 | open(my $patch, "< $file") |
288 | or die "$P: Can't open $file: $!\n"; | 288 | or die "$P: Can't open $file: $!\n"; |
289 | while (<$patch>) { | 289 | while (<$patch>) { |
290 | my $patch_line = $_; | 290 | my $patch_line = $_; |
diff --git a/scripts/kernel-doc b/scripts/kernel-doc index c7865c362d28..fcdfb245a575 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc | |||
@@ -1424,6 +1424,8 @@ sub dump_struct($$) { | |||
1424 | $nested =~ s/\/\*.*?\*\///gos; | 1424 | $nested =~ s/\/\*.*?\*\///gos; |
1425 | # strip kmemcheck_bitfield_{begin,end}.*; | 1425 | # strip kmemcheck_bitfield_{begin,end}.*; |
1426 | $members =~ s/kmemcheck_bitfield_.*?;//gos; | 1426 | $members =~ s/kmemcheck_bitfield_.*?;//gos; |
1427 | # strip attributes | ||
1428 | $members =~ s/__aligned\s*\(\d+\)//gos; | ||
1427 | 1429 | ||
1428 | create_parameterlist($members, ';', $file); | 1430 | create_parameterlist($members, ';', $file); |
1429 | check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested); | 1431 | check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested); |
@@ -1728,6 +1730,7 @@ sub dump_function($$) { | |||
1728 | $prototype =~ s/^noinline +//; | 1730 | $prototype =~ s/^noinline +//; |
1729 | $prototype =~ s/__devinit +//; | 1731 | $prototype =~ s/__devinit +//; |
1730 | $prototype =~ s/__init +//; | 1732 | $prototype =~ s/__init +//; |
1733 | $prototype =~ s/__init_or_module +//; | ||
1731 | $prototype =~ s/^#\s*define\s+//; #ak added | 1734 | $prototype =~ s/^#\s*define\s+//; #ak added |
1732 | $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//; | 1735 | $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//; |
1733 | 1736 | ||
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c index 743ac6a29065..fd51fa8b06a1 100644 --- a/sound/arm/pxa2xx-pcm-lib.c +++ b/sound/arm/pxa2xx-pcm-lib.c | |||
@@ -205,6 +205,7 @@ int __pxa2xx_pcm_open(struct snd_pcm_substream *substream) | |||
205 | if (!rtd->dma_desc_array) | 205 | if (!rtd->dma_desc_array) |
206 | goto err1; | 206 | goto err1; |
207 | 207 | ||
208 | rtd->dma_ch = -1; | ||
208 | runtime->private_data = rtd; | 209 | runtime->private_data = rtd; |
209 | return 0; | 210 | return 0; |
210 | 211 | ||
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index b546ac2660f9..a2ff86189d2a 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -148,6 +148,9 @@ static void pcm_debug_name(struct snd_pcm_substream *substream, | |||
148 | 148 | ||
149 | #define xrun_debug(substream, mask) \ | 149 | #define xrun_debug(substream, mask) \ |
150 | ((substream)->pstr->xrun_debug & (mask)) | 150 | ((substream)->pstr->xrun_debug & (mask)) |
151 | #else | ||
152 | #define xrun_debug(substream, mask) 0 | ||
153 | #endif | ||
151 | 154 | ||
152 | #define dump_stack_on_xrun(substream) do { \ | 155 | #define dump_stack_on_xrun(substream) do { \ |
153 | if (xrun_debug(substream, XRUN_DEBUG_STACK)) \ | 156 | if (xrun_debug(substream, XRUN_DEBUG_STACK)) \ |
@@ -169,6 +172,7 @@ static void xrun(struct snd_pcm_substream *substream) | |||
169 | } | 172 | } |
170 | } | 173 | } |
171 | 174 | ||
175 | #ifdef CONFIG_SND_PCM_XRUN_DEBUG | ||
172 | #define hw_ptr_error(substream, fmt, args...) \ | 176 | #define hw_ptr_error(substream, fmt, args...) \ |
173 | do { \ | 177 | do { \ |
174 | if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \ | 178 | if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \ |
@@ -255,8 +259,6 @@ static void xrun_log_show(struct snd_pcm_substream *substream) | |||
255 | 259 | ||
256 | #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */ | 260 | #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */ |
257 | 261 | ||
258 | #define xrun_debug(substream, mask) 0 | ||
259 | #define xrun(substream) do { } while (0) | ||
260 | #define hw_ptr_error(substream, fmt, args...) do { } while (0) | 262 | #define hw_ptr_error(substream, fmt, args...) do { } while (0) |
261 | #define xrun_log(substream, pos) do { } while (0) | 263 | #define xrun_log(substream, pos) do { } while (0) |
262 | #define xrun_log_show(substream) do { } while (0) | 264 | #define xrun_log_show(substream) do { } while (0) |
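The pcm_lib.c reshuffle keeps the real xrun() function in non-debug builds: xrun_debug() now has its stub (always 0) right next to the real definition, the old `#define xrun(substream) do { } while (0)` stub is gone, and only hw_ptr_error() and the logging helpers remain behind CONFIG_SND_PCM_XRUN_DEBUG. The stub-next-to-definition pattern as it now reads:

        #ifdef CONFIG_SND_PCM_XRUN_DEBUG
        #define xrun_debug(substream, mask) \
                ((substream)->pstr->xrun_debug & (mask))
        #else
        #define xrun_debug(substream, mask) 0   /* lets the compiler drop the debug branches */
        #endif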
diff --git a/sound/oss/vidc.c b/sound/oss/vidc.c index 725fef0f59a3..a4127bab9231 100644 --- a/sound/oss/vidc.c +++ b/sound/oss/vidc.c | |||
@@ -363,13 +363,13 @@ static void vidc_audio_trigger(int dev, int enable_bits) | |||
363 | struct audio_operations *adev = audio_devs[dev]; | 363 | struct audio_operations *adev = audio_devs[dev]; |
364 | 364 | ||
365 | if (enable_bits & PCM_ENABLE_OUTPUT) { | 365 | if (enable_bits & PCM_ENABLE_OUTPUT) { |
366 | if (!(adev->flags & DMA_ACTIVE)) { | 366 | if (!(adev->dmap_out->flags & DMA_ACTIVE)) { |
367 | unsigned long flags; | 367 | unsigned long flags; |
368 | 368 | ||
369 | local_irq_save(flags); | 369 | local_irq_save(flags); |
370 | 370 | ||
371 | /* prevent recusion */ | 371 | /* prevent recusion */ |
372 | adev->flags |= DMA_ACTIVE; | 372 | adev->dmap_out->flags |= DMA_ACTIVE; |
373 | 373 | ||
374 | dma_interrupt = vidc_audio_dma_interrupt; | 374 | dma_interrupt = vidc_audio_dma_interrupt; |
375 | vidc_sound_dma_irq(0, NULL); | 375 | vidc_sound_dma_irq(0, NULL); |
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c index 1caf5e3c1f6a..e68c98ef4041 100644 --- a/sound/pci/ac97/ac97_patch.c +++ b/sound/pci/ac97/ac97_patch.c | |||
@@ -1852,12 +1852,14 @@ static unsigned int ad1981_jacks_blacklist[] = { | |||
1852 | 0x10140523, /* Thinkpad R40 */ | 1852 | 0x10140523, /* Thinkpad R40 */ |
1853 | 0x10140534, /* Thinkpad X31 */ | 1853 | 0x10140534, /* Thinkpad X31 */ |
1854 | 0x10140537, /* Thinkpad T41p */ | 1854 | 0x10140537, /* Thinkpad T41p */ |
1855 | 0x1014053e, /* Thinkpad R40e */ | ||
1855 | 0x10140554, /* Thinkpad T42p/R50p */ | 1856 | 0x10140554, /* Thinkpad T42p/R50p */ |
1856 | 0x10140567, /* Thinkpad T43p 2668-G7U */ | 1857 | 0x10140567, /* Thinkpad T43p 2668-G7U */ |
1857 | 0x10140581, /* Thinkpad X41-2527 */ | 1858 | 0x10140581, /* Thinkpad X41-2527 */ |
1858 | 0x10280160, /* Dell Dimension 2400 */ | 1859 | 0x10280160, /* Dell Dimension 2400 */ |
1859 | 0x104380b0, /* Asus A7V8X-MX */ | 1860 | 0x104380b0, /* Asus A7V8X-MX */ |
1860 | 0x11790241, /* Toshiba Satellite A-15 S127 */ | 1861 | 0x11790241, /* Toshiba Satellite A-15 S127 */ |
1862 | 0x1179ff10, /* Toshiba P500 */ | ||
1861 | 0x144dc01a, /* Samsung NP-X20C004/SEG */ | 1863 | 0x144dc01a, /* Samsung NP-X20C004/SEG */ |
1862 | 0 /* end */ | 1864 | 0 /* end */ |
1863 | }; | 1865 | }; |
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c index 1ded64e05643..329968edca9b 100644 --- a/sound/pci/cmipci.c +++ b/sound/pci/cmipci.c | |||
@@ -941,13 +941,21 @@ static snd_pcm_uframes_t snd_cmipci_pcm_pointer(struct cmipci *cm, struct cmipci | |||
941 | struct snd_pcm_substream *substream) | 941 | struct snd_pcm_substream *substream) |
942 | { | 942 | { |
943 | size_t ptr; | 943 | size_t ptr; |
944 | unsigned int reg; | 944 | unsigned int reg, rem, tries; |
945 | |||
945 | if (!rec->running) | 946 | if (!rec->running) |
946 | return 0; | 947 | return 0; |
947 | #if 1 // this seems better.. | 948 | #if 1 // this seems better.. |
948 | reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2; | 949 | reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2; |
949 | ptr = rec->dma_size - (snd_cmipci_read_w(cm, reg) + 1); | 950 | for (tries = 0; tries < 3; tries++) { |
950 | ptr >>= rec->shift; | 951 | rem = snd_cmipci_read_w(cm, reg); |
952 | if (rem < rec->dma_size) | ||
953 | goto ok; | ||
954 | } | ||
955 | printk(KERN_ERR "cmipci: invalid PCM pointer: %#x\n", rem); | ||
956 | return SNDRV_PCM_POS_XRUN; | ||
957 | ok: | ||
958 | ptr = (rec->dma_size - (rem + 1)) >> rec->shift; | ||
951 | #else | 959 | #else |
952 | reg = rec->ch ? CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1; | 960 | reg = rec->ch ? CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1; |
953 | ptr = snd_cmipci_read(cm, reg) - rec->offset; | 961 | ptr = snd_cmipci_read(cm, reg) - rec->offset; |
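snd_cmipci_pcm_pointer() used to trust a single read of the hardware frame counter even when it reported a value outside the DMA buffer. The new code re-reads the register up to three times and, if it never sees a plausible remainder, returns SNDRV_PCM_POS_XRUN so the PCM core treats the stream as having overrun rather than computing a bogus pointer. The bounded-retry shape (hypothetical names):

        unsigned int rem, tries;

        for (tries = 0; tries < 3; tries++) {
                rem = read_frame_counter();        /* hypothetical register read */
                if (rem < buffer_size)             /* plausible value, use it */
                        goto ok;
        }
        return SNDRV_PCM_POS_XRUN;                 /* let the core handle the overrun */
ok:
        return (buffer_size - (rem + 1)) >> shift;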
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 8b2915631cc3..4bb90675f70f 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2269,6 +2269,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { | |||
2269 | SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB), | 2269 | SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB), |
2270 | SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), | 2270 | SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), |
2271 | SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), | 2271 | SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), |
2272 | SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), | ||
2272 | SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), | 2273 | SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), |
2273 | SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), | 2274 | SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), |
2274 | SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), | 2275 | SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 194a28c54992..61682e1d09da 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -1591,6 +1591,21 @@ static int patch_cxt5047(struct hda_codec *codec) | |||
1591 | #endif | 1591 | #endif |
1592 | } | 1592 | } |
1593 | spec->vmaster_nid = 0x13; | 1593 | spec->vmaster_nid = 0x13; |
1594 | |||
1595 | switch (codec->subsystem_id >> 16) { | ||
1596 | case 0x103c: | ||
1597 | /* HP laptops have really bad sound over 0 dB on NID 0x10. | ||
1598 | * Fix max PCM level to 0 dB (originally it has 0x1e steps | ||
1599 | * with 0 dB offset 0x17) | ||
1600 | */ | ||
1601 | snd_hda_override_amp_caps(codec, 0x10, HDA_INPUT, | ||
1602 | (0x17 << AC_AMPCAP_OFFSET_SHIFT) | | ||
1603 | (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) | | ||
1604 | (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) | | ||
1605 | (1 << AC_AMPCAP_MUTE_SHIFT)); | ||
1606 | break; | ||
1607 | } | ||
1608 | |||
1594 | return 0; | 1609 | return 0; |
1595 | } | 1610 | } |
1596 | 1611 | ||
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c index 70669a246902..3c10c0b149f4 100644 --- a/sound/pci/hda/patch_nvhdmi.c +++ b/sound/pci/hda/patch_nvhdmi.c | |||
@@ -538,8 +538,6 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec) | |||
538 | * patch entries | 538 | * patch entries |
539 | */ | 539 | */ |
540 | static struct hda_codec_preset snd_hda_preset_nvhdmi[] = { | 540 | static struct hda_codec_preset snd_hda_preset_nvhdmi[] = { |
541 | { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, | ||
542 | { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, | ||
543 | { .id = 0x10de0002, .name = "MCP77/78 HDMI", | 541 | { .id = 0x10de0002, .name = "MCP77/78 HDMI", |
544 | .patch = patch_nvhdmi_8ch_7x }, | 542 | .patch = patch_nvhdmi_8ch_7x }, |
545 | { .id = 0x10de0003, .name = "MCP77/78 HDMI", | 543 | { .id = 0x10de0003, .name = "MCP77/78 HDMI", |
@@ -550,12 +548,16 @@ static struct hda_codec_preset snd_hda_preset_nvhdmi[] = { | |||
550 | .patch = patch_nvhdmi_8ch_7x }, | 548 | .patch = patch_nvhdmi_8ch_7x }, |
551 | { .id = 0x10de0007, .name = "MCP79/7A HDMI", | 549 | { .id = 0x10de0007, .name = "MCP79/7A HDMI", |
552 | .patch = patch_nvhdmi_8ch_7x }, | 550 | .patch = patch_nvhdmi_8ch_7x }, |
553 | { .id = 0x10de000c, .name = "MCP89 HDMI", | 551 | { .id = 0x10de000a, .name = "GT220 HDMI", |
554 | .patch = patch_nvhdmi_8ch_89 }, | 552 | .patch = patch_nvhdmi_8ch_89 }, |
555 | { .id = 0x10de000b, .name = "GT21x HDMI", | 553 | { .id = 0x10de000b, .name = "GT21x HDMI", |
556 | .patch = patch_nvhdmi_8ch_89 }, | 554 | .patch = patch_nvhdmi_8ch_89 }, |
555 | { .id = 0x10de000c, .name = "MCP89 HDMI", | ||
556 | .patch = patch_nvhdmi_8ch_89 }, | ||
557 | { .id = 0x10de000d, .name = "GT240 HDMI", | 557 | { .id = 0x10de000d, .name = "GT240 HDMI", |
558 | .patch = patch_nvhdmi_8ch_89 }, | 558 | .patch = patch_nvhdmi_8ch_89 }, |
559 | { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, | ||
560 | { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, | ||
559 | {} /* terminator */ | 561 | {} /* terminator */ |
560 | }; | 562 | }; |
561 | 563 | ||
@@ -564,11 +566,12 @@ MODULE_ALIAS("snd-hda-codec-id:10de0003"); | |||
564 | MODULE_ALIAS("snd-hda-codec-id:10de0005"); | 566 | MODULE_ALIAS("snd-hda-codec-id:10de0005"); |
565 | MODULE_ALIAS("snd-hda-codec-id:10de0006"); | 567 | MODULE_ALIAS("snd-hda-codec-id:10de0006"); |
566 | MODULE_ALIAS("snd-hda-codec-id:10de0007"); | 568 | MODULE_ALIAS("snd-hda-codec-id:10de0007"); |
567 | MODULE_ALIAS("snd-hda-codec-id:10de0067"); | 569 | MODULE_ALIAS("snd-hda-codec-id:10de000a"); |
568 | MODULE_ALIAS("snd-hda-codec-id:10de8001"); | ||
569 | MODULE_ALIAS("snd-hda-codec-id:10de000c"); | ||
570 | MODULE_ALIAS("snd-hda-codec-id:10de000b"); | 570 | MODULE_ALIAS("snd-hda-codec-id:10de000b"); |
571 | MODULE_ALIAS("snd-hda-codec-id:10de000c"); | ||
571 | MODULE_ALIAS("snd-hda-codec-id:10de000d"); | 572 | MODULE_ALIAS("snd-hda-codec-id:10de000d"); |
573 | MODULE_ALIAS("snd-hda-codec-id:10de0067"); | ||
574 | MODULE_ALIAS("snd-hda-codec-id:10de8001"); | ||
572 | 575 | ||
573 | MODULE_LICENSE("GPL"); | 576 | MODULE_LICENSE("GPL"); |
574 | MODULE_DESCRIPTION("NVIDIA HDMI HD-audio codec"); | 577 | MODULE_DESCRIPTION("NVIDIA HDMI HD-audio codec"); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 4ec57633af88..9a23444e9e7a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -2532,8 +2532,6 @@ static int alc_build_controls(struct hda_codec *codec) | |||
2532 | return err; | 2532 | return err; |
2533 | } | 2533 | } |
2534 | 2534 | ||
2535 | alc_free_kctls(codec); /* no longer needed */ | ||
2536 | |||
2537 | /* assign Capture Source enums to NID */ | 2535 | /* assign Capture Source enums to NID */ |
2538 | kctl = snd_hda_find_mixer_ctl(codec, "Capture Source"); | 2536 | kctl = snd_hda_find_mixer_ctl(codec, "Capture Source"); |
2539 | if (!kctl) | 2537 | if (!kctl) |
@@ -2602,6 +2600,9 @@ static int alc_build_controls(struct hda_codec *codec) | |||
2602 | } | 2600 | } |
2603 | } | 2601 | } |
2604 | } | 2602 | } |
2603 | |||
2604 | alc_free_kctls(codec); /* no longer needed */ | ||
2605 | |||
2605 | return 0; | 2606 | return 0; |
2606 | } | 2607 | } |
2607 | 2608 | ||
@@ -10042,8 +10043,11 @@ static void alc882_auto_set_output_and_unmute(struct hda_codec *codec, | |||
10042 | alc_set_pin_output(codec, nid, pin_type); | 10043 | alc_set_pin_output(codec, nid, pin_type); |
10043 | if (spec->multiout.dac_nids[dac_idx] == 0x25) | 10044 | if (spec->multiout.dac_nids[dac_idx] == 0x25) |
10044 | idx = 4; | 10045 | idx = 4; |
10045 | else | 10046 | else { |
10047 | if (spec->multiout.num_dacs >= dac_idx) | ||
10048 | return; | ||
10046 | idx = spec->multiout.dac_nids[dac_idx] - 2; | 10049 | idx = spec->multiout.dac_nids[dac_idx] - 2; |
10050 | } | ||
10047 | snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx); | 10051 | snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx); |
10048 | 10052 | ||
10049 | } | 10053 | } |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 8c416bb18a57..c4be3fab94e5 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -1730,6 +1730,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = { | |||
1730 | "HP HDX", STAC_HP_HDX), /* HDX16 */ | 1730 | "HP HDX", STAC_HP_HDX), /* HDX16 */ |
1731 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3620, | 1731 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3620, |
1732 | "HP dv6", STAC_HP_DV5), | 1732 | "HP dv6", STAC_HP_DV5), |
1733 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061, | ||
1734 | "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */ | ||
1733 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010, | 1735 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010, |
1734 | "HP", STAC_HP_DV5), | 1736 | "HP", STAC_HP_DV5), |
1735 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233, | 1737 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233, |
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c index f9f367d29a90..d50f1699ccb2 100644 --- a/sound/soc/codecs/tlv320dac33.c +++ b/sound/soc/codecs/tlv320dac33.c | |||
@@ -778,7 +778,7 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream) | |||
778 | if (dac33->fifo_mode) { | 778 | if (dac33->fifo_mode) { |
779 | /* Generic for all FIFO modes */ | 779 | /* Generic for all FIFO modes */ |
780 | /* 50-51 : ASRC Control registers */ | 780 | /* 50-51 : ASRC Control registers */ |
781 | dac33_write(codec, DAC33_ASRC_CTRL_A, (1 << 4)); /* div=2 */ | 781 | dac33_write(codec, DAC33_ASRC_CTRL_A, DAC33_SRCLKDIV(1)); |
782 | dac33_write(codec, DAC33_ASRC_CTRL_B, 1); /* ??? */ | 782 | dac33_write(codec, DAC33_ASRC_CTRL_B, 1); /* ??? */ |
783 | 783 | ||
784 | /* Write registers 0x34 and 0x35 (MSB, LSB) */ | 784 | /* Write registers 0x34 and 0x35 (MSB, LSB) */ |
@@ -1038,11 +1038,7 @@ static int dac33_set_dai_fmt(struct snd_soc_dai *codec_dai, | |||
1038 | case SND_SOC_DAIFMT_DSP_A: | 1038 | case SND_SOC_DAIFMT_DSP_A: |
1039 | aictrl_a |= DAC33_AFMT_DSP; | 1039 | aictrl_a |= DAC33_AFMT_DSP; |
1040 | aictrl_b &= ~DAC33_DATA_DELAY_MASK; | 1040 | aictrl_b &= ~DAC33_DATA_DELAY_MASK; |
1041 | aictrl_b |= DAC33_DATA_DELAY(1); /* 1 bit delay */ | 1041 | aictrl_b |= DAC33_DATA_DELAY(0); |
1042 | break; | ||
1043 | case SND_SOC_DAIFMT_DSP_B: | ||
1044 | aictrl_a |= DAC33_AFMT_DSP; | ||
1045 | aictrl_b &= ~DAC33_DATA_DELAY_MASK; /* No delay */ | ||
1046 | break; | 1042 | break; |
1047 | case SND_SOC_DAIFMT_RIGHT_J: | 1043 | case SND_SOC_DAIFMT_RIGHT_J: |
1048 | aictrl_a |= DAC33_AFMT_RIGHT_J; | 1044 | aictrl_a |= DAC33_AFMT_RIGHT_J; |
@@ -1066,7 +1062,7 @@ static void dac33_init_chip(struct snd_soc_codec *codec) | |||
1066 | { | 1062 | { |
1067 | /* 44-46: DAC Control Registers */ | 1063 | /* 44-46: DAC Control Registers */ |
1068 | /* A : DAC sample rate Fsref/1.5 */ | 1064 | /* A : DAC sample rate Fsref/1.5 */ |
1069 | dac33_write(codec, DAC33_DAC_CTRL_A, DAC33_DACRATE(1)); | 1065 | dac33_write(codec, DAC33_DAC_CTRL_A, DAC33_DACRATE(0)); |
1070 | /* B : DAC src=normal, not muted */ | 1066 | /* B : DAC src=normal, not muted */ |
1071 | dac33_write(codec, DAC33_DAC_CTRL_B, DAC33_DACSRCR_RIGHT | | 1067 | dac33_write(codec, DAC33_DAC_CTRL_B, DAC33_DACSRCR_RIGHT | |
1072 | DAC33_DACSRCL_LEFT); | 1068 | DAC33_DACSRCL_LEFT); |
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 0ad9f5d536c6..486bdd21a98a 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c | |||
@@ -74,7 +74,7 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec) | |||
74 | msleep(1); | 74 | msleep(1); |
75 | reg = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_0); | 75 | reg = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_0); |
76 | dev_dbg(codec->dev, "DC servo: %x\n", reg); | 76 | dev_dbg(codec->dev, "DC servo: %x\n", reg); |
77 | } while (reg & WM8993_DCS_DATAPATH_BUSY); | 77 | } while (reg & WM8993_DCS_DATAPATH_BUSY && count < 400); |
78 | 78 | ||
79 | if (reg & WM8993_DCS_DATAPATH_BUSY) | 79 | if (reg & WM8993_DCS_DATAPATH_BUSY) |
80 | dev_err(codec->dev, "Timed out waiting for DC Servo\n"); | 80 | dev_err(codec->dev, "Timed out waiting for DC Servo\n"); |
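wait_for_dc_servo() could spin indefinitely if WM8993_DCS_DATAPATH_BUSY never cleared; the loop now also gives up after 400 iterations (the count variable is maintained earlier in the function, outside this hunk), and the existing check after the loop reports the timeout instead of hanging. A generic bounded-poll sketch (hypothetical register helpers):

        unsigned int count = 0;
        unsigned int reg;

        do {
                count++;
                msleep(1);
                reg = read_status(dev);            /* hypothetical status read */
        } while ((reg & BUSY_BIT) && count < 400);

        if (reg & BUSY_BIT)
                dev_err(dev, "timed out waiting for the busy bit to clear\n");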
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig index c7d0fd9b7de8..7174b4c710de 100644 --- a/sound/soc/imx/Kconfig +++ b/sound/soc/imx/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config SND_IMX_SOC | 1 | config SND_IMX_SOC |
2 | tristate "SoC Audio for Freescale i.MX CPUs" | 2 | tristate "SoC Audio for Freescale i.MX CPUs" |
3 | depends on ARCH_MXC && BROKEN | 3 | depends on ARCH_MXC |
4 | select SND_PCM | 4 | select SND_PCM |
5 | select FIQ | 5 | select FIQ |
6 | select SND_SOC_AC97_BUS | 6 | select SND_SOC_AC97_BUS |
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig index 106674979b53..f07f6d8b93e1 100644 --- a/sound/soc/sh/Kconfig +++ b/sound/soc/sh/Kconfig | |||
@@ -32,6 +32,7 @@ config SND_SOC_SH4_SIU | |||
32 | select DMA_ENGINE | 32 | select DMA_ENGINE |
33 | select DMADEVICES | 33 | select DMADEVICES |
34 | select SH_DMAE | 34 | select SH_DMAE |
35 | select FW_LOADER | ||
35 | 36 | ||
36 | ## | 37 | ## |
37 | ## Boards | 38 | ## Boards |