486 files changed, 7888 insertions, 3098 deletions
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index f5395af88a41..40ada93b820a 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -234,7 +234,7 @@ process is as follows: | |||
234 | Linus, usually the patches that have already been included in the | 234 | Linus, usually the patches that have already been included in the |
235 | -next kernel for a few weeks. The preferred way to submit big changes | 235 | -next kernel for a few weeks. The preferred way to submit big changes |
236 | is using git (the kernel's source management tool, more information | 236 | is using git (the kernel's source management tool, more information |
237 | can be found at http://git.or.cz/) but plain patches are also just | 237 | can be found at http://git-scm.com/) but plain patches are also just |
238 | fine. | 238 | fine. |
239 | - After two weeks a -rc1 kernel is released it is now possible to push | 239 | - After two weeks a -rc1 kernel is released it is now possible to push |
240 | only patches that do not include new features that could affect the | 240 | only patches that do not include new features that could affect the |
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
index a6d32e65d222..a8536cb88091 100644
--- a/Documentation/RCU/NMI-RCU.txt
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -34,7 +34,7 @@ NMI handler. | |||
34 | cpu = smp_processor_id(); | 34 | cpu = smp_processor_id(); |
35 | ++nmi_count(cpu); | 35 | ++nmi_count(cpu); |
36 | 36 | ||
37 | if (!rcu_dereference(nmi_callback)(regs, cpu)) | 37 | if (!rcu_dereference_sched(nmi_callback)(regs, cpu)) |
38 | default_do_nmi(regs); | 38 | default_do_nmi(regs); |
39 | 39 | ||
40 | nmi_exit(); | 40 | nmi_exit(); |
@@ -47,12 +47,13 @@ function pointer. If this handler returns zero, do_nmi() invokes the | |||
47 | default_do_nmi() function to handle a machine-specific NMI. Finally, | 47 | default_do_nmi() function to handle a machine-specific NMI. Finally, |
48 | preemption is restored. | 48 | preemption is restored. |
49 | 49 | ||
50 | Strictly speaking, rcu_dereference() is not needed, since this code runs | 50 | In theory, rcu_dereference_sched() is not needed, since this code runs |
51 | only on i386, which does not need rcu_dereference() anyway. However, | 51 | only on i386, which in theory does not need rcu_dereference_sched() |
52 | it is a good documentation aid, particularly for anyone attempting to | 52 | anyway. However, in practice it is a good documentation aid, particularly |
53 | do something similar on Alpha. | 53 | for anyone attempting to do something similar on Alpha or on systems |
54 | with aggressive optimizing compilers. | ||
54 | 55 | ||
55 | Quick Quiz: Why might the rcu_dereference() be necessary on Alpha, | 56 | Quick Quiz: Why might the rcu_dereference_sched() be necessary on Alpha, |
56 | given that the code referenced by the pointer is read-only? | 57 | given that the code referenced by the pointer is read-only? |
57 | 58 | ||
58 | 59 | ||
@@ -99,17 +100,21 @@ invoke irq_enter() and irq_exit() on NMI entry and exit, respectively. | |||
99 | 100 | ||
100 | Answer to Quick Quiz | 101 | Answer to Quick Quiz |
101 | 102 | ||
102 | Why might the rcu_dereference() be necessary on Alpha, given | 103 | Why might the rcu_dereference_sched() be necessary on Alpha, given |
103 | that the code referenced by the pointer is read-only? | 104 | that the code referenced by the pointer is read-only? |
104 | 105 | ||
105 | Answer: The caller to set_nmi_callback() might well have | 106 | Answer: The caller to set_nmi_callback() might well have |
106 | initialized some data that is to be used by the | 107 | initialized some data that is to be used by the new NMI |
107 | new NMI handler. In this case, the rcu_dereference() | 108 | handler. In this case, the rcu_dereference_sched() would |
108 | would be needed, because otherwise a CPU that received | 109 | be needed, because otherwise a CPU that received an NMI |
109 | an NMI just after the new handler was set might see | 110 | just after the new handler was set might see the pointer |
110 | the pointer to the new NMI handler, but the old | 111 | to the new NMI handler, but the old pre-initialized |
111 | pre-initialized version of the handler's data. | 112 | version of the handler's data. |
112 | 113 | ||
113 | More important, the rcu_dereference() makes it clear | 114 | This same sad story can happen on other CPUs when using |
114 | to someone reading the code that the pointer is being | 115 | a compiler with aggressive pointer-value speculation |
115 | protected by RCU. | 116 | optimizations. |
117 | |||
118 | More important, the rcu_dereference_sched() makes it | ||
119 | clear to someone reading the code that the pointer is | ||
120 | being protected by RCU-sched. | ||
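As an aside, the full pattern that the updated NMI-RCU.txt example describes can be pieced together from the hunks above. The following consolidated sketch is not part of the patch; it simply re-assembles the document's own set_nmi_callback()/do_nmi() example around the new rcu_dereference_sched() call, and assumes the nmi_enter()/nmi_exit() and nmi_count() helpers that the text already references:

    static int (*nmi_callback)(struct pt_regs *regs, int cpu);

    void set_nmi_callback(int (*cb)(struct pt_regs *regs, int cpu))
    {
            /* Publish the new handler; pairs with rcu_dereference_sched(). */
            rcu_assign_pointer(nmi_callback, cb);
    }

    void do_nmi(struct pt_regs *regs, long error_code)
    {
            int cpu;

            nmi_enter();
            cpu = smp_processor_id();
            ++nmi_count(cpu);

            /* NMIs cannot be preempted, so RCU-sched covers this call. */
            if (!rcu_dereference_sched(nmi_callback)(regs, cpu))
                    default_do_nmi(regs);

            nmi_exit();
    }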
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index cbc180f90194..790d1a812376 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -260,7 +260,8 @@ over a rather long period of time, but improvements are always welcome! | |||
260 | The reason that it is permissible to use RCU list-traversal | 260 | The reason that it is permissible to use RCU list-traversal |
261 | primitives when the update-side lock is held is that doing so | 261 | primitives when the update-side lock is held is that doing so |
262 | can be quite helpful in reducing code bloat when common code is | 262 | can be quite helpful in reducing code bloat when common code is |
263 | shared between readers and updaters. | 263 | shared between readers and updaters. Additional primitives |
264 | are provided for this case, as discussed in lockdep.txt. | ||
264 | 265 | ||
265 | 10. Conversely, if you are in an RCU read-side critical section, | 266 | 10. Conversely, if you are in an RCU read-side critical section, |
266 | and you don't hold the appropriate update-side lock, you -must- | 267 | and you don't hold the appropriate update-side lock, you -must- |
@@ -344,8 +345,8 @@ over a rather long period of time, but improvements are always welcome! | |||
344 | requiring SRCU's read-side deadlock immunity or low read-side | 345 | requiring SRCU's read-side deadlock immunity or low read-side |
345 | realtime latency. | 346 | realtime latency. |
346 | 347 | ||
347 | Note that, rcu_assign_pointer() and rcu_dereference() relate to | 348 | Note that rcu_assign_pointer() relates to SRCU just as it does |
348 | SRCU just as they do to other forms of RCU. | 349 | to other forms of RCU. |
349 | 350 | ||
350 | 15. The whole point of call_rcu(), synchronize_rcu(), and friends | 351 | 15. The whole point of call_rcu(), synchronize_rcu(), and friends |
351 | is to wait until all pre-existing readers have finished before | 352 | is to wait until all pre-existing readers have finished before |
diff --git a/Documentation/RCU/lockdep.txt b/Documentation/RCU/lockdep.txt
index fe24b58627bd..d7a49b2f6994 100644
--- a/Documentation/RCU/lockdep.txt
+++ b/Documentation/RCU/lockdep.txt
@@ -32,9 +32,20 @@ checking of rcu_dereference() primitives: | |||
32 | srcu_dereference(p, sp): | 32 | srcu_dereference(p, sp): |
33 | Check for SRCU read-side critical section. | 33 | Check for SRCU read-side critical section. |
34 | rcu_dereference_check(p, c): | 34 | rcu_dereference_check(p, c): |
35 | Use explicit check expression "c". | 35 | Use explicit check expression "c". This is useful in |
36 | code that is invoked by both readers and updaters. | ||
36 | rcu_dereference_raw(p) | 37 | rcu_dereference_raw(p) |
37 | Don't check. (Use sparingly, if at all.) | 38 | Don't check. (Use sparingly, if at all.) |
39 | rcu_dereference_protected(p, c): | ||
40 | Use explicit check expression "c", and omit all barriers | ||
41 | and compiler constraints. This is useful when the data | ||
42 | structure cannot change, for example, in code that is | ||
43 | invoked only by updaters. | ||
44 | rcu_access_pointer(p): | ||
45 | Return the value of the pointer and omit all barriers, | ||
46 | but retain the compiler constraints that prevent duplicating | ||
47 | or coalescing. This is useful when testing the | ||
48 | value of the pointer itself, for example, against NULL. | ||
38 | 49 | ||
39 | The rcu_dereference_check() check expression can be any boolean | 50 | The rcu_dereference_check() check expression can be any boolean |
40 | expression, but would normally include one of the rcu_read_lock_held() | 51 | expression, but would normally include one of the rcu_read_lock_held() |
@@ -59,7 +70,20 @@ In case (1), the pointer is picked up in an RCU-safe manner for vanilla | |||
59 | RCU read-side critical sections, in case (2) the ->file_lock prevents | 70 | RCU read-side critical sections, in case (2) the ->file_lock prevents |
60 | any change from taking place, and finally, in case (3) the current task | 71 | any change from taking place, and finally, in case (3) the current task |
61 | is the only task accessing the file_struct, again preventing any change | 72 | is the only task accessing the file_struct, again preventing any change |
62 | from taking place. | 73 | from taking place. If the above statement was invoked only from updater |
74 | code, it could instead be written as follows: | ||
75 | |||
76 | file = rcu_dereference_protected(fdt->fd[fd], | ||
77 | lockdep_is_held(&files->file_lock) || | ||
78 | atomic_read(&files->count) == 1); | ||
79 | |||
80 | This would verify cases #2 and #3 above, and furthermore lockdep would | ||
81 | complain if this was used in an RCU read-side critical section unless one | ||
82 | of these two cases held. Because rcu_dereference_protected() omits all | ||
83 | barriers and compiler constraints, it generates better code than do the | ||
84 | other flavors of rcu_dereference(). On the other hand, it is illegal | ||
85 | to use rcu_dereference_protected() if either the RCU-protected pointer | ||
86 | or the RCU-protected data that it points to can change concurrently. | ||
63 | 87 | ||
64 | There are currently only "universal" versions of the rcu_assign_pointer() | 88 | There are currently only "universal" versions of the rcu_assign_pointer() |
65 | and RCU list-/tree-traversal primitives, which do not (yet) check for | 89 | and RCU list-/tree-traversal primitives, which do not (yet) check for |
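To make the distinction between the three lockdep-checked accessors concrete, here is a minimal sketch (not part of the patch) built around the document's own fdtable example; the wrapper functions are invented purely for illustration:

    /* Reachable from both readers and updaters: list every safe condition. */
    static struct file *fetch_file(struct files_struct *files, unsigned int fd)
    {
            struct fdtable *fdt = files_fdtable(files);

            return rcu_dereference_check(fdt->fd[fd],
                                         rcu_read_lock_held() ||
                                         lockdep_is_held(&files->file_lock) ||
                                         atomic_read(&files->count) == 1);
    }

    /* Update side only: no barriers are emitted, but lockdep still checks. */
    static struct file *fetch_file_locked(struct files_struct *files,
                                          unsigned int fd)
    {
            struct fdtable *fdt = files_fdtable(files);

            return rcu_dereference_protected(fdt->fd[fd],
                                             lockdep_is_held(&files->file_lock));
    }

    /* Only the pointer value is tested, never dereferenced. */
    static bool fd_is_open_sketch(struct fdtable *fdt, unsigned int fd)
    {
            return rcu_access_pointer(fdt->fd[fd]) != NULL;
    }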
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 1dc00ee97163..cfaac34c4557 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -840,6 +840,12 @@ SRCU: Initialization/cleanup | |||
840 | init_srcu_struct | 840 | init_srcu_struct |
841 | cleanup_srcu_struct | 841 | cleanup_srcu_struct |
842 | 842 | ||
843 | All: lockdep-checked RCU-protected pointer access | ||
844 | |||
845 | rcu_dereference_check | ||
846 | rcu_dereference_protected | ||
847 | rcu_access_pointer | ||
848 | |||
843 | See the comment headers in the source code (or the docbook generated | 849 | See the comment headers in the source code (or the docbook generated |
844 | from them) for more information. | 850 | from them) for more information. |
845 | 851 | ||
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index fd588ff0e296..a1ca5924faff 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -235,8 +235,7 @@ containing the following files describing that cgroup: | |||
235 | - cgroup.procs: list of tgids in the cgroup. This list is not | 235 | - cgroup.procs: list of tgids in the cgroup. This list is not |
236 | guaranteed to be sorted or free of duplicate tgids, and userspace | 236 | guaranteed to be sorted or free of duplicate tgids, and userspace |
237 | should sort/uniquify the list if this property is required. | 237 | should sort/uniquify the list if this property is required. |
238 | Writing a tgid into this file moves all threads with that tgid into | 238 | This is a read-only file, for now. |
239 | this cgroup. | ||
240 | - notify_on_release flag: run the release agent on exit? | 239 | - notify_on_release flag: run the release agent on exit? |
241 | - release_agent: the path to use for release notifications (this file | 240 | - release_agent: the path to use for release notifications (this file |
242 | exists in the top cgroup only) | 241 | exists in the top cgroup only) |
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 8490480ce432..c0fc1c75fd88 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -68,6 +68,22 @@ like: | |||
68 | SYN_MT_REPORT | 68 | SYN_MT_REPORT |
69 | SYN_REPORT | 69 | SYN_REPORT |
70 | 70 | ||
71 | Here is the sequence after lifting one of the fingers: | ||
72 | |||
73 | ABS_MT_POSITION_X | ||
74 | ABS_MT_POSITION_Y | ||
75 | SYN_MT_REPORT | ||
76 | SYN_REPORT | ||
77 | |||
78 | And here is the sequence after lifting the remaining finger: | ||
79 | |||
80 | SYN_MT_REPORT | ||
81 | SYN_REPORT | ||
82 | |||
83 | If the driver reports one of BTN_TOUCH or ABS_PRESSURE in addition to the | ||
84 | ABS_MT events, the last SYN_MT_REPORT event may be omitted. Otherwise, the | ||
85 | last SYN_REPORT will be dropped by the input core, resulting in no | ||
86 | zero-finger event reaching userland. | ||
71 | 87 | ||
72 | Event Semantics | 88 | Event Semantics |
73 | --------------- | 89 | --------------- |
@@ -217,11 +233,6 @@ where examples can be found. | |||
217 | difference between the contact position and the approaching tool position | 233 | difference between the contact position and the approaching tool position |
218 | could be used to derive tilt. | 234 | could be used to derive tilt. |
219 | [2] The list can of course be extended. | 235 | [2] The list can of course be extended. |
220 | [3] The multi-touch X driver is currently in the prototyping stage. At the | 236 | [3] Multitouch X driver project: http://bitmath.org/code/multitouch/. |
221 | time of writing (April 2009), the MT protocol is not yet merged, and the | ||
222 | prototype implements finger matching, basic mouse support and two-finger | ||
223 | scrolling. The project aims at improving the quality of current multi-touch | ||
224 | functionality available in the Synaptics X driver, and in addition | ||
225 | implement more advanced gestures. | ||
226 | [4] See the section on event computation. | 237 | [4] See the section on event computation. |
227 | [5] See the section on finger tracking. | 238 | [5] See the section on finger tracking. |
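The added paragraphs above spell out the event sequences for two, one and zero contacts. A minimal driver-side sketch of how such sequences are produced follows; it is not part of the patch, report_mt_contacts(), x[], y[] and n are invented names, and the standard input_report_abs()/input_mt_sync()/input_sync() helpers are assumed:

    static void report_mt_contacts(struct input_dev *dev,
                                   const int *x, const int *y, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    input_report_abs(dev, ABS_MT_POSITION_X, x[i]);
                    input_report_abs(dev, ABS_MT_POSITION_Y, y[i]);
                    input_mt_sync(dev);             /* SYN_MT_REPORT */
            }
            if (n == 0)
                    input_mt_sync(dev);             /* zero-finger SYN_MT_REPORT */
            input_sync(dev);                        /* SYN_REPORT */
    }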
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e4cbca58536c..839b21b0699a 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -320,11 +320,6 @@ and is between 256 and 4096 characters. It is defined in the file | |||
320 | amd_iommu= [HW,X86-84] | 320 | amd_iommu= [HW,X86-84] |
321 | Pass parameters to the AMD IOMMU driver in the system. | 321 | Pass parameters to the AMD IOMMU driver in the system. |
322 | Possible values are: | 322 | Possible values are: |
323 | isolate - enable device isolation (each device, as far | ||
324 | as possible, will get its own protection | ||
325 | domain) [default] | ||
326 | share - put every device behind one IOMMU into the | ||
327 | same protection domain | ||
328 | fullflush - enable flushing of IO/TLB entries when | 323 | fullflush - enable flushing of IO/TLB entries when |
329 | they are unmapped. Otherwise they are | 324 | they are unmapped. Otherwise they are |
330 | flushed before they will be reused, which | 325 | flushed before they will be reused, which |
@@ -1199,7 +1194,7 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1199 | 1194 | ||
1200 | libata.force= [LIBATA] Force configurations. The format is comma | 1195 | libata.force= [LIBATA] Force configurations. The format is comma |
1201 | separated list of "[ID:]VAL" where ID is | 1196 | separated list of "[ID:]VAL" where ID is |
1202 | PORT[:DEVICE]. PORT and DEVICE are decimal numbers | 1197 | PORT[.DEVICE]. PORT and DEVICE are decimal numbers |
1203 | matching port, link or device. Basically, it matches | 1198 | matching port, link or device. Basically, it matches |
1204 | the ATA ID string printed on console by libata. If | 1199 | the ATA ID string printed on console by libata. If |
1205 | the whole ID part is omitted, the last PORT and DEVICE | 1200 | the whole ID part is omitted, the last PORT and DEVICE |
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 0e58b4539176..e8c8f4f06c67 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -41,11 +41,12 @@ SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in | |||
41 | SOF_TIMESTAMPING_TX/RX determine how time stamps are generated. | 41 | SOF_TIMESTAMPING_TX/RX determine how time stamps are generated. |
42 | SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the | 42 | SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the |
43 | following control message: | 43 | following control message: |
44 | struct scm_timestamping { | 44 | |
45 | struct timespec systime; | 45 | struct scm_timestamping { |
46 | struct timespec hwtimetrans; | 46 | struct timespec systime; |
47 | struct timespec hwtimeraw; | 47 | struct timespec hwtimetrans; |
48 | }; | 48 | struct timespec hwtimeraw; |
49 | }; | ||
49 | 50 | ||
50 | recvmsg() can be used to get this control message for regular incoming | 51 | recvmsg() can be used to get this control message for regular incoming |
51 | packets. For send time stamps the outgoing packet is looped back to | 52 | packets. For send time stamps the outgoing packet is looped back to |
@@ -87,12 +88,13 @@ by the network device and will be empty without that support. | |||
87 | SIOCSHWTSTAMP: | 88 | SIOCSHWTSTAMP: |
88 | 89 | ||
89 | Hardware time stamping must also be initialized for each device driver | 90 | Hardware time stamping must also be initialized for each device driver |
90 | that is expected to do hardware time stamping. The parameter is: | 91 | that is expected to do hardware time stamping. The parameter is defined in |
92 | /include/linux/net_tstamp.h as: | ||
91 | 93 | ||
92 | struct hwtstamp_config { | 94 | struct hwtstamp_config { |
93 | int flags; /* no flags defined right now, must be zero */ | 95 | int flags; /* no flags defined right now, must be zero */ |
94 | int tx_type; /* HWTSTAMP_TX_* */ | 96 | int tx_type; /* HWTSTAMP_TX_* */ |
95 | int rx_filter; /* HWTSTAMP_FILTER_* */ | 97 | int rx_filter; /* HWTSTAMP_FILTER_* */ |
96 | }; | 98 | }; |
97 | 99 | ||
98 | Desired behavior is passed into the kernel and to a specific device by | 100 | Desired behavior is passed into the kernel and to a specific device by |
@@ -139,42 +141,56 @@ enum { | |||
139 | /* time stamp any incoming packet */ | 141 | /* time stamp any incoming packet */ |
140 | HWTSTAMP_FILTER_ALL, | 142 | HWTSTAMP_FILTER_ALL, |
141 | 143 | ||
142 | /* return value: time stamp all packets requested plus some others */ | 144 | /* return value: time stamp all packets requested plus some others */ |
143 | HWTSTAMP_FILTER_SOME, | 145 | HWTSTAMP_FILTER_SOME, |
144 | 146 | ||
145 | /* PTP v1, UDP, any kind of event packet */ | 147 | /* PTP v1, UDP, any kind of event packet */ |
146 | HWTSTAMP_FILTER_PTP_V1_L4_EVENT, | 148 | HWTSTAMP_FILTER_PTP_V1_L4_EVENT, |
147 | 149 | ||
148 | ... | 150 | /* for the complete list of values, please check |
151 | * the include file /include/linux/net_tstamp.h | ||
152 | */ | ||
149 | }; | 153 | }; |
150 | 154 | ||
151 | 155 | ||
152 | DEVICE IMPLEMENTATION | 156 | DEVICE IMPLEMENTATION |
153 | 157 | ||
154 | A driver which supports hardware time stamping must support the | 158 | A driver which supports hardware time stamping must support the |
155 | SIOCSHWTSTAMP ioctl. Time stamps for received packets must be stored | 159 | SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with |
156 | in the skb with skb_hwtstamp_set(). | 160 | the actual values as described in the section on SIOCSHWTSTAMP. |
161 | |||
162 | Time stamps for received packets must be stored in the skb. To get a pointer | ||
163 | to the shared time stamp structure of the skb call skb_hwtstamps(). Then | ||
164 | set the time stamps in the structure: | ||
165 | |||
166 | struct skb_shared_hwtstamps { | ||
167 | /* hardware time stamp transformed into duration | ||
168 | * since arbitrary point in time | ||
169 | */ | ||
170 | ktime_t hwtstamp; | ||
171 | ktime_t syststamp; /* hwtstamp transformed to system time base */ | ||
172 | }; | ||
157 | 173 | ||
158 | Time stamps for outgoing packets are to be generated as follows: | 174 | Time stamps for outgoing packets are to be generated as follows: |
159 | - In hard_start_xmit(), check if skb_hwtstamp_check_tx_hardware() | 175 | - In hard_start_xmit(), check if skb_tx(skb)->hardware is set non-zero. |
160 | returns non-zero. If yes, then the driver is expected | 176 | If yes, then the driver is expected to do hardware time stamping. |
161 | to do hardware time stamping. | ||
162 | - If this is possible for the skb and requested, then declare | 177 | - If this is possible for the skb and requested, then declare |
163 | that the driver is doing the time stamping by calling | 178 | that the driver is doing the time stamping by setting the field |
164 | skb_hwtstamp_tx_in_progress(). A driver not supporting | 179 | skb_tx(skb)->in_progress non-zero. You might want to keep a pointer |
165 | hardware time stamping doesn't do that. A driver must never | 180 | to the associated skb for the next step and not free the skb. A driver |
166 | touch sk_buff::tstamp! It is used to store how time stamping | 181 | not supporting hardware time stamping doesn't do that. A driver must |
167 | for an outgoing packets is to be done. | 182 | never touch sk_buff::tstamp! It is used to store software generated |
183 | time stamps by the network subsystem. | ||
168 | - As soon as the driver has sent the packet and/or obtained a | 184 | - As soon as the driver has sent the packet and/or obtained a |
169 | hardware time stamp for it, it passes the time stamp back by | 185 | hardware time stamp for it, it passes the time stamp back by |
170 | calling skb_hwtstamp_tx() with the original skb, the raw | 186 | calling skb_hwtstamp_tx() with the original skb, the raw |
171 | hardware time stamp and a handle to the device (necessary | 187 | hardware time stamp. skb_hwtstamp_tx() clones the original skb and |
172 | to convert the hardware time stamp to system time). If obtaining | 188 | adds the timestamps, therefore the original skb has to be freed now. |
173 | the hardware time stamp somehow fails, then the driver should | 189 | If obtaining the hardware time stamp somehow fails, then the driver |
174 | not fall back to software time stamping. The rationale is that | 190 | should not fall back to software time stamping. The rationale is that |
175 | this would occur at a later time in the processing pipeline | 191 | this would occur at a later time in the processing pipeline than other |
176 | than other software time stamping and therefore could lead | 192 | software time stamping and therefore could lead to unexpected deltas |
177 | to unexpected deltas between time stamps. | 193 | between time stamps. |
178 | - If the driver did not call skb_hwtstamp_tx_in_progress(), then | 194 | - If the driver did not set skb_tx(skb)->in_progress, then |
179 | dev_hard_start_xmit() checks whether software time stamping | 195 | dev_hard_start_xmit() checks whether software time stamping |
180 | is wanted as fallback and potentially generates the time stamp. | 196 | is wanted as fallback and potentially generates the time stamp. |
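The driver-side rules above can be illustrated with a short sketch of the transmit path. It is not part of the patch; it assumes the skb_tx()/skb_hwtstamp_tx() interface of this kernel generation as described in the text, and foo_priv, tstamp_skb and foo_counter_to_ns() are invented names:

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            if (skb_tx(skb)->hardware) {
                    /* Declare that the driver will supply the TX time stamp. */
                    skb_tx(skb)->in_progress = 1;
                    priv->tstamp_skb = skb_get(skb);   /* keep skb for later */
            }
            /* ... hand the packet to the hardware as usual ... */
            return NETDEV_TX_OK;
    }

    /* Called once the hardware reports the raw TX time stamp. */
    static void foo_tx_tstamp_done(struct foo_priv *priv, u64 raw)
    {
            struct skb_shared_hwtstamps hwts = {
                    .hwtstamp = ns_to_ktime(foo_counter_to_ns(raw)),
            };

            skb_hwtstamp_tx(priv->tstamp_skb, &hwts);  /* clones skb, adds stamp */
            dev_kfree_skb_any(priv->tstamp_skb);       /* original must be freed */
            priv->tstamp_skb = NULL;
    }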
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index 5effa5bd993b..e213f45cf9d7 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -18,16 +18,15 @@ Rules on what kind of patches are accepted, and which ones are not, into the | |||
18 | - It cannot contain any "trivial" fixes in it (spelling changes, | 18 | - It cannot contain any "trivial" fixes in it (spelling changes, |
19 | whitespace cleanups, etc). | 19 | whitespace cleanups, etc). |
20 | - It must follow the Documentation/SubmittingPatches rules. | 20 | - It must follow the Documentation/SubmittingPatches rules. |
21 | - It or an equivalent fix must already exist in Linus' tree. Quote the | 21 | - It or an equivalent fix must already exist in Linus' tree (upstream). |
22 | respective commit ID in Linus' tree in your patch submission to -stable. | ||
23 | 22 | ||
24 | 23 | ||
25 | Procedure for submitting patches to the -stable tree: | 24 | Procedure for submitting patches to the -stable tree: |
26 | 25 | ||
27 | - Send the patch, after verifying that it follows the above rules, to | 26 | - Send the patch, after verifying that it follows the above rules, to |
28 | stable@kernel.org. | 27 | stable@kernel.org. You must note the upstream commit ID in the changelog |
29 | - To have the patch automatically included in the stable tree, add the | 28 | of your submission. |
30 | the tag | 29 | - To have the patch automatically included in the stable tree, add the tag |
31 | Cc: stable@kernel.org | 30 | Cc: stable@kernel.org |
32 | in the sign-off area. Once the patch is merged it will be applied to | 31 | in the sign-off area. Once the patch is merged it will be applied to |
33 | the stable tree without anything else needing to be done by the author | 32 | the stable tree without anything else needing to be done by the author |
diff --git a/MAINTAINERS b/MAINTAINERS
index 5b422908d0f3..183887518fe3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -485,8 +485,8 @@ S: Maintained | |||
485 | F: drivers/input/mouse/bcm5974.c | 485 | F: drivers/input/mouse/bcm5974.c |
486 | 486 | ||
487 | APPLE SMC DRIVER | 487 | APPLE SMC DRIVER |
488 | M: Nicolas Boichat <nicolas@boichat.ch> | 488 | M: Henrik Rydberg <rydberg@euromail.se> |
489 | L: mactel-linux-devel@lists.sourceforge.net | 489 | L: lm-sensors@lm-sensors.org |
490 | S: Maintained | 490 | S: Maintained |
491 | F: drivers/hwmon/applesmc.c | 491 | F: drivers/hwmon/applesmc.c |
492 | 492 | ||
@@ -1960,7 +1960,7 @@ F: lib/kobj* | |||
1960 | 1960 | ||
1961 | DRM DRIVERS | 1961 | DRM DRIVERS |
1962 | M: David Airlie <airlied@linux.ie> | 1962 | M: David Airlie <airlied@linux.ie> |
1963 | L: dri-devel@lists.sourceforge.net | 1963 | L: dri-devel@lists.freedesktop.org |
1964 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git | 1964 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git |
1965 | S: Maintained | 1965 | S: Maintained |
1966 | F: drivers/gpu/drm/ | 1966 | F: drivers/gpu/drm/ |
@@ -4791,12 +4791,11 @@ F: drivers/s390/crypto/ | |||
4791 | 4791 | ||
4792 | S390 ZFCP DRIVER | 4792 | S390 ZFCP DRIVER |
4793 | M: Christof Schmitt <christof.schmitt@de.ibm.com> | 4793 | M: Christof Schmitt <christof.schmitt@de.ibm.com> |
4794 | M: Martin Peschke <mp3@de.ibm.com> | 4794 | M: Swen Schillig <swen@vnet.ibm.com> |
4795 | M: linux390@de.ibm.com | 4795 | M: linux390@de.ibm.com |
4796 | L: linux-s390@vger.kernel.org | 4796 | L: linux-s390@vger.kernel.org |
4797 | W: http://www.ibm.com/developerworks/linux/linux390/ | 4797 | W: http://www.ibm.com/developerworks/linux/linux390/ |
4798 | S: Supported | 4798 | S: Supported |
4799 | F: Documentation/s390/zfcpdump.txt | ||
4800 | F: drivers/s390/scsi/zfcp_* | 4799 | F: drivers/s390/scsi/zfcp_* |
4801 | 4800 | ||
4802 | S390 IUCV NETWORK LAYER | 4801 | S390 IUCV NETWORK LAYER |
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 34 | 3 | SUBLEVEL = 34 |
4 | EXTRAVERSION = -rc4 | 4 | EXTRAVERSION = -rc5 |
5 | NAME = Man-Eating Seals of Antiquity | 5 | NAME = Sheep on Meth |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
8 | # To see a list of typical targets execute "make help" | 8 | # To see a list of typical targets execute "make help" |
diff --git a/arch/arm/configs/n8x0_defconfig b/arch/arm/configs/n8x0_defconfig
index 216ad00948af..9405e32783de 100644
--- a/arch/arm/configs/n8x0_defconfig
+++ b/arch/arm/configs/n8x0_defconfig
@@ -1058,7 +1058,6 @@ CONFIG_JFFS2_CMODE_PRIORITY=y | |||
1058 | # CONFIG_ROMFS_FS is not set | 1058 | # CONFIG_ROMFS_FS is not set |
1059 | # CONFIG_SYSV_FS is not set | 1059 | # CONFIG_SYSV_FS is not set |
1060 | # CONFIG_UFS_FS is not set | 1060 | # CONFIG_UFS_FS is not set |
1061 | # CONFIG_NILFS2_FS is not set | ||
1062 | CONFIG_NETWORK_FILESYSTEMS=y | 1061 | CONFIG_NETWORK_FILESYSTEMS=y |
1063 | # CONFIG_NFS_FS is not set | 1062 | # CONFIG_NFS_FS is not set |
1064 | # CONFIG_NFSD is not set | 1063 | # CONFIG_NFSD is not set |
diff --git a/arch/arm/configs/omap_zoom2_defconfig b/arch/arm/configs/omap_zoom2_defconfig
index f5c6e11cf189..881faea03d79 100644
--- a/arch/arm/configs/omap_zoom2_defconfig
+++ b/arch/arm/configs/omap_zoom2_defconfig
@@ -661,7 +661,7 @@ CONFIG_DEVKMEM=y | |||
661 | CONFIG_SERIAL_8250=y | 661 | CONFIG_SERIAL_8250=y |
662 | CONFIG_SERIAL_8250_CONSOLE=y | 662 | CONFIG_SERIAL_8250_CONSOLE=y |
663 | CONFIG_SERIAL_8250_NR_UARTS=32 | 663 | CONFIG_SERIAL_8250_NR_UARTS=32 |
664 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | 664 | CONFIG_SERIAL_8250_RUNTIME_UARTS=1 |
665 | CONFIG_SERIAL_8250_EXTENDED=y | 665 | CONFIG_SERIAL_8250_EXTENDED=y |
666 | CONFIG_SERIAL_8250_MANY_PORTS=y | 666 | CONFIG_SERIAL_8250_MANY_PORTS=y |
667 | CONFIG_SERIAL_8250_SHARE_IRQ=y | 667 | CONFIG_SERIAL_8250_SHARE_IRQ=y |
diff --git a/arch/arm/configs/omap_zoom3_defconfig b/arch/arm/configs/omap_zoom3_defconfig
index ea9a5012d332..5e55b550a408 100644
--- a/arch/arm/configs/omap_zoom3_defconfig
+++ b/arch/arm/configs/omap_zoom3_defconfig
@@ -680,7 +680,7 @@ CONFIG_DEVKMEM=y | |||
680 | CONFIG_SERIAL_8250=y | 680 | CONFIG_SERIAL_8250=y |
681 | CONFIG_SERIAL_8250_CONSOLE=y | 681 | CONFIG_SERIAL_8250_CONSOLE=y |
682 | CONFIG_SERIAL_8250_NR_UARTS=32 | 682 | CONFIG_SERIAL_8250_NR_UARTS=32 |
683 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | 683 | CONFIG_SERIAL_8250_RUNTIME_UARTS=1 |
684 | CONFIG_SERIAL_8250_EXTENDED=y | 684 | CONFIG_SERIAL_8250_EXTENDED=y |
685 | CONFIG_SERIAL_8250_MANY_PORTS=y | 685 | CONFIG_SERIAL_8250_MANY_PORTS=y |
686 | CONFIG_SERIAL_8250_SHARE_IRQ=y | 686 | CONFIG_SERIAL_8250_SHARE_IRQ=y |
diff --git a/arch/arm/configs/rx51_defconfig b/arch/arm/configs/rx51_defconfig
index 45135ffadc57..473f9e13f08b 100644
--- a/arch/arm/configs/rx51_defconfig
+++ b/arch/arm/configs/rx51_defconfig
@@ -59,8 +59,6 @@ CONFIG_FAIR_GROUP_SCHED=y | |||
59 | CONFIG_USER_SCHED=y | 59 | CONFIG_USER_SCHED=y |
60 | # CONFIG_CGROUP_SCHED is not set | 60 | # CONFIG_CGROUP_SCHED is not set |
61 | # CONFIG_CGROUPS is not set | 61 | # CONFIG_CGROUPS is not set |
62 | CONFIG_SYSFS_DEPRECATED=y | ||
63 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
64 | # CONFIG_RELAY is not set | 62 | # CONFIG_RELAY is not set |
65 | # CONFIG_NAMESPACES is not set | 63 | # CONFIG_NAMESPACES is not set |
66 | CONFIG_BLK_DEV_INITRD=y | 64 | CONFIG_BLK_DEV_INITRD=y |
@@ -480,7 +478,6 @@ CONFIG_BT_HIDP=m | |||
480 | # CONFIG_BT_HCIBFUSB is not set | 478 | # CONFIG_BT_HCIBFUSB is not set |
481 | # CONFIG_BT_HCIVHCI is not set | 479 | # CONFIG_BT_HCIVHCI is not set |
482 | # CONFIG_AF_RXRPC is not set | 480 | # CONFIG_AF_RXRPC is not set |
483 | # CONFIG_PHONET is not set | ||
484 | CONFIG_WIRELESS=y | 481 | CONFIG_WIRELESS=y |
485 | CONFIG_CFG80211=y | 482 | CONFIG_CFG80211=y |
486 | # CONFIG_CFG80211_REG_DEBUG is not set | 483 | # CONFIG_CFG80211_REG_DEBUG is not set |
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00600b4..feb988a7ec37 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@ | |||
11 | 11 | ||
12 | #define kmap_prot PAGE_KERNEL | 12 | #define kmap_prot PAGE_KERNEL |
13 | 13 | ||
14 | #define flush_cache_kmaps() flush_cache_all() | 14 | #define flush_cache_kmaps() \ |
15 | do { \ | ||
16 | if (cache_is_vivt()) \ | ||
17 | flush_cache_all(); \ | ||
18 | } while (0) | ||
15 | 19 | ||
16 | extern pte_t *pkmap_page_table; | 20 | extern pte_t *pkmap_page_table; |
17 | 21 | ||
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page); | |||
21 | extern void *kmap_high_get(struct page *page); | 25 | extern void *kmap_high_get(struct page *page); |
22 | extern void kunmap_high(struct page *page); | 26 | extern void kunmap_high(struct page *page); |
23 | 27 | ||
28 | extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte); | ||
29 | extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte); | ||
30 | |||
31 | /* | ||
32 | * The following functions are already defined by <linux/highmem.h> | ||
33 | * when CONFIG_HIGHMEM is not set. | ||
34 | */ | ||
35 | #ifdef CONFIG_HIGHMEM | ||
24 | extern void *kmap(struct page *page); | 36 | extern void *kmap(struct page *page); |
25 | extern void kunmap(struct page *page); | 37 | extern void kunmap(struct page *page); |
26 | extern void *kmap_atomic(struct page *page, enum km_type type); | 38 | extern void *kmap_atomic(struct page *page, enum km_type type); |
27 | extern void kunmap_atomic(void *kvaddr, enum km_type type); | 39 | extern void kunmap_atomic(void *kvaddr, enum km_type type); |
28 | extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); | 40 | extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); |
29 | extern struct page *kmap_atomic_to_page(const void *ptr); | 41 | extern struct page *kmap_atomic_to_page(const void *ptr); |
42 | #endif | ||
30 | 43 | ||
31 | #endif | 44 | #endif |
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949a5189..c4b2ea3fbe42 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type { | |||
18 | KM_IRQ1, | 18 | KM_IRQ1, |
19 | KM_SOFTIRQ0, | 19 | KM_SOFTIRQ0, |
20 | KM_SOFTIRQ1, | 20 | KM_SOFTIRQ1, |
21 | KM_L1_CACHE, | ||
21 | KM_L2_CACHE, | 22 | KM_L2_CACHE, |
22 | KM_TYPE_NR | 23 | KM_TYPE_NR |
23 | }; | 24 | }; |
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index bf65e9f4525d..47f023aa8495 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -59,23 +59,22 @@ struct iwmmxt_sigframe { | |||
59 | #endif /* CONFIG_IWMMXT */ | 59 | #endif /* CONFIG_IWMMXT */ |
60 | 60 | ||
61 | #ifdef CONFIG_VFP | 61 | #ifdef CONFIG_VFP |
62 | #if __LINUX_ARM_ARCH__ < 6 | ||
63 | /* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra | ||
64 | * word after the registers, and a word of padding at the end for | ||
65 | * alignment. */ | ||
66 | #define VFP_MAGIC 0x56465001 | 62 | #define VFP_MAGIC 0x56465001 |
67 | #define VFP_STORAGE_SIZE 152 | ||
68 | #else | ||
69 | #define VFP_MAGIC 0x56465002 | ||
70 | #define VFP_STORAGE_SIZE 144 | ||
71 | #endif | ||
72 | 63 | ||
73 | struct vfp_sigframe | 64 | struct vfp_sigframe |
74 | { | 65 | { |
75 | unsigned long magic; | 66 | unsigned long magic; |
76 | unsigned long size; | 67 | unsigned long size; |
77 | union vfp_state storage; | 68 | struct user_vfp ufp; |
78 | }; | 69 | struct user_vfp_exc ufp_exc; |
70 | } __attribute__((__aligned__(8))); | ||
71 | |||
72 | /* | ||
73 | * 8 byte for magic and size, 264 byte for ufp, 12 bytes for ufp_exc, | ||
74 | * 4 bytes padding. | ||
75 | */ | ||
76 | #define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe) | ||
77 | |||
79 | #endif /* CONFIG_VFP */ | 78 | #endif /* CONFIG_VFP */ |
80 | 79 | ||
81 | /* | 80 | /* |
@@ -91,7 +90,7 @@ struct aux_sigframe { | |||
91 | #ifdef CONFIG_IWMMXT | 90 | #ifdef CONFIG_IWMMXT |
92 | struct iwmmxt_sigframe iwmmxt; | 91 | struct iwmmxt_sigframe iwmmxt; |
93 | #endif | 92 | #endif |
94 | #if 0 && defined CONFIG_VFP /* Not yet saved. */ | 93 | #ifdef CONFIG_VFP |
95 | struct vfp_sigframe vfp; | 94 | struct vfp_sigframe vfp; |
96 | #endif | 95 | #endif |
97 | /* Something that isn't a valid magic number for any coprocessor. */ | 96 | /* Something that isn't a valid magic number for any coprocessor. */ |
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index df95e050f9dd..05ac4b06876a 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -83,11 +83,21 @@ struct user{ | |||
83 | 83 | ||
84 | /* | 84 | /* |
85 | * User specific VFP registers. If only VFPv2 is present, registers 16 to 31 | 85 | * User specific VFP registers. If only VFPv2 is present, registers 16 to 31 |
86 | * are ignored by the ptrace system call. | 86 | * are ignored by the ptrace system call and the signal handler. |
87 | */ | 87 | */ |
88 | struct user_vfp { | 88 | struct user_vfp { |
89 | unsigned long long fpregs[32]; | 89 | unsigned long long fpregs[32]; |
90 | unsigned long fpscr; | 90 | unsigned long fpscr; |
91 | }; | 91 | }; |
92 | 92 | ||
93 | /* | ||
94 | * VFP exception registers exposed to user space during signal delivery. | ||
95 | * Fields not relevant to the current VFP architecture are ignored. | ||
96 | */ | ||
97 | struct user_vfp_exc { | ||
98 | unsigned long fpexc; | ||
99 | unsigned long fpinst; | ||
100 | unsigned long fpinst2; | ||
101 | }; | ||
102 | |||
93 | #endif /* _ARM_USER_H */ | 103 | #endif /* _ARM_USER_H */ |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index e7714f367eb8..907d5a620bca 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/ucontext.h> | 19 | #include <asm/ucontext.h> |
20 | #include <asm/unistd.h> | 20 | #include <asm/unistd.h> |
21 | #include <asm/vfp.h> | ||
21 | 22 | ||
22 | #include "ptrace.h" | 23 | #include "ptrace.h" |
23 | #include "signal.h" | 24 | #include "signal.h" |
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) | |||
175 | 176 | ||
176 | #endif | 177 | #endif |
177 | 178 | ||
179 | #ifdef CONFIG_VFP | ||
180 | |||
181 | static int preserve_vfp_context(struct vfp_sigframe __user *frame) | ||
182 | { | ||
183 | struct thread_info *thread = current_thread_info(); | ||
184 | struct vfp_hard_struct *h = &thread->vfpstate.hard; | ||
185 | const unsigned long magic = VFP_MAGIC; | ||
186 | const unsigned long size = VFP_STORAGE_SIZE; | ||
187 | int err = 0; | ||
188 | |||
189 | vfp_sync_hwstate(thread); | ||
190 | __put_user_error(magic, &frame->magic, err); | ||
191 | __put_user_error(size, &frame->size, err); | ||
192 | |||
193 | /* | ||
194 | * Copy the floating point registers. There can be unused | ||
195 | * registers see asm/hwcap.h for details. | ||
196 | */ | ||
197 | err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs, | ||
198 | sizeof(h->fpregs)); | ||
199 | /* | ||
200 | * Copy the status and control register. | ||
201 | */ | ||
202 | __put_user_error(h->fpscr, &frame->ufp.fpscr, err); | ||
203 | |||
204 | /* | ||
205 | * Copy the exception registers. | ||
206 | */ | ||
207 | __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err); | ||
208 | __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); | ||
209 | __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); | ||
210 | |||
211 | return err ? -EFAULT : 0; | ||
212 | } | ||
213 | |||
214 | static int restore_vfp_context(struct vfp_sigframe __user *frame) | ||
215 | { | ||
216 | struct thread_info *thread = current_thread_info(); | ||
217 | struct vfp_hard_struct *h = &thread->vfpstate.hard; | ||
218 | unsigned long magic; | ||
219 | unsigned long size; | ||
220 | unsigned long fpexc; | ||
221 | int err = 0; | ||
222 | |||
223 | __get_user_error(magic, &frame->magic, err); | ||
224 | __get_user_error(size, &frame->size, err); | ||
225 | |||
226 | if (err) | ||
227 | return -EFAULT; | ||
228 | if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) | ||
229 | return -EINVAL; | ||
230 | |||
231 | /* | ||
232 | * Copy the floating point registers. There can be unused | ||
233 | * registers see asm/hwcap.h for details. | ||
234 | */ | ||
235 | err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs, | ||
236 | sizeof(h->fpregs)); | ||
237 | /* | ||
238 | * Copy the status and control register. | ||
239 | */ | ||
240 | __get_user_error(h->fpscr, &frame->ufp.fpscr, err); | ||
241 | |||
242 | /* | ||
243 | * Sanitise and restore the exception registers. | ||
244 | */ | ||
245 | __get_user_error(fpexc, &frame->ufp_exc.fpexc, err); | ||
246 | /* Ensure the VFP is enabled. */ | ||
247 | fpexc |= FPEXC_EN; | ||
248 | /* Ensure FPINST2 is invalid and the exception flag is cleared. */ | ||
249 | fpexc &= ~(FPEXC_EX | FPEXC_FP2V); | ||
250 | h->fpexc = fpexc; | ||
251 | |||
252 | __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); | ||
253 | __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); | ||
254 | |||
255 | if (!err) | ||
256 | vfp_flush_hwstate(thread); | ||
257 | |||
258 | return err ? -EFAULT : 0; | ||
259 | } | ||
260 | |||
261 | #endif | ||
262 | |||
178 | /* | 263 | /* |
179 | * Do a signal return; undo the signal stack. These are aligned to 64-bit. | 264 | * Do a signal return; undo the signal stack. These are aligned to 64-bit. |
180 | */ | 265 | */ |
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) | |||
233 | err |= restore_iwmmxt_context(&aux->iwmmxt); | 318 | err |= restore_iwmmxt_context(&aux->iwmmxt); |
234 | #endif | 319 | #endif |
235 | #ifdef CONFIG_VFP | 320 | #ifdef CONFIG_VFP |
236 | // if (err == 0) | 321 | if (err == 0) |
237 | // err |= vfp_restore_state(&sf->aux.vfp); | 322 | err |= restore_vfp_context(&aux->vfp); |
238 | #endif | 323 | #endif |
239 | 324 | ||
240 | return err; | 325 | return err; |
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) | |||
348 | err |= preserve_iwmmxt_context(&aux->iwmmxt); | 433 | err |= preserve_iwmmxt_context(&aux->iwmmxt); |
349 | #endif | 434 | #endif |
350 | #ifdef CONFIG_VFP | 435 | #ifdef CONFIG_VFP |
351 | // if (err == 0) | 436 | if (err == 0) |
352 | // err |= vfp_save_state(&sf->aux.vfp); | 437 | err |= preserve_vfp_context(&aux->vfp); |
353 | #endif | 438 | #endif |
354 | __put_user_error(0, &aux->end_magic, err); | 439 | __put_user_error(0, &aux->end_magic, err); |
355 | 440 | ||
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 027dd570dcc3..d4004557532a 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -16,8 +16,8 @@ obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_d | |||
16 | obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o | 16 | obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o |
17 | obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o | 17 | obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o |
18 | obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o | 18 | obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o |
19 | obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o | 19 | obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o |
20 | obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o | 20 | obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o |
21 | obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o | 21 | obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o |
22 | obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o | 22 | obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o |
23 | obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o | 23 | obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o |
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
index 9fcbd6ca0090..9c5b48e68a71 100644
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ b/arch/arm/mach-at91/pm_slowclock.S
@@ -175,8 +175,6 @@ ENTRY(at91_slow_clock) | |||
175 | orr r3, r3, #(1 << 29) /* bit 29 always set */ | 175 | orr r3, r3, #(1 << 29) /* bit 29 always set */ |
176 | str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] | 176 | str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] |
177 | 177 | ||
178 | wait_pllalock | ||
179 | |||
180 | /* Save PLLB setting and disable it */ | 178 | /* Save PLLB setting and disable it */ |
181 | ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] | 179 | ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] |
182 | str r3, .saved_pllbr | 180 | str r3, .saved_pllbr |
@@ -184,8 +182,6 @@ ENTRY(at91_slow_clock) | |||
184 | mov r3, #AT91_PMC_PLLCOUNT | 182 | mov r3, #AT91_PMC_PLLCOUNT |
185 | str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] | 183 | str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] |
186 | 184 | ||
187 | wait_pllblock | ||
188 | |||
189 | /* Turn off the main oscillator */ | 185 | /* Turn off the main oscillator */ |
190 | ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)] | 186 | ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)] |
191 | bic r3, r3, #AT91_PMC_MOSCEN | 187 | bic r3, r3, #AT91_PMC_MOSCEN |
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 9ad118563f7d..20cfbcc6c60c 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -68,12 +68,6 @@ struct sys_timer omap_timer; | |||
68 | * --------------------------------------------------------------------------- | 68 | * --------------------------------------------------------------------------- |
69 | */ | 69 | */ |
70 | 70 | ||
71 | #if defined(CONFIG_ARCH_OMAP16XX) | ||
72 | #define TIMER_32K_SYNCHRONIZED 0xfffbc410 | ||
73 | #else | ||
74 | #error OMAP 32KHz timer does not currently work on 15XX! | ||
75 | #endif | ||
76 | |||
77 | /* 16xx specific defines */ | 71 | /* 16xx specific defines */ |
78 | #define OMAP1_32K_TIMER_BASE 0xfffb9000 | 72 | #define OMAP1_32K_TIMER_BASE 0xfffb9000 |
79 | #define OMAP1_32K_TIMER_CR 0x08 | 73 | #define OMAP1_32K_TIMER_CR 0x08 |
@@ -150,15 +144,6 @@ static struct clock_event_device clockevent_32k_timer = { | |||
150 | .set_mode = omap_32k_timer_set_mode, | 144 | .set_mode = omap_32k_timer_set_mode, |
151 | }; | 145 | }; |
152 | 146 | ||
153 | /* | ||
154 | * The 32KHz synchronized timer is an additional timer on 16xx. | ||
155 | * It is always running. | ||
156 | */ | ||
157 | static inline unsigned long omap_32k_sync_timer_read(void) | ||
158 | { | ||
159 | return omap_readl(TIMER_32K_SYNCHRONIZED); | ||
160 | } | ||
161 | |||
162 | static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id) | 147 | static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id) |
163 | { | 148 | { |
164 | struct clock_event_device *evt = &clockevent_32k_timer; | 149 | struct clock_event_device *evt = &clockevent_32k_timer; |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index a8a3d1e23e26..2455dcc744a0 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -59,8 +59,10 @@ config MACH_OMAP3_BEAGLE | |||
59 | select OMAP_PACKAGE_CBB | 59 | select OMAP_PACKAGE_CBB |
60 | 60 | ||
61 | config MACH_DEVKIT8000 | 61 | config MACH_DEVKIT8000 |
62 | bool "DEVKIT8000 board" | 62 | bool "DEVKIT8000 board" |
63 | depends on ARCH_OMAP3 | 63 | depends on ARCH_OMAP3 |
64 | select OMAP_PACKAGE_CUS | ||
65 | select OMAP_MUX | ||
64 | 66 | ||
65 | config MACH_OMAP_LDP | 67 | config MACH_OMAP_LDP |
66 | bool "OMAP3 LDP board" | 68 | bool "OMAP3 LDP board" |
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index a0a2a113465c..504d2bd222fe 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -96,6 +96,7 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
96 | static void __init omap_sdp_init(void) | 96 | static void __init omap_sdp_init(void) |
97 | { | 97 | { |
98 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBP); | 98 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBP); |
99 | omap_serial_init(); | ||
99 | zoom_peripherals_init(); | 100 | zoom_peripherals_init(); |
100 | board_smc91x_init(); | 101 | board_smc91x_init(); |
101 | enable_board_wakeup_source(); | 102 | enable_board_wakeup_source(); |
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 6ae880585d54..c1c4389fbd8f 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -294,9 +294,9 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
294 | 294 | ||
295 | static void __init am3517_evm_init(void) | 295 | static void __init am3517_evm_init(void) |
296 | { | 296 | { |
297 | am3517_evm_i2c_init(); | ||
298 | |||
299 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); | 297 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); |
298 | |||
299 | am3517_evm_i2c_init(); | ||
300 | platform_add_devices(am3517_evm_devices, | 300 | platform_add_devices(am3517_evm_devices, |
301 | ARRAY_SIZE(am3517_evm_devices)); | 301 | ARRAY_SIZE(am3517_evm_devices)); |
302 | 302 | ||
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 5bfc13b3176c..47e3af2166d4 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -50,7 +50,6 @@ | |||
50 | #include <linux/input/matrix_keypad.h> | 50 | #include <linux/input/matrix_keypad.h> |
51 | #include <linux/spi/spi.h> | 51 | #include <linux/spi/spi.h> |
52 | #include <linux/spi/ads7846.h> | 52 | #include <linux/spi/ads7846.h> |
53 | #include <linux/usb/otg.h> | ||
54 | #include <linux/dm9000.h> | 53 | #include <linux/dm9000.h> |
55 | #include <linux/interrupt.h> | 54 | #include <linux/interrupt.h> |
56 | 55 | ||
@@ -269,20 +268,6 @@ static int devkit8000_twl_gpio_setup(struct device *dev, | |||
269 | devkit8000_vmmc1_supply.dev = mmc[0].dev; | 268 | devkit8000_vmmc1_supply.dev = mmc[0].dev; |
270 | devkit8000_vsim_supply.dev = mmc[0].dev; | 269 | devkit8000_vsim_supply.dev = mmc[0].dev; |
271 | 270 | ||
272 | /* REVISIT: need ehci-omap hooks for external VBUS | ||
273 | * power switch and overcurrent detect | ||
274 | */ | ||
275 | |||
276 | gpio_request(gpio + 1, "EHCI_nOC"); | ||
277 | gpio_direction_input(gpio + 1); | ||
278 | |||
279 | /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */ | ||
280 | gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR"); | ||
281 | gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1); | ||
282 | |||
283 | /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ | ||
284 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; | ||
285 | |||
286 | return 0; | 271 | return 0; |
287 | } | 272 | } |
288 | 273 | ||
@@ -303,7 +288,7 @@ static struct regulator_consumer_supply devkit8000_vpll2_supplies[] = { | |||
303 | .dev = &devkit8000_lcd_device.dev, | 288 | .dev = &devkit8000_lcd_device.dev, |
304 | }, | 289 | }, |
305 | { | 290 | { |
306 | .supply = "vdss_dsi", | 291 | .supply = "vdds_dsi", |
307 | .dev = &devkit8000_dss_device.dev, | 292 | .dev = &devkit8000_dss_device.dev, |
308 | } | 293 | } |
309 | }; | 294 | }; |
@@ -639,17 +624,21 @@ static struct omap_musb_board_data musb_board_data = { | |||
639 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 624 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { |
640 | 625 | ||
641 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 626 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, |
642 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 627 | .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, |
643 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 628 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, |
644 | 629 | ||
645 | .phy_reset = true, | 630 | .phy_reset = true, |
646 | .reset_gpio_port[0] = -EINVAL, | 631 | .reset_gpio_port[0] = -EINVAL, |
647 | .reset_gpio_port[1] = 147, | 632 | .reset_gpio_port[1] = -EINVAL, |
648 | .reset_gpio_port[2] = -EINVAL | 633 | .reset_gpio_port[2] = -EINVAL |
649 | }; | 634 | }; |
650 | 635 | ||
651 | static void __init devkit8000_init(void) | 636 | static void __init devkit8000_init(void) |
652 | { | 637 | { |
638 | omap_serial_init(); | ||
639 | |||
640 | omap_dm9000_init(); | ||
641 | |||
653 | devkit8000_i2c_init(); | 642 | devkit8000_i2c_init(); |
654 | platform_add_devices(devkit8000_devices, | 643 | platform_add_devices(devkit8000_devices, |
655 | ARRAY_SIZE(devkit8000_devices)); | 644 | ARRAY_SIZE(devkit8000_devices)); |
@@ -659,25 +648,15 @@ static void __init devkit8000_init(void) | |||
659 | spi_register_board_info(devkit8000_spi_board_info, | 648 | spi_register_board_info(devkit8000_spi_board_info, |
660 | ARRAY_SIZE(devkit8000_spi_board_info)); | 649 | ARRAY_SIZE(devkit8000_spi_board_info)); |
661 | 650 | ||
662 | omap_serial_init(); | ||
663 | |||
664 | omap_dm9000_init(); | ||
665 | |||
666 | devkit8000_ads7846_init(); | 651 | devkit8000_ads7846_init(); |
667 | 652 | ||
668 | omap_mux_init_gpio(170, OMAP_PIN_INPUT); | ||
669 | |||
670 | gpio_request(170, "DVI_nPD"); | ||
671 | /* REVISIT leave DVI powered down until it's needed ... */ | ||
672 | gpio_direction_output(170, true); | ||
673 | |||
674 | usb_musb_init(&musb_board_data); | 653 | usb_musb_init(&musb_board_data); |
675 | usb_ehci_init(&ehci_pdata); | 654 | usb_ehci_init(&ehci_pdata); |
676 | devkit8000_flash_init(); | 655 | devkit8000_flash_init(); |
677 | 656 | ||
678 | /* Ensure SDRC pins are mux'd for self-refresh */ | 657 | /* Ensure SDRC pins are mux'd for self-refresh */ |
679 | omap_mux_init_signal("sdr_cke0", OMAP_PIN_OUTPUT); | 658 | omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); |
680 | omap_mux_init_signal("sdr_cke1", OMAP_PIN_OUTPUT); | 659 | omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT); |
681 | } | 660 | } |
682 | 661 | ||
683 | static void __init devkit8000_map_io(void) | 662 | static void __init devkit8000_map_io(void) |
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 3c7789d45051..d55c57b761a9 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -458,13 +458,13 @@ static struct omap_musb_board_data musb_board_data = { | |||
458 | }; | 458 | }; |
459 | 459 | ||
460 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 460 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { |
461 | .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 461 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, |
462 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 462 | .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, |
463 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 463 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, |
464 | 464 | ||
465 | .phy_reset = true, | 465 | .phy_reset = true, |
466 | .reset_gpio_port[0] = -EINVAL, | 466 | .reset_gpio_port[0] = IGEP2_GPIO_USBH_NRESET, |
467 | .reset_gpio_port[1] = IGEP2_GPIO_USBH_NRESET, | 467 | .reset_gpio_port[1] = -EINVAL, |
468 | .reset_gpio_port[2] = -EINVAL, | 468 | .reset_gpio_port[2] = -EINVAL, |
469 | }; | 469 | }; |
470 | 470 | ||
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index da9bcb898991..3ccc34ebdcc7 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -216,7 +216,7 @@ static void __init n8x0_onenand_init(void) {} | |||
216 | */ | 216 | */ |
217 | #define N8X0_SLOT_SWITCH_GPIO 96 | 217 | #define N8X0_SLOT_SWITCH_GPIO 96 |
218 | #define N810_EMMC_VSD_GPIO 23 | 218 | #define N810_EMMC_VSD_GPIO 23 |
219 | #define NN810_EMMC_VIO_GPIO 9 | 219 | #define N810_EMMC_VIO_GPIO 9 |
220 | 220 | ||
221 | static int n8x0_mmc_switch_slot(struct device *dev, int slot) | 221 | static int n8x0_mmc_switch_slot(struct device *dev, int slot) |
222 | { | 222 | { |
@@ -304,10 +304,10 @@ static void n810_set_power_emmc(struct device *dev, | |||
304 | if (power_on) { | 304 | if (power_on) { |
305 | gpio_set_value(N810_EMMC_VSD_GPIO, 1); | 305 | gpio_set_value(N810_EMMC_VSD_GPIO, 1); |
306 | msleep(1); | 306 | msleep(1); |
307 | gpio_set_value(NN810_EMMC_VIO_GPIO, 1); | 307 | gpio_set_value(N810_EMMC_VIO_GPIO, 1); |
308 | msleep(1); | 308 | msleep(1); |
309 | } else { | 309 | } else { |
310 | gpio_set_value(NN810_EMMC_VIO_GPIO, 0); | 310 | gpio_set_value(N810_EMMC_VIO_GPIO, 0); |
311 | msleep(50); | 311 | msleep(50); |
312 | gpio_set_value(N810_EMMC_VSD_GPIO, 0); | 312 | gpio_set_value(N810_EMMC_VSD_GPIO, 0); |
313 | msleep(50); | 313 | msleep(50); |
@@ -468,7 +468,7 @@ static void n8x0_mmc_cleanup(struct device *dev) | |||
468 | 468 | ||
469 | if (machine_is_nokia_n810()) { | 469 | if (machine_is_nokia_n810()) { |
470 | gpio_free(N810_EMMC_VSD_GPIO); | 470 | gpio_free(N810_EMMC_VSD_GPIO); |
471 | gpio_free(NN810_EMMC_VIO_GPIO); | 471 | gpio_free(N810_EMMC_VIO_GPIO); |
472 | } | 472 | } |
473 | } | 473 | } |
474 | 474 | ||
@@ -529,7 +529,7 @@ void __init n8x0_mmc_init(void) | |||
529 | 529 | ||
530 | err = gpio_request(N8X0_SLOT_SWITCH_GPIO, "MMC slot switch"); | 530 | err = gpio_request(N8X0_SLOT_SWITCH_GPIO, "MMC slot switch"); |
531 | if (err) | 531 | if (err) |
532 | return err; | 532 | return; |
533 | 533 | ||
534 | gpio_direction_output(N8X0_SLOT_SWITCH_GPIO, 0); | 534 | gpio_direction_output(N8X0_SLOT_SWITCH_GPIO, 0); |
535 | 535 | ||
@@ -537,17 +537,17 @@ void __init n8x0_mmc_init(void) | |||
537 | err = gpio_request(N810_EMMC_VSD_GPIO, "MMC slot 2 Vddf"); | 537 | err = gpio_request(N810_EMMC_VSD_GPIO, "MMC slot 2 Vddf"); |
538 | if (err) { | 538 | if (err) { |
539 | gpio_free(N8X0_SLOT_SWITCH_GPIO); | 539 | gpio_free(N8X0_SLOT_SWITCH_GPIO); |
540 | return err; | 540 | return; |
541 | } | 541 | } |
542 | gpio_direction_output(N810_EMMC_VSD_GPIO, 0); | 542 | gpio_direction_output(N810_EMMC_VSD_GPIO, 0); |
543 | 543 | ||
544 | err = gpio_request(NN810_EMMC_VIO_GPIO, "MMC slot 2 Vdd"); | 544 | err = gpio_request(N810_EMMC_VIO_GPIO, "MMC slot 2 Vdd"); |
545 | if (err) { | 545 | if (err) { |
546 | gpio_free(N8X0_SLOT_SWITCH_GPIO); | 546 | gpio_free(N8X0_SLOT_SWITCH_GPIO); |
547 | gpio_free(N810_EMMC_VSD_GPIO); | 547 | gpio_free(N810_EMMC_VSD_GPIO); |
548 | return err; | 548 | return; |
549 | } | 549 | } |
550 | gpio_direction_output(NN810_EMMC_VIO_GPIO, 0); | 550 | gpio_direction_output(N810_EMMC_VIO_GPIO, 0); |
551 | } | 551 | } |
552 | 552 | ||
553 | mmc_data[0] = &mmc1_data; | 553 | mmc_data[0] = &mmc1_data; |
diff --git a/arch/arm/mach-omap2/board-sdp-flash.c b/arch/arm/mach-omap2/board-sdp-flash.c index b1b88deec7f2..2d026328e385 100644 --- a/arch/arm/mach-omap2/board-sdp-flash.c +++ b/arch/arm/mach-omap2/board-sdp-flash.c | |||
@@ -253,20 +253,20 @@ void __init sdp_flash_init(struct flash_partitions sdp_partition_info[]) | |||
253 | } | 253 | } |
254 | 254 | ||
255 | if (norcs > GPMC_CS_NUM) | 255 | if (norcs > GPMC_CS_NUM) |
256 | printk(KERN_INFO "OneNAND: Unable to find configuration " | 256 | printk(KERN_INFO "NOR: Unable to find configuration " |
257 | " in GPMC\n "); | 257 | "in GPMC\n"); |
258 | else | 258 | else |
259 | board_nor_init(sdp_partition_info[0], norcs); | 259 | board_nor_init(sdp_partition_info[0], norcs); |
260 | 260 | ||
261 | if (onenandcs > GPMC_CS_NUM) | 261 | if (onenandcs > GPMC_CS_NUM) |
262 | printk(KERN_INFO "OneNAND: Unable to find configuration " | 262 | printk(KERN_INFO "OneNAND: Unable to find configuration " |
263 | " in GPMC\n "); | 263 | "in GPMC\n"); |
264 | else | 264 | else |
265 | board_onenand_init(sdp_partition_info[1], onenandcs); | 265 | board_onenand_init(sdp_partition_info[1], onenandcs); |
266 | 266 | ||
267 | if (nandcs > GPMC_CS_NUM) | 267 | if (nandcs > GPMC_CS_NUM) |
268 | printk(KERN_INFO "NAND: Unable to find configuration " | 268 | printk(KERN_INFO "NAND: Unable to find configuration " |
269 | " in GPMC\n "); | 269 | "in GPMC\n"); |
270 | else | 270 | else |
271 | board_nand_init(sdp_partition_info[2], nandcs); | 271 | board_nand_init(sdp_partition_info[2], nandcs); |
272 | } | 272 | } |
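The board-sdp-flash.c hunks are only message cleanups, but they illustrate why split string literals need care: adjacent literals are concatenated at compile time, so the old messages ended up with a doubled space before "in GPMC" and a stray space after the newline. A tiny standalone check of the two forms:

#include <stdio.h>

int main(void)
{
        /* old: stray leading space on the second literal, stray space after \n */
        const char *before = "NAND: Unable to find configuration "
                             " in GPMC\n ";
        /* new: the join falls exactly between two words */
        const char *after  = "NAND: Unable to find configuration "
                             "in GPMC\n";

        printf("[%s]\n[%s]\n", before, after);
        return 0;
}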
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c index bb4018b60642..e15d2e87cfc1 100644 --- a/arch/arm/mach-omap2/board-zoom-debugboard.c +++ b/arch/arm/mach-omap2/board-zoom-debugboard.c | |||
@@ -96,7 +96,7 @@ static struct plat_serial8250_port serial_platform_data[] = { | |||
96 | 96 | ||
97 | static struct platform_device zoom_debugboard_serial_device = { | 97 | static struct platform_device zoom_debugboard_serial_device = { |
98 | .name = "serial8250", | 98 | .name = "serial8250", |
99 | .id = 3, | 99 | .id = PLAT8250_DEV_PLATFORM, |
100 | .dev = { | 100 | .dev = { |
101 | .platform_data = serial_platform_data, | 101 | .platform_data = serial_platform_data, |
102 | }, | 102 | }, |
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c index ca95d8d64136..6b3984964cc5 100644 --- a/arch/arm/mach-omap2/board-zoom-peripherals.c +++ b/arch/arm/mach-omap2/board-zoom-peripherals.c | |||
@@ -280,7 +280,6 @@ static void enable_board_wakeup_source(void) | |||
280 | void __init zoom_peripherals_init(void) | 280 | void __init zoom_peripherals_init(void) |
281 | { | 281 | { |
282 | omap_i2c_init(); | 282 | omap_i2c_init(); |
283 | omap_serial_init(); | ||
284 | usb_musb_init(&musb_board_data); | 283 | usb_musb_init(&musb_board_data); |
285 | enable_board_wakeup_source(); | 284 | enable_board_wakeup_source(); |
286 | } | 285 | } |
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c index d5153b6bd6cb..9cba5560519b 100644 --- a/arch/arm/mach-omap2/clock3xxx_data.c +++ b/arch/arm/mach-omap2/clock3xxx_data.c | |||
@@ -895,7 +895,7 @@ static struct clk dpll4_m4x2_ck = { | |||
895 | .ops = &clkops_omap2_dflt_wait, | 895 | .ops = &clkops_omap2_dflt_wait, |
896 | .parent = &dpll4_m4_ck, | 896 | .parent = &dpll4_m4_ck, |
897 | .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), | 897 | .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), |
898 | .enable_bit = OMAP3430_PWRDN_CAM_SHIFT, | 898 | .enable_bit = OMAP3430_PWRDN_DSS1_SHIFT, |
899 | .flags = INVERT_ENABLE, | 899 | .flags = INVERT_ENABLE, |
900 | .clkdm_name = "dpll4_clkdm", | 900 | .clkdm_name = "dpll4_clkdm", |
901 | .recalc = &omap3_clkoutx2_recalc, | 901 | .recalc = &omap3_clkoutx2_recalc, |
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c index 28b107967c86..a5c0c9c8e496 100644 --- a/arch/arm/mach-omap2/clock44xx_data.c +++ b/arch/arm/mach-omap2/clock44xx_data.c | |||
@@ -2671,10 +2671,10 @@ static struct omap_clk omap44xx_clks[] = { | |||
2671 | CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_443X), | 2671 | CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_443X), |
2672 | CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_443X), | 2672 | CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_443X), |
2673 | CLK("omap-mcbsp.4", "ick", &dummy_ck, CK_443X), | 2673 | CLK("omap-mcbsp.4", "ick", &dummy_ck, CK_443X), |
2674 | CLK("omap-mcspi.1", "ick", &dummy_ck, CK_443X), | 2674 | CLK("omap2_mcspi.1", "ick", &dummy_ck, CK_443X), |
2675 | CLK("omap-mcspi.2", "ick", &dummy_ck, CK_443X), | 2675 | CLK("omap2_mcspi.2", "ick", &dummy_ck, CK_443X), |
2676 | CLK("omap-mcspi.3", "ick", &dummy_ck, CK_443X), | 2676 | CLK("omap2_mcspi.3", "ick", &dummy_ck, CK_443X), |
2677 | CLK("omap-mcspi.4", "ick", &dummy_ck, CK_443X), | 2677 | CLK("omap2_mcspi.4", "ick", &dummy_ck, CK_443X), |
2678 | CLK(NULL, "uart1_ick", &dummy_ck, CK_443X), | 2678 | CLK(NULL, "uart1_ick", &dummy_ck, CK_443X), |
2679 | CLK(NULL, "uart2_ick", &dummy_ck, CK_443X), | 2679 | CLK(NULL, "uart2_ick", &dummy_ck, CK_443X), |
2680 | CLK(NULL, "uart3_ick", &dummy_ck, CK_443X), | 2680 | CLK(NULL, "uart3_ick", &dummy_ck, CK_443X), |
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c index b87ad66f083e..6e568ec995ee 100644 --- a/arch/arm/mach-omap2/clockdomain.c +++ b/arch/arm/mach-omap2/clockdomain.c | |||
@@ -240,7 +240,7 @@ static void _omap2_clkdm_set_hwsup(struct clockdomain *clkdm, int enable) | |||
240 | bits = OMAP24XX_CLKSTCTRL_ENABLE_AUTO; | 240 | bits = OMAP24XX_CLKSTCTRL_ENABLE_AUTO; |
241 | else | 241 | else |
242 | bits = OMAP24XX_CLKSTCTRL_DISABLE_AUTO; | 242 | bits = OMAP24XX_CLKSTCTRL_DISABLE_AUTO; |
243 | } else if (cpu_is_omap34xx() | cpu_is_omap44xx()) { | 243 | } else if (cpu_is_omap34xx() || cpu_is_omap44xx()) { |
244 | if (enable) | 244 | if (enable) |
245 | bits = OMAP34XX_CLKSTCTRL_ENABLE_AUTO; | 245 | bits = OMAP34XX_CLKSTCTRL_ENABLE_AUTO; |
246 | else | 246 | else |
@@ -812,7 +812,7 @@ int omap2_clkdm_sleep(struct clockdomain *clkdm) | |||
812 | cm_set_mod_reg_bits(OMAP24XX_FORCESTATE, | 812 | cm_set_mod_reg_bits(OMAP24XX_FORCESTATE, |
813 | clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL); | 813 | clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL); |
814 | 814 | ||
815 | } else if (cpu_is_omap34xx() | cpu_is_omap44xx()) { | 815 | } else if (cpu_is_omap34xx() || cpu_is_omap44xx()) { |
816 | 816 | ||
817 | u32 bits = (OMAP34XX_CLKSTCTRL_FORCE_SLEEP << | 817 | u32 bits = (OMAP34XX_CLKSTCTRL_FORCE_SLEEP << |
818 | __ffs(clkdm->clktrctrl_mask)); | 818 | __ffs(clkdm->clktrctrl_mask)); |
@@ -856,7 +856,7 @@ int omap2_clkdm_wakeup(struct clockdomain *clkdm) | |||
856 | cm_clear_mod_reg_bits(OMAP24XX_FORCESTATE, | 856 | cm_clear_mod_reg_bits(OMAP24XX_FORCESTATE, |
857 | clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL); | 857 | clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL); |
858 | 858 | ||
859 | } else if (cpu_is_omap34xx() | cpu_is_omap44xx()) { | 859 | } else if (cpu_is_omap34xx() || cpu_is_omap44xx()) { |
860 | 860 | ||
861 | u32 bits = (OMAP34XX_CLKSTCTRL_FORCE_WAKEUP << | 861 | u32 bits = (OMAP34XX_CLKSTCTRL_FORCE_WAKEUP << |
862 | __ffs(clkdm->clktrctrl_mask)); | 862 | __ffs(clkdm->clktrctrl_mask)); |
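The `|` to `||` conversions in clockdomain.c (repeated below in powerdomain.c and prcm.c) replace a bitwise OR of two predicate calls with a logical OR. Both yield the same truth value when the predicates return 0 or 1, but `||` short-circuits, so the second check is skipped once the first one matches. A minimal user-space sketch; the cpu_is_* helpers here are stand-ins, not the kernel's:

#include <stdio.h>

static int calls;

static int cpu_is_omap34xx(void) { calls++; return 1; }
static int cpu_is_omap44xx(void) { calls++; return 0; }

int main(void)
{
        int hit;

        calls = 0;
        hit = cpu_is_omap34xx() | cpu_is_omap44xx();    /* bitwise: both always run */
        printf("bitwise OR: hit=%d, predicates evaluated=%d\n", hit, calls);

        calls = 0;
        hit = cpu_is_omap34xx() || cpu_is_omap44xx();   /* logical: stops at first true */
        printf("logical OR: hit=%d, predicates evaluated=%d\n", hit, calls);

        return 0;
}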
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 23e4d7733610..2271b9bd1f50 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c | |||
@@ -726,7 +726,7 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data, | |||
726 | if (!cpu_is_omap44xx()) | 726 | if (!cpu_is_omap44xx()) |
727 | return; | 727 | return; |
728 | base = OMAP4_MMC5_BASE + OMAP4_MMC_REG_OFFSET; | 728 | base = OMAP4_MMC5_BASE + OMAP4_MMC_REG_OFFSET; |
729 | irq = OMAP44XX_IRQ_MMC4; | 729 | irq = OMAP44XX_IRQ_MMC5; |
730 | break; | 730 | break; |
731 | default: | 731 | default: |
732 | continue; | 732 | continue; |
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c index 64d74f05abbe..e57fb29ff855 100644 --- a/arch/arm/mach-omap2/gpmc-nand.c +++ b/arch/arm/mach-omap2/gpmc-nand.c | |||
@@ -39,6 +39,9 @@ static int omap2_nand_gpmc_retime(void) | |||
39 | struct gpmc_timings t; | 39 | struct gpmc_timings t; |
40 | int err; | 40 | int err; |
41 | 41 | ||
42 | if (!gpmc_nand_data->gpmc_t) | ||
43 | return 0; | ||
44 | |||
42 | memset(&t, 0, sizeof(t)); | 45 | memset(&t, 0, sizeof(t)); |
43 | t.sync_clk = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->sync_clk); | 46 | t.sync_clk = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->sync_clk); |
44 | t.cs_on = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->cs_on); | 47 | t.cs_on = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->cs_on); |
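The new check at the top of omap2_nand_gpmc_retime() is a plain NULL guard: a board that passes no GPMC timings would otherwise dereference gpmc_nand_data->gpmc_t a few lines later. A hedged sketch of the same pattern, with invented structure names:

#include <stdio.h>
#include <string.h>

struct gpmc_timings { int sync_clk; };

struct nand_platform_data {
        struct gpmc_timings *gpmc_t;    /* optional; may be NULL */
};

static int nand_retime(const struct nand_platform_data *pdata)
{
        struct gpmc_timings t;

        if (!pdata->gpmc_t)             /* nothing to program, not an error */
                return 0;

        memset(&t, 0, sizeof(t));
        t.sync_clk = pdata->gpmc_t->sync_clk;
        printf("programmed sync_clk=%d\n", t.sync_clk);
        return 0;
}

int main(void)
{
        struct gpmc_timings gt = { .sync_clk = 83 };
        struct nand_platform_data no_timings   = { .gpmc_t = NULL };
        struct nand_platform_data with_timings = { .gpmc_t = &gt };

        nand_retime(&no_timings);       /* returns early, no crash */
        nand_retime(&with_timings);
        return 0;
}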
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S index ff25c7e4e606..50fd74916643 100644 --- a/arch/arm/mach-omap2/include/mach/entry-macro.S +++ b/arch/arm/mach-omap2/include/mach/entry-macro.S | |||
@@ -52,7 +52,7 @@ omap_irq_base: .word 0 | |||
52 | 52 | ||
53 | mrc p15, 0, \tmp, c0, c0, 0 @ get processor revision | 53 | mrc p15, 0, \tmp, c0, c0, 0 @ get processor revision |
54 | and \tmp, \tmp, #0x000f0000 @ only check architecture | 54 | and \tmp, \tmp, #0x000f0000 @ only check architecture |
55 | cmp \tmp, #0x00060000 @ is v6? | 55 | cmp \tmp, #0x00070000 @ is v6? |
56 | beq 2400f @ found v6 so it's omap24xx | 56 | beq 2400f @ found v6 so it's omap24xx |
57 | mrc p15, 0, \tmp, c0, c0, 0 @ get processor revision | 57 | mrc p15, 0, \tmp, c0, c0, 0 @ get processor revision |
58 | and \tmp, \tmp, #0x000000f0 @ check cortex 8 or 9 | 58 | and \tmp, \tmp, #0x000000f0 @ check cortex 8 or 9 |
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S index aa3f65c2ac97..ef0e7a00dd6c 100644 --- a/arch/arm/mach-omap2/omap-headsmp.S +++ b/arch/arm/mach-omap2/omap-headsmp.S | |||
@@ -33,7 +33,7 @@ | |||
33 | ENTRY(omap_secondary_startup) | 33 | ENTRY(omap_secondary_startup) |
34 | hold: ldr r12,=0x103 | 34 | hold: ldr r12,=0x103 |
35 | dsb | 35 | dsb |
36 | smc @ read from AuxCoreBoot0 | 36 | smc #0 @ read from AuxCoreBoot0 |
37 | mov r0, r0, lsr #9 | 37 | mov r0, r0, lsr #9 |
38 | mrc p15, 0, r4, c0, c0, 5 | 38 | mrc p15, 0, r4, c0, c0, 5 |
39 | and r4, r4, #0x0f | 39 | and r4, r4, #0x0f |
@@ -52,7 +52,7 @@ ENTRY(omap_modify_auxcoreboot0) | |||
52 | stmfd sp!, {r1-r12, lr} | 52 | stmfd sp!, {r1-r12, lr} |
53 | ldr r12, =0x104 | 53 | ldr r12, =0x104 |
54 | dsb | 54 | dsb |
55 | smc | 55 | smc #0 |
56 | ldmfd sp!, {r1-r12, pc} | 56 | ldmfd sp!, {r1-r12, pc} |
57 | END(omap_modify_auxcoreboot0) | 57 | END(omap_modify_auxcoreboot0) |
58 | 58 | ||
@@ -60,6 +60,6 @@ ENTRY(omap_auxcoreboot_addr) | |||
60 | stmfd sp!, {r2-r12, lr} | 60 | stmfd sp!, {r2-r12, lr} |
61 | ldr r12, =0x105 | 61 | ldr r12, =0x105 |
62 | dsb | 62 | dsb |
63 | smc | 63 | smc #0 |
64 | ldmfd sp!, {r2-r12, pc} | 64 | ldmfd sp!, {r2-r12, pc} |
65 | END(omap_auxcoreboot_addr) | 65 | END(omap_auxcoreboot_addr) |
diff --git a/arch/arm/mach-omap2/omap44xx-smc.S b/arch/arm/mach-omap2/omap44xx-smc.S index 89bb2b141473..f61c7771ca47 100644 --- a/arch/arm/mach-omap2/omap44xx-smc.S +++ b/arch/arm/mach-omap2/omap44xx-smc.S | |||
@@ -27,6 +27,6 @@ ENTRY(omap_smc1) | |||
27 | mov r12, r0 | 27 | mov r12, r0 |
28 | mov r0, r1 | 28 | mov r0, r1 |
29 | dsb | 29 | dsb |
30 | smc | 30 | smc #0 |
31 | ldmfd sp!, {r2-r12, pc} | 31 | ldmfd sp!, {r2-r12, pc} |
32 | END(omap_smc1) | 32 | END(omap_smc1) |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index c6649472ce0d..e436dcb19795 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -1511,6 +1511,9 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh) | |||
1511 | c = oh->slaves[oh->_mpu_port_index]->_clk; | 1511 | c = oh->slaves[oh->_mpu_port_index]->_clk; |
1512 | } | 1512 | } |
1513 | 1513 | ||
1514 | if (!c->clkdm) | ||
1515 | return NULL; | ||
1516 | |||
1514 | return c->clkdm->pwrdm.ptr; | 1517 | return c->clkdm->pwrdm.ptr; |
1515 | 1518 | ||
1516 | } | 1519 | } |
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 9a0fb385622b..ebfce7d1a5d3 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c | |||
@@ -222,7 +222,7 @@ void pwrdm_init(struct powerdomain **pwrdm_list) | |||
222 | { | 222 | { |
223 | struct powerdomain **p = NULL; | 223 | struct powerdomain **p = NULL; |
224 | 224 | ||
225 | if (cpu_is_omap24xx() | cpu_is_omap34xx()) { | 225 | if (cpu_is_omap24xx() || cpu_is_omap34xx()) { |
226 | pwrstctrl_reg_offs = OMAP2_PM_PWSTCTRL; | 226 | pwrstctrl_reg_offs = OMAP2_PM_PWSTCTRL; |
227 | pwrstst_reg_offs = OMAP2_PM_PWSTST; | 227 | pwrstst_reg_offs = OMAP2_PM_PWSTST; |
228 | } else if (cpu_is_omap44xx()) { | 228 | } else if (cpu_is_omap44xx()) { |
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c index 9537f6f2352d..07a60f1204ca 100644 --- a/arch/arm/mach-omap2/prcm.c +++ b/arch/arm/mach-omap2/prcm.c | |||
@@ -123,7 +123,7 @@ struct omap3_prcm_regs prcm_context; | |||
123 | u32 omap_prcm_get_reset_sources(void) | 123 | u32 omap_prcm_get_reset_sources(void) |
124 | { | 124 | { |
125 | /* XXX This presumably needs modification for 34XX */ | 125 | /* XXX This presumably needs modification for 34XX */ |
126 | if (cpu_is_omap24xx() | cpu_is_omap34xx()) | 126 | if (cpu_is_omap24xx() || cpu_is_omap34xx()) |
127 | return prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTST) & 0x7f; | 127 | return prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTST) & 0x7f; |
128 | if (cpu_is_omap44xx()) | 128 | if (cpu_is_omap44xx()) |
129 | return prm_read_mod_reg(WKUP_MOD, OMAP4_RM_RSTST) & 0x7f; | 129 | return prm_read_mod_reg(WKUP_MOD, OMAP4_RM_RSTST) & 0x7f; |
@@ -157,7 +157,7 @@ void omap_prcm_arch_reset(char mode, const char *cmd) | |||
157 | else | 157 | else |
158 | WARN_ON(1); | 158 | WARN_ON(1); |
159 | 159 | ||
160 | if (cpu_is_omap24xx() | cpu_is_omap34xx()) | 160 | if (cpu_is_omap24xx() || cpu_is_omap34xx()) |
161 | prm_set_mod_reg_bits(OMAP_RST_DPLL3, prcm_offs, | 161 | prm_set_mod_reg_bits(OMAP_RST_DPLL3, prcm_offs, |
162 | OMAP2_RM_RSTCTRL); | 162 | OMAP2_RM_RSTCTRL); |
163 | if (cpu_is_omap44xx()) | 163 | if (cpu_is_omap44xx()) |
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index da77930480e9..3771254dfa81 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c | |||
@@ -115,7 +115,6 @@ static struct plat_serial8250_port serial_platform_data2[] = { | |||
115 | } | 115 | } |
116 | }; | 116 | }; |
117 | 117 | ||
118 | #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) | ||
119 | static struct plat_serial8250_port serial_platform_data3[] = { | 118 | static struct plat_serial8250_port serial_platform_data3[] = { |
120 | { | 119 | { |
121 | .irq = 70, | 120 | .irq = 70, |
@@ -128,23 +127,12 @@ static struct plat_serial8250_port serial_platform_data3[] = { | |||
128 | } | 127 | } |
129 | }; | 128 | }; |
130 | 129 | ||
131 | static inline void omap2_set_globals_uart4(struct omap_globals *omap2_globals) | ||
132 | { | ||
133 | serial_platform_data3[0].mapbase = omap2_globals->uart4_phys; | ||
134 | } | ||
135 | #else | ||
136 | static inline void omap2_set_globals_uart4(struct omap_globals *omap2_globals) | ||
137 | { | ||
138 | } | ||
139 | #endif | ||
140 | |||
141 | void __init omap2_set_globals_uart(struct omap_globals *omap2_globals) | 130 | void __init omap2_set_globals_uart(struct omap_globals *omap2_globals) |
142 | { | 131 | { |
143 | serial_platform_data0[0].mapbase = omap2_globals->uart1_phys; | 132 | serial_platform_data0[0].mapbase = omap2_globals->uart1_phys; |
144 | serial_platform_data1[0].mapbase = omap2_globals->uart2_phys; | 133 | serial_platform_data1[0].mapbase = omap2_globals->uart2_phys; |
145 | serial_platform_data2[0].mapbase = omap2_globals->uart3_phys; | 134 | serial_platform_data2[0].mapbase = omap2_globals->uart3_phys; |
146 | if (cpu_is_omap3630() || cpu_is_omap44xx()) | 135 | serial_platform_data3[0].mapbase = omap2_globals->uart4_phys; |
147 | omap2_set_globals_uart4(omap2_globals); | ||
148 | } | 136 | } |
149 | 137 | ||
150 | static inline unsigned int __serial_read_reg(struct uart_port *up, | 138 | static inline unsigned int __serial_read_reg(struct uart_port *up, |
@@ -550,7 +538,7 @@ static ssize_t sleep_timeout_store(struct device *dev, | |||
550 | unsigned int value; | 538 | unsigned int value; |
551 | 539 | ||
552 | if (sscanf(buf, "%u", &value) != 1) { | 540 | if (sscanf(buf, "%u", &value) != 1) { |
553 | printk(KERN_ERR "sleep_timeout_store: Invalid value\n"); | 541 | dev_err(dev, "sleep_timeout_store: Invalid value\n"); |
554 | return -EINVAL; | 542 | return -EINVAL; |
555 | } | 543 | } |
556 | 544 | ||
@@ -664,27 +652,33 @@ void __init omap_serial_early_init(void) | |||
664 | struct device *dev = &pdev->dev; | 652 | struct device *dev = &pdev->dev; |
665 | struct plat_serial8250_port *p = dev->platform_data; | 653 | struct plat_serial8250_port *p = dev->platform_data; |
666 | 654 | ||
655 | /* Don't map zero-based physical address */ | ||
656 | if (p->mapbase == 0) { | ||
657 | dev_warn(dev, "no physical address for uart#%d," | ||
658 | " so skipping early_init...\n", i); | ||
659 | continue; | ||
660 | } | ||
667 | /* | 661 | /* |
668 | * Module 4KB + L4 interconnect 4KB | 662 | * Module 4KB + L4 interconnect 4KB |
669 | * Static mapping, never released | 663 | * Static mapping, never released |
670 | */ | 664 | */ |
671 | p->membase = ioremap(p->mapbase, SZ_8K); | 665 | p->membase = ioremap(p->mapbase, SZ_8K); |
672 | if (!p->membase) { | 666 | if (!p->membase) { |
673 | printk(KERN_ERR "ioremap failed for uart%i\n", i + 1); | 667 | dev_err(dev, "ioremap failed for uart%i\n", i + 1); |
674 | continue; | 668 | continue; |
675 | } | 669 | } |
676 | 670 | ||
677 | sprintf(name, "uart%d_ick", i + 1); | 671 | sprintf(name, "uart%d_ick", i + 1); |
678 | uart->ick = clk_get(NULL, name); | 672 | uart->ick = clk_get(NULL, name); |
679 | if (IS_ERR(uart->ick)) { | 673 | if (IS_ERR(uart->ick)) { |
680 | printk(KERN_ERR "Could not get uart%d_ick\n", i + 1); | 674 | dev_err(dev, "Could not get uart%d_ick\n", i + 1); |
681 | uart->ick = NULL; | 675 | uart->ick = NULL; |
682 | } | 676 | } |
683 | 677 | ||
684 | sprintf(name, "uart%d_fck", i+1); | 678 | sprintf(name, "uart%d_fck", i+1); |
685 | uart->fck = clk_get(NULL, name); | 679 | uart->fck = clk_get(NULL, name); |
686 | if (IS_ERR(uart->fck)) { | 680 | if (IS_ERR(uart->fck)) { |
687 | printk(KERN_ERR "Could not get uart%d_fck\n", i + 1); | 681 | dev_err(dev, "Could not get uart%d_fck\n", i + 1); |
688 | uart->fck = NULL; | 682 | uart->fck = NULL; |
689 | } | 683 | } |
690 | 684 | ||
@@ -727,6 +721,13 @@ void __init omap_serial_init_port(int port) | |||
727 | pdev = &uart->pdev; | 721 | pdev = &uart->pdev; |
728 | dev = &pdev->dev; | 722 | dev = &pdev->dev; |
729 | 723 | ||
724 | /* Don't proceed if there's no clocks available */ | ||
725 | if (unlikely(!uart->ick || !uart->fck)) { | ||
726 | WARN(1, "%s: can't init uart%d, no clocks available\n", | ||
727 | kobject_name(&dev->kobj), port); | ||
728 | return; | ||
729 | } | ||
730 | |||
730 | omap_uart_enable_clocks(uart); | 731 | omap_uart_enable_clocks(uart); |
731 | 732 | ||
732 | omap_uart_reset(uart); | 733 | omap_uart_reset(uart); |
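The serial.c changes add two guards: early init now skips any port whose physical address is still zero, and omap_serial_init_port() refuses to touch a UART whose clocks could not be obtained. A rough user-space sketch of that validate-then-skip loop; the port table and field names are invented for illustration:

#include <stdio.h>

struct uart_port_cfg {
        unsigned long mapbase;  /* 0 means "not wired up on this SoC" */
        int have_clocks;
};

static void early_init(struct uart_port_cfg *ports, int n)
{
        for (int i = 0; i < n; i++) {
                if (ports[i].mapbase == 0) {
                        fprintf(stderr, "uart%d: no physical address, skipping\n", i + 1);
                        continue;
                }
                if (!ports[i].have_clocks) {
                        fprintf(stderr, "uart%d: no clocks, skipping\n", i + 1);
                        continue;
                }
                printf("uart%d: mapped at 0x%lx\n", i + 1, ports[i].mapbase);
        }
}

int main(void)
{
        struct uart_port_cfg ports[] = {
                { 0x4806a000, 1 },
                { 0,          1 },      /* uart2 absent on this (hypothetical) board */
                { 0x49020000, 0 },      /* uart3 present but clock lookup failed */
        };

        early_init(ports, 3);
        return 0;
}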
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 8bca4dea6dfa..f55fa1044f72 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, | |||
41 | kfrom = kmap_atomic(from, KM_USER0); | 41 | kfrom = kmap_atomic(from, KM_USER0); |
42 | kto = kmap_atomic(to, KM_USER1); | 42 | kto = kmap_atomic(to, KM_USER1); |
43 | copy_page(kto, kfrom); | 43 | copy_page(kto, kfrom); |
44 | #ifdef CONFIG_HIGHMEM | 44 | __cpuc_flush_dcache_area(kto, PAGE_SIZE); |
45 | /* | ||
46 | * kmap_atomic() doesn't set the page virtual address, and | ||
47 | * kunmap_atomic() takes care of cache flushing already. | ||
48 | */ | ||
49 | if (page_address(to) != NULL) | ||
50 | #endif | ||
51 | __cpuc_flush_dcache_area(kto, PAGE_SIZE); | ||
52 | kunmap_atomic(kto, KM_USER1); | 45 | kunmap_atomic(kto, KM_USER1); |
53 | kunmap_atomic(kfrom, KM_USER0); | 46 | kunmap_atomic(kfrom, KM_USER0); |
54 | } | 47 | } |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 1351edc0b26f..13fa536d82e6 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, | |||
464 | vaddr += offset; | 464 | vaddr += offset; |
465 | op(vaddr, len, dir); | 465 | op(vaddr, len, dir); |
466 | kunmap_high(page); | 466 | kunmap_high(page); |
467 | } else if (cache_is_vipt()) { | ||
468 | pte_t saved_pte; | ||
469 | vaddr = kmap_high_l1_vipt(page, &saved_pte); | ||
470 | op(vaddr + offset, len, dir); | ||
471 | kunmap_high_l1_vipt(page, saved_pte); | ||
467 | } | 472 | } |
468 | } else { | 473 | } else { |
469 | vaddr = page_address(page) + offset; | 474 | vaddr = page_address(page) + offset; |
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index e34f095e2090..c6844cb9b508 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <asm/cacheflush.h> | 14 | #include <asm/cacheflush.h> |
15 | #include <asm/cachetype.h> | 15 | #include <asm/cachetype.h> |
16 | #include <asm/highmem.h> | ||
16 | #include <asm/smp_plat.h> | 17 | #include <asm/smp_plat.h> |
17 | #include <asm/system.h> | 18 | #include <asm/system.h> |
18 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | |||
152 | 153 | ||
153 | void __flush_dcache_page(struct address_space *mapping, struct page *page) | 154 | void __flush_dcache_page(struct address_space *mapping, struct page *page) |
154 | { | 155 | { |
155 | void *addr = page_address(page); | ||
156 | |||
157 | /* | 156 | /* |
158 | * Writeback any data associated with the kernel mapping of this | 157 | * Writeback any data associated with the kernel mapping of this |
159 | * page. This ensures that data in the physical page is mutually | 158 | * page. This ensures that data in the physical page is mutually |
160 | * coherent with the kernels mapping. | 159 | * coherent with the kernels mapping. |
161 | */ | 160 | */ |
162 | #ifdef CONFIG_HIGHMEM | 161 | if (!PageHighMem(page)) { |
163 | /* | 162 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); |
164 | * kmap_atomic() doesn't set the page virtual address, and | 163 | } else { |
165 | * kunmap_atomic() takes care of cache flushing already. | 164 | void *addr = kmap_high_get(page); |
166 | */ | 165 | if (addr) { |
167 | if (addr) | 166 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
168 | #endif | 167 | kunmap_high(page); |
169 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 168 | } else if (cache_is_vipt()) { |
169 | pte_t saved_pte; | ||
170 | addr = kmap_high_l1_vipt(page, &saved_pte); | ||
171 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | ||
172 | kunmap_high_l1_vipt(page, saved_pte); | ||
173 | } | ||
174 | } | ||
170 | 175 | ||
171 | /* | 176 | /* |
172 | * If this is a page cache page, and we have an aliasing VIPT cache, | 177 | * If this is a page cache page, and we have an aliasing VIPT cache, |
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 2be1ec7c1b41..77b030f5ec09 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c | |||
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type) | |||
79 | unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); | 79 | unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); |
80 | 80 | ||
81 | if (kvaddr >= (void *)FIXADDR_START) { | 81 | if (kvaddr >= (void *)FIXADDR_START) { |
82 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); | 82 | if (cache_is_vivt()) |
83 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); | ||
83 | #ifdef CONFIG_DEBUG_HIGHMEM | 84 | #ifdef CONFIG_DEBUG_HIGHMEM |
84 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | 85 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); |
85 | set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); | 86 | set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); |
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr) | |||
124 | pte = TOP_PTE(vaddr); | 125 | pte = TOP_PTE(vaddr); |
125 | return pte_page(*pte); | 126 | return pte_page(*pte); |
126 | } | 127 | } |
128 | |||
129 | #ifdef CONFIG_CPU_CACHE_VIPT | ||
130 | |||
131 | #include <linux/percpu.h> | ||
132 | |||
133 | /* | ||
134 | * The VIVT cache of a highmem page is always flushed before the page | ||
135 | * is unmapped. Hence unmapped highmem pages need no cache maintenance | ||
136 | * in that case. | ||
137 | * | ||
138 | * However unmapped pages may still be cached with a VIPT cache, and | ||
139 | * it is not possible to perform cache maintenance on them using physical | ||
140 | * addresses unfortunately. So we have no choice but to set up a temporary | ||
141 | * virtual mapping for that purpose. | ||
142 | * | ||
143 | * Yet this VIPT cache maintenance may be triggered from DMA support | ||
144 | * functions which are possibly called from interrupt context. As we don't | ||
145 | * want to keep interrupt disabled all the time when such maintenance is | ||
146 | * taking place, we therefore allow for some reentrancy by preserving and | ||
147 | * restoring the previous fixmap entry before the interrupted context is | ||
148 | * resumed. If the reentrancy depth is 0 then there is no need to restore | ||
149 | * the previous fixmap, and leaving the current one in place allow it to | ||
150 | * be reused the next time without a TLB flush (common with DMA). | ||
151 | */ | ||
152 | |||
153 | static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth); | ||
154 | |||
155 | void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte) | ||
156 | { | ||
157 | unsigned int idx, cpu = smp_processor_id(); | ||
158 | int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); | ||
159 | unsigned long vaddr, flags; | ||
160 | pte_t pte, *ptep; | ||
161 | |||
162 | idx = KM_L1_CACHE + KM_TYPE_NR * cpu; | ||
163 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
164 | ptep = TOP_PTE(vaddr); | ||
165 | pte = mk_pte(page, kmap_prot); | ||
166 | |||
167 | if (!in_interrupt()) | ||
168 | preempt_disable(); | ||
169 | |||
170 | raw_local_irq_save(flags); | ||
171 | (*depth)++; | ||
172 | if (pte_val(*ptep) == pte_val(pte)) { | ||
173 | *saved_pte = pte; | ||
174 | } else { | ||
175 | *saved_pte = *ptep; | ||
176 | set_pte_ext(ptep, pte, 0); | ||
177 | local_flush_tlb_kernel_page(vaddr); | ||
178 | } | ||
179 | raw_local_irq_restore(flags); | ||
180 | |||
181 | return (void *)vaddr; | ||
182 | } | ||
183 | |||
184 | void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte) | ||
185 | { | ||
186 | unsigned int idx, cpu = smp_processor_id(); | ||
187 | int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); | ||
188 | unsigned long vaddr, flags; | ||
189 | pte_t pte, *ptep; | ||
190 | |||
191 | idx = KM_L1_CACHE + KM_TYPE_NR * cpu; | ||
192 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
193 | ptep = TOP_PTE(vaddr); | ||
194 | pte = mk_pte(page, kmap_prot); | ||
195 | |||
196 | BUG_ON(pte_val(*ptep) != pte_val(pte)); | ||
197 | BUG_ON(*depth <= 0); | ||
198 | |||
199 | raw_local_irq_save(flags); | ||
200 | (*depth)--; | ||
201 | if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) { | ||
202 | set_pte_ext(ptep, saved_pte, 0); | ||
203 | local_flush_tlb_kernel_page(vaddr); | ||
204 | } | ||
205 | raw_local_irq_restore(flags); | ||
206 | |||
207 | if (!in_interrupt()) | ||
208 | preempt_enable(); | ||
209 | } | ||
210 | |||
211 | #endif /* CONFIG_CPU_CACHE_VIPT */ | ||
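The comment block above describes the reentrancy scheme: each nesting level saves the fixmap entry it found, a per-CPU depth counter tracks the nesting, and on unmap the previous entry is restored only while some outer user still needs it; at depth zero the mapping is left in place so the next DMA operation on the same page can reuse it without a TLB flush. A simplified single-CPU sketch of that save/restore discipline, just the bookkeeping with no real page tables:

#include <assert.h>
#include <stdio.h>

static int slot = -1;   /* stands in for the fixmap PTE */
static int depth;

static int slot_map(int page, int *saved)
{
        depth++;
        *saved = slot;
        if (slot != page)
                slot = page;    /* would set_pte + flush the TLB here */
        return slot;
}

static void slot_unmap(int page, int saved)
{
        assert(slot == page && depth > 0);
        depth--;
        if (depth != 0 && saved != page)
                slot = saved;   /* restore for the interrupted user */
        /* at depth 0 the entry stays; remapping the same page later is free */
}

int main(void)
{
        int outer, inner;

        slot_map(7, &outer);            /* e.g. DMA sync in process context */
        slot_map(3, &inner);            /* an "interrupt" remaps the slot */
        slot_unmap(3, inner);           /* outer mapping (page 7) restored */
        printf("after nested unmap: slot=%d depth=%d\n", slot, depth);
        slot_unmap(7, outer);
        printf("after outer unmap:  slot=%d depth=%d\n", slot, depth);
        return 0;
}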
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4223d086aa17..241c24a1c18f 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -1054,10 +1054,12 @@ void setup_mm_for_reboot(char mode) | |||
1054 | pgd_t *pgd; | 1054 | pgd_t *pgd; |
1055 | int i; | 1055 | int i; |
1056 | 1056 | ||
1057 | if (current->mm && current->mm->pgd) | 1057 | /* |
1058 | pgd = current->mm->pgd; | 1058 | * We need to access to user-mode page tables here. For kernel threads |
1059 | else | 1059 | * we don't have any user-mode mappings so we use the context that we |
1060 | pgd = init_mm.pgd; | 1060 | * "borrowed". |
1061 | */ | ||
1062 | pgd = current->active_mm->pgd; | ||
1061 | 1063 | ||
1062 | base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; | 1064 | base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; |
1063 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | 1065 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) |
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c index 088c1a03b946..f12f0e39ddf2 100644 --- a/arch/arm/plat-omap/common.c +++ b/arch/arm/plat-omap/common.c | |||
@@ -44,9 +44,6 @@ | |||
44 | 44 | ||
45 | #define NO_LENGTH_CHECK 0xffffffff | 45 | #define NO_LENGTH_CHECK 0xffffffff |
46 | 46 | ||
47 | unsigned char omap_bootloader_tag[512]; | ||
48 | int omap_bootloader_tag_len; | ||
49 | |||
50 | struct omap_board_config_kernel *omap_board_config; | 47 | struct omap_board_config_kernel *omap_board_config; |
51 | int omap_board_config_size; | 48 | int omap_board_config_size; |
52 | 49 | ||
@@ -100,10 +97,17 @@ EXPORT_SYMBOL(omap_get_var_config); | |||
100 | 97 | ||
101 | #include <linux/clocksource.h> | 98 | #include <linux/clocksource.h> |
102 | 99 | ||
100 | /* | ||
101 | * offset_32k holds the init time counter value. It is then subtracted | ||
102 | * from every counter read to achieve a counter that counts time from the | ||
103 | * kernel boot (needed for sched_clock()). | ||
104 | */ | ||
105 | static u32 offset_32k __read_mostly; | ||
106 | |||
103 | #ifdef CONFIG_ARCH_OMAP16XX | 107 | #ifdef CONFIG_ARCH_OMAP16XX |
104 | static cycle_t omap16xx_32k_read(struct clocksource *cs) | 108 | static cycle_t omap16xx_32k_read(struct clocksource *cs) |
105 | { | 109 | { |
106 | return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED); | 110 | return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED) - offset_32k; |
107 | } | 111 | } |
108 | #else | 112 | #else |
109 | #define omap16xx_32k_read NULL | 113 | #define omap16xx_32k_read NULL |
@@ -112,7 +116,7 @@ static cycle_t omap16xx_32k_read(struct clocksource *cs) | |||
112 | #ifdef CONFIG_ARCH_OMAP2420 | 116 | #ifdef CONFIG_ARCH_OMAP2420 |
113 | static cycle_t omap2420_32k_read(struct clocksource *cs) | 117 | static cycle_t omap2420_32k_read(struct clocksource *cs) |
114 | { | 118 | { |
115 | return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10); | 119 | return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10) - offset_32k; |
116 | } | 120 | } |
117 | #else | 121 | #else |
118 | #define omap2420_32k_read NULL | 122 | #define omap2420_32k_read NULL |
@@ -121,7 +125,7 @@ static cycle_t omap2420_32k_read(struct clocksource *cs) | |||
121 | #ifdef CONFIG_ARCH_OMAP2430 | 125 | #ifdef CONFIG_ARCH_OMAP2430 |
122 | static cycle_t omap2430_32k_read(struct clocksource *cs) | 126 | static cycle_t omap2430_32k_read(struct clocksource *cs) |
123 | { | 127 | { |
124 | return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10); | 128 | return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10) - offset_32k; |
125 | } | 129 | } |
126 | #else | 130 | #else |
127 | #define omap2430_32k_read NULL | 131 | #define omap2430_32k_read NULL |
@@ -130,7 +134,7 @@ static cycle_t omap2430_32k_read(struct clocksource *cs) | |||
130 | #ifdef CONFIG_ARCH_OMAP3 | 134 | #ifdef CONFIG_ARCH_OMAP3 |
131 | static cycle_t omap34xx_32k_read(struct clocksource *cs) | 135 | static cycle_t omap34xx_32k_read(struct clocksource *cs) |
132 | { | 136 | { |
133 | return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10); | 137 | return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10) - offset_32k; |
134 | } | 138 | } |
135 | #else | 139 | #else |
136 | #define omap34xx_32k_read NULL | 140 | #define omap34xx_32k_read NULL |
@@ -139,7 +143,7 @@ static cycle_t omap34xx_32k_read(struct clocksource *cs) | |||
139 | #ifdef CONFIG_ARCH_OMAP4 | 143 | #ifdef CONFIG_ARCH_OMAP4 |
140 | static cycle_t omap44xx_32k_read(struct clocksource *cs) | 144 | static cycle_t omap44xx_32k_read(struct clocksource *cs) |
141 | { | 145 | { |
142 | return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10); | 146 | return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10) - offset_32k; |
143 | } | 147 | } |
144 | #else | 148 | #else |
145 | #define omap44xx_32k_read NULL | 149 | #define omap44xx_32k_read NULL |
@@ -227,6 +231,8 @@ static int __init omap_init_clocksource_32k(void) | |||
227 | clocksource_32k.mult = clocksource_hz2mult(32768, | 231 | clocksource_32k.mult = clocksource_hz2mult(32768, |
228 | clocksource_32k.shift); | 232 | clocksource_32k.shift); |
229 | 233 | ||
234 | offset_32k = clocksource_32k.read(&clocksource_32k); | ||
235 | |||
230 | if (clocksource_register(&clocksource_32k)) | 236 | if (clocksource_register(&clocksource_32k)) |
231 | printk(err, clocksource_32k.name); | 237 | printk(err, clocksource_32k.name); |
232 | } | 238 | } |
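The offset_32k change makes the 32 kHz clocksource report time since boot rather than the raw counter value: the counter is sampled once at init and that sample is subtracted from every later read, which is what sched_clock() expects. The same idea in a tiny standalone form, using a monotonic clock as the stand-in counter:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t raw_counter(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 32768 +
               (uint64_t)ts.tv_nsec * 32768 / 1000000000;
}

static uint64_t offset;         /* counter value captured at "boot" */

static void clocksource_init(void)
{
        offset = raw_counter();
}

static uint64_t counter_read(void)
{
        return raw_counter() - offset;  /* counts from init, starts near 0 */
}

int main(void)
{
        clocksource_init();
        printf("ticks since init: %llu\n", (unsigned long long)counter_read());
        return 0;
}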
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index 5c6c342c53f5..1d959965ff52 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c | |||
@@ -937,6 +937,15 @@ void omap_start_dma(int lch) | |||
937 | { | 937 | { |
938 | u32 l; | 938 | u32 l; |
939 | 939 | ||
940 | /* | ||
941 | * The CPC/CDAC register needs to be initialized to zero | ||
942 | * before starting dma transfer. | ||
943 | */ | ||
944 | if (cpu_is_omap15xx()) | ||
945 | dma_write(0, CPC(lch)); | ||
946 | else | ||
947 | dma_write(0, CDAC(lch)); | ||
948 | |||
940 | if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { | 949 | if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { |
941 | int next_lch, cur_lch; | 950 | int next_lch, cur_lch; |
942 | char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; | 951 | char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; |
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index 76a347b3ce07..45a225d09125 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c | |||
@@ -798,7 +798,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger) | |||
798 | case METHOD_MPUIO: | 798 | case METHOD_MPUIO: |
799 | reg += OMAP_MPUIO_GPIO_INT_EDGE; | 799 | reg += OMAP_MPUIO_GPIO_INT_EDGE; |
800 | l = __raw_readl(reg); | 800 | l = __raw_readl(reg); |
801 | if (trigger & IRQ_TYPE_EDGE_BOTH) | 801 | if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) |
802 | bank->toggle_mask |= 1 << gpio; | 802 | bank->toggle_mask |= 1 << gpio; |
803 | if (trigger & IRQ_TYPE_EDGE_RISING) | 803 | if (trigger & IRQ_TYPE_EDGE_RISING) |
804 | l |= 1 << gpio; | 804 | l |= 1 << gpio; |
@@ -812,7 +812,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger) | |||
812 | case METHOD_GPIO_1510: | 812 | case METHOD_GPIO_1510: |
813 | reg += OMAP1510_GPIO_INT_CONTROL; | 813 | reg += OMAP1510_GPIO_INT_CONTROL; |
814 | l = __raw_readl(reg); | 814 | l = __raw_readl(reg); |
815 | if (trigger & IRQ_TYPE_EDGE_BOTH) | 815 | if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) |
816 | bank->toggle_mask |= 1 << gpio; | 816 | bank->toggle_mask |= 1 << gpio; |
817 | if (trigger & IRQ_TYPE_EDGE_RISING) | 817 | if (trigger & IRQ_TYPE_EDGE_RISING) |
818 | l |= 1 << gpio; | 818 | l |= 1 << gpio; |
@@ -846,7 +846,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger) | |||
846 | case METHOD_GPIO_7XX: | 846 | case METHOD_GPIO_7XX: |
847 | reg += OMAP7XX_GPIO_INT_CONTROL; | 847 | reg += OMAP7XX_GPIO_INT_CONTROL; |
848 | l = __raw_readl(reg); | 848 | l = __raw_readl(reg); |
849 | if (trigger & IRQ_TYPE_EDGE_BOTH) | 849 | if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) |
850 | bank->toggle_mask |= 1 << gpio; | 850 | bank->toggle_mask |= 1 << gpio; |
851 | if (trigger & IRQ_TYPE_EDGE_RISING) | 851 | if (trigger & IRQ_TYPE_EDGE_RISING) |
852 | l |= 1 << gpio; | 852 | l |= 1 << gpio; |
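The gpio.c hunks change `trigger & IRQ_TYPE_EDGE_BOTH` into `(trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH`. Because EDGE_BOTH is the OR of the rising and falling bits, the old test was also true for a single-edge request, so the toggle workaround was armed when it should not have been. A small demonstration with the same bit layout (the values mirror the generic IRQ trigger flags, but treat them as illustrative):

#include <stdio.h>

#define IRQ_TYPE_EDGE_RISING    0x1
#define IRQ_TYPE_EDGE_FALLING   0x2
#define IRQ_TYPE_EDGE_BOTH      (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)
#define IRQ_TYPE_SENSE_MASK     0xf

int main(void)
{
        unsigned int trigger = IRQ_TYPE_EDGE_RISING;    /* single edge requested */

        /* old test: true for any request containing an edge bit */
        printf("old test matches: %d\n", !!(trigger & IRQ_TYPE_EDGE_BOTH));

        /* new test: true only when both edges are requested */
        printf("new test matches: %d\n",
               (trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH);
        return 0;
}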
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h index b65088a869e9..401701977dbb 100644 --- a/arch/arm/plat-omap/include/plat/irqs.h +++ b/arch/arm/plat-omap/include/plat/irqs.h | |||
@@ -345,8 +345,6 @@ | |||
345 | #define INT_34XX_MMC3_IRQ 94 | 345 | #define INT_34XX_MMC3_IRQ 94 |
346 | #define INT_34XX_GPT12_IRQ 95 | 346 | #define INT_34XX_GPT12_IRQ 95 |
347 | 347 | ||
348 | #define INT_34XX_BENCH_MPU_EMUL 3 | ||
349 | |||
350 | #define INT_35XX_HECC0_IRQ 24 | 348 | #define INT_35XX_HECC0_IRQ 24 |
351 | #define INT_35XX_HECC1_IRQ 28 | 349 | #define INT_35XX_HECC1_IRQ 28 |
352 | #define INT_35XX_EMAC_C0_RXTHRESH_IRQ 67 | 350 | #define INT_35XX_EMAC_C0_RXTHRESH_IRQ 67 |
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h index 39748354ce45..7de903d7c1ce 100644 --- a/arch/arm/plat-omap/include/plat/mcbsp.h +++ b/arch/arm/plat-omap/include/plat/mcbsp.h | |||
@@ -59,7 +59,7 @@ | |||
59 | #define OMAP44XX_MCBSP1_BASE 0x49022000 | 59 | #define OMAP44XX_MCBSP1_BASE 0x49022000 |
60 | #define OMAP44XX_MCBSP2_BASE 0x49024000 | 60 | #define OMAP44XX_MCBSP2_BASE 0x49024000 |
61 | #define OMAP44XX_MCBSP3_BASE 0x49026000 | 61 | #define OMAP44XX_MCBSP3_BASE 0x49026000 |
62 | #define OMAP44XX_MCBSP4_BASE 0x48074000 | 62 | #define OMAP44XX_MCBSP4_BASE 0x48096000 |
63 | 63 | ||
64 | #if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850) | 64 | #if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850) |
65 | 65 | ||
diff --git a/arch/arm/plat-omap/include/plat/nand.h b/arch/arm/plat-omap/include/plat/nand.h index 6ba88d2630d9..f8efd5466b1d 100644 --- a/arch/arm/plat-omap/include/plat/nand.h +++ b/arch/arm/plat-omap/include/plat/nand.h | |||
@@ -29,4 +29,11 @@ struct omap_nand_platform_data { | |||
29 | /* size (4 KiB) for IO mapping */ | 29 | /* size (4 KiB) for IO mapping */ |
30 | #define NAND_IO_SIZE SZ_4K | 30 | #define NAND_IO_SIZE SZ_4K |
31 | 31 | ||
32 | #if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE) | ||
32 | extern int gpmc_nand_init(struct omap_nand_platform_data *d); | 33 | extern int gpmc_nand_init(struct omap_nand_platform_data *d); |
34 | #else | ||
35 | static inline int gpmc_nand_init(struct omap_nand_platform_data *d) | ||
36 | { | ||
37 | return 0; | ||
38 | } | ||
39 | #endif | ||
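The nand.h change is the usual header pattern for optional drivers: declare the real gpmc_nand_init() only when the OMAP2 NAND driver is built, and otherwise provide a static inline stub so board files can call it unconditionally without #ifdefs or link errors. A generic sketch of the pattern; the names here are placeholders:

#include <stdio.h>

/* #define CONFIG_MY_DRIVER 1 */        /* toggle to pull in the real code */

struct my_platform_data { int chip_select; };

#if defined(CONFIG_MY_DRIVER)
extern int my_driver_init(struct my_platform_data *d);
#else
static inline int my_driver_init(struct my_platform_data *d)
{
        (void)d;
        return 0;       /* driver not built: succeed silently */
}
#endif

int main(void)
{
        struct my_platform_data pd = { .chip_select = 0 };

        /* board code can call this unconditionally either way */
        printf("init returned %d\n", my_driver_init(&pd));
        return 0;
}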
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h index 2302474a3748..b3ef1a7f53cc 100644 --- a/arch/arm/plat-omap/include/plat/omap44xx.h +++ b/arch/arm/plat-omap/include/plat/omap44xx.h | |||
@@ -32,7 +32,7 @@ | |||
32 | #define OMAP4430_PRM_BASE 0x4a306000 | 32 | #define OMAP4430_PRM_BASE 0x4a306000 |
33 | #define OMAP44XX_GPMC_BASE 0x50000000 | 33 | #define OMAP44XX_GPMC_BASE 0x50000000 |
34 | #define OMAP443X_SCM_BASE 0x4a002000 | 34 | #define OMAP443X_SCM_BASE 0x4a002000 |
35 | #define OMAP443X_CTRL_BASE OMAP443X_SCM_BASE | 35 | #define OMAP443X_CTRL_BASE 0x4a100000 |
36 | #define OMAP44XX_IC_BASE 0x48200000 | 36 | #define OMAP44XX_IC_BASE 0x48200000 |
37 | #define OMAP44XX_IVA_INTC_BASE 0x40000000 | 37 | #define OMAP44XX_IVA_INTC_BASE 0x40000000 |
38 | #define IRQ_SIR_IRQ 0x0040 | 38 | #define IRQ_SIR_IRQ 0x0040 |
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h index 440b4164f2f6..36d6ea56ab51 100644 --- a/arch/arm/plat-omap/include/plat/omap_hwmod.h +++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h | |||
@@ -294,8 +294,8 @@ struct omap_hwmod_class_sysconfig { | |||
294 | u16 rev_offs; | 294 | u16 rev_offs; |
295 | u16 sysc_offs; | 295 | u16 sysc_offs; |
296 | u16 syss_offs; | 296 | u16 syss_offs; |
297 | u16 sysc_flags; | ||
297 | u8 idlemodes; | 298 | u8 idlemodes; |
298 | u8 sysc_flags; | ||
299 | u8 clockact; | 299 | u8 clockact; |
300 | struct omap_hwmod_sysc_fields *sysc_fields; | 300 | struct omap_hwmod_sysc_fields *sysc_fields; |
301 | }; | 301 | }; |
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index a420cb949328..315a540c7ce5 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c | |||
@@ -428,26 +428,6 @@ static void vfp_pm_init(void) | |||
428 | static inline void vfp_pm_init(void) { } | 428 | static inline void vfp_pm_init(void) { } |
429 | #endif /* CONFIG_PM */ | 429 | #endif /* CONFIG_PM */ |
430 | 430 | ||
431 | /* | ||
432 | * Synchronise the hardware VFP state of a thread other than current with the | ||
433 | * saved one. This function is used by the ptrace mechanism. | ||
434 | */ | ||
435 | #ifdef CONFIG_SMP | ||
436 | void vfp_sync_hwstate(struct thread_info *thread) | ||
437 | { | ||
438 | } | ||
439 | |||
440 | void vfp_flush_hwstate(struct thread_info *thread) | ||
441 | { | ||
442 | /* | ||
443 | * On SMP systems, the VFP state is automatically saved at every | ||
444 | * context switch. We mark the thread VFP state as belonging to a | ||
445 | * non-existent CPU so that the saved one will be reloaded when | ||
446 | * needed. | ||
447 | */ | ||
448 | thread->vfpstate.hard.cpu = NR_CPUS; | ||
449 | } | ||
450 | #else | ||
451 | void vfp_sync_hwstate(struct thread_info *thread) | 431 | void vfp_sync_hwstate(struct thread_info *thread) |
452 | { | 432 | { |
453 | unsigned int cpu = get_cpu(); | 433 | unsigned int cpu = get_cpu(); |
@@ -490,9 +470,18 @@ void vfp_flush_hwstate(struct thread_info *thread) | |||
490 | last_VFP_context[cpu] = NULL; | 470 | last_VFP_context[cpu] = NULL; |
491 | } | 471 | } |
492 | 472 | ||
473 | #ifdef CONFIG_SMP | ||
474 | /* | ||
475 | * For SMP we still have to take care of the case where the thread | ||
476 | * migrates to another CPU and then back to the original CPU on which | ||
477 | * the last VFP user is still the same thread. Mark the thread VFP | ||
478 | * state as belonging to a non-existent CPU so that the saved one will | ||
479 | * be reloaded in the above case. | ||
480 | */ | ||
481 | thread->vfpstate.hard.cpu = NR_CPUS; | ||
482 | #endif | ||
493 | put_cpu(); | 483 | put_cpu(); |
494 | } | 484 | } |
495 | #endif | ||
496 | 485 | ||
497 | #include <linux/smp.h> | 486 | #include <linux/smp.h> |
498 | 487 | ||
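The vfpmodule.c rework keeps a single flush/sync implementation and, on SMP, simply tags the thread's saved VFP state with an impossible CPU number; the next time the thread runs, the owner check fails and the registers are reloaded from memory instead of trusting possibly stale hardware contents. A scheduler-free sketch of that ownership check, with constants and structures invented for illustration:

#include <stdio.h>

#define NR_CPUS 4

struct vfp_state {
        int cpu;        /* CPU whose hardware currently holds this state */
        int regs;       /* stand-in for the register file contents */
};

static void maybe_reload(struct vfp_state *st, int this_cpu)
{
        if (st->cpu != this_cpu) {
                printf("cpu%d: reloading VFP state from memory\n", this_cpu);
                st->cpu = this_cpu;
        } else {
                printf("cpu%d: hardware state still valid\n", this_cpu);
        }
}

int main(void)
{
        struct vfp_state st = { .cpu = 1, .regs = 42 };

        maybe_reload(&st, 1);           /* still owned by cpu1: no reload */

        st.cpu = NR_CPUS;               /* flush: mark owner as non-existent */
        maybe_reload(&st, 1);           /* forces a reload even on the same CPU */
        return 0;
}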
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c index dd5b882aab40..5e73c25f8f85 100644 --- a/arch/avr32/kernel/ptrace.c +++ b/arch/avr32/kernel/ptrace.c | |||
@@ -28,7 +28,7 @@ static struct pt_regs *get_user_regs(struct task_struct *tsk) | |||
28 | THREAD_SIZE - sizeof(struct pt_regs)); | 28 | THREAD_SIZE - sizeof(struct pt_regs)); |
29 | } | 29 | } |
30 | 30 | ||
31 | static void user_enable_single_step(struct task_struct *tsk) | 31 | void user_enable_single_step(struct task_struct *tsk) |
32 | { | 32 | { |
33 | pr_debug("user_enable_single_step: pid=%u, PC=0x%08lx, SR=0x%08lx\n", | 33 | pr_debug("user_enable_single_step: pid=%u, PC=0x%08lx, SR=0x%08lx\n", |
34 | tsk->pid, task_pt_regs(tsk)->pc, task_pt_regs(tsk)->sr); | 34 | tsk->pid, task_pt_regs(tsk)->pc, task_pt_regs(tsk)->sr); |
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 73c5c2b05f64..7f3c0a2e60cd 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c | |||
@@ -1802,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm, | |||
1802 | { | 1802 | { |
1803 | struct kvm_memory_slot *memslot; | 1803 | struct kvm_memory_slot *memslot; |
1804 | int r, i; | 1804 | int r, i; |
1805 | long n, base; | 1805 | long base; |
1806 | unsigned long n; | ||
1806 | unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + | 1807 | unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + |
1807 | offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); | 1808 | offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); |
1808 | 1809 | ||
@@ -1815,7 +1816,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm, | |||
1815 | if (!memslot->dirty_bitmap) | 1816 | if (!memslot->dirty_bitmap) |
1816 | goto out; | 1817 | goto out; |
1817 | 1818 | ||
1818 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 1819 | n = kvm_dirty_bitmap_bytes(memslot); |
1819 | base = memslot->base_gfn / BITS_PER_LONG; | 1820 | base = memslot->base_gfn / BITS_PER_LONG; |
1820 | 1821 | ||
1821 | for (i = 0; i < n/sizeof(long); ++i) { | 1822 | for (i = 0; i < n/sizeof(long); ++i) { |
@@ -1831,7 +1832,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
1831 | struct kvm_dirty_log *log) | 1832 | struct kvm_dirty_log *log) |
1832 | { | 1833 | { |
1833 | int r; | 1834 | int r; |
1834 | int n; | 1835 | unsigned long n; |
1835 | struct kvm_memory_slot *memslot; | 1836 | struct kvm_memory_slot *memslot; |
1836 | int is_dirty = 0; | 1837 | int is_dirty = 0; |
1837 | 1838 | ||
@@ -1850,7 +1851,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
1850 | if (is_dirty) { | 1851 | if (is_dirty) { |
1851 | kvm_flush_remote_tlbs(kvm); | 1852 | kvm_flush_remote_tlbs(kvm); |
1852 | memslot = &kvm->memslots->memslots[log->slot]; | 1853 | memslot = &kvm->memslots->memslots[log->slot]; |
1853 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 1854 | n = kvm_dirty_bitmap_bytes(memslot); |
1854 | memset(memslot->dirty_bitmap, 0, n); | 1855 | memset(memslot->dirty_bitmap, 0, n); |
1855 | } | 1856 | } |
1856 | r = 0; | 1857 | r = 0; |
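Both KVM hunks replace the open-coded `ALIGN(memslot->npages, BITS_PER_LONG) / 8` with a kvm_dirty_bitmap_bytes() helper and widen the result to unsigned long, since a large memslot can overflow an int. The arithmetic being centralized is simply "round the page count up to a whole number of longs, then convert bits to bytes"; a quick standalone check of it:

#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define ALIGN(x, a)     (((x) + (a) - 1) / (a) * (a))

/* bytes needed for a one-bit-per-page dirty bitmap */
static unsigned long dirty_bitmap_bytes(unsigned long npages)
{
        return ALIGN(npages, BITS_PER_LONG) / 8;
}

int main(void)
{
        printf("%lu pages -> %lu bytes\n", 1UL, dirty_bitmap_bytes(1));       /* 8 on 64-bit */
        printf("%lu pages -> %lu bytes\n", 1000UL, dirty_bitmap_bytes(1000)); /* 128 on 64-bit */
        return 0;
}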
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h index 88b7af20a996..d9d2ed647435 100644 --- a/arch/m68k/include/asm/atomic_mm.h +++ b/arch/m68k/include/asm/atomic_mm.h | |||
@@ -148,14 +148,18 @@ static inline int atomic_xchg(atomic_t *v, int new) | |||
148 | static inline int atomic_sub_and_test(int i, atomic_t *v) | 148 | static inline int atomic_sub_and_test(int i, atomic_t *v) |
149 | { | 149 | { |
150 | char c; | 150 | char c; |
151 | __asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i)); | 151 | __asm__ __volatile__("subl %2,%1; seq %0" |
152 | : "=d" (c), "+m" (*v) | ||
153 | : "id" (i)); | ||
152 | return c != 0; | 154 | return c != 0; |
153 | } | 155 | } |
154 | 156 | ||
155 | static inline int atomic_add_negative(int i, atomic_t *v) | 157 | static inline int atomic_add_negative(int i, atomic_t *v) |
156 | { | 158 | { |
157 | char c; | 159 | char c; |
158 | __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i)); | 160 | __asm__ __volatile__("addl %2,%1; smi %0" |
161 | : "=d" (c), "+m" (*v) | ||
162 | : "id" (i)); | ||
159 | return c != 0; | 163 | return c != 0; |
160 | } | 164 | } |
161 | 165 | ||
diff --git a/arch/m68k/include/asm/mcfuart.h b/arch/m68k/include/asm/mcfuart.h index ef2293873612..01a8716c5fc5 100644 --- a/arch/m68k/include/asm/mcfuart.h +++ b/arch/m68k/include/asm/mcfuart.h | |||
@@ -212,5 +212,10 @@ struct mcf_platform_uart { | |||
212 | #define MCFUART_URF_RXS 0xc0 /* Receiver status */ | 212 | #define MCFUART_URF_RXS 0xc0 /* Receiver status */ |
213 | #endif | 213 | #endif |
214 | 214 | ||
215 | #if defined(CONFIG_M5272) | ||
216 | #define MCFUART_TXFIFOSIZE 25 | ||
217 | #else | ||
218 | #define MCFUART_TXFIFOSIZE 1 | ||
219 | #endif | ||
215 | /****************************************************************************/ | 220 | /****************************************************************************/ |
216 | #endif /* mcfuart_h */ | 221 | #endif /* mcfuart_h */ |
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h index 1320eaa4cc2a..a29dd74a17cb 100644 --- a/arch/m68k/include/asm/sigcontext.h +++ b/arch/m68k/include/asm/sigcontext.h | |||
@@ -17,13 +17,11 @@ struct sigcontext { | |||
17 | #ifndef __uClinux__ | 17 | #ifndef __uClinux__ |
18 | # ifdef __mcoldfire__ | 18 | # ifdef __mcoldfire__ |
19 | unsigned long sc_fpregs[2][2]; /* room for two fp registers */ | 19 | unsigned long sc_fpregs[2][2]; /* room for two fp registers */ |
20 | unsigned long sc_fpcntl[3]; | ||
21 | unsigned char sc_fpstate[16+6*8]; | ||
22 | # else | 20 | # else |
23 | unsigned long sc_fpregs[2*3]; /* room for two fp registers */ | 21 | unsigned long sc_fpregs[2*3]; /* room for two fp registers */ |
22 | # endif | ||
24 | unsigned long sc_fpcntl[3]; | 23 | unsigned long sc_fpcntl[3]; |
25 | unsigned char sc_fpstate[216]; | 24 | unsigned char sc_fpstate[216]; |
26 | # endif | ||
27 | #endif | 25 | #endif |
28 | }; | 26 | }; |
29 | 27 | ||
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile index ce404bc9ccbd..14042574ac21 100644 --- a/arch/m68knommu/Makefile +++ b/arch/m68knommu/Makefile | |||
@@ -94,7 +94,7 @@ cflags-$(CONFIG_M520x) := $(call cc-option,-mcpu=5208,-m5200) | |||
94 | cflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307) | 94 | cflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307) |
95 | cflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200) | 95 | cflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200) |
96 | cflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307) | 96 | cflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307) |
97 | cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5271,-m5200) | 97 | cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5272,-m5307) |
98 | cflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307) | 98 | cflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307) |
99 | cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307) | 99 | cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307) |
100 | cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200) | 100 | cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200) |
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S index 56043ade3941..aff6f57ef8b5 100644 --- a/arch/m68knommu/kernel/entry.S +++ b/arch/m68knommu/kernel/entry.S | |||
@@ -145,6 +145,6 @@ ENTRY(ret_from_user_signal) | |||
145 | trap #0 | 145 | trap #0 |
146 | 146 | ||
147 | ENTRY(ret_from_user_rt_signal) | 147 | ENTRY(ret_from_user_rt_signal) |
148 | move #__NR_rt_sigreturn,%d0 | 148 | movel #__NR_rt_sigreturn,%d0 |
149 | trap #0 | 149 | trap #0 |
150 | 150 | ||
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c index 1143f77caca4..6f22970d8c20 100644 --- a/arch/m68knommu/platform/68360/ints.c +++ b/arch/m68knommu/platform/68360/ints.c | |||
@@ -107,7 +107,6 @@ void init_IRQ(void) | |||
107 | _ramvec[vba+CPMVEC_PIO_PC7] = inthandler; /* pio - pc7 */ | 107 | _ramvec[vba+CPMVEC_PIO_PC7] = inthandler; /* pio - pc7 */ |
108 | _ramvec[vba+CPMVEC_PIO_PC6] = inthandler; /* pio - pc6 */ | 108 | _ramvec[vba+CPMVEC_PIO_PC6] = inthandler; /* pio - pc6 */ |
109 | _ramvec[vba+CPMVEC_TIMER3] = inthandler; /* timer 3 */ | 109 | _ramvec[vba+CPMVEC_TIMER3] = inthandler; /* timer 3 */ |
110 | _ramvec[vba+CPMVEC_RISCTIMER] = inthandler; /* reserved */ | ||
111 | _ramvec[vba+CPMVEC_PIO_PC5] = inthandler; /* pio - pc5 */ | 110 | _ramvec[vba+CPMVEC_PIO_PC5] = inthandler; /* pio - pc5 */ |
112 | _ramvec[vba+CPMVEC_PIO_PC4] = inthandler; /* pio - pc4 */ | 111 | _ramvec[vba+CPMVEC_PIO_PC4] = inthandler; /* pio - pc4 */ |
113 | _ramvec[vba+CPMVEC_RESERVED2] = inthandler; /* reserved */ | 112 | _ramvec[vba+CPMVEC_RESERVED2] = inthandler; /* reserved */ |
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 25da07fd9f77..604af29b71ed 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c | |||
@@ -1004,7 +1004,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
1004 | struct kvm_vcpu *vcpu; | 1004 | struct kvm_vcpu *vcpu; |
1005 | ulong ga, ga_end; | 1005 | ulong ga, ga_end; |
1006 | int is_dirty = 0; | 1006 | int is_dirty = 0; |
1007 | int r, n; | 1007 | int r; |
1008 | unsigned long n; | ||
1008 | 1009 | ||
1009 | mutex_lock(&kvm->slots_lock); | 1010 | mutex_lock(&kvm->slots_lock); |
1010 | 1011 | ||
@@ -1022,7 +1023,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
1022 | kvm_for_each_vcpu(n, vcpu, kvm) | 1023 | kvm_for_each_vcpu(n, vcpu, kvm) |
1023 | kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); | 1024 | kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); |
1024 | 1025 | ||
1025 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 1026 | n = kvm_dirty_bitmap_bytes(memslot); |
1026 | memset(memslot->dirty_bitmap, 0, n); | 1027 | memset(memslot->dirty_bitmap, 0, n); |
1027 | } | 1028 | } |
1028 | 1029 | ||
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index 4a76d9480cce..533f35751aeb 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h | |||
@@ -29,6 +29,7 @@ struct vdso_data { | |||
29 | __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ | 29 | __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ |
30 | __u32 tz_dsttime; /* Type of dst correction 0x34 */ | 30 | __u32 tz_dsttime; /* Type of dst correction 0x34 */ |
31 | __u32 ectg_available; | 31 | __u32 ectg_available; |
32 | __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */ | ||
32 | }; | 33 | }; |
33 | 34 | ||
34 | struct vdso_per_cpu_data { | 35 | struct vdso_per_cpu_data { |
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 08db736dded0..a09408952ed0 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -61,6 +61,7 @@ int main(void) | |||
61 | DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); | 61 | DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); |
62 | DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); | 62 | DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); |
63 | DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); | 63 | DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); |
64 | DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult)); | ||
64 | DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); | 65 | DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); |
65 | DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); | 66 | DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); |
66 | /* constants used by the vdso */ | 67 | /* constants used by the vdso */ |
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index b354427e03b7..c56d3f56d020 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -256,6 +256,9 @@ restore_registers: | |||
256 | lghi %r2,0 | 256 | lghi %r2,0 |
257 | brasl %r14,arch_set_page_states | 257 | brasl %r14,arch_set_page_states |
258 | 258 | ||
259 | /* Reinitialize the channel subsystem */ | ||
260 | brasl %r14,channel_subsystem_reinit | ||
261 | |||
259 | /* Return 0 */ | 262 | /* Return 0 */ |
260 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) | 263 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) |
261 | lghi %r2,0 | 264 | lghi %r2,0 |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index fba6dec156bf..d906bf19c14a 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -221,6 +221,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock, | |||
221 | vdso_data->xtime_clock_nsec = wall_time->tv_nsec; | 221 | vdso_data->xtime_clock_nsec = wall_time->tv_nsec; |
222 | vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; | 222 | vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; |
223 | vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; | 223 | vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; |
224 | vdso_data->ntp_mult = mult; | ||
224 | smp_wmb(); | 225 | smp_wmb(); |
225 | ++vdso_data->tb_update_count; | 226 | ++vdso_data->tb_update_count; |
226 | } | 227 | } |
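The vdso hunks that follow consume this new field: the hard-coded "* 1000 >> 12" TOD-to-nanosecond conversion becomes "* ntp_mult >> 12", so NTP slewing applied by the timekeeping core is visible to vdso readers. A sketch of the arithmetic (struct layout and names here are illustrative, not the exact s390 vdso_data):

    #include <stdint.h>
    #include <stdio.h>

    struct vdso_model {
            uint64_t xtime_tod_stamp;   /* TOD value at the last update   */
            uint64_t xtime_clock_nsec;  /* nanoseconds at the last update */
            uint32_t ntp_mult;          /* NTP-adjusted multiplier        */
    };

    static uint64_t tod_to_ns(const struct vdso_model *v, uint64_t tod_now)
    {
            uint64_t delta = tod_now - v->xtime_tod_stamp;

            /* cyc2ns: multiply by the adjusted mult and shift by 12,
             * instead of the old fixed "* 1000 >> 12". */
            return v->xtime_clock_nsec + ((delta * v->ntp_mult) >> 12);
    }

    int main(void)
    {
            struct vdso_model v = { 0, 0, 1000 };

            /* 4096 TOD units at the nominal mult of 1000 are 1000 ns. */
            printf("%llu\n", (unsigned long long)tod_to_ns(&v, 4096));
            return 0;
    }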
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S index 4a98909a8310..969643954273 100644 --- a/arch/s390/kernel/vdso32/clock_gettime.S +++ b/arch/s390/kernel/vdso32/clock_gettime.S | |||
@@ -38,13 +38,13 @@ __kernel_clock_gettime: | |||
38 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) | 38 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) |
39 | brc 3,2f | 39 | brc 3,2f |
40 | ahi %r0,-1 | 40 | ahi %r0,-1 |
41 | 2: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ | 41 | 2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ |
42 | lr %r2,%r0 | 42 | lr %r2,%r0 |
43 | lhi %r0,1000 | 43 | l %r0,__VDSO_NTP_MULT(%r5) |
44 | ltr %r1,%r1 | 44 | ltr %r1,%r1 |
45 | mr %r0,%r0 | 45 | mr %r0,%r0 |
46 | jnm 3f | 46 | jnm 3f |
47 | ahi %r0,1000 | 47 | a %r0,__VDSO_NTP_MULT(%r5) |
48 | 3: alr %r0,%r2 | 48 | 3: alr %r0,%r2 |
49 | srdl %r0,12 | 49 | srdl %r0,12 |
50 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | 50 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ |
@@ -86,13 +86,13 @@ __kernel_clock_gettime: | |||
86 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) | 86 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) |
87 | brc 3,12f | 87 | brc 3,12f |
88 | ahi %r0,-1 | 88 | ahi %r0,-1 |
89 | 12: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ | 89 | 12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ |
90 | lr %r2,%r0 | 90 | lr %r2,%r0 |
91 | lhi %r0,1000 | 91 | l %r0,__VDSO_NTP_MULT(%r5) |
92 | ltr %r1,%r1 | 92 | ltr %r1,%r1 |
93 | mr %r0,%r0 | 93 | mr %r0,%r0 |
94 | jnm 13f | 94 | jnm 13f |
95 | ahi %r0,1000 | 95 | a %r0,__VDSO_NTP_MULT(%r5) |
96 | 13: alr %r0,%r2 | 96 | 13: alr %r0,%r2 |
97 | srdl %r0,12 | 97 | srdl %r0,12 |
98 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | 98 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ |
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S index ad8acfc949fb..2d3633175e3b 100644 --- a/arch/s390/kernel/vdso32/gettimeofday.S +++ b/arch/s390/kernel/vdso32/gettimeofday.S | |||
@@ -35,13 +35,13 @@ __kernel_gettimeofday: | |||
35 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) | 35 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) |
36 | brc 3,3f | 36 | brc 3,3f |
37 | ahi %r0,-1 | 37 | ahi %r0,-1 |
38 | 3: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ | 38 | 3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ |
39 | st %r0,24(%r15) | 39 | st %r0,24(%r15) |
40 | lhi %r0,1000 | 40 | l %r0,__VDSO_NTP_MULT(%r5) |
41 | ltr %r1,%r1 | 41 | ltr %r1,%r1 |
42 | mr %r0,%r0 | 42 | mr %r0,%r0 |
43 | jnm 4f | 43 | jnm 4f |
44 | ahi %r0,1000 | 44 | a %r0,__VDSO_NTP_MULT(%r5) |
45 | 4: al %r0,24(%r15) | 45 | 4: al %r0,24(%r15) |
46 | srdl %r0,12 | 46 | srdl %r0,12 |
47 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | 47 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ |
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index 49106c6e6f88..f40467884a03 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S | |||
@@ -36,7 +36,7 @@ __kernel_clock_gettime: | |||
36 | stck 48(%r15) /* Store TOD clock */ | 36 | stck 48(%r15) /* Store TOD clock */ |
37 | lg %r1,48(%r15) | 37 | lg %r1,48(%r15) |
38 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | 38 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ |
39 | mghi %r1,1000 | 39 | msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ |
40 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ | 40 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ |
41 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | 41 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ |
42 | lg %r0,__VDSO_XTIME_SEC(%r5) | 42 | lg %r0,__VDSO_XTIME_SEC(%r5) |
@@ -64,7 +64,7 @@ __kernel_clock_gettime: | |||
64 | stck 48(%r15) /* Store TOD clock */ | 64 | stck 48(%r15) /* Store TOD clock */ |
65 | lg %r1,48(%r15) | 65 | lg %r1,48(%r15) |
66 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | 66 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ |
67 | mghi %r1,1000 | 67 | msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ |
68 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ | 68 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ |
69 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | 69 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ |
70 | lg %r0,__VDSO_XTIME_SEC(%r5) | 70 | lg %r0,__VDSO_XTIME_SEC(%r5) |
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S index f873e75634e1..36ee674722ec 100644 --- a/arch/s390/kernel/vdso64/gettimeofday.S +++ b/arch/s390/kernel/vdso64/gettimeofday.S | |||
@@ -31,7 +31,7 @@ __kernel_gettimeofday: | |||
31 | stck 48(%r15) /* Store TOD clock */ | 31 | stck 48(%r15) /* Store TOD clock */ |
32 | lg %r1,48(%r15) | 32 | lg %r1,48(%r15) |
33 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | 33 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ |
34 | mghi %r1,1000 | 34 | msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ |
35 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ | 35 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ |
36 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ | 36 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ |
37 | lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ | 37 | lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 6db513674050..9908d477ccd9 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -37,6 +37,9 @@ config SPARC64 | |||
37 | def_bool 64BIT | 37 | def_bool 64BIT |
38 | select ARCH_SUPPORTS_MSI | 38 | select ARCH_SUPPORTS_MSI |
39 | select HAVE_FUNCTION_TRACER | 39 | select HAVE_FUNCTION_TRACER |
40 | select HAVE_FUNCTION_GRAPH_TRACER | ||
41 | select HAVE_FUNCTION_GRAPH_FP_TEST | ||
42 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
40 | select HAVE_KRETPROBES | 43 | select HAVE_KRETPROBES |
41 | select HAVE_KPROBES | 44 | select HAVE_KPROBES |
42 | select HAVE_LMB | 45 | select HAVE_LMB |
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug index 9d3c889718ac..1b4a831565f9 100644 --- a/arch/sparc/Kconfig.debug +++ b/arch/sparc/Kconfig.debug | |||
@@ -19,13 +19,10 @@ config DEBUG_DCFLUSH | |||
19 | bool "D-cache flush debugging" | 19 | bool "D-cache flush debugging" |
20 | depends on SPARC64 && DEBUG_KERNEL | 20 | depends on SPARC64 && DEBUG_KERNEL |
21 | 21 | ||
22 | config STACK_DEBUG | ||
23 | bool "Stack Overflow Detection Support" | ||
24 | |||
25 | config MCOUNT | 22 | config MCOUNT |
26 | bool | 23 | bool |
27 | depends on SPARC64 | 24 | depends on SPARC64 |
28 | depends on STACK_DEBUG || FUNCTION_TRACER | 25 | depends on FUNCTION_TRACER |
29 | default y | 26 | default y |
30 | 27 | ||
31 | config FRAME_POINTER | 28 | config FRAME_POINTER |
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h index 926397d345ff..050ef35b9dcf 100644 --- a/arch/sparc/include/asm/cpudata_64.h +++ b/arch/sparc/include/asm/cpudata_64.h | |||
@@ -17,7 +17,7 @@ typedef struct { | |||
17 | unsigned int __nmi_count; | 17 | unsigned int __nmi_count; |
18 | unsigned long clock_tick; /* %tick's per second */ | 18 | unsigned long clock_tick; /* %tick's per second */ |
19 | unsigned long __pad; | 19 | unsigned long __pad; |
20 | unsigned int __pad1; | 20 | unsigned int irq0_irqs; |
21 | unsigned int __pad2; | 21 | unsigned int __pad2; |
22 | 22 | ||
23 | /* Dcache line 2, rarely used */ | 23 | /* Dcache line 2, rarely used */ |
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h index 8b49bf920df3..bfa1ea45b4cd 100644 --- a/arch/sparc/include/asm/irqflags_64.h +++ b/arch/sparc/include/asm/irqflags_64.h | |||
@@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void) | |||
76 | */ | 76 | */ |
77 | static inline unsigned long __raw_local_irq_save(void) | 77 | static inline unsigned long __raw_local_irq_save(void) |
78 | { | 78 | { |
79 | unsigned long flags = __raw_local_save_flags(); | 79 | unsigned long flags, tmp; |
80 | 80 | ||
81 | raw_local_irq_disable(); | 81 | /* Disable interrupts to PIL_NORMAL_MAX unless we already |
82 | * are using PIL_NMI, in which case PIL_NMI is retained. | ||
83 | * | ||
84 | * The only values we ever program into the %pil are 0, | ||
85 | * PIL_NORMAL_MAX and PIL_NMI. | ||
86 | * | ||
87 | * Since PIL_NMI is the largest %pil value and all bits are | ||
88 | * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX | ||
89 | * actually is. | ||
90 | */ | ||
91 | __asm__ __volatile__( | ||
92 | "rdpr %%pil, %0\n\t" | ||
93 | "or %0, %2, %1\n\t" | ||
94 | "wrpr %1, 0x0, %%pil" | ||
95 | : "=r" (flags), "=r" (tmp) | ||
96 | : "i" (PIL_NORMAL_MAX) | ||
97 | : "memory" | ||
98 | ); | ||
82 | 99 | ||
83 | return flags; | 100 | return flags; |
84 | } | 101 | } |
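In C terms the new inline assembly reads the current %pil, ORs in PIL_NORMAL_MAX and writes the result back in one go, so an NMI-level caller keeps PIL_NMI. A model of that behaviour (the constants follow the comment in the hunk; "pil" merely stands in for the register):

    #define PIL_NORMAL_MAX 0xe   /* illustrative; as the comment says, the
                                    exact value does not matter            */
    #define PIL_NMI        0xf   /* all bits set, so OR-ing preserves it   */

    static unsigned int pil;     /* stand-in for the %pil register         */

    static unsigned long irq_save_model(void)
    {
            unsigned long flags = pil;      /* old level, handed to the caller */

            pil = flags | PIL_NORMAL_MAX;   /* mask normal IRQs; PIL_NMI, if
                                               already set, stays set          */
            return flags;
    }

    static void irq_restore_model(unsigned long flags)
    {
            pil = flags;                    /* drop back to the saved level    */
    }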
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 9e2d9447f2ad..4827a3aeac7f 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h | |||
@@ -111,7 +111,7 @@ struct thread_info { | |||
111 | #define THREAD_SHIFT PAGE_SHIFT | 111 | #define THREAD_SHIFT PAGE_SHIFT |
112 | #endif /* PAGE_SHIFT == 13 */ | 112 | #endif /* PAGE_SHIFT == 13 */ |
113 | 113 | ||
114 | #define PREEMPT_ACTIVE 0x4000000 | 114 | #define PREEMPT_ACTIVE 0x10000000 |
115 | 115 | ||
116 | /* | 116 | /* |
117 | * macros/functions for gaining access to the thread information structure | 117 | * macros/functions for gaining access to the thread information structure |
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index c6316142db4e..0c2dc1f24a9a 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -13,6 +13,14 @@ extra-y += init_task.o | |||
13 | CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS) | 13 | CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS) |
14 | extra-y += vmlinux.lds | 14 | extra-y += vmlinux.lds |
15 | 15 | ||
16 | ifdef CONFIG_FUNCTION_TRACER | ||
17 | # Do not profile debug and lowlevel utilities | ||
18 | CFLAGS_REMOVE_ftrace.o := -pg | ||
19 | CFLAGS_REMOVE_time_$(BITS).o := -pg | ||
20 | CFLAGS_REMOVE_perf_event.o := -pg | ||
21 | CFLAGS_REMOVE_pcr.o := -pg | ||
22 | endif | ||
23 | |||
16 | obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o | 24 | obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o |
17 | obj-$(CONFIG_SPARC32) += etrap_32.o | 25 | obj-$(CONFIG_SPARC32) += etrap_32.o |
18 | obj-$(CONFIG_SPARC32) += rtrap_32.o | 26 | obj-$(CONFIG_SPARC32) += rtrap_32.o |
@@ -85,7 +93,7 @@ obj-$(CONFIG_KGDB) += kgdb_$(BITS).o | |||
85 | 93 | ||
86 | 94 | ||
87 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 95 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
88 | CFLAGS_REMOVE_ftrace.o := -pg | 96 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
89 | 97 | ||
90 | obj-$(CONFIG_EARLYFB) += btext.o | 98 | obj-$(CONFIG_EARLYFB) += btext.o |
91 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 99 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index 9103a56b39e8..03ab022e51c5 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c | |||
@@ -13,7 +13,7 @@ static const u32 ftrace_nop = 0x01000000; | |||
13 | 13 | ||
14 | static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) | 14 | static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) |
15 | { | 15 | { |
16 | static u32 call; | 16 | u32 call; |
17 | s32 off; | 17 | s32 off; |
18 | 18 | ||
19 | off = ((s32)addr - (s32)ip); | 19 | off = ((s32)addr - (s32)ip); |
@@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *data) | |||
91 | return 0; | 91 | return 0; |
92 | } | 92 | } |
93 | #endif | 93 | #endif |
94 | |||
95 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
96 | |||
97 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
98 | extern void ftrace_graph_call(void); | ||
99 | |||
100 | int ftrace_enable_ftrace_graph_caller(void) | ||
101 | { | ||
102 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | ||
103 | u32 old, new; | ||
104 | |||
105 | old = *(u32 *) &ftrace_graph_call; | ||
106 | new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller); | ||
107 | return ftrace_modify_code(ip, old, new); | ||
108 | } | ||
109 | |||
110 | int ftrace_disable_ftrace_graph_caller(void) | ||
111 | { | ||
112 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | ||
113 | u32 old, new; | ||
114 | |||
115 | old = *(u32 *) &ftrace_graph_call; | ||
116 | new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub); | ||
117 | |||
118 | return ftrace_modify_code(ip, old, new); | ||
119 | } | ||
120 | |||
121 | #endif /* !CONFIG_DYNAMIC_FTRACE */ | ||
122 | |||
123 | /* | ||
124 | * Hook the return address and push it in the stack of return addrs | ||
125 | * in current thread info. | ||
126 | */ | ||
127 | unsigned long prepare_ftrace_return(unsigned long parent, | ||
128 | unsigned long self_addr, | ||
129 | unsigned long frame_pointer) | ||
130 | { | ||
131 | unsigned long return_hooker = (unsigned long) &return_to_handler; | ||
132 | struct ftrace_graph_ent trace; | ||
133 | |||
134 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | ||
135 | return parent + 8UL; | ||
136 | |||
137 | if (ftrace_push_return_trace(parent, self_addr, &trace.depth, | ||
138 | frame_pointer) == -EBUSY) | ||
139 | return parent + 8UL; | ||
140 | |||
141 | trace.func = self_addr; | ||
142 | |||
143 | /* Only trace if the calling function expects to */ | ||
144 | if (!ftrace_graph_entry(&trace)) { | ||
145 | current->curr_ret_stack--; | ||
146 | return parent + 8UL; | ||
147 | } | ||
148 | |||
149 | return return_hooker; | ||
150 | } | ||
151 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
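prepare_ftrace_return() above is the entry half of the graph tracer: it records the real return address and hands back return_to_handler, so function exits can be traced; the "+ 8UL" bail-out skips past the sparc call instruction and its delay slot. A deliberately simplified, arch-independent model of that hand-off (not the kernel implementation):

    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t real_parent;      /* a per-task return stack in reality */

    static uintptr_t return_to_handler_model(void)
    {
            printf("traced function is returning\n");
            return real_parent;        /* where execution really continues   */
    }

    static uintptr_t prepare_return_model(uintptr_t parent)
    {
            real_parent = parent;      /* push the real return address       */
            return (uintptr_t)return_to_handler_model;
    }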
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index e1cbdb94d97b..830d70a3e20b 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c | |||
@@ -20,7 +20,9 @@ | |||
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
22 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
23 | #include <linux/ftrace.h> | ||
23 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/kmemleak.h> | ||
24 | 26 | ||
25 | #include <asm/ptrace.h> | 27 | #include <asm/ptrace.h> |
26 | #include <asm/processor.h> | 28 | #include <asm/processor.h> |
@@ -45,6 +47,7 @@ | |||
45 | 47 | ||
46 | #include "entry.h" | 48 | #include "entry.h" |
47 | #include "cpumap.h" | 49 | #include "cpumap.h" |
50 | #include "kstack.h" | ||
48 | 51 | ||
49 | #define NUM_IVECS (IMAP_INR + 1) | 52 | #define NUM_IVECS (IMAP_INR + 1) |
50 | 53 | ||
@@ -647,6 +650,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) | |||
647 | bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); | 650 | bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); |
648 | if (unlikely(!bucket)) | 651 | if (unlikely(!bucket)) |
649 | return 0; | 652 | return 0; |
653 | |||
654 | /* The only reference we store to the IRQ bucket is | ||
655 | * by physical address which kmemleak can't see, tell | ||
656 | * it that this object explicitly is not a leak and | ||
657 | * should be scanned. | ||
658 | */ | ||
659 | kmemleak_not_leak(bucket); | ||
660 | |||
650 | __flush_dcache_range((unsigned long) bucket, | 661 | __flush_dcache_range((unsigned long) bucket, |
651 | ((unsigned long) bucket + | 662 | ((unsigned long) bucket + |
652 | sizeof(struct ino_bucket))); | 663 | sizeof(struct ino_bucket))); |
@@ -703,25 +714,7 @@ void ack_bad_irq(unsigned int virt_irq) | |||
703 | void *hardirq_stack[NR_CPUS]; | 714 | void *hardirq_stack[NR_CPUS]; |
704 | void *softirq_stack[NR_CPUS]; | 715 | void *softirq_stack[NR_CPUS]; |
705 | 716 | ||
706 | static __attribute__((always_inline)) void *set_hardirq_stack(void) | 717 | void __irq_entry handler_irq(int irq, struct pt_regs *regs) |
707 | { | ||
708 | void *orig_sp, *sp = hardirq_stack[smp_processor_id()]; | ||
709 | |||
710 | __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp)); | ||
711 | if (orig_sp < sp || | ||
712 | orig_sp > (sp + THREAD_SIZE)) { | ||
713 | sp += THREAD_SIZE - 192 - STACK_BIAS; | ||
714 | __asm__ __volatile__("mov %0, %%sp" : : "r" (sp)); | ||
715 | } | ||
716 | |||
717 | return orig_sp; | ||
718 | } | ||
719 | static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp) | ||
720 | { | ||
721 | __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); | ||
722 | } | ||
723 | |||
724 | void handler_irq(int irq, struct pt_regs *regs) | ||
725 | { | 718 | { |
726 | unsigned long pstate, bucket_pa; | 719 | unsigned long pstate, bucket_pa; |
727 | struct pt_regs *old_regs; | 720 | struct pt_regs *old_regs; |
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index f5a0fd490b59..0a2bd0f99fc1 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #include <linux/kgdb.h> | 6 | #include <linux/kgdb.h> |
7 | #include <linux/kdebug.h> | 7 | #include <linux/kdebug.h> |
8 | #include <linux/ftrace.h> | ||
8 | 9 | ||
9 | #include <asm/kdebug.h> | 10 | #include <asm/kdebug.h> |
10 | #include <asm/ptrace.h> | 11 | #include <asm/ptrace.h> |
@@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
108 | } | 109 | } |
109 | 110 | ||
110 | #ifdef CONFIG_SMP | 111 | #ifdef CONFIG_SMP |
111 | void smp_kgdb_capture_client(int irq, struct pt_regs *regs) | 112 | void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs) |
112 | { | 113 | { |
113 | unsigned long flags; | 114 | unsigned long flags; |
114 | 115 | ||
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h index 5247283d1c03..53dfb92e09fb 100644 --- a/arch/sparc/kernel/kstack.h +++ b/arch/sparc/kernel/kstack.h | |||
@@ -61,4 +61,23 @@ check_magic: | |||
61 | 61 | ||
62 | } | 62 | } |
63 | 63 | ||
64 | static inline __attribute__((always_inline)) void *set_hardirq_stack(void) | ||
65 | { | ||
66 | void *orig_sp, *sp = hardirq_stack[smp_processor_id()]; | ||
67 | |||
68 | __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp)); | ||
69 | if (orig_sp < sp || | ||
70 | orig_sp > (sp + THREAD_SIZE)) { | ||
71 | sp += THREAD_SIZE - 192 - STACK_BIAS; | ||
72 | __asm__ __volatile__("mov %0, %%sp" : : "r" (sp)); | ||
73 | } | ||
74 | |||
75 | return orig_sp; | ||
76 | } | ||
77 | |||
78 | static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp) | ||
79 | { | ||
80 | __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); | ||
81 | } | ||
82 | |||
64 | #endif /* _KSTACK_H */ | 83 | #endif /* _KSTACK_H */ |
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index b287b62c7ea3..a4bd7ba74c89 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c | |||
@@ -23,6 +23,8 @@ | |||
23 | #include <asm/ptrace.h> | 23 | #include <asm/ptrace.h> |
24 | #include <asm/pcr.h> | 24 | #include <asm/pcr.h> |
25 | 25 | ||
26 | #include "kstack.h" | ||
27 | |||
26 | /* We don't have a real NMI on sparc64, but we can fake one | 28 | /* We don't have a real NMI on sparc64, but we can fake one |
27 | * up using profiling counter overflow interrupts and interrupt | 29 | * up using profiling counter overflow interrupts and interrupt |
28 | * levels. | 30 | * levels. |
@@ -92,7 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) | |||
92 | notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | 94 | notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) |
93 | { | 95 | { |
94 | unsigned int sum, touched = 0; | 96 | unsigned int sum, touched = 0; |
95 | int cpu = smp_processor_id(); | 97 | void *orig_sp; |
96 | 98 | ||
97 | clear_softint(1 << irq); | 99 | clear_softint(1 << irq); |
98 | 100 | ||
@@ -100,13 +102,15 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | |||
100 | 102 | ||
101 | nmi_enter(); | 103 | nmi_enter(); |
102 | 104 | ||
105 | orig_sp = set_hardirq_stack(); | ||
106 | |||
103 | if (notify_die(DIE_NMI, "nmi", regs, 0, | 107 | if (notify_die(DIE_NMI, "nmi", regs, 0, |
104 | pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) | 108 | pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) |
105 | touched = 1; | 109 | touched = 1; |
106 | else | 110 | else |
107 | pcr_ops->write(PCR_PIC_PRIV); | 111 | pcr_ops->write(PCR_PIC_PRIV); |
108 | 112 | ||
109 | sum = kstat_irqs_cpu(0, cpu); | 113 | sum = local_cpu_data().irq0_irqs; |
110 | if (__get_cpu_var(nmi_touch)) { | 114 | if (__get_cpu_var(nmi_touch)) { |
111 | __get_cpu_var(nmi_touch) = 0; | 115 | __get_cpu_var(nmi_touch) = 0; |
112 | touched = 1; | 116 | touched = 1; |
@@ -125,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | |||
125 | pcr_ops->write(pcr_enable); | 129 | pcr_ops->write(pcr_enable); |
126 | } | 130 | } |
127 | 131 | ||
132 | restore_hardirq_stack(orig_sp); | ||
133 | |||
128 | nmi_exit(); | 134 | nmi_exit(); |
129 | } | 135 | } |
130 | 136 | ||
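The perfctr_irq() change above swaps the progress indicator from kstat_irqs_cpu(0, cpu) to the new per-cpu irq0_irqs counter, which timer_interrupt() bumps later in this series. The watchdog logic it feeds looks roughly like this (threshold and field names are illustrative):

    struct cpu_model {
            unsigned int irq0_irqs;       /* bumped by every timer interrupt   */
            unsigned int last_seen;       /* value at the previous NMI         */
            unsigned int alert_counter;   /* consecutive NMIs with no progress */
    };

    /* Returns non-zero when the CPU looks hard-locked. */
    static int nmi_watchdog_tick(struct cpu_model *cpu)
    {
            if (cpu->irq0_irqs == cpu->last_seen) {
                    /* No timer tick since the last NMI fired. */
                    if (++cpu->alert_counter >= 30)
                            return 1;
            } else {
                    cpu->last_seen = cpu->irq0_irqs;
                    cpu->alert_counter = 0;
            }
            return 0;
    }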
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c index b775658a927d..8a000583b5cf 100644 --- a/arch/sparc/kernel/pci_common.c +++ b/arch/sparc/kernel/pci_common.c | |||
@@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm) | |||
371 | struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); | 371 | struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); |
372 | 372 | ||
373 | if (!rp) { | 373 | if (!rp) { |
374 | prom_printf("Cannot allocate IOMMU resource.\n"); | 374 | pr_info("%s: Cannot allocate IOMMU resource.\n", |
375 | prom_halt(); | 375 | pbm->name); |
376 | return; | ||
376 | } | 377 | } |
377 | rp->name = "IOMMU"; | 378 | rp->name = "IOMMU"; |
378 | rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; | 379 | rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; |
379 | rp->end = rp->start + (unsigned long) vdma[1] - 1UL; | 380 | rp->end = rp->start + (unsigned long) vdma[1] - 1UL; |
380 | rp->flags = IORESOURCE_BUSY; | 381 | rp->flags = IORESOURCE_BUSY; |
381 | request_resource(&pbm->mem_space, rp); | 382 | if (request_resource(&pbm->mem_space, rp)) { |
383 | pr_info("%s: Unable to request IOMMU resource.\n", | ||
384 | pbm->name); | ||
385 | kfree(rp); | ||
386 | } | ||
382 | } | 387 | } |
383 | } | 388 | } |
384 | 389 | ||
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 2d94e7a03af5..c4a6a50b4849 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/irq.h> | 8 | #include <linux/irq.h> |
9 | 9 | ||
10 | #include <linux/perf_event.h> | 10 | #include <linux/perf_event.h> |
11 | #include <linux/ftrace.h> | ||
11 | 12 | ||
12 | #include <asm/pil.h> | 13 | #include <asm/pil.h> |
13 | #include <asm/pcr.h> | 14 | #include <asm/pcr.h> |
@@ -34,7 +35,7 @@ unsigned int picl_shift; | |||
34 | * Therefore in such situations we defer the work by signalling | 35 | * Therefore in such situations we defer the work by signalling |
35 | * a lower level cpu IRQ. | 36 | * a lower level cpu IRQ. |
36 | */ | 37 | */ |
37 | void deferred_pcr_work_irq(int irq, struct pt_regs *regs) | 38 | void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs) |
38 | { | 39 | { |
39 | struct pt_regs *old_regs; | 40 | struct pt_regs *old_regs; |
40 | 41 | ||
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 83f1873c6c13..090b9e9ad5e3 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S | |||
@@ -130,7 +130,17 @@ rtrap_xcall: | |||
130 | nop | 130 | nop |
131 | call trace_hardirqs_on | 131 | call trace_hardirqs_on |
132 | nop | 132 | nop |
133 | wrpr %l4, %pil | 133 | /* Do not actually set the %pil here. We will do that |
134 | * below after we clear PSTATE_IE in the %pstate register. | ||
135 | * If we re-enable interrupts here, we can recurse down | ||
136 | * the hardirq stack potentially endlessly, causing a | ||
137 | * stack overflow. | ||
138 | * | ||
139 | * It is tempting to put this test and trace_hardirqs_on | ||
140 | * call at the 'rt_continue' label, but that will not work | ||
141 | * as that path hits unconditionally and we do not want to | ||
142 | * execute this in NMI return paths, for example. | ||
143 | */ | ||
134 | #endif | 144 | #endif |
135 | rtrap_no_irq_enable: | 145 | rtrap_no_irq_enable: |
136 | andcc %l1, TSTATE_PRIV, %l3 | 146 | andcc %l1, TSTATE_PRIV, %l3 |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 4c5334528109..b6a2b8f47040 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/profile.h> | 22 | #include <linux/profile.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/vmalloc.h> | 24 | #include <linux/vmalloc.h> |
25 | #include <linux/ftrace.h> | ||
25 | #include <linux/cpu.h> | 26 | #include <linux/cpu.h> |
26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
27 | 28 | ||
@@ -823,13 +824,13 @@ void arch_send_call_function_single_ipi(int cpu) | |||
823 | &cpumask_of_cpu(cpu)); | 824 | &cpumask_of_cpu(cpu)); |
824 | } | 825 | } |
825 | 826 | ||
826 | void smp_call_function_client(int irq, struct pt_regs *regs) | 827 | void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) |
827 | { | 828 | { |
828 | clear_softint(1 << irq); | 829 | clear_softint(1 << irq); |
829 | generic_smp_call_function_interrupt(); | 830 | generic_smp_call_function_interrupt(); |
830 | } | 831 | } |
831 | 832 | ||
832 | void smp_call_function_single_client(int irq, struct pt_regs *regs) | 833 | void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) |
833 | { | 834 | { |
834 | clear_softint(1 << irq); | 835 | clear_softint(1 << irq); |
835 | generic_smp_call_function_single_interrupt(); | 836 | generic_smp_call_function_single_interrupt(); |
@@ -965,7 +966,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |||
965 | put_cpu(); | 966 | put_cpu(); |
966 | } | 967 | } |
967 | 968 | ||
968 | void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) | 969 | void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) |
969 | { | 970 | { |
970 | struct mm_struct *mm; | 971 | struct mm_struct *mm; |
971 | unsigned long flags; | 972 | unsigned long flags; |
@@ -1149,7 +1150,7 @@ void smp_release(void) | |||
1149 | */ | 1150 | */ |
1150 | extern void prom_world(int); | 1151 | extern void prom_world(int); |
1151 | 1152 | ||
1152 | void smp_penguin_jailcell(int irq, struct pt_regs *regs) | 1153 | void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs) |
1153 | { | 1154 | { |
1154 | clear_softint(1 << irq); | 1155 | clear_softint(1 << irq); |
1155 | 1156 | ||
@@ -1365,7 +1366,7 @@ void smp_send_reschedule(int cpu) | |||
1365 | &cpumask_of_cpu(cpu)); | 1366 | &cpumask_of_cpu(cpu)); |
1366 | } | 1367 | } |
1367 | 1368 | ||
1368 | void smp_receive_signal_client(int irq, struct pt_regs *regs) | 1369 | void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) |
1369 | { | 1370 | { |
1370 | clear_softint(1 << irq); | 1371 | clear_softint(1 << irq); |
1371 | } | 1372 | } |
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 67e165102885..c7bbe6cf7b85 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/clocksource.h> | 35 | #include <linux/clocksource.h> |
36 | #include <linux/of_device.h> | 36 | #include <linux/of_device.h> |
37 | #include <linux/platform_device.h> | 37 | #include <linux/platform_device.h> |
38 | #include <linux/ftrace.h> | ||
38 | 39 | ||
39 | #include <asm/oplib.h> | 40 | #include <asm/oplib.h> |
40 | #include <asm/timer.h> | 41 | #include <asm/timer.h> |
@@ -717,7 +718,7 @@ static struct clock_event_device sparc64_clockevent = { | |||
717 | }; | 718 | }; |
718 | static DEFINE_PER_CPU(struct clock_event_device, sparc64_events); | 719 | static DEFINE_PER_CPU(struct clock_event_device, sparc64_events); |
719 | 720 | ||
720 | void timer_interrupt(int irq, struct pt_regs *regs) | 721 | void __irq_entry timer_interrupt(int irq, struct pt_regs *regs) |
721 | { | 722 | { |
722 | struct pt_regs *old_regs = set_irq_regs(regs); | 723 | struct pt_regs *old_regs = set_irq_regs(regs); |
723 | unsigned long tick_mask = tick_ops->softint_mask; | 724 | unsigned long tick_mask = tick_ops->softint_mask; |
@@ -728,6 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs) | |||
728 | 729 | ||
729 | irq_enter(); | 730 | irq_enter(); |
730 | 731 | ||
732 | local_cpu_data().irq0_irqs++; | ||
731 | kstat_incr_irqs_this_cpu(0, irq_to_desc(0)); | 733 | kstat_incr_irqs_this_cpu(0, irq_to_desc(0)); |
732 | 734 | ||
733 | if (unlikely(!evt->event_handler)) { | 735 | if (unlikely(!evt->event_handler)) { |
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 837dfc2390d6..9da57f032983 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c | |||
@@ -2203,27 +2203,6 @@ void dump_stack(void) | |||
2203 | 2203 | ||
2204 | EXPORT_SYMBOL(dump_stack); | 2204 | EXPORT_SYMBOL(dump_stack); |
2205 | 2205 | ||
2206 | static inline int is_kernel_stack(struct task_struct *task, | ||
2207 | struct reg_window *rw) | ||
2208 | { | ||
2209 | unsigned long rw_addr = (unsigned long) rw; | ||
2210 | unsigned long thread_base, thread_end; | ||
2211 | |||
2212 | if (rw_addr < PAGE_OFFSET) { | ||
2213 | if (task != &init_task) | ||
2214 | return 0; | ||
2215 | } | ||
2216 | |||
2217 | thread_base = (unsigned long) task_stack_page(task); | ||
2218 | thread_end = thread_base + sizeof(union thread_union); | ||
2219 | if (rw_addr >= thread_base && | ||
2220 | rw_addr < thread_end && | ||
2221 | !(rw_addr & 0x7UL)) | ||
2222 | return 1; | ||
2223 | |||
2224 | return 0; | ||
2225 | } | ||
2226 | |||
2227 | static inline struct reg_window *kernel_stack_up(struct reg_window *rw) | 2206 | static inline struct reg_window *kernel_stack_up(struct reg_window *rw) |
2228 | { | 2207 | { |
2229 | unsigned long fp = rw->ins[6]; | 2208 | unsigned long fp = rw->ins[6]; |
@@ -2252,6 +2231,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) | |||
2252 | show_regs(regs); | 2231 | show_regs(regs); |
2253 | add_taint(TAINT_DIE); | 2232 | add_taint(TAINT_DIE); |
2254 | if (regs->tstate & TSTATE_PRIV) { | 2233 | if (regs->tstate & TSTATE_PRIV) { |
2234 | struct thread_info *tp = current_thread_info(); | ||
2255 | struct reg_window *rw = (struct reg_window *) | 2235 | struct reg_window *rw = (struct reg_window *) |
2256 | (regs->u_regs[UREG_FP] + STACK_BIAS); | 2236 | (regs->u_regs[UREG_FP] + STACK_BIAS); |
2257 | 2237 | ||
@@ -2259,8 +2239,8 @@ void die_if_kernel(char *str, struct pt_regs *regs) | |||
2259 | * find some badly aligned kernel stack. | 2239 | * find some badly aligned kernel stack. |
2260 | */ | 2240 | */ |
2261 | while (rw && | 2241 | while (rw && |
2262 | count++ < 30&& | 2242 | count++ < 30 && |
2263 | is_kernel_stack(current, rw)) { | 2243 | kstack_valid(tp, (unsigned long) rw)) { |
2264 | printk("Caller[%016lx]: %pS\n", rw->ins[7], | 2244 | printk("Caller[%016lx]: %pS\n", rw->ins[7], |
2265 | (void *) rw->ins[7]); | 2245 | (void *) rw->ins[7]); |
2266 | 2246 | ||
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index ebce43018c49..c752c4c479bd 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c | |||
@@ -50,7 +50,7 @@ static inline enum direction decode_direction(unsigned int insn) | |||
50 | } | 50 | } |
51 | 51 | ||
52 | /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */ | 52 | /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */ |
53 | static inline int decode_access_size(unsigned int insn) | 53 | static inline int decode_access_size(struct pt_regs *regs, unsigned int insn) |
54 | { | 54 | { |
55 | unsigned int tmp; | 55 | unsigned int tmp; |
56 | 56 | ||
@@ -66,7 +66,7 @@ static inline int decode_access_size(unsigned int insn) | |||
66 | return 2; | 66 | return 2; |
67 | else { | 67 | else { |
68 | printk("Impossible unaligned trap. insn=%08x\n", insn); | 68 | printk("Impossible unaligned trap. insn=%08x\n", insn); |
69 | die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs); | 69 | die_if_kernel("Byte sized unaligned access?!?!", regs); |
70 | 70 | ||
71 | /* GCC should never warn that control reaches the end | 71 | /* GCC should never warn that control reaches the end |
72 | * of this function without returning a value because | 72 | * of this function without returning a value because |
@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs) | |||
286 | asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) | 286 | asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) |
287 | { | 287 | { |
288 | enum direction dir = decode_direction(insn); | 288 | enum direction dir = decode_direction(insn); |
289 | int size = decode_access_size(insn); | 289 | int size = decode_access_size(regs, insn); |
290 | int orig_asi, asi; | 290 | int orig_asi, asi; |
291 | 291 | ||
292 | current_thread_info()->kern_una_regs = regs; | 292 | current_thread_info()->kern_una_regs = regs; |
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 4e5992593967..0c1e6783657f 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S | |||
@@ -46,11 +46,16 @@ SECTIONS | |||
46 | SCHED_TEXT | 46 | SCHED_TEXT |
47 | LOCK_TEXT | 47 | LOCK_TEXT |
48 | KPROBES_TEXT | 48 | KPROBES_TEXT |
49 | IRQENTRY_TEXT | ||
49 | *(.gnu.warning) | 50 | *(.gnu.warning) |
50 | } = 0 | 51 | } = 0 |
51 | _etext = .; | 52 | _etext = .; |
52 | 53 | ||
53 | RO_DATA(PAGE_SIZE) | 54 | RO_DATA(PAGE_SIZE) |
55 | |||
56 | /* Start of data section */ | ||
57 | _sdata = .; | ||
58 | |||
54 | .data1 : { | 59 | .data1 : { |
55 | *(.data1) | 60 | *(.data1) |
56 | } | 61 | } |
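IRQENTRY_TEXT pulls in the .irqentry.text input section, which is where the __irq_entry annotation (added to the sparc interrupt handlers throughout this series) places functions so the function-graph tracer can recognise hard-IRQ entry points. Roughly, under the graph-tracer config the annotation amounts to a section attribute (a sketch, not quoted from the headers):

    /* Sketch of the annotation; with the graph tracer disabled it
     * expands to nothing. */
    #define __irq_entry_model __attribute__((__section__(".irqentry.text")))

    struct pt_regs;

    void __irq_entry_model example_handler(int irq, struct pt_regs *regs)
    {
            (void)irq;
            (void)regs;
            /* handler body runs from .irqentry.text */
    }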
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S index 24b8b12deed2..3ad6cbdc2163 100644 --- a/arch/sparc/lib/mcount.S +++ b/arch/sparc/lib/mcount.S | |||
@@ -7,26 +7,11 @@ | |||
7 | 7 | ||
8 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
9 | 9 | ||
10 | #include <asm/ptrace.h> | ||
11 | #include <asm/thread_info.h> | ||
12 | |||
13 | /* | 10 | /* |
14 | * This is the main variant and is called by C code. GCC's -pg option | 11 | * This is the main variant and is called by C code. GCC's -pg option |
15 | * automatically instruments every C function with a call to this. | 12 | * automatically instruments every C function with a call to this. |
16 | */ | 13 | */ |
17 | 14 | ||
18 | #ifdef CONFIG_STACK_DEBUG | ||
19 | |||
20 | #define OVSTACKSIZE 4096 /* lets hope this is enough */ | ||
21 | |||
22 | .data | ||
23 | .align 8 | ||
24 | panicstring: | ||
25 | .asciz "Stack overflow\n" | ||
26 | .align 8 | ||
27 | ovstack: | ||
28 | .skip OVSTACKSIZE | ||
29 | #endif | ||
30 | .text | 15 | .text |
31 | .align 32 | 16 | .align 32 |
32 | .globl _mcount | 17 | .globl _mcount |
@@ -35,84 +20,48 @@ ovstack: | |||
35 | .type mcount,#function | 20 | .type mcount,#function |
36 | _mcount: | 21 | _mcount: |
37 | mcount: | 22 | mcount: |
38 | #ifdef CONFIG_STACK_DEBUG | ||
39 | /* | ||
40 | * Check whether %sp is dangerously low. | ||
41 | */ | ||
42 | ldub [%g6 + TI_FPDEPTH], %g1 | ||
43 | srl %g1, 1, %g3 | ||
44 | add %g3, 1, %g3 | ||
45 | sllx %g3, 8, %g3 ! each fpregs frame is 256b | ||
46 | add %g3, 192, %g3 | ||
47 | add %g6, %g3, %g3 ! where does task_struct+frame end? | ||
48 | sub %g3, STACK_BIAS, %g3 | ||
49 | cmp %sp, %g3 | ||
50 | bg,pt %xcc, 1f | ||
51 | nop | ||
52 | lduh [%g6 + TI_CPU], %g1 | ||
53 | sethi %hi(hardirq_stack), %g3 | ||
54 | or %g3, %lo(hardirq_stack), %g3 | ||
55 | sllx %g1, 3, %g1 | ||
56 | ldx [%g3 + %g1], %g7 | ||
57 | sub %g7, STACK_BIAS, %g7 | ||
58 | cmp %sp, %g7 | ||
59 | bleu,pt %xcc, 2f | ||
60 | sethi %hi(THREAD_SIZE), %g3 | ||
61 | add %g7, %g3, %g7 | ||
62 | cmp %sp, %g7 | ||
63 | blu,pn %xcc, 1f | ||
64 | 2: sethi %hi(softirq_stack), %g3 | ||
65 | or %g3, %lo(softirq_stack), %g3 | ||
66 | ldx [%g3 + %g1], %g7 | ||
67 | sub %g7, STACK_BIAS, %g7 | ||
68 | cmp %sp, %g7 | ||
69 | bleu,pt %xcc, 3f | ||
70 | sethi %hi(THREAD_SIZE), %g3 | ||
71 | add %g7, %g3, %g7 | ||
72 | cmp %sp, %g7 | ||
73 | blu,pn %xcc, 1f | ||
74 | nop | ||
75 | /* If we are already on ovstack, don't hop onto it | ||
76 | * again, we are already trying to output the stack overflow | ||
77 | * message. | ||
78 | */ | ||
79 | 3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough | ||
80 | or %g7, %lo(ovstack), %g7 | ||
81 | add %g7, OVSTACKSIZE, %g3 | ||
82 | sub %g3, STACK_BIAS + 192, %g3 | ||
83 | sub %g7, STACK_BIAS, %g7 | ||
84 | cmp %sp, %g7 | ||
85 | blu,pn %xcc, 2f | ||
86 | cmp %sp, %g3 | ||
87 | bleu,pn %xcc, 1f | ||
88 | nop | ||
89 | 2: mov %g3, %sp | ||
90 | sethi %hi(panicstring), %g3 | ||
91 | call prom_printf | ||
92 | or %g3, %lo(panicstring), %o0 | ||
93 | call prom_halt | ||
94 | nop | ||
95 | 1: | ||
96 | #endif | ||
97 | #ifdef CONFIG_FUNCTION_TRACER | 23 | #ifdef CONFIG_FUNCTION_TRACER |
98 | #ifdef CONFIG_DYNAMIC_FTRACE | 24 | #ifdef CONFIG_DYNAMIC_FTRACE |
99 | mov %o7, %o0 | 25 | /* Do nothing, the retl/nop below is all we need. */ |
100 | .globl mcount_call | ||
101 | mcount_call: | ||
102 | call ftrace_stub | ||
103 | mov %o0, %o7 | ||
104 | #else | 26 | #else |
105 | sethi %hi(ftrace_trace_function), %g1 | 27 | sethi %hi(function_trace_stop), %g1 |
28 | lduw [%g1 + %lo(function_trace_stop)], %g2 | ||
29 | brnz,pn %g2, 2f | ||
30 | sethi %hi(ftrace_trace_function), %g1 | ||
106 | sethi %hi(ftrace_stub), %g2 | 31 | sethi %hi(ftrace_stub), %g2 |
107 | ldx [%g1 + %lo(ftrace_trace_function)], %g1 | 32 | ldx [%g1 + %lo(ftrace_trace_function)], %g1 |
108 | or %g2, %lo(ftrace_stub), %g2 | 33 | or %g2, %lo(ftrace_stub), %g2 |
109 | cmp %g1, %g2 | 34 | cmp %g1, %g2 |
110 | be,pn %icc, 1f | 35 | be,pn %icc, 1f |
111 | mov %i7, %o1 | 36 | mov %i7, %g3 |
112 | jmpl %g1, %g0 | 37 | save %sp, -176, %sp |
113 | mov %o7, %o0 | 38 | mov %g3, %o1 |
39 | jmpl %g1, %o7 | ||
40 | mov %i7, %o0 | ||
41 | ret | ||
42 | restore | ||
114 | /* not reached */ | 43 | /* not reached */ |
115 | 1: | 44 | 1: |
45 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
46 | sethi %hi(ftrace_graph_return), %g1 | ||
47 | ldx [%g1 + %lo(ftrace_graph_return)], %g3 | ||
48 | cmp %g2, %g3 | ||
49 | bne,pn %xcc, 5f | ||
50 | sethi %hi(ftrace_graph_entry_stub), %g2 | ||
51 | sethi %hi(ftrace_graph_entry), %g1 | ||
52 | or %g2, %lo(ftrace_graph_entry_stub), %g2 | ||
53 | ldx [%g1 + %lo(ftrace_graph_entry)], %g1 | ||
54 | cmp %g1, %g2 | ||
55 | be,pt %xcc, 2f | ||
56 | nop | ||
57 | 5: mov %i7, %g2 | ||
58 | mov %fp, %g3 | ||
59 | save %sp, -176, %sp | ||
60 | mov %g2, %l0 | ||
61 | ba,pt %xcc, ftrace_graph_caller | ||
62 | mov %g3, %l1 | ||
63 | #endif | ||
64 | 2: | ||
116 | #endif | 65 | #endif |
117 | #endif | 66 | #endif |
118 | retl | 67 | retl |
@@ -131,14 +80,50 @@ ftrace_stub: | |||
131 | .globl ftrace_caller | 80 | .globl ftrace_caller |
132 | .type ftrace_caller,#function | 81 | .type ftrace_caller,#function |
133 | ftrace_caller: | 82 | ftrace_caller: |
134 | mov %i7, %o1 | 83 | sethi %hi(function_trace_stop), %g1 |
135 | mov %o7, %o0 | 84 | mov %i7, %g2 |
85 | lduw [%g1 + %lo(function_trace_stop)], %g1 | ||
86 | brnz,pn %g1, ftrace_stub | ||
87 | mov %fp, %g3 | ||
88 | save %sp, -176, %sp | ||
89 | mov %g2, %o1 | ||
90 | mov %g2, %l0 | ||
91 | mov %g3, %l1 | ||
136 | .globl ftrace_call | 92 | .globl ftrace_call |
137 | ftrace_call: | 93 | ftrace_call: |
138 | call ftrace_stub | 94 | call ftrace_stub |
139 | mov %o0, %o7 | 95 | mov %i7, %o0 |
140 | retl | 96 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
97 | .globl ftrace_graph_call | ||
98 | ftrace_graph_call: | ||
99 | call ftrace_stub | ||
141 | nop | 100 | nop |
101 | #endif | ||
102 | ret | ||
103 | restore | ||
104 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
105 | .size ftrace_graph_call,.-ftrace_graph_call | ||
106 | #endif | ||
107 | .size ftrace_call,.-ftrace_call | ||
142 | .size ftrace_caller,.-ftrace_caller | 108 | .size ftrace_caller,.-ftrace_caller |
143 | #endif | 109 | #endif |
144 | #endif | 110 | #endif |
111 | |||
112 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
113 | ENTRY(ftrace_graph_caller) | ||
114 | mov %l0, %o0 | ||
115 | mov %i7, %o1 | ||
116 | call prepare_ftrace_return | ||
117 | mov %l1, %o2 | ||
118 | ret | ||
119 | restore %o0, -8, %i7 | ||
120 | END(ftrace_graph_caller) | ||
121 | |||
122 | ENTRY(return_to_handler) | ||
123 | save %sp, -176, %sp | ||
124 | call ftrace_return_to_handler | ||
125 | mov %fp, %o0 | ||
126 | jmpl %o0 + 8, %g0 | ||
127 | restore | ||
128 | END(return_to_handler) | ||
129 | #endif | ||
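Stripped of register shuffling, the rewritten mcount above does the following: bail out if function_trace_stop is set, call ftrace_trace_function when it is not the stub, and otherwise fall through to the graph-caller path when either graph hook has been installed. A rough C rendering (the globals here are stand-ins, and the real hooks have richer signatures):

    typedef void (*trace_fn)(unsigned long ip, unsigned long parent_ip);

    static void ftrace_stub_model(unsigned long ip, unsigned long parent_ip)
    {
            (void)ip;
            (void)parent_ip;
    }

    static int      function_trace_stop_model;
    static trace_fn ftrace_trace_function_model = ftrace_stub_model;
    static trace_fn ftrace_graph_return_model   = ftrace_stub_model;
    static trace_fn ftrace_graph_entry_model    = ftrace_stub_model;

    static void mcount_model(unsigned long ip, unsigned long parent_ip)
    {
            if (function_trace_stop_model)
                    return;                              /* tracing switched off */

            if (ftrace_trace_function_model != ftrace_stub_model) {
                    ftrace_trace_function_model(ip, parent_ip);
                    return;
            }

            /* Graph tracing: only take the slow path when one of the graph
             * hooks has been moved off its stub. */
            if (ftrace_graph_return_model != ftrace_stub_model ||
                ftrace_graph_entry_model != ftrace_stub_model)
                    ftrace_graph_entry_model(ip, parent_ip); /* -> graph caller */
    }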
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 64cda95f59ca..7a656bd8bd3c 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include "linux/irqreturn.h" | 6 | #include "linux/irqreturn.h" |
7 | #include "linux/kd.h" | 7 | #include "linux/kd.h" |
8 | #include "linux/sched.h" | 8 | #include "linux/sched.h" |
9 | #include "linux/slab.h" | ||
9 | #include "chan_kern.h" | 10 | #include "chan_kern.h" |
10 | #include "irq_kern.h" | 11 | #include "irq_kern.h" |
11 | #include "irq_user.h" | 12 | #include "irq_user.h" |
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c index 06d6ccf0e444..b6b1096152aa 100644 --- a/arch/um/os-Linux/helper.c +++ b/arch/um/os-Linux/helper.c | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <errno.h> | 8 | #include <errno.h> |
9 | #include <sched.h> | 9 | #include <sched.h> |
10 | #include <linux/limits.h> | 10 | #include <linux/limits.h> |
11 | #include <linux/slab.h> | ||
12 | #include <sys/socket.h> | 11 | #include <sys/socket.h> |
13 | #include <sys/wait.h> | 12 | #include <sys/wait.h> |
14 | #include "kern_constants.h" | 13 | #include "kern_constants.h" |
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 59b4556a5b92..e790bc1fbfa3 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -626,7 +626,7 @@ ia32_sys_call_table: | |||
626 | .quad stub32_sigreturn | 626 | .quad stub32_sigreturn |
627 | .quad stub32_clone /* 120 */ | 627 | .quad stub32_clone /* 120 */ |
628 | .quad sys_setdomainname | 628 | .quad sys_setdomainname |
629 | .quad sys_uname | 629 | .quad sys_newuname |
630 | .quad sys_modify_ldt | 630 | .quad sys_modify_ldt |
631 | .quad compat_sys_adjtimex | 631 | .quad compat_sys_adjtimex |
632 | .quad sys32_mprotect /* 125 */ | 632 | .quad sys32_mprotect /* 125 */ |
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index ba19ad4c47d0..86a0ff0aeac7 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #define _ASM_X86_AMD_IOMMU_TYPES_H | 21 | #define _ASM_X86_AMD_IOMMU_TYPES_H |
22 | 22 | ||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/mutex.h> | ||
24 | #include <linux/list.h> | 25 | #include <linux/list.h> |
25 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
26 | 27 | ||
@@ -140,6 +141,7 @@ | |||
140 | 141 | ||
141 | /* constants to configure the command buffer */ | 142 | /* constants to configure the command buffer */ |
142 | #define CMD_BUFFER_SIZE 8192 | 143 | #define CMD_BUFFER_SIZE 8192 |
144 | #define CMD_BUFFER_UNINITIALIZED 1 | ||
143 | #define CMD_BUFFER_ENTRIES 512 | 145 | #define CMD_BUFFER_ENTRIES 512 |
144 | #define MMIO_CMD_SIZE_SHIFT 56 | 146 | #define MMIO_CMD_SIZE_SHIFT 56 |
145 | #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT) | 147 | #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT) |
@@ -237,6 +239,7 @@ struct protection_domain { | |||
237 | struct list_head list; /* for list of all protection domains */ | 239 | struct list_head list; /* for list of all protection domains */ |
238 | struct list_head dev_list; /* List of all devices in this domain */ | 240 | struct list_head dev_list; /* List of all devices in this domain */ |
239 | spinlock_t lock; /* mostly used to lock the page table*/ | 241 | spinlock_t lock; /* mostly used to lock the page table*/ |
242 | struct mutex api_lock; /* protect page tables in the iommu-api path */ | ||
240 | u16 id; /* the domain id written to the device table */ | 243 | u16 id; /* the domain id written to the device table */ |
241 | int mode; /* paging mode (0-6 levels) */ | 244 | int mode; /* paging mode (0-6 levels) */ |
242 | u64 *pt_root; /* page table root pointer */ | 245 | u64 *pt_root; /* page table root pointer */ |
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index ba0eed8aa1a6..b60f2924c413 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h | |||
@@ -28,22 +28,39 @@ | |||
28 | 28 | ||
29 | #ifndef __ASSEMBLY__ | 29 | #ifndef __ASSEMBLY__ |
30 | #include <asm/hw_irq.h> | 30 | #include <asm/hw_irq.h> |
31 | #include <asm/kvm_para.h> | ||
32 | 31 | ||
33 | /*G:030 | 32 | /*G:030 |
34 | * But first, how does our Guest contact the Host to ask for privileged | 33 | * But first, how does our Guest contact the Host to ask for privileged |
35 | * operations? There are two ways: the direct way is to make a "hypercall", | 34 | * operations? There are two ways: the direct way is to make a "hypercall", |
36 | * to make requests of the Host Itself. | 35 | * to make requests of the Host Itself. |
37 | * | 36 | * |
38 | * We use the KVM hypercall mechanism, though completely different hypercall | 37 | * Our hypercall mechanism uses the highest unused trap code (traps 32 and |
39 | * numbers. Seventeen hypercalls are available: the hypercall number is put in | 38 | * above are used by real hardware interrupts). Seventeen hypercalls are |
40 | * the %eax register, and the arguments (when required) are placed in %ebx, | 39 | * available: the hypercall number is put in the %eax register, and the |
41 | * %ecx, %edx and %esi. If a return value makes sense, it's returned in %eax. | 40 | * arguments (when required) are placed in %ebx, %ecx, %edx and %esi. |
41 | * If a return value makes sense, it's returned in %eax. | ||
42 | * | 42 | * |
43 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful | 43 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful |
44 | * Host, rather than returning failure. This reflects Winston Churchill's | 44 | * Host, rather than returning failure. This reflects Winston Churchill's |
45 | * definition of a gentleman: "someone who is only rude intentionally". | 45 | * definition of a gentleman: "someone who is only rude intentionally". |
46 | :*/ | 46 | */ |
47 | static inline unsigned long | ||
48 | hcall(unsigned long call, | ||
49 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | ||
50 | unsigned long arg4) | ||
51 | { | ||
52 | /* "int" is the Intel instruction to trigger a trap. */ | ||
53 | asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) | ||
54 | /* The call in %eax (aka "a") might be overwritten */ | ||
55 | : "=a"(call) | ||
56 | /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */ | ||
57 | : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4) | ||
58 | /* "memory" means this might write somewhere in memory. | ||
59 | * This isn't true for all calls, but it's safe to tell | ||
60 | * gcc that it might happen so it doesn't get clever. */ | ||
61 | : "memory"); | ||
62 | return call; | ||
63 | } | ||
47 | 64 | ||
48 | /* Can't use our min() macro here: needs to be a constant */ | 65 | /* Can't use our min() macro here: needs to be a constant */ |
49 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) | 66 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) |
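A hypothetical guest-side use of the hcall() helper defined above, just to show the calling convention it documents; the call number here is made up for the example, real LHCALL_* values are defined elsewhere in this header:

    #define LHCALL_EXAMPLE_NOTIFY 12   /* illustrative number only */

    static void example_notify_host(unsigned long addr)
    {
            /* call number in %eax, arguments in %ebx..%esi, result in %eax */
            hcall(LHCALL_EXAMPLE_NOTIFY, addr, 0, 0, 0);
    }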
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index f3dadb571d9b..f854d89b7edf 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -118,7 +118,7 @@ static bool check_device(struct device *dev) | |||
118 | return false; | 118 | return false; |
119 | 119 | ||
120 | /* No device or no PCI device */ | 120 | /* No device or no PCI device */ |
121 | if (!dev || dev->bus != &pci_bus_type) | 121 | if (dev->bus != &pci_bus_type) |
122 | return false; | 122 | return false; |
123 | 123 | ||
124 | devid = get_device_id(dev); | 124 | devid = get_device_id(dev); |
@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | |||
392 | u32 tail, head; | 392 | u32 tail, head; |
393 | u8 *target; | 393 | u8 *target; |
394 | 394 | ||
395 | WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); | ||
395 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | 396 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); |
396 | target = iommu->cmd_buf + tail; | 397 | target = iommu->cmd_buf + tail; |
397 | memcpy_toio(target, cmd, sizeof(*cmd)); | 398 | memcpy_toio(target, cmd, sizeof(*cmd)); |
@@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void) | |||
2186 | struct dma_ops_domain *dma_dom; | 2187 | struct dma_ops_domain *dma_dom; |
2187 | u16 devid; | 2188 | u16 devid; |
2188 | 2189 | ||
2189 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 2190 | for_each_pci_dev(dev) { |
2190 | 2191 | ||
2191 | /* Do we handle this device? */ | 2192 | /* Do we handle this device? */ |
2192 | if (!check_device(&dev->dev)) | 2193 | if (!check_device(&dev->dev)) |
@@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain) | |||
2298 | list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { | 2299 | list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { |
2299 | struct device *dev = dev_data->dev; | 2300 | struct device *dev = dev_data->dev; |
2300 | 2301 | ||
2301 | do_detach(dev); | 2302 | __detach_device(dev); |
2302 | atomic_set(&dev_data->bind, 0); | 2303 | atomic_set(&dev_data->bind, 0); |
2303 | } | 2304 | } |
2304 | 2305 | ||
@@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void) | |||
2327 | return NULL; | 2328 | return NULL; |
2328 | 2329 | ||
2329 | spin_lock_init(&domain->lock); | 2330 | spin_lock_init(&domain->lock); |
2331 | mutex_init(&domain->api_lock); | ||
2330 | domain->id = domain_id_alloc(); | 2332 | domain->id = domain_id_alloc(); |
2331 | if (!domain->id) | 2333 | if (!domain->id) |
2332 | goto out_err; | 2334 | goto out_err; |
@@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom) | |||
2379 | 2381 | ||
2380 | free_pagetable(domain); | 2382 | free_pagetable(domain); |
2381 | 2383 | ||
2382 | domain_id_free(domain->id); | 2384 | protection_domain_free(domain); |
2383 | |||
2384 | kfree(domain); | ||
2385 | 2385 | ||
2386 | dom->priv = NULL; | 2386 | dom->priv = NULL; |
2387 | } | 2387 | } |
@@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom, | |||
2456 | iova &= PAGE_MASK; | 2456 | iova &= PAGE_MASK; |
2457 | paddr &= PAGE_MASK; | 2457 | paddr &= PAGE_MASK; |
2458 | 2458 | ||
2459 | mutex_lock(&domain->api_lock); | ||
2460 | |||
2459 | for (i = 0; i < npages; ++i) { | 2461 | for (i = 0; i < npages; ++i) { |
2460 | ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); | 2462 | ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); |
2461 | if (ret) | 2463 | if (ret) |
@@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom, | |||
2465 | paddr += PAGE_SIZE; | 2467 | paddr += PAGE_SIZE; |
2466 | } | 2468 | } |
2467 | 2469 | ||
2470 | mutex_unlock(&domain->api_lock); | ||
2471 | |||
2468 | return 0; | 2472 | return 0; |
2469 | } | 2473 | } |
2470 | 2474 | ||
@@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom, | |||
2477 | 2481 | ||
2478 | iova &= PAGE_MASK; | 2482 | iova &= PAGE_MASK; |
2479 | 2483 | ||
2484 | mutex_lock(&domain->api_lock); | ||
2485 | |||
2480 | for (i = 0; i < npages; ++i) { | 2486 | for (i = 0; i < npages; ++i) { |
2481 | iommu_unmap_page(domain, iova, PM_MAP_4k); | 2487 | iommu_unmap_page(domain, iova, PM_MAP_4k); |
2482 | iova += PAGE_SIZE; | 2488 | iova += PAGE_SIZE; |
2483 | } | 2489 | } |
2484 | 2490 | ||
2485 | iommu_flush_tlb_pde(domain); | 2491 | iommu_flush_tlb_pde(domain); |
2492 | |||
2493 | mutex_unlock(&domain->api_lock); | ||
2486 | } | 2494 | } |
2487 | 2495 | ||
2488 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | 2496 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, |
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 42f5350b908f..6360abf993d4 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -138,9 +138,9 @@ int amd_iommus_present; | |||
138 | bool amd_iommu_np_cache __read_mostly; | 138 | bool amd_iommu_np_cache __read_mostly; |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * Set to true if ACPI table parsing and hardware intialization went properly | 141 | * The ACPI table parsing functions set this variable on an error |
142 | */ | 142 | */ |
143 | static bool amd_iommu_initialized; | 143 | static int __initdata amd_iommu_init_err; |
144 | 144 | ||
145 | /* | 145 | /* |
146 | * List of protection domains - used during resume | 146 | * List of protection domains - used during resume |
@@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table) | |||
391 | */ | 391 | */ |
392 | for (i = 0; i < table->length; ++i) | 392 | for (i = 0; i < table->length; ++i) |
393 | checksum += p[i]; | 393 | checksum += p[i]; |
394 | if (checksum != 0) | 394 | if (checksum != 0) { |
395 | /* ACPI table corrupt */ | 395 | /* ACPI table corrupt */ |
396 | return -ENODEV; | 396 | amd_iommu_init_err = -ENODEV; |
397 | return 0; | ||
398 | } | ||
397 | 399 | ||
398 | p += IVRS_HEADER_LENGTH; | 400 | p += IVRS_HEADER_LENGTH; |
399 | 401 | ||
@@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) | |||
436 | if (cmd_buf == NULL) | 438 | if (cmd_buf == NULL) |
437 | return NULL; | 439 | return NULL; |
438 | 440 | ||
439 | iommu->cmd_buf_size = CMD_BUFFER_SIZE; | 441 | iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED; |
440 | 442 | ||
441 | return cmd_buf; | 443 | return cmd_buf; |
442 | } | 444 | } |
@@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu) | |||
472 | &entry, sizeof(entry)); | 474 | &entry, sizeof(entry)); |
473 | 475 | ||
474 | amd_iommu_reset_cmd_buffer(iommu); | 476 | amd_iommu_reset_cmd_buffer(iommu); |
477 | iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED); | ||
475 | } | 478 | } |
476 | 479 | ||
477 | static void __init free_command_buffer(struct amd_iommu *iommu) | 480 | static void __init free_command_buffer(struct amd_iommu *iommu) |
478 | { | 481 | { |
479 | free_pages((unsigned long)iommu->cmd_buf, | 482 | free_pages((unsigned long)iommu->cmd_buf, |
480 | get_order(iommu->cmd_buf_size)); | 483 | get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED))); |
481 | } | 484 | } |
482 | 485 | ||
483 | /* allocates the memory where the IOMMU will log its events to */ | 486 | /* allocates the memory where the IOMMU will log its events to */ |
@@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table) | |||
920 | h->mmio_phys); | 923 | h->mmio_phys); |
921 | 924 | ||
922 | iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); | 925 | iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); |
923 | if (iommu == NULL) | 926 | if (iommu == NULL) { |
924 | return -ENOMEM; | 927 | amd_iommu_init_err = -ENOMEM; |
928 | return 0; | ||
929 | } | ||
930 | |||
925 | ret = init_iommu_one(iommu, h); | 931 | ret = init_iommu_one(iommu, h); |
926 | if (ret) | 932 | if (ret) { |
927 | return ret; | 933 | amd_iommu_init_err = ret; |
934 | return 0; | ||
935 | } | ||
928 | break; | 936 | break; |
929 | default: | 937 | default: |
930 | break; | 938 | break; |
@@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table) | |||
934 | } | 942 | } |
935 | WARN_ON(p != end); | 943 | WARN_ON(p != end); |
936 | 944 | ||
937 | amd_iommu_initialized = true; | ||
938 | |||
939 | return 0; | 945 | return 0; |
940 | } | 946 | } |
941 | 947 | ||
@@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void) | |||
1211 | if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) | 1217 | if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) |
1212 | return -ENODEV; | 1218 | return -ENODEV; |
1213 | 1219 | ||
1220 | ret = amd_iommu_init_err; | ||
1221 | if (ret) | ||
1222 | goto out; | ||
1223 | |||
1214 | dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); | 1224 | dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); |
1215 | alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); | 1225 | alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); |
1216 | rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); | 1226 | rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); |
@@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void) | |||
1270 | if (acpi_table_parse("IVRS", init_iommu_all) != 0) | 1280 | if (acpi_table_parse("IVRS", init_iommu_all) != 0) |
1271 | goto free; | 1281 | goto free; |
1272 | 1282 | ||
1273 | if (!amd_iommu_initialized) | 1283 | if (amd_iommu_init_err) { |
1284 | ret = amd_iommu_init_err; | ||
1274 | goto free; | 1285 | goto free; |
1286 | } | ||
1275 | 1287 | ||
1276 | if (acpi_table_parse("IVRS", init_memory_definitions) != 0) | 1288 | if (acpi_table_parse("IVRS", init_memory_definitions) != 0) |
1277 | goto free; | 1289 | goto free; |
1278 | 1290 | ||
1291 | if (amd_iommu_init_err) { | ||
1292 | ret = amd_iommu_init_err; | ||
1293 | goto free; | ||
1294 | } | ||
1295 | |||
1279 | ret = sysdev_class_register(&amd_iommu_sysdev_class); | 1296 | ret = sysdev_class_register(&amd_iommu_sysdev_class); |
1280 | if (ret) | 1297 | if (ret) |
1281 | goto free; | 1298 | goto free; |
@@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void) | |||
1288 | if (ret) | 1305 | if (ret) |
1289 | goto free; | 1306 | goto free; |
1290 | 1307 | ||
1308 | enable_iommus(); | ||
1309 | |||
1291 | if (iommu_pass_through) | 1310 | if (iommu_pass_through) |
1292 | ret = amd_iommu_init_passthrough(); | 1311 | ret = amd_iommu_init_passthrough(); |
1293 | else | 1312 | else |
@@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void) | |||
1300 | 1319 | ||
1301 | amd_iommu_init_notifier(); | 1320 | amd_iommu_init_notifier(); |
1302 | 1321 | ||
1303 | enable_iommus(); | ||
1304 | |||
1305 | if (iommu_pass_through) | 1322 | if (iommu_pass_through) |
1306 | goto out; | 1323 | goto out; |
1307 | 1324 | ||
@@ -1315,6 +1332,7 @@ out: | |||
1315 | return ret; | 1332 | return ret; |
1316 | 1333 | ||
1317 | free: | 1334 | free: |
1335 | disable_iommus(); | ||
1318 | 1336 | ||
1319 | amd_iommu_uninit_devices(); | 1337 | amd_iommu_uninit_devices(); |
1320 | 1338 | ||
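The pattern in the amd_iommu_init.c hunks above exists because acpi_table_parse() only reports whether the IVRS table was found; a callback's own return value is discarded, so the callbacks record failures in amd_iommu_init_err and amd_iommu_init() checks that variable after each parse pass. A minimal user-space sketch of the same idea (parse_table, init_callback and parse_err are invented names for illustration, not kernel APIs):

#include <stdio.h>
#include <errno.h>

/*
 * Stand-in for acpi_table_parse(): it only says whether the table was
 * found and silently drops the callback's return value, which is why
 * the real code stashes failures in a file-scope variable instead.
 */
static int parse_err;                   /* plays the role of amd_iommu_init_err */

static int parse_table(int (*cb)(void))
{
        cb();                           /* callback result is dropped here */
        return 0;                       /* 0 == "table found"              */
}

static int init_callback(void)
{
        parse_err = -ENOMEM;            /* record the failure for the caller */
        return 0;
}

int main(void)
{
        if (parse_table(init_callback) != 0)
                return 1;               /* table missing                     */
        if (parse_err) {                /* now check the stashed error       */
                fprintf(stderr, "init failed: %d\n", parse_err);
                return 1;
        }
        return 0;
}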
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 3704997e8b25..b5d8b0bcf235 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -393,6 +393,7 @@ void __init gart_iommu_hole_init(void) | |||
393 | for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { | 393 | for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { |
394 | int bus; | 394 | int bus; |
395 | int dev_base, dev_limit; | 395 | int dev_base, dev_limit; |
396 | u32 ctl; | ||
396 | 397 | ||
397 | bus = bus_dev_ranges[i].bus; | 398 | bus = bus_dev_ranges[i].bus; |
398 | dev_base = bus_dev_ranges[i].dev_base; | 399 | dev_base = bus_dev_ranges[i].dev_base; |
@@ -406,7 +407,19 @@ void __init gart_iommu_hole_init(void) | |||
406 | gart_iommu_aperture = 1; | 407 | gart_iommu_aperture = 1; |
407 | x86_init.iommu.iommu_init = gart_iommu_init; | 408 | x86_init.iommu.iommu_init = gart_iommu_init; |
408 | 409 | ||
409 | aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7; | 410 | ctl = read_pci_config(bus, slot, 3, |
411 | AMD64_GARTAPERTURECTL); | ||
412 | |||
413 | /* | ||
414 | * Before we do anything else disable the GART. It may | ||
415 | * still be enabled if we boot into a crash-kernel here. | ||
416 | * Reconfiguring the GART while it is enabled could have | ||
417 | * unknown side-effects. | ||
418 | */ | ||
419 | ctl &= ~GARTEN; | ||
420 | write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); | ||
421 | |||
422 | aper_order = (ctl >> 1) & 7; | ||
410 | aper_size = (32 * 1024 * 1024) << aper_order; | 423 | aper_size = (32 * 1024 * 1024) << aper_order; |
411 | aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; | 424 | aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; |
412 | aper_base <<= 25; | 425 | aper_base <<= 25; |
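The aperture_64.c hunk reads AMD64_GARTAPERTURECTL once, clears the enable bit before anything else touches the aperture (which matters when a crash kernel boots with the GART still live), and only then decodes the order field from the saved value. A rough stand-alone sketch of that read-modify-write; the GARTEN value and the fake accessors are assumptions made for the sketch:

#include <stdint.h>
#include <stdio.h>

#define GARTEN  (1u << 0)       /* enable bit, value assumed for the sketch */

/* Fake config-space accessors standing in for read/write_pci_config(). */
static uint32_t fake_ctl = GARTEN | (3u << 1);  /* enabled, order 3 */

static uint32_t read_ctl(void)        { return fake_ctl; }
static void     write_ctl(uint32_t v) { fake_ctl = v; }

int main(void)
{
        uint32_t ctl = read_ctl();

        ctl &= ~GARTEN;                 /* turn the GART off first...        */
        write_ctl(ctl);                 /* ...then it is safe to reconfigure */

        unsigned order = (ctl >> 1) & 7;        /* aperture order field */
        printf("aperture: %lu MB\n", 32UL << order);
        return 0;
}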
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 1cbed97b59cf..dfdb4dba2320 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
@@ -22,6 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/dmi.h> | 24 | #include <linux/dmi.h> |
25 | #include <linux/module.h> | ||
25 | #include <asm/div64.h> | 26 | #include <asm/div64.h> |
26 | #include <asm/vmware.h> | 27 | #include <asm/vmware.h> |
27 | #include <asm/x86_init.h> | 28 | #include <asm/x86_init.h> |
@@ -101,6 +102,7 @@ int vmware_platform(void) | |||
101 | 102 | ||
102 | return 0; | 103 | return 0; |
103 | } | 104 | } |
105 | EXPORT_SYMBOL(vmware_platform); | ||
104 | 106 | ||
105 | /* | 107 | /* |
106 | * VMware hypervisor takes care of exporting a reliable TSC to the guest. | 108 | * VMware hypervisor takes care of exporting a reliable TSC to the guest. |
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index a4849c10a77e..ebd4c51d096a 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <asm/cpu.h> | 27 | #include <asm/cpu.h> |
28 | #include <asm/reboot.h> | 28 | #include <asm/reboot.h> |
29 | #include <asm/virtext.h> | 29 | #include <asm/virtext.h> |
30 | #include <asm/x86_init.h> | ||
31 | 30 | ||
32 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) | 31 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) |
33 | 32 | ||
@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs) | |||
103 | #ifdef CONFIG_HPET_TIMER | 102 | #ifdef CONFIG_HPET_TIMER |
104 | hpet_disable(); | 103 | hpet_disable(); |
105 | #endif | 104 | #endif |
106 | |||
107 | #ifdef CONFIG_X86_64 | ||
108 | x86_platform.iommu_shutdown(); | ||
109 | #endif | ||
110 | |||
111 | crash_save_cpu(regs, safe_smp_processor_id()); | 105 | crash_save_cpu(regs, safe_smp_processor_id()); |
112 | } | 106 | } |
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h index e39e77168a37..e1a93be4fd44 100644 --- a/arch/x86/kernel/dumpstack.h +++ b/arch/x86/kernel/dumpstack.h | |||
@@ -14,6 +14,8 @@ | |||
14 | #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) | 14 | #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) |
15 | #endif | 15 | #endif |
16 | 16 | ||
17 | #include <linux/uaccess.h> | ||
18 | |||
17 | extern void | 19 | extern void |
18 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | 20 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, |
19 | unsigned long *stack, unsigned long bp, char *log_lvl); | 21 | unsigned long *stack, unsigned long bp, char *log_lvl); |
@@ -42,8 +44,10 @@ static inline unsigned long rewind_frame_pointer(int n) | |||
42 | get_bp(frame); | 44 | get_bp(frame); |
43 | 45 | ||
44 | #ifdef CONFIG_FRAME_POINTER | 46 | #ifdef CONFIG_FRAME_POINTER |
45 | while (n--) | 47 | while (n--) { |
46 | frame = frame->next_frame; | 48 | if (probe_kernel_address(&frame->next_frame, frame)) |
49 | break; | ||
50 | } | ||
47 | #endif | 51 | #endif |
48 | 52 | ||
49 | return (unsigned long)frame; | 53 | return (unsigned long)frame; |
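The dumpstack.h change stops rewind_frame_pointer() from blindly chasing frame->next_frame: each step now goes through probe_kernel_address(), so a bogus saved frame pointer ends the walk instead of faulting inside the stack dumper. A user-space approximation, where a NULL check stands in for the faulting-access probe:

#include <stdio.h>
#include <stddef.h>

struct frame {
        struct frame *next_frame;       /* saved caller frame pointer */
};

/* Walk back n frames, but stop as soon as following the chain would fail. */
static struct frame *rewind_frames(struct frame *f, int n)
{
        while (n--) {
                if (!f->next_frame)     /* stand-in for a failed probe */
                        break;
                f = f->next_frame;
        }
        return f;
}

int main(void)
{
        struct frame f2 = { NULL }, f1 = { &f2 }, f0 = { &f1 };

        printf("stopped at f2? %d\n", rewind_frames(&f0, 5) == &f2);
        return 0;
}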
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 68cd24f9deae..0f7f130caa67 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -565,6 +565,9 @@ static void enable_gart_translations(void) | |||
565 | 565 | ||
566 | enable_gart_translation(dev, __pa(agp_gatt_table)); | 566 | enable_gart_translation(dev, __pa(agp_gatt_table)); |
567 | } | 567 | } |
568 | |||
569 | /* Flush the GART-TLB to remove stale entries */ | ||
570 | k8_flush_garts(); | ||
568 | } | 571 | } |
569 | 572 | ||
570 | /* | 573 | /* |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 48aeee8eefb0..19a8906bcaa2 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1490,8 +1490,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm, | |||
1490 | for_each_sp(pages, sp, parents, i) { | 1490 | for_each_sp(pages, sp, parents, i) { |
1491 | kvm_mmu_zap_page(kvm, sp); | 1491 | kvm_mmu_zap_page(kvm, sp); |
1492 | mmu_pages_clear_parents(&parents); | 1492 | mmu_pages_clear_parents(&parents); |
1493 | zapped++; | ||
1493 | } | 1494 | } |
1494 | zapped += pages.nr; | ||
1495 | kvm_mmu_pages_init(parent, &parents, &pages); | 1495 | kvm_mmu_pages_init(parent, &parents, &pages); |
1496 | } | 1496 | } |
1497 | 1497 | ||
@@ -1542,14 +1542,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages) | |||
1542 | */ | 1542 | */ |
1543 | 1543 | ||
1544 | if (used_pages > kvm_nr_mmu_pages) { | 1544 | if (used_pages > kvm_nr_mmu_pages) { |
1545 | while (used_pages > kvm_nr_mmu_pages) { | 1545 | while (used_pages > kvm_nr_mmu_pages && |
1546 | !list_empty(&kvm->arch.active_mmu_pages)) { | ||
1546 | struct kvm_mmu_page *page; | 1547 | struct kvm_mmu_page *page; |
1547 | 1548 | ||
1548 | page = container_of(kvm->arch.active_mmu_pages.prev, | 1549 | page = container_of(kvm->arch.active_mmu_pages.prev, |
1549 | struct kvm_mmu_page, link); | 1550 | struct kvm_mmu_page, link); |
1550 | kvm_mmu_zap_page(kvm, page); | 1551 | used_pages -= kvm_mmu_zap_page(kvm, page); |
1551 | used_pages--; | 1552 | used_pages--; |
1552 | } | 1553 | } |
1554 | kvm_nr_mmu_pages = used_pages; | ||
1553 | kvm->arch.n_free_mmu_pages = 0; | 1555 | kvm->arch.n_free_mmu_pages = 0; |
1554 | } | 1556 | } |
1555 | else | 1557 | else |
@@ -1596,7 +1598,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn) | |||
1596 | && !sp->role.invalid) { | 1598 | && !sp->role.invalid) { |
1597 | pgprintk("%s: zap %lx %x\n", | 1599 | pgprintk("%s: zap %lx %x\n", |
1598 | __func__, gfn, sp->role.word); | 1600 | __func__, gfn, sp->role.word); |
1599 | kvm_mmu_zap_page(kvm, sp); | 1601 | if (kvm_mmu_zap_page(kvm, sp)) |
1602 | nn = bucket->first; | ||
1600 | } | 1603 | } |
1601 | } | 1604 | } |
1602 | } | 1605 | } |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 445c59411ed0..2ba58206812a 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -706,29 +706,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) | |||
706 | if (err) | 706 | if (err) |
707 | goto free_svm; | 707 | goto free_svm; |
708 | 708 | ||
709 | err = -ENOMEM; | ||
709 | page = alloc_page(GFP_KERNEL); | 710 | page = alloc_page(GFP_KERNEL); |
710 | if (!page) { | 711 | if (!page) |
711 | err = -ENOMEM; | ||
712 | goto uninit; | 712 | goto uninit; |
713 | } | ||
714 | 713 | ||
715 | err = -ENOMEM; | ||
716 | msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); | 714 | msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); |
717 | if (!msrpm_pages) | 715 | if (!msrpm_pages) |
718 | goto uninit; | 716 | goto free_page1; |
719 | 717 | ||
720 | nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); | 718 | nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); |
721 | if (!nested_msrpm_pages) | 719 | if (!nested_msrpm_pages) |
722 | goto uninit; | 720 | goto free_page2; |
723 | |||
724 | svm->msrpm = page_address(msrpm_pages); | ||
725 | svm_vcpu_init_msrpm(svm->msrpm); | ||
726 | 721 | ||
727 | hsave_page = alloc_page(GFP_KERNEL); | 722 | hsave_page = alloc_page(GFP_KERNEL); |
728 | if (!hsave_page) | 723 | if (!hsave_page) |
729 | goto uninit; | 724 | goto free_page3; |
725 | |||
730 | svm->nested.hsave = page_address(hsave_page); | 726 | svm->nested.hsave = page_address(hsave_page); |
731 | 727 | ||
728 | svm->msrpm = page_address(msrpm_pages); | ||
729 | svm_vcpu_init_msrpm(svm->msrpm); | ||
730 | |||
732 | svm->nested.msrpm = page_address(nested_msrpm_pages); | 731 | svm->nested.msrpm = page_address(nested_msrpm_pages); |
733 | 732 | ||
734 | svm->vmcb = page_address(page); | 733 | svm->vmcb = page_address(page); |
@@ -744,6 +743,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) | |||
744 | 743 | ||
745 | return &svm->vcpu; | 744 | return &svm->vcpu; |
746 | 745 | ||
746 | free_page3: | ||
747 | __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER); | ||
748 | free_page2: | ||
749 | __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER); | ||
750 | free_page1: | ||
751 | __free_page(page); | ||
747 | uninit: | 752 | uninit: |
748 | kvm_vcpu_uninit(&svm->vcpu); | 753 | kvm_vcpu_uninit(&svm->vcpu); |
749 | free_svm: | 754 | free_svm: |
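The svm.c rework replaces the repeated "goto uninit" paths (which leaked the earlier page allocations) with one cleanup label per allocation, unwound in reverse order. A generic sketch of that error-unwinding idiom with made-up resources:

#include <stdlib.h>
#include <errno.h>

/*
 * Allocate three buffers; on failure free only what was already acquired,
 * in reverse order -- the same shape as the free_page3/2/1 labels above.
 */
static int setup(void **a, void **b, void **c)
{
        int err = -ENOMEM;

        *a = malloc(64);
        if (!*a)
                goto fail;
        *b = malloc(64);
        if (!*b)
                goto free_a;
        *c = malloc(64);
        if (!*c)
                goto free_b;
        return 0;

free_b:
        free(*b);
free_a:
        free(*a);
fail:
        return err;
}

int main(void)
{
        void *a, *b, *c;

        if (setup(&a, &b, &c))
                return 1;
        free(c);
        free(b);
        free(a);
        return 0;
}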
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 686492ed3079..bc933cfb4e66 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -77,6 +77,8 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO); | |||
77 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) | 77 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) |
78 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) | 78 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) |
79 | 79 | ||
80 | #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) | ||
81 | |||
80 | /* | 82 | /* |
81 | * These 2 parameters are used to config the controls for Pause-Loop Exiting: | 83 | * These 2 parameters are used to config the controls for Pause-Loop Exiting: |
82 | * ple_gap: upper bound on the amount of time between two successive | 84 | * ple_gap: upper bound on the amount of time between two successive |
@@ -131,7 +133,7 @@ struct vcpu_vmx { | |||
131 | } host_state; | 133 | } host_state; |
132 | struct { | 134 | struct { |
133 | int vm86_active; | 135 | int vm86_active; |
134 | u8 save_iopl; | 136 | ulong save_rflags; |
135 | struct kvm_save_segment { | 137 | struct kvm_save_segment { |
136 | u16 selector; | 138 | u16 selector; |
137 | unsigned long base; | 139 | unsigned long base; |
@@ -818,18 +820,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) | |||
818 | 820 | ||
819 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) | 821 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) |
820 | { | 822 | { |
821 | unsigned long rflags; | 823 | unsigned long rflags, save_rflags; |
822 | 824 | ||
823 | rflags = vmcs_readl(GUEST_RFLAGS); | 825 | rflags = vmcs_readl(GUEST_RFLAGS); |
824 | if (to_vmx(vcpu)->rmode.vm86_active) | 826 | if (to_vmx(vcpu)->rmode.vm86_active) { |
825 | rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM); | 827 | rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; |
828 | save_rflags = to_vmx(vcpu)->rmode.save_rflags; | ||
829 | rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; | ||
830 | } | ||
826 | return rflags; | 831 | return rflags; |
827 | } | 832 | } |
828 | 833 | ||
829 | static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | 834 | static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
830 | { | 835 | { |
831 | if (to_vmx(vcpu)->rmode.vm86_active) | 836 | if (to_vmx(vcpu)->rmode.vm86_active) { |
837 | to_vmx(vcpu)->rmode.save_rflags = rflags; | ||
832 | rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | 838 | rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; |
839 | } | ||
833 | vmcs_writel(GUEST_RFLAGS, rflags); | 840 | vmcs_writel(GUEST_RFLAGS, rflags); |
834 | } | 841 | } |
835 | 842 | ||
@@ -1483,8 +1490,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu) | |||
1483 | vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar); | 1490 | vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar); |
1484 | 1491 | ||
1485 | flags = vmcs_readl(GUEST_RFLAGS); | 1492 | flags = vmcs_readl(GUEST_RFLAGS); |
1486 | flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); | 1493 | flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; |
1487 | flags |= (vmx->rmode.save_iopl << IOPL_SHIFT); | 1494 | flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; |
1488 | vmcs_writel(GUEST_RFLAGS, flags); | 1495 | vmcs_writel(GUEST_RFLAGS, flags); |
1489 | 1496 | ||
1490 | vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | | 1497 | vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | |
@@ -1557,8 +1564,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu) | |||
1557 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); | 1564 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); |
1558 | 1565 | ||
1559 | flags = vmcs_readl(GUEST_RFLAGS); | 1566 | flags = vmcs_readl(GUEST_RFLAGS); |
1560 | vmx->rmode.save_iopl | 1567 | vmx->rmode.save_rflags = flags; |
1561 | = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; | ||
1562 | 1568 | ||
1563 | flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | 1569 | flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; |
1564 | 1570 | ||
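The vmx.c change saves the guest's whole RFLAGS value when entering vm86-style real mode instead of only the IOPL field, and vmx_get_rflags() then stitches the live guest-owned bits together with the saved IOPL/VM bits. A small stand-alone check of that bit-stitching, using the masks visible in the diff (the example value is arbitrary):

#include <stdio.h>

#define X86_EFLAGS_IOPL (3UL << 12)
#define X86_EFLAGS_VM   (1UL << 17)
#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

int main(void)
{
        unsigned long guest_rflags = 0x3202;    /* IF set, IOPL = 3 */
        unsigned long save_rflags, vmcs_rflags, rflags;

        /* enter_rmode(): remember everything, force IOPL=3 and VM for vm86 */
        save_rflags = guest_rflags;
        vmcs_rflags = guest_rflags | X86_EFLAGS_IOPL | X86_EFLAGS_VM;

        /* vmx_get_rflags(): live guest-owned bits + saved IOPL/VM bits */
        rflags  = vmcs_rflags & RMODE_GUEST_OWNED_EFLAGS_BITS;
        rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;

        printf("reconstructed rflags: %#lx\n", rflags); /* 0x3202 again */
        return 0;
}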
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 24cd0ee896e9..3c4ca98ad27f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -433,8 +433,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
433 | 433 | ||
434 | #ifdef CONFIG_X86_64 | 434 | #ifdef CONFIG_X86_64 |
435 | if (cr0 & 0xffffffff00000000UL) { | 435 | if (cr0 & 0xffffffff00000000UL) { |
436 | printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", | ||
437 | cr0, kvm_read_cr0(vcpu)); | ||
438 | kvm_inject_gp(vcpu, 0); | 436 | kvm_inject_gp(vcpu, 0); |
439 | return; | 437 | return; |
440 | } | 438 | } |
@@ -443,14 +441,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
443 | cr0 &= ~CR0_RESERVED_BITS; | 441 | cr0 &= ~CR0_RESERVED_BITS; |
444 | 442 | ||
445 | if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) { | 443 | if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) { |
446 | printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n"); | ||
447 | kvm_inject_gp(vcpu, 0); | 444 | kvm_inject_gp(vcpu, 0); |
448 | return; | 445 | return; |
449 | } | 446 | } |
450 | 447 | ||
451 | if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) { | 448 | if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) { |
452 | printk(KERN_DEBUG "set_cr0: #GP, set PG flag " | ||
453 | "and a clear PE flag\n"); | ||
454 | kvm_inject_gp(vcpu, 0); | 449 | kvm_inject_gp(vcpu, 0); |
455 | return; | 450 | return; |
456 | } | 451 | } |
@@ -461,15 +456,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
461 | int cs_db, cs_l; | 456 | int cs_db, cs_l; |
462 | 457 | ||
463 | if (!is_pae(vcpu)) { | 458 | if (!is_pae(vcpu)) { |
464 | printk(KERN_DEBUG "set_cr0: #GP, start paging " | ||
465 | "in long mode while PAE is disabled\n"); | ||
466 | kvm_inject_gp(vcpu, 0); | 459 | kvm_inject_gp(vcpu, 0); |
467 | return; | 460 | return; |
468 | } | 461 | } |
469 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | 462 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); |
470 | if (cs_l) { | 463 | if (cs_l) { |
471 | printk(KERN_DEBUG "set_cr0: #GP, start paging " | ||
472 | "in long mode while CS.L == 1\n"); | ||
473 | kvm_inject_gp(vcpu, 0); | 464 | kvm_inject_gp(vcpu, 0); |
474 | return; | 465 | return; |
475 | 466 | ||
@@ -477,8 +468,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
477 | } else | 468 | } else |
478 | #endif | 469 | #endif |
479 | if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) { | 470 | if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) { |
480 | printk(KERN_DEBUG "set_cr0: #GP, pdptrs " | ||
481 | "reserved bits\n"); | ||
482 | kvm_inject_gp(vcpu, 0); | 471 | kvm_inject_gp(vcpu, 0); |
483 | return; | 472 | return; |
484 | } | 473 | } |
@@ -505,28 +494,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
505 | unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; | 494 | unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; |
506 | 495 | ||
507 | if (cr4 & CR4_RESERVED_BITS) { | 496 | if (cr4 & CR4_RESERVED_BITS) { |
508 | printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n"); | ||
509 | kvm_inject_gp(vcpu, 0); | 497 | kvm_inject_gp(vcpu, 0); |
510 | return; | 498 | return; |
511 | } | 499 | } |
512 | 500 | ||
513 | if (is_long_mode(vcpu)) { | 501 | if (is_long_mode(vcpu)) { |
514 | if (!(cr4 & X86_CR4_PAE)) { | 502 | if (!(cr4 & X86_CR4_PAE)) { |
515 | printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while " | ||
516 | "in long mode\n"); | ||
517 | kvm_inject_gp(vcpu, 0); | 503 | kvm_inject_gp(vcpu, 0); |
518 | return; | 504 | return; |
519 | } | 505 | } |
520 | } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) | 506 | } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) |
521 | && ((cr4 ^ old_cr4) & pdptr_bits) | 507 | && ((cr4 ^ old_cr4) & pdptr_bits) |
522 | && !load_pdptrs(vcpu, vcpu->arch.cr3)) { | 508 | && !load_pdptrs(vcpu, vcpu->arch.cr3)) { |
523 | printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); | ||
524 | kvm_inject_gp(vcpu, 0); | 509 | kvm_inject_gp(vcpu, 0); |
525 | return; | 510 | return; |
526 | } | 511 | } |
527 | 512 | ||
528 | if (cr4 & X86_CR4_VMXE) { | 513 | if (cr4 & X86_CR4_VMXE) { |
529 | printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n"); | ||
530 | kvm_inject_gp(vcpu, 0); | 514 | kvm_inject_gp(vcpu, 0); |
531 | return; | 515 | return; |
532 | } | 516 | } |
@@ -547,21 +531,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | |||
547 | 531 | ||
548 | if (is_long_mode(vcpu)) { | 532 | if (is_long_mode(vcpu)) { |
549 | if (cr3 & CR3_L_MODE_RESERVED_BITS) { | 533 | if (cr3 & CR3_L_MODE_RESERVED_BITS) { |
550 | printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); | ||
551 | kvm_inject_gp(vcpu, 0); | 534 | kvm_inject_gp(vcpu, 0); |
552 | return; | 535 | return; |
553 | } | 536 | } |
554 | } else { | 537 | } else { |
555 | if (is_pae(vcpu)) { | 538 | if (is_pae(vcpu)) { |
556 | if (cr3 & CR3_PAE_RESERVED_BITS) { | 539 | if (cr3 & CR3_PAE_RESERVED_BITS) { |
557 | printk(KERN_DEBUG | ||
558 | "set_cr3: #GP, reserved bits\n"); | ||
559 | kvm_inject_gp(vcpu, 0); | 540 | kvm_inject_gp(vcpu, 0); |
560 | return; | 541 | return; |
561 | } | 542 | } |
562 | if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) { | 543 | if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) { |
563 | printk(KERN_DEBUG "set_cr3: #GP, pdptrs " | ||
564 | "reserved bits\n"); | ||
565 | kvm_inject_gp(vcpu, 0); | 544 | kvm_inject_gp(vcpu, 0); |
566 | return; | 545 | return; |
567 | } | 546 | } |
@@ -593,7 +572,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3); | |||
593 | void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) | 572 | void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) |
594 | { | 573 | { |
595 | if (cr8 & CR8_RESERVED_BITS) { | 574 | if (cr8 & CR8_RESERVED_BITS) { |
596 | printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8); | ||
597 | kvm_inject_gp(vcpu, 0); | 575 | kvm_inject_gp(vcpu, 0); |
598 | return; | 576 | return; |
599 | } | 577 | } |
@@ -649,15 +627,12 @@ static u32 emulated_msrs[] = { | |||
649 | static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | 627 | static void set_efer(struct kvm_vcpu *vcpu, u64 efer) |
650 | { | 628 | { |
651 | if (efer & efer_reserved_bits) { | 629 | if (efer & efer_reserved_bits) { |
652 | printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n", | ||
653 | efer); | ||
654 | kvm_inject_gp(vcpu, 0); | 630 | kvm_inject_gp(vcpu, 0); |
655 | return; | 631 | return; |
656 | } | 632 | } |
657 | 633 | ||
658 | if (is_paging(vcpu) | 634 | if (is_paging(vcpu) |
659 | && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) { | 635 | && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) { |
660 | printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n"); | ||
661 | kvm_inject_gp(vcpu, 0); | 636 | kvm_inject_gp(vcpu, 0); |
662 | return; | 637 | return; |
663 | } | 638 | } |
@@ -667,7 +642,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | |||
667 | 642 | ||
668 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | 643 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); |
669 | if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { | 644 | if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { |
670 | printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n"); | ||
671 | kvm_inject_gp(vcpu, 0); | 645 | kvm_inject_gp(vcpu, 0); |
672 | return; | 646 | return; |
673 | } | 647 | } |
@@ -678,7 +652,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | |||
678 | 652 | ||
679 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | 653 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); |
680 | if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { | 654 | if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { |
681 | printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n"); | ||
682 | kvm_inject_gp(vcpu, 0); | 655 | kvm_inject_gp(vcpu, 0); |
683 | return; | 656 | return; |
684 | } | 657 | } |
@@ -967,9 +940,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) | |||
967 | if (msr >= MSR_IA32_MC0_CTL && | 940 | if (msr >= MSR_IA32_MC0_CTL && |
968 | msr < MSR_IA32_MC0_CTL + 4 * bank_num) { | 941 | msr < MSR_IA32_MC0_CTL + 4 * bank_num) { |
969 | u32 offset = msr - MSR_IA32_MC0_CTL; | 942 | u32 offset = msr - MSR_IA32_MC0_CTL; |
970 | /* only 0 or all 1s can be written to IA32_MCi_CTL */ | 943 | /* only 0 or all 1s can be written to IA32_MCi_CTL |
944 | * some Linux kernels, though, clear bit 10 in bank 4 to | ||
945 | * work around a BIOS/GART TBL issue on AMD K8s; ignore | ||
946 | * this to avoid an uncaught #GP in the guest | ||
947 | */ | ||
971 | if ((offset & 0x3) == 0 && | 948 | if ((offset & 0x3) == 0 && |
972 | data != 0 && data != ~(u64)0) | 949 | data != 0 && (data | (1 << 10)) != ~(u64)0) |
973 | return -1; | 950 | return -1; |
974 | vcpu->arch.mce_banks[offset] = data; | 951 | vcpu->arch.mce_banks[offset] = data; |
975 | break; | 952 | break; |
@@ -2635,8 +2612,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm, | |||
2635 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | 2612 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
2636 | struct kvm_dirty_log *log) | 2613 | struct kvm_dirty_log *log) |
2637 | { | 2614 | { |
2638 | int r, n, i; | 2615 | int r, i; |
2639 | struct kvm_memory_slot *memslot; | 2616 | struct kvm_memory_slot *memslot; |
2617 | unsigned long n; | ||
2640 | unsigned long is_dirty = 0; | 2618 | unsigned long is_dirty = 0; |
2641 | unsigned long *dirty_bitmap = NULL; | 2619 | unsigned long *dirty_bitmap = NULL; |
2642 | 2620 | ||
@@ -2651,7 +2629,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
2651 | if (!memslot->dirty_bitmap) | 2629 | if (!memslot->dirty_bitmap) |
2652 | goto out; | 2630 | goto out; |
2653 | 2631 | ||
2654 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 2632 | n = kvm_dirty_bitmap_bytes(memslot); |
2655 | 2633 | ||
2656 | r = -ENOMEM; | 2634 | r = -ENOMEM; |
2657 | dirty_bitmap = vmalloc(n); | 2635 | dirty_bitmap = vmalloc(n); |
@@ -4483,7 +4461,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
4483 | kvm_set_cr8(vcpu, kvm_run->cr8); | 4461 | kvm_set_cr8(vcpu, kvm_run->cr8); |
4484 | 4462 | ||
4485 | if (vcpu->arch.pio.cur_count) { | 4463 | if (vcpu->arch.pio.cur_count) { |
4464 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
4486 | r = complete_pio(vcpu); | 4465 | r = complete_pio(vcpu); |
4466 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); | ||
4487 | if (r) | 4467 | if (r) |
4488 | goto out; | 4468 | goto out; |
4489 | } | 4469 | } |
@@ -5146,6 +5126,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
5146 | int ret = 0; | 5126 | int ret = 0; |
5147 | u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); | 5127 | u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); |
5148 | u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); | 5128 | u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); |
5129 | u32 desc_limit; | ||
5149 | 5130 | ||
5150 | old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL); | 5131 | old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL); |
5151 | 5132 | ||
@@ -5168,7 +5149,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
5168 | } | 5149 | } |
5169 | } | 5150 | } |
5170 | 5151 | ||
5171 | if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) { | 5152 | desc_limit = get_desc_limit(&nseg_desc); |
5153 | if (!nseg_desc.p || | ||
5154 | ((desc_limit < 0x67 && (nseg_desc.type & 8)) || | ||
5155 | desc_limit < 0x2b)) { | ||
5172 | kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc); | 5156 | kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc); |
5173 | return 1; | 5157 | return 1; |
5174 | } | 5158 | } |
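Among the x86.c changes, the IA32_MCi_CTL write filter is loosened so that an all-ones value with only bit 10 clear is still accepted, since some guests clear that bit in bank 4 to work around a GART TLB erratum on K8. A quick stand-alone check of the expression used in the diff:

#include <stdint.h>
#include <stdio.h>

/* Accept 0, all-ones, or all-ones with bit 10 cleared (GART erratum). */
static int mci_ctl_write_ok(uint64_t data)
{
        return data == 0 || (data | (1ULL << 10)) == ~(uint64_t)0;
}

int main(void)
{
        printf("%d\n", mci_ctl_write_ok(0));                            /* 1 */
        printf("%d\n", mci_ctl_write_ok(~(uint64_t)0));                 /* 1 */
        printf("%d\n", mci_ctl_write_ok(~(uint64_t)0 ^ (1ULL << 10)));  /* 1 */
        printf("%d\n", mci_ctl_write_ok(0x1234));                       /* 0 */
        return 0;
}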
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 7e59dc1d3fc2..2bdf628066bd 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -115,7 +115,7 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
115 | local_irq_save(flags); | 115 | local_irq_save(flags); |
116 | if (lguest_data.hcall_status[next_call] != 0xFF) { | 116 | if (lguest_data.hcall_status[next_call] != 0xFF) { |
117 | /* Table full, so do normal hcall which will flush table. */ | 117 | /* Table full, so do normal hcall which will flush table. */ |
118 | kvm_hypercall4(call, arg1, arg2, arg3, arg4); | 118 | hcall(call, arg1, arg2, arg3, arg4); |
119 | } else { | 119 | } else { |
120 | lguest_data.hcalls[next_call].arg0 = call; | 120 | lguest_data.hcalls[next_call].arg0 = call; |
121 | lguest_data.hcalls[next_call].arg1 = arg1; | 121 | lguest_data.hcalls[next_call].arg1 = arg1; |
@@ -145,46 +145,45 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
145 | * So, when we're in lazy mode, we call async_hcall() to store the call for | 145 | * So, when we're in lazy mode, we call async_hcall() to store the call for |
146 | * future processing: | 146 | * future processing: |
147 | */ | 147 | */ |
148 | static void lazy_hcall1(unsigned long call, | 148 | static void lazy_hcall1(unsigned long call, unsigned long arg1) |
149 | unsigned long arg1) | ||
150 | { | 149 | { |
151 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 150 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
152 | kvm_hypercall1(call, arg1); | 151 | hcall(call, arg1, 0, 0, 0); |
153 | else | 152 | else |
154 | async_hcall(call, arg1, 0, 0, 0); | 153 | async_hcall(call, arg1, 0, 0, 0); |
155 | } | 154 | } |
156 | 155 | ||
157 | /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/ | 156 | /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/ |
158 | static void lazy_hcall2(unsigned long call, | 157 | static void lazy_hcall2(unsigned long call, |
159 | unsigned long arg1, | 158 | unsigned long arg1, |
160 | unsigned long arg2) | 159 | unsigned long arg2) |
161 | { | 160 | { |
162 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 161 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
163 | kvm_hypercall2(call, arg1, arg2); | 162 | hcall(call, arg1, arg2, 0, 0); |
164 | else | 163 | else |
165 | async_hcall(call, arg1, arg2, 0, 0); | 164 | async_hcall(call, arg1, arg2, 0, 0); |
166 | } | 165 | } |
167 | 166 | ||
168 | static void lazy_hcall3(unsigned long call, | 167 | static void lazy_hcall3(unsigned long call, |
169 | unsigned long arg1, | 168 | unsigned long arg1, |
170 | unsigned long arg2, | 169 | unsigned long arg2, |
171 | unsigned long arg3) | 170 | unsigned long arg3) |
172 | { | 171 | { |
173 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 172 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
174 | kvm_hypercall3(call, arg1, arg2, arg3); | 173 | hcall(call, arg1, arg2, arg3, 0); |
175 | else | 174 | else |
176 | async_hcall(call, arg1, arg2, arg3, 0); | 175 | async_hcall(call, arg1, arg2, arg3, 0); |
177 | } | 176 | } |
178 | 177 | ||
179 | #ifdef CONFIG_X86_PAE | 178 | #ifdef CONFIG_X86_PAE |
180 | static void lazy_hcall4(unsigned long call, | 179 | static void lazy_hcall4(unsigned long call, |
181 | unsigned long arg1, | 180 | unsigned long arg1, |
182 | unsigned long arg2, | 181 | unsigned long arg2, |
183 | unsigned long arg3, | 182 | unsigned long arg3, |
184 | unsigned long arg4) | 183 | unsigned long arg4) |
185 | { | 184 | { |
186 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 185 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
187 | kvm_hypercall4(call, arg1, arg2, arg3, arg4); | 186 | hcall(call, arg1, arg2, arg3, arg4); |
188 | else | 187 | else |
189 | async_hcall(call, arg1, arg2, arg3, arg4); | 188 | async_hcall(call, arg1, arg2, arg3, arg4); |
190 | } | 189 | } |
@@ -196,13 +195,13 @@ static void lazy_hcall4(unsigned long call, | |||
196 | :*/ | 195 | :*/ |
197 | static void lguest_leave_lazy_mmu_mode(void) | 196 | static void lguest_leave_lazy_mmu_mode(void) |
198 | { | 197 | { |
199 | kvm_hypercall0(LHCALL_FLUSH_ASYNC); | 198 | hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); |
200 | paravirt_leave_lazy_mmu(); | 199 | paravirt_leave_lazy_mmu(); |
201 | } | 200 | } |
202 | 201 | ||
203 | static void lguest_end_context_switch(struct task_struct *next) | 202 | static void lguest_end_context_switch(struct task_struct *next) |
204 | { | 203 | { |
205 | kvm_hypercall0(LHCALL_FLUSH_ASYNC); | 204 | hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); |
206 | paravirt_end_context_switch(next); | 205 | paravirt_end_context_switch(next); |
207 | } | 206 | } |
208 | 207 | ||
@@ -286,7 +285,7 @@ static void lguest_write_idt_entry(gate_desc *dt, | |||
286 | /* Keep the local copy up to date. */ | 285 | /* Keep the local copy up to date. */ |
287 | native_write_idt_entry(dt, entrynum, g); | 286 | native_write_idt_entry(dt, entrynum, g); |
288 | /* Tell Host about this new entry. */ | 287 | /* Tell Host about this new entry. */ |
289 | kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); | 288 | hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0); |
290 | } | 289 | } |
291 | 290 | ||
292 | /* | 291 | /* |
@@ -300,7 +299,7 @@ static void lguest_load_idt(const struct desc_ptr *desc) | |||
300 | struct desc_struct *idt = (void *)desc->address; | 299 | struct desc_struct *idt = (void *)desc->address; |
301 | 300 | ||
302 | for (i = 0; i < (desc->size+1)/8; i++) | 301 | for (i = 0; i < (desc->size+1)/8; i++) |
303 | kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b); | 302 | hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0); |
304 | } | 303 | } |
305 | 304 | ||
306 | /* | 305 | /* |
@@ -321,7 +320,7 @@ static void lguest_load_gdt(const struct desc_ptr *desc) | |||
321 | struct desc_struct *gdt = (void *)desc->address; | 320 | struct desc_struct *gdt = (void *)desc->address; |
322 | 321 | ||
323 | for (i = 0; i < (desc->size+1)/8; i++) | 322 | for (i = 0; i < (desc->size+1)/8; i++) |
324 | kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b); | 323 | hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0); |
325 | } | 324 | } |
326 | 325 | ||
327 | /* | 326 | /* |
@@ -334,8 +333,8 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, | |||
334 | { | 333 | { |
335 | native_write_gdt_entry(dt, entrynum, desc, type); | 334 | native_write_gdt_entry(dt, entrynum, desc, type); |
336 | /* Tell Host about this new entry. */ | 335 | /* Tell Host about this new entry. */ |
337 | kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum, | 336 | hcall(LHCALL_LOAD_GDT_ENTRY, entrynum, |
338 | dt[entrynum].a, dt[entrynum].b); | 337 | dt[entrynum].a, dt[entrynum].b, 0); |
339 | } | 338 | } |
340 | 339 | ||
341 | /* | 340 | /* |
@@ -931,7 +930,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta, | |||
931 | } | 930 | } |
932 | 931 | ||
933 | /* Please wake us this far in the future. */ | 932 | /* Please wake us this far in the future. */ |
934 | kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta); | 933 | hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0); |
935 | return 0; | 934 | return 0; |
936 | } | 935 | } |
937 | 936 | ||
@@ -942,7 +941,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode, | |||
942 | case CLOCK_EVT_MODE_UNUSED: | 941 | case CLOCK_EVT_MODE_UNUSED: |
943 | case CLOCK_EVT_MODE_SHUTDOWN: | 942 | case CLOCK_EVT_MODE_SHUTDOWN: |
944 | /* A 0 argument shuts the clock down. */ | 943 | /* A 0 argument shuts the clock down. */ |
945 | kvm_hypercall0(LHCALL_SET_CLOCKEVENT); | 944 | hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0); |
946 | break; | 945 | break; |
947 | case CLOCK_EVT_MODE_ONESHOT: | 946 | case CLOCK_EVT_MODE_ONESHOT: |
948 | /* This is what we expect. */ | 947 | /* This is what we expect. */ |
@@ -1100,7 +1099,7 @@ static void set_lguest_basic_apic_ops(void) | |||
1100 | /* STOP! Until an interrupt comes in. */ | 1099 | /* STOP! Until an interrupt comes in. */ |
1101 | static void lguest_safe_halt(void) | 1100 | static void lguest_safe_halt(void) |
1102 | { | 1101 | { |
1103 | kvm_hypercall0(LHCALL_HALT); | 1102 | hcall(LHCALL_HALT, 0, 0, 0, 0); |
1104 | } | 1103 | } |
1105 | 1104 | ||
1106 | /* | 1105 | /* |
@@ -1112,8 +1111,8 @@ static void lguest_safe_halt(void) | |||
1112 | */ | 1111 | */ |
1113 | static void lguest_power_off(void) | 1112 | static void lguest_power_off(void) |
1114 | { | 1113 | { |
1115 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), | 1114 | hcall(LHCALL_SHUTDOWN, __pa("Power down"), |
1116 | LGUEST_SHUTDOWN_POWEROFF); | 1115 | LGUEST_SHUTDOWN_POWEROFF, 0, 0); |
1117 | } | 1116 | } |
1118 | 1117 | ||
1119 | /* | 1118 | /* |
@@ -1123,7 +1122,7 @@ static void lguest_power_off(void) | |||
1123 | */ | 1122 | */ |
1124 | static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p) | 1123 | static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p) |
1125 | { | 1124 | { |
1126 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF); | 1125 | hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0); |
1127 | /* The hcall won't return, but to keep gcc happy, we're "done". */ | 1126 | /* The hcall won't return, but to keep gcc happy, we're "done". */ |
1128 | return NOTIFY_DONE; | 1127 | return NOTIFY_DONE; |
1129 | } | 1128 | } |
@@ -1162,7 +1161,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) | |||
1162 | len = sizeof(scratch) - 1; | 1161 | len = sizeof(scratch) - 1; |
1163 | scratch[len] = '\0'; | 1162 | scratch[len] = '\0'; |
1164 | memcpy(scratch, buf, len); | 1163 | memcpy(scratch, buf, len); |
1165 | kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch)); | 1164 | hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0); |
1166 | 1165 | ||
1167 | /* This routine returns the number of bytes actually written. */ | 1166 | /* This routine returns the number of bytes actually written. */ |
1168 | return len; | 1167 | return len; |
@@ -1174,7 +1173,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) | |||
1174 | */ | 1173 | */ |
1175 | static void lguest_restart(char *reason) | 1174 | static void lguest_restart(char *reason) |
1176 | { | 1175 | { |
1177 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); | 1176 | hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0); |
1178 | } | 1177 | } |
1179 | 1178 | ||
1180 | /*G:050 | 1179 | /*G:050 |
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index 27eac0faee48..4f420c2f2d55 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S | |||
@@ -32,7 +32,7 @@ ENTRY(lguest_entry) | |||
32 | */ | 32 | */ |
33 | movl $LHCALL_LGUEST_INIT, %eax | 33 | movl $LHCALL_LGUEST_INIT, %eax |
34 | movl $lguest_data - __PAGE_OFFSET, %ebx | 34 | movl $lguest_data - __PAGE_OFFSET, %ebx |
35 | .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ | 35 | int $LGUEST_TRAP_ENTRY |
36 | 36 | ||
37 | /* Set up the initial stack so we can run C code. */ | 37 | /* Set up the initial stack so we can run C code. */ |
38 | movl $(init_thread_union+THREAD_SIZE),%esp | 38 | movl $(init_thread_union+THREAD_SIZE),%esp |
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index c7b1ebfb7da7..31930fd30ea9 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -66,14 +66,44 @@ resource_to_addr(struct acpi_resource *resource, | |||
66 | struct acpi_resource_address64 *addr) | 66 | struct acpi_resource_address64 *addr) |
67 | { | 67 | { |
68 | acpi_status status; | 68 | acpi_status status; |
69 | 69 | struct acpi_resource_memory24 *memory24; | |
70 | status = acpi_resource_to_address64(resource, addr); | 70 | struct acpi_resource_memory32 *memory32; |
71 | if (ACPI_SUCCESS(status) && | 71 | struct acpi_resource_fixed_memory32 *fixed_memory32; |
72 | (addr->resource_type == ACPI_MEMORY_RANGE || | 72 | |
73 | addr->resource_type == ACPI_IO_RANGE) && | 73 | memset(addr, 0, sizeof(*addr)); |
74 | addr->address_length > 0 && | 74 | switch (resource->type) { |
75 | addr->producer_consumer == ACPI_PRODUCER) { | 75 | case ACPI_RESOURCE_TYPE_MEMORY24: |
76 | memory24 = &resource->data.memory24; | ||
77 | addr->resource_type = ACPI_MEMORY_RANGE; | ||
78 | addr->minimum = memory24->minimum; | ||
79 | addr->address_length = memory24->address_length; | ||
80 | addr->maximum = addr->minimum + addr->address_length - 1; | ||
81 | return AE_OK; | ||
82 | case ACPI_RESOURCE_TYPE_MEMORY32: | ||
83 | memory32 = &resource->data.memory32; | ||
84 | addr->resource_type = ACPI_MEMORY_RANGE; | ||
85 | addr->minimum = memory32->minimum; | ||
86 | addr->address_length = memory32->address_length; | ||
87 | addr->maximum = addr->minimum + addr->address_length - 1; | ||
76 | return AE_OK; | 88 | return AE_OK; |
89 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: | ||
90 | fixed_memory32 = &resource->data.fixed_memory32; | ||
91 | addr->resource_type = ACPI_MEMORY_RANGE; | ||
92 | addr->minimum = fixed_memory32->address; | ||
93 | addr->address_length = fixed_memory32->address_length; | ||
94 | addr->maximum = addr->minimum + addr->address_length - 1; | ||
95 | return AE_OK; | ||
96 | case ACPI_RESOURCE_TYPE_ADDRESS16: | ||
97 | case ACPI_RESOURCE_TYPE_ADDRESS32: | ||
98 | case ACPI_RESOURCE_TYPE_ADDRESS64: | ||
99 | status = acpi_resource_to_address64(resource, addr); | ||
100 | if (ACPI_SUCCESS(status) && | ||
101 | (addr->resource_type == ACPI_MEMORY_RANGE || | ||
102 | addr->resource_type == ACPI_IO_RANGE) && | ||
103 | addr->address_length > 0) { | ||
104 | return AE_OK; | ||
105 | } | ||
106 | break; | ||
77 | } | 107 | } |
78 | return AE_ERROR; | 108 | return AE_ERROR; |
79 | } | 109 | } |
@@ -91,30 +121,6 @@ count_resource(struct acpi_resource *acpi_res, void *data) | |||
91 | return AE_OK; | 121 | return AE_OK; |
92 | } | 122 | } |
93 | 123 | ||
94 | static void | ||
95 | align_resource(struct acpi_device *bridge, struct resource *res) | ||
96 | { | ||
97 | int align = (res->flags & IORESOURCE_MEM) ? 16 : 4; | ||
98 | |||
99 | /* | ||
100 | * Host bridge windows are not BARs, but the decoders on the PCI side | ||
101 | * that claim this address space have starting alignment and length | ||
102 | * constraints, so fix any obvious BIOS goofs. | ||
103 | */ | ||
104 | if (!IS_ALIGNED(res->start, align)) { | ||
105 | dev_printk(KERN_DEBUG, &bridge->dev, | ||
106 | "host bridge window %pR invalid; " | ||
107 | "aligning start to %d-byte boundary\n", res, align); | ||
108 | res->start &= ~(align - 1); | ||
109 | } | ||
110 | if (!IS_ALIGNED(res->end + 1, align)) { | ||
111 | dev_printk(KERN_DEBUG, &bridge->dev, | ||
112 | "host bridge window %pR invalid; " | ||
113 | "aligning end to %d-byte boundary\n", res, align); | ||
114 | res->end = ALIGN(res->end, align) - 1; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | static acpi_status | 124 | static acpi_status |
119 | setup_resource(struct acpi_resource *acpi_res, void *data) | 125 | setup_resource(struct acpi_resource *acpi_res, void *data) |
120 | { | 126 | { |
@@ -124,7 +130,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
124 | acpi_status status; | 130 | acpi_status status; |
125 | unsigned long flags; | 131 | unsigned long flags; |
126 | struct resource *root, *conflict; | 132 | struct resource *root, *conflict; |
127 | u64 start, end, max_len; | 133 | u64 start, end; |
128 | 134 | ||
129 | status = resource_to_addr(acpi_res, &addr); | 135 | status = resource_to_addr(acpi_res, &addr); |
130 | if (!ACPI_SUCCESS(status)) | 136 | if (!ACPI_SUCCESS(status)) |
@@ -141,19 +147,8 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
141 | } else | 147 | } else |
142 | return AE_OK; | 148 | return AE_OK; |
143 | 149 | ||
144 | max_len = addr.maximum - addr.minimum + 1; | ||
145 | if (addr.address_length > max_len) { | ||
146 | dev_printk(KERN_DEBUG, &info->bridge->dev, | ||
147 | "host bridge window length %#llx doesn't fit in " | ||
148 | "%#llx-%#llx, trimming\n", | ||
149 | (unsigned long long) addr.address_length, | ||
150 | (unsigned long long) addr.minimum, | ||
151 | (unsigned long long) addr.maximum); | ||
152 | addr.address_length = max_len; | ||
153 | } | ||
154 | |||
155 | start = addr.minimum + addr.translation_offset; | 150 | start = addr.minimum + addr.translation_offset; |
156 | end = start + addr.address_length - 1; | 151 | end = addr.maximum + addr.translation_offset; |
157 | 152 | ||
158 | res = &info->res[info->res_num]; | 153 | res = &info->res[info->res_num]; |
159 | res->name = info->name; | 154 | res->name = info->name; |
@@ -161,7 +156,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
161 | res->start = start; | 156 | res->start = start; |
162 | res->end = end; | 157 | res->end = end; |
163 | res->child = NULL; | 158 | res->child = NULL; |
164 | align_resource(info->bridge, res); | ||
165 | 159 | ||
166 | if (!pci_use_crs) { | 160 | if (!pci_use_crs) { |
167 | dev_printk(KERN_DEBUG, &info->bridge->dev, | 161 | dev_printk(KERN_DEBUG, &info->bridge->dev, |
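The pci/acpi.c hunk teaches resource_to_addr() to translate MEMORY24, MEMORY32 and FIXED_MEMORY32 resources into the same acpi_resource_address64 shape used for the ADDRESS16/32/64 types, computing the maximum from minimum and length. A simplified sketch of that normalization with invented types (not the ACPICA structures):

#include <stdint.h>
#include <stdio.h>

enum res_type { RES_MEM24, RES_MEM32, RES_FIXED32 };

struct raw_res {
        enum res_type type;
        uint64_t minimum;
        uint64_t length;
};

struct addr64 {
        uint64_t minimum;
        uint64_t maximum;
        uint64_t address_length;
};

/* Normalise the fixed-size memory descriptors into one common window. */
static int to_addr64(const struct raw_res *r, struct addr64 *a)
{
        switch (r->type) {
        case RES_MEM24:
        case RES_MEM32:
        case RES_FIXED32:
                a->minimum = r->minimum;
                a->address_length = r->length;
                a->maximum = a->minimum + a->address_length - 1;
                return 0;
        }
        return -1;
}

int main(void)
{
        struct raw_res r = { RES_MEM32, 0xfed00000, 0x1000 };
        struct addr64 a;

        if (!to_addr64(&r, &a))
                printf("window %#llx-%#llx\n",
                       (unsigned long long)a.minimum,
                       (unsigned long long)a.maximum);
        return 0;
}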
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 46fd43f79103..97da2ba9344b 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -72,6 +72,9 @@ pcibios_align_resource(void *data, const struct resource *res, | |||
72 | return start; | 72 | return start; |
73 | if (start & 0x300) | 73 | if (start & 0x300) |
74 | start = (start + 0x3ff) & ~0x3ff; | 74 | start = (start + 0x3ff) & ~0x3ff; |
75 | } else if (res->flags & IORESOURCE_MEM) { | ||
76 | if (start < BIOS_END) | ||
77 | start = BIOS_END; | ||
75 | } | 78 | } |
76 | return start; | 79 | return start; |
77 | } | 80 | } |
diff --git a/crypto/authenc.c b/crypto/authenc.c index 2bb7348d8d55..05eb32e0d949 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
@@ -46,6 +46,12 @@ struct authenc_request_ctx { | |||
46 | char tail[]; | 46 | char tail[]; |
47 | }; | 47 | }; |
48 | 48 | ||
49 | static void authenc_request_complete(struct aead_request *req, int err) | ||
50 | { | ||
51 | if (err != -EINPROGRESS) | ||
52 | aead_request_complete(req, err); | ||
53 | } | ||
54 | |||
49 | static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | 55 | static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, |
50 | unsigned int keylen) | 56 | unsigned int keylen) |
51 | { | 57 | { |
@@ -142,7 +148,7 @@ static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq, | |||
142 | crypto_aead_authsize(authenc), 1); | 148 | crypto_aead_authsize(authenc), 1); |
143 | 149 | ||
144 | out: | 150 | out: |
145 | aead_request_complete(req, err); | 151 | authenc_request_complete(req, err); |
146 | } | 152 | } |
147 | 153 | ||
148 | static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) | 154 | static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) |
@@ -208,7 +214,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, | |||
208 | err = crypto_ablkcipher_decrypt(abreq); | 214 | err = crypto_ablkcipher_decrypt(abreq); |
209 | 215 | ||
210 | out: | 216 | out: |
211 | aead_request_complete(req, err); | 217 | authenc_request_complete(req, err); |
212 | } | 218 | } |
213 | 219 | ||
214 | static void authenc_verify_ahash_done(struct crypto_async_request *areq, | 220 | static void authenc_verify_ahash_done(struct crypto_async_request *areq, |
@@ -245,7 +251,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, | |||
245 | err = crypto_ablkcipher_decrypt(abreq); | 251 | err = crypto_ablkcipher_decrypt(abreq); |
246 | 252 | ||
247 | out: | 253 | out: |
248 | aead_request_complete(req, err); | 254 | authenc_request_complete(req, err); |
249 | } | 255 | } |
250 | 256 | ||
251 | static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags) | 257 | static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags) |
@@ -379,7 +385,7 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req, | |||
379 | err = crypto_authenc_genicv(areq, iv, 0); | 385 | err = crypto_authenc_genicv(areq, iv, 0); |
380 | } | 386 | } |
381 | 387 | ||
382 | aead_request_complete(areq, err); | 388 | authenc_request_complete(areq, err); |
383 | } | 389 | } |
384 | 390 | ||
385 | static int crypto_authenc_encrypt(struct aead_request *req) | 391 | static int crypto_authenc_encrypt(struct aead_request *req) |
@@ -420,7 +426,7 @@ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req, | |||
420 | err = crypto_authenc_genicv(areq, greq->giv, 0); | 426 | err = crypto_authenc_genicv(areq, greq->giv, 0); |
421 | } | 427 | } |
422 | 428 | ||
423 | aead_request_complete(areq, err); | 429 | authenc_request_complete(areq, err); |
424 | } | 430 | } |
425 | 431 | ||
426 | static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) | 432 | static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) |
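The authenc.c change routes every asynchronous completion through authenc_request_complete(), which drops -EINPROGRESS notifications so a request can never be completed twice. A tiny sketch of that guard with a made-up request type:

#include <stdio.h>
#include <errno.h>

struct request {
        void (*complete)(struct request *req, int err);
};

/* Only forward final results; -EINPROGRESS means "still running". */
static void request_complete(struct request *req, int err)
{
        if (err != -EINPROGRESS)
                req->complete(req, err);
}

static void done(struct request *req, int err)
{
        printf("completed with %d\n", err);
}

int main(void)
{
        struct request r = { done };

        request_complete(&r, -EINPROGRESS);     /* ignored            */
        request_complete(&r, 0);                /* prints "completed" */
        return 0;
}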
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index a610ebe18edd..2fbfe51fb141 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c | |||
@@ -471,13 +471,18 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) | |||
471 | /* allow full data read from EC address space */ | 471 | /* allow full data read from EC address space */ |
472 | if (obj_desc->field.region_obj->region.space_id == | 472 | if (obj_desc->field.region_obj->region.space_id == |
473 | ACPI_ADR_SPACE_EC) { | 473 | ACPI_ADR_SPACE_EC) { |
474 | if (obj_desc->common_field.bit_length > 8) | 474 | if (obj_desc->common_field.bit_length > 8) { |
475 | obj_desc->common_field.access_bit_width = | 475 | unsigned width = |
476 | ACPI_ROUND_UP(obj_desc->common_field. | 476 | ACPI_ROUND_BITS_UP_TO_BYTES( |
477 | bit_length, 8); | 477 | obj_desc->common_field.bit_length); |
478 | /* access_bit_width is u8, don't overflow it */ | ||
479 | if (width > 8) | ||
480 | width = 8; | ||
478 | obj_desc->common_field.access_byte_width = | 481 | obj_desc->common_field.access_byte_width = |
479 | ACPI_DIV_8(obj_desc->common_field. | 482 | width; |
480 | access_bit_width); | 483 | obj_desc->common_field.access_bit_width = |
484 | 8 * width; | ||
485 | } | ||
481 | } | 486 | } |
482 | 487 | ||
483 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | 488 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 9f6cfac0f2cc..228740f356c9 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) | |||
879 | void ata_qc_schedule_eh(struct ata_queued_cmd *qc) | 879 | void ata_qc_schedule_eh(struct ata_queued_cmd *qc) |
880 | { | 880 | { |
881 | struct ata_port *ap = qc->ap; | 881 | struct ata_port *ap = qc->ap; |
882 | struct request_queue *q = qc->scsicmd->device->request_queue; | ||
883 | unsigned long flags; | ||
882 | 884 | ||
883 | WARN_ON(!ap->ops->error_handler); | 885 | WARN_ON(!ap->ops->error_handler); |
884 | 886 | ||
@@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc) | |||
890 | * Note that ATA_QCFLAG_FAILED is unconditionally set after | 892 | * Note that ATA_QCFLAG_FAILED is unconditionally set after |
891 | * this function completes. | 893 | * this function completes. |
892 | */ | 894 | */ |
895 | spin_lock_irqsave(q->queue_lock, flags); | ||
893 | blk_abort_request(qc->scsicmd->request); | 896 | blk_abort_request(qc->scsicmd->request); |
897 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
894 | } | 898 | } |
895 | 899 | ||
896 | /** | 900 | /** |
@@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) | |||
1624 | } | 1628 | } |
1625 | 1629 | ||
1626 | /* okay, this error is ours */ | 1630 | /* okay, this error is ours */ |
1631 | memset(&tf, 0, sizeof(tf)); | ||
1627 | rc = ata_eh_read_log_10h(dev, &tag, &tf); | 1632 | rc = ata_eh_read_log_10h(dev, &tag, &tf); |
1628 | if (rc) { | 1633 | if (rc) { |
1629 | ata_link_printk(link, KERN_ERR, "failed to read log page 10h " | 1634 | ata_link_printk(link, KERN_ERR, "failed to read log page 10h " |
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index 3c3172d3c34e..4164dd244dd0 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c | |||
@@ -424,6 +424,8 @@ static struct pcmcia_device_id pcmcia_devices[] = { | |||
424 | PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), | 424 | PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), |
425 | PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), | 425 | PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), |
426 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), | 426 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), |
427 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17), | ||
428 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10), | ||
427 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), | 429 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), |
428 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), | 430 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), |
429 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), | 431 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), |
@@ -444,6 +446,8 @@ static struct pcmcia_device_id pcmcia_devices[] = { | |||
444 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), | 446 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), |
445 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), | 447 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), |
446 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), | 448 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), |
449 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d), | ||
450 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47), | ||
447 | PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), | 451 | PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), |
448 | PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), | 452 | PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), |
449 | PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), | 453 | PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), |
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 67e0fc542249..93d1f9b469d4 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
@@ -1695,6 +1695,7 @@ int drbd_send_protocol(struct drbd_conf *mdev) | |||
1695 | cf |= CF_DRY_RUN; | 1695 | cf |= CF_DRY_RUN; |
1696 | else { | 1696 | else { |
1697 | dev_err(DEV, "--dry-run is not supported by peer"); | 1697 | dev_err(DEV, "--dry-run is not supported by peer"); |
1698 | kfree(p); | ||
1698 | return 0; | 1699 | return 0; |
1699 | } | 1700 | } |
1700 | } | 1701 | } |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 54f56ea8a786..c786023001d2 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -899,7 +899,8 @@ retry: | |||
899 | 899 | ||
900 | drbd_thread_start(&mdev->asender); | 900 | drbd_thread_start(&mdev->asender); |
901 | 901 | ||
902 | drbd_send_protocol(mdev); | 902 | if (!drbd_send_protocol(mdev)) |
903 | return -1; | ||
903 | drbd_send_sync_param(mdev, &mdev->sync_conf); | 904 | drbd_send_sync_param(mdev, &mdev->sync_conf); |
904 | drbd_send_sizes(mdev, 0); | 905 | drbd_send_sizes(mdev, 0); |
905 | drbd_send_uuids(mdev); | 906 | drbd_send_uuids(mdev); |
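The two drbd hunks are a matched pair: drbd_send_protocol() now frees its packet buffer before bailing out when the peer cannot honour --dry-run, and the receiver treats a zero return from it as a fatal handshake failure instead of carrying on. A minimal sketch of the same shape (names and the 0-means-failure convention are illustrative, not the drbd API):

#include <stdio.h>
#include <stdlib.h>

static int peer_supports_dry_run(void) { return 0; }   /* stand-in for the negotiation */

static int send_protocol(void)
{
    char *p = malloc(64);               /* packet buffer */
    if (!p)
        return 0;

    if (!peer_supports_dry_run()) {
        fprintf(stderr, "--dry-run is not supported by peer\n");
        free(p);                        /* was leaked before the fix */
        return 0;                       /* 0 == failure in this sketch */
    }
    /* ... fill in and transmit p ... */
    free(p);
    return 1;
}

int main(void)
{
    if (!send_protocol())
        return 1;                       /* caller now aborts instead of continuing */
    /* ... send_sync_param(), send_sizes(), send_uuids(), ... */
    return 0;
}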
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index d41331bc2aa7..aa4248efc5d8 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -1817,8 +1817,6 @@ static int intel_845_configure(void) | |||
1817 | pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); | 1817 | pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); |
1818 | /* clear any possible error conditions */ | 1818 | /* clear any possible error conditions */ |
1819 | pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); | 1819 | pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); |
1820 | |||
1821 | intel_i830_setup_flush(); | ||
1822 | return 0; | 1820 | return 0; |
1823 | } | 1821 | } |
1824 | 1822 | ||
@@ -2188,7 +2186,6 @@ static const struct agp_bridge_driver intel_845_driver = { | |||
2188 | .agp_destroy_page = agp_generic_destroy_page, | 2186 | .agp_destroy_page = agp_generic_destroy_page, |
2189 | .agp_destroy_pages = agp_generic_destroy_pages, | 2187 | .agp_destroy_pages = agp_generic_destroy_pages, |
2190 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 2188 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2191 | .chipset_flush = intel_i830_chipset_flush, | ||
2192 | }; | 2189 | }; |
2193 | 2190 | ||
2194 | static const struct agp_bridge_driver intel_850_driver = { | 2191 | static const struct agp_bridge_driver intel_850_driver = { |
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index c9bc896d68af..90b199f97bec 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c | |||
@@ -1026,14 +1026,16 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count, | |||
1026 | 1026 | ||
1027 | xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */ | 1027 | xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */ |
1028 | /* last check before exit */ | 1028 | /* last check before exit */ |
1029 | if (!io_detect_cm4000(iobase, dev)) | 1029 | if (!io_detect_cm4000(iobase, dev)) { |
1030 | count = -ENODEV; | 1030 | rc = -ENODEV; |
1031 | goto release_io; | ||
1032 | } | ||
1031 | 1033 | ||
1032 | if (test_bit(IS_INVREV, &dev->flags) && count > 0) | 1034 | if (test_bit(IS_INVREV, &dev->flags) && count > 0) |
1033 | str_invert_revert(dev->rbuf, count); | 1035 | str_invert_revert(dev->rbuf, count); |
1034 | 1036 | ||
1035 | if (copy_to_user(buf, dev->rbuf, count)) | 1037 | if (copy_to_user(buf, dev->rbuf, count)) |
1036 | return -EFAULT; | 1038 | rc = -EFAULT; |
1037 | 1039 | ||
1038 | release_io: | 1040 | release_io: |
1039 | clear_bit(LOCK_IO, &dev->flags); | 1041 | clear_bit(LOCK_IO, &dev->flags); |
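In cmm_read(), the old code set count = -ENODEV but fell straight through into the copy-out path, and a failed copy_to_user() returned -EFAULT without ever reaching release_io, leaving LOCK_IO set. The fix funnels both errors through the label via a single rc. The same pattern in a standalone sketch (a plain flag and memcpy stand in for the device bits and copy_to_user()):

#include <stdio.h>
#include <string.h>

static int io_locked;                       /* stand-in for LOCK_IO */

static int device_present(void) { return 0; }
static int copy_out(char *dst, const char *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;                               /* 0 == success, like copy_to_user() */
}

static long read_op(char *user_buf, size_t count)
{
    char rbuf[64] = "data";
    long rc = (long)count;

    io_locked = 1;

    if (!device_present()) {
        rc = -19;                           /* -ENODEV */
        goto release_io;
    }

    if (copy_out(user_buf, rbuf, count))
        rc = -14;                           /* -EFAULT, still falls into cleanup */

release_io:
    io_locked = 0;                          /* always clear the lock */
    return rc;
}

int main(void)
{
    char buf[8];
    printf("read_op -> %ld (io_locked=%d)\n", read_op(buf, sizeof(buf)), io_locked);
    return 0;
}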
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 2d5d575e889d..75d293eeb3ee 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1113,6 +1113,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1113 | unsigned int cpu = sys_dev->id; | 1113 | unsigned int cpu = sys_dev->id; |
1114 | unsigned long flags; | 1114 | unsigned long flags; |
1115 | struct cpufreq_policy *data; | 1115 | struct cpufreq_policy *data; |
1116 | struct kobject *kobj; | ||
1117 | struct completion *cmp; | ||
1116 | #ifdef CONFIG_SMP | 1118 | #ifdef CONFIG_SMP |
1117 | struct sys_device *cpu_sys_dev; | 1119 | struct sys_device *cpu_sys_dev; |
1118 | unsigned int j; | 1120 | unsigned int j; |
@@ -1141,10 +1143,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1141 | dprintk("removing link\n"); | 1143 | dprintk("removing link\n"); |
1142 | cpumask_clear_cpu(cpu, data->cpus); | 1144 | cpumask_clear_cpu(cpu, data->cpus); |
1143 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1145 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1144 | sysfs_remove_link(&sys_dev->kobj, "cpufreq"); | 1146 | kobj = &sys_dev->kobj; |
1145 | cpufreq_cpu_put(data); | 1147 | cpufreq_cpu_put(data); |
1146 | cpufreq_debug_enable_ratelimit(); | 1148 | cpufreq_debug_enable_ratelimit(); |
1147 | unlock_policy_rwsem_write(cpu); | 1149 | unlock_policy_rwsem_write(cpu); |
1150 | sysfs_remove_link(kobj, "cpufreq"); | ||
1148 | return 0; | 1151 | return 0; |
1149 | } | 1152 | } |
1150 | #endif | 1153 | #endif |
@@ -1181,7 +1184,10 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1181 | data->governor->name, CPUFREQ_NAME_LEN); | 1184 | data->governor->name, CPUFREQ_NAME_LEN); |
1182 | #endif | 1185 | #endif |
1183 | cpu_sys_dev = get_cpu_sysdev(j); | 1186 | cpu_sys_dev = get_cpu_sysdev(j); |
1184 | sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq"); | 1187 | kobj = &cpu_sys_dev->kobj; |
1188 | unlock_policy_rwsem_write(cpu); | ||
1189 | sysfs_remove_link(kobj, "cpufreq"); | ||
1190 | lock_policy_rwsem_write(cpu); | ||
1185 | cpufreq_cpu_put(data); | 1191 | cpufreq_cpu_put(data); |
1186 | } | 1192 | } |
1187 | } | 1193 | } |
@@ -1192,19 +1198,22 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1192 | if (cpufreq_driver->target) | 1198 | if (cpufreq_driver->target) |
1193 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); | 1199 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); |
1194 | 1200 | ||
1195 | kobject_put(&data->kobj); | 1201 | kobj = &data->kobj; |
1202 | cmp = &data->kobj_unregister; | ||
1203 | unlock_policy_rwsem_write(cpu); | ||
1204 | kobject_put(kobj); | ||
1196 | 1205 | ||
1197 | /* we need to make sure that the underlying kobj is actually | 1206 | /* we need to make sure that the underlying kobj is actually |
1198 | * not referenced anymore by anybody before we proceed with | 1207 | * not referenced anymore by anybody before we proceed with |
1199 | * unloading. | 1208 | * unloading. |
1200 | */ | 1209 | */ |
1201 | dprintk("waiting for dropping of refcount\n"); | 1210 | dprintk("waiting for dropping of refcount\n"); |
1202 | wait_for_completion(&data->kobj_unregister); | 1211 | wait_for_completion(cmp); |
1203 | dprintk("wait complete\n"); | 1212 | dprintk("wait complete\n"); |
1204 | 1213 | ||
1214 | lock_policy_rwsem_write(cpu); | ||
1205 | if (cpufreq_driver->exit) | 1215 | if (cpufreq_driver->exit) |
1206 | cpufreq_driver->exit(data); | 1216 | cpufreq_driver->exit(data); |
1207 | |||
1208 | unlock_policy_rwsem_write(cpu); | 1217 | unlock_policy_rwsem_write(cpu); |
1209 | 1218 | ||
1210 | free_cpumask_var(data->related_cpus); | 1219 | free_cpumask_var(data->related_cpus); |
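All three __cpufreq_remove_dev() hunks apply the same rule: snapshot the kobject (and its completion) while the per-CPU policy rwsem is held, drop the rwsem, and only then call sysfs_remove_link(), kobject_put() and wait_for_completion(), since those may block on other users who in turn want the rwsem. A stripped-down pthread illustration of the lock-ordering idea (names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct policy {
    pthread_mutex_t rwsem;              /* stands in for the policy rwsem */
    int *kobj;                          /* stands in for &data->kobj      */
};

static void blocking_teardown(int *kobj)
{
    /* may block waiting on other users of kobj, who themselves may want
     * the policy lock -- so it must run with the lock dropped */
    printf("tearing down kobj %p\n", (void *)kobj);
}

static void remove_dev(struct policy *p)
{
    int *kobj;

    pthread_mutex_lock(&p->rwsem);
    kobj = p->kobj;                     /* snapshot under the lock */
    p->kobj = NULL;
    pthread_mutex_unlock(&p->rwsem);

    blocking_teardown(kobj);            /* blocking work happens unlocked */
}

int main(void)
{
    int obj = 42;
    struct policy p = { PTHREAD_MUTEX_INITIALIZER, &obj };

    remove_dev(&p);
    return 0;
}

(Build with -pthread.)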
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 599a40b25cb0..3a147874a465 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -444,6 +444,7 @@ static struct attribute_group dbs_attr_group_old = { | |||
444 | static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | 444 | static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) |
445 | { | 445 | { |
446 | unsigned int load = 0; | 446 | unsigned int load = 0; |
447 | unsigned int max_load = 0; | ||
447 | unsigned int freq_target; | 448 | unsigned int freq_target; |
448 | 449 | ||
449 | struct cpufreq_policy *policy; | 450 | struct cpufreq_policy *policy; |
@@ -501,6 +502,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
501 | continue; | 502 | continue; |
502 | 503 | ||
503 | load = 100 * (wall_time - idle_time) / wall_time; | 504 | load = 100 * (wall_time - idle_time) / wall_time; |
505 | |||
506 | if (load > max_load) | ||
507 | max_load = load; | ||
504 | } | 508 | } |
505 | 509 | ||
506 | /* | 510 | /* |
@@ -511,7 +515,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
511 | return; | 515 | return; |
512 | 516 | ||
513 | /* Check for frequency increase */ | 517 | /* Check for frequency increase */ |
514 | if (load > dbs_tuners_ins.up_threshold) { | 518 | if (max_load > dbs_tuners_ins.up_threshold) { |
515 | this_dbs_info->down_skip = 0; | 519 | this_dbs_info->down_skip = 0; |
516 | 520 | ||
517 | /* if we are already at full speed then break out early */ | 521 | /* if we are already at full speed then break out early */ |
@@ -538,7 +542,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
538 | * can support the current CPU usage without triggering the up | 542 | * can support the current CPU usage without triggering the up |
539 | * policy. To be safe, we focus 10 points under the threshold. | 543 | * policy. To be safe, we focus 10 points under the threshold. |
540 | */ | 544 | */ |
541 | if (load < (dbs_tuners_ins.down_threshold - 10)) { | 545 | if (max_load < (dbs_tuners_ins.down_threshold - 10)) { |
542 | freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; | 546 | freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; |
543 | 547 | ||
544 | this_dbs_info->requested_freq -= freq_target; | 548 | this_dbs_info->requested_freq -= freq_target; |
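The conservative governor previously compared its thresholds against whatever load the last CPU in policy->cpus happened to report; with max_load accumulated across the loop, an SMP policy now scales up when any of its CPUs is busy and scales down only when all of them are idle enough. A minimal sketch of the decision:

#include <stdio.h>

#define UP_THRESHOLD   80
#define DOWN_THRESHOLD 20

int main(void)
{
    unsigned int loads[] = { 5, 12, 96, 8 };    /* per-CPU load samples */
    unsigned int max_load = 0;
    unsigned int i;

    for (i = 0; i < sizeof(loads) / sizeof(loads[0]); i++)
        if (loads[i] > max_load)
            max_load = loads[i];

    if (max_load > UP_THRESHOLD)
        printf("increase frequency (max load %u%%)\n", max_load);
    else if (max_load < DOWN_THRESHOLD - 10)
        printf("decrease frequency (max load %u%%)\n", max_load);
    else
        printf("keep current frequency\n");
    return 0;
}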
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 702dcc98c074..14a34d99eea2 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c | |||
@@ -960,6 +960,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) | |||
960 | u.packet.header_length = GET_HEADER_LENGTH(control); | 960 | u.packet.header_length = GET_HEADER_LENGTH(control); |
961 | 961 | ||
962 | if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { | 962 | if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { |
963 | if (u.packet.header_length % 4 != 0) | ||
964 | return -EINVAL; | ||
963 | header_length = u.packet.header_length; | 965 | header_length = u.packet.header_length; |
964 | } else { | 966 | } else { |
965 | /* | 967 | /* |
@@ -969,7 +971,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) | |||
969 | if (ctx->header_size == 0) { | 971 | if (ctx->header_size == 0) { |
970 | if (u.packet.header_length > 0) | 972 | if (u.packet.header_length > 0) |
971 | return -EINVAL; | 973 | return -EINVAL; |
972 | } else if (u.packet.header_length % ctx->header_size != 0) { | 974 | } else if (u.packet.header_length == 0 || |
975 | u.packet.header_length % ctx->header_size != 0) { | ||
973 | return -EINVAL; | 976 | return -EINVAL; |
974 | } | 977 | } |
975 | header_length = 0; | 978 | header_length = 0; |
@@ -1354,24 +1357,24 @@ static int dispatch_ioctl(struct client *client, | |||
1354 | return -ENODEV; | 1357 | return -ENODEV; |
1355 | 1358 | ||
1356 | if (_IOC_TYPE(cmd) != '#' || | 1359 | if (_IOC_TYPE(cmd) != '#' || |
1357 | _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) | 1360 | _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) || |
1361 | _IOC_SIZE(cmd) > sizeof(buffer)) | ||
1358 | return -EINVAL; | 1362 | return -EINVAL; |
1359 | 1363 | ||
1360 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | 1364 | if (_IOC_DIR(cmd) == _IOC_READ) |
1361 | if (_IOC_SIZE(cmd) > sizeof(buffer) || | 1365 | memset(&buffer, 0, _IOC_SIZE(cmd)); |
1362 | copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) | 1366 | |
1367 | if (_IOC_DIR(cmd) & _IOC_WRITE) | ||
1368 | if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) | ||
1363 | return -EFAULT; | 1369 | return -EFAULT; |
1364 | } | ||
1365 | 1370 | ||
1366 | ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer); | 1371 | ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer); |
1367 | if (ret < 0) | 1372 | if (ret < 0) |
1368 | return ret; | 1373 | return ret; |
1369 | 1374 | ||
1370 | if (_IOC_DIR(cmd) & _IOC_READ) { | 1375 | if (_IOC_DIR(cmd) & _IOC_READ) |
1371 | if (_IOC_SIZE(cmd) > sizeof(buffer) || | 1376 | if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd))) |
1372 | copy_to_user(arg, &buffer, _IOC_SIZE(cmd))) | ||
1373 | return -EFAULT; | 1377 | return -EFAULT; |
1374 | } | ||
1375 | 1378 | ||
1376 | return ret; | 1379 | return ret; |
1377 | } | 1380 | } |
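dispatch_ioctl() now bounds _IOC_SIZE(cmd) against the on-stack buffer once, up front, and zero-fills the buffer for read-only commands so uninitialized stack bytes cannot leak back to user space; ioctl_queue_iso() additionally rejects transmit-context headers whose length is not a multiple of 4 and, for receive contexts with a nonzero header size, zero-length headers. The decode-and-validate order can be exercised from user space with the same _IOC_* macros (memcpy stands in for copy_from_user()/copy_to_user() in this sketch):

#include <linux/ioctl.h>
#include <stdio.h>
#include <string.h>

#define MAX_HANDLERS 32
#define BUF_SIZE     256

static int dispatch(unsigned int cmd, void *arg)
{
    char buffer[BUF_SIZE];

    if (_IOC_TYPE(cmd) != '#' ||
        _IOC_NR(cmd) >= MAX_HANDLERS ||
        _IOC_SIZE(cmd) > sizeof(buffer))
        return -1;                              /* -EINVAL */

    if (_IOC_DIR(cmd) == _IOC_READ)
        memset(buffer, 0, _IOC_SIZE(cmd));      /* no stack leak on pure reads */

    if (_IOC_DIR(cmd) & _IOC_WRITE)
        memcpy(buffer, arg, _IOC_SIZE(cmd));    /* copy_from_user() in the kernel */

    /* ... call the handler indexed by _IOC_NR(cmd) on &buffer ... */

    if (_IOC_DIR(cmd) & _IOC_READ)
        memcpy(arg, buffer, _IOC_SIZE(cmd));    /* copy_to_user() in the kernel */
    return 0;
}

int main(void)
{
    int val = 0x55;
    unsigned int cmd = _IOR('#', 3, int);       /* hypothetical read-only command */

    printf("dispatch -> %d, val %d\n", dispatch(cmd, &val), val);
    return 0;
}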
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 3784a47865b7..8f5aebfb29df 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c | |||
@@ -190,7 +190,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, | |||
190 | for (try = 0; try < 5; try++) { | 190 | for (try = 0; try < 5; try++) { |
191 | new = allocate ? old - bandwidth : old + bandwidth; | 191 | new = allocate ? old - bandwidth : old + bandwidth; |
192 | if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) | 192 | if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) |
193 | break; | 193 | return -EBUSY; |
194 | 194 | ||
195 | data[0] = cpu_to_be32(old); | 195 | data[0] = cpu_to_be32(old); |
196 | data[1] = cpu_to_be32(new); | 196 | data[1] = cpu_to_be32(new); |
@@ -218,7 +218,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, | |||
218 | u32 channels_mask, u64 offset, bool allocate, __be32 data[2]) | 218 | u32 channels_mask, u64 offset, bool allocate, __be32 data[2]) |
219 | { | 219 | { |
220 | __be32 c, all, old; | 220 | __be32 c, all, old; |
221 | int i, retry = 5; | 221 | int i, ret = -EIO, retry = 5; |
222 | 222 | ||
223 | old = all = allocate ? cpu_to_be32(~0) : 0; | 223 | old = all = allocate ? cpu_to_be32(~0) : 0; |
224 | 224 | ||
@@ -226,6 +226,8 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, | |||
226 | if (!(channels_mask & 1 << i)) | 226 | if (!(channels_mask & 1 << i)) |
227 | continue; | 227 | continue; |
228 | 228 | ||
229 | ret = -EBUSY; | ||
230 | |||
229 | c = cpu_to_be32(1 << (31 - i)); | 231 | c = cpu_to_be32(1 << (31 - i)); |
230 | if ((old & c) != (all & c)) | 232 | if ((old & c) != (all & c)) |
231 | continue; | 233 | continue; |
@@ -251,12 +253,16 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, | |||
251 | 253 | ||
252 | /* 1394-1995 IRM, fall through to retry. */ | 254 | /* 1394-1995 IRM, fall through to retry. */ |
253 | default: | 255 | default: |
254 | if (retry--) | 256 | if (retry) { |
257 | retry--; | ||
255 | i--; | 258 | i--; |
259 | } else { | ||
260 | ret = -EIO; | ||
261 | } | ||
256 | } | 262 | } |
257 | } | 263 | } |
258 | 264 | ||
259 | return -EIO; | 265 | return ret; |
260 | } | 266 | } |
261 | 267 | ||
262 | static void deallocate_channel(struct fw_card *card, int irm_id, | 268 | static void deallocate_channel(struct fw_card *card, int irm_id, |
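manage_bandwidth() and manage_channel() now report why allocation failed instead of collapsing everything into -EIO: running out of bandwidth, or finding every candidate channel already taken, becomes -EBUSY, while exhausting the retries against a 1394-1995 IRM remains -EIO. The shape of the channel loop, reduced to plain C (the claim step is a stand-in for the lock transaction):

#include <stdio.h>

#define EBUSY 16
#define EIO    5

static int try_claim(int channel) { return channel == 3 ? 0 : 1; }  /* 0 == won it */

static int manage_channel(unsigned int channels_mask)
{
    int i, ret = -EIO;              /* nothing in the mask at all: -EIO */

    for (i = 0; i < 32; i++) {
        if (!(channels_mask & (1u << i)))
            continue;

        ret = -EBUSY;               /* at least one candidate existed */

        if (try_claim(i) == 0)
            return i;               /* claimed channel i */
    }
    return ret;                     /* every candidate was busy */
}

int main(void)
{
    printf("got channel %d\n", manage_channel(0x3f));
    return 0;
}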
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 0cf4d7f562c5..94b16e0340ae 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -1158,7 +1158,7 @@ static void handle_local_lock(struct fw_ohci *ohci, | |||
1158 | struct fw_packet *packet, u32 csr) | 1158 | struct fw_packet *packet, u32 csr) |
1159 | { | 1159 | { |
1160 | struct fw_packet response; | 1160 | struct fw_packet response; |
1161 | int tcode, length, ext_tcode, sel; | 1161 | int tcode, length, ext_tcode, sel, try; |
1162 | __be32 *payload, lock_old; | 1162 | __be32 *payload, lock_old; |
1163 | u32 lock_arg, lock_data; | 1163 | u32 lock_arg, lock_data; |
1164 | 1164 | ||
@@ -1185,21 +1185,26 @@ static void handle_local_lock(struct fw_ohci *ohci, | |||
1185 | reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); | 1185 | reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); |
1186 | reg_write(ohci, OHCI1394_CSRControl, sel); | 1186 | reg_write(ohci, OHCI1394_CSRControl, sel); |
1187 | 1187 | ||
1188 | if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) | 1188 | for (try = 0; try < 20; try++) |
1189 | lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData)); | 1189 | if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) { |
1190 | else | 1190 | lock_old = cpu_to_be32(reg_read(ohci, |
1191 | fw_notify("swap not done yet\n"); | 1191 | OHCI1394_CSRData)); |
1192 | fw_fill_response(&response, packet->header, | ||
1193 | RCODE_COMPLETE, | ||
1194 | &lock_old, sizeof(lock_old)); | ||
1195 | goto out; | ||
1196 | } | ||
1197 | |||
1198 | fw_error("swap not done (CSR lock timeout)\n"); | ||
1199 | fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); | ||
1192 | 1200 | ||
1193 | fw_fill_response(&response, packet->header, | ||
1194 | RCODE_COMPLETE, &lock_old, sizeof(lock_old)); | ||
1195 | out: | 1201 | out: |
1196 | fw_core_handle_response(&ohci->card, &response); | 1202 | fw_core_handle_response(&ohci->card, &response); |
1197 | } | 1203 | } |
1198 | 1204 | ||
1199 | static void handle_local_request(struct context *ctx, struct fw_packet *packet) | 1205 | static void handle_local_request(struct context *ctx, struct fw_packet *packet) |
1200 | { | 1206 | { |
1201 | u64 offset; | 1207 | u64 offset, csr; |
1202 | u32 csr; | ||
1203 | 1208 | ||
1204 | if (ctx == &ctx->ohci->at_request_ctx) { | 1209 | if (ctx == &ctx->ohci->at_request_ctx) { |
1205 | packet->ack = ACK_PENDING; | 1210 | packet->ack = ACK_PENDING; |
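handle_local_lock() used to sample OHCI1394_CSRControl exactly once and merely logged "swap not done yet" when the compare-swap had not finished, then built a response from an uninitialized lock_old; it now polls the done bit up to 20 times and, on timeout, answers with RCODE_BUSY. The bounded-poll shape, with a fake register standing in for reg_read():

#include <stdio.h>

static unsigned int fake_csr_control;

static unsigned int read_csr_control(void)      /* stand-in for the MMIO read */
{
    static int polls;
    if (++polls == 3)                            /* pretend hardware finishes late */
        fake_csr_control |= 0x80000000u;
    return fake_csr_control;
}

int main(void)
{
    int try;

    for (try = 0; try < 20; try++)
        if (read_csr_control() & 0x80000000u) {
            printf("lock done after %d polls\n", try + 1);
            return 0;                            /* would reply RCODE_COMPLETE */
        }

    fprintf(stderr, "swap not done (CSR lock timeout)\n");
    return 1;                                    /* would reply RCODE_BUSY */
}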
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index 7d521e1d17e1..b827c976dc62 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c | |||
@@ -252,6 +252,18 @@ static void pca953x_irq_bus_lock(unsigned int irq) | |||
252 | static void pca953x_irq_bus_sync_unlock(unsigned int irq) | 252 | static void pca953x_irq_bus_sync_unlock(unsigned int irq) |
253 | { | 253 | { |
254 | struct pca953x_chip *chip = get_irq_chip_data(irq); | 254 | struct pca953x_chip *chip = get_irq_chip_data(irq); |
255 | uint16_t new_irqs; | ||
256 | uint16_t level; | ||
257 | |||
258 | /* Look for any newly setup interrupt */ | ||
259 | new_irqs = chip->irq_trig_fall | chip->irq_trig_raise; | ||
260 | new_irqs &= ~chip->reg_direction; | ||
261 | |||
262 | while (new_irqs) { | ||
263 | level = __ffs(new_irqs); | ||
264 | pca953x_gpio_direction_input(&chip->gpio_chip, level); | ||
265 | new_irqs &= ~(1 << level); | ||
266 | } | ||
255 | 267 | ||
256 | mutex_unlock(&chip->irq_lock); | 268 | mutex_unlock(&chip->irq_lock); |
257 | } | 269 | } |
@@ -278,7 +290,7 @@ static int pca953x_irq_set_type(unsigned int irq, unsigned int type) | |||
278 | else | 290 | else |
279 | chip->irq_trig_raise &= ~mask; | 291 | chip->irq_trig_raise &= ~mask; |
280 | 292 | ||
281 | return pca953x_gpio_direction_input(&chip->gpio_chip, level); | 293 | return 0; |
282 | } | 294 | } |
283 | 295 | ||
284 | static struct irq_chip pca953x_irq_chip = { | 296 | static struct irq_chip pca953x_irq_chip = { |
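pca953x_irq_set_type() now only records the requested edges, and the direction writes for any newly requested lines are deferred to pca953x_irq_bus_sync_unlock(), where the driver is allowed to sleep for the I2C traffic. The loop walks the set bits of new_irqs with __ffs(); the same idiom in standalone C using the GCC builtin:

#include <stdio.h>
#include <stdint.h>

static void set_pin_input(unsigned int pin)
{
    printf("configure pin %u as input\n", pin);  /* the I2C write in the driver */
}

int main(void)
{
    uint16_t irq_trig_fall  = 0x0050;   /* edges recorded by irq_set_type() */
    uint16_t irq_trig_raise = 0x0102;
    uint16_t reg_direction  = 0x0002;   /* pins already configured as input */
    uint16_t new_irqs;

    new_irqs = irq_trig_fall | irq_trig_raise;
    new_irqs &= ~reg_direction;         /* only pins not yet set to input   */

    while (new_irqs) {
        unsigned int level = __builtin_ctz(new_irqs);   /* ~ __ffs() */
        set_pin_input(level);
        new_irqs &= (uint16_t)~(1u << level);
    }
    return 0;
}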
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 3bd872761567..a263b7070fc6 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -476,6 +476,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc) | |||
476 | unsigned long irqflags; | 476 | unsigned long irqflags; |
477 | 477 | ||
478 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 478 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
479 | dev->driver->disable_vblank(dev, crtc); | ||
479 | DRM_WAKEUP(&dev->vbl_queue[crtc]); | 480 | DRM_WAKEUP(&dev->vbl_queue[crtc]); |
480 | dev->vblank_enabled[crtc] = 0; | 481 | dev->vblank_enabled[crtc] = 0; |
481 | dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); | 482 | dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); |
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index b743411d8144..a0c365f2e521 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
@@ -516,8 +516,6 @@ void drm_put_dev(struct drm_device *dev) | |||
516 | } | 516 | } |
517 | driver = dev->driver; | 517 | driver = dev->driver; |
518 | 518 | ||
519 | drm_vblank_cleanup(dev); | ||
520 | |||
521 | drm_lastclose(dev); | 519 | drm_lastclose(dev); |
522 | 520 | ||
523 | if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && | 521 | if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && |
@@ -537,6 +535,8 @@ void drm_put_dev(struct drm_device *dev) | |||
537 | dev->agp = NULL; | 535 | dev->agp = NULL; |
538 | } | 536 | } |
539 | 537 | ||
538 | drm_vblank_cleanup(dev); | ||
539 | |||
540 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) | 540 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) |
541 | drm_rmmap(dev, r_list->map); | 541 | drm_rmmap(dev, r_list->map); |
542 | drm_ht_remove(&dev->map_hash); | 542 | drm_ht_remove(&dev->map_hash); |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b574503dddd0..a0b8447b06e7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -226,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) | |||
226 | } else { | 226 | } else { |
227 | struct drm_i915_gem_object *obj_priv; | 227 | struct drm_i915_gem_object *obj_priv; |
228 | 228 | ||
229 | obj_priv = obj->driver_private; | 229 | obj_priv = to_intel_bo(obj); |
230 | seq_printf(m, "Fenced object[%2d] = %p: %s " | 230 | seq_printf(m, "Fenced object[%2d] = %p: %s " |
231 | "%08x %08zx %08x %s %08x %08x %d", | 231 | "%08x %08zx %08x %s %08x %08x %d", |
232 | i, obj, get_pin_flag(obj_priv), | 232 | i, obj, get_pin_flag(obj_priv), |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2dc93939507d..c3cfafcbfe7d 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1357,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size) | |||
1357 | 1357 | ||
1358 | dev_priv->cfb_size = size; | 1358 | dev_priv->cfb_size = size; |
1359 | 1359 | ||
1360 | dev_priv->compressed_fb = compressed_fb; | ||
1361 | |||
1360 | if (IS_GM45(dev)) { | 1362 | if (IS_GM45(dev)) { |
1361 | g4x_disable_fbc(dev); | 1363 | g4x_disable_fbc(dev); |
1362 | I915_WRITE(DPFC_CB_BASE, compressed_fb->start); | 1364 | I915_WRITE(DPFC_CB_BASE, compressed_fb->start); |
@@ -1364,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size) | |||
1364 | i8xx_disable_fbc(dev); | 1366 | i8xx_disable_fbc(dev); |
1365 | I915_WRITE(FBC_CFB_BASE, cfb_base); | 1367 | I915_WRITE(FBC_CFB_BASE, cfb_base); |
1366 | I915_WRITE(FBC_LL_BASE, ll_base); | 1368 | I915_WRITE(FBC_LL_BASE, ll_base); |
1369 | dev_priv->compressed_llb = compressed_llb; | ||
1367 | } | 1370 | } |
1368 | 1371 | ||
1369 | DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, | 1372 | DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, |
1370 | ll_base, size >> 20); | 1373 | ll_base, size >> 20); |
1371 | } | 1374 | } |
1372 | 1375 | ||
1376 | static void i915_cleanup_compression(struct drm_device *dev) | ||
1377 | { | ||
1378 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1379 | |||
1380 | drm_mm_put_block(dev_priv->compressed_fb); | ||
1381 | if (!IS_GM45(dev)) | ||
1382 | drm_mm_put_block(dev_priv->compressed_llb); | ||
1383 | } | ||
1384 | |||
1373 | /* true = enable decode, false = disable decoder */ | 1385 | /* true = enable decode, false = disable decoder */ |
1374 | static unsigned int i915_vga_set_decode(void *cookie, bool state) | 1386 | static unsigned int i915_vga_set_decode(void *cookie, bool state) |
1375 | { | 1387 | { |
@@ -1787,6 +1799,8 @@ int i915_driver_unload(struct drm_device *dev) | |||
1787 | mutex_lock(&dev->struct_mutex); | 1799 | mutex_lock(&dev->struct_mutex); |
1788 | i915_gem_cleanup_ringbuffer(dev); | 1800 | i915_gem_cleanup_ringbuffer(dev); |
1789 | mutex_unlock(&dev->struct_mutex); | 1801 | mutex_unlock(&dev->struct_mutex); |
1802 | if (I915_HAS_FBC(dev) && i915_powersave) | ||
1803 | i915_cleanup_compression(dev); | ||
1790 | drm_mm_takedown(&dev_priv->vram); | 1804 | drm_mm_takedown(&dev_priv->vram); |
1791 | i915_gem_lastclose(dev); | 1805 | i915_gem_lastclose(dev); |
1792 | 1806 | ||
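i915_setup_compression() now records the drm_mm blocks it carves out for the compressed framebuffer (and, on pre-GM45 parts, the low-latency buffer) in dev_priv, so the new i915_cleanup_compression() can hand them back on driver unload instead of leaking them. The init/teardown symmetry in a tiny sketch (malloc/free are stand-ins for the drm_mm allocator):

#include <stdio.h>
#include <stdlib.h>

struct dev_priv {
    void *compressed_fb;        /* stolen-memory block for the CFB   */
    void *compressed_llb;       /* low-latency buffer, pre-GM45 only */
    int   is_gm45;
};

static void setup_compression(struct dev_priv *p, size_t size)
{
    p->compressed_fb = malloc(size);        /* drm_mm_get_block() in the driver */
    if (!p->is_gm45)
        p->compressed_llb = malloc(4096);
}

static void cleanup_compression(struct dev_priv *p)
{
    free(p->compressed_fb);                 /* drm_mm_put_block() in the driver */
    if (!p->is_gm45)
        free(p->compressed_llb);
}

int main(void)
{
    struct dev_priv priv = { 0 };

    setup_compression(&priv, 1 << 20);
    /* ... driver lifetime ... */
    cleanup_compression(&priv);
    return 0;
}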
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 4b26919abdb2..cc03537bb883 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = { | |||
69 | }; | 69 | }; |
70 | 70 | ||
71 | const static struct intel_device_info intel_i85x_info = { | 71 | const static struct intel_device_info intel_i85x_info = { |
72 | .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, | 72 | .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, |
73 | .cursor_needs_physical = 1, | ||
73 | }; | 74 | }; |
74 | 75 | ||
75 | const static struct intel_device_info intel_i865g_info = { | 76 | const static struct intel_device_info intel_i865g_info = { |
@@ -80,14 +81,14 @@ const static struct intel_device_info intel_i915g_info = { | |||
80 | .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, | 81 | .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, |
81 | }; | 82 | }; |
82 | const static struct intel_device_info intel_i915gm_info = { | 83 | const static struct intel_device_info intel_i915gm_info = { |
83 | .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, | 84 | .is_i9xx = 1, .is_mobile = 1, |
84 | .cursor_needs_physical = 1, | 85 | .cursor_needs_physical = 1, |
85 | }; | 86 | }; |
86 | const static struct intel_device_info intel_i945g_info = { | 87 | const static struct intel_device_info intel_i945g_info = { |
87 | .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, | 88 | .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, |
88 | }; | 89 | }; |
89 | const static struct intel_device_info intel_i945gm_info = { | 90 | const static struct intel_device_info intel_i945gm_info = { |
90 | .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, | 91 | .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, |
91 | .has_hotplug = 1, .cursor_needs_physical = 1, | 92 | .has_hotplug = 1, .cursor_needs_physical = 1, |
92 | }; | 93 | }; |
93 | 94 | ||
@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = { | |||
151 | INTEL_VGA_DEVICE(0x3577, &intel_i830_info), | 152 | INTEL_VGA_DEVICE(0x3577, &intel_i830_info), |
152 | INTEL_VGA_DEVICE(0x2562, &intel_845g_info), | 153 | INTEL_VGA_DEVICE(0x2562, &intel_845g_info), |
153 | INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), | 154 | INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), |
154 | INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), | 155 | INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), |
155 | INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), | 156 | INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), |
156 | INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), | 157 | INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), |
157 | INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), | 158 | INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), |
@@ -361,7 +362,7 @@ int i965_reset(struct drm_device *dev, u8 flags) | |||
361 | !dev_priv->mm.suspended) { | 362 | !dev_priv->mm.suspended) { |
362 | drm_i915_ring_buffer_t *ring = &dev_priv->ring; | 363 | drm_i915_ring_buffer_t *ring = &dev_priv->ring; |
363 | struct drm_gem_object *obj = ring->ring_obj; | 364 | struct drm_gem_object *obj = ring->ring_obj; |
364 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 365 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
365 | dev_priv->mm.suspended = 0; | 366 | dev_priv->mm.suspended = 0; |
366 | 367 | ||
367 | /* Stop the ring if it's running. */ | 368 | /* Stop the ring if it's running. */ |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index aba8260fbc5e..6e4790065d9e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -195,6 +195,7 @@ struct intel_overlay; | |||
195 | struct intel_device_info { | 195 | struct intel_device_info { |
196 | u8 is_mobile : 1; | 196 | u8 is_mobile : 1; |
197 | u8 is_i8xx : 1; | 197 | u8 is_i8xx : 1; |
198 | u8 is_i85x : 1; | ||
198 | u8 is_i915g : 1; | 199 | u8 is_i915g : 1; |
199 | u8 is_i9xx : 1; | 200 | u8 is_i9xx : 1; |
200 | u8 is_i945gm : 1; | 201 | u8 is_i945gm : 1; |
@@ -235,11 +236,14 @@ typedef struct drm_i915_private { | |||
235 | 236 | ||
236 | drm_dma_handle_t *status_page_dmah; | 237 | drm_dma_handle_t *status_page_dmah; |
237 | void *hw_status_page; | 238 | void *hw_status_page; |
239 | void *seqno_page; | ||
238 | dma_addr_t dma_status_page; | 240 | dma_addr_t dma_status_page; |
239 | uint32_t counter; | 241 | uint32_t counter; |
240 | unsigned int status_gfx_addr; | 242 | unsigned int status_gfx_addr; |
243 | unsigned int seqno_gfx_addr; | ||
241 | drm_local_map_t hws_map; | 244 | drm_local_map_t hws_map; |
242 | struct drm_gem_object *hws_obj; | 245 | struct drm_gem_object *hws_obj; |
246 | struct drm_gem_object *seqno_obj; | ||
243 | struct drm_gem_object *pwrctx; | 247 | struct drm_gem_object *pwrctx; |
244 | 248 | ||
245 | struct resource mch_res; | 249 | struct resource mch_res; |
@@ -611,6 +615,8 @@ typedef struct drm_i915_private { | |||
611 | /* Reclocking support */ | 615 | /* Reclocking support */ |
612 | bool render_reclock_avail; | 616 | bool render_reclock_avail; |
613 | bool lvds_downclock_avail; | 617 | bool lvds_downclock_avail; |
618 | /* indicate whether the LVDS EDID is OK */ | ||
619 | bool lvds_edid_good; | ||
614 | /* indicates the reduced downclock for LVDS*/ | 620 | /* indicates the reduced downclock for LVDS*/ |
615 | int lvds_downclock; | 621 | int lvds_downclock; |
616 | struct work_struct idle_work; | 622 | struct work_struct idle_work; |
@@ -628,6 +634,9 @@ typedef struct drm_i915_private { | |||
628 | u8 max_delay; | 634 | u8 max_delay; |
629 | 635 | ||
630 | enum no_fbc_reason no_fbc_reason; | 636 | enum no_fbc_reason no_fbc_reason; |
637 | |||
638 | struct drm_mm_node *compressed_fb; | ||
639 | struct drm_mm_node *compressed_llb; | ||
631 | } drm_i915_private_t; | 640 | } drm_i915_private_t; |
632 | 641 | ||
633 | /** driver private structure attached to each drm_gem_object */ | 642 | /** driver private structure attached to each drm_gem_object */ |
@@ -731,6 +740,8 @@ struct drm_i915_gem_object { | |||
731 | atomic_t pending_flip; | 740 | atomic_t pending_flip; |
732 | }; | 741 | }; |
733 | 742 | ||
743 | #define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private) | ||
744 | |||
734 | /** | 745 | /** |
735 | * Request queue structure. | 746 | * Request queue structure. |
736 | * | 747 | * |
@@ -1066,7 +1077,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
1066 | 1077 | ||
1067 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) | 1078 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) |
1068 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) | 1079 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) |
1069 | #define IS_I85X(dev) ((dev)->pci_device == 0x3582) | 1080 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) |
1070 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | 1081 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) |
1071 | #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) | 1082 | #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) |
1072 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) | 1083 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
@@ -1131,6 +1142,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
1131 | 1142 | ||
1132 | #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ | 1143 | #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ |
1133 | IS_GEN6(dev)) | 1144 | IS_GEN6(dev)) |
1145 | #define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) | ||
1134 | 1146 | ||
1135 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 1147 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
1136 | 1148 | ||
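Two additions in i915_drv.h drive most of the churn in the rest of the series: to_intel_bo() wraps the obj->driver_private cast in one place, which is what every open-coded cast in i915_gem.c and the other i915 files is converted to, and HAS_PIPE_CONTROL() gates the new Ironlake/Gen6 sequence-number path in i915_gem.c. The accessor-macro pattern, reduced to standalone C with illustrative struct names:

#include <stdio.h>

struct drm_gem_object {
    void *driver_private;       /* driver-specific object hangs off the core one */
};

struct intel_bo {               /* stands in for struct drm_i915_gem_object */
    struct drm_gem_object *base;
    int tiling_mode;
};

/* same shape as the i915 macro: a single cast, defined once */
#define to_intel_bo(x) ((struct intel_bo *)(x)->driver_private)

int main(void)
{
    struct drm_gem_object obj;
    struct intel_bo bo = { &obj, 1 };

    obj.driver_private = &bo;
    printf("tiling_mode %d\n", to_intel_bo(&obj)->tiling_mode);
    return 0;
}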
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 368d726853d1..ef3d91dda71a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -163,7 +163,7 @@ fast_shmem_read(struct page **pages, | |||
163 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | 163 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) |
164 | { | 164 | { |
165 | drm_i915_private_t *dev_priv = obj->dev->dev_private; | 165 | drm_i915_private_t *dev_priv = obj->dev->dev_private; |
166 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 166 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
167 | 167 | ||
168 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | 168 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
169 | obj_priv->tiling_mode != I915_TILING_NONE; | 169 | obj_priv->tiling_mode != I915_TILING_NONE; |
@@ -264,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
264 | struct drm_i915_gem_pread *args, | 264 | struct drm_i915_gem_pread *args, |
265 | struct drm_file *file_priv) | 265 | struct drm_file *file_priv) |
266 | { | 266 | { |
267 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 267 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
268 | ssize_t remain; | 268 | ssize_t remain; |
269 | loff_t offset, page_base; | 269 | loff_t offset, page_base; |
270 | char __user *user_data; | 270 | char __user *user_data; |
@@ -285,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
285 | if (ret != 0) | 285 | if (ret != 0) |
286 | goto fail_put_pages; | 286 | goto fail_put_pages; |
287 | 287 | ||
288 | obj_priv = obj->driver_private; | 288 | obj_priv = to_intel_bo(obj); |
289 | offset = args->offset; | 289 | offset = args->offset; |
290 | 290 | ||
291 | while (remain > 0) { | 291 | while (remain > 0) { |
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
354 | struct drm_i915_gem_pread *args, | 354 | struct drm_i915_gem_pread *args, |
355 | struct drm_file *file_priv) | 355 | struct drm_file *file_priv) |
356 | { | 356 | { |
357 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 357 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
358 | struct mm_struct *mm = current->mm; | 358 | struct mm_struct *mm = current->mm; |
359 | struct page **user_pages; | 359 | struct page **user_pages; |
360 | ssize_t remain; | 360 | ssize_t remain; |
@@ -403,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
403 | if (ret != 0) | 403 | if (ret != 0) |
404 | goto fail_put_pages; | 404 | goto fail_put_pages; |
405 | 405 | ||
406 | obj_priv = obj->driver_private; | 406 | obj_priv = to_intel_bo(obj); |
407 | offset = args->offset; | 407 | offset = args->offset; |
408 | 408 | ||
409 | while (remain > 0) { | 409 | while (remain > 0) { |
@@ -479,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
479 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 479 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
480 | if (obj == NULL) | 480 | if (obj == NULL) |
481 | return -EBADF; | 481 | return -EBADF; |
482 | obj_priv = obj->driver_private; | 482 | obj_priv = to_intel_bo(obj); |
483 | 483 | ||
484 | /* Bounds check source. | 484 | /* Bounds check source. |
485 | * | 485 | * |
@@ -581,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
581 | struct drm_i915_gem_pwrite *args, | 581 | struct drm_i915_gem_pwrite *args, |
582 | struct drm_file *file_priv) | 582 | struct drm_file *file_priv) |
583 | { | 583 | { |
584 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 584 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
585 | drm_i915_private_t *dev_priv = dev->dev_private; | 585 | drm_i915_private_t *dev_priv = dev->dev_private; |
586 | ssize_t remain; | 586 | ssize_t remain; |
587 | loff_t offset, page_base; | 587 | loff_t offset, page_base; |
@@ -605,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
605 | if (ret) | 605 | if (ret) |
606 | goto fail; | 606 | goto fail; |
607 | 607 | ||
608 | obj_priv = obj->driver_private; | 608 | obj_priv = to_intel_bo(obj); |
609 | offset = obj_priv->gtt_offset + args->offset; | 609 | offset = obj_priv->gtt_offset + args->offset; |
610 | 610 | ||
611 | while (remain > 0) { | 611 | while (remain > 0) { |
@@ -655,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
655 | struct drm_i915_gem_pwrite *args, | 655 | struct drm_i915_gem_pwrite *args, |
656 | struct drm_file *file_priv) | 656 | struct drm_file *file_priv) |
657 | { | 657 | { |
658 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 658 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
659 | drm_i915_private_t *dev_priv = dev->dev_private; | 659 | drm_i915_private_t *dev_priv = dev->dev_private; |
660 | ssize_t remain; | 660 | ssize_t remain; |
661 | loff_t gtt_page_base, offset; | 661 | loff_t gtt_page_base, offset; |
@@ -699,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
699 | if (ret) | 699 | if (ret) |
700 | goto out_unpin_object; | 700 | goto out_unpin_object; |
701 | 701 | ||
702 | obj_priv = obj->driver_private; | 702 | obj_priv = to_intel_bo(obj); |
703 | offset = obj_priv->gtt_offset + args->offset; | 703 | offset = obj_priv->gtt_offset + args->offset; |
704 | 704 | ||
705 | while (remain > 0) { | 705 | while (remain > 0) { |
@@ -761,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
761 | struct drm_i915_gem_pwrite *args, | 761 | struct drm_i915_gem_pwrite *args, |
762 | struct drm_file *file_priv) | 762 | struct drm_file *file_priv) |
763 | { | 763 | { |
764 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 764 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
765 | ssize_t remain; | 765 | ssize_t remain; |
766 | loff_t offset, page_base; | 766 | loff_t offset, page_base; |
767 | char __user *user_data; | 767 | char __user *user_data; |
@@ -781,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
781 | if (ret != 0) | 781 | if (ret != 0) |
782 | goto fail_put_pages; | 782 | goto fail_put_pages; |
783 | 783 | ||
784 | obj_priv = obj->driver_private; | 784 | obj_priv = to_intel_bo(obj); |
785 | offset = args->offset; | 785 | offset = args->offset; |
786 | obj_priv->dirty = 1; | 786 | obj_priv->dirty = 1; |
787 | 787 | ||
@@ -829,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
829 | struct drm_i915_gem_pwrite *args, | 829 | struct drm_i915_gem_pwrite *args, |
830 | struct drm_file *file_priv) | 830 | struct drm_file *file_priv) |
831 | { | 831 | { |
832 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 832 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
833 | struct mm_struct *mm = current->mm; | 833 | struct mm_struct *mm = current->mm; |
834 | struct page **user_pages; | 834 | struct page **user_pages; |
835 | ssize_t remain; | 835 | ssize_t remain; |
@@ -877,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
877 | if (ret != 0) | 877 | if (ret != 0) |
878 | goto fail_put_pages; | 878 | goto fail_put_pages; |
879 | 879 | ||
880 | obj_priv = obj->driver_private; | 880 | obj_priv = to_intel_bo(obj); |
881 | offset = args->offset; | 881 | offset = args->offset; |
882 | obj_priv->dirty = 1; | 882 | obj_priv->dirty = 1; |
883 | 883 | ||
@@ -952,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
952 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 952 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
953 | if (obj == NULL) | 953 | if (obj == NULL) |
954 | return -EBADF; | 954 | return -EBADF; |
955 | obj_priv = obj->driver_private; | 955 | obj_priv = to_intel_bo(obj); |
956 | 956 | ||
957 | /* Bounds check destination. | 957 | /* Bounds check destination. |
958 | * | 958 | * |
@@ -1034,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1034 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1034 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
1035 | if (obj == NULL) | 1035 | if (obj == NULL) |
1036 | return -EBADF; | 1036 | return -EBADF; |
1037 | obj_priv = obj->driver_private; | 1037 | obj_priv = to_intel_bo(obj); |
1038 | 1038 | ||
1039 | mutex_lock(&dev->struct_mutex); | 1039 | mutex_lock(&dev->struct_mutex); |
1040 | 1040 | ||
@@ -1096,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1096 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", | 1096 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", |
1097 | __func__, args->handle, obj, obj->size); | 1097 | __func__, args->handle, obj, obj->size); |
1098 | #endif | 1098 | #endif |
1099 | obj_priv = obj->driver_private; | 1099 | obj_priv = to_intel_bo(obj); |
1100 | 1100 | ||
1101 | /* Pinned buffers may be scanout, so flush the cache */ | 1101 | /* Pinned buffers may be scanout, so flush the cache */ |
1102 | if (obj_priv->pin_count) | 1102 | if (obj_priv->pin_count) |
@@ -1167,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1167 | struct drm_gem_object *obj = vma->vm_private_data; | 1167 | struct drm_gem_object *obj = vma->vm_private_data; |
1168 | struct drm_device *dev = obj->dev; | 1168 | struct drm_device *dev = obj->dev; |
1169 | struct drm_i915_private *dev_priv = dev->dev_private; | 1169 | struct drm_i915_private *dev_priv = dev->dev_private; |
1170 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1170 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1171 | pgoff_t page_offset; | 1171 | pgoff_t page_offset; |
1172 | unsigned long pfn; | 1172 | unsigned long pfn; |
1173 | int ret = 0; | 1173 | int ret = 0; |
@@ -1234,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) | |||
1234 | { | 1234 | { |
1235 | struct drm_device *dev = obj->dev; | 1235 | struct drm_device *dev = obj->dev; |
1236 | struct drm_gem_mm *mm = dev->mm_private; | 1236 | struct drm_gem_mm *mm = dev->mm_private; |
1237 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1237 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1238 | struct drm_map_list *list; | 1238 | struct drm_map_list *list; |
1239 | struct drm_local_map *map; | 1239 | struct drm_local_map *map; |
1240 | int ret = 0; | 1240 | int ret = 0; |
@@ -1305,7 +1305,7 @@ void | |||
1305 | i915_gem_release_mmap(struct drm_gem_object *obj) | 1305 | i915_gem_release_mmap(struct drm_gem_object *obj) |
1306 | { | 1306 | { |
1307 | struct drm_device *dev = obj->dev; | 1307 | struct drm_device *dev = obj->dev; |
1308 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1308 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1309 | 1309 | ||
1310 | if (dev->dev_mapping) | 1310 | if (dev->dev_mapping) |
1311 | unmap_mapping_range(dev->dev_mapping, | 1311 | unmap_mapping_range(dev->dev_mapping, |
@@ -1316,7 +1316,7 @@ static void | |||
1316 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | 1316 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) |
1317 | { | 1317 | { |
1318 | struct drm_device *dev = obj->dev; | 1318 | struct drm_device *dev = obj->dev; |
1319 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1319 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1320 | struct drm_gem_mm *mm = dev->mm_private; | 1320 | struct drm_gem_mm *mm = dev->mm_private; |
1321 | struct drm_map_list *list; | 1321 | struct drm_map_list *list; |
1322 | 1322 | ||
@@ -1347,7 +1347,7 @@ static uint32_t | |||
1347 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) | 1347 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) |
1348 | { | 1348 | { |
1349 | struct drm_device *dev = obj->dev; | 1349 | struct drm_device *dev = obj->dev; |
1350 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1350 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1351 | int start, i; | 1351 | int start, i; |
1352 | 1352 | ||
1353 | /* | 1353 | /* |
@@ -1406,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1406 | 1406 | ||
1407 | mutex_lock(&dev->struct_mutex); | 1407 | mutex_lock(&dev->struct_mutex); |
1408 | 1408 | ||
1409 | obj_priv = obj->driver_private; | 1409 | obj_priv = to_intel_bo(obj); |
1410 | 1410 | ||
1411 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 1411 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
1412 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 1412 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); |
@@ -1450,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1450 | void | 1450 | void |
1451 | i915_gem_object_put_pages(struct drm_gem_object *obj) | 1451 | i915_gem_object_put_pages(struct drm_gem_object *obj) |
1452 | { | 1452 | { |
1453 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1453 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1454 | int page_count = obj->size / PAGE_SIZE; | 1454 | int page_count = obj->size / PAGE_SIZE; |
1455 | int i; | 1455 | int i; |
1456 | 1456 | ||
@@ -1486,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) | |||
1486 | { | 1486 | { |
1487 | struct drm_device *dev = obj->dev; | 1487 | struct drm_device *dev = obj->dev; |
1488 | drm_i915_private_t *dev_priv = dev->dev_private; | 1488 | drm_i915_private_t *dev_priv = dev->dev_private; |
1489 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1489 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1490 | 1490 | ||
1491 | /* Add a reference if we're newly entering the active list. */ | 1491 | /* Add a reference if we're newly entering the active list. */ |
1492 | if (!obj_priv->active) { | 1492 | if (!obj_priv->active) { |
@@ -1506,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | |||
1506 | { | 1506 | { |
1507 | struct drm_device *dev = obj->dev; | 1507 | struct drm_device *dev = obj->dev; |
1508 | drm_i915_private_t *dev_priv = dev->dev_private; | 1508 | drm_i915_private_t *dev_priv = dev->dev_private; |
1509 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1509 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1510 | 1510 | ||
1511 | BUG_ON(!obj_priv->active); | 1511 | BUG_ON(!obj_priv->active); |
1512 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); | 1512 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); |
@@ -1517,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | |||
1517 | static void | 1517 | static void |
1518 | i915_gem_object_truncate(struct drm_gem_object *obj) | 1518 | i915_gem_object_truncate(struct drm_gem_object *obj) |
1519 | { | 1519 | { |
1520 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1520 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1521 | struct inode *inode; | 1521 | struct inode *inode; |
1522 | 1522 | ||
1523 | inode = obj->filp->f_path.dentry->d_inode; | 1523 | inode = obj->filp->f_path.dentry->d_inode; |
@@ -1538,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
1538 | { | 1538 | { |
1539 | struct drm_device *dev = obj->dev; | 1539 | struct drm_device *dev = obj->dev; |
1540 | drm_i915_private_t *dev_priv = dev->dev_private; | 1540 | drm_i915_private_t *dev_priv = dev->dev_private; |
1541 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1541 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1542 | 1542 | ||
1543 | i915_verify_inactive(dev, __FILE__, __LINE__); | 1543 | i915_verify_inactive(dev, __FILE__, __LINE__); |
1544 | if (obj_priv->pin_count != 0) | 1544 | if (obj_priv->pin_count != 0) |
@@ -1588,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev, | |||
1588 | } | 1588 | } |
1589 | } | 1589 | } |
1590 | 1590 | ||
1591 | #define PIPE_CONTROL_FLUSH(addr) \ | ||
1592 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ | ||
1593 | PIPE_CONTROL_DEPTH_STALL); \ | ||
1594 | OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ | ||
1595 | OUT_RING(0); \ | ||
1596 | OUT_RING(0); \ | ||
1597 | |||
1591 | /** | 1598 | /** |
1592 | * Creates a new sequence number, emitting a write of it to the status page | 1599 | * Creates a new sequence number, emitting a write of it to the status page |
1593 | * plus an interrupt, which will trigger i915_user_interrupt_handler. | 1600 | * plus an interrupt, which will trigger i915_user_interrupt_handler. |
@@ -1622,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1622 | if (dev_priv->mm.next_gem_seqno == 0) | 1629 | if (dev_priv->mm.next_gem_seqno == 0) |
1623 | dev_priv->mm.next_gem_seqno++; | 1630 | dev_priv->mm.next_gem_seqno++; |
1624 | 1631 | ||
1625 | BEGIN_LP_RING(4); | 1632 | if (HAS_PIPE_CONTROL(dev)) { |
1626 | OUT_RING(MI_STORE_DWORD_INDEX); | 1633 | u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; |
1627 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
1628 | OUT_RING(seqno); | ||
1629 | 1634 | ||
1630 | OUT_RING(MI_USER_INTERRUPT); | 1635 | /* |
1631 | ADVANCE_LP_RING(); | 1636 | * Workaround qword write incoherence by flushing the |
1637 | * PIPE_NOTIFY buffers out to memory before requesting | ||
1638 | * an interrupt. | ||
1639 | */ | ||
1640 | BEGIN_LP_RING(32); | ||
1641 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
1642 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); | ||
1643 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
1644 | OUT_RING(seqno); | ||
1645 | OUT_RING(0); | ||
1646 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1647 | scratch_addr += 128; /* write to separate cachelines */ | ||
1648 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1649 | scratch_addr += 128; | ||
1650 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1651 | scratch_addr += 128; | ||
1652 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1653 | scratch_addr += 128; | ||
1654 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1655 | scratch_addr += 128; | ||
1656 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1657 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
1658 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | ||
1659 | PIPE_CONTROL_NOTIFY); | ||
1660 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
1661 | OUT_RING(seqno); | ||
1662 | OUT_RING(0); | ||
1663 | ADVANCE_LP_RING(); | ||
1664 | } else { | ||
1665 | BEGIN_LP_RING(4); | ||
1666 | OUT_RING(MI_STORE_DWORD_INDEX); | ||
1667 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
1668 | OUT_RING(seqno); | ||
1669 | |||
1670 | OUT_RING(MI_USER_INTERRUPT); | ||
1671 | ADVANCE_LP_RING(); | ||
1672 | } | ||
1632 | 1673 | ||
1633 | DRM_DEBUG_DRIVER("%d\n", seqno); | 1674 | DRM_DEBUG_DRIVER("%d\n", seqno); |
1634 | 1675 | ||
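On HAS_PIPE_CONTROL() hardware, i915_add_request() writes the sequence number with a PIPE_CONTROL qword write to the dedicated seqno page and, per the in-line comment, works around qword-write incoherence by emitting a run of dummy PIPE_CONTROL flushes to scratch addresses 128 bytes apart (separate cachelines) before the final write that raises the notify interrupt. PIPE_CONTROL_FLUSH() itself is just a four-dword emit; the emit-into-a-ring style, sketched with plain arrays and placeholder encodings (not the real GPU opcodes):

#include <stdio.h>
#include <stdint.h>

static uint32_t ring[64];
static unsigned int ring_tail;

#define OUT_RING(d)  (ring[ring_tail++] = (d))

#define OP_PIPE_CONTROL 0x7a000002u     /* placeholder, not the real encoding */
#define FLAG_QW_WRITE   0x00004000u     /* placeholder */

#define PIPE_CONTROL_FLUSH(addr) do {               \
    OUT_RING(OP_PIPE_CONTROL | FLAG_QW_WRITE);      \
    OUT_RING(addr);                                 \
    OUT_RING(0);                                    \
    OUT_RING(0);                                    \
} while (0)

int main(void)
{
    uint32_t scratch = 0x1000 + 128;    /* first scratch slot past the seqno */
    int i;

    for (i = 0; i < 6; i++) {           /* six flushes, each on its own cacheline */
        PIPE_CONTROL_FLUSH(scratch);
        scratch += 128;
    }
    printf("emitted %u dwords\n", ring_tail);
    return 0;
}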
@@ -1752,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev) | |||
1752 | { | 1793 | { |
1753 | drm_i915_private_t *dev_priv = dev->dev_private; | 1794 | drm_i915_private_t *dev_priv = dev->dev_private; |
1754 | 1795 | ||
1755 | return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); | 1796 | if (HAS_PIPE_CONTROL(dev)) |
1797 | return ((volatile u32 *)(dev_priv->seqno_page))[0]; | ||
1798 | else | ||
1799 | return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); | ||
1756 | } | 1800 | } |
1757 | 1801 | ||
1758 | /** | 1802 | /** |
@@ -1965,7 +2009,7 @@ static int | |||
1965 | i915_gem_object_wait_rendering(struct drm_gem_object *obj) | 2009 | i915_gem_object_wait_rendering(struct drm_gem_object *obj) |
1966 | { | 2010 | { |
1967 | struct drm_device *dev = obj->dev; | 2011 | struct drm_device *dev = obj->dev; |
1968 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2012 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1969 | int ret; | 2013 | int ret; |
1970 | 2014 | ||
1971 | /* This function only exists to support waiting for existing rendering, | 2015 | /* This function only exists to support waiting for existing rendering, |
@@ -1997,7 +2041,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1997 | { | 2041 | { |
1998 | struct drm_device *dev = obj->dev; | 2042 | struct drm_device *dev = obj->dev; |
1999 | drm_i915_private_t *dev_priv = dev->dev_private; | 2043 | drm_i915_private_t *dev_priv = dev->dev_private; |
2000 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2044 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2001 | int ret = 0; | 2045 | int ret = 0; |
2002 | 2046 | ||
2003 | #if WATCH_BUF | 2047 | #if WATCH_BUF |
@@ -2173,7 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
2173 | #if WATCH_LRU | 2217 | #if WATCH_LRU |
2174 | DRM_INFO("%s: evicting %p\n", __func__, obj); | 2218 | DRM_INFO("%s: evicting %p\n", __func__, obj); |
2175 | #endif | 2219 | #endif |
2176 | obj_priv = obj->driver_private; | 2220 | obj_priv = to_intel_bo(obj); |
2177 | BUG_ON(obj_priv->pin_count != 0); | 2221 | BUG_ON(obj_priv->pin_count != 0); |
2178 | BUG_ON(obj_priv->active); | 2222 | BUG_ON(obj_priv->active); |
2179 | 2223 | ||
@@ -2244,7 +2288,7 @@ int | |||
2244 | i915_gem_object_get_pages(struct drm_gem_object *obj, | 2288 | i915_gem_object_get_pages(struct drm_gem_object *obj, |
2245 | gfp_t gfpmask) | 2289 | gfp_t gfpmask) |
2246 | { | 2290 | { |
2247 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2291 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2248 | int page_count, i; | 2292 | int page_count, i; |
2249 | struct address_space *mapping; | 2293 | struct address_space *mapping; |
2250 | struct inode *inode; | 2294 | struct inode *inode; |
@@ -2297,7 +2341,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2297 | struct drm_gem_object *obj = reg->obj; | 2341 | struct drm_gem_object *obj = reg->obj; |
2298 | struct drm_device *dev = obj->dev; | 2342 | struct drm_device *dev = obj->dev; |
2299 | drm_i915_private_t *dev_priv = dev->dev_private; | 2343 | drm_i915_private_t *dev_priv = dev->dev_private; |
2300 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2344 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2301 | int regnum = obj_priv->fence_reg; | 2345 | int regnum = obj_priv->fence_reg; |
2302 | uint64_t val; | 2346 | uint64_t val; |
2303 | 2347 | ||
@@ -2319,7 +2363,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2319 | struct drm_gem_object *obj = reg->obj; | 2363 | struct drm_gem_object *obj = reg->obj; |
2320 | struct drm_device *dev = obj->dev; | 2364 | struct drm_device *dev = obj->dev; |
2321 | drm_i915_private_t *dev_priv = dev->dev_private; | 2365 | drm_i915_private_t *dev_priv = dev->dev_private; |
2322 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2366 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2323 | int regnum = obj_priv->fence_reg; | 2367 | int regnum = obj_priv->fence_reg; |
2324 | uint64_t val; | 2368 | uint64_t val; |
2325 | 2369 | ||
@@ -2339,7 +2383,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2339 | struct drm_gem_object *obj = reg->obj; | 2383 | struct drm_gem_object *obj = reg->obj; |
2340 | struct drm_device *dev = obj->dev; | 2384 | struct drm_device *dev = obj->dev; |
2341 | drm_i915_private_t *dev_priv = dev->dev_private; | 2385 | drm_i915_private_t *dev_priv = dev->dev_private; |
2342 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2386 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2343 | int regnum = obj_priv->fence_reg; | 2387 | int regnum = obj_priv->fence_reg; |
2344 | int tile_width; | 2388 | int tile_width; |
2345 | uint32_t fence_reg, val; | 2389 | uint32_t fence_reg, val; |
@@ -2362,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2362 | pitch_val = obj_priv->stride / tile_width; | 2406 | pitch_val = obj_priv->stride / tile_width; |
2363 | pitch_val = ffs(pitch_val) - 1; | 2407 | pitch_val = ffs(pitch_val) - 1; |
2364 | 2408 | ||
2409 | if (obj_priv->tiling_mode == I915_TILING_Y && | ||
2410 | HAS_128_BYTE_Y_TILING(dev)) | ||
2411 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | ||
2412 | else | ||
2413 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); | ||
2414 | |||
2365 | val = obj_priv->gtt_offset; | 2415 | val = obj_priv->gtt_offset; |
2366 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2416 | if (obj_priv->tiling_mode == I915_TILING_Y) |
2367 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2417 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
@@ -2381,7 +2431,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2381 | struct drm_gem_object *obj = reg->obj; | 2431 | struct drm_gem_object *obj = reg->obj; |
2382 | struct drm_device *dev = obj->dev; | 2432 | struct drm_device *dev = obj->dev; |
2383 | drm_i915_private_t *dev_priv = dev->dev_private; | 2433 | drm_i915_private_t *dev_priv = dev->dev_private; |
2384 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2434 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2385 | int regnum = obj_priv->fence_reg; | 2435 | int regnum = obj_priv->fence_reg; |
2386 | uint32_t val; | 2436 | uint32_t val; |
2387 | uint32_t pitch_val; | 2437 | uint32_t pitch_val; |
@@ -2425,7 +2475,7 @@ static int i915_find_fence_reg(struct drm_device *dev) | |||
2425 | if (!reg->obj) | 2475 | if (!reg->obj) |
2426 | return i; | 2476 | return i; |
2427 | 2477 | ||
2428 | obj_priv = reg->obj->driver_private; | 2478 | obj_priv = to_intel_bo(reg->obj); |
2429 | if (!obj_priv->pin_count) | 2479 | if (!obj_priv->pin_count) |
2430 | avail++; | 2480 | avail++; |
2431 | } | 2481 | } |
@@ -2480,7 +2530,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
2480 | { | 2530 | { |
2481 | struct drm_device *dev = obj->dev; | 2531 | struct drm_device *dev = obj->dev; |
2482 | struct drm_i915_private *dev_priv = dev->dev_private; | 2532 | struct drm_i915_private *dev_priv = dev->dev_private; |
2483 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2533 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2484 | struct drm_i915_fence_reg *reg = NULL; | 2534 | struct drm_i915_fence_reg *reg = NULL; |
2485 | int ret; | 2535 | int ret; |
2486 | 2536 | ||
@@ -2547,7 +2597,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) | |||
2547 | { | 2597 | { |
2548 | struct drm_device *dev = obj->dev; | 2598 | struct drm_device *dev = obj->dev; |
2549 | drm_i915_private_t *dev_priv = dev->dev_private; | 2599 | drm_i915_private_t *dev_priv = dev->dev_private; |
2550 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2600 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2551 | 2601 | ||
2552 | if (IS_GEN6(dev)) { | 2602 | if (IS_GEN6(dev)) { |
2553 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + | 2603 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + |
@@ -2583,7 +2633,7 @@ int | |||
2583 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | 2633 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj) |
2584 | { | 2634 | { |
2585 | struct drm_device *dev = obj->dev; | 2635 | struct drm_device *dev = obj->dev; |
2586 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2636 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2587 | 2637 | ||
2588 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | 2638 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) |
2589 | return 0; | 2639 | return 0; |
@@ -2621,7 +2671,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2621 | { | 2671 | { |
2622 | struct drm_device *dev = obj->dev; | 2672 | struct drm_device *dev = obj->dev; |
2623 | drm_i915_private_t *dev_priv = dev->dev_private; | 2673 | drm_i915_private_t *dev_priv = dev->dev_private; |
2624 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2674 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2625 | struct drm_mm_node *free_space; | 2675 | struct drm_mm_node *free_space; |
2626 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; | 2676 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
2627 | int ret; | 2677 | int ret; |
@@ -2728,7 +2778,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2728 | void | 2778 | void |
2729 | i915_gem_clflush_object(struct drm_gem_object *obj) | 2779 | i915_gem_clflush_object(struct drm_gem_object *obj) |
2730 | { | 2780 | { |
2731 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2781 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2732 | 2782 | ||
2733 | /* If we don't have a page list set up, then we're not pinned | 2783 | /* If we don't have a page list set up, then we're not pinned |
2734 | * to GPU, and we can ignore the cache flush because it'll happen | 2784 | * to GPU, and we can ignore the cache flush because it'll happen |
@@ -2829,7 +2879,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj) | |||
2829 | int | 2879 | int |
2830 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 2880 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) |
2831 | { | 2881 | { |
2832 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2882 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2833 | uint32_t old_write_domain, old_read_domains; | 2883 | uint32_t old_write_domain, old_read_domains; |
2834 | int ret; | 2884 | int ret; |
2835 | 2885 | ||
@@ -2879,7 +2929,7 @@ int | |||
2879 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | 2929 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) |
2880 | { | 2930 | { |
2881 | struct drm_device *dev = obj->dev; | 2931 | struct drm_device *dev = obj->dev; |
2882 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2932 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2883 | uint32_t old_write_domain, old_read_domains; | 2933 | uint32_t old_write_domain, old_read_domains; |
2884 | int ret; | 2934 | int ret; |
2885 | 2935 | ||
@@ -3092,7 +3142,7 @@ static void | |||
3092 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | 3142 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) |
3093 | { | 3143 | { |
3094 | struct drm_device *dev = obj->dev; | 3144 | struct drm_device *dev = obj->dev; |
3095 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3145 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3096 | uint32_t invalidate_domains = 0; | 3146 | uint32_t invalidate_domains = 0; |
3097 | uint32_t flush_domains = 0; | 3147 | uint32_t flush_domains = 0; |
3098 | uint32_t old_read_domains; | 3148 | uint32_t old_read_domains; |
@@ -3177,7 +3227,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
3177 | static void | 3227 | static void |
3178 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | 3228 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) |
3179 | { | 3229 | { |
3180 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3230 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3181 | 3231 | ||
3182 | if (!obj_priv->page_cpu_valid) | 3232 | if (!obj_priv->page_cpu_valid) |
3183 | return; | 3233 | return; |
@@ -3217,7 +3267,7 @@ static int | |||
3217 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 3267 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, |
3218 | uint64_t offset, uint64_t size) | 3268 | uint64_t offset, uint64_t size) |
3219 | { | 3269 | { |
3220 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3270 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3221 | uint32_t old_read_domains; | 3271 | uint32_t old_read_domains; |
3222 | int i, ret; | 3272 | int i, ret; |
3223 | 3273 | ||
@@ -3286,7 +3336,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3286 | { | 3336 | { |
3287 | struct drm_device *dev = obj->dev; | 3337 | struct drm_device *dev = obj->dev; |
3288 | drm_i915_private_t *dev_priv = dev->dev_private; | 3338 | drm_i915_private_t *dev_priv = dev->dev_private; |
3289 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3339 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3290 | int i, ret; | 3340 | int i, ret; |
3291 | void __iomem *reloc_page; | 3341 | void __iomem *reloc_page; |
3292 | bool need_fence; | 3342 | bool need_fence; |
@@ -3337,7 +3387,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3337 | i915_gem_object_unpin(obj); | 3387 | i915_gem_object_unpin(obj); |
3338 | return -EBADF; | 3388 | return -EBADF; |
3339 | } | 3389 | } |
3340 | target_obj_priv = target_obj->driver_private; | 3390 | target_obj_priv = to_intel_bo(target_obj); |
3341 | 3391 | ||
3342 | #if WATCH_RELOC | 3392 | #if WATCH_RELOC |
3343 | DRM_INFO("%s: obj %p offset %08x target %d " | 3393 | DRM_INFO("%s: obj %p offset %08x target %d " |
@@ -3689,7 +3739,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, | |||
3689 | prepare_to_wait(&dev_priv->pending_flip_queue, | 3739 | prepare_to_wait(&dev_priv->pending_flip_queue, |
3690 | &wait, TASK_INTERRUPTIBLE); | 3740 | &wait, TASK_INTERRUPTIBLE); |
3691 | for (i = 0; i < count; i++) { | 3741 | for (i = 0; i < count; i++) { |
3692 | obj_priv = object_list[i]->driver_private; | 3742 | obj_priv = to_intel_bo(object_list[i]); |
3693 | if (atomic_read(&obj_priv->pending_flip) > 0) | 3743 | if (atomic_read(&obj_priv->pending_flip) > 0) |
3694 | break; | 3744 | break; |
3695 | } | 3745 | } |
@@ -3798,7 +3848,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3798 | goto err; | 3848 | goto err; |
3799 | } | 3849 | } |
3800 | 3850 | ||
3801 | obj_priv = object_list[i]->driver_private; | 3851 | obj_priv = to_intel_bo(object_list[i]); |
3802 | if (obj_priv->in_execbuffer) { | 3852 | if (obj_priv->in_execbuffer) { |
3803 | DRM_ERROR("Object %p appears more than once in object list\n", | 3853 | DRM_ERROR("Object %p appears more than once in object list\n", |
3804 | object_list[i]); | 3854 | object_list[i]); |
@@ -3924,7 +3974,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3924 | 3974 | ||
3925 | for (i = 0; i < args->buffer_count; i++) { | 3975 | for (i = 0; i < args->buffer_count; i++) { |
3926 | struct drm_gem_object *obj = object_list[i]; | 3976 | struct drm_gem_object *obj = object_list[i]; |
3927 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3977 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3928 | uint32_t old_write_domain = obj->write_domain; | 3978 | uint32_t old_write_domain = obj->write_domain; |
3929 | 3979 | ||
3930 | obj->write_domain = obj->pending_write_domain; | 3980 | obj->write_domain = obj->pending_write_domain; |
@@ -3999,7 +4049,7 @@ err: | |||
3999 | 4049 | ||
4000 | for (i = 0; i < args->buffer_count; i++) { | 4050 | for (i = 0; i < args->buffer_count; i++) { |
4001 | if (object_list[i]) { | 4051 | if (object_list[i]) { |
4002 | obj_priv = object_list[i]->driver_private; | 4052 | obj_priv = to_intel_bo(object_list[i]); |
4003 | obj_priv->in_execbuffer = false; | 4053 | obj_priv->in_execbuffer = false; |
4004 | } | 4054 | } |
4005 | drm_gem_object_unreference(object_list[i]); | 4055 | drm_gem_object_unreference(object_list[i]); |
@@ -4177,7 +4227,7 @@ int | |||
4177 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | 4227 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) |
4178 | { | 4228 | { |
4179 | struct drm_device *dev = obj->dev; | 4229 | struct drm_device *dev = obj->dev; |
4180 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4230 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4181 | int ret; | 4231 | int ret; |
4182 | 4232 | ||
4183 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4233 | i915_verify_inactive(dev, __FILE__, __LINE__); |
@@ -4210,7 +4260,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) | |||
4210 | { | 4260 | { |
4211 | struct drm_device *dev = obj->dev; | 4261 | struct drm_device *dev = obj->dev; |
4212 | drm_i915_private_t *dev_priv = dev->dev_private; | 4262 | drm_i915_private_t *dev_priv = dev->dev_private; |
4213 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4263 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4214 | 4264 | ||
4215 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4265 | i915_verify_inactive(dev, __FILE__, __LINE__); |
4216 | obj_priv->pin_count--; | 4266 | obj_priv->pin_count--; |
@@ -4250,7 +4300,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
4250 | mutex_unlock(&dev->struct_mutex); | 4300 | mutex_unlock(&dev->struct_mutex); |
4251 | return -EBADF; | 4301 | return -EBADF; |
4252 | } | 4302 | } |
4253 | obj_priv = obj->driver_private; | 4303 | obj_priv = to_intel_bo(obj); |
4254 | 4304 | ||
4255 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 4305 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
4256 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | 4306 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); |
@@ -4307,7 +4357,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
4307 | return -EBADF; | 4357 | return -EBADF; |
4308 | } | 4358 | } |
4309 | 4359 | ||
4310 | obj_priv = obj->driver_private; | 4360 | obj_priv = to_intel_bo(obj); |
4311 | if (obj_priv->pin_filp != file_priv) { | 4361 | if (obj_priv->pin_filp != file_priv) { |
4312 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", | 4362 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", |
4313 | args->handle); | 4363 | args->handle); |
@@ -4349,7 +4399,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4349 | */ | 4399 | */ |
4350 | i915_gem_retire_requests(dev); | 4400 | i915_gem_retire_requests(dev); |
4351 | 4401 | ||
4352 | obj_priv = obj->driver_private; | 4402 | obj_priv = to_intel_bo(obj); |
4353 | /* Don't count being on the flushing list against the object being | 4403 | /* Don't count being on the flushing list against the object being |
4354 | * done. Otherwise, a buffer left on the flushing list but not getting | 4404 | * done. Otherwise, a buffer left on the flushing list but not getting |
4355 | * flushed (because nobody's flushing that domain) won't ever return | 4405 | * flushed (because nobody's flushing that domain) won't ever return |
@@ -4395,7 +4445,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4395 | } | 4445 | } |
4396 | 4446 | ||
4397 | mutex_lock(&dev->struct_mutex); | 4447 | mutex_lock(&dev->struct_mutex); |
4398 | obj_priv = obj->driver_private; | 4448 | obj_priv = to_intel_bo(obj); |
4399 | 4449 | ||
4400 | if (obj_priv->pin_count) { | 4450 | if (obj_priv->pin_count) { |
4401 | drm_gem_object_unreference(obj); | 4451 | drm_gem_object_unreference(obj); |
@@ -4456,7 +4506,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4456 | void i915_gem_free_object(struct drm_gem_object *obj) | 4506 | void i915_gem_free_object(struct drm_gem_object *obj) |
4457 | { | 4507 | { |
4458 | struct drm_device *dev = obj->dev; | 4508 | struct drm_device *dev = obj->dev; |
4459 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4509 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4460 | 4510 | ||
4461 | trace_i915_gem_object_destroy(obj); | 4511 | trace_i915_gem_object_destroy(obj); |
4462 | 4512 | ||
@@ -4546,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev) | |||
4546 | return 0; | 4596 | return 0; |
4547 | } | 4597 | } |
4548 | 4598 | ||
4599 | /* | ||
4600 | * 965+ support PIPE_CONTROL commands, which provide finer grained control | ||
4601 | * over cache flushing. | ||
4602 | */ | ||
4603 | static int | ||
4604 | i915_gem_init_pipe_control(struct drm_device *dev) | ||
4605 | { | ||
4606 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4607 | struct drm_gem_object *obj; | ||
4608 | struct drm_i915_gem_object *obj_priv; | ||
4609 | int ret; | ||
4610 | |||
4611 | obj = drm_gem_object_alloc(dev, 4096); | ||
4612 | if (obj == NULL) { | ||
4613 | DRM_ERROR("Failed to allocate seqno page\n"); | ||
4614 | ret = -ENOMEM; | ||
4615 | goto err; | ||
4616 | } | ||
4617 | obj_priv = to_intel_bo(obj); | ||
4618 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
4619 | |||
4620 | ret = i915_gem_object_pin(obj, 4096); | ||
4621 | if (ret) | ||
4622 | goto err_unref; | ||
4623 | |||
4624 | dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; | ||
4625 | dev_priv->seqno_page = kmap(obj_priv->pages[0]); | ||
4626 | if (dev_priv->seqno_page == NULL) | ||
4627 | goto err_unpin; | ||
4628 | |||
4629 | dev_priv->seqno_obj = obj; | ||
4630 | memset(dev_priv->seqno_page, 0, PAGE_SIZE); | ||
4631 | |||
4632 | return 0; | ||
4633 | |||
4634 | err_unpin: | ||
4635 | i915_gem_object_unpin(obj); | ||
4636 | err_unref: | ||
4637 | drm_gem_object_unreference(obj); | ||
4638 | err: | ||
4639 | return ret; | ||
4640 | } | ||
4641 | |||
4549 | static int | 4642 | static int |
4550 | i915_gem_init_hws(struct drm_device *dev) | 4643 | i915_gem_init_hws(struct drm_device *dev) |
4551 | { | 4644 | { |
@@ -4563,15 +4656,16 @@ i915_gem_init_hws(struct drm_device *dev) | |||
4563 | obj = drm_gem_object_alloc(dev, 4096); | 4656 | obj = drm_gem_object_alloc(dev, 4096); |
4564 | if (obj == NULL) { | 4657 | if (obj == NULL) { |
4565 | DRM_ERROR("Failed to allocate status page\n"); | 4658 | DRM_ERROR("Failed to allocate status page\n"); |
4566 | return -ENOMEM; | 4659 | ret = -ENOMEM; |
4660 | goto err; | ||
4567 | } | 4661 | } |
4568 | obj_priv = obj->driver_private; | 4662 | obj_priv = to_intel_bo(obj); |
4569 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | 4663 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; |
4570 | 4664 | ||
4571 | ret = i915_gem_object_pin(obj, 4096); | 4665 | ret = i915_gem_object_pin(obj, 4096); |
4572 | if (ret != 0) { | 4666 | if (ret != 0) { |
4573 | drm_gem_object_unreference(obj); | 4667 | drm_gem_object_unreference(obj); |
4574 | return ret; | 4668 | goto err_unref; |
4575 | } | 4669 | } |
4576 | 4670 | ||
4577 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; | 4671 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; |
@@ -4580,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev) | |||
4580 | if (dev_priv->hw_status_page == NULL) { | 4674 | if (dev_priv->hw_status_page == NULL) { |
4581 | DRM_ERROR("Failed to map status page.\n"); | 4675 | DRM_ERROR("Failed to map status page.\n"); |
4582 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 4676 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
4583 | i915_gem_object_unpin(obj); | 4677 | ret = -EINVAL; |
4584 | drm_gem_object_unreference(obj); | 4678 | goto err_unpin; |
4585 | return -EINVAL; | ||
4586 | } | 4679 | } |
4680 | |||
4681 | if (HAS_PIPE_CONTROL(dev)) { | ||
4682 | ret = i915_gem_init_pipe_control(dev); | ||
4683 | if (ret) | ||
4684 | goto err_unpin; | ||
4685 | } | ||
4686 | |||
4587 | dev_priv->hws_obj = obj; | 4687 | dev_priv->hws_obj = obj; |
4588 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | 4688 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); |
4589 | if (IS_GEN6(dev)) { | 4689 | if (IS_GEN6(dev)) { |
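i915_gem_init_hws() now also sets up the PIPE_CONTROL seqno page on hardware that supports it. HAS_PIPE_CONTROL() is defined outside this hunk; a hedged sketch of the platform gate it presumably encodes (the exact generation set is an assumption, not taken from this patch):

    /* Assumed gate: the PIPE_CONTROL seqno path is wired up for the
     * recent generations this patch targets; older parts keep using
     * only the hardware status page. */
    #define HAS_PIPE_CONTROL(dev)	(IS_IRONLAKE(dev) || IS_GEN6(dev))
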
@@ -4596,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev) | |||
4596 | DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); | 4696 | DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); |
4597 | 4697 | ||
4598 | return 0; | 4698 | return 0; |
4699 | |||
4700 | err_unpin: | ||
4701 | i915_gem_object_unpin(obj); | ||
4702 | err_unref: | ||
4703 | drm_gem_object_unreference(obj); | ||
4704 | err: | ||
4705 | return 0; | ||
4706 | } | ||
4707 | |||
4708 | static void | ||
4709 | i915_gem_cleanup_pipe_control(struct drm_device *dev) | ||
4710 | { | ||
4711 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4712 | struct drm_gem_object *obj; | ||
4713 | struct drm_i915_gem_object *obj_priv; | ||
4714 | |||
4715 | obj = dev_priv->seqno_obj; | ||
4716 | obj_priv = to_intel_bo(obj); | ||
4717 | kunmap(obj_priv->pages[0]); | ||
4718 | i915_gem_object_unpin(obj); | ||
4719 | drm_gem_object_unreference(obj); | ||
4720 | dev_priv->seqno_obj = NULL; | ||
4721 | |||
4722 | dev_priv->seqno_page = NULL; | ||
4599 | } | 4723 | } |
4600 | 4724 | ||
4601 | static void | 4725 | static void |
@@ -4609,7 +4733,7 @@ i915_gem_cleanup_hws(struct drm_device *dev) | |||
4609 | return; | 4733 | return; |
4610 | 4734 | ||
4611 | obj = dev_priv->hws_obj; | 4735 | obj = dev_priv->hws_obj; |
4612 | obj_priv = obj->driver_private; | 4736 | obj_priv = to_intel_bo(obj); |
4613 | 4737 | ||
4614 | kunmap(obj_priv->pages[0]); | 4738 | kunmap(obj_priv->pages[0]); |
4615 | i915_gem_object_unpin(obj); | 4739 | i915_gem_object_unpin(obj); |
@@ -4619,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev) | |||
4619 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 4743 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
4620 | dev_priv->hw_status_page = NULL; | 4744 | dev_priv->hw_status_page = NULL; |
4621 | 4745 | ||
4746 | if (HAS_PIPE_CONTROL(dev)) | ||
4747 | i915_gem_cleanup_pipe_control(dev); | ||
4748 | |||
4622 | /* Write high address into HWS_PGA when disabling. */ | 4749 | /* Write high address into HWS_PGA when disabling. */ |
4623 | I915_WRITE(HWS_PGA, 0x1ffff000); | 4750 | I915_WRITE(HWS_PGA, 0x1ffff000); |
4624 | } | 4751 | } |
@@ -4643,7 +4770,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
4643 | i915_gem_cleanup_hws(dev); | 4770 | i915_gem_cleanup_hws(dev); |
4644 | return -ENOMEM; | 4771 | return -ENOMEM; |
4645 | } | 4772 | } |
4646 | obj_priv = obj->driver_private; | 4773 | obj_priv = to_intel_bo(obj); |
4647 | 4774 | ||
4648 | ret = i915_gem_object_pin(obj, 4096); | 4775 | ret = i915_gem_object_pin(obj, 4096); |
4649 | if (ret != 0) { | 4776 | if (ret != 0) { |
@@ -4936,7 +5063,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
4936 | int ret; | 5063 | int ret; |
4937 | int page_count; | 5064 | int page_count; |
4938 | 5065 | ||
4939 | obj_priv = obj->driver_private; | 5066 | obj_priv = to_intel_bo(obj); |
4940 | if (!obj_priv->phys_obj) | 5067 | if (!obj_priv->phys_obj) |
4941 | return; | 5068 | return; |
4942 | 5069 | ||
@@ -4975,7 +5102,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4975 | if (id > I915_MAX_PHYS_OBJECT) | 5102 | if (id > I915_MAX_PHYS_OBJECT) |
4976 | return -EINVAL; | 5103 | return -EINVAL; |
4977 | 5104 | ||
4978 | obj_priv = obj->driver_private; | 5105 | obj_priv = to_intel_bo(obj); |
4979 | 5106 | ||
4980 | if (obj_priv->phys_obj) { | 5107 | if (obj_priv->phys_obj) { |
4981 | if (obj_priv->phys_obj->id == id) | 5108 | if (obj_priv->phys_obj->id == id) |
@@ -5026,7 +5153,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
5026 | struct drm_i915_gem_pwrite *args, | 5153 | struct drm_i915_gem_pwrite *args, |
5027 | struct drm_file *file_priv) | 5154 | struct drm_file *file_priv) |
5028 | { | 5155 | { |
5029 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 5156 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
5030 | void *obj_addr; | 5157 | void *obj_addr; |
5031 | int ret; | 5158 | int ret; |
5032 | char __user *user_data; | 5159 | char __user *user_data; |
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index e602614bd3f8..35507cf53fa3 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
@@ -72,7 +72,7 @@ void | |||
72 | i915_gem_dump_object(struct drm_gem_object *obj, int len, | 72 | i915_gem_dump_object(struct drm_gem_object *obj, int len, |
73 | const char *where, uint32_t mark) | 73 | const char *where, uint32_t mark) |
74 | { | 74 | { |
75 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 75 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
76 | int page; | 76 | int page; |
77 | 77 | ||
78 | DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); | 78 | DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); |
@@ -137,7 +137,7 @@ void | |||
137 | i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | 137 | i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) |
138 | { | 138 | { |
139 | struct drm_device *dev = obj->dev; | 139 | struct drm_device *dev = obj->dev; |
140 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 140 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
141 | int page; | 141 | int page; |
142 | uint32_t *gtt_mapping; | 142 | uint32_t *gtt_mapping; |
143 | uint32_t *backing_map = NULL; | 143 | uint32_t *backing_map = NULL; |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index c01c878e51ba..4bdccefcf2cf 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
202 | * reg, so dont bother to check the size */ | 202 | * reg, so dont bother to check the size */ |
203 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) | 203 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) |
204 | return false; | 204 | return false; |
205 | } else if (IS_I9XX(dev)) { | 205 | } else if (IS_GEN3(dev) || IS_GEN2(dev)) { |
206 | uint32_t pitch_val = ffs(stride / tile_width) - 1; | 206 | if (stride > 8192) |
207 | |||
208 | /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB) | ||
209 | * instead of 4 (2KB) on 945s. | ||
210 | */ | ||
211 | if (pitch_val > I915_FENCE_MAX_PITCH_VAL || | ||
212 | size > (I830_FENCE_MAX_SIZE_VAL << 20)) | ||
213 | return false; | 207 | return false; |
214 | } else { | ||
215 | uint32_t pitch_val = ffs(stride / tile_width) - 1; | ||
216 | 208 | ||
217 | if (pitch_val > I830_FENCE_MAX_PITCH_VAL || | 209 | if (IS_GEN3(dev)) { |
218 | size > (I830_FENCE_MAX_SIZE_VAL << 19)) | 210 | if (size > I830_FENCE_MAX_SIZE_VAL << 20) |
219 | return false; | 211 | return false; |
212 | } else { | ||
213 | if (size > I830_FENCE_MAX_SIZE_VAL << 19) | ||
214 | return false; | ||
215 | } | ||
220 | } | 216 | } |
221 | 217 | ||
222 | /* 965+ just needs multiples of tile width */ | 218 | /* 965+ just needs multiples of tile width */ |
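Rewritten this way, the gen2/gen3 limits can be read straight off the constants: with I830_FENCE_MAX_SIZE_VAL = (1 << 8), the size cap works out to 256 << 20 = 256 MB of fenceable object on gen3 versus 256 << 19 = 128 MB on gen2, and the stride check becomes a plain 8 KB cap rather than a pitch encoding; the encoding itself is now verified by the WARN_ONs added to i915_write_fence_reg() earlier in this patch.
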
@@ -240,7 +236,7 @@ bool | |||
240 | i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) | 236 | i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) |
241 | { | 237 | { |
242 | struct drm_device *dev = obj->dev; | 238 | struct drm_device *dev = obj->dev; |
243 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 239 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
244 | 240 | ||
245 | if (obj_priv->gtt_space == NULL) | 241 | if (obj_priv->gtt_space == NULL) |
246 | return true; | 242 | return true; |
@@ -280,7 +276,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
280 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 276 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
281 | if (obj == NULL) | 277 | if (obj == NULL) |
282 | return -EINVAL; | 278 | return -EINVAL; |
283 | obj_priv = obj->driver_private; | 279 | obj_priv = to_intel_bo(obj); |
284 | 280 | ||
285 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { | 281 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { |
286 | drm_gem_object_unreference_unlocked(obj); | 282 | drm_gem_object_unreference_unlocked(obj); |
@@ -364,7 +360,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, | |||
364 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 360 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
365 | if (obj == NULL) | 361 | if (obj == NULL) |
366 | return -EINVAL; | 362 | return -EINVAL; |
367 | obj_priv = obj->driver_private; | 363 | obj_priv = to_intel_bo(obj); |
368 | 364 | ||
369 | mutex_lock(&dev->struct_mutex); | 365 | mutex_lock(&dev->struct_mutex); |
370 | 366 | ||
@@ -427,7 +423,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) | |||
427 | { | 423 | { |
428 | struct drm_device *dev = obj->dev; | 424 | struct drm_device *dev = obj->dev; |
429 | drm_i915_private_t *dev_priv = dev->dev_private; | 425 | drm_i915_private_t *dev_priv = dev->dev_private; |
430 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 426 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
431 | int page_count = obj->size >> PAGE_SHIFT; | 427 | int page_count = obj->size >> PAGE_SHIFT; |
432 | int i; | 428 | int i; |
433 | 429 | ||
@@ -456,7 +452,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) | |||
456 | { | 452 | { |
457 | struct drm_device *dev = obj->dev; | 453 | struct drm_device *dev = obj->dev; |
458 | drm_i915_private_t *dev_priv = dev->dev_private; | 454 | drm_i915_private_t *dev_priv = dev->dev_private; |
459 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 455 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
460 | int page_count = obj->size >> PAGE_SHIFT; | 456 | int page_count = obj->size >> PAGE_SHIFT; |
461 | int i; | 457 | int i; |
462 | 458 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 49c458bc6502..2b8b969d0c15 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -260,10 +260,10 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
260 | 260 | ||
261 | if (mode_config->num_connector) { | 261 | if (mode_config->num_connector) { |
262 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 262 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
263 | struct intel_output *intel_output = to_intel_output(connector); | 263 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
264 | 264 | ||
265 | if (intel_output->hot_plug) | 265 | if (intel_encoder->hot_plug) |
266 | (*intel_output->hot_plug) (intel_output); | 266 | (*intel_encoder->hot_plug) (intel_encoder); |
267 | } | 267 | } |
268 | } | 268 | } |
269 | /* Just fire off a uevent and let userspace tell us what to do */ | 269 | /* Just fire off a uevent and let userspace tell us what to do */ |
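Alongside the GEM changes, this patch carries the intel_output -> intel_encoder rename: struct intel_output becomes struct intel_encoder and to_intel_output() becomes to_intel_encoder(). Judging from the connector = &intel_encoder->base assignments later in intel_crt_init(), the accessor is presumably still a container_of() over the embedded connector; a sketch under that assumption:

    /* Assumed shape of the renamed accessor: the drm_connector is
     * embedded as ->base, so the wrapper is recovered from a
     * connector pointer. */
    #define to_intel_encoder(x)	container_of(x, struct intel_encoder, base)
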
@@ -349,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
349 | READ_BREADCRUMB(dev_priv); | 349 | READ_BREADCRUMB(dev_priv); |
350 | } | 350 | } |
351 | 351 | ||
352 | if (gt_iir & GT_USER_INTERRUPT) { | 352 | if (gt_iir & GT_PIPE_NOTIFY) { |
353 | u32 seqno = i915_get_gem_seqno(dev); | 353 | u32 seqno = i915_get_gem_seqno(dev); |
354 | dev_priv->mm.irq_gem_seqno = seqno; | 354 | dev_priv->mm.irq_gem_seqno = seqno; |
355 | trace_i915_gem_request_complete(dev, seqno); | 355 | trace_i915_gem_request_complete(dev, seqno); |
@@ -444,7 +444,7 @@ i915_error_object_create(struct drm_device *dev, | |||
444 | if (src == NULL) | 444 | if (src == NULL) |
445 | return NULL; | 445 | return NULL; |
446 | 446 | ||
447 | src_priv = src->driver_private; | 447 | src_priv = to_intel_bo(src); |
448 | if (src_priv->pages == NULL) | 448 | if (src_priv->pages == NULL) |
449 | return NULL; | 449 | return NULL; |
450 | 450 | ||
@@ -1005,7 +1005,7 @@ void i915_user_irq_get(struct drm_device *dev) | |||
1005 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1005 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
1006 | if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { | 1006 | if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { |
1007 | if (HAS_PCH_SPLIT(dev)) | 1007 | if (HAS_PCH_SPLIT(dev)) |
1008 | ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); | 1008 | ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); |
1009 | else | 1009 | else |
1010 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); | 1010 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); |
1011 | } | 1011 | } |
@@ -1021,7 +1021,7 @@ void i915_user_irq_put(struct drm_device *dev) | |||
1021 | BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); | 1021 | BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); |
1022 | if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { | 1022 | if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { |
1023 | if (HAS_PCH_SPLIT(dev)) | 1023 | if (HAS_PCH_SPLIT(dev)) |
1024 | ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); | 1024 | ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); |
1025 | else | 1025 | else |
1026 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); | 1026 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); |
1027 | } | 1027 | } |
@@ -1305,7 +1305,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1305 | /* enable kind of interrupts always enabled */ | 1305 | /* enable kind of interrupts always enabled */ |
1306 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 1306 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1307 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | 1307 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; |
1308 | u32 render_mask = GT_USER_INTERRUPT; | 1308 | u32 render_mask = GT_PIPE_NOTIFY; |
1309 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1309 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1310 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1310 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1311 | 1311 | ||
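With PIPE_CONTROL in use, the render interrupt moves from GT_USER_INTERRUPT to GT_PIPE_NOTIFY and the sequence number is posted into the dedicated seqno page pinned by i915_gem_init_pipe_control() rather than into the hardware status page. The matching read side is not part of these hunks; a rough sketch, assuming the seqno lands in the first dword of dev_priv->seqno_page and that the existing READ_HWSP()/I915_GEM_HWS_INDEX helpers still cover the legacy path:

    /* Sketch of the updated seqno read: prefer the PIPE_CONTROL-written
     * page when available, otherwise fall back to the status-page slot. */
    u32 i915_get_gem_seqno(struct drm_device *dev)
    {
    	drm_i915_private_t *dev_priv = dev->dev_private;

    	if (HAS_PIPE_CONTROL(dev))
    		return ((volatile u32 *)(dev_priv->seqno_page))[0];
    	else
    		return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
    }
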
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index 7cc8410239cb..8fcc75c1aa28 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c | |||
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev) | |||
382 | struct drm_i915_private *dev_priv = dev->dev_private; | 382 | struct drm_i915_private *dev_priv = dev->dev_private; |
383 | struct intel_opregion *opregion = &dev_priv->opregion; | 383 | struct intel_opregion *opregion = &dev_priv->opregion; |
384 | struct drm_connector *connector; | 384 | struct drm_connector *connector; |
385 | acpi_handle handle; | ||
386 | struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; | ||
387 | unsigned long long device_id; | ||
388 | acpi_status status; | ||
385 | int i = 0; | 389 | int i = 0; |
386 | 390 | ||
391 | handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); | ||
392 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) | ||
393 | return; | ||
394 | |||
395 | if (acpi_is_video_device(acpi_dev)) | ||
396 | acpi_video_bus = acpi_dev; | ||
397 | else { | ||
398 | list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { | ||
399 | if (acpi_is_video_device(acpi_cdev)) { | ||
400 | acpi_video_bus = acpi_cdev; | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | } | ||
405 | |||
406 | if (!acpi_video_bus) { | ||
407 | printk(KERN_WARNING "No ACPI video bus found\n"); | ||
408 | return; | ||
409 | } | ||
410 | |||
411 | list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { | ||
412 | if (i >= 8) { | ||
413 | dev_printk (KERN_ERR, &dev->pdev->dev, | ||
414 | "More than 8 outputs detected\n"); | ||
415 | return; | ||
416 | } | ||
417 | status = | ||
418 | acpi_evaluate_integer(acpi_cdev->handle, "_ADR", | ||
419 | NULL, &device_id); | ||
420 | if (ACPI_SUCCESS(status)) { | ||
421 | if (!device_id) | ||
422 | goto blind_set; | ||
423 | opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f); | ||
424 | i++; | ||
425 | } | ||
426 | } | ||
427 | |||
428 | end: | ||
429 | /* If fewer than 8 outputs, the list must be null terminated */ | ||
430 | if (i < 8) | ||
431 | opregion->acpi->didl[i] = 0; | ||
432 | return; | ||
433 | |||
434 | blind_set: | ||
435 | i = 0; | ||
387 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 436 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
388 | int output_type = ACPI_OTHER_OUTPUT; | 437 | int output_type = ACPI_OTHER_OUTPUT; |
389 | if (i >= 8) { | 438 | if (i >= 8) { |
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev) | |||
416 | opregion->acpi->didl[i] |= (1<<31) | output_type | i; | 465 | opregion->acpi->didl[i] |= (1<<31) | output_type | i; |
417 | i++; | 466 | i++; |
418 | } | 467 | } |
419 | 468 | goto end; | |
420 | /* If fewer than 8 outputs, the list must be null terminated */ | ||
421 | if (i < 8) | ||
422 | opregion->acpi->didl[i] = 0; | ||
423 | } | 469 | } |
424 | 470 | ||
425 | int intel_opregion_init(struct drm_device *dev, int resume) | 471 | int intel_opregion_init(struct drm_device *dev, int resume) |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cbbf59f56dfa..4cbc5210fd30 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -230,6 +230,16 @@ | |||
230 | #define ASYNC_FLIP (1<<22) | 230 | #define ASYNC_FLIP (1<<22) |
231 | #define DISPLAY_PLANE_A (0<<20) | 231 | #define DISPLAY_PLANE_A (0<<20) |
232 | #define DISPLAY_PLANE_B (1<<20) | 232 | #define DISPLAY_PLANE_B (1<<20) |
233 | #define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2) | ||
234 | #define PIPE_CONTROL_QW_WRITE (1<<14) | ||
235 | #define PIPE_CONTROL_DEPTH_STALL (1<<13) | ||
236 | #define PIPE_CONTROL_WC_FLUSH (1<<12) | ||
237 | #define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */ | ||
238 | #define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */ | ||
239 | #define PIPE_CONTROL_ISP_DIS (1<<9) | ||
240 | #define PIPE_CONTROL_NOTIFY (1<<8) | ||
241 | #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ | ||
242 | #define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ | ||
233 | 243 | ||
234 | /* | 244 | /* |
235 | * Fence registers | 245 | * Fence registers |
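These GFX_OP_PIPE_CONTROL bits are what the request path uses on PIPE_CONTROL-capable hardware to flush write caches and post the sequence number. A sketch of how such a command might be emitted, assuming the driver's usual BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() ring macros, with dev_priv and seqno in scope as in the request-emission code:

    /* Four-dword PIPE_CONTROL: flush write caches, quadword-write the
     * seqno into the GTT-mapped seqno page, then raise GT_PIPE_NOTIFY. */
    BEGIN_LP_RING(4);
    OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
    	 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_NOTIFY);
    OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
    OUT_RING(seqno);
    OUT_RING(0);
    ADVANCE_LP_RING();
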
@@ -241,7 +251,7 @@ | |||
241 | #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) | 251 | #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) |
242 | #define I830_FENCE_PITCH_SHIFT 4 | 252 | #define I830_FENCE_PITCH_SHIFT 4 |
243 | #define I830_FENCE_REG_VALID (1<<0) | 253 | #define I830_FENCE_REG_VALID (1<<0) |
244 | #define I915_FENCE_MAX_PITCH_VAL 0x10 | 254 | #define I915_FENCE_MAX_PITCH_VAL 4 |
245 | #define I830_FENCE_MAX_PITCH_VAL 6 | 255 | #define I830_FENCE_MAX_PITCH_VAL 6 |
246 | #define I830_FENCE_MAX_SIZE_VAL (1<<8) | 256 | #define I830_FENCE_MAX_SIZE_VAL (1<<8) |
247 | 257 | ||
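Dropping I915_FENCE_MAX_PITCH_VAL from 0x10 to 4 matches the value actually programmed: i915_write_fence_reg() stores pitch_val = ffs(stride / tile_width) - 1, so with the 8 KB stride cap now enforced in i915_tiling_ok() and a 512-byte X-tile width (an assumption taken from the tile_width selection in that function), the largest legal field value is ffs(8192 / 512) - 1 = ffs(16) - 1 = 4, not 16.
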
@@ -2285,6 +2295,7 @@ | |||
2285 | #define DEIER 0x4400c | 2295 | #define DEIER 0x4400c |
2286 | 2296 | ||
2287 | /* GT interrupt */ | 2297 | /* GT interrupt */ |
2298 | #define GT_PIPE_NOTIFY (1 << 4) | ||
2288 | #define GT_SYNC_STATUS (1 << 2) | 2299 | #define GT_SYNC_STATUS (1 << 2) |
2289 | #define GT_USER_INTERRUPT (1 << 0) | 2300 | #define GT_USER_INTERRUPT (1 << 0) |
2290 | 2301 | ||
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 38110ce742a5..759c2ef72eff 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -247,19 +247,19 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
247 | 247 | ||
248 | static bool intel_crt_detect_ddc(struct drm_connector *connector) | 248 | static bool intel_crt_detect_ddc(struct drm_connector *connector) |
249 | { | 249 | { |
250 | struct intel_output *intel_output = to_intel_output(connector); | 250 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
251 | 251 | ||
252 | /* CRT should always be at 0, but check anyway */ | 252 | /* CRT should always be at 0, but check anyway */ |
253 | if (intel_output->type != INTEL_OUTPUT_ANALOG) | 253 | if (intel_encoder->type != INTEL_OUTPUT_ANALOG) |
254 | return false; | 254 | return false; |
255 | 255 | ||
256 | return intel_ddc_probe(intel_output); | 256 | return intel_ddc_probe(intel_encoder); |
257 | } | 257 | } |
258 | 258 | ||
259 | static enum drm_connector_status | 259 | static enum drm_connector_status |
260 | intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) | 260 | intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) |
261 | { | 261 | { |
262 | struct drm_encoder *encoder = &intel_output->enc; | 262 | struct drm_encoder *encoder = &intel_encoder->enc; |
263 | struct drm_device *dev = encoder->dev; | 263 | struct drm_device *dev = encoder->dev; |
264 | struct drm_i915_private *dev_priv = dev->dev_private; | 264 | struct drm_i915_private *dev_priv = dev->dev_private; |
265 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 265 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
@@ -387,8 +387,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) | |||
387 | static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) | 387 | static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) |
388 | { | 388 | { |
389 | struct drm_device *dev = connector->dev; | 389 | struct drm_device *dev = connector->dev; |
390 | struct intel_output *intel_output = to_intel_output(connector); | 390 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
391 | struct drm_encoder *encoder = &intel_output->enc; | 391 | struct drm_encoder *encoder = &intel_encoder->enc; |
392 | struct drm_crtc *crtc; | 392 | struct drm_crtc *crtc; |
393 | int dpms_mode; | 393 | int dpms_mode; |
394 | enum drm_connector_status status; | 394 | enum drm_connector_status status; |
@@ -405,13 +405,13 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto | |||
405 | 405 | ||
406 | /* for pre-945g platforms use load detect */ | 406 | /* for pre-945g platforms use load detect */ |
407 | if (encoder->crtc && encoder->crtc->enabled) { | 407 | if (encoder->crtc && encoder->crtc->enabled) { |
408 | status = intel_crt_load_detect(encoder->crtc, intel_output); | 408 | status = intel_crt_load_detect(encoder->crtc, intel_encoder); |
409 | } else { | 409 | } else { |
410 | crtc = intel_get_load_detect_pipe(intel_output, | 410 | crtc = intel_get_load_detect_pipe(intel_encoder, |
411 | NULL, &dpms_mode); | 411 | NULL, &dpms_mode); |
412 | if (crtc) { | 412 | if (crtc) { |
413 | status = intel_crt_load_detect(crtc, intel_output); | 413 | status = intel_crt_load_detect(crtc, intel_encoder); |
414 | intel_release_load_detect_pipe(intel_output, dpms_mode); | 414 | intel_release_load_detect_pipe(intel_encoder, dpms_mode); |
415 | } else | 415 | } else |
416 | status = connector_status_unknown; | 416 | status = connector_status_unknown; |
417 | } | 417 | } |
@@ -421,9 +421,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto | |||
421 | 421 | ||
422 | static void intel_crt_destroy(struct drm_connector *connector) | 422 | static void intel_crt_destroy(struct drm_connector *connector) |
423 | { | 423 | { |
424 | struct intel_output *intel_output = to_intel_output(connector); | 424 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
425 | 425 | ||
426 | intel_i2c_destroy(intel_output->ddc_bus); | 426 | intel_i2c_destroy(intel_encoder->ddc_bus); |
427 | drm_sysfs_connector_remove(connector); | 427 | drm_sysfs_connector_remove(connector); |
428 | drm_connector_cleanup(connector); | 428 | drm_connector_cleanup(connector); |
429 | kfree(connector); | 429 | kfree(connector); |
@@ -432,28 +432,28 @@ static void intel_crt_destroy(struct drm_connector *connector) | |||
432 | static int intel_crt_get_modes(struct drm_connector *connector) | 432 | static int intel_crt_get_modes(struct drm_connector *connector) |
433 | { | 433 | { |
434 | int ret; | 434 | int ret; |
435 | struct intel_output *intel_output = to_intel_output(connector); | 435 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
436 | struct i2c_adapter *ddcbus; | 436 | struct i2c_adapter *ddcbus; |
437 | struct drm_device *dev = connector->dev; | 437 | struct drm_device *dev = connector->dev; |
438 | 438 | ||
439 | 439 | ||
440 | ret = intel_ddc_get_modes(intel_output); | 440 | ret = intel_ddc_get_modes(intel_encoder); |
441 | if (ret || !IS_G4X(dev)) | 441 | if (ret || !IS_G4X(dev)) |
442 | goto end; | 442 | goto end; |
443 | 443 | ||
444 | ddcbus = intel_output->ddc_bus; | 444 | ddcbus = intel_encoder->ddc_bus; |
445 | /* Try to probe digital port for output in DVI-I -> VGA mode. */ | 445 | /* Try to probe digital port for output in DVI-I -> VGA mode. */ |
446 | intel_output->ddc_bus = | 446 | intel_encoder->ddc_bus = |
447 | intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); | 447 | intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); |
448 | 448 | ||
449 | if (!intel_output->ddc_bus) { | 449 | if (!intel_encoder->ddc_bus) { |
450 | intel_output->ddc_bus = ddcbus; | 450 | intel_encoder->ddc_bus = ddcbus; |
451 | dev_printk(KERN_ERR, &connector->dev->pdev->dev, | 451 | dev_printk(KERN_ERR, &connector->dev->pdev->dev, |
452 | "DDC bus registration failed for CRTDDC_D.\n"); | 452 | "DDC bus registration failed for CRTDDC_D.\n"); |
453 | goto end; | 453 | goto end; |
454 | } | 454 | } |
455 | /* Try to get modes by GPIOD port */ | 455 | /* Try to get modes by GPIOD port */ |
456 | ret = intel_ddc_get_modes(intel_output); | 456 | ret = intel_ddc_get_modes(intel_encoder); |
457 | intel_i2c_destroy(ddcbus); | 457 | intel_i2c_destroy(ddcbus); |
458 | 458 | ||
459 | end: | 459 | end: |
@@ -506,23 +506,23 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { | |||
506 | void intel_crt_init(struct drm_device *dev) | 506 | void intel_crt_init(struct drm_device *dev) |
507 | { | 507 | { |
508 | struct drm_connector *connector; | 508 | struct drm_connector *connector; |
509 | struct intel_output *intel_output; | 509 | struct intel_encoder *intel_encoder; |
510 | struct drm_i915_private *dev_priv = dev->dev_private; | 510 | struct drm_i915_private *dev_priv = dev->dev_private; |
511 | u32 i2c_reg; | 511 | u32 i2c_reg; |
512 | 512 | ||
513 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); | 513 | intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); |
514 | if (!intel_output) | 514 | if (!intel_encoder) |
515 | return; | 515 | return; |
516 | 516 | ||
517 | connector = &intel_output->base; | 517 | connector = &intel_encoder->base; |
518 | drm_connector_init(dev, &intel_output->base, | 518 | drm_connector_init(dev, &intel_encoder->base, |
519 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | 519 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
520 | 520 | ||
521 | drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, | 521 | drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, |
522 | DRM_MODE_ENCODER_DAC); | 522 | DRM_MODE_ENCODER_DAC); |
523 | 523 | ||
524 | drm_mode_connector_attach_encoder(&intel_output->base, | 524 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
525 | &intel_output->enc); | 525 | &intel_encoder->enc); |
526 | 526 | ||
527 | /* Set up the DDC bus. */ | 527 | /* Set up the DDC bus. */ |
528 | if (HAS_PCH_SPLIT(dev)) | 528 | if (HAS_PCH_SPLIT(dev)) |
@@ -533,22 +533,22 @@ void intel_crt_init(struct drm_device *dev) | |||
533 | if (dev_priv->crt_ddc_bus != 0) | 533 | if (dev_priv->crt_ddc_bus != 0) |
534 | i2c_reg = dev_priv->crt_ddc_bus; | 534 | i2c_reg = dev_priv->crt_ddc_bus; |
535 | } | 535 | } |
536 | intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); | 536 | intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); |
537 | if (!intel_output->ddc_bus) { | 537 | if (!intel_encoder->ddc_bus) { |
538 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " | 538 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " |
539 | "failed.\n"); | 539 | "failed.\n"); |
540 | return; | 540 | return; |
541 | } | 541 | } |
542 | 542 | ||
543 | intel_output->type = INTEL_OUTPUT_ANALOG; | 543 | intel_encoder->type = INTEL_OUTPUT_ANALOG; |
544 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 544 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
545 | (1 << INTEL_ANALOG_CLONE_BIT) | | 545 | (1 << INTEL_ANALOG_CLONE_BIT) | |
546 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 546 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); |
547 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 547 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
548 | connector->interlace_allowed = 0; | 548 | connector->interlace_allowed = 0; |
549 | connector->doublescan_allowed = 0; | 549 | connector->doublescan_allowed = 0; |
550 | 550 | ||
551 | drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); | 551 | drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); |
552 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | 552 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); |
553 | 553 | ||
554 | drm_sysfs_connector_add(connector); | 554 | drm_sysfs_connector_add(connector); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e7e753b2845f..c7502b6b1600 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -747,16 +747,16 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
747 | list_for_each_entry(l_entry, &mode_config->connector_list, head) { | 747 | list_for_each_entry(l_entry, &mode_config->connector_list, head) { |
748 | if (l_entry->encoder && | 748 | if (l_entry->encoder && |
749 | l_entry->encoder->crtc == crtc) { | 749 | l_entry->encoder->crtc == crtc) { |
750 | struct intel_output *intel_output = to_intel_output(l_entry); | 750 | struct intel_encoder *intel_encoder = to_intel_encoder(l_entry); |
751 | if (intel_output->type == type) | 751 | if (intel_encoder->type == type) |
752 | return true; | 752 | return true; |
753 | } | 753 | } |
754 | } | 754 | } |
755 | return false; | 755 | return false; |
756 | } | 756 | } |
757 | 757 | ||
758 | struct drm_connector * | 758 | static struct drm_connector * |
759 | intel_pipe_get_output (struct drm_crtc *crtc) | 759 | intel_pipe_get_connector (struct drm_crtc *crtc) |
760 | { | 760 | { |
761 | struct drm_device *dev = crtc->dev; | 761 | struct drm_device *dev = crtc->dev; |
762 | struct drm_mode_config *mode_config = &dev->mode_config; | 762 | struct drm_mode_config *mode_config = &dev->mode_config; |
@@ -1003,7 +1003,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1003 | struct drm_i915_private *dev_priv = dev->dev_private; | 1003 | struct drm_i915_private *dev_priv = dev->dev_private; |
1004 | struct drm_framebuffer *fb = crtc->fb; | 1004 | struct drm_framebuffer *fb = crtc->fb; |
1005 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1005 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1006 | struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; | 1006 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); |
1007 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1007 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1008 | int plane, i; | 1008 | int plane, i; |
1009 | u32 fbc_ctl, fbc_ctl2; | 1009 | u32 fbc_ctl, fbc_ctl2; |
@@ -1080,7 +1080,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1080 | struct drm_i915_private *dev_priv = dev->dev_private; | 1080 | struct drm_i915_private *dev_priv = dev->dev_private; |
1081 | struct drm_framebuffer *fb = crtc->fb; | 1081 | struct drm_framebuffer *fb = crtc->fb; |
1082 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1082 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1083 | struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; | 1083 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); |
1084 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1084 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1085 | int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : | 1085 | int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : |
1086 | DPFC_CTL_PLANEB); | 1086 | DPFC_CTL_PLANEB); |
@@ -1176,7 +1176,7 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1176 | return; | 1176 | return; |
1177 | 1177 | ||
1178 | intel_fb = to_intel_framebuffer(fb); | 1178 | intel_fb = to_intel_framebuffer(fb); |
1179 | obj_priv = intel_fb->obj->driver_private; | 1179 | obj_priv = to_intel_bo(intel_fb->obj); |
1180 | 1180 | ||
1181 | /* | 1181 | /* |
1182 | * If FBC is already on, we just have to verify that we can | 1182 | * If FBC is already on, we just have to verify that we can |
@@ -1243,7 +1243,7 @@ out_disable: | |||
1243 | static int | 1243 | static int |
1244 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | 1244 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) |
1245 | { | 1245 | { |
1246 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1246 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1247 | u32 alignment; | 1247 | u32 alignment; |
1248 | int ret; | 1248 | int ret; |
1249 | 1249 | ||
@@ -1323,7 +1323,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1323 | 1323 | ||
1324 | intel_fb = to_intel_framebuffer(crtc->fb); | 1324 | intel_fb = to_intel_framebuffer(crtc->fb); |
1325 | obj = intel_fb->obj; | 1325 | obj = intel_fb->obj; |
1326 | obj_priv = obj->driver_private; | 1326 | obj_priv = to_intel_bo(obj); |
1327 | 1327 | ||
1328 | mutex_lock(&dev->struct_mutex); | 1328 | mutex_lock(&dev->struct_mutex); |
1329 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 1329 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
@@ -1401,7 +1401,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1401 | 1401 | ||
1402 | if (old_fb) { | 1402 | if (old_fb) { |
1403 | intel_fb = to_intel_framebuffer(old_fb); | 1403 | intel_fb = to_intel_framebuffer(old_fb); |
1404 | obj_priv = intel_fb->obj->driver_private; | 1404 | obj_priv = to_intel_bo(intel_fb->obj); |
1405 | i915_gem_object_unpin(intel_fb->obj); | 1405 | i915_gem_object_unpin(intel_fb->obj); |
1406 | } | 1406 | } |
1407 | intel_increase_pllclock(crtc, true); | 1407 | intel_increase_pllclock(crtc, true); |
@@ -2917,7 +2917,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2917 | int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; | 2917 | int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; |
2918 | int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; | 2918 | int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; |
2919 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | 2919 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; |
2920 | int refclk, num_outputs = 0; | 2920 | int refclk, num_connectors = 0; |
2921 | intel_clock_t clock, reduced_clock; | 2921 | intel_clock_t clock, reduced_clock; |
2922 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; | 2922 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; |
2923 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; | 2923 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; |
@@ -2943,19 +2943,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2943 | drm_vblank_pre_modeset(dev, pipe); | 2943 | drm_vblank_pre_modeset(dev, pipe); |
2944 | 2944 | ||
2945 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 2945 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
2946 | struct intel_output *intel_output = to_intel_output(connector); | 2946 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
2947 | 2947 | ||
2948 | if (!connector->encoder || connector->encoder->crtc != crtc) | 2948 | if (!connector->encoder || connector->encoder->crtc != crtc) |
2949 | continue; | 2949 | continue; |
2950 | 2950 | ||
2951 | switch (intel_output->type) { | 2951 | switch (intel_encoder->type) { |
2952 | case INTEL_OUTPUT_LVDS: | 2952 | case INTEL_OUTPUT_LVDS: |
2953 | is_lvds = true; | 2953 | is_lvds = true; |
2954 | break; | 2954 | break; |
2955 | case INTEL_OUTPUT_SDVO: | 2955 | case INTEL_OUTPUT_SDVO: |
2956 | case INTEL_OUTPUT_HDMI: | 2956 | case INTEL_OUTPUT_HDMI: |
2957 | is_sdvo = true; | 2957 | is_sdvo = true; |
2958 | if (intel_output->needs_tv_clock) | 2958 | if (intel_encoder->needs_tv_clock) |
2959 | is_tv = true; | 2959 | is_tv = true; |
2960 | break; | 2960 | break; |
2961 | case INTEL_OUTPUT_DVO: | 2961 | case INTEL_OUTPUT_DVO: |
@@ -2975,10 +2975,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2975 | break; | 2975 | break; |
2976 | } | 2976 | } |
2977 | 2977 | ||
2978 | num_outputs++; | 2978 | num_connectors++; |
2979 | } | 2979 | } |
2980 | 2980 | ||
2981 | if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { | 2981 | if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { |
2982 | refclk = dev_priv->lvds_ssc_freq * 1000; | 2982 | refclk = dev_priv->lvds_ssc_freq * 1000; |
2983 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | 2983 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
2984 | refclk / 1000); | 2984 | refclk / 1000); |
@@ -3049,8 +3049,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3049 | if (is_edp) { | 3049 | if (is_edp) { |
3050 | struct drm_connector *edp; | 3050 | struct drm_connector *edp; |
3051 | target_clock = mode->clock; | 3051 | target_clock = mode->clock; |
3052 | edp = intel_pipe_get_output(crtc); | 3052 | edp = intel_pipe_get_connector(crtc); |
3053 | intel_edp_link_config(to_intel_output(edp), | 3053 | intel_edp_link_config(to_intel_encoder(edp), |
3054 | &lane, &link_bw); | 3054 | &lane, &link_bw); |
3055 | } else { | 3055 | } else { |
3056 | /* DP over FDI requires target mode clock | 3056 | /* DP over FDI requires target mode clock |
@@ -3231,7 +3231,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3231 | /* XXX: just matching BIOS for now */ | 3231 | /* XXX: just matching BIOS for now */ |
3232 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | 3232 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
3233 | dpll |= 3; | 3233 | dpll |= 3; |
3234 | else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) | 3234 | else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) |
3235 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | 3235 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
3236 | else | 3236 | else |
3237 | dpll |= PLL_REF_INPUT_DREFCLK; | 3237 | dpll |= PLL_REF_INPUT_DREFCLK; |
@@ -3511,7 +3511,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3511 | if (!bo) | 3511 | if (!bo) |
3512 | return -ENOENT; | 3512 | return -ENOENT; |
3513 | 3513 | ||
3514 | obj_priv = bo->driver_private; | 3514 | obj_priv = to_intel_bo(bo); |
3515 | 3515 | ||
3516 | if (bo->size < width * height * 4) { | 3516 | if (bo->size < width * height * 4) { |
3517 | DRM_ERROR("buffer is to small\n"); | 3517 | DRM_ERROR("buffer is to small\n"); |
@@ -3655,9 +3655,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
3655 | * detection. | 3655 | * detection. |
3656 | * | 3656 | * |
3657 | * It will be up to the load-detect code to adjust the pipe as appropriate for | 3657 | * It will be up to the load-detect code to adjust the pipe as appropriate for |
3658 | * its requirements. The pipe will be connected to no other outputs. | 3658 | * its requirements. The pipe will be connected to no other encoders. |
3659 | * | 3659 | * |
3660 | * Currently this code will only succeed if there is a pipe with no outputs | 3660 | * Currently this code will only succeed if there is a pipe with no encoders |
3661 | * configured for it. In the future, it could choose to temporarily disable | 3661 | * configured for it. In the future, it could choose to temporarily disable |
3662 | * some outputs to free up a pipe for its use. | 3662 | * some outputs to free up a pipe for its use. |
3663 | * | 3663 | * |
@@ -3670,14 +3670,14 @@ static struct drm_display_mode load_detect_mode = { | |||
3670 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 3670 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
3671 | }; | 3671 | }; |
3672 | 3672 | ||
3673 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | 3673 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
3674 | struct drm_display_mode *mode, | 3674 | struct drm_display_mode *mode, |
3675 | int *dpms_mode) | 3675 | int *dpms_mode) |
3676 | { | 3676 | { |
3677 | struct intel_crtc *intel_crtc; | 3677 | struct intel_crtc *intel_crtc; |
3678 | struct drm_crtc *possible_crtc; | 3678 | struct drm_crtc *possible_crtc; |
3679 | struct drm_crtc *supported_crtc =NULL; | 3679 | struct drm_crtc *supported_crtc =NULL; |
3680 | struct drm_encoder *encoder = &intel_output->enc; | 3680 | struct drm_encoder *encoder = &intel_encoder->enc; |
3681 | struct drm_crtc *crtc = NULL; | 3681 | struct drm_crtc *crtc = NULL; |
3682 | struct drm_device *dev = encoder->dev; | 3682 | struct drm_device *dev = encoder->dev; |
3683 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 3683 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
@@ -3729,8 +3729,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | |||
3729 | } | 3729 | } |
3730 | 3730 | ||
3731 | encoder->crtc = crtc; | 3731 | encoder->crtc = crtc; |
3732 | intel_output->base.encoder = encoder; | 3732 | intel_encoder->base.encoder = encoder; |
3733 | intel_output->load_detect_temp = true; | 3733 | intel_encoder->load_detect_temp = true; |
3734 | 3734 | ||
3735 | intel_crtc = to_intel_crtc(crtc); | 3735 | intel_crtc = to_intel_crtc(crtc); |
3736 | *dpms_mode = intel_crtc->dpms_mode; | 3736 | *dpms_mode = intel_crtc->dpms_mode; |
@@ -3755,23 +3755,23 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | |||
3755 | return crtc; | 3755 | return crtc; |
3756 | } | 3756 | } |
3757 | 3757 | ||
3758 | void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode) | 3758 | void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode) |
3759 | { | 3759 | { |
3760 | struct drm_encoder *encoder = &intel_output->enc; | 3760 | struct drm_encoder *encoder = &intel_encoder->enc; |
3761 | struct drm_device *dev = encoder->dev; | 3761 | struct drm_device *dev = encoder->dev; |
3762 | struct drm_crtc *crtc = encoder->crtc; | 3762 | struct drm_crtc *crtc = encoder->crtc; |
3763 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 3763 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
3764 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | 3764 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
3765 | 3765 | ||
3766 | if (intel_output->load_detect_temp) { | 3766 | if (intel_encoder->load_detect_temp) { |
3767 | encoder->crtc = NULL; | 3767 | encoder->crtc = NULL; |
3768 | intel_output->base.encoder = NULL; | 3768 | intel_encoder->base.encoder = NULL; |
3769 | intel_output->load_detect_temp = false; | 3769 | intel_encoder->load_detect_temp = false; |
3770 | crtc->enabled = drm_helper_crtc_in_use(crtc); | 3770 | crtc->enabled = drm_helper_crtc_in_use(crtc); |
3771 | drm_helper_disable_unused_functions(dev); | 3771 | drm_helper_disable_unused_functions(dev); |
3772 | } | 3772 | } |
3773 | 3773 | ||
3774 | /* Switch crtc and output back off if necessary */ | 3774 | /* Switch crtc and encoder back off if necessary */ |
3775 | if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { | 3775 | if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { |
3776 | if (encoder->crtc == crtc) | 3776 | if (encoder->crtc == crtc) |
3777 | encoder_funcs->dpms(encoder, dpms_mode); | 3777 | encoder_funcs->dpms(encoder, dpms_mode); |
@@ -4156,7 +4156,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
4156 | work = intel_crtc->unpin_work; | 4156 | work = intel_crtc->unpin_work; |
4157 | if (work == NULL || !work->pending) { | 4157 | if (work == NULL || !work->pending) { |
4158 | if (work && !work->pending) { | 4158 | if (work && !work->pending) { |
4159 | obj_priv = work->pending_flip_obj->driver_private; | 4159 | obj_priv = to_intel_bo(work->pending_flip_obj); |
4160 | DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", | 4160 | DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", |
4161 | obj_priv, | 4161 | obj_priv, |
4162 | atomic_read(&obj_priv->pending_flip)); | 4162 | atomic_read(&obj_priv->pending_flip)); |
@@ -4181,7 +4181,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
4181 | 4181 | ||
4182 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4182 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4183 | 4183 | ||
4184 | obj_priv = work->pending_flip_obj->driver_private; | 4184 | obj_priv = to_intel_bo(work->pending_flip_obj); |
4185 | 4185 | ||
4186 | /* Initial scanout buffer will have a 0 pending flip count */ | 4186 | /* Initial scanout buffer will have a 0 pending flip count */ |
4187 | if ((atomic_read(&obj_priv->pending_flip) == 0) || | 4187 | if ((atomic_read(&obj_priv->pending_flip) == 0) || |
@@ -4252,7 +4252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4252 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 4252 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
4253 | if (ret != 0) { | 4253 | if (ret != 0) { |
4254 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | 4254 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", |
4255 | obj->driver_private); | 4255 | to_intel_bo(obj)); |
4256 | kfree(work); | 4256 | kfree(work); |
4257 | intel_crtc->unpin_work = NULL; | 4257 | intel_crtc->unpin_work = NULL; |
4258 | mutex_unlock(&dev->struct_mutex); | 4258 | mutex_unlock(&dev->struct_mutex); |
@@ -4266,7 +4266,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4266 | crtc->fb = fb; | 4266 | crtc->fb = fb; |
4267 | i915_gem_object_flush_write_domain(obj); | 4267 | i915_gem_object_flush_write_domain(obj); |
4268 | drm_vblank_get(dev, intel_crtc->pipe); | 4268 | drm_vblank_get(dev, intel_crtc->pipe); |
4269 | obj_priv = obj->driver_private; | 4269 | obj_priv = to_intel_bo(obj); |
4270 | atomic_inc(&obj_priv->pending_flip); | 4270 | atomic_inc(&obj_priv->pending_flip); |
4271 | work->pending_flip_obj = obj; | 4271 | work->pending_flip_obj = obj; |
4272 | 4272 | ||
@@ -4399,8 +4399,8 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask) | |||
4399 | int entry = 0; | 4399 | int entry = 0; |
4400 | 4400 | ||
4401 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 4401 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
4402 | struct intel_output *intel_output = to_intel_output(connector); | 4402 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
4403 | if (type_mask & intel_output->clone_mask) | 4403 | if (type_mask & intel_encoder->clone_mask) |
4404 | index_mask |= (1 << entry); | 4404 | index_mask |= (1 << entry); |
4405 | entry++; | 4405 | entry++; |
4406 | } | 4406 | } |
@@ -4495,12 +4495,12 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4495 | intel_tv_init(dev); | 4495 | intel_tv_init(dev); |
4496 | 4496 | ||
4497 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 4497 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
4498 | struct intel_output *intel_output = to_intel_output(connector); | 4498 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
4499 | struct drm_encoder *encoder = &intel_output->enc; | 4499 | struct drm_encoder *encoder = &intel_encoder->enc; |
4500 | 4500 | ||
4501 | encoder->possible_crtcs = intel_output->crtc_mask; | 4501 | encoder->possible_crtcs = intel_encoder->crtc_mask; |
4502 | encoder->possible_clones = intel_connector_clones(dev, | 4502 | encoder->possible_clones = intel_connector_clones(dev, |
4503 | intel_output->clone_mask); | 4503 | intel_encoder->clone_mask); |
4504 | } | 4504 | } |
4505 | } | 4505 | } |
4506 | 4506 | ||
@@ -4779,14 +4779,14 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4779 | struct drm_i915_gem_object *obj_priv = NULL; | 4779 | struct drm_i915_gem_object *obj_priv = NULL; |
4780 | 4780 | ||
4781 | if (dev_priv->pwrctx) { | 4781 | if (dev_priv->pwrctx) { |
4782 | obj_priv = dev_priv->pwrctx->driver_private; | 4782 | obj_priv = to_intel_bo(dev_priv->pwrctx); |
4783 | } else { | 4783 | } else { |
4784 | struct drm_gem_object *pwrctx; | 4784 | struct drm_gem_object *pwrctx; |
4785 | 4785 | ||
4786 | pwrctx = intel_alloc_power_context(dev); | 4786 | pwrctx = intel_alloc_power_context(dev); |
4787 | if (pwrctx) { | 4787 | if (pwrctx) { |
4788 | dev_priv->pwrctx = pwrctx; | 4788 | dev_priv->pwrctx = pwrctx; |
4789 | obj_priv = pwrctx->driver_private; | 4789 | obj_priv = to_intel_bo(pwrctx); |
4790 | } | 4790 | } |
4791 | } | 4791 | } |
4792 | 4792 | ||
@@ -4815,7 +4815,7 @@ static void intel_init_display(struct drm_device *dev) | |||
4815 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | 4815 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
4816 | dev_priv->display.enable_fbc = g4x_enable_fbc; | 4816 | dev_priv->display.enable_fbc = g4x_enable_fbc; |
4817 | dev_priv->display.disable_fbc = g4x_disable_fbc; | 4817 | dev_priv->display.disable_fbc = g4x_disable_fbc; |
4818 | } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { | 4818 | } else if (IS_I965GM(dev)) { |
4819 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | 4819 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; |
4820 | dev_priv->display.enable_fbc = i8xx_enable_fbc; | 4820 | dev_priv->display.enable_fbc = i8xx_enable_fbc; |
4821 | dev_priv->display.disable_fbc = i8xx_disable_fbc; | 4821 | dev_priv->display.disable_fbc = i8xx_disable_fbc; |
@@ -4853,17 +4853,18 @@ static void intel_init_display(struct drm_device *dev) | |||
4853 | dev_priv->display.update_wm = g4x_update_wm; | 4853 | dev_priv->display.update_wm = g4x_update_wm; |
4854 | else if (IS_I965G(dev)) | 4854 | else if (IS_I965G(dev)) |
4855 | dev_priv->display.update_wm = i965_update_wm; | 4855 | dev_priv->display.update_wm = i965_update_wm; |
4856 | else if (IS_I9XX(dev) || IS_MOBILE(dev)) { | 4856 | else if (IS_I9XX(dev)) { |
4857 | dev_priv->display.update_wm = i9xx_update_wm; | 4857 | dev_priv->display.update_wm = i9xx_update_wm; |
4858 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | 4858 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; |
4859 | } else if (IS_I85X(dev)) { | ||
4860 | dev_priv->display.update_wm = i9xx_update_wm; | ||
4861 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | ||
4859 | } else { | 4862 | } else { |
4860 | if (IS_I85X(dev)) | 4863 | dev_priv->display.update_wm = i830_update_wm; |
4861 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | 4864 | if (IS_845G(dev)) |
4862 | else if (IS_845G(dev)) | ||
4863 | dev_priv->display.get_fifo_size = i845_get_fifo_size; | 4865 | dev_priv->display.get_fifo_size = i845_get_fifo_size; |
4864 | else | 4866 | else |
4865 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | 4867 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
4866 | dev_priv->display.update_wm = i830_update_wm; | ||
4867 | } | 4868 | } |
4868 | } | 4869 | } |
4869 | 4870 | ||
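
The hunk above reworks the per-generation dispatch for watermark and FIFO-size handling: each chipset family gets its update_wm and get_fifo_size callbacks assigned once at init time, and the i85x case is split out of the catch-all branch so it can pair the i9xx watermark path with its own FIFO-size helper. A minimal sketch of the same callback-table idea, using entirely hypothetical type and function names (the real i915 ops struct and callback signatures may differ):

	/* hypothetical per-generation ops table, resolved once at init */
	struct display_ops {
		void (*update_wm)(struct drm_device *dev);
		int  (*get_fifo_size)(struct drm_device *dev, int plane);
	};

	static void init_display_ops(struct display_ops *ops,
				     struct drm_device *dev)
	{
		if (IS_I965G(dev)) {
			ops->update_wm = gen4_update_wm;      /* hypothetical */
		} else if (IS_I9XX(dev)) {
			ops->update_wm     = gen3_update_wm;   /* hypothetical */
			ops->get_fifo_size = gen3_get_fifo;    /* hypothetical */
		} else {
			ops->update_wm     = gen2_update_wm;   /* hypothetical */
			ops->get_fifo_size = gen2_get_fifo;    /* hypothetical */
		}
	}

Callers then invoke the table entries (dev_priv->display.update_wm and friends) without caring which generation they run on.
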
@@ -4957,7 +4958,7 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
4957 | if (dev_priv->pwrctx) { | 4958 | if (dev_priv->pwrctx) { |
4958 | struct drm_i915_gem_object *obj_priv; | 4959 | struct drm_i915_gem_object *obj_priv; |
4959 | 4960 | ||
4960 | obj_priv = dev_priv->pwrctx->driver_private; | 4961 | obj_priv = to_intel_bo(dev_priv->pwrctx); |
4961 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); | 4962 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); |
4962 | I915_READ(PWRCTXA); | 4963 | I915_READ(PWRCTXA); |
4963 | i915_gem_object_unpin(dev_priv->pwrctx); | 4964 | i915_gem_object_unpin(dev_priv->pwrctx); |
@@ -4978,9 +4979,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
4978 | */ | 4979 | */ |
4979 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) | 4980 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) |
4980 | { | 4981 | { |
4981 | struct intel_output *intel_output = to_intel_output(connector); | 4982 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
4982 | 4983 | ||
4983 | return &intel_output->enc; | 4984 | return &intel_encoder->enc; |
4984 | } | 4985 | } |
4985 | 4986 | ||
4986 | /* | 4987 | /* |
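
The obj_priv = to_intel_bo(obj) conversions in the hunks above replace open-coded obj->driver_private dereferences with a single named accessor. A minimal sketch of such a helper, assuming it is nothing more than a typed wrapper around the existing driver_private pointer (the real definition in the driver may differ):

	/* hypothetical sketch: typed accessor for the i915 bookkeeping
	 * structure hanging off a GEM object; assumes driver_private
	 * still backs it at this point in the driver's history */
	static inline struct drm_i915_gem_object *
	to_intel_bo(struct drm_gem_object *obj)
	{
		return obj->driver_private;
	}

Funnelling every cast through one helper means a later change of representation (for example, embedding the i915 object and recovering it with container_of) touches one definition instead of every call site shown above.
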
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 8e283f75941d..77e40cfcf216 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -55,23 +55,23 @@ struct intel_dp_priv { | |||
55 | uint8_t link_bw; | 55 | uint8_t link_bw; |
56 | uint8_t lane_count; | 56 | uint8_t lane_count; |
57 | uint8_t dpcd[4]; | 57 | uint8_t dpcd[4]; |
58 | struct intel_output *intel_output; | 58 | struct intel_encoder *intel_encoder; |
59 | struct i2c_adapter adapter; | 59 | struct i2c_adapter adapter; |
60 | struct i2c_algo_dp_aux_data algo; | 60 | struct i2c_algo_dp_aux_data algo; |
61 | }; | 61 | }; |
62 | 62 | ||
63 | static void | 63 | static void |
64 | intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | 64 | intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, |
65 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); | 65 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); |
66 | 66 | ||
67 | static void | 67 | static void |
68 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); | 68 | intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP); |
69 | 69 | ||
70 | void | 70 | void |
71 | intel_edp_link_config (struct intel_output *intel_output, | 71 | intel_edp_link_config (struct intel_encoder *intel_encoder, |
72 | int *lane_num, int *link_bw) | 72 | int *lane_num, int *link_bw) |
73 | { | 73 | { |
74 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 74 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
75 | 75 | ||
76 | *lane_num = dp_priv->lane_count; | 76 | *lane_num = dp_priv->lane_count; |
77 | if (dp_priv->link_bw == DP_LINK_BW_1_62) | 77 | if (dp_priv->link_bw == DP_LINK_BW_1_62) |
@@ -81,9 +81,9 @@ intel_edp_link_config (struct intel_output *intel_output, | |||
81 | } | 81 | } |
82 | 82 | ||
83 | static int | 83 | static int |
84 | intel_dp_max_lane_count(struct intel_output *intel_output) | 84 | intel_dp_max_lane_count(struct intel_encoder *intel_encoder) |
85 | { | 85 | { |
86 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 86 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
87 | int max_lane_count = 4; | 87 | int max_lane_count = 4; |
88 | 88 | ||
89 | if (dp_priv->dpcd[0] >= 0x11) { | 89 | if (dp_priv->dpcd[0] >= 0x11) { |
@@ -99,9 +99,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output) | |||
99 | } | 99 | } |
100 | 100 | ||
101 | static int | 101 | static int |
102 | intel_dp_max_link_bw(struct intel_output *intel_output) | 102 | intel_dp_max_link_bw(struct intel_encoder *intel_encoder) |
103 | { | 103 | { |
104 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 104 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
105 | int max_link_bw = dp_priv->dpcd[1]; | 105 | int max_link_bw = dp_priv->dpcd[1]; |
106 | 106 | ||
107 | switch (max_link_bw) { | 107 | switch (max_link_bw) { |
@@ -127,11 +127,11 @@ intel_dp_link_clock(uint8_t link_bw) | |||
127 | /* I think this is a fiction */ | 127 | /* I think this is a fiction */ |
128 | static int | 128 | static int |
129 | intel_dp_link_required(struct drm_device *dev, | 129 | intel_dp_link_required(struct drm_device *dev, |
130 | struct intel_output *intel_output, int pixel_clock) | 130 | struct intel_encoder *intel_encoder, int pixel_clock) |
131 | { | 131 | { |
132 | struct drm_i915_private *dev_priv = dev->dev_private; | 132 | struct drm_i915_private *dev_priv = dev->dev_private; |
133 | 133 | ||
134 | if (IS_eDP(intel_output)) | 134 | if (IS_eDP(intel_encoder)) |
135 | return (pixel_clock * dev_priv->edp_bpp) / 8; | 135 | return (pixel_clock * dev_priv->edp_bpp) / 8; |
136 | else | 136 | else |
137 | return pixel_clock * 3; | 137 | return pixel_clock * 3; |
@@ -141,11 +141,11 @@ static int | |||
141 | intel_dp_mode_valid(struct drm_connector *connector, | 141 | intel_dp_mode_valid(struct drm_connector *connector, |
142 | struct drm_display_mode *mode) | 142 | struct drm_display_mode *mode) |
143 | { | 143 | { |
144 | struct intel_output *intel_output = to_intel_output(connector); | 144 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
145 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); | 145 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); |
146 | int max_lanes = intel_dp_max_lane_count(intel_output); | 146 | int max_lanes = intel_dp_max_lane_count(intel_encoder); |
147 | 147 | ||
148 | if (intel_dp_link_required(connector->dev, intel_output, mode->clock) | 148 | if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) |
149 | > max_link_clock * max_lanes) | 149 | > max_link_clock * max_lanes) |
150 | return MODE_CLOCK_HIGH; | 150 | return MODE_CLOCK_HIGH; |
151 | 151 | ||
@@ -209,13 +209,13 @@ intel_hrawclk(struct drm_device *dev) | |||
209 | } | 209 | } |
210 | 210 | ||
211 | static int | 211 | static int |
212 | intel_dp_aux_ch(struct intel_output *intel_output, | 212 | intel_dp_aux_ch(struct intel_encoder *intel_encoder, |
213 | uint8_t *send, int send_bytes, | 213 | uint8_t *send, int send_bytes, |
214 | uint8_t *recv, int recv_size) | 214 | uint8_t *recv, int recv_size) |
215 | { | 215 | { |
216 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 216 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
217 | uint32_t output_reg = dp_priv->output_reg; | 217 | uint32_t output_reg = dp_priv->output_reg; |
218 | struct drm_device *dev = intel_output->base.dev; | 218 | struct drm_device *dev = intel_encoder->base.dev; |
219 | struct drm_i915_private *dev_priv = dev->dev_private; | 219 | struct drm_i915_private *dev_priv = dev->dev_private; |
220 | uint32_t ch_ctl = output_reg + 0x10; | 220 | uint32_t ch_ctl = output_reg + 0x10; |
221 | uint32_t ch_data = ch_ctl + 4; | 221 | uint32_t ch_data = ch_ctl + 4; |
@@ -230,7 +230,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, | |||
230 | * and would like to run at 2MHz. So, take the | 230 | * and would like to run at 2MHz. So, take the |
231 | * hrawclk value and divide by 2 and use that | 231 | * hrawclk value and divide by 2 and use that |
232 | */ | 232 | */ |
233 | if (IS_eDP(intel_output)) | 233 | if (IS_eDP(intel_encoder)) |
234 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ | 234 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ |
235 | else if (HAS_PCH_SPLIT(dev)) | 235 | else if (HAS_PCH_SPLIT(dev)) |
236 | aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ | 236 | aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ |
@@ -313,7 +313,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, | |||
313 | 313 | ||
314 | /* Write data to the aux channel in native mode */ | 314 | /* Write data to the aux channel in native mode */ |
315 | static int | 315 | static int |
316 | intel_dp_aux_native_write(struct intel_output *intel_output, | 316 | intel_dp_aux_native_write(struct intel_encoder *intel_encoder, |
317 | uint16_t address, uint8_t *send, int send_bytes) | 317 | uint16_t address, uint8_t *send, int send_bytes) |
318 | { | 318 | { |
319 | int ret; | 319 | int ret; |
@@ -330,7 +330,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output, | |||
330 | memcpy(&msg[4], send, send_bytes); | 330 | memcpy(&msg[4], send, send_bytes); |
331 | msg_bytes = send_bytes + 4; | 331 | msg_bytes = send_bytes + 4; |
332 | for (;;) { | 332 | for (;;) { |
333 | ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); | 333 | ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1); |
334 | if (ret < 0) | 334 | if (ret < 0) |
335 | return ret; | 335 | return ret; |
336 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 336 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
@@ -345,15 +345,15 @@ intel_dp_aux_native_write(struct intel_output *intel_output, | |||
345 | 345 | ||
346 | /* Write a single byte to the aux channel in native mode */ | 346 | /* Write a single byte to the aux channel in native mode */ |
347 | static int | 347 | static int |
348 | intel_dp_aux_native_write_1(struct intel_output *intel_output, | 348 | intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder, |
349 | uint16_t address, uint8_t byte) | 349 | uint16_t address, uint8_t byte) |
350 | { | 350 | { |
351 | return intel_dp_aux_native_write(intel_output, address, &byte, 1); | 351 | return intel_dp_aux_native_write(intel_encoder, address, &byte, 1); |
352 | } | 352 | } |
353 | 353 | ||
354 | /* read bytes from a native aux channel */ | 354 | /* read bytes from a native aux channel */ |
355 | static int | 355 | static int |
356 | intel_dp_aux_native_read(struct intel_output *intel_output, | 356 | intel_dp_aux_native_read(struct intel_encoder *intel_encoder, |
357 | uint16_t address, uint8_t *recv, int recv_bytes) | 357 | uint16_t address, uint8_t *recv, int recv_bytes) |
358 | { | 358 | { |
359 | uint8_t msg[4]; | 359 | uint8_t msg[4]; |
@@ -372,7 +372,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output, | |||
372 | reply_bytes = recv_bytes + 1; | 372 | reply_bytes = recv_bytes + 1; |
373 | 373 | ||
374 | for (;;) { | 374 | for (;;) { |
375 | ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, | 375 | ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, |
376 | reply, reply_bytes); | 376 | reply, reply_bytes); |
377 | if (ret == 0) | 377 | if (ret == 0) |
378 | return -EPROTO; | 378 | return -EPROTO; |
@@ -398,7 +398,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
398 | struct intel_dp_priv *dp_priv = container_of(adapter, | 398 | struct intel_dp_priv *dp_priv = container_of(adapter, |
399 | struct intel_dp_priv, | 399 | struct intel_dp_priv, |
400 | adapter); | 400 | adapter); |
401 | struct intel_output *intel_output = dp_priv->intel_output; | 401 | struct intel_encoder *intel_encoder = dp_priv->intel_encoder; |
402 | uint16_t address = algo_data->address; | 402 | uint16_t address = algo_data->address; |
403 | uint8_t msg[5]; | 403 | uint8_t msg[5]; |
404 | uint8_t reply[2]; | 404 | uint8_t reply[2]; |
@@ -437,7 +437,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
437 | } | 437 | } |
438 | 438 | ||
439 | for (;;) { | 439 | for (;;) { |
440 | ret = intel_dp_aux_ch(intel_output, | 440 | ret = intel_dp_aux_ch(intel_encoder, |
441 | msg, msg_bytes, | 441 | msg, msg_bytes, |
442 | reply, reply_bytes); | 442 | reply, reply_bytes); |
443 | if (ret < 0) { | 443 | if (ret < 0) { |
@@ -465,9 +465,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
465 | } | 465 | } |
466 | 466 | ||
467 | static int | 467 | static int |
468 | intel_dp_i2c_init(struct intel_output *intel_output, const char *name) | 468 | intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name) |
469 | { | 469 | { |
470 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 470 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
471 | 471 | ||
472 | DRM_DEBUG_KMS("i2c_init %s\n", name); | 472 | DRM_DEBUG_KMS("i2c_init %s\n", name); |
473 | dp_priv->algo.running = false; | 473 | dp_priv->algo.running = false; |
@@ -480,7 +480,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name) | |||
480 | strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); | 480 | strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); |
481 | dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; | 481 | dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; |
482 | dp_priv->adapter.algo_data = &dp_priv->algo; | 482 | dp_priv->adapter.algo_data = &dp_priv->algo; |
483 | dp_priv->adapter.dev.parent = &intel_output->base.kdev; | 483 | dp_priv->adapter.dev.parent = &intel_encoder->base.kdev; |
484 | 484 | ||
485 | return i2c_dp_aux_add_bus(&dp_priv->adapter); | 485 | return i2c_dp_aux_add_bus(&dp_priv->adapter); |
486 | } | 486 | } |
@@ -489,18 +489,18 @@ static bool | |||
489 | intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | 489 | intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, |
490 | struct drm_display_mode *adjusted_mode) | 490 | struct drm_display_mode *adjusted_mode) |
491 | { | 491 | { |
492 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 492 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
493 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 493 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
494 | int lane_count, clock; | 494 | int lane_count, clock; |
495 | int max_lane_count = intel_dp_max_lane_count(intel_output); | 495 | int max_lane_count = intel_dp_max_lane_count(intel_encoder); |
496 | int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0; | 496 | int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; |
497 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 497 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
498 | 498 | ||
499 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 499 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
500 | for (clock = 0; clock <= max_clock; clock++) { | 500 | for (clock = 0; clock <= max_clock; clock++) { |
501 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; | 501 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; |
502 | 502 | ||
503 | if (intel_dp_link_required(encoder->dev, intel_output, mode->clock) | 503 | if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) |
504 | <= link_avail) { | 504 | <= link_avail) { |
505 | dp_priv->link_bw = bws[clock]; | 505 | dp_priv->link_bw = bws[clock]; |
506 | dp_priv->lane_count = lane_count; | 506 | dp_priv->lane_count = lane_count; |
@@ -562,16 +562,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
562 | struct intel_dp_m_n m_n; | 562 | struct intel_dp_m_n m_n; |
563 | 563 | ||
564 | /* | 564 | /* |
565 | * Find the lane count in the intel_output private | 565 | * Find the lane count in the intel_encoder private |
566 | */ | 566 | */ |
567 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 567 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
568 | struct intel_output *intel_output = to_intel_output(connector); | 568 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
569 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 569 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
570 | 570 | ||
571 | if (!connector->encoder || connector->encoder->crtc != crtc) | 571 | if (!connector->encoder || connector->encoder->crtc != crtc) |
572 | continue; | 572 | continue; |
573 | 573 | ||
574 | if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { | 574 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { |
575 | lane_count = dp_priv->lane_count; | 575 | lane_count = dp_priv->lane_count; |
576 | break; | 576 | break; |
577 | } | 577 | } |
@@ -626,9 +626,9 @@ static void | |||
626 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | 626 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, |
627 | struct drm_display_mode *adjusted_mode) | 627 | struct drm_display_mode *adjusted_mode) |
628 | { | 628 | { |
629 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 629 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
630 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 630 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
631 | struct drm_crtc *crtc = intel_output->enc.crtc; | 631 | struct drm_crtc *crtc = intel_encoder->enc.crtc; |
632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
633 | 633 | ||
634 | dp_priv->DP = (DP_LINK_TRAIN_OFF | | 634 | dp_priv->DP = (DP_LINK_TRAIN_OFF | |
@@ -667,7 +667,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
667 | if (intel_crtc->pipe == 1) | 667 | if (intel_crtc->pipe == 1) |
668 | dp_priv->DP |= DP_PIPEB_SELECT; | 668 | dp_priv->DP |= DP_PIPEB_SELECT; |
669 | 669 | ||
670 | if (IS_eDP(intel_output)) { | 670 | if (IS_eDP(intel_encoder)) { |
671 | /* don't miss out required setting for eDP */ | 671 | /* don't miss out required setting for eDP */ |
672 | dp_priv->DP |= DP_PLL_ENABLE; | 672 | dp_priv->DP |= DP_PLL_ENABLE; |
673 | if (adjusted_mode->clock < 200000) | 673 | if (adjusted_mode->clock < 200000) |
@@ -702,22 +702,22 @@ static void ironlake_edp_backlight_off (struct drm_device *dev) | |||
702 | static void | 702 | static void |
703 | intel_dp_dpms(struct drm_encoder *encoder, int mode) | 703 | intel_dp_dpms(struct drm_encoder *encoder, int mode) |
704 | { | 704 | { |
705 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 705 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
706 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 706 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
707 | struct drm_device *dev = intel_output->base.dev; | 707 | struct drm_device *dev = intel_encoder->base.dev; |
708 | struct drm_i915_private *dev_priv = dev->dev_private; | 708 | struct drm_i915_private *dev_priv = dev->dev_private; |
709 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); | 709 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); |
710 | 710 | ||
711 | if (mode != DRM_MODE_DPMS_ON) { | 711 | if (mode != DRM_MODE_DPMS_ON) { |
712 | if (dp_reg & DP_PORT_EN) { | 712 | if (dp_reg & DP_PORT_EN) { |
713 | intel_dp_link_down(intel_output, dp_priv->DP); | 713 | intel_dp_link_down(intel_encoder, dp_priv->DP); |
714 | if (IS_eDP(intel_output)) | 714 | if (IS_eDP(intel_encoder)) |
715 | ironlake_edp_backlight_off(dev); | 715 | ironlake_edp_backlight_off(dev); |
716 | } | 716 | } |
717 | } else { | 717 | } else { |
718 | if (!(dp_reg & DP_PORT_EN)) { | 718 | if (!(dp_reg & DP_PORT_EN)) { |
719 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | 719 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); |
720 | if (IS_eDP(intel_output)) | 720 | if (IS_eDP(intel_encoder)) |
721 | ironlake_edp_backlight_on(dev); | 721 | ironlake_edp_backlight_on(dev); |
722 | } | 722 | } |
723 | } | 723 | } |
@@ -729,12 +729,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
729 | * link status information | 729 | * link status information |
730 | */ | 730 | */ |
731 | static bool | 731 | static bool |
732 | intel_dp_get_link_status(struct intel_output *intel_output, | 732 | intel_dp_get_link_status(struct intel_encoder *intel_encoder, |
733 | uint8_t link_status[DP_LINK_STATUS_SIZE]) | 733 | uint8_t link_status[DP_LINK_STATUS_SIZE]) |
734 | { | 734 | { |
735 | int ret; | 735 | int ret; |
736 | 736 | ||
737 | ret = intel_dp_aux_native_read(intel_output, | 737 | ret = intel_dp_aux_native_read(intel_encoder, |
738 | DP_LANE0_1_STATUS, | 738 | DP_LANE0_1_STATUS, |
739 | link_status, DP_LINK_STATUS_SIZE); | 739 | link_status, DP_LINK_STATUS_SIZE); |
740 | if (ret != DP_LINK_STATUS_SIZE) | 740 | if (ret != DP_LINK_STATUS_SIZE) |
@@ -752,13 +752,13 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | |||
752 | static void | 752 | static void |
753 | intel_dp_save(struct drm_connector *connector) | 753 | intel_dp_save(struct drm_connector *connector) |
754 | { | 754 | { |
755 | struct intel_output *intel_output = to_intel_output(connector); | 755 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
756 | struct drm_device *dev = intel_output->base.dev; | 756 | struct drm_device *dev = intel_encoder->base.dev; |
757 | struct drm_i915_private *dev_priv = dev->dev_private; | 757 | struct drm_i915_private *dev_priv = dev->dev_private; |
758 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 758 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
759 | 759 | ||
760 | dp_priv->save_DP = I915_READ(dp_priv->output_reg); | 760 | dp_priv->save_DP = I915_READ(dp_priv->output_reg); |
761 | intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, | 761 | intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET, |
762 | dp_priv->save_link_configuration, | 762 | dp_priv->save_link_configuration, |
763 | sizeof (dp_priv->save_link_configuration)); | 763 | sizeof (dp_priv->save_link_configuration)); |
764 | } | 764 | } |
@@ -825,7 +825,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) | |||
825 | } | 825 | } |
826 | 826 | ||
827 | static void | 827 | static void |
828 | intel_get_adjust_train(struct intel_output *intel_output, | 828 | intel_get_adjust_train(struct intel_encoder *intel_encoder, |
829 | uint8_t link_status[DP_LINK_STATUS_SIZE], | 829 | uint8_t link_status[DP_LINK_STATUS_SIZE], |
830 | int lane_count, | 830 | int lane_count, |
831 | uint8_t train_set[4]) | 831 | uint8_t train_set[4]) |
@@ -942,15 +942,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) | |||
942 | } | 942 | } |
943 | 943 | ||
944 | static bool | 944 | static bool |
945 | intel_dp_set_link_train(struct intel_output *intel_output, | 945 | intel_dp_set_link_train(struct intel_encoder *intel_encoder, |
946 | uint32_t dp_reg_value, | 946 | uint32_t dp_reg_value, |
947 | uint8_t dp_train_pat, | 947 | uint8_t dp_train_pat, |
948 | uint8_t train_set[4], | 948 | uint8_t train_set[4], |
949 | bool first) | 949 | bool first) |
950 | { | 950 | { |
951 | struct drm_device *dev = intel_output->base.dev; | 951 | struct drm_device *dev = intel_encoder->base.dev; |
952 | struct drm_i915_private *dev_priv = dev->dev_private; | 952 | struct drm_i915_private *dev_priv = dev->dev_private; |
953 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 953 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
954 | int ret; | 954 | int ret; |
955 | 955 | ||
956 | I915_WRITE(dp_priv->output_reg, dp_reg_value); | 956 | I915_WRITE(dp_priv->output_reg, dp_reg_value); |
@@ -958,11 +958,11 @@ intel_dp_set_link_train(struct intel_output *intel_output, | |||
958 | if (first) | 958 | if (first) |
959 | intel_wait_for_vblank(dev); | 959 | intel_wait_for_vblank(dev); |
960 | 960 | ||
961 | intel_dp_aux_native_write_1(intel_output, | 961 | intel_dp_aux_native_write_1(intel_encoder, |
962 | DP_TRAINING_PATTERN_SET, | 962 | DP_TRAINING_PATTERN_SET, |
963 | dp_train_pat); | 963 | dp_train_pat); |
964 | 964 | ||
965 | ret = intel_dp_aux_native_write(intel_output, | 965 | ret = intel_dp_aux_native_write(intel_encoder, |
966 | DP_TRAINING_LANE0_SET, train_set, 4); | 966 | DP_TRAINING_LANE0_SET, train_set, 4); |
967 | if (ret != 4) | 967 | if (ret != 4) |
968 | return false; | 968 | return false; |
@@ -971,12 +971,12 @@ intel_dp_set_link_train(struct intel_output *intel_output, | |||
971 | } | 971 | } |
972 | 972 | ||
973 | static void | 973 | static void |
974 | intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | 974 | intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, |
975 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) | 975 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) |
976 | { | 976 | { |
977 | struct drm_device *dev = intel_output->base.dev; | 977 | struct drm_device *dev = intel_encoder->base.dev; |
978 | struct drm_i915_private *dev_priv = dev->dev_private; | 978 | struct drm_i915_private *dev_priv = dev->dev_private; |
979 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 979 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
980 | uint8_t train_set[4]; | 980 | uint8_t train_set[4]; |
981 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 981 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
982 | int i; | 982 | int i; |
@@ -987,7 +987,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
987 | int tries; | 987 | int tries; |
988 | 988 | ||
989 | /* Write the link configuration data */ | 989 | /* Write the link configuration data */ |
990 | intel_dp_aux_native_write(intel_output, 0x100, | 990 | intel_dp_aux_native_write(intel_encoder, 0x100, |
991 | link_configuration, DP_LINK_CONFIGURATION_SIZE); | 991 | link_configuration, DP_LINK_CONFIGURATION_SIZE); |
992 | 992 | ||
993 | DP |= DP_PORT_EN; | 993 | DP |= DP_PORT_EN; |
@@ -1001,14 +1001,14 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
1001 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | 1001 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); |
1002 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1002 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1003 | 1003 | ||
1004 | if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, | 1004 | if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1, |
1005 | DP_TRAINING_PATTERN_1, train_set, first)) | 1005 | DP_TRAINING_PATTERN_1, train_set, first)) |
1006 | break; | 1006 | break; |
1007 | first = false; | 1007 | first = false; |
1008 | /* Set training pattern 1 */ | 1008 | /* Set training pattern 1 */ |
1009 | 1009 | ||
1010 | udelay(100); | 1010 | udelay(100); |
1011 | if (!intel_dp_get_link_status(intel_output, link_status)) | 1011 | if (!intel_dp_get_link_status(intel_encoder, link_status)) |
1012 | break; | 1012 | break; |
1013 | 1013 | ||
1014 | if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { | 1014 | if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { |
@@ -1033,7 +1033,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
1033 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | 1033 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
1034 | 1034 | ||
1035 | /* Compute new train_set as requested by target */ | 1035 | /* Compute new train_set as requested by target */ |
1036 | intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); | 1036 | intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); |
1037 | } | 1037 | } |
1038 | 1038 | ||
1039 | /* channel equalization */ | 1039 | /* channel equalization */ |
@@ -1045,13 +1045,13 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
1045 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1045 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1046 | 1046 | ||
1047 | /* channel eq pattern */ | 1047 | /* channel eq pattern */ |
1048 | if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2, | 1048 | if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2, |
1049 | DP_TRAINING_PATTERN_2, train_set, | 1049 | DP_TRAINING_PATTERN_2, train_set, |
1050 | false)) | 1050 | false)) |
1051 | break; | 1051 | break; |
1052 | 1052 | ||
1053 | udelay(400); | 1053 | udelay(400); |
1054 | if (!intel_dp_get_link_status(intel_output, link_status)) | 1054 | if (!intel_dp_get_link_status(intel_encoder, link_status)) |
1055 | break; | 1055 | break; |
1056 | 1056 | ||
1057 | if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { | 1057 | if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { |
@@ -1064,26 +1064,26 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
1064 | break; | 1064 | break; |
1065 | 1065 | ||
1066 | /* Compute new train_set as requested by target */ | 1066 | /* Compute new train_set as requested by target */ |
1067 | intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); | 1067 | intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); |
1068 | ++tries; | 1068 | ++tries; |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); | 1071 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); |
1072 | POSTING_READ(dp_priv->output_reg); | 1072 | POSTING_READ(dp_priv->output_reg); |
1073 | intel_dp_aux_native_write_1(intel_output, | 1073 | intel_dp_aux_native_write_1(intel_encoder, |
1074 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); | 1074 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); |
1075 | } | 1075 | } |
1076 | 1076 | ||
1077 | static void | 1077 | static void |
1078 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | 1078 | intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) |
1079 | { | 1079 | { |
1080 | struct drm_device *dev = intel_output->base.dev; | 1080 | struct drm_device *dev = intel_encoder->base.dev; |
1081 | struct drm_i915_private *dev_priv = dev->dev_private; | 1081 | struct drm_i915_private *dev_priv = dev->dev_private; |
1082 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1082 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1083 | 1083 | ||
1084 | DRM_DEBUG_KMS("\n"); | 1084 | DRM_DEBUG_KMS("\n"); |
1085 | 1085 | ||
1086 | if (IS_eDP(intel_output)) { | 1086 | if (IS_eDP(intel_encoder)) { |
1087 | DP &= ~DP_PLL_ENABLE; | 1087 | DP &= ~DP_PLL_ENABLE; |
1088 | I915_WRITE(dp_priv->output_reg, DP); | 1088 | I915_WRITE(dp_priv->output_reg, DP); |
1089 | POSTING_READ(dp_priv->output_reg); | 1089 | POSTING_READ(dp_priv->output_reg); |
@@ -1096,7 +1096,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | |||
1096 | 1096 | ||
1097 | udelay(17000); | 1097 | udelay(17000); |
1098 | 1098 | ||
1099 | if (IS_eDP(intel_output)) | 1099 | if (IS_eDP(intel_encoder)) |
1100 | DP |= DP_LINK_TRAIN_OFF; | 1100 | DP |= DP_LINK_TRAIN_OFF; |
1101 | I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); | 1101 | I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); |
1102 | POSTING_READ(dp_priv->output_reg); | 1102 | POSTING_READ(dp_priv->output_reg); |
@@ -1105,13 +1105,13 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | |||
1105 | static void | 1105 | static void |
1106 | intel_dp_restore(struct drm_connector *connector) | 1106 | intel_dp_restore(struct drm_connector *connector) |
1107 | { | 1107 | { |
1108 | struct intel_output *intel_output = to_intel_output(connector); | 1108 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1109 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1109 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1110 | 1110 | ||
1111 | if (dp_priv->save_DP & DP_PORT_EN) | 1111 | if (dp_priv->save_DP & DP_PORT_EN) |
1112 | intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); | 1112 | intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration); |
1113 | else | 1113 | else |
1114 | intel_dp_link_down(intel_output, dp_priv->save_DP); | 1114 | intel_dp_link_down(intel_encoder, dp_priv->save_DP); |
1115 | } | 1115 | } |
1116 | 1116 | ||
1117 | /* | 1117 | /* |
@@ -1124,32 +1124,32 @@ intel_dp_restore(struct drm_connector *connector) | |||
1124 | */ | 1124 | */ |
1125 | 1125 | ||
1126 | static void | 1126 | static void |
1127 | intel_dp_check_link_status(struct intel_output *intel_output) | 1127 | intel_dp_check_link_status(struct intel_encoder *intel_encoder) |
1128 | { | 1128 | { |
1129 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1129 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1130 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 1130 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
1131 | 1131 | ||
1132 | if (!intel_output->enc.crtc) | 1132 | if (!intel_encoder->enc.crtc) |
1133 | return; | 1133 | return; |
1134 | 1134 | ||
1135 | if (!intel_dp_get_link_status(intel_output, link_status)) { | 1135 | if (!intel_dp_get_link_status(intel_encoder, link_status)) { |
1136 | intel_dp_link_down(intel_output, dp_priv->DP); | 1136 | intel_dp_link_down(intel_encoder, dp_priv->DP); |
1137 | return; | 1137 | return; |
1138 | } | 1138 | } |
1139 | 1139 | ||
1140 | if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) | 1140 | if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) |
1141 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | 1141 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | static enum drm_connector_status | 1144 | static enum drm_connector_status |
1145 | ironlake_dp_detect(struct drm_connector *connector) | 1145 | ironlake_dp_detect(struct drm_connector *connector) |
1146 | { | 1146 | { |
1147 | struct intel_output *intel_output = to_intel_output(connector); | 1147 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1148 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1148 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1149 | enum drm_connector_status status; | 1149 | enum drm_connector_status status; |
1150 | 1150 | ||
1151 | status = connector_status_disconnected; | 1151 | status = connector_status_disconnected; |
1152 | if (intel_dp_aux_native_read(intel_output, | 1152 | if (intel_dp_aux_native_read(intel_encoder, |
1153 | 0x000, dp_priv->dpcd, | 1153 | 0x000, dp_priv->dpcd, |
1154 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | 1154 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) |
1155 | { | 1155 | { |
@@ -1168,10 +1168,10 @@ ironlake_dp_detect(struct drm_connector *connector) | |||
1168 | static enum drm_connector_status | 1168 | static enum drm_connector_status |
1169 | intel_dp_detect(struct drm_connector *connector) | 1169 | intel_dp_detect(struct drm_connector *connector) |
1170 | { | 1170 | { |
1171 | struct intel_output *intel_output = to_intel_output(connector); | 1171 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1172 | struct drm_device *dev = intel_output->base.dev; | 1172 | struct drm_device *dev = intel_encoder->base.dev; |
1173 | struct drm_i915_private *dev_priv = dev->dev_private; | 1173 | struct drm_i915_private *dev_priv = dev->dev_private; |
1174 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1174 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1175 | uint32_t temp, bit; | 1175 | uint32_t temp, bit; |
1176 | enum drm_connector_status status; | 1176 | enum drm_connector_status status; |
1177 | 1177 | ||
@@ -1210,7 +1210,7 @@ intel_dp_detect(struct drm_connector *connector) | |||
1210 | return connector_status_disconnected; | 1210 | return connector_status_disconnected; |
1211 | 1211 | ||
1212 | status = connector_status_disconnected; | 1212 | status = connector_status_disconnected; |
1213 | if (intel_dp_aux_native_read(intel_output, | 1213 | if (intel_dp_aux_native_read(intel_encoder, |
1214 | 0x000, dp_priv->dpcd, | 1214 | 0x000, dp_priv->dpcd, |
1215 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | 1215 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) |
1216 | { | 1216 | { |
@@ -1222,20 +1222,20 @@ intel_dp_detect(struct drm_connector *connector) | |||
1222 | 1222 | ||
1223 | static int intel_dp_get_modes(struct drm_connector *connector) | 1223 | static int intel_dp_get_modes(struct drm_connector *connector) |
1224 | { | 1224 | { |
1225 | struct intel_output *intel_output = to_intel_output(connector); | 1225 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1226 | struct drm_device *dev = intel_output->base.dev; | 1226 | struct drm_device *dev = intel_encoder->base.dev; |
1227 | struct drm_i915_private *dev_priv = dev->dev_private; | 1227 | struct drm_i915_private *dev_priv = dev->dev_private; |
1228 | int ret; | 1228 | int ret; |
1229 | 1229 | ||
1230 | /* We should parse the EDID data and find out if it has an audio sink | 1230 | /* We should parse the EDID data and find out if it has an audio sink |
1231 | */ | 1231 | */ |
1232 | 1232 | ||
1233 | ret = intel_ddc_get_modes(intel_output); | 1233 | ret = intel_ddc_get_modes(intel_encoder); |
1234 | if (ret) | 1234 | if (ret) |
1235 | return ret; | 1235 | return ret; |
1236 | 1236 | ||
1237 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 1237 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ |
1238 | if (IS_eDP(intel_output)) { | 1238 | if (IS_eDP(intel_encoder)) { |
1239 | if (dev_priv->panel_fixed_mode != NULL) { | 1239 | if (dev_priv->panel_fixed_mode != NULL) { |
1240 | struct drm_display_mode *mode; | 1240 | struct drm_display_mode *mode; |
1241 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 1241 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); |
@@ -1249,13 +1249,13 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1249 | static void | 1249 | static void |
1250 | intel_dp_destroy (struct drm_connector *connector) | 1250 | intel_dp_destroy (struct drm_connector *connector) |
1251 | { | 1251 | { |
1252 | struct intel_output *intel_output = to_intel_output(connector); | 1252 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1253 | 1253 | ||
1254 | if (intel_output->i2c_bus) | 1254 | if (intel_encoder->i2c_bus) |
1255 | intel_i2c_destroy(intel_output->i2c_bus); | 1255 | intel_i2c_destroy(intel_encoder->i2c_bus); |
1256 | drm_sysfs_connector_remove(connector); | 1256 | drm_sysfs_connector_remove(connector); |
1257 | drm_connector_cleanup(connector); | 1257 | drm_connector_cleanup(connector); |
1258 | kfree(intel_output); | 1258 | kfree(intel_encoder); |
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { | 1261 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { |
@@ -1291,12 +1291,12 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { | |||
1291 | }; | 1291 | }; |
1292 | 1292 | ||
1293 | void | 1293 | void |
1294 | intel_dp_hot_plug(struct intel_output *intel_output) | 1294 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) |
1295 | { | 1295 | { |
1296 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1296 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1297 | 1297 | ||
1298 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) | 1298 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) |
1299 | intel_dp_check_link_status(intel_output); | 1299 | intel_dp_check_link_status(intel_encoder); |
1300 | } | 1300 | } |
1301 | 1301 | ||
1302 | void | 1302 | void |
@@ -1304,53 +1304,53 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1304 | { | 1304 | { |
1305 | struct drm_i915_private *dev_priv = dev->dev_private; | 1305 | struct drm_i915_private *dev_priv = dev->dev_private; |
1306 | struct drm_connector *connector; | 1306 | struct drm_connector *connector; |
1307 | struct intel_output *intel_output; | 1307 | struct intel_encoder *intel_encoder; |
1308 | struct intel_dp_priv *dp_priv; | 1308 | struct intel_dp_priv *dp_priv; |
1309 | const char *name = NULL; | 1309 | const char *name = NULL; |
1310 | 1310 | ||
1311 | intel_output = kcalloc(sizeof(struct intel_output) + | 1311 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + |
1312 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | 1312 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); |
1313 | if (!intel_output) | 1313 | if (!intel_encoder) |
1314 | return; | 1314 | return; |
1315 | 1315 | ||
1316 | dp_priv = (struct intel_dp_priv *)(intel_output + 1); | 1316 | dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); |
1317 | 1317 | ||
1318 | connector = &intel_output->base; | 1318 | connector = &intel_encoder->base; |
1319 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, | 1319 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, |
1320 | DRM_MODE_CONNECTOR_DisplayPort); | 1320 | DRM_MODE_CONNECTOR_DisplayPort); |
1321 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 1321 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
1322 | 1322 | ||
1323 | if (output_reg == DP_A) | 1323 | if (output_reg == DP_A) |
1324 | intel_output->type = INTEL_OUTPUT_EDP; | 1324 | intel_encoder->type = INTEL_OUTPUT_EDP; |
1325 | else | 1325 | else |
1326 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; | 1326 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
1327 | 1327 | ||
1328 | if (output_reg == DP_B || output_reg == PCH_DP_B) | 1328 | if (output_reg == DP_B || output_reg == PCH_DP_B) |
1329 | intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); | 1329 | intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); |
1330 | else if (output_reg == DP_C || output_reg == PCH_DP_C) | 1330 | else if (output_reg == DP_C || output_reg == PCH_DP_C) |
1331 | intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); | 1331 | intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); |
1332 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | 1332 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
1333 | intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 1333 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
1334 | 1334 | ||
1335 | if (IS_eDP(intel_output)) | 1335 | if (IS_eDP(intel_encoder)) |
1336 | intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 1336 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); |
1337 | 1337 | ||
1338 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 1338 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
1339 | connector->interlace_allowed = true; | 1339 | connector->interlace_allowed = true; |
1340 | connector->doublescan_allowed = 0; | 1340 | connector->doublescan_allowed = 0; |
1341 | 1341 | ||
1342 | dp_priv->intel_output = intel_output; | 1342 | dp_priv->intel_encoder = intel_encoder; |
1343 | dp_priv->output_reg = output_reg; | 1343 | dp_priv->output_reg = output_reg; |
1344 | dp_priv->has_audio = false; | 1344 | dp_priv->has_audio = false; |
1345 | dp_priv->dpms_mode = DRM_MODE_DPMS_ON; | 1345 | dp_priv->dpms_mode = DRM_MODE_DPMS_ON; |
1346 | intel_output->dev_priv = dp_priv; | 1346 | intel_encoder->dev_priv = dp_priv; |
1347 | 1347 | ||
1348 | drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, | 1348 | drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, |
1349 | DRM_MODE_ENCODER_TMDS); | 1349 | DRM_MODE_ENCODER_TMDS); |
1350 | drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); | 1350 | drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); |
1351 | 1351 | ||
1352 | drm_mode_connector_attach_encoder(&intel_output->base, | 1352 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
1353 | &intel_output->enc); | 1353 | &intel_encoder->enc); |
1354 | drm_sysfs_connector_add(connector); | 1354 | drm_sysfs_connector_add(connector); |
1355 | 1355 | ||
1356 | /* Set up the DDC bus. */ | 1356 | /* Set up the DDC bus. */ |
@@ -1378,10 +1378,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1378 | break; | 1378 | break; |
1379 | } | 1379 | } |
1380 | 1380 | ||
1381 | intel_dp_i2c_init(intel_output, name); | 1381 | intel_dp_i2c_init(intel_encoder, name); |
1382 | 1382 | ||
1383 | intel_output->ddc_bus = &dp_priv->adapter; | 1383 | intel_encoder->ddc_bus = &dp_priv->adapter; |
1384 | intel_output->hot_plug = intel_dp_hot_plug; | 1384 | intel_encoder->hot_plug = intel_dp_hot_plug; |
1385 | 1385 | ||
1386 | if (output_reg == DP_A) { | 1386 | if (output_reg == DP_A) { |
1387 | /* initialize panel mode from VBT if available for eDP */ | 1387 | /* initialize panel mode from VBT if available for eDP */ |
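Note on the allocation pattern in the intel_dp_init() hunk above: the DisplayPort-specific state is carved out of the same allocation as the generic encoder, starting immediately after it (intel_encoder + 1). A minimal user-space sketch of that single-allocation layout, using hypothetical struct names rather than the driver's own types:

    #include <stdio.h>
    #include <stdlib.h>

    /* Generic part, standing in for struct intel_encoder. */
    struct encoder {
        int type;
        void *dev_priv;         /* points at the trailing private block */
    };

    /* Output-specific part, standing in for struct intel_dp_priv. */
    struct dp_priv {
        int output_reg;
        int has_audio;
    };

    int main(void)
    {
        /* One zeroed allocation holds both structures back to back,
         * mirroring kcalloc(sizeof(encoder) + sizeof(priv), 1, ...). */
        struct encoder *enc = calloc(1, sizeof(struct encoder) +
                                        sizeof(struct dp_priv));
        struct dp_priv *priv;

        if (!enc)
            return 1;

        /* The private block starts right after the encoder. */
        priv = (struct dp_priv *)(enc + 1);
        priv->output_reg = 1;
        enc->dev_priv = priv;

        printf("priv sits %zu bytes past enc\n",
               (size_t)((char *)priv - (char *)enc));

        free(enc);              /* a single free releases both parts */
        return 0;
    }

The payoff is that one kfree() on the encoder in the destroy path releases both pieces, which is what the connector destroy functions in these hunks rely on.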
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3a467ca57857..e30253755f12 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -95,7 +95,7 @@ struct intel_framebuffer { | |||
95 | }; | 95 | }; |
96 | 96 | ||
97 | 97 | ||
98 | struct intel_output { | 98 | struct intel_encoder { |
99 | struct drm_connector base; | 99 | struct drm_connector base; |
100 | 100 | ||
101 | struct drm_encoder enc; | 101 | struct drm_encoder enc; |
@@ -105,7 +105,7 @@ struct intel_output { | |||
105 | bool load_detect_temp; | 105 | bool load_detect_temp; |
106 | bool needs_tv_clock; | 106 | bool needs_tv_clock; |
107 | void *dev_priv; | 107 | void *dev_priv; |
108 | void (*hot_plug)(struct intel_output *); | 108 | void (*hot_plug)(struct intel_encoder *); |
109 | int crtc_mask; | 109 | int crtc_mask; |
110 | int clone_mask; | 110 | int clone_mask; |
111 | }; | 111 | }; |
@@ -152,15 +152,15 @@ struct intel_crtc { | |||
152 | }; | 152 | }; |
153 | 153 | ||
154 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 154 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
155 | #define to_intel_output(x) container_of(x, struct intel_output, base) | 155 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) |
156 | #define enc_to_intel_output(x) container_of(x, struct intel_output, enc) | 156 | #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) |
157 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 157 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
158 | 158 | ||
159 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, | 159 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, |
160 | const char *name); | 160 | const char *name); |
161 | void intel_i2c_destroy(struct i2c_adapter *adapter); | 161 | void intel_i2c_destroy(struct i2c_adapter *adapter); |
162 | int intel_ddc_get_modes(struct intel_output *intel_output); | 162 | int intel_ddc_get_modes(struct intel_encoder *intel_encoder); |
163 | extern bool intel_ddc_probe(struct intel_output *intel_output); | 163 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); |
164 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); | 164 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); |
165 | void intel_i2c_reset_gmbus(struct drm_device *dev); | 165 | void intel_i2c_reset_gmbus(struct drm_device *dev); |
166 | 166 | ||
@@ -175,7 +175,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); | |||
175 | void | 175 | void |
176 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 176 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
177 | struct drm_display_mode *adjusted_mode); | 177 | struct drm_display_mode *adjusted_mode); |
178 | extern void intel_edp_link_config (struct intel_output *, int *, int *); | 178 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); |
179 | 179 | ||
180 | 180 | ||
181 | extern int intel_panel_fitter_pipe (struct drm_device *dev); | 181 | extern int intel_panel_fitter_pipe (struct drm_device *dev); |
@@ -191,10 +191,10 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | |||
191 | struct drm_file *file_priv); | 191 | struct drm_file *file_priv); |
192 | extern void intel_wait_for_vblank(struct drm_device *dev); | 192 | extern void intel_wait_for_vblank(struct drm_device *dev); |
193 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); | 193 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); |
194 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | 194 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
195 | struct drm_display_mode *mode, | 195 | struct drm_display_mode *mode, |
196 | int *dpms_mode); | 196 | int *dpms_mode); |
197 | extern void intel_release_load_detect_pipe(struct intel_output *intel_output, | 197 | extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, |
198 | int dpms_mode); | 198 | int dpms_mode); |
199 | 199 | ||
200 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); | 200 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); |
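The intel_drv.h hunk above renames the container_of() helpers that recover the enclosing structure from either embedded base object (the connector or the encoder). A small user-space sketch of that idiom follows; only container_of() itself mirrors the kernel pattern, every other name here is a simplified stand-in:

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace rendering of the kernel's container_of(): step back from
     * a member pointer to the start of the structure that embeds it. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct connector { int id; };
    struct base_enc  { int possible_crtcs; };

    /* Stand-in for struct intel_encoder: embeds both base objects. */
    struct encoder {
        struct connector base;
        struct base_enc  enc;
        int type;
    };

    #define to_encoder(x)     container_of(x, struct encoder, base)
    #define enc_to_encoder(x) container_of(x, struct encoder, enc)

    int main(void)
    {
        struct encoder e = { .type = 7 };
        struct connector *c = &e.base;
        struct base_enc *b = &e.enc;

        /* Both helpers land on the same enclosing object. */
        printf("%d %d\n", to_encoder(c)->type, enc_to_encoder(b)->type);
        return 0;
    }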
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 0427ca5a2514..ebf213c96b9c 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -80,8 +80,8 @@ static struct intel_dvo_device intel_dvo_devices[] = { | |||
80 | static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | 80 | static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) |
81 | { | 81 | { |
82 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | 82 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; |
83 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 83 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
84 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 84 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
85 | u32 dvo_reg = dvo->dvo_reg; | 85 | u32 dvo_reg = dvo->dvo_reg; |
86 | u32 temp = I915_READ(dvo_reg); | 86 | u32 temp = I915_READ(dvo_reg); |
87 | 87 | ||
@@ -99,8 +99,8 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | |||
99 | static void intel_dvo_save(struct drm_connector *connector) | 99 | static void intel_dvo_save(struct drm_connector *connector) |
100 | { | 100 | { |
101 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 101 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
102 | struct intel_output *intel_output = to_intel_output(connector); | 102 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
103 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 103 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
104 | 104 | ||
105 | /* Each output should probably just save the registers it touches, | 105 | /* Each output should probably just save the registers it touches, |
106 | * but for now, use more overkill. | 106 | * but for now, use more overkill. |
@@ -115,8 +115,8 @@ static void intel_dvo_save(struct drm_connector *connector) | |||
115 | static void intel_dvo_restore(struct drm_connector *connector) | 115 | static void intel_dvo_restore(struct drm_connector *connector) |
116 | { | 116 | { |
117 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 117 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
118 | struct intel_output *intel_output = to_intel_output(connector); | 118 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
119 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 119 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
120 | 120 | ||
121 | dvo->dev_ops->restore(dvo); | 121 | dvo->dev_ops->restore(dvo); |
122 | 122 | ||
@@ -128,8 +128,8 @@ static void intel_dvo_restore(struct drm_connector *connector) | |||
128 | static int intel_dvo_mode_valid(struct drm_connector *connector, | 128 | static int intel_dvo_mode_valid(struct drm_connector *connector, |
129 | struct drm_display_mode *mode) | 129 | struct drm_display_mode *mode) |
130 | { | 130 | { |
131 | struct intel_output *intel_output = to_intel_output(connector); | 131 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
132 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 132 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
133 | 133 | ||
134 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 134 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
135 | return MODE_NO_DBLESCAN; | 135 | return MODE_NO_DBLESCAN; |
@@ -150,8 +150,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, | |||
150 | struct drm_display_mode *mode, | 150 | struct drm_display_mode *mode, |
151 | struct drm_display_mode *adjusted_mode) | 151 | struct drm_display_mode *adjusted_mode) |
152 | { | 152 | { |
153 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 153 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
154 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 154 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
155 | 155 | ||
156 | /* If we have timings from the BIOS for the panel, put them in | 156 | /* If we have timings from the BIOS for the panel, put them in |
157 | * to the adjusted mode. The CRTC will be set up for this mode, | 157 | * to the adjusted mode. The CRTC will be set up for this mode, |
@@ -186,8 +186,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
186 | struct drm_device *dev = encoder->dev; | 186 | struct drm_device *dev = encoder->dev; |
187 | struct drm_i915_private *dev_priv = dev->dev_private; | 187 | struct drm_i915_private *dev_priv = dev->dev_private; |
188 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 188 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
189 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 189 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
190 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 190 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
191 | int pipe = intel_crtc->pipe; | 191 | int pipe = intel_crtc->pipe; |
192 | u32 dvo_val; | 192 | u32 dvo_val; |
193 | u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; | 193 | u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; |
@@ -241,23 +241,23 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
241 | */ | 241 | */ |
242 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) | 242 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) |
243 | { | 243 | { |
244 | struct intel_output *intel_output = to_intel_output(connector); | 244 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
245 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 245 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
246 | 246 | ||
247 | return dvo->dev_ops->detect(dvo); | 247 | return dvo->dev_ops->detect(dvo); |
248 | } | 248 | } |
249 | 249 | ||
250 | static int intel_dvo_get_modes(struct drm_connector *connector) | 250 | static int intel_dvo_get_modes(struct drm_connector *connector) |
251 | { | 251 | { |
252 | struct intel_output *intel_output = to_intel_output(connector); | 252 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
253 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 253 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
254 | 254 | ||
255 | /* We should probably have an i2c driver get_modes function for those | 255 | /* We should probably have an i2c driver get_modes function for those |
256 | * devices which will have a fixed set of modes determined by the chip | 256 | * devices which will have a fixed set of modes determined by the chip |
257 | * (TV-out, for example), but for now with just TMDS and LVDS, | 257 | * (TV-out, for example), but for now with just TMDS and LVDS, |
258 | * that's not the case. | 258 | * that's not the case. |
259 | */ | 259 | */ |
260 | intel_ddc_get_modes(intel_output); | 260 | intel_ddc_get_modes(intel_encoder); |
261 | if (!list_empty(&connector->probed_modes)) | 261 | if (!list_empty(&connector->probed_modes)) |
262 | return 1; | 262 | return 1; |
263 | 263 | ||
@@ -275,8 +275,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector) | |||
275 | 275 | ||
276 | static void intel_dvo_destroy (struct drm_connector *connector) | 276 | static void intel_dvo_destroy (struct drm_connector *connector) |
277 | { | 277 | { |
278 | struct intel_output *intel_output = to_intel_output(connector); | 278 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
279 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 279 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
280 | 280 | ||
281 | if (dvo) { | 281 | if (dvo) { |
282 | if (dvo->dev_ops->destroy) | 282 | if (dvo->dev_ops->destroy) |
@@ -286,13 +286,13 @@ static void intel_dvo_destroy (struct drm_connector *connector) | |||
286 | /* no need, in i830_dvoices[] now */ | 286 | /* no need, in i830_dvoices[] now */ |
287 | //kfree(dvo); | 287 | //kfree(dvo); |
288 | } | 288 | } |
289 | if (intel_output->i2c_bus) | 289 | if (intel_encoder->i2c_bus) |
290 | intel_i2c_destroy(intel_output->i2c_bus); | 290 | intel_i2c_destroy(intel_encoder->i2c_bus); |
291 | if (intel_output->ddc_bus) | 291 | if (intel_encoder->ddc_bus) |
292 | intel_i2c_destroy(intel_output->ddc_bus); | 292 | intel_i2c_destroy(intel_encoder->ddc_bus); |
293 | drm_sysfs_connector_remove(connector); | 293 | drm_sysfs_connector_remove(connector); |
294 | drm_connector_cleanup(connector); | 294 | drm_connector_cleanup(connector); |
295 | kfree(intel_output); | 295 | kfree(intel_encoder); |
296 | } | 296 | } |
297 | 297 | ||
298 | #ifdef RANDR_GET_CRTC_INTERFACE | 298 | #ifdef RANDR_GET_CRTC_INTERFACE |
@@ -300,8 +300,8 @@ static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) | |||
300 | { | 300 | { |
301 | struct drm_device *dev = connector->dev; | 301 | struct drm_device *dev = connector->dev; |
302 | struct drm_i915_private *dev_priv = dev->dev_private; | 302 | struct drm_i915_private *dev_priv = dev->dev_private; |
303 | struct intel_output *intel_output = to_intel_output(connector); | 303 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
304 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 304 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
305 | int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); | 305 | int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); |
306 | 306 | ||
307 | return intel_pipe_to_crtc(pScrn, pipe); | 307 | return intel_pipe_to_crtc(pScrn, pipe); |
@@ -352,8 +352,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector) | |||
352 | { | 352 | { |
353 | struct drm_device *dev = connector->dev; | 353 | struct drm_device *dev = connector->dev; |
354 | struct drm_i915_private *dev_priv = dev->dev_private; | 354 | struct drm_i915_private *dev_priv = dev->dev_private; |
355 | struct intel_output *intel_output = to_intel_output(connector); | 355 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
356 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 356 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
357 | uint32_t dvo_reg = dvo->dvo_reg; | 357 | uint32_t dvo_reg = dvo->dvo_reg; |
358 | uint32_t dvo_val = I915_READ(dvo_reg); | 358 | uint32_t dvo_val = I915_READ(dvo_reg); |
359 | struct drm_display_mode *mode = NULL; | 359 | struct drm_display_mode *mode = NULL; |
@@ -383,24 +383,24 @@ intel_dvo_get_current_mode (struct drm_connector *connector) | |||
383 | 383 | ||
384 | void intel_dvo_init(struct drm_device *dev) | 384 | void intel_dvo_init(struct drm_device *dev) |
385 | { | 385 | { |
386 | struct intel_output *intel_output; | 386 | struct intel_encoder *intel_encoder; |
387 | struct intel_dvo_device *dvo; | 387 | struct intel_dvo_device *dvo; |
388 | struct i2c_adapter *i2cbus = NULL; | 388 | struct i2c_adapter *i2cbus = NULL; |
389 | int ret = 0; | 389 | int ret = 0; |
390 | int i; | 390 | int i; |
391 | int encoder_type = DRM_MODE_ENCODER_NONE; | 391 | int encoder_type = DRM_MODE_ENCODER_NONE; |
392 | intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); | 392 | intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL); |
393 | if (!intel_output) | 393 | if (!intel_encoder) |
394 | return; | 394 | return; |
395 | 395 | ||
396 | /* Set up the DDC bus */ | 396 | /* Set up the DDC bus */ |
397 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); | 397 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); |
398 | if (!intel_output->ddc_bus) | 398 | if (!intel_encoder->ddc_bus) |
399 | goto free_intel; | 399 | goto free_intel; |
400 | 400 | ||
401 | /* Now, try to find a controller */ | 401 | /* Now, try to find a controller */ |
402 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { | 402 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { |
403 | struct drm_connector *connector = &intel_output->base; | 403 | struct drm_connector *connector = &intel_encoder->base; |
404 | int gpio; | 404 | int gpio; |
405 | 405 | ||
406 | dvo = &intel_dvo_devices[i]; | 406 | dvo = &intel_dvo_devices[i]; |
@@ -435,11 +435,11 @@ void intel_dvo_init(struct drm_device *dev) | |||
435 | if (!ret) | 435 | if (!ret) |
436 | continue; | 436 | continue; |
437 | 437 | ||
438 | intel_output->type = INTEL_OUTPUT_DVO; | 438 | intel_encoder->type = INTEL_OUTPUT_DVO; |
439 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 439 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
440 | switch (dvo->type) { | 440 | switch (dvo->type) { |
441 | case INTEL_DVO_CHIP_TMDS: | 441 | case INTEL_DVO_CHIP_TMDS: |
442 | intel_output->clone_mask = | 442 | intel_encoder->clone_mask = |
443 | (1 << INTEL_DVO_TMDS_CLONE_BIT) | | 443 | (1 << INTEL_DVO_TMDS_CLONE_BIT) | |
444 | (1 << INTEL_ANALOG_CLONE_BIT); | 444 | (1 << INTEL_ANALOG_CLONE_BIT); |
445 | drm_connector_init(dev, connector, | 445 | drm_connector_init(dev, connector, |
@@ -448,7 +448,7 @@ void intel_dvo_init(struct drm_device *dev) | |||
448 | encoder_type = DRM_MODE_ENCODER_TMDS; | 448 | encoder_type = DRM_MODE_ENCODER_TMDS; |
449 | break; | 449 | break; |
450 | case INTEL_DVO_CHIP_LVDS: | 450 | case INTEL_DVO_CHIP_LVDS: |
451 | intel_output->clone_mask = | 451 | intel_encoder->clone_mask = |
452 | (1 << INTEL_DVO_LVDS_CLONE_BIT); | 452 | (1 << INTEL_DVO_LVDS_CLONE_BIT); |
453 | drm_connector_init(dev, connector, | 453 | drm_connector_init(dev, connector, |
454 | &intel_dvo_connector_funcs, | 454 | &intel_dvo_connector_funcs, |
@@ -463,16 +463,16 @@ void intel_dvo_init(struct drm_device *dev) | |||
463 | connector->interlace_allowed = false; | 463 | connector->interlace_allowed = false; |
464 | connector->doublescan_allowed = false; | 464 | connector->doublescan_allowed = false; |
465 | 465 | ||
466 | intel_output->dev_priv = dvo; | 466 | intel_encoder->dev_priv = dvo; |
467 | intel_output->i2c_bus = i2cbus; | 467 | intel_encoder->i2c_bus = i2cbus; |
468 | 468 | ||
469 | drm_encoder_init(dev, &intel_output->enc, | 469 | drm_encoder_init(dev, &intel_encoder->enc, |
470 | &intel_dvo_enc_funcs, encoder_type); | 470 | &intel_dvo_enc_funcs, encoder_type); |
471 | drm_encoder_helper_add(&intel_output->enc, | 471 | drm_encoder_helper_add(&intel_encoder->enc, |
472 | &intel_dvo_helper_funcs); | 472 | &intel_dvo_helper_funcs); |
473 | 473 | ||
474 | drm_mode_connector_attach_encoder(&intel_output->base, | 474 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
475 | &intel_output->enc); | 475 | &intel_encoder->enc); |
476 | if (dvo->type == INTEL_DVO_CHIP_LVDS) { | 476 | if (dvo->type == INTEL_DVO_CHIP_LVDS) { |
477 | /* For our LVDS chipsets, we should hopefully be able | 477 | /* For our LVDS chipsets, we should hopefully be able |
478 | * to dig the fixed panel mode out of the BIOS data. | 478 | * to dig the fixed panel mode out of the BIOS data. |
@@ -490,10 +490,10 @@ void intel_dvo_init(struct drm_device *dev) | |||
490 | return; | 490 | return; |
491 | } | 491 | } |
492 | 492 | ||
493 | intel_i2c_destroy(intel_output->ddc_bus); | 493 | intel_i2c_destroy(intel_encoder->ddc_bus); |
494 | /* Didn't find a chip, so tear down. */ | 494 | /* Didn't find a chip, so tear down. */ |
495 | if (i2cbus != NULL) | 495 | if (i2cbus != NULL) |
496 | intel_i2c_destroy(i2cbus); | 496 | intel_i2c_destroy(i2cbus); |
497 | free_intel: | 497 | free_intel: |
498 | kfree(intel_output); | 498 | kfree(intel_encoder); |
499 | } | 499 | } |
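A note on the error handling visible at the end of the intel_dvo_init() hunk: resources are unwound in reverse order, with a goto label covering the earliest allocation, so every exit path frees exactly what has been set up so far. A compact user-space sketch of that unwind idiom, with made-up resource names:

    #include <stdio.h>
    #include <stdlib.h>

    struct bus     { int id; };
    struct encoder { struct bus *ddc_bus; };

    static struct bus *bus_create(int id)
    {
        struct bus *b = malloc(sizeof(*b));

        if (b)
            b->id = id;
        return b;
    }

    static void bus_destroy(struct bus *b)
    {
        free(b);
    }

    /* Stand-in for the controller probe loop; pretend nothing answered. */
    static int found_controller(struct encoder *enc)
    {
        (void)enc;
        return 0;
    }

    static void encoder_init(void)
    {
        struct encoder *enc = calloc(1, sizeof(*enc));

        if (!enc)
            return;

        enc->ddc_bus = bus_create(1);
        if (!enc->ddc_bus)
            goto free_enc;          /* nothing else to undo yet */

        if (found_controller(enc))
            return;                 /* success: keep everything live */

        /* Didn't find a chip, so tear down in reverse order. */
        bus_destroy(enc->ddc_bus);
    free_enc:
        free(enc);
    }

    int main(void)
    {
        encoder_init();
        puts("init attempted; every path released what it acquired");
        return 0;
    }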
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 69bbef92f130..8a0b3bcdc7b1 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -144,7 +144,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
144 | ret = -ENOMEM; | 144 | ret = -ENOMEM; |
145 | goto out; | 145 | goto out; |
146 | } | 146 | } |
147 | obj_priv = fbo->driver_private; | 147 | obj_priv = to_intel_bo(fbo); |
148 | 148 | ||
149 | mutex_lock(&dev->struct_mutex); | 149 | mutex_lock(&dev->struct_mutex); |
150 | 150 | ||
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 1ed02f641258..48cade0cf7b1 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -51,8 +51,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
51 | struct drm_i915_private *dev_priv = dev->dev_private; | 51 | struct drm_i915_private *dev_priv = dev->dev_private; |
52 | struct drm_crtc *crtc = encoder->crtc; | 52 | struct drm_crtc *crtc = encoder->crtc; |
53 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 53 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
54 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 54 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
55 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 55 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
56 | u32 sdvox; | 56 | u32 sdvox; |
57 | 57 | ||
58 | sdvox = SDVO_ENCODING_HDMI | | 58 | sdvox = SDVO_ENCODING_HDMI | |
@@ -74,8 +74,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | |||
74 | { | 74 | { |
75 | struct drm_device *dev = encoder->dev; | 75 | struct drm_device *dev = encoder->dev; |
76 | struct drm_i915_private *dev_priv = dev->dev_private; | 76 | struct drm_i915_private *dev_priv = dev->dev_private; |
77 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 77 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
78 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 78 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
79 | u32 temp; | 79 | u32 temp; |
80 | 80 | ||
81 | temp = I915_READ(hdmi_priv->sdvox_reg); | 81 | temp = I915_READ(hdmi_priv->sdvox_reg); |
@@ -110,8 +110,8 @@ static void intel_hdmi_save(struct drm_connector *connector) | |||
110 | { | 110 | { |
111 | struct drm_device *dev = connector->dev; | 111 | struct drm_device *dev = connector->dev; |
112 | struct drm_i915_private *dev_priv = dev->dev_private; | 112 | struct drm_i915_private *dev_priv = dev->dev_private; |
113 | struct intel_output *intel_output = to_intel_output(connector); | 113 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
114 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 114 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
115 | 115 | ||
116 | hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); | 116 | hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); |
117 | } | 117 | } |
@@ -120,8 +120,8 @@ static void intel_hdmi_restore(struct drm_connector *connector) | |||
120 | { | 120 | { |
121 | struct drm_device *dev = connector->dev; | 121 | struct drm_device *dev = connector->dev; |
122 | struct drm_i915_private *dev_priv = dev->dev_private; | 122 | struct drm_i915_private *dev_priv = dev->dev_private; |
123 | struct intel_output *intel_output = to_intel_output(connector); | 123 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
124 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 124 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
125 | 125 | ||
126 | I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); | 126 | I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); |
127 | POSTING_READ(hdmi_priv->sdvox_reg); | 127 | POSTING_READ(hdmi_priv->sdvox_reg); |
@@ -151,21 +151,21 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
151 | static enum drm_connector_status | 151 | static enum drm_connector_status |
152 | intel_hdmi_detect(struct drm_connector *connector) | 152 | intel_hdmi_detect(struct drm_connector *connector) |
153 | { | 153 | { |
154 | struct intel_output *intel_output = to_intel_output(connector); | 154 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
155 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 155 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
156 | struct edid *edid = NULL; | 156 | struct edid *edid = NULL; |
157 | enum drm_connector_status status = connector_status_disconnected; | 157 | enum drm_connector_status status = connector_status_disconnected; |
158 | 158 | ||
159 | hdmi_priv->has_hdmi_sink = false; | 159 | hdmi_priv->has_hdmi_sink = false; |
160 | edid = drm_get_edid(&intel_output->base, | 160 | edid = drm_get_edid(&intel_encoder->base, |
161 | intel_output->ddc_bus); | 161 | intel_encoder->ddc_bus); |
162 | 162 | ||
163 | if (edid) { | 163 | if (edid) { |
164 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 164 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
165 | status = connector_status_connected; | 165 | status = connector_status_connected; |
166 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | 166 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); |
167 | } | 167 | } |
168 | intel_output->base.display_info.raw_edid = NULL; | 168 | intel_encoder->base.display_info.raw_edid = NULL; |
169 | kfree(edid); | 169 | kfree(edid); |
170 | } | 170 | } |
171 | 171 | ||
@@ -174,24 +174,24 @@ intel_hdmi_detect(struct drm_connector *connector) | |||
174 | 174 | ||
175 | static int intel_hdmi_get_modes(struct drm_connector *connector) | 175 | static int intel_hdmi_get_modes(struct drm_connector *connector) |
176 | { | 176 | { |
177 | struct intel_output *intel_output = to_intel_output(connector); | 177 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
178 | 178 | ||
179 | /* We should parse the EDID data and find out if it's an HDMI sink so | 179 | /* We should parse the EDID data and find out if it's an HDMI sink so |
180 | * we can send audio to it. | 180 | * we can send audio to it. |
181 | */ | 181 | */ |
182 | 182 | ||
183 | return intel_ddc_get_modes(intel_output); | 183 | return intel_ddc_get_modes(intel_encoder); |
184 | } | 184 | } |
185 | 185 | ||
186 | static void intel_hdmi_destroy(struct drm_connector *connector) | 186 | static void intel_hdmi_destroy(struct drm_connector *connector) |
187 | { | 187 | { |
188 | struct intel_output *intel_output = to_intel_output(connector); | 188 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
189 | 189 | ||
190 | if (intel_output->i2c_bus) | 190 | if (intel_encoder->i2c_bus) |
191 | intel_i2c_destroy(intel_output->i2c_bus); | 191 | intel_i2c_destroy(intel_encoder->i2c_bus); |
192 | drm_sysfs_connector_remove(connector); | 192 | drm_sysfs_connector_remove(connector); |
193 | drm_connector_cleanup(connector); | 193 | drm_connector_cleanup(connector); |
194 | kfree(intel_output); | 194 | kfree(intel_encoder); |
195 | } | 195 | } |
196 | 196 | ||
197 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { | 197 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { |
@@ -230,63 +230,63 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
230 | { | 230 | { |
231 | struct drm_i915_private *dev_priv = dev->dev_private; | 231 | struct drm_i915_private *dev_priv = dev->dev_private; |
232 | struct drm_connector *connector; | 232 | struct drm_connector *connector; |
233 | struct intel_output *intel_output; | 233 | struct intel_encoder *intel_encoder; |
234 | struct intel_hdmi_priv *hdmi_priv; | 234 | struct intel_hdmi_priv *hdmi_priv; |
235 | 235 | ||
236 | intel_output = kcalloc(sizeof(struct intel_output) + | 236 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + |
237 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); | 237 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); |
238 | if (!intel_output) | 238 | if (!intel_encoder) |
239 | return; | 239 | return; |
240 | hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1); | 240 | hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); |
241 | 241 | ||
242 | connector = &intel_output->base; | 242 | connector = &intel_encoder->base; |
243 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, | 243 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, |
244 | DRM_MODE_CONNECTOR_HDMIA); | 244 | DRM_MODE_CONNECTOR_HDMIA); |
245 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); | 245 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); |
246 | 246 | ||
247 | intel_output->type = INTEL_OUTPUT_HDMI; | 247 | intel_encoder->type = INTEL_OUTPUT_HDMI; |
248 | 248 | ||
249 | connector->interlace_allowed = 0; | 249 | connector->interlace_allowed = 0; |
250 | connector->doublescan_allowed = 0; | 250 | connector->doublescan_allowed = 0; |
251 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 251 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
252 | 252 | ||
253 | /* Set up the DDC bus. */ | 253 | /* Set up the DDC bus. */ |
254 | if (sdvox_reg == SDVOB) { | 254 | if (sdvox_reg == SDVOB) { |
255 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | 255 | intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); |
256 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); | 256 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); |
257 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | 257 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; |
258 | } else if (sdvox_reg == SDVOC) { | 258 | } else if (sdvox_reg == SDVOC) { |
259 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | 259 | intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); |
260 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); | 260 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); |
261 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | 261 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; |
262 | } else if (sdvox_reg == HDMIB) { | 262 | } else if (sdvox_reg == HDMIB) { |
263 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | 263 | intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); |
264 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, | 264 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, |
265 | "HDMIB"); | 265 | "HDMIB"); |
266 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | 266 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; |
267 | } else if (sdvox_reg == HDMIC) { | 267 | } else if (sdvox_reg == HDMIC) { |
268 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); | 268 | intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); |
269 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, | 269 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, |
270 | "HDMIC"); | 270 | "HDMIC"); |
271 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | 271 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; |
272 | } else if (sdvox_reg == HDMID) { | 272 | } else if (sdvox_reg == HDMID) { |
273 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); | 273 | intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); |
274 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, | 274 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, |
275 | "HDMID"); | 275 | "HDMID"); |
276 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; | 276 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; |
277 | } | 277 | } |
278 | if (!intel_output->ddc_bus) | 278 | if (!intel_encoder->ddc_bus) |
279 | goto err_connector; | 279 | goto err_connector; |
280 | 280 | ||
281 | hdmi_priv->sdvox_reg = sdvox_reg; | 281 | hdmi_priv->sdvox_reg = sdvox_reg; |
282 | intel_output->dev_priv = hdmi_priv; | 282 | intel_encoder->dev_priv = hdmi_priv; |
283 | 283 | ||
284 | drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs, | 284 | drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, |
285 | DRM_MODE_ENCODER_TMDS); | 285 | DRM_MODE_ENCODER_TMDS); |
286 | drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs); | 286 | drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); |
287 | 287 | ||
288 | drm_mode_connector_attach_encoder(&intel_output->base, | 288 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
289 | &intel_output->enc); | 289 | &intel_encoder->enc); |
290 | drm_sysfs_connector_add(connector); | 290 | drm_sysfs_connector_add(connector); |
291 | 291 | ||
292 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | 292 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written |
@@ -302,7 +302,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
302 | 302 | ||
303 | err_connector: | 303 | err_connector: |
304 | drm_connector_cleanup(connector); | 304 | drm_connector_cleanup(connector); |
305 | kfree(intel_output); | 305 | kfree(intel_encoder); |
306 | 306 | ||
307 | return; | 307 | return; |
308 | } | 308 | } |
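The intel_hdmi_init() hunk above fills in clone_mask and crtc_mask as plain bitmasks: crtc_mask says which pipes may drive the encoder, and clone_mask carries one bit per output type allowed to share a CRTC with it. As a rough illustration only (the driver's actual compatibility check is not part of these hunks), here is a user-space sketch of testing such masks with hypothetical bit names:

    #include <stdio.h>

    /* Hypothetical bit indices, playing the role of INTEL_HDMIB_CLONE_BIT
     * and friends in the hunk above. */
    enum { ANALOG_CLONE_BIT, HDMIB_CLONE_BIT, LVDS_CLONE_BIT };

    struct encoder {
        unsigned int clone_mask;  /* output types allowed alongside us */
        unsigned int crtc_mask;   /* pipes that may drive us */
    };

    int main(void)
    {
        struct encoder hdmib = {
            .clone_mask = 1u << HDMIB_CLONE_BIT,
            .crtc_mask  = (1u << 0) | (1u << 1),   /* pipes A and B */
        };

        /* Bit tests answer the two questions the masks encode: can pipe B
         * drive this encoder, and may an LVDS output share its CRTC? */
        printf("pipe B ok: %d\n", !!(hdmib.crtc_mask & (1u << 1)));
        printf("LVDS clone ok: %d\n",
               !!(hdmib.clone_mask & (1u << LVDS_CLONE_BIT)));
        return 0;
    }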
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 216e9f52b6e0..b66806a37d37 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -239,8 +239,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
239 | struct drm_i915_private *dev_priv = dev->dev_private; | 239 | struct drm_i915_private *dev_priv = dev->dev_private; |
240 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 240 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
241 | struct drm_encoder *tmp_encoder; | 241 | struct drm_encoder *tmp_encoder; |
242 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 242 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
243 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | 243 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
244 | u32 pfit_control = 0, pfit_pgm_ratios = 0; | 244 | u32 pfit_control = 0, pfit_pgm_ratios = 0; |
245 | int left_border = 0, right_border = 0, top_border = 0; | 245 | int left_border = 0, right_border = 0, top_border = 0; |
246 | int bottom_border = 0; | 246 | int bottom_border = 0; |
@@ -587,8 +587,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
587 | { | 587 | { |
588 | struct drm_device *dev = encoder->dev; | 588 | struct drm_device *dev = encoder->dev; |
589 | struct drm_i915_private *dev_priv = dev->dev_private; | 589 | struct drm_i915_private *dev_priv = dev->dev_private; |
590 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 590 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
591 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | 591 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
592 | 592 | ||
593 | /* | 593 | /* |
594 | * The LVDS pin pair will already have been turned on in the | 594 | * The LVDS pin pair will already have been turned on in the |
@@ -635,14 +635,16 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect | |||
635 | static int intel_lvds_get_modes(struct drm_connector *connector) | 635 | static int intel_lvds_get_modes(struct drm_connector *connector) |
636 | { | 636 | { |
637 | struct drm_device *dev = connector->dev; | 637 | struct drm_device *dev = connector->dev; |
638 | struct intel_output *intel_output = to_intel_output(connector); | 638 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
639 | struct drm_i915_private *dev_priv = dev->dev_private; | 639 | struct drm_i915_private *dev_priv = dev->dev_private; |
640 | int ret = 0; | 640 | int ret = 0; |
641 | 641 | ||
642 | ret = intel_ddc_get_modes(intel_output); | 642 | if (dev_priv->lvds_edid_good) { |
643 | ret = intel_ddc_get_modes(intel_encoder); | ||
643 | 644 | ||
644 | if (ret) | 645 | if (ret) |
645 | return ret; | 646 | return ret; |
647 | } | ||
646 | 648 | ||
647 | /* Didn't get an EDID, so | 649 | /* Didn't get an EDID, so |
648 | * Set wide sync ranges so we get all modes | 650 | * Set wide sync ranges so we get all modes |
@@ -715,11 +717,11 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
715 | static void intel_lvds_destroy(struct drm_connector *connector) | 717 | static void intel_lvds_destroy(struct drm_connector *connector) |
716 | { | 718 | { |
717 | struct drm_device *dev = connector->dev; | 719 | struct drm_device *dev = connector->dev; |
718 | struct intel_output *intel_output = to_intel_output(connector); | 720 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
719 | struct drm_i915_private *dev_priv = dev->dev_private; | 721 | struct drm_i915_private *dev_priv = dev->dev_private; |
720 | 722 | ||
721 | if (intel_output->ddc_bus) | 723 | if (intel_encoder->ddc_bus) |
722 | intel_i2c_destroy(intel_output->ddc_bus); | 724 | intel_i2c_destroy(intel_encoder->ddc_bus); |
723 | if (dev_priv->lid_notifier.notifier_call) | 725 | if (dev_priv->lid_notifier.notifier_call) |
724 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); | 726 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); |
725 | drm_sysfs_connector_remove(connector); | 727 | drm_sysfs_connector_remove(connector); |
@@ -732,13 +734,13 @@ static int intel_lvds_set_property(struct drm_connector *connector, | |||
732 | uint64_t value) | 734 | uint64_t value) |
733 | { | 735 | { |
734 | struct drm_device *dev = connector->dev; | 736 | struct drm_device *dev = connector->dev; |
735 | struct intel_output *intel_output = | 737 | struct intel_encoder *intel_encoder = |
736 | to_intel_output(connector); | 738 | to_intel_encoder(connector); |
737 | 739 | ||
738 | if (property == dev->mode_config.scaling_mode_property && | 740 | if (property == dev->mode_config.scaling_mode_property && |
739 | connector->encoder) { | 741 | connector->encoder) { |
740 | struct drm_crtc *crtc = connector->encoder->crtc; | 742 | struct drm_crtc *crtc = connector->encoder->crtc; |
741 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | 743 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
742 | if (value == DRM_MODE_SCALE_NONE) { | 744 | if (value == DRM_MODE_SCALE_NONE) { |
743 | DRM_DEBUG_KMS("no scaling not supported\n"); | 745 | DRM_DEBUG_KMS("no scaling not supported\n"); |
744 | return 0; | 746 | return 0; |
@@ -858,6 +860,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
858 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), | 860 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), |
859 | }, | 861 | }, |
860 | }, | 862 | }, |
863 | { | ||
864 | .callback = intel_no_lvds_dmi_callback, | ||
865 | .ident = "Clientron U800", | ||
866 | .matches = { | ||
867 | DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), | ||
868 | DMI_MATCH(DMI_PRODUCT_NAME, "U800"), | ||
869 | }, | ||
870 | }, | ||
861 | 871 | ||
862 | { } /* terminating entry */ | 872 | { } /* terminating entry */ |
863 | }; | 873 | }; |
@@ -968,7 +978,7 @@ static int lvds_is_present_in_vbt(struct drm_device *dev) | |||
968 | void intel_lvds_init(struct drm_device *dev) | 978 | void intel_lvds_init(struct drm_device *dev) |
969 | { | 979 | { |
970 | struct drm_i915_private *dev_priv = dev->dev_private; | 980 | struct drm_i915_private *dev_priv = dev->dev_private; |
971 | struct intel_output *intel_output; | 981 | struct intel_encoder *intel_encoder; |
972 | struct drm_connector *connector; | 982 | struct drm_connector *connector; |
973 | struct drm_encoder *encoder; | 983 | struct drm_encoder *encoder; |
974 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ | 984 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ |
@@ -996,40 +1006,40 @@ void intel_lvds_init(struct drm_device *dev) | |||
996 | gpio = PCH_GPIOC; | 1006 | gpio = PCH_GPIOC; |
997 | } | 1007 | } |
998 | 1008 | ||
999 | intel_output = kzalloc(sizeof(struct intel_output) + | 1009 | intel_encoder = kzalloc(sizeof(struct intel_encoder) + |
1000 | sizeof(struct intel_lvds_priv), GFP_KERNEL); | 1010 | sizeof(struct intel_lvds_priv), GFP_KERNEL); |
1001 | if (!intel_output) { | 1011 | if (!intel_encoder) { |
1002 | return; | 1012 | return; |
1003 | } | 1013 | } |
1004 | 1014 | ||
1005 | connector = &intel_output->base; | 1015 | connector = &intel_encoder->base; |
1006 | encoder = &intel_output->enc; | 1016 | encoder = &intel_encoder->enc; |
1007 | drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs, | 1017 | drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs, |
1008 | DRM_MODE_CONNECTOR_LVDS); | 1018 | DRM_MODE_CONNECTOR_LVDS); |
1009 | 1019 | ||
1010 | drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs, | 1020 | drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, |
1011 | DRM_MODE_ENCODER_LVDS); | 1021 | DRM_MODE_ENCODER_LVDS); |
1012 | 1022 | ||
1013 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 1023 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); |
1014 | intel_output->type = INTEL_OUTPUT_LVDS; | 1024 | intel_encoder->type = INTEL_OUTPUT_LVDS; |
1015 | 1025 | ||
1016 | intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | 1026 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); |
1017 | intel_output->crtc_mask = (1 << 1); | 1027 | intel_encoder->crtc_mask = (1 << 1); |
1018 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | 1028 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); |
1019 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | 1029 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); |
1020 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 1030 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
1021 | connector->interlace_allowed = false; | 1031 | connector->interlace_allowed = false; |
1022 | connector->doublescan_allowed = false; | 1032 | connector->doublescan_allowed = false; |
1023 | 1033 | ||
1024 | lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); | 1034 | lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1); |
1025 | intel_output->dev_priv = lvds_priv; | 1035 | intel_encoder->dev_priv = lvds_priv; |
1026 | /* create the scaling mode property */ | 1036 | /* create the scaling mode property */ |
1027 | drm_mode_create_scaling_mode_property(dev); | 1037 | drm_mode_create_scaling_mode_property(dev); |
1028 | /* | 1038 | /* |
1029 | * the initial panel fitting mode will be FULL_SCREEN. | 1039 | * the initial panel fitting mode will be FULL_SCREEN. |
1030 | */ | 1040 | */ |
1031 | 1041 | ||
1032 | drm_connector_attach_property(&intel_output->base, | 1042 | drm_connector_attach_property(&intel_encoder->base, |
1033 | dev->mode_config.scaling_mode_property, | 1043 | dev->mode_config.scaling_mode_property, |
1034 | DRM_MODE_SCALE_FULLSCREEN); | 1044 | DRM_MODE_SCALE_FULLSCREEN); |
1035 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; | 1045 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; |
@@ -1044,8 +1054,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
1044 | */ | 1054 | */ |
1045 | 1055 | ||
1046 | /* Set up the DDC bus. */ | 1056 | /* Set up the DDC bus. */ |
1047 | intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); | 1057 | intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); |
1048 | if (!intel_output->ddc_bus) { | 1058 | if (!intel_encoder->ddc_bus) { |
1049 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " | 1059 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " |
1050 | "failed.\n"); | 1060 | "failed.\n"); |
1051 | goto failed; | 1061 | goto failed; |
@@ -1055,7 +1065,10 @@ void intel_lvds_init(struct drm_device *dev) | |||
1055 | * Attempt to get the fixed panel mode from DDC. Assume that the | 1065 | * Attempt to get the fixed panel mode from DDC. Assume that the |
1056 | * preferred mode is the right one. | 1066 | * preferred mode is the right one. |
1057 | */ | 1067 | */ |
1058 | intel_ddc_get_modes(intel_output); | 1068 | dev_priv->lvds_edid_good = true; |
1069 | |||
1070 | if (!intel_ddc_get_modes(intel_encoder)) | ||
1071 | dev_priv->lvds_edid_good = false; | ||
1059 | 1072 | ||
1060 | list_for_each_entry(scan, &connector->probed_modes, head) { | 1073 | list_for_each_entry(scan, &connector->probed_modes, head) { |
1061 | mutex_lock(&dev->mode_config.mutex); | 1074 | mutex_lock(&dev->mode_config.mutex); |
@@ -1133,9 +1146,9 @@ out: | |||
1133 | 1146 | ||
1134 | failed: | 1147 | failed: |
1135 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); | 1148 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); |
1136 | if (intel_output->ddc_bus) | 1149 | if (intel_encoder->ddc_bus) |
1137 | intel_i2c_destroy(intel_output->ddc_bus); | 1150 | intel_i2c_destroy(intel_encoder->ddc_bus); |
1138 | drm_connector_cleanup(connector); | 1151 | drm_connector_cleanup(connector); |
1139 | drm_encoder_cleanup(encoder); | 1152 | drm_encoder_cleanup(encoder); |
1140 | kfree(intel_output); | 1153 | kfree(intel_encoder); |
1141 | } | 1154 | } |
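Two things in the intel_lvds.c hunks above are worth spelling out: the new DMI entry extends a quirk table of machines whose BIOS advertises an LVDS panel that is not actually present, and the new lvds_edid_good flag records whether the initial EDID fetch succeeded so later get_modes calls can skip a probe known to fail. A minimal user-space sketch of the quirk-table match; the Clientron U800 entry comes from the hunk, the other entry is invented for illustration:

    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for the intel_no_lvds[] DMI table. */
    struct no_lvds_quirk {
        const char *vendor;
        const char *product;
    };

    static const struct no_lvds_quirk no_lvds[] = {
        { "Clientron",     "U800" },          /* entry added in the hunk */
        { "ExampleVendor", "NoPanelBox" },    /* hypothetical entry */
        { NULL, NULL }                        /* terminating entry */
    };

    static int lvds_is_bogus(const char *vendor, const char *product)
    {
        const struct no_lvds_quirk *q;

        for (q = no_lvds; q->vendor; q++)
            if (!strcmp(q->vendor, vendor) &&
                !strcmp(q->product, product))
                return 1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", lvds_is_bogus("Clientron", "U800"));   /* 1 */
        printf("%d\n", lvds_is_bogus("SomeOEM", "Board"));    /* 0 */
        return 0;
    }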
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 89d303d1d3fb..8e5c83b2d120 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -34,7 +34,7 @@ | |||
34 | * intel_ddc_probe | 34 | * intel_ddc_probe |
35 | * | 35 | * |
36 | */ | 36 | */ |
37 | bool intel_ddc_probe(struct intel_output *intel_output) | 37 | bool intel_ddc_probe(struct intel_encoder *intel_encoder) |
38 | { | 38 | { |
39 | u8 out_buf[] = { 0x0, 0x0}; | 39 | u8 out_buf[] = { 0x0, 0x0}; |
40 | u8 buf[2]; | 40 | u8 buf[2]; |
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_output *intel_output) | |||
54 | } | 54 | } |
55 | }; | 55 | }; |
56 | 56 | ||
57 | intel_i2c_quirk_set(intel_output->base.dev, true); | 57 | intel_i2c_quirk_set(intel_encoder->base.dev, true); |
58 | ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); | 58 | ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); |
59 | intel_i2c_quirk_set(intel_output->base.dev, false); | 59 | intel_i2c_quirk_set(intel_encoder->base.dev, false); |
60 | if (ret == 2) | 60 | if (ret == 2) |
61 | return true; | 61 | return true; |
62 | 62 | ||
@@ -69,19 +69,19 @@ bool intel_ddc_probe(struct intel_output *intel_output) | |||
69 | * | 69 | * |
70 | * Fetch the EDID information from @connector using the DDC bus. | 70 | * Fetch the EDID information from @connector using the DDC bus. |
71 | */ | 71 | */ |
72 | int intel_ddc_get_modes(struct intel_output *intel_output) | 72 | int intel_ddc_get_modes(struct intel_encoder *intel_encoder) |
73 | { | 73 | { |
74 | struct edid *edid; | 74 | struct edid *edid; |
75 | int ret = 0; | 75 | int ret = 0; |
76 | 76 | ||
77 | intel_i2c_quirk_set(intel_output->base.dev, true); | 77 | intel_i2c_quirk_set(intel_encoder->base.dev, true); |
78 | edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); | 78 | edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus); |
79 | intel_i2c_quirk_set(intel_output->base.dev, false); | 79 | intel_i2c_quirk_set(intel_encoder->base.dev, false); |
80 | if (edid) { | 80 | if (edid) { |
81 | drm_mode_connector_update_edid_property(&intel_output->base, | 81 | drm_mode_connector_update_edid_property(&intel_encoder->base, |
82 | edid); | 82 | edid); |
83 | ret = drm_add_edid_modes(&intel_output->base, edid); | 83 | ret = drm_add_edid_modes(&intel_encoder->base, edid); |
84 | intel_output->base.display_info.raw_edid = NULL; | 84 | intel_encoder->base.display_info.raw_edid = NULL; |
85 | kfree(edid); | 85 | kfree(edid); |
86 | } | 86 | } |
87 | 87 | ||
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 60595fc26fdd..6d524a1fc271 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -724,7 +724,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
724 | int ret, tmp_width; | 724 | int ret, tmp_width; |
725 | struct overlay_registers *regs; | 725 | struct overlay_registers *regs; |
726 | bool scale_changed = false; | 726 | bool scale_changed = false; |
727 | struct drm_i915_gem_object *bo_priv = new_bo->driver_private; | 727 | struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); |
728 | struct drm_device *dev = overlay->dev; | 728 | struct drm_device *dev = overlay->dev; |
729 | 729 | ||
730 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 730 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
@@ -809,7 +809,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
809 | intel_overlay_continue(overlay, scale_changed); | 809 | intel_overlay_continue(overlay, scale_changed); |
810 | 810 | ||
811 | overlay->old_vid_bo = overlay->vid_bo; | 811 | overlay->old_vid_bo = overlay->vid_bo; |
812 | overlay->vid_bo = new_bo->driver_private; | 812 | overlay->vid_bo = to_intel_bo(new_bo); |
813 | 813 | ||
814 | return 0; | 814 | return 0; |
815 | 815 | ||
@@ -1344,7 +1344,7 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1344 | reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); | 1344 | reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); |
1345 | if (!reg_bo) | 1345 | if (!reg_bo) |
1346 | goto out_free; | 1346 | goto out_free; |
1347 | overlay->reg_bo = reg_bo->driver_private; | 1347 | overlay->reg_bo = to_intel_bo(reg_bo); |
1348 | 1348 | ||
1349 | if (OVERLAY_NONPHYSICAL(dev)) { | 1349 | if (OVERLAY_NONPHYSICAL(dev)) { |
1350 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); | 1350 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); |
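The intel_fb.c and intel_overlay.c hunks replace open-coded obj->driver_private dereferences with a to_intel_bo() accessor. A sketch of why a single typed helper is preferable, using invented types rather than the driver's own (how to_intel_bo() is actually defined is not shown in these hunks):

    #include <stdio.h>

    /* Generic object, playing the role of struct drm_gem_object. */
    struct gem_object {
        void *driver_private;   /* opaque pointer owned by the driver */
    };

    /* Driver wrapper, playing the role of struct drm_i915_gem_object. */
    struct intel_bo {
        struct gem_object *obj;
        int pin_count;
    };

    /* One typed accessor instead of a raw cast at every call site.  If
     * the relationship between the two structs ever changes (say, to an
     * embedded base recovered via container_of), only this helper moves. */
    static inline struct intel_bo *to_bo(struct gem_object *obj)
    {
        return obj->driver_private;
    }

    int main(void)
    {
        struct gem_object obj;
        struct intel_bo bo = { .obj = &obj, .pin_count = 1 };

        obj.driver_private = &bo;
        printf("pin_count = %d\n", to_bo(&obj)->pin_count);
        return 0;
    }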
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 26e13a0bf30b..87d953664cb0 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -54,7 +54,7 @@ struct intel_sdvo_priv { | |||
54 | u8 slave_addr; | 54 | u8 slave_addr; |
55 | 55 | ||
56 | /* Register for the SDVO device: SDVOB or SDVOC */ | 56 | /* Register for the SDVO device: SDVOB or SDVOC */ |
57 | int output_device; | 57 | int sdvo_reg; |
58 | 58 | ||
59 | /* Active outputs controlled by this SDVO output */ | 59 | /* Active outputs controlled by this SDVO output */ |
60 | uint16_t controlled_output; | 60 | uint16_t controlled_output; |
@@ -124,7 +124,7 @@ struct intel_sdvo_priv { | |||
124 | */ | 124 | */ |
125 | struct intel_sdvo_encode encode; | 125 | struct intel_sdvo_encode encode; |
126 | 126 | ||
127 | /* DDC bus used by this SDVO output */ | 127 | /* DDC bus used by this SDVO encoder */ |
128 | uint8_t ddc_bus; | 128 | uint8_t ddc_bus; |
129 | 129 | ||
130 | /* Mac mini hack -- use the same DDC as the analog connector */ | 130 | /* Mac mini hack -- use the same DDC as the analog connector */ |
@@ -162,22 +162,22 @@ struct intel_sdvo_priv { | |||
162 | }; | 162 | }; |
163 | 163 | ||
164 | static bool | 164 | static bool |
165 | intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); | 165 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); |
166 | 166 | ||
167 | /** | 167 | /** |
168 | * Writes the SDVOB or SDVOC with the given value, but always writes both | 168 | * Writes the SDVOB or SDVOC with the given value, but always writes both |
169 | * SDVOB and SDVOC to work around apparent hardware issues (according to | 169 | * SDVOB and SDVOC to work around apparent hardware issues (according to |
170 | * comments in the BIOS). | 170 | * comments in the BIOS). |
171 | */ | 171 | */ |
172 | static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) | 172 | static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) |
173 | { | 173 | { |
174 | struct drm_device *dev = intel_output->base.dev; | 174 | struct drm_device *dev = intel_encoder->base.dev; |
175 | struct drm_i915_private *dev_priv = dev->dev_private; | 175 | struct drm_i915_private *dev_priv = dev->dev_private; |
176 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 176 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
177 | u32 bval = val, cval = val; | 177 | u32 bval = val, cval = val; |
178 | int i; | 178 | int i; |
179 | 179 | ||
180 | if (sdvo_priv->output_device == SDVOB) { | 180 | if (sdvo_priv->sdvo_reg == SDVOB) { |
181 | cval = I915_READ(SDVOC); | 181 | cval = I915_READ(SDVOC); |
182 | } else { | 182 | } else { |
183 | bval = I915_READ(SDVOB); | 183 | bval = I915_READ(SDVOB); |
@@ -196,10 +196,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) | |||
196 | } | 196 | } |
197 | } | 197 | } |
198 | 198 | ||
199 | static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | 199 | static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr, |
200 | u8 *ch) | 200 | u8 *ch) |
201 | { | 201 | { |
202 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 202 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
203 | u8 out_buf[2]; | 203 | u8 out_buf[2]; |
204 | u8 buf[2]; | 204 | u8 buf[2]; |
205 | int ret; | 205 | int ret; |
@@ -222,7 +222,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
222 | out_buf[0] = addr; | 222 | out_buf[0] = addr; |
223 | out_buf[1] = 0; | 223 | out_buf[1] = 0; |
224 | 224 | ||
225 | if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) | 225 | if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2) |
226 | { | 226 | { |
227 | *ch = buf[0]; | 227 | *ch = buf[0]; |
228 | return true; | 228 | return true; |
@@ -232,10 +232,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
232 | return false; | 232 | return false; |
233 | } | 233 | } |
234 | 234 | ||
235 | static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | 235 | static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr, |
236 | u8 ch) | 236 | u8 ch) |
237 | { | 237 | { |
238 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 238 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
239 | u8 out_buf[2]; | 239 | u8 out_buf[2]; |
240 | struct i2c_msg msgs[] = { | 240 | struct i2c_msg msgs[] = { |
241 | { | 241 | { |
@@ -249,7 +249,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | |||
249 | out_buf[0] = addr; | 249 | out_buf[0] = addr; |
250 | out_buf[1] = ch; | 250 | out_buf[1] = ch; |
251 | 251 | ||
252 | if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) | 252 | if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1) |
253 | { | 253 | { |
254 | return true; | 254 | return true; |
255 | } | 255 | } |
@@ -353,13 +353,13 @@ static const struct _sdvo_cmd_name { | |||
353 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), | 353 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), |
354 | }; | 354 | }; |
355 | 355 | ||
356 | #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") | 356 | #define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC") |
357 | #define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) | 357 | #define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) |
358 | 358 | ||
359 | static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, | 359 | static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, |
360 | void *args, int args_len) | 360 | void *args, int args_len) |
361 | { | 361 | { |
362 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 362 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
363 | int i; | 363 | int i; |
364 | 364 | ||
365 | DRM_DEBUG_KMS("%s: W: %02X ", | 365 | DRM_DEBUG_KMS("%s: W: %02X ", |
@@ -379,19 +379,19 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, | |||
379 | DRM_LOG_KMS("\n"); | 379 | DRM_LOG_KMS("\n"); |
380 | } | 380 | } |
381 | 381 | ||
382 | static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, | 382 | static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd, |
383 | void *args, int args_len) | 383 | void *args, int args_len) |
384 | { | 384 | { |
385 | int i; | 385 | int i; |
386 | 386 | ||
387 | intel_sdvo_debug_write(intel_output, cmd, args, args_len); | 387 | intel_sdvo_debug_write(intel_encoder, cmd, args, args_len); |
388 | 388 | ||
389 | for (i = 0; i < args_len; i++) { | 389 | for (i = 0; i < args_len; i++) { |
390 | intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i, | 390 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i, |
391 | ((u8*)args)[i]); | 391 | ((u8*)args)[i]); |
392 | } | 392 | } |
393 | 393 | ||
394 | intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); | 394 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd); |
395 | } | 395 | } |
396 | 396 | ||
397 | static const char *cmd_status_names[] = { | 397 | static const char *cmd_status_names[] = { |
@@ -404,11 +404,11 @@ static const char *cmd_status_names[] = { | |||
404 | "Scaling not supported" | 404 | "Scaling not supported" |
405 | }; | 405 | }; |
406 | 406 | ||
407 | static void intel_sdvo_debug_response(struct intel_output *intel_output, | 407 | static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder, |
408 | void *response, int response_len, | 408 | void *response, int response_len, |
409 | u8 status) | 409 | u8 status) |
410 | { | 410 | { |
411 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 411 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
412 | int i; | 412 | int i; |
413 | 413 | ||
414 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); | 414 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); |
@@ -423,7 +423,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output, | |||
423 | DRM_LOG_KMS("\n"); | 423 | DRM_LOG_KMS("\n"); |
424 | } | 424 | } |
425 | 425 | ||
426 | static u8 intel_sdvo_read_response(struct intel_output *intel_output, | 426 | static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder, |
427 | void *response, int response_len) | 427 | void *response, int response_len) |
428 | { | 428 | { |
429 | int i; | 429 | int i; |
@@ -433,16 +433,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output, | |||
433 | while (retry--) { | 433 | while (retry--) { |
434 | /* Read the command response */ | 434 | /* Read the command response */ |
435 | for (i = 0; i < response_len; i++) { | 435 | for (i = 0; i < response_len; i++) { |
436 | intel_sdvo_read_byte(intel_output, | 436 | intel_sdvo_read_byte(intel_encoder, |
437 | SDVO_I2C_RETURN_0 + i, | 437 | SDVO_I2C_RETURN_0 + i, |
438 | &((u8 *)response)[i]); | 438 | &((u8 *)response)[i]); |
439 | } | 439 | } |
440 | 440 | ||
441 | /* read the return status */ | 441 | /* read the return status */ |
442 | intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS, | 442 | intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS, |
443 | &status); | 443 | &status); |
444 | 444 | ||
445 | intel_sdvo_debug_response(intel_output, response, response_len, | 445 | intel_sdvo_debug_response(intel_encoder, response, response_len, |
446 | status); | 446 | status); |
447 | if (status != SDVO_CMD_STATUS_PENDING) | 447 | if (status != SDVO_CMD_STATUS_PENDING) |
448 | return status; | 448 | return status; |
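
The two hunks above touch the heart of the SDVO messaging protocol: intel_sdvo_write_cmd() pushes the argument bytes into descending registers starting at SDVO_I2C_ARG_0 and then writes the opcode to SDVO_I2C_OPCODE, while intel_sdvo_read_response() repeatedly copies the reply out of SDVO_I2C_RETURN_0 onward and polls SDVO_I2C_CMD_STATUS until the command leaves the pending state. A minimal standalone sketch of that round-trip follows; the byte accessors, the register offsets and the status value are illustrative placeholders (the real ones live in intel_sdvo_regs.h and in the driver's I2C helpers), only the sequencing mirrors the code above.

/*
 * Sketch of the SDVO command round-trip implemented by
 * intel_sdvo_write_cmd()/intel_sdvo_read_response() above.
 * sdvo_write_byte()/sdvo_read_byte() are hypothetical stand-ins for the
 * driver's I2C byte accessors; the offsets, the PENDING value and the
 * retry budget are placeholders, not quoted from intel_sdvo_regs.h.
 */
#include <stdbool.h>
#include <stdint.h>

#define SDVO_I2C_ARG_0          0x07u   /* placeholder */
#define SDVO_I2C_OPCODE         0x08u   /* placeholder */
#define SDVO_I2C_CMD_STATUS     0x09u   /* placeholder */
#define SDVO_I2C_RETURN_0       0x0au   /* placeholder */
#define SDVO_CMD_STATUS_PENDING 0x04u   /* placeholder */

bool sdvo_write_byte(uint8_t reg, uint8_t val);   /* assumed helper */
bool sdvo_read_byte(uint8_t reg, uint8_t *val);   /* assumed helper */

static uint8_t sdvo_run_cmd(uint8_t cmd, const uint8_t *args, int args_len,
			    uint8_t *response, int response_len)
{
	uint8_t status;
	int i, retry = 50;	/* retry budget is an assumption */

	/* Arguments occupy consecutive registers counting down from ARG_0. */
	for (i = 0; i < args_len; i++)
		sdvo_write_byte(SDVO_I2C_ARG_0 - i, args[i]);

	/* Writing the opcode kicks off execution on the SDVO device. */
	sdvo_write_byte(SDVO_I2C_OPCODE, cmd);

	/* Re-read the reply and status until the device stops reporting PENDING. */
	do {
		for (i = 0; i < response_len; i++)
			sdvo_read_byte(SDVO_I2C_RETURN_0 + i, &response[i]);
		sdvo_read_byte(SDVO_I2C_CMD_STATUS, &status);
	} while (status == SDVO_CMD_STATUS_PENDING && --retry);

	return status;
}
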
@@ -470,10 +470,10 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | |||
470 | * another I2C transaction after issuing the DDC bus switch, it will be | 470 | * another I2C transaction after issuing the DDC bus switch, it will be |
471 | * switched to the internal SDVO register. | 471 | * switched to the internal SDVO register. |
472 | */ | 472 | */ |
473 | static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | 473 | static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder, |
474 | u8 target) | 474 | u8 target) |
475 | { | 475 | { |
476 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 476 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
477 | u8 out_buf[2], cmd_buf[2], ret_value[2], ret; | 477 | u8 out_buf[2], cmd_buf[2], ret_value[2], ret; |
478 | struct i2c_msg msgs[] = { | 478 | struct i2c_msg msgs[] = { |
479 | { | 479 | { |
@@ -497,10 +497,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | |||
497 | }, | 497 | }, |
498 | }; | 498 | }; |
499 | 499 | ||
500 | intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, | 500 | intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH, |
501 | &target, 1); | 501 | &target, 1); |
502 | /* write the DDC switch command argument */ | 502 | /* write the DDC switch command argument */ |
503 | intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); | 503 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target); |
504 | 504 | ||
505 | out_buf[0] = SDVO_I2C_OPCODE; | 505 | out_buf[0] = SDVO_I2C_OPCODE; |
506 | out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; | 506 | out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; |
@@ -509,7 +509,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | |||
509 | ret_value[0] = 0; | 509 | ret_value[0] = 0; |
510 | ret_value[1] = 0; | 510 | ret_value[1] = 0; |
511 | 511 | ||
512 | ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); | 512 | ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3); |
513 | if (ret != 3) { | 513 | if (ret != 3) { |
514 | /* failure in I2C transfer */ | 514 | /* failure in I2C transfer */ |
515 | DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); | 515 | DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); |
@@ -523,7 +523,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | |||
523 | return; | 523 | return; |
524 | } | 524 | } |
525 | 525 | ||
526 | static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) | 526 | static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1) |
527 | { | 527 | { |
528 | struct intel_sdvo_set_target_input_args targets = {0}; | 528 | struct intel_sdvo_set_target_input_args targets = {0}; |
529 | u8 status; | 529 | u8 status; |
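
The control-bus-switch hunks above act on the constraint spelled out in the comment before intel_sdvo_set_control_bus_switch(): once the DDC switch command has been issued, the next separate I2C transaction flips the device back to its internal SDVO register set, so the switch opcode and whatever must follow it have to travel inside a single i2c_transfer() call. A rough kernel-context sketch of that bundling is below; the slave address, register offsets and message payloads are placeholders, only the "everything in one transfer" shape is the point.

/*
 * Sketch of bundling a DDC bus-switch command into one i2c_transfer(),
 * as the code above does with its three-message array.  Offsets and
 * payloads are placeholders, not the driver's actual values.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/i2c.h>

static int sdvo_switch_ddc_bus(struct i2c_adapter *adap, u16 slave_addr,
			       u8 switch_opcode, u8 target,
			       u8 *out, u16 out_len)
{
	u8 arg_buf[2] = { 0x07 /* ARG_0, placeholder */, target };
	u8 cmd_buf[2] = { 0x08 /* OPCODE, placeholder */, switch_opcode };
	struct i2c_msg msgs[] = {
		{ .addr = slave_addr, .flags = 0,        .len = 2,       .buf = arg_buf },
		{ .addr = slave_addr, .flags = 0,        .len = 2,       .buf = cmd_buf },
		/* The follow-up must ride in the same transfer; a separate
		 * transaction would already see the SDVO registers again. */
		{ .addr = slave_addr, .flags = I2C_M_RD, .len = out_len, .buf = out },
	};

	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == ARRAY_SIZE(msgs) ? 0 : -EIO;
}
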
@@ -534,10 +534,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool | |||
534 | if (target_1) | 534 | if (target_1) |
535 | targets.target_1 = 1; | 535 | targets.target_1 = 1; |
536 | 536 | ||
537 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, | 537 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets, |
538 | sizeof(targets)); | 538 | sizeof(targets)); |
539 | 539 | ||
540 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 540 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
541 | 541 | ||
542 | return (status == SDVO_CMD_STATUS_SUCCESS); | 542 | return (status == SDVO_CMD_STATUS_SUCCESS); |
543 | } | 543 | } |
@@ -548,13 +548,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool | |||
548 | * This function is making an assumption about the layout of the response, | 548 | * This function is making an assumption about the layout of the response, |
549 | * which should be checked against the docs. | 549 | * which should be checked against the docs. |
550 | */ | 550 | */ |
551 | static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) | 551 | static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2) |
552 | { | 552 | { |
553 | struct intel_sdvo_get_trained_inputs_response response; | 553 | struct intel_sdvo_get_trained_inputs_response response; |
554 | u8 status; | 554 | u8 status; |
555 | 555 | ||
556 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); | 556 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); |
557 | status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); | 557 | status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response)); |
558 | if (status != SDVO_CMD_STATUS_SUCCESS) | 558 | if (status != SDVO_CMD_STATUS_SUCCESS) |
559 | return false; | 559 | return false; |
560 | 560 | ||
@@ -563,29 +563,29 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo | |||
563 | return true; | 563 | return true; |
564 | } | 564 | } |
565 | 565 | ||
566 | static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, | 566 | static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder, |
567 | u16 *outputs) | 567 | u16 *outputs) |
568 | { | 568 | { |
569 | u8 status; | 569 | u8 status; |
570 | 570 | ||
571 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); | 571 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); |
572 | status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); | 572 | status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs)); |
573 | 573 | ||
574 | return (status == SDVO_CMD_STATUS_SUCCESS); | 574 | return (status == SDVO_CMD_STATUS_SUCCESS); |
575 | } | 575 | } |
576 | 576 | ||
577 | static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, | 577 | static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, |
578 | u16 outputs) | 578 | u16 outputs) |
579 | { | 579 | { |
580 | u8 status; | 580 | u8 status; |
581 | 581 | ||
582 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, | 582 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, |
583 | sizeof(outputs)); | 583 | sizeof(outputs)); |
584 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 584 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
585 | return (status == SDVO_CMD_STATUS_SUCCESS); | 585 | return (status == SDVO_CMD_STATUS_SUCCESS); |
586 | } | 586 | } |
587 | 587 | ||
588 | static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, | 588 | static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder, |
589 | int mode) | 589 | int mode) |
590 | { | 590 | { |
591 | u8 status, state = SDVO_ENCODER_STATE_ON; | 591 | u8 status, state = SDVO_ENCODER_STATE_ON; |
@@ -605,24 +605,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output | |||
605 | break; | 605 | break; |
606 | } | 606 | } |
607 | 607 | ||
608 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, | 608 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, |
609 | sizeof(state)); | 609 | sizeof(state)); |
610 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 610 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
611 | 611 | ||
612 | return (status == SDVO_CMD_STATUS_SUCCESS); | 612 | return (status == SDVO_CMD_STATUS_SUCCESS); |
613 | } | 613 | } |
614 | 614 | ||
615 | static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output, | 615 | static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder, |
616 | int *clock_min, | 616 | int *clock_min, |
617 | int *clock_max) | 617 | int *clock_max) |
618 | { | 618 | { |
619 | struct intel_sdvo_pixel_clock_range clocks; | 619 | struct intel_sdvo_pixel_clock_range clocks; |
620 | u8 status; | 620 | u8 status; |
621 | 621 | ||
622 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, | 622 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, |
623 | NULL, 0); | 623 | NULL, 0); |
624 | 624 | ||
625 | status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks)); | 625 | status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks)); |
626 | 626 | ||
627 | if (status != SDVO_CMD_STATUS_SUCCESS) | 627 | if (status != SDVO_CMD_STATUS_SUCCESS) |
628 | return false; | 628 | return false; |
@@ -634,31 +634,31 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou | |||
634 | return true; | 634 | return true; |
635 | } | 635 | } |
636 | 636 | ||
637 | static bool intel_sdvo_set_target_output(struct intel_output *intel_output, | 637 | static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, |
638 | u16 outputs) | 638 | u16 outputs) |
639 | { | 639 | { |
640 | u8 status; | 640 | u8 status; |
641 | 641 | ||
642 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, | 642 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, |
643 | sizeof(outputs)); | 643 | sizeof(outputs)); |
644 | 644 | ||
645 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 645 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
646 | return (status == SDVO_CMD_STATUS_SUCCESS); | 646 | return (status == SDVO_CMD_STATUS_SUCCESS); |
647 | } | 647 | } |
648 | 648 | ||
649 | static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, | 649 | static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd, |
650 | struct intel_sdvo_dtd *dtd) | 650 | struct intel_sdvo_dtd *dtd) |
651 | { | 651 | { |
652 | u8 status; | 652 | u8 status; |
653 | 653 | ||
654 | intel_sdvo_write_cmd(intel_output, cmd, NULL, 0); | 654 | intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0); |
655 | status = intel_sdvo_read_response(intel_output, &dtd->part1, | 655 | status = intel_sdvo_read_response(intel_encoder, &dtd->part1, |
656 | sizeof(dtd->part1)); | 656 | sizeof(dtd->part1)); |
657 | if (status != SDVO_CMD_STATUS_SUCCESS) | 657 | if (status != SDVO_CMD_STATUS_SUCCESS) |
658 | return false; | 658 | return false; |
659 | 659 | ||
660 | intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0); | 660 | intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0); |
661 | status = intel_sdvo_read_response(intel_output, &dtd->part2, | 661 | status = intel_sdvo_read_response(intel_encoder, &dtd->part2, |
662 | sizeof(dtd->part2)); | 662 | sizeof(dtd->part2)); |
663 | if (status != SDVO_CMD_STATUS_SUCCESS) | 663 | if (status != SDVO_CMD_STATUS_SUCCESS) |
664 | return false; | 664 | return false; |
@@ -666,60 +666,60 @@ static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, | |||
666 | return true; | 666 | return true; |
667 | } | 667 | } |
668 | 668 | ||
669 | static bool intel_sdvo_get_input_timing(struct intel_output *intel_output, | 669 | static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder, |
670 | struct intel_sdvo_dtd *dtd) | 670 | struct intel_sdvo_dtd *dtd) |
671 | { | 671 | { |
672 | return intel_sdvo_get_timing(intel_output, | 672 | return intel_sdvo_get_timing(intel_encoder, |
673 | SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); | 673 | SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); |
674 | } | 674 | } |
675 | 675 | ||
676 | static bool intel_sdvo_get_output_timing(struct intel_output *intel_output, | 676 | static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder, |
677 | struct intel_sdvo_dtd *dtd) | 677 | struct intel_sdvo_dtd *dtd) |
678 | { | 678 | { |
679 | return intel_sdvo_get_timing(intel_output, | 679 | return intel_sdvo_get_timing(intel_encoder, |
680 | SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); | 680 | SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); |
681 | } | 681 | } |
682 | 682 | ||
683 | static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd, | 683 | static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, |
684 | struct intel_sdvo_dtd *dtd) | 684 | struct intel_sdvo_dtd *dtd) |
685 | { | 685 | { |
686 | u8 status; | 686 | u8 status; |
687 | 687 | ||
688 | intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); | 688 | intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1)); |
689 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 689 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
690 | if (status != SDVO_CMD_STATUS_SUCCESS) | 690 | if (status != SDVO_CMD_STATUS_SUCCESS) |
691 | return false; | 691 | return false; |
692 | 692 | ||
693 | intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); | 693 | intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2)); |
694 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 694 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
695 | if (status != SDVO_CMD_STATUS_SUCCESS) | 695 | if (status != SDVO_CMD_STATUS_SUCCESS) |
696 | return false; | 696 | return false; |
697 | 697 | ||
698 | return true; | 698 | return true; |
699 | } | 699 | } |
700 | 700 | ||
701 | static bool intel_sdvo_set_input_timing(struct intel_output *intel_output, | 701 | static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder, |
702 | struct intel_sdvo_dtd *dtd) | 702 | struct intel_sdvo_dtd *dtd) |
703 | { | 703 | { |
704 | return intel_sdvo_set_timing(intel_output, | 704 | return intel_sdvo_set_timing(intel_encoder, |
705 | SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); | 705 | SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); |
706 | } | 706 | } |
707 | 707 | ||
708 | static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, | 708 | static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder, |
709 | struct intel_sdvo_dtd *dtd) | 709 | struct intel_sdvo_dtd *dtd) |
710 | { | 710 | { |
711 | return intel_sdvo_set_timing(intel_output, | 711 | return intel_sdvo_set_timing(intel_encoder, |
712 | SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); | 712 | SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); |
713 | } | 713 | } |
714 | 714 | ||
715 | static bool | 715 | static bool |
716 | intel_sdvo_create_preferred_input_timing(struct intel_output *output, | 716 | intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder, |
717 | uint16_t clock, | 717 | uint16_t clock, |
718 | uint16_t width, | 718 | uint16_t width, |
719 | uint16_t height) | 719 | uint16_t height) |
720 | { | 720 | { |
721 | struct intel_sdvo_preferred_input_timing_args args; | 721 | struct intel_sdvo_preferred_input_timing_args args; |
722 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 722 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
723 | uint8_t status; | 723 | uint8_t status; |
724 | 724 | ||
725 | memset(&args, 0, sizeof(args)); | 725 | memset(&args, 0, sizeof(args)); |
@@ -733,32 +733,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, | |||
733 | sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) | 733 | sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) |
734 | args.scaled = 1; | 734 | args.scaled = 1; |
735 | 735 | ||
736 | intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, | 736 | intel_sdvo_write_cmd(intel_encoder, |
737 | SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, | ||
737 | &args, sizeof(args)); | 738 | &args, sizeof(args)); |
738 | status = intel_sdvo_read_response(output, NULL, 0); | 739 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
739 | if (status != SDVO_CMD_STATUS_SUCCESS) | 740 | if (status != SDVO_CMD_STATUS_SUCCESS) |
740 | return false; | 741 | return false; |
741 | 742 | ||
742 | return true; | 743 | return true; |
743 | } | 744 | } |
744 | 745 | ||
745 | static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, | 746 | static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder, |
746 | struct intel_sdvo_dtd *dtd) | 747 | struct intel_sdvo_dtd *dtd) |
747 | { | 748 | { |
748 | bool status; | 749 | bool status; |
749 | 750 | ||
750 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, | 751 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, |
751 | NULL, 0); | 752 | NULL, 0); |
752 | 753 | ||
753 | status = intel_sdvo_read_response(output, &dtd->part1, | 754 | status = intel_sdvo_read_response(intel_encoder, &dtd->part1, |
754 | sizeof(dtd->part1)); | 755 | sizeof(dtd->part1)); |
755 | if (status != SDVO_CMD_STATUS_SUCCESS) | 756 | if (status != SDVO_CMD_STATUS_SUCCESS) |
756 | return false; | 757 | return false; |
757 | 758 | ||
758 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, | 759 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, |
759 | NULL, 0); | 760 | NULL, 0); |
760 | 761 | ||
761 | status = intel_sdvo_read_response(output, &dtd->part2, | 762 | status = intel_sdvo_read_response(intel_encoder, &dtd->part2, |
762 | sizeof(dtd->part2)); | 763 | sizeof(dtd->part2)); |
763 | if (status != SDVO_CMD_STATUS_SUCCESS) | 764 | if (status != SDVO_CMD_STATUS_SUCCESS) |
764 | return false; | 765 | return false; |
@@ -766,12 +767,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, | |||
766 | return false; | 767 | return false; |
767 | } | 768 | } |
768 | 769 | ||
769 | static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) | 770 | static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder) |
770 | { | 771 | { |
771 | u8 response, status; | 772 | u8 response, status; |
772 | 773 | ||
773 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); | 774 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); |
774 | status = intel_sdvo_read_response(intel_output, &response, 1); | 775 | status = intel_sdvo_read_response(intel_encoder, &response, 1); |
775 | 776 | ||
776 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 777 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
777 | DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); | 778 | DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); |
@@ -783,12 +784,12 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) | |||
783 | return response; | 784 | return response; |
784 | } | 785 | } |
785 | 786 | ||
786 | static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val) | 787 | static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) |
787 | { | 788 | { |
788 | u8 status; | 789 | u8 status; |
789 | 790 | ||
790 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); | 791 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); |
791 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 792 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
792 | if (status != SDVO_CMD_STATUS_SUCCESS) | 793 | if (status != SDVO_CMD_STATUS_SUCCESS) |
793 | return false; | 794 | return false; |
794 | 795 | ||
@@ -877,13 +878,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, | |||
877 | mode->flags |= DRM_MODE_FLAG_PVSYNC; | 878 | mode->flags |= DRM_MODE_FLAG_PVSYNC; |
878 | } | 879 | } |
879 | 880 | ||
880 | static bool intel_sdvo_get_supp_encode(struct intel_output *output, | 881 | static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder, |
881 | struct intel_sdvo_encode *encode) | 882 | struct intel_sdvo_encode *encode) |
882 | { | 883 | { |
883 | uint8_t status; | 884 | uint8_t status; |
884 | 885 | ||
885 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); | 886 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); |
886 | status = intel_sdvo_read_response(output, encode, sizeof(*encode)); | 887 | status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode)); |
887 | if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ | 888 | if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ |
888 | memset(encode, 0, sizeof(*encode)); | 889 | memset(encode, 0, sizeof(*encode)); |
889 | return false; | 890 | return false; |
@@ -892,29 +893,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output, | |||
892 | return true; | 893 | return true; |
893 | } | 894 | } |
894 | 895 | ||
895 | static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode) | 896 | static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder, |
897 | uint8_t mode) | ||
896 | { | 898 | { |
897 | uint8_t status; | 899 | uint8_t status; |
898 | 900 | ||
899 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1); | 901 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1); |
900 | status = intel_sdvo_read_response(output, NULL, 0); | 902 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
901 | 903 | ||
902 | return (status == SDVO_CMD_STATUS_SUCCESS); | 904 | return (status == SDVO_CMD_STATUS_SUCCESS); |
903 | } | 905 | } |
904 | 906 | ||
905 | static bool intel_sdvo_set_colorimetry(struct intel_output *output, | 907 | static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder, |
906 | uint8_t mode) | 908 | uint8_t mode) |
907 | { | 909 | { |
908 | uint8_t status; | 910 | uint8_t status; |
909 | 911 | ||
910 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1); | 912 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1); |
911 | status = intel_sdvo_read_response(output, NULL, 0); | 913 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
912 | 914 | ||
913 | return (status == SDVO_CMD_STATUS_SUCCESS); | 915 | return (status == SDVO_CMD_STATUS_SUCCESS); |
914 | } | 916 | } |
915 | 917 | ||
916 | #if 0 | 918 | #if 0 |
917 | static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) | 919 | static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) |
918 | { | 920 | { |
919 | int i, j; | 921 | int i, j; |
920 | uint8_t set_buf_index[2]; | 922 | uint8_t set_buf_index[2]; |
@@ -923,43 +925,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) | |||
923 | uint8_t buf[48]; | 925 | uint8_t buf[48]; |
924 | uint8_t *pos; | 926 | uint8_t *pos; |
925 | 927 | ||
926 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); | 928 | intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); |
927 | intel_sdvo_read_response(output, &av_split, 1); | 929 | intel_sdvo_read_response(encoder, &av_split, 1); |
928 | 930 | ||
929 | for (i = 0; i <= av_split; i++) { | 931 | for (i = 0; i <= av_split; i++) { |
930 | set_buf_index[0] = i; set_buf_index[1] = 0; | 932 | set_buf_index[0] = i; set_buf_index[1] = 0; |
931 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, | 933 | intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX, |
932 | set_buf_index, 2); | 934 | set_buf_index, 2); |
933 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0); | 935 | intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0); |
934 | intel_sdvo_read_response(output, &buf_size, 1); | 936 | intel_sdvo_read_response(encoder, &buf_size, 1); |
935 | 937 | ||
936 | pos = buf; | 938 | pos = buf; |
937 | for (j = 0; j <= buf_size; j += 8) { | 939 | for (j = 0; j <= buf_size; j += 8) { |
938 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA, | 940 | intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA, |
939 | NULL, 0); | 941 | NULL, 0); |
940 | intel_sdvo_read_response(output, pos, 8); | 942 | intel_sdvo_read_response(encoder, pos, 8); |
941 | pos += 8; | 943 | pos += 8; |
942 | } | 944 | } |
943 | } | 945 | } |
944 | } | 946 | } |
945 | #endif | 947 | #endif |
946 | 948 | ||
947 | static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index, | 949 | static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder, |
948 | uint8_t *data, int8_t size, uint8_t tx_rate) | 950 | int index, |
951 | uint8_t *data, int8_t size, uint8_t tx_rate) | ||
949 | { | 952 | { |
950 | uint8_t set_buf_index[2]; | 953 | uint8_t set_buf_index[2]; |
951 | 954 | ||
952 | set_buf_index[0] = index; | 955 | set_buf_index[0] = index; |
953 | set_buf_index[1] = 0; | 956 | set_buf_index[1] = 0; |
954 | 957 | ||
955 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); | 958 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX, |
959 | set_buf_index, 2); | ||
956 | 960 | ||
957 | for (; size > 0; size -= 8) { | 961 | for (; size > 0; size -= 8) { |
958 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8); | 962 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8); |
959 | data += 8; | 963 | data += 8; |
960 | } | 964 | } |
961 | 965 | ||
962 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); | 966 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); |
963 | } | 967 | } |
964 | 968 | ||
965 | static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) | 969 | static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) |
@@ -1034,7 +1038,7 @@ struct dip_infoframe { | |||
1034 | } __attribute__ ((packed)) u; | 1038 | } __attribute__ ((packed)) u; |
1035 | } __attribute__((packed)); | 1039 | } __attribute__((packed)); |
1036 | 1040 | ||
1037 | static void intel_sdvo_set_avi_infoframe(struct intel_output *output, | 1041 | static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder, |
1038 | struct drm_display_mode * mode) | 1042 | struct drm_display_mode * mode) |
1039 | { | 1043 | { |
1040 | struct dip_infoframe avi_if = { | 1044 | struct dip_infoframe avi_if = { |
@@ -1045,15 +1049,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output, | |||
1045 | 1049 | ||
1046 | avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, | 1050 | avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, |
1047 | 4 + avi_if.len); | 1051 | 4 + avi_if.len); |
1048 | intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len, | 1052 | intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if, |
1053 | 4 + avi_if.len, | ||
1049 | SDVO_HBUF_TX_VSYNC); | 1054 | SDVO_HBUF_TX_VSYNC); |
1050 | } | 1055 | } |
1051 | 1056 | ||
1052 | static void intel_sdvo_set_tv_format(struct intel_output *output) | 1057 | static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) |
1053 | { | 1058 | { |
1054 | 1059 | ||
1055 | struct intel_sdvo_tv_format format; | 1060 | struct intel_sdvo_tv_format format; |
1056 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1061 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1057 | uint32_t format_map, i; | 1062 | uint32_t format_map, i; |
1058 | uint8_t status; | 1063 | uint8_t status; |
1059 | 1064 | ||
@@ -1066,10 +1071,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output) | |||
1066 | memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? | 1071 | memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? |
1067 | sizeof(format) : sizeof(format_map)); | 1072 | sizeof(format) : sizeof(format_map)); |
1068 | 1073 | ||
1069 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map, | 1074 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map, |
1070 | sizeof(format)); | 1075 | sizeof(format)); |
1071 | 1076 | ||
1072 | status = intel_sdvo_read_response(output, NULL, 0); | 1077 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
1073 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1078 | if (status != SDVO_CMD_STATUS_SUCCESS) |
1074 | DRM_DEBUG_KMS("%s: Failed to set TV format\n", | 1079 | DRM_DEBUG_KMS("%s: Failed to set TV format\n", |
1075 | SDVO_NAME(sdvo_priv)); | 1080 | SDVO_NAME(sdvo_priv)); |
@@ -1079,8 +1084,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1079 | struct drm_display_mode *mode, | 1084 | struct drm_display_mode *mode, |
1080 | struct drm_display_mode *adjusted_mode) | 1085 | struct drm_display_mode *adjusted_mode) |
1081 | { | 1086 | { |
1082 | struct intel_output *output = enc_to_intel_output(encoder); | 1087 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1083 | struct intel_sdvo_priv *dev_priv = output->dev_priv; | 1088 | struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv; |
1084 | 1089 | ||
1085 | if (dev_priv->is_tv) { | 1090 | if (dev_priv->is_tv) { |
1086 | struct intel_sdvo_dtd output_dtd; | 1091 | struct intel_sdvo_dtd output_dtd; |
@@ -1095,22 +1100,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1095 | 1100 | ||
1096 | /* Set output timings */ | 1101 | /* Set output timings */ |
1097 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); | 1102 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); |
1098 | intel_sdvo_set_target_output(output, | 1103 | intel_sdvo_set_target_output(intel_encoder, |
1099 | dev_priv->controlled_output); | 1104 | dev_priv->controlled_output); |
1100 | intel_sdvo_set_output_timing(output, &output_dtd); | 1105 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); |
1101 | 1106 | ||
1102 | /* Set the input timing to the screen. Assume always input 0. */ | 1107 | /* Set the input timing to the screen. Assume always input 0. */ |
1103 | intel_sdvo_set_target_input(output, true, false); | 1108 | intel_sdvo_set_target_input(intel_encoder, true, false); |
1104 | 1109 | ||
1105 | 1110 | ||
1106 | success = intel_sdvo_create_preferred_input_timing(output, | 1111 | success = intel_sdvo_create_preferred_input_timing(intel_encoder, |
1107 | mode->clock / 10, | 1112 | mode->clock / 10, |
1108 | mode->hdisplay, | 1113 | mode->hdisplay, |
1109 | mode->vdisplay); | 1114 | mode->vdisplay); |
1110 | if (success) { | 1115 | if (success) { |
1111 | struct intel_sdvo_dtd input_dtd; | 1116 | struct intel_sdvo_dtd input_dtd; |
1112 | 1117 | ||
1113 | intel_sdvo_get_preferred_input_timing(output, | 1118 | intel_sdvo_get_preferred_input_timing(intel_encoder, |
1114 | &input_dtd); | 1119 | &input_dtd); |
1115 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | 1120 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); |
1116 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; | 1121 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; |
@@ -1133,16 +1138,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1133 | intel_sdvo_get_dtd_from_mode(&output_dtd, | 1138 | intel_sdvo_get_dtd_from_mode(&output_dtd, |
1134 | dev_priv->sdvo_lvds_fixed_mode); | 1139 | dev_priv->sdvo_lvds_fixed_mode); |
1135 | 1140 | ||
1136 | intel_sdvo_set_target_output(output, | 1141 | intel_sdvo_set_target_output(intel_encoder, |
1137 | dev_priv->controlled_output); | 1142 | dev_priv->controlled_output); |
1138 | intel_sdvo_set_output_timing(output, &output_dtd); | 1143 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); |
1139 | 1144 | ||
1140 | /* Set the input timing to the screen. Assume always input 0. */ | 1145 | /* Set the input timing to the screen. Assume always input 0. */ |
1141 | intel_sdvo_set_target_input(output, true, false); | 1146 | intel_sdvo_set_target_input(intel_encoder, true, false); |
1142 | 1147 | ||
1143 | 1148 | ||
1144 | success = intel_sdvo_create_preferred_input_timing( | 1149 | success = intel_sdvo_create_preferred_input_timing( |
1145 | output, | 1150 | intel_encoder, |
1146 | mode->clock / 10, | 1151 | mode->clock / 10, |
1147 | mode->hdisplay, | 1152 | mode->hdisplay, |
1148 | mode->vdisplay); | 1153 | mode->vdisplay); |
@@ -1150,7 +1155,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1150 | if (success) { | 1155 | if (success) { |
1151 | struct intel_sdvo_dtd input_dtd; | 1156 | struct intel_sdvo_dtd input_dtd; |
1152 | 1157 | ||
1153 | intel_sdvo_get_preferred_input_timing(output, | 1158 | intel_sdvo_get_preferred_input_timing(intel_encoder, |
1154 | &input_dtd); | 1159 | &input_dtd); |
1155 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | 1160 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); |
1156 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; | 1161 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; |
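
For the TV and LVDS paths, the mode_fixup hunks above follow one sequence: translate the requested (or fixed panel) mode into a DTD, program it as the output timing for the controlled output, aim subsequent commands at input 0, ask the device to create a preferred input timing for the requested clock and resolution, then read that timing back and fold it into adjusted_mode. Condensed into a single helper and stripped of the rename churn, the flow looks roughly like this; the prototypes mirror the signatures visible in the diff, it assumes the driver's internal headers, and error handling is deliberately elided, so treat it as illustration rather than the driver's actual control flow.

/*
 * Condensed sketch of the TV/LVDS branch of intel_sdvo_mode_fixup() above.
 * Helper names and argument shapes mirror the diff; error handling and the
 * LVDS scaling special case are omitted for brevity.
 */
static bool sdvo_fixup_via_scaler(struct intel_encoder *intel_encoder,
				  struct intel_sdvo_priv *dev_priv,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct intel_sdvo_dtd output_dtd, input_dtd;

	/* 1. Program the output side with the mode we want on the wire. */
	intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
	intel_sdvo_set_target_output(intel_encoder, dev_priv->controlled_output);
	intel_sdvo_set_output_timing(intel_encoder, &output_dtd);

	/* 2. Point the following commands at input 0. */
	intel_sdvo_set_target_input(intel_encoder, true, false);

	/* 3. Let the SDVO device pick an input timing it can scale from. */
	if (!intel_sdvo_create_preferred_input_timing(intel_encoder,
						      mode->clock / 10,
						      mode->hdisplay,
						      mode->vdisplay))
		return false;

	/* 4. Read it back and hand it to the CRTC as the adjusted mode. */
	intel_sdvo_get_preferred_input_timing(intel_encoder, &input_dtd);
	intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
	dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;

	return true;
}
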
@@ -1182,8 +1187,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1182 | struct drm_i915_private *dev_priv = dev->dev_private; | 1187 | struct drm_i915_private *dev_priv = dev->dev_private; |
1183 | struct drm_crtc *crtc = encoder->crtc; | 1188 | struct drm_crtc *crtc = encoder->crtc; |
1184 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1189 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1185 | struct intel_output *output = enc_to_intel_output(encoder); | 1190 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1186 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1191 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1187 | u32 sdvox = 0; | 1192 | u32 sdvox = 0; |
1188 | int sdvo_pixel_multiply; | 1193 | int sdvo_pixel_multiply; |
1189 | struct intel_sdvo_in_out_map in_out; | 1194 | struct intel_sdvo_in_out_map in_out; |
@@ -1202,12 +1207,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1202 | in_out.in0 = sdvo_priv->controlled_output; | 1207 | in_out.in0 = sdvo_priv->controlled_output; |
1203 | in_out.in1 = 0; | 1208 | in_out.in1 = 0; |
1204 | 1209 | ||
1205 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, | 1210 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, |
1206 | &in_out, sizeof(in_out)); | 1211 | &in_out, sizeof(in_out)); |
1207 | status = intel_sdvo_read_response(output, NULL, 0); | 1212 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
1208 | 1213 | ||
1209 | if (sdvo_priv->is_hdmi) { | 1214 | if (sdvo_priv->is_hdmi) { |
1210 | intel_sdvo_set_avi_infoframe(output, mode); | 1215 | intel_sdvo_set_avi_infoframe(intel_encoder, mode); |
1211 | sdvox |= SDVO_AUDIO_ENABLE; | 1216 | sdvox |= SDVO_AUDIO_ENABLE; |
1212 | } | 1217 | } |
1213 | 1218 | ||
@@ -1224,16 +1229,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1224 | */ | 1229 | */ |
1225 | if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { | 1230 | if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { |
1226 | /* Set the output timing to the screen */ | 1231 | /* Set the output timing to the screen */ |
1227 | intel_sdvo_set_target_output(output, | 1232 | intel_sdvo_set_target_output(intel_encoder, |
1228 | sdvo_priv->controlled_output); | 1233 | sdvo_priv->controlled_output); |
1229 | intel_sdvo_set_output_timing(output, &input_dtd); | 1234 | intel_sdvo_set_output_timing(intel_encoder, &input_dtd); |
1230 | } | 1235 | } |
1231 | 1236 | ||
1232 | /* Set the input timing to the screen. Assume always input 0. */ | 1237 | /* Set the input timing to the screen. Assume always input 0. */ |
1233 | intel_sdvo_set_target_input(output, true, false); | 1238 | intel_sdvo_set_target_input(intel_encoder, true, false); |
1234 | 1239 | ||
1235 | if (sdvo_priv->is_tv) | 1240 | if (sdvo_priv->is_tv) |
1236 | intel_sdvo_set_tv_format(output); | 1241 | intel_sdvo_set_tv_format(intel_encoder); |
1237 | 1242 | ||
1238 | /* We would like to use intel_sdvo_create_preferred_input_timing() to | 1243 | /* We would like to use intel_sdvo_create_preferred_input_timing() to |
1239 | * provide the device with a timing it can support, if it supports that | 1244 | * provide the device with a timing it can support, if it supports that |
@@ -1241,29 +1246,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1241 | * output the preferred timing, and we don't support that currently. | 1246 | * output the preferred timing, and we don't support that currently. |
1242 | */ | 1247 | */ |
1243 | #if 0 | 1248 | #if 0 |
1244 | success = intel_sdvo_create_preferred_input_timing(output, clock, | 1249 | success = intel_sdvo_create_preferred_input_timing(encoder, clock, |
1245 | width, height); | 1250 | width, height); |
1246 | if (success) { | 1251 | if (success) { |
1247 | struct intel_sdvo_dtd *input_dtd; | 1252 | struct intel_sdvo_dtd *input_dtd; |
1248 | 1253 | ||
1249 | intel_sdvo_get_preferred_input_timing(output, &input_dtd); | 1254 | intel_sdvo_get_preferred_input_timing(encoder, &input_dtd); |
1250 | intel_sdvo_set_input_timing(output, &input_dtd); | 1255 | intel_sdvo_set_input_timing(encoder, &input_dtd); |
1251 | } | 1256 | } |
1252 | #else | 1257 | #else |
1253 | intel_sdvo_set_input_timing(output, &input_dtd); | 1258 | intel_sdvo_set_input_timing(intel_encoder, &input_dtd); |
1254 | #endif | 1259 | #endif |
1255 | 1260 | ||
1256 | switch (intel_sdvo_get_pixel_multiplier(mode)) { | 1261 | switch (intel_sdvo_get_pixel_multiplier(mode)) { |
1257 | case 1: | 1262 | case 1: |
1258 | intel_sdvo_set_clock_rate_mult(output, | 1263 | intel_sdvo_set_clock_rate_mult(intel_encoder, |
1259 | SDVO_CLOCK_RATE_MULT_1X); | 1264 | SDVO_CLOCK_RATE_MULT_1X); |
1260 | break; | 1265 | break; |
1261 | case 2: | 1266 | case 2: |
1262 | intel_sdvo_set_clock_rate_mult(output, | 1267 | intel_sdvo_set_clock_rate_mult(intel_encoder, |
1263 | SDVO_CLOCK_RATE_MULT_2X); | 1268 | SDVO_CLOCK_RATE_MULT_2X); |
1264 | break; | 1269 | break; |
1265 | case 4: | 1270 | case 4: |
1266 | intel_sdvo_set_clock_rate_mult(output, | 1271 | intel_sdvo_set_clock_rate_mult(intel_encoder, |
1267 | SDVO_CLOCK_RATE_MULT_4X); | 1272 | SDVO_CLOCK_RATE_MULT_4X); |
1268 | break; | 1273 | break; |
1269 | } | 1274 | } |
@@ -1274,8 +1279,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1274 | SDVO_VSYNC_ACTIVE_HIGH | | 1279 | SDVO_VSYNC_ACTIVE_HIGH | |
1275 | SDVO_HSYNC_ACTIVE_HIGH; | 1280 | SDVO_HSYNC_ACTIVE_HIGH; |
1276 | } else { | 1281 | } else { |
1277 | sdvox |= I915_READ(sdvo_priv->output_device); | 1282 | sdvox |= I915_READ(sdvo_priv->sdvo_reg); |
1278 | switch (sdvo_priv->output_device) { | 1283 | switch (sdvo_priv->sdvo_reg) { |
1279 | case SDVOB: | 1284 | case SDVOB: |
1280 | sdvox &= SDVOB_PRESERVE_MASK; | 1285 | sdvox &= SDVOB_PRESERVE_MASK; |
1281 | break; | 1286 | break; |
@@ -1299,26 +1304,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1299 | 1304 | ||
1300 | if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) | 1305 | if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) |
1301 | sdvox |= SDVO_STALL_SELECT; | 1306 | sdvox |= SDVO_STALL_SELECT; |
1302 | intel_sdvo_write_sdvox(output, sdvox); | 1307 | intel_sdvo_write_sdvox(intel_encoder, sdvox); |
1303 | } | 1308 | } |
1304 | 1309 | ||
1305 | static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | 1310 | static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) |
1306 | { | 1311 | { |
1307 | struct drm_device *dev = encoder->dev; | 1312 | struct drm_device *dev = encoder->dev; |
1308 | struct drm_i915_private *dev_priv = dev->dev_private; | 1313 | struct drm_i915_private *dev_priv = dev->dev_private; |
1309 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 1314 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1310 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1315 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1311 | u32 temp; | 1316 | u32 temp; |
1312 | 1317 | ||
1313 | if (mode != DRM_MODE_DPMS_ON) { | 1318 | if (mode != DRM_MODE_DPMS_ON) { |
1314 | intel_sdvo_set_active_outputs(intel_output, 0); | 1319 | intel_sdvo_set_active_outputs(intel_encoder, 0); |
1315 | if (0) | 1320 | if (0) |
1316 | intel_sdvo_set_encoder_power_state(intel_output, mode); | 1321 | intel_sdvo_set_encoder_power_state(intel_encoder, mode); |
1317 | 1322 | ||
1318 | if (mode == DRM_MODE_DPMS_OFF) { | 1323 | if (mode == DRM_MODE_DPMS_OFF) { |
1319 | temp = I915_READ(sdvo_priv->output_device); | 1324 | temp = I915_READ(sdvo_priv->sdvo_reg); |
1320 | if ((temp & SDVO_ENABLE) != 0) { | 1325 | if ((temp & SDVO_ENABLE) != 0) { |
1321 | intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE); | 1326 | intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE); |
1322 | } | 1327 | } |
1323 | } | 1328 | } |
1324 | } else { | 1329 | } else { |
@@ -1326,13 +1331,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | |||
1326 | int i; | 1331 | int i; |
1327 | u8 status; | 1332 | u8 status; |
1328 | 1333 | ||
1329 | temp = I915_READ(sdvo_priv->output_device); | 1334 | temp = I915_READ(sdvo_priv->sdvo_reg); |
1330 | if ((temp & SDVO_ENABLE) == 0) | 1335 | if ((temp & SDVO_ENABLE) == 0) |
1331 | intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE); | 1336 | intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE); |
1332 | for (i = 0; i < 2; i++) | 1337 | for (i = 0; i < 2; i++) |
1333 | intel_wait_for_vblank(dev); | 1338 | intel_wait_for_vblank(dev); |
1334 | 1339 | ||
1335 | status = intel_sdvo_get_trained_inputs(intel_output, &input1, | 1340 | status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, |
1336 | &input2); | 1341 | &input2); |
1337 | 1342 | ||
1338 | 1343 | ||
@@ -1346,8 +1351,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | |||
1346 | } | 1351 | } |
1347 | 1352 | ||
1348 | if (0) | 1353 | if (0) |
1349 | intel_sdvo_set_encoder_power_state(intel_output, mode); | 1354 | intel_sdvo_set_encoder_power_state(intel_encoder, mode); |
1350 | intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output); | 1355 | intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output); |
1351 | } | 1356 | } |
1352 | return; | 1357 | return; |
1353 | } | 1358 | } |
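
The DPMS hunks above split into two halves: anything other than ON first deactivates all SDVO outputs and, for full OFF, clears SDVO_ENABLE in the port register; ON does the reverse, re-enabling the port, waiting two vblanks for input training, and finally re-activating the controlled output. Reduced to its skeleton (same helpers and fields as in the diff, with the debug print and the disabled set_encoder_power_state() call dropped), the ON path is:

/*
 * Skeleton of the DRM_MODE_DPMS_ON path of intel_sdvo_dpms() above.
 * Illustration only: the sync-failure debug message and the dead
 * power-state call are omitted.
 */
static void sdvo_dpms_on(struct drm_device *dev,
			 struct intel_encoder *intel_encoder,
			 struct intel_sdvo_priv *sdvo_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool input1, input2;
	u32 temp;
	int i;

	/* Make sure the port itself is running before training inputs. */
	temp = I915_READ(sdvo_priv->sdvo_reg);
	if ((temp & SDVO_ENABLE) == 0)
		intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);

	/* Give the encoder time to train its inputs: wait two vblanks. */
	for (i = 0; i < 2; i++)
		intel_wait_for_vblank(dev);

	intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);

	/* Finally light up whichever output this connector controls. */
	intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
}
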
@@ -1356,22 +1361,22 @@ static void intel_sdvo_save(struct drm_connector *connector) | |||
1356 | { | 1361 | { |
1357 | struct drm_device *dev = connector->dev; | 1362 | struct drm_device *dev = connector->dev; |
1358 | struct drm_i915_private *dev_priv = dev->dev_private; | 1363 | struct drm_i915_private *dev_priv = dev->dev_private; |
1359 | struct intel_output *intel_output = to_intel_output(connector); | 1364 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1360 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1365 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1361 | int o; | 1366 | int o; |
1362 | 1367 | ||
1363 | sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output); | 1368 | sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder); |
1364 | intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs); | 1369 | intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs); |
1365 | 1370 | ||
1366 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { | 1371 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { |
1367 | intel_sdvo_set_target_input(intel_output, true, false); | 1372 | intel_sdvo_set_target_input(intel_encoder, true, false); |
1368 | intel_sdvo_get_input_timing(intel_output, | 1373 | intel_sdvo_get_input_timing(intel_encoder, |
1369 | &sdvo_priv->save_input_dtd_1); | 1374 | &sdvo_priv->save_input_dtd_1); |
1370 | } | 1375 | } |
1371 | 1376 | ||
1372 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { | 1377 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { |
1373 | intel_sdvo_set_target_input(intel_output, false, true); | 1378 | intel_sdvo_set_target_input(intel_encoder, false, true); |
1374 | intel_sdvo_get_input_timing(intel_output, | 1379 | intel_sdvo_get_input_timing(intel_encoder, |
1375 | &sdvo_priv->save_input_dtd_2); | 1380 | &sdvo_priv->save_input_dtd_2); |
1376 | } | 1381 | } |
1377 | 1382 | ||
@@ -1380,8 +1385,8 @@ static void intel_sdvo_save(struct drm_connector *connector) | |||
1380 | u16 this_output = (1 << o); | 1385 | u16 this_output = (1 << o); |
1381 | if (sdvo_priv->caps.output_flags & this_output) | 1386 | if (sdvo_priv->caps.output_flags & this_output) |
1382 | { | 1387 | { |
1383 | intel_sdvo_set_target_output(intel_output, this_output); | 1388 | intel_sdvo_set_target_output(intel_encoder, this_output); |
1384 | intel_sdvo_get_output_timing(intel_output, | 1389 | intel_sdvo_get_output_timing(intel_encoder, |
1385 | &sdvo_priv->save_output_dtd[o]); | 1390 | &sdvo_priv->save_output_dtd[o]); |
1386 | } | 1391 | } |
1387 | } | 1392 | } |
@@ -1389,66 +1394,66 @@ static void intel_sdvo_save(struct drm_connector *connector) | |||
1389 | /* XXX: Save TV format/enhancements. */ | 1394 | /* XXX: Save TV format/enhancements. */ |
1390 | } | 1395 | } |
1391 | 1396 | ||
1392 | sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); | 1397 | sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg); |
1393 | } | 1398 | } |
1394 | 1399 | ||
1395 | static void intel_sdvo_restore(struct drm_connector *connector) | 1400 | static void intel_sdvo_restore(struct drm_connector *connector) |
1396 | { | 1401 | { |
1397 | struct drm_device *dev = connector->dev; | 1402 | struct drm_device *dev = connector->dev; |
1398 | struct intel_output *intel_output = to_intel_output(connector); | 1403 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1399 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1404 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1400 | int o; | 1405 | int o; |
1401 | int i; | 1406 | int i; |
1402 | bool input1, input2; | 1407 | bool input1, input2; |
1403 | u8 status; | 1408 | u8 status; |
1404 | 1409 | ||
1405 | intel_sdvo_set_active_outputs(intel_output, 0); | 1410 | intel_sdvo_set_active_outputs(intel_encoder, 0); |
1406 | 1411 | ||
1407 | for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) | 1412 | for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) |
1408 | { | 1413 | { |
1409 | u16 this_output = (1 << o); | 1414 | u16 this_output = (1 << o); |
1410 | if (sdvo_priv->caps.output_flags & this_output) { | 1415 | if (sdvo_priv->caps.output_flags & this_output) { |
1411 | intel_sdvo_set_target_output(intel_output, this_output); | 1416 | intel_sdvo_set_target_output(intel_encoder, this_output); |
1412 | intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]); | 1417 | intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]); |
1413 | } | 1418 | } |
1414 | } | 1419 | } |
1415 | 1420 | ||
1416 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { | 1421 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { |
1417 | intel_sdvo_set_target_input(intel_output, true, false); | 1422 | intel_sdvo_set_target_input(intel_encoder, true, false); |
1418 | intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1); | 1423 | intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1); |
1419 | } | 1424 | } |
1420 | 1425 | ||
1421 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { | 1426 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { |
1422 | intel_sdvo_set_target_input(intel_output, false, true); | 1427 | intel_sdvo_set_target_input(intel_encoder, false, true); |
1423 | intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2); | 1428 | intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2); |
1424 | } | 1429 | } |
1425 | 1430 | ||
1426 | intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); | 1431 | intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult); |
1427 | 1432 | ||
1428 | if (sdvo_priv->is_tv) { | 1433 | if (sdvo_priv->is_tv) { |
1429 | /* XXX: Restore TV format/enhancements. */ | 1434 | /* XXX: Restore TV format/enhancements. */ |
1430 | } | 1435 | } |
1431 | 1436 | ||
1432 | intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX); | 1437 | intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX); |
1433 | 1438 | ||
1434 | if (sdvo_priv->save_SDVOX & SDVO_ENABLE) | 1439 | if (sdvo_priv->save_SDVOX & SDVO_ENABLE) |
1435 | { | 1440 | { |
1436 | for (i = 0; i < 2; i++) | 1441 | for (i = 0; i < 2; i++) |
1437 | intel_wait_for_vblank(dev); | 1442 | intel_wait_for_vblank(dev); |
1438 | status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); | 1443 | status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2); |
1439 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) | 1444 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) |
1440 | DRM_DEBUG_KMS("First %s output reported failure to " | 1445 | DRM_DEBUG_KMS("First %s output reported failure to " |
1441 | "sync\n", SDVO_NAME(sdvo_priv)); | 1446 | "sync\n", SDVO_NAME(sdvo_priv)); |
1442 | } | 1447 | } |
1443 | 1448 | ||
1444 | intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); | 1449 | intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs); |
1445 | } | 1450 | } |
1446 | 1451 | ||
1447 | static int intel_sdvo_mode_valid(struct drm_connector *connector, | 1452 | static int intel_sdvo_mode_valid(struct drm_connector *connector, |
1448 | struct drm_display_mode *mode) | 1453 | struct drm_display_mode *mode) |
1449 | { | 1454 | { |
1450 | struct intel_output *intel_output = to_intel_output(connector); | 1455 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1451 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1456 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1452 | 1457 | ||
1453 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 1458 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
1454 | return MODE_NO_DBLESCAN; | 1459 | return MODE_NO_DBLESCAN; |
@@ -1473,12 +1478,12 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, | |||
1473 | return MODE_OK; | 1478 | return MODE_OK; |
1474 | } | 1479 | } |
1475 | 1480 | ||
1476 | static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps) | 1481 | static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps) |
1477 | { | 1482 | { |
1478 | u8 status; | 1483 | u8 status; |
1479 | 1484 | ||
1480 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); | 1485 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); |
1481 | status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps)); | 1486 | status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps)); |
1482 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1487 | if (status != SDVO_CMD_STATUS_SUCCESS) |
1483 | return false; | 1488 | return false; |
1484 | 1489 | ||
@@ -1488,22 +1493,22 @@ static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struc | |||
1488 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) | 1493 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) |
1489 | { | 1494 | { |
1490 | struct drm_connector *connector = NULL; | 1495 | struct drm_connector *connector = NULL; |
1491 | struct intel_output *iout = NULL; | 1496 | struct intel_encoder *iout = NULL; |
1492 | struct intel_sdvo_priv *sdvo; | 1497 | struct intel_sdvo_priv *sdvo; |
1493 | 1498 | ||
1494 | /* find the sdvo connector */ | 1499 | /* find the sdvo connector */ |
1495 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1500 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1496 | iout = to_intel_output(connector); | 1501 | iout = to_intel_encoder(connector); |
1497 | 1502 | ||
1498 | if (iout->type != INTEL_OUTPUT_SDVO) | 1503 | if (iout->type != INTEL_OUTPUT_SDVO) |
1499 | continue; | 1504 | continue; |
1500 | 1505 | ||
1501 | sdvo = iout->dev_priv; | 1506 | sdvo = iout->dev_priv; |
1502 | 1507 | ||
1503 | if (sdvo->output_device == SDVOB && sdvoB) | 1508 | if (sdvo->sdvo_reg == SDVOB && sdvoB) |
1504 | return connector; | 1509 | return connector; |
1505 | 1510 | ||
1506 | if (sdvo->output_device == SDVOC && !sdvoB) | 1511 | if (sdvo->sdvo_reg == SDVOC && !sdvoB) |
1507 | return connector; | 1512 | return connector; |
1508 | 1513 | ||
1509 | } | 1514 | } |
@@ -1515,16 +1520,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector) | |||
1515 | { | 1520 | { |
1516 | u8 response[2]; | 1521 | u8 response[2]; |
1517 | u8 status; | 1522 | u8 status; |
1518 | struct intel_output *intel_output; | 1523 | struct intel_encoder *intel_encoder; |
1519 | DRM_DEBUG_KMS("\n"); | 1524 | DRM_DEBUG_KMS("\n"); |
1520 | 1525 | ||
1521 | if (!connector) | 1526 | if (!connector) |
1522 | return 0; | 1527 | return 0; |
1523 | 1528 | ||
1524 | intel_output = to_intel_output(connector); | 1529 | intel_encoder = to_intel_encoder(connector); |
1525 | 1530 | ||
1526 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | 1531 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); |
1527 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1532 | status = intel_sdvo_read_response(intel_encoder, &response, 2); |
1528 | 1533 | ||
1529 | if (response[0] !=0) | 1534 | if (response[0] !=0) |
1530 | return 1; | 1535 | return 1; |
@@ -1536,30 +1541,30 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | |||
1536 | { | 1541 | { |
1537 | u8 response[2]; | 1542 | u8 response[2]; |
1538 | u8 status; | 1543 | u8 status; |
1539 | struct intel_output *intel_output = to_intel_output(connector); | 1544 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1540 | 1545 | ||
1541 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1546 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); |
1542 | intel_sdvo_read_response(intel_output, &response, 2); | 1547 | intel_sdvo_read_response(intel_encoder, &response, 2); |
1543 | 1548 | ||
1544 | if (on) { | 1549 | if (on) { |
1545 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | 1550 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); |
1546 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1551 | status = intel_sdvo_read_response(intel_encoder, &response, 2); |
1547 | 1552 | ||
1548 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | 1553 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); |
1549 | } else { | 1554 | } else { |
1550 | response[0] = 0; | 1555 | response[0] = 0; |
1551 | response[1] = 0; | 1556 | response[1] = 0; |
1552 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | 1557 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); |
1553 | } | 1558 | } |
1554 | 1559 | ||
1555 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1560 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); |
1556 | intel_sdvo_read_response(intel_output, &response, 2); | 1561 | intel_sdvo_read_response(intel_encoder, &response, 2); |
1557 | } | 1562 | } |
1558 | 1563 | ||
1559 | static bool | 1564 | static bool |
1560 | intel_sdvo_multifunc_encoder(struct intel_output *intel_output) | 1565 | intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) |
1561 | { | 1566 | { |
1562 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1567 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1563 | int caps = 0; | 1568 | int caps = 0; |
1564 | 1569 | ||
1565 | if (sdvo_priv->caps.output_flags & | 1570 | if (sdvo_priv->caps.output_flags & |
@@ -1593,11 +1598,11 @@ static struct drm_connector * | |||
1593 | intel_find_analog_connector(struct drm_device *dev) | 1598 | intel_find_analog_connector(struct drm_device *dev) |
1594 | { | 1599 | { |
1595 | struct drm_connector *connector; | 1600 | struct drm_connector *connector; |
1596 | struct intel_output *intel_output; | 1601 | struct intel_encoder *intel_encoder; |
1597 | 1602 | ||
1598 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1603 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1599 | intel_output = to_intel_output(connector); | 1604 | intel_encoder = to_intel_encoder(connector); |
1600 | if (intel_output->type == INTEL_OUTPUT_ANALOG) | 1605 | if (intel_encoder->type == INTEL_OUTPUT_ANALOG) |
1601 | return connector; | 1606 | return connector; |
1602 | } | 1607 | } |
1603 | return NULL; | 1608 | return NULL; |
@@ -1622,16 +1627,16 @@ intel_analog_is_connected(struct drm_device *dev) | |||
1622 | enum drm_connector_status | 1627 | enum drm_connector_status |
1623 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | 1628 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) |
1624 | { | 1629 | { |
1625 | struct intel_output *intel_output = to_intel_output(connector); | 1630 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1626 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1631 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1627 | enum drm_connector_status status = connector_status_connected; | 1632 | enum drm_connector_status status = connector_status_connected; |
1628 | struct edid *edid = NULL; | 1633 | struct edid *edid = NULL; |
1629 | 1634 | ||
1630 | edid = drm_get_edid(&intel_output->base, | 1635 | edid = drm_get_edid(&intel_encoder->base, |
1631 | intel_output->ddc_bus); | 1636 | intel_encoder->ddc_bus); |
1632 | 1637 | ||
1633 | /* This is only applied to SDVO cards with multiple outputs */ | 1638 | /* This is only applied to SDVO cards with multiple outputs */ |
1634 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { | 1639 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { |
1635 | uint8_t saved_ddc, temp_ddc; | 1640 | uint8_t saved_ddc, temp_ddc; |
1636 | saved_ddc = sdvo_priv->ddc_bus; | 1641 | saved_ddc = sdvo_priv->ddc_bus; |
1637 | temp_ddc = sdvo_priv->ddc_bus >> 1; | 1642 | temp_ddc = sdvo_priv->ddc_bus >> 1; |
@@ -1641,8 +1646,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
1641 | */ | 1646 | */ |
1642 | while(temp_ddc > 1) { | 1647 | while(temp_ddc > 1) { |
1643 | sdvo_priv->ddc_bus = temp_ddc; | 1648 | sdvo_priv->ddc_bus = temp_ddc; |
1644 | edid = drm_get_edid(&intel_output->base, | 1649 | edid = drm_get_edid(&intel_encoder->base, |
1645 | intel_output->ddc_bus); | 1650 | intel_encoder->ddc_bus); |
1646 | if (edid) { | 1651 | if (edid) { |
1647 | /* | 1652 | /* |
1648 | * When we can get the EDID, maybe it is the | 1653 | * When we can get the EDID, maybe it is the |
@@ -1661,8 +1666,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
1661 | */ | 1666 | */ |
1662 | if (edid == NULL && | 1667 | if (edid == NULL && |
1663 | sdvo_priv->analog_ddc_bus && | 1668 | sdvo_priv->analog_ddc_bus && |
1664 | !intel_analog_is_connected(intel_output->base.dev)) | 1669 | !intel_analog_is_connected(intel_encoder->base.dev)) |
1665 | edid = drm_get_edid(&intel_output->base, | 1670 | edid = drm_get_edid(&intel_encoder->base, |
1666 | sdvo_priv->analog_ddc_bus); | 1671 | sdvo_priv->analog_ddc_bus); |
1667 | if (edid != NULL) { | 1672 | if (edid != NULL) { |
1668 | /* Don't report the output as connected if it's a DVI-I | 1673 | /* Don't report the output as connected if it's a DVI-I |
@@ -1677,7 +1682,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
1677 | } | 1682 | } |
1678 | 1683 | ||
1679 | kfree(edid); | 1684 | kfree(edid); |
1680 | intel_output->base.display_info.raw_edid = NULL; | 1685 | intel_encoder->base.display_info.raw_edid = NULL; |
1681 | 1686 | ||
1682 | } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) | 1687 | } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) |
1683 | status = connector_status_disconnected; | 1688 | status = connector_status_disconnected; |
@@ -1689,16 +1694,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1689 | { | 1694 | { |
1690 | uint16_t response; | 1695 | uint16_t response; |
1691 | u8 status; | 1696 | u8 status; |
1692 | struct intel_output *intel_output = to_intel_output(connector); | 1697 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1693 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1698 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1694 | 1699 | ||
1695 | intel_sdvo_write_cmd(intel_output, | 1700 | intel_sdvo_write_cmd(intel_encoder, |
1696 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); | 1701 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); |
1697 | if (sdvo_priv->is_tv) { | 1702 | if (sdvo_priv->is_tv) { |
1698 | /* add 30ms delay when the output type is SDVO-TV */ | 1703 | /* add 30ms delay when the output type is SDVO-TV */ |
1699 | mdelay(30); | 1704 | mdelay(30); |
1700 | } | 1705 | } |
1701 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1706 | status = intel_sdvo_read_response(intel_encoder, &response, 2); |
1702 | 1707 | ||
1703 | DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); | 1708 | DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); |
1704 | 1709 | ||
@@ -1708,10 +1713,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1708 | if (response == 0) | 1713 | if (response == 0) |
1709 | return connector_status_disconnected; | 1714 | return connector_status_disconnected; |
1710 | 1715 | ||
1711 | if (intel_sdvo_multifunc_encoder(intel_output) && | 1716 | if (intel_sdvo_multifunc_encoder(intel_encoder) && |
1712 | sdvo_priv->attached_output != response) { | 1717 | sdvo_priv->attached_output != response) { |
1713 | if (sdvo_priv->controlled_output != response && | 1718 | if (sdvo_priv->controlled_output != response && |
1714 | intel_sdvo_output_setup(intel_output, response) != true) | 1719 | intel_sdvo_output_setup(intel_encoder, response) != true) |
1715 | return connector_status_unknown; | 1720 | return connector_status_unknown; |
1716 | sdvo_priv->attached_output = response; | 1721 | sdvo_priv->attached_output = response; |
1717 | } | 1722 | } |
@@ -1720,12 +1725,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1720 | 1725 | ||
1721 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | 1726 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) |
1722 | { | 1727 | { |
1723 | struct intel_output *intel_output = to_intel_output(connector); | 1728 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1724 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1729 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1725 | int num_modes; | 1730 | int num_modes; |
1726 | 1731 | ||
1727 | /* set the bus switch and get the modes */ | 1732 | /* set the bus switch and get the modes */ |
1728 | num_modes = intel_ddc_get_modes(intel_output); | 1733 | num_modes = intel_ddc_get_modes(intel_encoder); |
1729 | 1734 | ||
1730 | /* | 1735 | /* |
1731 | * Mac mini hack. On this device, the DVI-I connector shares one DDC | 1736 | * Mac mini hack. On this device, the DVI-I connector shares one DDC |
@@ -1735,17 +1740,17 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1735 | */ | 1740 | */ |
1736 | if (num_modes == 0 && | 1741 | if (num_modes == 0 && |
1737 | sdvo_priv->analog_ddc_bus && | 1742 | sdvo_priv->analog_ddc_bus && |
1738 | !intel_analog_is_connected(intel_output->base.dev)) { | 1743 | !intel_analog_is_connected(intel_encoder->base.dev)) { |
1739 | struct i2c_adapter *digital_ddc_bus; | 1744 | struct i2c_adapter *digital_ddc_bus; |
1740 | 1745 | ||
1741 | /* Switch to the analog ddc bus and try that | 1746 | /* Switch to the analog ddc bus and try that |
1742 | */ | 1747 | */ |
1743 | digital_ddc_bus = intel_output->ddc_bus; | 1748 | digital_ddc_bus = intel_encoder->ddc_bus; |
1744 | intel_output->ddc_bus = sdvo_priv->analog_ddc_bus; | 1749 | intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus; |
1745 | 1750 | ||
1746 | (void) intel_ddc_get_modes(intel_output); | 1751 | (void) intel_ddc_get_modes(intel_encoder); |
1747 | 1752 | ||
1748 | intel_output->ddc_bus = digital_ddc_bus; | 1753 | intel_encoder->ddc_bus = digital_ddc_bus; |
1749 | } | 1754 | } |
1750 | } | 1755 | } |
1751 | 1756 | ||
@@ -1816,7 +1821,7 @@ struct drm_display_mode sdvo_tv_modes[] = { | |||
1816 | 1821 | ||
1817 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | 1822 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) |
1818 | { | 1823 | { |
1819 | struct intel_output *output = to_intel_output(connector); | 1824 | struct intel_encoder *output = to_intel_encoder(connector); |
1820 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1825 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; |
1821 | struct intel_sdvo_sdtv_resolution_request tv_res; | 1826 | struct intel_sdvo_sdtv_resolution_request tv_res; |
1822 | uint32_t reply = 0, format_map = 0; | 1827 | uint32_t reply = 0, format_map = 0; |
@@ -1858,9 +1863,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | |||
1858 | 1863 | ||
1859 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | 1864 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) |
1860 | { | 1865 | { |
1861 | struct intel_output *intel_output = to_intel_output(connector); | 1866 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1862 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1867 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1863 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1868 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1864 | struct drm_display_mode *newmode; | 1869 | struct drm_display_mode *newmode; |
1865 | 1870 | ||
1866 | /* | 1871 | /* |
@@ -1868,7 +1873,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | |||
1868 | * Assume that the preferred modes are | 1873 | * Assume that the preferred modes are |
1869 | * arranged in priority order. | 1874 | * arranged in priority order. |
1870 | */ | 1875 | */ |
1871 | intel_ddc_get_modes(intel_output); | 1876 | intel_ddc_get_modes(intel_encoder); |
1872 | if (list_empty(&connector->probed_modes) == false) | 1877 | if (list_empty(&connector->probed_modes) == false) |
1873 | goto end; | 1878 | goto end; |
1874 | 1879 | ||
@@ -1897,7 +1902,7 @@ end: | |||
1897 | 1902 | ||
1898 | static int intel_sdvo_get_modes(struct drm_connector *connector) | 1903 | static int intel_sdvo_get_modes(struct drm_connector *connector) |
1899 | { | 1904 | { |
1900 | struct intel_output *output = to_intel_output(connector); | 1905 | struct intel_encoder *output = to_intel_encoder(connector); |
1901 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1906 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; |
1902 | 1907 | ||
1903 | if (sdvo_priv->is_tv) | 1908 | if (sdvo_priv->is_tv) |
@@ -1915,8 +1920,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) | |||
1915 | static | 1920 | static |
1916 | void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | 1921 | void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) |
1917 | { | 1922 | { |
1918 | struct intel_output *intel_output = to_intel_output(connector); | 1923 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1919 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1924 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1920 | struct drm_device *dev = connector->dev; | 1925 | struct drm_device *dev = connector->dev; |
1921 | 1926 | ||
1922 | if (sdvo_priv->is_tv) { | 1927 | if (sdvo_priv->is_tv) { |
@@ -1953,13 +1958,13 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | |||
1953 | 1958 | ||
1954 | static void intel_sdvo_destroy(struct drm_connector *connector) | 1959 | static void intel_sdvo_destroy(struct drm_connector *connector) |
1955 | { | 1960 | { |
1956 | struct intel_output *intel_output = to_intel_output(connector); | 1961 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1957 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1962 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1958 | 1963 | ||
1959 | if (intel_output->i2c_bus) | 1964 | if (intel_encoder->i2c_bus) |
1960 | intel_i2c_destroy(intel_output->i2c_bus); | 1965 | intel_i2c_destroy(intel_encoder->i2c_bus); |
1961 | if (intel_output->ddc_bus) | 1966 | if (intel_encoder->ddc_bus) |
1962 | intel_i2c_destroy(intel_output->ddc_bus); | 1967 | intel_i2c_destroy(intel_encoder->ddc_bus); |
1963 | if (sdvo_priv->analog_ddc_bus) | 1968 | if (sdvo_priv->analog_ddc_bus) |
1964 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); | 1969 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); |
1965 | 1970 | ||
@@ -1977,7 +1982,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector) | |||
1977 | drm_sysfs_connector_remove(connector); | 1982 | drm_sysfs_connector_remove(connector); |
1978 | drm_connector_cleanup(connector); | 1983 | drm_connector_cleanup(connector); |
1979 | 1984 | ||
1980 | kfree(intel_output); | 1985 | kfree(intel_encoder); |
1981 | } | 1986 | } |
1982 | 1987 | ||
1983 | static int | 1988 | static int |
@@ -1985,9 +1990,9 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1985 | struct drm_property *property, | 1990 | struct drm_property *property, |
1986 | uint64_t val) | 1991 | uint64_t val) |
1987 | { | 1992 | { |
1988 | struct intel_output *intel_output = to_intel_output(connector); | 1993 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1989 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1994 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1990 | struct drm_encoder *encoder = &intel_output->enc; | 1995 | struct drm_encoder *encoder = &intel_encoder->enc; |
1991 | struct drm_crtc *crtc = encoder->crtc; | 1996 | struct drm_crtc *crtc = encoder->crtc; |
1992 | int ret = 0; | 1997 | int ret = 0; |
1993 | bool changed = false; | 1998 | bool changed = false; |
@@ -2095,8 +2100,8 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
2095 | sdvo_priv->cur_brightness = temp_value; | 2100 | sdvo_priv->cur_brightness = temp_value; |
2096 | } | 2101 | } |
2097 | if (cmd) { | 2102 | if (cmd) { |
2098 | intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); | 2103 | intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); |
2099 | status = intel_sdvo_read_response(intel_output, | 2104 | status = intel_sdvo_read_response(intel_encoder, |
2100 | NULL, 0); | 2105 | NULL, 0); |
2101 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2106 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2102 | DRM_DEBUG_KMS("Incorrect SDVO command \n"); | 2107 | DRM_DEBUG_KMS("Incorrect SDVO command \n"); |
@@ -2191,7 +2196,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) | |||
2191 | } | 2196 | } |
2192 | 2197 | ||
2193 | static bool | 2198 | static bool |
2194 | intel_sdvo_get_digital_encoding_mode(struct intel_output *output) | 2199 | intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output) |
2195 | { | 2200 | { |
2196 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 2201 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; |
2197 | uint8_t status; | 2202 | uint8_t status; |
@@ -2205,42 +2210,42 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output) | |||
2205 | return true; | 2210 | return true; |
2206 | } | 2211 | } |
2207 | 2212 | ||
2208 | static struct intel_output * | 2213 | static struct intel_encoder * |
2209 | intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) | 2214 | intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) |
2210 | { | 2215 | { |
2211 | struct drm_device *dev = chan->drm_dev; | 2216 | struct drm_device *dev = chan->drm_dev; |
2212 | struct drm_connector *connector; | 2217 | struct drm_connector *connector; |
2213 | struct intel_output *intel_output = NULL; | 2218 | struct intel_encoder *intel_encoder = NULL; |
2214 | 2219 | ||
2215 | list_for_each_entry(connector, | 2220 | list_for_each_entry(connector, |
2216 | &dev->mode_config.connector_list, head) { | 2221 | &dev->mode_config.connector_list, head) { |
2217 | if (to_intel_output(connector)->ddc_bus == &chan->adapter) { | 2222 | if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) { |
2218 | intel_output = to_intel_output(connector); | 2223 | intel_encoder = to_intel_encoder(connector); |
2219 | break; | 2224 | break; |
2220 | } | 2225 | } |
2221 | } | 2226 | } |
2222 | return intel_output; | 2227 | return intel_encoder; |
2223 | } | 2228 | } |
2224 | 2229 | ||
2225 | static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, | 2230 | static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, |
2226 | struct i2c_msg msgs[], int num) | 2231 | struct i2c_msg msgs[], int num) |
2227 | { | 2232 | { |
2228 | struct intel_output *intel_output; | 2233 | struct intel_encoder *intel_encoder; |
2229 | struct intel_sdvo_priv *sdvo_priv; | 2234 | struct intel_sdvo_priv *sdvo_priv; |
2230 | struct i2c_algo_bit_data *algo_data; | 2235 | struct i2c_algo_bit_data *algo_data; |
2231 | const struct i2c_algorithm *algo; | 2236 | const struct i2c_algorithm *algo; |
2232 | 2237 | ||
2233 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; | 2238 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; |
2234 | intel_output = | 2239 | intel_encoder = |
2235 | intel_sdvo_chan_to_intel_output( | 2240 | intel_sdvo_chan_to_intel_encoder( |
2236 | (struct intel_i2c_chan *)(algo_data->data)); | 2241 | (struct intel_i2c_chan *)(algo_data->data)); |
2237 | if (intel_output == NULL) | 2242 | if (intel_encoder == NULL) |
2238 | return -EINVAL; | 2243 | return -EINVAL; |
2239 | 2244 | ||
2240 | sdvo_priv = intel_output->dev_priv; | 2245 | sdvo_priv = intel_encoder->dev_priv; |
2241 | algo = intel_output->i2c_bus->algo; | 2246 | algo = intel_encoder->i2c_bus->algo; |
2242 | 2247 | ||
2243 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | 2248 | intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus); |
2244 | return algo->master_xfer(i2c_adap, msgs, num); | 2249 | return algo->master_xfer(i2c_adap, msgs, num); |
2245 | } | 2250 | } |
2246 | 2251 | ||
@@ -2249,12 +2254,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = { | |||
2249 | }; | 2254 | }; |
2250 | 2255 | ||
2251 | static u8 | 2256 | static u8 |
2252 | intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) | 2257 | intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) |
2253 | { | 2258 | { |
2254 | struct drm_i915_private *dev_priv = dev->dev_private; | 2259 | struct drm_i915_private *dev_priv = dev->dev_private; |
2255 | struct sdvo_device_mapping *my_mapping, *other_mapping; | 2260 | struct sdvo_device_mapping *my_mapping, *other_mapping; |
2256 | 2261 | ||
2257 | if (output_device == SDVOB) { | 2262 | if (sdvo_reg == SDVOB) { |
2258 | my_mapping = &dev_priv->sdvo_mappings[0]; | 2263 | my_mapping = &dev_priv->sdvo_mappings[0]; |
2259 | other_mapping = &dev_priv->sdvo_mappings[1]; | 2264 | other_mapping = &dev_priv->sdvo_mappings[1]; |
2260 | } else { | 2265 | } else { |
@@ -2279,7 +2284,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) | |||
2279 | /* No SDVO device info is found for another DVO port, | 2284 | /* No SDVO device info is found for another DVO port, |
2280 | * so use mapping assumption we had before BIOS parsing. | 2285 | * so use mapping assumption we had before BIOS parsing. |
2281 | */ | 2286 | */ |
2282 | if (output_device == SDVOB) | 2287 | if (sdvo_reg == SDVOB) |
2283 | return 0x70; | 2288 | return 0x70; |
2284 | else | 2289 | else |
2285 | return 0x72; | 2290 | return 0x72; |
@@ -2305,15 +2310,15 @@ static struct dmi_system_id intel_sdvo_bad_tv[] = { | |||
2305 | }; | 2310 | }; |
2306 | 2311 | ||
2307 | static bool | 2312 | static bool |
2308 | intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | 2313 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) |
2309 | { | 2314 | { |
2310 | struct drm_connector *connector = &intel_output->base; | 2315 | struct drm_connector *connector = &intel_encoder->base; |
2311 | struct drm_encoder *encoder = &intel_output->enc; | 2316 | struct drm_encoder *encoder = &intel_encoder->enc; |
2312 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 2317 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
2313 | bool ret = true, registered = false; | 2318 | bool ret = true, registered = false; |
2314 | 2319 | ||
2315 | sdvo_priv->is_tv = false; | 2320 | sdvo_priv->is_tv = false; |
2316 | intel_output->needs_tv_clock = false; | 2321 | intel_encoder->needs_tv_clock = false; |
2317 | sdvo_priv->is_lvds = false; | 2322 | sdvo_priv->is_lvds = false; |
2318 | 2323 | ||
2319 | if (device_is_registered(&connector->kdev)) { | 2324 | if (device_is_registered(&connector->kdev)) { |
@@ -2331,16 +2336,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2331 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2336 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; |
2332 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2337 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
2333 | 2338 | ||
2334 | if (intel_sdvo_get_supp_encode(intel_output, | 2339 | if (intel_sdvo_get_supp_encode(intel_encoder, |
2335 | &sdvo_priv->encode) && | 2340 | &sdvo_priv->encode) && |
2336 | intel_sdvo_get_digital_encoding_mode(intel_output) && | 2341 | intel_sdvo_get_digital_encoding_mode(intel_encoder) && |
2337 | sdvo_priv->is_hdmi) { | 2342 | sdvo_priv->is_hdmi) { |
2338 | /* enable hdmi encoding mode if supported */ | 2343 | /* enable hdmi encoding mode if supported */ |
2339 | intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); | 2344 | intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); |
2340 | intel_sdvo_set_colorimetry(intel_output, | 2345 | intel_sdvo_set_colorimetry(intel_encoder, |
2341 | SDVO_COLORIMETRY_RGB256); | 2346 | SDVO_COLORIMETRY_RGB256); |
2342 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | 2347 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; |
2343 | intel_output->clone_mask = | 2348 | intel_encoder->clone_mask = |
2344 | (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2349 | (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2345 | (1 << INTEL_ANALOG_CLONE_BIT); | 2350 | (1 << INTEL_ANALOG_CLONE_BIT); |
2346 | } | 2351 | } |
@@ -2351,21 +2356,21 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2351 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | 2356 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; |
2352 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | 2357 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; |
2353 | sdvo_priv->is_tv = true; | 2358 | sdvo_priv->is_tv = true; |
2354 | intel_output->needs_tv_clock = true; | 2359 | intel_encoder->needs_tv_clock = true; |
2355 | intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | 2360 | intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; |
2356 | } else if (flags & SDVO_OUTPUT_RGB0) { | 2361 | } else if (flags & SDVO_OUTPUT_RGB0) { |
2357 | 2362 | ||
2358 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; | 2363 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; |
2359 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2364 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
2360 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2365 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2361 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2366 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2362 | (1 << INTEL_ANALOG_CLONE_BIT); | 2367 | (1 << INTEL_ANALOG_CLONE_BIT); |
2363 | } else if (flags & SDVO_OUTPUT_RGB1) { | 2368 | } else if (flags & SDVO_OUTPUT_RGB1) { |
2364 | 2369 | ||
2365 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | 2370 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; |
2366 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2371 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
2367 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2372 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2368 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2373 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2369 | (1 << INTEL_ANALOG_CLONE_BIT); | 2374 | (1 << INTEL_ANALOG_CLONE_BIT); |
2370 | } else if (flags & SDVO_OUTPUT_CVBS0) { | 2375 | } else if (flags & SDVO_OUTPUT_CVBS0) { |
2371 | 2376 | ||
@@ -2373,15 +2378,15 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2373 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | 2378 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; |
2374 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | 2379 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; |
2375 | sdvo_priv->is_tv = true; | 2380 | sdvo_priv->is_tv = true; |
2376 | intel_output->needs_tv_clock = true; | 2381 | intel_encoder->needs_tv_clock = true; |
2377 | intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | 2382 | intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; |
2378 | } else if (flags & SDVO_OUTPUT_LVDS0) { | 2383 | } else if (flags & SDVO_OUTPUT_LVDS0) { |
2379 | 2384 | ||
2380 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | 2385 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; |
2381 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | 2386 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; |
2382 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | 2387 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; |
2383 | sdvo_priv->is_lvds = true; | 2388 | sdvo_priv->is_lvds = true; |
2384 | intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | 2389 | intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | |
2385 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 2390 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); |
2386 | } else if (flags & SDVO_OUTPUT_LVDS1) { | 2391 | } else if (flags & SDVO_OUTPUT_LVDS1) { |
2387 | 2392 | ||
@@ -2389,7 +2394,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2389 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | 2394 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; |
2390 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | 2395 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; |
2391 | sdvo_priv->is_lvds = true; | 2396 | sdvo_priv->is_lvds = true; |
2392 | intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | 2397 | intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | |
2393 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 2398 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); |
2394 | } else { | 2399 | } else { |
2395 | 2400 | ||
@@ -2402,7 +2407,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2402 | bytes[0], bytes[1]); | 2407 | bytes[0], bytes[1]); |
2403 | ret = false; | 2408 | ret = false; |
2404 | } | 2409 | } |
2405 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 2410 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
2406 | 2411 | ||
2407 | if (ret && registered) | 2412 | if (ret && registered) |
2408 | ret = drm_sysfs_connector_add(connector) == 0 ? true : false; | 2413 | ret = drm_sysfs_connector_add(connector) == 0 ? true : false; |
@@ -2414,18 +2419,18 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2414 | 2419 | ||
2415 | static void intel_sdvo_tv_create_property(struct drm_connector *connector) | 2420 | static void intel_sdvo_tv_create_property(struct drm_connector *connector) |
2416 | { | 2421 | { |
2417 | struct intel_output *intel_output = to_intel_output(connector); | 2422 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
2418 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 2423 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
2419 | struct intel_sdvo_tv_format format; | 2424 | struct intel_sdvo_tv_format format; |
2420 | uint32_t format_map, i; | 2425 | uint32_t format_map, i; |
2421 | uint8_t status; | 2426 | uint8_t status; |
2422 | 2427 | ||
2423 | intel_sdvo_set_target_output(intel_output, | 2428 | intel_sdvo_set_target_output(intel_encoder, |
2424 | sdvo_priv->controlled_output); | 2429 | sdvo_priv->controlled_output); |
2425 | 2430 | ||
2426 | intel_sdvo_write_cmd(intel_output, | 2431 | intel_sdvo_write_cmd(intel_encoder, |
2427 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); | 2432 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); |
2428 | status = intel_sdvo_read_response(intel_output, | 2433 | status = intel_sdvo_read_response(intel_encoder, |
2429 | &format, sizeof(format)); | 2434 | &format, sizeof(format)); |
2430 | if (status != SDVO_CMD_STATUS_SUCCESS) | 2435 | if (status != SDVO_CMD_STATUS_SUCCESS) |
2431 | return; | 2436 | return; |
@@ -2463,16 +2468,16 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) | |||
2463 | 2468 | ||
2464 | static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | 2469 | static void intel_sdvo_create_enhance_property(struct drm_connector *connector) |
2465 | { | 2470 | { |
2466 | struct intel_output *intel_output = to_intel_output(connector); | 2471 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
2467 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 2472 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
2468 | struct intel_sdvo_enhancements_reply sdvo_data; | 2473 | struct intel_sdvo_enhancements_reply sdvo_data; |
2469 | struct drm_device *dev = connector->dev; | 2474 | struct drm_device *dev = connector->dev; |
2470 | uint8_t status; | 2475 | uint8_t status; |
2471 | uint16_t response, data_value[2]; | 2476 | uint16_t response, data_value[2]; |
2472 | 2477 | ||
2473 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, | 2478 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, |
2474 | NULL, 0); | 2479 | NULL, 0); |
2475 | status = intel_sdvo_read_response(intel_output, &sdvo_data, | 2480 | status = intel_sdvo_read_response(intel_encoder, &sdvo_data, |
2476 | sizeof(sdvo_data)); | 2481 | sizeof(sdvo_data)); |
2477 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2482 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2478 | DRM_DEBUG_KMS(" incorrect response is returned\n"); | 2483 | DRM_DEBUG_KMS(" incorrect response is returned\n"); |
@@ -2488,18 +2493,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2488 | * property | 2493 | * property |
2489 | */ | 2494 | */ |
2490 | if (sdvo_data.overscan_h) { | 2495 | if (sdvo_data.overscan_h) { |
2491 | intel_sdvo_write_cmd(intel_output, | 2496 | intel_sdvo_write_cmd(intel_encoder, |
2492 | SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); | 2497 | SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); |
2493 | status = intel_sdvo_read_response(intel_output, | 2498 | status = intel_sdvo_read_response(intel_encoder, |
2494 | &data_value, 4); | 2499 | &data_value, 4); |
2495 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2500 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2496 | DRM_DEBUG_KMS("Incorrect SDVO max " | 2501 | DRM_DEBUG_KMS("Incorrect SDVO max " |
2497 | "h_overscan\n"); | 2502 | "h_overscan\n"); |
2498 | return; | 2503 | return; |
2499 | } | 2504 | } |
2500 | intel_sdvo_write_cmd(intel_output, | 2505 | intel_sdvo_write_cmd(intel_encoder, |
2501 | SDVO_CMD_GET_OVERSCAN_H, NULL, 0); | 2506 | SDVO_CMD_GET_OVERSCAN_H, NULL, 0); |
2502 | status = intel_sdvo_read_response(intel_output, | 2507 | status = intel_sdvo_read_response(intel_encoder, |
2503 | &response, 2); | 2508 | &response, 2); |
2504 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2509 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2505 | DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); | 2510 | DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); |
@@ -2529,18 +2534,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2529 | data_value[0], data_value[1], response); | 2534 | data_value[0], data_value[1], response); |
2530 | } | 2535 | } |
2531 | if (sdvo_data.overscan_v) { | 2536 | if (sdvo_data.overscan_v) { |
2532 | intel_sdvo_write_cmd(intel_output, | 2537 | intel_sdvo_write_cmd(intel_encoder, |
2533 | SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); | 2538 | SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); |
2534 | status = intel_sdvo_read_response(intel_output, | 2539 | status = intel_sdvo_read_response(intel_encoder, |
2535 | &data_value, 4); | 2540 | &data_value, 4); |
2536 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2541 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2537 | DRM_DEBUG_KMS("Incorrect SDVO max " | 2542 | DRM_DEBUG_KMS("Incorrect SDVO max " |
2538 | "v_overscan\n"); | 2543 | "v_overscan\n"); |
2539 | return; | 2544 | return; |
2540 | } | 2545 | } |
2541 | intel_sdvo_write_cmd(intel_output, | 2546 | intel_sdvo_write_cmd(intel_encoder, |
2542 | SDVO_CMD_GET_OVERSCAN_V, NULL, 0); | 2547 | SDVO_CMD_GET_OVERSCAN_V, NULL, 0); |
2543 | status = intel_sdvo_read_response(intel_output, | 2548 | status = intel_sdvo_read_response(intel_encoder, |
2544 | &response, 2); | 2549 | &response, 2); |
2545 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2550 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2546 | DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); | 2551 | DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); |
@@ -2570,17 +2575,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2570 | data_value[0], data_value[1], response); | 2575 | data_value[0], data_value[1], response); |
2571 | } | 2576 | } |
2572 | if (sdvo_data.position_h) { | 2577 | if (sdvo_data.position_h) { |
2573 | intel_sdvo_write_cmd(intel_output, | 2578 | intel_sdvo_write_cmd(intel_encoder, |
2574 | SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); | 2579 | SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); |
2575 | status = intel_sdvo_read_response(intel_output, | 2580 | status = intel_sdvo_read_response(intel_encoder, |
2576 | &data_value, 4); | 2581 | &data_value, 4); |
2577 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2582 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2578 | DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); | 2583 | DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); |
2579 | return; | 2584 | return; |
2580 | } | 2585 | } |
2581 | intel_sdvo_write_cmd(intel_output, | 2586 | intel_sdvo_write_cmd(intel_encoder, |
2582 | SDVO_CMD_GET_POSITION_H, NULL, 0); | 2587 | SDVO_CMD_GET_POSITION_H, NULL, 0); |
2583 | status = intel_sdvo_read_response(intel_output, | 2588 | status = intel_sdvo_read_response(intel_encoder, |
2584 | &response, 2); | 2589 | &response, 2); |
2585 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2590 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2586 | DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); | 2591 | DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); |
@@ -2601,17 +2606,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2601 | data_value[0], data_value[1], response); | 2606 | data_value[0], data_value[1], response); |
2602 | } | 2607 | } |
2603 | if (sdvo_data.position_v) { | 2608 | if (sdvo_data.position_v) { |
2604 | intel_sdvo_write_cmd(intel_output, | 2609 | intel_sdvo_write_cmd(intel_encoder, |
2605 | SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); | 2610 | SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); |
2606 | status = intel_sdvo_read_response(intel_output, | 2611 | status = intel_sdvo_read_response(intel_encoder, |
2607 | &data_value, 4); | 2612 | &data_value, 4); |
2608 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2613 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2609 | DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); | 2614 | DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); |
2610 | return; | 2615 | return; |
2611 | } | 2616 | } |
2612 | intel_sdvo_write_cmd(intel_output, | 2617 | intel_sdvo_write_cmd(intel_encoder, |
2613 | SDVO_CMD_GET_POSITION_V, NULL, 0); | 2618 | SDVO_CMD_GET_POSITION_V, NULL, 0); |
2614 | status = intel_sdvo_read_response(intel_output, | 2619 | status = intel_sdvo_read_response(intel_encoder, |
2615 | &response, 2); | 2620 | &response, 2); |
2616 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2621 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2617 | DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); | 2622 | DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); |
@@ -2634,17 +2639,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2634 | } | 2639 | } |
2635 | if (sdvo_priv->is_tv) { | 2640 | if (sdvo_priv->is_tv) { |
2636 | if (sdvo_data.saturation) { | 2641 | if (sdvo_data.saturation) { |
2637 | intel_sdvo_write_cmd(intel_output, | 2642 | intel_sdvo_write_cmd(intel_encoder, |
2638 | SDVO_CMD_GET_MAX_SATURATION, NULL, 0); | 2643 | SDVO_CMD_GET_MAX_SATURATION, NULL, 0); |
2639 | status = intel_sdvo_read_response(intel_output, | 2644 | status = intel_sdvo_read_response(intel_encoder, |
2640 | &data_value, 4); | 2645 | &data_value, 4); |
2641 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2646 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2642 | DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); | 2647 | DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); |
2643 | return; | 2648 | return; |
2644 | } | 2649 | } |
2645 | intel_sdvo_write_cmd(intel_output, | 2650 | intel_sdvo_write_cmd(intel_encoder, |
2646 | SDVO_CMD_GET_SATURATION, NULL, 0); | 2651 | SDVO_CMD_GET_SATURATION, NULL, 0); |
2647 | status = intel_sdvo_read_response(intel_output, | 2652 | status = intel_sdvo_read_response(intel_encoder, |
2648 | &response, 2); | 2653 | &response, 2); |
2649 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2654 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2650 | DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); | 2655 | DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); |
@@ -2666,17 +2671,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2666 | data_value[0], data_value[1], response); | 2671 | data_value[0], data_value[1], response); |
2667 | } | 2672 | } |
2668 | if (sdvo_data.contrast) { | 2673 | if (sdvo_data.contrast) { |
2669 | intel_sdvo_write_cmd(intel_output, | 2674 | intel_sdvo_write_cmd(intel_encoder, |
2670 | SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); | 2675 | SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); |
2671 | status = intel_sdvo_read_response(intel_output, | 2676 | status = intel_sdvo_read_response(intel_encoder, |
2672 | &data_value, 4); | 2677 | &data_value, 4); |
2673 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2678 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2674 | DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); | 2679 | DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); |
2675 | return; | 2680 | return; |
2676 | } | 2681 | } |
2677 | intel_sdvo_write_cmd(intel_output, | 2682 | intel_sdvo_write_cmd(intel_encoder, |
2678 | SDVO_CMD_GET_CONTRAST, NULL, 0); | 2683 | SDVO_CMD_GET_CONTRAST, NULL, 0); |
2679 | status = intel_sdvo_read_response(intel_output, | 2684 | status = intel_sdvo_read_response(intel_encoder, |
2680 | &response, 2); | 2685 | &response, 2); |
2681 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2686 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2682 | DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); | 2687 | DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); |
@@ -2697,17 +2702,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2697 | data_value[0], data_value[1], response); | 2702 | data_value[0], data_value[1], response); |
2698 | } | 2703 | } |
2699 | if (sdvo_data.hue) { | 2704 | if (sdvo_data.hue) { |
2700 | intel_sdvo_write_cmd(intel_output, | 2705 | intel_sdvo_write_cmd(intel_encoder, |
2701 | SDVO_CMD_GET_MAX_HUE, NULL, 0); | 2706 | SDVO_CMD_GET_MAX_HUE, NULL, 0); |
2702 | status = intel_sdvo_read_response(intel_output, | 2707 | status = intel_sdvo_read_response(intel_encoder, |
2703 | &data_value, 4); | 2708 | &data_value, 4); |
2704 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2709 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2705 | DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); | 2710 | DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); |
2706 | return; | 2711 | return; |
2707 | } | 2712 | } |
2708 | intel_sdvo_write_cmd(intel_output, | 2713 | intel_sdvo_write_cmd(intel_encoder, |
2709 | SDVO_CMD_GET_HUE, NULL, 0); | 2714 | SDVO_CMD_GET_HUE, NULL, 0); |
2710 | status = intel_sdvo_read_response(intel_output, | 2715 | status = intel_sdvo_read_response(intel_encoder, |
2711 | &response, 2); | 2716 | &response, 2); |
2712 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2717 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2713 | DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); | 2718 | DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); |
@@ -2730,17 +2735,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2730 | } | 2735 | } |
2731 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | 2736 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { |
2732 | if (sdvo_data.brightness) { | 2737 | if (sdvo_data.brightness) { |
2733 | intel_sdvo_write_cmd(intel_output, | 2738 | intel_sdvo_write_cmd(intel_encoder, |
2734 | SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); | 2739 | SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); |
2735 | status = intel_sdvo_read_response(intel_output, | 2740 | status = intel_sdvo_read_response(intel_encoder, |
2736 | &data_value, 4); | 2741 | &data_value, 4); |
2737 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2742 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2738 | DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); | 2743 | DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); |
2739 | return; | 2744 | return; |
2740 | } | 2745 | } |
2741 | intel_sdvo_write_cmd(intel_output, | 2746 | intel_sdvo_write_cmd(intel_encoder, |
2742 | SDVO_CMD_GET_BRIGHTNESS, NULL, 0); | 2747 | SDVO_CMD_GET_BRIGHTNESS, NULL, 0); |
2743 | status = intel_sdvo_read_response(intel_output, | 2748 | status = intel_sdvo_read_response(intel_encoder, |
2744 | &response, 2); | 2749 | &response, 2); |
2745 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2750 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2746 | DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); | 2751 | DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); |
@@ -2765,81 +2770,81 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2765 | return; | 2770 | return; |
2766 | } | 2771 | } |
2767 | 2772 | ||
2768 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 2773 | bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) |
2769 | { | 2774 | { |
2770 | struct drm_i915_private *dev_priv = dev->dev_private; | 2775 | struct drm_i915_private *dev_priv = dev->dev_private; |
2771 | struct drm_connector *connector; | 2776 | struct drm_connector *connector; |
2772 | struct intel_output *intel_output; | 2777 | struct intel_encoder *intel_encoder; |
2773 | struct intel_sdvo_priv *sdvo_priv; | 2778 | struct intel_sdvo_priv *sdvo_priv; |
2774 | 2779 | ||
2775 | u8 ch[0x40]; | 2780 | u8 ch[0x40]; |
2776 | int i; | 2781 | int i; |
2777 | 2782 | ||
2778 | intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); | 2783 | intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); |
2779 | if (!intel_output) { | 2784 | if (!intel_encoder) { |
2780 | return false; | 2785 | return false; |
2781 | } | 2786 | } |
2782 | 2787 | ||
2783 | sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); | 2788 | sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1); |
2784 | sdvo_priv->output_device = output_device; | 2789 | sdvo_priv->sdvo_reg = sdvo_reg; |
2785 | 2790 | ||
2786 | intel_output->dev_priv = sdvo_priv; | 2791 | intel_encoder->dev_priv = sdvo_priv; |
2787 | intel_output->type = INTEL_OUTPUT_SDVO; | 2792 | intel_encoder->type = INTEL_OUTPUT_SDVO; |
2788 | 2793 | ||
2789 | /* setup the DDC bus. */ | 2794 | /* setup the DDC bus. */ |
2790 | if (output_device == SDVOB) | 2795 | if (sdvo_reg == SDVOB) |
2791 | intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); | 2796 | intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); |
2792 | else | 2797 | else |
2793 | intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); | 2798 | intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); |
2794 | 2799 | ||
2795 | if (!intel_output->i2c_bus) | 2800 | if (!intel_encoder->i2c_bus) |
2796 | goto err_inteloutput; | 2801 | goto err_inteloutput; |
2797 | 2802 | ||
2798 | sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); | 2803 | sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); |
2799 | 2804 | ||
2800 | /* Save the bit-banging i2c functionality for use by the DDC wrapper */ | 2805 | /* Save the bit-banging i2c functionality for use by the DDC wrapper */ |
2801 | intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; | 2806 | intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; |
2802 | 2807 | ||
2803 | /* Read the regs to test if we can talk to the device */ | 2808 | /* Read the regs to test if we can talk to the device */ |
2804 | for (i = 0; i < 0x40; i++) { | 2809 | for (i = 0; i < 0x40; i++) { |
2805 | if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { | 2810 | if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { |
2806 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", | 2811 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", |
2807 | output_device == SDVOB ? 'B' : 'C'); | 2812 | sdvo_reg == SDVOB ? 'B' : 'C'); |
2808 | goto err_i2c; | 2813 | goto err_i2c; |
2809 | } | 2814 | } |
2810 | } | 2815 | } |
2811 | 2816 | ||
2812 | /* setup the DDC bus. */ | 2817 | /* setup the DDC bus. */ |
2813 | if (output_device == SDVOB) { | 2818 | if (sdvo_reg == SDVOB) { |
2814 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); | 2819 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); |
2815 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2820 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
2816 | "SDVOB/VGA DDC BUS"); | 2821 | "SDVOB/VGA DDC BUS"); |
2817 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | 2822 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; |
2818 | } else { | 2823 | } else { |
2819 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); | 2824 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); |
2820 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2825 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
2821 | "SDVOC/VGA DDC BUS"); | 2826 | "SDVOC/VGA DDC BUS"); |
2822 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | 2827 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; |
2823 | } | 2828 | } |
2824 | 2829 | ||
2825 | if (intel_output->ddc_bus == NULL) | 2830 | if (intel_encoder->ddc_bus == NULL) |
2826 | goto err_i2c; | 2831 | goto err_i2c; |
2827 | 2832 | ||
2828 | /* Wrap with our custom algo which switches to DDC mode */ | 2833 | /* Wrap with our custom algo which switches to DDC mode */ |
2829 | intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; | 2834 | intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; |
2830 | 2835 | ||
2831 | /* In default case sdvo lvds is false */ | 2836 | /* In default case sdvo lvds is false */ |
2832 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); | 2837 | intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); |
2833 | 2838 | ||
2834 | if (intel_sdvo_output_setup(intel_output, | 2839 | if (intel_sdvo_output_setup(intel_encoder, |
2835 | sdvo_priv->caps.output_flags) != true) { | 2840 | sdvo_priv->caps.output_flags) != true) { |
2836 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", | 2841 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", |
2837 | output_device == SDVOB ? 'B' : 'C'); | 2842 | sdvo_reg == SDVOB ? 'B' : 'C'); |
2838 | goto err_i2c; | 2843 | goto err_i2c; |
2839 | } | 2844 | } |
2840 | 2845 | ||
2841 | 2846 | ||
2842 | connector = &intel_output->base; | 2847 | connector = &intel_encoder->base; |
2843 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, | 2848 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, |
2844 | connector->connector_type); | 2849 | connector->connector_type); |
2845 | 2850 | ||
@@ -2848,12 +2853,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2848 | connector->doublescan_allowed = 0; | 2853 | connector->doublescan_allowed = 0; |
2849 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 2854 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
2850 | 2855 | ||
2851 | drm_encoder_init(dev, &intel_output->enc, | 2856 | drm_encoder_init(dev, &intel_encoder->enc, |
2852 | &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); | 2857 | &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type); |
2853 | 2858 | ||
2854 | drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); | 2859 | drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); |
2855 | 2860 | ||
2856 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 2861 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); |
2857 | if (sdvo_priv->is_tv) | 2862 | if (sdvo_priv->is_tv) |
2858 | intel_sdvo_tv_create_property(connector); | 2863 | intel_sdvo_tv_create_property(connector); |
2859 | 2864 | ||
@@ -2865,9 +2870,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2865 | intel_sdvo_select_ddc_bus(sdvo_priv); | 2870 | intel_sdvo_select_ddc_bus(sdvo_priv); |
2866 | 2871 | ||
2867 | /* Set the input timing to the screen. Assume always input 0. */ | 2872 | /* Set the input timing to the screen. Assume always input 0. */ |
2868 | intel_sdvo_set_target_input(intel_output, true, false); | 2873 | intel_sdvo_set_target_input(intel_encoder, true, false); |
2869 | 2874 | ||
2870 | intel_sdvo_get_input_pixel_clock_range(intel_output, | 2875 | intel_sdvo_get_input_pixel_clock_range(intel_encoder, |
2871 | &sdvo_priv->pixel_clock_min, | 2876 | &sdvo_priv->pixel_clock_min, |
2872 | &sdvo_priv->pixel_clock_max); | 2877 | &sdvo_priv->pixel_clock_max); |
2873 | 2878 | ||
@@ -2894,12 +2899,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2894 | err_i2c: | 2899 | err_i2c: |
2895 | if (sdvo_priv->analog_ddc_bus != NULL) | 2900 | if (sdvo_priv->analog_ddc_bus != NULL) |
2896 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); | 2901 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); |
2897 | if (intel_output->ddc_bus != NULL) | 2902 | if (intel_encoder->ddc_bus != NULL) |
2898 | intel_i2c_destroy(intel_output->ddc_bus); | 2903 | intel_i2c_destroy(intel_encoder->ddc_bus); |
2899 | if (intel_output->i2c_bus != NULL) | 2904 | if (intel_encoder->i2c_bus != NULL) |
2900 | intel_i2c_destroy(intel_output->i2c_bus); | 2905 | intel_i2c_destroy(intel_encoder->i2c_bus); |
2901 | err_inteloutput: | 2906 | err_inteloutput: |
2902 | kfree(intel_output); | 2907 | kfree(intel_encoder); |
2903 | 2908 | ||
2904 | return false; | 2909 | return false; |
2905 | } | 2910 | } |
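The intel_sdvo.c hunks above are a mechanical rename of struct intel_output to struct intel_encoder (and of the SDVO priv field output_device to sdvo_reg); no control flow changes. A minimal sketch of the connector-to-encoder accessor these hunks lean on, assuming the wrapper still embeds its drm_connector as ->base exactly as the old to_intel_output() helper implied (the real definition lives in intel_drv.h, which is not part of this excerpt):

	#include <linux/kernel.h>	/* container_of() */
	#include "intel_drv.h"		/* struct intel_encoder -- assumed to embed drm_connector as ->base */

	/* Sketch only, not taken from this diff: recover the wrapper from a
	 * connector pointer, mirroring the old to_intel_output() accessor. */
	#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)

	/* Typical call-site shape seen throughout the hunks above. */
	static struct intel_sdvo_priv *connector_to_sdvo_priv(struct drm_connector *connector)
	{
		struct intel_encoder *intel_encoder = to_intel_encoder(connector);

		return intel_encoder->dev_priv;	/* sdvo_priv sits behind dev_priv */
	}

Keeping the embedded drm_connector as the first-class handle is what lets the existing connector_list iteration (as in intel_sdvo_find() above) and the drm_get_edid()/drm_connector_init() call sites stay byte-for-byte identical while only the wrapper type and field names change.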
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 552ec110b741..d7d39b2327df 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -921,8 +921,8 @@ intel_tv_save(struct drm_connector *connector) | |||
921 | { | 921 | { |
922 | struct drm_device *dev = connector->dev; | 922 | struct drm_device *dev = connector->dev; |
923 | struct drm_i915_private *dev_priv = dev->dev_private; | 923 | struct drm_i915_private *dev_priv = dev->dev_private; |
924 | struct intel_output *intel_output = to_intel_output(connector); | 924 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
925 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 925 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
926 | int i; | 926 | int i; |
927 | 927 | ||
928 | tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); | 928 | tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); |
@@ -971,8 +971,8 @@ intel_tv_restore(struct drm_connector *connector) | |||
971 | { | 971 | { |
972 | struct drm_device *dev = connector->dev; | 972 | struct drm_device *dev = connector->dev; |
973 | struct drm_i915_private *dev_priv = dev->dev_private; | 973 | struct drm_i915_private *dev_priv = dev->dev_private; |
974 | struct intel_output *intel_output = to_intel_output(connector); | 974 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
975 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 975 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
976 | struct drm_crtc *crtc = connector->encoder->crtc; | 976 | struct drm_crtc *crtc = connector->encoder->crtc; |
977 | struct intel_crtc *intel_crtc; | 977 | struct intel_crtc *intel_crtc; |
978 | int i; | 978 | int i; |
@@ -1068,9 +1068,9 @@ intel_tv_mode_lookup (char *tv_format) | |||
1068 | } | 1068 | } |
1069 | 1069 | ||
1070 | static const struct tv_mode * | 1070 | static const struct tv_mode * |
1071 | intel_tv_mode_find (struct intel_output *intel_output) | 1071 | intel_tv_mode_find (struct intel_encoder *intel_encoder) |
1072 | { | 1072 | { |
1073 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1073 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1074 | 1074 | ||
1075 | return intel_tv_mode_lookup(tv_priv->tv_format); | 1075 | return intel_tv_mode_lookup(tv_priv->tv_format); |
1076 | } | 1076 | } |
@@ -1078,8 +1078,8 @@ intel_tv_mode_find (struct intel_output *intel_output) | |||
1078 | static enum drm_mode_status | 1078 | static enum drm_mode_status |
1079 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) | 1079 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) |
1080 | { | 1080 | { |
1081 | struct intel_output *intel_output = to_intel_output(connector); | 1081 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1083 | 1083 | ||
1084 | /* Ensure TV refresh is close to desired refresh */ | 1084 | /* Ensure TV refresh is close to desired refresh */ |
1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) | 1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) |
@@ -1095,8 +1095,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1095 | { | 1095 | { |
1096 | struct drm_device *dev = encoder->dev; | 1096 | struct drm_device *dev = encoder->dev; |
1097 | struct drm_mode_config *drm_config = &dev->mode_config; | 1097 | struct drm_mode_config *drm_config = &dev->mode_config; |
1098 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 1098 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1099 | const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); | 1099 | const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder); |
1100 | struct drm_encoder *other_encoder; | 1100 | struct drm_encoder *other_encoder; |
1101 | 1101 | ||
1102 | if (!tv_mode) | 1102 | if (!tv_mode) |
@@ -1121,9 +1121,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1121 | struct drm_i915_private *dev_priv = dev->dev_private; | 1121 | struct drm_i915_private *dev_priv = dev->dev_private; |
1122 | struct drm_crtc *crtc = encoder->crtc; | 1122 | struct drm_crtc *crtc = encoder->crtc; |
1123 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1123 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1124 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 1124 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1125 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1125 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1126 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1126 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1127 | u32 tv_ctl; | 1127 | u32 tv_ctl; |
1128 | u32 hctl1, hctl2, hctl3; | 1128 | u32 hctl1, hctl2, hctl3; |
1129 | u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; | 1129 | u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; |
@@ -1360,9 +1360,9 @@ static const struct drm_display_mode reported_modes[] = { | |||
1360 | * \return false if TV is disconnected. | 1360 | * \return false if TV is disconnected. |
1361 | */ | 1361 | */ |
1362 | static int | 1362 | static int |
1363 | intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | 1363 | intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder) |
1364 | { | 1364 | { |
1365 | struct drm_encoder *encoder = &intel_output->enc; | 1365 | struct drm_encoder *encoder = &intel_encoder->enc; |
1366 | struct drm_device *dev = encoder->dev; | 1366 | struct drm_device *dev = encoder->dev; |
1367 | struct drm_i915_private *dev_priv = dev->dev_private; | 1367 | struct drm_i915_private *dev_priv = dev->dev_private; |
1368 | unsigned long irqflags; | 1368 | unsigned long irqflags; |
@@ -1441,9 +1441,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | |||
1441 | */ | 1441 | */ |
1442 | static void intel_tv_find_better_format(struct drm_connector *connector) | 1442 | static void intel_tv_find_better_format(struct drm_connector *connector) |
1443 | { | 1443 | { |
1444 | struct intel_output *intel_output = to_intel_output(connector); | 1444 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1445 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1445 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1446 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1446 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1447 | int i; | 1447 | int i; |
1448 | 1448 | ||
1449 | if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == | 1449 | if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == |
@@ -1475,9 +1475,9 @@ intel_tv_detect(struct drm_connector *connector) | |||
1475 | { | 1475 | { |
1476 | struct drm_crtc *crtc; | 1476 | struct drm_crtc *crtc; |
1477 | struct drm_display_mode mode; | 1477 | struct drm_display_mode mode; |
1478 | struct intel_output *intel_output = to_intel_output(connector); | 1478 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1479 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1479 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1480 | struct drm_encoder *encoder = &intel_output->enc; | 1480 | struct drm_encoder *encoder = &intel_encoder->enc; |
1481 | int dpms_mode; | 1481 | int dpms_mode; |
1482 | int type = tv_priv->type; | 1482 | int type = tv_priv->type; |
1483 | 1483 | ||
@@ -1485,12 +1485,12 @@ intel_tv_detect(struct drm_connector *connector) | |||
1485 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); | 1485 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); |
1486 | 1486 | ||
1487 | if (encoder->crtc && encoder->crtc->enabled) { | 1487 | if (encoder->crtc && encoder->crtc->enabled) { |
1488 | type = intel_tv_detect_type(encoder->crtc, intel_output); | 1488 | type = intel_tv_detect_type(encoder->crtc, intel_encoder); |
1489 | } else { | 1489 | } else { |
1490 | crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); | 1490 | crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode); |
1491 | if (crtc) { | 1491 | if (crtc) { |
1492 | type = intel_tv_detect_type(crtc, intel_output); | 1492 | type = intel_tv_detect_type(crtc, intel_encoder); |
1493 | intel_release_load_detect_pipe(intel_output, dpms_mode); | 1493 | intel_release_load_detect_pipe(intel_encoder, dpms_mode); |
1494 | } else | 1494 | } else |
1495 | type = -1; | 1495 | type = -1; |
1496 | } | 1496 | } |
@@ -1525,8 +1525,8 @@ static void | |||
1525 | intel_tv_chose_preferred_modes(struct drm_connector *connector, | 1525 | intel_tv_chose_preferred_modes(struct drm_connector *connector, |
1526 | struct drm_display_mode *mode_ptr) | 1526 | struct drm_display_mode *mode_ptr) |
1527 | { | 1527 | { |
1528 | struct intel_output *intel_output = to_intel_output(connector); | 1528 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1529 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1529 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1530 | 1530 | ||
1531 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) | 1531 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) |
1532 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; | 1532 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; |
@@ -1550,8 +1550,8 @@ static int | |||
1550 | intel_tv_get_modes(struct drm_connector *connector) | 1550 | intel_tv_get_modes(struct drm_connector *connector) |
1551 | { | 1551 | { |
1552 | struct drm_display_mode *mode_ptr; | 1552 | struct drm_display_mode *mode_ptr; |
1553 | struct intel_output *intel_output = to_intel_output(connector); | 1553 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1554 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1554 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1555 | int j, count = 0; | 1555 | int j, count = 0; |
1556 | u64 tmp; | 1556 | u64 tmp; |
1557 | 1557 | ||
@@ -1604,11 +1604,11 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1604 | static void | 1604 | static void |
1605 | intel_tv_destroy (struct drm_connector *connector) | 1605 | intel_tv_destroy (struct drm_connector *connector) |
1606 | { | 1606 | { |
1607 | struct intel_output *intel_output = to_intel_output(connector); | 1607 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1608 | 1608 | ||
1609 | drm_sysfs_connector_remove(connector); | 1609 | drm_sysfs_connector_remove(connector); |
1610 | drm_connector_cleanup(connector); | 1610 | drm_connector_cleanup(connector); |
1611 | kfree(intel_output); | 1611 | kfree(intel_encoder); |
1612 | } | 1612 | } |
1613 | 1613 | ||
1614 | 1614 | ||
@@ -1617,9 +1617,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop | |||
1617 | uint64_t val) | 1617 | uint64_t val) |
1618 | { | 1618 | { |
1619 | struct drm_device *dev = connector->dev; | 1619 | struct drm_device *dev = connector->dev; |
1620 | struct intel_output *intel_output = to_intel_output(connector); | 1620 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1621 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1621 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1622 | struct drm_encoder *encoder = &intel_output->enc; | 1622 | struct drm_encoder *encoder = &intel_encoder->enc; |
1623 | struct drm_crtc *crtc = encoder->crtc; | 1623 | struct drm_crtc *crtc = encoder->crtc; |
1624 | int ret = 0; | 1624 | int ret = 0; |
1625 | bool changed = false; | 1625 | bool changed = false; |
@@ -1740,7 +1740,7 @@ intel_tv_init(struct drm_device *dev) | |||
1740 | { | 1740 | { |
1741 | struct drm_i915_private *dev_priv = dev->dev_private; | 1741 | struct drm_i915_private *dev_priv = dev->dev_private; |
1742 | struct drm_connector *connector; | 1742 | struct drm_connector *connector; |
1743 | struct intel_output *intel_output; | 1743 | struct intel_encoder *intel_encoder; |
1744 | struct intel_tv_priv *tv_priv; | 1744 | struct intel_tv_priv *tv_priv; |
1745 | u32 tv_dac_on, tv_dac_off, save_tv_dac; | 1745 | u32 tv_dac_on, tv_dac_off, save_tv_dac; |
1746 | char **tv_format_names; | 1746 | char **tv_format_names; |
@@ -1780,28 +1780,28 @@ intel_tv_init(struct drm_device *dev) | |||
1780 | (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) | 1780 | (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) |
1781 | return; | 1781 | return; |
1782 | 1782 | ||
1783 | intel_output = kzalloc(sizeof(struct intel_output) + | 1783 | intel_encoder = kzalloc(sizeof(struct intel_encoder) + |
1784 | sizeof(struct intel_tv_priv), GFP_KERNEL); | 1784 | sizeof(struct intel_tv_priv), GFP_KERNEL); |
1785 | if (!intel_output) { | 1785 | if (!intel_encoder) { |
1786 | return; | 1786 | return; |
1787 | } | 1787 | } |
1788 | 1788 | ||
1789 | connector = &intel_output->base; | 1789 | connector = &intel_encoder->base; |
1790 | 1790 | ||
1791 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | 1791 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, |
1792 | DRM_MODE_CONNECTOR_SVIDEO); | 1792 | DRM_MODE_CONNECTOR_SVIDEO); |
1793 | 1793 | ||
1794 | drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, | 1794 | drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, |
1795 | DRM_MODE_ENCODER_TVDAC); | 1795 | DRM_MODE_ENCODER_TVDAC); |
1796 | 1796 | ||
1797 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 1797 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); |
1798 | tv_priv = (struct intel_tv_priv *)(intel_output + 1); | 1798 | tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); |
1799 | intel_output->type = INTEL_OUTPUT_TVOUT; | 1799 | intel_encoder->type = INTEL_OUTPUT_TVOUT; |
1800 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 1800 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
1801 | intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); | 1801 | intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); |
1802 | intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); | 1802 | intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); |
1803 | intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); | 1803 | intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); |
1804 | intel_output->dev_priv = tv_priv; | 1804 | intel_encoder->dev_priv = tv_priv; |
1805 | tv_priv->type = DRM_MODE_CONNECTOR_Unknown; | 1805 | tv_priv->type = DRM_MODE_CONNECTOR_Unknown; |
1806 | 1806 | ||
1807 | /* BIOS margin values */ | 1807 | /* BIOS margin values */ |
@@ -1812,7 +1812,7 @@ intel_tv_init(struct drm_device *dev) | |||
1812 | 1812 | ||
1813 | tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); | 1813 | tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); |
1814 | 1814 | ||
1815 | drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); | 1815 | drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); |
1816 | drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); | 1816 | drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); |
1817 | connector->interlace_allowed = false; | 1817 | connector->interlace_allowed = false; |
1818 | connector->doublescan_allowed = false; | 1818 | connector->doublescan_allowed = false; |
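[Not part of the patch] The intel_tv_init() hunk above allocates the encoder and its TV private data as a single zeroed block and then points dev_priv at the trailing bytes (tv_priv = (struct intel_tv_priv *)(intel_encoder + 1)). A minimal user-space sketch of that single-allocation pattern, with invented struct names:

#include <stdlib.h>

struct example_encoder { int type; void *dev_priv; };
struct example_tv_priv { int tv_format; };

static struct example_encoder *example_encoder_alloc(void)
{
	/* one zeroed allocation covers the encoder and its private data */
	struct example_encoder *enc = calloc(1, sizeof(struct example_encoder) +
						sizeof(struct example_tv_priv));

	if (!enc)
		return NULL;
	/* private data lives directly behind the encoder */
	enc->dev_priv = enc + 1;
	return enc;
}

Freeing the encoder releases both structures at once, which is why the error paths in these drivers only kfree() the encoder pointer.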
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index bcec2d79636e..1d569830ed99 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -908,11 +908,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) | |||
908 | uint8_t attr = U8((*ptr)++), shift; | 908 | uint8_t attr = U8((*ptr)++), shift; |
909 | uint32_t saved, dst; | 909 | uint32_t saved, dst; |
910 | int dptr = *ptr; | 910 | int dptr = *ptr; |
911 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; | ||
911 | SDEBUG(" dst: "); | 912 | SDEBUG(" dst: "); |
912 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 913 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
914 | /* op needs the full dst value */ | ||
915 | dst = saved; | ||
913 | shift = atom_get_src(ctx, attr, ptr); | 916 | shift = atom_get_src(ctx, attr, ptr); |
914 | SDEBUG(" shift: %d\n", shift); | 917 | SDEBUG(" shift: %d\n", shift); |
915 | dst <<= shift; | 918 | dst <<= shift; |
919 | dst &= atom_arg_mask[dst_align]; | ||
920 | dst >>= atom_arg_shift[dst_align]; | ||
916 | SDEBUG(" dst: "); | 921 | SDEBUG(" dst: "); |
917 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | 922 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
918 | } | 923 | } |
@@ -922,11 +927,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) | |||
922 | uint8_t attr = U8((*ptr)++), shift; | 927 | uint8_t attr = U8((*ptr)++), shift; |
923 | uint32_t saved, dst; | 928 | uint32_t saved, dst; |
924 | int dptr = *ptr; | 929 | int dptr = *ptr; |
930 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; | ||
925 | SDEBUG(" dst: "); | 931 | SDEBUG(" dst: "); |
926 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 932 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
933 | /* op needs the full dst value */ | ||
934 | dst = saved; | ||
927 | shift = atom_get_src(ctx, attr, ptr); | 935 | shift = atom_get_src(ctx, attr, ptr); |
928 | SDEBUG(" shift: %d\n", shift); | 936 | SDEBUG(" shift: %d\n", shift); |
929 | dst >>= shift; | 937 | dst >>= shift; |
938 | dst &= atom_arg_mask[dst_align]; | ||
939 | dst >>= atom_arg_shift[dst_align]; | ||
930 | SDEBUG(" dst: "); | 940 | SDEBUG(" dst: "); |
931 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | 941 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
932 | } | 942 | } |
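[Not part of the patch] A sketch of what the shl/shr fix above does: the opcode has to operate on the full saved register word so that bits can shift into the destination field, then mask the result back down to that field and realign it before the writeback re-inserts it. FIELD_MASK and FIELD_SHIFT below stand in for the atom_arg_mask/atom_arg_shift table entries; the 8-bit field layout is hypothetical.

#include <stdint.h>

#define FIELD_MASK  0x0000ff00u		/* hypothetical 8-bit field, bits 8..15 */
#define FIELD_SHIFT 8

static uint32_t shl_field(uint32_t reg, unsigned int shift)
{
	uint32_t dst = reg;		/* work on the full word, not the extracted field */

	dst <<= shift;			/* low bits may shift up into the field */
	dst &= FIELD_MASK;		/* keep only the destination bits */
	dst >>= FIELD_SHIFT;		/* realign so the writeback can place the field */
	return (reg & ~FIELD_MASK) | (dst << FIELD_SHIFT);
}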
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index fd4ef6d18849..a87990b3ae84 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -521,6 +521,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
521 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 521 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
522 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 522 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
523 | adjusted_clock = mode->clock * 2; | 523 | adjusted_clock = mode->clock * 2; |
524 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { | ||
525 | pll->algo = PLL_ALGO_LEGACY; | ||
526 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; | ||
527 | } | ||
524 | } else { | 528 | } else { |
525 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 529 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
526 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 530 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c9580497ede4..d7388fdb6d0b 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -2891,7 +2891,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, | |||
2891 | { | 2891 | { |
2892 | struct radeon_bo *robj; | 2892 | struct radeon_bo *robj; |
2893 | unsigned long size; | 2893 | unsigned long size; |
2894 | unsigned u, i, w, h; | 2894 | unsigned u, i, w, h, d; |
2895 | int ret; | 2895 | int ret; |
2896 | 2896 | ||
2897 | for (u = 0; u < track->num_texture; u++) { | 2897 | for (u = 0; u < track->num_texture; u++) { |
@@ -2923,20 +2923,25 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, | |||
2923 | h = h / (1 << i); | 2923 | h = h / (1 << i); |
2924 | if (track->textures[u].roundup_h) | 2924 | if (track->textures[u].roundup_h) |
2925 | h = roundup_pow_of_two(h); | 2925 | h = roundup_pow_of_two(h); |
2926 | if (track->textures[u].tex_coord_type == 1) { | ||
2927 | d = (1 << track->textures[u].txdepth) / (1 << i); | ||
2928 | if (!d) | ||
2929 | d = 1; | ||
2930 | } else { | ||
2931 | d = 1; | ||
2932 | } | ||
2926 | if (track->textures[u].compress_format) { | 2933 | if (track->textures[u].compress_format) { |
2927 | 2934 | ||
2928 | size += r100_track_compress_size(track->textures[u].compress_format, w, h); | 2935 | size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; |
2929 | /* compressed textures are block based */ | 2936 | /* compressed textures are block based */ |
2930 | } else | 2937 | } else |
2931 | size += w * h; | 2938 | size += w * h * d; |
2932 | } | 2939 | } |
2933 | size *= track->textures[u].cpp; | 2940 | size *= track->textures[u].cpp; |
2934 | 2941 | ||
2935 | switch (track->textures[u].tex_coord_type) { | 2942 | switch (track->textures[u].tex_coord_type) { |
2936 | case 0: | 2943 | case 0: |
2937 | break; | ||
2938 | case 1: | 2944 | case 1: |
2939 | size *= (1 << track->textures[u].txdepth); | ||
2940 | break; | 2945 | break; |
2941 | case 2: | 2946 | case 2: |
2942 | if (track->separate_cube) { | 2947 | if (track->separate_cube) { |
@@ -3007,7 +3012,11 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3007 | } | 3012 | } |
3008 | } | 3013 | } |
3009 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; | 3014 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; |
3010 | nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; | 3015 | if (track->vap_vf_cntl & (1 << 14)) { |
3016 | nverts = track->vap_alt_nverts; | ||
3017 | } else { | ||
3018 | nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; | ||
3019 | } | ||
3011 | switch (prim_walk) { | 3020 | switch (prim_walk) { |
3012 | case 1: | 3021 | case 1: |
3013 | for (i = 0; i < track->num_arrays; i++) { | 3022 | for (i = 0; i < track->num_arrays; i++) { |
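[Not part of the patch] A simplified sketch of the size accounting the r100 tracker change introduces for 3D textures: the depth (stored as a log2 value, as in txdepth above) halves with each mip level but never drops below one slice, and it multiplies every level's footprint. This ignores the pitch rounding, compression and cube-map cases the real check also handles.

#include <stddef.h>

static size_t texture_3d_size(unsigned int w, unsigned int h,
			      unsigned int txdepth, unsigned int nlevels,
			      unsigned int cpp)
{
	size_t size = 0;
	unsigned int i;

	for (i = 0; i < nlevels; i++) {
		unsigned int lw = (w >> i) ? (w >> i) : 1;
		unsigned int lh = (h >> i) ? (h >> i) : 1;
		unsigned int d = (1u << txdepth) >> i;

		if (!d)
			d = 1;		/* depth never collapses below one slice */
		size += (size_t)lw * lh * d;
	}
	return size * cpp;		/* bytes per texel applied once at the end */
}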
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index b27a6999d219..fadfe68de9cc 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
@@ -64,6 +64,7 @@ struct r100_cs_track { | |||
64 | unsigned maxy; | 64 | unsigned maxy; |
65 | unsigned vtx_size; | 65 | unsigned vtx_size; |
66 | unsigned vap_vf_cntl; | 66 | unsigned vap_vf_cntl; |
67 | unsigned vap_alt_nverts; | ||
67 | unsigned immd_dwords; | 68 | unsigned immd_dwords; |
68 | unsigned num_arrays; | 69 | unsigned num_arrays; |
69 | unsigned max_indx; | 70 | unsigned max_indx; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 2b9affe754ce..eaf1f6bc44f1 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -324,13 +324,12 @@ void r300_gpu_init(struct radeon_device *rdev) | |||
324 | uint32_t gb_tile_config, tmp; | 324 | uint32_t gb_tile_config, tmp; |
325 | 325 | ||
326 | r100_hdp_reset(rdev); | 326 | r100_hdp_reset(rdev); |
327 | /* FIXME: rv380 one pipes ? */ | ||
328 | if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || | 327 | if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || |
329 | (rdev->family == CHIP_R350)) { | 328 | (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) { |
330 | /* r300,r350 */ | 329 | /* r300,r350 */ |
331 | rdev->num_gb_pipes = 2; | 330 | rdev->num_gb_pipes = 2; |
332 | } else { | 331 | } else { |
333 | /* rv350,rv370,rv380,r300 AD */ | 332 | /* rv350,rv370,rv380,r300 AD, r350 AH */ |
334 | rdev->num_gb_pipes = 1; | 333 | rdev->num_gb_pipes = 1; |
335 | } | 334 | } |
336 | rdev->num_z_pipes = 1; | 335 | rdev->num_z_pipes = 1; |
@@ -730,6 +729,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
730 | /* VAP_VF_MAX_VTX_INDX */ | 729 | /* VAP_VF_MAX_VTX_INDX */ |
731 | track->max_indx = idx_value & 0x00FFFFFFUL; | 730 | track->max_indx = idx_value & 0x00FFFFFFUL; |
732 | break; | 731 | break; |
732 | case 0x2088: | ||
733 | /* VAP_ALT_NUM_VERTICES - only valid on r500 */ | ||
734 | if (p->rdev->family < CHIP_RV515) | ||
735 | goto fail; | ||
736 | track->vap_alt_nverts = idx_value & 0xFFFFFF; | ||
737 | break; | ||
733 | case 0x43E4: | 738 | case 0x43E4: |
734 | /* SC_SCISSOR1 */ | 739 | /* SC_SCISSOR1 */ |
735 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; | 740 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; |
@@ -767,7 +772,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
767 | tmp = idx_value & ~(0x7 << 16); | 772 | tmp = idx_value & ~(0x7 << 16); |
768 | tmp |= tile_flags; | 773 | tmp |= tile_flags; |
769 | ib[idx] = tmp; | 774 | ib[idx] = tmp; |
770 | |||
771 | i = (reg - 0x4E38) >> 2; | 775 | i = (reg - 0x4E38) >> 2; |
772 | track->cb[i].pitch = idx_value & 0x3FFE; | 776 | track->cb[i].pitch = idx_value & 0x3FFE; |
773 | switch (((idx_value >> 21) & 0xF)) { | 777 | switch (((idx_value >> 21) & 0xF)) { |
@@ -1052,11 +1056,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1052 | break; | 1056 | break; |
1053 | /* fallthrough do not move */ | 1057 | /* fallthrough do not move */ |
1054 | default: | 1058 | default: |
1055 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 1059 | goto fail; |
1056 | reg, idx); | ||
1057 | return -EINVAL; | ||
1058 | } | 1060 | } |
1059 | return 0; | 1061 | return 0; |
1062 | fail: | ||
1063 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | ||
1064 | reg, idx); | ||
1065 | return -EINVAL; | ||
1060 | } | 1066 | } |
1061 | 1067 | ||
1062 | static int r300_packet3_check(struct radeon_cs_parser *p, | 1068 | static int r300_packet3_check(struct radeon_cs_parser *p, |
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index ea46d558e8f3..c5c2742e4140 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c | |||
@@ -921,7 +921,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, | |||
921 | 921 | ||
922 | ptr_addr = drm_buffer_read_object(cmdbuf->buffer, | 922 | ptr_addr = drm_buffer_read_object(cmdbuf->buffer, |
923 | sizeof(stack_ptr_addr), &stack_ptr_addr); | 923 | sizeof(stack_ptr_addr), &stack_ptr_addr); |
924 | ref_age_base = (u32 *)(unsigned long)*ptr_addr; | 924 | ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr); |
925 | 925 | ||
926 | for (i=0; i < header.scratch.n_bufs; i++) { | 926 | for (i=0; i < header.scratch.n_bufs; i++) { |
927 | buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); | 927 | buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); |
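[Not part of the patch] get_unaligned() is used above because the 64-bit scratch pointer read out of the command buffer may not sit on a natural 8-byte boundary. A portable user-space analogue of the same idea uses memcpy, which avoids a misaligned load that can fault or be slow on strict-alignment architectures:

#include <stdint.h>
#include <string.h>

static uint64_t read_unaligned_u64(const void *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));	/* byte copy, no alignment assumption */
	return v;
}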
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 3dc968c9f5a4..c2bda4ad62e7 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -59,6 +59,12 @@ void r420_pipes_init(struct radeon_device *rdev) | |||
59 | /* get max number of pipes */ | 59 | /* get max number of pipes */ |
60 | gb_pipe_select = RREG32(0x402C); | 60 | gb_pipe_select = RREG32(0x402C); |
61 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; | 61 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; |
62 | |||
63 | /* SE chips have 1 pipe */ | ||
64 | if ((rdev->pdev->device == 0x5e4c) || | ||
65 | (rdev->pdev->device == 0x5e4f)) | ||
66 | num_pipes = 1; | ||
67 | |||
62 | rdev->num_gb_pipes = num_pipes; | 68 | rdev->num_gb_pipes = num_pipes; |
63 | tmp = 0; | 69 | tmp = 0; |
64 | switch (num_pipes) { | 70 | switch (num_pipes) { |
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index dac7042b797e..1d898051c631 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
@@ -35,7 +35,7 @@ | |||
35 | */ | 35 | */ |
36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) | 36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) |
37 | { | 37 | { |
38 | return rdev->family >= CHIP_R600 | 38 | return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR) |
39 | || rdev->family == CHIP_RS600 | 39 | || rdev->family == CHIP_RS600 |
40 | || rdev->family == CHIP_RS690 | 40 | || rdev->family == CHIP_RS690 |
41 | || rdev->family == CHIP_RS740; | 41 | || rdev->family == CHIP_RS740; |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 029fa1406d1d..2616b822ba68 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -314,6 +314,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod | |||
314 | struct radeon_device *rdev = dev->dev_private; | 314 | struct radeon_device *rdev = dev->dev_private; |
315 | uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; | 315 | uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; |
316 | 316 | ||
317 | if (ASIC_IS_DCE4(rdev)) | ||
318 | return; | ||
319 | |||
317 | if (!offset) | 320 | if (!offset) |
318 | return; | 321 | return; |
319 | 322 | ||
@@ -484,6 +487,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder) | |||
484 | struct radeon_device *rdev = dev->dev_private; | 487 | struct radeon_device *rdev = dev->dev_private; |
485 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 488 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
486 | 489 | ||
490 | if (ASIC_IS_DCE4(rdev)) | ||
491 | return; | ||
492 | |||
487 | if (!radeon_encoder->hdmi_offset) { | 493 | if (!radeon_encoder->hdmi_offset) { |
488 | r600_hdmi_assign_block(encoder); | 494 | r600_hdmi_assign_block(encoder); |
489 | if (!radeon_encoder->hdmi_offset) { | 495 | if (!radeon_encoder->hdmi_offset) { |
@@ -525,6 +531,9 @@ void r600_hdmi_disable(struct drm_encoder *encoder) | |||
525 | struct radeon_device *rdev = dev->dev_private; | 531 | struct radeon_device *rdev = dev->dev_private; |
526 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 532 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
527 | 533 | ||
534 | if (ASIC_IS_DCE4(rdev)) | ||
535 | return; | ||
536 | |||
528 | if (!radeon_encoder->hdmi_offset) { | 537 | if (!radeon_encoder->hdmi_offset) { |
529 | dev_err(rdev->dev, "Disabling not enabled HDMI\n"); | 538 | dev_err(rdev->dev, "Disabling not enabled HDMI\n"); |
530 | return; | 539 | return; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 3fba50540f72..1331351c5178 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, | |||
162 | { | 162 | { |
163 | struct drm_device *dev = connector->dev; | 163 | struct drm_device *dev = connector->dev; |
164 | struct drm_connector *conflict; | 164 | struct drm_connector *conflict; |
165 | struct radeon_connector *radeon_conflict; | ||
165 | int i; | 166 | int i; |
166 | 167 | ||
167 | list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { | 168 | list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { |
168 | if (conflict == connector) | 169 | if (conflict == connector) |
169 | continue; | 170 | continue; |
170 | 171 | ||
172 | radeon_conflict = to_radeon_connector(conflict); | ||
171 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | 173 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
172 | if (conflict->encoder_ids[i] == 0) | 174 | if (conflict->encoder_ids[i] == 0) |
173 | break; | 175 | break; |
@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, | |||
177 | if (conflict->status != connector_status_connected) | 179 | if (conflict->status != connector_status_connected) |
178 | continue; | 180 | continue; |
179 | 181 | ||
182 | if (radeon_conflict->use_digital) | ||
183 | continue; | ||
184 | |||
180 | if (priority == true) { | 185 | if (priority == true) { |
181 | DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); | 186 | DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); |
182 | DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); | 187 | DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); |
@@ -287,6 +292,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
287 | 292 | ||
288 | if (property == rdev->mode_info.coherent_mode_property) { | 293 | if (property == rdev->mode_info.coherent_mode_property) { |
289 | struct radeon_encoder_atom_dig *dig; | 294 | struct radeon_encoder_atom_dig *dig; |
295 | bool new_coherent_mode; | ||
290 | 296 | ||
291 | /* need to find digital encoder on connector */ | 297 | /* need to find digital encoder on connector */ |
292 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | 298 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); |
@@ -299,8 +305,11 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
299 | return 0; | 305 | return 0; |
300 | 306 | ||
301 | dig = radeon_encoder->enc_priv; | 307 | dig = radeon_encoder->enc_priv; |
302 | dig->coherent_mode = val ? true : false; | 308 | new_coherent_mode = val ? true : false; |
303 | radeon_property_change_mode(&radeon_encoder->base); | 309 | if (dig->coherent_mode != new_coherent_mode) { |
310 | dig->coherent_mode = new_coherent_mode; | ||
311 | radeon_property_change_mode(&radeon_encoder->base); | ||
312 | } | ||
304 | } | 313 | } |
305 | 314 | ||
306 | if (property == rdev->mode_info.tv_std_property) { | 315 | if (property == rdev->mode_info.tv_std_property) { |
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 419630dd2075..2f042a3c0e62 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -435,14 +435,19 @@ static void radeon_init_pipes(struct drm_device *dev) | |||
435 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { | 435 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { |
436 | gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); | 436 | gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); |
437 | dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; | 437 | dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; |
438 | /* SE cards have 1 pipe */ | ||
439 | if ((dev->pdev->device == 0x5e4c) || | ||
440 | (dev->pdev->device == 0x5e4f)) | ||
441 | dev_priv->num_gb_pipes = 1; | ||
438 | } else { | 442 | } else { |
439 | /* R3xx */ | 443 | /* R3xx */ |
440 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 && | 444 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 && |
441 | dev->pdev->device != 0x4144) || | 445 | dev->pdev->device != 0x4144) || |
442 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { | 446 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 && |
447 | dev->pdev->device != 0x4148)) { | ||
443 | dev_priv->num_gb_pipes = 2; | 448 | dev_priv->num_gb_pipes = 2; |
444 | } else { | 449 | } else { |
445 | /* RV3xx/R300 AD */ | 450 | /* RV3xx/R300 AD/R350 AH */ |
446 | dev_priv->num_gb_pipes = 1; | 451 | dev_priv->num_gb_pipes = 1; |
447 | } | 452 | } |
448 | } | 453 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index bddf17f97da8..7b629e305560 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -36,6 +36,54 @@ | |||
36 | #include "radeon.h" | 36 | #include "radeon.h" |
37 | #include "atom.h" | 37 | #include "atom.h" |
38 | 38 | ||
39 | static const char radeon_family_name[][16] = { | ||
40 | "R100", | ||
41 | "RV100", | ||
42 | "RS100", | ||
43 | "RV200", | ||
44 | "RS200", | ||
45 | "R200", | ||
46 | "RV250", | ||
47 | "RS300", | ||
48 | "RV280", | ||
49 | "R300", | ||
50 | "R350", | ||
51 | "RV350", | ||
52 | "RV380", | ||
53 | "R420", | ||
54 | "R423", | ||
55 | "RV410", | ||
56 | "RS400", | ||
57 | "RS480", | ||
58 | "RS600", | ||
59 | "RS690", | ||
60 | "RS740", | ||
61 | "RV515", | ||
62 | "R520", | ||
63 | "RV530", | ||
64 | "RV560", | ||
65 | "RV570", | ||
66 | "R580", | ||
67 | "R600", | ||
68 | "RV610", | ||
69 | "RV630", | ||
70 | "RV670", | ||
71 | "RV620", | ||
72 | "RV635", | ||
73 | "RS780", | ||
74 | "RS880", | ||
75 | "RV770", | ||
76 | "RV730", | ||
77 | "RV710", | ||
78 | "RV740", | ||
79 | "CEDAR", | ||
80 | "REDWOOD", | ||
81 | "JUNIPER", | ||
82 | "CYPRESS", | ||
83 | "HEMLOCK", | ||
84 | "LAST", | ||
85 | }; | ||
86 | |||
39 | /* | 87 | /* |
40 | * Clear GPU surface registers. | 88 | * Clear GPU surface registers. |
41 | */ | 89 | */ |
@@ -526,7 +574,6 @@ int radeon_device_init(struct radeon_device *rdev, | |||
526 | int r; | 574 | int r; |
527 | int dma_bits; | 575 | int dma_bits; |
528 | 576 | ||
529 | DRM_INFO("radeon: Initializing kernel modesetting.\n"); | ||
530 | rdev->shutdown = false; | 577 | rdev->shutdown = false; |
531 | rdev->dev = &pdev->dev; | 578 | rdev->dev = &pdev->dev; |
532 | rdev->ddev = ddev; | 579 | rdev->ddev = ddev; |
@@ -538,6 +585,10 @@ int radeon_device_init(struct radeon_device *rdev, | |||
538 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 585 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
539 | rdev->gpu_lockup = false; | 586 | rdev->gpu_lockup = false; |
540 | rdev->accel_working = false; | 587 | rdev->accel_working = false; |
588 | |||
589 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n", | ||
590 | radeon_family_name[rdev->family], pdev->vendor, pdev->device); | ||
591 | |||
541 | /* mutex initialization are all done here so we | 592 | /* mutex initialization are all done here so we |
542 | * can recall function without having locking issues */ | 593 | * can recall function without having locking issues */ |
543 | mutex_init(&rdev->cs_mutex); | 594 | mutex_init(&rdev->cs_mutex); |
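[Not part of the patch] The DRM_INFO line above indexes radeon_family_name[] directly with rdev->family, which only works while the enum is a dense index starting at zero (hence the explicit CHIP_R100 = 0 further down) and the string table is kept in the same order. A toy sketch of that invariant, with placeholder names:

#include <stdio.h>

enum chip_family { FAM_A = 0, FAM_B, FAM_C, FAM_LAST };	/* placeholder names */

static const char family_name[][16] = { "A", "B", "C", "LAST" };

static void print_family(enum chip_family f)
{
	if (f < FAM_LAST)		/* enum and table must stay in sync */
		printf("chip family %s\n", family_name[f]);
}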
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b8d672828246..bb1c122cad21 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -86,12 +86,12 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc) | |||
86 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); | 86 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); |
87 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); | 87 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); |
88 | 88 | ||
89 | WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id); | 89 | WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); |
90 | WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007); | 90 | WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); |
91 | 91 | ||
92 | WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0); | 92 | WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0); |
93 | for (i = 0; i < 256; i++) { | 93 | for (i = 0; i < 256; i++) { |
94 | WREG32(EVERGREEN_DC_LUT_30_COLOR, | 94 | WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, |
95 | (radeon_crtc->lut_r[i] << 20) | | 95 | (radeon_crtc->lut_r[i] << 20) | |
96 | (radeon_crtc->lut_g[i] << 10) | | 96 | (radeon_crtc->lut_g[i] << 10) | |
97 | (radeon_crtc->lut_b[i] << 0)); | 97 | (radeon_crtc->lut_b[i] << 0)); |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 055a51732dcb..4b05563d99e1 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -43,9 +43,10 @@ | |||
43 | * - 2.0.0 - initial interface | 43 | * - 2.0.0 - initial interface |
44 | * - 2.1.0 - add square tiling interface | 44 | * - 2.1.0 - add square tiling interface |
45 | * - 2.2.0 - add r6xx/r7xx const buffer support | 45 | * - 2.2.0 - add r6xx/r7xx const buffer support |
46 | * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs | ||
46 | */ | 47 | */ |
47 | #define KMS_DRIVER_MAJOR 2 | 48 | #define KMS_DRIVER_MAJOR 2 |
48 | #define KMS_DRIVER_MINOR 2 | 49 | #define KMS_DRIVER_MINOR 3 |
49 | #define KMS_DRIVER_PATCHLEVEL 0 | 50 | #define KMS_DRIVER_PATCHLEVEL 0 |
50 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 51 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
51 | int radeon_driver_unload_kms(struct drm_device *dev); | 52 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index c52fc3080b67..fed7b8084779 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -865,6 +865,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
865 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 865 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
866 | if (dig->coherent_mode) | 866 | if (dig->coherent_mode) |
867 | args.v3.acConfig.fCoherentMode = 1; | 867 | args.v3.acConfig.fCoherentMode = 1; |
868 | if (radeon_encoder->pixel_clock > 165000) | ||
869 | args.v3.acConfig.fDualLinkConnector = 1; | ||
868 | } | 870 | } |
869 | } else if (ASIC_IS_DCE32(rdev)) { | 871 | } else if (ASIC_IS_DCE32(rdev)) { |
870 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; | 872 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; |
@@ -888,6 +890,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
888 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 890 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
889 | if (dig->coherent_mode) | 891 | if (dig->coherent_mode) |
890 | args.v2.acConfig.fCoherentMode = 1; | 892 | args.v2.acConfig.fCoherentMode = 1; |
893 | if (radeon_encoder->pixel_clock > 165000) | ||
894 | args.v2.acConfig.fDualLinkConnector = 1; | ||
891 | } | 895 | } |
892 | } else { | 896 | } else { |
893 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; | 897 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; |
@@ -1322,7 +1326,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1322 | 1326 | ||
1323 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1327 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1324 | 1328 | ||
1325 | if (ASIC_IS_AVIVO(rdev)) { | 1329 | if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { |
1326 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) | 1330 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) |
1327 | atombios_yuv_setup(encoder, true); | 1331 | atombios_yuv_setup(encoder, true); |
1328 | else | 1332 | else |
@@ -1373,8 +1377,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1373 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | 1377 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
1374 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | 1378 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
1375 | atombios_dac_setup(encoder, ATOM_ENABLE); | 1379 | atombios_dac_setup(encoder, ATOM_ENABLE); |
1376 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | 1380 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) { |
1377 | atombios_tv_setup(encoder, ATOM_ENABLE); | 1381 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) |
1382 | atombios_tv_setup(encoder, ATOM_ENABLE); | ||
1383 | else | ||
1384 | atombios_tv_setup(encoder, ATOM_DISABLE); | ||
1385 | } | ||
1378 | break; | 1386 | break; |
1379 | } | 1387 | } |
1380 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 1388 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 93c7d5d41914..e329066dcabd 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h | |||
@@ -36,7 +36,7 @@ | |||
36 | * Radeon chip families | 36 | * Radeon chip families |
37 | */ | 37 | */ |
38 | enum radeon_family { | 38 | enum radeon_family { |
39 | CHIP_R100, | 39 | CHIP_R100 = 0, |
40 | CHIP_RV100, | 40 | CHIP_RV100, |
41 | CHIP_RS100, | 41 | CHIP_RS100, |
42 | CHIP_RV200, | 42 | CHIP_RV200, |
@@ -99,4 +99,5 @@ enum radeon_chip_flags { | |||
99 | RADEON_IS_PCI = 0x00800000UL, | 99 | RADEON_IS_PCI = 0x00800000UL, |
100 | RADEON_IS_IGPGART = 0x01000000UL, | 100 | RADEON_IS_IGPGART = 0x01000000UL, |
101 | }; | 101 | }; |
102 | |||
102 | #endif | 103 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index d3657dcfdd26..c633319f98ed 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -165,7 +165,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) | |||
165 | { | 165 | { |
166 | struct radeon_device *rdev = dev->dev_private; | 166 | struct radeon_device *rdev = dev->dev_private; |
167 | 167 | ||
168 | if (crtc < 0 || crtc > 1) { | 168 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
169 | DRM_ERROR("Invalid crtc %d\n", crtc); | 169 | DRM_ERROR("Invalid crtc %d\n", crtc); |
170 | return -EINVAL; | 170 | return -EINVAL; |
171 | } | 171 | } |
@@ -177,7 +177,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) | |||
177 | { | 177 | { |
178 | struct radeon_device *rdev = dev->dev_private; | 178 | struct radeon_device *rdev = dev->dev_private; |
179 | 179 | ||
180 | if (crtc < 0 || crtc > 1) { | 180 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
181 | DRM_ERROR("Invalid crtc %d\n", crtc); | 181 | DRM_ERROR("Invalid crtc %d\n", crtc); |
182 | return -EINVAL; | 182 | return -EINVAL; |
183 | } | 183 | } |
@@ -191,7 +191,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) | |||
191 | { | 191 | { |
192 | struct radeon_device *rdev = dev->dev_private; | 192 | struct radeon_device *rdev = dev->dev_private; |
193 | 193 | ||
194 | if (crtc < 0 || crtc > 1) { | 194 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
195 | DRM_ERROR("Invalid crtc %d\n", crtc); | 195 | DRM_ERROR("Invalid crtc %d\n", crtc); |
196 | return; | 196 | return; |
197 | } | 197 | } |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300 index 19c4663fa9c6..1e97b2d129fd 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r300 +++ b/drivers/gpu/drm/radeon/reg_srcs/r300 | |||
@@ -125,6 +125,8 @@ r300 0x4f60 | |||
125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
127 | 0x4008 GB_ENABLE | 127 | 0x4008 GB_ENABLE |
128 | 0x4010 GB_MSPOS0 | ||
129 | 0x4014 GB_MSPOS1 | ||
128 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
129 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
130 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 index 989f7a020832..e958980d00f1 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r420 +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 | |||
@@ -125,6 +125,8 @@ r420 0x4f60 | |||
125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
127 | 0x4008 GB_ENABLE | 127 | 0x4008 GB_ENABLE |
128 | 0x4010 GB_MSPOS0 | ||
129 | 0x4014 GB_MSPOS1 | ||
128 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
129 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
130 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 6801b865d1c4..83e8bc0c2bb2 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 | |||
@@ -125,6 +125,8 @@ rs600 0x6d40 | |||
125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
127 | 0x4008 GB_ENABLE | 127 | 0x4008 GB_ENABLE |
128 | 0x4010 GB_MSPOS0 | ||
129 | 0x4014 GB_MSPOS1 | ||
128 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
129 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
130 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index 38abf63bf2cd..1e46233985eb 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 | |||
@@ -35,6 +35,7 @@ rv515 0x6d40 | |||
35 | 0x1DA8 VAP_VPORT_ZSCALE | 35 | 0x1DA8 VAP_VPORT_ZSCALE |
36 | 0x1DAC VAP_VPORT_ZOFFSET | 36 | 0x1DAC VAP_VPORT_ZOFFSET |
37 | 0x2080 VAP_CNTL | 37 | 0x2080 VAP_CNTL |
38 | 0x208C VAP_INDEX_OFFSET | ||
38 | 0x2090 VAP_OUT_VTX_FMT_0 | 39 | 0x2090 VAP_OUT_VTX_FMT_0 |
39 | 0x2094 VAP_OUT_VTX_FMT_1 | 40 | 0x2094 VAP_OUT_VTX_FMT_1 |
40 | 0x20B0 VAP_VTE_CNTL | 41 | 0x20B0 VAP_VTE_CNTL |
@@ -158,6 +159,8 @@ rv515 0x6d40 | |||
158 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 159 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
159 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 160 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
160 | 0x4008 GB_ENABLE | 161 | 0x4008 GB_ENABLE |
162 | 0x4010 GB_MSPOS0 | ||
163 | 0x4014 GB_MSPOS1 | ||
161 | 0x401C GB_SELECT | 164 | 0x401C GB_SELECT |
162 | 0x4020 GB_AA_CONFIG | 165 | 0x4020 GB_AA_CONFIG |
163 | 0x4024 GB_FIFO_SIZE | 166 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index abf824c2123d..a81bc7a21e14 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -159,7 +159,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) | |||
159 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); | 159 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
160 | 160 | ||
161 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); | 161 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
162 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); | 162 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1); |
163 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); | 163 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
164 | 164 | ||
165 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); | 165 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
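[Not part of the patch] The rs600 one-liner above replaces & with | because ANDing two distinct single-bit request masks always yields zero, so neither invalidate bit was ever set. A minimal sketch with illustrative bit positions (the real fields come from the S_000100_* macros):

#include <stdint.h>

#define INVALIDATE_ALL_L1_TLBS	(1u << 0)	/* illustrative bit positions */
#define INVALIDATE_L2_CACHE	(1u << 1)

static uint32_t request_tlb_flush(uint32_t ctl)
{
	/* (1 << 0) & (1 << 1) == 0, so the old code set nothing;
	 * OR sets both request bits as intended */
	return ctl | (INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
}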
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index c1605b528e8f..0f28d91f29d8 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c | |||
@@ -142,6 +142,12 @@ static const char *temperature_sensors_sets[][41] = { | |||
142 | "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S", | 142 | "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S", |
143 | "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S", | 143 | "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S", |
144 | NULL }, | 144 | NULL }, |
145 | /* Set 17: iMac 9,1 */ | ||
146 | { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TH0P", "TL0P", | ||
147 | "TN0D", "TN0H", "TN0P", "TO0P", "Tm0P", "Tp0P", NULL }, | ||
148 | /* Set 18: MacBook Pro 2,2 */ | ||
149 | { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0", | ||
150 | "Th0H", "Th1H", "Tm0P", "Ts0P", NULL }, | ||
145 | }; | 151 | }; |
146 | 152 | ||
147 | /* List of keys used to read/write fan speeds */ | 153 | /* List of keys used to read/write fan speeds */ |
@@ -1350,6 +1356,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = { | |||
1350 | { .accelerometer = 1, .light = 1, .temperature_set = 15 }, | 1356 | { .accelerometer = 1, .light = 1, .temperature_set = 15 }, |
1351 | /* MacPro3,1: temperature set 16 */ | 1357 | /* MacPro3,1: temperature set 16 */ |
1352 | { .accelerometer = 0, .light = 0, .temperature_set = 16 }, | 1358 | { .accelerometer = 0, .light = 0, .temperature_set = 16 }, |
1359 | /* iMac 9,1: light sensor only, temperature set 17 */ | ||
1360 | { .accelerometer = 0, .light = 0, .temperature_set = 17 }, | ||
1361 | /* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */ | ||
1362 | { .accelerometer = 1, .light = 1, .temperature_set = 18 }, | ||
1353 | }; | 1363 | }; |
1354 | 1364 | ||
1355 | /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". | 1365 | /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". |
@@ -1375,6 +1385,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { | |||
1375 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | 1385 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), |
1376 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") }, | 1386 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") }, |
1377 | &applesmc_dmi_data[9]}, | 1387 | &applesmc_dmi_data[9]}, |
1388 | { applesmc_dmi_match, "Apple MacBook Pro 2,2", { | ||
1389 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple Computer, Inc."), | ||
1390 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2") }, | ||
1391 | &applesmc_dmi_data[18]}, | ||
1378 | { applesmc_dmi_match, "Apple MacBook Pro", { | 1392 | { applesmc_dmi_match, "Apple MacBook Pro", { |
1379 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), | 1393 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), |
1380 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, | 1394 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, |
@@ -1415,6 +1429,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { | |||
1415 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | 1429 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), |
1416 | DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, | 1430 | DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, |
1417 | &applesmc_dmi_data[4]}, | 1431 | &applesmc_dmi_data[4]}, |
1432 | { applesmc_dmi_match, "Apple iMac 9,1", { | ||
1433 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), | ||
1434 | DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1") }, | ||
1435 | &applesmc_dmi_data[17]}, | ||
1418 | { applesmc_dmi_match, "Apple iMac 8", { | 1436 | { applesmc_dmi_match, "Apple iMac 8", { |
1419 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | 1437 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), |
1420 | DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") }, | 1438 | DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") }, |
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 75f3fa55663d..16c420240724 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c | |||
@@ -1169,15 +1169,19 @@ static int atk_create_files(struct atk_data *data) | |||
1169 | int err; | 1169 | int err; |
1170 | 1170 | ||
1171 | list_for_each_entry(s, &data->sensor_list, list) { | 1171 | list_for_each_entry(s, &data->sensor_list, list) { |
1172 | sysfs_attr_init(&s->input_attr.attr); | ||
1172 | err = device_create_file(data->hwmon_dev, &s->input_attr); | 1173 | err = device_create_file(data->hwmon_dev, &s->input_attr); |
1173 | if (err) | 1174 | if (err) |
1174 | return err; | 1175 | return err; |
1176 | sysfs_attr_init(&s->label_attr.attr); | ||
1175 | err = device_create_file(data->hwmon_dev, &s->label_attr); | 1177 | err = device_create_file(data->hwmon_dev, &s->label_attr); |
1176 | if (err) | 1178 | if (err) |
1177 | return err; | 1179 | return err; |
1180 | sysfs_attr_init(&s->limit1_attr.attr); | ||
1178 | err = device_create_file(data->hwmon_dev, &s->limit1_attr); | 1181 | err = device_create_file(data->hwmon_dev, &s->limit1_attr); |
1179 | if (err) | 1182 | if (err) |
1180 | return err; | 1183 | return err; |
1184 | sysfs_attr_init(&s->limit2_attr.attr); | ||
1181 | err = device_create_file(data->hwmon_dev, &s->limit2_attr); | 1185 | err = device_create_file(data->hwmon_dev, &s->limit2_attr); |
1182 | if (err) | 1186 | if (err) |
1183 | return err; | 1187 | return err; |
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c index be475e844c2a..c8ab50516672 100644 --- a/drivers/hwmon/hp_accel.c +++ b/drivers/hwmon/hp_accel.c | |||
@@ -217,6 +217,10 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = { | |||
217 | AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted), | 217 | AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted), |
218 | AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), | 218 | AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), |
219 | AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), | 219 | AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), |
220 | AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), | ||
221 | AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), | ||
222 | AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), | ||
223 | AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), | ||
220 | { NULL, } | 224 | { NULL, } |
221 | /* Laptop models without axis info (yet): | 225 | /* Laptop models without axis info (yet): |
222 | * "NC6910" "HP Compaq 6910" | 226 | * "NC6910" "HP Compaq 6910" |
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 1002befd87d5..5be09c048c5f 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c | |||
@@ -539,14 +539,14 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr, | |||
539 | 539 | ||
540 | struct it87_data *data = dev_get_drvdata(dev); | 540 | struct it87_data *data = dev_get_drvdata(dev); |
541 | long val; | 541 | long val; |
542 | u8 reg; | ||
542 | 543 | ||
543 | if (strict_strtol(buf, 10, &val) < 0) | 544 | if (strict_strtol(buf, 10, &val) < 0) |
544 | return -EINVAL; | 545 | return -EINVAL; |
545 | 546 | ||
546 | mutex_lock(&data->update_lock); | 547 | reg = it87_read_value(data, IT87_REG_TEMP_ENABLE); |
547 | 548 | reg &= ~(1 << nr); | |
548 | data->sensor &= ~(1 << nr); | 549 | reg &= ~(8 << nr); |
549 | data->sensor &= ~(8 << nr); | ||
550 | if (val == 2) { /* backwards compatibility */ | 550 | if (val == 2) { /* backwards compatibility */ |
551 | dev_warn(dev, "Sensor type 2 is deprecated, please use 4 " | 551 | dev_warn(dev, "Sensor type 2 is deprecated, please use 4 " |
552 | "instead\n"); | 552 | "instead\n"); |
@@ -554,14 +554,16 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr, | |||
554 | } | 554 | } |
555 | /* 3 = thermal diode; 4 = thermistor; 0 = disabled */ | 555 | /* 3 = thermal diode; 4 = thermistor; 0 = disabled */ |
556 | if (val == 3) | 556 | if (val == 3) |
557 | data->sensor |= 1 << nr; | 557 | reg |= 1 << nr; |
558 | else if (val == 4) | 558 | else if (val == 4) |
559 | data->sensor |= 8 << nr; | 559 | reg |= 8 << nr; |
560 | else if (val != 0) { | 560 | else if (val != 0) |
561 | mutex_unlock(&data->update_lock); | ||
562 | return -EINVAL; | 561 | return -EINVAL; |
563 | } | 562 | |
563 | mutex_lock(&data->update_lock); | ||
564 | data->sensor = reg; | ||
564 | it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor); | 565 | it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor); |
566 | data->valid = 0; /* Force cache refresh */ | ||
565 | mutex_unlock(&data->update_lock); | 567 | mutex_unlock(&data->update_lock); |
566 | return count; | 568 | return count; |
567 | } | 569 | } |
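In the rewritten set_sensor() above, the sensor-type bits are recomputed from a fresh read of IT87_REG_TEMP_ENABLE, and the value is validated before update_lock is taken, so an invalid write no longer needs the early mutex_unlock(). A standalone sketch of the bit mapping the patch relies on (bit nr enables thermal-diode mode for channel nr, bit nr + 3 enables thermistor mode; the helper name is illustrative):

    /* Sketch only: map a requested sensor type onto the enable register.
     * val: 0 = disabled, 3 = thermal diode, 4 = thermistor (nr = 0..2). */
    static u8 sensor_type_to_reg(u8 reg, int nr, long val)
    {
            reg &= ~(1 << nr);          /* clear diode bit      */
            reg &= ~(8 << nr);          /* clear thermistor bit */
            if (val == 3)
                    reg |= 1 << nr;
            else if (val == 4)
                    reg |= 8 << nr;
            return reg;                 /* val == 0 leaves the channel disabled */
    }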
@@ -1841,14 +1843,10 @@ static void __devinit it87_init_device(struct platform_device *pdev) | |||
1841 | it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127); | 1843 | it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127); |
1842 | } | 1844 | } |
1843 | 1845 | ||
1844 | /* Check if temperature channels are reset manually or by some reason */ | 1846 | /* Temperature channels are not forcibly enabled, as they can be |
1845 | tmp = it87_read_value(data, IT87_REG_TEMP_ENABLE); | 1847 | * set to two different sensor types and we can't guess which one |
1846 | if ((tmp & 0x3f) == 0) { | 1848 | * is correct for a given system. These channels can be enabled at |
1847 | /* Temp1,Temp3=thermistor; Temp2=thermal diode */ | 1849 | * run-time through the temp{1-3}_type sysfs accessors if needed. */ |
1848 | tmp = (tmp & 0xc0) | 0x2a; | ||
1849 | it87_write_value(data, IT87_REG_TEMP_ENABLE, tmp); | ||
1850 | } | ||
1851 | data->sensor = tmp; | ||
1852 | 1850 | ||
1853 | /* Check if voltage monitors are reset manually or by some reason */ | 1851 | /* Check if voltage monitors are reset manually or by some reason */ |
1854 | tmp = it87_read_value(data, IT87_REG_VIN_ENABLE); | 1852 | tmp = it87_read_value(data, IT87_REG_VIN_ENABLE); |
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index 6b2d8ae64fe1..a610e7880fb3 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c | |||
@@ -303,13 +303,13 @@ error_ret: | |||
303 | **/ | 303 | **/ |
304 | static inline int sht15_calc_temp(struct sht15_data *data) | 304 | static inline int sht15_calc_temp(struct sht15_data *data) |
305 | { | 305 | { |
306 | int d1 = 0; | 306 | int d1 = temppoints[0].d1; |
307 | int i; | 307 | int i; |
308 | 308 | ||
309 | for (i = 1; i < ARRAY_SIZE(temppoints); i++) | 309 | for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--) |
310 | /* Find pointer to interpolate */ | 310 | /* Find pointer to interpolate */ |
311 | if (data->supply_uV > temppoints[i - 1].vdd) { | 311 | if (data->supply_uV > temppoints[i - 1].vdd) { |
312 | d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd) | 312 | d1 = (data->supply_uV - temppoints[i - 1].vdd) |
313 | * (temppoints[i].d1 - temppoints[i - 1].d1) | 313 | * (temppoints[i].d1 - temppoints[i - 1].d1) |
314 | / (temppoints[i].vdd - temppoints[i - 1].vdd) | 314 | / (temppoints[i].vdd - temppoints[i - 1].vdd) |
315 | + temppoints[i - 1].d1; | 315 | + temppoints[i - 1].d1; |
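The fix above keeps the interpolation entirely in microvolts: the old code divided supply_uV by 1000 (millivolts) while the table deltas stayed in microvolts, and a supply below the first table entry left d1 at 0 instead of the first calibration value. A self-contained sketch of the corrected linear interpolation, with invented calibration points rather than the driver's real temppoints table:

    /* Sketch only: interpolate the temperature offset d1 between two
     * supply-voltage calibration points, all voltages in microvolts. */
    struct vdd_point { int vdd; int d1; };

    static int interp_d1(int supply_uV, const struct vdd_point *lo,
                         const struct vdd_point *hi)
    {
            if (supply_uV <= lo->vdd)
                    return lo->d1;      /* clamp below the table */
            return (supply_uV - lo->vdd) * (hi->d1 - lo->d1)
                    / (hi->vdd - lo->vdd) + lo->d1;
    }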
@@ -542,7 +542,12 @@ static int __devinit sht15_probe(struct platform_device *pdev) | |||
542 | /* If a regulator is available, query what the supply voltage actually is!*/ | 542 | /* If a regulator is available, query what the supply voltage actually is!*/ |
543 | data->reg = regulator_get(data->dev, "vcc"); | 543 | data->reg = regulator_get(data->dev, "vcc"); |
544 | if (!IS_ERR(data->reg)) { | 544 | if (!IS_ERR(data->reg)) { |
545 | data->supply_uV = regulator_get_voltage(data->reg); | 545 | int voltage; |
546 | |||
547 | voltage = regulator_get_voltage(data->reg); | ||
548 | if (voltage) | ||
549 | data->supply_uV = voltage; | ||
550 | |||
546 | regulator_enable(data->reg); | 551 | regulator_enable(data->reg); |
547 | /* setup a notifier block to update this if another device | 552 | /* setup a notifier block to update this if another device |
548 | * causes the voltage to change */ | 553 | * causes the voltage to change */ |
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index f7e27b702375..d1ff9408dc1f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
@@ -146,10 +146,10 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) | |||
146 | "<%s> I2C Interrupted\n", __func__); | 146 | "<%s> I2C Interrupted\n", __func__); |
147 | return -EINTR; | 147 | return -EINTR; |
148 | } | 148 | } |
149 | if (time_after(jiffies, orig_jiffies + HZ / 1000)) { | 149 | if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { |
150 | dev_dbg(&i2c_imx->adapter.dev, | 150 | dev_dbg(&i2c_imx->adapter.dev, |
151 | "<%s> I2C bus is busy\n", __func__); | 151 | "<%s> I2C bus is busy\n", __func__); |
152 | return -EIO; | 152 | return -ETIMEDOUT; |
153 | } | 153 | } |
154 | schedule(); | 154 | schedule(); |
155 | } | 155 | } |
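The timeout rewrite above matters because HZ / 1000 truncates to zero jiffies on kernels built with HZ below 1000 (HZ=100 and HZ=250 are common), so the loop could declare the bus busy almost immediately; msecs_to_jiffies(500) gives a fixed 500 ms budget regardless of HZ, and -ETIMEDOUT describes that failure better than -EIO. A minimal sketch of the resulting wait pattern, where bus_ready() is a placeholder rather than a real driver function:

    /* Sketch only: bounded wait on a hardware condition, with the budget
     * expressed in milliseconds so it does not depend on CONFIG_HZ. */
    unsigned long orig_jiffies = jiffies;

    while (!bus_ready()) {
            if (signal_pending(current))
                    return -EINTR;
            if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500)))
                    return -ETIMEDOUT;
            schedule();
    }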
@@ -444,6 +444,8 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, | |||
444 | result = i2c_imx_read(i2c_imx, &msgs[i]); | 444 | result = i2c_imx_read(i2c_imx, &msgs[i]); |
445 | else | 445 | else |
446 | result = i2c_imx_write(i2c_imx, &msgs[i]); | 446 | result = i2c_imx_write(i2c_imx, &msgs[i]); |
447 | if (result) | ||
448 | goto fail0; | ||
447 | } | 449 | } |
448 | 450 | ||
449 | fail0: | 451 | fail0: |
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 6bd0f19cd451..389ac6032a7b 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -903,6 +903,11 @@ omap_i2c_probe(struct platform_device *pdev) | |||
903 | 903 | ||
904 | platform_set_drvdata(pdev, dev); | 904 | platform_set_drvdata(pdev, dev); |
905 | 905 | ||
906 | if (cpu_is_omap7xx()) | ||
907 | dev->reg_shift = 1; | ||
908 | else | ||
909 | dev->reg_shift = 2; | ||
910 | |||
906 | if ((r = omap_i2c_get_clocks(dev)) != 0) | 911 | if ((r = omap_i2c_get_clocks(dev)) != 0) |
907 | goto err_iounmap; | 912 | goto err_iounmap; |
908 | 913 | ||
@@ -926,11 +931,6 @@ omap_i2c_probe(struct platform_device *pdev) | |||
926 | dev->b_hw = 1; /* Enable hardware fixes */ | 931 | dev->b_hw = 1; /* Enable hardware fixes */ |
927 | } | 932 | } |
928 | 933 | ||
929 | if (cpu_is_omap7xx()) | ||
930 | dev->reg_shift = 1; | ||
931 | else | ||
932 | dev->reg_shift = 2; | ||
933 | |||
934 | /* reset ASAP, clearing any IRQs */ | 934 | /* reset ASAP, clearing any IRQs */ |
935 | omap_i2c_init(dev); | 935 | omap_i2c_init(dev); |
936 | 936 | ||
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 247103372a06..a97e3fec8148 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c | |||
@@ -173,6 +173,9 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) | |||
173 | /* We still have something to talk about... */ | 173 | /* We still have something to talk about... */ |
174 | val = *alg_data->mif.buf++; | 174 | val = *alg_data->mif.buf++; |
175 | 175 | ||
176 | if (alg_data->mif.len == 1) | ||
177 | val |= stop_bit; | ||
178 | |||
176 | alg_data->mif.len--; | 179 | alg_data->mif.len--; |
177 | iowrite32(val, I2C_REG_TX(alg_data)); | 180 | iowrite32(val, I2C_REG_TX(alg_data)); |
178 | 181 | ||
@@ -246,6 +249,9 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data) | |||
246 | __func__); | 249 | __func__); |
247 | 250 | ||
248 | if (alg_data->mif.len == 1) { | 251 | if (alg_data->mif.len == 1) { |
252 | /* Last byte, do not acknowledge next rcv. */ | ||
253 | val |= stop_bit; | ||
254 | |||
249 | /* | 255 | /* |
250 | * Enable interrupt RFDAIE (data in Rx fifo), | 256 | * Enable interrupt RFDAIE (data in Rx fifo), |
251 | * and disable DRMIE (need data for Tx) | 257 | * and disable DRMIE (need data for Tx) |
@@ -633,6 +639,8 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev) | |||
633 | */ | 639 | */ |
634 | 640 | ||
635 | tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2; | 641 | tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2; |
642 | if (tmp > 0x3FF) | ||
643 | tmp = 0x3FF; | ||
636 | iowrite32(tmp, I2C_REG_CKH(alg_data)); | 644 | iowrite32(tmp, I2C_REG_CKH(alg_data)); |
637 | iowrite32(tmp, I2C_REG_CKL(alg_data)); | 645 | iowrite32(tmp, I2C_REG_CKL(alg_data)); |
638 | 646 | ||
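The clamp added above protects the clock-divider registers from oversized values. As a rough worked example, assuming I2C_PNX_SPEED_KHZ is the usual 100 kHz standard-mode target and the parent clock runs at 208 MHz (both figures illustrative):

    tmp = ((208000000 / 1000) / 100) / 2 - 2 = 1038

which exceeds the 10-bit maximum of 0x3FF (1023), so without the clamp the value written to I2C_REG_CKH/CKL would overflow the field.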
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index 1f5b38be73bc..495be451d326 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c | |||
@@ -498,7 +498,7 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate) | |||
498 | int i = 0; | 498 | int i = 0; |
499 | 499 | ||
500 | /* Locate the apropriate clock setting */ | 500 | /* Locate the apropriate clock setting */ |
501 | while (i < ARRAY_SIZE(stu300_clktable) && | 501 | while (i < ARRAY_SIZE(stu300_clktable) - 1 && |
502 | stu300_clktable[i].rate < clkrate) | 502 | stu300_clktable[i].rate < clkrate) |
503 | i++; | 503 | i++; |
504 | 504 | ||
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index ab87e4f7cec9..defce2877eef 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c | |||
@@ -409,6 +409,8 @@ static struct pcmcia_device_id ide_ids[] = { | |||
409 | PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), | 409 | PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), |
410 | PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), | 410 | PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), |
411 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), | 411 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), |
412 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17), | ||
413 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10), | ||
412 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), | 414 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), |
413 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), | 415 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), |
414 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), | 416 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), |
@@ -429,6 +431,8 @@ static struct pcmcia_device_id ide_ids[] = { | |||
429 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), | 431 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), |
430 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), | 432 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), |
431 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), | 433 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), |
434 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d), | ||
435 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47), | ||
432 | PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), | 436 | PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), |
433 | PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), | 437 | PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), |
434 | PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), | 438 | PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), |
diff --git a/drivers/input/input.c b/drivers/input/input.c index afd4e2b7658c..9c79bd56b51a 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -660,7 +660,14 @@ static int input_default_setkeycode(struct input_dev *dev, | |||
660 | int input_get_keycode(struct input_dev *dev, | 660 | int input_get_keycode(struct input_dev *dev, |
661 | unsigned int scancode, unsigned int *keycode) | 661 | unsigned int scancode, unsigned int *keycode) |
662 | { | 662 | { |
663 | return dev->getkeycode(dev, scancode, keycode); | 663 | unsigned long flags; |
664 | int retval; | ||
665 | |||
666 | spin_lock_irqsave(&dev->event_lock, flags); | ||
667 | retval = dev->getkeycode(dev, scancode, keycode); | ||
668 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
669 | |||
670 | return retval; | ||
664 | } | 671 | } |
665 | EXPORT_SYMBOL(input_get_keycode); | 672 | EXPORT_SYMBOL(input_get_keycode); |
666 | 673 | ||
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index ffc25cfcef7a..b443e088fd3c 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
@@ -374,7 +374,9 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev) | |||
374 | input_dev->name = pdev->name; | 374 | input_dev->name = pdev->name; |
375 | input_dev->id.bustype = BUS_HOST; | 375 | input_dev->id.bustype = BUS_HOST; |
376 | input_dev->dev.parent = &pdev->dev; | 376 | input_dev->dev.parent = &pdev->dev; |
377 | input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); | 377 | input_dev->evbit[0] = BIT_MASK(EV_KEY); |
378 | if (!pdata->no_autorepeat) | ||
379 | input_dev->evbit[0] |= BIT_MASK(EV_REP); | ||
378 | input_dev->open = matrix_keypad_start; | 380 | input_dev->open = matrix_keypad_start; |
379 | input_dev->close = matrix_keypad_stop; | 381 | input_dev->close = matrix_keypad_stop; |
380 | 382 | ||
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 99d58764ef03..0d22cb9ce42e 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
@@ -64,6 +64,7 @@ static const struct alps_model_info alps_model_data[] = { | |||
64 | { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, | 64 | { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, |
65 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, | 65 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, |
66 | { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ | 66 | { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ |
67 | { { 0x73, 0x02, 0x64 }, 0xf8, 0xf8, 0 }, /* HP Pavilion dm3 */ | ||
67 | { { 0x52, 0x01, 0x14 }, 0xff, 0xff, | 68 | { { 0x52, 0x01, 0x14 }, 0xff, 0xff, |
68 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ | 69 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ |
69 | }; | 70 | }; |
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 4f8fe0886b2a..b89879bd860f 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c | |||
@@ -803,7 +803,6 @@ static struct usb_driver bcm5974_driver = { | |||
803 | .disconnect = bcm5974_disconnect, | 803 | .disconnect = bcm5974_disconnect, |
804 | .suspend = bcm5974_suspend, | 804 | .suspend = bcm5974_suspend, |
805 | .resume = bcm5974_resume, | 805 | .resume = bcm5974_resume, |
806 | .reset_resume = bcm5974_resume, | ||
807 | .id_table = bcm5974_table, | 806 | .id_table = bcm5974_table, |
808 | .supports_autosuspend = 1, | 807 | .supports_autosuspend = 1, |
809 | }; | 808 | }; |
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 577688b5b951..6440a8f55686 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
@@ -39,7 +39,7 @@ MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port."); | |||
39 | 39 | ||
40 | static bool i8042_nomux; | 40 | static bool i8042_nomux; |
41 | module_param_named(nomux, i8042_nomux, bool, 0); | 41 | module_param_named(nomux, i8042_nomux, bool, 0); |
42 | MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing conrtoller is present."); | 42 | MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present."); |
43 | 43 | ||
44 | static bool i8042_unlock; | 44 | static bool i8042_unlock; |
45 | module_param_named(unlock, i8042_unlock, bool, 0); | 45 | module_param_named(unlock, i8042_unlock, bool, 0); |
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c index 82ae18d29685..014248344763 100644 --- a/drivers/input/sparse-keymap.c +++ b/drivers/input/sparse-keymap.c | |||
@@ -68,12 +68,14 @@ static int sparse_keymap_getkeycode(struct input_dev *dev, | |||
68 | unsigned int scancode, | 68 | unsigned int scancode, |
69 | unsigned int *keycode) | 69 | unsigned int *keycode) |
70 | { | 70 | { |
71 | const struct key_entry *key = | 71 | const struct key_entry *key; |
72 | sparse_keymap_entry_from_scancode(dev, scancode); | ||
73 | 72 | ||
74 | if (key && key->type == KE_KEY) { | 73 | if (dev->keycode) { |
75 | *keycode = key->keycode; | 74 | key = sparse_keymap_entry_from_scancode(dev, scancode); |
76 | return 0; | 75 | if (key && key->type == KE_KEY) { |
76 | *keycode = key->keycode; | ||
77 | return 0; | ||
78 | } | ||
77 | } | 79 | } |
78 | 80 | ||
79 | return -EINVAL; | 81 | return -EINVAL; |
@@ -86,17 +88,16 @@ static int sparse_keymap_setkeycode(struct input_dev *dev, | |||
86 | struct key_entry *key; | 88 | struct key_entry *key; |
87 | int old_keycode; | 89 | int old_keycode; |
88 | 90 | ||
89 | if (keycode < 0 || keycode > KEY_MAX) | 91 | if (dev->keycode) { |
90 | return -EINVAL; | 92 | key = sparse_keymap_entry_from_scancode(dev, scancode); |
91 | 93 | if (key && key->type == KE_KEY) { | |
92 | key = sparse_keymap_entry_from_scancode(dev, scancode); | 94 | old_keycode = key->keycode; |
93 | if (key && key->type == KE_KEY) { | 95 | key->keycode = keycode; |
94 | old_keycode = key->keycode; | 96 | set_bit(keycode, dev->keybit); |
95 | key->keycode = keycode; | 97 | if (!sparse_keymap_entry_from_keycode(dev, old_keycode)) |
96 | set_bit(keycode, dev->keybit); | 98 | clear_bit(old_keycode, dev->keybit); |
97 | if (!sparse_keymap_entry_from_keycode(dev, old_keycode)) | 99 | return 0; |
98 | clear_bit(old_keycode, dev->keybit); | 100 | } |
99 | return 0; | ||
100 | } | 101 | } |
101 | 102 | ||
102 | return -EINVAL; | 103 | return -EINVAL; |
@@ -164,7 +165,7 @@ int sparse_keymap_setup(struct input_dev *dev, | |||
164 | return 0; | 165 | return 0; |
165 | 166 | ||
166 | err_out: | 167 | err_out: |
167 | kfree(keymap); | 168 | kfree(map); |
168 | return error; | 169 | return error; |
169 | 170 | ||
170 | } | 171 | } |
@@ -176,14 +177,27 @@ EXPORT_SYMBOL(sparse_keymap_setup); | |||
176 | * | 177 | * |
177 | * This function is used to free memory allocated by sparse keymap | 178 | * This function is used to free memory allocated by sparse keymap |
178 | * in an input device that was set up by sparse_keymap_setup(). | 179 | * in an input device that was set up by sparse_keymap_setup(). |
180 | * NOTE: It is safe to call this function while input device is | ||
181 | * still registered (however the drivers should care not to try to | ||
182 | * use freed keymap and thus have to shut off interrupts/polling | ||
183 | * before freeing the keymap). | ||
179 | */ | 184 | */ |
180 | void sparse_keymap_free(struct input_dev *dev) | 185 | void sparse_keymap_free(struct input_dev *dev) |
181 | { | 186 | { |
187 | unsigned long flags; | ||
188 | |||
189 | /* | ||
190 | * Take event lock to prevent racing with input_get_keycode() | ||
191 | * and input_set_keycode() if we are called while input device | ||
192 | * is still registered. | ||
193 | */ | ||
194 | spin_lock_irqsave(&dev->event_lock, flags); | ||
195 | |||
182 | kfree(dev->keycode); | 196 | kfree(dev->keycode); |
183 | dev->keycode = NULL; | 197 | dev->keycode = NULL; |
184 | dev->keycodemax = 0; | 198 | dev->keycodemax = 0; |
185 | dev->getkeycode = NULL; | 199 | |
186 | dev->setkeycode = NULL; | 200 | spin_unlock_irqrestore(&dev->event_lock, flags); |
187 | } | 201 | } |
188 | EXPORT_SYMBOL(sparse_keymap_free); | 202 | EXPORT_SYMBOL(sparse_keymap_free); |
189 | 203 | ||
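Together with the input_get_keycode() change earlier in this series, the effect is that keycode lookups and sparse_keymap_free() now serialize on the same dev->event_lock, so the keymap cannot be freed while a concurrent lookup is dereferencing it. A condensed sketch of that pairing (simplified, not the full driver code):

    /* Sketch only: reader and free side take the same lock. */
    spin_lock_irqsave(&dev->event_lock, flags);
    retval = dev->getkeycode(dev, scancode, keycode);   /* lookup side */
    spin_unlock_irqrestore(&dev->event_lock, flags);

    spin_lock_irqsave(&dev->event_lock, flags);
    kfree(dev->keycode);                                /* free side */
    dev->keycode = NULL;
    spin_unlock_irqrestore(&dev->event_lock, flags);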
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 8b5d2873f0c4..f46502589e4e 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c | |||
@@ -673,13 +673,15 @@ static int wacom_resume(struct usb_interface *intf) | |||
673 | int rv; | 673 | int rv; |
674 | 674 | ||
675 | mutex_lock(&wacom->lock); | 675 | mutex_lock(&wacom->lock); |
676 | if (wacom->open) { | 676 | |
677 | /* switch to wacom mode first */ | ||
678 | wacom_query_tablet_data(intf, features); | ||
679 | |||
680 | if (wacom->open) | ||
677 | rv = usb_submit_urb(wacom->irq, GFP_NOIO); | 681 | rv = usb_submit_urb(wacom->irq, GFP_NOIO); |
678 | /* switch to wacom mode if needed */ | 682 | else |
679 | if (!wacom_retrieve_hid_descriptor(intf, features)) | ||
680 | wacom_query_tablet_data(intf, features); | ||
681 | } else | ||
682 | rv = 0; | 683 | rv = 0; |
684 | |||
683 | mutex_unlock(&wacom->lock); | 685 | mutex_unlock(&wacom->lock); |
684 | 686 | ||
685 | return rv; | 687 | return rv; |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index b3ba3437a2eb..4a852d815c68 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -155,19 +155,19 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
155 | { | 155 | { |
156 | struct wacom_features *features = &wacom->features; | 156 | struct wacom_features *features = &wacom->features; |
157 | unsigned char *data = wacom->data; | 157 | unsigned char *data = wacom->data; |
158 | int x, y, prox; | 158 | int x, y, rw; |
159 | int rw = 0; | 159 | static int penData = 0; |
160 | int retval = 0; | ||
161 | 160 | ||
162 | if (data[0] != WACOM_REPORT_PENABLED) { | 161 | if (data[0] != WACOM_REPORT_PENABLED) { |
163 | dbg("wacom_graphire_irq: received unknown report #%d", data[0]); | 162 | dbg("wacom_graphire_irq: received unknown report #%d", data[0]); |
164 | goto exit; | 163 | return 0; |
165 | } | 164 | } |
166 | 165 | ||
167 | prox = data[1] & 0x80; | 166 | if (data[1] & 0x80) { |
168 | if (prox || wacom->id[0]) { | 167 | /* in prox and not a pad data */ |
169 | if (prox) { | 168 | penData = 1; |
170 | switch ((data[1] >> 5) & 3) { | 169 | |
170 | switch ((data[1] >> 5) & 3) { | ||
171 | 171 | ||
172 | case 0: /* Pen */ | 172 | case 0: /* Pen */ |
173 | wacom->tool[0] = BTN_TOOL_PEN; | 173 | wacom->tool[0] = BTN_TOOL_PEN; |
@@ -181,13 +181,23 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
181 | 181 | ||
182 | case 2: /* Mouse with wheel */ | 182 | case 2: /* Mouse with wheel */ |
183 | wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04); | 183 | wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04); |
184 | if (features->type == WACOM_G4 || features->type == WACOM_MO) { | ||
185 | rw = data[7] & 0x04 ? (data[7] & 0x03)-4 : (data[7] & 0x03); | ||
186 | wacom_report_rel(wcombo, REL_WHEEL, -rw); | ||
187 | } else | ||
188 | wacom_report_rel(wcombo, REL_WHEEL, -(signed char) data[6]); | ||
184 | /* fall through */ | 189 | /* fall through */ |
185 | 190 | ||
186 | case 3: /* Mouse without wheel */ | 191 | case 3: /* Mouse without wheel */ |
187 | wacom->tool[0] = BTN_TOOL_MOUSE; | 192 | wacom->tool[0] = BTN_TOOL_MOUSE; |
188 | wacom->id[0] = CURSOR_DEVICE_ID; | 193 | wacom->id[0] = CURSOR_DEVICE_ID; |
194 | wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01); | ||
195 | wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02); | ||
196 | if (features->type == WACOM_G4 || features->type == WACOM_MO) | ||
197 | wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f); | ||
198 | else | ||
199 | wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f); | ||
189 | break; | 200 | break; |
190 | } | ||
191 | } | 201 | } |
192 | x = wacom_le16_to_cpu(&data[2]); | 202 | x = wacom_le16_to_cpu(&data[2]); |
193 | y = wacom_le16_to_cpu(&data[4]); | 203 | y = wacom_le16_to_cpu(&data[4]); |
@@ -198,32 +208,36 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
198 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01); | 208 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01); |
199 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); | 209 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); |
200 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04); | 210 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04); |
201 | } else { | ||
202 | wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01); | ||
203 | wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02); | ||
204 | if (features->type == WACOM_G4 || | ||
205 | features->type == WACOM_MO) { | ||
206 | wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f); | ||
207 | rw = (signed)(data[7] & 0x04) - (data[7] & 0x03); | ||
208 | } else { | ||
209 | wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f); | ||
210 | rw = -(signed)data[6]; | ||
211 | } | ||
212 | wacom_report_rel(wcombo, REL_WHEEL, rw); | ||
213 | } | 211 | } |
214 | |||
215 | if (!prox) | ||
216 | wacom->id[0] = 0; | ||
217 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */ | 212 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */ |
218 | wacom_report_key(wcombo, wacom->tool[0], prox); | 213 | wacom_report_key(wcombo, wacom->tool[0], 1); |
219 | wacom_input_sync(wcombo); /* sync last event */ | 214 | } else if (wacom->id[0]) { |
215 | wacom_report_abs(wcombo, ABS_X, 0); | ||
216 | wacom_report_abs(wcombo, ABS_Y, 0); | ||
217 | if (wacom->tool[0] == BTN_TOOL_MOUSE) { | ||
218 | wacom_report_key(wcombo, BTN_LEFT, 0); | ||
219 | wacom_report_key(wcombo, BTN_RIGHT, 0); | ||
220 | wacom_report_abs(wcombo, ABS_DISTANCE, 0); | ||
221 | } else { | ||
222 | wacom_report_abs(wcombo, ABS_PRESSURE, 0); | ||
223 | wacom_report_key(wcombo, BTN_TOUCH, 0); | ||
224 | wacom_report_key(wcombo, BTN_STYLUS, 0); | ||
225 | wacom_report_key(wcombo, BTN_STYLUS2, 0); | ||
226 | } | ||
227 | wacom->id[0] = 0; | ||
228 | wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */ | ||
229 | wacom_report_key(wcombo, wacom->tool[0], 0); | ||
220 | } | 230 | } |
221 | 231 | ||
222 | /* send pad data */ | 232 | /* send pad data */ |
223 | switch (features->type) { | 233 | switch (features->type) { |
224 | case WACOM_G4: | 234 | case WACOM_G4: |
225 | prox = data[7] & 0xf8; | 235 | if (data[7] & 0xf8) { |
226 | if (prox || wacom->id[1]) { | 236 | if (penData) { |
237 | wacom_input_sync(wcombo); /* sync last event */ | ||
238 | if (!wacom->id[0]) | ||
239 | penData = 0; | ||
240 | } | ||
227 | wacom->id[1] = PAD_DEVICE_ID; | 241 | wacom->id[1] = PAD_DEVICE_ID; |
228 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); | 242 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); |
229 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); | 243 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); |
@@ -231,16 +245,29 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
231 | wacom_report_rel(wcombo, REL_WHEEL, rw); | 245 | wacom_report_rel(wcombo, REL_WHEEL, rw); |
232 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); | 246 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); |
233 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); | 247 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); |
234 | if (!prox) | 248 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); |
235 | wacom->id[1] = 0; | 249 | } else if (wacom->id[1]) { |
236 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); | 250 | if (penData) { |
251 | wacom_input_sync(wcombo); /* sync last event */ | ||
252 | if (!wacom->id[0]) | ||
253 | penData = 0; | ||
254 | } | ||
255 | wacom->id[1] = 0; | ||
256 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); | ||
257 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); | ||
258 | wacom_report_rel(wcombo, REL_WHEEL, 0); | ||
259 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0); | ||
260 | wacom_report_abs(wcombo, ABS_MISC, 0); | ||
237 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); | 261 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); |
238 | } | 262 | } |
239 | retval = 1; | ||
240 | break; | 263 | break; |
241 | case WACOM_MO: | 264 | case WACOM_MO: |
242 | prox = (data[7] & 0xf8) || data[8]; | 265 | if ((data[7] & 0xf8) || (data[8] & 0xff)) { |
243 | if (prox || wacom->id[1]) { | 266 | if (penData) { |
267 | wacom_input_sync(wcombo); /* sync last event */ | ||
268 | if (!wacom->id[0]) | ||
269 | penData = 0; | ||
270 | } | ||
244 | wacom->id[1] = PAD_DEVICE_ID; | 271 | wacom->id[1] = PAD_DEVICE_ID; |
245 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); | 272 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); |
246 | wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); | 273 | wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); |
@@ -248,16 +275,27 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
248 | wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); | 275 | wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); |
249 | wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); | 276 | wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); |
250 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); | 277 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); |
251 | if (!prox) | ||
252 | wacom->id[1] = 0; | ||
253 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); | 278 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); |
254 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); | 279 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); |
280 | } else if (wacom->id[1]) { | ||
281 | if (penData) { | ||
282 | wacom_input_sync(wcombo); /* sync last event */ | ||
283 | if (!wacom->id[0]) | ||
284 | penData = 0; | ||
285 | } | ||
286 | wacom->id[1] = 0; | ||
287 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); | ||
288 | wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); | ||
289 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x10)); | ||
290 | wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); | ||
291 | wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); | ||
292 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0); | ||
293 | wacom_report_abs(wcombo, ABS_MISC, 0); | ||
294 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); | ||
255 | } | 295 | } |
256 | retval = 1; | ||
257 | break; | 296 | break; |
258 | } | 297 | } |
259 | exit: | 298 | return 1; |
260 | return retval; | ||
261 | } | 299 | } |
262 | 300 | ||
263 | static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo) | 301 | static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo) |
@@ -598,9 +636,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo) | |||
598 | static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx) | 636 | static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx) |
599 | { | 637 | { |
600 | wacom_report_abs(wcombo, ABS_X, | 638 | wacom_report_abs(wcombo, ABS_X, |
601 | data[2 + idx * 2] | ((data[3 + idx * 2] & 0x7f) << 8)); | 639 | (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8)); |
602 | wacom_report_abs(wcombo, ABS_Y, | 640 | wacom_report_abs(wcombo, ABS_Y, |
603 | data[6 + idx * 2] | ((data[7 + idx * 2] & 0x7f) << 8)); | 641 | (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8)); |
604 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); | 642 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); |
605 | wacom_report_key(wcombo, wacom->tool[idx], 1); | 643 | wacom_report_key(wcombo, wacom->tool[idx], 1); |
606 | if (idx) | 644 | if (idx) |
@@ -744,24 +782,31 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo) | |||
744 | 782 | ||
745 | touchInProx = 0; | 783 | touchInProx = 0; |
746 | 784 | ||
747 | if (!wacom->id[0]) { /* first in prox */ | 785 | if (prox) { /* in prox */ |
748 | /* Going into proximity select tool */ | 786 | if (!wacom->id[0]) { |
749 | wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; | 787 | /* Going into proximity select tool */ |
750 | if (wacom->tool[0] == BTN_TOOL_PEN) | 788 | wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; |
751 | wacom->id[0] = STYLUS_DEVICE_ID; | 789 | if (wacom->tool[0] == BTN_TOOL_PEN) |
752 | else | 790 | wacom->id[0] = STYLUS_DEVICE_ID; |
753 | wacom->id[0] = ERASER_DEVICE_ID; | 791 | else |
754 | } | 792 | wacom->id[0] = ERASER_DEVICE_ID; |
755 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); | 793 | } |
756 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10); | 794 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); |
757 | wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2])); | 795 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10); |
758 | wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4])); | 796 | wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2])); |
759 | pressure = ((data[7] & 0x01) << 8) | data[6]; | 797 | wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4])); |
760 | if (pressure < 0) | 798 | pressure = ((data[7] & 0x01) << 8) | data[6]; |
761 | pressure = features->pressure_max + pressure + 1; | 799 | if (pressure < 0) |
762 | wacom_report_abs(wcombo, ABS_PRESSURE, pressure); | 800 | pressure = features->pressure_max + pressure + 1; |
763 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05); | 801 | wacom_report_abs(wcombo, ABS_PRESSURE, pressure); |
764 | if (!prox) { /* out-prox */ | 802 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05); |
803 | } else { | ||
804 | wacom_report_abs(wcombo, ABS_X, 0); | ||
805 | wacom_report_abs(wcombo, ABS_Y, 0); | ||
806 | wacom_report_abs(wcombo, ABS_PRESSURE, 0); | ||
807 | wacom_report_key(wcombo, BTN_STYLUS, 0); | ||
808 | wacom_report_key(wcombo, BTN_STYLUS2, 0); | ||
809 | wacom_report_key(wcombo, BTN_TOUCH, 0); | ||
765 | wacom->id[0] = 0; | 810 | wacom->id[0] = 0; |
766 | /* pen is out so touch can be enabled now */ | 811 | /* pen is out so touch can be enabled now */ |
767 | touchInProx = 1; | 812 | touchInProx = 1; |
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 0be15c70c16d..47a5ffec55a3 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c | |||
@@ -14,11 +14,6 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
17 | |||
18 | #include <linux/errno.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/timer.h> | ||
22 | #include <linux/usb.h> | 17 | #include <linux/usb.h> |
23 | #include <linux/module.h> | 18 | #include <linux/module.h> |
24 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index eb7e27105a82..964a55fb1486 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c | |||
@@ -12,8 +12,6 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include "gigaset.h" | 14 | #include "gigaset.h" |
15 | #include <linux/slab.h> | ||
16 | #include <linux/ctype.h> | ||
17 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
18 | #include <linux/seq_file.h> | 16 | #include <linux/seq_file.h> |
19 | #include <linux/isdn/capilli.h> | 17 | #include <linux/isdn/capilli.h> |
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index 0b39b387c125..f6f45f221920 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c | |||
@@ -14,10 +14,8 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
17 | #include <linux/ctype.h> | ||
18 | #include <linux/module.h> | 17 | #include <linux/module.h> |
19 | #include <linux/moduleparam.h> | 18 | #include <linux/moduleparam.h> |
20 | #include <linux/slab.h> | ||
21 | 19 | ||
22 | /* Version Information */ | 20 | /* Version Information */ |
23 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" | 21 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" |
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h index 9ef5b0463fd5..05947f9c1849 100644 --- a/drivers/isdn/gigaset/gigaset.h +++ b/drivers/isdn/gigaset/gigaset.h | |||
@@ -20,11 +20,12 @@ | |||
20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
21 | 21 | ||
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/sched.h> | ||
23 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
24 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/ctype.h> | ||
25 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
26 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
27 | #include <linux/usb.h> | ||
28 | #include <linux/skbuff.h> | 29 | #include <linux/skbuff.h> |
29 | #include <linux/netdevice.h> | 30 | #include <linux/netdevice.h> |
30 | #include <linux/ppp_defs.h> | 31 | #include <linux/ppp_defs.h> |
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c index c99fb9790a13..c22e5ace8276 100644 --- a/drivers/isdn/gigaset/i4l.c +++ b/drivers/isdn/gigaset/i4l.c | |||
@@ -15,7 +15,6 @@ | |||
15 | 15 | ||
16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
17 | #include <linux/isdnif.h> | 17 | #include <linux/isdnif.h> |
18 | #include <linux/slab.h> | ||
19 | 18 | ||
20 | #define HW_HDR_LEN 2 /* Header size used to store ack info */ | 19 | #define HW_HDR_LEN 2 /* Header size used to store ack info */ |
21 | 20 | ||
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c index f0dc6c9cc283..c9f28dd40d5c 100644 --- a/drivers/isdn/gigaset/interface.c +++ b/drivers/isdn/gigaset/interface.c | |||
@@ -13,7 +13,6 @@ | |||
13 | 13 | ||
14 | #include "gigaset.h" | 14 | #include "gigaset.h" |
15 | #include <linux/gigaset_dev.h> | 15 | #include <linux/gigaset_dev.h> |
16 | #include <linux/tty.h> | ||
17 | #include <linux/tty_flip.h> | 16 | #include <linux/tty_flip.h> |
18 | 17 | ||
19 | /*** our ioctls ***/ | 18 | /*** our ioctls ***/ |
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c index b69f73a0668f..b943efbff44d 100644 --- a/drivers/isdn/gigaset/proc.c +++ b/drivers/isdn/gigaset/proc.c | |||
@@ -14,7 +14,6 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
17 | #include <linux/ctype.h> | ||
18 | 17 | ||
19 | static ssize_t show_cidmode(struct device *dev, | 18 | static ssize_t show_cidmode(struct device *dev, |
20 | struct device_attribute *attr, char *buf) | 19 | struct device_attribute *attr, char *buf) |
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 8b0afd203a07..e96c0586886c 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c | |||
@@ -11,13 +11,10 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include "gigaset.h" | 13 | #include "gigaset.h" |
14 | |||
15 | #include <linux/module.h> | 14 | #include <linux/module.h> |
16 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
17 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
18 | #include <linux/tty.h> | ||
19 | #include <linux/completion.h> | 17 | #include <linux/completion.h> |
20 | #include <linux/slab.h> | ||
21 | 18 | ||
22 | /* Version Information */ | 19 | /* Version Information */ |
23 | #define DRIVER_AUTHOR "Tilman Schmidt" | 20 | #define DRIVER_AUTHOR "Tilman Schmidt" |
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 9430a2bbb523..76dbb20f3065 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c | |||
@@ -16,10 +16,6 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "gigaset.h" | 18 | #include "gigaset.h" |
19 | |||
20 | #include <linux/errno.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/usb.h> | 19 | #include <linux/usb.h> |
24 | #include <linux/module.h> | 20 | #include <linux/module.h> |
25 | #include <linux/moduleparam.h> | 21 | #include <linux/moduleparam.h> |
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 07090f379c63..69c84a1d88ea 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c | |||
@@ -178,7 +178,7 @@ static void set_status(struct virtio_device *vdev, u8 status) | |||
178 | 178 | ||
179 | /* We set the status. */ | 179 | /* We set the status. */ |
180 | to_lgdev(vdev)->desc->status = status; | 180 | to_lgdev(vdev)->desc->status = status; |
181 | kvm_hypercall1(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset); | 181 | hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0); |
182 | } | 182 | } |
183 | 183 | ||
184 | static void lg_set_status(struct virtio_device *vdev, u8 status) | 184 | static void lg_set_status(struct virtio_device *vdev, u8 status) |
@@ -229,7 +229,7 @@ static void lg_notify(struct virtqueue *vq) | |||
229 | */ | 229 | */ |
230 | struct lguest_vq_info *lvq = vq->priv; | 230 | struct lguest_vq_info *lvq = vq->priv; |
231 | 231 | ||
232 | kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); | 232 | hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0); |
233 | } | 233 | } |
234 | 234 | ||
235 | /* An extern declaration inside a C file is bad form. Don't do it. */ | 235 | /* An extern declaration inside a C file is bad form. Don't do it. */ |
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index fb2b7ef7868e..b4eb675a807e 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c | |||
@@ -288,6 +288,18 @@ static int emulate_insn(struct lg_cpu *cpu) | |||
288 | insn = lgread(cpu, physaddr, u8); | 288 | insn = lgread(cpu, physaddr, u8); |
289 | 289 | ||
290 | /* | 290 | /* |
291 | * Around 2.6.33, the kernel started using an emulation for the | ||
292 | * cmpxchg8b instruction in early boot on many configurations. This | ||
293 | * code isn't paravirtualized, and it tries to disable interrupts. | ||
294 | * Ignore it, which will Mostly Work. | ||
295 | */ | ||
296 | if (insn == 0xfa) { | ||
297 | /* "cli", or Clear Interrupt Enable instruction. Skip it. */ | ||
298 | cpu->regs->eip++; | ||
299 | return 1; | ||
300 | } | ||
301 | |||
302 | /* | ||
291 | * 0x66 is an "operand prefix". It means it's using the upper 16 bits | 303 | * 0x66 is an "operand prefix". It means it's using the upper 16 bits |
292 | * of the eax register. | 304 | * of the eax register. |
293 | */ | 305 | */ |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e3e9a36ea3b7..58ea0ecae7c3 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -1650,8 +1650,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1650 | int previous, int *dd_idx, | 1650 | int previous, int *dd_idx, |
1651 | struct stripe_head *sh) | 1651 | struct stripe_head *sh) |
1652 | { | 1652 | { |
1653 | long stripe; | 1653 | sector_t stripe, stripe2; |
1654 | unsigned long chunk_number; | 1654 | sector_t chunk_number; |
1655 | unsigned int chunk_offset; | 1655 | unsigned int chunk_offset; |
1656 | int pd_idx, qd_idx; | 1656 | int pd_idx, qd_idx; |
1657 | int ddf_layout = 0; | 1657 | int ddf_layout = 0; |
@@ -1671,18 +1671,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1671 | */ | 1671 | */ |
1672 | chunk_offset = sector_div(r_sector, sectors_per_chunk); | 1672 | chunk_offset = sector_div(r_sector, sectors_per_chunk); |
1673 | chunk_number = r_sector; | 1673 | chunk_number = r_sector; |
1674 | BUG_ON(r_sector != chunk_number); | ||
1675 | 1674 | ||
1676 | /* | 1675 | /* |
1677 | * Compute the stripe number | 1676 | * Compute the stripe number |
1678 | */ | 1677 | */ |
1679 | stripe = chunk_number / data_disks; | 1678 | stripe = chunk_number; |
1680 | 1679 | *dd_idx = sector_div(stripe, data_disks); | |
1681 | /* | 1680 | stripe2 = stripe; |
1682 | * Compute the data disk and parity disk indexes inside the stripe | ||
1683 | */ | ||
1684 | *dd_idx = chunk_number % data_disks; | ||
1685 | |||
1686 | /* | 1681 | /* |
1687 | * Select the parity disk based on the user selected algorithm. | 1682 | * Select the parity disk based on the user selected algorithm. |
1688 | */ | 1683 | */ |
@@ -1694,21 +1689,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1694 | case 5: | 1689 | case 5: |
1695 | switch (algorithm) { | 1690 | switch (algorithm) { |
1696 | case ALGORITHM_LEFT_ASYMMETRIC: | 1691 | case ALGORITHM_LEFT_ASYMMETRIC: |
1697 | pd_idx = data_disks - stripe % raid_disks; | 1692 | pd_idx = data_disks - sector_div(stripe2, raid_disks); |
1698 | if (*dd_idx >= pd_idx) | 1693 | if (*dd_idx >= pd_idx) |
1699 | (*dd_idx)++; | 1694 | (*dd_idx)++; |
1700 | break; | 1695 | break; |
1701 | case ALGORITHM_RIGHT_ASYMMETRIC: | 1696 | case ALGORITHM_RIGHT_ASYMMETRIC: |
1702 | pd_idx = stripe % raid_disks; | 1697 | pd_idx = sector_div(stripe2, raid_disks); |
1703 | if (*dd_idx >= pd_idx) | 1698 | if (*dd_idx >= pd_idx) |
1704 | (*dd_idx)++; | 1699 | (*dd_idx)++; |
1705 | break; | 1700 | break; |
1706 | case ALGORITHM_LEFT_SYMMETRIC: | 1701 | case ALGORITHM_LEFT_SYMMETRIC: |
1707 | pd_idx = data_disks - stripe % raid_disks; | 1702 | pd_idx = data_disks - sector_div(stripe2, raid_disks); |
1708 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; | 1703 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
1709 | break; | 1704 | break; |
1710 | case ALGORITHM_RIGHT_SYMMETRIC: | 1705 | case ALGORITHM_RIGHT_SYMMETRIC: |
1711 | pd_idx = stripe % raid_disks; | 1706 | pd_idx = sector_div(stripe2, raid_disks); |
1712 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; | 1707 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
1713 | break; | 1708 | break; |
1714 | case ALGORITHM_PARITY_0: | 1709 | case ALGORITHM_PARITY_0: |
@@ -1728,7 +1723,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1728 | 1723 | ||
1729 | switch (algorithm) { | 1724 | switch (algorithm) { |
1730 | case ALGORITHM_LEFT_ASYMMETRIC: | 1725 | case ALGORITHM_LEFT_ASYMMETRIC: |
1731 | pd_idx = raid_disks - 1 - (stripe % raid_disks); | 1726 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
1732 | qd_idx = pd_idx + 1; | 1727 | qd_idx = pd_idx + 1; |
1733 | if (pd_idx == raid_disks-1) { | 1728 | if (pd_idx == raid_disks-1) { |
1734 | (*dd_idx)++; /* Q D D D P */ | 1729 | (*dd_idx)++; /* Q D D D P */ |
@@ -1737,7 +1732,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1737 | (*dd_idx) += 2; /* D D P Q D */ | 1732 | (*dd_idx) += 2; /* D D P Q D */ |
1738 | break; | 1733 | break; |
1739 | case ALGORITHM_RIGHT_ASYMMETRIC: | 1734 | case ALGORITHM_RIGHT_ASYMMETRIC: |
1740 | pd_idx = stripe % raid_disks; | 1735 | pd_idx = sector_div(stripe2, raid_disks); |
1741 | qd_idx = pd_idx + 1; | 1736 | qd_idx = pd_idx + 1; |
1742 | if (pd_idx == raid_disks-1) { | 1737 | if (pd_idx == raid_disks-1) { |
1743 | (*dd_idx)++; /* Q D D D P */ | 1738 | (*dd_idx)++; /* Q D D D P */ |
@@ -1746,12 +1741,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1746 | (*dd_idx) += 2; /* D D P Q D */ | 1741 | (*dd_idx) += 2; /* D D P Q D */ |
1747 | break; | 1742 | break; |
1748 | case ALGORITHM_LEFT_SYMMETRIC: | 1743 | case ALGORITHM_LEFT_SYMMETRIC: |
1749 | pd_idx = raid_disks - 1 - (stripe % raid_disks); | 1744 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
1750 | qd_idx = (pd_idx + 1) % raid_disks; | 1745 | qd_idx = (pd_idx + 1) % raid_disks; |
1751 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; | 1746 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
1752 | break; | 1747 | break; |
1753 | case ALGORITHM_RIGHT_SYMMETRIC: | 1748 | case ALGORITHM_RIGHT_SYMMETRIC: |
1754 | pd_idx = stripe % raid_disks; | 1749 | pd_idx = sector_div(stripe2, raid_disks); |
1755 | qd_idx = (pd_idx + 1) % raid_disks; | 1750 | qd_idx = (pd_idx + 1) % raid_disks; |
1756 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; | 1751 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
1757 | break; | 1752 | break; |
@@ -1770,7 +1765,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1770 | /* Exactly the same as RIGHT_ASYMMETRIC, but or | 1765 | /* Exactly the same as RIGHT_ASYMMETRIC, but or |
1771 | * of blocks for computing Q is different. | 1766 | * of blocks for computing Q is different. |
1772 | */ | 1767 | */ |
1773 | pd_idx = stripe % raid_disks; | 1768 | pd_idx = sector_div(stripe2, raid_disks); |
1774 | qd_idx = pd_idx + 1; | 1769 | qd_idx = pd_idx + 1; |
1775 | if (pd_idx == raid_disks-1) { | 1770 | if (pd_idx == raid_disks-1) { |
1776 | (*dd_idx)++; /* Q D D D P */ | 1771 | (*dd_idx)++; /* Q D D D P */ |
@@ -1785,7 +1780,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1785 | * D D D P Q rather than | 1780 | * D D D P Q rather than |
1786 | * Q D D D P | 1781 | * Q D D D P |
1787 | */ | 1782 | */ |
1788 | pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); | 1783 | stripe2 += 1; |
1784 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); | ||
1789 | qd_idx = pd_idx + 1; | 1785 | qd_idx = pd_idx + 1; |
1790 | if (pd_idx == raid_disks-1) { | 1786 | if (pd_idx == raid_disks-1) { |
1791 | (*dd_idx)++; /* Q D D D P */ | 1787 | (*dd_idx)++; /* Q D D D P */ |
@@ -1797,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1797 | 1793 | ||
1798 | case ALGORITHM_ROTATING_N_CONTINUE: | 1794 | case ALGORITHM_ROTATING_N_CONTINUE: |
1799 | /* Same as left_symmetric but Q is before P */ | 1795 | /* Same as left_symmetric but Q is before P */ |
1800 | pd_idx = raid_disks - 1 - (stripe % raid_disks); | 1796 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
1801 | qd_idx = (pd_idx + raid_disks - 1) % raid_disks; | 1797 | qd_idx = (pd_idx + raid_disks - 1) % raid_disks; |
1802 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; | 1798 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
1803 | ddf_layout = 1; | 1799 | ddf_layout = 1; |
@@ -1805,27 +1801,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1805 | 1801 | ||
1806 | case ALGORITHM_LEFT_ASYMMETRIC_6: | 1802 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
1807 | /* RAID5 left_asymmetric, with Q on last device */ | 1803 | /* RAID5 left_asymmetric, with Q on last device */ |
1808 | pd_idx = data_disks - stripe % (raid_disks-1); | 1804 | pd_idx = data_disks - sector_div(stripe2, raid_disks-1); |
1809 | if (*dd_idx >= pd_idx) | 1805 | if (*dd_idx >= pd_idx) |
1810 | (*dd_idx)++; | 1806 | (*dd_idx)++; |
1811 | qd_idx = raid_disks - 1; | 1807 | qd_idx = raid_disks - 1; |
1812 | break; | 1808 | break; |
1813 | 1809 | ||
1814 | case ALGORITHM_RIGHT_ASYMMETRIC_6: | 1810 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
1815 | pd_idx = stripe % (raid_disks-1); | 1811 | pd_idx = sector_div(stripe2, raid_disks-1); |
1816 | if (*dd_idx >= pd_idx) | 1812 | if (*dd_idx >= pd_idx) |
1817 | (*dd_idx)++; | 1813 | (*dd_idx)++; |
1818 | qd_idx = raid_disks - 1; | 1814 | qd_idx = raid_disks - 1; |
1819 | break; | 1815 | break; |
1820 | 1816 | ||
1821 | case ALGORITHM_LEFT_SYMMETRIC_6: | 1817 | case ALGORITHM_LEFT_SYMMETRIC_6: |
1822 | pd_idx = data_disks - stripe % (raid_disks-1); | 1818 | pd_idx = data_disks - sector_div(stripe2, raid_disks-1); |
1823 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); | 1819 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
1824 | qd_idx = raid_disks - 1; | 1820 | qd_idx = raid_disks - 1; |
1825 | break; | 1821 | break; |
1826 | 1822 | ||
1827 | case ALGORITHM_RIGHT_SYMMETRIC_6: | 1823 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
1828 | pd_idx = stripe % (raid_disks-1); | 1824 | pd_idx = sector_div(stripe2, raid_disks-1); |
1829 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); | 1825 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
1830 | qd_idx = raid_disks - 1; | 1826 | qd_idx = raid_disks - 1; |
1831 | break; | 1827 | break; |
@@ -1870,14 +1866,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
1870 | : conf->algorithm; | 1866 | : conf->algorithm; |
1871 | sector_t stripe; | 1867 | sector_t stripe; |
1872 | int chunk_offset; | 1868 | int chunk_offset; |
1873 | int chunk_number, dummy1, dd_idx = i; | 1869 | sector_t chunk_number; |
1870 | int dummy1, dd_idx = i; | ||
1874 | sector_t r_sector; | 1871 | sector_t r_sector; |
1875 | struct stripe_head sh2; | 1872 | struct stripe_head sh2; |
1876 | 1873 | ||
1877 | 1874 | ||
1878 | chunk_offset = sector_div(new_sector, sectors_per_chunk); | 1875 | chunk_offset = sector_div(new_sector, sectors_per_chunk); |
1879 | stripe = new_sector; | 1876 | stripe = new_sector; |
1880 | BUG_ON(new_sector != stripe); | ||
1881 | 1877 | ||
1882 | if (i == sh->pd_idx) | 1878 | if (i == sh->pd_idx) |
1883 | return 0; | 1879 | return 0; |
@@ -1970,7 +1966,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
1970 | } | 1966 | } |
1971 | 1967 | ||
1972 | chunk_number = stripe * data_disks + i; | 1968 | chunk_number = stripe * data_disks + i; |
1973 | r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; | 1969 | r_sector = chunk_number * sectors_per_chunk + chunk_offset; |
1974 | 1970 | ||
1975 | check = raid5_compute_sector(conf, r_sector, | 1971 | check = raid5_compute_sector(conf, r_sector, |
1976 | previous, &dummy1, &sh2); | 1972 | previous, &dummy1, &sh2); |
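The recurring substitution in this file, stripe % raid_disks becoming sector_div(stripe2, raid_disks), is what allows the layout math to use full 64-bit sector_t values on 32-bit hosts, where a native 64-bit '%' is not available. sector_div() divides its first argument in place and returns the remainder, which is why the patch introduces the scratch copy stripe2. A minimal sketch of the idiom:

    /* Sketch only: sector_div() consumes its first argument, so work on
     * a scratch copy when only the remainder is wanted. */
    sector_t stripe2 = stripe;
    int pd_idx = sector_div(stripe2, raid_disks);
    /* now: pd_idx  == stripe % raid_disks  (remainder)
     *      stripe2 == stripe / raid_disks  (quotient)  */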
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 2191c8d896a0..0d0d625fece2 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -311,6 +311,22 @@ config TI_DAC7512 | |||
311 | This driver can also be built as a module. If so, the module | 311 | This driver can also be built as a module. If so, the module |
312 | will be calles ti_dac7512. | 312 | will be calles ti_dac7512. |
313 | 313 | ||
314 | config VMWARE_BALLOON | ||
315 | tristate "VMware Balloon Driver" | ||
316 | depends on X86 | ||
317 | help | ||
318 | This is the VMware physical memory management driver, which acts | ||
319 | like a "balloon" that can be inflated to reclaim physical pages | ||
320 | by reserving them in the guest and invalidating them in the | ||
321 | monitor, freeing up the underlying machine pages so they can | ||
322 | be allocated to other guests. The balloon can also be deflated | ||
323 | to allow the guest to use more physical memory. | ||
324 | |||
325 | If unsure, say N. | ||
326 | |||
327 | To compile this driver as a module, choose M here: the | ||
328 | module will be called vmware_balloon. | ||
329 | |||
314 | source "drivers/misc/c2port/Kconfig" | 330 | source "drivers/misc/c2port/Kconfig" |
315 | source "drivers/misc/eeprom/Kconfig" | 331 | source "drivers/misc/eeprom/Kconfig" |
316 | source "drivers/misc/cb710/Kconfig" | 332 | source "drivers/misc/cb710/Kconfig" |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 27c484355414..7b6f7eefdf8d 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
@@ -29,3 +29,4 @@ obj-$(CONFIG_C2PORT) += c2port/ | |||
29 | obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ | 29 | obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ |
30 | obj-y += eeprom/ | 30 | obj-y += eeprom/ |
31 | obj-y += cb710/ | 31 | obj-y += cb710/ |
32 | obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o | ||
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c new file mode 100644 index 000000000000..e7161c4e3798 --- /dev/null +++ b/drivers/misc/vmware_balloon.c | |||
@@ -0,0 +1,832 @@ | |||
1 | /* | ||
2 | * VMware Balloon driver. | ||
3 | * | ||
4 | * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License and no later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
14 | * details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | * | ||
20 | * Maintained by: Dmitry Torokhov <dtor@vmware.com> | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * This is the VMware physical memory management driver for Linux. The driver | ||
25 | * acts like a "balloon" that can be inflated to reclaim physical pages by | ||
26 | * reserving them in the guest and invalidating them in the monitor, | ||
27 | * freeing up the underlying machine pages so they can be allocated to | ||
28 | * other guests. The balloon can also be deflated to allow the guest to | ||
29 | * use more physical memory. Higher level policies can control the sizes | ||
30 | * of balloons in VMs in order to manage physical memory resources. | ||
31 | */ | ||
32 | |||
33 | //#define DEBUG | ||
34 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
35 | |||
36 | #include <linux/types.h> | ||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/mm.h> | ||
39 | #include <linux/sched.h> | ||
40 | #include <linux/module.h> | ||
41 | #include <linux/workqueue.h> | ||
42 | #include <linux/debugfs.h> | ||
43 | #include <linux/seq_file.h> | ||
44 | #include <asm/vmware.h> | ||
45 | |||
46 | MODULE_AUTHOR("VMware, Inc."); | ||
47 | MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); | ||
48 | MODULE_VERSION("1.2.1.0-K"); | ||
49 | MODULE_ALIAS("dmi:*:svnVMware*:*"); | ||
50 | MODULE_ALIAS("vmware_vmmemctl"); | ||
51 | MODULE_LICENSE("GPL"); | ||
52 | |||
53 | /* | ||
54 | * Various constants controlling the rate of inflating/deflating the balloon, | ||
55 | * measured in pages. | ||
56 | */ | ||
57 | |||
58 | /* | ||
59 | * Rate of allocating memory when there is no memory pressure | ||
60 | * (driver performs non-sleeping allocations). | ||
61 | */ | ||
62 | #define VMW_BALLOON_NOSLEEP_ALLOC_MAX 16384U | ||
63 | |||
64 | /* | ||
65 | * Rates of memory allocation when the guest experiences memory pressure | ||
66 | * (driver performs sleeping allocations). | ||
67 | */ | ||
68 | #define VMW_BALLOON_RATE_ALLOC_MIN 512U | ||
69 | #define VMW_BALLOON_RATE_ALLOC_MAX 2048U | ||
70 | #define VMW_BALLOON_RATE_ALLOC_INC 16U | ||
71 | |||
72 | /* | ||
73 | * Rates for releasing pages while deflating balloon. | ||
74 | */ | ||
75 | #define VMW_BALLOON_RATE_FREE_MIN 512U | ||
76 | #define VMW_BALLOON_RATE_FREE_MAX 16384U | ||
77 | #define VMW_BALLOON_RATE_FREE_INC 16U | ||
78 | |||
79 | /* | ||
80 | * When the guest is under memory pressure, use a reduced page allocation | ||
81 | * rate for the next several cycles. | ||
82 | */ | ||
83 | #define VMW_BALLOON_SLOW_CYCLES 4 | ||
84 | |||
85 | /* | ||
86 | * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't | ||
87 | * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use | ||
88 | * __GFP_NOWARN to suppress page allocation failure warnings. | ||
89 | */ | ||
90 | #define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN) | ||
91 | |||
92 | /* | ||
93 | * Use GFP_HIGHUSER when executing in a separate kernel thread | ||
94 | * context and allocation can sleep. This is less stressful to | ||
95 | * the guest memory system, since it allows the thread to block | ||
96 | * while memory is reclaimed, and won't take pages from emergency | ||
97 | * low-memory pools. | ||
98 | */ | ||
99 | #define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER) | ||
100 | |||
101 | /* Maximum number of page allocations without yielding processor */ | ||
102 | #define VMW_BALLOON_YIELD_THRESHOLD 1024 | ||
103 | |||
104 | |||
105 | /* | ||
106 | * Hypervisor communication port definitions. | ||
107 | */ | ||
108 | #define VMW_BALLOON_HV_PORT 0x5670 | ||
109 | #define VMW_BALLOON_HV_MAGIC 0x456c6d6f | ||
110 | #define VMW_BALLOON_PROTOCOL_VERSION 2 | ||
111 | #define VMW_BALLOON_GUEST_ID 1 /* Linux */ | ||
112 | |||
113 | #define VMW_BALLOON_CMD_START 0 | ||
114 | #define VMW_BALLOON_CMD_GET_TARGET 1 | ||
115 | #define VMW_BALLOON_CMD_LOCK 2 | ||
116 | #define VMW_BALLOON_CMD_UNLOCK 3 | ||
117 | #define VMW_BALLOON_CMD_GUEST_ID 4 | ||
118 | |||
119 | /* error codes */ | ||
120 | #define VMW_BALLOON_SUCCESS 0 | ||
121 | #define VMW_BALLOON_FAILURE -1 | ||
122 | #define VMW_BALLOON_ERROR_CMD_INVALID 1 | ||
123 | #define VMW_BALLOON_ERROR_PPN_INVALID 2 | ||
124 | #define VMW_BALLOON_ERROR_PPN_LOCKED 3 | ||
125 | #define VMW_BALLOON_ERROR_PPN_UNLOCKED 4 | ||
126 | #define VMW_BALLOON_ERROR_PPN_PINNED 5 | ||
127 | #define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6 | ||
128 | #define VMW_BALLOON_ERROR_RESET 7 | ||
129 | #define VMW_BALLOON_ERROR_BUSY 8 | ||
130 | |||
131 | #define VMWARE_BALLOON_CMD(cmd, data, result) \ | ||
132 | ({ \ | ||
133 | unsigned long __stat, __dummy1, __dummy2; \ | ||
134 | __asm__ __volatile__ ("inl (%%dx)" : \ | ||
135 | "=a"(__stat), \ | ||
136 | "=c"(__dummy1), \ | ||
137 | "=d"(__dummy2), \ | ||
138 | "=b"(result) : \ | ||
139 | "0"(VMW_BALLOON_HV_MAGIC), \ | ||
140 | "1"(VMW_BALLOON_CMD_##cmd), \ | ||
141 | "2"(VMW_BALLOON_HV_PORT), \ | ||
142 | "3"(data) : \ | ||
143 | "memory"); \ | ||
144 | result &= -1UL; \ | ||
145 | __stat & -1UL; \ | ||
146 | }) | ||
147 | |||
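The macro above maps each balloon command onto VMware's I/O-port interface: a single "inl" on port VMW_BALLOON_HV_PORT with the magic number in EAX, the command in ECX and its argument in EBX; the hypervisor returns a status code in EAX and any result datum in EBX. As a rough illustration (the vmballoon_send_*() helpers below are the real callers), querying the desired balloon size looks like:

	unsigned long status, target;
	unsigned long limit = totalram_pages;	/* illustration only */

	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
	if (status == VMW_BALLOON_SUCCESS)
		pr_debug("host wants a %lu page balloon\n", target);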
148 | #ifdef CONFIG_DEBUG_FS | ||
149 | struct vmballoon_stats { | ||
150 | unsigned int timer; | ||
151 | |||
152 | /* allocation statistics */ | ||
153 | unsigned int alloc; | ||
154 | unsigned int alloc_fail; | ||
155 | unsigned int sleep_alloc; | ||
156 | unsigned int sleep_alloc_fail; | ||
157 | unsigned int refused_alloc; | ||
158 | unsigned int refused_free; | ||
159 | unsigned int free; | ||
160 | |||
161 | /* monitor operations */ | ||
162 | unsigned int lock; | ||
163 | unsigned int lock_fail; | ||
164 | unsigned int unlock; | ||
165 | unsigned int unlock_fail; | ||
166 | unsigned int target; | ||
167 | unsigned int target_fail; | ||
168 | unsigned int start; | ||
169 | unsigned int start_fail; | ||
170 | unsigned int guest_type; | ||
171 | unsigned int guest_type_fail; | ||
172 | }; | ||
173 | |||
174 | #define STATS_INC(stat) (stat)++ | ||
175 | #else | ||
176 | #define STATS_INC(stat) | ||
177 | #endif | ||
178 | |||
179 | struct vmballoon { | ||
180 | |||
181 | /* list of reserved physical pages */ | ||
182 | struct list_head pages; | ||
183 | |||
184 | /* transient list of non-balloonable pages */ | ||
185 | struct list_head refused_pages; | ||
186 | |||
187 | /* balloon size in pages */ | ||
188 | unsigned int size; | ||
189 | unsigned int target; | ||
190 | |||
191 | /* reset flag */ | ||
192 | bool reset_required; | ||
193 | |||
194 | /* adjustment rates (pages per second) */ | ||
195 | unsigned int rate_alloc; | ||
196 | unsigned int rate_free; | ||
197 | |||
198 | /* slow down page allocations for the next few cycles */ | ||
199 | unsigned int slow_allocation_cycles; | ||
200 | |||
201 | #ifdef CONFIG_DEBUG_FS | ||
202 | /* statistics */ | ||
203 | struct vmballoon_stats stats; | ||
204 | |||
205 | /* debugfs file exporting statistics */ | ||
206 | struct dentry *dbg_entry; | ||
207 | #endif | ||
208 | |||
209 | struct sysinfo sysinfo; | ||
210 | |||
211 | struct delayed_work dwork; | ||
212 | }; | ||
213 | |||
214 | static struct vmballoon balloon; | ||
215 | static struct workqueue_struct *vmballoon_wq; | ||
216 | |||
217 | /* | ||
218 | * Send "start" command to the host, communicating supported version | ||
219 | * of the protocol. | ||
220 | */ | ||
221 | static bool vmballoon_send_start(struct vmballoon *b) | ||
222 | { | ||
223 | unsigned long status, dummy; | ||
224 | |||
225 | STATS_INC(b->stats.start); | ||
226 | |||
227 | status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy); | ||
228 | if (status == VMW_BALLOON_SUCCESS) | ||
229 | return true; | ||
230 | |||
231 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
232 | STATS_INC(b->stats.start_fail); | ||
233 | return false; | ||
234 | } | ||
235 | |||
236 | static bool vmballoon_check_status(struct vmballoon *b, unsigned long status) | ||
237 | { | ||
238 | switch (status) { | ||
239 | case VMW_BALLOON_SUCCESS: | ||
240 | return true; | ||
241 | |||
242 | case VMW_BALLOON_ERROR_RESET: | ||
243 | b->reset_required = true; | ||
244 | /* fall through */ | ||
245 | |||
246 | default: | ||
247 | return false; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * Communicate guest type to the host so that it can adjust ballooning | ||
253 | * algorithm to the one most appropriate for the guest. This command | ||
254 | * is normally issued after sending "start" command and is part of | ||
255 | * standard reset sequence. | ||
256 | */ | ||
257 | static bool vmballoon_send_guest_id(struct vmballoon *b) | ||
258 | { | ||
259 | unsigned long status, dummy; | ||
260 | |||
261 | status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy); | ||
262 | |||
263 | STATS_INC(b->stats.guest_type); | ||
264 | |||
265 | if (vmballoon_check_status(b, status)) | ||
266 | return true; | ||
267 | |||
268 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
269 | STATS_INC(b->stats.guest_type_fail); | ||
270 | return false; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Retrieve desired balloon size from the host. | ||
275 | */ | ||
276 | static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target) | ||
277 | { | ||
278 | unsigned long status; | ||
279 | unsigned long target; | ||
280 | unsigned long limit; | ||
281 | u32 limit32; | ||
282 | |||
283 | /* | ||
284 | * si_meminfo() is cheap. Moreover, we want to provide dynamic | ||
285 | * max balloon size later. So let us call si_meminfo() every | ||
286 | * iteration. | ||
287 | */ | ||
288 | si_meminfo(&b->sysinfo); | ||
289 | limit = b->sysinfo.totalram; | ||
290 | |||
291 | /* Ensure limit fits in 32-bits */ | ||
292 | limit32 = (u32)limit; | ||
293 | if (limit != limit32) | ||
294 | return false; | ||
295 | |||
296 | /* update stats */ | ||
297 | STATS_INC(b->stats.target); | ||
298 | |||
299 | status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target); | ||
300 | if (vmballoon_check_status(b, status)) { | ||
301 | *new_target = target; | ||
302 | return true; | ||
303 | } | ||
304 | |||
305 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
306 | STATS_INC(b->stats.target_fail); | ||
307 | return false; | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Notify the host about an allocated page so that the host can use it without | ||
312 | * fear that the guest will need it. The host may reject some pages; we need to | ||
313 | * check the return value and maybe submit a different page. | ||
314 | */ | ||
315 | static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn) | ||
316 | { | ||
317 | unsigned long status, dummy; | ||
318 | u32 pfn32; | ||
319 | |||
320 | pfn32 = (u32)pfn; | ||
321 | if (pfn32 != pfn) | ||
322 | return false; | ||
323 | |||
324 | STATS_INC(b->stats.lock); | ||
325 | |||
326 | status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy); | ||
327 | if (vmballoon_check_status(b, status)) | ||
328 | return true; | ||
329 | |||
330 | pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); | ||
331 | STATS_INC(b->stats.lock_fail); | ||
332 | return false; | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * Notify the host that guest intends to release given page back into | ||
337 | * the pool of available (to the guest) pages. | ||
338 | */ | ||
339 | static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn) | ||
340 | { | ||
341 | unsigned long status, dummy; | ||
342 | u32 pfn32; | ||
343 | |||
344 | pfn32 = (u32)pfn; | ||
345 | if (pfn32 != pfn) | ||
346 | return false; | ||
347 | |||
348 | STATS_INC(b->stats.unlock); | ||
349 | |||
350 | status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy); | ||
351 | if (vmballoon_check_status(b, status)) | ||
352 | return true; | ||
353 | |||
354 | pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); | ||
355 | STATS_INC(b->stats.unlock_fail); | ||
356 | return false; | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * Quickly release all pages allocated for the balloon. This function is | ||
361 | * called when host decides to "reset" balloon for one reason or another. | ||
362 | * Unlike normal "deflate" we do not (shall not) notify host of the pages | ||
363 | * being released. | ||
364 | */ | ||
365 | static void vmballoon_pop(struct vmballoon *b) | ||
366 | { | ||
367 | struct page *page, *next; | ||
368 | unsigned int count = 0; | ||
369 | |||
370 | list_for_each_entry_safe(page, next, &b->pages, lru) { | ||
371 | list_del(&page->lru); | ||
372 | __free_page(page); | ||
373 | STATS_INC(b->stats.free); | ||
374 | b->size--; | ||
375 | |||
376 | if (++count >= b->rate_free) { | ||
377 | count = 0; | ||
378 | cond_resched(); | ||
379 | } | ||
380 | } | ||
381 | } | ||
382 | |||
383 | /* | ||
384 | * Perform standard reset sequence by popping the balloon (in case it | ||
385 | * is not empty) and then restarting protocol. This operation normally | ||
386 | * happens when host responds with VMW_BALLOON_ERROR_RESET to a command. | ||
387 | */ | ||
388 | static void vmballoon_reset(struct vmballoon *b) | ||
389 | { | ||
390 | /* free all pages, skipping monitor unlock */ | ||
391 | vmballoon_pop(b); | ||
392 | |||
393 | if (vmballoon_send_start(b)) { | ||
394 | b->reset_required = false; | ||
395 | if (!vmballoon_send_guest_id(b)) | ||
396 | pr_err("failed to send guest ID to the host\n"); | ||
397 | } | ||
398 | } | ||
399 | |||
400 | /* | ||
401 | * Allocate (or reserve) a page for the balloon and notify the host. If the host | ||
402 | * refuses the page, put it on the "refused" list and allocate another one | ||
403 | * until the host is satisfied. "Refused" pages are released at the end of the | ||
404 | * inflation cycle (when we allocate b->rate_alloc pages). | ||
405 | */ | ||
406 | static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep) | ||
407 | { | ||
408 | struct page *page; | ||
409 | gfp_t flags; | ||
410 | bool locked = false; | ||
411 | |||
412 | do { | ||
413 | if (!can_sleep) | ||
414 | STATS_INC(b->stats.alloc); | ||
415 | else | ||
416 | STATS_INC(b->stats.sleep_alloc); | ||
417 | |||
418 | flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP; | ||
419 | page = alloc_page(flags); | ||
420 | if (!page) { | ||
421 | if (!can_sleep) | ||
422 | STATS_INC(b->stats.alloc_fail); | ||
423 | else | ||
424 | STATS_INC(b->stats.sleep_alloc_fail); | ||
425 | return -ENOMEM; | ||
426 | } | ||
427 | |||
428 | /* inform monitor */ | ||
429 | locked = vmballoon_send_lock_page(b, page_to_pfn(page)); | ||
430 | if (!locked) { | ||
431 | if (b->reset_required) { | ||
432 | __free_page(page); | ||
433 | return -EIO; | ||
434 | } | ||
435 | |||
436 | /* place on list of non-balloonable pages, retry allocation */ | ||
437 | list_add(&page->lru, &b->refused_pages); | ||
438 | STATS_INC(b->stats.refused_alloc); | ||
439 | } | ||
440 | } while (!locked); | ||
441 | |||
442 | /* track allocated page */ | ||
443 | list_add(&page->lru, &b->pages); | ||
444 | |||
445 | /* update balloon size */ | ||
446 | b->size++; | ||
447 | |||
448 | return 0; | ||
449 | } | ||
450 | |||
451 | /* | ||
452 | * Release the page allocated for the balloon. Note that we first notify | ||
453 | * the host so it can make sure the page will be available for the guest | ||
454 | * to use, if needed. | ||
455 | */ | ||
456 | static int vmballoon_release_page(struct vmballoon *b, struct page *page) | ||
457 | { | ||
458 | if (!vmballoon_send_unlock_page(b, page_to_pfn(page))) | ||
459 | return -EIO; | ||
460 | |||
461 | list_del(&page->lru); | ||
462 | |||
463 | /* deallocate page */ | ||
464 | __free_page(page); | ||
465 | STATS_INC(b->stats.free); | ||
466 | |||
467 | /* update balloon size */ | ||
468 | b->size--; | ||
469 | |||
470 | return 0; | ||
471 | } | ||
472 | |||
473 | /* | ||
474 | * Release pages that were allocated while attempting to inflate the | ||
475 | * balloon but were refused by the host for one reason or another. | ||
476 | */ | ||
477 | static void vmballoon_release_refused_pages(struct vmballoon *b) | ||
478 | { | ||
479 | struct page *page, *next; | ||
480 | |||
481 | list_for_each_entry_safe(page, next, &b->refused_pages, lru) { | ||
482 | list_del(&page->lru); | ||
483 | __free_page(page); | ||
484 | STATS_INC(b->stats.refused_free); | ||
485 | } | ||
486 | } | ||
487 | |||
488 | /* | ||
489 | * Inflate the balloon towards its target size. Note that we try to limit | ||
490 | * the rate of allocation to make sure we are not choking the rest of the | ||
491 | * system. | ||
492 | */ | ||
493 | static void vmballoon_inflate(struct vmballoon *b) | ||
494 | { | ||
495 | unsigned int goal; | ||
496 | unsigned int rate; | ||
497 | unsigned int i; | ||
498 | unsigned int allocations = 0; | ||
499 | int error = 0; | ||
500 | bool alloc_can_sleep = false; | ||
501 | |||
502 | pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); | ||
503 | |||
504 | /* | ||
505 | * First try NOSLEEP page allocations to inflate balloon. | ||
506 | * | ||
507 | * If we do not throttle nosleep allocations, we can drain all | ||
508 | * free pages in the guest quickly (if the balloon target is high). | ||
509 | * As a side-effect, draining free pages helps to inform (force) | ||
510 | * the guest to start swapping if balloon target is not met yet, | ||
511 | * which is a desired behavior. However, the balloon driver can consume | ||
512 | * all available CPU cycles if too many pages are allocated in a | ||
513 | * second. Therefore, we throttle nosleep allocations even when | ||
514 | * the guest is not under memory pressure. OTOH, if we have already | ||
515 | * predicted that the guest is under memory pressure, then we | ||
516 | * slow down page allocations considerably. | ||
517 | */ | ||
518 | |||
519 | goal = b->target - b->size; | ||
520 | /* | ||
521 | * Start with the no-sleep allocation rate which may be higher | ||
522 | * than the sleeping allocation rate. | ||
523 | */ | ||
524 | rate = b->slow_allocation_cycles ? | ||
525 | b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX; | ||
526 | |||
527 | pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n", | ||
528 | __func__, goal, rate, b->rate_alloc); | ||
529 | |||
530 | for (i = 0; i < goal; i++) { | ||
531 | |||
532 | error = vmballoon_reserve_page(b, alloc_can_sleep); | ||
533 | if (error) { | ||
534 | if (error != -ENOMEM) { | ||
535 | /* | ||
536 | * Not a page allocation failure, stop this | ||
537 | * cycle. Maybe we'll get new target from | ||
538 | * the host soon. | ||
539 | */ | ||
540 | break; | ||
541 | } | ||
542 | |||
543 | if (alloc_can_sleep) { | ||
544 | /* | ||
545 | * CANSLEEP page allocation failed, so guest | ||
546 | * is under severe memory pressure. Quickly | ||
547 | * decrease allocation rate. | ||
548 | */ | ||
549 | b->rate_alloc = max(b->rate_alloc / 2, | ||
550 | VMW_BALLOON_RATE_ALLOC_MIN); | ||
551 | break; | ||
552 | } | ||
553 | |||
554 | /* | ||
555 | * NOSLEEP page allocation failed, so the guest is | ||
556 | * under memory pressure. Let us slow down page | ||
557 | * allocations for next few cycles so that the guest | ||
558 | * gets out of memory pressure. Also, if we already | ||
559 | * allocated b->rate_alloc pages, let's pause, | ||
560 | * otherwise switch to sleeping allocations. | ||
561 | */ | ||
562 | b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES; | ||
563 | |||
564 | if (i >= b->rate_alloc) | ||
565 | break; | ||
566 | |||
567 | alloc_can_sleep = true; | ||
568 | /* Lower rate for sleeping allocations. */ | ||
569 | rate = b->rate_alloc; | ||
570 | } | ||
571 | |||
572 | if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) { | ||
573 | cond_resched(); | ||
574 | allocations = 0; | ||
575 | } | ||
576 | |||
577 | if (i >= rate) { | ||
578 | /* We allocated enough pages, let's take a break. */ | ||
579 | break; | ||
580 | } | ||
581 | } | ||
582 | |||
583 | /* | ||
584 | * We reached our goal without failures so try increasing | ||
585 | * allocation rate. | ||
586 | */ | ||
587 | if (error == 0 && i >= b->rate_alloc) { | ||
588 | unsigned int mult = i / b->rate_alloc; | ||
589 | |||
590 | b->rate_alloc = | ||
591 | min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC, | ||
592 | VMW_BALLOON_RATE_ALLOC_MAX); | ||
593 | } | ||
594 | |||
595 | vmballoon_release_refused_pages(b); | ||
596 | } | ||
597 | |||
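To make the rate adaptation above concrete: with the constants defined earlier, a driver whose rate_alloc has dropped to 1024 pages and which then reaches a 2048-page goal without a single failure computes mult = 2048 / 1024 = 2 and raises the rate to min(1024 + 2 * 16, 2048) = 1056 pages per cycle, while one failed CANSLEEP allocation instead halves the rate, bottoming out at the VMW_BALLOON_RATE_ALLOC_MIN floor of 512 pages.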
598 | /* | ||
599 | * Decrease the size of the balloon allowing guest to use more memory. | ||
600 | */ | ||
601 | static void vmballoon_deflate(struct vmballoon *b) | ||
602 | { | ||
603 | struct page *page, *next; | ||
604 | unsigned int i = 0; | ||
605 | unsigned int goal; | ||
606 | int error; | ||
607 | |||
608 | pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); | ||
609 | |||
610 | /* limit deallocation rate */ | ||
611 | goal = min(b->size - b->target, b->rate_free); | ||
612 | |||
613 | pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free); | ||
614 | |||
615 | /* free pages to reach target */ | ||
616 | list_for_each_entry_safe(page, next, &b->pages, lru) { | ||
617 | error = vmballoon_release_page(b, page); | ||
618 | if (error) { | ||
619 | /* quickly decrease rate in case of error */ | ||
620 | b->rate_free = max(b->rate_free / 2, | ||
621 | VMW_BALLOON_RATE_FREE_MIN); | ||
622 | return; | ||
623 | } | ||
624 | |||
625 | if (++i >= goal) | ||
626 | break; | ||
627 | } | ||
628 | |||
629 | /* slowly increase rate if there were no errors */ | ||
630 | b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC, | ||
631 | VMW_BALLOON_RATE_FREE_MAX); | ||
632 | } | ||
633 | |||
634 | /* | ||
635 | * Balloon work function: reset protocol, if needed, get the new size and | ||
636 | * adjust balloon as needed. Repeat in 1 sec. | ||
637 | */ | ||
638 | static void vmballoon_work(struct work_struct *work) | ||
639 | { | ||
640 | struct delayed_work *dwork = to_delayed_work(work); | ||
641 | struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); | ||
642 | unsigned int target; | ||
643 | |||
644 | STATS_INC(b->stats.timer); | ||
645 | |||
646 | if (b->reset_required) | ||
647 | vmballoon_reset(b); | ||
648 | |||
649 | if (b->slow_allocation_cycles > 0) | ||
650 | b->slow_allocation_cycles--; | ||
651 | |||
652 | if (vmballoon_send_get_target(b, &target)) { | ||
653 | /* update target, adjust size */ | ||
654 | b->target = target; | ||
655 | |||
656 | if (b->size < target) | ||
657 | vmballoon_inflate(b); | ||
658 | else if (b->size > target) | ||
659 | vmballoon_deflate(b); | ||
660 | } | ||
661 | |||
662 | queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ)); | ||
663 | } | ||
664 | |||
665 | /* | ||
666 | * DEBUGFS Interface | ||
667 | */ | ||
668 | #ifdef CONFIG_DEBUG_FS | ||
669 | |||
670 | static int vmballoon_debug_show(struct seq_file *f, void *offset) | ||
671 | { | ||
672 | struct vmballoon *b = f->private; | ||
673 | struct vmballoon_stats *stats = &b->stats; | ||
674 | |||
675 | /* format size info */ | ||
676 | seq_printf(f, | ||
677 | "target: %8d pages\n" | ||
678 | "current: %8d pages\n", | ||
679 | b->target, b->size); | ||
680 | |||
681 | /* format rate info */ | ||
682 | seq_printf(f, | ||
683 | "rateNoSleepAlloc: %8d pages/sec\n" | ||
684 | "rateSleepAlloc: %8d pages/sec\n" | ||
685 | "rateFree: %8d pages/sec\n", | ||
686 | VMW_BALLOON_NOSLEEP_ALLOC_MAX, | ||
687 | b->rate_alloc, b->rate_free); | ||
688 | |||
689 | seq_printf(f, | ||
690 | "\n" | ||
691 | "timer: %8u\n" | ||
692 | "start: %8u (%4u failed)\n" | ||
693 | "guestType: %8u (%4u failed)\n" | ||
694 | "lock: %8u (%4u failed)\n" | ||
695 | "unlock: %8u (%4u failed)\n" | ||
696 | "target: %8u (%4u failed)\n" | ||
697 | "primNoSleepAlloc: %8u (%4u failed)\n" | ||
698 | "primCanSleepAlloc: %8u (%4u failed)\n" | ||
699 | "primFree: %8u\n" | ||
700 | "errAlloc: %8u\n" | ||
701 | "errFree: %8u\n", | ||
702 | stats->timer, | ||
703 | stats->start, stats->start_fail, | ||
704 | stats->guest_type, stats->guest_type_fail, | ||
705 | stats->lock, stats->lock_fail, | ||
706 | stats->unlock, stats->unlock_fail, | ||
707 | stats->target, stats->target_fail, | ||
708 | stats->alloc, stats->alloc_fail, | ||
709 | stats->sleep_alloc, stats->sleep_alloc_fail, | ||
710 | stats->free, | ||
711 | stats->refused_alloc, stats->refused_free); | ||
712 | |||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | static int vmballoon_debug_open(struct inode *inode, struct file *file) | ||
717 | { | ||
718 | return single_open(file, vmballoon_debug_show, inode->i_private); | ||
719 | } | ||
720 | |||
721 | static const struct file_operations vmballoon_debug_fops = { | ||
722 | .owner = THIS_MODULE, | ||
723 | .open = vmballoon_debug_open, | ||
724 | .read = seq_read, | ||
725 | .llseek = seq_lseek, | ||
726 | .release = single_release, | ||
727 | }; | ||
728 | |||
729 | static int __init vmballoon_debugfs_init(struct vmballoon *b) | ||
730 | { | ||
731 | int error; | ||
732 | |||
733 | b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b, | ||
734 | &vmballoon_debug_fops); | ||
735 | if (IS_ERR(b->dbg_entry)) { | ||
736 | error = PTR_ERR(b->dbg_entry); | ||
737 | pr_err("failed to create debugfs entry, error: %d\n", error); | ||
738 | return error; | ||
739 | } | ||
740 | |||
741 | return 0; | ||
742 | } | ||
743 | |||
744 | static void __exit vmballoon_debugfs_exit(struct vmballoon *b) | ||
745 | { | ||
746 | debugfs_remove(b->dbg_entry); | ||
747 | } | ||
748 | |||
749 | #else | ||
750 | |||
751 | static inline int vmballoon_debugfs_init(struct vmballoon *b) | ||
752 | { | ||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | static inline void vmballoon_debugfs_exit(struct vmballoon *b) | ||
757 | { | ||
758 | } | ||
759 | |||
760 | #endif /* CONFIG_DEBUG_FS */ | ||
761 | |||
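When CONFIG_DEBUG_FS is enabled, the statistics above are exposed through a single read-only file, normally reachable as /sys/kernel/debug/vmmemctl once debugfs is mounted. A small user-space sketch (not part of the driver) that dumps it:

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/kernel/debug/vmmemctl", "r");

		if (!f) {
			perror("vmmemctl");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}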
762 | static int __init vmballoon_init(void) | ||
763 | { | ||
764 | int error; | ||
765 | |||
766 | /* | ||
767 | * Check if we are running on VMware's hypervisor and bail out | ||
768 | * if we are not. | ||
769 | */ | ||
770 | if (!vmware_platform()) | ||
771 | return -ENODEV; | ||
772 | |||
773 | vmballoon_wq = create_freezeable_workqueue("vmmemctl"); | ||
774 | if (!vmballoon_wq) { | ||
775 | pr_err("failed to create workqueue\n"); | ||
776 | return -ENOMEM; | ||
777 | } | ||
778 | |||
779 | INIT_LIST_HEAD(&balloon.pages); | ||
780 | INIT_LIST_HEAD(&balloon.refused_pages); | ||
781 | |||
782 | /* initialize rates */ | ||
783 | balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX; | ||
784 | balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX; | ||
785 | |||
786 | INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work); | ||
787 | |||
788 | /* | ||
789 | * Start balloon. | ||
790 | */ | ||
791 | if (!vmballoon_send_start(&balloon)) { | ||
792 | pr_err("failed to send start command to the host\n"); | ||
793 | error = -EIO; | ||
794 | goto fail; | ||
795 | } | ||
796 | |||
797 | if (!vmballoon_send_guest_id(&balloon)) { | ||
798 | pr_err("failed to send guest ID to the host\n"); | ||
799 | error = -EIO; | ||
800 | goto fail; | ||
801 | } | ||
802 | |||
803 | error = vmballoon_debugfs_init(&balloon); | ||
804 | if (error) | ||
805 | goto fail; | ||
806 | |||
807 | queue_delayed_work(vmballoon_wq, &balloon.dwork, 0); | ||
808 | |||
809 | return 0; | ||
810 | |||
811 | fail: | ||
812 | destroy_workqueue(vmballoon_wq); | ||
813 | return error; | ||
814 | } | ||
815 | module_init(vmballoon_init); | ||
816 | |||
817 | static void __exit vmballoon_exit(void) | ||
818 | { | ||
819 | cancel_delayed_work_sync(&balloon.dwork); | ||
820 | destroy_workqueue(vmballoon_wq); | ||
821 | |||
822 | vmballoon_debugfs_exit(&balloon); | ||
823 | |||
824 | /* | ||
825 | * Deallocate all reserved memory, and reset connection with monitor. | ||
826 | * Reset connection before deallocating memory to avoid potential for | ||
827 | * additional spurious resets from guest touching deallocated pages. | ||
828 | */ | ||
829 | vmballoon_send_start(&balloon); | ||
830 | vmballoon_pop(&balloon); | ||
831 | } | ||
832 | module_exit(vmballoon_exit); | ||
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index 82d1e4de475b..4521b1ecce45 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | # Core functionality. | 5 | # Core functionality. |
6 | obj-$(CONFIG_MTD) += mtd.o | 6 | obj-$(CONFIG_MTD) += mtd.o |
7 | mtd-y := mtdcore.o mtdsuper.o mtdbdi.o | 7 | mtd-y := mtdcore.o mtdsuper.o |
8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o | 8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o |
9 | 9 | ||
10 | obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o | 10 | obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o |
diff --git a/drivers/mtd/internal.h b/drivers/mtd/internal.h index c658fe7216b5..e69de29bb2d1 100644 --- a/drivers/mtd/internal.h +++ b/drivers/mtd/internal.h | |||
@@ -1,17 +0,0 @@ | |||
1 | /* Internal MTD definitions | ||
2 | * | ||
3 | * Copyright © 2006 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * mtdbdi.c | ||
14 | */ | ||
15 | extern struct backing_dev_info mtd_bdi_unmappable; | ||
16 | extern struct backing_dev_info mtd_bdi_ro_mappable; | ||
17 | extern struct backing_dev_info mtd_bdi_rw_mappable; | ||
diff --git a/drivers/mtd/mtdbdi.c b/drivers/mtd/mtdbdi.c index 5ca5aed0b225..e69de29bb2d1 100644 --- a/drivers/mtd/mtdbdi.c +++ b/drivers/mtd/mtdbdi.c | |||
@@ -1,43 +0,0 @@ | |||
1 | /* MTD backing device capabilities | ||
2 | * | ||
3 | * Copyright © 2006 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/backing-dev.h> | ||
13 | #include <linux/mtd/mtd.h> | ||
14 | #include "internal.h" | ||
15 | |||
16 | /* | ||
17 | * backing device capabilities for non-mappable devices (such as NAND flash) | ||
18 | * - permits private mappings, copies are taken of the data | ||
19 | */ | ||
20 | struct backing_dev_info mtd_bdi_unmappable = { | ||
21 | .capabilities = BDI_CAP_MAP_COPY, | ||
22 | }; | ||
23 | |||
24 | /* | ||
25 | * backing device capabilities for R/O mappable devices (such as ROM) | ||
26 | * - permits private mappings, copies are taken of the data | ||
27 | * - permits non-writable shared mappings | ||
28 | */ | ||
29 | struct backing_dev_info mtd_bdi_ro_mappable = { | ||
30 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
31 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), | ||
32 | }; | ||
33 | |||
34 | /* | ||
35 | * backing device capabilities for writable mappable devices (such as RAM) | ||
36 | * - permits private mappings, copies are taken of the data | ||
37 | * - permits non-writable shared mappings | ||
38 | */ | ||
39 | struct backing_dev_info mtd_bdi_rw_mappable = { | ||
40 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
41 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | | ||
42 | BDI_CAP_WRITE_MAP), | ||
43 | }; | ||
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 5b38b17d2229..b177e750efc3 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -2,6 +2,9 @@ | |||
2 | * Core registration and callback routines for MTD | 2 | * Core registration and callback routines for MTD |
3 | * drivers and users. | 3 | * drivers and users. |
4 | * | 4 | * |
5 | * bdi bits are: | ||
6 | * Copyright © 2006 Red Hat, Inc. All Rights Reserved. | ||
7 | * Written by David Howells (dhowells@redhat.com) | ||
5 | */ | 8 | */ |
6 | 9 | ||
7 | #include <linux/module.h> | 10 | #include <linux/module.h> |
@@ -16,11 +19,39 @@ | |||
16 | #include <linux/init.h> | 19 | #include <linux/init.h> |
17 | #include <linux/mtd/compatmac.h> | 20 | #include <linux/mtd/compatmac.h> |
18 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
22 | #include <linux/backing-dev.h> | ||
19 | 23 | ||
20 | #include <linux/mtd/mtd.h> | 24 | #include <linux/mtd/mtd.h> |
21 | #include "internal.h" | ||
22 | 25 | ||
23 | #include "mtdcore.h" | 26 | #include "mtdcore.h" |
27 | /* | ||
28 | * backing device capabilities for non-mappable devices (such as NAND flash) | ||
29 | * - permits private mappings, copies are taken of the data | ||
30 | */ | ||
31 | struct backing_dev_info mtd_bdi_unmappable = { | ||
32 | .capabilities = BDI_CAP_MAP_COPY, | ||
33 | }; | ||
34 | |||
35 | /* | ||
36 | * backing device capabilities for R/O mappable devices (such as ROM) | ||
37 | * - permits private mappings, copies are taken of the data | ||
38 | * - permits non-writable shared mappings | ||
39 | */ | ||
40 | struct backing_dev_info mtd_bdi_ro_mappable = { | ||
41 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
42 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), | ||
43 | }; | ||
44 | |||
45 | /* | ||
46 | * backing device capabilities for writable mappable devices (such as RAM) | ||
47 | * - permits private mappings, copies are taken of the data | ||
48 | * - permits non-writable shared mappings | ||
49 | */ | ||
50 | struct backing_dev_info mtd_bdi_rw_mappable = { | ||
51 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
52 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | | ||
53 | BDI_CAP_WRITE_MAP), | ||
54 | }; | ||
24 | 55 | ||
25 | static int mtd_cls_suspend(struct device *dev, pm_message_t state); | 56 | static int mtd_cls_suspend(struct device *dev, pm_message_t state); |
26 | static int mtd_cls_resume(struct device *dev); | 57 | static int mtd_cls_resume(struct device *dev); |
@@ -628,20 +659,55 @@ done: | |||
628 | /*====================================================================*/ | 659 | /*====================================================================*/ |
629 | /* Init code */ | 660 | /* Init code */ |
630 | 661 | ||
662 | static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name) | ||
663 | { | ||
664 | int ret; | ||
665 | |||
666 | ret = bdi_init(bdi); | ||
667 | if (!ret) | ||
668 | ret = bdi_register(bdi, NULL, name); | ||
669 | |||
670 | if (ret) | ||
671 | bdi_destroy(bdi); | ||
672 | |||
673 | return ret; | ||
674 | } | ||
675 | |||
631 | static int __init init_mtd(void) | 676 | static int __init init_mtd(void) |
632 | { | 677 | { |
633 | int ret; | 678 | int ret; |
679 | |||
634 | ret = class_register(&mtd_class); | 680 | ret = class_register(&mtd_class); |
681 | if (ret) | ||
682 | goto err_reg; | ||
683 | |||
684 | ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap"); | ||
685 | if (ret) | ||
686 | goto err_bdi1; | ||
687 | |||
688 | ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap"); | ||
689 | if (ret) | ||
690 | goto err_bdi2; | ||
691 | |||
692 | ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap"); | ||
693 | if (ret) | ||
694 | goto err_bdi3; | ||
635 | 695 | ||
636 | if (ret) { | ||
637 | pr_err("Error registering mtd class: %d\n", ret); | ||
638 | return ret; | ||
639 | } | ||
640 | #ifdef CONFIG_PROC_FS | 696 | #ifdef CONFIG_PROC_FS |
641 | if ((proc_mtd = create_proc_entry( "mtd", 0, NULL ))) | 697 | if ((proc_mtd = create_proc_entry( "mtd", 0, NULL ))) |
642 | proc_mtd->read_proc = mtd_read_proc; | 698 | proc_mtd->read_proc = mtd_read_proc; |
643 | #endif /* CONFIG_PROC_FS */ | 699 | #endif /* CONFIG_PROC_FS */ |
644 | return 0; | 700 | return 0; |
701 | |||
702 | err_bdi3: | ||
703 | bdi_destroy(&mtd_bdi_ro_mappable); | ||
704 | err_bdi2: | ||
705 | bdi_destroy(&mtd_bdi_unmappable); | ||
706 | err_bdi1: | ||
707 | class_unregister(&mtd_class); | ||
708 | err_reg: | ||
709 | pr_err("Error registering mtd class or bdi: %d\n", ret); | ||
710 | return ret; | ||
645 | } | 711 | } |
646 | 712 | ||
647 | static void __exit cleanup_mtd(void) | 713 | static void __exit cleanup_mtd(void) |
@@ -651,6 +717,9 @@ static void __exit cleanup_mtd(void) | |||
651 | remove_proc_entry( "mtd", NULL); | 717 | remove_proc_entry( "mtd", NULL); |
652 | #endif /* CONFIG_PROC_FS */ | 718 | #endif /* CONFIG_PROC_FS */ |
653 | class_unregister(&mtd_class); | 719 | class_unregister(&mtd_class); |
720 | bdi_destroy(&mtd_bdi_unmappable); | ||
721 | bdi_destroy(&mtd_bdi_ro_mappable); | ||
722 | bdi_destroy(&mtd_bdi_rw_mappable); | ||
654 | } | 723 | } |
655 | 724 | ||
656 | module_init(init_mtd); | 725 | module_init(init_mtd); |
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index af8b42e0a55b..7c003191fca4 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/mtd/super.h> | 13 | #include <linux/mtd/super.h> |
14 | #include <linux/namei.h> | 14 | #include <linux/namei.h> |
15 | #include <linux/ctype.h> | 15 | #include <linux/ctype.h> |
16 | #include <linux/slab.h> | ||
16 | 17 | ||
17 | /* | 18 | /* |
18 | * compare superblocks to see if they're equivalent | 19 | * compare superblocks to see if they're equivalent |
@@ -44,6 +45,7 @@ static int get_sb_mtd_set(struct super_block *sb, void *_mtd) | |||
44 | 45 | ||
45 | sb->s_mtd = mtd; | 46 | sb->s_mtd = mtd; |
46 | sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index); | 47 | sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index); |
48 | sb->s_bdi = mtd->backing_dev_info; | ||
47 | return 0; | 49 | return 0; |
48 | } | 50 | } |
49 | 51 | ||
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index f59c07427af3..d60fc5719fef 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c | |||
@@ -60,7 +60,13 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |||
60 | } | 60 | } |
61 | buf64 = (uint64_t *)buf; | 61 | buf64 = (uint64_t *)buf; |
62 | while (i < len/8) { | 62 | while (i < len/8) { |
63 | uint64_t x; | 63 | /* |
64 | * Since GCC has no proper constraint (PR 43518) | ||
65 | * force the x variable to the r2/r3 registers, as the ldrd instruction | ||
66 | * requires the first register to be even. | ||
67 | */ | ||
68 | register uint64_t x asm ("r2"); | ||
69 | |||
64 | asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); | 70 | asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); |
65 | buf64[i++] = x; | 71 | buf64[i++] = x; |
66 | } | 72 | } |
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index a03d291de854..f0d23de32967 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -1944,7 +1944,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp, | |||
1944 | netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n", | 1944 | netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n", |
1945 | __func__, rx_status, rx_size, cur_rx); | 1945 | __func__, rx_status, rx_size, cur_rx); |
1946 | #if RTL8139_DEBUG > 2 | 1946 | #if RTL8139_DEBUG > 2 |
1947 | print_dump_hex(KERN_DEBUG, "Frame contents: ", | 1947 | print_hex_dump(KERN_DEBUG, "Frame contents: ", |
1948 | DUMP_PREFIX_OFFSET, 16, 1, | 1948 | DUMP_PREFIX_OFFSET, 16, 1, |
1949 | &rx_ring[ring_offset], 70, true); | 1949 | &rx_ring[ring_offset], 70, true); |
1950 | #endif | 1950 | #endif |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index a583b50d9de8..12b280afdd51 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -273,6 +273,7 @@ obj-$(CONFIG_USB_RTL8150) += usb/ | |||
273 | obj-$(CONFIG_USB_HSO) += usb/ | 273 | obj-$(CONFIG_USB_HSO) += usb/ |
274 | obj-$(CONFIG_USB_USBNET) += usb/ | 274 | obj-$(CONFIG_USB_USBNET) += usb/ |
275 | obj-$(CONFIG_USB_ZD1201) += usb/ | 275 | obj-$(CONFIG_USB_ZD1201) += usb/ |
276 | obj-$(CONFIG_USB_IPHETH) += usb/ | ||
276 | 277 | ||
277 | obj-y += wireless/ | 278 | obj-y += wireless/ |
278 | obj-$(CONFIG_NET_TULIP) += tulip/ | 279 | obj-$(CONFIG_NET_TULIP) += tulip/ |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index a257babd1bb4..ac90a3828f69 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -58,8 +58,8 @@ | |||
58 | #include "bnx2_fw.h" | 58 | #include "bnx2_fw.h" |
59 | 59 | ||
60 | #define DRV_MODULE_NAME "bnx2" | 60 | #define DRV_MODULE_NAME "bnx2" |
61 | #define DRV_MODULE_VERSION "2.0.8" | 61 | #define DRV_MODULE_VERSION "2.0.9" |
62 | #define DRV_MODULE_RELDATE "Feb 15, 2010" | 62 | #define DRV_MODULE_RELDATE "April 27, 2010" |
63 | #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" | 63 | #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" |
64 | #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" | 64 | #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" |
65 | #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw" | 65 | #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw" |
@@ -651,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp) | |||
651 | } | 651 | } |
652 | 652 | ||
653 | static void | 653 | static void |
654 | bnx2_netif_stop(struct bnx2 *bp) | 654 | bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic) |
655 | { | 655 | { |
656 | bnx2_cnic_stop(bp); | 656 | if (stop_cnic) |
657 | bnx2_cnic_stop(bp); | ||
657 | if (netif_running(bp->dev)) { | 658 | if (netif_running(bp->dev)) { |
658 | int i; | 659 | int i; |
659 | 660 | ||
@@ -671,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp) | |||
671 | } | 672 | } |
672 | 673 | ||
673 | static void | 674 | static void |
674 | bnx2_netif_start(struct bnx2 *bp) | 675 | bnx2_netif_start(struct bnx2 *bp, bool start_cnic) |
675 | { | 676 | { |
676 | if (atomic_dec_and_test(&bp->intr_sem)) { | 677 | if (atomic_dec_and_test(&bp->intr_sem)) { |
677 | if (netif_running(bp->dev)) { | 678 | if (netif_running(bp->dev)) { |
678 | netif_tx_wake_all_queues(bp->dev); | 679 | netif_tx_wake_all_queues(bp->dev); |
679 | bnx2_napi_enable(bp); | 680 | bnx2_napi_enable(bp); |
680 | bnx2_enable_int(bp); | 681 | bnx2_enable_int(bp); |
681 | bnx2_cnic_start(bp); | 682 | if (start_cnic) |
683 | bnx2_cnic_start(bp); | ||
682 | } | 684 | } |
683 | } | 685 | } |
684 | } | 686 | } |
@@ -4759,8 +4761,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | |||
4759 | rc = bnx2_alloc_bad_rbuf(bp); | 4761 | rc = bnx2_alloc_bad_rbuf(bp); |
4760 | } | 4762 | } |
4761 | 4763 | ||
4762 | if (bp->flags & BNX2_FLAG_USING_MSIX) | 4764 | if (bp->flags & BNX2_FLAG_USING_MSIX) { |
4763 | bnx2_setup_msix_tbl(bp); | 4765 | bnx2_setup_msix_tbl(bp); |
4766 | /* Prevent MSIX table reads and writes from timing out */ | ||
4767 | REG_WR(bp, BNX2_MISC_ECO_HW_CTL, | ||
4768 | BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN); | ||
4769 | } | ||
4764 | 4770 | ||
4765 | return rc; | 4771 | return rc; |
4766 | } | 4772 | } |
@@ -6273,12 +6279,12 @@ bnx2_reset_task(struct work_struct *work) | |||
6273 | return; | 6279 | return; |
6274 | } | 6280 | } |
6275 | 6281 | ||
6276 | bnx2_netif_stop(bp); | 6282 | bnx2_netif_stop(bp, true); |
6277 | 6283 | ||
6278 | bnx2_init_nic(bp, 1); | 6284 | bnx2_init_nic(bp, 1); |
6279 | 6285 | ||
6280 | atomic_set(&bp->intr_sem, 1); | 6286 | atomic_set(&bp->intr_sem, 1); |
6281 | bnx2_netif_start(bp); | 6287 | bnx2_netif_start(bp, true); |
6282 | rtnl_unlock(); | 6288 | rtnl_unlock(); |
6283 | } | 6289 | } |
6284 | 6290 | ||
@@ -6320,7 +6326,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp) | |||
6320 | struct bnx2 *bp = netdev_priv(dev); | 6326 | struct bnx2 *bp = netdev_priv(dev); |
6321 | 6327 | ||
6322 | if (netif_running(dev)) | 6328 | if (netif_running(dev)) |
6323 | bnx2_netif_stop(bp); | 6329 | bnx2_netif_stop(bp, false); |
6324 | 6330 | ||
6325 | bp->vlgrp = vlgrp; | 6331 | bp->vlgrp = vlgrp; |
6326 | 6332 | ||
@@ -6331,7 +6337,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp) | |||
6331 | if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) | 6337 | if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) |
6332 | bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); | 6338 | bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); |
6333 | 6339 | ||
6334 | bnx2_netif_start(bp); | 6340 | bnx2_netif_start(bp, false); |
6335 | } | 6341 | } |
6336 | #endif | 6342 | #endif |
6337 | 6343 | ||
@@ -7051,9 +7057,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) | |||
7051 | bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; | 7057 | bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; |
7052 | 7058 | ||
7053 | if (netif_running(bp->dev)) { | 7059 | if (netif_running(bp->dev)) { |
7054 | bnx2_netif_stop(bp); | 7060 | bnx2_netif_stop(bp, true); |
7055 | bnx2_init_nic(bp, 0); | 7061 | bnx2_init_nic(bp, 0); |
7056 | bnx2_netif_start(bp); | 7062 | bnx2_netif_start(bp, true); |
7057 | } | 7063 | } |
7058 | 7064 | ||
7059 | return 0; | 7065 | return 0; |
@@ -7083,7 +7089,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx) | |||
7083 | /* Reset will erase chipset stats; save them */ | 7089 | /* Reset will erase chipset stats; save them */ |
7084 | bnx2_save_stats(bp); | 7090 | bnx2_save_stats(bp); |
7085 | 7091 | ||
7086 | bnx2_netif_stop(bp); | 7092 | bnx2_netif_stop(bp, true); |
7087 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); | 7093 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); |
7088 | bnx2_free_skbs(bp); | 7094 | bnx2_free_skbs(bp); |
7089 | bnx2_free_mem(bp); | 7095 | bnx2_free_mem(bp); |
@@ -7111,7 +7117,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx) | |||
7111 | bnx2_setup_cnic_irq_info(bp); | 7117 | bnx2_setup_cnic_irq_info(bp); |
7112 | mutex_unlock(&bp->cnic_lock); | 7118 | mutex_unlock(&bp->cnic_lock); |
7113 | #endif | 7119 | #endif |
7114 | bnx2_netif_start(bp); | 7120 | bnx2_netif_start(bp, true); |
7115 | } | 7121 | } |
7116 | return 0; | 7122 | return 0; |
7117 | } | 7123 | } |
@@ -7364,7 +7370,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) | |||
7364 | if (etest->flags & ETH_TEST_FL_OFFLINE) { | 7370 | if (etest->flags & ETH_TEST_FL_OFFLINE) { |
7365 | int i; | 7371 | int i; |
7366 | 7372 | ||
7367 | bnx2_netif_stop(bp); | 7373 | bnx2_netif_stop(bp, true); |
7368 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); | 7374 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); |
7369 | bnx2_free_skbs(bp); | 7375 | bnx2_free_skbs(bp); |
7370 | 7376 | ||
@@ -7383,7 +7389,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) | |||
7383 | bnx2_shutdown_chip(bp); | 7389 | bnx2_shutdown_chip(bp); |
7384 | else { | 7390 | else { |
7385 | bnx2_init_nic(bp, 1); | 7391 | bnx2_init_nic(bp, 1); |
7386 | bnx2_netif_start(bp); | 7392 | bnx2_netif_start(bp, true); |
7387 | } | 7393 | } |
7388 | 7394 | ||
7389 | /* wait for link up */ | 7395 | /* wait for link up */ |
@@ -8377,7 +8383,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state) | |||
8377 | return 0; | 8383 | return 0; |
8378 | 8384 | ||
8379 | flush_scheduled_work(); | 8385 | flush_scheduled_work(); |
8380 | bnx2_netif_stop(bp); | 8386 | bnx2_netif_stop(bp, true); |
8381 | netif_device_detach(dev); | 8387 | netif_device_detach(dev); |
8382 | del_timer_sync(&bp->timer); | 8388 | del_timer_sync(&bp->timer); |
8383 | bnx2_shutdown_chip(bp); | 8389 | bnx2_shutdown_chip(bp); |
@@ -8399,7 +8405,7 @@ bnx2_resume(struct pci_dev *pdev) | |||
8399 | bnx2_set_power_state(bp, PCI_D0); | 8405 | bnx2_set_power_state(bp, PCI_D0); |
8400 | netif_device_attach(dev); | 8406 | netif_device_attach(dev); |
8401 | bnx2_init_nic(bp, 1); | 8407 | bnx2_init_nic(bp, 1); |
8402 | bnx2_netif_start(bp); | 8408 | bnx2_netif_start(bp, true); |
8403 | return 0; | 8409 | return 0; |
8404 | } | 8410 | } |
8405 | 8411 | ||
@@ -8426,7 +8432,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev, | |||
8426 | } | 8432 | } |
8427 | 8433 | ||
8428 | if (netif_running(dev)) { | 8434 | if (netif_running(dev)) { |
8429 | bnx2_netif_stop(bp); | 8435 | bnx2_netif_stop(bp, true); |
8430 | del_timer_sync(&bp->timer); | 8436 | del_timer_sync(&bp->timer); |
8431 | bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); | 8437 | bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); |
8432 | } | 8438 | } |
@@ -8483,7 +8489,7 @@ static void bnx2_io_resume(struct pci_dev *pdev) | |||
8483 | 8489 | ||
8484 | rtnl_lock(); | 8490 | rtnl_lock(); |
8485 | if (netif_running(dev)) | 8491 | if (netif_running(dev)) |
8486 | bnx2_netif_start(bp); | 8492 | bnx2_netif_start(bp, true); |
8487 | 8493 | ||
8488 | netif_device_attach(dev); | 8494 | netif_device_attach(dev); |
8489 | rtnl_unlock(); | 8495 | rtnl_unlock(); |
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 33451092b8e8..d800b598ae3d 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c | |||
@@ -1006,7 +1006,7 @@ static int ems_usb_probe(struct usb_interface *intf, | |||
1006 | 1006 | ||
1007 | netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS); | 1007 | netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS); |
1008 | if (!netdev) { | 1008 | if (!netdev) { |
1009 | dev_err(netdev->dev.parent, "Couldn't alloc candev\n"); | 1009 | dev_err(&intf->dev, "ems_usb: Couldn't alloc candev\n"); |
1010 | return -ENOMEM; | 1010 | return -ENOMEM; |
1011 | } | 1011 | } |
1012 | 1012 | ||
@@ -1036,20 +1036,20 @@ static int ems_usb_probe(struct usb_interface *intf, | |||
1036 | 1036 | ||
1037 | dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); | 1037 | dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); |
1038 | if (!dev->intr_urb) { | 1038 | if (!dev->intr_urb) { |
1039 | dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n"); | 1039 | dev_err(&intf->dev, "Couldn't alloc intr URB\n"); |
1040 | goto cleanup_candev; | 1040 | goto cleanup_candev; |
1041 | } | 1041 | } |
1042 | 1042 | ||
1043 | dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); | 1043 | dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); |
1044 | if (!dev->intr_in_buffer) { | 1044 | if (!dev->intr_in_buffer) { |
1045 | dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n"); | 1045 | dev_err(&intf->dev, "Couldn't alloc Intr buffer\n"); |
1046 | goto cleanup_intr_urb; | 1046 | goto cleanup_intr_urb; |
1047 | } | 1047 | } |
1048 | 1048 | ||
1049 | dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE + | 1049 | dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE + |
1050 | sizeof(struct ems_cpc_msg), GFP_KERNEL); | 1050 | sizeof(struct ems_cpc_msg), GFP_KERNEL); |
1051 | if (!dev->tx_msg_buffer) { | 1051 | if (!dev->tx_msg_buffer) { |
1052 | dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n"); | 1052 | dev_err(&intf->dev, "Couldn't alloc Tx buffer\n"); |
1053 | goto cleanup_intr_in_buffer; | 1053 | goto cleanup_intr_in_buffer; |
1054 | } | 1054 | } |
1055 | 1055 | ||
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 9781942992e9..4b451a7c03e9 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -2334,13 +2334,13 @@ static int cnic_service_bnx2x(void *data, void *status_blk) | |||
2334 | struct cnic_local *cp = dev->cnic_priv; | 2334 | struct cnic_local *cp = dev->cnic_priv; |
2335 | u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; | 2335 | u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; |
2336 | 2336 | ||
2337 | prefetch(cp->status_blk.bnx2x); | 2337 | if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { |
2338 | prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); | 2338 | prefetch(cp->status_blk.bnx2x); |
2339 | prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); | ||
2339 | 2340 | ||
2340 | if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) | ||
2341 | tasklet_schedule(&cp->cnic_irq_task); | 2341 | tasklet_schedule(&cp->cnic_irq_task); |
2342 | 2342 | cnic_chk_pkt_rings(cp); | |
2343 | cnic_chk_pkt_rings(cp); | 2343 | } |
2344 | 2344 | ||
2345 | return 0; | 2345 | return 0; |
2346 | } | 2346 | } |
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c index 5248f9e0b2f4..35cd36729155 100644 --- a/drivers/net/cxgb3/ael1002.c +++ b/drivers/net/cxgb3/ael1002.c | |||
@@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops = { | |||
934 | int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, | 934 | int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, |
935 | int phy_addr, const struct mdio_ops *mdio_ops) | 935 | int phy_addr, const struct mdio_ops *mdio_ops) |
936 | { | 936 | { |
937 | cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops, | 937 | cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops, |
938 | SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, | 938 | SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, |
939 | "10GBASE-CX4"); | 939 | "10GBASE-CX4"); |
940 | return 0; | 940 | return 0; |
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index aced6c5e635c..e3f1b8566495 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c | |||
@@ -439,7 +439,7 @@ static void free_irq_resources(struct adapter *adapter) | |||
439 | static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, | 439 | static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, |
440 | unsigned long n) | 440 | unsigned long n) |
441 | { | 441 | { |
442 | int attempts = 5; | 442 | int attempts = 10; |
443 | 443 | ||
444 | while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { | 444 | while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { |
445 | if (!--attempts) | 445 | if (!--attempts) |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index b997e578e58f..791080303db1 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -166,6 +166,7 @@ | |||
166 | #include <linux/ethtool.h> | 166 | #include <linux/ethtool.h> |
167 | #include <linux/string.h> | 167 | #include <linux/string.h> |
168 | #include <linux/firmware.h> | 168 | #include <linux/firmware.h> |
169 | #include <linux/rtnetlink.h> | ||
169 | #include <asm/unaligned.h> | 170 | #include <asm/unaligned.h> |
170 | 171 | ||
171 | 172 | ||
@@ -2265,8 +2266,13 @@ static void e100_tx_timeout_task(struct work_struct *work) | |||
2265 | 2266 | ||
2266 | DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", | 2267 | DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", |
2267 | ioread8(&nic->csr->scb.status)); | 2268 | ioread8(&nic->csr->scb.status)); |
2268 | e100_down(netdev_priv(netdev)); | 2269 | |
2269 | e100_up(netdev_priv(netdev)); | 2270 | rtnl_lock(); |
2271 | if (netif_running(netdev)) { | ||
2272 | e100_down(netdev_priv(netdev)); | ||
2273 | e100_up(netdev_priv(netdev)); | ||
2274 | } | ||
2275 | rtnl_unlock(); | ||
2270 | } | 2276 | } |
2271 | 2277 | ||
2272 | static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) | 2278 | static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) |
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index 712ccc66ba25..90155552ea09 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
@@ -336,7 +336,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) | |||
336 | struct e1000_hw *hw = &adapter->hw; | 336 | struct e1000_hw *hw = &adapter->hw; |
337 | static int global_quad_port_a; /* global port a indication */ | 337 | static int global_quad_port_a; /* global port a indication */ |
338 | struct pci_dev *pdev = adapter->pdev; | 338 | struct pci_dev *pdev = adapter->pdev; |
339 | u16 eeprom_data = 0; | ||
340 | int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; | 339 | int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; |
341 | s32 rc; | 340 | s32 rc; |
342 | 341 | ||
@@ -387,16 +386,15 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) | |||
387 | if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) | 386 | if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) |
388 | adapter->flags &= ~FLAG_HAS_WOL; | 387 | adapter->flags &= ~FLAG_HAS_WOL; |
389 | break; | 388 | break; |
390 | |||
391 | case e1000_82573: | 389 | case e1000_82573: |
390 | case e1000_82574: | ||
391 | case e1000_82583: | ||
392 | /* Disable ASPM L0s due to hardware errata */ | ||
393 | e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S); | ||
394 | |||
392 | if (pdev->device == E1000_DEV_ID_82573L) { | 395 | if (pdev->device == E1000_DEV_ID_82573L) { |
393 | if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, | 396 | adapter->flags |= FLAG_HAS_JUMBO_FRAMES; |
394 | &eeprom_data) < 0) | 397 | adapter->max_hw_frame_size = DEFAULT_JUMBO; |
395 | break; | ||
396 | if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) { | ||
397 | adapter->flags |= FLAG_HAS_JUMBO_FRAMES; | ||
398 | adapter->max_hw_frame_size = DEFAULT_JUMBO; | ||
399 | } | ||
400 | } | 398 | } |
401 | break; | 399 | break; |
402 | default: | 400 | default: |
@@ -1792,6 +1790,7 @@ struct e1000_info e1000_82571_info = { | |||
1792 | | FLAG_RESET_OVERWRITES_LAA /* errata */ | 1790 | | FLAG_RESET_OVERWRITES_LAA /* errata */ |
1793 | | FLAG_TARC_SPEED_MODE_BIT /* errata */ | 1791 | | FLAG_TARC_SPEED_MODE_BIT /* errata */ |
1794 | | FLAG_APME_CHECK_PORT_B, | 1792 | | FLAG_APME_CHECK_PORT_B, |
1793 | .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */ | ||
1795 | .pba = 38, | 1794 | .pba = 38, |
1796 | .max_hw_frame_size = DEFAULT_JUMBO, | 1795 | .max_hw_frame_size = DEFAULT_JUMBO, |
1797 | .get_variants = e1000_get_variants_82571, | 1796 | .get_variants = e1000_get_variants_82571, |
@@ -1809,6 +1808,7 @@ struct e1000_info e1000_82572_info = { | |||
1809 | | FLAG_RX_CSUM_ENABLED | 1808 | | FLAG_RX_CSUM_ENABLED |
1810 | | FLAG_HAS_CTRLEXT_ON_LOAD | 1809 | | FLAG_HAS_CTRLEXT_ON_LOAD |
1811 | | FLAG_TARC_SPEED_MODE_BIT, /* errata */ | 1810 | | FLAG_TARC_SPEED_MODE_BIT, /* errata */ |
1811 | .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */ | ||
1812 | .pba = 38, | 1812 | .pba = 38, |
1813 | .max_hw_frame_size = DEFAULT_JUMBO, | 1813 | .max_hw_frame_size = DEFAULT_JUMBO, |
1814 | .get_variants = e1000_get_variants_82571, | 1814 | .get_variants = e1000_get_variants_82571, |
@@ -1820,13 +1820,11 @@ struct e1000_info e1000_82572_info = { | |||
1820 | struct e1000_info e1000_82573_info = { | 1820 | struct e1000_info e1000_82573_info = { |
1821 | .mac = e1000_82573, | 1821 | .mac = e1000_82573, |
1822 | .flags = FLAG_HAS_HW_VLAN_FILTER | 1822 | .flags = FLAG_HAS_HW_VLAN_FILTER |
1823 | | FLAG_HAS_JUMBO_FRAMES | ||
1824 | | FLAG_HAS_WOL | 1823 | | FLAG_HAS_WOL |
1825 | | FLAG_APME_IN_CTRL3 | 1824 | | FLAG_APME_IN_CTRL3 |
1826 | | FLAG_RX_CSUM_ENABLED | 1825 | | FLAG_RX_CSUM_ENABLED |
1827 | | FLAG_HAS_SMART_POWER_DOWN | 1826 | | FLAG_HAS_SMART_POWER_DOWN |
1828 | | FLAG_HAS_AMT | 1827 | | FLAG_HAS_AMT |
1829 | | FLAG_HAS_ERT | ||
1830 | | FLAG_HAS_SWSM_ON_LOAD, | 1828 | | FLAG_HAS_SWSM_ON_LOAD, |
1831 | .pba = 20, | 1829 | .pba = 20, |
1832 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, | 1830 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, |
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 118bdf483593..ee32b9b27a9f 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/io.h> | 37 | #include <linux/io.h> |
38 | #include <linux/netdevice.h> | 38 | #include <linux/netdevice.h> |
39 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
40 | #include <linux/pci-aspm.h> | ||
40 | 41 | ||
41 | #include "hw.h" | 42 | #include "hw.h" |
42 | 43 | ||
@@ -374,7 +375,7 @@ struct e1000_adapter { | |||
374 | struct e1000_info { | 375 | struct e1000_info { |
375 | enum e1000_mac_type mac; | 376 | enum e1000_mac_type mac; |
376 | unsigned int flags; | 377 | unsigned int flags; |
377 | unsigned int flags2; | 378 | unsigned int flags2; |
378 | u32 pba; | 379 | u32 pba; |
379 | u32 max_hw_frame_size; | 380 | u32 max_hw_frame_size; |
380 | s32 (*get_variants)(struct e1000_adapter *); | 381 | s32 (*get_variants)(struct e1000_adapter *); |
@@ -421,6 +422,7 @@ struct e1000_info { | |||
421 | #define FLAG2_CRC_STRIPPING (1 << 0) | 422 | #define FLAG2_CRC_STRIPPING (1 << 0) |
422 | #define FLAG2_HAS_PHY_WAKEUP (1 << 1) | 423 | #define FLAG2_HAS_PHY_WAKEUP (1 << 1) |
423 | #define FLAG2_IS_DISCARDING (1 << 2) | 424 | #define FLAG2_IS_DISCARDING (1 << 2) |
425 | #define FLAG2_DISABLE_ASPM_L1 (1 << 3) | ||
424 | 426 | ||
425 | #define E1000_RX_DESC_PS(R, i) \ | 427 | #define E1000_RX_DESC_PS(R, i) \ |
426 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 428 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
@@ -461,6 +463,7 @@ extern void e1000e_update_stats(struct e1000_adapter *adapter); | |||
461 | extern bool e1000e_has_link(struct e1000_adapter *adapter); | 463 | extern bool e1000e_has_link(struct e1000_adapter *adapter); |
462 | extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); | 464 | extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); |
463 | extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); | 465 | extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); |
466 | extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); | ||
464 | 467 | ||
465 | extern unsigned int copybreak; | 468 | extern unsigned int copybreak; |
466 | 469 | ||
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index cfd09cea7214..fb8fc7d1b50d 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -661,6 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
661 | i = 0; | 661 | i = 0; |
662 | } | 662 | } |
663 | 663 | ||
664 | if (i == tx_ring->next_to_use) | ||
665 | break; | ||
664 | eop = tx_ring->buffer_info[i].next_to_watch; | 666 | eop = tx_ring->buffer_info[i].next_to_watch; |
665 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 667 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
666 | } | 668 | } |
@@ -4281,6 +4283,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
4281 | return -EINVAL; | 4283 | return -EINVAL; |
4282 | } | 4284 | } |
4283 | 4285 | ||
4286 | /* 82573 Errata 17 */ | ||
4287 | if (((adapter->hw.mac.type == e1000_82573) || | ||
4288 | (adapter->hw.mac.type == e1000_82574)) && | ||
4289 | (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { | ||
4290 | adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; | ||
4291 | e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); | ||
4292 | } | ||
4293 | |||
4284 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | 4294 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) |
4285 | msleep(1); | 4295 | msleep(1); |
4286 | /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ | 4296 | /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ |
@@ -4603,29 +4613,39 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, | |||
4603 | } | 4613 | } |
4604 | } | 4614 | } |
4605 | 4615 | ||
4606 | static void e1000e_disable_l1aspm(struct pci_dev *pdev) | 4616 | #ifdef CONFIG_PCIEASPM |
4617 | static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
4618 | { | ||
4619 | pci_disable_link_state(pdev, state); | ||
4620 | } | ||
4621 | #else | ||
4622 | static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
4607 | { | 4623 | { |
4608 | int pos; | 4624 | int pos; |
4609 | u16 val; | 4625 | u16 reg16; |
4610 | 4626 | ||
4611 | /* | 4627 | /* |
4612 | * 82573 workaround - disable L1 ASPM on mobile chipsets | 4628 | * Both device and parent should have the same ASPM setting. |
4613 | * | 4629 | * Disable ASPM in downstream component first and then upstream. |
4614 | * L1 ASPM on various mobile (ich7) chipsets do not behave properly | ||
4615 | * resulting in lost data or garbage information on the pci-e link | ||
4616 | * level. This could result in (false) bad EEPROM checksum errors, | ||
4617 | * long ping times (up to 2s) or even a system freeze/hang. | ||
4618 | * | ||
4619 | * Unfortunately this feature saves about 1W power consumption when | ||
4620 | * active. | ||
4621 | */ | 4630 | */ |
4622 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 4631 | pos = pci_pcie_cap(pdev); |
4623 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val); | 4632 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); |
4624 | if (val & 0x2) { | 4633 | reg16 &= ~state; |
4625 | dev_warn(&pdev->dev, "Disabling L1 ASPM\n"); | 4634 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); |
4626 | val &= ~0x2; | 4635 | |
4627 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val); | 4636 | pos = pci_pcie_cap(pdev->bus->self); |
4628 | } | 4637 | pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); |
4638 | reg16 &= ~state; | ||
4639 | pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); | ||
4640 | } | ||
4641 | #endif | ||
4642 | void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
4643 | { | ||
4644 | dev_info(&pdev->dev, "Disabling ASPM %s %s\n", | ||
4645 | (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", | ||
4646 | (state & PCIE_LINK_STATE_L1) ? "L1" : ""); | ||
4647 | |||
4648 | __e1000e_disable_aspm(pdev, state); | ||
4629 | } | 4649 | } |
4630 | 4650 | ||
4631 | #ifdef CONFIG_PM | 4651 | #ifdef CONFIG_PM |
@@ -4651,7 +4671,8 @@ static int e1000_resume(struct pci_dev *pdev) | |||
4651 | pci_set_power_state(pdev, PCI_D0); | 4671 | pci_set_power_state(pdev, PCI_D0); |
4652 | pci_restore_state(pdev); | 4672 | pci_restore_state(pdev); |
4653 | pci_save_state(pdev); | 4673 | pci_save_state(pdev); |
4654 | e1000e_disable_l1aspm(pdev); | 4674 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) |
4675 | e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); | ||
4655 | 4676 | ||
4656 | err = pci_enable_device_mem(pdev); | 4677 | err = pci_enable_device_mem(pdev); |
4657 | if (err) { | 4678 | if (err) { |
@@ -4793,7 +4814,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
4793 | int err; | 4814 | int err; |
4794 | pci_ers_result_t result; | 4815 | pci_ers_result_t result; |
4795 | 4816 | ||
4796 | e1000e_disable_l1aspm(pdev); | 4817 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) |
4818 | e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); | ||
4797 | err = pci_enable_device_mem(pdev); | 4819 | err = pci_enable_device_mem(pdev); |
4798 | if (err) { | 4820 | if (err) { |
4799 | dev_err(&pdev->dev, | 4821 | dev_err(&pdev->dev, |
@@ -4887,13 +4909,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter) | |||
4887 | dev_warn(&adapter->pdev->dev, | 4909 | dev_warn(&adapter->pdev->dev, |
4888 | "Warning: detected DSPD enabled in EEPROM\n"); | 4910 | "Warning: detected DSPD enabled in EEPROM\n"); |
4889 | } | 4911 | } |
4890 | |||
4891 | ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf); | ||
4892 | if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) { | ||
4893 | /* ASPM enable */ | ||
4894 | dev_warn(&adapter->pdev->dev, | ||
4895 | "Warning: detected ASPM enabled in EEPROM\n"); | ||
4896 | } | ||
4897 | } | 4912 | } |
4898 | 4913 | ||
4899 | static const struct net_device_ops e1000e_netdev_ops = { | 4914 | static const struct net_device_ops e1000e_netdev_ops = { |
@@ -4942,7 +4957,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4942 | u16 eeprom_data = 0; | 4957 | u16 eeprom_data = 0; |
4943 | u16 eeprom_apme_mask = E1000_EEPROM_APME; | 4958 | u16 eeprom_apme_mask = E1000_EEPROM_APME; |
4944 | 4959 | ||
4945 | e1000e_disable_l1aspm(pdev); | 4960 | if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) |
4961 | e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); | ||
4946 | 4962 | ||
4947 | err = pci_enable_device_mem(pdev); | 4963 | err = pci_enable_device_mem(pdev); |
4948 | if (err) | 4964 | if (err) |
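When CONFIG_PCIEASPM is not set, the new __e1000e_disable_aspm() above falls back to clearing the ASPM enable bits in the PCIe Link Control register of the device and then of its upstream bridge by hand. A hedged sketch of that register-level sequence; like the patch, it relies on PCIE_LINK_STATE_L0S and PCIE_LINK_STATE_L1 having the same values as the two ASPM-control bits in LNKCTL:

#include <linux/pci.h>
#include <linux/pci_regs.h>

/* Clear the ASPM enable bits (LNKCTL bits 1:0) named in 'state' for one
 * device; 'state' is PCIE_LINK_STATE_L0S and/or PCIE_LINK_STATE_L1. */
static void clear_aspm_bits(struct pci_dev *dev, u16 state)
{
        int pos = pci_pcie_cap(dev);    /* PCIe capability offset, 0 if none */
        u16 lnkctl;

        if (!pos)
                return;
        pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &lnkctl);
        lnkctl &= ~state;
        pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, lnkctl);
}

/* Disable ASPM on the endpoint first, then on its parent bridge, mirroring
 * the order used in the patch. */
static void disable_aspm_pair(struct pci_dev *pdev, u16 state)
{
        clear_aspm_bits(pdev, state);
        if (pdev->bus && pdev->bus->self)
                clear_aspm_bits(pdev->bus->self, state);
}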
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 73b260c3c654..5c98f7c22425 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -5899,7 +5899,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5899 | /* Limit the number of tx's outstanding for hw bug */ | 5899 | /* Limit the number of tx's outstanding for hw bug */ |
5900 | if (id->driver_data & DEV_NEED_TX_LIMIT) { | 5900 | if (id->driver_data & DEV_NEED_TX_LIMIT) { |
5901 | np->tx_limit = 1; | 5901 | np->tx_limit = 1; |
5902 | if ((id->driver_data & DEV_NEED_TX_LIMIT2) && | 5902 | if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) && |
5903 | pci_dev->revision >= 0xA2) | 5903 | pci_dev->revision >= 0xA2) |
5904 | np->tx_limit = 0; | 5904 | np->tx_limit = 0; |
5905 | } | 5905 | } |
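The forcedeth change matters because DEV_NEED_TX_LIMIT2 is presumably defined to include the DEV_NEED_TX_LIMIT bit: a plain non-zero & test would also match boards that set only DEV_NEED_TX_LIMIT, while the new comparison requires every bit of the larger mask. A trivial illustration with made-up flag values:

/* Illustrative values only -- not the forcedeth definitions. */
#define NEED_TX_LIMIT   0x01
#define NEED_TX_LIMIT2  (NEED_TX_LIMIT | 0x02)

/* "Any bit of mask set" -- wrongly true when data has only NEED_TX_LIMIT. */
static int has_any(unsigned long data, unsigned long mask)
{
        return (data & mask) != 0;
}

/* "All bits of mask set" -- the corrected test from the hunk above. */
static int has_all(unsigned long data, unsigned long mask)
{
        return (data & mask) == mask;
}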
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c index d5160edf2fcf..3acac5f930c8 100644 --- a/drivers/net/fsl_pq_mdio.c +++ b/drivers/net/fsl_pq_mdio.c | |||
@@ -205,8 +205,6 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus) | |||
205 | static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) | 205 | static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) |
206 | { | 206 | { |
207 | struct gfar __iomem *enet_regs; | 207 | struct gfar __iomem *enet_regs; |
208 | u32 __iomem *ioremap_tbipa; | ||
209 | u64 addr, size; | ||
210 | 208 | ||
211 | /* | 209 | /* |
212 | * This is mildly evil, but so is our hardware for doing this. | 210 | * This is mildly evil, but so is our hardware for doing this. |
@@ -220,9 +218,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi | |||
220 | return &enet_regs->tbipa; | 218 | return &enet_regs->tbipa; |
221 | } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") || | 219 | } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") || |
222 | of_device_is_compatible(np, "fsl,etsec2-tbi")) { | 220 | of_device_is_compatible(np, "fsl,etsec2-tbi")) { |
223 | addr = of_translate_address(np, of_get_address(np, 1, &size, NULL)); | 221 | return of_iomap(np, 1); |
224 | ioremap_tbipa = ioremap(addr, size); | ||
225 | return ioremap_tbipa; | ||
226 | } else | 222 | } else |
227 | return NULL; | 223 | return NULL; |
228 | } | 224 | } |
@@ -279,6 +275,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
279 | u32 __iomem *tbipa; | 275 | u32 __iomem *tbipa; |
280 | struct mii_bus *new_bus; | 276 | struct mii_bus *new_bus; |
281 | int tbiaddr = -1; | 277 | int tbiaddr = -1; |
278 | const u32 *addrp; | ||
282 | u64 addr = 0, size = 0; | 279 | u64 addr = 0, size = 0; |
283 | int err = 0; | 280 | int err = 0; |
284 | 281 | ||
@@ -297,8 +294,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
297 | new_bus->priv = priv; | 294 | new_bus->priv = priv; |
298 | fsl_pq_mdio_bus_name(new_bus->id, np); | 295 | fsl_pq_mdio_bus_name(new_bus->id, np); |
299 | 296 | ||
297 | addrp = of_get_address(np, 0, &size, NULL); | ||
298 | if (!addrp) { | ||
299 | err = -EINVAL; | ||
300 | goto err_free_bus; | ||
301 | } | ||
302 | |||
300 | /* Set the PHY base address */ | 303 | /* Set the PHY base address */ |
301 | addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); | 304 | addr = of_translate_address(np, addrp); |
305 | if (addr == OF_BAD_ADDR) { | ||
306 | err = -EINVAL; | ||
307 | goto err_free_bus; | ||
308 | } | ||
309 | |||
302 | map = ioremap(addr, size); | 310 | map = ioremap(addr, size); |
303 | if (!map) { | 311 | if (!map) { |
304 | err = -ENOMEM; | 312 | err = -ENOMEM; |
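The fsl_pq_mdio probe path now validates both lookups before mapping: of_get_address() returns NULL when the node has no usable "reg" entry, and of_translate_address() returns OF_BAD_ADDR when the address cannot be translated into CPU address space; the TBI case is collapsed into of_iomap(), which performs the same steps in one call. A hedged sketch of the open-coded version (header placement for the OF helpers varies across kernel versions):

#include <linux/of.h>
#include <linux/io.h>

/* Map the first "reg" entry of an OF node with explicit error checks;
 * of_iomap(np, 0) is the single-call equivalent used for the TBI node. */
static void __iomem *map_reg0(struct device_node *np)
{
        const u32 *addrp;
        u64 addr, size;

        addrp = of_get_address(np, 0, &size, NULL);
        if (!addrp)
                return NULL;            /* no usable "reg" property */

        addr = of_translate_address(np, addrp);
        if (addr == OF_BAD_ADDR)
                return NULL;            /* not translatable to a CPU address */

        return ioremap(addr, size);
}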
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 080d1cea5b26..4e97ca182997 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -549,12 +549,8 @@ static int gfar_parse_group(struct device_node *np, | |||
549 | struct gfar_private *priv, const char *model) | 549 | struct gfar_private *priv, const char *model) |
550 | { | 550 | { |
551 | u32 *queue_mask; | 551 | u32 *queue_mask; |
552 | u64 addr, size; | ||
553 | |||
554 | addr = of_translate_address(np, | ||
555 | of_get_address(np, 0, &size, NULL)); | ||
556 | priv->gfargrp[priv->num_grps].regs = ioremap(addr, size); | ||
557 | 552 | ||
553 | priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0); | ||
558 | if (!priv->gfargrp[priv->num_grps].regs) | 554 | if (!priv->gfargrp[priv->num_grps].regs) |
559 | return -ENOMEM; | 555 | return -ENOMEM; |
560 | 556 | ||
@@ -1515,9 +1511,9 @@ static void gfar_halt_nodisable(struct net_device *dev) | |||
1515 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); | 1511 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
1516 | gfar_write(®s->dmactrl, tempval); | 1512 | gfar_write(®s->dmactrl, tempval); |
1517 | 1513 | ||
1518 | while (!(gfar_read(®s->ievent) & | 1514 | spin_event_timeout(((gfar_read(®s->ievent) & |
1519 | (IEVENT_GRSC | IEVENT_GTSC))) | 1515 | (IEVENT_GRSC | IEVENT_GTSC)) == |
1520 | cpu_relax(); | 1516 | (IEVENT_GRSC | IEVENT_GTSC)), -1, 0); |
1521 | } | 1517 | } |
1522 | } | 1518 | } |
1523 | 1519 | ||
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index d313fae992da..743038490104 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c | |||
@@ -1814,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, | |||
1814 | retval = 0; | 1814 | retval = 0; |
1815 | break; | 1815 | break; |
1816 | case E1000_DEV_ID_82576_QUAD_COPPER: | 1816 | case E1000_DEV_ID_82576_QUAD_COPPER: |
1817 | case E1000_DEV_ID_82576_QUAD_COPPER_ET2: | ||
1817 | /* quad port adapters only support WoL on port A */ | 1818 | /* quad port adapters only support WoL on port A */ |
1818 | if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { | 1819 | if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { |
1819 | wol->supported = 0; | 1820 | wol->supported = 0; |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 9b3c51ab1758..c9baa2aa98cd 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -1612,6 +1612,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1612 | adapter->eeprom_wol = 0; | 1612 | adapter->eeprom_wol = 0; |
1613 | break; | 1613 | break; |
1614 | case E1000_DEV_ID_82576_QUAD_COPPER: | 1614 | case E1000_DEV_ID_82576_QUAD_COPPER: |
1615 | case E1000_DEV_ID_82576_QUAD_COPPER_ET2: | ||
1615 | /* if quad port adapter, disable WoL on all but port A */ | 1616 | /* if quad port adapter, disable WoL on all but port A */ |
1616 | if (global_quad_port_a != 0) | 1617 | if (global_quad_port_a != 0) |
1617 | adapter->eeprom_wol = 0; | 1618 | adapter->eeprom_wol = 0; |
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index b405a00817c6..12fc0e7ba2ca 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c | |||
@@ -39,6 +39,8 @@ | |||
39 | #define IXGBE_82599_MC_TBL_SIZE 128 | 39 | #define IXGBE_82599_MC_TBL_SIZE 128 |
40 | #define IXGBE_82599_VFT_TBL_SIZE 128 | 40 | #define IXGBE_82599_VFT_TBL_SIZE 128 |
41 | 41 | ||
42 | void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | ||
43 | void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | ||
42 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | 44 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); |
43 | s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | 45 | s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, |
44 | ixgbe_link_speed speed, | 46 | ixgbe_link_speed speed, |
@@ -69,8 +71,14 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) | |||
69 | if (hw->phy.multispeed_fiber) { | 71 | if (hw->phy.multispeed_fiber) { |
70 | /* Set up dual speed SFP+ support */ | 72 | /* Set up dual speed SFP+ support */ |
71 | mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; | 73 | mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; |
74 | mac->ops.disable_tx_laser = | ||
75 | &ixgbe_disable_tx_laser_multispeed_fiber; | ||
76 | mac->ops.enable_tx_laser = | ||
77 | &ixgbe_enable_tx_laser_multispeed_fiber; | ||
72 | mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; | 78 | mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; |
73 | } else { | 79 | } else { |
80 | mac->ops.disable_tx_laser = NULL; | ||
81 | mac->ops.enable_tx_laser = NULL; | ||
74 | mac->ops.flap_tx_laser = NULL; | 82 | mac->ops.flap_tx_laser = NULL; |
75 | if ((mac->ops.get_media_type(hw) == | 83 | if ((mac->ops.get_media_type(hw) == |
76 | ixgbe_media_type_backplane) && | 84 | ixgbe_media_type_backplane) && |
@@ -415,6 +423,44 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, | |||
415 | return status; | 423 | return status; |
416 | } | 424 | } |
417 | 425 | ||
426 | /** | ||
427 | * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser | ||
428 | * @hw: pointer to hardware structure | ||
429 | * | ||
430 | * The base drivers may require better control over SFP+ module | ||
431 | * PHY states. This includes selectively shutting down the Tx | ||
432 | * laser on the PHY, effectively halting physical link. | ||
433 | **/ | ||
434 | void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | ||
435 | { | ||
436 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | ||
437 | |||
438 | /* Disable tx laser; allow 100us to go dark per spec */ | ||
439 | esdp_reg |= IXGBE_ESDP_SDP3; | ||
440 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
441 | IXGBE_WRITE_FLUSH(hw); | ||
442 | udelay(100); | ||
443 | } | ||
444 | |||
445 | /** | ||
446 | * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser | ||
447 | * @hw: pointer to hardware structure | ||
448 | * | ||
449 | * The base drivers may require better control over SFP+ module | ||
450 | * PHY states. This includes selectively turning on the Tx | ||
451 | * laser on the PHY, effectively starting physical link. | ||
452 | **/ | ||
453 | void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | ||
454 | { | ||
455 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | ||
456 | |||
457 | /* Enable tx laser; allow 100ms to light up */ | ||
458 | esdp_reg &= ~IXGBE_ESDP_SDP3; | ||
459 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
460 | IXGBE_WRITE_FLUSH(hw); | ||
461 | msleep(100); | ||
462 | } | ||
463 | |||
418 | /** | 464 | /** |
419 | * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser | 465 | * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser |
420 | * @hw: pointer to hardware structure | 466 | * @hw: pointer to hardware structure |
@@ -429,23 +475,11 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, | |||
429 | **/ | 475 | **/ |
430 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | 476 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) |
431 | { | 477 | { |
432 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | ||
433 | |||
434 | hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); | 478 | hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); |
435 | 479 | ||
436 | if (hw->mac.autotry_restart) { | 480 | if (hw->mac.autotry_restart) { |
437 | /* Disable tx laser; allow 100us to go dark per spec */ | 481 | ixgbe_disable_tx_laser_multispeed_fiber(hw); |
438 | esdp_reg |= IXGBE_ESDP_SDP3; | 482 | ixgbe_enable_tx_laser_multispeed_fiber(hw); |
439 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
440 | IXGBE_WRITE_FLUSH(hw); | ||
441 | udelay(100); | ||
442 | |||
443 | /* Enable tx laser; allow 100ms to light up */ | ||
444 | esdp_reg &= ~IXGBE_ESDP_SDP3; | ||
445 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
446 | IXGBE_WRITE_FLUSH(hw); | ||
447 | msleep(100); | ||
448 | |||
449 | hw->mac.autotry_restart = false; | 483 | hw->mac.autotry_restart = false; |
450 | } | 484 | } |
451 | } | 485 | } |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 8f677cb86290..6c00ee493a3b 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -2982,6 +2982,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2982 | else | 2982 | else |
2983 | ixgbe_configure_msi_and_legacy(adapter); | 2983 | ixgbe_configure_msi_and_legacy(adapter); |
2984 | 2984 | ||
2985 | /* enable the optics */ | ||
2986 | if (hw->phy.multispeed_fiber) | ||
2987 | hw->mac.ops.enable_tx_laser(hw); | ||
2988 | |||
2985 | clear_bit(__IXGBE_DOWN, &adapter->state); | 2989 | clear_bit(__IXGBE_DOWN, &adapter->state); |
2986 | ixgbe_napi_enable_all(adapter); | 2990 | ixgbe_napi_enable_all(adapter); |
2987 | 2991 | ||
@@ -3243,6 +3247,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3243 | /* signal that we are down to the interrupt handler */ | 3247 | /* signal that we are down to the interrupt handler */ |
3244 | set_bit(__IXGBE_DOWN, &adapter->state); | 3248 | set_bit(__IXGBE_DOWN, &adapter->state); |
3245 | 3249 | ||
3250 | /* power down the optics */ | ||
3251 | if (hw->phy.multispeed_fiber) | ||
3252 | hw->mac.ops.disable_tx_laser(hw); | ||
3253 | |||
3246 | /* disable receive for all VFs and wait one second */ | 3254 | /* disable receive for all VFs and wait one second */ |
3247 | if (adapter->num_vfs) { | 3255 | if (adapter->num_vfs) { |
3248 | /* ping all the active vfs to let them know we are going down */ | 3256 | /* ping all the active vfs to let them know we are going down */ |
@@ -6253,6 +6261,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6253 | goto err_eeprom; | 6261 | goto err_eeprom; |
6254 | } | 6262 | } |
6255 | 6263 | ||
6264 | /* power down the optics */ | ||
6265 | if (hw->phy.multispeed_fiber) | ||
6266 | hw->mac.ops.disable_tx_laser(hw); | ||
6267 | |||
6256 | init_timer(&adapter->watchdog_timer); | 6268 | init_timer(&adapter->watchdog_timer); |
6257 | adapter->watchdog_timer.function = &ixgbe_watchdog; | 6269 | adapter->watchdog_timer.function = &ixgbe_watchdog; |
6258 | adapter->watchdog_timer.data = (unsigned long)adapter; | 6270 | adapter->watchdog_timer.data = (unsigned long)adapter; |
@@ -6400,16 +6412,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
6400 | del_timer_sync(&adapter->sfp_timer); | 6412 | del_timer_sync(&adapter->sfp_timer); |
6401 | cancel_work_sync(&adapter->watchdog_task); | 6413 | cancel_work_sync(&adapter->watchdog_task); |
6402 | cancel_work_sync(&adapter->sfp_task); | 6414 | cancel_work_sync(&adapter->sfp_task); |
6403 | if (adapter->hw.phy.multispeed_fiber) { | ||
6404 | struct ixgbe_hw *hw = &adapter->hw; | ||
6405 | /* | ||
6406 | * Restart clause 37 autoneg, disable and re-enable | ||
6407 | * the tx laser, to clear & alert the link partner | ||
6408 | * that it needs to restart autotry | ||
6409 | */ | ||
6410 | hw->mac.autotry_restart = true; | ||
6411 | hw->mac.ops.flap_tx_laser(hw); | ||
6412 | } | ||
6413 | cancel_work_sync(&adapter->multispeed_fiber_task); | 6415 | cancel_work_sync(&adapter->multispeed_fiber_task); |
6414 | cancel_work_sync(&adapter->sfp_config_module_task); | 6416 | cancel_work_sync(&adapter->sfp_config_module_task); |
6415 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | 6417 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 4ec6dc1a5b75..534affcc38ca 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -2398,6 +2398,8 @@ struct ixgbe_mac_operations { | |||
2398 | s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); | 2398 | s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); |
2399 | 2399 | ||
2400 | /* Link */ | 2400 | /* Link */ |
2401 | void (*disable_tx_laser)(struct ixgbe_hw *); | ||
2402 | void (*enable_tx_laser)(struct ixgbe_hw *); | ||
2401 | void (*flap_tx_laser)(struct ixgbe_hw *); | 2403 | void (*flap_tx_laser)(struct ixgbe_hw *); |
2402 | s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); | 2404 | s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); |
2403 | s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); | 2405 | s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); |
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index 13cc1ca261d9..9e9f9b349766 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c | |||
@@ -722,12 +722,14 @@ static void ks8851_tx_work(struct work_struct *work) | |||
722 | txb = skb_dequeue(&ks->txq); | 722 | txb = skb_dequeue(&ks->txq); |
723 | last = skb_queue_empty(&ks->txq); | 723 | last = skb_queue_empty(&ks->txq); |
724 | 724 | ||
725 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); | 725 | if (txb != NULL) { |
726 | ks8851_wrpkt(ks, txb, last); | 726 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); |
727 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); | 727 | ks8851_wrpkt(ks, txb, last); |
728 | ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE); | 728 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); |
729 | ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE); | ||
729 | 730 | ||
730 | ks8851_done_tx(ks, txb); | 731 | ks8851_done_tx(ks, txb); |
732 | } | ||
731 | } | 733 | } |
732 | 734 | ||
733 | mutex_unlock(&ks->lock); | 735 | mutex_unlock(&ks->lock); |
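ks8851_tx_work() dequeues from the tx queue inside a loop, and skb_dequeue() returns NULL as soon as the queue is empty, so the body has to be guarded as shown above. The general drain pattern, sketched outside the driver:

#include <linux/skbuff.h>

/* Drain a queue of pending tx packets; skb_dequeue() returns NULL once the
 * queue is empty, so every packet is checked before it is touched. */
static void drain_txq(struct sk_buff_head *txq)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(txq)) != NULL) {
                /* hand 'skb' to the hardware here; freed as a placeholder */
                dev_kfree_skb(skb);
        }
}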
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 471887742b02..ecde0876a785 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -1690,7 +1690,7 @@ myri10ge_set_pauseparam(struct net_device *netdev, | |||
1690 | if (pause->tx_pause != mgp->pause) | 1690 | if (pause->tx_pause != mgp->pause) |
1691 | return myri10ge_change_pause(mgp, pause->tx_pause); | 1691 | return myri10ge_change_pause(mgp, pause->tx_pause); |
1692 | if (pause->rx_pause != mgp->pause) | 1692 | if (pause->rx_pause != mgp->pause) |
1693 | return myri10ge_change_pause(mgp, pause->tx_pause); | 1693 | return myri10ge_change_pause(mgp, pause->rx_pause); |
1694 | if (pause->autoneg != 0) | 1694 | if (pause->autoneg != 0) |
1695 | return -EINVAL; | 1695 | return -EINVAL; |
1696 | return 0; | 1696 | return 0; |
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index 3d1d3a7b7ed3..757f87bb1db3 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c | |||
@@ -781,8 +781,13 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb, | |||
781 | inw(ioaddr + EL3_STATUS)); | 781 | inw(ioaddr + EL3_STATUS)); |
782 | 782 | ||
783 | spin_lock_irqsave(&lp->window_lock, flags); | 783 | spin_lock_irqsave(&lp->window_lock, flags); |
784 | |||
785 | dev->stats.tx_bytes += skb->len; | ||
786 | |||
787 | /* Put out the doubleword header... */ | ||
784 | outw(skb->len, ioaddr + TX_FIFO); | 788 | outw(skb->len, ioaddr + TX_FIFO); |
785 | outw(0, ioaddr + TX_FIFO); | 789 | outw(0, ioaddr + TX_FIFO); |
790 | /* ... and the packet rounded to a doubleword. */ | ||
786 | outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2); | 791 | outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2); |
787 | 792 | ||
788 | dev->trans_start = jiffies; | 793 | dev->trans_start = jiffies; |
@@ -1021,8 +1026,6 @@ static void update_stats(struct net_device *dev) | |||
1021 | /* BadSSD */ inb(ioaddr + 12); | 1026 | /* BadSSD */ inb(ioaddr + 12); |
1022 | up = inb(ioaddr + 13); | 1027 | up = inb(ioaddr + 13); |
1023 | 1028 | ||
1024 | dev->stats.tx_bytes += tx + ((up & 0xf0) << 12); | ||
1025 | |||
1026 | EL3WINDOW(1); | 1029 | EL3WINDOW(1); |
1027 | } | 1030 | } |
1028 | 1031 | ||
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index ff7eb9116b6a..ccc553782a0d 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -1608,9 +1608,12 @@ static void set_rx_mode(struct net_device *dev) | |||
1608 | { | 1608 | { |
1609 | unsigned int ioaddr = dev->base_addr; | 1609 | unsigned int ioaddr = dev->base_addr; |
1610 | struct smc_private *smc = netdev_priv(dev); | 1610 | struct smc_private *smc = netdev_priv(dev); |
1611 | u_int multicast_table[ 2 ] = { 0, }; | 1611 | unsigned char multicast_table[8]; |
1612 | unsigned long flags; | 1612 | unsigned long flags; |
1613 | u_short rx_cfg_setting; | 1613 | u_short rx_cfg_setting; |
1614 | int i; | ||
1615 | |||
1616 | memset(multicast_table, 0, sizeof(multicast_table)); | ||
1614 | 1617 | ||
1615 | if (dev->flags & IFF_PROMISC) { | 1618 | if (dev->flags & IFF_PROMISC) { |
1616 | rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; | 1619 | rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; |
@@ -1622,10 +1625,6 @@ static void set_rx_mode(struct net_device *dev) | |||
1622 | 1625 | ||
1623 | netdev_for_each_mc_addr(mc_addr, dev) { | 1626 | netdev_for_each_mc_addr(mc_addr, dev) { |
1624 | u_int position = ether_crc(6, mc_addr->dmi_addr); | 1627 | u_int position = ether_crc(6, mc_addr->dmi_addr); |
1625 | #ifndef final_version /* Verify multicast address. */ | ||
1626 | if ((mc_addr->dmi_addr[0] & 1) == 0) | ||
1627 | continue; | ||
1628 | #endif | ||
1629 | multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); | 1628 | multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); |
1630 | } | 1629 | } |
1631 | } | 1630 | } |
@@ -1635,8 +1634,8 @@ static void set_rx_mode(struct net_device *dev) | |||
1635 | /* Load MC table and Rx setting into the chip without interrupts. */ | 1634 | /* Load MC table and Rx setting into the chip without interrupts. */ |
1636 | spin_lock_irqsave(&smc->lock, flags); | 1635 | spin_lock_irqsave(&smc->lock, flags); |
1637 | SMC_SELECT_BANK(3); | 1636 | SMC_SELECT_BANK(3); |
1638 | outl(multicast_table[0], ioaddr + MULTICAST0); | 1637 | for (i = 0; i < 8; i++) |
1639 | outl(multicast_table[1], ioaddr + MULTICAST4); | 1638 | outb(multicast_table[i], ioaddr + MULTICAST0 + i); |
1640 | SMC_SELECT_BANK(0); | 1639 | SMC_SELECT_BANK(0); |
1641 | outw(rx_cfg_setting, ioaddr + RCR); | 1640 | outw(rx_cfg_setting, ioaddr + RCR); |
1642 | SMC_SELECT_BANK(2); | 1641 | SMC_SELECT_BANK(2); |
@@ -1805,23 +1804,30 @@ static void media_check(u_long arg) | |||
1805 | SMC_SELECT_BANK(1); | 1804 | SMC_SELECT_BANK(1); |
1806 | media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; | 1805 | media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; |
1807 | 1806 | ||
1807 | SMC_SELECT_BANK(saved_bank); | ||
1808 | spin_unlock_irqrestore(&smc->lock, flags); | ||
1809 | |||
1808 | /* Check for pending interrupt with watchdog flag set: with | 1810 | /* Check for pending interrupt with watchdog flag set: with |
1809 | this, we can limp along even if the interrupt is blocked */ | 1811 | this, we can limp along even if the interrupt is blocked */ |
1810 | if (smc->watchdog++ && ((i>>8) & i)) { | 1812 | if (smc->watchdog++ && ((i>>8) & i)) { |
1811 | if (!smc->fast_poll) | 1813 | if (!smc->fast_poll) |
1812 | printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); | 1814 | printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); |
1815 | local_irq_save(flags); | ||
1813 | smc_interrupt(dev->irq, dev); | 1816 | smc_interrupt(dev->irq, dev); |
1817 | local_irq_restore(flags); | ||
1814 | smc->fast_poll = HZ; | 1818 | smc->fast_poll = HZ; |
1815 | } | 1819 | } |
1816 | if (smc->fast_poll) { | 1820 | if (smc->fast_poll) { |
1817 | smc->fast_poll--; | 1821 | smc->fast_poll--; |
1818 | smc->media.expires = jiffies + HZ/100; | 1822 | smc->media.expires = jiffies + HZ/100; |
1819 | add_timer(&smc->media); | 1823 | add_timer(&smc->media); |
1820 | SMC_SELECT_BANK(saved_bank); | ||
1821 | spin_unlock_irqrestore(&smc->lock, flags); | ||
1822 | return; | 1824 | return; |
1823 | } | 1825 | } |
1824 | 1826 | ||
1827 | spin_lock_irqsave(&smc->lock, flags); | ||
1828 | |||
1829 | saved_bank = inw(ioaddr + BANK_SELECT); | ||
1830 | |||
1825 | if (smc->cfg & CFG_MII_SELECT) { | 1831 | if (smc->cfg & CFG_MII_SELECT) { |
1826 | if (smc->mii_if.phy_id < 0) | 1832 | if (smc->mii_if.phy_id < 0) |
1827 | goto reschedule; | 1833 | goto reschedule; |
@@ -1979,15 +1985,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
1979 | unsigned int ioaddr = dev->base_addr; | 1985 | unsigned int ioaddr = dev->base_addr; |
1980 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 1986 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
1981 | int ret; | 1987 | int ret; |
1988 | unsigned long flags; | ||
1982 | 1989 | ||
1983 | spin_lock_irq(&smc->lock); | 1990 | spin_lock_irqsave(&smc->lock, flags); |
1984 | SMC_SELECT_BANK(3); | 1991 | SMC_SELECT_BANK(3); |
1985 | if (smc->cfg & CFG_MII_SELECT) | 1992 | if (smc->cfg & CFG_MII_SELECT) |
1986 | ret = mii_ethtool_gset(&smc->mii_if, ecmd); | 1993 | ret = mii_ethtool_gset(&smc->mii_if, ecmd); |
1987 | else | 1994 | else |
1988 | ret = smc_netdev_get_ecmd(dev, ecmd); | 1995 | ret = smc_netdev_get_ecmd(dev, ecmd); |
1989 | SMC_SELECT_BANK(saved_bank); | 1996 | SMC_SELECT_BANK(saved_bank); |
1990 | spin_unlock_irq(&smc->lock); | 1997 | spin_unlock_irqrestore(&smc->lock, flags); |
1991 | return ret; | 1998 | return ret; |
1992 | } | 1999 | } |
1993 | 2000 | ||
@@ -1997,15 +2004,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
1997 | unsigned int ioaddr = dev->base_addr; | 2004 | unsigned int ioaddr = dev->base_addr; |
1998 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 2005 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
1999 | int ret; | 2006 | int ret; |
2007 | unsigned long flags; | ||
2000 | 2008 | ||
2001 | spin_lock_irq(&smc->lock); | 2009 | spin_lock_irqsave(&smc->lock, flags); |
2002 | SMC_SELECT_BANK(3); | 2010 | SMC_SELECT_BANK(3); |
2003 | if (smc->cfg & CFG_MII_SELECT) | 2011 | if (smc->cfg & CFG_MII_SELECT) |
2004 | ret = mii_ethtool_sset(&smc->mii_if, ecmd); | 2012 | ret = mii_ethtool_sset(&smc->mii_if, ecmd); |
2005 | else | 2013 | else |
2006 | ret = smc_netdev_set_ecmd(dev, ecmd); | 2014 | ret = smc_netdev_set_ecmd(dev, ecmd); |
2007 | SMC_SELECT_BANK(saved_bank); | 2015 | SMC_SELECT_BANK(saved_bank); |
2008 | spin_unlock_irq(&smc->lock); | 2016 | spin_unlock_irqrestore(&smc->lock, flags); |
2009 | return ret; | 2017 | return ret; |
2010 | } | 2018 | } |
2011 | 2019 | ||
@@ -2015,12 +2023,13 @@ static u32 smc_get_link(struct net_device *dev) | |||
2015 | unsigned int ioaddr = dev->base_addr; | 2023 | unsigned int ioaddr = dev->base_addr; |
2016 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 2024 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
2017 | u32 ret; | 2025 | u32 ret; |
2026 | unsigned long flags; | ||
2018 | 2027 | ||
2019 | spin_lock_irq(&smc->lock); | 2028 | spin_lock_irqsave(&smc->lock, flags); |
2020 | SMC_SELECT_BANK(3); | 2029 | SMC_SELECT_BANK(3); |
2021 | ret = smc_link_ok(dev); | 2030 | ret = smc_link_ok(dev); |
2022 | SMC_SELECT_BANK(saved_bank); | 2031 | SMC_SELECT_BANK(saved_bank); |
2023 | spin_unlock_irq(&smc->lock); | 2032 | spin_unlock_irqrestore(&smc->lock, flags); |
2024 | return ret; | 2033 | return ret; |
2025 | } | 2034 | } |
2026 | 2035 | ||
@@ -2057,16 +2066,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) | |||
2057 | int rc = 0; | 2066 | int rc = 0; |
2058 | u16 saved_bank; | 2067 | u16 saved_bank; |
2059 | unsigned int ioaddr = dev->base_addr; | 2068 | unsigned int ioaddr = dev->base_addr; |
2069 | unsigned long flags; | ||
2060 | 2070 | ||
2061 | if (!netif_running(dev)) | 2071 | if (!netif_running(dev)) |
2062 | return -EINVAL; | 2072 | return -EINVAL; |
2063 | 2073 | ||
2064 | spin_lock_irq(&smc->lock); | 2074 | spin_lock_irqsave(&smc->lock, flags); |
2065 | saved_bank = inw(ioaddr + BANK_SELECT); | 2075 | saved_bank = inw(ioaddr + BANK_SELECT); |
2066 | SMC_SELECT_BANK(3); | 2076 | SMC_SELECT_BANK(3); |
2067 | rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); | 2077 | rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); |
2068 | SMC_SELECT_BANK(saved_bank); | 2078 | SMC_SELECT_BANK(saved_bank); |
2069 | spin_unlock_irq(&smc->lock); | 2079 | spin_unlock_irqrestore(&smc->lock, flags); |
2070 | return rc; | 2080 | return rc; |
2071 | } | 2081 | } |
2072 | 2082 | ||
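Two things change in smc91c92_cs above: the spinlock calls become irqsave/irqrestore variants so these paths are safe regardless of caller context, and the 64-bit multicast filter is built in an 8-byte array written with outb(), removing the host-endianness dependence of the old two-u32 layout. A hedged sketch of the hash construction (the bit-selection math is taken from the hunk; the surrounding function is illustrative):

#include <linux/crc32.h>        /* ether_crc() */
#include <linux/if_ether.h>     /* ETH_ALEN */
#include <linux/string.h>

/* Build a 64-bit multicast hash filter: bits 31..26 of the Ethernet CRC of
 * each address pick one of 64 bits, stored as 8 bytes. */
static void build_mc_hash(u8 table[8], const u8 *addrs, int count)
{
        int i;

        memset(table, 0, 8);
        for (i = 0; i < count; i++) {
                u32 position = ether_crc(ETH_ALEN, &addrs[i * ETH_ALEN]);

                table[position >> 29] |= 1 << ((position >> 26) & 7);
        }
}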
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c index a6ef266a2fe2..e73ba455aa20 100644 --- a/drivers/net/qlcnic/qlcnic_hw.c +++ b/drivers/net/qlcnic/qlcnic_hw.c | |||
@@ -431,6 +431,9 @@ void qlcnic_set_multi(struct net_device *netdev) | |||
431 | u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | 431 | u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; |
432 | u32 mode = VPORT_MISS_MODE_DROP; | 432 | u32 mode = VPORT_MISS_MODE_DROP; |
433 | 433 | ||
434 | if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) | ||
435 | return; | ||
436 | |||
434 | qlcnic_nic_add_mac(adapter, adapter->mac_addr); | 437 | qlcnic_nic_add_mac(adapter, adapter->mac_addr); |
435 | qlcnic_nic_add_mac(adapter, bcast_addr); | 438 | qlcnic_nic_add_mac(adapter, bcast_addr); |
436 | 439 | ||
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 43afdb6b25e6..0298d8c1dcb6 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -134,7 +134,7 @@ | |||
134 | #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) | 134 | #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) |
135 | #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) | 135 | #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) |
136 | #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ | 136 | #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ |
137 | #define MCAST_MAX 4 /* Max number multicast addresses to filter */ | 137 | #define MCAST_MAX 3 /* Max number multicast addresses to filter */ |
138 | 138 | ||
139 | /* Descriptor status */ | 139 | /* Descriptor status */ |
140 | #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ | 140 | #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ |
@@ -982,9 +982,6 @@ static void r6040_multicast_list(struct net_device *dev) | |||
982 | crc >>= 26; | 982 | crc >>= 26; |
983 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); | 983 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); |
984 | } | 984 | } |
985 | /* Write the index of the hash table */ | ||
986 | for (i = 0; i < 4; i++) | ||
987 | iowrite16(hash_table[i] << 14, ioaddr + MCR1); | ||
988 | /* Fill the MAC hash tables with their values */ | 985 | /* Fill the MAC hash tables with their values */ |
989 | iowrite16(hash_table[0], ioaddr + MAR0); | 986 | iowrite16(hash_table[0], ioaddr + MAR0); |
990 | iowrite16(hash_table[1], ioaddr + MAR1); | 987 | iowrite16(hash_table[1], ioaddr + MAR1); |
@@ -1000,9 +997,9 @@ static void r6040_multicast_list(struct net_device *dev) | |||
1000 | iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); | 997 | iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); |
1001 | iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); | 998 | iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); |
1002 | } else { | 999 | } else { |
1003 | iowrite16(0xffff, ioaddr + MID_0L + 8 * i); | 1000 | iowrite16(0xffff, ioaddr + MID_1L + 8 * i); |
1004 | iowrite16(0xffff, ioaddr + MID_0M + 8 * i); | 1001 | iowrite16(0xffff, ioaddr + MID_1M + 8 * i); |
1005 | iowrite16(0xffff, ioaddr + MID_0H + 8 * i); | 1002 | iowrite16(0xffff, ioaddr + MID_1H + 8 * i); |
1006 | } | 1003 | } |
1007 | i++; | 1004 | i++; |
1008 | } | 1005 | } |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index dbb1f5a1824c..4748c21eb72e 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -2759,6 +2759,7 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, | |||
2759 | { | 2759 | { |
2760 | iounmap(ioaddr); | 2760 | iounmap(ioaddr); |
2761 | pci_release_regions(pdev); | 2761 | pci_release_regions(pdev); |
2762 | pci_clear_mwi(pdev); | ||
2762 | pci_disable_device(pdev); | 2763 | pci_disable_device(pdev); |
2763 | free_netdev(dev); | 2764 | free_netdev(dev); |
2764 | } | 2765 | } |
@@ -2825,8 +2826,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) | |||
2825 | spin_lock_irq(&tp->lock); | 2826 | spin_lock_irq(&tp->lock); |
2826 | 2827 | ||
2827 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 2828 | RTL_W8(Cfg9346, Cfg9346_Unlock); |
2829 | |||
2828 | RTL_W32(MAC4, high); | 2830 | RTL_W32(MAC4, high); |
2831 | RTL_R32(MAC4); | ||
2832 | |||
2829 | RTL_W32(MAC0, low); | 2833 | RTL_W32(MAC0, low); |
2834 | RTL_R32(MAC0); | ||
2835 | |||
2830 | RTL_W8(Cfg9346, Cfg9346_Lock); | 2836 | RTL_W8(Cfg9346, Cfg9346_Lock); |
2831 | 2837 | ||
2832 | spin_unlock_irq(&tp->lock); | 2838 | spin_unlock_irq(&tp->lock); |
@@ -3014,9 +3020,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3014 | goto err_out_free_dev_1; | 3020 | goto err_out_free_dev_1; |
3015 | } | 3021 | } |
3016 | 3022 | ||
3017 | rc = pci_set_mwi(pdev); | 3023 | if (pci_set_mwi(pdev) < 0) |
3018 | if (rc < 0) | 3024 | netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); |
3019 | goto err_out_disable_2; | ||
3020 | 3025 | ||
3021 | /* make sure PCI base addr 1 is MMIO */ | 3026 | /* make sure PCI base addr 1 is MMIO */ |
3022 | if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { | 3027 | if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { |
@@ -3024,7 +3029,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3024 | "region #%d not an MMIO resource, aborting\n", | 3029 | "region #%d not an MMIO resource, aborting\n", |
3025 | region); | 3030 | region); |
3026 | rc = -ENODEV; | 3031 | rc = -ENODEV; |
3027 | goto err_out_mwi_3; | 3032 | goto err_out_mwi_2; |
3028 | } | 3033 | } |
3029 | 3034 | ||
3030 | /* check for weird/broken PCI region reporting */ | 3035 | /* check for weird/broken PCI region reporting */ |
@@ -3032,13 +3037,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3032 | netif_err(tp, probe, dev, | 3037 | netif_err(tp, probe, dev, |
3033 | "Invalid PCI region size(s), aborting\n"); | 3038 | "Invalid PCI region size(s), aborting\n"); |
3034 | rc = -ENODEV; | 3039 | rc = -ENODEV; |
3035 | goto err_out_mwi_3; | 3040 | goto err_out_mwi_2; |
3036 | } | 3041 | } |
3037 | 3042 | ||
3038 | rc = pci_request_regions(pdev, MODULENAME); | 3043 | rc = pci_request_regions(pdev, MODULENAME); |
3039 | if (rc < 0) { | 3044 | if (rc < 0) { |
3040 | netif_err(tp, probe, dev, "could not request regions\n"); | 3045 | netif_err(tp, probe, dev, "could not request regions\n"); |
3041 | goto err_out_mwi_3; | 3046 | goto err_out_mwi_2; |
3042 | } | 3047 | } |
3043 | 3048 | ||
3044 | tp->cp_cmd = PCIMulRW | RxChkSum; | 3049 | tp->cp_cmd = PCIMulRW | RxChkSum; |
@@ -3051,7 +3056,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3051 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 3056 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
3052 | if (rc < 0) { | 3057 | if (rc < 0) { |
3053 | netif_err(tp, probe, dev, "DMA configuration failed\n"); | 3058 | netif_err(tp, probe, dev, "DMA configuration failed\n"); |
3054 | goto err_out_free_res_4; | 3059 | goto err_out_free_res_3; |
3055 | } | 3060 | } |
3056 | } | 3061 | } |
3057 | 3062 | ||
@@ -3060,7 +3065,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3060 | if (!ioaddr) { | 3065 | if (!ioaddr) { |
3061 | netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); | 3066 | netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); |
3062 | rc = -EIO; | 3067 | rc = -EIO; |
3063 | goto err_out_free_res_4; | 3068 | goto err_out_free_res_3; |
3064 | } | 3069 | } |
3065 | 3070 | ||
3066 | tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 3071 | tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
@@ -3102,7 +3107,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3102 | if (i == ARRAY_SIZE(rtl_chip_info)) { | 3107 | if (i == ARRAY_SIZE(rtl_chip_info)) { |
3103 | dev_err(&pdev->dev, | 3108 | dev_err(&pdev->dev, |
3104 | "driver bug, MAC version not found in rtl_chip_info\n"); | 3109 | "driver bug, MAC version not found in rtl_chip_info\n"); |
3105 | goto err_out_msi_5; | 3110 | goto err_out_msi_4; |
3106 | } | 3111 | } |
3107 | tp->chipset = i; | 3112 | tp->chipset = i; |
3108 | 3113 | ||
@@ -3167,7 +3172,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3167 | 3172 | ||
3168 | rc = register_netdev(dev); | 3173 | rc = register_netdev(dev); |
3169 | if (rc < 0) | 3174 | if (rc < 0) |
3170 | goto err_out_msi_5; | 3175 | goto err_out_msi_4; |
3171 | 3176 | ||
3172 | pci_set_drvdata(pdev, dev); | 3177 | pci_set_drvdata(pdev, dev); |
3173 | 3178 | ||
@@ -3190,14 +3195,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3190 | out: | 3195 | out: |
3191 | return rc; | 3196 | return rc; |
3192 | 3197 | ||
3193 | err_out_msi_5: | 3198 | err_out_msi_4: |
3194 | rtl_disable_msi(pdev, tp); | 3199 | rtl_disable_msi(pdev, tp); |
3195 | iounmap(ioaddr); | 3200 | iounmap(ioaddr); |
3196 | err_out_free_res_4: | 3201 | err_out_free_res_3: |
3197 | pci_release_regions(pdev); | 3202 | pci_release_regions(pdev); |
3198 | err_out_mwi_3: | 3203 | err_out_mwi_2: |
3199 | pci_clear_mwi(pdev); | 3204 | pci_clear_mwi(pdev); |
3200 | err_out_disable_2: | ||
3201 | pci_disable_device(pdev); | 3205 | pci_disable_device(pdev); |
3202 | err_out_free_dev_1: | 3206 | err_out_free_dev_1: |
3203 | free_netdev(dev); | 3207 | free_netdev(dev); |
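Two independent r8169 fixes are folded together above: rtl_rar_set() now reads each MAC register back after writing it, forcing the posted MMIO writes to reach the chip in order, and a pci_set_mwi() failure is demoted from a fatal probe error to an informational message (with pci_clear_mwi() moved into the release path and the error labels renumbered accordingly). A generic sketch of the write-then-read-back flush, using the plain readl/writel accessors rather than the driver's RTL_* macros:

#include <linux/io.h>

/* Program a split 64-bit value into two 32-bit registers, reading each one
 * back so the posted writes are flushed to the device in order. */
static void write_pair_flushed(void __iomem *ioaddr,
                               unsigned int hi_off, u32 hi,
                               unsigned int lo_off, u32 lo)
{
        writel(hi, ioaddr + hi_off);
        readl(ioaddr + hi_off);         /* flush posted write */

        writel(lo, ioaddr + lo_off);
        readl(ioaddr + lo_off);         /* flush posted write */
}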
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 6486657c47b8..649a264d6a81 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -1861,6 +1861,7 @@ out: | |||
1861 | } | 1861 | } |
1862 | 1862 | ||
1863 | if (disabled) { | 1863 | if (disabled) { |
1864 | dev_close(efx->net_dev); | ||
1864 | EFX_ERR(efx, "has been disabled\n"); | 1865 | EFX_ERR(efx, "has been disabled\n"); |
1865 | efx->state = STATE_DISABLED; | 1866 | efx->state = STATE_DISABLED; |
1866 | } else { | 1867 | } else { |
@@ -1884,8 +1885,7 @@ static void efx_reset_work(struct work_struct *data) | |||
1884 | } | 1885 | } |
1885 | 1886 | ||
1886 | rtnl_lock(); | 1887 | rtnl_lock(); |
1887 | if (efx_reset(efx, efx->reset_pending)) | 1888 | (void)efx_reset(efx, efx->reset_pending); |
1888 | dev_close(efx->net_dev); | ||
1889 | rtnl_unlock(); | 1889 | rtnl_unlock(); |
1890 | } | 1890 | } |
1891 | 1891 | ||
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index d294d66fd600..08278e7302b3 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -1320,7 +1320,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) | |||
1320 | 1320 | ||
1321 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); | 1321 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); |
1322 | 1322 | ||
1323 | falcon_probe_board(efx, board_rev); | 1323 | rc = falcon_probe_board(efx, board_rev); |
1324 | if (rc) | ||
1325 | goto fail2; | ||
1324 | 1326 | ||
1325 | kfree(nvconfig); | 1327 | kfree(nvconfig); |
1326 | return 0; | 1328 | return 0; |
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c index 5712fddd72f2..c7a933a3292e 100644 --- a/drivers/net/sfc/falcon_boards.c +++ b/drivers/net/sfc/falcon_boards.c | |||
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = { | |||
728 | }, | 728 | }, |
729 | }; | 729 | }; |
730 | 730 | ||
731 | static const struct falcon_board_type falcon_dummy_board = { | 731 | int falcon_probe_board(struct efx_nic *efx, u16 revision_info) |
732 | .init = efx_port_dummy_op_int, | ||
733 | .init_phy = efx_port_dummy_op_void, | ||
734 | .fini = efx_port_dummy_op_void, | ||
735 | .set_id_led = efx_port_dummy_op_set_id_led, | ||
736 | .monitor = efx_port_dummy_op_int, | ||
737 | }; | ||
738 | |||
739 | void falcon_probe_board(struct efx_nic *efx, u16 revision_info) | ||
740 | { | 732 | { |
741 | struct falcon_board *board = falcon_board(efx); | 733 | struct falcon_board *board = falcon_board(efx); |
742 | u8 type_id = FALCON_BOARD_TYPE(revision_info); | 734 | u8 type_id = FALCON_BOARD_TYPE(revision_info); |
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info) | |||
754 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) | 746 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) |
755 | ? board->type->ref_model : board->type->gen_type, | 747 | ? board->type->ref_model : board->type->gen_type, |
756 | 'A' + board->major, board->minor); | 748 | 'A' + board->major, board->minor); |
749 | return 0; | ||
757 | } else { | 750 | } else { |
758 | EFX_ERR(efx, "unknown board type %d\n", type_id); | 751 | EFX_ERR(efx, "unknown board type %d\n", type_id); |
759 | board->type = &falcon_dummy_board; | 752 | return -ENODEV; |
760 | } | 753 | } |
761 | } | 754 | } |
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index 9351c0331a47..3166bafdfbef 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h | |||
@@ -156,7 +156,7 @@ extern struct efx_nic_type siena_a0_nic_type; | |||
156 | ************************************************************************** | 156 | ************************************************************************** |
157 | */ | 157 | */ |
158 | 158 | ||
159 | extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info); | 159 | extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); |
160 | 160 | ||
161 | /* TX data path */ | 161 | /* TX data path */ |
162 | extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); | 162 | extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); |
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index 38dcc42c4f79..e0c46f59d1f8 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c | |||
@@ -456,8 +456,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) | |||
456 | 456 | ||
457 | static void siena_update_nic_stats(struct efx_nic *efx) | 457 | static void siena_update_nic_stats(struct efx_nic *efx) |
458 | { | 458 | { |
459 | while (siena_try_update_nic_stats(efx) == -EAGAIN) | 459 | int retry; |
460 | cpu_relax(); | 460 | |
461 | /* If we're unlucky enough to read statistics during the DMA, wait | ||
462 | * up to 10ms for it to finish (typically takes <500us) */ | ||
463 | for (retry = 0; retry < 100; ++retry) { | ||
464 | if (siena_try_update_nic_stats(efx) == 0) | ||
465 | return; | ||
466 | udelay(100); | ||
467 | } | ||
468 | |||
469 | /* Use the old values instead */ | ||
461 | } | 470 | } |
462 | 471 | ||
463 | static void siena_start_nic_stats(struct efx_nic *efx) | 472 | static void siena_start_nic_stats(struct efx_nic *efx) |
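The unbounded cpu_relax() spin in siena_update_nic_stats() becomes a bounded poll: up to 100 attempts spaced 100 us apart (roughly 10 ms, against a typical completion under 500 us), falling back to the previously read values if the statistics DMA never finishes. The bounded-poll idiom in isolation:

#include <linux/delay.h>
#include <linux/errno.h>

/* Call try_once() until it reports success or ~10 ms have elapsed.
 * Returns 0 on success, -ETIMEDOUT if every attempt failed. */
static int poll_until_done(int (*try_once)(void *ctx), void *ctx)
{
        int retry;

        for (retry = 0; retry < 100; retry++) {
                if (try_once(ctx) == 0)
                        return 0;
                udelay(100);    /* 100 us between attempts */
        }
        return -ETIMEDOUT;
}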
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index a214a1627e8b..4111a85ec80e 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
@@ -1686,7 +1686,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) | |||
1686 | } | 1686 | } |
1687 | pr_info("done!\n"); | 1687 | pr_info("done!\n"); |
1688 | 1688 | ||
1689 | if (!request_mem_region(res->start, (res->end - res->start), | 1689 | if (!request_mem_region(res->start, resource_size(res), |
1690 | pdev->name)) { | 1690 | pdev->name)) { |
1691 | pr_err("%s: ERROR: memory allocation failed" | 1691 | pr_err("%s: ERROR: memory allocation failed" |
1692 | "cannot get the I/O addr 0x%x\n", | 1692 | "cannot get the I/O addr 0x%x\n", |
@@ -1695,9 +1695,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev) | |||
1695 | goto out; | 1695 | goto out; |
1696 | } | 1696 | } |
1697 | 1697 | ||
1698 | addr = ioremap(res->start, (res->end - res->start)); | 1698 | addr = ioremap(res->start, resource_size(res)); |
1699 | if (!addr) { | 1699 | if (!addr) { |
1700 | pr_err("%s: ERROR: memory mapping failed \n", __func__); | 1700 | pr_err("%s: ERROR: memory mapping failed\n", __func__); |
1701 | ret = -ENOMEM; | 1701 | ret = -ENOMEM; |
1702 | goto out; | 1702 | goto out; |
1703 | } | 1703 | } |
@@ -1775,7 +1775,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) | |||
1775 | out: | 1775 | out: |
1776 | if (ret < 0) { | 1776 | if (ret < 0) { |
1777 | platform_set_drvdata(pdev, NULL); | 1777 | platform_set_drvdata(pdev, NULL); |
1778 | release_mem_region(res->start, (res->end - res->start)); | 1778 | release_mem_region(res->start, resource_size(res)); |
1779 | if (addr != NULL) | 1779 | if (addr != NULL) |
1780 | iounmap(addr); | 1780 | iounmap(addr); |
1781 | } | 1781 | } |
@@ -1813,7 +1813,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev) | |||
1813 | 1813 | ||
1814 | iounmap((void *)ndev->base_addr); | 1814 | iounmap((void *)ndev->base_addr); |
1815 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1815 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1816 | release_mem_region(res->start, (res->end - res->start)); | 1816 | release_mem_region(res->start, resource_size(res)); |
1817 | 1817 | ||
1818 | free_netdev(ndev); | 1818 | free_netdev(ndev); |
1819 | 1819 | ||
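The stmmac conversions to resource_size() also fix an off-by-one: a struct resource covers [start, end] inclusive, so its size is end - start + 1, which is exactly what the helper in <linux/ioport.h> computes. For reference, the equivalent open-coded form:

#include <linux/ioport.h>

/* Same computation as the kernel's resource_size(): the range is inclusive
 * of both endpoints, hence the "+ 1" the old code was missing. */
static inline resource_size_t my_resource_size(const struct resource *res)
{
        return res->end - res->start + 1;
}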
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 22cf1c446de3..ecc41cffb470 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -8633,6 +8633,7 @@ static int tg3_test_msi(struct tg3 *tp) | |||
8633 | pci_disable_msi(tp->pdev); | 8633 | pci_disable_msi(tp->pdev); |
8634 | 8634 | ||
8635 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; | 8635 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; |
8636 | tp->napi[0].irq_vec = tp->pdev->irq; | ||
8636 | 8637 | ||
8637 | err = tg3_request_irq(tp, 0); | 8638 | err = tg3_request_irq(tp, 0); |
8638 | if (err) | 8639 | if (err) |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 96c39bddc78c..43265207d463 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -387,6 +387,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
387 | } | 387 | } |
388 | } | 388 | } |
389 | 389 | ||
390 | /* Orphan the skb - required as we might hang on to it | ||
391 | * for an indefinite time. */ | ||
392 | skb_orphan(skb); | ||
393 | |||
390 | /* Enqueue packet */ | 394 | /* Enqueue packet */ |
391 | skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); | 395 | skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); |
392 | dev->trans_start = jiffies; | 396 | dev->trans_start = jiffies; |
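tun queues the packet on its socket receive queue, where it may wait for userspace indefinitely; calling skb_orphan() first runs the skb's destructor and drops its socket reference, so a slow reader can no longer pin the sender's send-buffer accounting. In isolation:

#include <linux/skbuff.h>

/* Queue a packet that may sit unread for an arbitrary time.  Orphaning it
 * first releases the originating socket's reference and its send-buffer
 * accounting. */
static void queue_for_userspace(struct sk_buff_head *q, struct sk_buff *skb)
{
        skb_orphan(skb);        /* clears skb->sk after running its destructor */
        skb_queue_tail(q, skb);
}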
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index ba56ce4382d9..5d58abc224f4 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -385,4 +385,26 @@ config USB_CDC_PHONET | |||
385 | cellular modem, as found on most Nokia handsets with the | 385 | cellular modem, as found on most Nokia handsets with the |
386 | "PC suite" USB profile. | 386 | "PC suite" USB profile. |
387 | 387 | ||
388 | config USB_IPHETH | ||
389 | tristate "Apple iPhone USB Ethernet driver" | ||
390 | default n | ||
391 | ---help--- | ||
392 | Module used to share Internet connection (tethering) from your | ||
393 | iPhone (Original, 3G and 3GS) to your system. | ||
394 | Note that you need userspace libraries and programs that are able | ||
395 | to pair your device with your system and that understand the iPhone | ||
396 | protocol. | ||
397 | |||
398 | For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver | ||
399 | |||
400 | config USB_SIERRA_NET | ||
401 | tristate "USB-to-WWAN Driver for Sierra Wireless modems" | ||
402 | depends on USB_USBNET | ||
403 | default y | ||
404 | help | ||
405 | Choose this option if you have a Sierra Wireless USB-to-WWAN device. | ||
406 | |||
407 | To compile this driver as a module, choose M here: the | ||
408 | module will be called sierra_net. | ||
409 | |||
388 | endmenu | 410 | endmenu |
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index 82ea62955b56..b13a279663ba 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile | |||
@@ -23,4 +23,6 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o | |||
23 | obj-$(CONFIG_USB_USBNET) += usbnet.o | 23 | obj-$(CONFIG_USB_USBNET) += usbnet.o |
24 | obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o | 24 | obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o |
25 | obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o | 25 | obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o |
26 | obj-$(CONFIG_USB_IPHETH) += ipheth.o | ||
27 | obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o | ||
26 | 28 | ||
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index c8cdb7f30adc..3547cf13d219 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -431,6 +431,7 @@ static const struct driver_info mbm_info = { | |||
431 | .bind = cdc_bind, | 431 | .bind = cdc_bind, |
432 | .unbind = usbnet_cdc_unbind, | 432 | .unbind = usbnet_cdc_unbind, |
433 | .status = cdc_status, | 433 | .status = cdc_status, |
434 | .manage_power = cdc_manage_power, | ||
434 | }; | 435 | }; |
435 | 436 | ||
436 | /*-------------------------------------------------------------------------*/ | 437 | /*-------------------------------------------------------------------------*/ |
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c new file mode 100644 index 000000000000..418825d26f90 --- /dev/null +++ b/drivers/net/usb/ipheth.c | |||
@@ -0,0 +1,569 @@ | |||
1 | /* | ||
2 | * ipheth.c - Apple iPhone USB Ethernet driver | ||
3 | * | ||
4 | * Copyright (c) 2009 Diego Giagio <diego@giagio.com> | ||
5 | * All rights reserved. | ||
6 | * | ||
7 | * Redistribution and use in source and binary forms, with or without | ||
8 | * modification, are permitted provided that the following conditions | ||
9 | * are met: | ||
10 | * 1. Redistributions of source code must retain the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer. | ||
12 | * 2. Redistributions in binary form must reproduce the above copyright | ||
13 | * notice, this list of conditions and the following disclaimer in the | ||
14 | * documentation and/or other materials provided with the distribution. | ||
15 | * 3. Neither the name of GIAGIO.COM nor the names of its contributors | ||
16 | * may be used to endorse or promote products derived from this software | ||
17 | * without specific prior written permission. | ||
18 | * | ||
19 | * Alternatively, provided that this notice is retained in full, this | ||
20 | * software may be distributed under the terms of the GNU General | ||
21 | * Public License ("GPL") version 2, in which case the provisions of the | ||
22 | * GPL apply INSTEAD OF those given above. | ||
23 | * | ||
24 | * The provided data structures and external interfaces from this code | ||
25 | * are not restricted to be used by modules with a GPL compatible license. | ||
26 | * | ||
27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
31 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
32 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
33 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
34 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
35 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
36 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
37 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH | ||
38 | * DAMAGE. | ||
39 | * | ||
40 | * | ||
41 | * Attention: iPhone device must be paired, otherwise it won't respond to our | ||
42 | * driver. For more info: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver | ||
43 | * | ||
44 | */ | ||
45 | |||
46 | #include <linux/kernel.h> | ||
47 | #include <linux/errno.h> | ||
48 | #include <linux/init.h> | ||
49 | #include <linux/slab.h> | ||
50 | #include <linux/module.h> | ||
51 | #include <linux/netdevice.h> | ||
52 | #include <linux/etherdevice.h> | ||
53 | #include <linux/ethtool.h> | ||
54 | #include <linux/usb.h> | ||
55 | #include <linux/workqueue.h> | ||
56 | |||
57 | #define USB_VENDOR_APPLE 0x05ac | ||
58 | #define USB_PRODUCT_IPHONE 0x1290 | ||
59 | #define USB_PRODUCT_IPHONE_3G 0x1292 | ||
60 | #define USB_PRODUCT_IPHONE_3GS 0x1294 | ||
61 | |||
62 | #define IPHETH_USBINTF_CLASS 255 | ||
63 | #define IPHETH_USBINTF_SUBCLASS 253 | ||
64 | #define IPHETH_USBINTF_PROTO 1 | ||
65 | |||
66 | #define IPHETH_BUF_SIZE 1516 | ||
67 | #define IPHETH_TX_TIMEOUT (5 * HZ) | ||
68 | |||
69 | #define IPHETH_INTFNUM 2 | ||
70 | #define IPHETH_ALT_INTFNUM 1 | ||
71 | |||
72 | #define IPHETH_CTRL_ENDP 0x00 | ||
73 | #define IPHETH_CTRL_BUF_SIZE 0x40 | ||
74 | #define IPHETH_CTRL_TIMEOUT (5 * HZ) | ||
75 | |||
76 | #define IPHETH_CMD_GET_MACADDR 0x00 | ||
77 | #define IPHETH_CMD_CARRIER_CHECK 0x45 | ||
78 | |||
79 | #define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ) | ||
80 | #define IPHETH_CARRIER_ON 0x04 | ||
81 | |||
82 | static struct usb_device_id ipheth_table[] = { | ||
83 | { USB_DEVICE_AND_INTERFACE_INFO( | ||
84 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE, | ||
85 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | ||
86 | IPHETH_USBINTF_PROTO) }, | ||
87 | { USB_DEVICE_AND_INTERFACE_INFO( | ||
88 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G, | ||
89 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | ||
90 | IPHETH_USBINTF_PROTO) }, | ||
91 | { USB_DEVICE_AND_INTERFACE_INFO( | ||
92 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS, | ||
93 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | ||
94 | IPHETH_USBINTF_PROTO) }, | ||
95 | { } | ||
96 | }; | ||
97 | MODULE_DEVICE_TABLE(usb, ipheth_table); | ||
98 | |||
99 | struct ipheth_device { | ||
100 | struct usb_device *udev; | ||
101 | struct usb_interface *intf; | ||
102 | struct net_device *net; | ||
103 | struct sk_buff *tx_skb; | ||
104 | struct urb *tx_urb; | ||
105 | struct urb *rx_urb; | ||
106 | unsigned char *tx_buf; | ||
107 | unsigned char *rx_buf; | ||
108 | unsigned char *ctrl_buf; | ||
109 | u8 bulk_in; | ||
110 | u8 bulk_out; | ||
111 | struct delayed_work carrier_work; | ||
112 | }; | ||
113 | |||
114 | static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags); | ||
115 | |||
116 | static int ipheth_alloc_urbs(struct ipheth_device *iphone) | ||
117 | { | ||
118 | struct urb *tx_urb = NULL; | ||
119 | struct urb *rx_urb = NULL; | ||
120 | u8 *tx_buf = NULL; | ||
121 | u8 *rx_buf = NULL; | ||
122 | |||
123 | tx_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
124 | if (tx_urb == NULL) | ||
125 | goto error_nomem; | ||
126 | |||
127 | rx_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
128 | if (rx_urb == NULL) | ||
129 | goto free_tx_urb; | ||
130 | |||
131 | tx_buf = usb_buffer_alloc(iphone->udev, | ||
132 | IPHETH_BUF_SIZE, | ||
133 | GFP_KERNEL, | ||
134 | &tx_urb->transfer_dma); | ||
135 | if (tx_buf == NULL) | ||
136 | goto free_rx_urb; | ||
137 | |||
138 | rx_buf = usb_buffer_alloc(iphone->udev, | ||
139 | IPHETH_BUF_SIZE, | ||
140 | GFP_KERNEL, | ||
141 | &rx_urb->transfer_dma); | ||
142 | if (rx_buf == NULL) | ||
143 | goto free_tx_buf; | ||
144 | |||
145 | |||
146 | iphone->tx_urb = tx_urb; | ||
147 | iphone->rx_urb = rx_urb; | ||
148 | iphone->tx_buf = tx_buf; | ||
149 | iphone->rx_buf = rx_buf; | ||
150 | return 0; | ||
151 | |||
152 | free_tx_buf: | ||
153 | usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf, | ||
154 | tx_urb->transfer_dma); | ||
155 | free_rx_urb: | ||
156 | usb_free_urb(rx_urb); | ||
157 | free_tx_urb: | ||
158 | usb_free_urb(tx_urb); | ||
159 | error_nomem: | ||
160 | return -ENOMEM; | ||
161 | } | ||
162 | |||
163 | static void ipheth_free_urbs(struct ipheth_device *iphone) | ||
164 | { | ||
165 | usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf, | ||
166 | iphone->rx_urb->transfer_dma); | ||
167 | usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf, | ||
168 | iphone->tx_urb->transfer_dma); | ||
169 | usb_free_urb(iphone->rx_urb); | ||
170 | usb_free_urb(iphone->tx_urb); | ||
171 | } | ||
172 | |||
173 | static void ipheth_kill_urbs(struct ipheth_device *dev) | ||
174 | { | ||
175 | usb_kill_urb(dev->tx_urb); | ||
176 | usb_kill_urb(dev->rx_urb); | ||
177 | } | ||
178 | |||
179 | static void ipheth_rcvbulk_callback(struct urb *urb) | ||
180 | { | ||
181 | struct ipheth_device *dev; | ||
182 | struct sk_buff *skb; | ||
183 | int status; | ||
184 | char *buf; | ||
185 | int len; | ||
186 | |||
187 | dev = urb->context; | ||
188 | if (dev == NULL) | ||
189 | return; | ||
190 | |||
191 | status = urb->status; | ||
192 | switch (status) { | ||
193 | case -ENOENT: | ||
194 | case -ECONNRESET: | ||
195 | case -ESHUTDOWN: | ||
196 | return; | ||
197 | case 0: | ||
198 | break; | ||
199 | default: | ||
200 | err("%s: urb status: %d", __func__, urb->status); | ||
201 | return; | ||
202 | } | ||
203 | |||
204 | len = urb->actual_length; | ||
205 | buf = urb->transfer_buffer; | ||
206 | |||
207 | skb = dev_alloc_skb(NET_IP_ALIGN + len); | ||
208 | if (!skb) { | ||
209 | err("%s: dev_alloc_skb: -ENOMEM", __func__); | ||
210 | dev->net->stats.rx_dropped++; | ||
211 | return; | ||
212 | } | ||
213 | |||
214 | skb_reserve(skb, NET_IP_ALIGN); | ||
215 | memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN); | ||
216 | skb->dev = dev->net; | ||
217 | skb->protocol = eth_type_trans(skb, dev->net); | ||
218 | |||
219 | dev->net->stats.rx_packets++; | ||
220 | dev->net->stats.rx_bytes += len; | ||
221 | |||
222 | netif_rx(skb); | ||
223 | ipheth_rx_submit(dev, GFP_ATOMIC); | ||
224 | } | ||
225 | |||
226 | static void ipheth_sndbulk_callback(struct urb *urb) | ||
227 | { | ||
228 | struct ipheth_device *dev; | ||
229 | |||
230 | dev = urb->context; | ||
231 | if (dev == NULL) | ||
232 | return; | ||
233 | |||
234 | if (urb->status != 0 && | ||
235 | urb->status != -ENOENT && | ||
236 | urb->status != -ECONNRESET && | ||
237 | urb->status != -ESHUTDOWN) | ||
238 | err("%s: urb status: %d", __func__, urb->status); | ||
239 | |||
240 | dev_kfree_skb_irq(dev->tx_skb); | ||
241 | netif_wake_queue(dev->net); | ||
242 | } | ||
243 | |||
244 | static int ipheth_carrier_set(struct ipheth_device *dev) | ||
245 | { | ||
246 | struct usb_device *udev = dev->udev; | ||
247 | int retval; | ||
248 | |||
249 | retval = usb_control_msg(udev, | ||
250 | usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP), | ||
251 | IPHETH_CMD_CARRIER_CHECK, /* request */ | ||
252 | 0xc0, /* request type */ | ||
253 | 0x00, /* value */ | ||
254 | 0x02, /* index */ | ||
255 | dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE, | ||
256 | IPHETH_CTRL_TIMEOUT); | ||
257 | if (retval < 0) { | ||
258 | err("%s: usb_control_msg: %d", __func__, retval); | ||
259 | return retval; | ||
260 | } | ||
261 | |||
262 | if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) | ||
263 | netif_carrier_on(dev->net); | ||
264 | else | ||
265 | netif_carrier_off(dev->net); | ||
266 | |||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static void ipheth_carrier_check_work(struct work_struct *work) | ||
271 | { | ||
272 | struct ipheth_device *dev = container_of(work, struct ipheth_device, | ||
273 | carrier_work.work); | ||
274 | |||
275 | ipheth_carrier_set(dev); | ||
276 | schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT); | ||
277 | } | ||
278 | |||
279 | static int ipheth_get_macaddr(struct ipheth_device *dev) | ||
280 | { | ||
281 | struct usb_device *udev = dev->udev; | ||
282 | struct net_device *net = dev->net; | ||
283 | int retval; | ||
284 | |||
285 | retval = usb_control_msg(udev, | ||
286 | usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP), | ||
287 | IPHETH_CMD_GET_MACADDR, /* request */ | ||
288 | 0xc0, /* request type */ | ||
289 | 0x00, /* value */ | ||
290 | 0x02, /* index */ | ||
291 | dev->ctrl_buf, | ||
292 | IPHETH_CTRL_BUF_SIZE, | ||
293 | IPHETH_CTRL_TIMEOUT); | ||
294 | if (retval < 0) { | ||
295 | err("%s: usb_control_msg: %d", __func__, retval); | ||
296 | } else if (retval < ETH_ALEN) { | ||
297 | err("%s: usb_control_msg: short packet: %d bytes", | ||
298 | __func__, retval); | ||
299 | retval = -EINVAL; | ||
300 | } else { | ||
301 | memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN); | ||
302 | retval = 0; | ||
303 | } | ||
304 | |||
305 | return retval; | ||
306 | } | ||
307 | |||
308 | static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags) | ||
309 | { | ||
310 | struct usb_device *udev = dev->udev; | ||
311 | int retval; | ||
312 | |||
313 | usb_fill_bulk_urb(dev->rx_urb, udev, | ||
314 | usb_rcvbulkpipe(udev, dev->bulk_in), | ||
315 | dev->rx_buf, IPHETH_BUF_SIZE, | ||
316 | ipheth_rcvbulk_callback, | ||
317 | dev); | ||
318 | dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | ||
319 | |||
320 | retval = usb_submit_urb(dev->rx_urb, mem_flags); | ||
321 | if (retval) | ||
322 | err("%s: usb_submit_urb: %d", __func__, retval); | ||
323 | return retval; | ||
324 | } | ||
325 | |||
326 | static int ipheth_open(struct net_device *net) | ||
327 | { | ||
328 | struct ipheth_device *dev = netdev_priv(net); | ||
329 | struct usb_device *udev = dev->udev; | ||
330 | int retval = 0; | ||
331 | |||
332 | usb_set_interface(udev, IPHETH_INTFNUM, IPHETH_ALT_INTFNUM); | ||
333 | |||
334 | retval = ipheth_carrier_set(dev); | ||
335 | if (retval) | ||
336 | return retval; | ||
337 | |||
338 | retval = ipheth_rx_submit(dev, GFP_KERNEL); | ||
339 | if (retval) | ||
340 | return retval; | ||
341 | |||
342 | schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT); | ||
343 | netif_start_queue(net); | ||
344 | return retval; | ||
345 | } | ||
346 | |||
347 | static int ipheth_close(struct net_device *net) | ||
348 | { | ||
349 | struct ipheth_device *dev = netdev_priv(net); | ||
350 | |||
351 | cancel_delayed_work_sync(&dev->carrier_work); | ||
352 | netif_stop_queue(net); | ||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static int ipheth_tx(struct sk_buff *skb, struct net_device *net) | ||
357 | { | ||
358 | struct ipheth_device *dev = netdev_priv(net); | ||
359 | struct usb_device *udev = dev->udev; | ||
360 | int retval; | ||
361 | |||
362 | /* Paranoid */ | ||
363 | if (skb->len > IPHETH_BUF_SIZE) { | ||
364 | WARN(1, "%s: skb too large: %d bytes", __func__, skb->len); | ||
365 | dev->net->stats.tx_dropped++; | ||
366 | dev_kfree_skb_irq(skb); | ||
367 | return NETDEV_TX_OK; | ||
368 | } | ||
369 | |||
370 | memcpy(dev->tx_buf, skb->data, skb->len); | ||
371 | if (skb->len < IPHETH_BUF_SIZE) | ||
372 | memset(dev->tx_buf + skb->len, 0, IPHETH_BUF_SIZE - skb->len); | ||
373 | |||
374 | usb_fill_bulk_urb(dev->tx_urb, udev, | ||
375 | usb_sndbulkpipe(udev, dev->bulk_out), | ||
376 | dev->tx_buf, IPHETH_BUF_SIZE, | ||
377 | ipheth_sndbulk_callback, | ||
378 | dev); | ||
379 | dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | ||
380 | |||
381 | retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC); | ||
382 | if (retval) { | ||
383 | err("%s: usb_submit_urb: %d", __func__, retval); | ||
384 | dev->net->stats.tx_errors++; | ||
385 | dev_kfree_skb_irq(skb); | ||
386 | } else { | ||
387 | dev->tx_skb = skb; | ||
388 | |||
389 | dev->net->stats.tx_packets++; | ||
390 | dev->net->stats.tx_bytes += skb->len; | ||
391 | netif_stop_queue(net); | ||
392 | } | ||
393 | |||
394 | return NETDEV_TX_OK; | ||
395 | } | ||
396 | |||
397 | static void ipheth_tx_timeout(struct net_device *net) | ||
398 | { | ||
399 | struct ipheth_device *dev = netdev_priv(net); | ||
400 | |||
401 | err("%s: TX timeout", __func__); | ||
402 | dev->net->stats.tx_errors++; | ||
403 | usb_unlink_urb(dev->tx_urb); | ||
404 | } | ||
405 | |||
406 | static struct net_device_stats *ipheth_stats(struct net_device *net) | ||
407 | { | ||
408 | struct ipheth_device *dev = netdev_priv(net); | ||
409 | return &dev->net->stats; | ||
410 | } | ||
411 | |||
412 | static u32 ipheth_ethtool_op_get_link(struct net_device *net) | ||
413 | { | ||
414 | struct ipheth_device *dev = netdev_priv(net); | ||
415 | return netif_carrier_ok(dev->net); | ||
416 | } | ||
417 | |||
418 | static struct ethtool_ops ops = { | ||
419 | .get_link = ipheth_ethtool_op_get_link | ||
420 | }; | ||
421 | |||
422 | static const struct net_device_ops ipheth_netdev_ops = { | ||
423 | .ndo_open = &ipheth_open, | ||
424 | .ndo_stop = &ipheth_close, | ||
425 | .ndo_start_xmit = &ipheth_tx, | ||
426 | .ndo_tx_timeout = &ipheth_tx_timeout, | ||
427 | .ndo_get_stats = &ipheth_stats, | ||
428 | }; | ||
429 | |||
430 | static struct device_type ipheth_type = { | ||
431 | .name = "wwan", | ||
432 | }; | ||
433 | |||
434 | static int ipheth_probe(struct usb_interface *intf, | ||
435 | const struct usb_device_id *id) | ||
436 | { | ||
437 | struct usb_device *udev = interface_to_usbdev(intf); | ||
438 | struct usb_host_interface *hintf; | ||
439 | struct usb_endpoint_descriptor *endp; | ||
440 | struct ipheth_device *dev; | ||
441 | struct net_device *netdev; | ||
442 | int i; | ||
443 | int retval; | ||
444 | |||
445 | netdev = alloc_etherdev(sizeof(struct ipheth_device)); | ||
446 | if (!netdev) | ||
447 | return -ENOMEM; | ||
448 | |||
449 | netdev->netdev_ops = &ipheth_netdev_ops; | ||
450 | netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; | ||
451 | strcpy(netdev->name, "wwan%d"); | ||
452 | |||
453 | dev = netdev_priv(netdev); | ||
454 | dev->udev = udev; | ||
455 | dev->net = netdev; | ||
456 | dev->intf = intf; | ||
457 | |||
458 | /* Set up endpoints */ | ||
459 | hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM); | ||
460 | if (hintf == NULL) { | ||
461 | retval = -ENODEV; | ||
462 | err("Unable to find alternate settings interface"); | ||
463 | goto err_endpoints; | ||
464 | } | ||
465 | |||
466 | for (i = 0; i < hintf->desc.bNumEndpoints; i++) { | ||
467 | endp = &hintf->endpoint[i].desc; | ||
468 | if (usb_endpoint_is_bulk_in(endp)) | ||
469 | dev->bulk_in = endp->bEndpointAddress; | ||
470 | else if (usb_endpoint_is_bulk_out(endp)) | ||
471 | dev->bulk_out = endp->bEndpointAddress; | ||
472 | } | ||
473 | if (!(dev->bulk_in && dev->bulk_out)) { | ||
474 | retval = -ENODEV; | ||
475 | err("Unable to find endpoints"); | ||
476 | goto err_endpoints; | ||
477 | } | ||
478 | |||
479 | dev->ctrl_buf = kmalloc(IPHETH_CTRL_BUF_SIZE, GFP_KERNEL); | ||
480 | if (dev->ctrl_buf == NULL) { | ||
481 | retval = -ENOMEM; | ||
482 | goto err_alloc_ctrl_buf; | ||
483 | } | ||
484 | |||
485 | retval = ipheth_get_macaddr(dev); | ||
486 | if (retval) | ||
487 | goto err_get_macaddr; | ||
488 | |||
489 | INIT_DELAYED_WORK(&dev->carrier_work, ipheth_carrier_check_work); | ||
490 | |||
491 | retval = ipheth_alloc_urbs(dev); | ||
492 | if (retval) { | ||
493 | err("error allocating urbs: %d", retval); | ||
494 | goto err_alloc_urbs; | ||
495 | } | ||
496 | |||
497 | usb_set_intfdata(intf, dev); | ||
498 | |||
499 | SET_NETDEV_DEV(netdev, &intf->dev); | ||
500 | SET_ETHTOOL_OPS(netdev, &ops); | ||
501 | SET_NETDEV_DEVTYPE(netdev, &ipheth_type); | ||
502 | |||
503 | retval = register_netdev(netdev); | ||
504 | if (retval) { | ||
505 | err("error registering netdev: %d", retval); | ||
506 | retval = -EIO; | ||
507 | goto err_register_netdev; | ||
508 | } | ||
509 | |||
510 | dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n"); | ||
511 | return 0; | ||
512 | |||
513 | err_register_netdev: | ||
514 | ipheth_free_urbs(dev); | ||
515 | err_alloc_urbs: | ||
516 | err_get_macaddr: | ||
517 | err_alloc_ctrl_buf: | ||
518 | kfree(dev->ctrl_buf); | ||
519 | err_endpoints: | ||
520 | free_netdev(netdev); | ||
521 | return retval; | ||
522 | } | ||
523 | |||
524 | static void ipheth_disconnect(struct usb_interface *intf) | ||
525 | { | ||
526 | struct ipheth_device *dev; | ||
527 | |||
528 | dev = usb_get_intfdata(intf); | ||
529 | if (dev != NULL) { | ||
530 | unregister_netdev(dev->net); | ||
531 | ipheth_kill_urbs(dev); | ||
532 | ipheth_free_urbs(dev); | ||
533 | kfree(dev->ctrl_buf); | ||
534 | free_netdev(dev->net); | ||
535 | } | ||
536 | usb_set_intfdata(intf, NULL); | ||
537 | dev_info(&intf->dev, "Apple iPhone USB Ethernet now disconnected\n"); | ||
538 | } | ||
539 | |||
540 | static struct usb_driver ipheth_driver = { | ||
541 | .name = "ipheth", | ||
542 | .probe = ipheth_probe, | ||
543 | .disconnect = ipheth_disconnect, | ||
544 | .id_table = ipheth_table, | ||
545 | }; | ||
546 | |||
547 | static int __init ipheth_init(void) | ||
548 | { | ||
549 | int retval; | ||
550 | |||
551 | retval = usb_register(&ipheth_driver); | ||
552 | if (retval) { | ||
553 | err("usb_register failed: %d", retval); | ||
554 | return retval; | ||
555 | } | ||
556 | return 0; | ||
557 | } | ||
558 | |||
559 | static void __exit ipheth_exit(void) | ||
560 | { | ||
561 | usb_deregister(&ipheth_driver); | ||
562 | } | ||
563 | |||
564 | module_init(ipheth_init); | ||
565 | module_exit(ipheth_exit); | ||
566 | |||
567 | MODULE_AUTHOR("Diego Giagio <diego@giagio.com>"); | ||
568 | MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver"); | ||
569 | MODULE_LICENSE("Dual BSD/GPL"); | ||
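A note on the 0xc0 request type passed to usb_control_msg() in ipheth_carrier_set() and ipheth_get_macaddr() above: it is the standard USB bmRequestType encoding for a device-to-host, vendor-specific request addressed to the device. A small standalone sketch (plain C; the constants are the USB-spec values, not taken from kernel headers):

	#include <stdio.h>

	#define USB_DIR_IN       0x80  /* device-to-host */
	#define USB_TYPE_VENDOR  0x40  /* vendor-defined request */
	#define USB_RECIP_DEVICE 0x00  /* recipient: the device itself */

	int main(void)
	{
		unsigned char bmRequestType =
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;

		printf("bmRequestType = 0x%02x\n", bmRequestType); /* prints 0xc0 */
		return 0;
	}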
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 52671ea043a7..c4c334d9770f 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c | |||
@@ -145,6 +145,7 @@ static struct usb_device_id usb_klsi_table[] = { | |||
145 | { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ | 145 | { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ |
146 | { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */ | 146 | { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */ |
147 | { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ | 147 | { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ |
148 | { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */ | ||
148 | { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ | 149 | { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ |
149 | { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ | 150 | { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ |
150 | { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ | 151 | { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ |
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c new file mode 100644 index 000000000000..a44f9e0ea098 --- /dev/null +++ b/drivers/net/usb/sierra_net.c | |||
@@ -0,0 +1,1001 @@ | |||
1 | /* | ||
2 | * USB-to-WWAN Driver for Sierra Wireless modems | ||
3 | * | ||
4 | * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer | ||
5 | * <linux@sierrawireless.com> | ||
6 | * | ||
7 | * Portions of this based on the cdc_ether driver by David Brownell (2003-2005) | ||
8 | * and Ole Andre Vadla Ravnas (ActiveSync) (2006). | ||
9 | * | ||
10 | * IMPORTANT DISCLAIMER: This driver is not commercially supported by | ||
11 | * Sierra Wireless. Use at your own risk. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
26 | */ | ||
27 | |||
28 | #define DRIVER_VERSION "v.2.0" | ||
29 | #define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer" | ||
30 | #define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems" | ||
31 | static const char driver_name[] = "sierra_net"; | ||
32 | |||
33 | /* if defined, debug messages are enabled */ | ||
34 | /*#define DEBUG*/ | ||
35 | |||
36 | #include <linux/module.h> | ||
37 | #include <linux/etherdevice.h> | ||
38 | #include <linux/ethtool.h> | ||
39 | #include <linux/mii.h> | ||
40 | #include <linux/sched.h> | ||
41 | #include <linux/timer.h> | ||
42 | #include <linux/usb.h> | ||
43 | #include <linux/usb/cdc.h> | ||
44 | #include <net/ip.h> | ||
45 | #include <net/udp.h> | ||
46 | #include <asm/unaligned.h> | ||
47 | #include <linux/usb/usbnet.h> | ||
48 | |||
49 | #define SWI_USB_REQUEST_GET_FW_ATTR 0x06 | ||
50 | #define SWI_GET_FW_ATTR_MASK 0x08 | ||
51 | |||
52 | /* atomic counter partially included in MAC address to make sure 2 devices | ||
53 | * do not end up with the same MAC - concept breaks in case of > 255 ifaces | ||
54 | */ | ||
55 | static atomic_t iface_counter = ATOMIC_INIT(0); | ||
56 | |||
57 | /* | ||
58 | * SYNC Timer Delay definition used to set the expiry time | ||
59 | */ | ||
60 | #define SIERRA_NET_SYNCDELAY (2*HZ) | ||
61 | |||
62 | /* Max. MTU supported. The modem buffers are limited to 1500 */ | ||
63 | #define SIERRA_NET_MAX_SUPPORTED_MTU 1500 | ||
64 | |||
65 | /* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control | ||
66 | * message reception ... and thus the max. received packet. | ||
67 | * (May be the cause for parse_hip returning -EINVAL) | ||
68 | */ | ||
69 | #define SIERRA_NET_USBCTL_BUF_LEN 1024 | ||
70 | |||
71 | /* list of interface numbers - used for constructing interface lists */ | ||
72 | struct sierra_net_iface_info { | ||
73 | const u32 infolen; /* number of interface numbers on list */ | ||
74 | const u8 *ifaceinfo; /* pointer to the array holding the numbers */ | ||
75 | }; | ||
76 | |||
77 | struct sierra_net_info_data { | ||
78 | u16 rx_urb_size; | ||
79 | struct sierra_net_iface_info whitelist; | ||
80 | }; | ||
81 | |||
82 | /* Private data structure */ | ||
83 | struct sierra_net_data { | ||
84 | |||
85 | u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */ | ||
86 | |||
87 | u16 link_up; /* air link up or down */ | ||
88 | u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ | ||
89 | |||
90 | u8 sync_msg[4]; /* SYNC message */ | ||
91 | u8 shdwn_msg[4]; /* Shutdown message */ | ||
92 | |||
93 | /* Backpointer to the container */ | ||
94 | struct usbnet *usbnet; | ||
95 | |||
96 | u8 ifnum; /* interface number */ | ||
97 | |||
98 | /* Bit masks, must be a power of 2 */ | ||
99 | #define SIERRA_NET_EVENT_RESP_AVAIL 0x01 | ||
100 | #define SIERRA_NET_TIMER_EXPIRY 0x02 | ||
101 | unsigned long kevent_flags; | ||
102 | struct work_struct sierra_net_kevent; | ||
103 | struct timer_list sync_timer; /* For retrying SYNC sequence */ | ||
104 | }; | ||
105 | |||
106 | struct param { | ||
107 | int is_present; | ||
108 | union { | ||
109 | void *ptr; | ||
110 | u32 dword; | ||
111 | u16 word; | ||
112 | u8 byte; | ||
113 | }; | ||
114 | }; | ||
115 | |||
116 | /* HIP message type */ | ||
117 | #define SIERRA_NET_HIP_EXTENDEDID 0x7F | ||
118 | #define SIERRA_NET_HIP_HSYNC_ID 0x60 /* Modem -> host */ | ||
119 | #define SIERRA_NET_HIP_RESTART_ID 0x62 /* Modem -> host */ | ||
120 | #define SIERRA_NET_HIP_MSYNC_ID 0x20 /* Host -> modem */ | ||
121 | #define SIERRA_NET_HIP_SHUTD_ID 0x26 /* Host -> modem */ | ||
122 | |||
123 | #define SIERRA_NET_HIP_EXT_IP_IN_ID 0x0202 | ||
124 | #define SIERRA_NET_HIP_EXT_IP_OUT_ID 0x0002 | ||
125 | |||
126 | /* 3G UMTS Link Sense Indication definitions */ | ||
127 | #define SIERRA_NET_HIP_LSI_UMTSID 0x78 | ||
128 | |||
129 | /* Reverse Channel Grant Indication HIP message */ | ||
130 | #define SIERRA_NET_HIP_RCGI 0x64 | ||
131 | |||
132 | /* LSI Protocol types */ | ||
133 | #define SIERRA_NET_PROTOCOL_UMTS 0x01 | ||
134 | /* LSI Coverage */ | ||
135 | #define SIERRA_NET_COVERAGE_NONE 0x00 | ||
136 | #define SIERRA_NET_COVERAGE_NOPACKET 0x01 | ||
137 | |||
138 | /* LSI Session */ | ||
139 | #define SIERRA_NET_SESSION_IDLE 0x00 | ||
140 | /* LSI Link types */ | ||
141 | #define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00 | ||
142 | |||
143 | struct lsi_umts { | ||
144 | u8 protocol; | ||
145 | u8 unused1; | ||
146 | __be16 length; | ||
147 | /* eventually use a union for the rest - assume umts for now */ | ||
148 | u8 coverage; | ||
149 | u8 unused2[41]; | ||
150 | u8 session_state; | ||
151 | u8 unused3[33]; | ||
152 | u8 link_type; | ||
153 | u8 pdp_addr_len; /* NW-supplied PDP address len */ | ||
154 | u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian) */ | ||
155 | u8 unused4[23]; | ||
156 | u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */ | ||
157 | u8 dns1_addr[16]; /* NW-supplied 1st DNS address */ | ||
158 | u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */ | ||
159 | u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/ | ||
160 | u8 wins1_addr_len; /* NW-supplied 1st Wins address len */ | ||
161 | u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/ | ||
162 | u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */ | ||
163 | u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */ | ||
164 | u8 unused5[4]; | ||
165 | u8 gw_addr_len; /* NW-supplied GW address len */ | ||
166 | u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */ | ||
167 | u8 reserved[8]; | ||
168 | } __attribute__ ((packed)); | ||
169 | |||
170 | #define SIERRA_NET_LSI_COMMON_LEN 4 | ||
171 | #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) | ||
172 | #define SIERRA_NET_LSI_UMTS_STATUS_LEN \ | ||
173 | (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) | ||
174 | |||
175 | /* Forward definitions */ | ||
176 | static void sierra_sync_timer(unsigned long syncdata); | ||
177 | static int sierra_net_change_mtu(struct net_device *net, int new_mtu); | ||
178 | |||
179 | /* Our own net device operations structure */ | ||
180 | static const struct net_device_ops sierra_net_device_ops = { | ||
181 | .ndo_open = usbnet_open, | ||
182 | .ndo_stop = usbnet_stop, | ||
183 | .ndo_start_xmit = usbnet_start_xmit, | ||
184 | .ndo_tx_timeout = usbnet_tx_timeout, | ||
185 | .ndo_change_mtu = sierra_net_change_mtu, | ||
186 | .ndo_set_mac_address = eth_mac_addr, | ||
187 | .ndo_validate_addr = eth_validate_addr, | ||
188 | }; | ||
189 | |||
190 | /* get private data associated with passed in usbnet device */ | ||
191 | static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev) | ||
192 | { | ||
193 | return (struct sierra_net_data *)dev->data[0]; | ||
194 | } | ||
195 | |||
196 | /* set private data associated with passed in usbnet device */ | ||
197 | static inline void sierra_net_set_private(struct usbnet *dev, | ||
198 | struct sierra_net_data *priv) | ||
199 | { | ||
200 | dev->data[0] = (unsigned long)priv; | ||
201 | } | ||
202 | |||
203 | /* is packet IPv4 */ | ||
204 | static inline int is_ip(struct sk_buff *skb) | ||
205 | { | ||
206 | return (skb->protocol == cpu_to_be16(ETH_P_IP)); | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * check passed in packet and make sure that: | ||
211 | * - it is linear (no scatter/gather) | ||
212 | * - it is ethernet (mac_header properly set) | ||
213 | */ | ||
214 | static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev) | ||
215 | { | ||
216 | skb_reset_mac_header(skb); /* ethernet header */ | ||
217 | |||
218 | if (skb_is_nonlinear(skb)) { | ||
219 | netdev_err(dev->net, "Non linear buffer-dropping\n"); | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | if (!pskb_may_pull(skb, ETH_HLEN)) | ||
224 | return 0; | ||
225 | skb->protocol = eth_hdr(skb)->h_proto; | ||
226 | |||
227 | return 1; | ||
228 | } | ||
229 | |||
230 | static const u8 *save16bit(struct param *p, const u8 *datap) | ||
231 | { | ||
232 | p->is_present = 1; | ||
233 | p->word = get_unaligned_be16(datap); | ||
234 | return datap + sizeof(p->word); | ||
235 | } | ||
236 | |||
237 | static const u8 *save8bit(struct param *p, const u8 *datap) | ||
238 | { | ||
239 | p->is_present = 1; | ||
240 | p->byte = *datap; | ||
241 | return datap + sizeof(p->byte); | ||
242 | } | ||
243 | |||
244 | /*----------------------------------------------------------------------------* | ||
245 | * BEGIN HIP * | ||
246 | *----------------------------------------------------------------------------*/ | ||
247 | /* HIP header */ | ||
248 | #define SIERRA_NET_HIP_HDR_LEN 4 | ||
249 | /* Extended HIP header */ | ||
250 | #define SIERRA_NET_HIP_EXT_HDR_LEN 6 | ||
251 | |||
252 | struct hip_hdr { | ||
253 | int hdrlen; | ||
254 | struct param payload_len; | ||
255 | struct param msgid; | ||
256 | struct param msgspecific; | ||
257 | struct param extmsgid; | ||
258 | }; | ||
259 | |||
260 | static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh) | ||
261 | { | ||
262 | const u8 *curp = buf; | ||
263 | int padded; | ||
264 | |||
265 | if (buflen < SIERRA_NET_HIP_HDR_LEN) | ||
266 | return -EPROTO; | ||
267 | |||
268 | curp = save16bit(&hh->payload_len, curp); | ||
269 | curp = save8bit(&hh->msgid, curp); | ||
270 | curp = save8bit(&hh->msgspecific, curp); | ||
271 | |||
272 | padded = hh->msgid.byte & 0x80; | ||
273 | hh->msgid.byte &= 0x7F; /* 7 bits */ | ||
274 | |||
275 | hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID); | ||
276 | if (hh->extmsgid.is_present) { | ||
277 | if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN) | ||
278 | return -EPROTO; | ||
279 | |||
280 | hh->payload_len.word &= 0x3FFF; /* 14 bits */ | ||
281 | |||
282 | curp = save16bit(&hh->extmsgid, curp); | ||
283 | hh->extmsgid.word &= 0x03FF; /* 10 bits */ | ||
284 | |||
285 | hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN; | ||
286 | } else { | ||
287 | hh->payload_len.word &= 0x07FF; /* 11 bits */ | ||
288 | hh->hdrlen = SIERRA_NET_HIP_HDR_LEN; | ||
289 | } | ||
290 | |||
291 | if (padded) { | ||
292 | hh->hdrlen++; | ||
293 | hh->payload_len.word--; | ||
294 | } | ||
295 | |||
296 | /* if real packet shorter than the claimed length */ | ||
297 | if (buflen < (hh->hdrlen + hh->payload_len.word)) | ||
298 | return -EINVAL; | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static void build_hip(u8 *buf, const u16 payloadlen, | ||
304 | struct sierra_net_data *priv) | ||
305 | { | ||
306 | /* the following doesn't have the full functionality. We | ||
307 | * currently build only one kind of header, so it is faster this way | ||
308 | */ | ||
309 | put_unaligned_be16(payloadlen, buf); | ||
310 | memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template)); | ||
311 | } | ||
312 | /*----------------------------------------------------------------------------* | ||
313 | * END HIP * | ||
314 | *----------------------------------------------------------------------------*/ | ||
315 | |||
316 | static int sierra_net_send_cmd(struct usbnet *dev, | ||
317 | u8 *cmd, int cmdlen, const char * cmd_name) | ||
318 | { | ||
319 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
320 | int status; | ||
321 | |||
322 | status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), | ||
323 | USB_CDC_SEND_ENCAPSULATED_COMMAND, | ||
324 | USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0, | ||
325 | priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT); | ||
326 | |||
327 | if (status != cmdlen && status != -ENODEV) | ||
328 | netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status); | ||
329 | |||
330 | return status; | ||
331 | } | ||
332 | |||
333 | static int sierra_net_send_sync(struct usbnet *dev) | ||
334 | { | ||
335 | int status; | ||
336 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
337 | |||
338 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
339 | |||
340 | status = sierra_net_send_cmd(dev, priv->sync_msg, | ||
341 | sizeof(priv->sync_msg), "SYNC"); | ||
342 | |||
343 | return status; | ||
344 | } | ||
345 | |||
346 | static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix) | ||
347 | { | ||
348 | dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix); | ||
349 | priv->tx_hdr_template[0] = 0x3F; | ||
350 | priv->tx_hdr_template[1] = ctx_ix; | ||
351 | *((u16 *)&priv->tx_hdr_template[2]) = | ||
352 | cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID); | ||
353 | } | ||
354 | |||
355 | static inline int sierra_net_is_valid_addrlen(u8 len) | ||
356 | { | ||
357 | return (len == sizeof(struct in_addr)); | ||
358 | } | ||
359 | |||
360 | static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) | ||
361 | { | ||
362 | struct lsi_umts *lsi = (struct lsi_umts *)data; | ||
363 | |||
364 | if (datalen < sizeof(struct lsi_umts)) { | ||
365 | netdev_err(dev->net, "%s: Data length %d, exp %Zu\n", | ||
366 | __func__, datalen, | ||
367 | sizeof(struct lsi_umts)); | ||
368 | return -1; | ||
369 | } | ||
370 | |||
371 | if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) { | ||
372 | netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", | ||
373 | __func__, be16_to_cpu(lsi->length), | ||
374 | (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN); | ||
375 | return -1; | ||
376 | } | ||
377 | |||
378 | /* Validate the protocol - only support UMTS for now */ | ||
379 | if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) { | ||
380 | netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", | ||
381 | lsi->protocol); | ||
382 | return -1; | ||
383 | } | ||
384 | |||
385 | /* Validate the link type */ | ||
386 | if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) { | ||
387 | netdev_err(dev->net, "Link type unsupported: 0x%02x\n", | ||
388 | lsi->link_type); | ||
389 | return -1; | ||
390 | } | ||
391 | |||
392 | /* Validate the coverage */ | ||
393 | if (lsi->coverage == SIERRA_NET_COVERAGE_NONE | ||
394 | || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { | ||
395 | netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); | ||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | /* Validate the session state */ | ||
400 | if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { | ||
401 | netdev_err(dev->net, "Session idle, 0x%02x\n", | ||
402 | lsi->session_state); | ||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | /* Set link_sense true */ | ||
407 | return 1; | ||
408 | } | ||
409 | |||
410 | static void sierra_net_handle_lsi(struct usbnet *dev, char *data, | ||
411 | struct hip_hdr *hh) | ||
412 | { | ||
413 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
414 | int link_up; | ||
415 | |||
416 | link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen, | ||
417 | hh->payload_len.word); | ||
418 | if (link_up < 0) { | ||
419 | netdev_err(dev->net, "Invalid LSI\n"); | ||
420 | return; | ||
421 | } | ||
422 | if (link_up) { | ||
423 | sierra_net_set_ctx_index(priv, hh->msgspecific.byte); | ||
424 | priv->link_up = 1; | ||
425 | netif_carrier_on(dev->net); | ||
426 | } else { | ||
427 | priv->link_up = 0; | ||
428 | netif_carrier_off(dev->net); | ||
429 | } | ||
430 | } | ||
431 | |||
432 | static void sierra_net_dosync(struct usbnet *dev) | ||
433 | { | ||
434 | int status; | ||
435 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
436 | |||
437 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
438 | |||
439 | /* tell modem we are ready */ | ||
440 | status = sierra_net_send_sync(dev); | ||
441 | if (status < 0) | ||
442 | netdev_err(dev->net, | ||
443 | "Send SYNC failed, status %d\n", status); | ||
444 | status = sierra_net_send_sync(dev); | ||
445 | if (status < 0) | ||
446 | netdev_err(dev->net, | ||
447 | "Send SYNC failed, status %d\n", status); | ||
448 | |||
449 | /* Now, start a timer and make sure we get the Restart Indication */ | ||
450 | priv->sync_timer.function = sierra_sync_timer; | ||
451 | priv->sync_timer.data = (unsigned long) dev; | ||
452 | priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY; | ||
453 | add_timer(&priv->sync_timer); | ||
454 | } | ||
455 | |||
456 | static void sierra_net_kevent(struct work_struct *work) | ||
457 | { | ||
458 | struct sierra_net_data *priv = | ||
459 | container_of(work, struct sierra_net_data, sierra_net_kevent); | ||
460 | struct usbnet *dev = priv->usbnet; | ||
461 | int len; | ||
462 | int err; | ||
463 | u8 *buf; | ||
464 | u8 ifnum; | ||
465 | |||
466 | if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) { | ||
467 | clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags); | ||
468 | |||
469 | /* Query the modem for the LSI message */ | ||
470 | buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL); | ||
471 | if (!buf) { | ||
472 | netdev_err(dev->net, | ||
473 | "failed to allocate buf for LS msg\n"); | ||
474 | return; | ||
475 | } | ||
476 | ifnum = priv->ifnum; | ||
477 | len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), | ||
478 | USB_CDC_GET_ENCAPSULATED_RESPONSE, | ||
479 | USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE, | ||
480 | 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN, | ||
481 | USB_CTRL_SET_TIMEOUT); | ||
482 | |||
483 | if (len < 0) { | ||
484 | netdev_err(dev->net, | ||
485 | "usb_control_msg failed, status %d\n", len); | ||
486 | } else { | ||
487 | struct hip_hdr hh; | ||
488 | |||
489 | dev_dbg(&dev->udev->dev, "%s: Received status message," | ||
490 | " %04x bytes", __func__, len); | ||
491 | |||
492 | err = parse_hip(buf, len, &hh); | ||
493 | if (err) { | ||
494 | netdev_err(dev->net, "%s: Bad packet," | ||
495 | " parse result %d\n", __func__, err); | ||
496 | kfree(buf); | ||
497 | return; | ||
498 | } | ||
499 | |||
500 | /* Validate packet length */ | ||
501 | if (len != hh.hdrlen + hh.payload_len.word) { | ||
502 | netdev_err(dev->net, "%s: Bad packet, received" | ||
503 | " %d, expected %d\n", __func__, len, | ||
504 | hh.hdrlen + hh.payload_len.word); | ||
505 | kfree(buf); | ||
506 | return; | ||
507 | } | ||
508 | |||
509 | /* Switch on received message types */ | ||
510 | switch (hh.msgid.byte) { | ||
511 | case SIERRA_NET_HIP_LSI_UMTSID: | ||
512 | dev_dbg(&dev->udev->dev, "LSI for ctx:%d", | ||
513 | hh.msgspecific.byte); | ||
514 | sierra_net_handle_lsi(dev, buf, &hh); | ||
515 | break; | ||
516 | case SIERRA_NET_HIP_RESTART_ID: | ||
517 | dev_dbg(&dev->udev->dev, "Restart reported: %d," | ||
518 | " stopping sync timer", | ||
519 | hh.msgspecific.byte); | ||
520 | /* Got sync resp - stop timer & clear mask */ | ||
521 | del_timer_sync(&priv->sync_timer); | ||
522 | clear_bit(SIERRA_NET_TIMER_EXPIRY, | ||
523 | &priv->kevent_flags); | ||
524 | break; | ||
525 | case SIERRA_NET_HIP_HSYNC_ID: | ||
526 | dev_dbg(&dev->udev->dev, "SYNC received"); | ||
527 | err = sierra_net_send_sync(dev); | ||
528 | if (err < 0) | ||
529 | netdev_err(dev->net, | ||
530 | "Send SYNC failed %d\n", err); | ||
531 | break; | ||
532 | case SIERRA_NET_HIP_EXTENDEDID: | ||
533 | netdev_err(dev->net, "Unrecognized HIP msg, " | ||
534 | "extmsgid 0x%04x\n", hh.extmsgid.word); | ||
535 | break; | ||
536 | case SIERRA_NET_HIP_RCGI: | ||
537 | /* Ignored */ | ||
538 | break; | ||
539 | default: | ||
540 | netdev_err(dev->net, "Unrecognized HIP msg, " | ||
541 | "msgid 0x%02x\n", hh.msgid.byte); | ||
542 | break; | ||
543 | } | ||
544 | } | ||
545 | kfree(buf); | ||
546 | } | ||
547 | /* The sync timer bit might be set */ | ||
548 | if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) { | ||
549 | clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags); | ||
550 | dev_dbg(&dev->udev->dev, "Deferred sync timer expiry"); | ||
551 | sierra_net_dosync(priv->usbnet); | ||
552 | } | ||
553 | |||
554 | if (priv->kevent_flags) | ||
555 | dev_dbg(&dev->udev->dev, "sierra_net_kevent done, " | ||
556 | "kevent_flags = 0x%lx", priv->kevent_flags); | ||
557 | } | ||
558 | |||
559 | static void sierra_net_defer_kevent(struct usbnet *dev, int work) | ||
560 | { | ||
561 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
562 | |||
563 | set_bit(work, &priv->kevent_flags); | ||
564 | schedule_work(&priv->sierra_net_kevent); | ||
565 | } | ||
566 | |||
567 | /* | ||
568 | * Sync Retransmit Timer Handler. On expiry, kick the work queue | ||
569 | */ | ||
570 | void sierra_sync_timer(unsigned long syncdata) | ||
571 | { | ||
572 | struct usbnet *dev = (struct usbnet *)syncdata; | ||
573 | |||
574 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
575 | /* Kick the tasklet */ | ||
576 | sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY); | ||
577 | } | ||
578 | |||
579 | static void sierra_net_status(struct usbnet *dev, struct urb *urb) | ||
580 | { | ||
581 | struct usb_cdc_notification *event; | ||
582 | |||
583 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
584 | |||
585 | if (urb->actual_length < sizeof *event) | ||
586 | return; | ||
587 | |||
588 | /* Add cases to handle other standard notifications. */ | ||
589 | event = urb->transfer_buffer; | ||
590 | switch (event->bNotificationType) { | ||
591 | case USB_CDC_NOTIFY_NETWORK_CONNECTION: | ||
592 | case USB_CDC_NOTIFY_SPEED_CHANGE: | ||
593 | /* USB 305 sends those */ | ||
594 | break; | ||
595 | case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: | ||
596 | sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL); | ||
597 | break; | ||
598 | default: | ||
599 | netdev_err(dev->net, ": unexpected notification %02x!\n", | ||
600 | event->bNotificationType); | ||
601 | break; | ||
602 | } | ||
603 | } | ||
604 | |||
605 | static void sierra_net_get_drvinfo(struct net_device *net, | ||
606 | struct ethtool_drvinfo *info) | ||
607 | { | ||
608 | /* Inherit standard device info */ | ||
609 | usbnet_get_drvinfo(net, info); | ||
610 | strncpy(info->driver, driver_name, sizeof info->driver); | ||
611 | strncpy(info->version, DRIVER_VERSION, sizeof info->version); | ||
612 | } | ||
613 | |||
614 | static u32 sierra_net_get_link(struct net_device *net) | ||
615 | { | ||
616 | struct usbnet *dev = netdev_priv(net); | ||
617 | /* Report link is down whenever the interface is down */ | ||
618 | return sierra_net_get_private(dev)->link_up && netif_running(net); | ||
619 | } | ||
620 | |||
621 | static struct ethtool_ops sierra_net_ethtool_ops = { | ||
622 | .get_drvinfo = sierra_net_get_drvinfo, | ||
623 | .get_link = sierra_net_get_link, | ||
624 | .get_msglevel = usbnet_get_msglevel, | ||
625 | .set_msglevel = usbnet_set_msglevel, | ||
626 | .get_settings = usbnet_get_settings, | ||
627 | .set_settings = usbnet_set_settings, | ||
628 | .nway_reset = usbnet_nway_reset, | ||
629 | }; | ||
630 | |||
631 | /* MTU cannot be more than 1500 bytes; enforce it. */ | ||
632 | static int sierra_net_change_mtu(struct net_device *net, int new_mtu) | ||
633 | { | ||
634 | if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU) | ||
635 | return -EINVAL; | ||
636 | |||
637 | return usbnet_change_mtu(net, new_mtu); | ||
638 | } | ||
639 | |||
640 | static int is_whitelisted(const u8 ifnum, | ||
641 | const struct sierra_net_iface_info *whitelist) | ||
642 | { | ||
643 | if (whitelist) { | ||
644 | const u8 *list = whitelist->ifaceinfo; | ||
645 | int i; | ||
646 | |||
647 | for (i = 0; i < whitelist->infolen; i++) { | ||
648 | if (list[i] == ifnum) | ||
649 | return 1; | ||
650 | } | ||
651 | } | ||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) | ||
656 | { | ||
657 | int result = 0; | ||
658 | u16 *attrdata; | ||
659 | |||
660 | attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL); | ||
661 | if (!attrdata) | ||
662 | return -ENOMEM; | ||
663 | |||
664 | result = usb_control_msg( | ||
665 | dev->udev, | ||
666 | usb_rcvctrlpipe(dev->udev, 0), | ||
667 | /* _u8 vendor specific request */ | ||
668 | SWI_USB_REQUEST_GET_FW_ATTR, | ||
669 | USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */ | ||
670 | 0x0000, /* __u16 value not used */ | ||
671 | 0x0000, /* __u16 index not used */ | ||
672 | attrdata, /* char *data */ | ||
673 | sizeof(*attrdata), /* __u16 size */ | ||
674 | USB_CTRL_SET_TIMEOUT); /* int timeout */ | ||
675 | |||
676 | if (result < 0) { | ||
677 | kfree(attrdata); | ||
678 | return -EIO; | ||
679 | } | ||
680 | |||
681 | *datap = *attrdata; | ||
682 | |||
683 | kfree(attrdata); | ||
684 | return result; | ||
685 | } | ||
686 | |||
687 | /* | ||
688 | * collects the bulk endpoints and the status endpoint. | ||
689 | */ | ||
690 | static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) | ||
691 | { | ||
692 | u8 ifacenum; | ||
693 | u8 numendpoints; | ||
694 | u16 fwattr = 0; | ||
695 | int status; | ||
696 | struct ethhdr *eth; | ||
697 | struct sierra_net_data *priv; | ||
698 | static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { | ||
699 | 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; | ||
700 | static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = { | ||
701 | 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00}; | ||
702 | |||
703 | struct sierra_net_info_data *data = | ||
704 | (struct sierra_net_info_data *)dev->driver_info->data; | ||
705 | |||
706 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
707 | |||
708 | ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; | ||
709 | /* We only accept certain interfaces */ | ||
710 | if (!is_whitelisted(ifacenum, &data->whitelist)) { | ||
711 | dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum); | ||
712 | return -ENODEV; | ||
713 | } | ||
714 | numendpoints = intf->cur_altsetting->desc.bNumEndpoints; | ||
715 | /* We have three endpoints, bulk in and out, and a status */ | ||
716 | if (numendpoints != 3) { | ||
717 | dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d", | ||
718 | numendpoints); | ||
719 | return -ENODEV; | ||
720 | } | ||
721 | /* Status endpoint set in usbnet_get_endpoints() */ | ||
722 | dev->status = NULL; | ||
723 | status = usbnet_get_endpoints(dev, intf); | ||
724 | if (status < 0) { | ||
725 | dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)", | ||
726 | status); | ||
727 | return -ENODEV; | ||
728 | } | ||
729 | /* Initialize sierra private data */ | ||
730 | priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
731 | if (!priv) { | ||
732 | dev_err(&dev->udev->dev, "No memory"); | ||
733 | return -ENOMEM; | ||
734 | } | ||
735 | |||
736 | priv->usbnet = dev; | ||
737 | priv->ifnum = ifacenum; | ||
738 | dev->net->netdev_ops = &sierra_net_device_ops; | ||
739 | |||
740 | /* change MAC addr to include ifacenum, and to be unique */ | ||
741 | dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); | ||
742 | dev->net->dev_addr[ETH_ALEN-1] = ifacenum; | ||
743 | |||
744 | /* we will have to manufacture ethernet headers, prepare template */ | ||
745 | eth = (struct ethhdr *)priv->ethr_hdr_tmpl; | ||
746 | memcpy(ð->h_dest, dev->net->dev_addr, ETH_ALEN); | ||
747 | eth->h_proto = cpu_to_be16(ETH_P_IP); | ||
748 | |||
749 | /* prepare shutdown message template */ | ||
750 | memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); | ||
751 | /* set context index initially to 0 - prepares tx hdr template */ | ||
752 | sierra_net_set_ctx_index(priv, 0); | ||
753 | |||
754 | /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ | ||
755 | dev->rx_urb_size = data->rx_urb_size; | ||
756 | if (dev->udev->speed != USB_SPEED_HIGH) | ||
757 | dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size); | ||
758 | |||
759 | dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN; | ||
760 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | ||
761 | |||
762 | /* Set up the netdev */ | ||
763 | dev->net->flags |= IFF_NOARP; | ||
764 | dev->net->ethtool_ops = &sierra_net_ethtool_ops; | ||
765 | netif_carrier_off(dev->net); | ||
766 | |||
767 | sierra_net_set_private(dev, priv); | ||
768 | |||
769 | priv->kevent_flags = 0; | ||
770 | |||
771 | /* Use the shared workqueue */ | ||
772 | INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent); | ||
773 | |||
774 | /* Only need to do this once */ | ||
775 | init_timer(&priv->sync_timer); | ||
776 | |||
777 | /* verify fw attributes */ | ||
778 | status = sierra_net_get_fw_attr(dev, &fwattr); | ||
779 | dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr); | ||
780 | |||
781 | /* test whether firmware supports DHCP */ | ||
782 | if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) { | ||
783 | /* found incompatible firmware version */ | ||
784 | dev_err(&dev->udev->dev, "Incompatible driver and firmware" | ||
785 | " versions\n"); | ||
786 | kfree(priv); | ||
787 | return -ENODEV; | ||
788 | } | ||
789 | /* prepare sync message from template */ | ||
790 | memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); | ||
791 | |||
792 | return 0; | ||
793 | } | ||
794 | |||
795 | static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf) | ||
796 | { | ||
797 | int status; | ||
798 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
799 | |||
800 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
801 | |||
802 | /* Kill the timer then flush the work queue */ | ||
803 | del_timer_sync(&priv->sync_timer); | ||
804 | |||
805 | flush_scheduled_work(); | ||
806 | |||
807 | /* tell modem we are going away */ | ||
808 | status = sierra_net_send_cmd(dev, priv->shdwn_msg, | ||
809 | sizeof(priv->shdwn_msg), "Shutdown"); | ||
810 | if (status < 0) | ||
811 | netdev_err(dev->net, | ||
812 | "usb_control_msg failed, status %d\n", status); | ||
813 | |||
814 | sierra_net_set_private(dev, NULL); | ||
815 | |||
816 | kfree(priv); | ||
817 | } | ||
818 | |||
819 | static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev, | ||
820 | struct sk_buff *skb, int len) | ||
821 | { | ||
822 | struct sk_buff *new_skb; | ||
823 | |||
824 | /* clone skb */ | ||
825 | new_skb = skb_clone(skb, GFP_ATOMIC); | ||
826 | |||
827 | /* remove len bytes from original */ | ||
828 | skb_pull(skb, len); | ||
829 | |||
830 | /* trim next packet to its length */ | ||
831 | if (new_skb) { | ||
832 | skb_trim(new_skb, len); | ||
833 | } else { | ||
834 | if (netif_msg_rx_err(dev)) | ||
835 | netdev_err(dev->net, "failed to get skb\n"); | ||
836 | dev->net->stats.rx_dropped++; | ||
837 | } | ||
838 | |||
839 | return new_skb; | ||
840 | } | ||
841 | |||
842 | /* ---------------------------- Receive data path ----------------------*/ | ||
843 | static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | ||
844 | { | ||
845 | int err; | ||
846 | struct hip_hdr hh; | ||
847 | struct sk_buff *new_skb; | ||
848 | |||
849 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
850 | |||
851 | /* could contain multiple packets */ | ||
852 | while (likely(skb->len)) { | ||
853 | err = parse_hip(skb->data, skb->len, &hh); | ||
854 | if (err) { | ||
855 | if (netif_msg_rx_err(dev)) | ||
856 | netdev_err(dev->net, "Invalid HIP header %d\n", | ||
857 | err); | ||
858 | /* dev->net->stats.rx_errors incremented by caller */ | ||
859 | dev->net->stats.rx_length_errors++; | ||
860 | return 0; | ||
861 | } | ||
862 | |||
863 | /* Validate Extended HIP header */ | ||
864 | if (!hh.extmsgid.is_present | ||
865 | || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) { | ||
866 | if (netif_msg_rx_err(dev)) | ||
867 | netdev_err(dev->net, "HIP/ETH: Invalid pkt\n"); | ||
868 | |||
869 | dev->net->stats.rx_frame_errors++; | ||
870 | /* dev->net->stats.rx_errors incremented by caller */; | ||
871 | return 0; | ||
872 | } | ||
873 | |||
874 | skb_pull(skb, hh.hdrlen); | ||
875 | |||
876 | /* We are going to accept this packet, prepare it */ | ||
877 | memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl, | ||
878 | ETH_HLEN); | ||
879 | |||
880 | /* Last packet in batch handled by usbnet */ | ||
881 | if (hh.payload_len.word == skb->len) | ||
882 | return 1; | ||
883 | |||
884 | new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word); | ||
885 | if (new_skb) | ||
886 | usbnet_skb_return(dev, new_skb); | ||
887 | |||
888 | } /* while */ | ||
889 | |||
890 | return 0; | ||
891 | } | ||
892 | |||
893 | /* ---------------------------- Transmit data path ----------------------*/ | ||
894 | struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | ||
895 | gfp_t flags) | ||
896 | { | ||
897 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
898 | u16 len; | ||
899 | bool need_tail; | ||
900 | |||
901 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
902 | if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) { | ||
903 | /* enough head room as is? */ | ||
904 | if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) { | ||
905 | /* Save the Eth/IP length and set up HIP hdr */ | ||
906 | len = skb->len; | ||
907 | skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN); | ||
908 | /* Handle ZLP issue */ | ||
909 | need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN) | ||
910 | % dev->maxpacket == 0); | ||
911 | if (need_tail) { | ||
912 | if (unlikely(skb_tailroom(skb) == 0)) { | ||
913 | netdev_err(dev->net, "tx_fixup: " | ||
914 | "no room for packet\n"); | ||
915 | dev_kfree_skb_any(skb); | ||
916 | return NULL; | ||
917 | } else { | ||
918 | skb->data[skb->len] = 0; | ||
919 | __skb_put(skb, 1); | ||
920 | len = len + 1; | ||
921 | } | ||
922 | } | ||
923 | build_hip(skb->data, len, priv); | ||
924 | return skb; | ||
925 | } else { | ||
926 | /* | ||
927 | * compensate in the future if necessary | ||
928 | */ | ||
929 | netdev_err(dev->net, "tx_fixup: no room for HIP\n"); | ||
930 | } /* headroom */ | ||
931 | } | ||
932 | |||
933 | if (!priv->link_up) | ||
934 | dev->net->stats.tx_carrier_errors++; | ||
935 | |||
936 | /* tx_dropped incremented by usbnet */ | ||
937 | |||
938 | /* filter the packet out, release it */ | ||
939 | dev_kfree_skb_any(skb); | ||
940 | return NULL; | ||
941 | } | ||
942 | |||
943 | static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; | ||
944 | static const struct sierra_net_info_data sierra_net_info_data_68A3 = { | ||
945 | .rx_urb_size = 8 * 1024, | ||
946 | .whitelist = { | ||
947 | .infolen = ARRAY_SIZE(sierra_net_ifnum_list), | ||
948 | .ifaceinfo = sierra_net_ifnum_list | ||
949 | } | ||
950 | }; | ||
951 | |||
952 | static const struct driver_info sierra_net_info_68A3 = { | ||
953 | .description = "Sierra Wireless USB-to-WWAN Modem", | ||
954 | .flags = FLAG_WWAN | FLAG_SEND_ZLP, | ||
955 | .bind = sierra_net_bind, | ||
956 | .unbind = sierra_net_unbind, | ||
957 | .status = sierra_net_status, | ||
958 | .rx_fixup = sierra_net_rx_fixup, | ||
959 | .tx_fixup = sierra_net_tx_fixup, | ||
960 | .data = (unsigned long)&sierra_net_info_data_68A3, | ||
961 | }; | ||
962 | |||
963 | static const struct usb_device_id products[] = { | ||
964 | {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ | ||
965 | .driver_info = (unsigned long) &sierra_net_info_68A3}, | ||
966 | |||
967 | {}, /* last item */ | ||
968 | }; | ||
969 | MODULE_DEVICE_TABLE(usb, products); | ||
970 | |||
971 | /* We are based on usbnet, so let it handle the USB driver specifics */ | ||
972 | static struct usb_driver sierra_net_driver = { | ||
973 | .name = "sierra_net", | ||
974 | .id_table = products, | ||
975 | .probe = usbnet_probe, | ||
976 | .disconnect = usbnet_disconnect, | ||
977 | .suspend = usbnet_suspend, | ||
978 | .resume = usbnet_resume, | ||
979 | .no_dynamic_id = 1, | ||
980 | }; | ||
981 | |||
982 | static int __init sierra_net_init(void) | ||
983 | { | ||
984 | BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data) | ||
985 | < sizeof(struct cdc_state)); | ||
986 | |||
987 | return usb_register(&sierra_net_driver); | ||
988 | } | ||
989 | |||
990 | static void __exit sierra_net_exit(void) | ||
991 | { | ||
992 | usb_deregister(&sierra_net_driver); | ||
993 | } | ||
994 | |||
995 | module_exit(sierra_net_exit); | ||
996 | module_init(sierra_net_init); | ||
997 | |||
998 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
999 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
1000 | MODULE_VERSION(DRIVER_VERSION); | ||
1001 | MODULE_LICENSE("GPL"); | ||
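The ZLP handling in sierra_net_tx_fixup() above pads the frame with one zero byte whenever the HIP-encapsulated length would land exactly on a USB max-packet boundary, so the bulk transfer never has to be terminated with a zero-length packet. A minimal userspace-style sketch of just that check (pad_for_zlp and its parameters are illustrative, not part of the driver; the caller is assumed to guarantee one spare byte of tailroom when padding is needed):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: pad by one byte when the encapsulated frame would end exactly
     * on a max-packet boundary (the zero-length-packet case). */
    static size_t pad_for_zlp(uint8_t *buf, size_t len, size_t hdr_len,
                              size_t max_packet)
    {
            if ((len + hdr_len) % max_packet == 0) {
                    buf[len] = 0;   /* one pad byte moves us off the boundary */
                    len++;
            }
            return len;
    }

The driver does the same thing on the sk_buff: it writes the pad byte, extends the data area with __skb_put(skb, 1), and only then lets build_hip() record the padded length.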
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 6fb783ce20b9..b0577dd1a42d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -327,6 +327,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) | |||
327 | struct scatterlist sg[2]; | 327 | struct scatterlist sg[2]; |
328 | int err; | 328 | int err; |
329 | 329 | ||
330 | sg_init_table(sg, 2); | ||
330 | skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); | 331 | skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); |
331 | if (unlikely(!skb)) | 332 | if (unlikely(!skb)) |
332 | return -ENOMEM; | 333 | return -ENOMEM; |
@@ -352,6 +353,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) | |||
352 | char *p; | 353 | char *p; |
353 | int i, err, offset; | 354 | int i, err, offset; |
354 | 355 | ||
356 | sg_init_table(sg, MAX_SKB_FRAGS + 2); | ||
355 | /* page in sg[MAX_SKB_FRAGS + 1] is list tail */ | 357 | /* page in sg[MAX_SKB_FRAGS + 1] is list tail */ |
356 | for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { | 358 | for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { |
357 | first = get_a_page(vi, gfp); | 359 | first = get_a_page(vi, gfp); |
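The two virtio_net hunks above only add sg_init_table() before the scatterlists are filled in. An on-stack scatterlist starts out as stack garbage, so without the call the end-of-table marker (and the debug magic checked under CONFIG_DEBUG_SG) is undefined. The usual kernel pattern, sketched with placeholder buffers (hdr, data and data_len are illustrative):

    struct scatterlist sg[2];

    sg_init_table(sg, 2);                   /* clear entries, mark sg[1] as the end */
    sg_set_buf(&sg[0], hdr, sizeof(*hdr));
    sg_set_buf(&sg[1], data, data_len);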
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index b9b9d6b01c0b..941f053e650e 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c | |||
@@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev) | |||
628 | ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); | 628 | ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); |
629 | } | 629 | } |
630 | 630 | ||
631 | static void ppp_close(struct net_device *dev) | ||
632 | { | ||
633 | ppp_tx_flush(); | ||
634 | } | ||
635 | |||
631 | static struct hdlc_proto proto = { | 636 | static struct hdlc_proto proto = { |
632 | .start = ppp_start, | 637 | .start = ppp_start, |
633 | .stop = ppp_stop, | 638 | .stop = ppp_stop, |
639 | .close = ppp_close, | ||
634 | .type_trans = ppp_type_trans, | 640 | .type_trans = ppp_type_trans, |
635 | .ioctl = ppp_ioctl, | 641 | .ioctl = ppp_ioctl, |
636 | .netif_rx = ppp_rx, | 642 | .netif_rx = ppp_rx, |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 67ca4e5a6017..115e1aeedb59 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -1532,8 +1532,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
1532 | all_wiphys_idle = ath9k_all_wiphys_idle(sc); | 1532 | all_wiphys_idle = ath9k_all_wiphys_idle(sc); |
1533 | ath9k_set_wiphy_idle(aphy, idle); | 1533 | ath9k_set_wiphy_idle(aphy, idle); |
1534 | 1534 | ||
1535 | if (!idle && all_wiphys_idle) | 1535 | enable_radio = (!idle && all_wiphys_idle); |
1536 | enable_radio = true; | ||
1537 | 1536 | ||
1538 | /* | 1537 | /* |
1539 | * After we unlock here it's possible another wiphy | 1538 | * After we unlock here it's possible another wiphy |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 83c52a682622..8972166386cb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
@@ -2015,7 +2015,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2015 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " | 2015 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " |
2016 | "%d index %d\n", scd_ssn , index); | 2016 | "%d index %d\n", scd_ssn , index); |
2017 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2017 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
2018 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | 2018 | if (qc) |
2019 | iwl_free_tfds_in_queue(priv, sta_id, | ||
2020 | tid, freed); | ||
2019 | 2021 | ||
2020 | if (priv->mac80211_registered && | 2022 | if (priv->mac80211_registered && |
2021 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && | 2023 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && |
@@ -2041,14 +2043,17 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2041 | tx_resp->failure_frame); | 2043 | tx_resp->failure_frame); |
2042 | 2044 | ||
2043 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2045 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
2044 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | 2046 | if (qc && likely(sta_id != IWL_INVALID_STATION)) |
2047 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | ||
2048 | else if (sta_id == IWL_INVALID_STATION) | ||
2049 | IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); | ||
2045 | 2050 | ||
2046 | if (priv->mac80211_registered && | 2051 | if (priv->mac80211_registered && |
2047 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) | 2052 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) |
2048 | iwl_wake_queue(priv, txq_id); | 2053 | iwl_wake_queue(priv, txq_id); |
2049 | } | 2054 | } |
2050 | 2055 | if (qc && likely(sta_id != IWL_INVALID_STATION)) | |
2051 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); | 2056 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); |
2052 | 2057 | ||
2053 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) | 2058 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) |
2054 | IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); | 2059 | IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index c4844adff92a..92b3e64fc14d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
@@ -259,7 +259,7 @@ static struct iwl_lib_ops iwl6000_lib = { | |||
259 | EEPROM_5000_REG_BAND_3_CHANNELS, | 259 | EEPROM_5000_REG_BAND_3_CHANNELS, |
260 | EEPROM_5000_REG_BAND_4_CHANNELS, | 260 | EEPROM_5000_REG_BAND_4_CHANNELS, |
261 | EEPROM_5000_REG_BAND_5_CHANNELS, | 261 | EEPROM_5000_REG_BAND_5_CHANNELS, |
262 | EEPROM_5000_REG_BAND_24_HT40_CHANNELS, | 262 | EEPROM_6000_REG_BAND_24_HT40_CHANNELS, |
263 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS | 263 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS |
264 | }, | 264 | }, |
265 | .verify_signature = iwlcore_eeprom_verify_signature, | 265 | .verify_signature = iwlcore_eeprom_verify_signature, |
@@ -323,7 +323,7 @@ static struct iwl_lib_ops iwl6050_lib = { | |||
323 | EEPROM_5000_REG_BAND_3_CHANNELS, | 323 | EEPROM_5000_REG_BAND_3_CHANNELS, |
324 | EEPROM_5000_REG_BAND_4_CHANNELS, | 324 | EEPROM_5000_REG_BAND_4_CHANNELS, |
325 | EEPROM_5000_REG_BAND_5_CHANNELS, | 325 | EEPROM_5000_REG_BAND_5_CHANNELS, |
326 | EEPROM_5000_REG_BAND_24_HT40_CHANNELS, | 326 | EEPROM_6000_REG_BAND_24_HT40_CHANNELS, |
327 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS | 327 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS |
328 | }, | 328 | }, |
329 | .verify_signature = iwlcore_eeprom_verify_signature, | 329 | .verify_signature = iwlcore_eeprom_verify_signature, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 35f819ac87a3..1460116d329f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c | |||
@@ -346,6 +346,17 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags) | |||
346 | !!(rate_n_flags & RATE_MCS_ANT_C_MSK); | 346 | !!(rate_n_flags & RATE_MCS_ANT_C_MSK); |
347 | } | 347 | } |
348 | 348 | ||
349 | /* | ||
350 | * Static function to get the expected throughput from an iwl_scale_tbl_info | ||
351 | * that wraps a NULL pointer check | ||
352 | */ | ||
353 | static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) | ||
354 | { | ||
355 | if (tbl->expected_tpt) | ||
356 | return tbl->expected_tpt[rs_index]; | ||
357 | return 0; | ||
358 | } | ||
359 | |||
349 | /** | 360 | /** |
350 | * rs_collect_tx_data - Update the success/failure sliding window | 361 | * rs_collect_tx_data - Update the success/failure sliding window |
351 | * | 362 | * |
@@ -353,19 +364,21 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags) | |||
353 | * at this rate. window->data contains the bitmask of successful | 364 | * at this rate. window->data contains the bitmask of successful |
354 | * packets. | 365 | * packets. |
355 | */ | 366 | */ |
356 | static int rs_collect_tx_data(struct iwl_rate_scale_data *windows, | 367 | static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, |
357 | int scale_index, s32 tpt, int attempts, | 368 | int scale_index, int attempts, int successes) |
358 | int successes) | ||
359 | { | 369 | { |
360 | struct iwl_rate_scale_data *window = NULL; | 370 | struct iwl_rate_scale_data *window = NULL; |
361 | static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); | 371 | static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); |
362 | s32 fail_count; | 372 | s32 fail_count, tpt; |
363 | 373 | ||
364 | if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) | 374 | if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) |
365 | return -EINVAL; | 375 | return -EINVAL; |
366 | 376 | ||
367 | /* Select window for current tx bit rate */ | 377 | /* Select window for current tx bit rate */ |
368 | window = &(windows[scale_index]); | 378 | window = &(tbl->win[scale_index]); |
379 | |||
380 | /* Get expected throughput */ | ||
381 | tpt = get_expected_tpt(tbl, scale_index); | ||
369 | 382 | ||
370 | /* | 383 | /* |
371 | * Keep track of only the latest 62 tx frame attempts in this rate's | 384 | * Keep track of only the latest 62 tx frame attempts in this rate's |
@@ -739,16 +752,6 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a, | |||
739 | return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && | 752 | return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && |
740 | (a->is_SGI == b->is_SGI); | 753 | (a->is_SGI == b->is_SGI); |
741 | } | 754 | } |
742 | /* | ||
743 | * Static function to get the expected throughput from an iwl_scale_tbl_info | ||
744 | * that wraps a NULL pointer check | ||
745 | */ | ||
746 | static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) | ||
747 | { | ||
748 | if (tbl->expected_tpt) | ||
749 | return tbl->expected_tpt[rs_index]; | ||
750 | return 0; | ||
751 | } | ||
752 | 755 | ||
753 | /* | 756 | /* |
754 | * mac80211 sends us Tx status | 757 | * mac80211 sends us Tx status |
@@ -765,12 +768,10 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
765 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 768 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
766 | struct iwl_priv *priv = (struct iwl_priv *)priv_r; | 769 | struct iwl_priv *priv = (struct iwl_priv *)priv_r; |
767 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 770 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
768 | struct iwl_rate_scale_data *window = NULL; | ||
769 | enum mac80211_rate_control_flags mac_flags; | 771 | enum mac80211_rate_control_flags mac_flags; |
770 | u32 tx_rate; | 772 | u32 tx_rate; |
771 | struct iwl_scale_tbl_info tbl_type; | 773 | struct iwl_scale_tbl_info tbl_type; |
772 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl; | 774 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; |
773 | s32 tpt = 0; | ||
774 | 775 | ||
775 | IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); | 776 | IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); |
776 | 777 | ||
@@ -853,7 +854,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
853 | IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); | 854 | IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); |
854 | return; | 855 | return; |
855 | } | 856 | } |
856 | window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]); | ||
857 | 857 | ||
858 | /* | 858 | /* |
859 | * Updating the frame history depends on whether packets were | 859 | * Updating the frame history depends on whether packets were |
@@ -866,8 +866,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
866 | tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); | 866 | tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); |
867 | rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, | 867 | rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, |
868 | &rs_index); | 868 | &rs_index); |
869 | tpt = get_expected_tpt(curr_tbl, rs_index); | 869 | rs_collect_tx_data(curr_tbl, rs_index, |
870 | rs_collect_tx_data(window, rs_index, tpt, | ||
871 | info->status.ampdu_ack_len, | 870 | info->status.ampdu_ack_len, |
872 | info->status.ampdu_ack_map); | 871 | info->status.ampdu_ack_map); |
873 | 872 | ||
@@ -897,19 +896,13 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
897 | * table as active/search. | 896 | * table as active/search. |
898 | */ | 897 | */ |
899 | if (table_type_matches(&tbl_type, curr_tbl)) | 898 | if (table_type_matches(&tbl_type, curr_tbl)) |
900 | tpt = get_expected_tpt(curr_tbl, rs_index); | 899 | tmp_tbl = curr_tbl; |
901 | else if (table_type_matches(&tbl_type, other_tbl)) | 900 | else if (table_type_matches(&tbl_type, other_tbl)) |
902 | tpt = get_expected_tpt(other_tbl, rs_index); | 901 | tmp_tbl = other_tbl; |
903 | else | 902 | else |
904 | continue; | 903 | continue; |
905 | 904 | rs_collect_tx_data(tmp_tbl, rs_index, 1, | |
906 | /* Constants mean 1 transmission, 0 successes */ | 905 | i < retries ? 0 : legacy_success); |
907 | if (i < retries) | ||
908 | rs_collect_tx_data(window, rs_index, tpt, 1, | ||
909 | 0); | ||
910 | else | ||
911 | rs_collect_tx_data(window, rs_index, tpt, 1, | ||
912 | legacy_success); | ||
913 | } | 906 | } |
914 | 907 | ||
915 | /* Update success/fail counts if not searching for new mode */ | 908 | /* Update success/fail counts if not searching for new mode */ |
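The net effect of the rs_collect_tx_data() rework above is that the per-rate success window and the expected throughput are now always taken from the same table: the legacy-retry loop used to pick tpt from whichever of curr_tbl/other_tbl matched while still updating curr_tbl's window array, whereas the new code passes the matching table itself. The new call site and the two lines inside rs_collect_tx_data() that replace the old window/tpt arguments:

    rs_collect_tx_data(tmp_tbl, rs_index, 1,
                       i < retries ? 0 : legacy_success);

    /* inside rs_collect_tx_data() */
    window = &(tbl->win[scale_index]);
    tpt = get_expected_tpt(tbl, scale_index);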
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 8b8e3e1cbb44..bdff56583e11 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -3331,6 +3331,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv) | |||
3331 | 3331 | ||
3332 | cancel_delayed_work_sync(&priv->init_alive_start); | 3332 | cancel_delayed_work_sync(&priv->init_alive_start); |
3333 | cancel_delayed_work(&priv->scan_check); | 3333 | cancel_delayed_work(&priv->scan_check); |
3334 | cancel_work_sync(&priv->start_internal_scan); | ||
3334 | cancel_delayed_work(&priv->alive_start); | 3335 | cancel_delayed_work(&priv->alive_start); |
3335 | cancel_work_sync(&priv->beacon_update); | 3336 | cancel_work_sync(&priv->beacon_update); |
3336 | del_timer_sync(&priv->statistics_periodic); | 3337 | del_timer_sync(&priv->statistics_periodic); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c index de3b3f403d1f..8b516c5ff0bb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-calib.c | |||
@@ -808,6 +808,18 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, | |||
808 | } | 808 | } |
809 | } | 809 | } |
810 | 810 | ||
811 | /* | ||
812 | * The above algorithm sometimes fails when the ucode | ||
813 | * reports 0 for all chains. It's not clear why that | ||
814 | * happens to start with, but it is then causing trouble | ||
815 | * because this can make us enable more chains than the | ||
816 | * hardware really has. | ||
817 | * | ||
818 | * To be safe, simply mask out any chains that we know | ||
819 | * are not on the device. | ||
820 | */ | ||
821 | active_chains &= priv->hw_params.valid_rx_ant; | ||
822 | |||
811 | num_tx_chains = 0; | 823 | num_tx_chains = 0; |
812 | for (i = 0; i < NUM_RX_CHAINS; i++) { | 824 | for (i = 0; i < NUM_RX_CHAINS; i++) { |
813 | /* loops on all the bits of | 825 | /* loops on all the bits of |
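The effect of the added mask in iwl-calib.c is easiest to see with concrete numbers (the values below are illustrative, not taken from a real device):

    active_chains = 0x7;                              /* calibration claims chains A, B and C */
    /* priv->hw_params.valid_rx_ant == 0x3 on a two-chain device (A and B only) */
    active_chains &= priv->hw_params.valid_rx_ant;    /* -> 0x3, chain C stays disabled */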
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index db050b811232..049b652bcb5e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
@@ -308,10 +308,13 @@ int iwl_hw_nic_init(struct iwl_priv *priv) | |||
308 | 308 | ||
309 | spin_unlock_irqrestore(&priv->lock, flags); | 309 | spin_unlock_irqrestore(&priv->lock, flags); |
310 | 310 | ||
311 | /* Allocate and init all Tx and Command queues */ | 311 | /* Allocate or reset and init all Tx and Command queues */ |
312 | ret = iwl_txq_ctx_reset(priv); | 312 | if (!priv->txq) { |
313 | if (ret) | 313 | ret = iwl_txq_ctx_alloc(priv); |
314 | return ret; | 314 | if (ret) |
315 | return ret; | ||
316 | } else | ||
317 | iwl_txq_ctx_reset(priv); | ||
315 | 318 | ||
316 | set_bit(STATUS_INIT, &priv->status); | 319 | set_bit(STATUS_INIT, &priv->status); |
317 | 320 | ||
@@ -3355,7 +3358,6 @@ static void iwl_force_rf_reset(struct iwl_priv *priv) | |||
3355 | */ | 3358 | */ |
3356 | IWL_DEBUG_INFO(priv, "perform radio reset.\n"); | 3359 | IWL_DEBUG_INFO(priv, "perform radio reset.\n"); |
3357 | iwl_internal_short_hw_scan(priv); | 3360 | iwl_internal_short_hw_scan(priv); |
3358 | return; | ||
3359 | } | 3361 | } |
3360 | 3362 | ||
3361 | 3363 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 4ef7739f9e8e..36940a9ec6b9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h | |||
@@ -442,7 +442,8 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); | |||
442 | /***************************************************** | 442 | /***************************************************** |
443 | * TX | 443 | * TX |
444 | ******************************************************/ | 444 | ******************************************************/ |
445 | int iwl_txq_ctx_reset(struct iwl_priv *priv); | 445 | int iwl_txq_ctx_alloc(struct iwl_priv *priv); |
446 | void iwl_txq_ctx_reset(struct iwl_priv *priv); | ||
446 | void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); | 447 | void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); |
447 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, | 448 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, |
448 | struct iwl_tx_queue *txq, | 449 | struct iwl_tx_queue *txq, |
@@ -456,6 +457,8 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv, | |||
456 | void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); | 457 | void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); |
457 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, | 458 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, |
458 | int slots_num, u32 txq_id); | 459 | int slots_num, u32 txq_id); |
460 | void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, | ||
461 | int slots_num, u32 txq_id); | ||
459 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); | 462 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); |
460 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); | 463 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); |
461 | int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); | 464 | int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); |
@@ -503,7 +506,7 @@ void iwl_init_scan_params(struct iwl_priv *priv); | |||
503 | int iwl_scan_cancel(struct iwl_priv *priv); | 506 | int iwl_scan_cancel(struct iwl_priv *priv); |
504 | int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); | 507 | int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); |
505 | int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); | 508 | int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); |
506 | int iwl_internal_short_hw_scan(struct iwl_priv *priv); | 509 | void iwl_internal_short_hw_scan(struct iwl_priv *priv); |
507 | int iwl_force_reset(struct iwl_priv *priv, int mode); | 510 | int iwl_force_reset(struct iwl_priv *priv, int mode); |
508 | u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, | 511 | u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, |
509 | const u8 *ie, int ie_len, int left); | 512 | const u8 *ie, int ie_len, int left); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index 6054c5fba0c1..ef1720a852e9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h | |||
@@ -1296,6 +1296,7 @@ struct iwl_priv { | |||
1296 | struct work_struct tt_work; | 1296 | struct work_struct tt_work; |
1297 | struct work_struct ct_enter; | 1297 | struct work_struct ct_enter; |
1298 | struct work_struct ct_exit; | 1298 | struct work_struct ct_exit; |
1299 | struct work_struct start_internal_scan; | ||
1299 | 1300 | ||
1300 | struct tasklet_struct irq_tasklet; | 1301 | struct tasklet_struct irq_tasklet; |
1301 | 1302 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 4e1ba824dc50..8171c701e4e1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h | |||
@@ -203,6 +203,10 @@ struct iwl_eeprom_enhanced_txpwr { | |||
203 | #define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\ | 203 | #define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\ |
204 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ | 204 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ |
205 | 205 | ||
206 | /* 6000 regulatory - indirect access */ | ||
207 | #define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\ | ||
208 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ | ||
209 | |||
206 | /* 6000 and up regulatory tx power - indirect access */ | 210 | /* 6000 and up regulatory tx power - indirect access */ |
207 | /* max. elements per section */ | 211 | /* max. elements per section */ |
208 | #define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8) | 212 | #define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8) |
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index 9ab0e412bf10..12e455a4b90e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c | |||
@@ -470,6 +470,8 @@ EXPORT_SYMBOL(iwl_init_scan_params); | |||
470 | 470 | ||
471 | static int iwl_scan_initiate(struct iwl_priv *priv) | 471 | static int iwl_scan_initiate(struct iwl_priv *priv) |
472 | { | 472 | { |
473 | WARN_ON(!mutex_is_locked(&priv->mutex)); | ||
474 | |||
473 | IWL_DEBUG_INFO(priv, "Starting scan...\n"); | 475 | IWL_DEBUG_INFO(priv, "Starting scan...\n"); |
474 | set_bit(STATUS_SCANNING, &priv->status); | 476 | set_bit(STATUS_SCANNING, &priv->status); |
475 | priv->is_internal_short_scan = false; | 477 | priv->is_internal_short_scan = false; |
@@ -547,24 +549,31 @@ EXPORT_SYMBOL(iwl_mac_hw_scan); | |||
547 | * internal short scan, this function should only be called while associated. | 549 | * internal short scan, this function should only be called while associated. |
548 | * It will reset and tune the radio to prevent possible RF related problems | 550 | * It will reset and tune the radio to prevent possible RF related problems |
549 | */ | 551 | */ |
550 | int iwl_internal_short_hw_scan(struct iwl_priv *priv) | 552 | void iwl_internal_short_hw_scan(struct iwl_priv *priv) |
551 | { | 553 | { |
552 | int ret = 0; | 554 | queue_work(priv->workqueue, &priv->start_internal_scan); |
555 | } | ||
556 | |||
557 | static void iwl_bg_start_internal_scan(struct work_struct *work) | ||
558 | { | ||
559 | struct iwl_priv *priv = | ||
560 | container_of(work, struct iwl_priv, start_internal_scan); | ||
561 | |||
562 | mutex_lock(&priv->mutex); | ||
553 | 563 | ||
554 | if (!iwl_is_ready_rf(priv)) { | 564 | if (!iwl_is_ready_rf(priv)) { |
555 | ret = -EIO; | ||
556 | IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); | 565 | IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); |
557 | goto out; | 566 | goto unlock; |
558 | } | 567 | } |
568 | |||
559 | if (test_bit(STATUS_SCANNING, &priv->status)) { | 569 | if (test_bit(STATUS_SCANNING, &priv->status)) { |
560 | IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); | 570 | IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); |
561 | ret = -EAGAIN; | 571 | goto unlock; |
562 | goto out; | ||
563 | } | 572 | } |
573 | |||
564 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | 574 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { |
565 | IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); | 575 | IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); |
566 | ret = -EAGAIN; | 576 | goto unlock; |
567 | goto out; | ||
568 | } | 577 | } |
569 | 578 | ||
570 | priv->scan_bands = 0; | 579 | priv->scan_bands = 0; |
@@ -577,9 +586,8 @@ int iwl_internal_short_hw_scan(struct iwl_priv *priv) | |||
577 | set_bit(STATUS_SCANNING, &priv->status); | 586 | set_bit(STATUS_SCANNING, &priv->status); |
578 | priv->is_internal_short_scan = true; | 587 | priv->is_internal_short_scan = true; |
579 | queue_work(priv->workqueue, &priv->request_scan); | 588 | queue_work(priv->workqueue, &priv->request_scan); |
580 | 589 | unlock: | |
581 | out: | 590 | mutex_unlock(&priv->mutex); |
582 | return ret; | ||
583 | } | 591 | } |
584 | EXPORT_SYMBOL(iwl_internal_short_hw_scan); | 592 | EXPORT_SYMBOL(iwl_internal_short_hw_scan); |
585 | 593 | ||
@@ -965,6 +973,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv) | |||
965 | INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); | 973 | INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); |
966 | INIT_WORK(&priv->request_scan, iwl_bg_request_scan); | 974 | INIT_WORK(&priv->request_scan, iwl_bg_request_scan); |
967 | INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); | 975 | INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); |
976 | INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan); | ||
968 | INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); | 977 | INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); |
969 | } | 978 | } |
970 | EXPORT_SYMBOL(iwl_setup_scan_deferred_work); | 979 | EXPORT_SYMBOL(iwl_setup_scan_deferred_work); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index f0b7e6cfbe4f..8dd0c036d547 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -194,10 +194,34 @@ void iwl_cmd_queue_free(struct iwl_priv *priv) | |||
194 | struct iwl_queue *q = &txq->q; | 194 | struct iwl_queue *q = &txq->q; |
195 | struct device *dev = &priv->pci_dev->dev; | 195 | struct device *dev = &priv->pci_dev->dev; |
196 | int i; | 196 | int i; |
197 | bool huge = false; | ||
197 | 198 | ||
198 | if (q->n_bd == 0) | 199 | if (q->n_bd == 0) |
199 | return; | 200 | return; |
200 | 201 | ||
202 | for (; q->read_ptr != q->write_ptr; | ||
203 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
204 | /* we have no way to tell if it is a huge cmd ATM */ | ||
205 | i = get_cmd_index(q, q->read_ptr, 0); | ||
206 | |||
207 | if (txq->meta[i].flags & CMD_SIZE_HUGE) { | ||
208 | huge = true; | ||
209 | continue; | ||
210 | } | ||
211 | |||
212 | pci_unmap_single(priv->pci_dev, | ||
213 | pci_unmap_addr(&txq->meta[i], mapping), | ||
214 | pci_unmap_len(&txq->meta[i], len), | ||
215 | PCI_DMA_BIDIRECTIONAL); | ||
216 | } | ||
217 | if (huge) { | ||
218 | i = q->n_window; | ||
219 | pci_unmap_single(priv->pci_dev, | ||
220 | pci_unmap_addr(&txq->meta[i], mapping), | ||
221 | pci_unmap_len(&txq->meta[i], len), | ||
222 | PCI_DMA_BIDIRECTIONAL); | ||
223 | } | ||
224 | |||
201 | /* De-alloc array of command/tx buffers */ | 225 | /* De-alloc array of command/tx buffers */ |
202 | for (i = 0; i <= TFD_CMD_SLOTS; i++) | 226 | for (i = 0; i <= TFD_CMD_SLOTS; i++) |
203 | kfree(txq->cmd[i]); | 227 | kfree(txq->cmd[i]); |
@@ -410,6 +434,26 @@ out_free_arrays: | |||
410 | } | 434 | } |
411 | EXPORT_SYMBOL(iwl_tx_queue_init); | 435 | EXPORT_SYMBOL(iwl_tx_queue_init); |
412 | 436 | ||
437 | void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, | ||
438 | int slots_num, u32 txq_id) | ||
439 | { | ||
440 | int actual_slots = slots_num; | ||
441 | |||
442 | if (txq_id == IWL_CMD_QUEUE_NUM) | ||
443 | actual_slots++; | ||
444 | |||
445 | memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots); | ||
446 | |||
447 | txq->need_update = 0; | ||
448 | |||
449 | /* Initialize queue's high/low-water marks, and head/tail indexes */ | ||
450 | iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); | ||
451 | |||
452 | /* Tell device where to find queue */ | ||
453 | priv->cfg->ops->lib->txq_init(priv, txq); | ||
454 | } | ||
455 | EXPORT_SYMBOL(iwl_tx_queue_reset); | ||
456 | |||
413 | /** | 457 | /** |
414 | * iwl_hw_txq_ctx_free - Free TXQ Context | 458 | * iwl_hw_txq_ctx_free - Free TXQ Context |
415 | * | 459 | * |
@@ -421,8 +465,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv) | |||
421 | 465 | ||
422 | /* Tx queues */ | 466 | /* Tx queues */ |
423 | if (priv->txq) { | 467 | if (priv->txq) { |
424 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; | 468 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) |
425 | txq_id++) | ||
426 | if (txq_id == IWL_CMD_QUEUE_NUM) | 469 | if (txq_id == IWL_CMD_QUEUE_NUM) |
427 | iwl_cmd_queue_free(priv); | 470 | iwl_cmd_queue_free(priv); |
428 | else | 471 | else |
@@ -438,15 +481,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv) | |||
438 | EXPORT_SYMBOL(iwl_hw_txq_ctx_free); | 481 | EXPORT_SYMBOL(iwl_hw_txq_ctx_free); |
439 | 482 | ||
440 | /** | 483 | /** |
441 | * iwl_txq_ctx_reset - Reset TX queue context | 484 | * iwl_txq_ctx_alloc - allocate TX queue context |
442 | * Destroys all DMA structures and initialize them again | 485 | * Allocate all Tx DMA structures and initialize them |
443 | * | 486 | * |
444 | * @param priv | 487 | * @param priv |
445 | * @return error code | 488 | * @return error code |
446 | */ | 489 | */ |
447 | int iwl_txq_ctx_reset(struct iwl_priv *priv) | 490 | int iwl_txq_ctx_alloc(struct iwl_priv *priv) |
448 | { | 491 | { |
449 | int ret = 0; | 492 | int ret; |
450 | int txq_id, slots_num; | 493 | int txq_id, slots_num; |
451 | unsigned long flags; | 494 | unsigned long flags; |
452 | 495 | ||
@@ -504,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv) | |||
504 | return ret; | 547 | return ret; |
505 | } | 548 | } |
506 | 549 | ||
550 | void iwl_txq_ctx_reset(struct iwl_priv *priv) | ||
551 | { | ||
552 | int txq_id, slots_num; | ||
553 | unsigned long flags; | ||
554 | |||
555 | spin_lock_irqsave(&priv->lock, flags); | ||
556 | |||
557 | /* Turn off all Tx DMA fifos */ | ||
558 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | ||
559 | |||
560 | /* Tell NIC where to find the "keep warm" buffer */ | ||
561 | iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); | ||
562 | |||
563 | spin_unlock_irqrestore(&priv->lock, flags); | ||
564 | |||
565 | /* Alloc and init all Tx queues, including the command queue (#4) */ | ||
566 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { | ||
567 | slots_num = txq_id == IWL_CMD_QUEUE_NUM ? | ||
568 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
569 | iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); | ||
570 | } | ||
571 | } | ||
572 | |||
507 | /** | 573 | /** |
508 | * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory | 574 | * iwl_txq_ctx_stop - Stop all Tx DMA channels |
509 | */ | 575 | */ |
510 | void iwl_txq_ctx_stop(struct iwl_priv *priv) | 576 | void iwl_txq_ctx_stop(struct iwl_priv *priv) |
511 | { | 577 | { |
@@ -525,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv) | |||
525 | 1000); | 591 | 1000); |
526 | } | 592 | } |
527 | spin_unlock_irqrestore(&priv->lock, flags); | 593 | spin_unlock_irqrestore(&priv->lock, flags); |
528 | |||
529 | /* Deallocate memory for all Tx queues */ | ||
530 | iwl_hw_txq_ctx_free(priv); | ||
531 | } | 594 | } |
532 | EXPORT_SYMBOL(iwl_txq_ctx_stop); | 595 | EXPORT_SYMBOL(iwl_txq_ctx_stop); |
533 | 596 | ||
@@ -1050,6 +1113,14 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
1050 | 1113 | ||
1051 | spin_lock_irqsave(&priv->hcmd_lock, flags); | 1114 | spin_lock_irqsave(&priv->hcmd_lock, flags); |
1052 | 1115 | ||
1116 | /* If this is a huge cmd, mark the huge flag also on the meta.flags | ||
1117 | * of the _original_ cmd. This is used for DMA mapping clean up. | ||
1118 | */ | ||
1119 | if (cmd->flags & CMD_SIZE_HUGE) { | ||
1120 | idx = get_cmd_index(q, q->write_ptr, 0); | ||
1121 | txq->meta[idx].flags = CMD_SIZE_HUGE; | ||
1122 | } | ||
1123 | |||
1053 | idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); | 1124 | idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); |
1054 | out_cmd = txq->cmd[idx]; | 1125 | out_cmd = txq->cmd[idx]; |
1055 | out_meta = &txq->meta[idx]; | 1126 | out_meta = &txq->meta[idx]; |
@@ -1227,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
1227 | bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); | 1298 | bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); |
1228 | struct iwl_device_cmd *cmd; | 1299 | struct iwl_device_cmd *cmd; |
1229 | struct iwl_cmd_meta *meta; | 1300 | struct iwl_cmd_meta *meta; |
1301 | struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; | ||
1230 | 1302 | ||
1231 | /* If a Tx command is being handled and it isn't in the actual | 1303 | /* If a Tx command is being handled and it isn't in the actual |
1232 | * command queue then a command routing bug has been introduced | 1304 | * command queue then a command routing bug has been introduced |
@@ -1240,9 +1312,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
1240 | return; | 1312 | return; |
1241 | } | 1313 | } |
1242 | 1314 | ||
1243 | cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); | 1315 | /* If this is a huge cmd, clear the huge flag on the meta.flags |
1244 | cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; | 1316 | * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap |
1245 | meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index]; | 1317 | * the DMA buffer for the scan (huge) command. |
1318 | */ | ||
1319 | if (huge) { | ||
1320 | cmd_index = get_cmd_index(&txq->q, index, 0); | ||
1321 | txq->meta[cmd_index].flags = 0; | ||
1322 | } | ||
1323 | cmd_index = get_cmd_index(&txq->q, index, huge); | ||
1324 | cmd = txq->cmd[cmd_index]; | ||
1325 | meta = &txq->meta[cmd_index]; | ||
1246 | 1326 | ||
1247 | pci_unmap_single(priv->pci_dev, | 1327 | pci_unmap_single(priv->pci_dev, |
1248 | pci_unmap_addr(meta, mapping), | 1328 | pci_unmap_addr(meta, mapping), |
@@ -1264,6 +1344,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
1264 | get_cmd_string(cmd->hdr.cmd)); | 1344 | get_cmd_string(cmd->hdr.cmd)); |
1265 | wake_up_interruptible(&priv->wait_command_queue); | 1345 | wake_up_interruptible(&priv->wait_command_queue); |
1266 | } | 1346 | } |
1347 | meta->flags = 0; | ||
1267 | } | 1348 | } |
1268 | EXPORT_SYMBOL(iwl_tx_cmd_complete); | 1349 | EXPORT_SYMBOL(iwl_tx_cmd_complete); |
1269 | 1350 | ||
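The CMD_SIZE_HUGE bookkeeping above hinges on every huge command (in practice the scan command) sharing one oversized slot at index q->n_window, which is why iwl_cmd_queue_free() unmaps exactly that index when it saw the flag. A rough sketch of the slot mapping done by get_cmd_index(), which is unchanged and therefore not shown in these hunks (paraphrased, not the verbatim kernel helper):

    static inline int get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
    {
            if (is_huge)                        /* the single big buffer at the end */
                    return q->n_window;
            return index & (q->n_window - 1);   /* n_window is a power of two */
    }

Because all huge commands alias that one slot, the flag is set on the original slot's meta when the command is queued and cleared again on completion, so iwl_cmd_queue_free() never unmaps a stale scan-command DMA mapping.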
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 5ea587e59e48..37499127c801 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -679,7 +679,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) | |||
679 | */ | 679 | */ |
680 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) | 680 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) |
681 | { | 681 | { |
682 | return state > PCI_D0 ? | 682 | return state >= PCI_D0 ? |
683 | pci_platform_power_transition(dev, state) : -EINVAL; | 683 | pci_platform_power_transition(dev, state) : -EINVAL; |
684 | } | 684 | } |
685 | EXPORT_SYMBOL_GPL(__pci_complete_power_transition); | 685 | EXPORT_SYMBOL_GPL(__pci_complete_power_transition); |
@@ -716,10 +716,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
716 | */ | 716 | */ |
717 | return 0; | 717 | return 0; |
718 | 718 | ||
719 | /* Check if we're already there */ | ||
720 | if (dev->current_state == state) | ||
721 | return 0; | ||
722 | |||
723 | __pci_start_power_transition(dev, state); | 719 | __pci_start_power_transition(dev, state); |
724 | 720 | ||
725 | /* This device is quirked not to be put into D3, so | 721 | /* This device is quirked not to be put into D3, so |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index aa495ad9bbd4..7a711ee314b7 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
@@ -244,11 +244,17 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) | |||
244 | 244 | ||
245 | /* Assert Secondary Bus Reset */ | 245 | /* Assert Secondary Bus Reset */ |
246 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl); | 246 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl); |
247 | p2p_ctrl |= PCI_CB_BRIDGE_CTL_CB_RESET; | 247 | p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET; |
248 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); | 248 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); |
249 | 249 | ||
250 | /* | ||
251 | * we should send hot reset message for 2ms to allow it time to | ||
252 | * propogate to all downstream ports | ||
253 | */ | ||
254 | msleep(2); | ||
255 | |||
250 | /* De-assert Secondary Bus Reset */ | 256 | /* De-assert Secondary Bus Reset */ |
251 | p2p_ctrl &= ~PCI_CB_BRIDGE_CTL_CB_RESET; | 257 | p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; |
252 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); | 258 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); |
253 | 259 | ||
254 | /* | 260 | /* |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 882bd8d29fe3..c82548afcd5c 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -174,19 +174,14 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
174 | pci_read_config_dword(dev, pos, &sz); | 174 | pci_read_config_dword(dev, pos, &sz); |
175 | pci_write_config_dword(dev, pos, l); | 175 | pci_write_config_dword(dev, pos, l); |
176 | 176 | ||
177 | if (!sz) | ||
178 | goto fail; /* BAR not implemented */ | ||
179 | |||
180 | /* | 177 | /* |
181 | * All bits set in sz means the device isn't working properly. | 178 | * All bits set in sz means the device isn't working properly. |
182 | * If it's a memory BAR or a ROM, bit 0 must be clear; if it's | 179 | * If the BAR isn't implemented, all bits must be 0. If it's a |
183 | * an io BAR, bit 1 must be clear. | 180 | * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit |
181 | * 1 must be clear. | ||
184 | */ | 182 | */ |
185 | if (sz == 0xffffffff) { | 183 | if (!sz || sz == 0xffffffff) |
186 | dev_err(&dev->dev, "reg %x: invalid size %#x; broken device?\n", | ||
187 | pos, sz); | ||
188 | goto fail; | 184 | goto fail; |
189 | } | ||
190 | 185 | ||
191 | /* | 186 | /* |
192 | * I don't know how l can have all bits set. Copied from old code. | 187 | * I don't know how l can have all bits set. Copied from old code. |
@@ -249,17 +244,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
249 | pos, res); | 244 | pos, res); |
250 | } | 245 | } |
251 | } else { | 246 | } else { |
252 | u32 size = pci_size(l, sz, mask); | 247 | sz = pci_size(l, sz, mask); |
253 | 248 | ||
254 | if (!size) { | 249 | if (!sz) |
255 | dev_err(&dev->dev, "reg %x: invalid size " | ||
256 | "(l %#x sz %#x mask %#x); broken device?", | ||
257 | pos, l, sz, mask); | ||
258 | goto fail; | 250 | goto fail; |
259 | } | ||
260 | 251 | ||
261 | res->start = l; | 252 | res->start = l; |
262 | res->end = l + size; | 253 | res->end = l + sz; |
263 | 254 | ||
264 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); | 255 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); |
265 | } | 256 | } |
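For context, __pci_read_base() sizes a BAR by writing all-ones, reading the register back and decoding what remained writable; the hunks above only tighten the failure checks around that decode. A standalone sketch of the classic decode step (bar_size and flag_mask are illustrative, not the kernel's pci_size(); flag_mask is 0xFFFFFFF0 for memory BARs and 0xFFFFFFFC for I/O BARs):

    #include <stdint.h>

    /* Sketch: size of a 32-bit BAR from the value read back after writing
     * 0xFFFFFFFF.  Returns 0 for an unimplemented BAR or a broken device,
     * mirroring the combined "!sz || sz == 0xffffffff" check above. */
    static uint32_t bar_size(uint32_t readback, uint32_t flag_mask)
    {
            uint32_t sz = readback & flag_mask;

            if (readback == 0 || readback == 0xFFFFFFFF)
                    return 0;
            return ~sz + 1;         /* two's complement of the writable bits */
    }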
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index f230f6543bff..854959cada3a 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c | |||
@@ -1484,6 +1484,11 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info) | |||
1484 | if (!s) | 1484 | if (!s) |
1485 | return -EINVAL; | 1485 | return -EINVAL; |
1486 | 1486 | ||
1487 | if (s->functions) { | ||
1488 | WARN_ON(1); | ||
1489 | return -EINVAL; | ||
1490 | } | ||
1491 | |||
1487 | /* We do not want to validate the CIS cache... */ | 1492 | /* We do not want to validate the CIS cache... */ |
1488 | mutex_lock(&s->ops_mutex); | 1493 | mutex_lock(&s->ops_mutex); |
1489 | destroy_cis_cache(s); | 1494 | destroy_cis_cache(s); |
@@ -1639,7 +1644,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj, | |||
1639 | count = 0; | 1644 | count = 0; |
1640 | else { | 1645 | else { |
1641 | struct pcmcia_socket *s; | 1646 | struct pcmcia_socket *s; |
1642 | unsigned int chains; | 1647 | unsigned int chains = 1; |
1643 | 1648 | ||
1644 | if (off + count > size) | 1649 | if (off + count > size) |
1645 | count = size - off; | 1650 | count = size - off; |
@@ -1648,7 +1653,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj, | |||
1648 | 1653 | ||
1649 | if (!(s->state & SOCKET_PRESENT)) | 1654 | if (!(s->state & SOCKET_PRESENT)) |
1650 | return -ENODEV; | 1655 | return -ENODEV; |
1651 | if (pccard_validate_cis(s, &chains)) | 1656 | if (!s->functions && pccard_validate_cis(s, &chains)) |
1652 | return -EIO; | 1657 | return -EIO; |
1653 | if (!chains) | 1658 | if (!chains) |
1654 | return -ENODATA; | 1659 | return -ENODATA; |
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 6206408e196c..2d48196a48cd 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c | |||
@@ -166,8 +166,10 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock) | |||
166 | 166 | ||
167 | ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, | 167 | ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, |
168 | IRQF_DISABLED, "pcmcia_insert", sock); | 168 | IRQF_DISABLED, "pcmcia_insert", sock); |
169 | if (ret) | 169 | if (ret) { |
170 | local_irq_restore(flags); | ||
170 | goto out1; | 171 | goto out1; |
172 | } | ||
171 | 173 | ||
172 | ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, | 174 | ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, |
173 | IRQF_DISABLED, "pcmcia_eject", sock); | 175 | IRQF_DISABLED, "pcmcia_eject", sock); |
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index cb6036d89e59..508f94a2a78d 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c | |||
@@ -335,7 +335,6 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le | |||
335 | 335 | ||
336 | mutex_lock(&s->ops_mutex); | 336 | mutex_lock(&s->ops_mutex); |
337 | list_del(&p_dev->socket_device_list); | 337 | list_del(&p_dev->socket_device_list); |
338 | p_dev->_removed = 1; | ||
339 | mutex_unlock(&s->ops_mutex); | 338 | mutex_unlock(&s->ops_mutex); |
340 | 339 | ||
341 | dev_dbg(&p_dev->dev, "unregistering device\n"); | 340 | dev_dbg(&p_dev->dev, "unregistering device\n"); |
@@ -654,14 +653,7 @@ static int pcmcia_requery_callback(struct device *dev, void * _data) | |||
654 | 653 | ||
655 | static void pcmcia_requery(struct pcmcia_socket *s) | 654 | static void pcmcia_requery(struct pcmcia_socket *s) |
656 | { | 655 | { |
657 | int present, has_pfc; | 656 | int has_pfc; |
658 | |||
659 | mutex_lock(&s->ops_mutex); | ||
660 | present = s->pcmcia_state.present; | ||
661 | mutex_unlock(&s->ops_mutex); | ||
662 | |||
663 | if (!present) | ||
664 | return; | ||
665 | 657 | ||
666 | if (s->functions == 0) { | 658 | if (s->functions == 0) { |
667 | pcmcia_card_add(s); | 659 | pcmcia_card_add(s); |
@@ -687,12 +679,10 @@ static void pcmcia_requery(struct pcmcia_socket *s) | |||
687 | new_funcs = mfc.nfn; | 679 | new_funcs = mfc.nfn; |
688 | else | 680 | else |
689 | new_funcs = 1; | 681 | new_funcs = 1; |
690 | if (old_funcs > new_funcs) { | 682 | if (old_funcs != new_funcs) { |
683 | /* we need to re-start */ | ||
691 | pcmcia_card_remove(s, NULL); | 684 | pcmcia_card_remove(s, NULL); |
692 | pcmcia_card_add(s); | 685 | pcmcia_card_add(s); |
693 | } else if (new_funcs > old_funcs) { | ||
694 | s->functions = new_funcs; | ||
695 | pcmcia_device_add(s, 1); | ||
696 | } | 686 | } |
697 | } | 687 | } |
698 | 688 | ||
@@ -728,6 +718,8 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) | |||
728 | struct pcmcia_socket *s = dev->socket; | 718 | struct pcmcia_socket *s = dev->socket; |
729 | const struct firmware *fw; | 719 | const struct firmware *fw; |
730 | int ret = -ENOMEM; | 720 | int ret = -ENOMEM; |
721 | cistpl_longlink_mfc_t mfc; | ||
722 | int old_funcs, new_funcs = 1; | ||
731 | 723 | ||
732 | if (!filename) | 724 | if (!filename) |
733 | return -EINVAL; | 725 | return -EINVAL; |
@@ -750,6 +742,14 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) | |||
750 | goto release; | 742 | goto release; |
751 | } | 743 | } |
752 | 744 | ||
745 | /* we need to re-start if the number of functions changed */ | ||
746 | old_funcs = s->functions; | ||
747 | if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, | ||
748 | &mfc)) | ||
749 | new_funcs = mfc.nfn; | ||
750 | |||
751 | if (old_funcs != new_funcs) | ||
752 | ret = -EBUSY; | ||
753 | 753 | ||
754 | /* update information */ | 754 | /* update information */ |
755 | pcmcia_device_query(dev); | 755 | pcmcia_device_query(dev); |
@@ -820,11 +820,12 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev, | |||
820 | } | 820 | } |
821 | 821 | ||
822 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) { | 822 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) { |
823 | if (dev->device_no != did->device_no) | 823 | dev_dbg(&dev->dev, "this is a pseudo-multi-function device\n"); |
824 | return 0; | ||
825 | mutex_lock(&dev->socket->ops_mutex); | 824 | mutex_lock(&dev->socket->ops_mutex); |
826 | dev->socket->pcmcia_state.has_pfc = 1; | 825 | dev->socket->pcmcia_state.has_pfc = 1; |
827 | mutex_unlock(&dev->socket->ops_mutex); | 826 | mutex_unlock(&dev->socket->ops_mutex); |
827 | if (dev->device_no != did->device_no) | ||
828 | return 0; | ||
828 | } | 829 | } |
829 | 830 | ||
830 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID) { | 831 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID) { |
@@ -835,7 +836,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev, | |||
835 | 836 | ||
836 | /* if this is a pseudo-multi-function device, | 837 | /* if this is a pseudo-multi-function device, |
837 | * we need explicit matches */ | 838 | * we need explicit matches */ |
838 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) | 839 | if (dev->socket->pcmcia_state.has_pfc) |
839 | return 0; | 840 | return 0; |
840 | if (dev->device_no) | 841 | if (dev->device_no) |
841 | return 0; | 842 | return 0; |
@@ -858,10 +859,8 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev, | |||
858 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { | 859 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { |
859 | dev_dbg(&dev->dev, "device needs a fake CIS\n"); | 860 | dev_dbg(&dev->dev, "device needs a fake CIS\n"); |
860 | if (!dev->socket->fake_cis) | 861 | if (!dev->socket->fake_cis) |
861 | pcmcia_load_firmware(dev, did->cisfile); | 862 | if (pcmcia_load_firmware(dev, did->cisfile)) |
862 | 863 | return 0; | |
863 | if (!dev->socket->fake_cis) | ||
864 | return 0; | ||
865 | } | 864 | } |
866 | 865 | ||
867 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) { | 866 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) { |
@@ -1254,9 +1253,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority) | |||
1254 | 1253 | ||
1255 | switch (event) { | 1254 | switch (event) { |
1256 | case CS_EVENT_CARD_REMOVAL: | 1255 | case CS_EVENT_CARD_REMOVAL: |
1257 | mutex_lock(&s->ops_mutex); | 1256 | atomic_set(&skt->present, 0); |
1258 | s->pcmcia_state.present = 0; | ||
1259 | mutex_unlock(&s->ops_mutex); | ||
1260 | pcmcia_card_remove(skt, NULL); | 1257 | pcmcia_card_remove(skt, NULL); |
1261 | handle_event(skt, event); | 1258 | handle_event(skt, event); |
1262 | mutex_lock(&s->ops_mutex); | 1259 | mutex_lock(&s->ops_mutex); |
@@ -1265,9 +1262,9 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority) | |||
1265 | break; | 1262 | break; |
1266 | 1263 | ||
1267 | case CS_EVENT_CARD_INSERTION: | 1264 | case CS_EVENT_CARD_INSERTION: |
1265 | atomic_set(&skt->present, 1); | ||
1268 | mutex_lock(&s->ops_mutex); | 1266 | mutex_lock(&s->ops_mutex); |
1269 | s->pcmcia_state.has_pfc = 0; | 1267 | s->pcmcia_state.has_pfc = 0; |
1270 | s->pcmcia_state.present = 1; | ||
1271 | destroy_cis_cache(s); /* to be on the safe side... */ | 1268 | destroy_cis_cache(s); /* to be on the safe side... */ |
1272 | mutex_unlock(&s->ops_mutex); | 1269 | mutex_unlock(&s->ops_mutex); |
1273 | pcmcia_card_add(skt); | 1270 | pcmcia_card_add(skt); |
@@ -1307,7 +1304,13 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority) | |||
1307 | return 0; | 1304 | return 0; |
1308 | } /* ds_event */ | 1305 | } /* ds_event */ |
1309 | 1306 | ||
1310 | 1307 | /* | |
1308 | * NOTE: This is racy. There's no guarantee the card will still be | ||
1309 | * physically present, even if the call to this function returns | ||
1310 | * non-NULL. Furthermore, the device driver most likely is unbound | ||
1311 | * almost immediately, so the timeframe where pcmcia_dev_present | ||
1312 | * returns NULL is probably very small. | ||
1313 | */ | ||
1311 | struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev) | 1314 | struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev) |
1312 | { | 1315 | { |
1313 | struct pcmcia_device *p_dev; | 1316 | struct pcmcia_device *p_dev; |
@@ -1317,22 +1320,9 @@ struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev) | |||
1317 | if (!p_dev) | 1320 | if (!p_dev) |
1318 | return NULL; | 1321 | return NULL; |
1319 | 1322 | ||
1320 | mutex_lock(&p_dev->socket->ops_mutex); | 1323 | if (atomic_read(&p_dev->socket->present) != 0) |
1321 | if (!p_dev->socket->pcmcia_state.present) | 1324 | ret = p_dev; |
1322 | goto out; | ||
1323 | |||
1324 | if (p_dev->socket->pcmcia_state.dead) | ||
1325 | goto out; | ||
1326 | |||
1327 | if (p_dev->_removed) | ||
1328 | goto out; | ||
1329 | |||
1330 | if (p_dev->suspended) | ||
1331 | goto out; | ||
1332 | 1325 | ||
1333 | ret = p_dev; | ||
1334 | out: | ||
1335 | mutex_unlock(&p_dev->socket->ops_mutex); | ||
1336 | pcmcia_put_dev(p_dev); | 1326 | pcmcia_put_dev(p_dev); |
1337 | return ret; | 1327 | return ret; |
1338 | } | 1328 | } |
@@ -1382,6 +1372,8 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev, | |||
1382 | return ret; | 1372 | return ret; |
1383 | } | 1373 | } |
1384 | 1374 | ||
1375 | atomic_set(&socket->present, 0); | ||
1376 | |||
1385 | return 0; | 1377 | return 0; |
1386 | } | 1378 | } |
1387 | 1379 | ||
@@ -1393,10 +1385,6 @@ static void pcmcia_bus_remove_socket(struct device *dev, | |||
1393 | if (!socket) | 1385 | if (!socket) |
1394 | return; | 1386 | return; |
1395 | 1387 | ||
1396 | mutex_lock(&socket->ops_mutex); | ||
1397 | socket->pcmcia_state.dead = 1; | ||
1398 | mutex_unlock(&socket->ops_mutex); | ||
1399 | |||
1400 | pccard_register_pcmcia(socket, NULL); | 1388 | pccard_register_pcmcia(socket, NULL); |
1401 | 1389 | ||
1402 | /* unregister any unbound devices */ | 1390 | /* unregister any unbound devices */ |
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index caec1dee2a4b..7c3d03bb4f30 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c | |||
@@ -755,12 +755,12 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req) | |||
755 | else | 755 | else |
756 | printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n"); | 756 | printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n"); |
757 | 757 | ||
758 | #ifdef CONFIG_PCMCIA_PROBE | 758 | /* If the interrupt is already assigned, it must be the same */ |
759 | 759 | if (s->irq.AssignedIRQ != 0) | |
760 | if (s->irq.AssignedIRQ != 0) { | ||
761 | /* If the interrupt is already assigned, it must be the same */ | ||
762 | irq = s->irq.AssignedIRQ; | 760 | irq = s->irq.AssignedIRQ; |
763 | } else { | 761 | |
762 | #ifdef CONFIG_PCMCIA_PROBE | ||
763 | if (!irq) { | ||
764 | int try; | 764 | int try; |
765 | u32 mask = s->irq_mask; | 765 | u32 mask = s->irq_mask; |
766 | void *data = p_dev; /* something unique to this device */ | 766 | void *data = p_dev; /* something unique to this device */ |
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 559069a80a3b..a6eb7b59ba9f 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c | |||
@@ -214,7 +214,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, | |||
214 | return; | 214 | return; |
215 | } | 215 | } |
216 | for (i = base, most = 0; i < base+num; i += 8) { | 216 | for (i = base, most = 0; i < base+num; i += 8) { |
217 | res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); | 217 | res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); |
218 | if (!res) | 218 | if (!res) |
219 | continue; | 219 | continue; |
220 | hole = inb(i); | 220 | hole = inb(i); |
@@ -231,9 +231,14 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, | |||
231 | 231 | ||
232 | bad = any = 0; | 232 | bad = any = 0; |
233 | for (i = base; i < base+num; i += 8) { | 233 | for (i = base; i < base+num; i += 8) { |
234 | res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); | 234 | res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); |
235 | if (!res) | 235 | if (!res) { |
236 | if (!any) | ||
237 | printk(" excluding"); | ||
238 | if (!bad) | ||
239 | bad = any = i; | ||
236 | continue; | 240 | continue; |
241 | } | ||
237 | for (j = 0; j < 8; j++) | 242 | for (j = 0; j < 8; j++) |
238 | if (inb(i+j) != most) | 243 | if (inb(i+j) != most) |
239 | break; | 244 | break; |
@@ -253,6 +258,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, | |||
253 | } | 258 | } |
254 | if (bad) { | 259 | if (bad) { |
255 | if ((num > 16) && (bad == base) && (i == base+num)) { | 260 | if ((num > 16) && (bad == base) && (i == base+num)) { |
261 | sub_interval(&s_data->io_db, bad, i-bad); | ||
256 | printk(" nothing: probe failed.\n"); | 262 | printk(" nothing: probe failed.\n"); |
257 | return; | 263 | return; |
258 | } else { | 264 | } else { |
@@ -804,7 +810,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned | |||
804 | static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) | 810 | static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) |
805 | { | 811 | { |
806 | struct socket_data *data = s->resource_data; | 812 | struct socket_data *data = s->resource_data; |
807 | unsigned long size = end - start + 1; | 813 | unsigned long size; |
808 | int ret = 0; | 814 | int ret = 0; |
809 | 815 | ||
810 | #if defined(CONFIG_X86) | 816 | #if defined(CONFIG_X86) |
@@ -814,6 +820,8 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long | |||
814 | start = 0x100; | 820 | start = 0x100; |
815 | #endif | 821 | #endif |
816 | 822 | ||
823 | size = end - start + 1; | ||
824 | |||
817 | if (end < start) | 825 | if (end < start) |
818 | return -EINVAL; | 826 | return -EINVAL; |
819 | 827 | ||
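The adjust_io() hunk above moves the size computation below the x86 clamp of the start address, so the requested region no longer counts ports under 0x100 that the clamp excludes. A minimal ordering sketch; the clamp condition is assumed to be a simple lower-bound check:

static int adjust_io(struct pcmcia_socket *s, unsigned int action,
                     unsigned long start, unsigned long end)
{
        unsigned long size;

#if defined(CONFIG_X86)
        if (start < 0x100)              /* assumed clamp condition */
                start = 0x100;          /* skip the legacy ISA port range */
#endif
        size = end - start + 1;         /* computed only after the clamp */

        if (end < start)
                return -EINVAL;

        /* ... io_db add/remove based on 'action', 'start' and 'size' ... */
        return 0;
}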
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 7bec4588c268..6c3320d75055 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -390,6 +390,7 @@ config EEEPC_WMI | |||
390 | depends on ACPI_WMI | 390 | depends on ACPI_WMI |
391 | depends on INPUT | 391 | depends on INPUT |
392 | depends on EXPERIMENTAL | 392 | depends on EXPERIMENTAL |
393 | select INPUT_SPARSEKMAP | ||
393 | ---help--- | 394 | ---help--- |
394 | Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. | 395 | Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. |
395 | 396 | ||
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 52262b012abb..efe8f6388906 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c | |||
@@ -79,15 +79,15 @@ static uint wapf = 1; | |||
79 | module_param(wapf, uint, 0644); | 79 | module_param(wapf, uint, 0644); |
80 | MODULE_PARM_DESC(wapf, "WAPF value"); | 80 | MODULE_PARM_DESC(wapf, "WAPF value"); |
81 | 81 | ||
82 | static uint wlan_status = 1; | 82 | static int wlan_status = 1; |
83 | static uint bluetooth_status = 1; | 83 | static int bluetooth_status = 1; |
84 | 84 | ||
85 | module_param(wlan_status, uint, 0644); | 85 | module_param(wlan_status, int, 0644); |
86 | MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot " | 86 | MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot " |
87 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " | 87 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " |
88 | "default is 1"); | 88 | "default is 1"); |
89 | 89 | ||
90 | module_param(bluetooth_status, uint, 0644); | 90 | module_param(bluetooth_status, int, 0644); |
91 | MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " | 91 | MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " |
92 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " | 92 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " |
93 | "default is 1"); | 93 | "default is 1"); |
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 6ba6c30e5bb6..66f53c3c35e8 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c | |||
@@ -217,6 +217,7 @@ static void dell_wmi_notify(u32 value, void *context) | |||
217 | if (dell_new_hk_type && (buffer_entry[1] != 0x10)) { | 217 | if (dell_new_hk_type && (buffer_entry[1] != 0x10)) { |
218 | printk(KERN_INFO "dell-wmi: Received unknown WMI event" | 218 | printk(KERN_INFO "dell-wmi: Received unknown WMI event" |
219 | " (0x%x)\n", buffer_entry[1]); | 219 | " (0x%x)\n", buffer_entry[1]); |
220 | kfree(obj); | ||
220 | return; | 221 | return; |
221 | } | 222 | } |
222 | 223 | ||
@@ -234,7 +235,7 @@ static void dell_wmi_notify(u32 value, void *context) | |||
234 | key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) { | 235 | key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) { |
235 | /* Don't report brightness notifications that will also | 236 | /* Don't report brightness notifications that will also |
236 | * come via ACPI */ | 237 | * come via ACPI */ |
237 | return; | 238 | ; |
238 | } else { | 239 | } else { |
239 | input_report_key(dell_wmi_input_dev, key->keycode, 1); | 240 | input_report_key(dell_wmi_input_dev, key->keycode, 1); |
240 | input_sync(dell_wmi_input_dev); | 241 | input_sync(dell_wmi_input_dev); |
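The dell-wmi hunks above plug a leak: the early return for unknown event types now frees the buffer from wmi_get_event_data(), and the brightness-filter branch falls through to the common kfree() instead of returning. The general shape of a leak-free WMI notify handler, sketched with the event handling trimmed:

static void example_wmi_notify(u32 value, void *context)
{
        struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;

        if (wmi_get_event_data(value, &response) != AE_OK)
                return;                         /* nothing allocated yet */

        obj = (union acpi_object *)response.pointer;
        if (!obj || obj->type != ACPI_TYPE_INTEGER) {
                kfree(obj);                     /* early exit still frees the buffer */
                return;
        }

        /* ... act on obj->integer.value ... */

        kfree(obj);                             /* common exit */
}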
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 54a015785ca8..0306174ba875 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c | |||
@@ -169,7 +169,6 @@ struct eeepc_laptop { | |||
169 | struct backlight_device *backlight_device; | 169 | struct backlight_device *backlight_device; |
170 | 170 | ||
171 | struct input_dev *inputdev; | 171 | struct input_dev *inputdev; |
172 | struct key_entry *keymap; | ||
173 | 172 | ||
174 | struct rfkill *wlan_rfkill; | 173 | struct rfkill *wlan_rfkill; |
175 | struct rfkill *bluetooth_rfkill; | 174 | struct rfkill *bluetooth_rfkill; |
@@ -1204,8 +1203,8 @@ static int eeepc_input_init(struct eeepc_laptop *eeepc) | |||
1204 | static void eeepc_input_exit(struct eeepc_laptop *eeepc) | 1203 | static void eeepc_input_exit(struct eeepc_laptop *eeepc) |
1205 | { | 1204 | { |
1206 | if (eeepc->inputdev) { | 1205 | if (eeepc->inputdev) { |
1206 | sparse_keymap_free(eeepc->inputdev); | ||
1207 | input_unregister_device(eeepc->inputdev); | 1207 | input_unregister_device(eeepc->inputdev); |
1208 | kfree(eeepc->keymap); | ||
1209 | } | 1208 | } |
1210 | } | 1209 | } |
1211 | 1210 | ||
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 9f8822658fd7..b227eb469f49 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c | |||
@@ -23,6 +23,8 @@ | |||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
27 | |||
26 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
27 | #include <linux/module.h> | 29 | #include <linux/module.h> |
28 | #include <linux/init.h> | 30 | #include <linux/init.h> |
@@ -30,22 +32,34 @@ | |||
30 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
31 | #include <linux/input.h> | 33 | #include <linux/input.h> |
32 | #include <linux/input/sparse-keymap.h> | 34 | #include <linux/input/sparse-keymap.h> |
35 | #include <linux/fb.h> | ||
36 | #include <linux/backlight.h> | ||
37 | #include <linux/platform_device.h> | ||
33 | #include <acpi/acpi_bus.h> | 38 | #include <acpi/acpi_bus.h> |
34 | #include <acpi/acpi_drivers.h> | 39 | #include <acpi/acpi_drivers.h> |
35 | 40 | ||
41 | #define EEEPC_WMI_FILE "eeepc-wmi" | ||
42 | |||
36 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 43 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); |
37 | MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); | 44 | MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); |
38 | MODULE_LICENSE("GPL"); | 45 | MODULE_LICENSE("GPL"); |
39 | 46 | ||
40 | #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" | 47 | #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" |
48 | #define EEEPC_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" | ||
41 | 49 | ||
42 | MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); | 50 | MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); |
51 | MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID); | ||
43 | 52 | ||
44 | #define NOTIFY_BRNUP_MIN 0x11 | 53 | #define NOTIFY_BRNUP_MIN 0x11 |
45 | #define NOTIFY_BRNUP_MAX 0x1f | 54 | #define NOTIFY_BRNUP_MAX 0x1f |
46 | #define NOTIFY_BRNDOWN_MIN 0x20 | 55 | #define NOTIFY_BRNDOWN_MIN 0x20 |
47 | #define NOTIFY_BRNDOWN_MAX 0x2e | 56 | #define NOTIFY_BRNDOWN_MAX 0x2e |
48 | 57 | ||
58 | #define EEEPC_WMI_METHODID_DEVS 0x53564544 | ||
59 | #define EEEPC_WMI_METHODID_DSTS 0x53544344 | ||
60 | |||
61 | #define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012 | ||
62 | |||
49 | static const struct key_entry eeepc_wmi_keymap[] = { | 63 | static const struct key_entry eeepc_wmi_keymap[] = { |
50 | /* Sleep already handled via generic ACPI code */ | 64 | /* Sleep already handled via generic ACPI code */ |
51 | { KE_KEY, 0x5d, { KEY_WLAN } }, | 65 | { KE_KEY, 0x5d, { KEY_WLAN } }, |
@@ -58,18 +72,198 @@ static const struct key_entry eeepc_wmi_keymap[] = { | |||
58 | { KE_END, 0}, | 72 | { KE_END, 0}, |
59 | }; | 73 | }; |
60 | 74 | ||
61 | static struct input_dev *eeepc_wmi_input_dev; | 75 | struct bios_args { |
76 | u32 dev_id; | ||
77 | u32 ctrl_param; | ||
78 | }; | ||
79 | |||
80 | struct eeepc_wmi { | ||
81 | struct input_dev *inputdev; | ||
82 | struct backlight_device *backlight_device; | ||
83 | }; | ||
84 | |||
85 | static struct platform_device *platform_device; | ||
86 | |||
87 | static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc) | ||
88 | { | ||
89 | int err; | ||
90 | |||
91 | eeepc->inputdev = input_allocate_device(); | ||
92 | if (!eeepc->inputdev) | ||
93 | return -ENOMEM; | ||
94 | |||
95 | eeepc->inputdev->name = "Eee PC WMI hotkeys"; | ||
96 | eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0"; | ||
97 | eeepc->inputdev->id.bustype = BUS_HOST; | ||
98 | eeepc->inputdev->dev.parent = &platform_device->dev; | ||
99 | |||
100 | err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL); | ||
101 | if (err) | ||
102 | goto err_free_dev; | ||
103 | |||
104 | err = input_register_device(eeepc->inputdev); | ||
105 | if (err) | ||
106 | goto err_free_keymap; | ||
107 | |||
108 | return 0; | ||
109 | |||
110 | err_free_keymap: | ||
111 | sparse_keymap_free(eeepc->inputdev); | ||
112 | err_free_dev: | ||
113 | input_free_device(eeepc->inputdev); | ||
114 | return err; | ||
115 | } | ||
116 | |||
117 | static void eeepc_wmi_input_exit(struct eeepc_wmi *eeepc) | ||
118 | { | ||
119 | if (eeepc->inputdev) { | ||
120 | sparse_keymap_free(eeepc->inputdev); | ||
121 | input_unregister_device(eeepc->inputdev); | ||
122 | } | ||
123 | |||
124 | eeepc->inputdev = NULL; | ||
125 | } | ||
126 | |||
127 | static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *ctrl_param) | ||
128 | { | ||
129 | struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id }; | ||
130 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
131 | union acpi_object *obj; | ||
132 | acpi_status status; | ||
133 | u32 tmp; | ||
134 | |||
135 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, | ||
136 | 1, EEEPC_WMI_METHODID_DSTS, &input, &output); | ||
137 | |||
138 | if (ACPI_FAILURE(status)) | ||
139 | return status; | ||
140 | |||
141 | obj = (union acpi_object *)output.pointer; | ||
142 | if (obj && obj->type == ACPI_TYPE_INTEGER) | ||
143 | tmp = (u32)obj->integer.value; | ||
144 | else | ||
145 | tmp = 0; | ||
146 | |||
147 | if (ctrl_param) | ||
148 | *ctrl_param = tmp; | ||
149 | |||
150 | kfree(obj); | ||
151 | |||
152 | return status; | ||
153 | |||
154 | } | ||
155 | |||
156 | static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param) | ||
157 | { | ||
158 | struct bios_args args = { | ||
159 | .dev_id = dev_id, | ||
160 | .ctrl_param = ctrl_param, | ||
161 | }; | ||
162 | struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; | ||
163 | acpi_status status; | ||
164 | |||
165 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, | ||
166 | 1, EEEPC_WMI_METHODID_DEVS, &input, NULL); | ||
167 | |||
168 | return status; | ||
169 | } | ||
170 | |||
171 | static int read_brightness(struct backlight_device *bd) | ||
172 | { | ||
173 | static u32 ctrl_param; | ||
174 | acpi_status status; | ||
175 | |||
176 | status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &ctrl_param); | ||
177 | |||
178 | if (ACPI_FAILURE(status)) | ||
179 | return -1; | ||
180 | else | ||
181 | return ctrl_param & 0xFF; | ||
182 | } | ||
183 | |||
184 | static int update_bl_status(struct backlight_device *bd) | ||
185 | { | ||
186 | |||
187 | static u32 ctrl_param; | ||
188 | acpi_status status; | ||
189 | |||
190 | ctrl_param = bd->props.brightness; | ||
191 | |||
192 | status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, ctrl_param); | ||
193 | |||
194 | if (ACPI_FAILURE(status)) | ||
195 | return -1; | ||
196 | else | ||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | static const struct backlight_ops eeepc_wmi_bl_ops = { | ||
201 | .get_brightness = read_brightness, | ||
202 | .update_status = update_bl_status, | ||
203 | }; | ||
204 | |||
205 | static int eeepc_wmi_backlight_notify(struct eeepc_wmi *eeepc, int code) | ||
206 | { | ||
207 | struct backlight_device *bd = eeepc->backlight_device; | ||
208 | int old = bd->props.brightness; | ||
209 | int new; | ||
210 | |||
211 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
212 | new = code - NOTIFY_BRNUP_MIN + 1; | ||
213 | else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) | ||
214 | new = code - NOTIFY_BRNDOWN_MIN; | ||
215 | |||
216 | bd->props.brightness = new; | ||
217 | backlight_update_status(bd); | ||
218 | backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY); | ||
219 | |||
220 | return old; | ||
221 | } | ||
222 | |||
223 | static int eeepc_wmi_backlight_init(struct eeepc_wmi *eeepc) | ||
224 | { | ||
225 | struct backlight_device *bd; | ||
226 | struct backlight_properties props; | ||
227 | |||
228 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
229 | props.max_brightness = 15; | ||
230 | bd = backlight_device_register(EEEPC_WMI_FILE, | ||
231 | &platform_device->dev, eeepc, | ||
232 | &eeepc_wmi_bl_ops, &props); | ||
233 | if (IS_ERR(bd)) { | ||
234 | pr_err("Could not register backlight device\n"); | ||
235 | return PTR_ERR(bd); | ||
236 | } | ||
237 | |||
238 | eeepc->backlight_device = bd; | ||
239 | |||
240 | bd->props.brightness = read_brightness(bd); | ||
241 | bd->props.power = FB_BLANK_UNBLANK; | ||
242 | backlight_update_status(bd); | ||
243 | |||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | static void eeepc_wmi_backlight_exit(struct eeepc_wmi *eeepc) | ||
248 | { | ||
249 | if (eeepc->backlight_device) | ||
250 | backlight_device_unregister(eeepc->backlight_device); | ||
251 | |||
252 | eeepc->backlight_device = NULL; | ||
253 | } | ||
62 | 254 | ||
63 | static void eeepc_wmi_notify(u32 value, void *context) | 255 | static void eeepc_wmi_notify(u32 value, void *context) |
64 | { | 256 | { |
257 | struct eeepc_wmi *eeepc = context; | ||
65 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | 258 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; |
66 | union acpi_object *obj; | 259 | union acpi_object *obj; |
67 | acpi_status status; | 260 | acpi_status status; |
68 | int code; | 261 | int code; |
262 | int orig_code; | ||
69 | 263 | ||
70 | status = wmi_get_event_data(value, &response); | 264 | status = wmi_get_event_data(value, &response); |
71 | if (status != AE_OK) { | 265 | if (status != AE_OK) { |
72 | pr_err("EEEPC WMI: bad event status 0x%x\n", status); | 266 | pr_err("bad event status 0x%x\n", status); |
73 | return; | 267 | return; |
74 | } | 268 | } |
75 | 269 | ||
@@ -77,81 +271,142 @@ static void eeepc_wmi_notify(u32 value, void *context) | |||
77 | 271 | ||
78 | if (obj && obj->type == ACPI_TYPE_INTEGER) { | 272 | if (obj && obj->type == ACPI_TYPE_INTEGER) { |
79 | code = obj->integer.value; | 273 | code = obj->integer.value; |
274 | orig_code = code; | ||
80 | 275 | ||
81 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | 276 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) |
82 | code = NOTIFY_BRNUP_MIN; | 277 | code = NOTIFY_BRNUP_MIN; |
83 | else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) | 278 | else if (code >= NOTIFY_BRNDOWN_MIN && |
279 | code <= NOTIFY_BRNDOWN_MAX) | ||
84 | code = NOTIFY_BRNDOWN_MIN; | 280 | code = NOTIFY_BRNDOWN_MIN; |
85 | 281 | ||
86 | if (!sparse_keymap_report_event(eeepc_wmi_input_dev, | 282 | if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { |
283 | if (!acpi_video_backlight_support()) | ||
284 | eeepc_wmi_backlight_notify(eeepc, orig_code); | ||
285 | } | ||
286 | |||
287 | if (!sparse_keymap_report_event(eeepc->inputdev, | ||
87 | code, 1, true)) | 288 | code, 1, true)) |
88 | pr_info("EEEPC WMI: Unknown key %x pressed\n", code); | 289 | pr_info("Unknown key %x pressed\n", code); |
89 | } | 290 | } |
90 | 291 | ||
91 | kfree(obj); | 292 | kfree(obj); |
92 | } | 293 | } |
93 | 294 | ||
94 | static int eeepc_wmi_input_setup(void) | 295 | static int __devinit eeepc_wmi_platform_probe(struct platform_device *device) |
95 | { | 296 | { |
297 | struct eeepc_wmi *eeepc; | ||
96 | int err; | 298 | int err; |
299 | acpi_status status; | ||
97 | 300 | ||
98 | eeepc_wmi_input_dev = input_allocate_device(); | 301 | eeepc = platform_get_drvdata(device); |
99 | if (!eeepc_wmi_input_dev) | ||
100 | return -ENOMEM; | ||
101 | |||
102 | eeepc_wmi_input_dev->name = "Eee PC WMI hotkeys"; | ||
103 | eeepc_wmi_input_dev->phys = "wmi/input0"; | ||
104 | eeepc_wmi_input_dev->id.bustype = BUS_HOST; | ||
105 | 302 | ||
106 | err = sparse_keymap_setup(eeepc_wmi_input_dev, eeepc_wmi_keymap, NULL); | 303 | err = eeepc_wmi_input_init(eeepc); |
107 | if (err) | 304 | if (err) |
108 | goto err_free_dev; | 305 | goto error_input; |
109 | 306 | ||
110 | err = input_register_device(eeepc_wmi_input_dev); | 307 | if (!acpi_video_backlight_support()) { |
111 | if (err) | 308 | err = eeepc_wmi_backlight_init(eeepc); |
112 | goto err_free_keymap; | 309 | if (err) |
310 | goto error_backlight; | ||
311 | } else | ||
312 | pr_info("Backlight controlled by ACPI video driver\n"); | ||
313 | |||
314 | status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID, | ||
315 | eeepc_wmi_notify, eeepc); | ||
316 | if (ACPI_FAILURE(status)) { | ||
317 | pr_err("Unable to register notify handler - %d\n", | ||
318 | status); | ||
319 | err = -ENODEV; | ||
320 | goto error_wmi; | ||
321 | } | ||
113 | 322 | ||
114 | return 0; | 323 | return 0; |
115 | 324 | ||
116 | err_free_keymap: | 325 | error_wmi: |
117 | sparse_keymap_free(eeepc_wmi_input_dev); | 326 | eeepc_wmi_backlight_exit(eeepc); |
118 | err_free_dev: | 327 | error_backlight: |
119 | input_free_device(eeepc_wmi_input_dev); | 328 | eeepc_wmi_input_exit(eeepc); |
329 | error_input: | ||
120 | return err; | 330 | return err; |
121 | } | 331 | } |
122 | 332 | ||
333 | static int __devexit eeepc_wmi_platform_remove(struct platform_device *device) | ||
334 | { | ||
335 | struct eeepc_wmi *eeepc; | ||
336 | |||
337 | eeepc = platform_get_drvdata(device); | ||
338 | wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); | ||
339 | eeepc_wmi_backlight_exit(eeepc); | ||
340 | eeepc_wmi_input_exit(eeepc); | ||
341 | |||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | static struct platform_driver platform_driver = { | ||
346 | .driver = { | ||
347 | .name = EEEPC_WMI_FILE, | ||
348 | .owner = THIS_MODULE, | ||
349 | }, | ||
350 | .probe = eeepc_wmi_platform_probe, | ||
351 | .remove = __devexit_p(eeepc_wmi_platform_remove), | ||
352 | }; | ||
353 | |||
123 | static int __init eeepc_wmi_init(void) | 354 | static int __init eeepc_wmi_init(void) |
124 | { | 355 | { |
356 | struct eeepc_wmi *eeepc; | ||
125 | int err; | 357 | int err; |
126 | acpi_status status; | ||
127 | 358 | ||
128 | if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID)) { | 359 | if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) || |
129 | pr_warning("EEEPC WMI: No known WMI GUID found\n"); | 360 | !wmi_has_guid(EEEPC_WMI_MGMT_GUID)) { |
361 | pr_warning("No known WMI GUID found\n"); | ||
130 | return -ENODEV; | 362 | return -ENODEV; |
131 | } | 363 | } |
132 | 364 | ||
133 | err = eeepc_wmi_input_setup(); | 365 | eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL); |
134 | if (err) | 366 | if (!eeepc) |
135 | return err; | 367 | return -ENOMEM; |
136 | 368 | ||
137 | status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID, | 369 | platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1); |
138 | eeepc_wmi_notify, NULL); | 370 | if (!platform_device) { |
139 | if (ACPI_FAILURE(status)) { | 371 | pr_warning("Unable to allocate platform device\n"); |
140 | sparse_keymap_free(eeepc_wmi_input_dev); | 372 | err = -ENOMEM; |
141 | input_unregister_device(eeepc_wmi_input_dev); | 373 | goto fail_platform; |
142 | pr_err("EEEPC WMI: Unable to register notify handler - %d\n", | 374 | } |
143 | status); | 375 | |
144 | return -ENODEV; | 376 | err = platform_device_add(platform_device); |
377 | if (err) { | ||
378 | pr_warning("Unable to add platform device\n"); | ||
379 | goto put_dev; | ||
380 | } | ||
381 | |||
382 | platform_set_drvdata(platform_device, eeepc); | ||
383 | |||
384 | err = platform_driver_register(&platform_driver); | ||
385 | if (err) { | ||
386 | pr_warning("Unable to register platform driver\n"); | ||
387 | goto del_dev; | ||
145 | } | 388 | } |
146 | 389 | ||
147 | return 0; | 390 | return 0; |
391 | |||
392 | del_dev: | ||
393 | platform_device_del(platform_device); | ||
394 | put_dev: | ||
395 | platform_device_put(platform_device); | ||
396 | fail_platform: | ||
397 | kfree(eeepc); | ||
398 | |||
399 | return err; | ||
148 | } | 400 | } |
149 | 401 | ||
150 | static void __exit eeepc_wmi_exit(void) | 402 | static void __exit eeepc_wmi_exit(void) |
151 | { | 403 | { |
152 | wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); | 404 | struct eeepc_wmi *eeepc; |
153 | sparse_keymap_free(eeepc_wmi_input_dev); | 405 | |
154 | input_unregister_device(eeepc_wmi_input_dev); | 406 | eeepc = platform_get_drvdata(platform_device); |
407 | platform_driver_unregister(&platform_driver); | ||
408 | platform_device_unregister(platform_device); | ||
409 | kfree(eeepc); | ||
155 | } | 410 | } |
156 | 411 | ||
157 | module_init(eeepc_wmi_init); | 412 | module_init(eeepc_wmi_init); |
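The new eeepc-wmi code above talks to the firmware through two management methods: DSTS reads a device's state and DEVS writes it, both keyed by a device ID such as EEEPC_WMI_DEVID_BACKLIGHT. A hedged usage sketch built on the helpers introduced in the patch; example_backlight_step() itself is illustrative, not part of the driver:

static int example_backlight_step(void)
{
        u32 level;
        acpi_status status;

        status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &level);
        if (ACPI_FAILURE(status))
                return -EIO;

        level &= 0xFF;                          /* brightness lives in the low byte */
        if (level > 0)
                level--;                        /* step the backlight down one notch */

        status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, level);
        return ACPI_FAILURE(status) ? -EIO : 0;
}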
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c index b6218f11c957..552cad85ae5a 100644 --- a/drivers/regulator/max8925-regulator.c +++ b/drivers/regulator/max8925-regulator.c | |||
@@ -109,7 +109,7 @@ static int max8925_is_enabled(struct regulator_dev *rdev) | |||
109 | struct max8925_regulator_info *info = rdev_get_drvdata(rdev); | 109 | struct max8925_regulator_info *info = rdev_get_drvdata(rdev); |
110 | int ret; | 110 | int ret; |
111 | 111 | ||
112 | ret = max8925_reg_read(info->i2c, info->vol_reg); | 112 | ret = max8925_reg_read(info->i2c, info->enable_reg); |
113 | if (ret < 0) | 113 | if (ret < 0) |
114 | return ret; | 114 | return ret; |
115 | 115 | ||
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c index a681f5e8f786..ad036dd8da13 100644 --- a/drivers/regulator/mc13783-regulator.c +++ b/drivers/regulator/mc13783-regulator.c | |||
@@ -618,9 +618,12 @@ static int __devexit mc13783_regulator_remove(struct platform_device *pdev) | |||
618 | dev_get_platdata(&pdev->dev); | 618 | dev_get_platdata(&pdev->dev); |
619 | int i; | 619 | int i; |
620 | 620 | ||
621 | platform_set_drvdata(pdev, NULL); | ||
622 | |||
621 | for (i = 0; i < pdata->num_regulators; i++) | 623 | for (i = 0; i < pdata->num_regulators; i++) |
622 | regulator_unregister(priv->regulators[i]); | 624 | regulator_unregister(priv->regulators[i]); |
623 | 625 | ||
626 | kfree(priv); | ||
624 | return 0; | 627 | return 0; |
625 | } | 628 | } |
626 | 629 | ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index bbea90baf98f..acf222f91f5a 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1899,7 +1899,8 @@ restart: | |||
1899 | /* Process requests that may be recovered */ | 1899 | /* Process requests that may be recovered */ |
1900 | if (cqr->status == DASD_CQR_NEED_ERP) { | 1900 | if (cqr->status == DASD_CQR_NEED_ERP) { |
1901 | erp_fn = base->discipline->erp_action(cqr); | 1901 | erp_fn = base->discipline->erp_action(cqr); |
1902 | erp_fn(cqr); | 1902 | if (IS_ERR(erp_fn(cqr))) |
1903 | continue; | ||
1903 | goto restart; | 1904 | goto restart; |
1904 | } | 1905 | } |
1905 | 1906 | ||
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 6927e751ce3e..6632649dd6aa 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -2309,7 +2309,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) | |||
2309 | cqr->retries); | 2309 | cqr->retries); |
2310 | dasd_block_set_timer(device->block, (HZ << 3)); | 2310 | dasd_block_set_timer(device->block, (HZ << 3)); |
2311 | } | 2311 | } |
2312 | return cqr; | 2312 | return erp; |
2313 | } | 2313 | } |
2314 | 2314 | ||
2315 | ccw = cqr->cpaddr; | 2315 | ccw = cqr->cpaddr; |
@@ -2372,6 +2372,9 @@ dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr) | |||
2372 | /* add erp and initialize with default TIC */ | 2372 | /* add erp and initialize with default TIC */ |
2373 | erp = dasd_3990_erp_add_erp(cqr); | 2373 | erp = dasd_3990_erp_add_erp(cqr); |
2374 | 2374 | ||
2375 | if (IS_ERR(erp)) | ||
2376 | return erp; | ||
2377 | |||
2375 | /* inspect sense, determine specific ERP if possible */ | 2378 | /* inspect sense, determine specific ERP if possible */ |
2376 | if (erp != cqr) { | 2379 | if (erp != cqr) { |
2377 | 2380 | ||
@@ -2711,6 +2714,8 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) | |||
2711 | if (erp == NULL) { | 2714 | if (erp == NULL) { |
2712 | /* no matching erp found - set up erp */ | 2715 | /* no matching erp found - set up erp */ |
2713 | erp = dasd_3990_erp_additional_erp(cqr); | 2716 | erp = dasd_3990_erp_additional_erp(cqr); |
2717 | if (IS_ERR(erp)) | ||
2718 | return erp; | ||
2714 | } else { | 2719 | } else { |
2715 | /* matching erp found - set all leading erp's to DONE */ | 2720 | /* matching erp found - set all leading erp's to DONE */ |
2716 | erp = dasd_3990_erp_handle_match_erp(cqr, erp); | 2721 | erp = dasd_3990_erp_handle_match_erp(cqr, erp); |
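The dasd hunks above let ERP setup fail and report that failure as an ERR_PTR value, so every caller now checks IS_ERR() before using the returned request. A generic sketch of that convention; struct erp_request and alloc_erp_request() are placeholders, not dasd names:

#include <linux/err.h>

struct erp_request;                             /* placeholder type */
struct erp_request *alloc_erp_request(void);    /* placeholder allocator */

static struct erp_request *build_erp(void)
{
        struct erp_request *erp = alloc_erp_request();

        if (!erp)
                return ERR_PTR(-ENOMEM);        /* failure travels as an ERR_PTR */
        return erp;
}

static int handle_failure(void)
{
        struct erp_request *erp = build_erp();

        if (IS_ERR(erp))
                return PTR_ERR(erp);            /* caller unwraps the errno */
        /* ... queue the erp request ... */
        return 0;
}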
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 18daf16aa357..7217966f7d31 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
@@ -638,11 +638,7 @@ static int __init zcore_reipl_init(void) | |||
638 | rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); | 638 | rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); |
639 | else | 639 | else |
640 | rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); | 640 | rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); |
641 | if (rc) { | 641 | if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) != |
642 | free_page((unsigned long) ipl_block); | ||
643 | return rc; | ||
644 | } | ||
645 | if (csum_partial(ipl_block, ipl_block->hdr.len, 0) != | ||
646 | ipib_info.checksum) { | 642 | ipib_info.checksum) { |
647 | TRACE("Checksum does not match\n"); | 643 | TRACE("Checksum does not match\n"); |
648 | free_page((unsigned long) ipl_block); | 644 | free_page((unsigned long) ipl_block); |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 4038f5b4f144..ce7cb87479fe 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "chsc.h" | 29 | #include "chsc.h" |
30 | 30 | ||
31 | static void *sei_page; | 31 | static void *sei_page; |
32 | static DEFINE_SPINLOCK(sda_lock); | ||
32 | 33 | ||
33 | /** | 34 | /** |
34 | * chsc_error_from_response() - convert a chsc response to an error | 35 | * chsc_error_from_response() - convert a chsc response to an error |
@@ -832,11 +833,10 @@ void __init chsc_free_sei_area(void) | |||
832 | kfree(sei_page); | 833 | kfree(sei_page); |
833 | } | 834 | } |
834 | 835 | ||
835 | int __init | 836 | int chsc_enable_facility(int operation_code) |
836 | chsc_enable_facility(int operation_code) | ||
837 | { | 837 | { |
838 | int ret; | 838 | int ret; |
839 | struct { | 839 | static struct { |
840 | struct chsc_header request; | 840 | struct chsc_header request; |
841 | u8 reserved1:4; | 841 | u8 reserved1:4; |
842 | u8 format:4; | 842 | u8 format:4; |
@@ -849,33 +849,32 @@ chsc_enable_facility(int operation_code) | |||
849 | u32 reserved5:4; | 849 | u32 reserved5:4; |
850 | u32 format2:4; | 850 | u32 format2:4; |
851 | u32 reserved6:24; | 851 | u32 reserved6:24; |
852 | } __attribute__ ((packed)) *sda_area; | 852 | } __attribute__ ((packed, aligned(4096))) sda_area; |
853 | 853 | ||
854 | sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); | 854 | spin_lock(&sda_lock); |
855 | if (!sda_area) | 855 | memset(&sda_area, 0, sizeof(sda_area)); |
856 | return -ENOMEM; | 856 | sda_area.request.length = 0x0400; |
857 | sda_area->request.length = 0x0400; | 857 | sda_area.request.code = 0x0031; |
858 | sda_area->request.code = 0x0031; | 858 | sda_area.operation_code = operation_code; |
859 | sda_area->operation_code = operation_code; | ||
860 | 859 | ||
861 | ret = chsc(sda_area); | 860 | ret = chsc(&sda_area); |
862 | if (ret > 0) { | 861 | if (ret > 0) { |
863 | ret = (ret == 3) ? -ENODEV : -EBUSY; | 862 | ret = (ret == 3) ? -ENODEV : -EBUSY; |
864 | goto out; | 863 | goto out; |
865 | } | 864 | } |
866 | 865 | ||
867 | switch (sda_area->response.code) { | 866 | switch (sda_area.response.code) { |
868 | case 0x0101: | 867 | case 0x0101: |
869 | ret = -EOPNOTSUPP; | 868 | ret = -EOPNOTSUPP; |
870 | break; | 869 | break; |
871 | default: | 870 | default: |
872 | ret = chsc_error_from_response(sda_area->response.code); | 871 | ret = chsc_error_from_response(sda_area.response.code); |
873 | } | 872 | } |
874 | if (ret != 0) | 873 | if (ret != 0) |
875 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", | 874 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", |
876 | operation_code, sda_area->response.code); | 875 | operation_code, sda_area.response.code); |
877 | out: | 876 | out: |
878 | free_page((unsigned long)sda_area); | 877 | spin_unlock(&sda_lock); |
879 | return ret; | 878 | return ret; |
880 | } | 879 | } |
881 | 880 | ||
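The chsc_enable_facility() rewrite above swaps a per-call get_zeroed_page() for a static, page-aligned request area serialized by a spinlock, so the function can run without allocating memory (the new channel_subsystem_reinit() caller below relies on that). A generic sketch of the pattern; the field layout here is illustrative, not the CHSC command block:

static DEFINE_SPINLOCK(req_lock);

static struct {
        u16 length;
        u16 code;
        u8  payload[4092];                      /* pads the area to one page */
} __attribute__ ((packed, aligned(4096))) req_area;

static int issue_request(u16 code)
{
        int ret = 0;

        spin_lock(&req_lock);                   /* one request at a time */
        memset(&req_area, 0, sizeof(req_area));
        req_area.length = sizeof(req_area);
        req_area.code = code;
        /* ... submit &req_area to the hardware and derive ret ... */
        spin_unlock(&req_lock);
        return ret;
}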
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 404f630c27ca..3b6f4adc5094 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c | |||
@@ -124,7 +124,7 @@ static int chsc_subchannel_prepare(struct subchannel *sch) | |||
124 | * since we don't have a way to clear the subchannel and | 124 | * since we don't have a way to clear the subchannel and |
125 | * cannot disable it with a request running. | 125 | * cannot disable it with a request running. |
126 | */ | 126 | */ |
127 | cc = stsch(sch->schid, &schib); | 127 | cc = stsch_err(sch->schid, &schib); |
128 | if (!cc && scsw_stctl(&schib.scsw)) | 128 | if (!cc && scsw_stctl(&schib.scsw)) |
129 | return -EAGAIN; | 129 | return -EAGAIN; |
130 | return 0; | 130 | return 0; |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index f736cdcf08ad..5feea1a371e1 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -361,7 +361,7 @@ int cio_commit_config(struct subchannel *sch) | |||
361 | struct schib schib; | 361 | struct schib schib; |
362 | int ccode, retry, ret = 0; | 362 | int ccode, retry, ret = 0; |
363 | 363 | ||
364 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | 364 | if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) |
365 | return -ENODEV; | 365 | return -ENODEV; |
366 | 366 | ||
367 | for (retry = 0; retry < 5; retry++) { | 367 | for (retry = 0; retry < 5; retry++) { |
@@ -372,7 +372,7 @@ int cio_commit_config(struct subchannel *sch) | |||
372 | return ccode; | 372 | return ccode; |
373 | switch (ccode) { | 373 | switch (ccode) { |
374 | case 0: /* successful */ | 374 | case 0: /* successful */ |
375 | if (stsch(sch->schid, &schib) || | 375 | if (stsch_err(sch->schid, &schib) || |
376 | !css_sch_is_valid(&schib)) | 376 | !css_sch_is_valid(&schib)) |
377 | return -ENODEV; | 377 | return -ENODEV; |
378 | if (cio_check_config(sch, &schib)) { | 378 | if (cio_check_config(sch, &schib)) { |
@@ -404,7 +404,7 @@ int cio_update_schib(struct subchannel *sch) | |||
404 | { | 404 | { |
405 | struct schib schib; | 405 | struct schib schib; |
406 | 406 | ||
407 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | 407 | if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) |
408 | return -ENODEV; | 408 | return -ENODEV; |
409 | 409 | ||
410 | memcpy(&sch->schib, &schib, sizeof(schib)); | 410 | memcpy(&sch->schib, &schib, sizeof(schib)); |
@@ -771,7 +771,7 @@ cio_get_console_sch_no(void) | |||
771 | if (console_irq != -1) { | 771 | if (console_irq != -1) { |
772 | /* VM provided us with the irq number of the console. */ | 772 | /* VM provided us with the irq number of the console. */ |
773 | schid.sch_no = console_irq; | 773 | schid.sch_no = console_irq; |
774 | if (stsch(schid, &console_subchannel.schib) != 0 || | 774 | if (stsch_err(schid, &console_subchannel.schib) != 0 || |
775 | (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || | 775 | (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || |
776 | !console_subchannel.schib.pmcw.dnv) | 776 | !console_subchannel.schib.pmcw.dnv) |
777 | return -1; | 777 | return -1; |
@@ -863,10 +863,10 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | |||
863 | cc = 0; | 863 | cc = 0; |
864 | for (retry=0;retry<3;retry++) { | 864 | for (retry=0;retry<3;retry++) { |
865 | schib->pmcw.ena = 0; | 865 | schib->pmcw.ena = 0; |
866 | cc = msch(schid, schib); | 866 | cc = msch_err(schid, schib); |
867 | if (cc) | 867 | if (cc) |
868 | return (cc==3?-ENODEV:-EBUSY); | 868 | return (cc==3?-ENODEV:-EBUSY); |
869 | if (stsch(schid, schib) || !css_sch_is_valid(schib)) | 869 | if (stsch_err(schid, schib) || !css_sch_is_valid(schib)) |
870 | return -ENODEV; | 870 | return -ENODEV; |
871 | if (!schib->pmcw.ena) | 871 | if (!schib->pmcw.ena) |
872 | return 0; | 872 | return 0; |
@@ -913,7 +913,7 @@ static int stsch_reset(struct subchannel_id schid, struct schib *addr) | |||
913 | 913 | ||
914 | pgm_check_occured = 0; | 914 | pgm_check_occured = 0; |
915 | s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; | 915 | s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; |
916 | rc = stsch(schid, addr); | 916 | rc = stsch_err(schid, addr); |
917 | s390_base_pgm_handler_fn = NULL; | 917 | s390_base_pgm_handler_fn = NULL; |
918 | 918 | ||
919 | /* The program check handler could have changed pgm_check_occured. */ | 919 | /* The program check handler could have changed pgm_check_occured. */ |
@@ -950,7 +950,7 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) | |||
950 | /* No default clear strategy */ | 950 | /* No default clear strategy */ |
951 | break; | 951 | break; |
952 | } | 952 | } |
953 | stsch(schid, &schib); | 953 | stsch_err(schid, &schib); |
954 | __disable_subchannel_easy(schid, &schib); | 954 | __disable_subchannel_easy(schid, &schib); |
955 | } | 955 | } |
956 | out: | 956 | out: |
@@ -1086,7 +1086,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) | |||
1086 | schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; | 1086 | schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; |
1087 | if (!schid.one) | 1087 | if (!schid.one) |
1088 | return -ENODEV; | 1088 | return -ENODEV; |
1089 | if (stsch(schid, &schib)) | 1089 | if (stsch_err(schid, &schib)) |
1090 | return -ENODEV; | 1090 | return -ENODEV; |
1091 | if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) | 1091 | if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) |
1092 | return -ENODEV; | 1092 | return -ENODEV; |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 2769da54f2b9..511649115bd7 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -870,15 +870,10 @@ static int __init css_bus_init(void) | |||
870 | 870 | ||
871 | /* Try to enable MSS. */ | 871 | /* Try to enable MSS. */ |
872 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); | 872 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); |
873 | switch (ret) { | 873 | if (ret) |
874 | case 0: /* Success. */ | ||
875 | max_ssid = __MAX_SSID; | ||
876 | break; | ||
877 | case -ENOMEM: | ||
878 | goto out; | ||
879 | default: | ||
880 | max_ssid = 0; | 874 | max_ssid = 0; |
881 | } | 875 | else /* Success. */ |
876 | max_ssid = __MAX_SSID; | ||
882 | 877 | ||
883 | ret = slow_subchannel_init(); | 878 | ret = slow_subchannel_init(); |
884 | if (ret) | 879 | if (ret) |
@@ -1048,6 +1043,11 @@ static int __init channel_subsystem_init_sync(void) | |||
1048 | } | 1043 | } |
1049 | subsys_initcall_sync(channel_subsystem_init_sync); | 1044 | subsys_initcall_sync(channel_subsystem_init_sync); |
1050 | 1045 | ||
1046 | void channel_subsystem_reinit(void) | ||
1047 | { | ||
1048 | chsc_enable_facility(CHSC_SDA_OC_MSS); | ||
1049 | } | ||
1050 | |||
1051 | #ifdef CONFIG_PROC_FS | 1051 | #ifdef CONFIG_PROC_FS |
1052 | static ssize_t cio_settle_write(struct file *file, const char __user *buf, | 1052 | static ssize_t cio_settle_write(struct file *file, const char __user *buf, |
1053 | size_t count, loff_t *ppos) | 1053 | size_t count, loff_t *ppos) |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index c56ab94612f9..c9b852647f01 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -45,7 +45,7 @@ static void ccw_timeout_log(struct ccw_device *cdev) | |||
45 | sch = to_subchannel(cdev->dev.parent); | 45 | sch = to_subchannel(cdev->dev.parent); |
46 | private = to_io_private(sch); | 46 | private = to_io_private(sch); |
47 | orb = &private->orb; | 47 | orb = &private->orb; |
48 | cc = stsch(sch->schid, &schib); | 48 | cc = stsch_err(sch->schid, &schib); |
49 | 49 | ||
50 | printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " | 50 | printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " |
51 | "device information:\n", get_clock()); | 51 | "device information:\n", get_clock()); |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 18564891ea61..b3b1d2f79398 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -2105,7 +2105,8 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) | |||
2105 | blktrc.inb_usage = req->qdio_req.qdio_inb_usage; | 2105 | blktrc.inb_usage = req->qdio_req.qdio_inb_usage; |
2106 | blktrc.outb_usage = req->qdio_req.qdio_outb_usage; | 2106 | blktrc.outb_usage = req->qdio_req.qdio_outb_usage; |
2107 | 2107 | ||
2108 | if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { | 2108 | if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && |
2109 | !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { | ||
2109 | blktrc.flags |= ZFCP_BLK_LAT_VALID; | 2110 | blktrc.flags |= ZFCP_BLK_LAT_VALID; |
2110 | blktrc.channel_lat = lat_in->channel_lat * ticks; | 2111 | blktrc.channel_lat = lat_in->channel_lat * ticks; |
2111 | blktrc.fabric_lat = lat_in->fabric_lat * ticks; | 2112 | blktrc.fabric_lat = lat_in->fabric_lat * ticks; |
@@ -2157,9 +2158,8 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) | |||
2157 | fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; | 2158 | fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; |
2158 | zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); | 2159 | zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); |
2159 | 2160 | ||
2160 | zfcp_fsf_req_trace(req, scpnt); | ||
2161 | |||
2162 | skip_fsfstatus: | 2161 | skip_fsfstatus: |
2162 | zfcp_fsf_req_trace(req, scpnt); | ||
2163 | zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); | 2163 | zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); |
2164 | 2164 | ||
2165 | scpnt->host_scribble = NULL; | 2165 | scpnt->host_scribble = NULL; |
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 72617b650a7e..e641922f20bc 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
@@ -169,6 +169,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, | |||
169 | SE_DEBUG(DBG_LVL_1, | 169 | SE_DEBUG(DBG_LVL_1, |
170 | "Failed to allocate memory for" | 170 | "Failed to allocate memory for" |
171 | "mgmt_invalidate_icds \n"); | 171 | "mgmt_invalidate_icds \n"); |
172 | spin_unlock(&ctrl->mbox_lock); | ||
172 | return -1; | 173 | return -1; |
173 | } | 174 | } |
174 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); | 175 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); |
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index 6cf9dc37d78b..6b624e767d3b 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h | |||
@@ -362,6 +362,7 @@ struct bnx2i_hba { | |||
362 | u32 num_ccell; | 362 | u32 num_ccell; |
363 | 363 | ||
364 | int ofld_conns_active; | 364 | int ofld_conns_active; |
365 | wait_queue_head_t eh_wait; | ||
365 | 366 | ||
366 | int max_active_conns; | 367 | int max_active_conns; |
367 | struct iscsi_cid_queue cid_que; | 368 | struct iscsi_cid_queue cid_que; |
@@ -381,6 +382,7 @@ struct bnx2i_hba { | |||
381 | spinlock_t lock; /* protects hba structure access */ | 382 | spinlock_t lock; /* protects hba structure access */ |
382 | struct mutex net_dev_lock;/* sync net device access */ | 383 | struct mutex net_dev_lock;/* sync net device access */ |
383 | 384 | ||
385 | int hba_shutdown_tmo; | ||
384 | /* | 386 | /* |
385 | * PCI related info. | 387 | * PCI related info. |
386 | */ | 388 | */ |
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 6d8172e781cf..5d9296c599f6 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c | |||
@@ -177,11 +177,22 @@ void bnx2i_stop(void *handle) | |||
177 | struct bnx2i_hba *hba = handle; | 177 | struct bnx2i_hba *hba = handle; |
178 | 178 | ||
179 | /* check if cleanup happened in GOING_DOWN context */ | 179 | /* check if cleanup happened in GOING_DOWN context */ |
180 | clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); | ||
181 | if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, | 180 | if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, |
182 | &hba->adapter_state)) | 181 | &hba->adapter_state)) |
183 | iscsi_host_for_each_session(hba->shost, | 182 | iscsi_host_for_each_session(hba->shost, |
184 | bnx2i_drop_session); | 183 | bnx2i_drop_session); |
184 | |||
185 | /* Wait for all endpoints to be torn down; the chip will be reset once | ||
186 | * control returns to the network driver, so all connection resources | ||
187 | * must be cleaned up and released before returning from this routine. | ||
188 | */ | ||
189 | wait_event_interruptible_timeout(hba->eh_wait, | ||
190 | (hba->ofld_conns_active == 0), | ||
191 | hba->hba_shutdown_tmo); | ||
192 | /* This flag should be cleared last so that ep_disconnect() gracefully | ||
193 | * cleans up the connection context. | ||
194 | */ | ||
195 | clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); | ||
185 | } | 196 | } |
186 | 197 | ||
187 | /** | 198 | /** |
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index f2e9b18fe76c..fa68ab34b998 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c | |||
@@ -820,6 +820,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) | |||
820 | 820 | ||
821 | spin_lock_init(&hba->lock); | 821 | spin_lock_init(&hba->lock); |
822 | mutex_init(&hba->net_dev_lock); | 822 | mutex_init(&hba->net_dev_lock); |
823 | init_waitqueue_head(&hba->eh_wait); | ||
824 | if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) | ||
825 | hba->hba_shutdown_tmo = 240 * HZ; | ||
826 | else /* 5706/5708/5709 */ | ||
827 | hba->hba_shutdown_tmo = 30 * HZ; | ||
823 | 828 | ||
824 | if (iscsi_host_add(shost, &hba->pcidev->dev)) | 829 | if (iscsi_host_add(shost, &hba->pcidev->dev)) |
825 | goto free_dump_mem; | 830 | goto free_dump_mem; |
@@ -1658,8 +1663,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, | |||
1658 | */ | 1663 | */ |
1659 | hba = bnx2i_check_route(dst_addr); | 1664 | hba = bnx2i_check_route(dst_addr); |
1660 | 1665 | ||
1661 | if (!hba) { | 1666 | if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) { |
1662 | rc = -ENOMEM; | 1667 | rc = -EINVAL; |
1663 | goto check_busy; | 1668 | goto check_busy; |
1664 | } | 1669 | } |
1665 | 1670 | ||
@@ -1804,7 +1809,7 @@ static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) | |||
1804 | (bnx2i_ep->state == | 1809 | (bnx2i_ep->state == |
1805 | EP_STATE_CONNECT_COMPL)), | 1810 | EP_STATE_CONNECT_COMPL)), |
1806 | msecs_to_jiffies(timeout_ms)); | 1811 | msecs_to_jiffies(timeout_ms)); |
1807 | if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) | 1812 | if (bnx2i_ep->state == EP_STATE_OFLD_FAILED) |
1808 | rc = -1; | 1813 | rc = -1; |
1809 | 1814 | ||
1810 | if (rc > 0) | 1815 | if (rc > 0) |
@@ -1957,6 +1962,8 @@ return_bnx2i_ep: | |||
1957 | 1962 | ||
1958 | if (!hba->ofld_conns_active) | 1963 | if (!hba->ofld_conns_active) |
1959 | bnx2i_unreg_dev_all(); | 1964 | bnx2i_unreg_dev_all(); |
1965 | |||
1966 | wake_up_interruptible(&hba->eh_wait); | ||
1960 | } | 1967 | } |
1961 | 1968 | ||
1962 | 1969 | ||
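The bnx2i hunks above make the stop path block on a new wait queue until all offloaded connections are gone (or a per-chip timeout expires), with ep_disconnect() doing the wake-up. The bare wait/wake pairing, sketched without the hba structure or locking:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(eh_wait);
static int ofld_conns_active;
static unsigned long shutdown_tmo = 30 * HZ;    /* 240 * HZ on 57710 parts */

static void example_stop(void)
{
        /* returns early if the condition already holds, or on timeout */
        wait_event_interruptible_timeout(eh_wait,
                                         ofld_conns_active == 0,
                                         shutdown_tmo);
}

static void example_ep_disconnect(void)
{
        ofld_conns_active--;            /* real code must lock this counter */
        wake_up_interruptible(&eh_wait);
}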
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 496764349c41..0435d044c9da 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c | |||
@@ -188,7 +188,8 @@ MODULE_DEVICE_TABLE(pci,dptids); | |||
188 | static int adpt_detect(struct scsi_host_template* sht) | 188 | static int adpt_detect(struct scsi_host_template* sht) |
189 | { | 189 | { |
190 | struct pci_dev *pDev = NULL; | 190 | struct pci_dev *pDev = NULL; |
191 | adpt_hba* pHba; | 191 | adpt_hba *pHba; |
192 | adpt_hba *next; | ||
192 | 193 | ||
193 | PINFO("Detecting Adaptec I2O RAID controllers...\n"); | 194 | PINFO("Detecting Adaptec I2O RAID controllers...\n"); |
194 | 195 | ||
@@ -206,7 +207,8 @@ static int adpt_detect(struct scsi_host_template* sht) | |||
206 | } | 207 | } |
207 | 208 | ||
208 | /* In INIT state, Activate IOPs */ | 209 | /* In INIT state, Activate IOPs */ |
209 | for (pHba = hba_chain; pHba; pHba = pHba->next) { | 210 | for (pHba = hba_chain; pHba; pHba = next) { |
211 | next = pHba->next; | ||
210 | // Activate does get status , init outbound, and get hrt | 212 | // Activate does get status , init outbound, and get hrt |
211 | if (adpt_i2o_activate_hba(pHba) < 0) { | 213 | if (adpt_i2o_activate_hba(pHba) < 0) { |
212 | adpt_i2o_delete_hba(pHba); | 214 | adpt_i2o_delete_hba(pHba); |
@@ -243,7 +245,8 @@ rebuild_sys_tab: | |||
243 | PDEBUG("HBA's in OPERATIONAL state\n"); | 245 | PDEBUG("HBA's in OPERATIONAL state\n"); |
244 | 246 | ||
245 | printk("dpti: If you have a lot of devices this could take a few minutes.\n"); | 247 | printk("dpti: If you have a lot of devices this could take a few minutes.\n"); |
246 | for (pHba = hba_chain; pHba; pHba = pHba->next) { | 248 | for (pHba = hba_chain; pHba; pHba = next) { |
249 | next = pHba->next; | ||
247 | printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name); | 250 | printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name); |
248 | if (adpt_i2o_lct_get(pHba) < 0){ | 251 | if (adpt_i2o_lct_get(pHba) < 0){ |
249 | adpt_i2o_delete_hba(pHba); | 252 | adpt_i2o_delete_hba(pHba); |
@@ -263,7 +266,8 @@ rebuild_sys_tab: | |||
263 | adpt_sysfs_class = NULL; | 266 | adpt_sysfs_class = NULL; |
264 | } | 267 | } |
265 | 268 | ||
266 | for (pHba = hba_chain; pHba; pHba = pHba->next) { | 269 | for (pHba = hba_chain; pHba; pHba = next) { |
270 | next = pHba->next; | ||
267 | if (adpt_scsi_host_alloc(pHba, sht) < 0){ | 271 | if (adpt_scsi_host_alloc(pHba, sht) < 0){ |
268 | adpt_i2o_delete_hba(pHba); | 272 | adpt_i2o_delete_hba(pHba); |
269 | continue; | 273 | continue; |
@@ -1229,11 +1233,10 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba) | |||
1229 | } | 1233 | } |
1230 | } | 1234 | } |
1231 | pci_dev_put(pHba->pDev); | 1235 | pci_dev_put(pHba->pDev); |
1232 | kfree(pHba); | ||
1233 | |||
1234 | if (adpt_sysfs_class) | 1236 | if (adpt_sysfs_class) |
1235 | device_destroy(adpt_sysfs_class, | 1237 | device_destroy(adpt_sysfs_class, |
1236 | MKDEV(DPTI_I2O_MAJOR, pHba->unit)); | 1238 | MKDEV(DPTI_I2O_MAJOR, pHba->unit)); |
1239 | kfree(pHba); | ||
1237 | 1240 | ||
1238 | if(hba_count <= 0){ | 1241 | if(hba_count <= 0){ |
1239 | unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); | 1242 | unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index ff5ec5ac1fb5..88bad0e81bdd 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -323,16 +323,6 @@ static void set_srp_direction(struct scsi_cmnd *cmd, | |||
323 | srp_cmd->buf_fmt = fmt; | 323 | srp_cmd->buf_fmt = fmt; |
324 | } | 324 | } |
325 | 325 | ||
326 | static void unmap_sg_list(int num_entries, | ||
327 | struct device *dev, | ||
328 | struct srp_direct_buf *md) | ||
329 | { | ||
330 | int i; | ||
331 | |||
332 | for (i = 0; i < num_entries; ++i) | ||
333 | dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL); | ||
334 | } | ||
335 | |||
336 | /** | 326 | /** |
337 | * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format | 327 | * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format |
338 | * @cmd: srp_cmd whose additional_data member will be unmapped | 328 | * @cmd: srp_cmd whose additional_data member will be unmapped |
@@ -350,24 +340,9 @@ static void unmap_cmd_data(struct srp_cmd *cmd, | |||
350 | 340 | ||
351 | if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC) | 341 | if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC) |
352 | return; | 342 | return; |
353 | else if (out_fmt == SRP_DATA_DESC_DIRECT || | ||
354 | in_fmt == SRP_DATA_DESC_DIRECT) { | ||
355 | struct srp_direct_buf *data = | ||
356 | (struct srp_direct_buf *) cmd->add_data; | ||
357 | dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL); | ||
358 | } else { | ||
359 | struct srp_indirect_buf *indirect = | ||
360 | (struct srp_indirect_buf *) cmd->add_data; | ||
361 | int num_mapped = indirect->table_desc.len / | ||
362 | sizeof(struct srp_direct_buf); | ||
363 | 343 | ||
364 | if (num_mapped <= MAX_INDIRECT_BUFS) { | 344 | if (evt_struct->cmnd) |
365 | unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]); | 345 | scsi_dma_unmap(evt_struct->cmnd); |
366 | return; | ||
367 | } | ||
368 | |||
369 | unmap_sg_list(num_mapped, dev, evt_struct->ext_list); | ||
370 | } | ||
371 | } | 346 | } |
372 | 347 | ||
373 | static int map_sg_list(struct scsi_cmnd *cmd, int nseg, | 348 | static int map_sg_list(struct scsi_cmnd *cmd, int nseg, |
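The ibmvscsi hunk above drops the hand-rolled dma_unmap_single() loops over SRP descriptors in favour of scsi_dma_unmap(), which undoes whatever scsi_dma_map() set up on the same command. The pairing, sketched in isolation:

#include <scsi/scsi_cmnd.h>

static int example_queue(struct scsi_cmnd *cmd)
{
        int nseg = scsi_dma_map(cmd);           /* maps the command's scatterlist */

        if (nseg < 0)
                return -ENOMEM;
        /* ... build the request from the nseg mapped entries and submit ... */
        return 0;
}

static void example_complete(struct scsi_cmnd *cmd)
{
        scsi_dma_unmap(cmd);                    /* one call releases every mapping */
}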
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 0ee725ced511..02143af7c1af 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -599,7 +599,7 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | |||
599 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); | 599 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); |
600 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); | 600 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); |
601 | 601 | ||
602 | if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) { | 602 | if (sock->sk->sk_sleep) { |
603 | sock->sk->sk_err = EIO; | 603 | sock->sk->sk_err = EIO; |
604 | wake_up_interruptible(sock->sk->sk_sleep); | 604 | wake_up_interruptible(sock->sk->sk_sleep); |
605 | } | 605 | } |
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index ec3723831e89..d62b3e467926 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -433,7 +433,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, | |||
433 | dd_data = cmdiocbq->context1; | 433 | dd_data = cmdiocbq->context1; |
434 | /* normal completion and timeout crossed paths, already done */ | 434 | /* normal completion and timeout crossed paths, already done */ |
435 | if (!dd_data) { | 435 | if (!dd_data) { |
436 | spin_unlock_irqrestore(&phba->hbalock, flags); | 436 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
437 | return; | 437 | return; |
438 | } | 438 | } |
439 | 439 | ||
@@ -1196,7 +1196,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, | |||
1196 | dd_data = cmdiocbq->context1; | 1196 | dd_data = cmdiocbq->context1; |
1197 | /* normal completion and timeout crossed paths, already done */ | 1197 | /* normal completion and timeout crossed paths, already done */ |
1198 | if (!dd_data) { | 1198 | if (!dd_data) { |
1199 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1199 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
1200 | return; | 1200 | return; |
1201 | } | 1201 | } |
1202 | 1202 | ||
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 359e9a71a021..1c7ef55966fb 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -2393,6 +2393,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) | |||
2393 | return 0; | 2393 | return 0; |
2394 | 2394 | ||
2395 | done: | 2395 | done: |
2396 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2396 | if (bsg_job->request->msgcode == FC_BSG_HST_CT) | 2397 | if (bsg_job->request->msgcode == FC_BSG_HST_CT) |
2397 | kfree(sp->fcport); | 2398 | kfree(sp->fcport); |
2398 | kfree(sp->ctx); | 2399 | kfree(sp->ctx); |
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 09d6d4b76f39..caeb7d10ae04 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
@@ -467,7 +467,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha, | |||
467 | if (conn_err_detail) | 467 | if (conn_err_detail) |
468 | *conn_err_detail = mbox_sts[5]; | 468 | *conn_err_detail = mbox_sts[5]; |
469 | if (tcp_source_port_num) | 469 | if (tcp_source_port_num) |
470 | *tcp_source_port_num = (uint16_t) mbox_sts[6] >> 16; | 470 | *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16); |
471 | if (connection_id) | 471 | if (connection_id) |
472 | *connection_id = (uint16_t) mbox_sts[6] & 0x00FF; | 472 | *connection_id = (uint16_t) mbox_sts[6] & 0x00FF; |
473 | status = QLA_SUCCESS; | 473 | status = QLA_SUCCESS; |
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c index d0b7d2ff9ac5..333580bf37c5 100644 --- a/drivers/scsi/wd7000.c +++ b/drivers/scsi/wd7000.c | |||
@@ -1587,7 +1587,7 @@ static int wd7000_host_reset(struct scsi_cmnd *SCpnt) | |||
1587 | { | 1587 | { |
1588 | Adapter *host = (Adapter *) SCpnt->device->host->hostdata; | 1588 | Adapter *host = (Adapter *) SCpnt->device->host->hostdata; |
1589 | 1589 | ||
1590 | spin_unlock_irq(SCpnt->device->host->host_lock); | 1590 | spin_lock_irq(SCpnt->device->host->host_lock); |
1591 | 1591 | ||
1592 | if (wd7000_adapter_reset(host) < 0) { | 1592 | if (wd7000_adapter_reset(host) < 0) { |
1593 | spin_unlock_irq(SCpnt->device->host->host_lock); | 1593 | spin_unlock_irq(SCpnt->device->host->host_lock); |
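The wd7000 fix above is a single word: the reset handler previously unlocked the host lock it never held; now it takes the lock, matching the unlock calls already on both exit paths. The intended balanced structure, sketched with a hypothetical do_hardware_reset() helper:

static int example_host_reset(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *shost = cmd->device->host;

        spin_lock_irq(shost->host_lock);        /* was spin_unlock_irq() before the fix */
        if (do_hardware_reset(shost) < 0) {     /* hypothetical helper */
                spin_unlock_irq(shost->host_lock);
                return FAILED;
        }
        spin_unlock_irq(shost->host_lock);
        return SUCCESS;
}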
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c index 7bb5fee639e3..b5aaef965f24 100644 --- a/drivers/serial/mcf.c +++ b/drivers/serial/mcf.c | |||
@@ -263,6 +263,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios, | |||
263 | } | 263 | } |
264 | 264 | ||
265 | spin_lock_irqsave(&port->lock, flags); | 265 | spin_lock_irqsave(&port->lock, flags); |
266 | uart_update_timeout(port, termios->c_cflag, baud); | ||
266 | writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR); | 267 | writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR); |
267 | writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR); | 268 | writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR); |
268 | writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR); | 269 | writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR); |
@@ -379,6 +380,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data) | |||
379 | static void mcf_config_port(struct uart_port *port, int flags) | 380 | static void mcf_config_port(struct uart_port *port, int flags) |
380 | { | 381 | { |
381 | port->type = PORT_MCF; | 382 | port->type = PORT_MCF; |
383 | port->fifosize = MCFUART_TXFIFOSIZE; | ||
382 | 384 | ||
383 | /* Clear mask, so no surprise interrupts. */ | 385 | /* Clear mask, so no surprise interrupts. */ |
384 | writeb(0, port->membase + MCFUART_UIMR); | 386 | writeb(0, port->membase + MCFUART_UIMR); |
@@ -424,7 +426,7 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser) | |||
424 | /* | 426 | /* |
425 | * Define the basic serial functions we support. | 427 | * Define the basic serial functions we support. |
426 | */ | 428 | */ |
427 | static struct uart_ops mcf_uart_ops = { | 429 | static const struct uart_ops mcf_uart_ops = { |
428 | .tx_empty = mcf_tx_empty, | 430 | .tx_empty = mcf_tx_empty, |
429 | .get_mctrl = mcf_get_mctrl, | 431 | .get_mctrl = mcf_get_mctrl, |
430 | .set_mctrl = mcf_set_mctrl, | 432 | .set_mctrl = mcf_set_mctrl, |
@@ -443,7 +445,7 @@ static struct uart_ops mcf_uart_ops = { | |||
443 | .verify_port = mcf_verify_port, | 445 | .verify_port = mcf_verify_port, |
444 | }; | 446 | }; |
445 | 447 | ||
446 | static struct mcf_uart mcf_ports[3]; | 448 | static struct mcf_uart mcf_ports[4]; |
447 | 449 | ||
448 | #define MCF_MAXPORTS ARRAY_SIZE(mcf_ports) | 450 | #define MCF_MAXPORTS ARRAY_SIZE(mcf_ports) |
449 | 451 | ||
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index 175d202ab37e..8cfa5b12ea7a 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
@@ -105,6 +105,10 @@ struct serial_cfg_mem { | |||
105 | * manfid 0x0160, 0x0104 | 105 | * manfid 0x0160, 0x0104 |
106 | * This card appears to have a 14.7456MHz clock. | 106 | * This card appears to have a 14.7456MHz clock. |
107 | */ | 107 | */ |
108 | /* Generic Modem: MD55x (GPRS/EDGE) have | ||
109 | * Elan VPU16551 UART with 14.7456MHz oscillator | ||
110 | * manfid 0x015D, 0x4C45 | ||
111 | */ | ||
108 | static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_port *port) | 112 | static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_port *port) |
109 | { | 113 | { |
110 | port->uartclk = 14745600; | 114 | port->uartclk = 14745600; |
@@ -196,6 +200,11 @@ static const struct serial_quirk quirks[] = { | |||
196 | .multi = -1, | 200 | .multi = -1, |
197 | .setup = quirk_setup_brainboxes_0104, | 201 | .setup = quirk_setup_brainboxes_0104, |
198 | }, { | 202 | }, { |
203 | .manfid = 0x015D, | ||
204 | .prodid = 0x4C45, | ||
205 | .multi = -1, | ||
206 | .setup = quirk_setup_brainboxes_0104, | ||
207 | }, { | ||
199 | .manfid = MANFID_IBM, | 208 | .manfid = MANFID_IBM, |
200 | .prodid = ~0, | 209 | .prodid = ~0, |
201 | .multi = -1, | 210 | .multi = -1, |
diff --git a/drivers/staging/dt3155/dt3155_drv.c b/drivers/staging/dt3155/dt3155_drv.c index a67c622869d2..e2c44ec6fc45 100644 --- a/drivers/staging/dt3155/dt3155_drv.c +++ b/drivers/staging/dt3155/dt3155_drv.c | |||
@@ -57,19 +57,8 @@ MA 02111-1307 USA | |||
57 | 57 | ||
58 | extern void printques(int); | 58 | extern void printques(int); |
59 | 59 | ||
60 | #ifdef MODULE | ||
61 | #include <linux/module.h> | 60 | #include <linux/module.h> |
62 | #include <linux/interrupt.h> | 61 | #include <linux/interrupt.h> |
63 | |||
64 | |||
65 | MODULE_LICENSE("GPL"); | ||
66 | |||
67 | #endif | ||
68 | |||
69 | #ifndef CONFIG_PCI | ||
70 | #error "DT3155 : Kernel PCI support not enabled (DT3155 drive requires PCI)" | ||
71 | #endif | ||
72 | |||
73 | #include <linux/pci.h> | 62 | #include <linux/pci.h> |
74 | #include <linux/types.h> | 63 | #include <linux/types.h> |
75 | #include <linux/poll.h> | 64 | #include <linux/poll.h> |
@@ -84,6 +73,9 @@ MODULE_LICENSE("GPL"); | |||
84 | #include "dt3155_io.h" | 73 | #include "dt3155_io.h" |
85 | #include "allocator.h" | 74 | #include "allocator.h" |
86 | 75 | ||
76 | |||
77 | MODULE_LICENSE("GPL"); | ||
78 | |||
87 | /* Error variable. Zero means no error. */ | 79 | /* Error variable. Zero means no error. */ |
88 | int dt3155_errno = 0; | 80 | int dt3155_errno = 0; |
89 | 81 | ||
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 6a3b5cae3a6e..2f3dc4cdf79b 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
@@ -301,7 +301,7 @@ static int usb_probe_interface(struct device *dev) | |||
301 | 301 | ||
302 | intf->condition = USB_INTERFACE_BINDING; | 302 | intf->condition = USB_INTERFACE_BINDING; |
303 | 303 | ||
304 | /* Bound interfaces are initially active. They are | 304 | /* Probed interfaces are initially active. They are |
305 | * runtime-PM-enabled only if the driver has autosuspend support. | 305 | * runtime-PM-enabled only if the driver has autosuspend support. |
306 | * They are sensitive to their children's power states. | 306 | * They are sensitive to their children's power states. |
307 | */ | 307 | */ |
@@ -437,11 +437,11 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
437 | 437 | ||
438 | iface->condition = USB_INTERFACE_BOUND; | 438 | iface->condition = USB_INTERFACE_BOUND; |
439 | 439 | ||
440 | /* Bound interfaces are initially active. They are | 440 | /* Claimed interfaces are initially inactive (suspended). They are |
441 | * runtime-PM-enabled only if the driver has autosuspend support. | 441 | * runtime-PM-enabled only if the driver has autosuspend support. |
442 | * They are sensitive to their children's power states. | 442 | * They are sensitive to their children's power states. |
443 | */ | 443 | */ |
444 | pm_runtime_set_active(dev); | 444 | pm_runtime_set_suspended(dev); |
445 | pm_suspend_ignore_children(dev, false); | 445 | pm_suspend_ignore_children(dev, false); |
446 | if (driver->supports_autosuspend) | 446 | if (driver->supports_autosuspend) |
447 | pm_runtime_enable(dev); | 447 | pm_runtime_enable(dev); |
@@ -1170,7 +1170,7 @@ done: | |||
1170 | static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | 1170 | static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) |
1171 | { | 1171 | { |
1172 | int status = 0; | 1172 | int status = 0; |
1173 | int i = 0; | 1173 | int i = 0, n = 0; |
1174 | struct usb_interface *intf; | 1174 | struct usb_interface *intf; |
1175 | 1175 | ||
1176 | if (udev->state == USB_STATE_NOTATTACHED || | 1176 | if (udev->state == USB_STATE_NOTATTACHED || |
@@ -1179,7 +1179,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | |||
1179 | 1179 | ||
1180 | /* Suspend all the interfaces and then udev itself */ | 1180 | /* Suspend all the interfaces and then udev itself */ |
1181 | if (udev->actconfig) { | 1181 | if (udev->actconfig) { |
1182 | for (; i < udev->actconfig->desc.bNumInterfaces; i++) { | 1182 | n = udev->actconfig->desc.bNumInterfaces; |
1183 | for (i = n - 1; i >= 0; --i) { | ||
1183 | intf = udev->actconfig->interface[i]; | 1184 | intf = udev->actconfig->interface[i]; |
1184 | status = usb_suspend_interface(udev, intf, msg); | 1185 | status = usb_suspend_interface(udev, intf, msg); |
1185 | if (status != 0) | 1186 | if (status != 0) |
@@ -1192,7 +1193,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | |||
1192 | /* If the suspend failed, resume interfaces that did get suspended */ | 1193 | /* If the suspend failed, resume interfaces that did get suspended */ |
1193 | if (status != 0) { | 1194 | if (status != 0) { |
1194 | msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); | 1195 | msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); |
1195 | while (--i >= 0) { | 1196 | while (++i < n) { |
1196 | intf = udev->actconfig->interface[i]; | 1197 | intf = udev->actconfig->interface[i]; |
1197 | usb_resume_interface(udev, intf, msg, 0); | 1198 | usb_resume_interface(udev, intf, msg, 0); |
1198 | } | 1199 | } |
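Note: the reworked loop above suspends the interfaces last-to-first and, if one of them fails, resumes only those that were already suspended by walking forward from the failure point. Below is a minimal userspace sketch of that pattern; suspend_one() and resume_one() are made-up stand-ins for usb_suspend_interface() and usb_resume_interface(), and the failing index is arbitrary.

#include <stdio.h>

/* Pretend interface 1 refuses to suspend so the rollback path runs. */
static int suspend_one(int i) { printf("suspend %d\n", i); return (i == 1) ? -1 : 0; }
static void resume_one(int i) { printf("resume %d\n", i); }

int main(void)
{
	int n = 4, i, status = 0;

	for (i = n - 1; i >= 0; --i) {		/* children before parents: last-to-first */
		status = suspend_one(i);
		if (status != 0)
			break;
	}
	if (status != 0) {
		while (++i < n)			/* undo exactly the ones already suspended */
			resume_one(i);
	}
	return 0;
}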
@@ -1263,13 +1264,47 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg) | |||
1263 | return status; | 1264 | return status; |
1264 | } | 1265 | } |
1265 | 1266 | ||
1267 | static void choose_wakeup(struct usb_device *udev, pm_message_t msg) | ||
1268 | { | ||
1269 | int w, i; | ||
1270 | struct usb_interface *intf; | ||
1271 | |||
1272 | /* Remote wakeup is needed only when we actually go to sleep. | ||
1273 | * For things like FREEZE and QUIESCE, if the device is already | ||
1274 | * autosuspended then its current wakeup setting is okay. | ||
1275 | */ | ||
1276 | if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) { | ||
1277 | if (udev->state != USB_STATE_SUSPENDED) | ||
1278 | udev->do_remote_wakeup = 0; | ||
1279 | return; | ||
1280 | } | ||
1281 | |||
1282 | /* If remote wakeup is permitted, see whether any interface drivers | ||
1283 | * actually want it. | ||
1284 | */ | ||
1285 | w = 0; | ||
1286 | if (device_may_wakeup(&udev->dev) && udev->actconfig) { | ||
1287 | for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { | ||
1288 | intf = udev->actconfig->interface[i]; | ||
1289 | w |= intf->needs_remote_wakeup; | ||
1290 | } | ||
1291 | } | ||
1292 | |||
1293 | /* If the device is autosuspended with the wrong wakeup setting, | ||
1294 | * autoresume now so the setting can be changed. | ||
1295 | */ | ||
1296 | if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup) | ||
1297 | pm_runtime_resume(&udev->dev); | ||
1298 | udev->do_remote_wakeup = w; | ||
1299 | } | ||
1300 | |||
1266 | /* The device lock is held by the PM core */ | 1301 | /* The device lock is held by the PM core */ |
1267 | int usb_suspend(struct device *dev, pm_message_t msg) | 1302 | int usb_suspend(struct device *dev, pm_message_t msg) |
1268 | { | 1303 | { |
1269 | struct usb_device *udev = to_usb_device(dev); | 1304 | struct usb_device *udev = to_usb_device(dev); |
1270 | 1305 | ||
1271 | do_unbind_rebind(udev, DO_UNBIND); | 1306 | do_unbind_rebind(udev, DO_UNBIND); |
1272 | udev->do_remote_wakeup = device_may_wakeup(&udev->dev); | 1307 | choose_wakeup(udev, msg); |
1273 | return usb_suspend_both(udev, msg); | 1308 | return usb_suspend_both(udev, msg); |
1274 | } | 1309 | } |
1275 | 1310 | ||
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 207e7a85aeb0..13ead00aecd5 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd) | |||
543 | */ | 543 | */ |
544 | ehci->periodic_size = DEFAULT_I_TDPS; | 544 | ehci->periodic_size = DEFAULT_I_TDPS; |
545 | INIT_LIST_HEAD(&ehci->cached_itd_list); | 545 | INIT_LIST_HEAD(&ehci->cached_itd_list); |
546 | INIT_LIST_HEAD(&ehci->cached_sitd_list); | ||
546 | if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) | 547 | if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) |
547 | return retval; | 548 | return retval; |
548 | 549 | ||
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 19372673bf09..c7178bcde67a 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -801,7 +801,7 @@ static int ehci_hub_control ( | |||
801 | * this bit; seems too long to spin routinely... | 801 | * this bit; seems too long to spin routinely... |
802 | */ | 802 | */ |
803 | retval = handshake(ehci, status_reg, | 803 | retval = handshake(ehci, status_reg, |
804 | PORT_RESET, 0, 750); | 804 | PORT_RESET, 0, 1000); |
805 | if (retval != 0) { | 805 | if (retval != 0) { |
806 | ehci_err (ehci, "port %d reset error %d\n", | 806 | ehci_err (ehci, "port %d reset error %d\n", |
807 | wIndex + 1, retval); | 807 | wIndex + 1, retval); |
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index aeda96e0af67..1f3f01eacaf0 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c | |||
@@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh) | |||
136 | 136 | ||
137 | static void ehci_mem_cleanup (struct ehci_hcd *ehci) | 137 | static void ehci_mem_cleanup (struct ehci_hcd *ehci) |
138 | { | 138 | { |
139 | free_cached_itd_list(ehci); | 139 | free_cached_lists(ehci); |
140 | if (ehci->async) | 140 | if (ehci->async) |
141 | qh_put (ehci->async); | 141 | qh_put (ehci->async); |
142 | ehci->async = NULL; | 142 | ehci->async = NULL; |
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index a67a0030dd57..40a858335035 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c | |||
@@ -629,11 +629,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) | |||
629 | } | 629 | } |
630 | snprintf(supply, sizeof(supply), "hsusb%d", i); | 630 | snprintf(supply, sizeof(supply), "hsusb%d", i); |
631 | omap->regulator[i] = regulator_get(omap->dev, supply); | 631 | omap->regulator[i] = regulator_get(omap->dev, supply); |
632 | if (IS_ERR(omap->regulator[i])) | 632 | if (IS_ERR(omap->regulator[i])) { |
633 | omap->regulator[i] = NULL; | ||
633 | dev_dbg(&pdev->dev, | 634 | dev_dbg(&pdev->dev, |
634 | "failed to get ehci port%d regulator\n", i); | 635 | "failed to get ehci port%d regulator\n", i); |
635 | else | 636 | } else { |
636 | regulator_enable(omap->regulator[i]); | 637 | regulator_enable(omap->regulator[i]); |
638 | } | ||
637 | } | 639 | } |
638 | 640 | ||
639 | ret = omap_start_ehc(omap, hcd); | 641 | ret = omap_start_ehc(omap, hcd); |
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index a0aaaaff2560..805ec633a652 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -510,7 +510,7 @@ static int disable_periodic (struct ehci_hcd *ehci) | |||
510 | ehci_writel(ehci, cmd, &ehci->regs->command); | 510 | ehci_writel(ehci, cmd, &ehci->regs->command); |
511 | /* posted write ... */ | 511 | /* posted write ... */ |
512 | 512 | ||
513 | free_cached_itd_list(ehci); | 513 | free_cached_lists(ehci); |
514 | 514 | ||
515 | ehci->next_uframe = -1; | 515 | ehci->next_uframe = -1; |
516 | return 0; | 516 | return 0; |
@@ -2139,13 +2139,27 @@ sitd_complete ( | |||
2139 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); | 2139 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); |
2140 | } | 2140 | } |
2141 | iso_stream_put (ehci, stream); | 2141 | iso_stream_put (ehci, stream); |
2142 | /* OK to recycle this SITD now that its completion callback ran. */ | 2142 | |
2143 | done: | 2143 | done: |
2144 | sitd->urb = NULL; | 2144 | sitd->urb = NULL; |
2145 | sitd->stream = NULL; | 2145 | if (ehci->clock_frame != sitd->frame) { |
2146 | list_move(&sitd->sitd_list, &stream->free_list); | 2146 | /* OK to recycle this SITD now. */ |
2147 | iso_stream_put(ehci, stream); | 2147 | sitd->stream = NULL; |
2148 | 2148 | list_move(&sitd->sitd_list, &stream->free_list); | |
2149 | iso_stream_put(ehci, stream); | ||
2150 | } else { | ||
2151 | /* HW might remember this SITD, so we can't recycle it yet. | ||
2152 | * Move it to a safe place until a new frame starts. | ||
2153 | */ | ||
2154 | list_move(&sitd->sitd_list, &ehci->cached_sitd_list); | ||
2155 | if (stream->refcount == 2) { | ||
2156 | /* If iso_stream_put() were called here, stream | ||
2157 | * would be freed. Instead, just prevent reuse. | ||
2158 | */ | ||
2159 | stream->ep->hcpriv = NULL; | ||
2160 | stream->ep = NULL; | ||
2161 | } | ||
2162 | } | ||
2149 | return retval; | 2163 | return retval; |
2150 | } | 2164 | } |
2151 | 2165 | ||
@@ -2211,9 +2225,10 @@ done: | |||
2211 | 2225 | ||
2212 | /*-------------------------------------------------------------------------*/ | 2226 | /*-------------------------------------------------------------------------*/ |
2213 | 2227 | ||
2214 | static void free_cached_itd_list(struct ehci_hcd *ehci) | 2228 | static void free_cached_lists(struct ehci_hcd *ehci) |
2215 | { | 2229 | { |
2216 | struct ehci_itd *itd, *n; | 2230 | struct ehci_itd *itd, *n; |
2231 | struct ehci_sitd *sitd, *sn; | ||
2217 | 2232 | ||
2218 | list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { | 2233 | list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { |
2219 | struct ehci_iso_stream *stream = itd->stream; | 2234 | struct ehci_iso_stream *stream = itd->stream; |
@@ -2221,6 +2236,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci) | |||
2221 | list_move(&itd->itd_list, &stream->free_list); | 2236 | list_move(&itd->itd_list, &stream->free_list); |
2222 | iso_stream_put(ehci, stream); | 2237 | iso_stream_put(ehci, stream); |
2223 | } | 2238 | } |
2239 | |||
2240 | list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) { | ||
2241 | struct ehci_iso_stream *stream = sitd->stream; | ||
2242 | sitd->stream = NULL; | ||
2243 | list_move(&sitd->sitd_list, &stream->free_list); | ||
2244 | iso_stream_put(ehci, stream); | ||
2245 | } | ||
2224 | } | 2246 | } |
2225 | 2247 | ||
2226 | /*-------------------------------------------------------------------------*/ | 2248 | /*-------------------------------------------------------------------------*/ |
@@ -2247,7 +2269,7 @@ scan_periodic (struct ehci_hcd *ehci) | |||
2247 | clock_frame = -1; | 2269 | clock_frame = -1; |
2248 | } | 2270 | } |
2249 | if (ehci->clock_frame != clock_frame) { | 2271 | if (ehci->clock_frame != clock_frame) { |
2250 | free_cached_itd_list(ehci); | 2272 | free_cached_lists(ehci); |
2251 | ehci->clock_frame = clock_frame; | 2273 | ehci->clock_frame = clock_frame; |
2252 | } | 2274 | } |
2253 | clock %= mod; | 2275 | clock %= mod; |
@@ -2414,7 +2436,7 @@ restart: | |||
2414 | clock = now; | 2436 | clock = now; |
2415 | clock_frame = clock >> 3; | 2437 | clock_frame = clock >> 3; |
2416 | if (ehci->clock_frame != clock_frame) { | 2438 | if (ehci->clock_frame != clock_frame) { |
2417 | free_cached_itd_list(ehci); | 2439 | free_cached_lists(ehci); |
2418 | ehci->clock_frame = clock_frame; | 2440 | ehci->clock_frame = clock_frame; |
2419 | } | 2441 | } |
2420 | } else { | 2442 | } else { |
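Note: the siTD handling above mirrors the existing iTD path: a descriptor that completes while the controller may still be scanning its frame is parked on cached_sitd_list, and free_cached_lists() recycles everything once clock_frame advances. Below is a toy model of that deferred recycling, with simplified bookkeeping and illustrative names.

#include <stdio.h>

#define NDESC 3

enum state { IN_USE, CACHED, FREE };

static enum state desc[NDESC];
static unsigned clock_frame;

/* Completion: recycle immediately unless the controller may still look at
 * the descriptor's frame, in which case park it. */
static void complete(int i, unsigned frame)
{
	desc[i] = (clock_frame != frame) ? FREE : CACHED;
}

/* Once the frame counter moves on, the parked descriptors are safe to free. */
static void advance_frame(unsigned frame)
{
	if (clock_frame == frame)
		return;
	clock_frame = frame;
	for (int i = 0; i < NDESC; i++)
		if (desc[i] == CACHED)
			desc[i] = FREE;
}

int main(void)
{
	for (int i = 0; i < NDESC; i++)
		desc[i] = IN_USE;
	clock_frame = 7;

	complete(0, 7);		/* same frame: must be cached, not freed */
	complete(1, 6);		/* older frame: safe to free right away */
	advance_frame(8);	/* frame advanced: cached descriptor freed too */

	for (int i = 0; i < NDESC; i++)
		printf("desc %d state %d\n", i, desc[i]);
	return 0;
}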
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index b1dce96dd621..556c0b48f3ab 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h | |||
@@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */ | |||
87 | int next_uframe; /* scan periodic, start here */ | 87 | int next_uframe; /* scan periodic, start here */ |
88 | unsigned periodic_sched; /* periodic activity count */ | 88 | unsigned periodic_sched; /* periodic activity count */ |
89 | 89 | ||
90 | /* list of itds completed while clock_frame was still active */ | 90 | /* list of itds & sitds completed while clock_frame was still active */ |
91 | struct list_head cached_itd_list; | 91 | struct list_head cached_itd_list; |
92 | struct list_head cached_sitd_list; | ||
92 | unsigned clock_frame; | 93 | unsigned clock_frame; |
93 | 94 | ||
94 | /* per root hub port */ | 95 | /* per root hub port */ |
@@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action) | |||
195 | clear_bit (action, &ehci->actions); | 196 | clear_bit (action, &ehci->actions); |
196 | } | 197 | } |
197 | 198 | ||
198 | static void free_cached_itd_list(struct ehci_hcd *ehci); | 199 | static void free_cached_lists(struct ehci_hcd *ehci); |
199 | 200 | ||
200 | /*-------------------------------------------------------------------------*/ | 201 | /*-------------------------------------------------------------------------*/ |
201 | 202 | ||
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index 4aa08d36d077..d22fb4d577b7 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX." | 23 | #error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX." |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG) | 26 | #define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG) |
27 | 27 | ||
28 | static struct clk *usb11_clk; | 28 | static struct clk *usb11_clk; |
29 | static struct clk *usb20_clk; | 29 | static struct clk *usb20_clk; |
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index a9555cb901a1..de8ef945b536 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c | |||
@@ -49,6 +49,7 @@ struct usb_sevsegdev { | |||
49 | u16 textlength; | 49 | u16 textlength; |
50 | 50 | ||
51 | u8 shadow_power; /* for PM */ | 51 | u8 shadow_power; /* for PM */ |
52 | u8 has_interface_pm; | ||
52 | }; | 53 | }; |
53 | 54 | ||
54 | /* sysfs_streq can't replace this completely | 55 | /* sysfs_streq can't replace this completely |
@@ -68,12 +69,16 @@ static void update_display_powered(struct usb_sevsegdev *mydev) | |||
68 | { | 69 | { |
69 | int rc; | 70 | int rc; |
70 | 71 | ||
71 | if (!mydev->shadow_power && mydev->powered) { | 72 | if (mydev->powered && !mydev->has_interface_pm) { |
72 | rc = usb_autopm_get_interface(mydev->intf); | 73 | rc = usb_autopm_get_interface(mydev->intf); |
73 | if (rc < 0) | 74 | if (rc < 0) |
74 | return; | 75 | return; |
76 | mydev->has_interface_pm = 1; | ||
75 | } | 77 | } |
76 | 78 | ||
79 | if (mydev->shadow_power != 1) | ||
80 | return; | ||
81 | |||
77 | rc = usb_control_msg(mydev->udev, | 82 | rc = usb_control_msg(mydev->udev, |
78 | usb_sndctrlpipe(mydev->udev, 0), | 83 | usb_sndctrlpipe(mydev->udev, 0), |
79 | 0x12, | 84 | 0x12, |
@@ -86,8 +91,10 @@ static void update_display_powered(struct usb_sevsegdev *mydev) | |||
86 | if (rc < 0) | 91 | if (rc < 0) |
87 | dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc); | 92 | dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc); |
88 | 93 | ||
89 | if (mydev->shadow_power && !mydev->powered) | 94 | if (!mydev->powered && mydev->has_interface_pm) { |
90 | usb_autopm_put_interface(mydev->intf); | 95 | usb_autopm_put_interface(mydev->intf); |
96 | mydev->has_interface_pm = 0; | ||
97 | } | ||
91 | } | 98 | } |
92 | 99 | ||
93 | static void update_display_mode(struct usb_sevsegdev *mydev) | 100 | static void update_display_mode(struct usb_sevsegdev *mydev) |
@@ -351,6 +358,10 @@ static int sevseg_probe(struct usb_interface *interface, | |||
351 | mydev->intf = interface; | 358 | mydev->intf = interface; |
352 | usb_set_intfdata(interface, mydev); | 359 | usb_set_intfdata(interface, mydev); |
353 | 360 | ||
361 | /* PM */ | ||
362 | mydev->shadow_power = 1; /* currently active */ | ||
363 | mydev->has_interface_pm = 0; /* have not issued autopm_get */ | ||
364 | |||
354 | /*set defaults */ | 365 | /*set defaults */ |
355 | mydev->textmode = 0x02; /* ascii mode */ | 366 | mydev->textmode = 0x02; /* ascii mode */ |
356 | mydev->mode_msb = 0x06; /* 6 characters */ | 367 | mydev->mode_msb = 0x06; /* 6 characters */ |
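Note: the usbsevseg change above stops inferring the autopm state from shadow_power and instead records in has_interface_pm whether the driver currently holds a runtime-PM reference, so every get is matched by exactly one put. A compact sketch of that bookkeeping follows; get_ref() and put_ref() are hypothetical stand-ins for usb_autopm_get_interface() and usb_autopm_put_interface().

#include <stdio.h>

struct sevseg { int powered; int has_ref; };

static int get_ref(void)  { puts("get ref");  return 0; }
static void put_ref(void) { puts("put ref"); }

static void update_power(struct sevseg *d)
{
	if (d->powered && !d->has_ref) {	/* take the reference exactly once */
		if (get_ref() < 0)
			return;
		d->has_ref = 1;
	}

	/* ... send the power command to the device here ... */

	if (!d->powered && d->has_ref) {	/* and release it exactly once */
		put_ref();
		d->has_ref = 0;
	}
}

int main(void)
{
	struct sevseg d = { .powered = 1, .has_ref = 0 };

	update_power(&d);	/* powers up: takes the reference */
	update_power(&d);	/* still powered: no second get */
	d.powered = 0;
	update_power(&d);	/* powers down: single matching put */
	return 0;
}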
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 73d5f346d3e0..c97a0bb5b6db 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -97,6 +97,7 @@ static const struct usb_device_id id_table[] = { | |||
97 | { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, | 97 | { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, |
98 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, | 98 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, |
99 | { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, | 99 | { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, |
100 | { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, | ||
100 | { } /* Terminating entry */ | 101 | { } /* Terminating entry */ |
101 | }; | 102 | }; |
102 | 103 | ||
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index d640dc951568..a352d5f3a59c 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
@@ -134,3 +134,7 @@ | |||
134 | /* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */ | 134 | /* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */ |
135 | #define SANWA_VENDOR_ID 0x11ad | 135 | #define SANWA_VENDOR_ID 0x11ad |
136 | #define SANWA_PRODUCT_ID 0x0001 | 136 | #define SANWA_PRODUCT_ID 0x0001 |
137 | |||
138 | /* ADLINK ND-6530 RS232,RS485 and RS422 adapter */ | ||
139 | #define ADLINK_VENDOR_ID 0x0b63 | ||
140 | #define ADLINK_ND6530_PRODUCT_ID 0x6530 | ||
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c index 0b9362061713..7e3bea23600b 100644 --- a/drivers/usb/serial/qcaux.c +++ b/drivers/usb/serial/qcaux.c | |||
@@ -42,6 +42,14 @@ | |||
42 | #define CMOTECH_PRODUCT_CDU550 0x5553 | 42 | #define CMOTECH_PRODUCT_CDU550 0x5553 |
43 | #define CMOTECH_PRODUCT_CDX650 0x6512 | 43 | #define CMOTECH_PRODUCT_CDX650 0x6512 |
44 | 44 | ||
45 | /* LG devices */ | ||
46 | #define LG_VENDOR_ID 0x1004 | ||
47 | #define LG_PRODUCT_VX4400_6000 0x6000 /* VX4400/VX6000/Rumor */ | ||
48 | |||
49 | /* Sanyo devices */ | ||
50 | #define SANYO_VENDOR_ID 0x0474 | ||
51 | #define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */ | ||
52 | |||
45 | static struct usb_device_id id_table[] = { | 53 | static struct usb_device_id id_table[] = { |
46 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) }, | 54 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) }, |
47 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) }, | 55 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) }, |
@@ -51,6 +59,8 @@ static struct usb_device_id id_table[] = { | |||
51 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) }, | 59 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) }, |
52 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) }, | 60 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) }, |
53 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) }, | 61 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) }, |
62 | { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) }, | ||
63 | { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) }, | ||
54 | { }, | 64 | { }, |
55 | }; | 65 | }; |
56 | MODULE_DEVICE_TABLE(usb, id_table); | 66 | MODULE_DEVICE_TABLE(usb, id_table); |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 9202f94505e6..ef0bdb08d788 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -230,6 +230,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = { | |||
230 | static const struct usb_device_id id_table[] = { | 230 | static const struct usb_device_id id_table[] = { |
231 | { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ | 231 | { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ |
232 | { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ | 232 | { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ |
233 | { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */ | ||
233 | { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ | 234 | { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ |
234 | 235 | ||
235 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ | 236 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ |
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 0afe5c71c17e..880e990abb07 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
@@ -172,7 +172,7 @@ static unsigned int product_5052_count; | |||
172 | /* the array dimension is the number of default entries plus */ | 172 | /* the array dimension is the number of default entries plus */ |
173 | /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ | 173 | /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ |
174 | /* null entry */ | 174 | /* null entry */ |
175 | static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = { | 175 | static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = { |
176 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, | 176 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, |
177 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, | 177 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, |
178 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, | 178 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, |
@@ -180,6 +180,9 @@ static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = { | |||
180 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, | 180 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, |
181 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, | 181 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, |
182 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, | 182 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, |
183 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, | ||
184 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, | ||
185 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, | ||
183 | { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, | 186 | { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, |
184 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, | 187 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, |
185 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, | 188 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, |
@@ -192,7 +195,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = { | |||
192 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, | 195 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, |
193 | }; | 196 | }; |
194 | 197 | ||
195 | static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = { | 198 | static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = { |
196 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, | 199 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, |
197 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, | 200 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, |
198 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, | 201 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, |
@@ -200,6 +203,9 @@ static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] | |||
200 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, | 203 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, |
201 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, | 204 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, |
202 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, | 205 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, |
206 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, | ||
207 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, | ||
208 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, | ||
203 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, | 209 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, |
204 | { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, | 210 | { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, |
205 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, | 211 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, |
@@ -287,6 +293,8 @@ MODULE_FIRMWARE("ti_5052.fw"); | |||
287 | MODULE_FIRMWARE("mts_cdma.fw"); | 293 | MODULE_FIRMWARE("mts_cdma.fw"); |
288 | MODULE_FIRMWARE("mts_gsm.fw"); | 294 | MODULE_FIRMWARE("mts_gsm.fw"); |
289 | MODULE_FIRMWARE("mts_edge.fw"); | 295 | MODULE_FIRMWARE("mts_edge.fw"); |
296 | MODULE_FIRMWARE("mts_mt9234mu.fw"); | ||
297 | MODULE_FIRMWARE("mts_mt9234zba.fw"); | ||
290 | 298 | ||
291 | module_param(debug, bool, S_IRUGO | S_IWUSR); | 299 | module_param(debug, bool, S_IRUGO | S_IWUSR); |
292 | MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes"); | 300 | MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes"); |
@@ -1687,6 +1695,7 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
1687 | const struct firmware *fw_p; | 1695 | const struct firmware *fw_p; |
1688 | char buf[32]; | 1696 | char buf[32]; |
1689 | 1697 | ||
1698 | dbg("%s\n", __func__); | ||
1690 | /* try ID specific firmware first, then try generic firmware */ | 1699 | /* try ID specific firmware first, then try generic firmware */ |
1691 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, | 1700 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, |
1692 | dev->descriptor.idProduct); | 1701 | dev->descriptor.idProduct); |
@@ -1703,7 +1712,15 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
1703 | case MTS_EDGE_PRODUCT_ID: | 1712 | case MTS_EDGE_PRODUCT_ID: |
1704 | strcpy(buf, "mts_edge.fw"); | 1713 | strcpy(buf, "mts_edge.fw"); |
1705 | break; | 1714 | break; |
1706 | } | 1715 | case MTS_MT9234MU_PRODUCT_ID: |
1716 | strcpy(buf, "mts_mt9234mu.fw"); | ||
1717 | break; | ||
1718 | case MTS_MT9234ZBA_PRODUCT_ID: | ||
1719 | strcpy(buf, "mts_mt9234zba.fw"); | ||
1720 | break; | ||
1721 | case MTS_MT9234ZBAOLD_PRODUCT_ID: | ||
1722 | strcpy(buf, "mts_mt9234zba.fw"); | ||
1723 | break; } | ||
1707 | } | 1724 | } |
1708 | if (buf[0] == '\0') { | 1725 | if (buf[0] == '\0') { |
1709 | if (tdev->td_is_3410) | 1726 | if (tdev->td_is_3410) |
@@ -1718,7 +1735,7 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
1718 | return -ENOENT; | 1735 | return -ENOENT; |
1719 | } | 1736 | } |
1720 | if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { | 1737 | if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { |
1721 | dev_err(&dev->dev, "%s - firmware too large\n", __func__); | 1738 | dev_err(&dev->dev, "%s - firmware too large %d \n", __func__, fw_p->size); |
1722 | return -ENOENT; | 1739 | return -ENOENT; |
1723 | } | 1740 | } |
1724 | 1741 | ||
@@ -1730,6 +1747,7 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
1730 | status = ti_do_download(dev, pipe, buffer, fw_p->size); | 1747 | status = ti_do_download(dev, pipe, buffer, fw_p->size); |
1731 | kfree(buffer); | 1748 | kfree(buffer); |
1732 | } else { | 1749 | } else { |
1750 | dbg("%s ENOMEM\n", __func__); | ||
1733 | status = -ENOMEM; | 1751 | status = -ENOMEM; |
1734 | } | 1752 | } |
1735 | release_firmware(fw_p); | 1753 | release_firmware(fw_p); |
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index f323c6025858..2aac1953993b 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h | |||
@@ -45,6 +45,9 @@ | |||
45 | #define MTS_CDMA_PRODUCT_ID 0xF110 | 45 | #define MTS_CDMA_PRODUCT_ID 0xF110 |
46 | #define MTS_GSM_PRODUCT_ID 0xF111 | 46 | #define MTS_GSM_PRODUCT_ID 0xF111 |
47 | #define MTS_EDGE_PRODUCT_ID 0xF112 | 47 | #define MTS_EDGE_PRODUCT_ID 0xF112 |
48 | #define MTS_MT9234MU_PRODUCT_ID 0xF114 | ||
49 | #define MTS_MT9234ZBA_PRODUCT_ID 0xF115 | ||
50 | #define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319 | ||
48 | 51 | ||
49 | /* Commands */ | 52 | /* Commands */ |
50 | #define TI_GET_VERSION 0x01 | 53 | #define TI_GET_VERSION 0x01 |
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c index 46e79d349498..7ec24e46b34b 100644 --- a/drivers/usb/wusbcore/devconnect.c +++ b/drivers/usb/wusbcore/devconnect.c | |||
@@ -438,7 +438,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc) | |||
438 | old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); | 438 | old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); |
439 | keep_alives = 0; | 439 | keep_alives = 0; |
440 | for (cnt = 0; | 440 | for (cnt = 0; |
441 | keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max; | 441 | keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max; |
442 | cnt++) { | 442 | cnt++) { |
443 | unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); | 443 | unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); |
444 | 444 | ||
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 5be11c99e18f..e69d238c5af0 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -236,6 +236,10 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem, | |||
236 | int log_all) | 236 | int log_all) |
237 | { | 237 | { |
238 | int i; | 238 | int i; |
239 | |||
240 | if (!mem) | ||
241 | return 0; | ||
242 | |||
239 | for (i = 0; i < mem->nregions; ++i) { | 243 | for (i = 0; i < mem->nregions; ++i) { |
240 | struct vhost_memory_region *m = mem->regions + i; | 244 | struct vhost_memory_region *m = mem->regions + i; |
241 | unsigned long a = m->userspace_addr; | 245 | unsigned long a = m->userspace_addr; |
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c index 581d2dbf675a..ecf405562f5c 100644 --- a/drivers/video/efifb.c +++ b/drivers/video/efifb.c | |||
@@ -49,6 +49,7 @@ enum { | |||
49 | M_MBP_2, /* MacBook Pro 2nd gen */ | 49 | M_MBP_2, /* MacBook Pro 2nd gen */ |
50 | M_MBP_SR, /* MacBook Pro (Santa Rosa) */ | 50 | M_MBP_SR, /* MacBook Pro (Santa Rosa) */ |
51 | M_MBP_4, /* MacBook Pro, 4th gen */ | 51 | M_MBP_4, /* MacBook Pro, 4th gen */ |
52 | M_MBP_5_1, /* MacBook Pro, 5,1th gen */ | ||
52 | M_UNKNOWN /* placeholder */ | 53 | M_UNKNOWN /* placeholder */ |
53 | }; | 54 | }; |
54 | 55 | ||
@@ -70,6 +71,7 @@ static struct efifb_dmi_info { | |||
70 | [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ | 71 | [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ |
71 | [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, | 72 | [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, |
72 | [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, | 73 | [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, |
74 | [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 }, | ||
73 | [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } | 75 | [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } |
74 | }; | 76 | }; |
75 | 77 | ||
@@ -106,6 +108,7 @@ static struct dmi_system_id __initdata dmi_system_table[] = { | |||
106 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), | 108 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), |
107 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), | 109 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), |
108 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), | 110 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), |
111 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1), | ||
109 | {}, | 112 | {}, |
110 | }; | 113 | }; |
111 | 114 | ||
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 3aed38886f94..bfec7c29486d 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -103,7 +103,8 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num) | |||
103 | num = min(num, ARRAY_SIZE(vb->pfns)); | 103 | num = min(num, ARRAY_SIZE(vb->pfns)); |
104 | 104 | ||
105 | for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { | 105 | for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { |
106 | struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY); | 106 | struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY | |
107 | __GFP_NOMEMALLOC | __GFP_NOWARN); | ||
107 | if (!page) { | 108 | if (!page) { |
108 | if (printk_ratelimit()) | 109 | if (printk_ratelimit()) |
109 | dev_printk(KERN_INFO, &vb->vdev->dev, | 110 | dev_printk(KERN_INFO, &vb->vdev->dev, |
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index ef36fca2eed4..3a7e9ff8a746 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/sched.h> | ||
19 | 20 | ||
20 | #include <asm/irq.h> | 21 | #include <asm/irq.h> |
21 | #include <mach/hardware.h> | 22 | #include <mach/hardware.h> |
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index 1ed3d554e372..17726a05a0a6 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c | |||
@@ -115,9 +115,8 @@ static struct w1_therm_family_converter w1_therm_families[] = { | |||
115 | 115 | ||
116 | static inline int w1_DS18B20_convert_temp(u8 rom[9]) | 116 | static inline int w1_DS18B20_convert_temp(u8 rom[9]) |
117 | { | 117 | { |
118 | int t = ((s16)rom[1] << 8) | rom[0]; | 118 | s16 t = le16_to_cpup((__le16 *)rom); |
119 | t = t*1000/16; | 119 | return t*1000/16; |
120 | return t; | ||
121 | } | 120 | } |
122 | 121 | ||
123 | static inline int w1_DS18S20_convert_temp(u8 rom[9]) | 122 | static inline int w1_DS18S20_convert_temp(u8 rom[9]) |
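Note: the DS18B20 helper above now treats the first two scratchpad bytes as a little-endian signed 16-bit count of 1/16 degrees C, so negative temperatures sign-extend correctly on any host. A worked host-side example with made-up scratchpad bytes; the explicit byte assembly below plays the role of le16_to_cpup().

#include <stdint.h>
#include <stdio.h>

static int convert_temp(const uint8_t rom[9])
{
	int16_t t = (int16_t)(rom[0] | (rom[1] << 8));	/* little-endian, signed */
	return t * 1000 / 16;				/* millidegrees C */
}

int main(void)
{
	uint8_t plus25[9]  = { 0x91, 0x01 };	/* 0x0191 = 401  -> 25062 m-degC */
	uint8_t minus10[9] = { 0x5E, 0xFF };	/* 0xFF5E = -162 -> -10125 m-degC */

	printf("%d %d\n", convert_temp(plus25), convert_temp(minus10));
	return 0;
}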
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 0e8468ffd100..0bf5020d0d32 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -194,10 +194,10 @@ config EP93XX_WATCHDOG | |||
194 | 194 | ||
195 | config OMAP_WATCHDOG | 195 | config OMAP_WATCHDOG |
196 | tristate "OMAP Watchdog" | 196 | tristate "OMAP Watchdog" |
197 | depends on ARCH_OMAP16XX || ARCH_OMAP2 || ARCH_OMAP3 | 197 | depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS |
198 | help | 198 | help |
199 | Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog. Say 'Y' | 199 | Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog. Say 'Y' |
200 | here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog timer. | 200 | here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer. |
201 | 201 | ||
202 | config PNX4008_WATCHDOG | 202 | config PNX4008_WATCHDOG |
203 | tristate "PNX4008 Watchdog" | 203 | tristate "PNX4008 Watchdog" |
@@ -302,7 +302,7 @@ config TS72XX_WATCHDOG | |||
302 | 302 | ||
303 | config MAX63XX_WATCHDOG | 303 | config MAX63XX_WATCHDOG |
304 | tristate "Max63xx watchdog" | 304 | tristate "Max63xx watchdog" |
305 | depends on ARM | 305 | depends on ARM && HAS_IOMEM |
306 | help | 306 | help |
307 | Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. | 307 | Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. |
308 | 308 | ||
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 8b724aad6825..801ead191499 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c | |||
@@ -44,7 +44,7 @@ u32 booke_wdt_period = WDT_PERIOD_DEFAULT; | |||
44 | 44 | ||
45 | #ifdef CONFIG_FSL_BOOKE | 45 | #ifdef CONFIG_FSL_BOOKE |
46 | #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) | 46 | #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) |
47 | #define WDTP_MASK (WDTP(0)) | 47 | #define WDTP_MASK (WDTP(0x3f)) |
48 | #else | 48 | #else |
49 | #define WDTP(x) (TCR_WP(x)) | 49 | #define WDTP(x) (TCR_WP(x)) |
50 | #define WDTP_MASK (TCR_WP_MASK) | 50 | #define WDTP_MASK (TCR_WP_MASK) |
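Note: the booke_wdt fix above matters because WDTP(x) scatters a 6-bit period across two bit fields, so WDTP(0) evaluates to 0 and the old mask cleared nothing, while WDTP(0x3f) sets every period bit. A quick userspace check of both values; the unsigned cast is added only so the shift into bit 31 is well defined outside the kernel.

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the FSL_BOOKE WDTP() macro in the hunk above. */
#define WDTP(x) ((((uint32_t)(x) & 0x3) << 30) | (((uint32_t)(x) & 0x3c) << 15))

int main(void)
{
	printf("WDTP(0)    = 0x%08x\n", (unsigned)WDTP(0));	/* 0x00000000: useless as a mask */
	printf("WDTP(0x3f) = 0x%08x\n", (unsigned)WDTP(0x3f));	/* 0xc01e0000: covers the whole field */
	return 0;
}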
@@ -121,7 +121,7 @@ static ssize_t booke_wdt_write(struct file *file, const char __user *buf, | |||
121 | return count; | 121 | return count; |
122 | } | 122 | } |
123 | 123 | ||
124 | static const struct watchdog_info ident = { | 124 | static struct watchdog_info ident = { |
125 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, | 125 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, |
126 | .identity = "PowerPC Book-E Watchdog", | 126 | .identity = "PowerPC Book-E Watchdog", |
127 | }; | 127 | }; |
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 75f3a83c0361..3053ff05ca41 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c | |||
@@ -154,9 +154,14 @@ static void max63xx_wdt_enable(struct max63xx_timeout *entry) | |||
154 | 154 | ||
155 | static void max63xx_wdt_disable(void) | 155 | static void max63xx_wdt_disable(void) |
156 | { | 156 | { |
157 | u8 val; | ||
158 | |||
157 | spin_lock(&io_lock); | 159 | spin_lock(&io_lock); |
158 | 160 | ||
159 | __raw_writeb(3, wdt_base); | 161 | val = __raw_readb(wdt_base); |
162 | val &= ~MAX6369_WDSET; | ||
163 | val |= 3; | ||
164 | __raw_writeb(val, wdt_base); | ||
160 | 165 | ||
161 | spin_unlock(&io_lock); | 166 | spin_unlock(&io_lock); |
162 | 167 | ||
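Note: the max63xx change above turns a blind write of 3 into a read-modify-write so bits outside the watchdog set field survive a disable. A generic sketch of that idiom follows; REG_FIELD_MASK, FIELD_DISABLE and the register accessors are illustrative names, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define REG_FIELD_MASK	0x07	/* illustrative 3-bit set field, like MAX6369_WDSET */
#define FIELD_DISABLE	0x03	/* value meaning "watchdog off" */

static uint8_t reg = 0xa5;	/* pretend hardware register with unrelated bits set */

static uint8_t reg_read(void)       { return reg; }
static void    reg_write(uint8_t v) { reg = v; }

int main(void)
{
	uint8_t val = reg_read();

	val &= ~REG_FIELD_MASK;		/* clear only our field */
	val |= FIELD_DISABLE;		/* program the disable value */
	reg_write(val);

	printf("register now 0x%02x (upper bits preserved)\n", reg);
	return 0;
}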
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c index c8eadd478175..88c83aa57303 100644 --- a/drivers/watchdog/sb_wdog.c +++ b/drivers/watchdog/sb_wdog.c | |||
@@ -67,8 +67,8 @@ static DEFINE_SPINLOCK(sbwd_lock); | |||
67 | void sbwdog_set(char __iomem *wdog, unsigned long t) | 67 | void sbwdog_set(char __iomem *wdog, unsigned long t) |
68 | { | 68 | { |
69 | spin_lock(&sbwd_lock); | 69 | spin_lock(&sbwd_lock); |
70 | __raw_writeb(0, wdog - 0x10); | 70 | __raw_writeb(0, wdog); |
71 | __raw_writeq(t & 0x7fffffUL, wdog); | 71 | __raw_writeq(t & 0x7fffffUL, wdog - 0x10); |
72 | spin_unlock(&sbwd_lock); | 72 | spin_unlock(&sbwd_lock); |
73 | } | 73 | } |
74 | 74 | ||
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c index 8d44c9b6fb5b..c7d67e9a7465 100644 --- a/drivers/watchdog/sbc_fitpc2_wdt.c +++ b/drivers/watchdog/sbc_fitpc2_wdt.c | |||
@@ -30,7 +30,7 @@ | |||
30 | static int nowayout = WATCHDOG_NOWAYOUT; | 30 | static int nowayout = WATCHDOG_NOWAYOUT; |
31 | static unsigned int margin = 60; /* (secs) Default is 1 minute */ | 31 | static unsigned int margin = 60; /* (secs) Default is 1 minute */ |
32 | static unsigned long wdt_status; | 32 | static unsigned long wdt_status; |
33 | static DEFINE_SPINLOCK(wdt_lock); | 33 | static DEFINE_MUTEX(wdt_lock); |
34 | 34 | ||
35 | #define WDT_IN_USE 0 | 35 | #define WDT_IN_USE 0 |
36 | #define WDT_OK_TO_CLOSE 1 | 36 | #define WDT_OK_TO_CLOSE 1 |
@@ -45,26 +45,26 @@ static DEFINE_SPINLOCK(wdt_lock); | |||
45 | 45 | ||
46 | static void wdt_send_data(unsigned char command, unsigned char data) | 46 | static void wdt_send_data(unsigned char command, unsigned char data) |
47 | { | 47 | { |
48 | outb(command, COMMAND_PORT); | ||
49 | msleep(100); | ||
50 | outb(data, DATA_PORT); | 48 | outb(data, DATA_PORT); |
51 | msleep(200); | 49 | msleep(200); |
50 | outb(command, COMMAND_PORT); | ||
51 | msleep(100); | ||
52 | } | 52 | } |
53 | 53 | ||
54 | static void wdt_enable(void) | 54 | static void wdt_enable(void) |
55 | { | 55 | { |
56 | spin_lock(&wdt_lock); | 56 | mutex_lock(&wdt_lock); |
57 | wdt_send_data(IFACE_ON_COMMAND, 1); | 57 | wdt_send_data(IFACE_ON_COMMAND, 1); |
58 | wdt_send_data(REBOOT_COMMAND, margin); | 58 | wdt_send_data(REBOOT_COMMAND, margin); |
59 | spin_unlock(&wdt_lock); | 59 | mutex_unlock(&wdt_lock); |
60 | } | 60 | } |
61 | 61 | ||
62 | static void wdt_disable(void) | 62 | static void wdt_disable(void) |
63 | { | 63 | { |
64 | spin_lock(&wdt_lock); | 64 | mutex_lock(&wdt_lock); |
65 | wdt_send_data(IFACE_ON_COMMAND, 0); | 65 | wdt_send_data(IFACE_ON_COMMAND, 0); |
66 | wdt_send_data(REBOOT_COMMAND, 0); | 66 | wdt_send_data(REBOOT_COMMAND, 0); |
67 | spin_unlock(&wdt_lock); | 67 | mutex_unlock(&wdt_lock); |
68 | } | 68 | } |
69 | 69 | ||
70 | static int fitpc2_wdt_open(struct inode *inode, struct file *file) | 70 | static int fitpc2_wdt_open(struct inode *inode, struct file *file) |
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 5c5bc8480070..f8b86e92cd66 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c | |||
@@ -238,6 +238,13 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses, | |||
238 | return ERR_PTR(-ENOMEM); | 238 | return ERR_PTR(-ENOMEM); |
239 | } | 239 | } |
240 | 240 | ||
241 | rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY); | ||
242 | if (rc) { | ||
243 | __putname(v9ses->aname); | ||
244 | __putname(v9ses->uname); | ||
245 | return ERR_PTR(rc); | ||
246 | } | ||
247 | |||
241 | spin_lock(&v9fs_sessionlist_lock); | 248 | spin_lock(&v9fs_sessionlist_lock); |
242 | list_add(&v9ses->slist, &v9fs_sessionlist); | 249 | list_add(&v9ses->slist, &v9fs_sessionlist); |
243 | spin_unlock(&v9fs_sessionlist_lock); | 250 | spin_unlock(&v9fs_sessionlist_lock); |
@@ -301,6 +308,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses, | |||
301 | return fid; | 308 | return fid; |
302 | 309 | ||
303 | error: | 310 | error: |
311 | bdi_destroy(&v9ses->bdi); | ||
304 | return ERR_PTR(retval); | 312 | return ERR_PTR(retval); |
305 | } | 313 | } |
306 | 314 | ||
@@ -326,6 +334,8 @@ void v9fs_session_close(struct v9fs_session_info *v9ses) | |||
326 | __putname(v9ses->uname); | 334 | __putname(v9ses->uname); |
327 | __putname(v9ses->aname); | 335 | __putname(v9ses->aname); |
328 | 336 | ||
337 | bdi_destroy(&v9ses->bdi); | ||
338 | |||
329 | spin_lock(&v9fs_sessionlist_lock); | 339 | spin_lock(&v9fs_sessionlist_lock); |
330 | list_del(&v9ses->slist); | 340 | list_del(&v9ses->slist); |
331 | spin_unlock(&v9fs_sessionlist_lock); | 341 | spin_unlock(&v9fs_sessionlist_lock); |
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index a0a8d3dd1361..bec4d0bcb458 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h | |||
@@ -20,6 +20,7 @@ | |||
20 | * Boston, MA 02111-1301 USA | 20 | * Boston, MA 02111-1301 USA |
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | #include <linux/backing-dev.h> | ||
23 | 24 | ||
24 | /** | 25 | /** |
25 | * enum p9_session_flags - option flags for each 9P session | 26 | * enum p9_session_flags - option flags for each 9P session |
@@ -102,6 +103,7 @@ struct v9fs_session_info { | |||
102 | u32 uid; /* if ACCESS_SINGLE, the uid that has access */ | 103 | u32 uid; /* if ACCESS_SINGLE, the uid that has access */ |
103 | struct p9_client *clnt; /* 9p client */ | 104 | struct p9_client *clnt; /* 9p client */ |
104 | struct list_head slist; /* list of sessions registered with v9fs */ | 105 | struct list_head slist; /* list of sessions registered with v9fs */ |
106 | struct backing_dev_info bdi; | ||
105 | }; | 107 | }; |
106 | 108 | ||
107 | struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *, | 109 | struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *, |
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index 491108bd6e0d..806da5d3b3a0 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c | |||
@@ -77,6 +77,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses, | |||
77 | sb->s_blocksize = 1 << sb->s_blocksize_bits; | 77 | sb->s_blocksize = 1 << sb->s_blocksize_bits; |
78 | sb->s_magic = V9FS_MAGIC; | 78 | sb->s_magic = V9FS_MAGIC; |
79 | sb->s_op = &v9fs_super_ops; | 79 | sb->s_op = &v9fs_super_ops; |
80 | sb->s_bdi = &v9ses->bdi; | ||
80 | 81 | ||
81 | sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC | | 82 | sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC | |
82 | MS_NOATIME; | 83 | MS_NOATIME; |
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index c54dad4e6063..a10f2582844f 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/workqueue.h> | 19 | #include <linux/workqueue.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/fscache.h> | 21 | #include <linux/fscache.h> |
22 | #include <linux/backing-dev.h> | ||
22 | 23 | ||
23 | #include "afs.h" | 24 | #include "afs.h" |
24 | #include "afs_vl.h" | 25 | #include "afs_vl.h" |
@@ -313,6 +314,7 @@ struct afs_volume { | |||
313 | unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */ | 314 | unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */ |
314 | struct afs_server *servers[8]; /* servers on which volume resides (ordered) */ | 315 | struct afs_server *servers[8]; /* servers on which volume resides (ordered) */ |
315 | struct rw_semaphore server_sem; /* lock for accessing current server */ | 316 | struct rw_semaphore server_sem; /* lock for accessing current server */ |
317 | struct backing_dev_info bdi; | ||
316 | }; | 318 | }; |
317 | 319 | ||
318 | /* | 320 | /* |
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index 5e813a816ce4..b3feddc4f7d6 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c | |||
@@ -138,9 +138,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) | |||
138 | { | 138 | { |
139 | struct afs_super_info *super; | 139 | struct afs_super_info *super; |
140 | struct vfsmount *mnt; | 140 | struct vfsmount *mnt; |
141 | struct page *page = NULL; | 141 | struct page *page; |
142 | size_t size; | 142 | size_t size; |
143 | char *buf, *devname = NULL, *options = NULL; | 143 | char *buf, *devname, *options; |
144 | int ret; | 144 | int ret; |
145 | 145 | ||
146 | _enter("{%s}", mntpt->d_name.name); | 146 | _enter("{%s}", mntpt->d_name.name); |
@@ -150,22 +150,22 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) | |||
150 | ret = -EINVAL; | 150 | ret = -EINVAL; |
151 | size = mntpt->d_inode->i_size; | 151 | size = mntpt->d_inode->i_size; |
152 | if (size > PAGE_SIZE - 1) | 152 | if (size > PAGE_SIZE - 1) |
153 | goto error; | 153 | goto error_no_devname; |
154 | 154 | ||
155 | ret = -ENOMEM; | 155 | ret = -ENOMEM; |
156 | devname = (char *) get_zeroed_page(GFP_KERNEL); | 156 | devname = (char *) get_zeroed_page(GFP_KERNEL); |
157 | if (!devname) | 157 | if (!devname) |
158 | goto error; | 158 | goto error_no_devname; |
159 | 159 | ||
160 | options = (char *) get_zeroed_page(GFP_KERNEL); | 160 | options = (char *) get_zeroed_page(GFP_KERNEL); |
161 | if (!options) | 161 | if (!options) |
162 | goto error; | 162 | goto error_no_options; |
163 | 163 | ||
164 | /* read the contents of the AFS special symlink */ | 164 | /* read the contents of the AFS special symlink */ |
165 | page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL); | 165 | page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL); |
166 | if (IS_ERR(page)) { | 166 | if (IS_ERR(page)) { |
167 | ret = PTR_ERR(page); | 167 | ret = PTR_ERR(page); |
168 | goto error; | 168 | goto error_no_page; |
169 | } | 169 | } |
170 | 170 | ||
171 | ret = -EIO; | 171 | ret = -EIO; |
@@ -196,12 +196,12 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) | |||
196 | return mnt; | 196 | return mnt; |
197 | 197 | ||
198 | error: | 198 | error: |
199 | if (page) | 199 | page_cache_release(page); |
200 | page_cache_release(page); | 200 | error_no_page: |
201 | if (devname) | 201 | free_page((unsigned long) options); |
202 | free_page((unsigned long) devname); | 202 | error_no_options: |
203 | if (options) | 203 | free_page((unsigned long) devname); |
204 | free_page((unsigned long) options); | 204 | error_no_devname: |
205 | _leave(" = %d", ret); | 205 | _leave(" = %d", ret); |
206 | return ERR_PTR(ret); | 206 | return ERR_PTR(ret); |
207 | } | 207 | } |
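Note: the mntpt rewrite above drops the NULL-initialised pointers and conditional frees in favour of the usual staged unwind: each allocation gets its own error label, and a failure jumps past the cleanup for resources that were never obtained. A small sketch of that shape with illustrative resource names:

#include <stdio.h>
#include <stdlib.h>

static int do_setup(void)
{
	char *devname, *options;
	int ret = -1;

	devname = malloc(64);
	if (!devname)
		goto error_no_devname;

	options = malloc(64);
	if (!options)
		goto error_no_options;

	/* ... real work; a later failure would unwind both allocations ... */

	free(options);
	free(devname);
	return 0;

error_no_options:
	free(devname);
error_no_devname:
	return ret;
}

int main(void)
{
	printf("do_setup() = %d\n", do_setup());
	return 0;
}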
diff --git a/fs/afs/super.c b/fs/afs/super.c index 14f6431598ad..e932e5a3a0c1 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c | |||
@@ -311,6 +311,7 @@ static int afs_fill_super(struct super_block *sb, void *data) | |||
311 | sb->s_magic = AFS_FS_MAGIC; | 311 | sb->s_magic = AFS_FS_MAGIC; |
312 | sb->s_op = &afs_super_ops; | 312 | sb->s_op = &afs_super_ops; |
313 | sb->s_fs_info = as; | 313 | sb->s_fs_info = as; |
314 | sb->s_bdi = &as->volume->bdi; | ||
314 | 315 | ||
315 | /* allocate the root inode and dentry */ | 316 | /* allocate the root inode and dentry */ |
316 | fid.vid = as->volume->vid; | 317 | fid.vid = as->volume->vid; |
diff --git a/fs/afs/volume.c b/fs/afs/volume.c index a353e69e2391..401eeb21869f 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c | |||
@@ -106,6 +106,10 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params) | |||
106 | volume->cell = params->cell; | 106 | volume->cell = params->cell; |
107 | volume->vid = vlocation->vldb.vid[params->type]; | 107 | volume->vid = vlocation->vldb.vid[params->type]; |
108 | 108 | ||
109 | ret = bdi_setup_and_register(&volume->bdi, "afs", BDI_CAP_MAP_COPY); | ||
110 | if (ret) | ||
111 | goto error_bdi; | ||
112 | |||
109 | init_rwsem(&volume->server_sem); | 113 | init_rwsem(&volume->server_sem); |
110 | 114 | ||
111 | /* look up all the applicable server records */ | 115 | /* look up all the applicable server records */ |
@@ -151,6 +155,8 @@ error: | |||
151 | return ERR_PTR(ret); | 155 | return ERR_PTR(ret); |
152 | 156 | ||
153 | error_discard: | 157 | error_discard: |
158 | bdi_destroy(&volume->bdi); | ||
159 | error_bdi: | ||
154 | up_write(¶ms->cell->vl_sem); | 160 | up_write(¶ms->cell->vl_sem); |
155 | 161 | ||
156 | for (loop = volume->nservers - 1; loop >= 0; loop--) | 162 | for (loop = volume->nservers - 1; loop >= 0; loop--) |
@@ -200,6 +206,7 @@ void afs_put_volume(struct afs_volume *volume) | |||
200 | for (loop = volume->nservers - 1; loop >= 0; loop--) | 206 | for (loop = volume->nservers - 1; loop >= 0; loop--) |
201 | afs_put_server(volume->servers[loop]); | 207 | afs_put_server(volume->servers[loop]); |
202 | 208 | ||
209 | bdi_destroy(&volume->bdi); | ||
203 | kfree(volume); | 210 | kfree(volume); |
204 | 211 | ||
205 | _leave(" [destroyed]"); | 212 | _leave(" [destroyed]"); |
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 7ab23e006e4c..2c5f9a0e5d72 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c | |||
@@ -1005,15 +1005,8 @@ static int elf_fdpic_map_file_constdisp_on_uclinux( | |||
1005 | } | 1005 | } |
1006 | } else if (!mm->start_data) { | 1006 | } else if (!mm->start_data) { |
1007 | mm->start_data = seg->addr; | 1007 | mm->start_data = seg->addr; |
1008 | #ifndef CONFIG_MMU | ||
1009 | mm->end_data = seg->addr + phdr->p_memsz; | 1008 | mm->end_data = seg->addr + phdr->p_memsz; |
1010 | #endif | ||
1011 | } | 1009 | } |
1012 | |||
1013 | #ifdef CONFIG_MMU | ||
1014 | if (seg->addr + phdr->p_memsz > mm->end_data) | ||
1015 | mm->end_data = seg->addr + phdr->p_memsz; | ||
1016 | #endif | ||
1017 | } | 1010 | } |
1018 | 1011 | ||
1019 | seg++; | 1012 | seg++; |
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index e0e769bdca59..49566c1687d8 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c | |||
@@ -355,7 +355,7 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp) | |||
355 | 355 | ||
356 | if (!flat_reloc_valid(r, start_brk - start_data + text_len)) { | 356 | if (!flat_reloc_valid(r, start_brk - start_data + text_len)) { |
357 | printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)", | 357 | printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)", |
358 | (int) r,(int)(start_brk-start_code),(int)text_len); | 358 | (int) r,(int)(start_brk-start_data+text_len),(int)text_len); |
359 | goto failed; | 359 | goto failed; |
360 | } | 360 | } |
361 | 361 | ||
diff --git a/fs/block_dev.c b/fs/block_dev.c index dd769304382e..55dcb7884f4d 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -406,17 +406,23 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin) | |||
406 | 406 | ||
407 | int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync) | 407 | int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync) |
408 | { | 408 | { |
409 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); | 409 | struct inode *bd_inode = filp->f_mapping->host; |
410 | struct block_device *bdev = I_BDEV(bd_inode); | ||
410 | int error; | 411 | int error; |
411 | 412 | ||
412 | error = sync_blockdev(bdev); | 413 | /* |
413 | if (error) | 414 | * There is no need to serialise calls to blkdev_issue_flush with |
414 | return error; | 415 | * i_mutex and doing so causes performance issues with concurrent |
415 | 416 | * O_SYNC writers to a block device. | |
416 | error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, | 417 | */ |
417 | (BLKDEV_IFL_WAIT)); | 418 | mutex_unlock(&bd_inode->i_mutex); |
419 | |||
420 | error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT); | ||
418 | if (error == -EOPNOTSUPP) | 421 | if (error == -EOPNOTSUPP) |
419 | error = 0; | 422 | error = 0; |
423 | |||
424 | mutex_lock(&bd_inode->i_mutex); | ||
425 | |||
420 | return error; | 426 | return error; |
421 | } | 427 | } |
422 | EXPORT_SYMBOL(blkdev_fsync); | 428 | EXPORT_SYMBOL(blkdev_fsync); |
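The blkdev_fsync() change relies on the convention that ->fsync() is entered with i_mutex held: the lock is dropped around the flush, which does not need it, and retaken before returning so the caller's locking assumptions still hold. A rough user-space sketch of that shape, using POSIX threads purely as an illustration (not kernel code):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void slow_flush(void)
    {
            usleep(1000);               /* stands in for blkdev_issue_flush() */
    }

    /* Called with 'lock' held; returns with 'lock' held. */
    static int fsync_like(void)
    {
            pthread_mutex_unlock(&lock);
            slow_flush();               /* slow work done without the lock */
            pthread_mutex_lock(&lock);
            return 0;
    }

    int main(void)
    {
            pthread_mutex_lock(&lock);
            printf("flush returned %d\n", fsync_like());
            pthread_mutex_unlock(&lock);
            return 0;
    }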
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e7b8f2c89ccb..feca04197d02 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -44,8 +44,6 @@ static struct extent_io_ops btree_extent_io_ops; | |||
44 | static void end_workqueue_fn(struct btrfs_work *work); | 44 | static void end_workqueue_fn(struct btrfs_work *work); |
45 | static void free_fs_root(struct btrfs_root *root); | 45 | static void free_fs_root(struct btrfs_root *root); |
46 | 46 | ||
47 | static atomic_t btrfs_bdi_num = ATOMIC_INIT(0); | ||
48 | |||
49 | /* | 47 | /* |
50 | * end_io_wq structs are used to do processing in task context when an IO is | 48 | * end_io_wq structs are used to do processing in task context when an IO is |
51 | * complete. This is used during reads to verify checksums, and it is used | 49 | * complete. This is used during reads to verify checksums, and it is used |
@@ -1375,19 +1373,11 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi) | |||
1375 | { | 1373 | { |
1376 | int err; | 1374 | int err; |
1377 | 1375 | ||
1378 | bdi->name = "btrfs"; | ||
1379 | bdi->capabilities = BDI_CAP_MAP_COPY; | 1376 | bdi->capabilities = BDI_CAP_MAP_COPY; |
1380 | err = bdi_init(bdi); | 1377 | err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY); |
1381 | if (err) | 1378 | if (err) |
1382 | return err; | 1379 | return err; |
1383 | 1380 | ||
1384 | err = bdi_register(bdi, NULL, "btrfs-%d", | ||
1385 | atomic_inc_return(&btrfs_bdi_num)); | ||
1386 | if (err) { | ||
1387 | bdi_destroy(bdi); | ||
1388 | return err; | ||
1389 | } | ||
1390 | |||
1391 | bdi->ra_pages = default_backing_dev_info.ra_pages; | 1381 | bdi->ra_pages = default_backing_dev_info.ra_pages; |
1392 | bdi->unplug_io_fn = btrfs_unplug_io_fn; | 1382 | bdi->unplug_io_fn = btrfs_unplug_io_fn; |
1393 | bdi->unplug_io_data = info; | 1383 | bdi->unplug_io_data = info; |
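The btrfs hunk (like the afs one above and the cifs and coda ones further down) collapses the open-coded bdi_init()/bdi_register() pair into bdi_setup_and_register(), which picks a unique "name-N" registration itself and is paired with a single bdi_destroy() on teardown. A minimal module-shaped sketch of that pairing, assuming the same 2.6.34-era interface these hunks use; everything named "example" is hypothetical:

    #include <linux/module.h>
    #include <linux/backing-dev.h>

    static struct backing_dev_info example_bdi;

    static int __init example_init(void)
    {
            /* Initialises the bdi and registers it under a unique "example-N" name. */
            return bdi_setup_and_register(&example_bdi, "example", BDI_CAP_MAP_COPY);
    }

    static void __exit example_exit(void)
    {
            /* Called exactly once for a bdi that was set up successfully. */
            bdi_destroy(&example_bdi);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");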
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index aa3cd7cc3e40..412593703d1e 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -337,16 +337,15 @@ out: | |||
337 | /* | 337 | /* |
338 | * Get ref for the oldest snapc for an inode with dirty data... that is, the | 338 | * Get ref for the oldest snapc for an inode with dirty data... that is, the |
339 | * only snap context we are allowed to write back. | 339 | * only snap context we are allowed to write back. |
340 | * | ||
341 | * Caller holds i_lock. | ||
342 | */ | 340 | */ |
343 | static struct ceph_snap_context *__get_oldest_context(struct inode *inode, | 341 | static struct ceph_snap_context *get_oldest_context(struct inode *inode, |
344 | u64 *snap_size) | 342 | u64 *snap_size) |
345 | { | 343 | { |
346 | struct ceph_inode_info *ci = ceph_inode(inode); | 344 | struct ceph_inode_info *ci = ceph_inode(inode); |
347 | struct ceph_snap_context *snapc = NULL; | 345 | struct ceph_snap_context *snapc = NULL; |
348 | struct ceph_cap_snap *capsnap = NULL; | 346 | struct ceph_cap_snap *capsnap = NULL; |
349 | 347 | ||
348 | spin_lock(&inode->i_lock); | ||
350 | list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { | 349 | list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { |
351 | dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, | 350 | dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, |
352 | capsnap->context, capsnap->dirty_pages); | 351 | capsnap->context, capsnap->dirty_pages); |
@@ -357,21 +356,11 @@ static struct ceph_snap_context *__get_oldest_context(struct inode *inode, | |||
357 | break; | 356 | break; |
358 | } | 357 | } |
359 | } | 358 | } |
360 | if (!snapc && ci->i_snap_realm) { | 359 | if (!snapc && ci->i_head_snapc) { |
361 | snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); | 360 | snapc = ceph_get_snap_context(ci->i_head_snapc); |
362 | dout(" head snapc %p has %d dirty pages\n", | 361 | dout(" head snapc %p has %d dirty pages\n", |
363 | snapc, ci->i_wrbuffer_ref_head); | 362 | snapc, ci->i_wrbuffer_ref_head); |
364 | } | 363 | } |
365 | return snapc; | ||
366 | } | ||
367 | |||
368 | static struct ceph_snap_context *get_oldest_context(struct inode *inode, | ||
369 | u64 *snap_size) | ||
370 | { | ||
371 | struct ceph_snap_context *snapc = NULL; | ||
372 | |||
373 | spin_lock(&inode->i_lock); | ||
374 | snapc = __get_oldest_context(inode, snap_size); | ||
375 | spin_unlock(&inode->i_lock); | 364 | spin_unlock(&inode->i_lock); |
376 | return snapc; | 365 | return snapc; |
377 | } | 366 | } |
@@ -392,7 +381,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
392 | int len = PAGE_CACHE_SIZE; | 381 | int len = PAGE_CACHE_SIZE; |
393 | loff_t i_size; | 382 | loff_t i_size; |
394 | int err = 0; | 383 | int err = 0; |
395 | struct ceph_snap_context *snapc; | 384 | struct ceph_snap_context *snapc, *oldest; |
396 | u64 snap_size = 0; | 385 | u64 snap_size = 0; |
397 | long writeback_stat; | 386 | long writeback_stat; |
398 | 387 | ||
@@ -413,13 +402,16 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
413 | dout("writepage %p page %p not dirty?\n", inode, page); | 402 | dout("writepage %p page %p not dirty?\n", inode, page); |
414 | goto out; | 403 | goto out; |
415 | } | 404 | } |
416 | if (snapc != get_oldest_context(inode, &snap_size)) { | 405 | oldest = get_oldest_context(inode, &snap_size); |
406 | if (snapc->seq > oldest->seq) { | ||
417 | dout("writepage %p page %p snapc %p not writeable - noop\n", | 407 | dout("writepage %p page %p snapc %p not writeable - noop\n", |
418 | inode, page, (void *)page->private); | 408 | inode, page, (void *)page->private); |
419 | /* we should only noop if called by kswapd */ | 409 | /* we should only noop if called by kswapd */ |
420 | WARN_ON((current->flags & PF_MEMALLOC) == 0); | 410 | WARN_ON((current->flags & PF_MEMALLOC) == 0); |
411 | ceph_put_snap_context(oldest); | ||
421 | goto out; | 412 | goto out; |
422 | } | 413 | } |
414 | ceph_put_snap_context(oldest); | ||
423 | 415 | ||
424 | /* is this a partial page at end of file? */ | 416 | /* is this a partial page at end of file? */ |
425 | if (snap_size) | 417 | if (snap_size) |
@@ -458,7 +450,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
458 | ClearPagePrivate(page); | 450 | ClearPagePrivate(page); |
459 | end_page_writeback(page); | 451 | end_page_writeback(page); |
460 | ceph_put_wrbuffer_cap_refs(ci, 1, snapc); | 452 | ceph_put_wrbuffer_cap_refs(ci, 1, snapc); |
461 | ceph_put_snap_context(snapc); | 453 | ceph_put_snap_context(snapc); /* page's reference */ |
462 | out: | 454 | out: |
463 | return err; | 455 | return err; |
464 | } | 456 | } |
@@ -558,9 +550,9 @@ static void writepages_finish(struct ceph_osd_request *req, | |||
558 | dout("inode %p skipping page %p\n", inode, page); | 550 | dout("inode %p skipping page %p\n", inode, page); |
559 | wbc->pages_skipped++; | 551 | wbc->pages_skipped++; |
560 | } | 552 | } |
553 | ceph_put_snap_context((void *)page->private); | ||
561 | page->private = 0; | 554 | page->private = 0; |
562 | ClearPagePrivate(page); | 555 | ClearPagePrivate(page); |
563 | ceph_put_snap_context(snapc); | ||
564 | dout("unlocking %d %p\n", i, page); | 556 | dout("unlocking %d %p\n", i, page); |
565 | end_page_writeback(page); | 557 | end_page_writeback(page); |
566 | 558 | ||
@@ -618,7 +610,7 @@ static int ceph_writepages_start(struct address_space *mapping, | |||
618 | int range_whole = 0; | 610 | int range_whole = 0; |
619 | int should_loop = 1; | 611 | int should_loop = 1; |
620 | pgoff_t max_pages = 0, max_pages_ever = 0; | 612 | pgoff_t max_pages = 0, max_pages_ever = 0; |
621 | struct ceph_snap_context *snapc = NULL, *last_snapc = NULL; | 613 | struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; |
622 | struct pagevec pvec; | 614 | struct pagevec pvec; |
623 | int done = 0; | 615 | int done = 0; |
624 | int rc = 0; | 616 | int rc = 0; |
@@ -770,9 +762,10 @@ get_more_pages: | |||
770 | } | 762 | } |
771 | 763 | ||
772 | /* only if matching snap context */ | 764 | /* only if matching snap context */ |
773 | if (snapc != (void *)page->private) { | 765 | pgsnapc = (void *)page->private; |
774 | dout("page snapc %p != oldest %p\n", | 766 | if (pgsnapc->seq > snapc->seq) { |
775 | (void *)page->private, snapc); | 767 | dout("page snapc %p %lld > oldest %p %lld\n", |
768 | pgsnapc, pgsnapc->seq, snapc, snapc->seq); | ||
776 | unlock_page(page); | 769 | unlock_page(page); |
777 | if (!locked_pages) | 770 | if (!locked_pages) |
778 | continue; /* keep looking for snap */ | 771 | continue; /* keep looking for snap */ |
@@ -914,7 +907,10 @@ static int context_is_writeable_or_written(struct inode *inode, | |||
914 | struct ceph_snap_context *snapc) | 907 | struct ceph_snap_context *snapc) |
915 | { | 908 | { |
916 | struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); | 909 | struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); |
917 | return !oldest || snapc->seq <= oldest->seq; | 910 | int ret = !oldest || snapc->seq <= oldest->seq; |
911 | |||
912 | ceph_put_snap_context(oldest); | ||
913 | return ret; | ||
918 | } | 914 | } |
919 | 915 | ||
920 | /* | 916 | /* |
@@ -936,8 +932,8 @@ static int ceph_update_writeable_page(struct file *file, | |||
936 | int pos_in_page = pos & ~PAGE_CACHE_MASK; | 932 | int pos_in_page = pos & ~PAGE_CACHE_MASK; |
937 | int end_in_page = pos_in_page + len; | 933 | int end_in_page = pos_in_page + len; |
938 | loff_t i_size; | 934 | loff_t i_size; |
939 | struct ceph_snap_context *snapc; | ||
940 | int r; | 935 | int r; |
936 | struct ceph_snap_context *snapc, *oldest; | ||
941 | 937 | ||
942 | retry_locked: | 938 | retry_locked: |
943 | /* writepages currently holds page lock, but if we change that later, */ | 939 | /* writepages currently holds page lock, but if we change that later, */ |
@@ -947,23 +943,24 @@ retry_locked: | |||
947 | BUG_ON(!ci->i_snap_realm); | 943 | BUG_ON(!ci->i_snap_realm); |
948 | down_read(&mdsc->snap_rwsem); | 944 | down_read(&mdsc->snap_rwsem); |
949 | BUG_ON(!ci->i_snap_realm->cached_context); | 945 | BUG_ON(!ci->i_snap_realm->cached_context); |
950 | if (page->private && | 946 | snapc = (void *)page->private; |
951 | (void *)page->private != ci->i_snap_realm->cached_context) { | 947 | if (snapc && snapc != ci->i_head_snapc) { |
952 | /* | 948 | /* |
953 | * this page is already dirty in another (older) snap | 949 | * this page is already dirty in another (older) snap |
954 | * context! is it writeable now? | 950 | * context! is it writeable now? |
955 | */ | 951 | */ |
956 | snapc = get_oldest_context(inode, NULL); | 952 | oldest = get_oldest_context(inode, NULL); |
957 | up_read(&mdsc->snap_rwsem); | 953 | up_read(&mdsc->snap_rwsem); |
958 | 954 | ||
959 | if (snapc != (void *)page->private) { | 955 | if (snapc->seq > oldest->seq) { |
956 | ceph_put_snap_context(oldest); | ||
960 | dout(" page %p snapc %p not current or oldest\n", | 957 | dout(" page %p snapc %p not current or oldest\n", |
961 | page, (void *)page->private); | 958 | page, snapc); |
962 | /* | 959 | /* |
963 | * queue for writeback, and wait for snapc to | 960 | * queue for writeback, and wait for snapc to |
964 | * be writeable or written | 961 | * be writeable or written |
965 | */ | 962 | */ |
966 | snapc = ceph_get_snap_context((void *)page->private); | 963 | snapc = ceph_get_snap_context(snapc); |
967 | unlock_page(page); | 964 | unlock_page(page); |
968 | ceph_queue_writeback(inode); | 965 | ceph_queue_writeback(inode); |
969 | r = wait_event_interruptible(ci->i_cap_wq, | 966 | r = wait_event_interruptible(ci->i_cap_wq, |
@@ -973,6 +970,7 @@ retry_locked: | |||
973 | return r; | 970 | return r; |
974 | return -EAGAIN; | 971 | return -EAGAIN; |
975 | } | 972 | } |
973 | ceph_put_snap_context(oldest); | ||
976 | 974 | ||
977 | /* yay, writeable, do it now (without dropping page lock) */ | 975 | /* yay, writeable, do it now (without dropping page lock) */ |
978 | dout(" page %p snapc %p not current, but oldest\n", | 976 | dout(" page %p snapc %p not current, but oldest\n", |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 3710e077a857..aa2239fa9a3b 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -1205,6 +1205,12 @@ retry: | |||
1205 | if (capsnap->dirty_pages || capsnap->writing) | 1205 | if (capsnap->dirty_pages || capsnap->writing) |
1206 | continue; | 1206 | continue; |
1207 | 1207 | ||
1208 | /* | ||
1209 | * if cap writeback already occurred, we should have dropped | ||
1210 | * the capsnap in ceph_put_wrbuffer_cap_refs. | ||
1211 | */ | ||
1212 | BUG_ON(capsnap->dirty == 0); | ||
1213 | |||
1208 | /* pick mds, take s_mutex */ | 1214 | /* pick mds, take s_mutex */ |
1209 | mds = __ceph_get_cap_mds(ci, &mseq); | 1215 | mds = __ceph_get_cap_mds(ci, &mseq); |
1210 | if (session && session->s_mds != mds) { | 1216 | if (session && session->s_mds != mds) { |
@@ -2118,8 +2124,8 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) | |||
2118 | } | 2124 | } |
2119 | spin_unlock(&inode->i_lock); | 2125 | spin_unlock(&inode->i_lock); |
2120 | 2126 | ||
2121 | dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had), | 2127 | dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had), |
2122 | last ? "last" : ""); | 2128 | last ? " last" : "", put ? " put" : ""); |
2123 | 2129 | ||
2124 | if (last && !flushsnaps) | 2130 | if (last && !flushsnaps) |
2125 | ceph_check_caps(ci, 0, NULL); | 2131 | ceph_check_caps(ci, 0, NULL); |
@@ -2143,7 +2149,8 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, | |||
2143 | { | 2149 | { |
2144 | struct inode *inode = &ci->vfs_inode; | 2150 | struct inode *inode = &ci->vfs_inode; |
2145 | int last = 0; | 2151 | int last = 0; |
2146 | int last_snap = 0; | 2152 | int complete_capsnap = 0; |
2153 | int drop_capsnap = 0; | ||
2147 | int found = 0; | 2154 | int found = 0; |
2148 | struct ceph_cap_snap *capsnap = NULL; | 2155 | struct ceph_cap_snap *capsnap = NULL; |
2149 | 2156 | ||
@@ -2166,19 +2173,32 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, | |||
2166 | list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { | 2173 | list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { |
2167 | if (capsnap->context == snapc) { | 2174 | if (capsnap->context == snapc) { |
2168 | found = 1; | 2175 | found = 1; |
2169 | capsnap->dirty_pages -= nr; | ||
2170 | last_snap = !capsnap->dirty_pages; | ||
2171 | break; | 2176 | break; |
2172 | } | 2177 | } |
2173 | } | 2178 | } |
2174 | BUG_ON(!found); | 2179 | BUG_ON(!found); |
2180 | capsnap->dirty_pages -= nr; | ||
2181 | if (capsnap->dirty_pages == 0) { | ||
2182 | complete_capsnap = 1; | ||
2183 | if (capsnap->dirty == 0) | ||
2184 | /* cap writeback completed before we created | ||
2185 | * the cap_snap; no FLUSHSNAP is needed */ | ||
2186 | drop_capsnap = 1; | ||
2187 | } | ||
2175 | dout("put_wrbuffer_cap_refs on %p cap_snap %p " | 2188 | dout("put_wrbuffer_cap_refs on %p cap_snap %p " |
2176 | " snap %lld %d/%d -> %d/%d %s%s\n", | 2189 | " snap %lld %d/%d -> %d/%d %s%s%s\n", |
2177 | inode, capsnap, capsnap->context->seq, | 2190 | inode, capsnap, capsnap->context->seq, |
2178 | ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, | 2191 | ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, |
2179 | ci->i_wrbuffer_ref, capsnap->dirty_pages, | 2192 | ci->i_wrbuffer_ref, capsnap->dirty_pages, |
2180 | last ? " (wrbuffer last)" : "", | 2193 | last ? " (wrbuffer last)" : "", |
2181 | last_snap ? " (capsnap last)" : ""); | 2194 | complete_capsnap ? " (complete capsnap)" : "", |
2195 | drop_capsnap ? " (drop capsnap)" : ""); | ||
2196 | if (drop_capsnap) { | ||
2197 | ceph_put_snap_context(capsnap->context); | ||
2198 | list_del(&capsnap->ci_item); | ||
2199 | list_del(&capsnap->flushing_item); | ||
2200 | ceph_put_cap_snap(capsnap); | ||
2201 | } | ||
2182 | } | 2202 | } |
2183 | 2203 | ||
2184 | spin_unlock(&inode->i_lock); | 2204 | spin_unlock(&inode->i_lock); |
@@ -2186,10 +2206,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, | |||
2186 | if (last) { | 2206 | if (last) { |
2187 | ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); | 2207 | ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); |
2188 | iput(inode); | 2208 | iput(inode); |
2189 | } else if (last_snap) { | 2209 | } else if (complete_capsnap) { |
2190 | ceph_flush_snaps(ci); | 2210 | ceph_flush_snaps(ci); |
2191 | wake_up(&ci->i_cap_wq); | 2211 | wake_up(&ci->i_cap_wq); |
2192 | } | 2212 | } |
2213 | if (drop_capsnap) | ||
2214 | iput(inode); | ||
2193 | } | 2215 | } |
2194 | 2216 | ||
2195 | /* | 2217 | /* |
@@ -2465,8 +2487,8 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid, | |||
2465 | break; | 2487 | break; |
2466 | } | 2488 | } |
2467 | WARN_ON(capsnap->dirty_pages || capsnap->writing); | 2489 | WARN_ON(capsnap->dirty_pages || capsnap->writing); |
2468 | dout(" removing cap_snap %p follows %lld\n", | 2490 | dout(" removing %p cap_snap %p follows %lld\n", |
2469 | capsnap, follows); | 2491 | inode, capsnap, follows); |
2470 | ceph_put_snap_context(capsnap->context); | 2492 | ceph_put_snap_context(capsnap->context); |
2471 | list_del(&capsnap->ci_item); | 2493 | list_del(&capsnap->ci_item); |
2472 | list_del(&capsnap->flushing_item); | 2494 | list_del(&capsnap->flushing_item); |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 7261dc6c2ead..ea8ee2e526aa 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -171,11 +171,11 @@ more: | |||
171 | spin_lock(&inode->i_lock); | 171 | spin_lock(&inode->i_lock); |
172 | spin_lock(&dcache_lock); | 172 | spin_lock(&dcache_lock); |
173 | 173 | ||
174 | last = dentry; | ||
175 | |||
174 | if (err < 0) | 176 | if (err < 0) |
175 | goto out_unlock; | 177 | goto out_unlock; |
176 | 178 | ||
177 | last = dentry; | ||
178 | |||
179 | p = p->prev; | 179 | p = p->prev; |
180 | filp->f_pos++; | 180 | filp->f_pos++; |
181 | 181 | ||
@@ -312,7 +312,7 @@ more: | |||
312 | req->r_readdir_offset = fi->next_offset; | 312 | req->r_readdir_offset = fi->next_offset; |
313 | req->r_args.readdir.frag = cpu_to_le32(frag); | 313 | req->r_args.readdir.frag = cpu_to_le32(frag); |
314 | req->r_args.readdir.max_entries = cpu_to_le32(max_entries); | 314 | req->r_args.readdir.max_entries = cpu_to_le32(max_entries); |
315 | req->r_num_caps = max_entries; | 315 | req->r_num_caps = max_entries + 1; |
316 | err = ceph_mdsc_do_request(mdsc, NULL, req); | 316 | err = ceph_mdsc_do_request(mdsc, NULL, req); |
317 | if (err < 0) { | 317 | if (err < 0) { |
318 | ceph_mdsc_put_request(req); | 318 | ceph_mdsc_put_request(req); |
@@ -489,6 +489,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, | |||
489 | struct inode *inode = ceph_get_snapdir(parent); | 489 | struct inode *inode = ceph_get_snapdir(parent); |
490 | dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", | 490 | dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", |
491 | dentry, dentry->d_name.len, dentry->d_name.name, inode); | 491 | dentry, dentry->d_name.len, dentry->d_name.name, inode); |
492 | BUG_ON(!d_unhashed(dentry)); | ||
492 | d_add(dentry, inode); | 493 | d_add(dentry, inode); |
493 | err = 0; | 494 | err = 0; |
494 | } | 495 | } |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index aca82d55cc53..26f883c275e8 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -886,6 +886,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
886 | struct inode *in = NULL; | 886 | struct inode *in = NULL; |
887 | struct ceph_mds_reply_inode *ininfo; | 887 | struct ceph_mds_reply_inode *ininfo; |
888 | struct ceph_vino vino; | 888 | struct ceph_vino vino; |
889 | struct ceph_client *client = ceph_sb_to_client(sb); | ||
889 | int i = 0; | 890 | int i = 0; |
890 | int err = 0; | 891 | int err = 0; |
891 | 892 | ||
@@ -949,7 +950,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
949 | return err; | 950 | return err; |
950 | } | 951 | } |
951 | 952 | ||
952 | if (rinfo->head->is_dentry && !req->r_aborted) { | 953 | /* |
954 | * ignore null lease/binding on snapdir ENOENT, or else we | ||
955 | * will have trouble splicing in the virtual snapdir later | ||
956 | */ | ||
957 | if (rinfo->head->is_dentry && !req->r_aborted && | ||
958 | (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, | ||
959 | client->mount_args->snapdir_name, | ||
960 | req->r_dentry->d_name.len))) { | ||
953 | /* | 961 | /* |
954 | * lookup link rename : null -> possibly existing inode | 962 | * lookup link rename : null -> possibly existing inode |
955 | * mknod symlink mkdir : null -> new inode | 963 | * mknod symlink mkdir : null -> new inode |
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 8f1715ffbe4b..cdaaa131add3 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c | |||
@@ -30,6 +30,10 @@ static char tag_msg = CEPH_MSGR_TAG_MSG; | |||
30 | static char tag_ack = CEPH_MSGR_TAG_ACK; | 30 | static char tag_ack = CEPH_MSGR_TAG_ACK; |
31 | static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE; | 31 | static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE; |
32 | 32 | ||
33 | #ifdef CONFIG_LOCKDEP | ||
34 | static struct lock_class_key socket_class; | ||
35 | #endif | ||
36 | |||
33 | 37 | ||
34 | static void queue_con(struct ceph_connection *con); | 38 | static void queue_con(struct ceph_connection *con); |
35 | static void con_work(struct work_struct *); | 39 | static void con_work(struct work_struct *); |
@@ -228,6 +232,10 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con) | |||
228 | con->sock = sock; | 232 | con->sock = sock; |
229 | sock->sk->sk_allocation = GFP_NOFS; | 233 | sock->sk->sk_allocation = GFP_NOFS; |
230 | 234 | ||
235 | #ifdef CONFIG_LOCKDEP | ||
236 | lockdep_set_class(&sock->sk->sk_lock, &socket_class); | ||
237 | #endif | ||
238 | |||
231 | set_sock_callbacks(sock, con); | 239 | set_sock_callbacks(sock, con); |
232 | 240 | ||
233 | dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); | 241 | dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); |
@@ -333,6 +341,7 @@ static void reset_connection(struct ceph_connection *con) | |||
333 | con->out_msg = NULL; | 341 | con->out_msg = NULL; |
334 | } | 342 | } |
335 | con->in_seq = 0; | 343 | con->in_seq = 0; |
344 | con->in_seq_acked = 0; | ||
336 | } | 345 | } |
337 | 346 | ||
338 | /* | 347 | /* |
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 21c6623c4b07..2e2c15eed82a 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c | |||
@@ -314,71 +314,6 @@ bad: | |||
314 | return ERR_PTR(err); | 314 | return ERR_PTR(err); |
315 | } | 315 | } |
316 | 316 | ||
317 | |||
318 | /* | ||
319 | * osd map | ||
320 | */ | ||
321 | void ceph_osdmap_destroy(struct ceph_osdmap *map) | ||
322 | { | ||
323 | dout("osdmap_destroy %p\n", map); | ||
324 | if (map->crush) | ||
325 | crush_destroy(map->crush); | ||
326 | while (!RB_EMPTY_ROOT(&map->pg_temp)) { | ||
327 | struct ceph_pg_mapping *pg = | ||
328 | rb_entry(rb_first(&map->pg_temp), | ||
329 | struct ceph_pg_mapping, node); | ||
330 | rb_erase(&pg->node, &map->pg_temp); | ||
331 | kfree(pg); | ||
332 | } | ||
333 | while (!RB_EMPTY_ROOT(&map->pg_pools)) { | ||
334 | struct ceph_pg_pool_info *pi = | ||
335 | rb_entry(rb_first(&map->pg_pools), | ||
336 | struct ceph_pg_pool_info, node); | ||
337 | rb_erase(&pi->node, &map->pg_pools); | ||
338 | kfree(pi); | ||
339 | } | ||
340 | kfree(map->osd_state); | ||
341 | kfree(map->osd_weight); | ||
342 | kfree(map->osd_addr); | ||
343 | kfree(map); | ||
344 | } | ||
345 | |||
346 | /* | ||
347 | * adjust max osd value. reallocate arrays. | ||
348 | */ | ||
349 | static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) | ||
350 | { | ||
351 | u8 *state; | ||
352 | struct ceph_entity_addr *addr; | ||
353 | u32 *weight; | ||
354 | |||
355 | state = kcalloc(max, sizeof(*state), GFP_NOFS); | ||
356 | addr = kcalloc(max, sizeof(*addr), GFP_NOFS); | ||
357 | weight = kcalloc(max, sizeof(*weight), GFP_NOFS); | ||
358 | if (state == NULL || addr == NULL || weight == NULL) { | ||
359 | kfree(state); | ||
360 | kfree(addr); | ||
361 | kfree(weight); | ||
362 | return -ENOMEM; | ||
363 | } | ||
364 | |||
365 | /* copy old? */ | ||
366 | if (map->osd_state) { | ||
367 | memcpy(state, map->osd_state, map->max_osd*sizeof(*state)); | ||
368 | memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr)); | ||
369 | memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight)); | ||
370 | kfree(map->osd_state); | ||
371 | kfree(map->osd_addr); | ||
372 | kfree(map->osd_weight); | ||
373 | } | ||
374 | |||
375 | map->osd_state = state; | ||
376 | map->osd_weight = weight; | ||
377 | map->osd_addr = addr; | ||
378 | map->max_osd = max; | ||
379 | return 0; | ||
380 | } | ||
381 | |||
382 | /* | 317 | /* |
383 | * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid | 318 | * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid |
384 | * to a set of osds) | 319 | * to a set of osds) |
@@ -482,6 +417,13 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id) | |||
482 | return NULL; | 417 | return NULL; |
483 | } | 418 | } |
484 | 419 | ||
420 | static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) | ||
421 | { | ||
422 | rb_erase(&pi->node, root); | ||
423 | kfree(pi->name); | ||
424 | kfree(pi); | ||
425 | } | ||
426 | |||
485 | void __decode_pool(void **p, struct ceph_pg_pool_info *pi) | 427 | void __decode_pool(void **p, struct ceph_pg_pool_info *pi) |
486 | { | 428 | { |
487 | ceph_decode_copy(p, &pi->v, sizeof(pi->v)); | 429 | ceph_decode_copy(p, &pi->v, sizeof(pi->v)); |
@@ -490,6 +432,98 @@ void __decode_pool(void **p, struct ceph_pg_pool_info *pi) | |||
490 | *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; | 432 | *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; |
491 | } | 433 | } |
492 | 434 | ||
435 | static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map) | ||
436 | { | ||
437 | struct ceph_pg_pool_info *pi; | ||
438 | u32 num, len, pool; | ||
439 | |||
440 | ceph_decode_32_safe(p, end, num, bad); | ||
441 | dout(" %d pool names\n", num); | ||
442 | while (num--) { | ||
443 | ceph_decode_32_safe(p, end, pool, bad); | ||
444 | ceph_decode_32_safe(p, end, len, bad); | ||
445 | dout(" pool %d len %d\n", pool, len); | ||
446 | pi = __lookup_pg_pool(&map->pg_pools, pool); | ||
447 | if (pi) { | ||
448 | kfree(pi->name); | ||
449 | pi->name = kmalloc(len + 1, GFP_NOFS); | ||
450 | if (pi->name) { | ||
451 | memcpy(pi->name, *p, len); | ||
452 | pi->name[len] = '\0'; | ||
453 | dout(" name is %s\n", pi->name); | ||
454 | } | ||
455 | } | ||
456 | *p += len; | ||
457 | } | ||
458 | return 0; | ||
459 | |||
460 | bad: | ||
461 | return -EINVAL; | ||
462 | } | ||
463 | |||
464 | /* | ||
465 | * osd map | ||
466 | */ | ||
467 | void ceph_osdmap_destroy(struct ceph_osdmap *map) | ||
468 | { | ||
469 | dout("osdmap_destroy %p\n", map); | ||
470 | if (map->crush) | ||
471 | crush_destroy(map->crush); | ||
472 | while (!RB_EMPTY_ROOT(&map->pg_temp)) { | ||
473 | struct ceph_pg_mapping *pg = | ||
474 | rb_entry(rb_first(&map->pg_temp), | ||
475 | struct ceph_pg_mapping, node); | ||
476 | rb_erase(&pg->node, &map->pg_temp); | ||
477 | kfree(pg); | ||
478 | } | ||
479 | while (!RB_EMPTY_ROOT(&map->pg_pools)) { | ||
480 | struct ceph_pg_pool_info *pi = | ||
481 | rb_entry(rb_first(&map->pg_pools), | ||
482 | struct ceph_pg_pool_info, node); | ||
483 | __remove_pg_pool(&map->pg_pools, pi); | ||
484 | } | ||
485 | kfree(map->osd_state); | ||
486 | kfree(map->osd_weight); | ||
487 | kfree(map->osd_addr); | ||
488 | kfree(map); | ||
489 | } | ||
490 | |||
491 | /* | ||
492 | * adjust max osd value. reallocate arrays. | ||
493 | */ | ||
494 | static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) | ||
495 | { | ||
496 | u8 *state; | ||
497 | struct ceph_entity_addr *addr; | ||
498 | u32 *weight; | ||
499 | |||
500 | state = kcalloc(max, sizeof(*state), GFP_NOFS); | ||
501 | addr = kcalloc(max, sizeof(*addr), GFP_NOFS); | ||
502 | weight = kcalloc(max, sizeof(*weight), GFP_NOFS); | ||
503 | if (state == NULL || addr == NULL || weight == NULL) { | ||
504 | kfree(state); | ||
505 | kfree(addr); | ||
506 | kfree(weight); | ||
507 | return -ENOMEM; | ||
508 | } | ||
509 | |||
510 | /* copy old? */ | ||
511 | if (map->osd_state) { | ||
512 | memcpy(state, map->osd_state, map->max_osd*sizeof(*state)); | ||
513 | memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr)); | ||
514 | memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight)); | ||
515 | kfree(map->osd_state); | ||
516 | kfree(map->osd_addr); | ||
517 | kfree(map->osd_weight); | ||
518 | } | ||
519 | |||
520 | map->osd_state = state; | ||
521 | map->osd_weight = weight; | ||
522 | map->osd_addr = addr; | ||
523 | map->max_osd = max; | ||
524 | return 0; | ||
525 | } | ||
526 | |||
493 | /* | 527 | /* |
494 | * decode a full map. | 528 | * decode a full map. |
495 | */ | 529 | */ |
@@ -526,7 +560,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
526 | ceph_decode_32_safe(p, end, max, bad); | 560 | ceph_decode_32_safe(p, end, max, bad); |
527 | while (max--) { | 561 | while (max--) { |
528 | ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); | 562 | ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); |
529 | pi = kmalloc(sizeof(*pi), GFP_NOFS); | 563 | pi = kzalloc(sizeof(*pi), GFP_NOFS); |
530 | if (!pi) | 564 | if (!pi) |
531 | goto bad; | 565 | goto bad; |
532 | pi->id = ceph_decode_32(p); | 566 | pi->id = ceph_decode_32(p); |
@@ -539,6 +573,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
539 | __decode_pool(p, pi); | 573 | __decode_pool(p, pi); |
540 | __insert_pg_pool(&map->pg_pools, pi); | 574 | __insert_pg_pool(&map->pg_pools, pi); |
541 | } | 575 | } |
576 | |||
577 | if (version >= 5 && __decode_pool_names(p, end, map) < 0) | ||
578 | goto bad; | ||
579 | |||
542 | ceph_decode_32_safe(p, end, map->pool_max, bad); | 580 | ceph_decode_32_safe(p, end, map->pool_max, bad); |
543 | 581 | ||
544 | ceph_decode_32_safe(p, end, map->flags, bad); | 582 | ceph_decode_32_safe(p, end, map->flags, bad); |
@@ -712,7 +750,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
712 | } | 750 | } |
713 | pi = __lookup_pg_pool(&map->pg_pools, pool); | 751 | pi = __lookup_pg_pool(&map->pg_pools, pool); |
714 | if (!pi) { | 752 | if (!pi) { |
715 | pi = kmalloc(sizeof(*pi), GFP_NOFS); | 753 | pi = kzalloc(sizeof(*pi), GFP_NOFS); |
716 | if (!pi) { | 754 | if (!pi) { |
717 | err = -ENOMEM; | 755 | err = -ENOMEM; |
718 | goto bad; | 756 | goto bad; |
@@ -722,6 +760,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
722 | } | 760 | } |
723 | __decode_pool(p, pi); | 761 | __decode_pool(p, pi); |
724 | } | 762 | } |
763 | if (version >= 5 && __decode_pool_names(p, end, map) < 0) | ||
764 | goto bad; | ||
725 | 765 | ||
726 | /* old_pool */ | 766 | /* old_pool */ |
727 | ceph_decode_32_safe(p, end, len, bad); | 767 | ceph_decode_32_safe(p, end, len, bad); |
@@ -730,10 +770,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
730 | 770 | ||
731 | ceph_decode_32_safe(p, end, pool, bad); | 771 | ceph_decode_32_safe(p, end, pool, bad); |
732 | pi = __lookup_pg_pool(&map->pg_pools, pool); | 772 | pi = __lookup_pg_pool(&map->pg_pools, pool); |
733 | if (pi) { | 773 | if (pi) |
734 | rb_erase(&pi->node, &map->pg_pools); | 774 | __remove_pg_pool(&map->pg_pools, pi); |
735 | kfree(pi); | ||
736 | } | ||
737 | } | 775 | } |
738 | 776 | ||
739 | /* new_up */ | 777 | /* new_up */ |
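__decode_pool_names() added above walks a count-prefixed list of (pool id, name length, name bytes) records, bounds-checking every read against the end of the buffer and NUL-terminating the copied name. A stand-alone sketch of the same bounded-decode pattern, using a simplified, hypothetical layout rather than the exact ceph wire encoding:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int decode_names(const uint8_t *p, const uint8_t *end)
    {
            uint32_t num, id, len;

            if (end - p < 4)
                    return -1;
            memcpy(&num, p, 4); p += 4;

            while (num--) {
                    if (end - p < 8)
                            return -1;
                    memcpy(&id, p, 4);  p += 4;
                    memcpy(&len, p, 4); p += 4;
                    if ((uint32_t)(end - p) < len)
                            return -1;          /* name would overrun the buffer */

                    char *name = malloc(len + 1);
                    if (!name)
                            return -1;
                    memcpy(name, p, len);
                    name[len] = '\0';           /* always terminated, as in the hunk */
                    printf("pool %u is %s\n", id, name);
                    free(name);
                    p += len;
            }
            return 0;
    }

    int main(void)
    {
            /* one entry: id=7, len=3, "rbd" (little-endian fields) */
            uint8_t buf[] = { 1,0,0,0, 7,0,0,0, 3,0,0,0, 'r','b','d' };

            return decode_names(buf, buf + sizeof(buf));
    }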
diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h index 1fb55afb2642..8bc9f1e4f562 100644 --- a/fs/ceph/osdmap.h +++ b/fs/ceph/osdmap.h | |||
@@ -23,6 +23,7 @@ struct ceph_pg_pool_info { | |||
23 | int id; | 23 | int id; |
24 | struct ceph_pg_pool v; | 24 | struct ceph_pg_pool v; |
25 | int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; | 25 | int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; |
26 | char *name; | ||
26 | }; | 27 | }; |
27 | 28 | ||
28 | struct ceph_pg_mapping { | 29 | struct ceph_pg_mapping { |
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h index 26ac8b89a676..a1fc1d017b58 100644 --- a/fs/ceph/rados.h +++ b/fs/ceph/rados.h | |||
@@ -11,8 +11,10 @@ | |||
11 | /* | 11 | /* |
12 | * osdmap encoding versions | 12 | * osdmap encoding versions |
13 | */ | 13 | */ |
14 | #define CEPH_OSDMAP_INC_VERSION 4 | 14 | #define CEPH_OSDMAP_INC_VERSION 5 |
15 | #define CEPH_OSDMAP_VERSION 4 | 15 | #define CEPH_OSDMAP_INC_VERSION_EXT 5 |
16 | #define CEPH_OSDMAP_VERSION 5 | ||
17 | #define CEPH_OSDMAP_VERSION_EXT 5 | ||
16 | 18 | ||
17 | /* | 19 | /* |
18 | * fs id | 20 | * fs id |
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index e6f9bc57d472..2b881262ef67 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
@@ -431,8 +431,7 @@ static int dup_array(u64 **dst, __le64 *src, int num) | |||
431 | * Caller must hold snap_rwsem for read (i.e., the realm topology won't | 431 | * Caller must hold snap_rwsem for read (i.e., the realm topology won't |
432 | * change). | 432 | * change). |
433 | */ | 433 | */ |
434 | void ceph_queue_cap_snap(struct ceph_inode_info *ci, | 434 | void ceph_queue_cap_snap(struct ceph_inode_info *ci) |
435 | struct ceph_snap_context *snapc) | ||
436 | { | 435 | { |
437 | struct inode *inode = &ci->vfs_inode; | 436 | struct inode *inode = &ci->vfs_inode; |
438 | struct ceph_cap_snap *capsnap; | 437 | struct ceph_cap_snap *capsnap; |
@@ -451,10 +450,11 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci, | |||
451 | as no new writes are allowed to start when pending, so any | 450 | as no new writes are allowed to start when pending, so any |
452 | writes in progress now were started before the previous | 451 | writes in progress now were started before the previous |
453 | cap_snap. lucky us. */ | 452 | cap_snap. lucky us. */ |
454 | dout("queue_cap_snap %p snapc %p seq %llu used %d" | 453 | dout("queue_cap_snap %p already pending\n", inode); |
455 | " already pending\n", inode, snapc, snapc->seq, used); | ||
456 | kfree(capsnap); | 454 | kfree(capsnap); |
457 | } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) { | 455 | } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) { |
456 | struct ceph_snap_context *snapc = ci->i_head_snapc; | ||
457 | |||
458 | igrab(inode); | 458 | igrab(inode); |
459 | 459 | ||
460 | atomic_set(&capsnap->nref, 1); | 460 | atomic_set(&capsnap->nref, 1); |
@@ -463,7 +463,6 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci, | |||
463 | INIT_LIST_HEAD(&capsnap->flushing_item); | 463 | INIT_LIST_HEAD(&capsnap->flushing_item); |
464 | 464 | ||
465 | capsnap->follows = snapc->seq - 1; | 465 | capsnap->follows = snapc->seq - 1; |
466 | capsnap->context = ceph_get_snap_context(snapc); | ||
467 | capsnap->issued = __ceph_caps_issued(ci, NULL); | 466 | capsnap->issued = __ceph_caps_issued(ci, NULL); |
468 | capsnap->dirty = __ceph_caps_dirty(ci); | 467 | capsnap->dirty = __ceph_caps_dirty(ci); |
469 | 468 | ||
@@ -480,7 +479,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci, | |||
480 | snapshot. */ | 479 | snapshot. */ |
481 | capsnap->dirty_pages = ci->i_wrbuffer_ref_head; | 480 | capsnap->dirty_pages = ci->i_wrbuffer_ref_head; |
482 | ci->i_wrbuffer_ref_head = 0; | 481 | ci->i_wrbuffer_ref_head = 0; |
483 | ceph_put_snap_context(ci->i_head_snapc); | 482 | capsnap->context = snapc; |
484 | ci->i_head_snapc = NULL; | 483 | ci->i_head_snapc = NULL; |
485 | list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); | 484 | list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); |
486 | 485 | ||
@@ -522,15 +521,17 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci, | |||
522 | capsnap->ctime = inode->i_ctime; | 521 | capsnap->ctime = inode->i_ctime; |
523 | capsnap->time_warp_seq = ci->i_time_warp_seq; | 522 | capsnap->time_warp_seq = ci->i_time_warp_seq; |
524 | if (capsnap->dirty_pages) { | 523 | if (capsnap->dirty_pages) { |
525 | dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu " | 524 | dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu " |
526 | "still has %d dirty pages\n", inode, capsnap, | 525 | "still has %d dirty pages\n", inode, capsnap, |
527 | capsnap->context, capsnap->context->seq, | 526 | capsnap->context, capsnap->context->seq, |
528 | capsnap->size, capsnap->dirty_pages); | 527 | ceph_cap_string(capsnap->dirty), capsnap->size, |
528 | capsnap->dirty_pages); | ||
529 | return 0; | 529 | return 0; |
530 | } | 530 | } |
531 | dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n", | 531 | dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n", |
532 | inode, capsnap, capsnap->context, | 532 | inode, capsnap, capsnap->context, |
533 | capsnap->context->seq, capsnap->size); | 533 | capsnap->context->seq, ceph_cap_string(capsnap->dirty), |
534 | capsnap->size); | ||
534 | 535 | ||
535 | spin_lock(&mdsc->snap_flush_lock); | 536 | spin_lock(&mdsc->snap_flush_lock); |
536 | list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); | 537 | list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); |
@@ -602,7 +603,7 @@ more: | |||
602 | if (lastinode) | 603 | if (lastinode) |
603 | iput(lastinode); | 604 | iput(lastinode); |
604 | lastinode = inode; | 605 | lastinode = inode; |
605 | ceph_queue_cap_snap(ci, realm->cached_context); | 606 | ceph_queue_cap_snap(ci); |
606 | spin_lock(&realm->inodes_with_caps_lock); | 607 | spin_lock(&realm->inodes_with_caps_lock); |
607 | } | 608 | } |
608 | spin_unlock(&realm->inodes_with_caps_lock); | 609 | spin_unlock(&realm->inodes_with_caps_lock); |
@@ -824,8 +825,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, | |||
824 | spin_unlock(&realm->inodes_with_caps_lock); | 825 | spin_unlock(&realm->inodes_with_caps_lock); |
825 | spin_unlock(&inode->i_lock); | 826 | spin_unlock(&inode->i_lock); |
826 | 827 | ||
827 | ceph_queue_cap_snap(ci, | 828 | ceph_queue_cap_snap(ci); |
828 | ci->i_snap_realm->cached_context); | ||
829 | 829 | ||
830 | iput(inode); | 830 | iput(inode); |
831 | continue; | 831 | continue; |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index ca702c67bc66..e30dfbb056c3 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -715,8 +715,7 @@ extern int ceph_update_snap_trace(struct ceph_mds_client *m, | |||
715 | extern void ceph_handle_snap(struct ceph_mds_client *mdsc, | 715 | extern void ceph_handle_snap(struct ceph_mds_client *mdsc, |
716 | struct ceph_mds_session *session, | 716 | struct ceph_mds_session *session, |
717 | struct ceph_msg *msg); | 717 | struct ceph_msg *msg); |
718 | extern void ceph_queue_cap_snap(struct ceph_inode_info *ci, | 718 | extern void ceph_queue_cap_snap(struct ceph_inode_info *ci); |
719 | struct ceph_snap_context *snapc); | ||
720 | extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, | 719 | extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, |
721 | struct ceph_cap_snap *capsnap); | 720 | struct ceph_cap_snap *capsnap); |
722 | extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); | 721 | extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); |
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index 4797787c6a44..246a167cb913 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h | |||
@@ -18,6 +18,8 @@ | |||
18 | #ifndef _CIFS_FS_SB_H | 18 | #ifndef _CIFS_FS_SB_H |
19 | #define _CIFS_FS_SB_H | 19 | #define _CIFS_FS_SB_H |
20 | 20 | ||
21 | #include <linux/backing-dev.h> | ||
22 | |||
21 | #define CIFS_MOUNT_NO_PERM 1 /* do not do client vfs_perm check */ | 23 | #define CIFS_MOUNT_NO_PERM 1 /* do not do client vfs_perm check */ |
22 | #define CIFS_MOUNT_SET_UID 2 /* set current's euid in create etc. */ | 24 | #define CIFS_MOUNT_SET_UID 2 /* set current's euid in create etc. */ |
23 | #define CIFS_MOUNT_SERVER_INUM 4 /* inode numbers from uniqueid from server */ | 25 | #define CIFS_MOUNT_SERVER_INUM 4 /* inode numbers from uniqueid from server */ |
@@ -50,5 +52,6 @@ struct cifs_sb_info { | |||
50 | #ifdef CONFIG_CIFS_DFS_UPCALL | 52 | #ifdef CONFIG_CIFS_DFS_UPCALL |
51 | char *mountdata; /* mount options received at mount time */ | 53 | char *mountdata; /* mount options received at mount time */ |
52 | #endif | 54 | #endif |
55 | struct backing_dev_info bdi; | ||
53 | }; | 56 | }; |
54 | #endif /* _CIFS_FS_SB_H */ | 57 | #endif /* _CIFS_FS_SB_H */ |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index ded66be6597c..ad235d604a0b 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -103,6 +103,12 @@ cifs_read_super(struct super_block *sb, void *data, | |||
103 | if (cifs_sb == NULL) | 103 | if (cifs_sb == NULL) |
104 | return -ENOMEM; | 104 | return -ENOMEM; |
105 | 105 | ||
106 | rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); | ||
107 | if (rc) { | ||
108 | kfree(cifs_sb); | ||
109 | return rc; | ||
110 | } | ||
111 | |||
106 | #ifdef CONFIG_CIFS_DFS_UPCALL | 112 | #ifdef CONFIG_CIFS_DFS_UPCALL |
107 | /* copy mount params to sb for use in submounts */ | 113 | /* copy mount params to sb for use in submounts */ |
108 | /* BB: should we move this after the mount so we | 114 | /* BB: should we move this after the mount so we |
@@ -115,6 +121,7 @@ cifs_read_super(struct super_block *sb, void *data, | |||
115 | int len = strlen(data); | 121 | int len = strlen(data); |
116 | cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL); | 122 | cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL); |
117 | if (cifs_sb->mountdata == NULL) { | 123 | if (cifs_sb->mountdata == NULL) { |
124 | bdi_destroy(&cifs_sb->bdi); | ||
118 | kfree(sb->s_fs_info); | 125 | kfree(sb->s_fs_info); |
119 | sb->s_fs_info = NULL; | 126 | sb->s_fs_info = NULL; |
120 | return -ENOMEM; | 127 | return -ENOMEM; |
@@ -135,6 +142,7 @@ cifs_read_super(struct super_block *sb, void *data, | |||
135 | 142 | ||
136 | sb->s_magic = CIFS_MAGIC_NUMBER; | 143 | sb->s_magic = CIFS_MAGIC_NUMBER; |
137 | sb->s_op = &cifs_super_ops; | 144 | sb->s_op = &cifs_super_ops; |
145 | sb->s_bdi = &cifs_sb->bdi; | ||
138 | /* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512) | 146 | /* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512) |
139 | sb->s_blocksize = | 147 | sb->s_blocksize = |
140 | cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */ | 148 | cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */ |
@@ -183,6 +191,7 @@ out_mount_failed: | |||
183 | } | 191 | } |
184 | #endif | 192 | #endif |
185 | unload_nls(cifs_sb->local_nls); | 193 | unload_nls(cifs_sb->local_nls); |
194 | bdi_destroy(&cifs_sb->bdi); | ||
186 | kfree(cifs_sb); | 195 | kfree(cifs_sb); |
187 | } | 196 | } |
188 | return rc; | 197 | return rc; |
@@ -214,6 +223,7 @@ cifs_put_super(struct super_block *sb) | |||
214 | #endif | 223 | #endif |
215 | 224 | ||
216 | unload_nls(cifs_sb->local_nls); | 225 | unload_nls(cifs_sb->local_nls); |
226 | bdi_destroy(&cifs_sb->bdi); | ||
217 | kfree(cifs_sb); | 227 | kfree(cifs_sb); |
218 | 228 | ||
219 | unlock_kernel(); | 229 | unlock_kernel(); |
diff --git a/fs/coda/inode.c b/fs/coda/inode.c index a1695dcadd99..d97f9935a028 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c | |||
@@ -167,6 +167,10 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) | |||
167 | return -EBUSY; | 167 | return -EBUSY; |
168 | } | 168 | } |
169 | 169 | ||
170 | error = bdi_setup_and_register(&vc->bdi, "coda", BDI_CAP_MAP_COPY); | ||
171 | if (error) | ||
172 | goto bdi_err; | ||
173 | |||
170 | vc->vc_sb = sb; | 174 | vc->vc_sb = sb; |
171 | 175 | ||
172 | sb->s_fs_info = vc; | 176 | sb->s_fs_info = vc; |
@@ -175,6 +179,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) | |||
175 | sb->s_blocksize_bits = 12; | 179 | sb->s_blocksize_bits = 12; |
176 | sb->s_magic = CODA_SUPER_MAGIC; | 180 | sb->s_magic = CODA_SUPER_MAGIC; |
177 | sb->s_op = &coda_super_operations; | 181 | sb->s_op = &coda_super_operations; |
182 | sb->s_bdi = &vc->bdi; | ||
178 | 183 | ||
179 | /* get root fid from Venus: this needs the root inode */ | 184 | /* get root fid from Venus: this needs the root inode */ |
180 | error = venus_rootfid(sb, &fid); | 185 | error = venus_rootfid(sb, &fid); |
@@ -200,6 +205,8 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) | |||
200 | return 0; | 205 | return 0; |
201 | 206 | ||
202 | error: | 207 | error: |
208 | bdi_destroy(&vc->bdi); | ||
209 | bdi_err: | ||
203 | if (root) | 210 | if (root) |
204 | iput(root); | 211 | iput(root); |
205 | if (vc) | 212 | if (vc) |
@@ -210,6 +217,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) | |||
210 | 217 | ||
211 | static void coda_put_super(struct super_block *sb) | 218 | static void coda_put_super(struct super_block *sb) |
212 | { | 219 | { |
220 | bdi_destroy(&coda_vcp(sb)->bdi); | ||
213 | coda_vcp(sb)->vc_sb = NULL; | 221 | coda_vcp(sb)->vc_sb = NULL; |
214 | sb->s_fs_info = NULL; | 222 | sb->s_fs_info = NULL; |
215 | 223 | ||
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index efb2b9400391..1cc087635a5e 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
@@ -382,8 +382,8 @@ out: | |||
382 | static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num, | 382 | static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num, |
383 | struct ecryptfs_crypt_stat *crypt_stat) | 383 | struct ecryptfs_crypt_stat *crypt_stat) |
384 | { | 384 | { |
385 | (*offset) = (crypt_stat->num_header_bytes_at_front | 385 | (*offset) = ecryptfs_lower_header_size(crypt_stat) |
386 | + (crypt_stat->extent_size * extent_num)); | 386 | + (crypt_stat->extent_size * extent_num); |
387 | } | 387 | } |
388 | 388 | ||
389 | /** | 389 | /** |
@@ -835,13 +835,13 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat) | |||
835 | set_extent_mask_and_shift(crypt_stat); | 835 | set_extent_mask_and_shift(crypt_stat); |
836 | crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES; | 836 | crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES; |
837 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) | 837 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) |
838 | crypt_stat->num_header_bytes_at_front = 0; | 838 | crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; |
839 | else { | 839 | else { |
840 | if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) | 840 | if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) |
841 | crypt_stat->num_header_bytes_at_front = | 841 | crypt_stat->metadata_size = |
842 | ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; | 842 | ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; |
843 | else | 843 | else |
844 | crypt_stat->num_header_bytes_at_front = PAGE_CACHE_SIZE; | 844 | crypt_stat->metadata_size = PAGE_CACHE_SIZE; |
845 | } | 845 | } |
846 | } | 846 | } |
847 | 847 | ||
@@ -1108,9 +1108,9 @@ static void write_ecryptfs_marker(char *page_virt, size_t *written) | |||
1108 | (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; | 1108 | (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; |
1109 | } | 1109 | } |
1110 | 1110 | ||
1111 | static void | 1111 | void ecryptfs_write_crypt_stat_flags(char *page_virt, |
1112 | write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat, | 1112 | struct ecryptfs_crypt_stat *crypt_stat, |
1113 | size_t *written) | 1113 | size_t *written) |
1114 | { | 1114 | { |
1115 | u32 flags = 0; | 1115 | u32 flags = 0; |
1116 | int i; | 1116 | int i; |
@@ -1238,8 +1238,7 @@ ecryptfs_write_header_metadata(char *virt, | |||
1238 | 1238 | ||
1239 | header_extent_size = (u32)crypt_stat->extent_size; | 1239 | header_extent_size = (u32)crypt_stat->extent_size; |
1240 | num_header_extents_at_front = | 1240 | num_header_extents_at_front = |
1241 | (u16)(crypt_stat->num_header_bytes_at_front | 1241 | (u16)(crypt_stat->metadata_size / crypt_stat->extent_size); |
1242 | / crypt_stat->extent_size); | ||
1243 | put_unaligned_be32(header_extent_size, virt); | 1242 | put_unaligned_be32(header_extent_size, virt); |
1244 | virt += 4; | 1243 | virt += 4; |
1245 | put_unaligned_be16(num_header_extents_at_front, virt); | 1244 | put_unaligned_be16(num_header_extents_at_front, virt); |
@@ -1292,7 +1291,8 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max, | |||
1292 | offset = ECRYPTFS_FILE_SIZE_BYTES; | 1291 | offset = ECRYPTFS_FILE_SIZE_BYTES; |
1293 | write_ecryptfs_marker((page_virt + offset), &written); | 1292 | write_ecryptfs_marker((page_virt + offset), &written); |
1294 | offset += written; | 1293 | offset += written; |
1295 | write_ecryptfs_flags((page_virt + offset), crypt_stat, &written); | 1294 | ecryptfs_write_crypt_stat_flags((page_virt + offset), crypt_stat, |
1295 | &written); | ||
1296 | offset += written; | 1296 | offset += written; |
1297 | ecryptfs_write_header_metadata((page_virt + offset), crypt_stat, | 1297 | ecryptfs_write_header_metadata((page_virt + offset), crypt_stat, |
1298 | &written); | 1298 | &written); |
@@ -1382,7 +1382,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | |||
1382 | rc = -EINVAL; | 1382 | rc = -EINVAL; |
1383 | goto out; | 1383 | goto out; |
1384 | } | 1384 | } |
1385 | virt_len = crypt_stat->num_header_bytes_at_front; | 1385 | virt_len = crypt_stat->metadata_size; |
1386 | order = get_order(virt_len); | 1386 | order = get_order(virt_len); |
1387 | /* Released in this function */ | 1387 | /* Released in this function */ |
1388 | virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order); | 1388 | virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order); |
@@ -1428,16 +1428,15 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat, | |||
1428 | header_extent_size = get_unaligned_be32(virt); | 1428 | header_extent_size = get_unaligned_be32(virt); |
1429 | virt += sizeof(__be32); | 1429 | virt += sizeof(__be32); |
1430 | num_header_extents_at_front = get_unaligned_be16(virt); | 1430 | num_header_extents_at_front = get_unaligned_be16(virt); |
1431 | crypt_stat->num_header_bytes_at_front = | 1431 | crypt_stat->metadata_size = (((size_t)num_header_extents_at_front |
1432 | (((size_t)num_header_extents_at_front | 1432 | * (size_t)header_extent_size)); |
1433 | * (size_t)header_extent_size)); | ||
1434 | (*bytes_read) = (sizeof(__be32) + sizeof(__be16)); | 1433 | (*bytes_read) = (sizeof(__be32) + sizeof(__be16)); |
1435 | if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) | 1434 | if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) |
1436 | && (crypt_stat->num_header_bytes_at_front | 1435 | && (crypt_stat->metadata_size |
1437 | < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { | 1436 | < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { |
1438 | rc = -EINVAL; | 1437 | rc = -EINVAL; |
1439 | printk(KERN_WARNING "Invalid header size: [%zd]\n", | 1438 | printk(KERN_WARNING "Invalid header size: [%zd]\n", |
1440 | crypt_stat->num_header_bytes_at_front); | 1439 | crypt_stat->metadata_size); |
1441 | } | 1440 | } |
1442 | return rc; | 1441 | return rc; |
1443 | } | 1442 | } |
@@ -1452,8 +1451,7 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat, | |||
1452 | */ | 1451 | */ |
1453 | static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) | 1452 | static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) |
1454 | { | 1453 | { |
1455 | crypt_stat->num_header_bytes_at_front = | 1454 | crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; |
1456 | ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; | ||
1457 | } | 1455 | } |
1458 | 1456 | ||
1459 | /** | 1457 | /** |
@@ -1607,6 +1605,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) | |||
1607 | ecryptfs_dentry, | 1605 | ecryptfs_dentry, |
1608 | ECRYPTFS_VALIDATE_HEADER_SIZE); | 1606 | ECRYPTFS_VALIDATE_HEADER_SIZE); |
1609 | if (rc) { | 1607 | if (rc) { |
1608 | memset(page_virt, 0, PAGE_CACHE_SIZE); | ||
1610 | rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); | 1609 | rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); |
1611 | if (rc) { | 1610 | if (rc) { |
1612 | printk(KERN_DEBUG "Valid eCryptfs headers not found in " | 1611 | printk(KERN_DEBUG "Valid eCryptfs headers not found in " |
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 542f625312f3..bfc2e0f78f00 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/scatterlist.h> | 35 | #include <linux/scatterlist.h> |
36 | #include <linux/hash.h> | 36 | #include <linux/hash.h> |
37 | #include <linux/nsproxy.h> | 37 | #include <linux/nsproxy.h> |
38 | #include <linux/backing-dev.h> | ||
38 | 39 | ||
39 | /* Version verification for shared data structures w/ userspace */ | 40 | /* Version verification for shared data structures w/ userspace */ |
40 | #define ECRYPTFS_VERSION_MAJOR 0x00 | 41 | #define ECRYPTFS_VERSION_MAJOR 0x00 |
@@ -273,7 +274,7 @@ struct ecryptfs_crypt_stat { | |||
273 | u32 flags; | 274 | u32 flags; |
274 | unsigned int file_version; | 275 | unsigned int file_version; |
275 | size_t iv_bytes; | 276 | size_t iv_bytes; |
276 | size_t num_header_bytes_at_front; | 277 | size_t metadata_size; |
277 | size_t extent_size; /* Data extent size; default is 4096 */ | 278 | size_t extent_size; /* Data extent size; default is 4096 */ |
278 | size_t key_size; | 279 | size_t key_size; |
279 | size_t extent_shift; | 280 | size_t extent_shift; |
@@ -393,6 +394,7 @@ struct ecryptfs_mount_crypt_stat { | |||
393 | struct ecryptfs_sb_info { | 394 | struct ecryptfs_sb_info { |
394 | struct super_block *wsi_sb; | 395 | struct super_block *wsi_sb; |
395 | struct ecryptfs_mount_crypt_stat mount_crypt_stat; | 396 | struct ecryptfs_mount_crypt_stat mount_crypt_stat; |
397 | struct backing_dev_info bdi; | ||
396 | }; | 398 | }; |
397 | 399 | ||
398 | /* file private data. */ | 400 | /* file private data. */ |
@@ -464,6 +466,14 @@ struct ecryptfs_daemon { | |||
464 | 466 | ||
465 | extern struct mutex ecryptfs_daemon_hash_mux; | 467 | extern struct mutex ecryptfs_daemon_hash_mux; |
466 | 468 | ||
469 | static inline size_t | ||
470 | ecryptfs_lower_header_size(struct ecryptfs_crypt_stat *crypt_stat) | ||
471 | { | ||
472 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) | ||
473 | return 0; | ||
474 | return crypt_stat->metadata_size; | ||
475 | } | ||
476 | |||
467 | static inline struct ecryptfs_file_info * | 477 | static inline struct ecryptfs_file_info * |
468 | ecryptfs_file_to_private(struct file *file) | 478 | ecryptfs_file_to_private(struct file *file) |
469 | { | 479 | { |
@@ -651,6 +661,9 @@ int ecryptfs_decrypt_page(struct page *page); | |||
651 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry); | 661 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry); |
652 | int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); | 662 | int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); |
653 | int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry); | 663 | int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry); |
664 | void ecryptfs_write_crypt_stat_flags(char *page_virt, | ||
665 | struct ecryptfs_crypt_stat *crypt_stat, | ||
666 | size_t *written); | ||
654 | int ecryptfs_read_and_validate_header_region(char *data, | 667 | int ecryptfs_read_and_validate_header_region(char *data, |
655 | struct inode *ecryptfs_inode); | 668 | struct inode *ecryptfs_inode); |
656 | int ecryptfs_read_and_validate_xattr_region(char *page_virt, | 669 | int ecryptfs_read_and_validate_xattr_region(char *page_virt, |
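Note on the hunk above: the new ecryptfs_lower_header_size() inline folds the ECRYPTFS_METADATA_IN_XATTR check into one place, so size arithmetic no longer has to ask where the metadata lives. The real upper_size_to_lower_size() in the inode.c hunk further down calls it; a simplified sketch of that call pattern, using only fields visible in this header diff (the power-of-two rounding stands in for the driver's own extent mask):

    /* Illustrative only: lower size = metadata bytes that actually sit in
     * the lower file, plus one full extent per data extent.
     * extent_size is a power of two (4096 by default per the header). */
    static loff_t sketch_upper_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
                                             loff_t upper_size)
    {
            loff_t lower_size = ecryptfs_lower_header_size(crypt_stat);

            if (upper_size != 0) {
                    loff_t num_extents = upper_size >> crypt_stat->extent_shift;

                    if (upper_size & (loff_t)(crypt_stat->extent_size - 1))
                            num_extents++;
                    lower_size += num_extents * crypt_stat->extent_size;
            }
            return lower_size;
    }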
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index d3362faf3852..e2d4418affac 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -324,6 +324,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | |||
324 | rc = ecryptfs_read_and_validate_header_region(page_virt, | 324 | rc = ecryptfs_read_and_validate_header_region(page_virt, |
325 | ecryptfs_dentry->d_inode); | 325 | ecryptfs_dentry->d_inode); |
326 | if (rc) { | 326 | if (rc) { |
327 | memset(page_virt, 0, PAGE_CACHE_SIZE); | ||
327 | rc = ecryptfs_read_and_validate_xattr_region(page_virt, | 328 | rc = ecryptfs_read_and_validate_xattr_region(page_virt, |
328 | ecryptfs_dentry); | 329 | ecryptfs_dentry); |
329 | if (rc) { | 330 | if (rc) { |
@@ -336,7 +337,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | |||
336 | ecryptfs_dentry->d_sb)->mount_crypt_stat; | 337 | ecryptfs_dentry->d_sb)->mount_crypt_stat; |
337 | if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { | 338 | if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { |
338 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) | 339 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) |
339 | file_size = (crypt_stat->num_header_bytes_at_front | 340 | file_size = (crypt_stat->metadata_size |
340 | + i_size_read(lower_dentry->d_inode)); | 341 | + i_size_read(lower_dentry->d_inode)); |
341 | else | 342 | else |
342 | file_size = i_size_read(lower_dentry->d_inode); | 343 | file_size = i_size_read(lower_dentry->d_inode); |
@@ -388,9 +389,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
388 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); | 389 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); |
389 | if (IS_ERR(lower_dentry)) { | 390 | if (IS_ERR(lower_dentry)) { |
390 | rc = PTR_ERR(lower_dentry); | 391 | rc = PTR_ERR(lower_dentry); |
391 | printk(KERN_ERR "%s: lookup_one_len() returned [%d] on " | 392 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " |
392 | "lower_dentry = [%s]\n", __func__, rc, | 393 | "[%d] on lower_dentry = [%s]\n", __func__, rc, |
393 | ecryptfs_dentry->d_name.name); | 394 | encrypted_and_encoded_name); |
394 | goto out_d_drop; | 395 | goto out_d_drop; |
395 | } | 396 | } |
396 | if (lower_dentry->d_inode) | 397 | if (lower_dentry->d_inode) |
@@ -417,9 +418,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
417 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); | 418 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); |
418 | if (IS_ERR(lower_dentry)) { | 419 | if (IS_ERR(lower_dentry)) { |
419 | rc = PTR_ERR(lower_dentry); | 420 | rc = PTR_ERR(lower_dentry); |
420 | printk(KERN_ERR "%s: lookup_one_len() returned [%d] on " | 421 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " |
421 | "lower_dentry = [%s]\n", __func__, rc, | 422 | "[%d] on lower_dentry = [%s]\n", __func__, rc, |
422 | encrypted_and_encoded_name); | 423 | encrypted_and_encoded_name); |
423 | goto out_d_drop; | 424 | goto out_d_drop; |
424 | } | 425 | } |
425 | lookup_and_interpose: | 426 | lookup_and_interpose: |
@@ -456,8 +457,8 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir, | |||
456 | rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0); | 457 | rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0); |
457 | if (rc) | 458 | if (rc) |
458 | goto out_lock; | 459 | goto out_lock; |
459 | fsstack_copy_attr_times(dir, lower_new_dentry->d_inode); | 460 | fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); |
460 | fsstack_copy_inode_size(dir, lower_new_dentry->d_inode); | 461 | fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); |
461 | old_dentry->d_inode->i_nlink = | 462 | old_dentry->d_inode->i_nlink = |
462 | ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink; | 463 | ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink; |
463 | i_size_write(new_dentry->d_inode, file_size_save); | 464 | i_size_write(new_dentry->d_inode, file_size_save); |
@@ -648,38 +649,17 @@ out_lock: | |||
648 | return rc; | 649 | return rc; |
649 | } | 650 | } |
650 | 651 | ||
651 | static int | 652 | static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf, |
652 | ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) | 653 | size_t *bufsiz) |
653 | { | 654 | { |
655 | struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); | ||
654 | char *lower_buf; | 656 | char *lower_buf; |
655 | size_t lower_bufsiz; | 657 | size_t lower_bufsiz = PATH_MAX; |
656 | struct dentry *lower_dentry; | ||
657 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat; | ||
658 | char *plaintext_name; | ||
659 | size_t plaintext_name_size; | ||
660 | mm_segment_t old_fs; | 658 | mm_segment_t old_fs; |
661 | int rc; | 659 | int rc; |
662 | 660 | ||
663 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | ||
664 | if (!lower_dentry->d_inode->i_op->readlink) { | ||
665 | rc = -EINVAL; | ||
666 | goto out; | ||
667 | } | ||
668 | mount_crypt_stat = &ecryptfs_superblock_to_private( | ||
669 | dentry->d_sb)->mount_crypt_stat; | ||
670 | /* | ||
671 | * If the lower filename is encrypted, it will result in a significantly | ||
672 | * longer name. If needed, truncate the name after decode and decrypt. | ||
673 | */ | ||
674 | if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) | ||
675 | lower_bufsiz = PATH_MAX; | ||
676 | else | ||
677 | lower_bufsiz = bufsiz; | ||
678 | /* Released in this function */ | ||
679 | lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL); | 661 | lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL); |
680 | if (lower_buf == NULL) { | 662 | if (!lower_buf) { |
681 | printk(KERN_ERR "%s: Out of memory whilst attempting to " | ||
682 | "kmalloc [%zd] bytes\n", __func__, lower_bufsiz); | ||
683 | rc = -ENOMEM; | 663 | rc = -ENOMEM; |
684 | goto out; | 664 | goto out; |
685 | } | 665 | } |
@@ -689,29 +669,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) | |||
689 | (char __user *)lower_buf, | 669 | (char __user *)lower_buf, |
690 | lower_bufsiz); | 670 | lower_bufsiz); |
691 | set_fs(old_fs); | 671 | set_fs(old_fs); |
692 | if (rc >= 0) { | 672 | if (rc < 0) |
693 | rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name, | 673 | goto out; |
694 | &plaintext_name_size, | 674 | lower_bufsiz = rc; |
695 | dentry, lower_buf, | 675 | rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry, |
696 | rc); | 676 | lower_buf, lower_bufsiz); |
697 | if (rc) { | 677 | out: |
698 | printk(KERN_ERR "%s: Error attempting to decode and " | ||
699 | "decrypt filename; rc = [%d]\n", __func__, | ||
700 | rc); | ||
701 | goto out_free_lower_buf; | ||
702 | } | ||
703 | /* Check for bufsiz <= 0 done in sys_readlinkat() */ | ||
704 | rc = copy_to_user(buf, plaintext_name, | ||
705 | min((size_t) bufsiz, plaintext_name_size)); | ||
706 | if (rc) | ||
707 | rc = -EFAULT; | ||
708 | else | ||
709 | rc = plaintext_name_size; | ||
710 | kfree(plaintext_name); | ||
711 | fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode); | ||
712 | } | ||
713 | out_free_lower_buf: | ||
714 | kfree(lower_buf); | 678 | kfree(lower_buf); |
679 | return rc; | ||
680 | } | ||
681 | |||
682 | static int | ||
683 | ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) | ||
684 | { | ||
685 | char *kbuf; | ||
686 | size_t kbufsiz, copied; | ||
687 | int rc; | ||
688 | |||
689 | rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz); | ||
690 | if (rc) | ||
691 | goto out; | ||
692 | copied = min_t(size_t, bufsiz, kbufsiz); | ||
693 | rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied; | ||
694 | kfree(kbuf); | ||
695 | fsstack_copy_attr_atime(dentry->d_inode, | ||
696 | ecryptfs_dentry_to_lower(dentry)->d_inode); | ||
715 | out: | 697 | out: |
716 | return rc; | 698 | return rc; |
717 | } | 699 | } |
@@ -769,7 +751,7 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat, | |||
769 | { | 751 | { |
770 | loff_t lower_size; | 752 | loff_t lower_size; |
771 | 753 | ||
772 | lower_size = crypt_stat->num_header_bytes_at_front; | 754 | lower_size = ecryptfs_lower_header_size(crypt_stat); |
773 | if (upper_size != 0) { | 755 | if (upper_size != 0) { |
774 | loff_t num_extents; | 756 | loff_t num_extents; |
775 | 757 | ||
@@ -1016,6 +998,28 @@ out: | |||
1016 | return rc; | 998 | return rc; |
1017 | } | 999 | } |
1018 | 1000 | ||
1001 | int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry, | ||
1002 | struct kstat *stat) | ||
1003 | { | ||
1004 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat; | ||
1005 | int rc = 0; | ||
1006 | |||
1007 | mount_crypt_stat = &ecryptfs_superblock_to_private( | ||
1008 | dentry->d_sb)->mount_crypt_stat; | ||
1009 | generic_fillattr(dentry->d_inode, stat); | ||
1010 | if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { | ||
1011 | char *target; | ||
1012 | size_t targetsiz; | ||
1013 | |||
1014 | rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz); | ||
1015 | if (!rc) { | ||
1016 | kfree(target); | ||
1017 | stat->size = targetsiz; | ||
1018 | } | ||
1019 | } | ||
1020 | return rc; | ||
1021 | } | ||
1022 | |||
1019 | int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, | 1023 | int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, |
1020 | struct kstat *stat) | 1024 | struct kstat *stat) |
1021 | { | 1025 | { |
@@ -1040,7 +1044,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, | |||
1040 | 1044 | ||
1041 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 1045 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
1042 | if (!lower_dentry->d_inode->i_op->setxattr) { | 1046 | if (!lower_dentry->d_inode->i_op->setxattr) { |
1043 | rc = -ENOSYS; | 1047 | rc = -EOPNOTSUPP; |
1044 | goto out; | 1048 | goto out; |
1045 | } | 1049 | } |
1046 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1050 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
@@ -1058,7 +1062,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name, | |||
1058 | int rc = 0; | 1062 | int rc = 0; |
1059 | 1063 | ||
1060 | if (!lower_dentry->d_inode->i_op->getxattr) { | 1064 | if (!lower_dentry->d_inode->i_op->getxattr) { |
1061 | rc = -ENOSYS; | 1065 | rc = -EOPNOTSUPP; |
1062 | goto out; | 1066 | goto out; |
1063 | } | 1067 | } |
1064 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1068 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
@@ -1085,7 +1089,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size) | |||
1085 | 1089 | ||
1086 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 1090 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
1087 | if (!lower_dentry->d_inode->i_op->listxattr) { | 1091 | if (!lower_dentry->d_inode->i_op->listxattr) { |
1088 | rc = -ENOSYS; | 1092 | rc = -EOPNOTSUPP; |
1089 | goto out; | 1093 | goto out; |
1090 | } | 1094 | } |
1091 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1095 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
@@ -1102,7 +1106,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name) | |||
1102 | 1106 | ||
1103 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 1107 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
1104 | if (!lower_dentry->d_inode->i_op->removexattr) { | 1108 | if (!lower_dentry->d_inode->i_op->removexattr) { |
1105 | rc = -ENOSYS; | 1109 | rc = -EOPNOTSUPP; |
1106 | goto out; | 1110 | goto out; |
1107 | } | 1111 | } |
1108 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1112 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
@@ -1133,6 +1137,7 @@ const struct inode_operations ecryptfs_symlink_iops = { | |||
1133 | .put_link = ecryptfs_put_link, | 1137 | .put_link = ecryptfs_put_link, |
1134 | .permission = ecryptfs_permission, | 1138 | .permission = ecryptfs_permission, |
1135 | .setattr = ecryptfs_setattr, | 1139 | .setattr = ecryptfs_setattr, |
1140 | .getattr = ecryptfs_getattr_link, | ||
1136 | .setxattr = ecryptfs_setxattr, | 1141 | .setxattr = ecryptfs_setxattr, |
1137 | .getxattr = ecryptfs_getxattr, | 1142 | .getxattr = ecryptfs_getxattr, |
1138 | .listxattr = ecryptfs_listxattr, | 1143 | .listxattr = ecryptfs_listxattr, |
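With readlink split into ecryptfs_readlink_lower(), the user-copy half of ecryptfs_readlink() reduces to the usual "bytes copied or -EFAULT" idiom. Stated on its own as a hypothetical standalone helper (not part of the patch):

    /* kbuf/kbufsiz come from the lower readlink plus filename decryption;
     * readlink(2) returns the number of bytes placed in the user buffer. */
    static int copy_link_to_user(char __user *buf, int bufsiz,
                                 const char *kbuf, size_t kbufsiz)
    {
            size_t copied = min_t(size_t, bufsiz, kbufsiz);

            return copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
    }

The same lower-readlink helper is what lets the new ecryptfs_getattr_link() report the decrypted target length in stat->size instead of the longer encoded name stored in the lower filesystem.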
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index af1a8f01ebac..760983d0f25e 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c | |||
@@ -497,17 +497,25 @@ struct kmem_cache *ecryptfs_sb_info_cache; | |||
497 | static int | 497 | static int |
498 | ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent) | 498 | ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent) |
499 | { | 499 | { |
500 | struct ecryptfs_sb_info *esi; | ||
500 | int rc = 0; | 501 | int rc = 0; |
501 | 502 | ||
502 | /* Released in ecryptfs_put_super() */ | 503 | /* Released in ecryptfs_put_super() */ |
503 | ecryptfs_set_superblock_private(sb, | 504 | ecryptfs_set_superblock_private(sb, |
504 | kmem_cache_zalloc(ecryptfs_sb_info_cache, | 505 | kmem_cache_zalloc(ecryptfs_sb_info_cache, |
505 | GFP_KERNEL)); | 506 | GFP_KERNEL)); |
506 | if (!ecryptfs_superblock_to_private(sb)) { | 507 | esi = ecryptfs_superblock_to_private(sb); |
508 | if (!esi) { | ||
507 | ecryptfs_printk(KERN_WARNING, "Out of memory\n"); | 509 | ecryptfs_printk(KERN_WARNING, "Out of memory\n"); |
508 | rc = -ENOMEM; | 510 | rc = -ENOMEM; |
509 | goto out; | 511 | goto out; |
510 | } | 512 | } |
513 | |||
514 | rc = bdi_setup_and_register(&esi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); | ||
515 | if (rc) | ||
516 | goto out; | ||
517 | |||
518 | sb->s_bdi = &esi->bdi; | ||
511 | sb->s_op = &ecryptfs_sops; | 519 | sb->s_op = &ecryptfs_sops; |
512 | /* Released through deactivate_super(sb) from get_sb_nodev */ | 520 | /* Released through deactivate_super(sb) from get_sb_nodev */ |
513 | sb->s_root = d_alloc(NULL, &(const struct qstr) { | 521 | sb->s_root = d_alloc(NULL, &(const struct qstr) { |
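The fill_super change above is one half of the per-superblock backing_dev_info pattern this series applies to both ecryptfs and exofs; the matching bdi_destroy() calls appear in the super.c and exofs hunks below. Reduced to its skeleton (the my_* names are placeholders, error unwinding trimmed):

    struct my_sb_info {
            struct backing_dev_info bdi;
            /* ... filesystem-specific state ... */
    };

    static int my_fill_super_bdi(struct super_block *sb, struct my_sb_info *sbi)
    {
            int rc = bdi_setup_and_register(&sbi->bdi, "myfs", BDI_CAP_MAP_COPY);

            if (rc)
                    return rc;
            sb->s_bdi = &sbi->bdi;  /* writeback is now accounted per mount */
            return 0;
    }

    static void my_put_super_bdi(struct my_sb_info *sbi)
    {
            bdi_destroy(&sbi->bdi); /* must pair with bdi_setup_and_register() */
    }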
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index d491237c98e7..2ee9a3a7b68c 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c | |||
@@ -83,6 +83,19 @@ out: | |||
83 | return rc; | 83 | return rc; |
84 | } | 84 | } |
85 | 85 | ||
86 | static void strip_xattr_flag(char *page_virt, | ||
87 | struct ecryptfs_crypt_stat *crypt_stat) | ||
88 | { | ||
89 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) { | ||
90 | size_t written; | ||
91 | |||
92 | crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR; | ||
93 | ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat, | ||
94 | &written); | ||
95 | crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR; | ||
96 | } | ||
97 | } | ||
98 | |||
86 | /** | 99 | /** |
87 | * Header Extent: | 100 | * Header Extent: |
88 | * Octets 0-7: Unencrypted file size (big-endian) | 101 | * Octets 0-7: Unencrypted file size (big-endian) |
@@ -98,19 +111,6 @@ out: | |||
98 | * (big-endian) | 111 | * (big-endian) |
99 | * Octet 26: Begin RFC 2440 authentication token packet set | 112 | * Octet 26: Begin RFC 2440 authentication token packet set |
100 | */ | 113 | */ |
101 | static void set_header_info(char *page_virt, | ||
102 | struct ecryptfs_crypt_stat *crypt_stat) | ||
103 | { | ||
104 | size_t written; | ||
105 | size_t save_num_header_bytes_at_front = | ||
106 | crypt_stat->num_header_bytes_at_front; | ||
107 | |||
108 | crypt_stat->num_header_bytes_at_front = | ||
109 | ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; | ||
110 | ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written); | ||
111 | crypt_stat->num_header_bytes_at_front = | ||
112 | save_num_header_bytes_at_front; | ||
113 | } | ||
114 | 114 | ||
115 | /** | 115 | /** |
116 | * ecryptfs_copy_up_encrypted_with_header | 116 | * ecryptfs_copy_up_encrypted_with_header |
@@ -136,8 +136,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
136 | * num_extents_per_page) | 136 | * num_extents_per_page) |
137 | + extent_num_in_page); | 137 | + extent_num_in_page); |
138 | size_t num_header_extents_at_front = | 138 | size_t num_header_extents_at_front = |
139 | (crypt_stat->num_header_bytes_at_front | 139 | (crypt_stat->metadata_size / crypt_stat->extent_size); |
140 | / crypt_stat->extent_size); | ||
141 | 140 | ||
142 | if (view_extent_num < num_header_extents_at_front) { | 141 | if (view_extent_num < num_header_extents_at_front) { |
143 | /* This is a header extent */ | 142 | /* This is a header extent */ |
@@ -147,9 +146,14 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
147 | memset(page_virt, 0, PAGE_CACHE_SIZE); | 146 | memset(page_virt, 0, PAGE_CACHE_SIZE); |
148 | /* TODO: Support more than one header extent */ | 147 | /* TODO: Support more than one header extent */ |
149 | if (view_extent_num == 0) { | 148 | if (view_extent_num == 0) { |
149 | size_t written; | ||
150 | |||
150 | rc = ecryptfs_read_xattr_region( | 151 | rc = ecryptfs_read_xattr_region( |
151 | page_virt, page->mapping->host); | 152 | page_virt, page->mapping->host); |
152 | set_header_info(page_virt, crypt_stat); | 153 | strip_xattr_flag(page_virt + 16, crypt_stat); |
154 | ecryptfs_write_header_metadata(page_virt + 20, | ||
155 | crypt_stat, | ||
156 | &written); | ||
153 | } | 157 | } |
154 | kunmap_atomic(page_virt, KM_USER0); | 158 | kunmap_atomic(page_virt, KM_USER0); |
155 | flush_dcache_page(page); | 159 | flush_dcache_page(page); |
@@ -162,7 +166,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
162 | /* This is an encrypted data extent */ | 166 | /* This is an encrypted data extent */ |
163 | loff_t lower_offset = | 167 | loff_t lower_offset = |
164 | ((view_extent_num * crypt_stat->extent_size) | 168 | ((view_extent_num * crypt_stat->extent_size) |
165 | - crypt_stat->num_header_bytes_at_front); | 169 | - crypt_stat->metadata_size); |
166 | 170 | ||
167 | rc = ecryptfs_read_lower_page_segment( | 171 | rc = ecryptfs_read_lower_page_segment( |
168 | page, (lower_offset >> PAGE_CACHE_SHIFT), | 172 | page, (lower_offset >> PAGE_CACHE_SHIFT), |
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index fcef41c1d2cf..0c0ae491d231 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c | |||
@@ -86,7 +86,6 @@ static void ecryptfs_destroy_inode(struct inode *inode) | |||
86 | if (lower_dentry->d_inode) { | 86 | if (lower_dentry->d_inode) { |
87 | fput(inode_info->lower_file); | 87 | fput(inode_info->lower_file); |
88 | inode_info->lower_file = NULL; | 88 | inode_info->lower_file = NULL; |
89 | d_drop(lower_dentry); | ||
90 | } | 89 | } |
91 | } | 90 | } |
92 | ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); | 91 | ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); |
@@ -123,6 +122,7 @@ static void ecryptfs_put_super(struct super_block *sb) | |||
123 | lock_kernel(); | 122 | lock_kernel(); |
124 | 123 | ||
125 | ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat); | 124 | ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat); |
125 | bdi_destroy(&sb_info->bdi); | ||
126 | kmem_cache_free(ecryptfs_sb_info_cache, sb_info); | 126 | kmem_cache_free(ecryptfs_sb_info_cache, sb_info); |
127 | ecryptfs_set_superblock_private(sb, NULL); | 127 | ecryptfs_set_superblock_private(sb, NULL); |
128 | 128 | ||
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h index 8442e353309f..54373278a353 100644 --- a/fs/exofs/exofs.h +++ b/fs/exofs/exofs.h | |||
@@ -35,6 +35,7 @@ | |||
35 | 35 | ||
36 | #include <linux/fs.h> | 36 | #include <linux/fs.h> |
37 | #include <linux/time.h> | 37 | #include <linux/time.h> |
38 | #include <linux/backing-dev.h> | ||
38 | #include "common.h" | 39 | #include "common.h" |
39 | 40 | ||
40 | /* FIXME: Remove once pnfs hits mainline | 41 | /* FIXME: Remove once pnfs hits mainline |
@@ -92,6 +93,7 @@ struct exofs_sb_info { | |||
92 | struct exofs_layout layout; /* Default files layout, | 93 | struct exofs_layout layout; /* Default files layout, |
93 | * contains the variable osd_dev | 94 | * contains the variable osd_dev |
94 | * array. Keep last */ | 95 | * array. Keep last */ |
96 | struct backing_dev_info bdi; | ||
95 | struct osd_dev *_min_one_dev[1]; /* Place holder for one dev */ | 97 | struct osd_dev *_min_one_dev[1]; /* Place holder for one dev */ |
96 | }; | 98 | }; |
97 | 99 | ||
diff --git a/fs/exofs/super.c b/fs/exofs/super.c index 18e57ea1e5b4..03149b9a5178 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c | |||
@@ -302,6 +302,7 @@ static void exofs_put_super(struct super_block *sb) | |||
302 | _exofs_print_device("Unmounting", NULL, sbi->layout.s_ods[0], | 302 | _exofs_print_device("Unmounting", NULL, sbi->layout.s_ods[0], |
303 | sbi->layout.s_pid); | 303 | sbi->layout.s_pid); |
304 | 304 | ||
305 | bdi_destroy(&sbi->bdi); | ||
305 | exofs_free_sbi(sbi); | 306 | exofs_free_sbi(sbi); |
306 | sb->s_fs_info = NULL; | 307 | sb->s_fs_info = NULL; |
307 | } | 308 | } |
@@ -546,6 +547,10 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent) | |||
546 | if (!sbi) | 547 | if (!sbi) |
547 | return -ENOMEM; | 548 | return -ENOMEM; |
548 | 549 | ||
550 | ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY); | ||
551 | if (ret) | ||
552 | goto free_bdi; | ||
553 | |||
549 | /* use mount options to fill superblock */ | 554 | /* use mount options to fill superblock */ |
550 | od = osduld_path_lookup(opts->dev_name); | 555 | od = osduld_path_lookup(opts->dev_name); |
551 | if (IS_ERR(od)) { | 556 | if (IS_ERR(od)) { |
@@ -612,6 +617,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent) | |||
612 | } | 617 | } |
613 | 618 | ||
614 | /* set up operation vectors */ | 619 | /* set up operation vectors */ |
620 | sb->s_bdi = &sbi->bdi; | ||
615 | sb->s_fs_info = sbi; | 621 | sb->s_fs_info = sbi; |
616 | sb->s_op = &exofs_sops; | 622 | sb->s_op = &exofs_sops; |
617 | sb->s_export_op = &exofs_export_ops; | 623 | sb->s_export_op = &exofs_export_ops; |
@@ -643,6 +649,8 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent) | |||
643 | return 0; | 649 | return 0; |
644 | 650 | ||
645 | free_sbi: | 651 | free_sbi: |
652 | bdi_destroy(&sbi->bdi); | ||
653 | free_bdi: | ||
646 | EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n", | 654 | EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n", |
647 | opts->dev_name, sbi->layout.s_pid, ret); | 655 | opts->dev_name, sbi->layout.s_pid, ret); |
648 | exofs_free_sbi(sbi); | 656 | exofs_free_sbi(sbi); |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 94c8ee81f5e1..236b834b4ca8 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -3879,6 +3879,7 @@ static int ext4_xattr_fiemap(struct inode *inode, | |||
3879 | physical += offset; | 3879 | physical += offset; |
3880 | length = EXT4_SB(inode->i_sb)->s_inode_size - offset; | 3880 | length = EXT4_SB(inode->i_sb)->s_inode_size - offset; |
3881 | flags |= FIEMAP_EXTENT_DATA_INLINE; | 3881 | flags |= FIEMAP_EXTENT_DATA_INLINE; |
3882 | brelse(iloc.bh); | ||
3882 | } else { /* external block */ | 3883 | } else { /* external block */ |
3883 | physical = EXT4_I(inode)->i_file_acl << blockbits; | 3884 | physical = EXT4_I(inode)->i_file_acl << blockbits; |
3884 | length = inode->i_sb->s_blocksize; | 3885 | length = inode->i_sb->s_blocksize; |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5381802d6052..81d605412844 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -5375,7 +5375,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
5375 | } else { | 5375 | } else { |
5376 | struct ext4_iloc iloc; | 5376 | struct ext4_iloc iloc; |
5377 | 5377 | ||
5378 | err = ext4_get_inode_loc(inode, &iloc); | 5378 | err = __ext4_get_inode_loc(inode, &iloc, 0); |
5379 | if (err) | 5379 | if (err) |
5380 | return err; | 5380 | return err; |
5381 | if (wbc->sync_mode == WB_SYNC_ALL) | 5381 | if (wbc->sync_mode == WB_SYNC_ALL) |
@@ -5386,6 +5386,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
5386 | (unsigned long long)iloc.bh->b_blocknr); | 5386 | (unsigned long long)iloc.bh->b_blocknr); |
5387 | err = -EIO; | 5387 | err = -EIO; |
5388 | } | 5388 | } |
5389 | brelse(iloc.bh); | ||
5389 | } | 5390 | } |
5390 | return err; | 5391 | return err; |
5391 | } | 5392 | } |
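Both ext4 hunks above are reference-count fixes of the same shape: ext4_get_inode_loc()/__ext4_get_inode_loc() return iloc.bh with its reference held, and every path that obtained one must drop it. The pattern in isolation (sketch only):

    struct ext4_iloc iloc;
    int err = __ext4_get_inode_loc(inode, &iloc, 0);

    if (err)
            return err;
    /* ... read whatever is needed from iloc.bh ... */
    brelse(iloc.bh);        /* without this, the buffer_head leaks */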
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index bde9d0b170c2..b423a364dca3 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -2535,6 +2535,17 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) | |||
2535 | mb_debug(1, "gonna free %u blocks in group %u (0x%p):", | 2535 | mb_debug(1, "gonna free %u blocks in group %u (0x%p):", |
2536 | entry->count, entry->group, entry); | 2536 | entry->count, entry->group, entry); |
2537 | 2537 | ||
2538 | if (test_opt(sb, DISCARD)) { | ||
2539 | ext4_fsblk_t discard_block; | ||
2540 | |||
2541 | discard_block = entry->start_blk + | ||
2542 | ext4_group_first_block_no(sb, entry->group); | ||
2543 | trace_ext4_discard_blocks(sb, | ||
2544 | (unsigned long long)discard_block, | ||
2545 | entry->count); | ||
2546 | sb_issue_discard(sb, discard_block, entry->count); | ||
2547 | } | ||
2548 | |||
2538 | err = ext4_mb_load_buddy(sb, entry->group, &e4b); | 2549 | err = ext4_mb_load_buddy(sb, entry->group, &e4b); |
2539 | /* we expect to find existing buddy because it's pinned */ | 2550 | /* we expect to find existing buddy because it's pinned */ |
2540 | BUG_ON(err != 0); | 2551 | BUG_ON(err != 0); |
@@ -2556,16 +2567,6 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) | |||
2556 | page_cache_release(e4b.bd_bitmap_page); | 2567 | page_cache_release(e4b.bd_bitmap_page); |
2557 | } | 2568 | } |
2558 | ext4_unlock_group(sb, entry->group); | 2569 | ext4_unlock_group(sb, entry->group); |
2559 | if (test_opt(sb, DISCARD)) { | ||
2560 | ext4_fsblk_t discard_block; | ||
2561 | |||
2562 | discard_block = entry->start_blk + | ||
2563 | ext4_group_first_block_no(sb, entry->group); | ||
2564 | trace_ext4_discard_blocks(sb, | ||
2565 | (unsigned long long)discard_block, | ||
2566 | entry->count); | ||
2567 | sb_issue_discard(sb, discard_block, entry->count); | ||
2568 | } | ||
2569 | kmem_cache_free(ext4_free_ext_cachep, entry); | 2570 | kmem_cache_free(ext4_free_ext_cachep, entry); |
2570 | ext4_mb_release_desc(&e4b); | 2571 | ext4_mb_release_desc(&e4b); |
2571 | } | 2572 | } |
diff --git a/fs/ioctl.c b/fs/ioctl.c index 6c751106c2e5..7faefb4da939 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c | |||
@@ -228,14 +228,23 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg) | |||
228 | 228 | ||
229 | #ifdef CONFIG_BLOCK | 229 | #ifdef CONFIG_BLOCK |
230 | 230 | ||
231 | #define blk_to_logical(inode, blk) (blk << (inode)->i_blkbits) | 231 | static inline sector_t logical_to_blk(struct inode *inode, loff_t offset) |
232 | #define logical_to_blk(inode, offset) (offset >> (inode)->i_blkbits); | 232 | { |
233 | return (offset >> inode->i_blkbits); | ||
234 | } | ||
235 | |||
236 | static inline loff_t blk_to_logical(struct inode *inode, sector_t blk) | ||
237 | { | ||
238 | return (blk << inode->i_blkbits); | ||
239 | } | ||
233 | 240 | ||
234 | /** | 241 | /** |
235 | * __generic_block_fiemap - FIEMAP for block based inodes (no locking) | 242 | * __generic_block_fiemap - FIEMAP for block based inodes (no locking) |
236 | * @inode - the inode to map | 243 | * @inode: the inode to map |
237 | * @arg - the pointer to userspace where we copy everything to | 244 | * @fieinfo: the fiemap info struct that will be passed back to userspace |
238 | * @get_block - the fs's get_block function | 245 | * @start: where to start mapping in the inode |
246 | * @len: how much space to map | ||
247 | * @get_block: the fs's get_block function | ||
239 | * | 248 | * |
240 | * This does FIEMAP for block based inodes. Basically it will just loop | 249 | * This does FIEMAP for block based inodes. Basically it will just loop |
241 | * through get_block until we hit the number of extents we want to map, or we | 250 | * through get_block until we hit the number of extents we want to map, or we |
@@ -250,58 +259,63 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg) | |||
250 | */ | 259 | */ |
251 | 260 | ||
252 | int __generic_block_fiemap(struct inode *inode, | 261 | int __generic_block_fiemap(struct inode *inode, |
253 | struct fiemap_extent_info *fieinfo, u64 start, | 262 | struct fiemap_extent_info *fieinfo, loff_t start, |
254 | u64 len, get_block_t *get_block) | 263 | loff_t len, get_block_t *get_block) |
255 | { | 264 | { |
256 | struct buffer_head tmp; | 265 | struct buffer_head map_bh; |
257 | unsigned long long start_blk; | 266 | sector_t start_blk, last_blk; |
258 | long long length = 0, map_len = 0; | 267 | loff_t isize = i_size_read(inode); |
259 | u64 logical = 0, phys = 0, size = 0; | 268 | u64 logical = 0, phys = 0, size = 0; |
260 | u32 flags = FIEMAP_EXTENT_MERGED; | 269 | u32 flags = FIEMAP_EXTENT_MERGED; |
261 | int ret = 0, past_eof = 0, whole_file = 0; | 270 | bool past_eof = false, whole_file = false; |
271 | int ret = 0; | ||
262 | 272 | ||
263 | if ((ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC))) | 273 | ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC); |
274 | if (ret) | ||
264 | return ret; | 275 | return ret; |
265 | 276 | ||
266 | start_blk = logical_to_blk(inode, start); | 277 | /* |
267 | 278 | * Either the i_mutex or other appropriate locking needs to be held | |
268 | length = (long long)min_t(u64, len, i_size_read(inode)); | 279 | * since we expect isize to not change at all through the duration of |
269 | if (length < len) | 280 | * this call. |
270 | whole_file = 1; | 281 | */ |
282 | if (len >= isize) { | ||
283 | whole_file = true; | ||
284 | len = isize; | ||
285 | } | ||
271 | 286 | ||
272 | map_len = length; | 287 | start_blk = logical_to_blk(inode, start); |
288 | last_blk = logical_to_blk(inode, start + len - 1); | ||
273 | 289 | ||
274 | do { | 290 | do { |
275 | /* | 291 | /* |
276 | * we set b_size to the total size we want so it will map as | 292 | * we set b_size to the total size we want so it will map as |
277 | * many contiguous blocks as possible at once | 293 | * many contiguous blocks as possible at once |
278 | */ | 294 | */ |
279 | memset(&tmp, 0, sizeof(struct buffer_head)); | 295 | memset(&map_bh, 0, sizeof(struct buffer_head)); |
280 | tmp.b_size = map_len; | 296 | map_bh.b_size = len; |
281 | 297 | ||
282 | ret = get_block(inode, start_blk, &tmp, 0); | 298 | ret = get_block(inode, start_blk, &map_bh, 0); |
283 | if (ret) | 299 | if (ret) |
284 | break; | 300 | break; |
285 | 301 | ||
286 | /* HOLE */ | 302 | /* HOLE */ |
287 | if (!buffer_mapped(&tmp)) { | 303 | if (!buffer_mapped(&map_bh)) { |
288 | length -= blk_to_logical(inode, 1); | ||
289 | start_blk++; | 304 | start_blk++; |
290 | 305 | ||
291 | /* | 306 | /* |
292 | * we want to handle the case where there is an | 307 | * We want to handle the case where there is an |
293 | * allocated block at the front of the file, and then | 308 | * allocated block at the front of the file, and then |
294 | * nothing but holes up to the end of the file properly, | 309 | * nothing but holes up to the end of the file properly, |
295 | * to make sure that extent at the front gets properly | 310 | * to make sure that extent at the front gets properly |
296 | * marked with FIEMAP_EXTENT_LAST | 311 | * marked with FIEMAP_EXTENT_LAST |
297 | */ | 312 | */ |
298 | if (!past_eof && | 313 | if (!past_eof && |
299 | blk_to_logical(inode, start_blk) >= | 314 | blk_to_logical(inode, start_blk) >= isize) |
300 | blk_to_logical(inode, 0)+i_size_read(inode)) | ||
301 | past_eof = 1; | 315 | past_eof = 1; |
302 | 316 | ||
303 | /* | 317 | /* |
304 | * first hole after going past the EOF, this is our | 318 | * First hole after going past the EOF, this is our |
305 | * last extent | 319 | * last extent |
306 | */ | 320 | */ |
307 | if (past_eof && size) { | 321 | if (past_eof && size) { |
@@ -309,15 +323,18 @@ int __generic_block_fiemap(struct inode *inode, | |||
309 | ret = fiemap_fill_next_extent(fieinfo, logical, | 323 | ret = fiemap_fill_next_extent(fieinfo, logical, |
310 | phys, size, | 324 | phys, size, |
311 | flags); | 325 | flags); |
312 | break; | 326 | } else if (size) { |
327 | ret = fiemap_fill_next_extent(fieinfo, logical, | ||
328 | phys, size, flags); | ||
329 | size = 0; | ||
313 | } | 330 | } |
314 | 331 | ||
315 | /* if we have holes up to/past EOF then we're done */ | 332 | /* if we have holes up to/past EOF then we're done */ |
316 | if (length <= 0 || past_eof) | 333 | if (start_blk > last_blk || past_eof || ret) |
317 | break; | 334 | break; |
318 | } else { | 335 | } else { |
319 | /* | 336 | /* |
320 | * we have gone over the length of what we wanted to | 337 | * We have gone over the length of what we wanted to |
321 | * map, and it wasn't the entire file, so add the extent | 338 | * map, and it wasn't the entire file, so add the extent |
322 | * we got last time and exit. | 339 | * we got last time and exit. |
323 | * | 340 | * |
@@ -331,7 +348,7 @@ int __generic_block_fiemap(struct inode *inode, | |||
331 | * are good to go, just add the extent to the fieinfo | 348 | * are good to go, just add the extent to the fieinfo |
332 | * and break | 349 | * and break |
333 | */ | 350 | */ |
334 | if (length <= 0 && !whole_file) { | 351 | if (start_blk > last_blk && !whole_file) { |
335 | ret = fiemap_fill_next_extent(fieinfo, logical, | 352 | ret = fiemap_fill_next_extent(fieinfo, logical, |
336 | phys, size, | 353 | phys, size, |
337 | flags); | 354 | flags); |
@@ -351,11 +368,10 @@ int __generic_block_fiemap(struct inode *inode, | |||
351 | } | 368 | } |
352 | 369 | ||
353 | logical = blk_to_logical(inode, start_blk); | 370 | logical = blk_to_logical(inode, start_blk); |
354 | phys = blk_to_logical(inode, tmp.b_blocknr); | 371 | phys = blk_to_logical(inode, map_bh.b_blocknr); |
355 | size = tmp.b_size; | 372 | size = map_bh.b_size; |
356 | flags = FIEMAP_EXTENT_MERGED; | 373 | flags = FIEMAP_EXTENT_MERGED; |
357 | 374 | ||
358 | length -= tmp.b_size; | ||
359 | start_blk += logical_to_blk(inode, size); | 375 | start_blk += logical_to_blk(inode, size); |
360 | 376 | ||
361 | /* | 377 | /* |
@@ -363,15 +379,13 @@ int __generic_block_fiemap(struct inode *inode, | |||
363 | * soon as we find a hole that the last extent we found | 379 | * soon as we find a hole that the last extent we found |
364 | * is marked with FIEMAP_EXTENT_LAST | 380 | * is marked with FIEMAP_EXTENT_LAST |
365 | */ | 381 | */ |
366 | if (!past_eof && | 382 | if (!past_eof && logical + size >= isize) |
367 | logical+size >= | 383 | past_eof = true; |
368 | blk_to_logical(inode, 0)+i_size_read(inode)) | ||
369 | past_eof = 1; | ||
370 | } | 384 | } |
371 | cond_resched(); | 385 | cond_resched(); |
372 | } while (1); | 386 | } while (1); |
373 | 387 | ||
374 | /* if ret is 1 then we just hit the end of the extent array */ | 388 | /* If ret is 1 then we just hit the end of the extent array */ |
375 | if (ret == 1) | 389 | if (ret == 1) |
376 | ret = 0; | 390 | ret = 0; |
377 | 391 | ||
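Replacing the conversion macros with static inline functions here is a correctness change, not just style: the old logical_to_blk() definition carried a trailing semicolon and left its offset argument unparenthesized. A hypothetical use such as

    last_blk = logical_to_blk(inode, start + len - 1) + 1;
    /* old macro expands to:
     *   last_blk = (start + len - 1 >> (inode)->i_blkbits); + 1;
     * so the "+ 1" becomes a separate, effect-free statement */

compiles but drops the arithmetic after the call (at best with an unused-value warning), and any operand with lower precedence than ">>" would mis-bind inside the shift. The typed inline versions added above rule both mistakes out, which in turn lets the rewritten loop compute last_blk up front instead of maintaining a separate signed length counter.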
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 9dd126276c9f..ed9ba6fe04f5 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
@@ -61,7 +61,7 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino) | |||
61 | inode->i_op = &page_symlink_inode_operations; | 61 | inode->i_op = &page_symlink_inode_operations; |
62 | inode->i_mapping->a_ops = &jfs_aops; | 62 | inode->i_mapping->a_ops = &jfs_aops; |
63 | } else { | 63 | } else { |
64 | inode->i_op = &jfs_symlink_inode_operations; | 64 | inode->i_op = &jfs_fast_symlink_inode_operations; |
65 | /* | 65 | /* |
66 | * The inline data should be null-terminated, but | 66 | * The inline data should be null-terminated, but |
67 | * don't let on-disk corruption crash the kernel | 67 | * don't let on-disk corruption crash the kernel |
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 6c4dfcbf3f55..9e2f6a721668 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c | |||
@@ -196,7 +196,7 @@ int dbMount(struct inode *ipbmap) | |||
196 | bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag); | 196 | bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag); |
197 | bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref); | 197 | bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref); |
198 | bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel); | 198 | bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel); |
199 | bmp->db_agheigth = le32_to_cpu(dbmp_le->dn_agheigth); | 199 | bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight); |
200 | bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); | 200 | bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); |
201 | bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); | 201 | bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); |
202 | bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); | 202 | bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); |
@@ -288,7 +288,7 @@ int dbSync(struct inode *ipbmap) | |||
288 | dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag); | 288 | dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag); |
289 | dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref); | 289 | dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref); |
290 | dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel); | 290 | dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel); |
291 | dbmp_le->dn_agheigth = cpu_to_le32(bmp->db_agheigth); | 291 | dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight); |
292 | dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth); | 292 | dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth); |
293 | dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart); | 293 | dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart); |
294 | dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size); | 294 | dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size); |
@@ -1441,7 +1441,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results) | |||
1441 | * tree index of this allocation group within the control page. | 1441 | * tree index of this allocation group within the control page. |
1442 | */ | 1442 | */ |
1443 | agperlev = | 1443 | agperlev = |
1444 | (1 << (L2LPERCTL - (bmp->db_agheigth << 1))) / bmp->db_agwidth; | 1444 | (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth; |
1445 | ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1)); | 1445 | ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1)); |
1446 | 1446 | ||
1447 | /* dmap control page trees fan-out by 4 and a single allocation | 1447 | /* dmap control page trees fan-out by 4 and a single allocation |
@@ -1460,7 +1460,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results) | |||
1460 | * the subtree to find the leftmost leaf that describes this | 1460 | * the subtree to find the leftmost leaf that describes this |
1461 | * free space. | 1461 | * free space. |
1462 | */ | 1462 | */ |
1463 | for (k = bmp->db_agheigth; k > 0; k--) { | 1463 | for (k = bmp->db_agheight; k > 0; k--) { |
1464 | for (n = 0, m = (ti << 2) + 1; n < 4; n++) { | 1464 | for (n = 0, m = (ti << 2) + 1; n < 4; n++) { |
1465 | if (l2nb <= dcp->stree[m + n]) { | 1465 | if (l2nb <= dcp->stree[m + n]) { |
1466 | ti = m + n; | 1466 | ti = m + n; |
@@ -3607,7 +3607,7 @@ void dbFinalizeBmap(struct inode *ipbmap) | |||
3607 | } | 3607 | } |
3608 | 3608 | ||
3609 | /* | 3609 | /* |
3610 | * compute db_aglevel, db_agheigth, db_width, db_agstart: | 3610 | * compute db_aglevel, db_agheight, db_width, db_agstart: |
3611 | * an ag is covered in aglevel dmapctl summary tree, | 3611 | * an ag is covered in aglevel dmapctl summary tree, |
3612 | * at agheight level height (from leaf) with agwidth number of nodes | 3612 | * at agheight level height (from leaf) with agwidth number of nodes |
3613 | * each, which starts at agstart index node of the smmary tree node | 3613 | * each, which starts at agstart index node of the smmary tree node |
@@ -3616,9 +3616,9 @@ void dbFinalizeBmap(struct inode *ipbmap) | |||
3616 | bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize); | 3616 | bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize); |
3617 | l2nl = | 3617 | l2nl = |
3618 | bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL); | 3618 | bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL); |
3619 | bmp->db_agheigth = l2nl >> 1; | 3619 | bmp->db_agheight = l2nl >> 1; |
3620 | bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheigth << 1)); | 3620 | bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1)); |
3621 | for (i = 5 - bmp->db_agheigth, bmp->db_agstart = 0, n = 1; i > 0; | 3621 | for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0; |
3622 | i--) { | 3622 | i--) { |
3623 | bmp->db_agstart += n; | 3623 | bmp->db_agstart += n; |
3624 | n <<= 2; | 3624 | n <<= 2; |
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h index 1a6eb41569bc..6dcb906c55d8 100644 --- a/fs/jfs/jfs_dmap.h +++ b/fs/jfs/jfs_dmap.h | |||
@@ -210,7 +210,7 @@ struct dbmap_disk { | |||
210 | __le32 dn_maxag; /* 4: max active alloc group number */ | 210 | __le32 dn_maxag; /* 4: max active alloc group number */ |
211 | __le32 dn_agpref; /* 4: preferred alloc group (hint) */ | 211 | __le32 dn_agpref; /* 4: preferred alloc group (hint) */ |
212 | __le32 dn_aglevel; /* 4: dmapctl level holding the AG */ | 212 | __le32 dn_aglevel; /* 4: dmapctl level holding the AG */ |
213 | __le32 dn_agheigth; /* 4: height in dmapctl of the AG */ | 213 | __le32 dn_agheight; /* 4: height in dmapctl of the AG */ |
214 | __le32 dn_agwidth; /* 4: width in dmapctl of the AG */ | 214 | __le32 dn_agwidth; /* 4: width in dmapctl of the AG */ |
215 | __le32 dn_agstart; /* 4: start tree index at AG height */ | 215 | __le32 dn_agstart; /* 4: start tree index at AG height */ |
216 | __le32 dn_agl2size; /* 4: l2 num of blks per alloc group */ | 216 | __le32 dn_agl2size; /* 4: l2 num of blks per alloc group */ |
@@ -229,7 +229,7 @@ struct dbmap { | |||
229 | int dn_maxag; /* max active alloc group number */ | 229 | int dn_maxag; /* max active alloc group number */ |
230 | int dn_agpref; /* preferred alloc group (hint) */ | 230 | int dn_agpref; /* preferred alloc group (hint) */ |
231 | int dn_aglevel; /* dmapctl level holding the AG */ | 231 | int dn_aglevel; /* dmapctl level holding the AG */ |
232 | int dn_agheigth; /* height in dmapctl of the AG */ | 232 | int dn_agheight; /* height in dmapctl of the AG */ |
233 | int dn_agwidth; /* width in dmapctl of the AG */ | 233 | int dn_agwidth; /* width in dmapctl of the AG */ |
234 | int dn_agstart; /* start tree index at AG height */ | 234 | int dn_agstart; /* start tree index at AG height */ |
235 | int dn_agl2size; /* l2 num of blks per alloc group */ | 235 | int dn_agl2size; /* l2 num of blks per alloc group */ |
@@ -255,7 +255,7 @@ struct bmap { | |||
255 | #define db_agsize db_bmap.dn_agsize | 255 | #define db_agsize db_bmap.dn_agsize |
256 | #define db_agl2size db_bmap.dn_agl2size | 256 | #define db_agl2size db_bmap.dn_agl2size |
257 | #define db_agwidth db_bmap.dn_agwidth | 257 | #define db_agwidth db_bmap.dn_agwidth |
258 | #define db_agheigth db_bmap.dn_agheigth | 258 | #define db_agheight db_bmap.dn_agheight |
259 | #define db_agstart db_bmap.dn_agstart | 259 | #define db_agstart db_bmap.dn_agstart |
260 | #define db_numag db_bmap.dn_numag | 260 | #define db_numag db_bmap.dn_numag |
261 | #define db_maxlevel db_bmap.dn_maxlevel | 261 | #define db_maxlevel db_bmap.dn_maxlevel |
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index 79e2c79661df..9e6bda30a6e8 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h | |||
@@ -48,5 +48,6 @@ extern const struct file_operations jfs_dir_operations; | |||
48 | extern const struct inode_operations jfs_file_inode_operations; | 48 | extern const struct inode_operations jfs_file_inode_operations; |
49 | extern const struct file_operations jfs_file_operations; | 49 | extern const struct file_operations jfs_file_operations; |
50 | extern const struct inode_operations jfs_symlink_inode_operations; | 50 | extern const struct inode_operations jfs_symlink_inode_operations; |
51 | extern const struct inode_operations jfs_fast_symlink_inode_operations; | ||
51 | extern const struct dentry_operations jfs_ci_dentry_operations; | 52 | extern const struct dentry_operations jfs_ci_dentry_operations; |
52 | #endif /* _H_JFS_INODE */ | 53 | #endif /* _H_JFS_INODE */ |
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 4a3e9f39c21d..a9cf8e8675be 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c | |||
@@ -956,7 +956,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, | |||
956 | */ | 956 | */ |
957 | 957 | ||
958 | if (ssize <= IDATASIZE) { | 958 | if (ssize <= IDATASIZE) { |
959 | ip->i_op = &jfs_symlink_inode_operations; | 959 | ip->i_op = &jfs_fast_symlink_inode_operations; |
960 | 960 | ||
961 | i_fastsymlink = JFS_IP(ip)->i_inline; | 961 | i_fastsymlink = JFS_IP(ip)->i_inline; |
962 | memcpy(i_fastsymlink, name, ssize); | 962 | memcpy(i_fastsymlink, name, ssize); |
@@ -978,7 +978,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, | |||
978 | else { | 978 | else { |
979 | jfs_info("jfs_symlink: allocate extent ip:0x%p", ip); | 979 | jfs_info("jfs_symlink: allocate extent ip:0x%p", ip); |
980 | 980 | ||
981 | ip->i_op = &page_symlink_inode_operations; | 981 | ip->i_op = &jfs_symlink_inode_operations; |
982 | ip->i_mapping->a_ops = &jfs_aops; | 982 | ip->i_mapping->a_ops = &jfs_aops; |
983 | 983 | ||
984 | /* | 984 | /* |
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c index 7f24a0bb08ca..1aba0039f1c9 100644 --- a/fs/jfs/resize.c +++ b/fs/jfs/resize.c | |||
@@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) | |||
81 | struct inode *iplist[1]; | 81 | struct inode *iplist[1]; |
82 | struct jfs_superblock *j_sb, *j_sb2; | 82 | struct jfs_superblock *j_sb, *j_sb2; |
83 | uint old_agsize; | 83 | uint old_agsize; |
84 | int agsizechanged = 0; | ||
84 | struct buffer_head *bh, *bh2; | 85 | struct buffer_head *bh, *bh2; |
85 | 86 | ||
86 | /* If the volume hasn't grown, get out now */ | 87 | /* If the volume hasn't grown, get out now */ |
@@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) | |||
333 | */ | 334 | */ |
334 | if ((rc = dbExtendFS(ipbmap, XAddress, nblocks))) | 335 | if ((rc = dbExtendFS(ipbmap, XAddress, nblocks))) |
335 | goto error_out; | 336 | goto error_out; |
337 | |||
338 | agsizechanged |= (bmp->db_agsize != old_agsize); | ||
339 | |||
336 | /* | 340 | /* |
337 | * the map now has extended to cover additional nblocks: | 341 | * the map now has extended to cover additional nblocks: |
338 | * dn_mapsize = oldMapsize + nblocks; | 342 | * dn_mapsize = oldMapsize + nblocks; |
@@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) | |||
432 | * will correctly identify the new ag); | 436 | * will correctly identify the new ag); |
433 | */ | 437 | */ |
434 | /* if new AG size the same as old AG size, done! */ | 438 | /* if new AG size the same as old AG size, done! */ |
435 | if (bmp->db_agsize != old_agsize) { | 439 | if (agsizechanged) { |
436 | if ((rc = diExtendFS(ipimap, ipbmap))) | 440 | if ((rc = diExtendFS(ipimap, ipbmap))) |
437 | goto error_out; | 441 | goto error_out; |
438 | 442 | ||
diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c index 4af1a05aad0a..205b946d8e0d 100644 --- a/fs/jfs/symlink.c +++ b/fs/jfs/symlink.c | |||
@@ -29,9 +29,21 @@ static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
29 | return NULL; | 29 | return NULL; |
30 | } | 30 | } |
31 | 31 | ||
32 | const struct inode_operations jfs_symlink_inode_operations = { | 32 | const struct inode_operations jfs_fast_symlink_inode_operations = { |
33 | .readlink = generic_readlink, | 33 | .readlink = generic_readlink, |
34 | .follow_link = jfs_follow_link, | 34 | .follow_link = jfs_follow_link, |
35 | .setattr = jfs_setattr, | ||
36 | .setxattr = jfs_setxattr, | ||
37 | .getxattr = jfs_getxattr, | ||
38 | .listxattr = jfs_listxattr, | ||
39 | .removexattr = jfs_removexattr, | ||
40 | }; | ||
41 | |||
42 | const struct inode_operations jfs_symlink_inode_operations = { | ||
43 | .readlink = generic_readlink, | ||
44 | .follow_link = page_follow_link_light, | ||
45 | .put_link = page_put_link, | ||
46 | .setattr = jfs_setattr, | ||
35 | .setxattr = jfs_setxattr, | 47 | .setxattr = jfs_setxattr, |
36 | .getxattr = jfs_getxattr, | 48 | .getxattr = jfs_getxattr, |
37 | .listxattr = jfs_listxattr, | 49 | .listxattr = jfs_listxattr, |
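Taken together, the jfs hunks split symlink handling into two operation vectors so that setattr and the xattr methods work for both kinds of link: short targets stay inline in the inode (fast symlinks), while long targets go through the page cache instead of the generic page_symlink_inode_operations, which exposed no jfs xattr methods at all. The dispatch, condensed from the namei.c hunk above:

    if (ssize <= IDATASIZE)                 /* target fits in the inode */
            ip->i_op = &jfs_fast_symlink_inode_operations;
    else                                    /* page-cache backed target */
            ip->i_op = &jfs_symlink_inode_operations;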
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c index 84e36f52fe95..76c242fbe1b0 100644 --- a/fs/logfs/gc.c +++ b/fs/logfs/gc.c | |||
@@ -459,6 +459,14 @@ static void __logfs_gc_pass(struct super_block *sb, int target) | |||
459 | struct logfs_block *block; | 459 | struct logfs_block *block; |
460 | int round, progress, last_progress = 0; | 460 | int round, progress, last_progress = 0; |
461 | 461 | ||
462 | /* | ||
463 | * Doing too many changes to the segfile at once would result | ||
464 | * in a large number of aliases. Write the journal before | ||
465 | * things get out of hand. | ||
466 | */ | ||
467 | if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES) | ||
468 | logfs_write_anchor(sb); | ||
469 | |||
462 | if (no_free_segments(sb) >= target && | 470 | if (no_free_segments(sb) >= target && |
463 | super->s_no_object_aliases < MAX_OBJ_ALIASES) | 471 | super->s_no_object_aliases < MAX_OBJ_ALIASES) |
464 | return; | 472 | return; |
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c index 33bd260b8309..fb0a613f885b 100644 --- a/fs/logfs/journal.c +++ b/fs/logfs/journal.c | |||
@@ -389,7 +389,10 @@ static void journal_get_erase_count(struct logfs_area *area) | |||
389 | static int journal_erase_segment(struct logfs_area *area) | 389 | static int journal_erase_segment(struct logfs_area *area) |
390 | { | 390 | { |
391 | struct super_block *sb = area->a_sb; | 391 | struct super_block *sb = area->a_sb; |
392 | struct logfs_segment_header sh; | 392 | union { |
393 | struct logfs_segment_header sh; | ||
394 | unsigned char c[ALIGN(sizeof(struct logfs_segment_header), 16)]; | ||
395 | } u; | ||
393 | u64 ofs; | 396 | u64 ofs; |
394 | int err; | 397 | int err; |
395 | 398 | ||
@@ -397,20 +400,21 @@ static int journal_erase_segment(struct logfs_area *area) | |||
397 | if (err) | 400 | if (err) |
398 | return err; | 401 | return err; |
399 | 402 | ||
400 | sh.pad = 0; | 403 | memset(&u, 0, sizeof(u)); |
401 | sh.type = SEG_JOURNAL; | 404 | u.sh.pad = 0; |
402 | sh.level = 0; | 405 | u.sh.type = SEG_JOURNAL; |
403 | sh.segno = cpu_to_be32(area->a_segno); | 406 | u.sh.level = 0; |
404 | sh.ec = cpu_to_be32(area->a_erase_count); | 407 | u.sh.segno = cpu_to_be32(area->a_segno); |
405 | sh.gec = cpu_to_be64(logfs_super(sb)->s_gec); | 408 | u.sh.ec = cpu_to_be32(area->a_erase_count); |
406 | sh.crc = logfs_crc32(&sh, sizeof(sh), 4); | 409 | u.sh.gec = cpu_to_be64(logfs_super(sb)->s_gec); |
410 | u.sh.crc = logfs_crc32(&u.sh, sizeof(u.sh), 4); | ||
407 | 411 | ||
408 | /* This causes a bug in segment.c. Not yet. */ | 412 | /* This causes a bug in segment.c. Not yet. */ |
409 | //logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0); | 413 | //logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0); |
410 | 414 | ||
411 | ofs = dev_ofs(sb, area->a_segno, 0); | 415 | ofs = dev_ofs(sb, area->a_segno, 0); |
412 | area->a_used_bytes = ALIGN(sizeof(sh), 16); | 416 | area->a_used_bytes = sizeof(u); |
413 | logfs_buf_write(area, ofs, &sh, sizeof(sh)); | 417 | logfs_buf_write(area, ofs, &u, sizeof(u)); |
414 | return 0; | 418 | return 0; |
415 | } | 419 | } |
416 | 420 | ||
@@ -494,6 +498,8 @@ static void account_shadows(struct super_block *sb) | |||
494 | 498 | ||
495 | btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow); | 499 | btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow); |
496 | btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow); | 500 | btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow); |
501 | btree_grim_visitor32(&tree->segment_map, 0, NULL); | ||
502 | tree->no_shadowed_segments = 0; | ||
497 | 503 | ||
498 | if (li->li_block) { | 504 | if (li->li_block) { |
499 | /* | 505 | /* |
@@ -607,9 +613,9 @@ static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type, | |||
607 | if (len == 0) | 613 | if (len == 0) |
608 | return logfs_write_header(super, header, 0, type); | 614 | return logfs_write_header(super, header, 0, type); |
609 | 615 | ||
616 | BUG_ON(len > sb->s_blocksize); | ||
610 | compr_len = logfs_compress(buf, data, len, sb->s_blocksize); | 617 | compr_len = logfs_compress(buf, data, len, sb->s_blocksize); |
611 | if (compr_len < 0 || type == JE_ANCHOR) { | 618 | if (compr_len < 0 || type == JE_ANCHOR) { |
612 | BUG_ON(len > sb->s_blocksize); | ||
613 | memcpy(data, buf, len); | 619 | memcpy(data, buf, len); |
614 | compr_len = len; | 620 | compr_len = len; |
615 | compr = COMPR_NONE; | 621 | compr = COMPR_NONE; |
@@ -661,6 +667,7 @@ static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type, | |||
661 | if (ofs < 0) | 667 | if (ofs < 0) |
662 | return ofs; | 668 | return ofs; |
663 | logfs_buf_write(area, ofs, super->s_compressed_je, len); | 669 | logfs_buf_write(area, ofs, super->s_compressed_je, len); |
670 | BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES); | ||
664 | super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs); | 671 | super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs); |
665 | return 0; | 672 | return 0; |
666 | } | 673 | } |
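The union introduced in journal_erase_segment() above is a compact way to get a fully zeroed on-media header whose size is already rounded up to a multiple of 16 bytes, so the same sizeof(u) value can be written to the device and charged to a_used_bytes. The general shape of the trick, detached from the logfs specifics (sketch only):

    union padded_hdr {
            struct logfs_segment_header sh;
            unsigned char pad[ALIGN(sizeof(struct logfs_segment_header), 16)];
    } u;

    memset(&u, 0, sizeof(u));       /* zeroes the header and the padding alike */
    /* ... fill u.sh and compute u.sh.crc ... */
    /* write sizeof(u) bytes; account sizeof(u) bytes as used */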
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h index b84b0eec6024..0a3df1a0c936 100644 --- a/fs/logfs/logfs.h +++ b/fs/logfs/logfs.h | |||
@@ -257,10 +257,14 @@ struct logfs_shadow { | |||
257 | * struct shadow_tree | 257 | * struct shadow_tree |
258 | * @new: shadows where old_ofs==0, indexed by new_ofs | 258 | * @new: shadows where old_ofs==0, indexed by new_ofs |
259 | * @old: shadows where old_ofs!=0, indexed by old_ofs | 259 | * @old: shadows where old_ofs!=0, indexed by old_ofs |
260 | * @segment_map: bitfield of segments containing shadows | ||
261 | * @no_shadowed_segments: number of segments containing shadows | ||

260 | */ | 262 | */ |
261 | struct shadow_tree { | 263 | struct shadow_tree { |
262 | struct btree_head64 new; | 264 | struct btree_head64 new; |
263 | struct btree_head64 old; | 265 | struct btree_head64 old; |
266 | struct btree_head32 segment_map; | ||
267 | int no_shadowed_segments; | ||
264 | }; | 268 | }; |
265 | 269 | ||
266 | struct object_alias_item { | 270 | struct object_alias_item { |
@@ -305,13 +309,14 @@ typedef int write_alias_t(struct super_block *sb, u64 ino, u64 bix, | |||
305 | level_t level, int child_no, __be64 val); | 309 | level_t level, int child_no, __be64 val); |
306 | struct logfs_block_ops { | 310 | struct logfs_block_ops { |
307 | void (*write_block)(struct logfs_block *block); | 311 | void (*write_block)(struct logfs_block *block); |
308 | gc_level_t (*block_level)(struct logfs_block *block); | ||
309 | void (*free_block)(struct super_block *sb, struct logfs_block*block); | 312 | void (*free_block)(struct super_block *sb, struct logfs_block*block); |
310 | int (*write_alias)(struct super_block *sb, | 313 | int (*write_alias)(struct super_block *sb, |
311 | struct logfs_block *block, | 314 | struct logfs_block *block, |
312 | write_alias_t *write_one_alias); | 315 | write_alias_t *write_one_alias); |
313 | }; | 316 | }; |
314 | 317 | ||
318 | #define MAX_JOURNAL_ENTRIES 256 | ||
319 | |||
315 | struct logfs_super { | 320 | struct logfs_super { |
316 | struct mtd_info *s_mtd; /* underlying device */ | 321 | struct mtd_info *s_mtd; /* underlying device */ |
317 | struct block_device *s_bdev; /* underlying device */ | 322 | struct block_device *s_bdev; /* underlying device */ |
@@ -378,7 +383,7 @@ struct logfs_super { | |||
378 | u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */ | 383 | u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */ |
379 | u64 s_last_version; | 384 | u64 s_last_version; |
380 | struct logfs_area *s_journal_area; /* open journal segment */ | 385 | struct logfs_area *s_journal_area; /* open journal segment */ |
381 | __be64 s_je_array[64]; | 386 | __be64 s_je_array[MAX_JOURNAL_ENTRIES]; |
382 | int s_no_je; | 387 | int s_no_je; |
383 | 388 | ||
384 | int s_sum_index; /* for the 12 summaries */ | 389 | int s_sum_index; /* for the 12 summaries */ |
@@ -722,4 +727,10 @@ static inline struct logfs_area *get_area(struct super_block *sb, | |||
722 | return logfs_super(sb)->s_area[(__force u8)gc_level]; | 727 | return logfs_super(sb)->s_area[(__force u8)gc_level]; |
723 | } | 728 | } |
724 | 729 | ||
730 | static inline void logfs_mempool_destroy(mempool_t *pool) | ||
731 | { | ||
732 | if (pool) | ||
733 | mempool_destroy(pool); | ||
734 | } | ||
735 | |||
725 | #endif | 736 | #endif |
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index bff40253dfb2..3159db6958e5 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c | |||
@@ -430,25 +430,6 @@ static void inode_write_block(struct logfs_block *block) | |||
430 | } | 430 | } |
431 | } | 431 | } |
432 | 432 | ||
433 | static gc_level_t inode_block_level(struct logfs_block *block) | ||
434 | { | ||
435 | BUG_ON(block->inode->i_ino == LOGFS_INO_MASTER); | ||
436 | return GC_LEVEL(LOGFS_MAX_LEVELS); | ||
437 | } | ||
438 | |||
439 | static gc_level_t indirect_block_level(struct logfs_block *block) | ||
440 | { | ||
441 | struct page *page; | ||
442 | struct inode *inode; | ||
443 | u64 bix; | ||
444 | level_t level; | ||
445 | |||
446 | page = block->page; | ||
447 | inode = page->mapping->host; | ||
448 | logfs_unpack_index(page->index, &bix, &level); | ||
449 | return expand_level(inode->i_ino, level); | ||
450 | } | ||
451 | |||
452 | /* | 433 | /* |
453 | * This silences a false, yet annoying gcc warning. I hate it when my editor | 434 | * This silences a false, yet annoying gcc warning. I hate it when my editor |
454 | * jumps into bitops.h each time I recompile this file. | 435 | * jumps into bitops.h each time I recompile this file. |
@@ -587,14 +568,12 @@ static void indirect_free_block(struct super_block *sb, | |||
587 | 568 | ||
588 | static struct logfs_block_ops inode_block_ops = { | 569 | static struct logfs_block_ops inode_block_ops = { |
589 | .write_block = inode_write_block, | 570 | .write_block = inode_write_block, |
590 | .block_level = inode_block_level, | ||
591 | .free_block = inode_free_block, | 571 | .free_block = inode_free_block, |
592 | .write_alias = inode_write_alias, | 572 | .write_alias = inode_write_alias, |
593 | }; | 573 | }; |
594 | 574 | ||
595 | struct logfs_block_ops indirect_block_ops = { | 575 | struct logfs_block_ops indirect_block_ops = { |
596 | .write_block = indirect_write_block, | 576 | .write_block = indirect_write_block, |
597 | .block_level = indirect_block_level, | ||
598 | .free_block = indirect_free_block, | 577 | .free_block = indirect_free_block, |
599 | .write_alias = indirect_write_alias, | 578 | .write_alias = indirect_write_alias, |
600 | }; | 579 | }; |
@@ -1241,6 +1220,18 @@ static void free_shadow(struct inode *inode, struct logfs_shadow *shadow) | |||
1241 | mempool_free(shadow, super->s_shadow_pool); | 1220 | mempool_free(shadow, super->s_shadow_pool); |
1242 | } | 1221 | } |
1243 | 1222 | ||
1223 | static void mark_segment(struct shadow_tree *tree, u32 segno) | ||
1224 | { | ||
1225 | int err; | ||
1226 | |||
1227 | if (!btree_lookup32(&tree->segment_map, segno)) { | ||
1228 | err = btree_insert32(&tree->segment_map, segno, (void *)1, | ||
1229 | GFP_NOFS); | ||
1230 | BUG_ON(err); | ||
1231 | tree->no_shadowed_segments++; | ||
1232 | } | ||
1233 | } | ||
1234 | |||
1244 | /** | 1235 | /** |
1245 | * fill_shadow_tree - Propagate shadow tree changes due to a write | 1236 | * fill_shadow_tree - Propagate shadow tree changes due to a write |
1246 | * @inode: Inode owning the page | 1237 | * @inode: Inode owning the page |
@@ -1288,6 +1279,8 @@ static void fill_shadow_tree(struct inode *inode, struct page *page, | |||
1288 | 1279 | ||
1289 | super->s_dirty_used_bytes += shadow->new_len; | 1280 | super->s_dirty_used_bytes += shadow->new_len; |
1290 | super->s_dirty_free_bytes += shadow->old_len; | 1281 | super->s_dirty_free_bytes += shadow->old_len; |
1282 | mark_segment(tree, shadow->old_ofs >> super->s_segshift); | ||
1283 | mark_segment(tree, shadow->new_ofs >> super->s_segshift); | ||
1291 | } | 1284 | } |
1292 | } | 1285 | } |
1293 | 1286 | ||
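
The fill_shadow_tree hunk above records each shadow's old and new segment in the segment_map and bumps no_shadowed_segments only on the first insertion, so the journal later knows how many distinct segments hold shadows. A minimal user-space sketch of that "insert once, count distinct" idea follows; it uses a plain bitmap and a made-up SEGMENT_COUNT as stand-ins for the kernel's btree_insert32 and the real segment count.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SEGMENT_COUNT 1024               /* stand-in for the real segment count */

struct seg_map {
	unsigned char bits[SEGMENT_COUNT / 8];
	int no_shadowed_segments;        /* mirrors shadow_tree->no_shadowed_segments */
};

/* Record that a segment contains a shadow; count each segment only once. */
static void mark_segment(struct seg_map *map, uint32_t segno)
{
	unsigned char mask = 1u << (segno & 7);

	if (!(map->bits[segno >> 3] & mask)) {
		map->bits[segno >> 3] |= mask;
		map->no_shadowed_segments++;
	}
}

int main(void)
{
	struct seg_map map;
	uint32_t segnos[] = { 3, 17, 3, 42, 17 };
	size_t i;

	memset(&map, 0, sizeof(map));
	for (i = 0; i < sizeof(segnos) / sizeof(segnos[0]); i++)
		mark_segment(&map, segnos[i]);

	/* Prints 3: segments 3, 17 and 42, with duplicates ignored. */
	printf("%d\n", map.no_shadowed_segments);
	return 0;
}
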
@@ -1845,19 +1838,37 @@ static int __logfs_truncate(struct inode *inode, u64 size) | |||
1845 | return logfs_truncate_direct(inode, size); | 1838 | return logfs_truncate_direct(inode, size); |
1846 | } | 1839 | } |
1847 | 1840 | ||
1848 | int logfs_truncate(struct inode *inode, u64 size) | 1841 | /* |
1842 | * Truncate, by changing the segment file, can consume a fair amount | ||
1843 | * of resources. So back off from time to time and do some GC. | ||
1844 | * 8 or 2048 blocks should be well within safety limits even if | ||
1845 | * every single block resided in a different segment. | ||
1846 | */ | ||
1847 | #define TRUNCATE_STEP (8 * 1024 * 1024) | ||
1848 | int logfs_truncate(struct inode *inode, u64 target) | ||
1849 | { | 1849 | { |
1850 | struct super_block *sb = inode->i_sb; | 1850 | struct super_block *sb = inode->i_sb; |
1851 | int err; | 1851 | u64 size = i_size_read(inode); |
1852 | int err = 0; | ||
1852 | 1853 | ||
1853 | logfs_get_wblocks(sb, NULL, 1); | 1854 | size = ALIGN(size, TRUNCATE_STEP); |
1854 | err = __logfs_truncate(inode, size); | 1855 | while (size > target) { |
1855 | if (!err) | 1856 | if (size > TRUNCATE_STEP) |
1856 | err = __logfs_write_inode(inode, 0); | 1857 | size -= TRUNCATE_STEP; |
1857 | logfs_put_wblocks(sb, NULL, 1); | 1858 | else |
1859 | size = 0; | ||
1860 | if (size < target) | ||
1861 | size = target; | ||
1862 | |||
1863 | logfs_get_wblocks(sb, NULL, 1); | ||
1864 | err = __logfs_truncate(inode, size); | ||
1865 | if (!err) | ||
1866 | err = __logfs_write_inode(inode, 0); | ||
1867 | logfs_put_wblocks(sb, NULL, 1); | ||
1868 | } | ||
1858 | 1869 | ||
1859 | if (!err) | 1870 | if (!err) |
1860 | err = vmtruncate(inode, size); | 1871 | err = vmtruncate(inode, target); |
1861 | 1872 | ||
1862 | /* I don't trust error recovery yet. */ | 1873 | /* I don't trust error recovery yet. */ |
1863 | WARN_ON(err); | 1874 | WARN_ON(err); |
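
The new logfs_truncate walks the file size down in TRUNCATE_STEP chunks so each pass can drop the write lock and give GC a chance to run, as the comment in the hunk explains. The sketch below isolates just the step arithmetic in user space; do_truncate_step is a stand-in for __logfs_truncate plus the inode write under wblocks, and ALIGN is the usual round-up macro.

#include <stdint.h>
#include <stdio.h>

#define TRUNCATE_STEP (8 * 1024 * 1024)                 /* 8 MiB, as in the patch */
#define ALIGN(x, a)   (((x) + (a) - 1) / (a) * (a))     /* round up to a multiple of a */

/* Stand-in for __logfs_truncate() + __logfs_write_inode() under wblocks. */
static int do_truncate_step(uint64_t size)
{
	printf("truncate to %llu\n", (unsigned long long)size);
	return 0;
}

static int truncate_stepwise(uint64_t i_size, uint64_t target)
{
	uint64_t size = ALIGN(i_size, TRUNCATE_STEP);
	int err = 0;

	while (size > target) {
		if (size > TRUNCATE_STEP)
			size -= TRUNCATE_STEP;
		else
			size = 0;
		if (size < target)
			size = target;
		err = do_truncate_step(size);   /* each pass shrinks by at most one step */
		if (err)
			break;
	}
	return err;
}

int main(void)
{
	/* 20 MiB file truncated to 5 MiB: passes at 16 MiB, 8 MiB, then 5 MiB. */
	return truncate_stepwise(20 * 1024 * 1024, 5 * 1024 * 1024);
}
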
@@ -2251,8 +2262,6 @@ void logfs_cleanup_rw(struct super_block *sb) | |||
2251 | struct logfs_super *super = logfs_super(sb); | 2262 | struct logfs_super *super = logfs_super(sb); |
2252 | 2263 | ||
2253 | destroy_meta_inode(super->s_segfile_inode); | 2264 | destroy_meta_inode(super->s_segfile_inode); |
2254 | if (super->s_block_pool) | 2265 | logfs_mempool_destroy(super->s_block_pool); |
2255 | mempool_destroy(super->s_block_pool); | 2266 | logfs_mempool_destroy(super->s_shadow_pool); |
2256 | if (super->s_shadow_pool) | ||
2257 | mempool_destroy(super->s_shadow_pool); | ||
2258 | } | 2267 | } |
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index 801a3a141625..f77ce2b470ba 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c | |||
@@ -183,14 +183,8 @@ static int btree_write_alias(struct super_block *sb, struct logfs_block *block, | |||
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | 185 | ||
186 | static gc_level_t btree_block_level(struct logfs_block *block) | ||
187 | { | ||
188 | return expand_level(block->ino, block->level); | ||
189 | } | ||
190 | |||
191 | static struct logfs_block_ops btree_block_ops = { | 186 | static struct logfs_block_ops btree_block_ops = { |
192 | .write_block = btree_write_block, | 187 | .write_block = btree_write_block, |
193 | .block_level = btree_block_level, | ||
194 | .free_block = __free_block, | 188 | .free_block = __free_block, |
195 | .write_alias = btree_write_alias, | 189 | .write_alias = btree_write_alias, |
196 | }; | 190 | }; |
@@ -919,7 +913,7 @@ err: | |||
919 | for (i--; i >= 0; i--) | 913 | for (i--; i >= 0; i--) |
920 | free_area(super->s_area[i]); | 914 | free_area(super->s_area[i]); |
921 | free_area(super->s_journal_area); | 915 | free_area(super->s_journal_area); |
922 | mempool_destroy(super->s_alias_pool); | 916 | logfs_mempool_destroy(super->s_alias_pool); |
923 | return -ENOMEM; | 917 | return -ENOMEM; |
924 | } | 918 | } |
925 | 919 | ||
diff --git a/fs/logfs/super.c b/fs/logfs/super.c index b60bfac3263c..5866ee6e1327 100644 --- a/fs/logfs/super.c +++ b/fs/logfs/super.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include "logfs.h" | 12 | #include "logfs.h" |
13 | #include <linux/bio.h> | 13 | #include <linux/bio.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/blkdev.h> | ||
15 | #include <linux/mtd/mtd.h> | 16 | #include <linux/mtd/mtd.h> |
16 | #include <linux/statfs.h> | 17 | #include <linux/statfs.h> |
17 | #include <linux/buffer_head.h> | 18 | #include <linux/buffer_head.h> |
@@ -137,6 +138,10 @@ static int logfs_sb_set(struct super_block *sb, void *_super) | |||
137 | sb->s_fs_info = super; | 138 | sb->s_fs_info = super; |
138 | sb->s_mtd = super->s_mtd; | 139 | sb->s_mtd = super->s_mtd; |
139 | sb->s_bdev = super->s_bdev; | 140 | sb->s_bdev = super->s_bdev; |
141 | if (sb->s_bdev) | ||
142 | sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; | ||
143 | if (sb->s_mtd) | ||
144 | sb->s_bdi = sb->s_mtd->backing_dev_info; | ||
140 | return 0; | 145 | return 0; |
141 | } | 146 | } |
142 | 147 | ||
@@ -452,6 +457,8 @@ static int logfs_read_sb(struct super_block *sb, int read_only) | |||
452 | 457 | ||
453 | btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool); | 458 | btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool); |
454 | btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool); | 459 | btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool); |
460 | btree_init_mempool32(&super->s_shadow_tree.segment_map, | ||
461 | super->s_btree_pool); | ||
455 | 462 | ||
456 | ret = logfs_init_mapping(sb); | 463 | ret = logfs_init_mapping(sb); |
457 | if (ret) | 464 | if (ret) |
@@ -516,8 +523,8 @@ static void logfs_kill_sb(struct super_block *sb) | |||
516 | if (super->s_erase_page) | 523 | if (super->s_erase_page) |
517 | __free_page(super->s_erase_page); | 524 | __free_page(super->s_erase_page); |
518 | super->s_devops->put_device(sb); | 525 | super->s_devops->put_device(sb); |
519 | mempool_destroy(super->s_btree_pool); | 526 | logfs_mempool_destroy(super->s_btree_pool); |
520 | mempool_destroy(super->s_alias_pool); | 527 | logfs_mempool_destroy(super->s_alias_pool); |
521 | kfree(super); | 528 | kfree(super); |
522 | log_super("LogFS: Finished unmounting\n"); | 529 | log_super("LogFS: Finished unmounting\n"); |
523 | } | 530 | } |
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index cf98da1be23e..fa3385154023 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c | |||
@@ -526,10 +526,15 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) | |||
526 | sb->s_blocksize_bits = 10; | 526 | sb->s_blocksize_bits = 10; |
527 | sb->s_magic = NCP_SUPER_MAGIC; | 527 | sb->s_magic = NCP_SUPER_MAGIC; |
528 | sb->s_op = &ncp_sops; | 528 | sb->s_op = &ncp_sops; |
529 | sb->s_bdi = &server->bdi; | ||
529 | 530 | ||
530 | server = NCP_SBP(sb); | 531 | server = NCP_SBP(sb); |
531 | memset(server, 0, sizeof(*server)); | 532 | memset(server, 0, sizeof(*server)); |
532 | 533 | ||
534 | error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY); | ||
535 | if (error) | ||
536 | goto out_bdi; | ||
537 | |||
533 | server->ncp_filp = ncp_filp; | 538 | server->ncp_filp = ncp_filp; |
534 | server->ncp_sock = sock; | 539 | server->ncp_sock = sock; |
535 | 540 | ||
@@ -719,6 +724,8 @@ out_fput2: | |||
719 | if (server->info_filp) | 724 | if (server->info_filp) |
720 | fput(server->info_filp); | 725 | fput(server->info_filp); |
721 | out_fput: | 726 | out_fput: |
727 | bdi_destroy(&server->bdi); | ||
728 | out_bdi: | ||
722 | /* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>: | 729 | /* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>: |
723 | * | 730 | * |
724 | * The previously used put_filp(ncp_filp); was bogous, since | 731 | * The previously used put_filp(ncp_filp); was bogous, since |
@@ -756,6 +763,7 @@ static void ncp_put_super(struct super_block *sb) | |||
756 | kill_pid(server->m.wdog_pid, SIGTERM, 1); | 763 | kill_pid(server->m.wdog_pid, SIGTERM, 1); |
757 | put_pid(server->m.wdog_pid); | 764 | put_pid(server->m.wdog_pid); |
758 | 765 | ||
766 | bdi_destroy(&server->bdi); | ||
759 | kfree(server->priv.data); | 767 | kfree(server->priv.data); |
760 | kfree(server->auth.object_name); | 768 | kfree(server->auth.object_name); |
761 | vfree(server->rxbuf); | 769 | vfree(server->rxbuf); |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 2a3d352c0bff..a8766c4ef2e0 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -1294,7 +1294,8 @@ static int nfs4_init_server(struct nfs_server *server, | |||
1294 | 1294 | ||
1295 | /* Initialise the client representation from the mount data */ | 1295 | /* Initialise the client representation from the mount data */ |
1296 | server->flags = data->flags; | 1296 | server->flags = data->flags; |
1297 | server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR; | 1297 | server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR| |
1298 | NFS_CAP_POSIX_LOCK; | ||
1298 | server->options = data->options; | 1299 | server->options = data->options; |
1299 | 1300 | ||
1300 | /* Get a client record */ | 1301 | /* Get a client record */ |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index c6f2750648f4..db3ad849a289 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -837,6 +837,8 @@ out_zap_parent: | |||
837 | /* If we have submounts, don't unhash ! */ | 837 | /* If we have submounts, don't unhash ! */ |
838 | if (have_submounts(dentry)) | 838 | if (have_submounts(dentry)) |
839 | goto out_valid; | 839 | goto out_valid; |
840 | if (dentry->d_flags & DCACHE_DISCONNECTED) | ||
841 | goto out_valid; | ||
840 | shrink_dcache_parent(dentry); | 842 | shrink_dcache_parent(dentry); |
841 | } | 843 | } |
842 | d_drop(dentry); | 844 | d_drop(dentry); |
@@ -1025,12 +1027,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry | |||
1025 | res = NULL; | 1027 | res = NULL; |
1026 | goto out; | 1028 | goto out; |
1027 | /* This turned out not to be a regular file */ | 1029 | /* This turned out not to be a regular file */ |
1030 | case -EISDIR: | ||
1028 | case -ENOTDIR: | 1031 | case -ENOTDIR: |
1029 | goto no_open; | 1032 | goto no_open; |
1030 | case -ELOOP: | 1033 | case -ELOOP: |
1031 | if (!(nd->intent.open.flags & O_NOFOLLOW)) | 1034 | if (!(nd->intent.open.flags & O_NOFOLLOW)) |
1032 | goto no_open; | 1035 | goto no_open; |
1033 | /* case -EISDIR: */ | ||
1034 | /* case -EINVAL: */ | 1036 | /* case -EINVAL: */ |
1035 | default: | 1037 | default: |
1036 | goto out; | 1038 | goto out; |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 737128f777f3..50a56edca0b5 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -623,10 +623,10 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c | |||
623 | list_for_each_entry(pos, &nfsi->open_files, list) { | 623 | list_for_each_entry(pos, &nfsi->open_files, list) { |
624 | if (cred != NULL && pos->cred != cred) | 624 | if (cred != NULL && pos->cred != cred) |
625 | continue; | 625 | continue; |
626 | if ((pos->mode & mode) == mode) { | 626 | if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode) |
627 | ctx = get_nfs_open_context(pos); | 627 | continue; |
628 | break; | 628 | ctx = get_nfs_open_context(pos); |
629 | } | 629 | break; |
630 | } | 630 | } |
631 | spin_unlock(&inode->i_lock); | 631 | spin_unlock(&inode->i_lock); |
632 | return ctx; | 632 | return ctx; |
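
The nfs_find_open_context change above replaces a subset test with an exact match on the read/write bits, so a lookup for a read-only context no longer picks up a read-write one. A small sketch of the two predicates over plain flag bits; the FMODE_READ/FMODE_WRITE values here are the conventional 0x1/0x2 used for illustration, not taken verbatim from the kernel headers.

#include <stdbool.h>
#include <stdio.h>

#define FMODE_READ  0x1
#define FMODE_WRITE 0x2

/* Old behaviour: any context whose mode is a superset of the request matches. */
static bool subset_match(unsigned ctx_mode, unsigned want)
{
	return (ctx_mode & want) == want;
}

/* New behaviour: the read/write bits must match the request exactly. */
static bool exact_match(unsigned ctx_mode, unsigned want)
{
	return (ctx_mode & (FMODE_READ | FMODE_WRITE)) == want;
}

int main(void)
{
	unsigned rdwr_ctx = FMODE_READ | FMODE_WRITE;
	unsigned want_read = FMODE_READ;

	/* A read-only lookup used to match a read-write context... */
	printf("subset: %d\n", subset_match(rdwr_ctx, want_read)); /* prints 1 */
	/* ...but with the exact test it no longer does. */
	printf("exact:  %d\n", exact_match(rdwr_ctx, want_read));  /* prints 0 */
	return 0;
}
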
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index fe0cd9eb1d4d..638067007c65 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -1523,6 +1523,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) | |||
1523 | nfs_post_op_update_inode(dir, o_res->dir_attr); | 1523 | nfs_post_op_update_inode(dir, o_res->dir_attr); |
1524 | } else | 1524 | } else |
1525 | nfs_refresh_inode(dir, o_res->dir_attr); | 1525 | nfs_refresh_inode(dir, o_res->dir_attr); |
1526 | if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) | ||
1527 | server->caps &= ~NFS_CAP_POSIX_LOCK; | ||
1526 | if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { | 1528 | if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { |
1527 | status = _nfs4_proc_open_confirm(data); | 1529 | status = _nfs4_proc_open_confirm(data); |
1528 | if (status != 0) | 1530 | if (status != 0) |
@@ -1664,7 +1666,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in | |||
1664 | status = PTR_ERR(state); | 1666 | status = PTR_ERR(state); |
1665 | if (IS_ERR(state)) | 1667 | if (IS_ERR(state)) |
1666 | goto err_opendata_put; | 1668 | goto err_opendata_put; |
1667 | if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0) | 1669 | if (server->caps & NFS_CAP_POSIX_LOCK) |
1668 | set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); | 1670 | set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); |
1669 | nfs4_opendata_put(opendata); | 1671 | nfs4_opendata_put(opendata); |
1670 | nfs4_put_state_owner(sp); | 1672 | nfs4_put_state_owner(sp); |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 53ff70e23993..de38d63aa920 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -201,6 +201,7 @@ static int nfs_set_page_writeback(struct page *page) | |||
201 | struct inode *inode = page->mapping->host; | 201 | struct inode *inode = page->mapping->host; |
202 | struct nfs_server *nfss = NFS_SERVER(inode); | 202 | struct nfs_server *nfss = NFS_SERVER(inode); |
203 | 203 | ||
204 | page_cache_get(page); | ||
204 | if (atomic_long_inc_return(&nfss->writeback) > | 205 | if (atomic_long_inc_return(&nfss->writeback) > |
205 | NFS_CONGESTION_ON_THRESH) { | 206 | NFS_CONGESTION_ON_THRESH) { |
206 | set_bdi_congested(&nfss->backing_dev_info, | 207 | set_bdi_congested(&nfss->backing_dev_info, |
@@ -216,6 +217,7 @@ static void nfs_end_page_writeback(struct page *page) | |||
216 | struct nfs_server *nfss = NFS_SERVER(inode); | 217 | struct nfs_server *nfss = NFS_SERVER(inode); |
217 | 218 | ||
218 | end_page_writeback(page); | 219 | end_page_writeback(page); |
220 | page_cache_release(page); | ||
219 | if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) | 221 | if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) |
220 | clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); | 222 | clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); |
221 | } | 223 | } |
@@ -421,6 +423,7 @@ static void | |||
421 | nfs_mark_request_dirty(struct nfs_page *req) | 423 | nfs_mark_request_dirty(struct nfs_page *req) |
422 | { | 424 | { |
423 | __set_page_dirty_nobuffers(req->wb_page); | 425 | __set_page_dirty_nobuffers(req->wb_page); |
426 | __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC); | ||
424 | } | 427 | } |
425 | 428 | ||
426 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 429 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
@@ -660,9 +663,11 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, | |||
660 | req = nfs_setup_write_request(ctx, page, offset, count); | 663 | req = nfs_setup_write_request(ctx, page, offset, count); |
661 | if (IS_ERR(req)) | 664 | if (IS_ERR(req)) |
662 | return PTR_ERR(req); | 665 | return PTR_ERR(req); |
666 | nfs_mark_request_dirty(req); | ||
663 | /* Update file length */ | 667 | /* Update file length */ |
664 | nfs_grow_file(page, offset, count); | 668 | nfs_grow_file(page, offset, count); |
665 | nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); | 669 | nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); |
670 | nfs_mark_request_dirty(req); | ||
666 | nfs_clear_page_tag_locked(req); | 671 | nfs_clear_page_tag_locked(req); |
667 | return 0; | 672 | return 0; |
668 | } | 673 | } |
@@ -739,8 +744,6 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
739 | status = nfs_writepage_setup(ctx, page, offset, count); | 744 | status = nfs_writepage_setup(ctx, page, offset, count); |
740 | if (status < 0) | 745 | if (status < 0) |
741 | nfs_set_pageerror(page); | 746 | nfs_set_pageerror(page); |
742 | else | ||
743 | __set_page_dirty_nobuffers(page); | ||
744 | 747 | ||
745 | dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", | 748 | dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", |
746 | status, (long long)i_size_read(inode)); | 749 | status, (long long)i_size_read(inode)); |
@@ -749,13 +752,12 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
749 | 752 | ||
750 | static void nfs_writepage_release(struct nfs_page *req) | 753 | static void nfs_writepage_release(struct nfs_page *req) |
751 | { | 754 | { |
755 | struct page *page = req->wb_page; | ||
752 | 756 | ||
753 | if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) { | 757 | if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) |
754 | nfs_end_page_writeback(req->wb_page); | ||
755 | nfs_inode_remove_request(req); | 758 | nfs_inode_remove_request(req); |
756 | } else | ||
757 | nfs_end_page_writeback(req->wb_page); | ||
758 | nfs_clear_page_tag_locked(req); | 759 | nfs_clear_page_tag_locked(req); |
760 | nfs_end_page_writeback(page); | ||
759 | } | 761 | } |
760 | 762 | ||
761 | static int flush_task_priority(int how) | 763 | static int flush_task_priority(int how) |
@@ -779,7 +781,6 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
779 | int how) | 781 | int how) |
780 | { | 782 | { |
781 | struct inode *inode = req->wb_context->path.dentry->d_inode; | 783 | struct inode *inode = req->wb_context->path.dentry->d_inode; |
782 | int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; | ||
783 | int priority = flush_task_priority(how); | 784 | int priority = flush_task_priority(how); |
784 | struct rpc_task *task; | 785 | struct rpc_task *task; |
785 | struct rpc_message msg = { | 786 | struct rpc_message msg = { |
@@ -794,9 +795,10 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
794 | .callback_ops = call_ops, | 795 | .callback_ops = call_ops, |
795 | .callback_data = data, | 796 | .callback_data = data, |
796 | .workqueue = nfsiod_workqueue, | 797 | .workqueue = nfsiod_workqueue, |
797 | .flags = flags, | 798 | .flags = RPC_TASK_ASYNC, |
798 | .priority = priority, | 799 | .priority = priority, |
799 | }; | 800 | }; |
801 | int ret = 0; | ||
800 | 802 | ||
801 | /* Set up the RPC argument and reply structs | 803 | /* Set up the RPC argument and reply structs |
802 | * NB: take care not to mess about with data->commit et al. */ | 804 | * NB: take care not to mess about with data->commit et al. */ |
@@ -835,10 +837,18 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
835 | (unsigned long long)data->args.offset); | 837 | (unsigned long long)data->args.offset); |
836 | 838 | ||
837 | task = rpc_run_task(&task_setup_data); | 839 | task = rpc_run_task(&task_setup_data); |
838 | if (IS_ERR(task)) | 840 | if (IS_ERR(task)) { |
839 | return PTR_ERR(task); | 841 | ret = PTR_ERR(task); |
842 | goto out; | ||
843 | } | ||
844 | if (how & FLUSH_SYNC) { | ||
845 | ret = rpc_wait_for_completion_task(task); | ||
846 | if (ret == 0) | ||
847 | ret = task->tk_status; | ||
848 | } | ||
840 | rpc_put_task(task); | 849 | rpc_put_task(task); |
841 | return 0; | 850 | out: |
851 | return ret; | ||
842 | } | 852 | } |
843 | 853 | ||
844 | /* If a nfs_flush_* function fails, it should remove reqs from @head and | 854 | /* If a nfs_flush_* function fails, it should remove reqs from @head and |
@@ -847,9 +857,11 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
847 | */ | 857 | */ |
848 | static void nfs_redirty_request(struct nfs_page *req) | 858 | static void nfs_redirty_request(struct nfs_page *req) |
849 | { | 859 | { |
860 | struct page *page = req->wb_page; | ||
861 | |||
850 | nfs_mark_request_dirty(req); | 862 | nfs_mark_request_dirty(req); |
851 | nfs_end_page_writeback(req->wb_page); | ||
852 | nfs_clear_page_tag_locked(req); | 863 | nfs_clear_page_tag_locked(req); |
864 | nfs_end_page_writeback(page); | ||
853 | } | 865 | } |
854 | 866 | ||
855 | /* | 867 | /* |
@@ -1084,16 +1096,15 @@ static void nfs_writeback_release_full(void *calldata) | |||
1084 | if (nfs_write_need_commit(data)) { | 1096 | if (nfs_write_need_commit(data)) { |
1085 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | 1097 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); |
1086 | nfs_mark_request_commit(req); | 1098 | nfs_mark_request_commit(req); |
1087 | nfs_end_page_writeback(page); | ||
1088 | dprintk(" marked for commit\n"); | 1099 | dprintk(" marked for commit\n"); |
1089 | goto next; | 1100 | goto next; |
1090 | } | 1101 | } |
1091 | dprintk(" OK\n"); | 1102 | dprintk(" OK\n"); |
1092 | remove_request: | 1103 | remove_request: |
1093 | nfs_end_page_writeback(page); | ||
1094 | nfs_inode_remove_request(req); | 1104 | nfs_inode_remove_request(req); |
1095 | next: | 1105 | next: |
1096 | nfs_clear_page_tag_locked(req); | 1106 | nfs_clear_page_tag_locked(req); |
1107 | nfs_end_page_writeback(page); | ||
1097 | } | 1108 | } |
1098 | nfs_writedata_release(calldata); | 1109 | nfs_writedata_release(calldata); |
1099 | } | 1110 | } |
@@ -1207,7 +1218,6 @@ static int nfs_commit_rpcsetup(struct list_head *head, | |||
1207 | { | 1218 | { |
1208 | struct nfs_page *first = nfs_list_entry(head->next); | 1219 | struct nfs_page *first = nfs_list_entry(head->next); |
1209 | struct inode *inode = first->wb_context->path.dentry->d_inode; | 1220 | struct inode *inode = first->wb_context->path.dentry->d_inode; |
1210 | int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; | ||
1211 | int priority = flush_task_priority(how); | 1221 | int priority = flush_task_priority(how); |
1212 | struct rpc_task *task; | 1222 | struct rpc_task *task; |
1213 | struct rpc_message msg = { | 1223 | struct rpc_message msg = { |
@@ -1222,7 +1232,7 @@ static int nfs_commit_rpcsetup(struct list_head *head, | |||
1222 | .callback_ops = &nfs_commit_ops, | 1232 | .callback_ops = &nfs_commit_ops, |
1223 | .callback_data = data, | 1233 | .callback_data = data, |
1224 | .workqueue = nfsiod_workqueue, | 1234 | .workqueue = nfsiod_workqueue, |
1225 | .flags = flags, | 1235 | .flags = RPC_TASK_ASYNC, |
1226 | .priority = priority, | 1236 | .priority = priority, |
1227 | }; | 1237 | }; |
1228 | 1238 | ||
@@ -1252,6 +1262,8 @@ static int nfs_commit_rpcsetup(struct list_head *head, | |||
1252 | task = rpc_run_task(&task_setup_data); | 1262 | task = rpc_run_task(&task_setup_data); |
1253 | if (IS_ERR(task)) | 1263 | if (IS_ERR(task)) |
1254 | return PTR_ERR(task); | 1264 | return PTR_ERR(task); |
1265 | if (how & FLUSH_SYNC) | ||
1266 | rpc_wait_for_completion_task(task); | ||
1255 | rpc_put_task(task); | 1267 | rpc_put_task(task); |
1256 | return 0; | 1268 | return 0; |
1257 | } | 1269 | } |
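
Both nfs_write_rpcsetup and nfs_commit_rpcsetup now always launch the RPC task asynchronously and, when FLUSH_SYNC is set, have the caller wait for completion and pick up the task's own status afterwards. The sketch below shows the same control flow with pthreads standing in for the RPC task machinery; the FLUSH_SYNC value and the task struct are illustrative only (build with -pthread).

#include <pthread.h>
#include <stdio.h>

#define FLUSH_SYNC 1                         /* illustrative flag value */

struct task {
	pthread_t thread;
	int status;                          /* stand-in for task->tk_status */
};

static void *do_write(void *arg)
{
	struct task *t = arg;
	/* ... issue the write ... */
	t->status = 0;                       /* pretend the write succeeded */
	return NULL;
}

/* Always start the work asynchronously; only wait for it when asked to. */
static int run_write(struct task *t, int how)
{
	int ret = 0;

	if (pthread_create(&t->thread, NULL, do_write, t))
		return -1;                   /* could not even start the task */
	if (how & FLUSH_SYNC) {
		ret = pthread_join(t->thread, NULL);
		if (ret == 0)
			ret = t->status;     /* propagate the task's own result */
	} else {
		pthread_detach(t->thread);   /* fire and forget */
	}
	return ret;
}

int main(void)
{
	struct task t = { .status = -1 };

	printf("sync write returned %d\n", run_write(&t, FLUSH_SYNC));
	return 0;
}
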
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index e1703175ee28..34ccf815ea8a 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -161,10 +161,10 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes) | |||
161 | argp->p = page_address(argp->pagelist[0]); | 161 | argp->p = page_address(argp->pagelist[0]); |
162 | argp->pagelist++; | 162 | argp->pagelist++; |
163 | if (argp->pagelen < PAGE_SIZE) { | 163 | if (argp->pagelen < PAGE_SIZE) { |
164 | argp->end = p + (argp->pagelen>>2); | 164 | argp->end = argp->p + (argp->pagelen>>2); |
165 | argp->pagelen = 0; | 165 | argp->pagelen = 0; |
166 | } else { | 166 | } else { |
167 | argp->end = p + (PAGE_SIZE>>2); | 167 | argp->end = argp->p + (PAGE_SIZE>>2); |
168 | argp->pagelen -= PAGE_SIZE; | 168 | argp->pagelen -= PAGE_SIZE; |
169 | } | 169 | } |
170 | memcpy(((char*)p)+avail, argp->p, (nbytes - avail)); | 170 | memcpy(((char*)p)+avail, argp->p, (nbytes - avail)); |
@@ -1426,10 +1426,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp) | |||
1426 | argp->p = page_address(argp->pagelist[0]); | 1426 | argp->p = page_address(argp->pagelist[0]); |
1427 | argp->pagelist++; | 1427 | argp->pagelist++; |
1428 | if (argp->pagelen < PAGE_SIZE) { | 1428 | if (argp->pagelen < PAGE_SIZE) { |
1429 | argp->end = p + (argp->pagelen>>2); | 1429 | argp->end = argp->p + (argp->pagelen>>2); |
1430 | argp->pagelen = 0; | 1430 | argp->pagelen = 0; |
1431 | } else { | 1431 | } else { |
1432 | argp->end = p + (PAGE_SIZE>>2); | 1432 | argp->end = argp->p + (PAGE_SIZE>>2); |
1433 | argp->pagelen -= PAGE_SIZE; | 1433 | argp->pagelen -= PAGE_SIZE; |
1434 | } | 1434 | } |
1435 | } | 1435 | } |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 7621db800a74..8418fcc0a6ab 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -2909,7 +2909,7 @@ out_no_task: | |||
2909 | */ | 2909 | */ |
2910 | static const struct pid_entry tid_base_stuff[] = { | 2910 | static const struct pid_entry tid_base_stuff[] = { |
2911 | DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), | 2911 | DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), |
2912 | DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations), | 2912 | DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations), |
2913 | REG("environ", S_IRUSR, proc_environ_operations), | 2913 | REG("environ", S_IRUSR, proc_environ_operations), |
2914 | INF("auxv", S_IRUSR, proc_pid_auxv), | 2914 | INF("auxv", S_IRUSR, proc_pid_auxv), |
2915 | ONE("status", S_IRUGO, proc_pid_status), | 2915 | ONE("status", S_IRUGO, proc_pid_status), |
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig index dad7fb247ddc..3e21b1e2ad3a 100644 --- a/fs/quota/Kconfig +++ b/fs/quota/Kconfig | |||
@@ -33,6 +33,14 @@ config PRINT_QUOTA_WARNING | |||
33 | Note that this behavior is currently deprecated and may go away in | 33 | Note that this behavior is currently deprecated and may go away in |
34 | future. Please use notification via netlink socket instead. | 34 | future. Please use notification via netlink socket instead. |
35 | 35 | ||
36 | config QUOTA_DEBUG | ||
37 | bool "Additional quota sanity checks" | ||
38 | depends on QUOTA | ||
39 | default n | ||
40 | help | ||
41 | If you say Y here, the quota subsystem will perform some additional | ||
42 | sanity checks of quota internal structures. If unsure, say N. | ||
43 | |||
36 | # Generic support for tree structured quota files. Selected when needed. | 44 | # Generic support for tree structured quota files. Selected when needed. |
37 | config QUOTA_TREE | 45 | config QUOTA_TREE |
38 | tristate | 46 | tristate |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index a0a9405b202a..788b5802a7ce 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -80,8 +80,6 @@ | |||
80 | 80 | ||
81 | #include <asm/uaccess.h> | 81 | #include <asm/uaccess.h> |
82 | 82 | ||
83 | #define __DQUOT_PARANOIA | ||
84 | |||
85 | /* | 83 | /* |
86 | * There are three quota SMP locks. dq_list_lock protects all lists with quotas | 84 | * There are three quota SMP locks. dq_list_lock protects all lists with quotas |
87 | * and quota formats, dqstats structure containing statistics about the lists | 85 | * and quota formats, dqstats structure containing statistics about the lists |
@@ -695,7 +693,7 @@ void dqput(struct dquot *dquot) | |||
695 | 693 | ||
696 | if (!dquot) | 694 | if (!dquot) |
697 | return; | 695 | return; |
698 | #ifdef __DQUOT_PARANOIA | 696 | #ifdef CONFIG_QUOTA_DEBUG |
699 | if (!atomic_read(&dquot->dq_count)) { | 697 | if (!atomic_read(&dquot->dq_count)) { |
700 | printk("VFS: dqput: trying to free free dquot\n"); | 698 | printk("VFS: dqput: trying to free free dquot\n"); |
701 | printk("VFS: device %s, dquot of %s %d\n", | 699 | printk("VFS: device %s, dquot of %s %d\n", |
@@ -748,7 +746,7 @@ we_slept: | |||
748 | goto we_slept; | 746 | goto we_slept; |
749 | } | 747 | } |
750 | atomic_dec(&dquot->dq_count); | 748 | atomic_dec(&dquot->dq_count); |
751 | #ifdef __DQUOT_PARANOIA | 749 | #ifdef CONFIG_QUOTA_DEBUG |
752 | /* sanity check */ | 750 | /* sanity check */ |
753 | BUG_ON(!list_empty(&dquot->dq_free)); | 751 | BUG_ON(!list_empty(&dquot->dq_free)); |
754 | #endif | 752 | #endif |
@@ -845,7 +843,7 @@ we_slept: | |||
845 | dquot = NULL; | 843 | dquot = NULL; |
846 | goto out; | 844 | goto out; |
847 | } | 845 | } |
848 | #ifdef __DQUOT_PARANOIA | 846 | #ifdef CONFIG_QUOTA_DEBUG |
849 | BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */ | 847 | BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */ |
850 | #endif | 848 | #endif |
851 | out: | 849 | out: |
@@ -874,7 +872,7 @@ static int dqinit_needed(struct inode *inode, int type) | |||
874 | static void add_dquot_ref(struct super_block *sb, int type) | 872 | static void add_dquot_ref(struct super_block *sb, int type) |
875 | { | 873 | { |
876 | struct inode *inode, *old_inode = NULL; | 874 | struct inode *inode, *old_inode = NULL; |
877 | #ifdef __DQUOT_PARANOIA | 875 | #ifdef CONFIG_QUOTA_DEBUG |
878 | int reserved = 0; | 876 | int reserved = 0; |
879 | #endif | 877 | #endif |
880 | 878 | ||
@@ -882,7 +880,7 @@ static void add_dquot_ref(struct super_block *sb, int type) | |||
882 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | 880 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { |
883 | if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) | 881 | if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) |
884 | continue; | 882 | continue; |
885 | #ifdef __DQUOT_PARANOIA | 883 | #ifdef CONFIG_QUOTA_DEBUG |
886 | if (unlikely(inode_get_rsv_space(inode) > 0)) | 884 | if (unlikely(inode_get_rsv_space(inode) > 0)) |
887 | reserved = 1; | 885 | reserved = 1; |
888 | #endif | 886 | #endif |
@@ -907,7 +905,7 @@ static void add_dquot_ref(struct super_block *sb, int type) | |||
907 | spin_unlock(&inode_lock); | 905 | spin_unlock(&inode_lock); |
908 | iput(old_inode); | 906 | iput(old_inode); |
909 | 907 | ||
910 | #ifdef __DQUOT_PARANOIA | 908 | #ifdef CONFIG_QUOTA_DEBUG |
911 | if (reserved) { | 909 | if (reserved) { |
912 | printk(KERN_WARNING "VFS (%s): Writes happened before quota" | 910 | printk(KERN_WARNING "VFS (%s): Writes happened before quota" |
913 | " was turned on thus quota information is probably " | 911 | " was turned on thus quota information is probably " |
@@ -940,7 +938,7 @@ static int remove_inode_dquot_ref(struct inode *inode, int type, | |||
940 | inode->i_dquot[type] = NULL; | 938 | inode->i_dquot[type] = NULL; |
941 | if (dquot) { | 939 | if (dquot) { |
942 | if (dqput_blocks(dquot)) { | 940 | if (dqput_blocks(dquot)) { |
943 | #ifdef __DQUOT_PARANOIA | 941 | #ifdef CONFIG_QUOTA_DEBUG |
944 | if (atomic_read(&dquot->dq_count) != 1) | 942 | if (atomic_read(&dquot->dq_count) != 1) |
945 | printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); | 943 | printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); |
946 | #endif | 944 | #endif |
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index f8a6075abf50..07930449a958 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c | |||
@@ -46,8 +46,6 @@ static inline bool is_privroot_deh(struct dentry *dir, | |||
46 | struct reiserfs_de_head *deh) | 46 | struct reiserfs_de_head *deh) |
47 | { | 47 | { |
48 | struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root; | 48 | struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root; |
49 | if (reiserfs_expose_privroot(dir->d_sb)) | ||
50 | return 0; | ||
51 | return (dir == dir->d_parent && privroot->d_inode && | 49 | return (dir == dir->d_parent && privroot->d_inode && |
52 | deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid); | 50 | deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid); |
53 | } | 51 | } |
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 4f9586bb7631..e7cc00e636dc 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c | |||
@@ -554,7 +554,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th, | |||
554 | if (!err && new_size < i_size_read(dentry->d_inode)) { | 554 | if (!err && new_size < i_size_read(dentry->d_inode)) { |
555 | struct iattr newattrs = { | 555 | struct iattr newattrs = { |
556 | .ia_ctime = current_fs_time(inode->i_sb), | 556 | .ia_ctime = current_fs_time(inode->i_sb), |
557 | .ia_size = buffer_size, | 557 | .ia_size = new_size, |
558 | .ia_valid = ATTR_SIZE | ATTR_CTIME, | 558 | .ia_valid = ATTR_SIZE | ATTR_CTIME, |
559 | }; | 559 | }; |
560 | 560 | ||
@@ -973,21 +973,13 @@ int reiserfs_permission(struct inode *inode, int mask) | |||
973 | return generic_permission(inode, mask, NULL); | 973 | return generic_permission(inode, mask, NULL); |
974 | } | 974 | } |
975 | 975 | ||
976 | /* This will catch lookups from the fs root to .reiserfs_priv */ | 976 | static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) |
977 | static int | ||
978 | xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name) | ||
979 | { | 977 | { |
980 | struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root; | 978 | return -EPERM; |
981 | if (container_of(q1, struct dentry, d_name) == priv_root) | ||
982 | return -ENOENT; | ||
983 | if (q1->len == name->len && | ||
984 | !memcmp(q1->name, name->name, name->len)) | ||
985 | return 0; | ||
986 | return 1; | ||
987 | } | 979 | } |
988 | 980 | ||
989 | static const struct dentry_operations xattr_lookup_poison_ops = { | 981 | static const struct dentry_operations xattr_lookup_poison_ops = { |
990 | .d_compare = xattr_lookup_poison, | 982 | .d_revalidate = xattr_hide_revalidate, |
991 | }; | 983 | }; |
992 | 984 | ||
993 | int reiserfs_lookup_privroot(struct super_block *s) | 985 | int reiserfs_lookup_privroot(struct super_block *s) |
@@ -1001,8 +993,7 @@ int reiserfs_lookup_privroot(struct super_block *s) | |||
1001 | strlen(PRIVROOT_NAME)); | 993 | strlen(PRIVROOT_NAME)); |
1002 | if (!IS_ERR(dentry)) { | 994 | if (!IS_ERR(dentry)) { |
1003 | REISERFS_SB(s)->priv_root = dentry; | 995 | REISERFS_SB(s)->priv_root = dentry; |
1004 | if (!reiserfs_expose_privroot(s)) | 996 | dentry->d_op = &xattr_lookup_poison_ops; |
1005 | s->s_root->d_op = &xattr_lookup_poison_ops; | ||
1006 | if (dentry->d_inode) | 997 | if (dentry->d_inode) |
1007 | dentry->d_inode->i_flags |= S_PRIVATE; | 998 | dentry->d_inode->i_flags |= S_PRIVATE; |
1008 | } else | 999 | } else |
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c index 1c4c8f089970..dfa1d67f8fca 100644 --- a/fs/smbfs/inode.c +++ b/fs/smbfs/inode.c | |||
@@ -479,6 +479,7 @@ smb_put_super(struct super_block *sb) | |||
479 | if (server->conn_pid) | 479 | if (server->conn_pid) |
480 | kill_pid(server->conn_pid, SIGTERM, 1); | 480 | kill_pid(server->conn_pid, SIGTERM, 1); |
481 | 481 | ||
482 | bdi_destroy(&server->bdi); | ||
482 | kfree(server->ops); | 483 | kfree(server->ops); |
483 | smb_unload_nls(server); | 484 | smb_unload_nls(server); |
484 | sb->s_fs_info = NULL; | 485 | sb->s_fs_info = NULL; |
@@ -525,6 +526,11 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent) | |||
525 | if (!server) | 526 | if (!server) |
526 | goto out_no_server; | 527 | goto out_no_server; |
527 | sb->s_fs_info = server; | 528 | sb->s_fs_info = server; |
529 | |||
530 | if (bdi_setup_and_register(&server->bdi, "smbfs", BDI_CAP_MAP_COPY)) | ||
531 | goto out_bdi; | ||
532 | |||
533 | sb->s_bdi = &server->bdi; | ||
528 | 534 | ||
529 | server->super_block = sb; | 535 | server->super_block = sb; |
530 | server->mnt = NULL; | 536 | server->mnt = NULL; |
@@ -624,6 +630,8 @@ out_no_smbiod: | |||
624 | out_bad_option: | 630 | out_bad_option: |
625 | kfree(mem); | 631 | kfree(mem); |
626 | out_no_mem: | 632 | out_no_mem: |
633 | bdi_destroy(&server->bdi); | ||
634 | out_bdi: | ||
627 | if (!server->mnt) | 635 | if (!server->mnt) |
628 | printk(KERN_ERR "smb_fill_super: allocation failure\n"); | 636 | printk(KERN_ERR "smb_fill_super: allocation failure\n"); |
629 | sb->s_fs_info = NULL; | 637 | sb->s_fs_info = NULL; |
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 1cb0d81b164b..653c030eb840 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c | |||
@@ -87,9 +87,8 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index, | |||
87 | u64 cur_index = index >> msblk->devblksize_log2; | 87 | u64 cur_index = index >> msblk->devblksize_log2; |
88 | int bytes, compressed, b = 0, k = 0, page = 0, avail; | 88 | int bytes, compressed, b = 0, k = 0, page = 0, avail; |
89 | 89 | ||
90 | 90 | bh = kcalloc(((srclength + msblk->devblksize - 1) | |
91 | bh = kcalloc((msblk->block_size >> msblk->devblksize_log2) + 1, | 91 | >> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL); |
92 | sizeof(*bh), GFP_KERNEL); | ||
93 | if (bh == NULL) | 92 | if (bh == NULL) |
94 | return -ENOMEM; | 93 | return -ENOMEM; |
95 | 94 | ||
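
The kcalloc change above sizes the buffer_head array from the requested length rather than from the (potentially much larger) filesystem block size. The count is a plain round-up division plus one spare entry; a quick sketch of the arithmetic, where the device block size used in main is just an example configuration.

#include <stdio.h>

/* Number of device blocks needed to cover srclength bytes, plus one spare,
 * matching ((srclength + msblk->devblksize - 1) >> msblk->devblksize_log2) + 1. */
static int nr_buffer_heads(unsigned long srclength, int devblksize_log2)
{
	unsigned long devblksize = 1UL << devblksize_log2;

	return (int)(((srclength + devblksize - 1) >> devblksize_log2) + 1);
}

int main(void)
{
	/* 100 KiB request with a 4 KiB device block size: 25 blocks + 1 = 26 entries. */
	printf("%d\n", nr_buffer_heads(100 * 1024, 12));
	return 0;
}
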
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 3550aec2f655..48b6f4a385a6 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c | |||
@@ -275,7 +275,8 @@ allocate_root: | |||
275 | 275 | ||
276 | err = squashfs_read_inode(root, root_inode); | 276 | err = squashfs_read_inode(root, root_inode); |
277 | if (err) { | 277 | if (err) { |
278 | iget_failed(root); | 278 | make_bad_inode(root); |
279 | iput(root); | ||
279 | goto failed_mount; | 280 | goto failed_mount; |
280 | } | 281 | } |
281 | insert_inode_hash(root); | 282 | insert_inode_hash(root); |
@@ -353,6 +354,7 @@ static void squashfs_put_super(struct super_block *sb) | |||
353 | kfree(sbi->id_table); | 354 | kfree(sbi->id_table); |
354 | kfree(sbi->fragment_index); | 355 | kfree(sbi->fragment_index); |
355 | kfree(sbi->meta_index); | 356 | kfree(sbi->meta_index); |
357 | kfree(sbi->inode_lookup_table); | ||
356 | kfree(sb->s_fs_info); | 358 | kfree(sb->s_fs_info); |
357 | sb->s_fs_info = NULL; | 359 | sb->s_fs_info = NULL; |
358 | } | 360 | } |
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c index 15a03d0fb9f3..7a603874e483 100644 --- a/fs/squashfs/zlib_wrapper.c +++ b/fs/squashfs/zlib_wrapper.c | |||
@@ -128,8 +128,9 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer, | |||
128 | goto release_mutex; | 128 | goto release_mutex; |
129 | } | 129 | } |
130 | 130 | ||
131 | length = stream->total_out; | ||
131 | mutex_unlock(&msblk->read_data_mutex); | 132 | mutex_unlock(&msblk->read_data_mutex); |
132 | return stream->total_out; | 133 | return length; |
133 | 134 | ||
134 | release_mutex: | 135 | release_mutex: |
135 | mutex_unlock(&msblk->read_data_mutex); | 136 | mutex_unlock(&msblk->read_data_mutex); |
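
The zlib_wrapper fix above copies total_out into a local variable before dropping the mutex, because the zlib stream is shared and may be reused by another reader the moment the lock is released. The same copy-before-unlock pattern in a minimal pthread sketch; the shared counter stands in for stream->total_out.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long shared_total;            /* stands in for stream->total_out */

static long consume_total(void)
{
	long length;

	pthread_mutex_lock(&lock);
	shared_total += 4096;        /* pretend we decompressed a block */
	length = shared_total;       /* snapshot while still holding the lock */
	pthread_mutex_unlock(&lock);
	return length;               /* safe: no access to shared state after unlock */
}

int main(void)
{
	printf("%ld\n", consume_total());
	return 0;
}
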
diff --git a/fs/super.c b/fs/super.c index f35ac6022109..dc72491a19f9 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -693,6 +693,7 @@ int set_anon_super(struct super_block *s, void *data) | |||
693 | return -EMFILE; | 693 | return -EMFILE; |
694 | } | 694 | } |
695 | s->s_dev = MKDEV(0, dev & MINORMASK); | 695 | s->s_dev = MKDEV(0, dev & MINORMASK); |
696 | s->s_bdi = &noop_backing_dev_info; | ||
696 | return 0; | 697 | return 0; |
697 | } | 698 | } |
698 | 699 | ||
@@ -954,10 +955,11 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void | |||
954 | if (error < 0) | 955 | if (error < 0) |
955 | goto out_free_secdata; | 956 | goto out_free_secdata; |
956 | BUG_ON(!mnt->mnt_sb); | 957 | BUG_ON(!mnt->mnt_sb); |
958 | WARN_ON(!mnt->mnt_sb->s_bdi); | ||
957 | 959 | ||
958 | error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata); | 960 | error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata); |
959 | if (error) | 961 | if (error) |
960 | goto out_sb; | 962 | goto out_sb; |
961 | 963 | ||
962 | /* | 964 | /* |
963 | * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE | 965 | * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE |
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/pagemap.h> | 14 | #include <linux/pagemap.h> |
15 | #include <linux/quotaops.h> | 15 | #include <linux/quotaops.h> |
16 | #include <linux/buffer_head.h> | 16 | #include <linux/buffer_head.h> |
17 | #include <linux/backing-dev.h> | ||
17 | #include "internal.h" | 18 | #include "internal.h" |
18 | 19 | ||
19 | #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ | 20 | #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ |
@@ -32,7 +33,7 @@ static int __sync_filesystem(struct super_block *sb, int wait) | |||
32 | * This should be safe, as we require bdi backing to actually | 33 | * This should be safe, as we require bdi backing to actually |
33 | * write out data in the first place | 34 | * write out data in the first place |
34 | */ | 35 | */ |
35 | if (!sb->s_bdi) | 36 | if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info) |
36 | return 0; | 37 | return 0; |
37 | 38 | ||
38 | if (sb->s_qcop && sb->s_qcop->quota_sync) | 39 | if (sb->s_qcop && sb->s_qcop->quota_sync) |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 05cd85317f6f..fd9698215759 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
@@ -820,10 +820,10 @@ xfs_reclaim_inode( | |||
820 | * call into reclaim to find it in a clean state instead of waiting for | 820 | * call into reclaim to find it in a clean state instead of waiting for |
821 | * it now. We also don't return errors here - if the error is transient | 821 | * it now. We also don't return errors here - if the error is transient |
822 | * then the next reclaim pass will flush the inode, and if the error | 822 | * then the next reclaim pass will flush the inode, and if the error |
823 | * is permanent then the next sync reclaim will relcaim the inode and | 823 | * is permanent then the next sync reclaim will reclaim the inode and |
824 | * pass on the error. | 824 | * pass on the error. |
825 | */ | 825 | */ |
826 | if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { | 826 | if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
827 | xfs_fs_cmn_err(CE_WARN, ip->i_mount, | 827 | xfs_fs_cmn_err(CE_WARN, ip->i_mount, |
828 | "inode 0x%llx background reclaim flush failed with %d", | 828 | "inode 0x%llx background reclaim flush failed with %d", |
829 | (long long)ip->i_ino, error); | 829 | (long long)ip->i_ino, error); |
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index cd27c9d6c71f..5bba29a07812 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c | |||
@@ -177,16 +177,26 @@ xfs_swap_extents_check_format( | |||
177 | XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > tip->i_df.if_ext_max) | 177 | XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > tip->i_df.if_ext_max) |
178 | return EINVAL; | 178 | return EINVAL; |
179 | 179 | ||
180 | /* Check root block of temp in btree form to max in target */ | 180 | /* |
181 | * If we are in a btree format, check that the temp root block will fit | ||
182 | * in the target and that it has enough extents to be in btree format | ||
183 | * in the target. | ||
184 | * | ||
185 | * Note that we have to be careful to allow btree->extent conversions | ||
186 | * (a common defrag case) which will occur when the temp inode is in | ||
187 | * extent format... | ||
188 | */ | ||
181 | if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE && | 189 | if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE && |
182 | XFS_IFORK_BOFF(ip) && | 190 | ((XFS_IFORK_BOFF(ip) && |
183 | tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip)) | 191 | tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip)) || |
192 | XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <= ip->i_df.if_ext_max)) | ||
184 | return EINVAL; | 193 | return EINVAL; |
185 | 194 | ||
186 | /* Check root block of target in btree form to max in temp */ | 195 | /* Reciprocal target->temp btree format checks */ |
187 | if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE && | 196 | if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE && |
188 | XFS_IFORK_BOFF(tip) && | 197 | ((XFS_IFORK_BOFF(tip) && |
189 | ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip)) | 198 | ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip)) || |
199 | XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= tip->i_df.if_ext_max)) | ||
190 | return EINVAL; | 200 | return EINVAL; |
191 | 201 | ||
192 | return 0; | 202 | return 0; |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index e8fba92d7cd9..2be019136287 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -745,9 +745,16 @@ xfs_log_move_tail(xfs_mount_t *mp, | |||
745 | 745 | ||
746 | /* | 746 | /* |
747 | * Determine if we have a transaction that has gone to disk | 747 | * Determine if we have a transaction that has gone to disk |
748 | * that needs to be covered. Log activity needs to be idle (no AIL and | 748 | * that needs to be covered. To begin the transition to the idle state |
749 | * nothing in the iclogs). And, we need to be in the right state indicating | 749 | * firstly the log needs to be idle (no AIL and nothing in the iclogs). |
750 | * something has gone out. | 750 | * If we are then in a state where covering is needed, the caller is informed |
751 | * that dummy transactions are required to move the log into the idle state. | ||
752 | * | ||
753 | * Because this is called as part of the sync process, we should also indicate | ||
754 | * that dummy transactions should be issued in anything but the covered or | ||
755 | * idle states. This ensures that the log tail is accurately reflected in | ||
756 | * the log at the end of the sync, hence if a crash occurs we avoid replay | ||
757 | * of transactions where the metadata is already on disk. | ||
751 | */ | 758 | */ |
752 | int | 759 | int |
753 | xfs_log_need_covered(xfs_mount_t *mp) | 760 | xfs_log_need_covered(xfs_mount_t *mp) |
@@ -759,17 +766,24 @@ xfs_log_need_covered(xfs_mount_t *mp) | |||
759 | return 0; | 766 | return 0; |
760 | 767 | ||
761 | spin_lock(&log->l_icloglock); | 768 | spin_lock(&log->l_icloglock); |
762 | if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || | 769 | switch (log->l_covered_state) { |
763 | (log->l_covered_state == XLOG_STATE_COVER_NEED2)) | 770 | case XLOG_STATE_COVER_DONE: |
764 | && !xfs_trans_ail_tail(log->l_ailp) | 771 | case XLOG_STATE_COVER_DONE2: |
765 | && xlog_iclogs_empty(log)) { | 772 | case XLOG_STATE_COVER_IDLE: |
766 | if (log->l_covered_state == XLOG_STATE_COVER_NEED) | 773 | break; |
767 | log->l_covered_state = XLOG_STATE_COVER_DONE; | 774 | case XLOG_STATE_COVER_NEED: |
768 | else { | 775 | case XLOG_STATE_COVER_NEED2: |
769 | ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2); | 776 | if (!xfs_trans_ail_tail(log->l_ailp) && |
770 | log->l_covered_state = XLOG_STATE_COVER_DONE2; | 777 | xlog_iclogs_empty(log)) { |
778 | if (log->l_covered_state == XLOG_STATE_COVER_NEED) | ||
779 | log->l_covered_state = XLOG_STATE_COVER_DONE; | ||
780 | else | ||
781 | log->l_covered_state = XLOG_STATE_COVER_DONE2; | ||
771 | } | 782 | } |
783 | /* FALLTHRU */ | ||
784 | default: | ||
772 | needed = 1; | 785 | needed = 1; |
786 | break; | ||
773 | } | 787 | } |
774 | spin_unlock(&log->l_icloglock); | 788 | spin_unlock(&log->l_icloglock); |
775 | return needed; | 789 | return needed; |
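
The rewritten xfs_log_need_covered above turns the two-flag test into a switch over the covering state machine, so every state answers the question "are dummy transactions still needed?" rather than only NEED/NEED2. A reduced sketch of the same decision as a standalone function; the enum mirrors the XLOG_STATE_COVER_* names, and ail_empty/iclogs_empty are stand-ins for the real AIL and iclog checks.

#include <stdbool.h>
#include <stdio.h>

enum cover_state {
	COVER_IDLE,   /* log is idle, nothing to do */
	COVER_NEED,   /* first dummy transaction still required */
	COVER_DONE,   /* first dummy written, may regress to NEED */
	COVER_NEED2,  /* second dummy transaction still required */
	COVER_DONE2,  /* both dummies written */
};

/* Returns true when the caller should keep issuing dummy transactions. */
static bool log_need_covered(enum cover_state *state, bool ail_empty,
			     bool iclogs_empty)
{
	bool needed = false;

	switch (*state) {
	case COVER_DONE:
	case COVER_DONE2:
	case COVER_IDLE:
		break;                        /* already covered or idle */
	case COVER_NEED:
	case COVER_NEED2:
		if (ail_empty && iclogs_empty) {
			/* the log has drained, advance the state machine */
			*state = (*state == COVER_NEED) ? COVER_DONE
							: COVER_DONE2;
		}
		/* fall through */
	default:
		needed = true;
		break;
	}
	return needed;
}

int main(void)
{
	enum cover_state state = COVER_NEED;

	printf("%d\n", log_need_covered(&state, true, true));  /* 1, state -> DONE */
	printf("%d\n", log_need_covered(&state, true, true));  /* 0, already covered */
	return 0;
}
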
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 04a6ebc27b96..2d428b088cc8 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -6,6 +6,7 @@ | |||
6 | {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 6 | {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
7 | {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 7 | {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
8 | {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 8 | {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
9 | {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
9 | {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | 10 | {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ |
10 | {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | 11 | {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ |
11 | {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ | 12 | {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ |
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 2742e1adfc30..7534979d83bd 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
@@ -104,6 +104,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent, | |||
104 | const char *fmt, ...); | 104 | const char *fmt, ...); |
105 | int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); | 105 | int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); |
106 | void bdi_unregister(struct backing_dev_info *bdi); | 106 | void bdi_unregister(struct backing_dev_info *bdi); |
107 | int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); | ||
107 | void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, | 108 | void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, |
108 | long nr_pages); | 109 | long nr_pages); |
109 | int bdi_writeback_task(struct bdi_writeback *wb); | 110 | int bdi_writeback_task(struct bdi_writeback *wb); |
@@ -249,6 +250,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); | |||
249 | #endif | 250 | #endif |
250 | 251 | ||
251 | extern struct backing_dev_info default_backing_dev_info; | 252 | extern struct backing_dev_info default_backing_dev_info; |
253 | extern struct backing_dev_info noop_backing_dev_info; | ||
252 | void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page); | 254 | void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page); |
253 | 255 | ||
254 | int writeback_in_progress(struct backing_dev_info *bdi); | 256 | int writeback_in_progress(struct backing_dev_info *bdi); |
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h index 5b5d4731f956..8859e2ede9fe 100644 --- a/include/linux/coda_psdev.h +++ b/include/linux/coda_psdev.h | |||
@@ -7,6 +7,8 @@ | |||
7 | #define MAX_CODADEVS 5 /* how many do we allow */ | 7 | #define MAX_CODADEVS 5 /* how many do we allow */ |
8 | 8 | ||
9 | #ifdef __KERNEL__ | 9 | #ifdef __KERNEL__ |
10 | #include <linux/backing-dev.h> | ||
11 | |||
10 | struct kstatfs; | 12 | struct kstatfs; |
11 | 13 | ||
12 | /* communication pending/processing queues */ | 14 | /* communication pending/processing queues */ |
@@ -17,6 +19,7 @@ struct venus_comm { | |||
17 | struct list_head vc_processing; | 19 | struct list_head vc_processing; |
18 | int vc_inuse; | 20 | int vc_inuse; |
19 | struct super_block *vc_sb; | 21 | struct super_block *vc_sb; |
22 | struct backing_dev_info bdi; | ||
20 | }; | 23 | }; |
21 | 24 | ||
22 | 25 | ||
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 40b11013408e..68f883b30a53 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h | |||
@@ -1,21 +1,26 @@ | |||
1 | /* | 1 | /* |
2 | * Char device interface. | 2 | * Char device interface. |
3 | * | 3 | * |
4 | * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net> | 4 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * it under the terms of the GNU General Public License as published by | 7 | * copy of this software and associated documentation files (the "Software"), |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * to deal in the Software without restriction, including without limitation |
9 | * (at your option) any later version. | 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * | 10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * This program is distributed in the hope that it will be useful, | 11 | * Software is furnished to do so, subject to the following conditions: |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * The above copyright notice and this permission notice (including the next |
14 | * GNU General Public License for more details. | 14 | * paragraph) shall be included in all copies or substantial portions of the |
15 | * | 15 | * Software. |
16 | * You should have received a copy of the GNU General Public License | 16 | * |
17 | * along with this program; if not, write to the Free Software Foundation, | 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
23 | * DEALINGS IN THE SOFTWARE. | ||
19 | */ | 24 | */ |
20 | 25 | ||
21 | #ifndef _LINUX_FIREWIRE_CDEV_H | 26 | #ifndef _LINUX_FIREWIRE_CDEV_H |
@@ -438,7 +443,7 @@ struct fw_cdev_remove_descriptor { | |||
438 | * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE | 443 | * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE |
439 | * @header_size: Header size to strip for receive contexts | 444 | * @header_size: Header size to strip for receive contexts |
440 | * @channel: Channel to bind to | 445 | * @channel: Channel to bind to |
441 | * @speed: Speed to transmit at | 446 | * @speed: Speed for transmit contexts |
442 | * @closure: To be returned in &fw_cdev_event_iso_interrupt | 447 | * @closure: To be returned in &fw_cdev_event_iso_interrupt |
443 | * @handle: Handle to context, written back by kernel | 448 | * @handle: Handle to context, written back by kernel |
444 | * | 449 | * |
@@ -451,6 +456,9 @@ struct fw_cdev_remove_descriptor { | |||
451 | * If a context was successfully created, the kernel writes back a handle to the | 456 | * If a context was successfully created, the kernel writes back a handle to the |
452 | * context, which must be passed in for subsequent operations on that context. | 457 | * context, which must be passed in for subsequent operations on that context. |
453 | * | 458 | * |
459 | * For receive contexts, @header_size must be at least 4 and must be a multiple | ||
460 | * of 4. | ||
461 | * | ||
454 | * Note that the effect of a @header_size > 4 depends on | 462 | * Note that the effect of a @header_size > 4 depends on |
455 | * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. | 463 | * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. |
456 | */ | 464 | */ |
@@ -481,10 +489,34 @@ struct fw_cdev_create_iso_context { | |||
481 | * | 489 | * |
482 | * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. | 490 | * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. |
483 | * | 491 | * |
484 | * Use the FW_CDEV_ISO_ macros to fill in @control. The sy and tag fields are | 492 | * Use the FW_CDEV_ISO_ macros to fill in @control. |
485 | * specified by IEEE 1394a and IEC 61883. | 493 | * |
486 | * | 494 | * For transmit packets, the header length must be a multiple of 4 and specifies |
487 | * FIXME - finish this documentation | 495 | * the number of bytes in @header that will be prepended to the packet's | ||
496 | * payload; these bytes are copied into the kernel and will not be accessed | ||
497 | * after the ioctl has returned. The sy and tag fields are copied to the iso | ||
498 | * packet header (these fields are specified by IEEE 1394a and IEC 61883-1). | ||
499 | * The skip flag specifies that no packet is to be sent in a frame; when using | ||
500 | * this, all other fields except the interrupt flag must be zero. | ||
501 | * | ||
502 | * For receive packets, the header length must be a multiple of the context's | ||
503 | * header size; if the header length is larger than the context's header size, | ||
504 | * multiple packets are queued for this entry. The sy and tag fields are | ||
505 | * ignored. If the sync flag is set, the context drops all packets until | ||
506 | * a packet with a matching sy field is received (the sync value to wait for is | ||
507 | * specified in the &fw_cdev_start_iso structure). The payload length defines | ||
508 | * how many payload bytes can be received for one packet (in addition to payload | ||
509 | * quadlets that have been defined as headers and are stripped and returned in | ||
510 | * the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the | ||
511 | * additional bytes are dropped. If fewer bytes are received, the remaining | ||
512 | * bytes in this part of the payload buffer will not be written to, not even by | ||
513 | * the next packet, i.e., packets received in consecutive frames will not | ||
514 | * necessarily be consecutive in memory. If an entry has queued multiple | ||
515 | * packets, the payload length is divided equally among them. | ||
516 | * | ||
517 | * When a packet with the interrupt flag set has been completed, the | ||
518 | * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued | ||
519 | * multiple receive packets is completed when its last packet is completed. | ||
488 | */ | 520 | */ |
489 | struct fw_cdev_iso_packet { | 521 | struct fw_cdev_iso_packet { |
490 | __u32 control; | 522 | __u32 control; |
@@ -501,7 +533,7 @@ struct fw_cdev_iso_packet { | |||
501 | * Queue a number of isochronous packets for reception or transmission. | 533 | * Queue a number of isochronous packets for reception or transmission. |
502 | * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs, | 534 | * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs, |
503 | * which describe how to transmit from or receive into a contiguous region | 535 | * which describe how to transmit from or receive into a contiguous region |
504 | * of a mmap()'ed payload buffer. As part of the packet descriptors, | 536 | * of a mmap()'ed payload buffer. As part of transmit packet descriptors, |
505 | * a series of headers can be supplied, which will be prepended to the | 537 | * a series of headers can be supplied, which will be prepended to the |
506 | * payload during DMA. | 538 | * payload during DMA. |
507 | * | 539 | * |
@@ -620,8 +652,8 @@ struct fw_cdev_get_cycle_timer2 { | |||
620 | * instead of allocated. | 652 | * instead of allocated. |
621 | * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. | 653 | * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. |
622 | * | 654 | * |
623 | * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources | 655 | * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources |
624 | * for the lifetime of the fd or handle. | 656 | * for the lifetime of the fd or @handle. |
625 | * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources | 657 | * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources |
626 | * for the duration of a bus generation. | 658 | * for the duration of a bus generation. |
627 | * | 659 | * |
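The kernel-doc added above explains how the @control word of &fw_cdev_iso_packet is assembled from the FW_CDEV_ISO_ macros. As a rough sketch only (macro and field names are assumed to be the ones defined in <linux/firewire-cdev.h> and should be checked there; the header values are made up), a user-space client queueing one transmit packet with an 8-byte header, a 200-byte payload slice and a completion interrupt might fill the descriptor like this:

	/* control word plus two header quadlets, as one contiguous descriptor */
	__u32 buf[1 + 2];
	struct fw_cdev_iso_packet *p = (struct fw_cdev_iso_packet *)buf;

	p->control = FW_CDEV_ISO_PAYLOAD_LENGTH(200) |
		     FW_CDEV_ISO_HEADER_LENGTH(8)    |	/* multiple of 4 */
		     FW_CDEV_ISO_TAG(1)              |
		     FW_CDEV_ISO_SY(0)               |
		     FW_CDEV_ISO_INTERRUPT;		/* request completion event */
	p->header[0] = 0;	/* application-defined header quadlets, copied by */
	p->header[1] = 0;	/* the kernel in front of the mmap()'ed payload    */

An array of such descriptors is then handed to the FW_CDEV_IOC_QUEUE_ISO ioctl, as described further down in this header.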
diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h index b316770a43fd..9b4bb5fbba4b 100644 --- a/include/linux/firewire-constants.h +++ b/include/linux/firewire-constants.h | |||
@@ -1,3 +1,28 @@ | |||
1 | /* | ||
2 | * IEEE 1394 constants. | ||
3 | * | ||
4 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
23 | * DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | |||
1 | #ifndef _LINUX_FIREWIRE_CONSTANTS_H | 26 | #ifndef _LINUX_FIREWIRE_CONSTANTS_H |
2 | #define _LINUX_FIREWIRE_CONSTANTS_H | 27 | #define _LINUX_FIREWIRE_CONSTANTS_H |
3 | 28 | ||
@@ -21,7 +46,7 @@ | |||
21 | #define EXTCODE_WRAP_ADD 0x6 | 46 | #define EXTCODE_WRAP_ADD 0x6 |
22 | #define EXTCODE_VENDOR_DEPENDENT 0x7 | 47 | #define EXTCODE_VENDOR_DEPENDENT 0x7 |
23 | 48 | ||
24 | /* Juju specific tcodes */ | 49 | /* Linux firewire-core (Juju) specific tcodes */ |
25 | #define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP) | 50 | #define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP) |
26 | #define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP) | 51 | #define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP) |
27 | #define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD) | 52 | #define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD) |
@@ -36,7 +61,7 @@ | |||
36 | #define RCODE_TYPE_ERROR 0x6 | 61 | #define RCODE_TYPE_ERROR 0x6 |
37 | #define RCODE_ADDRESS_ERROR 0x7 | 62 | #define RCODE_ADDRESS_ERROR 0x7 |
38 | 63 | ||
39 | /* Juju specific rcodes */ | 64 | /* Linux firewire-core (Juju) specific rcodes */ |
40 | #define RCODE_SEND_ERROR 0x10 | 65 | #define RCODE_SEND_ERROR 0x10 |
41 | #define RCODE_CANCELLED 0x11 | 66 | #define RCODE_CANCELLED 0x11 |
42 | #define RCODE_BUSY 0x12 | 67 | #define RCODE_BUSY 0x12 |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 31ee31be51e9..f30970c97acf 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -2316,8 +2316,9 @@ extern int vfs_fstatat(int , char __user *, struct kstat *, int); | |||
2316 | extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, | 2316 | extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, |
2317 | unsigned long arg); | 2317 | unsigned long arg); |
2318 | extern int __generic_block_fiemap(struct inode *inode, | 2318 | extern int __generic_block_fiemap(struct inode *inode, |
2319 | struct fiemap_extent_info *fieinfo, u64 start, | 2319 | struct fiemap_extent_info *fieinfo, |
2320 | u64 len, get_block_t *get_block); | 2320 | loff_t start, loff_t len, |
2321 | get_block_t *get_block); | ||
2321 | extern int generic_block_fiemap(struct inode *inode, | 2322 | extern int generic_block_fiemap(struct inode *inode, |
2322 | struct fiemap_extent_info *fieinfo, u64 start, | 2323 | struct fiemap_extent_info *fieinfo, u64 start, |
2323 | u64 len, get_block_t *get_block); | 2324 | u64 len, get_block_t *get_block); |
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index 3bd018baae20..c964cd7f436a 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h | |||
@@ -44,6 +44,7 @@ struct matrix_keymap_data { | |||
44 | * @active_low: gpio polarity | 44 | * @active_low: gpio polarity |
45 | * @wakeup: controls whether the device should be set up as wakeup | 45 | * @wakeup: controls whether the device should be set up as wakeup |
46 | * source | 46 | * source |
47 | * @no_autorepeat: disable key autorepeat | ||
47 | * | 48 | * |
48 | * This structure represents platform-specific data that use used by | 49 | * This structure represents platform-specific data that use used by |
49 | * matrix_keypad driver to perform proper initialization. | 50 | * matrix_keypad driver to perform proper initialization. |
@@ -64,6 +65,7 @@ struct matrix_keypad_platform_data { | |||
64 | 65 | ||
65 | bool active_low; | 66 | bool active_low; |
66 | bool wakeup; | 67 | bool wakeup; |
68 | bool no_autorepeat; | ||
67 | }; | 69 | }; |
68 | 70 | ||
69 | /** | 71 | /** |
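Board code that wants the new flag simply sets it alongside the existing fields. A hypothetical platform-data instance (keymap and GPIO arrays are illustrative, not from this patch):

	static struct matrix_keypad_platform_data example_keypad_pdata = {
		.keymap_data	= &example_keymap_data,	/* hypothetical keymap */
		.row_gpios	= example_row_gpios,
		.col_gpios	= example_col_gpios,
		.num_row_gpios	= 4,
		.num_col_gpios	= 4,
		.active_low	= true,
		.wakeup		= true,
		.no_autorepeat	= true,			/* field added above */
	};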
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a3fd0f91d943..169d07758ee5 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -54,7 +54,7 @@ extern struct kmem_cache *kvm_vcpu_cache; | |||
54 | */ | 54 | */ |
55 | struct kvm_io_bus { | 55 | struct kvm_io_bus { |
56 | int dev_count; | 56 | int dev_count; |
57 | #define NR_IOBUS_DEVS 6 | 57 | #define NR_IOBUS_DEVS 200 |
58 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; | 58 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; |
59 | }; | 59 | }; |
60 | 60 | ||
@@ -119,6 +119,11 @@ struct kvm_memory_slot { | |||
119 | int user_alloc; | 119 | int user_alloc; |
120 | }; | 120 | }; |
121 | 121 | ||
122 | static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) | ||
123 | { | ||
124 | return ALIGN(memslot->npages, BITS_PER_LONG) / 8; | ||
125 | } | ||
126 | |||
122 | struct kvm_kernel_irq_routing_entry { | 127 | struct kvm_kernel_irq_routing_entry { |
123 | u32 gsi; | 128 | u32 gsi; |
124 | u32 type; | 129 | u32 type; |
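The new kvm_dirty_bitmap_bytes() helper rounds the slot's page count up to a whole number of longs before converting bits to bytes, which is the size the dirty-log code allocates and copies out. A minimal sketch of a caller, assuming the usual vmalloc-backed bitmap (names other than the helper are illustrative):

	unsigned long bytes = kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = vmalloc(bytes);		/* needs <linux/vmalloc.h> */
	if (!memslot->dirty_bitmap)
		return -ENOMEM;
	memset(memslot->dirty_bitmap, 0, bytes);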
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h index 6330fc76b00f..5ec9ca671687 100644 --- a/include/linux/ncp_fs_sb.h +++ b/include/linux/ncp_fs_sb.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/ncp_mount.h> | 12 | #include <linux/ncp_mount.h> |
13 | #include <linux/net.h> | 13 | #include <linux/net.h> |
14 | #include <linux/mutex.h> | 14 | #include <linux/mutex.h> |
15 | #include <linux/backing-dev.h> | ||
15 | 16 | ||
16 | #ifdef __KERNEL__ | 17 | #ifdef __KERNEL__ |
17 | 18 | ||
@@ -127,6 +128,7 @@ struct ncp_server { | |||
127 | size_t len; | 128 | size_t len; |
128 | __u8 data[128]; | 129 | __u8 data[128]; |
129 | } unexpected_packet; | 130 | } unexpected_packet; |
131 | struct backing_dev_info bdi; | ||
130 | }; | 132 | }; |
131 | 133 | ||
132 | extern void ncp_tcp_rcv_proc(struct work_struct *work); | 134 | extern void ncp_tcp_rcv_proc(struct work_struct *work); |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 717a5e54eb1d..e82957acea56 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -176,6 +176,7 @@ struct nfs_server { | |||
176 | #define NFS_CAP_ATIME (1U << 11) | 176 | #define NFS_CAP_ATIME (1U << 11) |
177 | #define NFS_CAP_CTIME (1U << 12) | 177 | #define NFS_CAP_CTIME (1U << 12) |
178 | #define NFS_CAP_MTIME (1U << 13) | 178 | #define NFS_CAP_MTIME (1U << 13) |
179 | #define NFS_CAP_POSIX_LOCK (1U << 14) | ||
179 | 180 | ||
180 | 181 | ||
181 | /* maximum number of slots to use */ | 182 | /* maximum number of slots to use */ |
diff --git a/include/linux/poison.h b/include/linux/poison.h index 2110a81c5e2a..34066ffd893d 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h | |||
@@ -48,6 +48,15 @@ | |||
48 | #define POISON_FREE 0x6b /* for use-after-free poisoning */ | 48 | #define POISON_FREE 0x6b /* for use-after-free poisoning */ |
49 | #define POISON_END 0xa5 /* end-byte of poisoning */ | 49 | #define POISON_END 0xa5 /* end-byte of poisoning */ |
50 | 50 | ||
51 | /********** mm/hugetlb.c **********/ | ||
52 | /* | ||
53 | * Private mappings of hugetlb pages use this poisoned value for | ||
54 | * page->mapping. The core VM should not be doing anything with this mapping | ||
55 | * but futex requires the existence of some page->mapping value even though it | ||
56 | * is unused if PAGE_MAPPING_ANON is set. | ||
57 | */ | ||
58 | #define HUGETLB_POISON ((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON)) | ||
59 | |||
51 | /********** arch/$ARCH/mm/init.c **********/ | 60 | /********** arch/$ARCH/mm/init.c **********/ |
52 | #define POISON_FREE_INITMEM 0xcc | 61 | #define POISON_FREE_INITMEM 0xcc |
53 | 62 | ||
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 872a98e13d6a..07db2feb8572 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -101,10 +101,7 @@ extern struct lockdep_map rcu_sched_lock_map; | |||
101 | # define rcu_read_release_sched() \ | 101 | # define rcu_read_release_sched() \ |
102 | lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) | 102 | lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) |
103 | 103 | ||
104 | static inline int debug_lockdep_rcu_enabled(void) | 104 | extern int debug_lockdep_rcu_enabled(void); |
105 | { | ||
106 | return likely(rcu_scheduler_active && debug_locks); | ||
107 | } | ||
108 | 105 | ||
109 | /** | 106 | /** |
110 | * rcu_read_lock_held - might we be in RCU read-side critical section? | 107 | * rcu_read_lock_held - might we be in RCU read-side critical section? |
@@ -195,12 +192,30 @@ static inline int rcu_read_lock_sched_held(void) | |||
195 | 192 | ||
196 | /** | 193 | /** |
197 | * rcu_dereference_check - rcu_dereference with debug checking | 194 | * rcu_dereference_check - rcu_dereference with debug checking |
195 | * @p: The pointer to read, prior to dereferencing | ||
196 | * @c: The conditions under which the dereference will take place | ||
197 | * | ||
198 | * Do an rcu_dereference(), but check that the conditions under which the | ||
199 | * dereference will take place are correct. Typically the conditions indicate | ||
200 | * the various locking conditions that should be held at that point. The check | ||
201 | * should return true if the conditions are satisfied. | ||
202 | * | ||
203 | * For example: | ||
204 | * | ||
205 | * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || | ||
206 | * lockdep_is_held(&foo->lock)); | ||
198 | * | 207 | * |
199 | * Do an rcu_dereference(), but check that the context is correct. | 208 | * could be used to indicate to lockdep that foo->bar may only be dereferenced |
200 | * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to | 209 | * if either the RCU read lock is held, or that the lock required to replace |
201 | * ensure that the rcu_dereference_check() executes within an RCU | 210 | * the bar struct at foo->bar is held. |
202 | * read-side critical section. It is also possible to check for | 211 | * |
203 | * locks being held, for example, by using lockdep_is_held(). | 212 | * Note that the list of conditions may also include indications of when a lock |
213 | * need not be held, for example during initialisation or destruction of the | ||
214 | * target struct: | ||
215 | * | ||
216 | * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || | ||
217 | * lockdep_is_held(&foo->lock) || | ||
218 | * atomic_read(&foo->usage) == 0); | ||
204 | */ | 219 | */ |
205 | #define rcu_dereference_check(p, c) \ | 220 | #define rcu_dereference_check(p, c) \ |
206 | ({ \ | 221 | ({ \ |
@@ -209,13 +224,45 @@ static inline int rcu_read_lock_sched_held(void) | |||
209 | rcu_dereference_raw(p); \ | 224 | rcu_dereference_raw(p); \ |
210 | }) | 225 | }) |
211 | 226 | ||
227 | /** | ||
228 | * rcu_dereference_protected - fetch RCU pointer when updates prevented | ||
229 | * | ||
230 | * Return the value of the specified RCU-protected pointer, but omit | ||
231 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This | ||
232 | * is useful in cases where update-side locks prevent the value of the | ||
233 | * pointer from changing. Please note that this primitive does -not- | ||
234 | * prevent the compiler from repeating this reference or combining it | ||
235 | * with other references, so it should not be used without protection | ||
236 | * of appropriate locks. | ||
237 | */ | ||
238 | #define rcu_dereference_protected(p, c) \ | ||
239 | ({ \ | ||
240 | if (debug_lockdep_rcu_enabled() && !(c)) \ | ||
241 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
242 | (p); \ | ||
243 | }) | ||
244 | |||
212 | #else /* #ifdef CONFIG_PROVE_RCU */ | 245 | #else /* #ifdef CONFIG_PROVE_RCU */ |
213 | 246 | ||
214 | #define rcu_dereference_check(p, c) rcu_dereference_raw(p) | 247 | #define rcu_dereference_check(p, c) rcu_dereference_raw(p) |
248 | #define rcu_dereference_protected(p, c) (p) | ||
215 | 249 | ||
216 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | 250 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ |
217 | 251 | ||
218 | /** | 252 | /** |
253 | * rcu_access_pointer - fetch RCU pointer with no dereferencing | ||
254 | * | ||
255 | * Return the value of the specified RCU-protected pointer, but omit the | ||
256 | * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful | ||
257 | * when the value of this pointer is accessed, but the pointer is not | ||
258 | * dereferenced, for example, when testing an RCU-protected pointer against | ||
259 | * NULL. This may also be used in cases where update-side locks prevent | ||
260 | * the value of the pointer from changing, but rcu_dereference_protected() | ||
261 | * is a lighter-weight primitive for this use case. | ||
262 | */ | ||
263 | #define rcu_access_pointer(p) ACCESS_ONCE(p) | ||
264 | |||
265 | /** | ||
219 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. | 266 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. |
220 | * | 267 | * |
221 | * When synchronize_rcu() is invoked on one CPU while other CPUs | 268 | * When synchronize_rcu() is invoked on one CPU while other CPUs |
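Taken together, the primitives documented above split the old catch-all rcu_dereference() into intent-specific variants. A hedged sketch of how they differ in a typical reader/updater pair (the foo/bar names mirror the kernel-doc examples and are illustrative):

	/* reader: full rcu_dereference() semantics plus a lockdep check */
	rcu_read_lock();
	bar = rcu_dereference_check(foo->bar,
				    rcu_read_lock_held() ||
				    lockdep_is_held(&foo->lock));
	/* ... use bar ... */
	rcu_read_unlock();

	/* updater: the lock keeps the pointer stable, so the cheaper
	 * rcu_dereference_protected() is enough */
	spin_lock(&foo->lock);
	old = rcu_dereference_protected(foo->bar, lockdep_is_held(&foo->lock));
	rcu_assign_pointer(foo->bar, new);
	spin_unlock(&foo->lock);

	/* NULL test only, no dereference: rcu_access_pointer() */
	if (rcu_access_pointer(foo->bar))
		wakeup_consumers();	/* hypothetical helper */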
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 28c9fd020d39..ebd747265294 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h | |||
@@ -183,9 +183,13 @@ static inline struct regulator *__must_check regulator_get(struct device *dev, | |||
183 | { | 183 | { |
184 | /* Nothing except the stubbed out regulator API should be | 184 | /* Nothing except the stubbed out regulator API should be |
185 | * looking at the value except to check if it is an error | 185 | * looking at the value except to check if it is an error |
186 | * value so the actual return value doesn't matter. | 186 | * value. Drivers are free to handle NULL specifically by |
187 | * skipping all regulator API calls, but they don't have to. | ||
188 | * Drivers which don't, should make sure they properly handle | ||
189 | * corner cases of the API, such as regulator_get_voltage() | ||
190 | * returning 0. | ||
187 | */ | 191 | */ |
188 | return (struct regulator *)id; | 192 | return NULL; |
189 | } | 193 | } |
190 | static inline void regulator_put(struct regulator *regulator) | 194 | static inline void regulator_put(struct regulator *regulator) |
191 | { | 195 | { |
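The rewritten comment warns that the !CONFIG_REGULATOR stub now hands back NULL and that other stubbed calls return 0, so a consumer must not treat 0 volts as an error. A hypothetical consumer that stays correct with and without the real API (sketch only, supply name made up):

	reg = regulator_get(dev, "vcc");	/* NULL with the stub API */
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	uV = regulator_get_voltage(reg);	/* 0 from the stub, not an error */
	if (uV > 0)
		dev_dbg(dev, "supply at %d uV\n", uV);

	regulator_enable(reg);
	/* ... device use ... */
	regulator_disable(reg);
	regulator_put(reg);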
diff --git a/include/linux/smb_fs_sb.h b/include/linux/smb_fs_sb.h index 8a060a7040d8..bb947dd1fba9 100644 --- a/include/linux/smb_fs_sb.h +++ b/include/linux/smb_fs_sb.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #define _SMB_FS_SB | 10 | #define _SMB_FS_SB |
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/backing-dev.h> | ||
13 | #include <linux/smb.h> | 14 | #include <linux/smb.h> |
14 | 15 | ||
15 | /* | 16 | /* |
@@ -74,6 +75,8 @@ struct smb_sb_info { | |||
74 | struct smb_ops *ops; | 75 | struct smb_ops *ops; |
75 | 76 | ||
76 | struct super_block *super_block; | 77 | struct super_block *super_block; |
78 | |||
79 | struct backing_dev_info bdi; | ||
77 | }; | 80 | }; |
78 | 81 | ||
79 | static inline int | 82 | static inline int |
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 8be5135ff7aa..2c55a7ea20af 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h | |||
@@ -107,6 +107,7 @@ typedef enum { | |||
107 | SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ | 107 | SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ |
108 | SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ | 108 | SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ |
109 | SCTP_CMD_SEND_MSG, /* Send the whole use message */ | 109 | SCTP_CMD_SEND_MSG, /* Send the whole use message */ |
110 | SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ | ||
110 | SCTP_CMD_LAST | 111 | SCTP_CMD_LAST |
111 | } sctp_verb_t; | 112 | } sctp_verb_t; |
112 | 113 | ||
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 78740ec57d5d..fa6cde578a1d 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -128,6 +128,7 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t); | |||
128 | int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); | 128 | int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); |
129 | int sctp_inet_listen(struct socket *sock, int backlog); | 129 | int sctp_inet_listen(struct socket *sock, int backlog); |
130 | void sctp_write_space(struct sock *sk); | 130 | void sctp_write_space(struct sock *sk); |
131 | void sctp_data_ready(struct sock *sk, int len); | ||
131 | unsigned int sctp_poll(struct file *file, struct socket *sock, | 132 | unsigned int sctp_poll(struct file *file, struct socket *sock, |
132 | poll_table *wait); | 133 | poll_table *wait); |
133 | void sctp_sock_rfree(struct sk_buff *skb); | 134 | void sctp_sock_rfree(struct sk_buff *skb); |
diff --git a/include/net/x25.h b/include/net/x25.h index 15ef9624ab75..468551ea4f1d 100644 --- a/include/net/x25.h +++ b/include/net/x25.h | |||
@@ -183,6 +183,10 @@ extern int sysctl_x25_clear_request_timeout; | |||
183 | extern int sysctl_x25_ack_holdback_timeout; | 183 | extern int sysctl_x25_ack_holdback_timeout; |
184 | extern int sysctl_x25_forward; | 184 | extern int sysctl_x25_forward; |
185 | 185 | ||
186 | extern int x25_parse_address_block(struct sk_buff *skb, | ||
187 | struct x25_address *called_addr, | ||
188 | struct x25_address *calling_addr); | ||
189 | |||
186 | extern int x25_addr_ntoa(unsigned char *, struct x25_address *, | 190 | extern int x25_addr_ntoa(unsigned char *, struct x25_address *, |
187 | struct x25_address *); | 191 | struct x25_address *); |
188 | extern int x25_addr_aton(unsigned char *, struct x25_address *, | 192 | extern int x25_addr_aton(unsigned char *, struct x25_address *, |
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h index d57847f2f6c1..aab3c13dc310 100644 --- a/include/pcmcia/ds.h +++ b/include/pcmcia/ds.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #ifdef __KERNEL__ | 26 | #ifdef __KERNEL__ |
27 | #include <linux/device.h> | 27 | #include <linux/device.h> |
28 | #include <pcmcia/ss.h> | 28 | #include <pcmcia/ss.h> |
29 | #include <asm/atomic.h> | ||
29 | 30 | ||
30 | /* | 31 | /* |
31 | * PCMCIA device drivers (16-bit cards only; 32-bit cards require CardBus | 32 | * PCMCIA device drivers (16-bit cards only; 32-bit cards require CardBus |
@@ -94,10 +95,8 @@ struct pcmcia_device { | |||
94 | config_req_t conf; | 95 | config_req_t conf; |
95 | window_handle_t win; | 96 | window_handle_t win; |
96 | 97 | ||
97 | /* Is the device suspended, or in the process of | 98 | /* Is the device suspended? */ |
98 | * being removed? */ | ||
99 | u16 suspended:1; | 99 | u16 suspended:1; |
100 | u16 _removed:1; | ||
101 | 100 | ||
102 | /* Flags whether io, irq, win configurations were | 101 | /* Flags whether io, irq, win configurations were |
103 | * requested, and whether the configuration is "locked" */ | 102 | * requested, and whether the configuration is "locked" */ |
@@ -115,7 +114,7 @@ struct pcmcia_device { | |||
115 | u16 has_card_id:1; | 114 | u16 has_card_id:1; |
116 | u16 has_func_id:1; | 115 | u16 has_func_id:1; |
117 | 116 | ||
118 | u16 reserved:3; | 117 | u16 reserved:4; |
119 | 118 | ||
120 | u8 func_id; | 119 | u8 func_id; |
121 | u16 manf_id; | 120 | u16 manf_id; |
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h index 2e488b60bc76..344705cb42f4 100644 --- a/include/pcmcia/ss.h +++ b/include/pcmcia/ss.h | |||
@@ -224,18 +224,16 @@ struct pcmcia_socket { | |||
224 | 224 | ||
225 | /* 16-bit state: */ | 225 | /* 16-bit state: */ |
226 | struct { | 226 | struct { |
227 | /* PCMCIA card is present in socket */ | ||
228 | u8 present:1; | ||
229 | /* "master" ioctl is used */ | 227 | /* "master" ioctl is used */ |
230 | u8 busy:1; | 228 | u8 busy:1; |
231 | /* pcmcia module is being unloaded */ | ||
232 | u8 dead:1; | ||
233 | /* the PCMCIA card consists of two pseudo devices */ | 229 | /* the PCMCIA card consists of two pseudo devices */ |
234 | u8 has_pfc:1; | 230 | u8 has_pfc:1; |
235 | 231 | ||
236 | u8 reserved:4; | 232 | u8 reserved:6; |
237 | } pcmcia_state; | 233 | } pcmcia_state; |
238 | 234 | ||
235 | /* non-zero if PCMCIA card is present */ | ||
236 | atomic_t present; | ||
239 | 237 | ||
240 | #ifdef CONFIG_PCMCIA_IOCTL | 238 | #ifdef CONFIG_PCMCIA_IOCTL |
241 | struct user_info_t *user; | 239 | struct user_info_t *user; |
diff --git a/init/initramfs.c b/init/initramfs.c index 37d3859b1b32..4b9c20205092 100644 --- a/init/initramfs.c +++ b/init/initramfs.c | |||
@@ -457,7 +457,8 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len) | |||
457 | compress_name); | 457 | compress_name); |
458 | message = msg_buf; | 458 | message = msg_buf; |
459 | } | 459 | } |
460 | } | 460 | } else |
461 | error("junk in compressed archive"); | ||
461 | if (state != Reset) | 462 | if (state != Reset) |
462 | error("junk in compressed archive"); | 463 | error("junk in compressed archive"); |
463 | this_header = saved_offset + my_inptr; | 464 | this_header = saved_offset + my_inptr; |
diff --git a/kernel/cred.c b/kernel/cred.c index e1dbe9eef800..62af1816c235 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
@@ -398,6 +398,8 @@ struct cred *prepare_usermodehelper_creds(void) | |||
398 | 398 | ||
399 | error: | 399 | error: |
400 | put_cred(new); | 400 | put_cred(new); |
401 | return NULL; | ||
402 | |||
401 | free_tgcred: | 403 | free_tgcred: |
402 | #ifdef CONFIG_KEYS | 404 | #ifdef CONFIG_KEYS |
403 | kfree(tgcred); | 405 | kfree(tgcred); |
@@ -791,8 +793,6 @@ bool creds_are_invalid(const struct cred *cred) | |||
791 | { | 793 | { |
792 | if (cred->magic != CRED_MAGIC) | 794 | if (cred->magic != CRED_MAGIC) |
793 | return true; | 795 | return true; |
794 | if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers)) | ||
795 | return true; | ||
796 | #ifdef CONFIG_SECURITY_SELINUX | 796 | #ifdef CONFIG_SECURITY_SELINUX |
797 | if (selinux_is_enabled()) { | 797 | if (selinux_is_enabled()) { |
798 | if ((unsigned long) cred->security < PAGE_SIZE) | 798 | if ((unsigned long) cred->security < PAGE_SIZE) |
diff --git a/kernel/power/user.c b/kernel/power/user.c index 4d2289626a84..a8c96212bc1b 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
@@ -420,7 +420,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
420 | * User space encodes device types as two-byte values, | 420 | * User space encodes device types as two-byte values, |
421 | * so we need to recode them | 421 | * so we need to recode them |
422 | */ | 422 | */ |
423 | swdev = old_decode_dev(swap_area.dev); | 423 | swdev = new_decode_dev(swap_area.dev); |
424 | if (swdev) { | 424 | if (swdev) { |
425 | offset = swap_area.offset; | 425 | offset = swap_area.offset; |
426 | data->swap = swap_type_of(swdev, offset, NULL); | 426 | data->swap = swap_type_of(swdev, offset, NULL); |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 63fe25433980..03a7ea1579f6 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -69,6 +69,13 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active); | |||
69 | 69 | ||
70 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 70 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
71 | 71 | ||
72 | int debug_lockdep_rcu_enabled(void) | ||
73 | { | ||
74 | return rcu_scheduler_active && debug_locks && | ||
75 | current->lockdep_recursion == 0; | ||
76 | } | ||
77 | EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); | ||
78 | |||
72 | /** | 79 | /** |
73 | * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? | 80 | * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? |
74 | * | 81 | * |
diff --git a/kernel/sys.c b/kernel/sys.c index 6d1a7e0f9d5b..7cb426a58965 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1118,7 +1118,7 @@ DECLARE_RWSEM(uts_sem); | |||
1118 | 1118 | ||
1119 | #ifdef COMPAT_UTS_MACHINE | 1119 | #ifdef COMPAT_UTS_MACHINE |
1120 | #define override_architecture(name) \ | 1120 | #define override_architecture(name) \ |
1121 | (current->personality == PER_LINUX32 && \ | 1121 | (personality(current->personality) == PER_LINUX32 && \ |
1122 | copy_to_user(name->machine, COMPAT_UTS_MACHINE, \ | 1122 | copy_to_user(name->machine, COMPAT_UTS_MACHINE, \ |
1123 | sizeof(COMPAT_UTS_MACHINE))) | 1123 | sizeof(COMPAT_UTS_MACHINE))) |
1124 | #else | 1124 | #else |
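The override_architecture() fix matters because the upper bits of current->personality carry flag bits such as ADDR_NO_RANDOMIZE, so a raw comparison against PER_LINUX32 fails as soon as any flag is set; personality() masks the flags off first. Roughly, assuming the constants from <linux/personality.h>:

	unsigned int pers = PER_LINUX32 | ADDR_NO_RANDOMIZE;	/* illustrative */

	bool raw    = (pers == PER_LINUX32);			/* false: flag bit set   */
	bool masked = (personality(pers) == PER_LINUX32);	/* true: flags masked    */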
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ff017108700d..935248bdbc47 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -356,7 +356,7 @@ config SLUB_STATS | |||
356 | config DEBUG_KMEMLEAK | 356 | config DEBUG_KMEMLEAK |
357 | bool "Kernel memory leak detector" | 357 | bool "Kernel memory leak detector" |
358 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ | 358 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ |
359 | (X86 || ARM || PPC || S390 || SUPERH || MICROBLAZE) | 359 | (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE) |
360 | 360 | ||
361 | select DEBUG_FS if SYSFS | 361 | select DEBUG_FS if SYSFS |
362 | select STACKTRACE if STACKTRACE_SUPPORT | 362 | select STACKTRACE if STACKTRACE_SUPPORT |
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c index db521f45626e..bcb3a4bd68ff 100644 --- a/lib/decompress_unlzo.c +++ b/lib/decompress_unlzo.c | |||
@@ -97,7 +97,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, | |||
97 | u32 src_len, dst_len; | 97 | u32 src_len, dst_len; |
98 | size_t tmp; | 98 | size_t tmp; |
99 | u8 *in_buf, *in_buf_save, *out_buf; | 99 | u8 *in_buf, *in_buf_save, *out_buf; |
100 | int obytes_processed = 0; | 100 | int ret = -1; |
101 | 101 | ||
102 | set_error_fn(error_fn); | 102 | set_error_fn(error_fn); |
103 | 103 | ||
@@ -174,15 +174,22 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, | |||
174 | 174 | ||
175 | /* decompress */ | 175 | /* decompress */ |
176 | tmp = dst_len; | 176 | tmp = dst_len; |
177 | r = lzo1x_decompress_safe((u8 *) in_buf, src_len, | 177 | |
178 | /* When the input data is not compressed at all, | ||
179 | * lzo1x_decompress_safe will fail, so call memcpy() | ||
180 | * instead */ | ||
181 | if (unlikely(dst_len == src_len)) | ||
182 | memcpy(out_buf, in_buf, src_len); | ||
183 | else { | ||
184 | r = lzo1x_decompress_safe((u8 *) in_buf, src_len, | ||
178 | out_buf, &tmp); | 185 | out_buf, &tmp); |
179 | 186 | ||
180 | if (r != LZO_E_OK || dst_len != tmp) { | 187 | if (r != LZO_E_OK || dst_len != tmp) { |
181 | error("Compressed data violation"); | 188 | error("Compressed data violation"); |
182 | goto exit_2; | 189 | goto exit_2; |
190 | } | ||
183 | } | 191 | } |
184 | 192 | ||
185 | obytes_processed += dst_len; | ||
186 | if (flush) | 193 | if (flush) |
187 | flush(out_buf, dst_len); | 194 | flush(out_buf, dst_len); |
188 | if (output) | 195 | if (output) |
@@ -196,6 +203,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, | |||
196 | in_buf += src_len; | 203 | in_buf += src_len; |
197 | } | 204 | } |
198 | 205 | ||
206 | ret = 0; | ||
199 | exit_2: | 207 | exit_2: |
200 | if (!input) | 208 | if (!input) |
201 | free(in_buf); | 209 | free(in_buf); |
@@ -203,7 +211,7 @@ exit_1: | |||
203 | if (!output) | 211 | if (!output) |
204 | free(out_buf); | 212 | free(out_buf); |
205 | exit: | 213 | exit: |
206 | return obytes_processed; | 214 | return ret; |
207 | } | 215 | } |
208 | 216 | ||
209 | #define decompress unlzo | 217 | #define decompress unlzo |
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index ba8b67039d13..01e64270e246 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
@@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf, | |||
570 | * Now parse out the first token and use it as the name for the | 570 | * Now parse out the first token and use it as the name for the |
571 | * driver to filter for. | 571 | * driver to filter for. |
572 | */ | 572 | */ |
573 | for (i = 0; i < NAME_MAX_LEN; ++i) { | 573 | for (i = 0; i < NAME_MAX_LEN - 1; ++i) { |
574 | current_driver_name[i] = buf[i]; | 574 | current_driver_name[i] = buf[i]; |
575 | if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) | 575 | if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) |
576 | break; | 576 | break; |
diff --git a/lib/flex_array.c b/lib/flex_array.c index 66eef2e4483e..41b1804fa728 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c | |||
@@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, | |||
99 | ret->element_size = element_size; | 99 | ret->element_size = element_size; |
100 | ret->total_nr_elements = total; | 100 | ret->total_nr_elements = total; |
101 | if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) | 101 | if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) |
102 | memset(ret->parts[0], FLEX_ARRAY_FREE, | 102 | memset(&ret->parts[0], FLEX_ARRAY_FREE, |
103 | FLEX_ARRAY_BASE_BYTES_LEFT); | 103 | FLEX_ARRAY_BASE_BYTES_LEFT); |
104 | return ret; | 104 | return ret; |
105 | } | 105 | } |
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 24112e5a5780..46d34b0b74a8 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -118,6 +118,7 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base) | |||
118 | 118 | ||
119 | return simple_strtoull(cp, endp, base); | 119 | return simple_strtoull(cp, endp, base); |
120 | } | 120 | } |
121 | EXPORT_SYMBOL(simple_strtoll); | ||
121 | 122 | ||
122 | /** | 123 | /** |
123 | * strict_strtoul - convert a string to an unsigned long strictly | 124 | * strict_strtoul - convert a string to an unsigned long strictly |
@@ -408,12 +409,12 @@ enum format_type { | |||
408 | }; | 409 | }; |
409 | 410 | ||
410 | struct printf_spec { | 411 | struct printf_spec { |
411 | u16 type; | 412 | u8 type; /* format_type enum */ |
412 | s16 field_width; /* width of output field */ | ||
413 | u8 flags; /* flags to number() */ | 413 | u8 flags; /* flags to number() */ |
414 | u8 base; | 414 | u8 base; /* number base, 8, 10 or 16 only */ |
415 | s8 precision; /* # of digits/chars */ | 415 | u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */ |
416 | u8 qualifier; | 416 | s16 field_width; /* width of output field */ |
417 | s16 precision; /* # of digits/chars */ | ||
417 | }; | 418 | }; |
418 | 419 | ||
419 | static char *number(char *buf, char *end, unsigned long long num, | 420 | static char *number(char *buf, char *end, unsigned long long num, |
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index f13e067e1467..707d0dc6da0f 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/writeback.h> | 11 | #include <linux/writeback.h> |
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | 13 | ||
14 | static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); | ||
15 | |||
14 | void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) | 16 | void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) |
15 | { | 17 | { |
16 | } | 18 | } |
@@ -25,6 +27,11 @@ struct backing_dev_info default_backing_dev_info = { | |||
25 | }; | 27 | }; |
26 | EXPORT_SYMBOL_GPL(default_backing_dev_info); | 28 | EXPORT_SYMBOL_GPL(default_backing_dev_info); |
27 | 29 | ||
30 | struct backing_dev_info noop_backing_dev_info = { | ||
31 | .name = "noop", | ||
32 | }; | ||
33 | EXPORT_SYMBOL_GPL(noop_backing_dev_info); | ||
34 | |||
28 | static struct class *bdi_class; | 35 | static struct class *bdi_class; |
29 | 36 | ||
30 | /* | 37 | /* |
@@ -715,6 +722,33 @@ void bdi_destroy(struct backing_dev_info *bdi) | |||
715 | } | 722 | } |
716 | EXPORT_SYMBOL(bdi_destroy); | 723 | EXPORT_SYMBOL(bdi_destroy); |
717 | 724 | ||
725 | /* | ||
726 | * For use from filesystems to quickly init and register a bdi associated | ||
727 | * with dirty writeback | ||
728 | */ | ||
729 | int bdi_setup_and_register(struct backing_dev_info *bdi, char *name, | ||
730 | unsigned int cap) | ||
731 | { | ||
732 | char tmp[32]; | ||
733 | int err; | ||
734 | |||
735 | bdi->name = name; | ||
736 | bdi->capabilities = cap; | ||
737 | err = bdi_init(bdi); | ||
738 | if (err) | ||
739 | return err; | ||
740 | |||
741 | sprintf(tmp, "%.28s%s", name, "-%d"); | ||
742 | err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq)); | ||
743 | if (err) { | ||
744 | bdi_destroy(bdi); | ||
745 | return err; | ||
746 | } | ||
747 | |||
748 | return 0; | ||
749 | } | ||
750 | EXPORT_SYMBOL(bdi_setup_and_register); | ||
751 | |||
718 | static wait_queue_head_t congestion_wqh[2] = { | 752 | static wait_queue_head_t congestion_wqh[2] = { |
719 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), | 753 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), |
720 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) | 754 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) |
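A filesystem that previously open-coded bdi_init() plus bdi_register() can now do both in one call at mount time and tear down with bdi_destroy(); the coda, ncpfs and smbfs changes elsewhere in this series follow this pattern. A hedged sketch (the sbi container, error label and "examplefs" name are hypothetical):

	/* in the filesystem's fill_super/connect path */
	error = bdi_setup_and_register(&sbi->bdi, "examplefs", BDI_CAP_MAP_COPY);
	if (error)
		goto out_free_sbi;
	sb->s_bdi = &sbi->bdi;

	/* in kill_sb/put_super */
	bdi_destroy(&sbi->bdi);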
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6034dc9e9796..ffbdfc86aedf 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -546,6 +546,7 @@ static void free_huge_page(struct page *page) | |||
546 | 546 | ||
547 | mapping = (struct address_space *) page_private(page); | 547 | mapping = (struct address_space *) page_private(page); |
548 | set_page_private(page, 0); | 548 | set_page_private(page, 0); |
549 | page->mapping = NULL; | ||
549 | BUG_ON(page_count(page)); | 550 | BUG_ON(page_count(page)); |
550 | INIT_LIST_HEAD(&page->lru); | 551 | INIT_LIST_HEAD(&page->lru); |
551 | 552 | ||
@@ -2447,8 +2448,10 @@ retry: | |||
2447 | spin_lock(&inode->i_lock); | 2448 | spin_lock(&inode->i_lock); |
2448 | inode->i_blocks += blocks_per_huge_page(h); | 2449 | inode->i_blocks += blocks_per_huge_page(h); |
2449 | spin_unlock(&inode->i_lock); | 2450 | spin_unlock(&inode->i_lock); |
2450 | } else | 2451 | } else { |
2451 | lock_page(page); | 2452 | lock_page(page); |
2453 | page->mapping = HUGETLB_POISON; | ||
2454 | } | ||
2452 | } | 2455 | } |
2453 | 2456 | ||
2454 | /* | 2457 | /* |
@@ -365,7 +365,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) | |||
365 | do { | 365 | do { |
366 | cond_resched(); | 366 | cond_resched(); |
367 | page = follow_page(vma, addr, FOLL_GET); | 367 | page = follow_page(vma, addr, FOLL_GET); |
368 | if (!page) | 368 | if (IS_ERR_OR_NULL(page)) |
369 | break; | 369 | break; |
370 | if (PageKsm(page)) | 370 | if (PageKsm(page)) |
371 | ret = handle_mm_fault(vma->vm_mm, vma, addr, | 371 | ret = handle_mm_fault(vma->vm_mm, vma, addr, |
@@ -447,7 +447,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item) | |||
447 | goto out; | 447 | goto out; |
448 | 448 | ||
449 | page = follow_page(vma, addr, FOLL_GET); | 449 | page = follow_page(vma, addr, FOLL_GET); |
450 | if (!page) | 450 | if (IS_ERR_OR_NULL(page)) |
451 | goto out; | 451 | goto out; |
452 | if (PageAnon(page)) { | 452 | if (PageAnon(page)) { |
453 | flush_anon_page(vma, page, addr); | 453 | flush_anon_page(vma, page, addr); |
@@ -1086,7 +1086,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, | |||
1086 | cond_resched(); | 1086 | cond_resched(); |
1087 | tree_rmap_item = rb_entry(*new, struct rmap_item, node); | 1087 | tree_rmap_item = rb_entry(*new, struct rmap_item, node); |
1088 | tree_page = get_mergeable_page(tree_rmap_item); | 1088 | tree_page = get_mergeable_page(tree_rmap_item); |
1089 | if (!tree_page) | 1089 | if (IS_ERR_OR_NULL(tree_page)) |
1090 | return NULL; | 1090 | return NULL; |
1091 | 1091 | ||
1092 | /* | 1092 | /* |
@@ -1294,7 +1294,7 @@ next_mm: | |||
1294 | if (ksm_test_exit(mm)) | 1294 | if (ksm_test_exit(mm)) |
1295 | break; | 1295 | break; |
1296 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); | 1296 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); |
1297 | if (*page && PageAnon(*page)) { | 1297 | if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) { |
1298 | flush_anon_page(vma, *page, ksm_scan.address); | 1298 | flush_anon_page(vma, *page, ksm_scan.address); |
1299 | flush_dcache_page(*page); | 1299 | flush_dcache_page(*page); |
1300 | rmap_item = get_next_rmap_item(slot, | 1300 | rmap_item = get_next_rmap_item(slot, |
@@ -1308,7 +1308,7 @@ next_mm: | |||
1308 | up_read(&mm->mmap_sem); | 1308 | up_read(&mm->mmap_sem); |
1309 | return rmap_item; | 1309 | return rmap_item; |
1310 | } | 1310 | } |
1311 | if (*page) | 1311 | if (!IS_ERR_OR_NULL(*page)) |
1312 | put_page(*page); | 1312 | put_page(*page); |
1313 | ksm_scan.address += PAGE_SIZE; | 1313 | ksm_scan.address += PAGE_SIZE; |
1314 | cond_resched(); | 1314 | cond_resched(); |
@@ -1367,7 +1367,7 @@ next_mm: | |||
1367 | static void ksm_do_scan(unsigned int scan_npages) | 1367 | static void ksm_do_scan(unsigned int scan_npages) |
1368 | { | 1368 | { |
1369 | struct rmap_item *rmap_item; | 1369 | struct rmap_item *rmap_item; |
1370 | struct page *page; | 1370 | struct page *uninitialized_var(page); |
1371 | 1371 | ||
1372 | while (scan_npages--) { | 1372 | while (scan_npages--) { |
1373 | cond_resched(); | 1373 | cond_resched(); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f4ede99c8b9b..6c755de385f7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -2429,11 +2429,11 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) | |||
2429 | } | 2429 | } |
2430 | unlock_page_cgroup(pc); | 2430 | unlock_page_cgroup(pc); |
2431 | 2431 | ||
2432 | *ptr = mem; | ||
2432 | if (mem) { | 2433 | if (mem) { |
2433 | ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false); | 2434 | ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false); |
2434 | css_put(&mem->css); | 2435 | css_put(&mem->css); |
2435 | } | 2436 | } |
2436 | *ptr = mem; | ||
2437 | return ret; | 2437 | return ret; |
2438 | } | 2438 | } |
2439 | 2439 | ||
@@ -1977,7 +1977,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, | |||
1977 | return 0; | 1977 | return 0; |
1978 | 1978 | ||
1979 | /* Clean everything up if vma_adjust failed. */ | 1979 | /* Clean everything up if vma_adjust failed. */ |
1980 | new->vm_ops->close(new); | 1980 | if (new->vm_ops && new->vm_ops->close) |
1981 | new->vm_ops->close(new); | ||
1981 | if (new->vm_file) { | 1982 | if (new->vm_file) { |
1982 | if (vma->vm_flags & VM_EXECUTABLE) | 1983 | if (vma->vm_flags & VM_EXECUTABLE) |
1983 | removed_exe_file_vma(mm); | 1984 | removed_exe_file_vma(mm); |
@@ -133,8 +133,8 @@ int anon_vma_prepare(struct vm_area_struct *vma) | |||
133 | goto out_enomem_free_avc; | 133 | goto out_enomem_free_avc; |
134 | allocated = anon_vma; | 134 | allocated = anon_vma; |
135 | } | 135 | } |
136 | spin_lock(&anon_vma->lock); | ||
137 | 136 | ||
137 | spin_lock(&anon_vma->lock); | ||
138 | /* page_table_lock to protect against threads */ | 138 | /* page_table_lock to protect against threads */ |
139 | spin_lock(&mm->page_table_lock); | 139 | spin_lock(&mm->page_table_lock); |
140 | if (likely(!vma->anon_vma)) { | 140 | if (likely(!vma->anon_vma)) { |
@@ -144,14 +144,15 @@ int anon_vma_prepare(struct vm_area_struct *vma) | |||
144 | list_add(&avc->same_vma, &vma->anon_vma_chain); | 144 | list_add(&avc->same_vma, &vma->anon_vma_chain); |
145 | list_add(&avc->same_anon_vma, &anon_vma->head); | 145 | list_add(&avc->same_anon_vma, &anon_vma->head); |
146 | allocated = NULL; | 146 | allocated = NULL; |
147 | avc = NULL; | ||
147 | } | 148 | } |
148 | spin_unlock(&mm->page_table_lock); | 149 | spin_unlock(&mm->page_table_lock); |
149 | |||
150 | spin_unlock(&anon_vma->lock); | 150 | spin_unlock(&anon_vma->lock); |
151 | if (unlikely(allocated)) { | 151 | |
152 | if (unlikely(allocated)) | ||
152 | anon_vma_free(allocated); | 153 | anon_vma_free(allocated); |
154 | if (unlikely(avc)) | ||
153 | anon_vma_chain_free(avc); | 155 | anon_vma_chain_free(avc); |
154 | } | ||
155 | } | 156 | } |
156 | return 0; | 157 | return 0; |
157 | 158 | ||
@@ -730,23 +731,28 @@ void page_move_anon_rmap(struct page *page, | |||
730 | * @page: the page to add the mapping to | 731 | * @page: the page to add the mapping to |
731 | * @vma: the vm area in which the mapping is added | 732 | * @vma: the vm area in which the mapping is added |
732 | * @address: the user virtual address mapped | 733 | * @address: the user virtual address mapped |
734 | * @exclusive: the page is exclusively owned by the current process | ||
733 | */ | 735 | */ |
734 | static void __page_set_anon_rmap(struct page *page, | 736 | static void __page_set_anon_rmap(struct page *page, |
735 | struct vm_area_struct *vma, unsigned long address) | 737 | struct vm_area_struct *vma, unsigned long address, int exclusive) |
736 | { | 738 | { |
737 | struct anon_vma_chain *avc; | 739 | struct anon_vma *anon_vma = vma->anon_vma; |
738 | struct anon_vma *anon_vma; | ||
739 | 740 | ||
740 | BUG_ON(!vma->anon_vma); | 741 | BUG_ON(!anon_vma); |
741 | 742 | ||
742 | /* | 743 | /* |
743 | * We must use the _oldest_ possible anon_vma for the page mapping! | 744 | * If the page isn't exclusively mapped into this vma, |
745 | * we must use the _oldest_ possible anon_vma for the | ||
746 | * page mapping! | ||
744 | * | 747 | * |
745 | * So take the last AVC chain entry in the vma, which is the deepest | 748 | * So take the last AVC chain entry in the vma, which is |
746 | * ancestor, and use the anon_vma from that. | 749 | * the deepest ancestor, and use the anon_vma from that. |
747 | */ | 750 | */ |
748 | avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma); | 751 | if (!exclusive) { |
749 | anon_vma = avc->anon_vma; | 752 | struct anon_vma_chain *avc; |
753 | avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma); | ||
754 | anon_vma = avc->anon_vma; | ||
755 | } | ||
750 | 756 | ||
751 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; | 757 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
752 | page->mapping = (struct address_space *) anon_vma; | 758 | page->mapping = (struct address_space *) anon_vma; |
@@ -802,7 +808,7 @@ void page_add_anon_rmap(struct page *page, | |||
802 | VM_BUG_ON(!PageLocked(page)); | 808 | VM_BUG_ON(!PageLocked(page)); |
803 | VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 809 | VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
804 | if (first) | 810 | if (first) |
805 | __page_set_anon_rmap(page, vma, address); | 811 | __page_set_anon_rmap(page, vma, address, 0); |
806 | else | 812 | else |
807 | __page_check_anon_rmap(page, vma, address); | 813 | __page_check_anon_rmap(page, vma, address); |
808 | } | 814 | } |
@@ -824,7 +830,7 @@ void page_add_new_anon_rmap(struct page *page, | |||
824 | SetPageSwapBacked(page); | 830 | SetPageSwapBacked(page); |
825 | atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ | 831 | atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ |
826 | __inc_zone_page_state(page, NR_ANON_PAGES); | 832 | __inc_zone_page_state(page, NR_ANON_PAGES); |
827 | __page_set_anon_rmap(page, vma, address); | 833 | __page_set_anon_rmap(page, vma, address, 1); |
828 | if (page_evictable(page, vma)) | 834 | if (page_evictable(page, vma)) |
829 | lru_cache_add_lru(page, LRU_ACTIVE_ANON); | 835 | lru_cache_add_lru(page, LRU_ACTIVE_ANON); |
830 | else | 836 | else |
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 99d68c34e4f1..9753b690a8b3 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -1626,7 +1626,10 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms | |||
1626 | /* Connectionless channel */ | 1626 | /* Connectionless channel */ |
1627 | if (sk->sk_type == SOCK_DGRAM) { | 1627 | if (sk->sk_type == SOCK_DGRAM) { |
1628 | skb = l2cap_create_connless_pdu(sk, msg, len); | 1628 | skb = l2cap_create_connless_pdu(sk, msg, len); |
1629 | err = l2cap_do_send(sk, skb); | 1629 | if (IS_ERR(skb)) |
1630 | err = PTR_ERR(skb); | ||
1631 | else | ||
1632 | err = l2cap_do_send(sk, skb); | ||
1630 | goto done; | 1633 | goto done; |
1631 | } | 1634 | } |
1632 | 1635 | ||
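The fix relies on the usual <linux/err.h> convention: the PDU allocator returns either a valid skb or a negative errno encoded in the pointer, so the result must be tested with IS_ERR() before it is used. The general pattern, with a made-up allocator name standing in for the real one:

	struct sk_buff *skb = example_pdu_alloc(sk, msg, len);	/* hypothetical */

	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* negative errno, e.g. -ENOMEM */
	/* only a real skb pointer reaches this point */
	err = l2cap_do_send(sk, skb);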
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 6980625537ca..eaa0e1bae49b 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -723,11 +723,11 @@ static int br_multicast_igmp3_report(struct net_bridge *br, | |||
723 | if (!pskb_may_pull(skb, len)) | 723 | if (!pskb_may_pull(skb, len)) |
724 | return -EINVAL; | 724 | return -EINVAL; |
725 | 725 | ||
726 | grec = (void *)(skb->data + len); | 726 | grec = (void *)(skb->data + len - sizeof(*grec)); |
727 | group = grec->grec_mca; | 727 | group = grec->grec_mca; |
728 | type = grec->grec_type; | 728 | type = grec->grec_type; |
729 | 729 | ||
730 | len += grec->grec_nsrcs * 4; | 730 | len += ntohs(grec->grec_nsrcs) * 4; |
731 | if (!pskb_may_pull(skb, len)) | 731 | if (!pskb_may_pull(skb, len)) |
732 | return -EINVAL; | 732 | return -EINVAL; |
733 | 733 | ||
@@ -957,9 +957,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
957 | unsigned offset; | 957 | unsigned offset; |
958 | int err; | 958 | int err; |
959 | 959 | ||
960 | BR_INPUT_SKB_CB(skb)->igmp = 0; | ||
961 | BR_INPUT_SKB_CB(skb)->mrouters_only = 0; | ||
962 | |||
963 | /* We treat OOM as packet loss for now. */ | 960 | /* We treat OOM as packet loss for now. */ |
964 | if (!pskb_may_pull(skb, sizeof(*iph))) | 961 | if (!pskb_may_pull(skb, sizeof(*iph))) |
965 | return -EINVAL; | 962 | return -EINVAL; |
@@ -1049,6 +1046,9 @@ err_out: | |||
1049 | int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, | 1046 | int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, |
1050 | struct sk_buff *skb) | 1047 | struct sk_buff *skb) |
1051 | { | 1048 | { |
1049 | BR_INPUT_SKB_CB(skb)->igmp = 0; | ||
1050 | BR_INPUT_SKB_CB(skb)->mrouters_only = 0; | ||
1051 | |||
1052 | if (br->multicast_disabled) | 1052 | if (br->multicast_disabled) |
1053 | return 0; | 1053 | return 0; |
1054 | 1054 | ||
diff --git a/net/can/raw.c b/net/can/raw.c index 3a7dffb6519c..da99cf153b33 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
@@ -445,7 +445,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, | |||
445 | return -EFAULT; | 445 | return -EFAULT; |
446 | } | 446 | } |
447 | } else if (count == 1) { | 447 | } else if (count == 1) { |
448 | if (copy_from_user(&sfilter, optval, optlen)) | 448 | if (copy_from_user(&sfilter, optval, sizeof(sfilter))) |
449 | return -EFAULT; | 449 | return -EFAULT; |
450 | } | 450 | } |
451 | 451 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 1c8a0ce473a8..f769098774b7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1989,8 +1989,12 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, | |||
1989 | if (dev->real_num_tx_queues > 1) | 1989 | if (dev->real_num_tx_queues > 1) |
1990 | queue_index = skb_tx_hash(dev, skb); | 1990 | queue_index = skb_tx_hash(dev, skb); |
1991 | 1991 | ||
1992 | if (sk && sk->sk_dst_cache) | 1992 | if (sk) { |
1993 | sk_tx_queue_set(sk, queue_index); | 1993 | struct dst_entry *dst = rcu_dereference_bh(sk->sk_dst_cache); |
1994 | |||
1995 | if (dst && skb_dst(skb) == dst) | ||
1996 | sk_tx_queue_set(sk, queue_index); | ||
1997 | } | ||
1994 | } | 1998 | } |
1995 | } | 1999 | } |
1996 | 2000 | ||
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4568120d8533..fe776c9ddeca 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -1270,10 +1270,11 @@ replay: | |||
1270 | err = ops->newlink(net, dev, tb, data); | 1270 | err = ops->newlink(net, dev, tb, data); |
1271 | else | 1271 | else |
1272 | err = register_netdevice(dev); | 1272 | err = register_netdevice(dev); |
1273 | if (err < 0 && !IS_ERR(dev)) { | 1273 | |
1274 | if (err < 0 && !IS_ERR(dev)) | ||
1274 | free_netdev(dev); | 1275 | free_netdev(dev); |
1276 | if (err < 0) | ||
1275 | goto out; | 1277 | goto out; |
1276 | } | ||
1277 | 1278 | ||
1278 | err = rtnl_configure_link(dev, ifm); | 1279 | err = rtnl_configure_link(dev, ifm); |
1279 | if (err < 0) | 1280 | if (err < 0) |
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c index c7da600750bb..93c91b633a56 100644 --- a/net/ieee802154/af_ieee802154.c +++ b/net/ieee802154/af_ieee802154.c | |||
@@ -151,6 +151,9 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg, | |||
151 | dev_load(sock_net(sk), ifr.ifr_name); | 151 | dev_load(sock_net(sk), ifr.ifr_name); |
152 | dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); | 152 | dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); |
153 | 153 | ||
154 | if (!dev) | ||
155 | return -ENODEV; | ||
156 | |||
154 | if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl) | 157 | if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl) |
155 | ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); | 158 | ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); |
156 | 159 | ||
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 59a838795e3e..c98f115fb0fd 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -209,7 +209,9 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i) | |||
209 | { | 209 | { |
210 | struct node *ret = tnode_get_child(tn, i); | 210 | struct node *ret = tnode_get_child(tn, i); |
211 | 211 | ||
212 | return rcu_dereference(ret); | 212 | return rcu_dereference_check(ret, |
213 | rcu_read_lock_held() || | ||
214 | lockdep_rtnl_is_held()); | ||
213 | } | 215 | } |
214 | 216 | ||
215 | static inline int tnode_child_length(const struct tnode *tn) | 217 | static inline int tnode_child_length(const struct tnode *tn) |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index c65f18e0936e..d1bcc9f21d4f 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -120,7 +120,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb) | |||
120 | newskb->pkt_type = PACKET_LOOPBACK; | 120 | newskb->pkt_type = PACKET_LOOPBACK; |
121 | newskb->ip_summed = CHECKSUM_UNNECESSARY; | 121 | newskb->ip_summed = CHECKSUM_UNNECESSARY; |
122 | WARN_ON(!skb_dst(newskb)); | 122 | WARN_ON(!skb_dst(newskb)); |
123 | netif_rx(newskb); | 123 | netif_rx_ni(newskb); |
124 | return 0; | 124 | return 0; |
125 | } | 125 | } |
126 | 126 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 954bbfb39dff..8fef859db35d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -472,8 +472,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, | |||
472 | if (hslot->count < hslot2->count) | 472 | if (hslot->count < hslot2->count) |
473 | goto begin; | 473 | goto begin; |
474 | 474 | ||
475 | result = udp4_lib_lookup2(net, INADDR_ANY, sport, | 475 | result = udp4_lib_lookup2(net, saddr, sport, |
476 | daddr, hnum, dif, | 476 | INADDR_ANY, hnum, dif, |
477 | hslot2, slot2); | 477 | hslot2, slot2); |
478 | } | 478 | } |
479 | rcu_read_unlock(); | 479 | rcu_read_unlock(); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 16c4391f952b..75d5ef830097 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -108,7 +108,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb) | |||
108 | newskb->ip_summed = CHECKSUM_UNNECESSARY; | 108 | newskb->ip_summed = CHECKSUM_UNNECESSARY; |
109 | WARN_ON(!skb_dst(newskb)); | 109 | WARN_ON(!skb_dst(newskb)); |
110 | 110 | ||
111 | netif_rx(newskb); | 111 | netif_rx_ni(newskb); |
112 | return 0; | 112 | return 0; |
113 | } | 113 | } |
114 | 114 | ||
@@ -629,7 +629,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
629 | /* We must not fragment if the socket is set to force MTU discovery | 629 | /* We must not fragment if the socket is set to force MTU discovery |
630 | * or if the skb is not generated by a local socket. | 630 | * or if the skb is not generated by a local socket. |
631 | */ | 631 | */ |
632 | if (!skb->local_df) { | 632 | if (!skb->local_df && skb->len > mtu) { |
633 | skb->dev = skb_dst(skb)->dev; | 633 | skb->dev = skb_dst(skb)->dev; |
634 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 634 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
635 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | 635 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c2438e8cb9d0..05ebd7833043 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -815,7 +815,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk, | |||
815 | { | 815 | { |
816 | int flags = 0; | 816 | int flags = 0; |
817 | 817 | ||
818 | if (rt6_need_strict(&fl->fl6_dst)) | 818 | if (fl->oif || rt6_need_strict(&fl->fl6_dst)) |
819 | flags |= RT6_LOOKUP_F_IFACE; | 819 | flags |= RT6_LOOKUP_F_IFACE; |
820 | 820 | ||
821 | if (!ipv6_addr_any(&fl->fl6_src)) | 821 | if (!ipv6_addr_any(&fl->fl6_src)) |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c92ebe8f80d5..075f540ec197 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1015,7 +1015,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
1015 | skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); | 1015 | skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); |
1016 | 1016 | ||
1017 | t1 = (struct tcphdr *) skb_push(buff, tot_len); | 1017 | t1 = (struct tcphdr *) skb_push(buff, tot_len); |
1018 | skb_reset_transport_header(skb); | 1018 | skb_reset_transport_header(buff); |
1019 | 1019 | ||
1020 | /* Swap the send and the receive. */ | 1020 | /* Swap the send and the receive. */ |
1021 | memset(t1, 0, sizeof(*t1)); | 1021 | memset(t1, 0, sizeof(*t1)); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c177aea88c0b..90824852f598 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -259,8 +259,8 @@ static struct sock *__udp6_lib_lookup(struct net *net, | |||
259 | if (hslot->count < hslot2->count) | 259 | if (hslot->count < hslot2->count) |
260 | goto begin; | 260 | goto begin; |
261 | 261 | ||
262 | result = udp6_lib_lookup2(net, &in6addr_any, sport, | 262 | result = udp6_lib_lookup2(net, saddr, sport, |
263 | daddr, hnum, dif, | 263 | &in6addr_any, hnum, dif, |
264 | hslot2, slot2); | 264 | hslot2, slot2); |
265 | } | 265 | } |
266 | rcu_read_unlock(); | 266 | rcu_read_unlock(); |
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ae181651c75a..00bf7c962b7e 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -124,7 +124,7 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
124 | xdst->u.dst.dev = dev; | 124 | xdst->u.dst.dev = dev; |
125 | dev_hold(dev); | 125 | dev_hold(dev); |
126 | 126 | ||
127 | xdst->u.rt6.rt6i_idev = in6_dev_get(rt->u.dst.dev); | 127 | xdst->u.rt6.rt6i_idev = in6_dev_get(dev); |
128 | if (!xdst->u.rt6.rt6i_idev) | 128 | if (!xdst->u.rt6.rt6i_idev) |
129 | return -ENODEV; | 129 | return -ENODEV; |
130 | 130 | ||
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 96d25348aa59..87782a4bb541 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -184,7 +184,6 @@ static void sta_addba_resp_timer_expired(unsigned long data) | |||
184 | HT_AGG_STATE_REQ_STOP_BA_MSK)) != | 184 | HT_AGG_STATE_REQ_STOP_BA_MSK)) != |
185 | HT_ADDBA_REQUESTED_MSK) { | 185 | HT_ADDBA_REQUESTED_MSK) { |
186 | spin_unlock_bh(&sta->lock); | 186 | spin_unlock_bh(&sta->lock); |
187 | *state = HT_AGG_STATE_IDLE; | ||
188 | #ifdef CONFIG_MAC80211_HT_DEBUG | 187 | #ifdef CONFIG_MAC80211_HT_DEBUG |
189 | printk(KERN_DEBUG "timer expired on tid %d but we are not " | 188 | printk(KERN_DEBUG "timer expired on tid %d but we are not " |
190 | "(or no longer) expecting addBA response there", | 189 | "(or no longer) expecting addBA response there", |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 06c33b68d8e5..b887e484ae04 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -225,11 +225,11 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | |||
225 | switch (sdata->vif.type) { | 225 | switch (sdata->vif.type) { |
226 | case NL80211_IFTYPE_AP: | 226 | case NL80211_IFTYPE_AP: |
227 | sdata->vif.bss_conf.enable_beacon = | 227 | sdata->vif.bss_conf.enable_beacon = |
228 | !!rcu_dereference(sdata->u.ap.beacon); | 228 | !!sdata->u.ap.beacon; |
229 | break; | 229 | break; |
230 | case NL80211_IFTYPE_ADHOC: | 230 | case NL80211_IFTYPE_ADHOC: |
231 | sdata->vif.bss_conf.enable_beacon = | 231 | sdata->vif.bss_conf.enable_beacon = |
232 | !!rcu_dereference(sdata->u.ibss.presp); | 232 | !!sdata->u.ibss.presp; |
233 | break; | 233 | break; |
234 | case NL80211_IFTYPE_MESH_POINT: | 234 | case NL80211_IFTYPE_MESH_POINT: |
235 | sdata->vif.bss_conf.enable_beacon = true; | 235 | sdata->vif.bss_conf.enable_beacon = true; |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 58e3e3a61d99..859ee5f3d941 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -750,9 +750,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | |||
750 | 750 | ||
751 | switch (fc & IEEE80211_FCTL_STYPE) { | 751 | switch (fc & IEEE80211_FCTL_STYPE) { |
752 | case IEEE80211_STYPE_ACTION: | 752 | case IEEE80211_STYPE_ACTION: |
753 | if (skb->len < IEEE80211_MIN_ACTION_SIZE) | ||
754 | return RX_DROP_MONITOR; | ||
755 | /* fall through */ | ||
756 | case IEEE80211_STYPE_PROBE_RESP: | 753 | case IEEE80211_STYPE_PROBE_RESP: |
757 | case IEEE80211_STYPE_BEACON: | 754 | case IEEE80211_STYPE_BEACON: |
758 | skb_queue_tail(&ifmsh->skb_queue, skb); | 755 | skb_queue_tail(&ifmsh->skb_queue, skb); |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index c8cd169fc10e..4aefa6dc3091 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -168,6 +168,8 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, | |||
168 | ht_changed = conf_is_ht(&local->hw.conf) != enable_ht || | 168 | ht_changed = conf_is_ht(&local->hw.conf) != enable_ht || |
169 | channel_type != local->hw.conf.channel_type; | 169 | channel_type != local->hw.conf.channel_type; |
170 | 170 | ||
171 | if (local->tmp_channel) | ||
172 | local->tmp_channel_type = channel_type; | ||
171 | local->oper_channel_type = channel_type; | 173 | local->oper_channel_type = channel_type; |
172 | 174 | ||
173 | if (ht_changed) { | 175 | if (ht_changed) { |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index f0accf622cd7..04ea07f0e78a 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1974,6 +1974,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1974 | goto handled; | 1974 | goto handled; |
1975 | } | 1975 | } |
1976 | break; | 1976 | break; |
1977 | case MESH_PLINK_CATEGORY: | ||
1978 | case MESH_PATH_SEL_CATEGORY: | ||
1979 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
1980 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb); | ||
1981 | break; | ||
1977 | } | 1982 | } |
1978 | 1983 | ||
1979 | /* | 1984 | /* |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 56422d894351..fb12cec4d333 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -93,12 +93,18 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, | |||
93 | struct ieee80211_local *local = sdata->local; | 93 | struct ieee80211_local *local = sdata->local; |
94 | struct sta_info *sta; | 94 | struct sta_info *sta; |
95 | 95 | ||
96 | sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); | 96 | sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], |
97 | rcu_read_lock_held() || | ||
98 | lockdep_is_held(&local->sta_lock) || | ||
99 | lockdep_is_held(&local->sta_mtx)); | ||
97 | while (sta) { | 100 | while (sta) { |
98 | if (sta->sdata == sdata && | 101 | if (sta->sdata == sdata && |
99 | memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) | 102 | memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) |
100 | break; | 103 | break; |
101 | sta = rcu_dereference(sta->hnext); | 104 | sta = rcu_dereference_check(sta->hnext, |
105 | rcu_read_lock_held() || | ||
106 | lockdep_is_held(&local->sta_lock) || | ||
107 | lockdep_is_held(&local->sta_mtx)); | ||
102 | } | 108 | } |
103 | return sta; | 109 | return sta; |
104 | } | 110 | } |
@@ -113,13 +119,19 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, | |||
113 | struct ieee80211_local *local = sdata->local; | 119 | struct ieee80211_local *local = sdata->local; |
114 | struct sta_info *sta; | 120 | struct sta_info *sta; |
115 | 121 | ||
116 | sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); | 122 | sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], |
123 | rcu_read_lock_held() || | ||
124 | lockdep_is_held(&local->sta_lock) || | ||
125 | lockdep_is_held(&local->sta_mtx)); | ||
117 | while (sta) { | 126 | while (sta) { |
118 | if ((sta->sdata == sdata || | 127 | if ((sta->sdata == sdata || |
119 | sta->sdata->bss == sdata->bss) && | 128 | sta->sdata->bss == sdata->bss) && |
120 | memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) | 129 | memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) |
121 | break; | 130 | break; |
122 | sta = rcu_dereference(sta->hnext); | 131 | sta = rcu_dereference_check(sta->hnext, |
132 | rcu_read_lock_held() || | ||
133 | lockdep_is_held(&local->sta_lock) || | ||
134 | lockdep_is_held(&local->sta_mtx)); | ||
123 | } | 135 | } |
124 | return sta; | 136 | return sta; |
125 | } | 137 | } |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index cc90363d7e7a..243946d4809d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -2169,8 +2169,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, | |||
2169 | case SIOCGIFDSTADDR: | 2169 | case SIOCGIFDSTADDR: |
2170 | case SIOCSIFDSTADDR: | 2170 | case SIOCSIFDSTADDR: |
2171 | case SIOCSIFFLAGS: | 2171 | case SIOCSIFFLAGS: |
2172 | if (!net_eq(sock_net(sk), &init_net)) | ||
2173 | return -ENOIOCTLCMD; | ||
2174 | return inet_dgram_ops.ioctl(sock, cmd, arg); | 2172 | return inet_dgram_ops.ioctl(sock, cmd, arg); |
2175 | #endif | 2173 | #endif |
2176 | 2174 | ||
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 9ece910ea394..7b155081b4dc 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c | |||
@@ -134,7 +134,7 @@ static int __init rds_rdma_listen_init(void) | |||
134 | ret = PTR_ERR(cm_id); | 134 | ret = PTR_ERR(cm_id); |
135 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " | 135 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " |
136 | "rdma_create_id() returned %d\n", ret); | 136 | "rdma_create_id() returned %d\n", ret); |
137 | goto out; | 137 | return ret; |
138 | } | 138 | } |
139 | 139 | ||
140 | sin.sin_family = AF_INET, | 140 | sin.sin_family = AF_INET, |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index df5abbff63e2..99c93ee98ad9 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1194,8 +1194,10 @@ void sctp_assoc_update(struct sctp_association *asoc, | |||
1194 | /* Remove any peer addresses not present in the new association. */ | 1194 | /* Remove any peer addresses not present in the new association. */ |
1195 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | 1195 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { |
1196 | trans = list_entry(pos, struct sctp_transport, transports); | 1196 | trans = list_entry(pos, struct sctp_transport, transports); |
1197 | if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) | 1197 | if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) { |
1198 | sctp_assoc_del_peer(asoc, &trans->ipaddr); | 1198 | sctp_assoc_rm_peer(asoc, trans); |
1199 | continue; | ||
1200 | } | ||
1199 | 1201 | ||
1200 | if (asoc->state >= SCTP_STATE_ESTABLISHED) | 1202 | if (asoc->state >= SCTP_STATE_ESTABLISHED) |
1201 | sctp_transport_reset(trans); | 1203 | sctp_transport_reset(trans); |
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 905fda582b92..7ec09ba03a1c 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -144,6 +144,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | |||
144 | /* Use SCTP specific send buffer space queues. */ | 144 | /* Use SCTP specific send buffer space queues. */ |
145 | ep->sndbuf_policy = sctp_sndbuf_policy; | 145 | ep->sndbuf_policy = sctp_sndbuf_policy; |
146 | 146 | ||
147 | sk->sk_data_ready = sctp_data_ready; | ||
147 | sk->sk_write_space = sctp_write_space; | 148 | sk->sk_write_space = sctp_write_space; |
148 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); | 149 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); |
149 | 150 | ||
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 17cb400ecd6a..0fd5b4c88358 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -208,7 +208,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
208 | sp = sctp_sk(asoc->base.sk); | 208 | sp = sctp_sk(asoc->base.sk); |
209 | num_types = sp->pf->supported_addrs(sp, types); | 209 | num_types = sp->pf->supported_addrs(sp, types); |
210 | 210 | ||
211 | chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types); | 211 | chunksize = sizeof(init) + addrs_len; |
212 | chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types)); | ||
212 | chunksize += sizeof(ecap_param); | 213 | chunksize += sizeof(ecap_param); |
213 | 214 | ||
214 | if (sctp_prsctp_enable) | 215 | if (sctp_prsctp_enable) |
@@ -238,14 +239,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
238 | /* Add HMACS parameter length if any were defined */ | 239 | /* Add HMACS parameter length if any were defined */ |
239 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; | 240 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; |
240 | if (auth_hmacs->length) | 241 | if (auth_hmacs->length) |
241 | chunksize += ntohs(auth_hmacs->length); | 242 | chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); |
242 | else | 243 | else |
243 | auth_hmacs = NULL; | 244 | auth_hmacs = NULL; |
244 | 245 | ||
245 | /* Add CHUNKS parameter length */ | 246 | /* Add CHUNKS parameter length */ |
246 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; | 247 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; |
247 | if (auth_chunks->length) | 248 | if (auth_chunks->length) |
248 | chunksize += ntohs(auth_chunks->length); | 249 | chunksize += WORD_ROUND(ntohs(auth_chunks->length)); |
249 | else | 250 | else |
250 | auth_chunks = NULL; | 251 | auth_chunks = NULL; |
251 | 252 | ||
@@ -255,7 +256,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
255 | 256 | ||
256 | /* If we have any extensions to report, account for that */ | 257 | /* If we have any extensions to report, account for that */ |
257 | if (num_ext) | 258 | if (num_ext) |
258 | chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; | 259 | chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + |
260 | num_ext); | ||
259 | 261 | ||
260 | /* RFC 2960 3.3.2 Initiation (INIT) (1) | 262 | /* RFC 2960 3.3.2 Initiation (INIT) (1) |
261 | * | 263 | * |
@@ -397,13 +399,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, | |||
397 | 399 | ||
398 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; | 400 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; |
399 | if (auth_hmacs->length) | 401 | if (auth_hmacs->length) |
400 | chunksize += ntohs(auth_hmacs->length); | 402 | chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); |
401 | else | 403 | else |
402 | auth_hmacs = NULL; | 404 | auth_hmacs = NULL; |
403 | 405 | ||
404 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; | 406 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; |
405 | if (auth_chunks->length) | 407 | if (auth_chunks->length) |
406 | chunksize += ntohs(auth_chunks->length); | 408 | chunksize += WORD_ROUND(ntohs(auth_chunks->length)); |
407 | else | 409 | else |
408 | auth_chunks = NULL; | 410 | auth_chunks = NULL; |
409 | 411 | ||
@@ -412,7 +414,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, | |||
412 | } | 414 | } |
413 | 415 | ||
414 | if (num_ext) | 416 | if (num_ext) |
415 | chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; | 417 | chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + |
418 | num_ext); | ||
416 | 419 | ||
417 | /* Now allocate and fill out the chunk. */ | 420 | /* Now allocate and fill out the chunk. */ |
418 | retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); | 421 | retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); |
@@ -3315,21 +3318,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, | |||
3315 | sctp_chunk_free(asconf); | 3318 | sctp_chunk_free(asconf); |
3316 | asoc->addip_last_asconf = NULL; | 3319 | asoc->addip_last_asconf = NULL; |
3317 | 3320 | ||
3318 | /* Send the next asconf chunk from the addip chunk queue. */ | ||
3319 | if (!list_empty(&asoc->addip_chunk_list)) { | ||
3320 | struct list_head *entry = asoc->addip_chunk_list.next; | ||
3321 | asconf = list_entry(entry, struct sctp_chunk, list); | ||
3322 | |||
3323 | list_del_init(entry); | ||
3324 | |||
3325 | /* Hold the chunk until an ASCONF_ACK is received. */ | ||
3326 | sctp_chunk_hold(asconf); | ||
3327 | if (sctp_primitive_ASCONF(asoc, asconf)) | ||
3328 | sctp_chunk_free(asconf); | ||
3329 | else | ||
3330 | asoc->addip_last_asconf = asconf; | ||
3331 | } | ||
3332 | |||
3333 | return retval; | 3321 | return retval; |
3334 | } | 3322 | } |
3335 | 3323 | ||
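
The sctp_make_init()/sctp_make_init_ack() hunks above wrap each variable-length parameter in WORD_ROUND() when budgeting chunksize, so the allocation accounts for the padding every TLV occupies on the wire. A tiny standalone arithmetic sketch follows; word_round() here uses the usual round-up-to-a-multiple-of-4 expression, which is an assumption about the macro rather than a copy of the SCTP header.

    #include <stdio.h>

    /* Round a length up to the next multiple of 4, as SCTP TLV padding does. */
    static unsigned int word_round(unsigned int len)
    {
            return (len + 3u) & ~3u;
    }

    int main(void)
    {
            const unsigned int param_lens[] = { 6, 8, 13 };
            unsigned int i, unpadded = 0, padded = 0;

            for (i = 0; i < 3; i++) {
                    unpadded += param_lens[i];
                    padded   += word_round(param_lens[i]);
            }

            /* 6+8+13 = 27 raw bytes, but 8+8+16 = 32 bytes once padded. */
            printf("unpadded=%u padded=%u\n", unpadded, padded);
            return 0;
    }
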
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 4c5bed9af4e3..d5ae450b6f02 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -962,6 +962,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc, | |||
962 | } | 962 | } |
963 | 963 | ||
964 | 964 | ||
965 | /* Send the next ASCONF packet currently stored in the association. | ||
966 | * This happens after the ASCONF_ACK was successfully processed. | ||
967 | */ | ||
968 | static void sctp_cmd_send_asconf(struct sctp_association *asoc) | ||
969 | { | ||
970 | /* Send the next asconf chunk from the addip chunk | ||
971 | * queue. | ||
972 | */ | ||
973 | if (!list_empty(&asoc->addip_chunk_list)) { | ||
974 | struct list_head *entry = asoc->addip_chunk_list.next; | ||
975 | struct sctp_chunk *asconf = list_entry(entry, | ||
976 | struct sctp_chunk, list); | ||
977 | list_del_init(entry); | ||
978 | |||
979 | /* Hold the chunk until an ASCONF_ACK is received. */ | ||
980 | sctp_chunk_hold(asconf); | ||
981 | if (sctp_primitive_ASCONF(asoc, asconf)) | ||
982 | sctp_chunk_free(asconf); | ||
983 | else | ||
984 | asoc->addip_last_asconf = asconf; | ||
985 | } | ||
986 | } | ||
987 | |||
965 | 988 | ||
966 | /* These three macros allow us to pull the debugging code out of the | 989 | /* These three macros allow us to pull the debugging code out of the |
967 | * main flow of sctp_do_sm() to keep attention focused on the real | 990 | * main flow of sctp_do_sm() to keep attention focused on the real |
@@ -1617,6 +1640,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1617 | } | 1640 | } |
1618 | error = sctp_cmd_send_msg(asoc, cmd->obj.msg); | 1641 | error = sctp_cmd_send_msg(asoc, cmd->obj.msg); |
1619 | break; | 1642 | break; |
1643 | case SCTP_CMD_SEND_NEXT_ASCONF: | ||
1644 | sctp_cmd_send_asconf(asoc); | ||
1645 | break; | ||
1620 | default: | 1646 | default: |
1621 | printk(KERN_WARNING "Impossible command: %u, %p\n", | 1647 | printk(KERN_WARNING "Impossible command: %u, %p\n", |
1622 | cmd->verb, cmd->obj.ptr); | 1648 | cmd->verb, cmd->obj.ptr); |
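
The sctp_cmd_send_asconf() helper added above is run through the new SCTP_CMD_SEND_NEXT_ASCONF side-effect command: once an ASCONF_ACK has been processed, the next queued chunk is popped from addip_chunk_list and remembered as the outstanding request. A much simplified standalone C sketch of that queue discipline follows; the chunk and assoc structures are invented and the reference counting done by sctp_chunk_hold() in the real code is left out.

    #include <stdio.h>

    struct chunk {
            int id;
            struct chunk *next;
    };

    struct assoc {
            struct chunk *pending;          /* queued, not yet sent */
            struct chunk *outstanding;      /* sent, awaiting its own ack */
    };

    /* After the previous request was acked, send the next pending chunk
     * (if any) and keep it as the new outstanding request. */
    static void send_next(struct assoc *a)
    {
            struct chunk *c = a->pending;

            if (!c)
                    return;                 /* nothing queued */
            a->pending = c->next;
            a->outstanding = c;             /* held until its ack arrives */
            printf("sent chunk %d\n", c->id);
    }

    int main(void)
    {
            struct chunk c2 = { 2, NULL };
            struct chunk c1 = { 1, &c2 };
            struct assoc a = { &c1, NULL };

            send_next(&a);  /* ack for an earlier request just arrived */
            send_next(&a);  /* and again for the one sent above */
            return 0;
    }
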
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index abf601a1b847..24b2cd555637 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -3676,8 +3676,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3676 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | 3676 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); |
3677 | 3677 | ||
3678 | if (!sctp_process_asconf_ack((struct sctp_association *)asoc, | 3678 | if (!sctp_process_asconf_ack((struct sctp_association *)asoc, |
3679 | asconf_ack)) | 3679 | asconf_ack)) { |
3680 | /* Successfully processed ASCONF_ACK. We can | ||
3681 | * release the next asconf if we have one. | ||
3682 | */ | ||
3683 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF, | ||
3684 | SCTP_NULL()); | ||
3680 | return SCTP_DISPOSITION_CONSUME; | 3685 | return SCTP_DISPOSITION_CONSUME; |
3686 | } | ||
3681 | 3687 | ||
3682 | abort = sctp_make_abort(asoc, asconf_ack, | 3688 | abort = sctp_make_abort(asoc, asconf_ack, |
3683 | sizeof(sctp_errhdr_t)); | 3689 | sizeof(sctp_errhdr_t)); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 007e8baba089..44a1ab03a3f0 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -3719,12 +3719,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
3719 | sp->hmac = NULL; | 3719 | sp->hmac = NULL; |
3720 | 3720 | ||
3721 | SCTP_DBG_OBJCNT_INC(sock); | 3721 | SCTP_DBG_OBJCNT_INC(sock); |
3722 | percpu_counter_inc(&sctp_sockets_allocated); | ||
3723 | 3722 | ||
3724 | /* Set socket backlog limit. */ | 3723 | /* Set socket backlog limit. */ |
3725 | sk->sk_backlog.limit = sysctl_sctp_rmem[1]; | 3724 | sk->sk_backlog.limit = sysctl_sctp_rmem[1]; |
3726 | 3725 | ||
3727 | local_bh_disable(); | 3726 | local_bh_disable(); |
3727 | percpu_counter_inc(&sctp_sockets_allocated); | ||
3728 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | 3728 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); |
3729 | local_bh_enable(); | 3729 | local_bh_enable(); |
3730 | 3730 | ||
@@ -3741,8 +3741,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk) | |||
3741 | /* Release our hold on the endpoint. */ | 3741 | /* Release our hold on the endpoint. */ |
3742 | ep = sctp_sk(sk)->ep; | 3742 | ep = sctp_sk(sk)->ep; |
3743 | sctp_endpoint_free(ep); | 3743 | sctp_endpoint_free(ep); |
3744 | percpu_counter_dec(&sctp_sockets_allocated); | ||
3745 | local_bh_disable(); | 3744 | local_bh_disable(); |
3745 | percpu_counter_dec(&sctp_sockets_allocated); | ||
3746 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); | 3746 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); |
3747 | local_bh_enable(); | 3747 | local_bh_enable(); |
3748 | } | 3748 | } |
@@ -6189,6 +6189,16 @@ do_nonblock: | |||
6189 | goto out; | 6189 | goto out; |
6190 | } | 6190 | } |
6191 | 6191 | ||
6192 | void sctp_data_ready(struct sock *sk, int len) | ||
6193 | { | ||
6194 | read_lock_bh(&sk->sk_callback_lock); | ||
6195 | if (sk_has_sleeper(sk)) | ||
6196 | wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN | | ||
6197 | POLLRDNORM | POLLRDBAND); | ||
6198 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | ||
6199 | read_unlock_bh(&sk->sk_callback_lock); | ||
6200 | } | ||
6201 | |||
6192 | /* If socket sndbuf has changed, wake up all per association waiters. */ | 6202 | /* If socket sndbuf has changed, wake up all per association waiters. */ |
6193 | void sctp_write_space(struct sock *sk) | 6203 | void sctp_write_space(struct sock *sk) |
6194 | { | 6204 | { |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index e56f711baccc..36e84e13c6aa 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -83,6 +83,41 @@ struct compat_x25_subscrip_struct { | |||
83 | }; | 83 | }; |
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | |||
87 | int x25_parse_address_block(struct sk_buff *skb, | ||
88 | struct x25_address *called_addr, | ||
89 | struct x25_address *calling_addr) | ||
90 | { | ||
91 | unsigned char len; | ||
92 | int needed; | ||
93 | int rc; | ||
94 | |||
95 | if (skb->len < 1) { | ||
96 | /* packet has no address block */ | ||
97 | rc = 0; | ||
98 | goto empty; | ||
99 | } | ||
100 | |||
101 | len = *skb->data; | ||
102 | needed = 1 + (len >> 4) + (len & 0x0f); | ||
103 | |||
104 | if (skb->len < needed) { | ||
105 | /* packet is too short to hold the addresses it claims | ||
106 | to hold */ | ||
107 | rc = -1; | ||
108 | goto empty; | ||
109 | } | ||
110 | |||
111 | return x25_addr_ntoa(skb->data, called_addr, calling_addr); | ||
112 | |||
113 | empty: | ||
114 | *called_addr->x25_addr = 0; | ||
115 | *calling_addr->x25_addr = 0; | ||
116 | |||
117 | return rc; | ||
118 | } | ||
119 | |||
120 | |||
86 | int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, | 121 | int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, |
87 | struct x25_address *calling_addr) | 122 | struct x25_address *calling_addr) |
88 | { | 123 | { |
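
In the x25_parse_address_block() helper added above, the first octet of the address block packs the two address lengths into its nibbles, and the function refuses to parse unless the skb really holds needed = 1 + (len >> 4) + (len & 0x0f) bytes. The nibble arithmetic on its own, as a standalone C sketch with an invented sample value (which nibble belongs to the called and which to the calling address is left aside here):

    #include <stdio.h>

    int main(void)
    {
            /* Length octet: one address length per nibble (here 10 and 4). */
            unsigned char len = 0xA4;

            unsigned int hi = len >> 4;
            unsigned int lo = len & 0x0f;
            unsigned int needed = 1 + hi + lo;  /* the octet itself + both parts */

            printf("hi=%u lo=%u needed=%u\n", hi, lo, needed);
            return 0;
    }

Checking needed against skb->len before calling x25_addr_ntoa() is what keeps a short or malformed call request from being parsed past the end of the buffer.
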
@@ -367,6 +402,7 @@ static void __x25_destroy_socket(struct sock *sk) | |||
367 | /* | 402 | /* |
368 | * Queue the unaccepted socket for death | 403 | * Queue the unaccepted socket for death |
369 | */ | 404 | */ |
405 | skb->sk->sk_state = TCP_LISTEN; | ||
370 | sock_set_flag(skb->sk, SOCK_DEAD); | 406 | sock_set_flag(skb->sk, SOCK_DEAD); |
371 | x25_start_heartbeat(skb->sk); | 407 | x25_start_heartbeat(skb->sk); |
372 | x25_sk(skb->sk)->state = X25_STATE_0; | 408 | x25_sk(skb->sk)->state = X25_STATE_0; |
@@ -554,7 +590,8 @@ static int x25_create(struct net *net, struct socket *sock, int protocol, | |||
554 | x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; | 590 | x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; |
555 | x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; | 591 | x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; |
556 | x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; | 592 | x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; |
557 | x25->facilities.throughput = X25_DEFAULT_THROUGHPUT; | 593 | x25->facilities.throughput = 0; /* by default don't negotiate |
594 | throughput */ | ||
558 | x25->facilities.reverse = X25_DEFAULT_REVERSE; | 595 | x25->facilities.reverse = X25_DEFAULT_REVERSE; |
559 | x25->dte_facilities.calling_len = 0; | 596 | x25->dte_facilities.calling_len = 0; |
560 | x25->dte_facilities.called_len = 0; | 597 | x25->dte_facilities.called_len = 0; |
@@ -922,16 +959,26 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, | |||
922 | /* | 959 | /* |
923 | * Extract the X.25 addresses and convert them to ASCII strings, | 960 | * Extract the X.25 addresses and convert them to ASCII strings, |
924 | * and remove them. | 961 | * and remove them. |
962 | * | ||
963 | * Address block is mandatory in call request packets | ||
925 | */ | 964 | */ |
926 | addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr); | 965 | addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr); |
966 | if (addr_len <= 0) | ||
967 | goto out_clear_request; | ||
927 | skb_pull(skb, addr_len); | 968 | skb_pull(skb, addr_len); |
928 | 969 | ||
929 | /* | 970 | /* |
930 | * Get the length of the facilities, skip past them for the moment | 971 | * Get the length of the facilities, skip past them for the moment |
931 | * get the call user data because this is needed to determine | 972 | * get the call user data because this is needed to determine |
932 | * the correct listener | 973 | * the correct listener |
974 | * | ||
975 | * Facilities length is mandatory in call request packets | ||
933 | */ | 976 | */ |
977 | if (skb->len < 1) | ||
978 | goto out_clear_request; | ||
934 | len = skb->data[0] + 1; | 979 | len = skb->data[0] + 1; |
980 | if (skb->len < len) | ||
981 | goto out_clear_request; | ||
935 | skb_pull(skb,len); | 982 | skb_pull(skb,len); |
936 | 983 | ||
937 | /* | 984 | /* |
@@ -1415,9 +1462,20 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1415 | if (facilities.winsize_in < 1 || | 1462 | if (facilities.winsize_in < 1 || |
1416 | facilities.winsize_in > 127) | 1463 | facilities.winsize_in > 127) |
1417 | break; | 1464 | break; |
1418 | if (facilities.throughput < 0x03 || | 1465 | if (facilities.throughput) { |
1419 | facilities.throughput > 0xDD) | 1466 | int out = facilities.throughput & 0xf0; |
1420 | break; | 1467 | int in = facilities.throughput & 0x0f; |
1468 | if (!out) | ||
1469 | facilities.throughput |= | ||
1470 | X25_DEFAULT_THROUGHPUT << 4; | ||
1471 | else if (out < 0x30 || out > 0xD0) | ||
1472 | break; | ||
1473 | if (!in) | ||
1474 | facilities.throughput |= | ||
1475 | X25_DEFAULT_THROUGHPUT; | ||
1476 | else if (in < 0x03 || in > 0x0D) | ||
1477 | break; | ||
1478 | } | ||
1421 | if (facilities.reverse && | 1479 | if (facilities.reverse && |
1422 | (facilities.reverse & 0x81) != 0x81) | 1480 | (facilities.reverse & 0x81) != 0x81) |
1423 | break; | 1481 | break; |
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index a21f6646eb3a..771bab00754b 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c | |||
@@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
35 | struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) | 35 | struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) |
36 | { | 36 | { |
37 | unsigned char *p = skb->data; | 37 | unsigned char *p = skb->data; |
38 | unsigned int len = *p++; | 38 | unsigned int len; |
39 | 39 | ||
40 | *vc_fac_mask = 0; | 40 | *vc_fac_mask = 0; |
41 | 41 | ||
@@ -50,6 +50,14 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
50 | memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); | 50 | memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); |
51 | memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); | 51 | memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); |
52 | 52 | ||
53 | if (skb->len < 1) | ||
54 | return 0; | ||
55 | |||
56 | len = *p++; | ||
57 | |||
58 | if (len >= skb->len) | ||
59 | return -1; | ||
60 | |||
53 | while (len > 0) { | 61 | while (len > 0) { |
54 | switch (*p & X25_FAC_CLASS_MASK) { | 62 | switch (*p & X25_FAC_CLASS_MASK) { |
55 | case X25_FAC_CLASS_A: | 63 | case X25_FAC_CLASS_A: |
@@ -247,6 +255,8 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk, | |||
247 | memcpy(new, ours, sizeof(*new)); | 255 | memcpy(new, ours, sizeof(*new)); |
248 | 256 | ||
249 | len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); | 257 | len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); |
258 | if (len < 0) | ||
259 | return len; | ||
250 | 260 | ||
251 | /* | 261 | /* |
252 | * They want reverse charging, we won't accept it. | 262 | * They want reverse charging, we won't accept it. |
@@ -259,9 +269,18 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk, | |||
259 | new->reverse = theirs.reverse; | 269 | new->reverse = theirs.reverse; |
260 | 270 | ||
261 | if (theirs.throughput) { | 271 | if (theirs.throughput) { |
262 | if (theirs.throughput < ours->throughput) { | 272 | int theirs_in = theirs.throughput & 0x0f; |
263 | SOCK_DEBUG(sk, "X.25: throughput negotiated down\n"); | 273 | int theirs_out = theirs.throughput & 0xf0; |
264 | new->throughput = theirs.throughput; | 274 | int ours_in = ours->throughput & 0x0f; |
275 | int ours_out = ours->throughput & 0xf0; | ||
276 | if (!ours_in || theirs_in < ours_in) { | ||
277 | SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n"); | ||
278 | new->throughput = (new->throughput & 0xf0) | theirs_in; | ||
279 | } | ||
280 | if (!ours_out || theirs_out < ours_out) { | ||
281 | SOCK_DEBUG(sk, | ||
282 | "X.25: outbound throughput negotiated\n"); | ||
283 | new->throughput = (new->throughput & 0x0f) | theirs_out; | ||
265 | } | 284 | } |
266 | } | 285 | } |
267 | 286 | ||
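
The x25_negotiate_facilities() hunk above negotiates the throughput facility one direction at a time: the low nibble carries the inbound class and the high nibble the outbound class, and each is dropped to the peer's value when ours is unset or higher. A standalone C sketch of that nibble-wise combination follows; the sample values are invented and the zero-means-default handling done by the ioctl path is not repeated here.

    #include <stdio.h>

    /* Combine our throughput byte with the peer's, one nibble (direction)
     * at a time: take the peer's class when ours is unset (0) or higher. */
    static unsigned char negotiate(unsigned char ours, unsigned char theirs)
    {
            unsigned char result = ours;
            unsigned char theirs_in  = theirs & 0x0f, ours_in  = ours & 0x0f;
            unsigned char theirs_out = theirs & 0xf0, ours_out = ours & 0xf0;

            if (!ours_in || theirs_in < ours_in)
                    result = (result & 0xf0) | theirs_in;
            if (!ours_out || theirs_out < ours_out)
                    result = (result & 0x0f) | theirs_out;
            return result;
    }

    int main(void)
    {
            /* ours: out = 0xB, in = unset; theirs: out = 0x9, in = 0x7 */
            unsigned char ours = 0xB0, theirs = 0x97;

            printf("negotiated = 0x%02X\n", negotiate(ours, theirs));
            return 0;
    }
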
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index a31b3b9e5966..372ac226e648 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
@@ -90,6 +90,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) | |||
90 | static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) | 90 | static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
91 | { | 91 | { |
92 | struct x25_address source_addr, dest_addr; | 92 | struct x25_address source_addr, dest_addr; |
93 | int len; | ||
93 | 94 | ||
94 | switch (frametype) { | 95 | switch (frametype) { |
95 | case X25_CALL_ACCEPTED: { | 96 | case X25_CALL_ACCEPTED: { |
@@ -107,11 +108,17 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
107 | * Parse the data in the frame. | 108 | * Parse the data in the frame. |
108 | */ | 109 | */ |
109 | skb_pull(skb, X25_STD_MIN_LEN); | 110 | skb_pull(skb, X25_STD_MIN_LEN); |
110 | skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr)); | 111 | |
111 | skb_pull(skb, | 112 | len = x25_parse_address_block(skb, &source_addr, |
112 | x25_parse_facilities(skb, &x25->facilities, | 113 | &dest_addr); |
114 | if (len > 0) | ||
115 | skb_pull(skb, len); | ||
116 | |||
117 | len = x25_parse_facilities(skb, &x25->facilities, | ||
113 | &x25->dte_facilities, | 118 | &x25->dte_facilities, |
114 | &x25->vc_facil_mask)); | 119 | &x25->vc_facil_mask); |
120 | if (len > 0) | ||
121 | skb_pull(skb, len); | ||
115 | /* | 122 | /* |
116 | * Copy any Call User Data. | 123 | * Copy any Call User Data. |
117 | */ | 124 | */ |
diff --git a/security/inode.c b/security/inode.c index c3a793881d04..1c812e874504 100644 --- a/security/inode.c +++ b/security/inode.c | |||
@@ -161,13 +161,13 @@ static int create_by_name(const char *name, mode_t mode, | |||
161 | 161 | ||
162 | mutex_lock(&parent->d_inode->i_mutex); | 162 | mutex_lock(&parent->d_inode->i_mutex); |
163 | *dentry = lookup_one_len(name, parent, strlen(name)); | 163 | *dentry = lookup_one_len(name, parent, strlen(name)); |
164 | if (!IS_ERR(dentry)) { | 164 | if (!IS_ERR(*dentry)) { |
165 | if ((mode & S_IFMT) == S_IFDIR) | 165 | if ((mode & S_IFMT) == S_IFDIR) |
166 | error = mkdir(parent->d_inode, *dentry, mode); | 166 | error = mkdir(parent->d_inode, *dentry, mode); |
167 | else | 167 | else |
168 | error = create(parent->d_inode, *dentry, mode); | 168 | error = create(parent->d_inode, *dentry, mode); |
169 | } else | 169 | } else |
170 | error = PTR_ERR(dentry); | 170 | error = PTR_ERR(*dentry); |
171 | mutex_unlock(&parent->d_inode->i_mutex); | 171 | mutex_unlock(&parent->d_inode->i_mutex); |
172 | 172 | ||
173 | return error; | 173 | return error; |
diff --git a/security/keys/keyring.c b/security/keys/keyring.c index e814d2109f8e..dd7cd0f8e13c 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c | |||
@@ -201,7 +201,7 @@ static long keyring_read(const struct key *keyring, | |||
201 | int loop, ret; | 201 | int loop, ret; |
202 | 202 | ||
203 | ret = 0; | 203 | ret = 0; |
204 | klist = rcu_dereference(keyring->payload.subscriptions); | 204 | klist = keyring->payload.subscriptions; |
205 | 205 | ||
206 | if (klist) { | 206 | if (klist) { |
207 | /* calculate how much data we could return */ | 207 | /* calculate how much data we could return */ |
diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 03fe63ed55bd..d737cea5347c 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c | |||
@@ -68,7 +68,8 @@ static int call_sbin_request_key(struct key_construction *cons, | |||
68 | { | 68 | { |
69 | const struct cred *cred = current_cred(); | 69 | const struct cred *cred = current_cred(); |
70 | key_serial_t prkey, sskey; | 70 | key_serial_t prkey, sskey; |
71 | struct key *key = cons->key, *authkey = cons->authkey, *keyring; | 71 | struct key *key = cons->key, *authkey = cons->authkey, *keyring, |
72 | *session; | ||
72 | char *argv[9], *envp[3], uid_str[12], gid_str[12]; | 73 | char *argv[9], *envp[3], uid_str[12], gid_str[12]; |
73 | char key_str[12], keyring_str[3][12]; | 74 | char key_str[12], keyring_str[3][12]; |
74 | char desc[20]; | 75 | char desc[20]; |
@@ -112,10 +113,12 @@ static int call_sbin_request_key(struct key_construction *cons, | |||
112 | if (cred->tgcred->process_keyring) | 113 | if (cred->tgcred->process_keyring) |
113 | prkey = cred->tgcred->process_keyring->serial; | 114 | prkey = cred->tgcred->process_keyring->serial; |
114 | 115 | ||
115 | if (cred->tgcred->session_keyring) | 116 | rcu_read_lock(); |
116 | sskey = rcu_dereference(cred->tgcred->session_keyring)->serial; | 117 | session = rcu_dereference(cred->tgcred->session_keyring); |
117 | else | 118 | if (!session) |
118 | sskey = cred->user->session_keyring->serial; | 119 | session = cred->user->session_keyring; |
120 | sskey = session->serial; | ||
121 | rcu_read_unlock(); | ||
119 | 122 | ||
120 | sprintf(keyring_str[2], "%d", sskey); | 123 | sprintf(keyring_str[2], "%d", sskey); |
121 | 124 | ||
@@ -336,8 +339,10 @@ static int construct_alloc_key(struct key_type *type, | |||
336 | 339 | ||
337 | key_already_present: | 340 | key_already_present: |
338 | mutex_unlock(&key_construction_mutex); | 341 | mutex_unlock(&key_construction_mutex); |
339 | if (dest_keyring) | 342 | if (dest_keyring) { |
343 | __key_link(dest_keyring, key_ref_to_ptr(key_ref)); | ||
340 | up_write(&dest_keyring->sem); | 344 | up_write(&dest_keyring->sem); |
345 | } | ||
341 | mutex_unlock(&user->cons_lock); | 346 | mutex_unlock(&user->cons_lock); |
342 | key_put(key); | 347 | key_put(key); |
343 | *_key = key = key_ref_to_ptr(key_ref); | 348 | *_key = key = key_ref_to_ptr(key_ref); |
@@ -428,6 +433,11 @@ struct key *request_key_and_link(struct key_type *type, | |||
428 | 433 | ||
429 | if (!IS_ERR(key_ref)) { | 434 | if (!IS_ERR(key_ref)) { |
430 | key = key_ref_to_ptr(key_ref); | 435 | key = key_ref_to_ptr(key_ref); |
436 | if (dest_keyring) { | ||
437 | construct_get_dest_keyring(&dest_keyring); | ||
438 | key_link(dest_keyring, key); | ||
439 | key_put(dest_keyring); | ||
440 | } | ||
431 | } else if (PTR_ERR(key_ref) != -EAGAIN) { | 441 | } else if (PTR_ERR(key_ref) != -EAGAIN) { |
432 | key = ERR_CAST(key_ref); | 442 | key = ERR_CAST(key_ref); |
433 | } else { | 443 | } else { |
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h index 8da6a8428086..cd4f734e2749 100644 --- a/security/selinux/ss/avtab.h +++ b/security/selinux/ss/avtab.h | |||
@@ -82,7 +82,7 @@ struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified | |||
82 | void avtab_cache_init(void); | 82 | void avtab_cache_init(void); |
83 | void avtab_cache_destroy(void); | 83 | void avtab_cache_destroy(void); |
84 | 84 | ||
85 | #define MAX_AVTAB_HASH_BITS 13 | 85 | #define MAX_AVTAB_HASH_BITS 11 |
86 | #define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) | 86 | #define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) |
87 | #define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) | 87 | #define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) |
88 | #define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS | 88 | #define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS |
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c index 656e474dca47..91acc9a243ec 100644 --- a/sound/arm/aaci.c +++ b/sound/arm/aaci.c | |||
@@ -863,7 +863,6 @@ static int __devinit aaci_probe_ac97(struct aaci *aaci) | |||
863 | struct snd_ac97 *ac97; | 863 | struct snd_ac97 *ac97; |
864 | int ret; | 864 | int ret; |
865 | 865 | ||
866 | writel(0, aaci->base + AC97_POWERDOWN); | ||
867 | /* | 866 | /* |
868 | * Assert AACIRESET for 2us | 867 | * Assert AACIRESET for 2us |
869 | */ | 868 | */ |
@@ -1047,7 +1046,11 @@ static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id) | |||
1047 | 1046 | ||
1048 | writel(0x1fff, aaci->base + AACI_INTCLR); | 1047 | writel(0x1fff, aaci->base + AACI_INTCLR); |
1049 | writel(aaci->maincr, aaci->base + AACI_MAINCR); | 1048 | writel(aaci->maincr, aaci->base + AACI_MAINCR); |
1050 | 1049 | /* | |
1050 | * Fix: ac97 read-back failures by reading | ||
1051 | * from any arbitrary aaci register. | ||
1052 | */ | ||
1053 | readl(aaci->base + AACI_CSCH1); | ||
1051 | ret = aaci_probe_ac97(aaci); | 1054 | ret = aaci_probe_ac97(aaci); |
1052 | if (ret) | 1055 | if (ret) |
1053 | goto out; | 1056 | goto out; |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index f8fd586ae024..cec68152dcb1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2272,6 +2272,8 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { | |||
2272 | SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), | 2272 | SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), |
2273 | SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), | 2273 | SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), |
2274 | SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), | 2274 | SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), |
2275 | SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB), | ||
2276 | SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB), | ||
2275 | SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), | 2277 | SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), |
2276 | {} | 2278 | {} |
2277 | }; | 2279 | }; |
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index af34606c30c3..e9fdfc4b1c57 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
@@ -519,14 +519,6 @@ static int ad198x_suspend(struct hda_codec *codec, pm_message_t state) | |||
519 | ad198x_power_eapd(codec); | 519 | ad198x_power_eapd(codec); |
520 | return 0; | 520 | return 0; |
521 | } | 521 | } |
522 | |||
523 | static int ad198x_resume(struct hda_codec *codec) | ||
524 | { | ||
525 | ad198x_init(codec); | ||
526 | snd_hda_codec_resume_amp(codec); | ||
527 | snd_hda_codec_resume_cache(codec); | ||
528 | return 0; | ||
529 | } | ||
530 | #endif | 522 | #endif |
531 | 523 | ||
532 | static struct hda_codec_ops ad198x_patch_ops = { | 524 | static struct hda_codec_ops ad198x_patch_ops = { |
@@ -539,7 +531,6 @@ static struct hda_codec_ops ad198x_patch_ops = { | |||
539 | #endif | 531 | #endif |
540 | #ifdef SND_HDA_NEEDS_RESUME | 532 | #ifdef SND_HDA_NEEDS_RESUME |
541 | .suspend = ad198x_suspend, | 533 | .suspend = ad198x_suspend, |
542 | .resume = ad198x_resume, | ||
543 | #endif | 534 | #endif |
544 | .reboot_notify = ad198x_shutup, | 535 | .reboot_notify = ad198x_shutup, |
545 | }; | 536 | }; |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index c7730dbb9ddb..7404dba16f83 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -230,6 +230,7 @@ enum { | |||
230 | ALC888_ACER_ASPIRE_7730G, | 230 | ALC888_ACER_ASPIRE_7730G, |
231 | ALC883_MEDION, | 231 | ALC883_MEDION, |
232 | ALC883_MEDION_MD2, | 232 | ALC883_MEDION_MD2, |
233 | ALC883_MEDION_WIM2160, | ||
233 | ALC883_LAPTOP_EAPD, | 234 | ALC883_LAPTOP_EAPD, |
234 | ALC883_LENOVO_101E_2ch, | 235 | ALC883_LENOVO_101E_2ch, |
235 | ALC883_LENOVO_NB0763, | 236 | ALC883_LENOVO_NB0763, |
@@ -1389,22 +1390,31 @@ struct alc_fixup { | |||
1389 | 1390 | ||
1390 | static void alc_pick_fixup(struct hda_codec *codec, | 1391 | static void alc_pick_fixup(struct hda_codec *codec, |
1391 | const struct snd_pci_quirk *quirk, | 1392 | const struct snd_pci_quirk *quirk, |
1392 | const struct alc_fixup *fix) | 1393 | const struct alc_fixup *fix, |
1394 | int pre_init) | ||
1393 | { | 1395 | { |
1394 | const struct alc_pincfg *cfg; | 1396 | const struct alc_pincfg *cfg; |
1395 | 1397 | ||
1396 | quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk); | 1398 | quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk); |
1397 | if (!quirk) | 1399 | if (!quirk) |
1398 | return; | 1400 | return; |
1399 | |||
1400 | fix += quirk->value; | 1401 | fix += quirk->value; |
1401 | cfg = fix->pins; | 1402 | cfg = fix->pins; |
1402 | if (cfg) { | 1403 | if (pre_init && cfg) { |
1404 | #ifdef CONFIG_SND_DEBUG_VERBOSE | ||
1405 | snd_printdd(KERN_INFO "hda_codec: %s: Apply pincfg for %s\n", | ||
1406 | codec->chip_name, quirk->name); | ||
1407 | #endif | ||
1403 | for (; cfg->nid; cfg++) | 1408 | for (; cfg->nid; cfg++) |
1404 | snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); | 1409 | snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); |
1405 | } | 1410 | } |
1406 | if (fix->verbs) | 1411 | if (!pre_init && fix->verbs) { |
1412 | #ifdef CONFIG_SND_DEBUG_VERBOSE | ||
1413 | snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-verbs for %s\n", | ||
1414 | codec->chip_name, quirk->name); | ||
1415 | #endif | ||
1407 | add_verb(codec->spec, fix->verbs); | 1416 | add_verb(codec->spec, fix->verbs); |
1417 | } | ||
1408 | } | 1418 | } |
1409 | 1419 | ||
1410 | static int alc_read_coef_idx(struct hda_codec *codec, | 1420 | static int alc_read_coef_idx(struct hda_codec *codec, |
@@ -4133,7 +4143,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = { | |||
4133 | SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG), | 4143 | SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG), |
4134 | SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734), | 4144 | SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734), |
4135 | SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU), | 4145 | SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU), |
4136 | SND_PCI_QUIRK(0x1734, 0x10ac, "FSC", ALC880_UNIWILL), | 4146 | SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_F1734), |
4137 | SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU), | 4147 | SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU), |
4138 | SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW), | 4148 | SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW), |
4139 | SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG), | 4149 | SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG), |
@@ -4808,6 +4818,25 @@ static void alc880_auto_init_analog_input(struct hda_codec *codec) | |||
4808 | } | 4818 | } |
4809 | } | 4819 | } |
4810 | 4820 | ||
4821 | static void alc880_auto_init_input_src(struct hda_codec *codec) | ||
4822 | { | ||
4823 | struct alc_spec *spec = codec->spec; | ||
4824 | int c; | ||
4825 | |||
4826 | for (c = 0; c < spec->num_adc_nids; c++) { | ||
4827 | unsigned int mux_idx; | ||
4828 | const struct hda_input_mux *imux; | ||
4829 | mux_idx = c >= spec->num_mux_defs ? 0 : c; | ||
4830 | imux = &spec->input_mux[mux_idx]; | ||
4831 | if (!imux->num_items && mux_idx > 0) | ||
4832 | imux = &spec->input_mux[0]; | ||
4833 | if (imux) | ||
4834 | snd_hda_codec_write(codec, spec->adc_nids[c], 0, | ||
4835 | AC_VERB_SET_CONNECT_SEL, | ||
4836 | imux->items[0].index); | ||
4837 | } | ||
4838 | } | ||
4839 | |||
4811 | /* parse the BIOS configuration and set up the alc_spec */ | 4840 | /* parse the BIOS configuration and set up the alc_spec */ |
4812 | /* return 1 if successful, 0 if the proper config is not found, | 4841 | /* return 1 if successful, 0 if the proper config is not found, |
4813 | * or a negative error code | 4842 | * or a negative error code |
@@ -4886,6 +4915,7 @@ static void alc880_auto_init(struct hda_codec *codec) | |||
4886 | alc880_auto_init_multi_out(codec); | 4915 | alc880_auto_init_multi_out(codec); |
4887 | alc880_auto_init_extra_out(codec); | 4916 | alc880_auto_init_extra_out(codec); |
4888 | alc880_auto_init_analog_input(codec); | 4917 | alc880_auto_init_analog_input(codec); |
4918 | alc880_auto_init_input_src(codec); | ||
4889 | if (spec->unsol_event) | 4919 | if (spec->unsol_event) |
4890 | alc_inithook(codec); | 4920 | alc_inithook(codec); |
4891 | } | 4921 | } |
@@ -6397,6 +6427,8 @@ static void alc260_auto_init_analog_input(struct hda_codec *codec) | |||
6397 | } | 6427 | } |
6398 | } | 6428 | } |
6399 | 6429 | ||
6430 | #define alc260_auto_init_input_src alc880_auto_init_input_src | ||
6431 | |||
6400 | /* | 6432 | /* |
6401 | * generic initialization of ADC, input mixers and output mixers | 6433 | * generic initialization of ADC, input mixers and output mixers |
6402 | */ | 6434 | */ |
@@ -6483,6 +6515,7 @@ static void alc260_auto_init(struct hda_codec *codec) | |||
6483 | struct alc_spec *spec = codec->spec; | 6515 | struct alc_spec *spec = codec->spec; |
6484 | alc260_auto_init_multi_out(codec); | 6516 | alc260_auto_init_multi_out(codec); |
6485 | alc260_auto_init_analog_input(codec); | 6517 | alc260_auto_init_analog_input(codec); |
6518 | alc260_auto_init_input_src(codec); | ||
6486 | if (spec->unsol_event) | 6519 | if (spec->unsol_event) |
6487 | alc_inithook(codec); | 6520 | alc_inithook(codec); |
6488 | } | 6521 | } |
@@ -8455,6 +8488,42 @@ static struct snd_kcontrol_new alc883_medion_md2_mixer[] = { | |||
8455 | { } /* end */ | 8488 | { } /* end */ |
8456 | }; | 8489 | }; |
8457 | 8490 | ||
8491 | static struct snd_kcontrol_new alc883_medion_wim2160_mixer[] = { | ||
8492 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), | ||
8493 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), | ||
8494 | HDA_CODEC_MUTE("Speaker Playback Switch", 0x15, 0x0, HDA_OUTPUT), | ||
8495 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x1a, 0x0, HDA_OUTPUT), | ||
8496 | HDA_CODEC_VOLUME("Line Playback Volume", 0x08, 0x0, HDA_INPUT), | ||
8497 | HDA_CODEC_MUTE("Line Playback Switch", 0x08, 0x0, HDA_INPUT), | ||
8498 | { } /* end */ | ||
8499 | }; | ||
8500 | |||
8501 | static struct hda_verb alc883_medion_wim2160_verbs[] = { | ||
8502 | /* Unmute front mixer */ | ||
8503 | {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, | ||
8504 | {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, | ||
8505 | |||
8506 | /* Set speaker pin to front mixer */ | ||
8507 | {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, | ||
8508 | |||
8509 | /* Init headphone pin */ | ||
8510 | {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP}, | ||
8511 | {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, | ||
8512 | {0x1a, AC_VERB_SET_CONNECT_SEL, 0x00}, | ||
8513 | {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, | ||
8514 | |||
8515 | { } /* end */ | ||
8516 | }; | ||
8517 | |||
8518 | /* toggle speaker-output according to the hp-jack state */ | ||
8519 | static void alc883_medion_wim2160_setup(struct hda_codec *codec) | ||
8520 | { | ||
8521 | struct alc_spec *spec = codec->spec; | ||
8522 | |||
8523 | spec->autocfg.hp_pins[0] = 0x1a; | ||
8524 | spec->autocfg.speaker_pins[0] = 0x15; | ||
8525 | } | ||
8526 | |||
8458 | static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { | 8527 | static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { |
8459 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), | 8528 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), |
8460 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), | 8529 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), |
@@ -9164,6 +9233,7 @@ static const char *alc882_models[ALC882_MODEL_LAST] = { | |||
9164 | [ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g", | 9233 | [ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g", |
9165 | [ALC883_MEDION] = "medion", | 9234 | [ALC883_MEDION] = "medion", |
9166 | [ALC883_MEDION_MD2] = "medion-md2", | 9235 | [ALC883_MEDION_MD2] = "medion-md2", |
9236 | [ALC883_MEDION_WIM2160] = "medion-wim2160", | ||
9167 | [ALC883_LAPTOP_EAPD] = "laptop-eapd", | 9237 | [ALC883_LAPTOP_EAPD] = "laptop-eapd", |
9168 | [ALC883_LENOVO_101E_2ch] = "lenovo-101e", | 9238 | [ALC883_LENOVO_101E_2ch] = "lenovo-101e", |
9169 | [ALC883_LENOVO_NB0763] = "lenovo-nb0763", | 9239 | [ALC883_LENOVO_NB0763] = "lenovo-nb0763", |
@@ -9280,6 +9350,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = { | |||
9280 | SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG), | 9350 | SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG), |
9281 | 9351 | ||
9282 | SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), | 9352 | SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), |
9353 | SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG), | ||
9283 | SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), | 9354 | SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), |
9284 | SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720), | 9355 | SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720), |
9285 | SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R), | 9356 | SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R), |
@@ -9818,6 +9889,21 @@ static struct alc_config_preset alc882_presets[] = { | |||
9818 | .setup = alc883_medion_md2_setup, | 9889 | .setup = alc883_medion_md2_setup, |
9819 | .init_hook = alc_automute_amp, | 9890 | .init_hook = alc_automute_amp, |
9820 | }, | 9891 | }, |
9892 | [ALC883_MEDION_WIM2160] = { | ||
9893 | .mixers = { alc883_medion_wim2160_mixer }, | ||
9894 | .init_verbs = { alc883_init_verbs, alc883_medion_wim2160_verbs }, | ||
9895 | .num_dacs = ARRAY_SIZE(alc883_dac_nids), | ||
9896 | .dac_nids = alc883_dac_nids, | ||
9897 | .dig_out_nid = ALC883_DIGOUT_NID, | ||
9898 | .num_adc_nids = ARRAY_SIZE(alc883_adc_nids), | ||
9899 | .adc_nids = alc883_adc_nids, | ||
9900 | .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), | ||
9901 | .channel_mode = alc883_3ST_2ch_modes, | ||
9902 | .input_mux = &alc883_capture_source, | ||
9903 | .unsol_event = alc_automute_amp_unsol_event, | ||
9904 | .setup = alc883_medion_wim2160_setup, | ||
9905 | .init_hook = alc_automute_amp, | ||
9906 | }, | ||
9821 | [ALC883_LAPTOP_EAPD] = { | 9907 | [ALC883_LAPTOP_EAPD] = { |
9822 | .mixers = { alc883_base_mixer }, | 9908 | .mixers = { alc883_base_mixer }, |
9823 | .init_verbs = { alc883_init_verbs, alc882_eapd_verbs }, | 9909 | .init_verbs = { alc883_init_verbs, alc882_eapd_verbs }, |
@@ -10363,7 +10449,8 @@ static int patch_alc882(struct hda_codec *codec) | |||
10363 | board_config = ALC882_AUTO; | 10449 | board_config = ALC882_AUTO; |
10364 | } | 10450 | } |
10365 | 10451 | ||
10366 | alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups); | 10452 | if (board_config == ALC882_AUTO) |
10453 | alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 1); | ||
10367 | 10454 | ||
10368 | if (board_config == ALC882_AUTO) { | 10455 | if (board_config == ALC882_AUTO) { |
10369 | /* automatic parse from the BIOS config */ | 10456 | /* automatic parse from the BIOS config */ |
@@ -10436,6 +10523,9 @@ static int patch_alc882(struct hda_codec *codec) | |||
10436 | set_capture_mixer(codec); | 10523 | set_capture_mixer(codec); |
10437 | set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); | 10524 | set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); |
10438 | 10525 | ||
10526 | if (board_config == ALC882_AUTO) | ||
10527 | alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 0); | ||
10528 | |||
10439 | spec->vmaster_nid = 0x0c; | 10529 | spec->vmaster_nid = 0x0c; |
10440 | 10530 | ||
10441 | codec->patch_ops = alc_patch_ops; | 10531 | codec->patch_ops = alc_patch_ops; |
@@ -12816,6 +12906,7 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid, | |||
12816 | dac = 0x02; | 12906 | dac = 0x02; |
12817 | break; | 12907 | break; |
12818 | case 0x15: | 12908 | case 0x15: |
12909 | case 0x21: /* ALC269vb has this pin, too */ | ||
12819 | dac = 0x03; | 12910 | dac = 0x03; |
12820 | break; | 12911 | break; |
12821 | default: | 12912 | default: |
@@ -13735,19 +13826,19 @@ static void alc269_laptop_unsol_event(struct hda_codec *codec, | |||
13735 | } | 13826 | } |
13736 | } | 13827 | } |
13737 | 13828 | ||
13738 | static void alc269_laptop_dmic_setup(struct hda_codec *codec) | 13829 | static void alc269_laptop_amic_setup(struct hda_codec *codec) |
13739 | { | 13830 | { |
13740 | struct alc_spec *spec = codec->spec; | 13831 | struct alc_spec *spec = codec->spec; |
13741 | spec->autocfg.hp_pins[0] = 0x15; | 13832 | spec->autocfg.hp_pins[0] = 0x15; |
13742 | spec->autocfg.speaker_pins[0] = 0x14; | 13833 | spec->autocfg.speaker_pins[0] = 0x14; |
13743 | spec->ext_mic.pin = 0x18; | 13834 | spec->ext_mic.pin = 0x18; |
13744 | spec->ext_mic.mux_idx = 0; | 13835 | spec->ext_mic.mux_idx = 0; |
13745 | spec->int_mic.pin = 0x12; | 13836 | spec->int_mic.pin = 0x19; |
13746 | spec->int_mic.mux_idx = 5; | 13837 | spec->int_mic.mux_idx = 1; |
13747 | spec->auto_mic = 1; | 13838 | spec->auto_mic = 1; |
13748 | } | 13839 | } |
13749 | 13840 | ||
13750 | static void alc269vb_laptop_dmic_setup(struct hda_codec *codec) | 13841 | static void alc269_laptop_dmic_setup(struct hda_codec *codec) |
13751 | { | 13842 | { |
13752 | struct alc_spec *spec = codec->spec; | 13843 | struct alc_spec *spec = codec->spec; |
13753 | spec->autocfg.hp_pins[0] = 0x15; | 13844 | spec->autocfg.hp_pins[0] = 0x15; |
@@ -13755,14 +13846,14 @@ static void alc269vb_laptop_dmic_setup(struct hda_codec *codec) | |||
13755 | spec->ext_mic.pin = 0x18; | 13846 | spec->ext_mic.pin = 0x18; |
13756 | spec->ext_mic.mux_idx = 0; | 13847 | spec->ext_mic.mux_idx = 0; |
13757 | spec->int_mic.pin = 0x12; | 13848 | spec->int_mic.pin = 0x12; |
13758 | spec->int_mic.mux_idx = 6; | 13849 | spec->int_mic.mux_idx = 5; |
13759 | spec->auto_mic = 1; | 13850 | spec->auto_mic = 1; |
13760 | } | 13851 | } |
13761 | 13852 | ||
13762 | static void alc269_laptop_amic_setup(struct hda_codec *codec) | 13853 | static void alc269vb_laptop_amic_setup(struct hda_codec *codec) |
13763 | { | 13854 | { |
13764 | struct alc_spec *spec = codec->spec; | 13855 | struct alc_spec *spec = codec->spec; |
13765 | spec->autocfg.hp_pins[0] = 0x15; | 13856 | spec->autocfg.hp_pins[0] = 0x21; |
13766 | spec->autocfg.speaker_pins[0] = 0x14; | 13857 | spec->autocfg.speaker_pins[0] = 0x14; |
13767 | spec->ext_mic.pin = 0x18; | 13858 | spec->ext_mic.pin = 0x18; |
13768 | spec->ext_mic.mux_idx = 0; | 13859 | spec->ext_mic.mux_idx = 0; |
@@ -13771,6 +13862,18 @@ static void alc269_laptop_amic_setup(struct hda_codec *codec) | |||
13771 | spec->auto_mic = 1; | 13862 | spec->auto_mic = 1; |
13772 | } | 13863 | } |
13773 | 13864 | ||
13865 | static void alc269vb_laptop_dmic_setup(struct hda_codec *codec) | ||
13866 | { | ||
13867 | struct alc_spec *spec = codec->spec; | ||
13868 | spec->autocfg.hp_pins[0] = 0x21; | ||
13869 | spec->autocfg.speaker_pins[0] = 0x14; | ||
13870 | spec->ext_mic.pin = 0x18; | ||
13871 | spec->ext_mic.mux_idx = 0; | ||
13872 | spec->int_mic.pin = 0x12; | ||
13873 | spec->int_mic.mux_idx = 6; | ||
13874 | spec->auto_mic = 1; | ||
13875 | } | ||
13876 | |||
13774 | static void alc269_laptop_inithook(struct hda_codec *codec) | 13877 | static void alc269_laptop_inithook(struct hda_codec *codec) |
13775 | { | 13878 | { |
13776 | alc269_speaker_automute(codec); | 13879 | alc269_speaker_automute(codec); |
@@ -13975,6 +14078,27 @@ static void alc269_auto_init(struct hda_codec *codec) | |||
13975 | alc_inithook(codec); | 14078 | alc_inithook(codec); |
13976 | } | 14079 | } |
13977 | 14080 | ||
14081 | enum { | ||
14082 | ALC269_FIXUP_SONY_VAIO, | ||
14083 | }; | ||
14084 | |||
14085 | static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = { | ||
14086 | {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD}, | ||
14087 | {} | ||
14088 | }; | ||
14089 | |||
14090 | static const struct alc_fixup alc269_fixups[] = { | ||
14091 | [ALC269_FIXUP_SONY_VAIO] = { | ||
14092 | .verbs = alc269_sony_vaio_fixup_verbs | ||
14093 | }, | ||
14094 | }; | ||
14095 | |||
14096 | static struct snd_pci_quirk alc269_fixup_tbl[] = { | ||
14097 | SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), | ||
14098 | {} | ||
14099 | }; | ||
14100 | |||
14101 | |||
13978 | /* | 14102 | /* |
13979 | * configuration and preset | 14103 | * configuration and preset |
13980 | */ | 14104 | */ |
@@ -14034,7 +14158,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = { | |||
14034 | ALC269_DMIC), | 14158 | ALC269_DMIC), |
14035 | SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_DMIC), | 14159 | SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_DMIC), |
14036 | SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_DMIC), | 14160 | SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_DMIC), |
14037 | SND_PCI_QUIRK(0x104d, 0x9071, "SONY XTB", ALC269_DMIC), | 14161 | SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_AUTO), |
14038 | SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK), | 14162 | SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK), |
14039 | SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_DMIC), | 14163 | SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_DMIC), |
14040 | SND_PCI_QUIRK(0x1734, 0x115d, "FSC Amilo", ALC269_FUJITSU), | 14164 | SND_PCI_QUIRK(0x1734, 0x115d, "FSC Amilo", ALC269_FUJITSU), |
@@ -14108,7 +14232,7 @@ static struct alc_config_preset alc269_presets[] = { | |||
14108 | .num_channel_mode = ARRAY_SIZE(alc269_modes), | 14232 | .num_channel_mode = ARRAY_SIZE(alc269_modes), |
14109 | .channel_mode = alc269_modes, | 14233 | .channel_mode = alc269_modes, |
14110 | .unsol_event = alc269_laptop_unsol_event, | 14234 | .unsol_event = alc269_laptop_unsol_event, |
14111 | .setup = alc269_laptop_amic_setup, | 14235 | .setup = alc269vb_laptop_amic_setup, |
14112 | .init_hook = alc269_laptop_inithook, | 14236 | .init_hook = alc269_laptop_inithook, |
14113 | }, | 14237 | }, |
14114 | [ALC269VB_DMIC] = { | 14238 | [ALC269VB_DMIC] = { |
@@ -14188,6 +14312,9 @@ static int patch_alc269(struct hda_codec *codec) | |||
14188 | board_config = ALC269_AUTO; | 14312 | board_config = ALC269_AUTO; |
14189 | } | 14313 | } |
14190 | 14314 | ||
14315 | if (board_config == ALC269_AUTO) | ||
14316 | alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 1); | ||
14317 | |||
14191 | if (board_config == ALC269_AUTO) { | 14318 | if (board_config == ALC269_AUTO) { |
14192 | /* automatic parse from the BIOS config */ | 14319 | /* automatic parse from the BIOS config */ |
14193 | err = alc269_parse_auto_config(codec); | 14320 | err = alc269_parse_auto_config(codec); |
@@ -14240,6 +14367,9 @@ static int patch_alc269(struct hda_codec *codec) | |||
14240 | set_capture_mixer(codec); | 14367 | set_capture_mixer(codec); |
14241 | set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); | 14368 | set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); |
14242 | 14369 | ||
14370 | if (board_config == ALC269_AUTO) | ||
14371 | alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 0); | ||
14372 | |||
14243 | spec->vmaster_nid = 0x02; | 14373 | spec->vmaster_nid = 0x02; |
14244 | 14374 | ||
14245 | codec->patch_ops = alc_patch_ops; | 14375 | codec->patch_ops = alc_patch_ops; |
@@ -15328,7 +15458,8 @@ static int patch_alc861(struct hda_codec *codec) | |||
15328 | board_config = ALC861_AUTO; | 15458 | board_config = ALC861_AUTO; |
15329 | } | 15459 | } |
15330 | 15460 | ||
15331 | alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups); | 15461 | if (board_config == ALC861_AUTO) |
15462 | alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 1); | ||
15332 | 15463 | ||
15333 | if (board_config == ALC861_AUTO) { | 15464 | if (board_config == ALC861_AUTO) { |
15334 | /* automatic parse from the BIOS config */ | 15465 | /* automatic parse from the BIOS config */ |
@@ -15365,6 +15496,9 @@ static int patch_alc861(struct hda_codec *codec) | |||
15365 | 15496 | ||
15366 | spec->vmaster_nid = 0x03; | 15497 | spec->vmaster_nid = 0x03; |
15367 | 15498 | ||
15499 | if (board_config == ALC861_AUTO) | ||
15500 | alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 0); | ||
15501 | |||
15368 | codec->patch_ops = alc_patch_ops; | 15502 | codec->patch_ops = alc_patch_ops; |
15369 | if (board_config == ALC861_AUTO) { | 15503 | if (board_config == ALC861_AUTO) { |
15370 | spec->init_hook = alc861_auto_init; | 15504 | spec->init_hook = alc861_auto_init; |
@@ -16299,7 +16433,8 @@ static int patch_alc861vd(struct hda_codec *codec) | |||
16299 | board_config = ALC861VD_AUTO; | 16433 | board_config = ALC861VD_AUTO; |
16300 | } | 16434 | } |
16301 | 16435 | ||
16302 | alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups); | 16436 | if (board_config == ALC861VD_AUTO) |
16437 | alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 1); | ||
16303 | 16438 | ||
16304 | if (board_config == ALC861VD_AUTO) { | 16439 | if (board_config == ALC861VD_AUTO) { |
16305 | /* automatic parse from the BIOS config */ | 16440 | /* automatic parse from the BIOS config */ |
@@ -16347,6 +16482,9 @@ static int patch_alc861vd(struct hda_codec *codec) | |||
16347 | 16482 | ||
16348 | spec->vmaster_nid = 0x02; | 16483 | spec->vmaster_nid = 0x02; |
16349 | 16484 | ||
16485 | if (board_config == ALC861VD_AUTO) | ||
16486 | alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 0); | ||
16487 | |||
16350 | codec->patch_ops = alc_patch_ops; | 16488 | codec->patch_ops = alc_patch_ops; |
16351 | 16489 | ||
16352 | if (board_config == ALC861VD_AUTO) | 16490 | if (board_config == ALC861VD_AUTO) |
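
The ALC88x/ALC861/ALC269 hunks above add SND_PCI_QUIRK fixup tables and split alc_pick_fixup() into a pre-parse call (last argument 1) and a post-parse call (last argument 0), both gated on the AUTO board config. As a rough illustration of how such a PCI-SSID quirk table is consumed, here is a hedged sketch; example_apply_fixup() is hypothetical and is not the driver's alc_pick_fixup(); it only shows the lookup-and-apply idea, assuming each alc_fixup entry carries a verb list as in the tables above:

	/* Hypothetical helper, for illustration only. */
	static void example_apply_fixup(struct hda_codec *codec,
					const struct snd_pci_quirk *quirk_tbl,
					const struct alc_fixup *fixups)
	{
		const struct snd_pci_quirk *q;

		/* match the codec's PCI subsystem IDs against the table */
		q = snd_pci_quirk_lookup(codec->bus->pci, quirk_tbl);
		if (!q)
			return;

		/* q->value indexes the fixup array, e.g. ALC269_FIXUP_SONY_VAIO */
		if (fixups[q->value].verbs)
			snd_hda_sequence_write(codec, fixups[q->value].verbs);
	}

The driver calls the real helper twice, once before the BIOS auto-parse and once after the mixers are built, presumably so different parts of a fixup take effect at the right stage; the sketch does not model that split.
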
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index c4be3fab94e5..7fb7d017a347 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -1607,6 +1607,10 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = { | |||
1607 | "Dell Studio 1555", STAC_DELL_M6_DMIC), | 1607 | "Dell Studio 1555", STAC_DELL_M6_DMIC), |
1608 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd, | 1608 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd, |
1609 | "Dell Studio 1557", STAC_DELL_M6_DMIC), | 1609 | "Dell Studio 1557", STAC_DELL_M6_DMIC), |
1610 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe, | ||
1611 | "Dell Studio XPS 1645", STAC_DELL_M6_BOTH), | ||
1612 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413, | ||
1613 | "Dell Studio 1558", STAC_DELL_M6_BOTH), | ||
1610 | {} /* terminator */ | 1614 | {} /* terminator */ |
1611 | }; | 1615 | }; |
1612 | 1616 | ||
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 9ddc37300f6b..73453814e098 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c | |||
@@ -476,7 +476,7 @@ static struct snd_kcontrol_new *via_clone_control(struct via_spec *spec, | |||
476 | knew->name = kstrdup(tmpl->name, GFP_KERNEL); | 476 | knew->name = kstrdup(tmpl->name, GFP_KERNEL); |
477 | if (!knew->name) | 477 | if (!knew->name) |
478 | return NULL; | 478 | return NULL; |
479 | return 0; | 479 | return knew; |
480 | } | 480 | } |
481 | 481 | ||
482 | static void via_free_kctls(struct hda_codec *codec) | 482 | static void via_free_kctls(struct hda_codec *codec) |
@@ -1215,14 +1215,13 @@ static struct snd_kcontrol_new via_hp_mixer[2] = { | |||
1215 | }, | 1215 | }, |
1216 | }; | 1216 | }; |
1217 | 1217 | ||
1218 | static int via_hp_build(struct via_spec *spec) | 1218 | static int via_hp_build(struct hda_codec *codec) |
1219 | { | 1219 | { |
1220 | struct via_spec *spec = codec->spec; | ||
1220 | struct snd_kcontrol_new *knew; | 1221 | struct snd_kcontrol_new *knew; |
1221 | hda_nid_t nid; | 1222 | hda_nid_t nid; |
1222 | 1223 | int nums; | |
1223 | knew = via_clone_control(spec, &via_hp_mixer[0]); | 1224 | hda_nid_t conn[HDA_MAX_CONNECTIONS]; |
1224 | if (knew == NULL) | ||
1225 | return -ENOMEM; | ||
1226 | 1225 | ||
1227 | switch (spec->codec_type) { | 1226 | switch (spec->codec_type) { |
1228 | case VT1718S: | 1227 | case VT1718S: |
@@ -1239,6 +1238,14 @@ static int via_hp_build(struct via_spec *spec) | |||
1239 | break; | 1238 | break; |
1240 | } | 1239 | } |
1241 | 1240 | ||
1241 | nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS); | ||
1242 | if (nums <= 1) | ||
1243 | return 0; | ||
1244 | |||
1245 | knew = via_clone_control(spec, &via_hp_mixer[0]); | ||
1246 | if (knew == NULL) | ||
1247 | return -ENOMEM; | ||
1248 | |||
1242 | knew->subdevice = HDA_SUBDEV_NID_FLAG | nid; | 1249 | knew->subdevice = HDA_SUBDEV_NID_FLAG | nid; |
1243 | knew->private_value = nid; | 1250 | knew->private_value = nid; |
1244 | 1251 | ||
@@ -2561,7 +2568,7 @@ static int vt1708_parse_auto_config(struct hda_codec *codec) | |||
2561 | spec->input_mux = &spec->private_imux[0]; | 2568 | spec->input_mux = &spec->private_imux[0]; |
2562 | 2569 | ||
2563 | if (spec->hp_mux) | 2570 | if (spec->hp_mux) |
2564 | via_hp_build(spec); | 2571 | via_hp_build(codec); |
2565 | 2572 | ||
2566 | via_smart51_build(spec); | 2573 | via_smart51_build(spec); |
2567 | return 1; | 2574 | return 1; |
@@ -3087,7 +3094,7 @@ static int vt1709_parse_auto_config(struct hda_codec *codec) | |||
3087 | spec->input_mux = &spec->private_imux[0]; | 3094 | spec->input_mux = &spec->private_imux[0]; |
3088 | 3095 | ||
3089 | if (spec->hp_mux) | 3096 | if (spec->hp_mux) |
3090 | via_hp_build(spec); | 3097 | via_hp_build(codec); |
3091 | 3098 | ||
3092 | via_smart51_build(spec); | 3099 | via_smart51_build(spec); |
3093 | return 1; | 3100 | return 1; |
@@ -3654,7 +3661,7 @@ static int vt1708B_parse_auto_config(struct hda_codec *codec) | |||
3654 | spec->input_mux = &spec->private_imux[0]; | 3661 | spec->input_mux = &spec->private_imux[0]; |
3655 | 3662 | ||
3656 | if (spec->hp_mux) | 3663 | if (spec->hp_mux) |
3657 | via_hp_build(spec); | 3664 | via_hp_build(codec); |
3658 | 3665 | ||
3659 | via_smart51_build(spec); | 3666 | via_smart51_build(spec); |
3660 | return 1; | 3667 | return 1; |
@@ -4140,7 +4147,7 @@ static int vt1708S_parse_auto_config(struct hda_codec *codec) | |||
4140 | spec->input_mux = &spec->private_imux[0]; | 4147 | spec->input_mux = &spec->private_imux[0]; |
4141 | 4148 | ||
4142 | if (spec->hp_mux) | 4149 | if (spec->hp_mux) |
4143 | via_hp_build(spec); | 4150 | via_hp_build(codec); |
4144 | 4151 | ||
4145 | via_smart51_build(spec); | 4152 | via_smart51_build(spec); |
4146 | return 1; | 4153 | return 1; |
@@ -4510,7 +4517,7 @@ static int vt1702_parse_auto_config(struct hda_codec *codec) | |||
4510 | spec->input_mux = &spec->private_imux[0]; | 4517 | spec->input_mux = &spec->private_imux[0]; |
4511 | 4518 | ||
4512 | if (spec->hp_mux) | 4519 | if (spec->hp_mux) |
4513 | via_hp_build(spec); | 4520 | via_hp_build(codec); |
4514 | 4521 | ||
4515 | return 1; | 4522 | return 1; |
4516 | } | 4523 | } |
@@ -4930,7 +4937,7 @@ static int vt1718S_parse_auto_config(struct hda_codec *codec) | |||
4930 | spec->input_mux = &spec->private_imux[0]; | 4937 | spec->input_mux = &spec->private_imux[0]; |
4931 | 4938 | ||
4932 | if (spec->hp_mux) | 4939 | if (spec->hp_mux) |
4933 | via_hp_build(spec); | 4940 | via_hp_build(codec); |
4934 | 4941 | ||
4935 | via_smart51_build(spec); | 4942 | via_smart51_build(spec); |
4936 | 4943 | ||
@@ -5425,7 +5432,7 @@ static int vt1716S_parse_auto_config(struct hda_codec *codec) | |||
5425 | spec->input_mux = &spec->private_imux[0]; | 5432 | spec->input_mux = &spec->private_imux[0]; |
5426 | 5433 | ||
5427 | if (spec->hp_mux) | 5434 | if (spec->hp_mux) |
5428 | via_hp_build(spec); | 5435 | via_hp_build(codec); |
5429 | 5436 | ||
5430 | via_smart51_build(spec); | 5437 | via_smart51_build(spec); |
5431 | 5438 | ||
@@ -5781,7 +5788,7 @@ static int vt2002P_parse_auto_config(struct hda_codec *codec) | |||
5781 | spec->input_mux = &spec->private_imux[0]; | 5788 | spec->input_mux = &spec->private_imux[0]; |
5782 | 5789 | ||
5783 | if (spec->hp_mux) | 5790 | if (spec->hp_mux) |
5784 | via_hp_build(spec); | 5791 | via_hp_build(codec); |
5785 | 5792 | ||
5786 | return 1; | 5793 | return 1; |
5787 | } | 5794 | } |
@@ -6000,12 +6007,12 @@ static int vt1812_auto_create_multi_out_ctls(struct via_spec *spec, | |||
6000 | 6007 | ||
6001 | /* Line-Out: PortE */ | 6008 | /* Line-Out: PortE */ |
6002 | err = via_add_control(spec, VIA_CTL_WIDGET_VOL, | 6009 | err = via_add_control(spec, VIA_CTL_WIDGET_VOL, |
6003 | "Master Front Playback Volume", | 6010 | "Front Playback Volume", |
6004 | HDA_COMPOSE_AMP_VAL(0x8, 3, 0, HDA_OUTPUT)); | 6011 | HDA_COMPOSE_AMP_VAL(0x8, 3, 0, HDA_OUTPUT)); |
6005 | if (err < 0) | 6012 | if (err < 0) |
6006 | return err; | 6013 | return err; |
6007 | err = via_add_control(spec, VIA_CTL_WIDGET_BIND_PIN_MUTE, | 6014 | err = via_add_control(spec, VIA_CTL_WIDGET_BIND_PIN_MUTE, |
6008 | "Master Front Playback Switch", | 6015 | "Front Playback Switch", |
6009 | HDA_COMPOSE_AMP_VAL(0x28, 3, 0, HDA_OUTPUT)); | 6016 | HDA_COMPOSE_AMP_VAL(0x28, 3, 0, HDA_OUTPUT)); |
6010 | if (err < 0) | 6017 | if (err < 0) |
6011 | return err; | 6018 | return err; |
@@ -6130,7 +6137,7 @@ static int vt1812_parse_auto_config(struct hda_codec *codec) | |||
6130 | spec->input_mux = &spec->private_imux[0]; | 6137 | spec->input_mux = &spec->private_imux[0]; |
6131 | 6138 | ||
6132 | if (spec->hp_mux) | 6139 | if (spec->hp_mux) |
6133 | via_hp_build(spec); | 6140 | via_hp_build(codec); |
6134 | 6141 | ||
6135 | return 1; | 6142 | return 1; |
6136 | } | 6143 | } |
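
The patch_via.c change above passes the codec (rather than just the via_spec) into via_hp_build() so it can read the headphone pin's connection list and create the extra headphone mixer control only when the pin actually has more than one input to choose from. A minimal, hypothetical sketch of that kind of check, assuming only the public snd_hda_get_connections() helper:

	/* Illustration only: return true when a pin widget has more than one
	 * selectable input, i.e. when a source-selector control makes sense. */
	static bool pin_has_multiple_inputs(struct hda_codec *codec, hda_nid_t nid)
	{
		hda_nid_t conn[HDA_MAX_CONNECTIONS];
		int nums;

		nums = snd_hda_get_connections(codec, nid, conn, ARRAY_SIZE(conn));
		return nums > 1;	/* <= 1 (or an error) means nothing to select */
	}

This mirrors the "nums <= 1" early return added to via_hp_build() above.
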
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c index b64e78139d63..b56e33676780 100644 --- a/sound/pci/maestro3.c +++ b/sound/pci/maestro3.c | |||
@@ -849,6 +849,7 @@ struct snd_m3 { | |||
849 | struct snd_kcontrol *master_switch; | 849 | struct snd_kcontrol *master_switch; |
850 | struct snd_kcontrol *master_volume; | 850 | struct snd_kcontrol *master_volume; |
851 | struct tasklet_struct hwvol_tq; | 851 | struct tasklet_struct hwvol_tq; |
852 | unsigned int in_suspend; | ||
852 | 853 | ||
853 | #ifdef CONFIG_PM | 854 | #ifdef CONFIG_PM |
854 | u16 *suspend_mem; | 855 | u16 *suspend_mem; |
@@ -884,6 +885,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_m3_ids) = { | |||
884 | MODULE_DEVICE_TABLE(pci, snd_m3_ids); | 885 | MODULE_DEVICE_TABLE(pci, snd_m3_ids); |
885 | 886 | ||
886 | static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = { | 887 | static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = { |
888 | SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c), | ||
887 | SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d), | 889 | SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d), |
888 | SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d), | 890 | SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d), |
889 | SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03), | 891 | SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03), |
@@ -1613,6 +1615,11 @@ static void snd_m3_update_hw_volume(unsigned long private_data) | |||
1613 | outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER); | 1615 | outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER); |
1614 | outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER); | 1616 | outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER); |
1615 | 1617 | ||
1618 | /* Ignore spurious HV interrupts during suspend / resume; this avoids | ||
1619 | mistaking them for a mute button press. */ | ||
1620 | if (chip->in_suspend) | ||
1621 | return; | ||
1622 | |||
1616 | if (!chip->master_switch || !chip->master_volume) | 1623 | if (!chip->master_switch || !chip->master_volume) |
1617 | return; | 1624 | return; |
1618 | 1625 | ||
@@ -2424,6 +2431,7 @@ static int m3_suspend(struct pci_dev *pci, pm_message_t state) | |||
2424 | if (chip->suspend_mem == NULL) | 2431 | if (chip->suspend_mem == NULL) |
2425 | return 0; | 2432 | return 0; |
2426 | 2433 | ||
2434 | chip->in_suspend = 1; | ||
2427 | snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); | 2435 | snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); |
2428 | snd_pcm_suspend_all(chip->pcm); | 2436 | snd_pcm_suspend_all(chip->pcm); |
2429 | snd_ac97_suspend(chip->ac97); | 2437 | snd_ac97_suspend(chip->ac97); |
@@ -2497,6 +2505,7 @@ static int m3_resume(struct pci_dev *pci) | |||
2497 | snd_m3_hv_init(chip); | 2505 | snd_m3_hv_init(chip); |
2498 | 2506 | ||
2499 | snd_power_change_state(card, SNDRV_CTL_POWER_D0); | 2507 | snd_power_change_state(card, SNDRV_CTL_POWER_D0); |
2508 | chip->in_suspend = 0; | ||
2500 | return 0; | 2509 | return 0; |
2501 | } | 2510 | } |
2502 | #endif /* CONFIG_PM */ | 2511 | #endif /* CONFIG_PM */ |
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index a34cbcf7904f..002e289d1255 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c | |||
@@ -23,7 +23,6 @@ | |||
23 | 23 | ||
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/moduleparam.h> | 25 | #include <linux/moduleparam.h> |
26 | #include <linux/version.h> | ||
27 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
28 | #include <linux/init.h> | 27 | #include <linux/init.h> |
29 | #include <linux/firmware.h> | 28 | #include <linux/firmware.h> |
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c index 2e79d7136298..2b31ac673ea4 100644 --- a/sound/soc/imx/imx-pcm-dma-mx2.c +++ b/sound/soc/imx/imx-pcm-dma-mx2.c | |||
@@ -71,7 +71,12 @@ static void imx_ssi_dma_callback(int channel, void *data) | |||
71 | 71 | ||
72 | static void snd_imx_dma_err_callback(int channel, void *data, int err) | 72 | static void snd_imx_dma_err_callback(int channel, void *data, int err) |
73 | { | 73 | { |
74 | pr_err("DMA error callback called\n"); | 74 | struct snd_pcm_substream *substream = data; |
75 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | ||
76 | struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data; | ||
77 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
78 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | ||
79 | int ret; | ||
75 | 80 | ||
76 | pr_err("DMA timeout on channel %d -%s%s%s%s\n", | 81 | pr_err("DMA timeout on channel %d -%s%s%s%s\n", |
77 | channel, | 82 | channel, |
@@ -79,6 +84,14 @@ static void snd_imx_dma_err_callback(int channel, void *data, int err) | |||
79 | err & IMX_DMA_ERR_REQUEST ? " request" : "", | 84 | err & IMX_DMA_ERR_REQUEST ? " request" : "", |
80 | err & IMX_DMA_ERR_TRANSFER ? " transfer" : "", | 85 | err & IMX_DMA_ERR_TRANSFER ? " transfer" : "", |
81 | err & IMX_DMA_ERR_BUFFER ? " buffer" : ""); | 86 | err & IMX_DMA_ERR_BUFFER ? " buffer" : ""); |
87 | |||
88 | imx_dma_disable(iprtd->dma); | ||
89 | ret = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count, | ||
90 | IMX_DMA_LENGTH_LOOP, dma_params->dma_addr, | ||
91 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | ||
92 | DMA_MODE_WRITE : DMA_MODE_READ); | ||
93 | if (!ret) | ||
94 | imx_dma_enable(iprtd->dma); | ||
82 | } | 95 | } |
83 | 96 | ||
84 | static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream) | 97 | static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream) |
diff --git a/sound/soc/imx/imx-pcm-fiq.c b/sound/soc/imx/imx-pcm-fiq.c index f96a373699cf..6b518e07eea9 100644 --- a/sound/soc/imx/imx-pcm-fiq.c +++ b/sound/soc/imx/imx-pcm-fiq.c | |||
@@ -39,23 +39,24 @@ struct imx_pcm_runtime_data { | |||
39 | unsigned long offset; | 39 | unsigned long offset; |
40 | unsigned long last_offset; | 40 | unsigned long last_offset; |
41 | unsigned long size; | 41 | unsigned long size; |
42 | struct timer_list timer; | 42 | struct hrtimer hrt; |
43 | int poll_time; | 43 | int poll_time_ns; |
44 | struct snd_pcm_substream *substream; | ||
45 | atomic_t running; | ||
44 | }; | 46 | }; |
45 | 47 | ||
46 | static inline void imx_ssi_set_next_poll(struct imx_pcm_runtime_data *iprtd) | 48 | static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt) |
47 | { | 49 | { |
48 | iprtd->timer.expires = jiffies + iprtd->poll_time; | 50 | struct imx_pcm_runtime_data *iprtd = |
49 | } | 51 | container_of(hrt, struct imx_pcm_runtime_data, hrt); |
50 | 52 | struct snd_pcm_substream *substream = iprtd->substream; | |
51 | static void imx_ssi_timer_callback(unsigned long data) | ||
52 | { | ||
53 | struct snd_pcm_substream *substream = (void *)data; | ||
54 | struct snd_pcm_runtime *runtime = substream->runtime; | 53 | struct snd_pcm_runtime *runtime = substream->runtime; |
55 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | ||
56 | struct pt_regs regs; | 54 | struct pt_regs regs; |
57 | unsigned long delta; | 55 | unsigned long delta; |
58 | 56 | ||
57 | if (!atomic_read(&iprtd->running)) | ||
58 | return HRTIMER_NORESTART; | ||
59 | |||
59 | get_fiq_regs(®s); | 60 | get_fiq_regs(®s); |
60 | 61 | ||
61 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 62 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
@@ -72,16 +73,14 @@ static void imx_ssi_timer_callback(unsigned long data) | |||
72 | 73 | ||
73 | /* If we've transferred at least a period then report it and | 74 | /* If we've transferred at least a period then report it and |
74 | * reset our poll time */ | 75 | * reset our poll time */ |
75 | if (delta >= runtime->period_size) { | 76 | if (delta >= iprtd->period) { |
76 | snd_pcm_period_elapsed(substream); | 77 | snd_pcm_period_elapsed(substream); |
77 | iprtd->last_offset = iprtd->offset; | 78 | iprtd->last_offset = iprtd->offset; |
78 | |||
79 | imx_ssi_set_next_poll(iprtd); | ||
80 | } | 79 | } |
81 | 80 | ||
82 | /* Restart the timer; if we didn't report we'll run on the next tick */ | 81 | hrtimer_forward_now(hrt, ns_to_ktime(iprtd->poll_time_ns)); |
83 | add_timer(&iprtd->timer); | ||
84 | 82 | ||
83 | return HRTIMER_RESTART; | ||
85 | } | 84 | } |
86 | 85 | ||
87 | static struct fiq_handler fh = { | 86 | static struct fiq_handler fh = { |
@@ -99,8 +98,8 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream, | |||
99 | iprtd->period = params_period_bytes(params) ; | 98 | iprtd->period = params_period_bytes(params) ; |
100 | iprtd->offset = 0; | 99 | iprtd->offset = 0; |
101 | iprtd->last_offset = 0; | 100 | iprtd->last_offset = 0; |
102 | iprtd->poll_time = HZ / (params_rate(params) / params_period_size(params)); | 101 | iprtd->poll_time_ns = 1000000000 / params_rate(params) * |
103 | 102 | params_period_size(params); | |
104 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); | 103 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); |
105 | 104 | ||
106 | return 0; | 105 | return 0; |
@@ -135,8 +134,9 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | |||
135 | case SNDRV_PCM_TRIGGER_START: | 134 | case SNDRV_PCM_TRIGGER_START: |
136 | case SNDRV_PCM_TRIGGER_RESUME: | 135 | case SNDRV_PCM_TRIGGER_RESUME: |
137 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | 136 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
138 | imx_ssi_set_next_poll(iprtd); | 137 | atomic_set(&iprtd->running, 1); |
139 | add_timer(&iprtd->timer); | 138 | hrtimer_start(&iprtd->hrt, ns_to_ktime(iprtd->poll_time_ns), |
139 | HRTIMER_MODE_REL); | ||
140 | if (++fiq_enable == 1) | 140 | if (++fiq_enable == 1) |
141 | enable_fiq(imx_pcm_fiq); | 141 | enable_fiq(imx_pcm_fiq); |
142 | 142 | ||
@@ -145,11 +145,11 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | |||
145 | case SNDRV_PCM_TRIGGER_STOP: | 145 | case SNDRV_PCM_TRIGGER_STOP: |
146 | case SNDRV_PCM_TRIGGER_SUSPEND: | 146 | case SNDRV_PCM_TRIGGER_SUSPEND: |
147 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: | 147 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
148 | del_timer(&iprtd->timer); | 148 | atomic_set(&iprtd->running, 0); |
149 | |||
149 | if (--fiq_enable == 0) | 150 | if (--fiq_enable == 0) |
150 | disable_fiq(imx_pcm_fiq); | 151 | disable_fiq(imx_pcm_fiq); |
151 | 152 | ||
152 | |||
153 | break; | 153 | break; |
154 | default: | 154 | default: |
155 | return -EINVAL; | 155 | return -EINVAL; |
@@ -180,7 +180,7 @@ static struct snd_pcm_hardware snd_imx_hardware = { | |||
180 | .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, | 180 | .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, |
181 | .period_bytes_min = 128, | 181 | .period_bytes_min = 128, |
182 | .period_bytes_max = 16 * 1024, | 182 | .period_bytes_max = 16 * 1024, |
183 | .periods_min = 2, | 183 | .periods_min = 4, |
184 | .periods_max = 255, | 184 | .periods_max = 255, |
185 | .fifo_size = 0, | 185 | .fifo_size = 0, |
186 | }; | 186 | }; |
@@ -194,9 +194,11 @@ static int snd_imx_open(struct snd_pcm_substream *substream) | |||
194 | iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL); | 194 | iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL); |
195 | runtime->private_data = iprtd; | 195 | runtime->private_data = iprtd; |
196 | 196 | ||
197 | init_timer(&iprtd->timer); | 197 | iprtd->substream = substream; |
198 | iprtd->timer.data = (unsigned long)substream; | 198 | |
199 | iprtd->timer.function = imx_ssi_timer_callback; | 199 | atomic_set(&iprtd->running, 0); |
200 | hrtimer_init(&iprtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
201 | iprtd->hrt.function = snd_hrtimer_callback; | ||
200 | 202 | ||
201 | ret = snd_pcm_hw_constraint_integer(substream->runtime, | 203 | ret = snd_pcm_hw_constraint_integer(substream->runtime, |
202 | SNDRV_PCM_HW_PARAM_PERIODS); | 204 | SNDRV_PCM_HW_PARAM_PERIODS); |
@@ -212,7 +214,8 @@ static int snd_imx_close(struct snd_pcm_substream *substream) | |||
212 | struct snd_pcm_runtime *runtime = substream->runtime; | 214 | struct snd_pcm_runtime *runtime = substream->runtime; |
213 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 215 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
214 | 216 | ||
215 | del_timer_sync(&iprtd->timer); | 217 | hrtimer_cancel(&iprtd->hrt); |
218 | |||
216 | kfree(iprtd); | 219 | kfree(iprtd); |
217 | 220 | ||
218 | return 0; | 221 | return 0; |
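
The imx-pcm-fiq.c conversion above replaces a jiffies-based timer_list with an hrtimer plus an atomic running flag, so the period poll fires with nanosecond granularity instead of being quantised to HZ. A self-contained sketch of that pattern, with illustrative names (poll_ctx, poll_cb and friends are not the driver's symbols):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>
	#include <linux/atomic.h>
	#include <linux/kernel.h>

	struct poll_ctx {
		struct hrtimer hrt;
		atomic_t running;
		unsigned int poll_time_ns;
	};

	static enum hrtimer_restart poll_cb(struct hrtimer *hrt)
	{
		struct poll_ctx *ctx = container_of(hrt, struct poll_ctx, hrt);

		if (!atomic_read(&ctx->running))
			return HRTIMER_NORESTART;	/* stopped: do not rearm */

		/* ... check how much data moved, report elapsed periods ... */

		hrtimer_forward_now(hrt, ns_to_ktime(ctx->poll_time_ns));
		return HRTIMER_RESTART;			/* rearm relative to now */
	}

	static void poll_start(struct poll_ctx *ctx)
	{
		atomic_set(&ctx->running, 1);
		hrtimer_init(&ctx->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		ctx->hrt.function = poll_cb;
		hrtimer_start(&ctx->hrt, ns_to_ktime(ctx->poll_time_ns),
			      HRTIMER_MODE_REL);
	}

	static void poll_stop(struct poll_ctx *ctx)
	{
		atomic_set(&ctx->running, 0);
		hrtimer_cancel(&ctx->hrt);	/* waits for a running callback */
	}

The flag lets the trigger path stop the poll by simply letting the callback return HRTIMER_NORESTART, presumably so it does not have to cancel the timer synchronously from atomic context; hrtimer_cancel() is then only needed on close, as in the patch.
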
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c index 0bcc6d7d9471..80b4fee2442b 100644 --- a/sound/soc/imx/imx-ssi.c +++ b/sound/soc/imx/imx-ssi.c | |||
@@ -656,7 +656,8 @@ static int imx_ssi_probe(struct platform_device *pdev) | |||
656 | dai->private_data = ssi; | 656 | dai->private_data = ssi; |
657 | 657 | ||
658 | if ((cpu_is_mx27() || cpu_is_mx21()) && | 658 | if ((cpu_is_mx27() || cpu_is_mx21()) && |
659 | !(ssi->flags & IMX_SSI_USE_AC97)) { | 659 | !(ssi->flags & IMX_SSI_USE_AC97) && |
660 | (ssi->flags & IMX_SSI_DMA)) { | ||
660 | ssi->flags |= IMX_SSI_DMA; | 661 | ssi->flags |= IMX_SSI_DMA; |
661 | platform = imx_ssi_dma_mx2_init(pdev, ssi); | 662 | platform = imx_ssi_dma_mx2_init(pdev, ssi); |
662 | } else | 663 | } else |
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c index 2c59afd99611..9e28b20cb2ce 100644 --- a/sound/usb/usbmidi.c +++ b/sound/usb/usbmidi.c | |||
@@ -986,6 +986,8 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream) | |||
986 | DEFINE_WAIT(wait); | 986 | DEFINE_WAIT(wait); |
987 | long timeout = msecs_to_jiffies(50); | 987 | long timeout = msecs_to_jiffies(50); |
988 | 988 | ||
989 | if (ep->umidi->disconnected) | ||
990 | return; | ||
989 | /* | 991 | /* |
990 | * The substream buffer is empty, but some data might still be in the | 992 | * The substream buffer is empty, but some data might still be in the |
991 | * currently active URBs, so we have to wait for those to complete. | 993 | * currently active URBs, so we have to wait for those to complete. |
@@ -1123,14 +1125,21 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi, | |||
1123 | * Frees an output endpoint. | 1125 | * Frees an output endpoint. |
1124 | * May be called when ep hasn't been initialized completely. | 1126 | * May be called when ep hasn't been initialized completely. |
1125 | */ | 1127 | */ |
1126 | static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep) | 1128 | static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep) |
1127 | { | 1129 | { |
1128 | unsigned int i; | 1130 | unsigned int i; |
1129 | 1131 | ||
1130 | for (i = 0; i < OUTPUT_URBS; ++i) | 1132 | for (i = 0; i < OUTPUT_URBS; ++i) |
1131 | if (ep->urbs[i].urb) | 1133 | if (ep->urbs[i].urb) { |
1132 | free_urb_and_buffer(ep->umidi, ep->urbs[i].urb, | 1134 | free_urb_and_buffer(ep->umidi, ep->urbs[i].urb, |
1133 | ep->max_transfer); | 1135 | ep->max_transfer); |
1136 | ep->urbs[i].urb = NULL; | ||
1137 | } | ||
1138 | } | ||
1139 | |||
1140 | static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep) | ||
1141 | { | ||
1142 | snd_usbmidi_out_endpoint_clear(ep); | ||
1134 | kfree(ep); | 1143 | kfree(ep); |
1135 | } | 1144 | } |
1136 | 1145 | ||
@@ -1262,15 +1271,18 @@ void snd_usbmidi_disconnect(struct list_head* p) | |||
1262 | usb_kill_urb(ep->out->urbs[j].urb); | 1271 | usb_kill_urb(ep->out->urbs[j].urb); |
1263 | if (umidi->usb_protocol_ops->finish_out_endpoint) | 1272 | if (umidi->usb_protocol_ops->finish_out_endpoint) |
1264 | umidi->usb_protocol_ops->finish_out_endpoint(ep->out); | 1273 | umidi->usb_protocol_ops->finish_out_endpoint(ep->out); |
1274 | ep->out->active_urbs = 0; | ||
1275 | if (ep->out->drain_urbs) { | ||
1276 | ep->out->drain_urbs = 0; | ||
1277 | wake_up(&ep->out->drain_wait); | ||
1278 | } | ||
1265 | } | 1279 | } |
1266 | if (ep->in) | 1280 | if (ep->in) |
1267 | for (j = 0; j < INPUT_URBS; ++j) | 1281 | for (j = 0; j < INPUT_URBS; ++j) |
1268 | usb_kill_urb(ep->in->urbs[j]); | 1282 | usb_kill_urb(ep->in->urbs[j]); |
1269 | /* free endpoints here; later call can result in Oops */ | 1283 | /* free endpoints here; later call can result in Oops */ |
1270 | if (ep->out) { | 1284 | if (ep->out) |
1271 | snd_usbmidi_out_endpoint_delete(ep->out); | 1285 | snd_usbmidi_out_endpoint_clear(ep->out); |
1272 | ep->out = NULL; | ||
1273 | } | ||
1274 | if (ep->in) { | 1286 | if (ep->in) { |
1275 | snd_usbmidi_in_endpoint_delete(ep->in); | 1287 | snd_usbmidi_in_endpoint_delete(ep->in); |
1276 | ep->in = NULL; | 1288 | ep->in = NULL; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5a0cd194dce0..c82ae2492634 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -341,7 +341,11 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, | |||
341 | struct mm_struct *mm) | 341 | struct mm_struct *mm) |
342 | { | 342 | { |
343 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | 343 | struct kvm *kvm = mmu_notifier_to_kvm(mn); |
344 | int idx; | ||
345 | |||
346 | idx = srcu_read_lock(&kvm->srcu); | ||
344 | kvm_arch_flush_shadow(kvm); | 347 | kvm_arch_flush_shadow(kvm); |
348 | srcu_read_unlock(&kvm->srcu, idx); | ||
345 | } | 349 | } |
346 | 350 | ||
347 | static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { | 351 | static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { |
@@ -648,7 +652,7 @@ skip_lpage: | |||
648 | 652 | ||
649 | /* Allocate page dirty bitmap if needed */ | 653 | /* Allocate page dirty bitmap if needed */ |
650 | if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { | 654 | if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { |
651 | unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8; | 655 | unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new); |
652 | 656 | ||
653 | new.dirty_bitmap = vmalloc(dirty_bytes); | 657 | new.dirty_bitmap = vmalloc(dirty_bytes); |
654 | if (!new.dirty_bitmap) | 658 | if (!new.dirty_bitmap) |
@@ -768,7 +772,7 @@ int kvm_get_dirty_log(struct kvm *kvm, | |||
768 | { | 772 | { |
769 | struct kvm_memory_slot *memslot; | 773 | struct kvm_memory_slot *memslot; |
770 | int r, i; | 774 | int r, i; |
771 | int n; | 775 | unsigned long n; |
772 | unsigned long any = 0; | 776 | unsigned long any = 0; |
773 | 777 | ||
774 | r = -EINVAL; | 778 | r = -EINVAL; |
@@ -780,7 +784,7 @@ int kvm_get_dirty_log(struct kvm *kvm, | |||
780 | if (!memslot->dirty_bitmap) | 784 | if (!memslot->dirty_bitmap) |
781 | goto out; | 785 | goto out; |
782 | 786 | ||
783 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 787 | n = kvm_dirty_bitmap_bytes(memslot); |
784 | 788 | ||
785 | for (i = 0; !any && i < n/sizeof(long); ++i) | 789 | for (i = 0; !any && i < n/sizeof(long); ++i) |
786 | any = memslot->dirty_bitmap[i]; | 790 | any = memslot->dirty_bitmap[i]; |
@@ -1186,10 +1190,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn) | |||
1186 | memslot = gfn_to_memslot_unaliased(kvm, gfn); | 1190 | memslot = gfn_to_memslot_unaliased(kvm, gfn); |
1187 | if (memslot && memslot->dirty_bitmap) { | 1191 | if (memslot && memslot->dirty_bitmap) { |
1188 | unsigned long rel_gfn = gfn - memslot->base_gfn; | 1192 | unsigned long rel_gfn = gfn - memslot->base_gfn; |
1193 | unsigned long *p = memslot->dirty_bitmap + | ||
1194 | rel_gfn / BITS_PER_LONG; | ||
1195 | int offset = rel_gfn % BITS_PER_LONG; | ||
1189 | 1196 | ||
1190 | /* avoid RMW */ | 1197 | /* avoid RMW */ |
1191 | if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap)) | 1198 | if (!generic_test_le_bit(offset, p)) |
1192 | generic___set_le_bit(rel_gfn, memslot->dirty_bitmap); | 1199 | generic___set_le_bit(offset, p); |
1193 | } | 1200 | } |
1194 | } | 1201 | } |
1195 | 1202 | ||
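
The dirty-bitmap hunks above replace the open-coded size computation with kvm_dirty_bitmap_bytes() and widen the byte count from int to unsigned long, and mark_page_dirty() now splits the page's bit index into a word pointer plus an in-word offset before calling the generic little-endian bit helpers. A hedged sketch of what the new helper presumably reduces to, inferred from the expression it replaces (its real definition is outside this diff):

	/* Presumed equivalent of the replaced ALIGN(npages, BITS_PER_LONG) / 8:
	 * one dirty bit per page, rounded up to whole longs, expressed in bytes.
	 * Returning unsigned long keeps huge memory slots from overflowing the
	 * old 'int n'. */
	static unsigned long example_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
	{
		return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	}

Splitting rel_gfn into p = bitmap + rel_gfn / BITS_PER_LONG and offset = rel_gfn % BITS_PER_LONG keeps the bit number handed to generic_test_le_bit()/generic___set_le_bit() small, which presumably avoids overflowing their bit-index argument on very large slots.
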