958 files changed, 23077 insertions, 10851 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX index c17cd4bb2290..1b777b960492 100644 --- a/Documentation/00-INDEX +++ b/Documentation/00-INDEX | |||
@@ -328,8 +328,6 @@ sysrq.txt | |||
328 | - info on the magic SysRq key. | 328 | - info on the magic SysRq key. |
329 | telephony/ | 329 | telephony/ |
330 | - directory with info on telephony (e.g. voice over IP) support. | 330 | - directory with info on telephony (e.g. voice over IP) support. |
331 | uml/ | ||
332 | - directory with information about User Mode Linux. | ||
333 | unicode.txt | 331 | unicode.txt |
334 | - info on the Unicode character/font mapping used in Linux. | 332 | - info on the Unicode character/font mapping used in Linux. |
335 | unshare.txt | 333 | unshare.txt |
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 7564e88bfa43..e7be75b96e4b 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu | |||
@@ -183,21 +183,21 @@ Description: Discover and change clock speed of CPUs | |||
183 | to learn how to control the knobs. | 183 | to learn how to control the knobs. |
184 | 184 | ||
185 | 185 | ||
186 | What: /sys/devices/system/cpu/cpu*/cache/index*/cache_disable_X | 186 | What: /sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1} |
187 | Date: August 2008 | 187 | Date: August 2008 |
188 | KernelVersion: 2.6.27 | 188 | KernelVersion: 2.6.27 |
189 | Contact: mark.langsdorf@amd.com | 189 | Contact: discuss@x86-64.org |
190 | Description: These files exist in every cpu's cache index directories. | 190 | Description: Disable L3 cache indices |
191 | There are currently 2 cache_disable_# files in each | 191 | |
192 | directory. Reading from these files on a supported | 192 | These files exist in every CPU's cache/index3 directory. Each |
193 | processor will return that cache disable index value | 193 | cache_disable_{0,1} file corresponds to one disable slot which |
194 | for that processor and node. Writing to one of these | 194 | can be used to disable a cache index. Reading from these files |
195 | files will cause the specificed cache index to be disabled. | 195 | on a processor with this functionality will return the currently |
196 | 196 | disabled index for that node. There is one L3 structure per | |
197 | Currently, only AMD Family 10h Processors support cache index | 197 | node, or per internal node on MCM machines. Writing a valid |
198 | disable, and only for their L3 caches. See the BIOS and | 198 | index to one of these files will cause the specified cache |
199 | Kernel Developer's Guide at | 199 | index to be disabled. |
200 | http://support.amd.com/us/Embedded_TechDocs/31116-Public-GH-BKDG_3-28_5-28-09.pdf | 200 | |
201 | for formatting information and other details on the | 201 | All AMD processors with L3 caches provide this functionality. |
202 | cache index disable. | 202 | For details, see BKDGs at |
203 | Users: joachim.deguara@amd.com | 203 | http://developer.amd.com/documentation/guides/Pages/default.aspx |
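[Editor's example] For illustration, the interface described in the hunk above can be driven from user space as follows. This is a minimal sketch: the CPU number, the disable slot, and the index value 12 are arbitrary examples, and the write succeeds only on processors that expose this functionality.

    /* Sketch: disable L3 cache index 12 via the first disable slot of
     * CPU 0.  The index and CPU number are arbitrary examples. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *path =
            "/sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror("fopen");
            return EXIT_FAILURE;
        }
        /* Writing a valid index disables it; invalid values are rejected. */
        if (fprintf(f, "12\n") < 0 || fclose(f) != 0) {
            perror("write");
            return EXIT_FAILURE;
        }
        return EXIT_SUCCESS;
    }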
diff --git a/Documentation/ABI/testing/sysfs-firmware-dmi b/Documentation/ABI/testing/sysfs-firmware-dmi index ba9da9503c23..c78f9ab01e56 100644 --- a/Documentation/ABI/testing/sysfs-firmware-dmi +++ b/Documentation/ABI/testing/sysfs-firmware-dmi | |||
@@ -14,14 +14,15 @@ Description: | |||
14 | 14 | ||
15 | DMI is structured as a large table of entries, where | 15 | DMI is structured as a large table of entries, where |
16 | each entry has a common header indicating the type and | 16 | each entry has a common header indicating the type and |
17 | length of the entry, as well as 'handle' that is | 17 | length of the entry, as well as a firmware-provided |
18 | supposed to be unique amongst all entries. | 18 | 'handle' that is supposed to be unique amongst all |
19 | entries. | ||
19 | 20 | ||
20 | Some entries are required by the specification, but many | 21 | Some entries are required by the specification, but many |
21 | others are optional. In general though, users should | 22 | others are optional. In general though, users should |
22 | never expect to find a specific entry type on their | 23 | never expect to find a specific entry type on their |
23 | system unless they know for certain what their firmware | 24 | system unless they know for certain what their firmware |
24 | is doing. Machine to machine will vary. | 25 | is doing. Machine to machine experiences will vary. |
25 | 26 | ||
26 | Multiple entries of the same type are allowed. In order | 27 | Multiple entries of the same type are allowed. In order |
27 | to handle these duplicate entry types, each entry is | 28 | to handle these duplicate entry types, each entry is |
@@ -67,25 +68,24 @@ Description: | |||
67 | and the two terminating nul characters. | 68 | and the two terminating nul characters. |
68 | type : The type of the entry. This value is the same | 69 | type : The type of the entry. This value is the same |
69 | as found in the directory name. It indicates | 70 | as found in the directory name. It indicates |
70 | how the rest of the entry should be | 71 | how the rest of the entry should be interpreted. |
71 | interpreted. | ||
72 | instance: The instance ordinal of the entry for the | 72 | instance: The instance ordinal of the entry for the |
73 | given type. This value is the same as found | 73 | given type. This value is the same as found |
74 | in the parent directory name. | 74 | in the parent directory name. |
75 | position: The position of the entry within the entirety | 75 | position: The ordinal position (zero-based) of the entry |
76 | of the entirety. | 76 | within the entirety of the DMI entry table. |
77 | 77 | ||
78 | === Entry Specialization === | 78 | === Entry Specialization === |
79 | 79 | ||
80 | Some entry types may have other information available in | 80 | Some entry types may have other information available in |
81 | sysfs. | 81 | sysfs. Not all types are specialized. |
82 | 82 | ||
83 | --- Type 15 - System Event Log --- | 83 | --- Type 15 - System Event Log --- |
84 | 84 | ||
85 | This entry allows the firmware to export a log of | 85 | This entry allows the firmware to export a log of |
86 | events the system has taken. This information is | 86 | events the system has taken. This information is |
87 | typically backed by nvram, but the implementation | 87 | typically backed by nvram, but the implementation |
88 | details are abstracted by this table. This entries data | 88 | details are abstracted by this table. This entry's data |
89 | is exported in the directory: | 89 | is exported in the directory: |
90 | 90 | ||
91 | /sys/firmware/dmi/entries/15-0/system_event_log | 91 | /sys/firmware/dmi/entries/15-0/system_event_log |
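[Editor's example] A small reader for the per-entry attributes described above might look like the following sketch. It assumes a type-15 entry exists at instance 0, which, as the text itself warns, is firmware-dependent.

    /* Sketch: print the type/instance/position/length of DMI entry 15-0.
     * Whether a type-15 entry exists at all depends on the firmware. */
    #include <stdio.h>

    static long read_field(const char *name)
    {
        char path[128];
        FILE *f;
        long val = -1;

        snprintf(path, sizeof(path),
                 "/sys/firmware/dmi/entries/15-0/%s", name);
        f = fopen(path, "r");
        if (f) {
            fscanf(f, "%ld", &val);
            fclose(f);
        }
        return val;
    }

    int main(void)
    {
        printf("type=%ld instance=%ld position=%ld length=%ld\n",
               read_field("type"), read_field("instance"),
               read_field("position"), read_field("length"));
        return 0;
    }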
diff --git a/Documentation/ABI/testing/sysfs-firmware-gsmi b/Documentation/ABI/testing/sysfs-firmware-gsmi new file mode 100644 index 000000000000..0faa0aaf4b6a --- /dev/null +++ b/Documentation/ABI/testing/sysfs-firmware-gsmi | |||
@@ -0,0 +1,58 @@ | |||
1 | What: /sys/firmware/gsmi | ||
2 | Date: March 2011 | ||
3 | Contact: Mike Waychison <mikew@google.com> | ||
4 | Description: | ||
5 | Some servers used internally at Google have firmware | ||
6 | that provides callback functionality via explicit SMI | ||
7 | triggers. Some of the callbacks are similar to those | ||
8 | provided by the EFI runtime services page, but due to | ||
9 | historical reasons this different entry-point has been | ||
10 | used. | ||
11 | |||
12 | The gsmi driver implements the kernel's abstraction for | ||
13 | these firmware callbacks. Currently, this functionality | ||
14 | is limited to handling the system event log and getting | ||
15 | access to EFI-style variables stored in nvram. | ||
16 | |||
17 | Layout: | ||
18 | |||
19 | /sys/firmware/gsmi/vars: | ||
20 | |||
21 | This directory has the same layout (and | ||
22 | underlying implementation) as /sys/firmware/efi/vars. | ||
23 | See Documentation/ABI/*/sysfs-firmware-efi-vars | ||
24 | for more information on how to interact with | ||
25 | this structure. | ||
26 | |||
27 | /sys/firmware/gsmi/append_to_eventlog - write-only: | ||
28 | |||
29 | This file takes a binary blob and passes it onto | ||
30 | the firmware to be timestamped and appended to | ||
31 | the system eventlog. The binary format is | ||
32 | interpreted by the firmware and may change from | ||
33 | platform to platform. The only kernel-enforced | ||
34 | requirement is that the blob be prefixed with a | ||
35 | 32-bit host-endian type used as part of the | ||
36 | firmware call. | ||
37 | |||
38 | /sys/firmware/gsmi/clear_config - write-only: | ||
39 | |||
40 | Writing any value to this file will cause the | ||
41 | entire firmware configuration to be reset to | ||
42 | "factory defaults". Callers should assume that | ||
43 | a reboot is required for the configuration to be | ||
44 | cleared. | ||
45 | |||
46 | /sys/firmware/gsmi/clear_eventlog - write-only: | ||
47 | |||
48 | This file is used to clear out part or all | ||
49 | of the system event log. Values written | ||
50 | should be values between 1 and 100 inclusive (in | ||
51 | ASCII) representing the fraction of the log to | ||
52 | clear. Not all platforms support fractional | ||
53 | clearing though, and writes to this file | ||
54 | will error out if the firmware rejects the | ||
55 | submitted fraction. | ||
56 | |||
57 | Callers should assume that a reboot is needed | ||
58 | for this operation to complete. | ||
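[Editor's example] The append_to_eventlog format above can be exercised with a sketch like the following. The type code 0x1 and the payload bytes are invented examples; the firmware defines which types and payload formats it accepts.

    /* Sketch: append a payload to the firmware event log via gsmi.
     * The type code and payload bytes are arbitrary examples. */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        uint32_t type = 0x1;            /* 32-bit host-endian type prefix */
        unsigned char payload[] = { 0xde, 0xad, 0xbe, 0xef };
        unsigned char blob[sizeof(type) + sizeof(payload)];
        int fd;

        memcpy(blob, &type, sizeof(type));
        memcpy(blob + sizeof(type), payload, sizeof(payload));

        fd = open("/sys/firmware/gsmi/append_to_eventlog", O_WRONLY);
        if (fd < 0 || write(fd, blob, sizeof(blob)) != sizeof(blob)) {
            perror("append_to_eventlog");
            return 1;
        }
        close(fd);
        return 0;
    }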
diff --git a/Documentation/ABI/testing/sysfs-firmware-log b/Documentation/ABI/testing/sysfs-firmware-log new file mode 100644 index 000000000000..9b58e7c5365f --- /dev/null +++ b/Documentation/ABI/testing/sysfs-firmware-log | |||
@@ -0,0 +1,7 @@ | |||
1 | What: /sys/firmware/log | ||
2 | Date: February 2011 | ||
3 | Contact: Mike Waychison <mikew@google.com> | ||
4 | Description: | ||
5 | The /sys/firmware/log file is a binary file containing a | ||
6 | read-only copy of the firmware's log, if one is | ||
7 | available. | ||
diff --git a/Documentation/ABI/testing/sysfs-kernel-fscaps b/Documentation/ABI/testing/sysfs-kernel-fscaps new file mode 100644 index 000000000000..50a3033b5e15 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-fscaps | |||
@@ -0,0 +1,8 @@ | |||
1 | What: /sys/kernel/fscaps | ||
2 | Date: February 2011 | ||
3 | KernelVersion: 2.6.38 | ||
4 | Contact: Ludwig Nussel <ludwig.nussel@suse.de> | ||
5 | Description: | ||
6 | Shows whether file system capabilities are honored | ||
7 | when executing a binary. | ||
8 | |||
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power index 194ca446ac28..b464d12761ba 100644 --- a/Documentation/ABI/testing/sysfs-power +++ b/Documentation/ABI/testing/sysfs-power | |||
@@ -158,3 +158,17 @@ Description: | |||
158 | successful, will make the kernel abort a subsequent transition | 158 | successful, will make the kernel abort a subsequent transition |
159 | to a sleep state if any wakeup events are reported after the | 159 | to a sleep state if any wakeup events are reported after the |
160 | write has returned. | 160 | write has returned. |
161 | |||
162 | What: /sys/power/reserved_size | ||
163 | Date: May 2011 | ||
164 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | ||
165 | Description: | ||
166 | The /sys/power/reserved_size file allows user space to control | ||
167 | the amount of memory reserved for allocations made by device | ||
168 | drivers during the "device freeze" stage of hibernation. Writing | ||
169 | a string representing a non-negative integer to it sets the | ||
170 | number of bytes to reserve for allocations made by device | ||
171 | drivers' "freeze" callbacks. | ||
172 | |||
173 | Reading from this file will display the current value, which is | ||
174 | set to 1 MB by default. | ||
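[Editor's example] For instance, reserving 2 MB instead of the default 1 MB could be done as follows; this is a sketch, and the 2 MB figure is an arbitrary choice.

    /* Sketch: reserve 2 MB for driver "freeze" allocations during
     * hibernation.  The 2 MB figure is an arbitrary example. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/power/reserved_size", "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        fprintf(f, "%lu\n", 2UL * 1024 * 1024);  /* value is in bytes */
        fclose(f);
        return 0;
    }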
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index 36f63d4a0a06..b638e50cf8f6 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl | |||
@@ -96,10 +96,10 @@ X!Iinclude/linux/kobject.h | |||
96 | 96 | ||
97 | <chapter id="devdrivers"> | 97 | <chapter id="devdrivers"> |
98 | <title>Device drivers infrastructure</title> | 98 | <title>Device drivers infrastructure</title> |
99 | <sect1><title>The Basic Device Driver-Model Structures </title> | ||
100 | !Iinclude/linux/device.h | ||
101 | </sect1> | ||
99 | <sect1><title>Device Drivers Base</title> | 102 | <sect1><title>Device Drivers Base</title> |
100 | <!-- | ||
101 | X!Iinclude/linux/device.h | ||
102 | --> | ||
103 | !Edrivers/base/driver.c | 103 | !Edrivers/base/driver.c |
104 | !Edrivers/base/core.c | 104 | !Edrivers/base/core.c |
105 | !Edrivers/base/class.c | 105 | !Edrivers/base/class.c |
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl index fb10fd08c05c..b3422341d65c 100644 --- a/Documentation/DocBook/genericirq.tmpl +++ b/Documentation/DocBook/genericirq.tmpl | |||
@@ -191,8 +191,8 @@ | |||
191 | <para> | 191 | <para> |
192 | Whenever an interrupt triggers, the lowlevel arch code calls into | 192 | Whenever an interrupt triggers, the lowlevel arch code calls into |
193 | the generic interrupt code by calling desc->handle_irq(). | 193 | the generic interrupt code by calling desc->handle_irq(). |
194 | This highlevel IRQ handling function only uses desc->chip primitives | 194 | This highlevel IRQ handling function only uses desc->irq_data.chip |
195 | referenced by the assigned chip descriptor structure. | 195 | primitives referenced by the assigned chip descriptor structure. |
196 | </para> | 196 | </para> |
197 | </sect1> | 197 | </sect1> |
198 | <sect1 id="Highlevel_Driver_API"> | 198 | <sect1 id="Highlevel_Driver_API"> |
@@ -206,11 +206,11 @@ | |||
206 | <listitem><para>enable_irq()</para></listitem> | 206 | <listitem><para>enable_irq()</para></listitem> |
207 | <listitem><para>disable_irq_nosync() (SMP only)</para></listitem> | 207 | <listitem><para>disable_irq_nosync() (SMP only)</para></listitem> |
208 | <listitem><para>synchronize_irq() (SMP only)</para></listitem> | 208 | <listitem><para>synchronize_irq() (SMP only)</para></listitem> |
209 | <listitem><para>set_irq_type()</para></listitem> | 209 | <listitem><para>irq_set_irq_type()</para></listitem> |
210 | <listitem><para>set_irq_wake()</para></listitem> | 210 | <listitem><para>irq_set_irq_wake()</para></listitem> |
211 | <listitem><para>set_irq_data()</para></listitem> | 211 | <listitem><para>irq_set_handler_data()</para></listitem> |
212 | <listitem><para>set_irq_chip()</para></listitem> | 212 | <listitem><para>irq_set_chip()</para></listitem> |
213 | <listitem><para>set_irq_chip_data()</para></listitem> | 213 | <listitem><para>irq_set_chip_data()</para></listitem> |
214 | </itemizedlist> | 214 | </itemizedlist> |
215 | See the autogenerated function documentation for details. | 215 | See the autogenerated function documentation for details. |
216 | </para> | 216 | </para> |
@@ -225,6 +225,8 @@ | |||
225 | <listitem><para>handle_fasteoi_irq</para></listitem> | 225 | <listitem><para>handle_fasteoi_irq</para></listitem> |
226 | <listitem><para>handle_simple_irq</para></listitem> | 226 | <listitem><para>handle_simple_irq</para></listitem> |
227 | <listitem><para>handle_percpu_irq</para></listitem> | 227 | <listitem><para>handle_percpu_irq</para></listitem> |
228 | <listitem><para>handle_edge_eoi_irq</para></listitem> | ||
229 | <listitem><para>handle_bad_irq</para></listitem> | ||
228 | </itemizedlist> | 230 | </itemizedlist> |
229 | The interrupt flow handlers (either predefined or architecture | 231 | The interrupt flow handlers (either predefined or architecture |
230 | specific) are assigned to specific interrupts by the architecture | 232 | specific) are assigned to specific interrupts by the architecture |
@@ -241,13 +243,13 @@ | |||
241 | <programlisting> | 243 | <programlisting> |
242 | default_enable(struct irq_data *data) | 244 | default_enable(struct irq_data *data) |
243 | { | 245 | { |
244 | desc->chip->irq_unmask(data); | 246 | desc->irq_data.chip->irq_unmask(data); |
245 | } | 247 | } |
246 | 248 | ||
247 | default_disable(struct irq_data *data) | 249 | default_disable(struct irq_data *data) |
248 | { | 250 | { |
249 | if (!delay_disable(data)) | 251 | if (!delay_disable(data)) |
250 | desc->chip->irq_mask(data); | 252 | desc->irq_data.chip->irq_mask(data); |
251 | } | 253 | } |
252 | 254 | ||
253 | default_ack(struct irq_data *data) | 255 | default_ack(struct irq_data *data) |
@@ -284,9 +286,9 @@ noop(struct irq_data *data)) | |||
284 | <para> | 286 | <para> |
285 | The following control flow is implemented (simplified excerpt): | 287 | The following control flow is implemented (simplified excerpt): |
286 | <programlisting> | 288 | <programlisting> |
287 | desc->chip->irq_mask(); | 289 | desc->irq_data.chip->irq_mask_ack(); |
288 | handle_IRQ_event(desc->action); | 290 | handle_irq_event(desc->action); |
289 | desc->chip->irq_unmask(); | 291 | desc->irq_data.chip->irq_unmask(); |
290 | </programlisting> | 292 | </programlisting> |
291 | </para> | 293 | </para> |
292 | </sect3> | 294 | </sect3> |
@@ -300,8 +302,8 @@ desc->chip->irq_unmask(); | |||
300 | <para> | 302 | <para> |
301 | The following control flow is implemented (simplified excerpt): | 303 | The following control flow is implemented (simplified excerpt): |
302 | <programlisting> | 304 | <programlisting> |
303 | handle_IRQ_event(desc->action); | 305 | handle_irq_event(desc->action); |
304 | desc->chip->irq_eoi(); | 306 | desc->irq_data.chip->irq_eoi(); |
305 | </programlisting> | 307 | </programlisting> |
306 | </para> | 308 | </para> |
307 | </sect3> | 309 | </sect3> |
@@ -315,17 +317,17 @@ desc->chip->irq_eoi(); | |||
315 | The following control flow is implemented (simplified excerpt): | 317 | The following control flow is implemented (simplified excerpt): |
316 | <programlisting> | 318 | <programlisting> |
317 | if (desc->status & running) { | 319 | if (desc->status & running) { |
318 | desc->chip->irq_mask(); | 320 | desc->irq_data.chip->irq_mask_ack(); |
319 | desc->status |= pending | masked; | 321 | desc->status |= pending | masked; |
320 | return; | 322 | return; |
321 | } | 323 | } |
322 | desc->chip->irq_ack(); | 324 | desc->irq_data.chip->irq_ack(); |
323 | desc->status |= running; | 325 | desc->status |= running; |
324 | do { | 326 | do { |
325 | if (desc->status & masked) | 327 | if (desc->status & masked) |
326 | desc->chip->irq_unmask(); | 328 | desc->irq_data.chip->irq_unmask(); |
327 | desc->status &= ~pending; | 329 | desc->status &= ~pending; |
328 | handle_IRQ_event(desc->action); | 330 | handle_irq_event(desc->action); |
329 | } while (status & pending); | 331 | } while (status & pending); |
330 | desc->status &= ~running; | 332 | desc->status &= ~running; |
331 | </programlisting> | 333 | </programlisting> |
@@ -344,7 +346,7 @@ desc->status &= ~running; | |||
344 | <para> | 346 | <para> |
345 | The following control flow is implemented (simplified excerpt): | 347 | The following control flow is implemented (simplified excerpt): |
346 | <programlisting> | 348 | <programlisting> |
347 | handle_IRQ_event(desc->action); | 349 | handle_irq_event(desc->action); |
348 | </programlisting> | 350 | </programlisting> |
349 | </para> | 351 | </para> |
350 | </sect3> | 352 | </sect3> |
@@ -362,12 +364,29 @@ handle_IRQ_event(desc->action); | |||
362 | <para> | 364 | <para> |
363 | The following control flow is implemented (simplified excerpt): | 365 | The following control flow is implemented (simplified excerpt): |
364 | <programlisting> | 366 | <programlisting> |
365 | handle_IRQ_event(desc->action); | 367 | if (desc->irq_data.chip->irq_ack) |
366 | if (desc->chip->irq_eoi) | 368 | desc->irq_data.chip->irq_ack(); |
367 | desc->chip->irq_eoi(); | 369 | handle_irq_event(desc->action); |
370 | if (desc->irq_data.chip->irq_eoi) | ||
371 | desc->irq_data.chip->irq_eoi(); | ||
368 | </programlisting> | 372 | </programlisting> |
369 | </para> | 373 | </para> |
370 | </sect3> | 374 | </sect3> |
375 | <sect3 id="EOI_Edge_IRQ_flow_handler"> | ||
376 | <title>EOI Edge IRQ flow handler</title> | ||
377 | <para> | ||
378 | handle_edge_eoi_irq provides an abomination of the edge | ||
379 | handler which is used solely to tame a badly broken | ||
380 | irq controller on powerpc/cell. | ||
381 | </para> | ||
382 | </sect3> | ||
383 | <sect3 id="BAD_IRQ_flow_handler"> | ||
384 | <title>Bad IRQ flow handler</title> | ||
385 | <para> | ||
386 | handle_bad_irq is used for spurious interrupts which | ||
387 | have no real handler assigned. | ||
388 | </para> | ||
389 | </sect3> | ||
371 | </sect2> | 390 | </sect2> |
372 | <sect2 id="Quirks_and_optimizations"> | 391 | <sect2 id="Quirks_and_optimizations"> |
373 | <title>Quirks and optimizations</title> | 392 | <title>Quirks and optimizations</title> |
@@ -410,6 +429,7 @@ if (desc->chip->irq_eoi) | |||
410 | <listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem> | 429 | <listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem> |
411 | <listitem><para>irq_mask()</para></listitem> | 430 | <listitem><para>irq_mask()</para></listitem> |
412 | <listitem><para>irq_unmask()</para></listitem> | 431 | <listitem><para>irq_unmask()</para></listitem> |
432 | <listitem><para>irq_eoi() - Optional, required for eoi flow handlers</para></listitem> | ||
413 | <listitem><para>irq_retrigger() - Optional</para></listitem> | 433 | <listitem><para>irq_retrigger() - Optional</para></listitem> |
414 | <listitem><para>irq_set_type() - Optional</para></listitem> | 434 | <listitem><para>irq_set_type() - Optional</para></listitem> |
415 | <listitem><para>irq_set_wake() - Optional</para></listitem> | 435 | <listitem><para>irq_set_wake() - Optional</para></listitem> |
@@ -424,32 +444,24 @@ if (desc->chip->irq_eoi) | |||
424 | <chapter id="doirq"> | 444 | <chapter id="doirq"> |
425 | <title>__do_IRQ entry point</title> | 445 | <title>__do_IRQ entry point</title> |
426 | <para> | 446 | <para> |
427 | The original implementation __do_IRQ() is an alternative entry | 447 | The original implementation __do_IRQ() was an alternative entry |
428 | point for all types of interrupts. | 448 | point for all types of interrupts. It no longer exists. |
429 | </para> | 449 | </para> |
430 | <para> | 450 | <para> |
431 | This handler turned out to be not suitable for all | 451 | This handler turned out to be not suitable for all |
432 | interrupt hardware and was therefore reimplemented with split | 452 | interrupt hardware and was therefore reimplemented with split |
433 | functionality for egde/level/simple/percpu interrupts. This is not | 453 | functionality for edge/level/simple/percpu interrupts. This is not |
434 | only a functional optimization. It also shortens code paths for | 454 | only a functional optimization. It also shortens code paths for |
435 | interrupts. | 455 | interrupts. |
436 | </para> | 456 | </para> |
437 | <para> | ||
438 | To make use of the split implementation, replace the call to | ||
439 | __do_IRQ by a call to desc->handle_irq() and associate | ||
440 | the appropriate handler function to desc->handle_irq(). | ||
441 | In most cases the generic handler implementations should | ||
442 | be sufficient. | ||
443 | </para> | ||
444 | </chapter> | 457 | </chapter> |
445 | 458 | ||
446 | <chapter id="locking"> | 459 | <chapter id="locking"> |
447 | <title>Locking on SMP</title> | 460 | <title>Locking on SMP</title> |
448 | <para> | 461 | <para> |
449 | The locking of chip registers is up to the architecture that | 462 | The locking of chip registers is up to the architecture that |
450 | defines the chip primitives. There is a chip->lock field that can be used | 463 | defines the chip primitives. The per-irq structure is |
451 | for serialization, but the generic layer does not touch it. The per-irq | 464 | protected via desc->lock by the generic layer. |
452 | structure is protected via desc->lock, by the generic layer. | ||
453 | </para> | 465 | </para> |
454 | </chapter> | 466 | </chapter> |
455 | <chapter id="structs"> | 467 | <chapter id="structs"> |
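[Editor's example] To tie the renamed irq_set_*() calls and the flow handlers above together, here is a hedged sketch of how a controller driver might plug into the generic layer. All "my_" names, the register offsets, and the irq-to-bit mapping are invented for illustration; only the irq_set_chip_and_handler()/irq_set_chip_data() calls and handle_level_irq come from the generic IRQ API.

    /* Hypothetical controller glue; all "my_" identifiers and the
     * register offsets are invented for illustration. */
    #include <linux/irq.h>
    #include <linux/io.h>
    #include <linux/bitops.h>

    static void __iomem *my_base;           /* hypothetical MMIO base */

    static void my_irq_mask(struct irq_data *data)
    {
        /* A real driver would translate data->irq to a hardware bit. */
        writel(BIT(data->irq & 31), my_base + 0x10);    /* mask reg */
    }

    static void my_irq_unmask(struct irq_data *data)
    {
        writel(BIT(data->irq & 31), my_base + 0x14);    /* unmask reg */
    }

    static void my_irq_ack(struct irq_data *data)
    {
        writel(BIT(data->irq & 31), my_base + 0x18);    /* ack reg */
    }

    static struct irq_chip my_chip = {
        .name       = "my-chip",
        .irq_mask   = my_irq_mask,
        .irq_unmask = my_irq_unmask,
        .irq_ack    = my_irq_ack,
    };

    static void my_setup_one(unsigned int irq)
    {
        /* Assign the chip and a flow handler; the generic layer then
         * dispatches through desc->handle_irq(). */
        irq_set_chip_and_handler(irq, &my_chip, handle_level_irq);
        irq_set_chip_data(irq, my_base);
    }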
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX index 71b6f500ddb9..1d7a885761f5 100644 --- a/Documentation/RCU/00-INDEX +++ b/Documentation/RCU/00-INDEX | |||
@@ -21,7 +21,7 @@ rcu.txt | |||
21 | RTFP.txt | 21 | RTFP.txt |
22 | - List of RCU papers (bibliography) going back to 1980. | 22 | - List of RCU papers (bibliography) going back to 1980. |
23 | stallwarn.txt | 23 | stallwarn.txt |
24 | - RCU CPU stall warnings (CONFIG_RCU_CPU_STALL_DETECTOR) | 24 | - RCU CPU stall warnings (module parameter rcu_cpu_stall_suppress) |
25 | torture.txt | 25 | torture.txt |
26 | - RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST) | 26 | - RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST) |
27 | trace.txt | 27 | trace.txt |
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt index 862c08ef1fde..4e959208f736 100644 --- a/Documentation/RCU/stallwarn.txt +++ b/Documentation/RCU/stallwarn.txt | |||
@@ -1,22 +1,25 @@ | |||
1 | Using RCU's CPU Stall Detector | 1 | Using RCU's CPU Stall Detector |
2 | 2 | ||
3 | The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables | 3 | The rcu_cpu_stall_suppress module parameter enables RCU's CPU stall |
4 | RCU's CPU stall detector, which detects conditions that unduly delay | 4 | detector, which detects conditions that unduly delay RCU grace periods. |
5 | RCU grace periods. The stall detector's idea of what constitutes | 5 | This module parameter enables CPU stall detection by default, but |
6 | "unduly delayed" is controlled by a set of C preprocessor macros: | 6 | may be overridden via boot-time parameter or at runtime via sysfs. |
7 | The stall detector's idea of what constitutes "unduly delayed" is | ||
8 | controlled by a set of kernel configuration variables and cpp macros: | ||
7 | 9 | ||
8 | RCU_SECONDS_TILL_STALL_CHECK | 10 | CONFIG_RCU_CPU_STALL_TIMEOUT |
9 | 11 | ||
10 | This macro defines the period of time that RCU will wait from | 12 | This kernel configuration parameter defines the period of time |
11 | the beginning of a grace period until it issues an RCU CPU | 13 | that RCU will wait from the beginning of a grace period until it |
12 | stall warning. This time period is normally ten seconds. | 14 | issues an RCU CPU stall warning. This time period is normally |
15 | ten seconds. | ||
13 | 16 | ||
14 | RCU_SECONDS_TILL_STALL_RECHECK | 17 | RCU_SECONDS_TILL_STALL_RECHECK |
15 | 18 | ||
16 | This macro defines the period of time that RCU will wait after | 19 | This macro defines the period of time that RCU will wait after |
17 | issuing a stall warning until it issues another stall warning | 20 | issuing a stall warning until it issues another stall warning |
18 | for the same stall. This time period is normally set to thirty | 21 | for the same stall. This time period is normally set to three |
19 | seconds. | 22 | times the check interval plus thirty seconds. |
20 | 23 | ||
21 | RCU_STALL_RAT_DELAY | 24 | RCU_STALL_RAT_DELAY |
22 | 25 | ||
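[Editor's example] As the new text notes, the detector can be overridden at runtime via sysfs. A sketch of silencing it from user space follows; the path assumes the TREE_RCU implementation, whose module parameters live under "rcutree".

    /* Sketch: suppress RCU CPU stall warnings at runtime.  The sysfs
     * path assumes the TREE_RCU implementation ("rcutree"). */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen(
            "/sys/module/rcutree/parameters/rcu_cpu_stall_suppress", "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        fputs("1\n", f);        /* non-zero suppresses stall warnings */
        fclose(f);
        return 0;
    }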
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt index 6a8c73f55b80..c078ad48f7a1 100644 --- a/Documentation/RCU/trace.txt +++ b/Documentation/RCU/trace.txt | |||
@@ -10,34 +10,46 @@ for rcutree and next for rcutiny. | |||
10 | 10 | ||
11 | CONFIG_TREE_RCU and CONFIG_TREE_PREEMPT_RCU debugfs Files and Formats | 11 | CONFIG_TREE_RCU and CONFIG_TREE_PREEMPT_RCU debugfs Files and Formats |
12 | 12 | ||
13 | These implementations of RCU provides five debugfs files under the | 13 | These implementations of RCU provide several debugfs files under the |
14 | top-level directory RCU: rcu/rcudata (which displays fields in struct | 14 | top-level directory "rcu": |
15 | rcu_data), rcu/rcudata.csv (which is a .csv spreadsheet version of | 15 | |
16 | rcu/rcudata), rcu/rcugp (which displays grace-period counters), | 16 | rcu/rcudata: |
17 | rcu/rcuhier (which displays the struct rcu_node hierarchy), and | 17 | Displays fields in struct rcu_data. |
18 | rcu/rcu_pending (which displays counts of the reasons that the | 18 | rcu/rcudata.csv: |
19 | rcu_pending() function decided that there was core RCU work to do). | 19 | Comma-separated values spreadsheet version of rcudata. |
20 | rcu/rcugp: | ||
21 | Displays grace-period counters. | ||
22 | rcu/rcuhier: | ||
23 | Displays the struct rcu_node hierarchy. | ||
24 | rcu/rcu_pending: | ||
25 | Displays counts of the reasons rcu_pending() decided that RCU had | ||
26 | work to do. | ||
27 | rcu/rcutorture: | ||
28 | Displays rcutorture test progress. | ||
29 | rcu/rcuboost: | ||
30 | Displays RCU boosting statistics. Only present if | ||
31 | CONFIG_RCU_BOOST=y. | ||
20 | 32 | ||
21 | The output of "cat rcu/rcudata" looks as follows: | 33 | The output of "cat rcu/rcudata" looks as follows: |
22 | 34 | ||
23 | rcu_sched: | 35 | rcu_sched: |
24 | 0 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=10951/1 dn=0 df=1101 of=0 ri=36 ql=0 b=10 | 36 | 0 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=545/1/0 df=50 of=0 ri=0 ql=163 qs=NRW. kt=0/W/0 ktl=ebc3 b=10 ci=153737 co=0 ca=0 |
25 | 1 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=16117/1 dn=0 df=1015 of=0 ri=0 ql=0 b=10 | 37 | 1 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=967/1/0 df=58 of=0 ri=0 ql=634 qs=NRW. kt=0/W/1 ktl=58c b=10 ci=191037 co=0 ca=0 |
26 | 2 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=1445/1 dn=0 df=1839 of=0 ri=0 ql=0 b=10 | 38 | 2 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=1081/1/0 df=175 of=0 ri=0 ql=74 qs=N.W. kt=0/W/2 ktl=da94 b=10 ci=75991 co=0 ca=0 |
27 | 3 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=6681/1 dn=0 df=1545 of=0 ri=0 ql=0 b=10 | 39 | 3 c=20942 g=20943 pq=1 pqc=20942 qp=1 dt=1846/0/0 df=404 of=0 ri=0 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=72261 co=0 ca=0 |
28 | 4 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=1003/1 dn=0 df=1992 of=0 ri=0 ql=0 b=10 | 40 | 4 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=369/1/0 df=83 of=0 ri=0 ql=48 qs=N.W. kt=0/W/4 ktl=e0e7 b=10 ci=128365 co=0 ca=0 |
29 | 5 c=17829 g=17830 pq=1 pqc=17829 qp=1 dt=3887/1 dn=0 df=3331 of=0 ri=4 ql=2 b=10 | 41 | 5 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=381/1/0 df=64 of=0 ri=0 ql=169 qs=NRW. kt=0/W/5 ktl=fb2f b=10 ci=164360 co=0 ca=0 |
30 | 6 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=859/1 dn=0 df=3224 of=0 ri=0 ql=0 b=10 | 42 | 6 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=1037/1/0 df=183 of=0 ri=0 ql=62 qs=N.W. kt=0/W/6 ktl=d2ad b=10 ci=65663 co=0 ca=0 |
31 | 7 c=17829 g=17830 pq=0 pqc=17829 qp=1 dt=3761/1 dn=0 df=1818 of=0 ri=0 ql=2 b=10 | 43 | 7 c=20897 g=20897 pq=1 pqc=20896 qp=0 dt=1572/0/0 df=382 of=0 ri=0 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=75006 co=0 ca=0 |
32 | rcu_bh: | 44 | rcu_bh: |
33 | 0 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=10951/1 dn=0 df=0 of=0 ri=0 ql=0 b=10 | 45 | 0 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=545/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/0 ktl=ebc3 b=10 ci=0 co=0 ca=0 |
34 | 1 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=16117/1 dn=0 df=13 of=0 ri=0 ql=0 b=10 | 46 | 1 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=967/1/0 df=3 of=0 ri=1 ql=0 qs=.... kt=0/W/1 ktl=58c b=10 ci=151 co=0 ca=0 |
35 | 2 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=1445/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 | 47 | 2 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=1081/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/2 ktl=da94 b=10 ci=0 co=0 ca=0 |
36 | 3 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=6681/1 dn=0 df=9 of=0 ri=0 ql=0 b=10 | 48 | 3 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=1846/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=0 co=0 ca=0 |
37 | 4 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=1003/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 | 49 | 4 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=369/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/4 ktl=e0e7 b=10 ci=0 co=0 ca=0 |
38 | 5 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=3887/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 | 50 | 5 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=381/1/0 df=4 of=0 ri=1 ql=0 qs=.... kt=0/W/5 ktl=fb2f b=10 ci=0 co=0 ca=0 |
39 | 6 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=859/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 | 51 | 6 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=1037/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/6 ktl=d2ad b=10 ci=0 co=0 ca=0 |
40 | 7 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=3761/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 | 52 | 7 c=1474 g=1474 pq=1 pqc=1473 qp=0 dt=1572/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=0 co=0 ca=0 |
41 | 53 | ||
42 | The first section lists the rcu_data structures for rcu_sched, the second | 54 | The first section lists the rcu_data structures for rcu_sched, the second |
43 | for rcu_bh. Note that CONFIG_TREE_PREEMPT_RCU kernels will have an | 55 | for rcu_bh. Note that CONFIG_TREE_PREEMPT_RCU kernels will have an |
@@ -52,17 +64,18 @@ o The number at the beginning of each line is the CPU number. | |||
52 | substantially larger than the number of actual CPUs. | 64 | substantially larger than the number of actual CPUs. |
53 | 65 | ||
54 | o "c" is the count of grace periods that this CPU believes have | 66 | o "c" is the count of grace periods that this CPU believes have |
55 | completed. CPUs in dynticks idle mode may lag quite a ways | 67 | completed. Offlined CPUs and CPUs in dynticks idle mode may |
56 | behind, for example, CPU 4 under "rcu_sched" above, which has | 68 | lag quite a ways behind, for example, CPU 6 under "rcu_sched" |
57 | slept through the past 25 RCU grace periods. It is not unusual | 69 | above, which has been offline through not quite 40,000 RCU grace |
58 | to see CPUs lagging by thousands of grace periods. | 70 | periods. It is not unusual to see CPUs lagging by thousands of |
71 | grace periods. | ||
59 | 72 | ||
60 | o "g" is the count of grace periods that this CPU believes have | 73 | o "g" is the count of grace periods that this CPU believes have |
61 | started. Again, CPUs in dynticks idle mode may lag behind. | 74 | started. Again, offlined CPUs and CPUs in dynticks idle mode |
62 | If the "c" and "g" values are equal, this CPU has already | 75 | may lag behind. If the "c" and "g" values are equal, this CPU |
63 | reported a quiescent state for the last RCU grace period that | 76 | has already reported a quiescent state for the last RCU grace |
64 | it is aware of, otherwise, the CPU believes that it owes RCU a | 77 | period that it is aware of, otherwise, the CPU believes that it |
65 | quiescent state. | 78 | owes RCU a quiescent state. |
66 | 79 | ||
67 | o "pq" indicates that this CPU has passed through a quiescent state | 80 | o "pq" indicates that this CPU has passed through a quiescent state |
68 | for the current grace period. It is possible for "pq" to be | 81 | for the current grace period. It is possible for "pq" to be |
@@ -81,7 +94,8 @@ o "pqc" indicates which grace period the last-observed quiescent | |||
81 | the next grace period! | 94 | the next grace period! |
82 | 95 | ||
83 | o "qp" indicates that RCU still expects a quiescent state from | 96 | o "qp" indicates that RCU still expects a quiescent state from |
84 | this CPU. | 97 | this CPU. Offlined CPUs and CPUs in dyntick idle mode might |
98 | well have qp=1, which is OK: RCU is still ignoring them. | ||
85 | 99 | ||
86 | o "dt" is the current value of the dyntick counter that is incremented | 100 | o "dt" is the current value of the dyntick counter that is incremented |
87 | when entering or leaving dynticks idle state, either by the | 101 | when entering or leaving dynticks idle state, either by the |
@@ -108,7 +122,7 @@ o "df" is the number of times that some other CPU has forced a | |||
108 | 122 | ||
109 | o "of" is the number of times that some other CPU has forced a | 123 | o "of" is the number of times that some other CPU has forced a |
110 | quiescent state on behalf of this CPU due to this CPU being | 124 | quiescent state on behalf of this CPU due to this CPU being |
111 | offline. In a perfect world, this might neve happen, but it | 125 | offline. In a perfect world, this might never happen, but it |
112 | turns out that offlining and onlining a CPU can take several grace | 126 | turns out that offlining and onlining a CPU can take several grace |
113 | periods, and so there is likely to be an extended period of time | 127 | periods, and so there is likely to be an extended period of time |
114 | when RCU believes that the CPU is online when it really is not. | 128 | when RCU believes that the CPU is online when it really is not. |
@@ -125,6 +139,62 @@ o "ql" is the number of RCU callbacks currently residing on | |||
125 | of what state they are in (new, waiting for grace period to | 139 | of what state they are in (new, waiting for grace period to |
126 | start, waiting for grace period to end, ready to invoke). | 140 | start, waiting for grace period to end, ready to invoke). |
127 | 141 | ||
142 | o "qs" gives an indication of the state of the callback queue | ||
143 | with four characters: | ||
144 | |||
145 | "N" Indicates that there are callbacks queued that are not | ||
146 | ready to be handled by the next grace period, and thus | ||
147 | will be handled by the grace period following the next | ||
148 | one. | ||
149 | |||
150 | "R" Indicates that there are callbacks queued that are | ||
151 | ready to be handled by the next grace period. | ||
152 | |||
153 | "W" Indicates that there are callbacks queued that are | ||
154 | waiting on the current grace period. | ||
155 | |||
156 | "D" Indicates that there are callbacks queued that have | ||
157 | already been handled by a prior grace period, and are | ||
158 | thus waiting to be invoked. Note that callbacks in | ||
159 | the process of being invoked are not counted here. | ||
160 | Callbacks in the process of being invoked are those | ||
161 | that have been removed from the rcu_data structures | ||
162 | queues by rcu_do_batch(), but which have not yet been | ||
163 | invoked. | ||
164 | |||
165 | If there are no callbacks in a given one of the above states, | ||
166 | the corresponding character is replaced by ".". | ||
167 | |||
168 | o "kt" is the per-CPU kernel-thread state. The digit preceding | ||
169 | the first slash is zero if there is no work pending and 1 | ||
170 | otherwise. The character between the first pair of slashes is | ||
171 | as follows: | ||
172 | |||
173 | "S" The kernel thread is stopped, in other words, all | ||
174 | CPUs corresponding to this rcu_node structure are | ||
175 | offline. | ||
176 | |||
177 | "R" The kernel thread is running. | ||
178 | |||
179 | "W" The kernel thread is waiting because there is no work | ||
180 | for it to do. | ||
181 | |||
182 | "O" The kernel thread is waiting because it has been | ||
183 | forced off of its designated CPU or because its | ||
184 | ->cpus_allowed mask permits it to run on other than | ||
185 | its designated CPU. | ||
186 | |||
187 | "Y" The kernel thread is yielding to avoid hogging CPU. | ||
188 | |||
189 | "?" Unknown value, indicates a bug. | ||
190 | |||
191 | The number after the final slash is the CPU that the kthread | ||
192 | is actually running on. | ||
193 | |||
194 | o "ktl" is the low-order 16 bits (in hexadecimal) of the count of | ||
195 | the number of times that this CPU's per-CPU kthread has gone | ||
196 | through its loop servicing invoke_rcu_cpu_kthread() requests. | ||
197 | |||
128 | o "b" is the batch limit for this CPU. If more than this number | 198 | o "b" is the batch limit for this CPU. If more than this number |
129 | of RCU callbacks is ready to invoke, then the remainder will | 199 | of RCU callbacks is ready to invoke, then the remainder will |
130 | be deferred. | 200 | be deferred. |
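[Editor's example] A quick way to pull a few of the per-CPU fields described above out of the file is sketched below; it assumes debugfs is mounted at /sys/kernel/debug.

    /* Sketch: extract the "c", "g" and "ql" fields from rcu/rcudata.
     * Assumes debugfs is mounted at /sys/kernel/debug. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[512];
        FILE *f = fopen("/sys/kernel/debug/rcu/rcudata", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        while (fgets(line, sizeof(line), f)) {
            int cpu;
            long c, g, ql = -1;
            const char *p;

            /* Per-CPU lines start with the CPU number. */
            if (sscanf(line, " %d c=%ld g=%ld", &cpu, &c, &g) != 3)
                continue;
            p = strstr(line, "ql=");
            if (p)
                sscanf(p, "ql=%ld", &ql);
            printf("cpu%d: completed=%ld started=%ld callbacks=%ld\n",
                   cpu, c, g, ql);
        }
        fclose(f);
        return 0;
    }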
@@ -174,14 +244,14 @@ o "gpnum" is the number of grace periods that have started. It is | |||
174 | The output of "cat rcu/rcuhier" looks as follows, with very long lines: | 244 | The output of "cat rcu/rcuhier" looks as follows, with very long lines: |
175 | 245 | ||
176 | c=6902 g=6903 s=2 jfq=3 j=72c7 nfqs=13142/nfqsng=0(13142) fqlh=6 | 246 | c=6902 g=6903 s=2 jfq=3 j=72c7 nfqs=13142/nfqsng=0(13142) fqlh=6 |
177 | 1/1 .>. 0:127 ^0 | 247 | 1/1 ..>. 0:127 ^0 |
178 | 3/3 .>. 0:35 ^0 0/0 .>. 36:71 ^1 0/0 .>. 72:107 ^2 0/0 .>. 108:127 ^3 | 248 | 3/3 ..>. 0:35 ^0 0/0 ..>. 36:71 ^1 0/0 ..>. 72:107 ^2 0/0 ..>. 108:127 ^3 |
179 | 3/3f .>. 0:5 ^0 2/3 .>. 6:11 ^1 0/0 .>. 12:17 ^2 0/0 .>. 18:23 ^3 0/0 .>. 24:29 ^4 0/0 .>. 30:35 ^5 0/0 .>. 36:41 ^0 0/0 .>. 42:47 ^1 0/0 .>. 48:53 ^2 0/0 .>. 54:59 ^3 0/0 .>. 60:65 ^4 0/0 .>. 66:71 ^5 0/0 .>. 72:77 ^0 0/0 .>. 78:83 ^1 0/0 .>. 84:89 ^2 0/0 .>. 90:95 ^3 0/0 .>. 96:101 ^4 0/0 .>. 102:107 ^5 0/0 .>. 108:113 ^0 0/0 .>. 114:119 ^1 0/0 .>. 120:125 ^2 0/0 .>. 126:127 ^3 | 249 | 3/3f ..>. 0:5 ^0 2/3 ..>. 6:11 ^1 0/0 ..>. 12:17 ^2 0/0 ..>. 18:23 ^3 0/0 ..>. 24:29 ^4 0/0 ..>. 30:35 ^5 0/0 ..>. 36:41 ^0 0/0 ..>. 42:47 ^1 0/0 ..>. 48:53 ^2 0/0 ..>. 54:59 ^3 0/0 ..>. 60:65 ^4 0/0 ..>. 66:71 ^5 0/0 ..>. 72:77 ^0 0/0 ..>. 78:83 ^1 0/0 ..>. 84:89 ^2 0/0 ..>. 90:95 ^3 0/0 ..>. 96:101 ^4 0/0 ..>. 102:107 ^5 0/0 ..>. 108:113 ^0 0/0 ..>. 114:119 ^1 0/0 ..>. 120:125 ^2 0/0 ..>. 126:127 ^3 |
180 | rcu_bh: | 250 | rcu_bh: |
181 | c=-226 g=-226 s=1 jfq=-5701 j=72c7 nfqs=88/nfqsng=0(88) fqlh=0 | 251 | c=-226 g=-226 s=1 jfq=-5701 j=72c7 nfqs=88/nfqsng=0(88) fqlh=0 |
182 | 0/1 .>. 0:127 ^0 | 252 | 0/1 ..>. 0:127 ^0 |
183 | 0/3 .>. 0:35 ^0 0/0 .>. 36:71 ^1 0/0 .>. 72:107 ^2 0/0 .>. 108:127 ^3 | 253 | 0/3 ..>. 0:35 ^0 0/0 ..>. 36:71 ^1 0/0 ..>. 72:107 ^2 0/0 ..>. 108:127 ^3 |
184 | 0/3f .>. 0:5 ^0 0/3 .>. 6:11 ^1 0/0 .>. 12:17 ^2 0/0 .>. 18:23 ^3 0/0 .>. 24:29 ^4 0/0 .>. 30:35 ^5 0/0 .>. 36:41 ^0 0/0 .>. 42:47 ^1 0/0 .>. 48:53 ^2 0/0 .>. 54:59 ^3 0/0 .>. 60:65 ^4 0/0 .>. 66:71 ^5 0/0 .>. 72:77 ^0 0/0 .>. 78:83 ^1 0/0 .>. 84:89 ^2 0/0 .>. 90:95 ^3 0/0 .>. 96:101 ^4 0/0 .>. 102:107 ^5 0/0 .>. 108:113 ^0 0/0 .>. 114:119 ^1 0/0 .>. 120:125 ^2 0/0 .>. 126:127 ^3 | 254 | 0/3f ..>. 0:5 ^0 0/3 ..>. 6:11 ^1 0/0 ..>. 12:17 ^2 0/0 ..>. 18:23 ^3 0/0 ..>. 24:29 ^4 0/0 ..>. 30:35 ^5 0/0 ..>. 36:41 ^0 0/0 ..>. 42:47 ^1 0/0 ..>. 48:53 ^2 0/0 ..>. 54:59 ^3 0/0 ..>. 60:65 ^4 0/0 ..>. 66:71 ^5 0/0 ..>. 72:77 ^0 0/0 ..>. 78:83 ^1 0/0 ..>. 84:89 ^2 0/0 ..>. 90:95 ^3 0/0 ..>. 96:101 ^4 0/0 ..>. 102:107 ^5 0/0 ..>. 108:113 ^0 0/0 ..>. 114:119 ^1 0/0 ..>. 120:125 ^2 0/0 ..>. 126:127 ^3 |
185 | 255 | ||
186 | This is once again split into "rcu_sched" and "rcu_bh" portions, | 256 | This is once again split into "rcu_sched" and "rcu_bh" portions, |
187 | and CONFIG_TREE_PREEMPT_RCU kernels will again have an additional | 257 | and CONFIG_TREE_PREEMPT_RCU kernels will again have an additional |
@@ -240,13 +310,20 @@ o Each element of the form "1/1 0:127 ^0" represents one struct | |||
240 | current grace period. | 310 | current grace period. |
241 | 311 | ||
242 | o The characters separated by the ">" indicate the state | 312 | o The characters separated by the ">" indicate the state |
243 | of the blocked-tasks lists. A "T" preceding the ">" | 313 | of the blocked-tasks lists. A "G" preceding the ">" |
244 | indicates that at least one task blocked in an RCU | 314 | indicates that at least one task blocked in an RCU |
245 | read-side critical section blocks the current grace | 315 | read-side critical section blocks the current grace |
246 | period, while a "." preceding the ">" indicates otherwise. | 316 | period, while a "E" preceding the ">" indicates that |
247 | The character following the ">" indicates similarly for | 317 | at least one task blocked in an RCU read-side critical |
248 | the next grace period. A "T" should appear in this | 318 | section blocks the current expedited grace period. |
249 | field only for rcu-preempt. | 319 | A "T" character following the ">" indicates that at |
320 | least one task is blocked within an RCU read-side | ||
321 | critical section, regardless of whether any current | ||
322 | grace period (expedited or normal) is inconvenienced. | ||
323 | A "." character appears if the corresponding condition | ||
324 | does not hold, so that "..>." indicates that no tasks | ||
325 | are blocked. In contrast, "GE>T" indicates maximal | ||
326 | inconvenience from blocked tasks. | ||
250 | 327 | ||
251 | o The numbers separated by the ":" are the range of CPUs | 328 | o The numbers separated by the ":" are the range of CPUs |
252 | served by this struct rcu_node. This can be helpful | 329 | served by this struct rcu_node. This can be helpful |
@@ -328,6 +405,113 @@ o "nn" is the number of times that this CPU needed nothing. Alert | |||
328 | is due to short-circuit evaluation in rcu_pending(). | 405 | is due to short-circuit evaluation in rcu_pending(). |
329 | 406 | ||
330 | 407 | ||
408 | The output of "cat rcu/rcutorture" looks as follows: | ||
409 | |||
410 | rcutorture test sequence: 0 (test in progress) | ||
411 | rcutorture update version number: 615 | ||
412 | |||
413 | The first line shows the number of rcutorture tests that have completed | ||
414 | since boot. If a test is currently running, the "(test in progress)" | ||
415 | string will appear as shown above. The second line shows the number of | ||
416 | update cycles that the current test has started, or zero if there is | ||
417 | no test in progress. | ||
418 | |||
419 | |||
420 | The output of "cat rcu/rcuboost" looks as follows: | ||
421 | |||
422 | 0:5 tasks=.... kt=W ntb=0 neb=0 nnb=0 j=2f95 bt=300f | ||
423 | balk: nt=0 egt=989 bt=0 nb=0 ny=0 nos=16 | ||
424 | 6:7 tasks=.... kt=W ntb=0 neb=0 nnb=0 j=2f95 bt=300f | ||
425 | balk: nt=0 egt=225 bt=0 nb=0 ny=0 nos=6 | ||
426 | |||
427 | This information is output only for rcu_preempt. Each two-line entry | ||
428 | corresponds to a leaf rcu_node structure. The fields are as follows: | ||
429 | |||
430 | o "n:m" is the CPU-number range for the corresponding two-line | ||
431 | entry. In the sample output above, the first entry covers | ||
432 | CPUs zero through five and the second entry covers CPUs 6 | ||
433 | and 7. | ||
434 | |||
435 | o "tasks=TNEB" gives the state of the various segments of the | ||
436 | rnp->blocked_tasks list: | ||
437 | |||
438 | "T" This indicates that there are some tasks that blocked | ||
439 | while running on one of the corresponding CPUs while | ||
440 | in an RCU read-side critical section. | ||
441 | |||
442 | "N" This indicates that some of the blocked tasks are preventing | ||
443 | the current normal (non-expedited) grace period from | ||
444 | completing. | ||
445 | |||
446 | "E" This indicates that some of the blocked tasks are preventing | ||
447 | the current expedited grace period from completing. | ||
448 | |||
449 | "B" This indicates that some of the blocked tasks are in | ||
450 | need of RCU priority boosting. | ||
451 | |||
452 | Each character is replaced with "." if the corresponding | ||
453 | condition does not hold. | ||
454 | |||
455 | o "kt" is the state of the RCU priority-boosting kernel | ||
456 | thread associated with the corresponding rcu_node structure. | ||
457 | The state can be one of the following: | ||
458 | |||
459 | "S" The kernel thread is stopped, in other words, all | ||
460 | CPUs corresponding to this rcu_node structure are | ||
461 | offline. | ||
462 | |||
463 | "R" The kernel thread is running. | ||
464 | |||
465 | "W" The kernel thread is waiting because there is no work | ||
466 | for it to do. | ||
467 | |||
468 | "Y" The kernel thread is yielding to avoid hogging CPU. | ||
469 | |||
470 | "?" Unknown value, indicates a bug. | ||
471 | |||
472 | o "ntb" is the number of tasks boosted. | ||
473 | |||
474 | o "neb" is the number of tasks boosted in order to complete an | ||
475 | expedited grace period. | ||
476 | |||
477 | o "nnb" is the number of tasks boosted in order to complete a | ||
478 | normal (non-expedited) grace period. When boosting a task | ||
479 | that was blocking both an expedited and a normal grace period, | ||
480 | it is counted against the expedited total above. | ||
481 | |||
482 | o "j" is the low-order 16 bits of the jiffies counter in | ||
483 | hexadecimal. | ||
484 | |||
485 | o "bt" is the low-order 16 bits of the value that the jiffies | ||
486 | counter will have when we next start boosting, assuming that | ||
487 | the current grace period does not end beforehand. This is | ||
488 | also in hexadecimal. | ||
489 | |||
490 | o "balk: nt" counts the number of times we didn't boost (in | ||
491 | other words, we balked) even though it was time to boost because | ||
492 | there were no blocked tasks to boost. This situation occurs | ||
493 | when there is one blocked task on one rcu_node structure and | ||
494 | none on some other rcu_node structure. | ||
495 | |||
496 | o "egt" counts the number of times we balked because although | ||
497 | there were blocked tasks, none of them were blocking the | ||
498 | current grace period, whether expedited or otherwise. | ||
499 | |||
500 | o "bt" counts the number of times we balked because boosting | ||
501 | had already been initiated for the current grace period. | ||
502 | |||
503 | o "nb" counts the number of times we balked because there | ||
504 | was at least one task blocking the current non-expedited grace | ||
505 | period that never had blocked. If it is already running, it | ||
506 | just won't help to boost its priority! | ||
507 | |||
508 | o "ny" counts the number of times we balked because it was | ||
509 | not yet time to start boosting. | ||
510 | |||
511 | o "nos" counts the number of times we balked for other | ||
512 | reasons, e.g., the grace period ended first. | ||
513 | |||
514 | |||
331 | CONFIG_TINY_RCU and CONFIG_TINY_PREEMPT_RCU debugfs Files and Formats | 515 | CONFIG_TINY_RCU and CONFIG_TINY_PREEMPT_RCU debugfs Files and Formats |
332 | 516 | ||
333 | These implementations of RCU provide a single debugfs file under the | 517 |
@@ -394,9 +578,9 @@ o "neb" is the number of expedited grace periods that have had | |||
394 | o "nnb" is the number of normal grace periods that have had | 578 | o "nnb" is the number of normal grace periods that have had |
395 | to resort to RCU priority boosting since boot. | 579 | to resort to RCU priority boosting since boot. |
396 | 580 | ||
397 | o "j" is the low-order 12 bits of the jiffies counter in hexadecimal. | 581 | o "j" is the low-order 16 bits of the jiffies counter in hexadecimal. |
398 | 582 | ||
399 | o "bt" is the low-order 12 bits of the value that the jiffies counter | 583 | o "bt" is the low-order 16 bits of the value that the jiffies counter |
400 | will have at the next time that boosting is scheduled to begin. | 584 | will have at the next time that boosting is scheduled to begin. |
401 | 585 | ||
402 | o In the line beginning with "normal balk", the fields are as follows: | 586 | o In the line beginning with "normal balk", the fields are as follows: |
diff --git a/Documentation/driver-model/bus.txt b/Documentation/driver-model/bus.txt index 5001b7511626..6754b2df8aa1 100644 --- a/Documentation/driver-model/bus.txt +++ b/Documentation/driver-model/bus.txt | |||
@@ -3,24 +3,7 @@ Bus Types | |||
3 | 3 | ||
4 | Definition | 4 | Definition |
5 | ~~~~~~~~~~ | 5 | ~~~~~~~~~~ |
6 | 6 | See the kerneldoc for the struct bus_type. | |
7 | struct bus_type { | ||
8 | char * name; | ||
9 | |||
10 | struct subsystem subsys; | ||
11 | struct kset drivers; | ||
12 | struct kset devices; | ||
13 | |||
14 | struct bus_attribute * bus_attrs; | ||
15 | struct device_attribute * dev_attrs; | ||
16 | struct driver_attribute * drv_attrs; | ||
17 | |||
18 | int (*match)(struct device * dev, struct device_driver * drv); | ||
19 | int (*hotplug) (struct device *dev, char **envp, | ||
20 | int num_envp, char *buffer, int buffer_size); | ||
21 | int (*suspend)(struct device * dev, pm_message_t state); | ||
22 | int (*resume)(struct device * dev); | ||
23 | }; | ||
24 | 7 | ||
25 | int bus_register(struct bus_type * bus); | 8 | int bus_register(struct bus_type * bus); |
26 | 9 | ||
diff --git a/Documentation/driver-model/class.txt b/Documentation/driver-model/class.txt index 548505f14aa4..1fefc480a80b 100644 --- a/Documentation/driver-model/class.txt +++ b/Documentation/driver-model/class.txt | |||
@@ -27,22 +27,7 @@ The device class structure looks like: | |||
27 | typedef int (*devclass_add)(struct device *); | 27 | typedef int (*devclass_add)(struct device *); |
28 | typedef void (*devclass_remove)(struct device *); | 28 | typedef void (*devclass_remove)(struct device *); |
29 | 29 | ||
30 | struct device_class { | 30 | See the kerneldoc for the struct class. |
31 | char * name; | ||
32 | rwlock_t lock; | ||
33 | u32 devnum; | ||
34 | struct list_head node; | ||
35 | |||
36 | struct list_head drivers; | ||
37 | struct list_head intf_list; | ||
38 | |||
39 | struct driver_dir_entry dir; | ||
40 | struct driver_dir_entry device_dir; | ||
41 | struct driver_dir_entry driver_dir; | ||
42 | |||
43 | devclass_add add_device; | ||
44 | devclass_remove remove_device; | ||
45 | }; | ||
46 | 31 | ||
47 | A typical device class definition would look like: | 32 | A typical device class definition would look like: |
48 | 33 | ||
diff --git a/Documentation/driver-model/device.txt b/Documentation/driver-model/device.txt index a124f3126b0d..b2ff42685bcb 100644 --- a/Documentation/driver-model/device.txt +++ b/Documentation/driver-model/device.txt | |||
@@ -2,96 +2,7 @@ | |||
2 | The Basic Device Structure | 2 | The Basic Device Structure |
3 | ~~~~~~~~~~~~~~~~~~~~~~~~~~ | 3 | ~~~~~~~~~~~~~~~~~~~~~~~~~~ |
4 | 4 | ||
5 | struct device { | 5 | See the kerneldoc for the struct device. |
6 | struct list_head g_list; | ||
7 | struct list_head node; | ||
8 | struct list_head bus_list; | ||
9 | struct list_head driver_list; | ||
10 | struct list_head intf_list; | ||
11 | struct list_head children; | ||
12 | struct device * parent; | ||
13 | |||
14 | char name[DEVICE_NAME_SIZE]; | ||
15 | char bus_id[BUS_ID_SIZE]; | ||
16 | |||
17 | spinlock_t lock; | ||
18 | atomic_t refcount; | ||
19 | |||
20 | struct bus_type * bus; | ||
21 | struct driver_dir_entry dir; | ||
22 | |||
23 | u32 class_num; | ||
24 | |||
25 | struct device_driver *driver; | ||
26 | void *driver_data; | ||
27 | void *platform_data; | ||
28 | |||
29 | u32 current_state; | ||
30 | unsigned char *saved_state; | ||
31 | |||
32 | void (*release)(struct device * dev); | ||
33 | }; | ||
34 | |||
35 | Fields | ||
36 | ~~~~~~ | ||
37 | g_list: Node in the global device list. | ||
38 | |||
39 | node: Node in device's parent's children list. | ||
40 | |||
41 | bus_list: Node in device's bus's devices list. | ||
42 | |||
43 | driver_list: Node in device's driver's devices list. | ||
44 | |||
45 | intf_list: List of intf_data. There is one structure allocated for | ||
46 | each interface that the device supports. | ||
47 | |||
48 | children: List of child devices. | ||
49 | |||
50 | parent: *** FIXME *** | ||
51 | |||
52 | name: ASCII description of device. | ||
53 | Example: " 3Com Corporation 3c905 100BaseTX [Boomerang]" | ||
54 | |||
55 | bus_id: ASCII representation of device's bus position. This | ||
56 | field should be a name unique across all devices on the | ||
57 | bus type the device belongs to. | ||
58 | |||
59 | Example: PCI bus_ids are in the form of | ||
60 | <bus number>:<slot number>.<function number> | ||
61 | This name is unique across all PCI devices in the system. | ||
62 | |||
63 | lock: Spinlock for the device. | ||
64 | |||
65 | refcount: Reference count on the device. | ||
66 | |||
67 | bus: Pointer to struct bus_type that device belongs to. | ||
68 | |||
69 | dir: Device's sysfs directory. | ||
70 | |||
71 | class_num: Class-enumerated value of the device. | ||
72 | |||
73 | driver: Pointer to struct device_driver that controls the device. | ||
74 | |||
75 | driver_data: Driver-specific data. | ||
76 | |||
77 | platform_data: Platform data specific to the device. | ||
78 | |||
79 | Example: for devices on custom boards, as typical of embedded | ||
80 | and SOC based hardware, Linux often uses platform_data to point | ||
81 | to board-specific structures describing devices and how they | ||
82 | are wired. That can include what ports are available, chip | ||
83 | variants, which GPIO pins act in what additional roles, and so | ||
84 | on. This shrinks the "Board Support Packages" (BSPs) and | ||
85 | minimizes board-specific #ifdefs in drivers. | ||
86 | |||
87 | current_state: Current power state of the device. | ||
88 | |||
89 | saved_state: Pointer to saved state of the device. This is usable by | ||
90 | the device driver controlling the device. | ||
91 | |||
92 | release: Callback to free the device after all references have | ||
93 | gone away. This should be set by the allocator of the | ||
94 | device (i.e. the bus driver that discovered the device). | ||
95 | 6 | ||
96 | 7 | ||
97 | Programming Interface | 8 | Programming Interface |
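The release() rule in the removed text above deserves a concrete illustration. Below is a minimal sketch, for illustration only, of a bus driver that allocates a device and owns its teardown; all foo_* names are hypothetical and not part of this patch:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct foo_device {
		struct device dev;
		void __iomem *regs;		/* hypothetical per-board data */
	};

	static void foo_release(struct device *dev)
	{
		/* Runs only after the last reference has gone away; the
		 * bus driver that allocated the device frees it here. */
		kfree(container_of(dev, struct foo_device, dev));
	}

	static struct foo_device *foo_add_device(struct device *parent)
	{
		struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

		if (!foo)
			return NULL;
		foo->dev.parent = parent;
		foo->dev.release = foo_release;
		dev_set_name(&foo->dev, "foo0");
		if (device_register(&foo->dev)) {
			put_device(&foo->dev);	/* drops the ref, calls foo_release() */
			return NULL;
		}
		return foo;
	}

Note that once device_register() has been called, even on failure, the structure may only be freed through put_device(), never by a direct kfree().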
diff --git a/Documentation/driver-model/driver.txt b/Documentation/driver-model/driver.txt index d2cd6fb8ba9e..4421135826a2 100644 --- a/Documentation/driver-model/driver.txt +++ b/Documentation/driver-model/driver.txt | |||
@@ -1,23 +1,7 @@ | |||
1 | 1 | ||
2 | Device Drivers | 2 | Device Drivers |
3 | 3 | ||
4 | struct device_driver { | 4 | See the kerneldoc for the struct device_driver. |
5 | char * name; | ||
6 | struct bus_type * bus; | ||
7 | |||
8 | struct completion unloaded; | ||
9 | struct kobject kobj; | ||
10 | list_t devices; | ||
11 | |||
12 | struct module *owner; | ||
13 | |||
14 | int (*probe) (struct device * dev); | ||
15 | int (*remove) (struct device * dev); | ||
16 | |||
17 | int (*suspend) (struct device * dev, pm_message_t state); | ||
18 | int (*resume) (struct device * dev); | ||
19 | }; | ||
20 | |||
21 | 5 | ||
22 | 6 | ||
23 | Allocation | 7 | Allocation |
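For illustration, a minimal sketch of registering a driver against the callbacks listed in the removed struct above; the foo bus and all foo_* names are hypothetical, not from this patch:

	#include <linux/device.h>
	#include <linux/module.h>

	extern struct bus_type foo_bus_type;	/* assumed: registered by the bus driver */

	static int foo_probe(struct device *dev)
	{
		return 0;			/* 0 = driver claims the device */
	}

	static int foo_remove(struct device *dev)
	{
		return 0;
	}

	static struct device_driver foo_driver = {
		.name	= "foo",
		.bus	= &foo_bus_type,
		.owner	= THIS_MODULE,
		.probe	= foo_probe,
		.remove	= foo_remove,
	};

	static int __init foo_init(void)
	{
		return driver_register(&foo_driver);
	}

	static void __exit foo_exit(void)
	{
		driver_unregister(&foo_driver);
	}

	module_init(foo_init);
	module_exit(foo_exit);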
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 492e81df2968..f6a24e8aa11e 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
@@ -460,14 +460,6 @@ Who: Thomas Gleixner <tglx@linutronix.de> | |||
460 | 460 | ||
461 | ---------------------------- | 461 | ---------------------------- |
462 | 462 | ||
463 | What: The acpi_sleep=s4_nonvs command line option | ||
464 | When: 2.6.37 | ||
465 | Files: arch/x86/kernel/acpi/sleep.c | ||
466 | Why: superseded by acpi_sleep=nonvs | ||
467 | Who: Rafael J. Wysocki <rjw@sisk.pl> | ||
468 | |||
469 | ---------------------------- | ||
470 | |||
471 | What: PCI DMA unmap state API | 463 | What: PCI DMA unmap state API |
472 | When: August 2012 | 464 | When: August 2012 |
473 | Why: PCI DMA unmap state API (include/linux/pci-dma.h) was replaced | 465 | Why: PCI DMA unmap state API (include/linux/pci-dma.h) was replaced |
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index b0b814d75ca1..60740e8ecb37 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt | |||
@@ -836,7 +836,6 @@ Provides counts of softirq handlers serviced since boot time, for each cpu. | |||
836 | TASKLET: 0 0 0 290 | 836 | TASKLET: 0 0 0 290 |
837 | SCHED: 27035 26983 26971 26746 | 837 | SCHED: 27035 26983 26971 26746 |
838 | HRTIMER: 0 0 0 0 | 838 | HRTIMER: 0 0 0 0 |
839 | RCU: 1678 1769 2178 2250 | ||
840 | 839 | ||
841 | 840 | ||
842 | 1.3 IDE devices in /proc/ide | 841 | 1.3 IDE devices in /proc/ide |
diff --git a/Documentation/ja_JP/HOWTO b/Documentation/ja_JP/HOWTO index b63301a03811..050d37fe6d40 100644 --- a/Documentation/ja_JP/HOWTO +++ b/Documentation/ja_JP/HOWTO | |||
@@ -11,14 +11,14 @@ for non English (read: Japanese) speakers and is not intended as a | |||
11 | fork. So if you have any comments or updates for this file, please try | 11 | fork. So if you have any comments or updates for this file, please try |
12 | to update the original English file first. | 12 | to update the original English file first. |
13 | 13 | ||
14 | Last Updated: 2008/10/24 | 14 | Last Updated: 2011/03/31 |
15 | ================================== | 15 | ================================== |
16 | This is | 16 | This is |
17 | linux-2.6.28/Documentation/HOWTO | 17 | linux-2.6.38/Documentation/HOWTO |
18 | translated into Japanese. | 18 | translated into Japanese. |
19 | 19 | ||
20 | Translation group: JF Project < http://www.linux.or.jp/JF/ > | 20 | Translation group: JF Project < http://www.linux.or.jp/JF/ > |
21 | Translated on: 2008/10/24 | 21 | Translated on: 2011/3/28 |
22 | Translator: Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com> | 22 | Translator: Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com> |
23 | Proofreaders: Matsukura-san <nbh--mats at nifty dot com> | 23 | Proofreaders: Matsukura-san <nbh--mats at nifty dot com> |
24 | Masanori Kobayashi-san (Masanori Kobayasi) <zap03216 at nifty dot ne dot jp> | 24 | Masanori Kobayashi-san (Masanori Kobayasi) <zap03216 at nifty dot ne dot jp> |
@@ -256,8 +256,8 @@ The Linux kernel development process currently consists of a few different main | |||
256 | - the main 2.6.x kernel tree | 256 | - the main 2.6.x kernel tree |
257 | - the 2.6.x.y -stable kernel tree | 257 | - the 2.6.x.y -stable kernel tree |
258 | - the 2.6.x -git kernel patches | 258 | - the 2.6.x -git kernel patches |
259 | - the 2.6.x -mm kernel patches | ||
260 | - subsystem-specific kernel trees and patches | 259 | - subsystem-specific kernel trees and patches |
260 | - the 2.6.x -next kernel tree for integration tests | ||
261 | 261 | ||
262 | 2.6.x kernel tree | 262 | 2.6.x kernel tree |
263 | ----------------- | 263 | ----------------- |
@@ -268,9 +268,9 @@ The Linux kernel development process currently consists of a few different main | |||
268 | 268 | ||
269 | - As soon as a new kernel is released, a two-week special window opens, | 269 | - As soon as a new kernel is released, a two-week special window opens, |
270 | during which maintainers can send big diffs to Linus. | 270 | during which maintainers can send big diffs to Linus. |
271 | Such diffs are usually patches that have been in the -mm kernel for a few weeks. | 271 | Such diffs are usually patches that have been in the -next kernel for a few weeks. |
272 | The preferred way to send big changes is with git (the kernel's source | 272 | The preferred way to send big changes is with git (the kernel's source |
273 | management tool; see http://git.or.cz/ for details), but plain patch | 273 | management tool; see http://git-scm.com/ for details), but plain patch |
274 | files are also sufficient. | 274 | files are also sufficient. |
275 | 275 | ||
276 | - After two weeks, a -rc1 kernel is released, after which the stability | 276 | - After two weeks, a -rc1 kernel is released, after which the stability |
@@ -333,86 +333,44 @@ daily snapshots of Linus's kernel tree, which is managed in a git repository | |||
333 | these are more experimental than the -rc kernels, since they are | 333 | these are more experimental than the -rc kernels, since they are |
334 | generated automatically without even checking that the patches are sane. | 334 | generated automatically without even checking that the patches are sane. |
335 | 335 | ||
336 | 2.6.x -mm kernel patches | ||
337 | ------------------------ | ||
338 | | ||
339 | These are experimental kernel patches released by Andrew Morton. | ||
340 | Andrew takes all of the individual subsystem kernel trees and patches | ||
341 | and rolls them up together, along with a large number of patches | ||
342 | collected from the linux-kernel mailing list. | ||
343 | This tree is a proving ground for new features and patches. Once a | ||
344 | patch has proved its worth in -mm for a while, Andrew or the subsystem | ||
345 | maintainer pushes it on to Linus for inclusion in the mainline. | ||
346 | | ||
347 | It is strongly encouraged that all new patches be tested in the -mm | ||
348 | tree before they are sent to Linus for inclusion in the main kernel | ||
349 | tree. Patches that have not appeared in -mm before the merge window | ||
350 | opens will find it hard to be merged into the mainline. | ||
351 | | ||
352 | These kernels are not appropriate for use on systems that are supposed | ||
353 | to be stable, and they are riskier to run than any other kernel branch. | ||
354 | | ||
355 | If you wish to help out with the kernel development process, please | ||
356 | test and use these kernel releases, and provide feedback to the | ||
357 | linux-kernel mailing list if you have any problems, and also if | ||
358 | everything works properly. | ||
359 | | ||
360 | In addition to all the other experimental patches, these kernels | ||
361 | usually also contain all the changes in the mainline -git kernels | ||
362 | available at the time of release. | ||
363 | | ||
364 | The -mm kernels are not released on a fixed schedule, but usually a | ||
365 | few -mm kernels (1 to 3 is common) are released between each pair of | ||
366 | -rc kernels. | ||
367 | Subsystem-specific kernel trees and patches | 336 | Subsystem-specific kernel trees and patches |
368 | ------------------------------------------- | 337 | ------------------------------------------- |
369 | | 338 | |
370 | To make it possible to see what is happening in the various areas of | 339 | The maintainers of the various kernel subsystems --- and also many |
371 | the kernel, many kernel subsystem developers publish their development | 340 | kernel subsystem developers --- publish the current state of their |
372 | trees. These trees are fed into the -mm kernel releases as described | 341 | development in source repositories. That way, others can see what is |
373 | above. | 342 | happening in areas of the kernel other than their own. In areas where |
374 | | 343 | development is rapid, a developer may be asked which subsystem kernel |
375 | Here is a list of some of the various kernel trees - | 344 | tree his submission is based on, so that conflicts between the |
376 | git trees - | 345 | submission and other work already under way are avoided. |
377 | - Kbuild development tree, Sam Ravnborg <sam@ravnborg.org> | 346 | |
378 | git.kernel.org:/pub/scm/linux/kernel/git/sam/kbuild.git | 347 | Most of these repositories are git trees, but other SCMs, and patch |
379 | | 348 | queues published as quilt series, are also in use. Addresses of these |
380 | - ACPI development tree, Len Brown <len.brown@intel.com> | 349 | subsystem repositories are listed in the MAINTAINERS file. Many of |
381 | git.kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git | 350 | them can be browsed at http://git.kernel.org/. |
382 | | ||
383 | - Block development tree, Jens Axboe <axboe@suse.de> | ||
384 | git.kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git | ||
385 | | ||
386 | - DRM development tree, Dave Airlie <airlied@linux.ie> | ||
387 | git.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6.git | ||
388 | | ||
389 | - ia64 development tree, Tony Luck <tony.luck@intel.com> | ||
390 | git.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6.git | ||
391 | | ||
392 | - infiniband, Roland Dreier <rolandd@cisco.com> | ||
393 | git.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git | ||
394 | | ||
395 | - libata, Jeff Garzik <jgarzik@pobox.com> | ||
396 | git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev.git | ||
397 | | ||
398 | - network drivers, Jeff Garzik <jgarzik@pobox.com> | ||
399 | git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git | ||
400 | | ||
401 | - pcmcia, Dominik Brodowski <linux@dominikbrodowski.net> | ||
402 | git.kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git | ||
403 | | ||
404 | - SCSI, James Bottomley <James.Bottomley@hansenpartnership.com> | ||
405 | git.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git | ||
406 | | ||
407 | - x86, Ingo Molnar <mingo@elte.hu> | ||
408 | git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git | ||
409 | | ||
410 | quilt trees - | ||
411 | - USB, driver core and I2C, Greg Kroah-Hartman <gregkh@suse.de> | ||
412 | kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ | ||
413 | 351 | ||
414 | Other kernel trees are listed at http://git.kernel.org/ and in the | 352 | Before a proposed patch is committed to such a subsystem tree, it is |
415 | MAINTAINERS file. | 353 | subject to review in advance on a mailing list (see the corresponding |
354 | section below). For several kernel subsystems, this review process is | ||
355 | tracked with the tool patchwork. Patchwork offers a web interface that | ||
356 | shows patch postings and any comments on or revisions of a patch, and | ||
357 | maintainers can mark patches as under review, accepted, or rejected. | ||
358 | Most of these patchwork sites are listed at | ||
359 | http://patchwork.kernel.org/. | ||
360 | | ||
361 | The 2.6.x -next kernel tree for integration tests | ||
362 | --------------------------------------------- | ||
363 | | ||
364 | Before updates from subsystem trees are merged into the mainline 2.6.x | ||
365 | tree, they need to be integration-tested. For this purpose, a special | ||
366 | testing repository exists into which virtually all subsystem trees are | ||
367 | pulled on an almost daily basis - | ||
368 | http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git | ||
369 | http://linux.f-seidel.de/linux-next/pmwiki/ | ||
370 | | ||
371 | This way, the -next kernel gives a rough outline of what can be | ||
372 | expected to go into the mainline kernel at the next merge window. | ||
373 | Adventurous testers are very welcome to runtime-test the -next kernel. | ||
416 | 374 | ||
417 | Bug reporting | 375 | Bug reporting |
418 | ------------- | 376 | ------------- |
@@ -673,10 +631,9 @@ The Linux kernel community does not accept large chunks of code dropped on | |||
673 | they all started from the same place, after all. | 631 | they all started from the same place, after all. |
674 | 632 | ||
675 | Thanks to Paolo Ciarrocchi, who allowed the "Development Process" | 633 | Thanks to Paolo Ciarrocchi, who allowed the "Development Process" |
676 | (http://linux.tar.bz/articles/2.6-development_process) section he | 634 | (http://lwn.net/Articles/94386/) section he wrote to be used as the |
677 | wrote to be used as the basis of this text. | 635 | basis of this text. Randy Dunlap and Gerrit Huizenga provided lists of |
678 | Randy Dunlap and Gerrit Huizenga provided lists of things to do and | 636 | things to do and not to do on mailing lists. |
679 | not to do on mailing lists. | ||
680 | Thanks for reviews, comments, and contributions from the following people: | 637 | Thanks for reviews, comments, and contributions from the following people: |
681 | Pat Mochel, Hanna Linder, Randy Dunlap, Kay Sievers, | 638 | Pat Mochel, Hanna Linder, Randy Dunlap, Kay Sievers, |
682 | Vojtech Pavlik, Jan Kara, Josh Boyer, Kees Cook, Andrew Morton, Andi | 639 | Vojtech Pavlik, Jan Kara, Josh Boyer, Kees Cook, Andrew Morton, Andi |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index cc85a9278190..c603ef7b0568 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -245,7 +245,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
245 | 245 | ||
246 | acpi_sleep= [HW,ACPI] Sleep options | 246 | acpi_sleep= [HW,ACPI] Sleep options |
247 | Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, | 247 | Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, |
248 | old_ordering, s4_nonvs, sci_force_enable } | 248 | old_ordering, nonvs, sci_force_enable } |
249 | See Documentation/power/video.txt for information on | 249 | See Documentation/power/video.txt for information on |
250 | s3_bios and s3_mode. | 250 | s3_bios and s3_mode. |
251 | s3_beep is for debugging; it makes the PC's speaker beep | 251 | s3_beep is for debugging; it makes the PC's speaker beep |
@@ -1664,6 +1664,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1664 | noexec=on: enable non-executable mappings (default) | 1664 | noexec=on: enable non-executable mappings (default) |
1665 | noexec=off: disable non-executable mappings | 1665 | noexec=off: disable non-executable mappings |
1666 | 1666 | ||
1667 | nosmep [X86] | ||
1668 | Disable SMEP (Supervisor Mode Execution Protection) | ||
1669 | even if it is supported by processor. | ||
1670 | |||
1667 | noexec32 [X86-64] | 1671 | noexec32 [X86-64] |
1668 | This affects only 32-bit executables. | 1672 | This affects only 32-bit executables. |
1669 | noexec32=on: enable non-executable mappings (default) | 1673 | noexec32=on: enable non-executable mappings (default) |
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt index 1971bcf48a60..88880839ece4 100644 --- a/Documentation/power/devices.txt +++ b/Documentation/power/devices.txt | |||
@@ -279,11 +279,15 @@ When the system goes into the standby or memory sleep state, the phases are: | |||
279 | time.) Unlike the other suspend-related phases, during the prepare | 279 | time.) Unlike the other suspend-related phases, during the prepare |
280 | phase the device tree is traversed top-down. | 280 | phase the device tree is traversed top-down. |
281 | 281 | ||
282 | The prepare phase uses only a bus callback. After the callback method | 282 | In addition to that, if device drivers need to allocate additional |
283 | returns, no new children may be registered below the device. The method | 283 | memory to be able to handle device suspend correctly, that should be |
284 | may also prepare the device or driver in some way for the upcoming | 284 | done in the prepare phase. |
285 | system power transition, but it should not put the device into a | 285 | |
286 | low-power state. | 286 | After the prepare callback method returns, no new children may be |
287 | registered below the device. The method may also prepare the device or | ||
288 | driver in some way for the upcoming system power transition (for | ||
289 | example, by allocating additional memory required for this purpose), but | ||
290 | it should not put the device into a low-power state. | ||
287 | 291 | ||
288 | 2. The suspend methods should quiesce the device to stop it from performing | 292 | 2. The suspend methods should quiesce the device to stop it from performing |
289 | I/O. They also may save the device registers and put it into the | 293 | I/O. They also may save the device registers and put it into the |
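As a sketch of the allocation pattern this hunk describes, here is a hedged example of doing suspend-time allocation in .prepare() and releasing it in .complete(); all foo_* names and the size are hypothetical, not from this patch:

	#include <linux/pm.h>
	#include <linux/slab.h>

	#define FOO_SAVE_SIZE	4096		/* hypothetical */

	static void *foo_save_area;

	static int foo_prepare(struct device *dev)
	{
		/* The system is still fully functional here, so a
		 * GFP_KERNEL allocation is safe. */
		foo_save_area = kmalloc(FOO_SAVE_SIZE, GFP_KERNEL);
		return foo_save_area ? 0 : -ENOMEM;
	}

	static int foo_suspend(struct device *dev)
	{
		/* Quiesce the device using the buffer obtained in
		 * .prepare(); nothing is allocated at this point. */
		return 0;
	}

	static void foo_complete(struct device *dev)
	{
		kfree(foo_save_area);
		foo_save_area = NULL;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		.prepare  = foo_prepare,
		.suspend  = foo_suspend,
		.complete = foo_complete,
	};

A driver would point the .pm field of its struct device_driver at foo_pm_ops.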
diff --git a/Documentation/power/notifiers.txt b/Documentation/power/notifiers.txt index cf980709122a..c2a4a346c0d9 100644 --- a/Documentation/power/notifiers.txt +++ b/Documentation/power/notifiers.txt | |||
@@ -1,46 +1,41 @@ | |||
1 | Suspend notifiers | 1 | Suspend notifiers |
2 | (C) 2007 Rafael J. Wysocki <rjw@sisk.pl>, GPL | 2 | (C) 2007-2011 Rafael J. Wysocki <rjw@sisk.pl>, GPL |
3 | 3 | ||
4 | There are some operations that device drivers may want to carry out in their | 4 | There are some operations that subsystems or drivers may want to carry out |
5 | .suspend() routines, but shouldn't, because they can cause the hibernation or | 5 | before hibernation/suspend or after restore/resume, but they require the system |
6 | suspend to fail. For example, a driver may want to allocate a substantial amount | 6 | to be fully functional, so the drivers' and subsystems' .suspend() and .resume() |
7 | of memory (like 50 MB) in .suspend(), but that shouldn't be done after the | 7 | or even .prepare() and .complete() callbacks are not suitable for this purpose. |
8 | swsusp's memory shrinker has run. | 8 | For example, device drivers may want to upload firmware to their devices after |
9 | 9 | resume/restore, but they cannot do it by calling request_firmware() from their | |
10 | Also, there may be some operations, that subsystems want to carry out before a | 10 | .resume() or .complete() routines (user land processes are frozen at these |
11 | hibernation/suspend or after a restore/resume, requiring the system to be fully | 11 | points). The solution may be to load the firmware into memory before processes |
12 | functional, so the drivers' .suspend() and .resume() routines are not suitable | 12 | are frozen and upload it from there in the .resume() routine. |
13 | for this purpose. For example, device drivers may want to upload firmware to | 13 | A suspend/hibernation notifier may be used for this purpose. |
14 | their devices after a restore from a hibernation image, but they cannot do it by | 14 | |
15 | calling request_firmware() from their .resume() routines (user land processes | 15 | The subsystems or drivers having such needs can register suspend notifiers that |
16 | are frozen at this point). The solution may be to load the firmware into | 16 | will be called upon the following events by the PM core: |
17 | memory before processes are frozen and upload it from there in the .resume() | ||
18 | routine. Of course, a hibernation notifier may be used for this purpose. | ||
19 | |||
20 | The subsystems that have such needs can register suspend notifiers that will be | ||
21 | called upon the following events by the suspend core: | ||
22 | 17 | ||
23 | PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will | 18 | PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will |
24 | be frozen immediately. | 19 | be frozen immediately. |
25 | 20 | ||
26 | PM_POST_HIBERNATION The system memory state has been restored from a | 21 | PM_POST_HIBERNATION The system memory state has been restored from a |
27 | hibernation image or an error occurred during the | 22 | hibernation image or an error occurred during |
28 | hibernation. Device drivers' .resume() callbacks have | 23 | hibernation. Device drivers' restore callbacks have |
29 | been executed and tasks have been thawed. | 24 | been executed and tasks have been thawed. |
30 | 25 | ||
31 | PM_RESTORE_PREPARE The system is going to restore a hibernation image. | 26 | PM_RESTORE_PREPARE The system is going to restore a hibernation image. |
32 | If all goes well the restored kernel will issue a | 27 | If all goes well, the restored kernel will issue a |
33 | PM_POST_HIBERNATION notification. | 28 | PM_POST_HIBERNATION notification. |
34 | 29 | ||
35 | PM_POST_RESTORE An error occurred during the hibernation restore. | 30 | PM_POST_RESTORE An error occurred during restore from hibernation. |
36 | Device drivers' .resume() callbacks have been executed | 31 | Device drivers' restore callbacks have been executed |
37 | and tasks have been thawed. | 32 | and tasks have been thawed. |
38 | 33 | ||
39 | PM_SUSPEND_PREPARE The system is preparing for a suspend. | 34 | PM_SUSPEND_PREPARE The system is preparing for suspend. |
40 | 35 | ||
41 | PM_POST_SUSPEND The system has just resumed or an error occurred during | 36 | PM_POST_SUSPEND The system has just resumed or an error occurred during |
42 | the suspend. Device drivers' .resume() callbacks have | 37 | suspend. Device drivers' resume callbacks have been |
43 | been executed and tasks have been thawed. | 38 | executed and tasks have been thawed. |
44 | 39 | ||
45 | It is generally assumed that whatever the notifiers do for | 40 | It is generally assumed that whatever the notifiers do for |
46 | PM_HIBERNATION_PREPARE, should be undone for PM_POST_HIBERNATION. Analogously, | 41 | PM_HIBERNATION_PREPARE, should be undone for PM_POST_HIBERNATION. Analogously, |
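A minimal sketch of registering such a notifier; the foo_* names are hypothetical, while register_pm_notifier() is the real entry point the text refers to:

	#include <linux/init.h>
	#include <linux/suspend.h>
	#include <linux/notifier.h>

	static int foo_pm_notify(struct notifier_block *nb,
				 unsigned long event, void *unused)
	{
		switch (event) {
		case PM_HIBERNATION_PREPARE:
		case PM_SUSPEND_PREPARE:
			/* Tasks are not frozen yet, so request_firmware()
			 * may be called here and the image cached. */
			break;
		case PM_POST_HIBERNATION:
		case PM_POST_SUSPEND:
			/* Undo whatever the *_PREPARE case did. */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block foo_pm_nb = {
		.notifier_call = foo_pm_notify,
	};

	static int __init foo_init(void)
	{
		return register_pm_notifier(&foo_pm_nb);
	}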
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index 6d27ab8d6e9f..c83bd6b4e6e8 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt | |||
@@ -120,7 +120,6 @@ format: | |||
120 | field:unsigned char common_flags; offset:2; size:1; signed:0; | 120 | field:unsigned char common_flags; offset:2; size:1; signed:0; |
121 | field:unsigned char common_preempt_count; offset:3; size:1;signed:0; | 121 | field:unsigned char common_preempt_count; offset:3; size:1;signed:0; |
122 | field:int common_pid; offset:4; size:4; signed:1; | 122 | field:int common_pid; offset:4; size:4; signed:1; |
123 | field:int common_lock_depth; offset:8; size:4; signed:1; | ||
124 | 123 | ||
125 | field:unsigned long __probe_ip; offset:12; size:4; signed:0; | 124 | field:unsigned long __probe_ip; offset:12; size:4; signed:0; |
126 | field:int __probe_nargs; offset:16; size:4; signed:1; | 125 | field:int __probe_nargs; offset:16; size:4; signed:1; |
diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX new file mode 100644 index 000000000000..fe0251c4cfb7 --- /dev/null +++ b/Documentation/virtual/00-INDEX | |||
@@ -0,0 +1,10 @@ | |||
1 | Virtualization support in the Linux kernel. | ||
2 | |||
3 | 00-INDEX | ||
4 | - this file. | ||
5 | kvm/ | ||
6 | - Kernel Virtual Machine. See also http://linux-kvm.org | ||
7 | lguest/ | ||
8 | - Extremely simple hypervisor for experimental/educational use. | ||
9 | uml/ | ||
10 | - User Mode Linux, builds/runs Linux kernel as a userspace program. | ||
diff --git a/Documentation/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 9bef4e4cec50..9bef4e4cec50 100644 --- a/Documentation/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt | |||
diff --git a/Documentation/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt index 882068538c9c..882068538c9c 100644 --- a/Documentation/kvm/cpuid.txt +++ b/Documentation/virtual/kvm/cpuid.txt | |||
diff --git a/Documentation/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt index 3b4cd3bf5631..3b4cd3bf5631 100644 --- a/Documentation/kvm/locking.txt +++ b/Documentation/virtual/kvm/locking.txt | |||
diff --git a/Documentation/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt index f46aa58389ca..f46aa58389ca 100644 --- a/Documentation/kvm/mmu.txt +++ b/Documentation/virtual/kvm/mmu.txt | |||
diff --git a/Documentation/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt index d079aed27e03..d079aed27e03 100644 --- a/Documentation/kvm/msr.txt +++ b/Documentation/virtual/kvm/msr.txt | |||
diff --git a/Documentation/kvm/ppc-pv.txt b/Documentation/virtual/kvm/ppc-pv.txt index 3ab969c59046..3ab969c59046 100644 --- a/Documentation/kvm/ppc-pv.txt +++ b/Documentation/virtual/kvm/ppc-pv.txt | |||
diff --git a/Documentation/kvm/review-checklist.txt b/Documentation/virtual/kvm/review-checklist.txt index 730475ae1b8d..a850986ed684 100644 --- a/Documentation/kvm/review-checklist.txt +++ b/Documentation/virtual/kvm/review-checklist.txt | |||
@@ -7,7 +7,7 @@ Review checklist for kvm patches | |||
7 | 2. Patches should be against kvm.git master branch. | 7 | 2. Patches should be against kvm.git master branch. |
8 | 8 | ||
9 | 3. If the patch introduces or modifies a new userspace API: | 9 | 3. If the patch introduces or modifies a new userspace API: |
10 | - the API must be documented in Documentation/kvm/api.txt | 10 | - the API must be documented in Documentation/virtual/kvm/api.txt |
11 | - the API must be discoverable using KVM_CHECK_EXTENSION | 11 | - the API must be discoverable using KVM_CHECK_EXTENSION |
12 | 12 | ||
13 | 4. New state must include support for save/restore. | 13 | 4. New state must include support for save/restore. |
diff --git a/Documentation/kvm/timekeeping.txt b/Documentation/virtual/kvm/timekeeping.txt index df8946377cb6..df8946377cb6 100644 --- a/Documentation/kvm/timekeeping.txt +++ b/Documentation/virtual/kvm/timekeeping.txt | |||
diff --git a/Documentation/lguest/.gitignore b/Documentation/virtual/lguest/.gitignore index 115587fd5f65..115587fd5f65 100644 --- a/Documentation/lguest/.gitignore +++ b/Documentation/virtual/lguest/.gitignore | |||
diff --git a/Documentation/lguest/Makefile b/Documentation/virtual/lguest/Makefile index bebac6b4f332..bebac6b4f332 100644 --- a/Documentation/lguest/Makefile +++ b/Documentation/virtual/lguest/Makefile | |||
diff --git a/Documentation/lguest/extract b/Documentation/virtual/lguest/extract index 7730bb6e4b94..7730bb6e4b94 100644 --- a/Documentation/lguest/extract +++ b/Documentation/virtual/lguest/extract | |||
diff --git a/Documentation/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c index d9da7e148538..d9da7e148538 100644 --- a/Documentation/lguest/lguest.c +++ b/Documentation/virtual/lguest/lguest.c | |||
diff --git a/Documentation/lguest/lguest.txt b/Documentation/virtual/lguest/lguest.txt index dad99978a6a8..bff0c554485d 100644 --- a/Documentation/lguest/lguest.txt +++ b/Documentation/virtual/lguest/lguest.txt | |||
@@ -74,7 +74,8 @@ Running Lguest: | |||
74 | 74 | ||
75 | - Run an lguest as root: | 75 | - Run an lguest as root: |
76 | 76 | ||
77 | Documentation/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 --block=rootfile root=/dev/vda | 77 | Documentation/virtual/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 \ |
78 | --block=rootfile root=/dev/vda | ||
78 | 79 | ||
79 | Explanation: | 80 | Explanation: |
80 | 64: the amount of memory to use, in MB. | 81 | 64: the amount of memory to use, in MB. |
diff --git a/Documentation/uml/UserModeLinux-HOWTO.txt b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt index 9b7e1904db1c..9b7e1904db1c 100644 --- a/Documentation/uml/UserModeLinux-HOWTO.txt +++ b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt | |||
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt index 092e596a1301..c54b4f503e2a 100644 --- a/Documentation/x86/x86_64/boot-options.txt +++ b/Documentation/x86/x86_64/boot-options.txt | |||
@@ -206,7 +206,7 @@ IOMMU (input/output memory management unit) | |||
206 | (e.g. because you have < 3 GB memory). | 206 | (e.g. because you have < 3 GB memory). |
207 | Kernel boot message: "PCI-DMA: Disabling IOMMU" | 207 | Kernel boot message: "PCI-DMA: Disabling IOMMU" |
208 | 208 | ||
209 | 2. <arch/x86_64/kernel/pci-gart.c>: AMD GART based hardware IOMMU. | 209 | 2. <arch/x86/kernel/amd_gart_64.c>: AMD GART based hardware IOMMU. |
210 | Kernel boot message: "PCI-DMA: using GART IOMMU" | 210 | Kernel boot message: "PCI-DMA: using GART IOMMU" |
211 | 211 | ||
212 | 3. <arch/x86_64/kernel/pci-swiotlb.c> : Software IOMMU implementation. Used | 212 | 3. <arch/x86_64/kernel/pci-swiotlb.c> : Software IOMMU implementation. Used |
diff --git a/Documentation/zh_CN/email-clients.txt b/Documentation/zh_CN/email-clients.txt new file mode 100644 index 000000000000..5d65e323d060 --- /dev/null +++ b/Documentation/zh_CN/email-clients.txt | |||
@@ -0,0 +1,249 @@ | |||
1 | Chinese translated version of Documentation/email-clients.txt | ||
2 | | ||
3 | If you have any comment or update to the content, please contact the | ||
4 | original document maintainer directly. However, if you have a problem | ||
5 | communicating in English you can also ask the Chinese maintainer for | ||
6 | help. Contact the Chinese maintainer if this translation is outdated | ||
7 | or if there is a problem with the translation. | ||
8 | | ||
9 | Chinese maintainer: Harry Wei <harryxiyou@gmail.com> | ||
10 | --------------------------------------------------------------------- | ||
11 | Chinese translation of Documentation/email-clients.txt | ||
12 | | ||
13 | If you have any comments or updates to the content, please contact the | ||
14 | original document maintainer directly. However, if you have trouble | ||
15 | communicating in English, you can also ask the Chinese maintainer for | ||
16 | help. Contact the Chinese maintainer if this translation is outdated | ||
17 | or if there is a problem with it. | ||
18 | | ||
19 | Chinese maintainer: Harry Wei <harryxiyou@gmail.com> | ||
20 | Chinese translation maintainer: Harry Wei <harryxiyou@gmail.com> | ||
21 | Chinese translation contributors: Yinglin Luan <synmyth@gmail.com> | ||
22 | Xiaochen Wang <wangxiaochen0@gmail.com> | ||
23 | yaxinsn <yaxinsn@163.com> | ||
24 | | ||
25 | The translated document follows: | ||
26 | --------------------------------------------------------------------- | ||
27 | | ||
28 | Email clients info for Linux | ||
29 | ====================================================================== | ||
30 | | ||
31 | General Preferences | ||
32 | ---------------------------------------------------------------------- | ||
33 | Linux kernel patches are submitted by email, preferably as inline text | ||
34 | in the body of the message. Some maintainers accept attachments, but | ||
35 | then the attachment should have content-type "text/plain". However, | ||
36 | attachments are generally frowned upon, because they make quoting | ||
37 | portions of the patch during review more difficult. | ||
38 | | ||
39 | Email clients that are used for Linux kernel patches should send the | ||
40 | patch text untouched. For example, they should not modify or delete | ||
41 | tabs or spaces, even at the beginning or end of lines. | ||
42 | | ||
43 | Don't send patches with "format=flowed"; this can cause unexpected | ||
44 | and unwanted line breaks. | ||
45 | | ||
46 | Don't let your email client do automatic word wrapping for you; this | ||
47 | can also corrupt your patch. | ||
48 | | ||
49 | Email clients should not modify the character-set encoding of the | ||
50 | text. Emailed patches should be in ASCII or UTF-8 encoding only. If | ||
51 | you configure your email client to send mail with UTF-8 encoding, you | ||
52 | avoid some possible character-set problems. | ||
53 | | ||
54 | Email clients should generate and keep the References: and | ||
55 | In-Reply-To: headers, so that mail threading is not broken. | ||
56 | | ||
57 | Copy-and-paste (or cut-and-paste) usually does not work for patches, | ||
58 | because tabs are converted to spaces. Using xclipboard, xclip and/or | ||
59 | xcutsel may work, but it's best to test this yourself, or just avoid | ||
60 | copy-and-paste. | ||
61 | | ||
62 | Don't use PGP/GPG signatures in mail that contains patches; this | ||
63 | breaks many scripts that read and apply patches. (This problem | ||
64 | should be fixable.) | ||
65 | | ||
66 | Before sending patches to Linux mailing lists, it's a good idea to | ||
67 | send yourself a patch first, save the received message to a file, and | ||
68 | apply it with the 'patch' command. If that works, then send it to the | ||
69 | mailing lists. | ||
70 | | ||
71 | | ||
72 | Some email client hints | ||
73 | ---------------------------------------------------------------------- | ||
74 | Here are some specific MUA configuration hints for sending Linux | ||
75 | kernel patches. They are not meant to cover every setting of every | ||
76 | software package. | ||
77 | | ||
78 | Legend: | ||
79 | TUI = text-based user interface | ||
80 | GUI = graphical user interface | ||
81 | | ||
82 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
83 | Alpine (TUI) | ||
84 | | ||
85 | Config options: | ||
86 | In the "Sending Preferences" section: | ||
87 | | ||
88 | - "Do Not Send Flowed Text" must be enabled | ||
89 | - "Strip Whitespace Before Sending" must be disabled | ||
90 | | ||
91 | When composing a message, the cursor should be placed where the patch | ||
92 | should appear; then press CTRL-R and supply the patch file name to | ||
93 | insert the patch into the message. | ||
94 | | ||
95 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
96 | Evolution (GUI) | ||
97 | | ||
98 | Some developers use this successfully for patches. | ||
99 | | ||
100 | When composing mail, select: Preformat | ||
101 | from Format->Heading->Preformatted (Ctrl-7) or from the toolbar. | ||
102 | | ||
103 | Then use: | ||
104 | Insert->Text File... (Alt-n x) to insert the patch. | ||
105 | | ||
106 | You can also do "diff -Nru old.c new.c | xclip", select Preformat, | ||
107 | and paste with the middle button. | ||
108 | | ||
109 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
110 | Kmail (GUI) | ||
111 | | ||
112 | Some developers use this successfully for patches. | ||
113 | | ||
114 | The default is not to compose in HTML; do not enable it. | ||
115 | | ||
116 | When composing a mail, under options, uncheck automatic word wrapping. | ||
117 | The only disadvantage is that any text you then type in the mail will | ||
118 | not be wrapped, so you will have to wrap it manually before the patch. | ||
119 | The easiest way around this is to compose with word wrap enabled and | ||
120 | save the message as a draft; when you pull it up again from drafts, | ||
121 | the text is hard-wrapped, and you can then uncheck automatic word | ||
122 | wrapping without losing the existing wrapping. | ||
123 | | ||
124 | At the bottom of the mail, put the commonly used patch delimiter | ||
125 | before inserting the patch: three hyphens (---). | ||
126 | | ||
127 | Then from the "Message" menu, select insert file and choose your | ||
128 | patch. There is also an option to configure a toolbar with an | ||
129 | "insert file" icon. | ||
130 | | ||
131 | You can safely GPG-sign attachments, but inlined patch text had | ||
132 | better not be GPG-signed: signing an inline patch makes it hard to | ||
133 | extract from its 7-bit encoding. | ||
134 | | ||
135 | If you absolutely must send a patch as an attachment instead of | ||
136 | inline text, right-click the attachment, select properties, and | ||
137 | check "Suggest automatic display" so the attachment is shown inline. | ||
138 | | ||
139 | To save a patch that was sent as inline text, select the message, | ||
140 | right-click, and choose "save as"; if it was composed properly, the | ||
141 | whole message, unmodified, can be used as a patch. There is no option | ||
142 | to save the message without the headers -- a bug has been filed in | ||
143 | kmail's bugzilla, and hopefully it will be handled. Messages are | ||
144 | saved read/write for the user only, so you may have to chmod them to | ||
145 | make them readable when copied elsewhere. | ||
146 | | ||
147 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
148 | Lotus Notes (GUI) | ||
149 | | ||
150 | Do not use it. | ||
151 | | ||
152 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
153 | Mutt (TUI) | ||
154 | | ||
155 | Plenty of Linux developers use mutt, so it must work pretty well. | ||
156 | | ||
157 | Mutt does not come with an editor, so whatever editor you use should | ||
158 | not do automatic line breaking. Most editors have an "insert file" | ||
159 | option that inserts a file's contents unaltered. | ||
160 | | ||
161 | To use 'vim' as mutt's editor: | ||
162 | set editor="vi" | ||
163 | | ||
164 | If using xclip, type the command | ||
165 | :set paste | ||
166 | before pressing the middle button or shift-insert, or use | ||
167 | :r filename | ||
168 | | ||
169 | if you want to include the patch inline. | ||
170 | (a)ttach works fine without "set paste". | ||
171 | | ||
172 | Config options: | ||
173 | It should work with the default settings. | ||
174 | However, setting "send_charset" to "us-ascii::utf-8" is a good idea. | ||
175 | | ||
176 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
177 | Pine (TUI) | ||
178 | | ||
179 | Pine has had some whitespace truncation issues in the past, but these | ||
180 | should all be fixed by now. | ||
181 | | ||
182 | Use alpine (pine's successor) if you can. | ||
183 | | ||
184 | Config options: | ||
185 | - quell-flowed-text is needed for recent versions | ||
186 | - the "no-strip-whitespace-before-send" option is needed | ||
187 | | ||
188 | | ||
189 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
190 | Sylpheed (GUI) | ||
191 | | ||
192 | - works well for inline text (or using attachments) | ||
193 | - allows the use of an external editor | ||
194 | - is slow on large folders | ||
195 | - won't do TLS SMTP auth over a non-SSL connection | ||
196 | - has a helpful ruler bar in the compose window | ||
197 | - adding addresses to the address book does not understand the | ||
198 | display name properly | ||
199 | | ||
200 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
201 | Thunderbird (GUI) | ||
202 | | ||
203 | By default, Thunderbird likes to mangle text, but there are ways to | ||
204 | coerce it into behaving. | ||
205 | | ||
206 | - In the account settings, under "Composition & Addressing", do not | ||
207 | select "Compose messages in HTML format". | ||
208 | | ||
209 | - Edit your Thunderbird config settings so that it won't wrap lines: | ||
210 | user_pref("mailnews.wraplength", 0); | ||
211 | | ||
212 | - Edit your Thunderbird config settings so that it won't use | ||
213 | "format=flowed": user_pref("mailnews.send_plaintext_flowed", false); | ||
214 | | ||
215 | - You need to get Thunderbird into preformat mode: | ||
216 | If you compose HTML messages by default, it is not too hard: just | ||
217 | select "Preformat" from the drop-down box in the toolbar. | ||
218 | If you compose in plain text by default, you have to compose this | ||
219 | one message in HTML (press the write button with shift held down), | ||
220 | and then select "Preformat" from the drop-down box, or the message | ||
221 | will come out with line breaks. | ||
222 | | ||
223 | - Allow the use of an external editor: | ||
224 | The easiest thing to do with Thunderbird and patches is to use the | ||
225 | "external editor" extension and then use your favourite $EDITOR to | ||
226 | read/merge the patch into the body text. To do this, download and | ||
227 | install the extension, add a button for it using | ||
228 | View->Toolbars->Customize..., and then just click the button while | ||
229 | in the Compose dialog. | ||
230 | | ||
231 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
232 | TkRat (GUI) | ||
233 | | ||
234 | Works. Use "Insert file..." or an external editor. | ||
235 | | ||
236 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
237 | Gmail (Web GUI) | ||
238 | | ||
239 | Do not use it to send patches. | ||
240 | | ||
241 | The Gmail web client converts tabs to spaces automatically. | ||
242 | | ||
243 | While the tab-to-space problem can be solved with an external editor, | ||
244 | Gmail also wraps lines every 78 characters, which cannot be fixed. | ||
245 | | ||
246 | Another problem is that Gmail base64-encodes any message containing a | ||
247 | non-ASCII character. That includes things like European names. | ||
248 | | ||
249 | ### | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 16a5c5f2c6a6..8df8d2dfba28 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -405,8 +405,8 @@ S: Maintained | |||
405 | F: sound/oss/aedsp16.c | 405 | F: sound/oss/aedsp16.c |
406 | 406 | ||
407 | AFFS FILE SYSTEM | 407 | AFFS FILE SYSTEM |
408 | M: Roman Zippel <zippel@linux-m68k.org> | 408 | L: linux-fsdevel@vger.kernel.org |
409 | S: Maintained | 409 | S: Orphan |
410 | F: Documentation/filesystems/affs.txt | 410 | F: Documentation/filesystems/affs.txt |
411 | F: fs/affs/ | 411 | F: fs/affs/ |
412 | 412 | ||
@@ -2813,38 +2813,19 @@ F: Documentation/gpio.txt | |||
2813 | F: drivers/gpio/ | 2813 | F: drivers/gpio/ |
2814 | F: include/linux/gpio* | 2814 | F: include/linux/gpio* |
2815 | 2815 | ||
2816 | GRE DEMULTIPLEXER DRIVER | ||
2817 | M: Dmitry Kozlov <xeb@mail.ru> | ||
2818 | L: netdev@vger.kernel.org | ||
2819 | S: Maintained | ||
2820 | F: net/ipv4/gre.c | ||
2821 | F: include/net/gre.h | ||
2822 | |||
2816 | GRETH 10/100/1G Ethernet MAC device driver | 2823 | GRETH 10/100/1G Ethernet MAC device driver |
2817 | M: Kristoffer Glembo <kristoffer@gaisler.com> | 2824 | M: Kristoffer Glembo <kristoffer@gaisler.com> |
2818 | L: netdev@vger.kernel.org | 2825 | L: netdev@vger.kernel.org |
2819 | S: Maintained | 2826 | S: Maintained |
2820 | F: drivers/net/greth* | 2827 | F: drivers/net/greth* |
2821 | 2828 | ||
2822 | HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER | ||
2823 | M: Frank Seidel <frank@f-seidel.de> | ||
2824 | L: platform-driver-x86@vger.kernel.org | ||
2825 | W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ | ||
2826 | S: Maintained | ||
2827 | F: drivers/platform/x86/hdaps.c | ||
2828 | |||
2829 | HWPOISON MEMORY FAILURE HANDLING | ||
2830 | M: Andi Kleen <andi@firstfloor.org> | ||
2831 | L: linux-mm@kvack.org | ||
2832 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison | ||
2833 | S: Maintained | ||
2834 | F: mm/memory-failure.c | ||
2835 | F: mm/hwpoison-inject.c | ||
2836 | |||
2837 | HYPERVISOR VIRTUAL CONSOLE DRIVER | ||
2838 | L: linuxppc-dev@lists.ozlabs.org | ||
2839 | S: Odd Fixes | ||
2840 | F: drivers/tty/hvc/ | ||
2841 | |||
2842 | iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER | ||
2843 | M: Peter Jones <pjones@redhat.com> | ||
2844 | M: Konrad Rzeszutek Wilk <konrad@kernel.org> | ||
2845 | S: Maintained | ||
2846 | F: drivers/firmware/iscsi_ibft* | ||
2847 | |||
2848 | GSPCA FINEPIX SUBDRIVER | 2829 | GSPCA FINEPIX SUBDRIVER |
2849 | M: Frank Zago <frank@zago.net> | 2830 | M: Frank Zago <frank@zago.net> |
2850 | L: linux-media@vger.kernel.org | 2831 | L: linux-media@vger.kernel.org |
@@ -2895,6 +2876,26 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | |||
2895 | S: Maintained | 2876 | S: Maintained |
2896 | F: drivers/media/video/gspca/ | 2877 | F: drivers/media/video/gspca/ |
2897 | 2878 | ||
2879 | HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER | ||
2880 | M: Frank Seidel <frank@f-seidel.de> | ||
2881 | L: platform-driver-x86@vger.kernel.org | ||
2882 | W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ | ||
2883 | S: Maintained | ||
2884 | F: drivers/platform/x86/hdaps.c | ||
2885 | |||
2886 | HWPOISON MEMORY FAILURE HANDLING | ||
2887 | M: Andi Kleen <andi@firstfloor.org> | ||
2888 | L: linux-mm@kvack.org | ||
2889 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison | ||
2890 | S: Maintained | ||
2891 | F: mm/memory-failure.c | ||
2892 | F: mm/hwpoison-inject.c | ||
2893 | |||
2894 | HYPERVISOR VIRTUAL CONSOLE DRIVER | ||
2895 | L: linuxppc-dev@lists.ozlabs.org | ||
2896 | S: Odd Fixes | ||
2897 | F: drivers/tty/hvc/ | ||
2898 | |||
2898 | HARDWARE MONITORING | 2899 | HARDWARE MONITORING |
2899 | M: Jean Delvare <khali@linux-fr.org> | 2900 | M: Jean Delvare <khali@linux-fr.org> |
2900 | M: Guenter Roeck <guenter.roeck@ericsson.com> | 2901 | M: Guenter Roeck <guenter.roeck@ericsson.com> |
@@ -2945,8 +2946,8 @@ F: drivers/block/cciss* | |||
2945 | F: include/linux/cciss_ioctl.h | 2946 | F: include/linux/cciss_ioctl.h |
2946 | 2947 | ||
2947 | HFS FILESYSTEM | 2948 | HFS FILESYSTEM |
2948 | M: Roman Zippel <zippel@linux-m68k.org> | 2949 | L: linux-fsdevel@vger.kernel.org |
2949 | S: Maintained | 2950 | S: Orphan |
2950 | F: Documentation/filesystems/hfs.txt | 2951 | F: Documentation/filesystems/hfs.txt |
2951 | F: fs/hfs/ | 2952 | F: fs/hfs/ |
2952 | 2953 | ||
@@ -3478,6 +3479,12 @@ F: Documentation/isapnp.txt | |||
3478 | F: drivers/pnp/isapnp/ | 3479 | F: drivers/pnp/isapnp/ |
3479 | F: include/linux/isapnp.h | 3480 | F: include/linux/isapnp.h |
3480 | 3481 | ||
3482 | iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER | ||
3483 | M: Peter Jones <pjones@redhat.com> | ||
3484 | M: Konrad Rzeszutek Wilk <konrad@kernel.org> | ||
3485 | S: Maintained | ||
3486 | F: drivers/firmware/iscsi_ibft* | ||
3487 | |||
3481 | ISCSI | 3488 | ISCSI |
3482 | M: Mike Christie <michaelc@cs.wisc.edu> | 3489 | M: Mike Christie <michaelc@cs.wisc.edu> |
3483 | L: open-iscsi@googlegroups.com | 3490 | L: open-iscsi@googlegroups.com |
@@ -3807,7 +3814,7 @@ M: Rusty Russell <rusty@rustcorp.com.au> | |||
3807 | L: lguest@lists.ozlabs.org | 3814 | L: lguest@lists.ozlabs.org |
3808 | W: http://lguest.ozlabs.org/ | 3815 | W: http://lguest.ozlabs.org/ |
3809 | S: Odd Fixes | 3816 | S: Odd Fixes |
3810 | F: Documentation/lguest/ | 3817 | F: Documentation/virtual/lguest/ |
3811 | F: arch/x86/lguest/ | 3818 | F: arch/x86/lguest/ |
3812 | F: drivers/lguest/ | 3819 | F: drivers/lguest/ |
3813 | F: include/linux/lguest*.h | 3820 | F: include/linux/lguest*.h |
@@ -3994,7 +4001,6 @@ F: arch/m32r/ | |||
3994 | 4001 | ||
3995 | M68K ARCHITECTURE | 4002 | M68K ARCHITECTURE |
3996 | M: Geert Uytterhoeven <geert@linux-m68k.org> | 4003 | M: Geert Uytterhoeven <geert@linux-m68k.org> |
3997 | M: Roman Zippel <zippel@linux-m68k.org> | ||
3998 | L: linux-m68k@lists.linux-m68k.org | 4004 | L: linux-m68k@lists.linux-m68k.org |
3999 | W: http://www.linux-m68k.org/ | 4005 | W: http://www.linux-m68k.org/ |
4000 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k.git | 4006 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k.git |
@@ -4989,6 +4995,13 @@ F: Documentation/pps/ | |||
4989 | F: drivers/pps/ | 4995 | F: drivers/pps/ |
4990 | F: include/linux/pps*.h | 4996 | F: include/linux/pps*.h |
4991 | 4997 | ||
4998 | PPTP DRIVER | ||
4999 | M: Dmitry Kozlov <xeb@mail.ru> | ||
5000 | L: netdev@vger.kernel.org | ||
5001 | S: Maintained | ||
5002 | F: drivers/net/pptp.c | ||
5003 | W: http://sourceforge.net/projects/accel-pptp | ||
5004 | |||
4992 | PREEMPTIBLE KERNEL | 5005 | PREEMPTIBLE KERNEL |
4993 | M: Robert Love <rml@tech9.net> | 5006 | M: Robert Love <rml@tech9.net> |
4994 | L: kpreempt-tech@lists.sourceforge.net | 5007 | L: kpreempt-tech@lists.sourceforge.net |
@@ -6618,7 +6631,7 @@ L: user-mode-linux-devel@lists.sourceforge.net | |||
6618 | L: user-mode-linux-user@lists.sourceforge.net | 6631 | L: user-mode-linux-user@lists.sourceforge.net |
6619 | W: http://user-mode-linux.sourceforge.net | 6632 | W: http://user-mode-linux.sourceforge.net |
6620 | S: Maintained | 6633 | S: Maintained |
6621 | F: Documentation/uml/ | 6634 | F: Documentation/virtual/uml/ |
6622 | F: arch/um/ | 6635 | F: arch/um/ |
6623 | F: fs/hostfs/ | 6636 | F: fs/hostfs/ |
6624 | F: fs/hppfs/ | 6637 | F: fs/hppfs/ |
@@ -7024,20 +7037,6 @@ M: "Maciej W. Rozycki" <macro@linux-mips.org> | |||
7024 | S: Maintained | 7037 | S: Maintained |
7025 | F: drivers/tty/serial/zs.* | 7038 | F: drivers/tty/serial/zs.* |
7026 | 7039 | ||
7027 | GRE DEMULTIPLEXER DRIVER | ||
7028 | M: Dmitry Kozlov <xeb@mail.ru> | ||
7029 | L: netdev@vger.kernel.org | ||
7030 | S: Maintained | ||
7031 | F: net/ipv4/gre.c | ||
7032 | F: include/net/gre.h | ||
7033 | |||
7034 | PPTP DRIVER | ||
7035 | M: Dmitry Kozlov <xeb@mail.ru> | ||
7036 | L: netdev@vger.kernel.org | ||
7037 | S: Maintained | ||
7038 | F: drivers/net/pptp.c | ||
7039 | W: http://sourceforge.net/projects/accel-pptp | ||
7040 | |||
7041 | THE REST | 7040 | THE REST |
7042 | M: Linus Torvalds <torvalds@linux-foundation.org> | 7041 | M: Linus Torvalds <torvalds@linux-foundation.org> |
7043 | L: linux-kernel@vger.kernel.org | 7042 | L: linux-kernel@vger.kernel.org |
diff --git a/Makefile b/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 39 | 3 | SUBLEVEL = 39 |
4 | EXTRAVERSION = -rc7 | 4 | EXTRAVERSION = |
5 | NAME = Flesh-Eating Bats with Fangs | 5 | NAME = Flesh-Eating Bats with Fangs |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index 058937bf5a77..b1834166922d 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h | |||
@@ -452,10 +452,14 @@ | |||
452 | #define __NR_fanotify_init 494 | 452 | #define __NR_fanotify_init 494 |
453 | #define __NR_fanotify_mark 495 | 453 | #define __NR_fanotify_mark 495 |
454 | #define __NR_prlimit64 496 | 454 | #define __NR_prlimit64 496 |
455 | #define __NR_name_to_handle_at 497 | ||
456 | #define __NR_open_by_handle_at 498 | ||
457 | #define __NR_clock_adjtime 499 | ||
458 | #define __NR_syncfs 500 | ||
455 | 459 | ||
456 | #ifdef __KERNEL__ | 460 | #ifdef __KERNEL__ |
457 | 461 | ||
458 | #define NR_SYSCALLS 497 | 462 | #define NR_SYSCALLS 501 |
459 | 463 | ||
460 | #define __ARCH_WANT_IPC_PARSE_VERSION | 464 | #define __ARCH_WANT_IPC_PARSE_VERSION |
461 | #define __ARCH_WANT_OLD_READDIR | 465 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 42aa078a5e4d..5a621c6d22ab 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c | |||
@@ -585,8 +585,7 @@ handle_ipi(struct pt_regs *regs) | |||
585 | 585 | ||
586 | switch (which) { | 586 | switch (which) { |
587 | case IPI_RESCHEDULE: | 587 | case IPI_RESCHEDULE: |
588 | /* Reschedule callback. Everything to be done | 588 | scheduler_ipi(); |
589 | is done by the interrupt return path. */ | ||
590 | break; | 589 | break; |
591 | 590 | ||
592 | case IPI_CALL_FUNC: | 591 | case IPI_CALL_FUNC: |
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index a6a1de9db16f..15f999d41c75 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S | |||
@@ -498,23 +498,27 @@ sys_call_table: | |||
498 | .quad sys_ni_syscall /* sys_timerfd */ | 498 | .quad sys_ni_syscall /* sys_timerfd */ |
499 | .quad sys_eventfd | 499 | .quad sys_eventfd |
500 | .quad sys_recvmmsg | 500 | .quad sys_recvmmsg |
501 | .quad sys_fallocate /* 480 */ | 501 | .quad sys_fallocate /* 480 */ |
502 | .quad sys_timerfd_create | 502 | .quad sys_timerfd_create |
503 | .quad sys_timerfd_settime | 503 | .quad sys_timerfd_settime |
504 | .quad sys_timerfd_gettime | 504 | .quad sys_timerfd_gettime |
505 | .quad sys_signalfd4 | 505 | .quad sys_signalfd4 |
506 | .quad sys_eventfd2 /* 485 */ | 506 | .quad sys_eventfd2 /* 485 */ |
507 | .quad sys_epoll_create1 | 507 | .quad sys_epoll_create1 |
508 | .quad sys_dup3 | 508 | .quad sys_dup3 |
509 | .quad sys_pipe2 | 509 | .quad sys_pipe2 |
510 | .quad sys_inotify_init1 | 510 | .quad sys_inotify_init1 |
511 | .quad sys_preadv /* 490 */ | 511 | .quad sys_preadv /* 490 */ |
512 | .quad sys_pwritev | 512 | .quad sys_pwritev |
513 | .quad sys_rt_tgsigqueueinfo | 513 | .quad sys_rt_tgsigqueueinfo |
514 | .quad sys_perf_event_open | 514 | .quad sys_perf_event_open |
515 | .quad sys_fanotify_init | 515 | .quad sys_fanotify_init |
516 | .quad sys_fanotify_mark /* 495 */ | 516 | .quad sys_fanotify_mark /* 495 */ |
517 | .quad sys_prlimit64 | 517 | .quad sys_prlimit64 |
518 | .quad sys_name_to_handle_at | ||
519 | .quad sys_open_by_handle_at | ||
520 | .quad sys_clock_adjtime | ||
521 | .quad sys_syncfs /* 500 */ | ||
518 | 522 | ||
519 | .size sys_call_table, . - sys_call_table | 523 | .size sys_call_table, . - sys_call_table |
520 | .type sys_call_table, @object | 524 | .type sys_call_table, @object |
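For reference, a hedged userspace sketch exercising one of the newly wired syscalls, using the alpha number from this patch (497); struct file_handle is declared by hand here, since the sketch assumes a libc without a wrapper:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#define NR_name_to_handle_at 497	/* alpha number from this patch */

	struct my_file_handle {			/* mirrors struct file_handle */
		unsigned int handle_bytes;
		int handle_type;
		unsigned char f_handle[128];
	};

	int main(void)
	{
		struct my_file_handle fh = { .handle_bytes = 128 };
		int mount_id;

		if (syscall(NR_name_to_handle_at, AT_FDCWD, "/etc/hostname",
			    &fh, &mount_id, 0) == 0)
			printf("handle: %u bytes, type %d, mount %d\n",
			       fh.handle_bytes, fh.handle_type, mount_id);
		return 0;
	}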
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 918e8e0b72ff..818e74ed45dc 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c | |||
@@ -375,8 +375,7 @@ static struct clocksource clocksource_rpcc = { | |||
375 | 375 | ||
376 | static inline void register_rpcc_clocksource(long cycle_freq) | 376 | static inline void register_rpcc_clocksource(long cycle_freq) |
377 | { | 377 | { |
378 | clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4); | 378 | clocksource_register_hz(&clocksource_rpcc, cycle_freq); |
379 | clocksource_register(&clocksource_rpcc); | ||
380 | } | 379 | } |
381 | #else /* !CONFIG_SMP */ | 380 | #else /* !CONFIG_SMP */ |
382 | static inline void register_rpcc_clocksource(long cycle_freq) | 381 | static inline void register_rpcc_clocksource(long cycle_freq) |
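The time.c hunk above replaces the manual mult/shift computation with frequency-based registration. A sketch of the new-style registration under hypothetical foo_* names (not from this patch):

	#include <linux/clocksource.h>

	static cycle_t foo_read(struct clocksource *cs)
	{
		return 0;	/* would read the hardware cycle counter */
	}

	static struct clocksource foo_cs = {
		.name	= "foo",
		.rating	= 300,
		.read	= foo_read,
		.mask	= CLOCKSOURCE_MASK(32),
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	};

	static void foo_register_clocksource(long cycle_freq)
	{
		/* mult and shift are derived from the frequency by the
		 * clocksource core, replacing the old two-step
		 * clocksource_calc_mult_shift() + clocksource_register(). */
		clocksource_register_hz(&foo_cs, cycle_freq);
	}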
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 8ebbb511c783..0c6852d93506 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile | |||
@@ -74,7 +74,7 @@ ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT) | |||
74 | ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) | 74 | ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) |
75 | else | 75 | else |
76 | ZTEXTADDR := 0 | 76 | ZTEXTADDR := 0 |
77 | ZBSSADDR := ALIGN(4) | 77 | ZBSSADDR := ALIGN(8) |
78 | endif | 78 | endif |
79 | 79 | ||
80 | SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ | 80 | SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ |
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index adf583cd0c35..49f5b2eaaa87 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
@@ -179,15 +179,14 @@ not_angel: | |||
179 | bl cache_on | 179 | bl cache_on |
180 | 180 | ||
181 | restart: adr r0, LC0 | 181 | restart: adr r0, LC0 |
182 | ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12} | 182 | ldmia r0, {r1, r2, r3, r6, r9, r11, r12} |
183 | ldr sp, [r0, #32] | 183 | ldr sp, [r0, #28] |
184 | 184 | ||
185 | /* | 185 | /* |
186 | * We might be running at a different address. We need | 186 | * We might be running at a different address. We need |
187 | * to fix up various pointers. | 187 | * to fix up various pointers. |
188 | */ | 188 | */ |
189 | sub r0, r0, r1 @ calculate the delta offset | 189 | sub r0, r0, r1 @ calculate the delta offset |
190 | add r5, r5, r0 @ _start | ||
191 | add r6, r6, r0 @ _edata | 190 | add r6, r6, r0 @ _edata |
192 | 191 | ||
193 | #ifndef CONFIG_ZBOOT_ROM | 192 | #ifndef CONFIG_ZBOOT_ROM |
@@ -206,31 +205,40 @@ restart: adr r0, LC0 | |||
206 | /* | 205 | /* |
207 | * Check to see if we will overwrite ourselves. | 206 | * Check to see if we will overwrite ourselves. |
208 | * r4 = final kernel address | 207 | * r4 = final kernel address |
209 | * r5 = start of this image | ||
210 | * r9 = size of decompressed image | 208 | * r9 = size of decompressed image |
211 | * r10 = end of this image, including bss/stack/malloc space if non XIP | 209 | * r10 = end of this image, including bss/stack/malloc space if non XIP |
212 | * We basically want: | 210 | * We basically want: |
213 | * r4 >= r10 -> OK | 211 | * r4 - 16k page directory >= r10 -> OK |
214 | * r4 + image length <= r5 -> OK | 212 | * r4 + image length <= current position (pc) -> OK |
215 | */ | 213 | */ |
214 | add r10, r10, #16384 | ||
216 | cmp r4, r10 | 215 | cmp r4, r10 |
217 | bhs wont_overwrite | 216 | bhs wont_overwrite |
218 | add r10, r4, r9 | 217 | add r10, r4, r9 |
219 | cmp r10, r5 | 218 | ARM( cmp r10, pc ) |
219 | THUMB( mov lr, pc ) | ||
220 | THUMB( cmp r10, lr ) | ||
220 | bls wont_overwrite | 221 | bls wont_overwrite |
221 | 222 | ||
222 | /* | 223 | /* |
223 | * Relocate ourselves past the end of the decompressed kernel. | 224 | * Relocate ourselves past the end of the decompressed kernel. |
224 | * r5 = start of this image | ||
225 | * r6 = _edata | 225 | * r6 = _edata |
226 | * r10 = end of the decompressed kernel | 226 | * r10 = end of the decompressed kernel |
227 | * Because we always copy ahead, we need to do it from the end and go | 227 | * Because we always copy ahead, we need to do it from the end and go |
228 | * backward in case the source and destination overlap. | 228 | * backward in case the source and destination overlap. |
229 | */ | 229 | */ |
230 | /* Round up to next 256-byte boundary. */ | 230 | /* |
231 | add r10, r10, #256 | 231 | * Bump to the next 256-byte boundary with the size of |
232 | * the relocation code added. This avoids overwriting | ||
233 | * ourself when the offset is small. | ||
234 | */ | ||
235 | add r10, r10, #((reloc_code_end - restart + 256) & ~255) | ||
232 | bic r10, r10, #255 | 236 | bic r10, r10, #255 |
233 | 237 | ||
238 | /* Get start of code we want to copy and align it down. */ | ||
239 | adr r5, restart | ||
240 | bic r5, r5, #31 | ||
241 | |||
234 | sub r9, r6, r5 @ size to copy | 242 | sub r9, r6, r5 @ size to copy |
235 | add r9, r9, #31 @ rounded up to a multiple | 243 | add r9, r9, #31 @ rounded up to a multiple |
236 | bic r9, r9, #31 @ ... of 32 bytes | 244 | bic r9, r9, #31 @ ... of 32 bytes |
@@ -245,6 +253,11 @@ restart: adr r0, LC0 | |||
245 | /* Preserve offset to relocated code. */ | 253 | /* Preserve offset to relocated code. */ |
246 | sub r6, r9, r6 | 254 | sub r6, r9, r6 |
247 | 255 | ||
256 | #ifndef CONFIG_ZBOOT_ROM | ||
257 | /* cache_clean_flush may use the stack, so relocate it */ | ||
258 | add sp, sp, r6 | ||
259 | #endif | ||
260 | |||
248 | bl cache_clean_flush | 261 | bl cache_clean_flush |
249 | 262 | ||
250 | adr r0, BSYM(restart) | 263 | adr r0, BSYM(restart) |
@@ -333,7 +346,6 @@ not_relocated: mov r0, #0 | |||
333 | LC0: .word LC0 @ r1 | 346 | LC0: .word LC0 @ r1 |
334 | .word __bss_start @ r2 | 347 | .word __bss_start @ r2 |
335 | .word _end @ r3 | 348 | .word _end @ r3 |
336 | .word _start @ r5 | ||
337 | .word _edata @ r6 | 349 | .word _edata @ r6 |
338 | .word _image_size @ r9 | 350 | .word _image_size @ r9 |
339 | .word _got_start @ r11 | 351 | .word _got_start @ r11 |
@@ -1062,6 +1074,7 @@ memdump: mov r12, r0 | |||
1062 | #endif | 1074 | #endif |
1063 | 1075 | ||
1064 | .ltorg | 1076 | .ltorg |
1077 | reloc_code_end: | ||
1065 | 1078 | ||
1066 | .align | 1079 | .align |
1067 | .section ".stack", "aw", %nobits | 1080 | .section ".stack", "aw", %nobits |
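The head.S rework drops the stored _start value entirely: the decompressor now measures against the live pc, reserves 16 KiB below the final kernel address for the initial page tables, bumps the relocation target by the size of the copy code (everything up to the new reloc_code_end label) so a small offset cannot make it overwrite itself, and moves the stack before cache_clean_flush can use it. A C model of the overlap decision, illustrative only (the assembly above is authoritative), with names taken from the register comments:

/* final = r4, img_end = r10, img_len = r9, here = pc */
static int must_relocate(unsigned long final, unsigned long img_end,
			 unsigned long img_len, unsigned long here)
{
	/* 16K page directory now sits just below the final address */
	if (final >= img_end + 16384)
		return 0;		/* decompress in place, above us */
	if (final + img_len <= here)
		return 0;		/* decompressed image ends below us */
	return 1;			/* copy ourselves past the kernel first */
}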
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in index 5309909d7282..ea80abe78844 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.in +++ b/arch/arm/boot/compressed/vmlinux.lds.in | |||
@@ -54,6 +54,7 @@ SECTIONS | |||
54 | .bss : { *(.bss) } | 54 | .bss : { *(.bss) } |
55 | _end = .; | 55 | _end = .; |
56 | 56 | ||
57 | . = ALIGN(8); /* the stack must be 64-bit aligned */ | ||
57 | .stack : { *(.stack) } | 58 | .stack : { *(.stack) } |
58 | 59 | ||
59 | .stab 0 : { *(.stab) } | 60 | .stab 0 : { *(.stab) } |
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c index 113085a77123..7aa4262ada7a 100644 --- a/arch/arm/common/vic.c +++ b/arch/arm/common/vic.c | |||
@@ -22,17 +22,16 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/list.h> | 23 | #include <linux/list.h> |
24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
25 | #include <linux/sysdev.h> | 25 | #include <linux/syscore_ops.h> |
26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
27 | #include <linux/amba/bus.h> | 27 | #include <linux/amba/bus.h> |
28 | 28 | ||
29 | #include <asm/mach/irq.h> | 29 | #include <asm/mach/irq.h> |
30 | #include <asm/hardware/vic.h> | 30 | #include <asm/hardware/vic.h> |
31 | 31 | ||
32 | #if defined(CONFIG_PM) | 32 | #ifdef CONFIG_PM |
33 | /** | 33 | /** |
34 | * struct vic_device - VIC PM device | 34 | * struct vic_device - VIC PM device |
35 | * @sysdev: The system device which is registered. | ||
36 | * @irq: The IRQ number for the base of the VIC. | 35 | * @irq: The IRQ number for the base of the VIC. |
37 | * @base: The register base for the VIC. | 36 | * @base: The register base for the VIC. |
38 | * @resume_sources: A bitmask of interrupts for resume. | 37 | * @resume_sources: A bitmask of interrupts for resume. |
@@ -43,8 +42,6 @@ | |||
43 | * @protect: Save for VIC_PROTECT. | 42 | * @protect: Save for VIC_PROTECT. |
44 | */ | 43 | */ |
45 | struct vic_device { | 44 | struct vic_device { |
46 | struct sys_device sysdev; | ||
47 | |||
48 | void __iomem *base; | 45 | void __iomem *base; |
49 | int irq; | 46 | int irq; |
50 | u32 resume_sources; | 47 | u32 resume_sources; |
@@ -59,11 +56,6 @@ struct vic_device { | |||
59 | static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; | 56 | static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; |
60 | 57 | ||
61 | static int vic_id; | 58 | static int vic_id; |
62 | |||
63 | static inline struct vic_device *to_vic(struct sys_device *sys) | ||
64 | { | ||
65 | return container_of(sys, struct vic_device, sysdev); | ||
66 | } | ||
67 | #endif /* CONFIG_PM */ | 59 | #endif /* CONFIG_PM */ |
68 | 60 | ||
69 | /** | 61 | /** |
@@ -85,10 +77,9 @@ static void vic_init2(void __iomem *base) | |||
85 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); | 77 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); |
86 | } | 78 | } |
87 | 79 | ||
88 | #if defined(CONFIG_PM) | 80 | #ifdef CONFIG_PM |
89 | static int vic_class_resume(struct sys_device *dev) | 81 | static void resume_one_vic(struct vic_device *vic) |
90 | { | 82 | { |
91 | struct vic_device *vic = to_vic(dev); | ||
92 | void __iomem *base = vic->base; | 83 | void __iomem *base = vic->base; |
93 | 84 | ||
94 | printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base); | 85 | printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base); |
@@ -107,13 +98,18 @@ static int vic_class_resume(struct sys_device *dev) | |||
107 | 98 | ||
108 | writel(vic->soft_int, base + VIC_INT_SOFT); | 99 | writel(vic->soft_int, base + VIC_INT_SOFT); |
109 | writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); | 100 | writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); |
101 | } | ||
110 | 102 | ||
111 | return 0; | 103 | static void vic_resume(void) |
104 | { | ||
105 | int id; | ||
106 | |||
107 | for (id = vic_id - 1; id >= 0; id--) | ||
108 | resume_one_vic(vic_devices + id); | ||
112 | } | 109 | } |
113 | 110 | ||
114 | static int vic_class_suspend(struct sys_device *dev, pm_message_t state) | 111 | static void suspend_one_vic(struct vic_device *vic) |
115 | { | 112 | { |
116 | struct vic_device *vic = to_vic(dev); | ||
117 | void __iomem *base = vic->base; | 113 | void __iomem *base = vic->base; |
118 | 114 | ||
119 | printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base); | 115 | printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base); |
@@ -128,14 +124,21 @@ static int vic_class_suspend(struct sys_device *dev, pm_message_t state) | |||
128 | 124 | ||
129 | writel(vic->resume_irqs, base + VIC_INT_ENABLE); | 125 | writel(vic->resume_irqs, base + VIC_INT_ENABLE); |
130 | writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); | 126 | writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); |
127 | } | ||
128 | |||
129 | static int vic_suspend(void) | ||
130 | { | ||
131 | int id; | ||
132 | |||
133 | for (id = 0; id < vic_id; id++) | ||
134 | suspend_one_vic(vic_devices + id); | ||
131 | 135 | ||
132 | return 0; | 136 | return 0; |
133 | } | 137 | } |
134 | 138 | ||
135 | struct sysdev_class vic_class = { | 139 | struct syscore_ops vic_syscore_ops = { |
136 | .name = "vic", | 140 | .suspend = vic_suspend, |
137 | .suspend = vic_class_suspend, | 141 | .resume = vic_resume, |
138 | .resume = vic_class_resume, | ||
139 | }; | 142 | }; |
140 | 143 | ||
141 | /** | 144 | /** |
@@ -147,30 +150,8 @@ struct sysdev_class vic_class = { | |||
147 | */ | 150 | */ |
148 | static int __init vic_pm_init(void) | 151 | static int __init vic_pm_init(void) |
149 | { | 152 | { |
150 | struct vic_device *dev = vic_devices; | 153 | if (vic_id > 0) |
151 | int err; | 154 | register_syscore_ops(&vic_syscore_ops); |
152 | int id; | ||
153 | |||
154 | if (vic_id == 0) | ||
155 | return 0; | ||
156 | |||
157 | err = sysdev_class_register(&vic_class); | ||
158 | if (err) { | ||
159 | printk(KERN_ERR "%s: cannot register class\n", __func__); | ||
160 | return err; | ||
161 | } | ||
162 | |||
163 | for (id = 0; id < vic_id; id++, dev++) { | ||
164 | dev->sysdev.id = id; | ||
165 | dev->sysdev.cls = &vic_class; | ||
166 | |||
167 | err = sysdev_register(&dev->sysdev); | ||
168 | if (err) { | ||
169 | printk(KERN_ERR "%s: failed to register device\n", | ||
170 | __func__); | ||
171 | return err; | ||
172 | } | ||
173 | } | ||
174 | 155 | ||
175 | return 0; | 156 | return 0; |
176 | } | 157 | } |
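This vic.c conversion is the pattern the rest of the series repeats (leds, the ARM system timer, Integrator, EXYNOS4 and the PXA clock/IRQ code below): the per-device sysdev class with its fallible two-stage registration becomes a single struct syscore_ops, registered once and invoked late in suspend with one CPU online and interrupts disabled. Note the changed signatures: syscore suspend still returns int (it may veto the transition), while resume and shutdown return void. The conversion in miniature, assuming hypothetical save_hw_state()/restore_hw_state() helpers:

#include <linux/syscore_ops.h>

static int demo_suspend(void)
{
	save_hw_state();		/* hypothetical register save */
	return 0;
}

static void demo_resume(void)
{
	restore_hw_state();		/* hypothetical register restore */
}

static struct syscore_ops demo_syscore_ops = {
	.suspend = demo_suspend,
	.resume  = demo_resume,
};

static int __init demo_init(void)
{
	register_syscore_ops(&demo_syscore_ops);	/* cannot fail */
	return 0;
}
device_initcall(demo_init);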
diff --git a/arch/arm/include/asm/i8253.h b/arch/arm/include/asm/i8253.h new file mode 100644 index 000000000000..70656b69d5ce --- /dev/null +++ b/arch/arm/include/asm/i8253.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef __ASMARM_I8253_H | ||
2 | #define __ASMARM_I8253_H | ||
3 | |||
4 | /* i8253A PIT registers */ | ||
5 | #define PIT_MODE 0x43 | ||
6 | #define PIT_CH0 0x40 | ||
7 | |||
8 | #define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) | ||
9 | |||
10 | extern raw_spinlock_t i8253_lock; | ||
11 | |||
12 | #define outb_pit outb_p | ||
13 | #define inb_pit inb_p | ||
14 | |||
15 | #endif | ||
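The new asm/i8253.h exports the PIT constants and accessors that shared i8253 code expects from an architecture, which is what lets the footbridge platforms below drop their private PIT clocksource: PIT_LATCH is the channel-0 reload value for one tick at HZ, and outb_pit/inb_pit alias the ordinary slow port accessors. An illustrative periodic-mode setup written against this header (a sketch of what common PIT code does, not a drop-in driver):

#include <linux/spinlock.h>
#include <asm/i8253.h>

static void pit_start_periodic(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8253_lock, flags);
	outb_pit(0x34, PIT_MODE);		/* ch0, lo/hi byte, mode 2 */
	outb_pit(PIT_LATCH & 0xff, PIT_CH0);	/* reload value, LSB */
	outb_pit(PIT_LATCH >> 8, PIT_CH0);	/* reload value, MSB */
	raw_spin_unlock_irqrestore(&i8253_lock, flags);
}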
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h index 883f6be5117a..d5adaae5ee2c 100644 --- a/arch/arm/include/asm/mach/time.h +++ b/arch/arm/include/asm/mach/time.h | |||
@@ -34,7 +34,6 @@ | |||
34 | * timer interrupt which may be pending. | 34 | * timer interrupt which may be pending. |
35 | */ | 35 | */ |
36 | struct sys_timer { | 36 | struct sys_timer { |
37 | struct sys_device dev; | ||
38 | void (*init)(void); | 37 | void (*init)(void); |
39 | void (*suspend)(void); | 38 | void (*suspend)(void); |
40 | void (*resume)(void); | 39 | void (*resume)(void); |
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 885be097769d..832888d0c20c 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h | |||
@@ -159,7 +159,7 @@ extern unsigned int user_debug; | |||
159 | #include <mach/barriers.h> | 159 | #include <mach/barriers.h> |
160 | #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) | 160 | #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) |
161 | #define mb() do { dsb(); outer_sync(); } while (0) | 161 | #define mb() do { dsb(); outer_sync(); } while (0) |
162 | #define rmb() dmb() | 162 | #define rmb() dsb() |
163 | #define wmb() mb() | 163 | #define wmb() mb() |
164 | #else | 164 | #else |
165 | #include <asm/memory.h> | 165 | #include <asm/memory.h> |
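The rmb() change swaps the ARM read barrier from dmb() to the heavier dsb(), which also waits for outstanding accesses to complete; that matters when the data being read was produced by an agent outside the CPUs, such as a DMA master. A consumer-side sketch of the pattern this protects, assuming a device that fills a buffer and then raises a completion flag in memory:

#include <linux/types.h>
#include <asm/system.h>

static int read_completion(const volatile u32 *done, const u32 *buf, u32 *out)
{
	if (!*done)
		return 0;	/* device not finished yet */
	rmb();			/* order the flag read before the data reads */
	*out = buf[0];		/* data reads issued only after the flag read */
	return 1;
}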
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c index 31a316c1777b..0f107dcb0347 100644 --- a/arch/arm/kernel/leds.c +++ b/arch/arm/kernel/leds.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/sysdev.h> | 12 | #include <linux/sysdev.h> |
13 | #include <linux/syscore_ops.h> | ||
13 | 14 | ||
14 | #include <asm/leds.h> | 15 | #include <asm/leds.h> |
15 | 16 | ||
@@ -69,36 +70,37 @@ static ssize_t leds_store(struct sys_device *dev, | |||
69 | 70 | ||
70 | static SYSDEV_ATTR(event, 0200, NULL, leds_store); | 71 | static SYSDEV_ATTR(event, 0200, NULL, leds_store); |
71 | 72 | ||
72 | static int leds_suspend(struct sys_device *dev, pm_message_t state) | 73 | static struct sysdev_class leds_sysclass = { |
74 | .name = "leds", | ||
75 | }; | ||
76 | |||
77 | static struct sys_device leds_device = { | ||
78 | .id = 0, | ||
79 | .cls = &leds_sysclass, | ||
80 | }; | ||
81 | |||
82 | static int leds_suspend(void) | ||
73 | { | 83 | { |
74 | leds_event(led_stop); | 84 | leds_event(led_stop); |
75 | return 0; | 85 | return 0; |
76 | } | 86 | } |
77 | 87 | ||
78 | static int leds_resume(struct sys_device *dev) | 88 | static void leds_resume(void) |
79 | { | 89 | { |
80 | leds_event(led_start); | 90 | leds_event(led_start); |
81 | return 0; | ||
82 | } | 91 | } |
83 | 92 | ||
84 | static int leds_shutdown(struct sys_device *dev) | 93 | static void leds_shutdown(void) |
85 | { | 94 | { |
86 | leds_event(led_halted); | 95 | leds_event(led_halted); |
87 | return 0; | ||
88 | } | 96 | } |
89 | 97 | ||
90 | static struct sysdev_class leds_sysclass = { | 98 | static struct syscore_ops leds_syscore_ops = { |
91 | .name = "leds", | ||
92 | .shutdown = leds_shutdown, | 99 | .shutdown = leds_shutdown, |
93 | .suspend = leds_suspend, | 100 | .suspend = leds_suspend, |
94 | .resume = leds_resume, | 101 | .resume = leds_resume, |
95 | }; | 102 | }; |
96 | 103 | ||
97 | static struct sys_device leds_device = { | ||
98 | .id = 0, | ||
99 | .cls = &leds_sysclass, | ||
100 | }; | ||
101 | |||
102 | static int __init leds_init(void) | 104 | static int __init leds_init(void) |
103 | { | 105 | { |
104 | int ret; | 106 | int ret; |
@@ -107,6 +109,8 @@ static int __init leds_init(void) | |||
107 | ret = sysdev_register(&leds_device); | 109 | ret = sysdev_register(&leds_device); |
108 | if (ret == 0) | 110 | if (ret == 0) |
109 | ret = sysdev_create_file(&leds_device, &attr_event); | 111 | ret = sysdev_create_file(&leds_device, &attr_event); |
112 | if (ret == 0) | ||
113 | register_syscore_ops(&leds_syscore_ops); | ||
110 | return ret; | 114 | return ret; |
111 | } | 115 | } |
112 | 116 | ||
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index cb8398317644..0340224cf73c 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -597,19 +597,13 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | |||
597 | return err; | 597 | return err; |
598 | } | 598 | } |
599 | 599 | ||
600 | static inline void setup_syscall_restart(struct pt_regs *regs) | ||
601 | { | ||
602 | regs->ARM_r0 = regs->ARM_ORIG_r0; | ||
603 | regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; | ||
604 | } | ||
605 | |||
606 | /* | 600 | /* |
607 | * OK, we're invoking a handler | 601 | * OK, we're invoking a handler |
608 | */ | 602 | */ |
609 | static int | 603 | static int |
610 | handle_signal(unsigned long sig, struct k_sigaction *ka, | 604 | handle_signal(unsigned long sig, struct k_sigaction *ka, |
611 | siginfo_t *info, sigset_t *oldset, | 605 | siginfo_t *info, sigset_t *oldset, |
612 | struct pt_regs * regs, int syscall) | 606 | struct pt_regs * regs) |
613 | { | 607 | { |
614 | struct thread_info *thread = current_thread_info(); | 608 | struct thread_info *thread = current_thread_info(); |
615 | struct task_struct *tsk = current; | 609 | struct task_struct *tsk = current; |
@@ -617,26 +611,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
617 | int ret; | 611 | int ret; |
618 | 612 | ||
619 | /* | 613 | /* |
620 | * If we were from a system call, check for system call restarting... | ||
621 | */ | ||
622 | if (syscall) { | ||
623 | switch (regs->ARM_r0) { | ||
624 | case -ERESTART_RESTARTBLOCK: | ||
625 | case -ERESTARTNOHAND: | ||
626 | regs->ARM_r0 = -EINTR; | ||
627 | break; | ||
628 | case -ERESTARTSYS: | ||
629 | if (!(ka->sa.sa_flags & SA_RESTART)) { | ||
630 | regs->ARM_r0 = -EINTR; | ||
631 | break; | ||
632 | } | ||
633 | /* fallthrough */ | ||
634 | case -ERESTARTNOINTR: | ||
635 | setup_syscall_restart(regs); | ||
636 | } | ||
637 | } | ||
638 | |||
639 | /* | ||
640 | * translate the signal | 614 | * translate the signal |
641 | */ | 615 | */ |
642 | if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) | 616 | if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) |
@@ -685,6 +659,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
685 | */ | 659 | */ |
686 | static void do_signal(struct pt_regs *regs, int syscall) | 660 | static void do_signal(struct pt_regs *regs, int syscall) |
687 | { | 661 | { |
662 | unsigned int retval = 0, continue_addr = 0, restart_addr = 0; | ||
688 | struct k_sigaction ka; | 663 | struct k_sigaction ka; |
689 | siginfo_t info; | 664 | siginfo_t info; |
690 | int signr; | 665 | int signr; |
@@ -698,18 +673,61 @@ static void do_signal(struct pt_regs *regs, int syscall) | |||
698 | if (!user_mode(regs)) | 673 | if (!user_mode(regs)) |
699 | return; | 674 | return; |
700 | 675 | ||
676 | /* | ||
677 | * If we were from a system call, check for system call restarting... | ||
678 | */ | ||
679 | if (syscall) { | ||
680 | continue_addr = regs->ARM_pc; | ||
681 | restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4); | ||
682 | retval = regs->ARM_r0; | ||
683 | |||
684 | /* | ||
685 | * Prepare for system call restart. We do this here so that a | ||
686 | * debugger will see the already changed PSW. | ||
687 | */ | ||
688 | switch (retval) { | ||
689 | case -ERESTARTNOHAND: | ||
690 | case -ERESTARTSYS: | ||
691 | case -ERESTARTNOINTR: | ||
692 | regs->ARM_r0 = regs->ARM_ORIG_r0; | ||
693 | regs->ARM_pc = restart_addr; | ||
694 | break; | ||
695 | case -ERESTART_RESTARTBLOCK: | ||
696 | regs->ARM_r0 = -EINTR; | ||
697 | break; | ||
698 | } | ||
699 | } | ||
700 | |||
701 | if (try_to_freeze()) | 701 | if (try_to_freeze()) |
702 | goto no_signal; | 702 | goto no_signal; |
703 | 703 | ||
704 | /* | ||
705 | * Get the signal to deliver. When running under ptrace, at this | ||
706 | * point the debugger may change all our registers ... | ||
707 | */ | ||
704 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 708 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
705 | if (signr > 0) { | 709 | if (signr > 0) { |
706 | sigset_t *oldset; | 710 | sigset_t *oldset; |
707 | 711 | ||
712 | /* | ||
713 | * Depending on the signal settings we may need to revert the | ||
714 | * decision to restart the system call. But skip this if a | ||
715 | * debugger has chosen to restart at a different PC. | ||
716 | */ | ||
717 | if (regs->ARM_pc == restart_addr) { | ||
718 | if (retval == -ERESTARTNOHAND | ||
719 | || (retval == -ERESTARTSYS | ||
720 | && !(ka.sa.sa_flags & SA_RESTART))) { | ||
721 | regs->ARM_r0 = -EINTR; | ||
722 | regs->ARM_pc = continue_addr; | ||
723 | } | ||
724 | } | ||
725 | |||
708 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 726 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
709 | oldset = &current->saved_sigmask; | 727 ||
710 | else | 728 | else |
711 | oldset = &current->blocked; | 729 ||
712 | if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) { | 730 | if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { |
713 | /* | 731 | /* |
714 | * A signal was successfully delivered; the saved | 732 | * A signal was successfully delivered; the saved |
715 | * sigmask will have been stored in the signal frame, | 733 | * sigmask will have been stored in the signal frame, |
@@ -723,11 +741,14 @@ static void do_signal(struct pt_regs *regs, int syscall) | |||
723 | } | 741 | } |
724 | 742 | ||
725 | no_signal: | 743 | no_signal: |
726 | /* | ||
727 | * No signal to deliver to the process - restart the syscall. | ||
728 | */ | ||
729 | if (syscall) { | 744 | if (syscall) { |
730 | if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { | 745 | /* |
746 | * Handle restarting a different system call. As above, | ||
747 | * if a debugger has chosen to restart at a different PC, | ||
748 | * ignore the restart. | ||
749 | */ | ||
750 | if (retval == -ERESTART_RESTARTBLOCK | ||
751 | && regs->ARM_pc == continue_addr) { | ||
731 | if (thumb_mode(regs)) { | 752 | if (thumb_mode(regs)) { |
732 | regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; | 753 | regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; |
733 | regs->ARM_pc -= 2; | 754 | regs->ARM_pc -= 2; |
@@ -750,11 +771,6 @@ static void do_signal(struct pt_regs *regs, int syscall) | |||
750 | #endif | 771 | #endif |
751 | } | 772 | } |
752 | } | 773 | } |
753 | if (regs->ARM_r0 == -ERESTARTNOHAND || | ||
754 | regs->ARM_r0 == -ERESTARTSYS || | ||
755 | regs->ARM_r0 == -ERESTARTNOINTR) { | ||
756 | setup_syscall_restart(regs); | ||
757 | } | ||
758 | 774 | ||
759 | /* If there's no signal to deliver, we just put the saved sigmask | 775 | /* If there's no signal to deliver, we just put the saved sigmask |
760 | * back. | 776 | * back. |
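The signal.c rework moves the restart decision in front of get_signal_to_deliver(): continue_addr and restart_addr are computed once, the PC is rewound before a ptrace debugger can inspect the registers, and the rewind is undone only when the handler forbids restarting (-ERESTARTNOHAND, or -ERESTARTSYS without SA_RESTART) and the debugger has not already moved the PC elsewhere. The user-visible semantics are the standard ones, as this self-contained userspace demo shows (runnable on any Linux; without SA_RESTART in sa_flags the in-kernel -ERESTARTSYS surfaces as -EINTR):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_alarm(int sig) { (void)sig; }

int main(void)
{
	struct sigaction sa = { .sa_handler = on_alarm };	/* no SA_RESTART */
	char c;

	sigaction(SIGALRM, &sa, NULL);
	alarm(1);		/* interrupt the read() below after 1s */
	if (read(STDIN_FILENO, &c, 1) < 0 && errno == EINTR)
		printf("read() was not restarted: got EINTR\n");
	return 0;
}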
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index f29b8a29b174..007a0a950e75 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -560,10 +560,7 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) | |||
560 | break; | 560 | break; |
561 | 561 | ||
562 | case IPI_RESCHEDULE: | 562 | case IPI_RESCHEDULE: |
563 | /* | 563 | scheduler_ipi(); |
564 | * nothing more to do - everything is | ||
565 | * done on the interrupt return path | ||
566 | */ | ||
567 | break; | 564 | break; |
568 | 565 | ||
569 | case IPI_CALL_FUNC: | 566 | case IPI_CALL_FUNC: |
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index 1ff46cabc7ef..cb634c3e28e9 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/timex.h> | 21 | #include <linux/timex.h> |
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/profile.h> | 23 | #include <linux/profile.h> |
24 | #include <linux/sysdev.h> | 24 | #include <linux/syscore_ops.h> |
25 | #include <linux/timer.h> | 25 | #include <linux/timer.h> |
26 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
27 | 27 | ||
@@ -115,48 +115,37 @@ void timer_tick(void) | |||
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) | 117 | #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) |
118 | static int timer_suspend(struct sys_device *dev, pm_message_t state) | 118 | static int timer_suspend(void) |
119 | { | 119 | { |
120 | struct sys_timer *timer = container_of(dev, struct sys_timer, dev); | 120 | if (system_timer->suspend) |
121 | 121 | system_timer->suspend(); | |
122 | if (timer->suspend != NULL) | ||
123 | timer->suspend(); | ||
124 | 122 | ||
125 | return 0; | 123 | return 0; |
126 | } | 124 | } |
127 | 125 | ||
128 | static int timer_resume(struct sys_device *dev) | 126 | static void timer_resume(void) |
129 | { | 127 | { |
130 | struct sys_timer *timer = container_of(dev, struct sys_timer, dev); | 128 | if (system_timer->resume) |
131 | 129 | system_timer->resume(); | |
132 | if (timer->resume != NULL) | ||
133 | timer->resume(); | ||
134 | |||
135 | return 0; | ||
136 | } | 130 | } |
137 | #else | 131 | #else |
138 | #define timer_suspend NULL | 132 | #define timer_suspend NULL |
139 | #define timer_resume NULL | 133 | #define timer_resume NULL |
140 | #endif | 134 | #endif |
141 | 135 | ||
142 | static struct sysdev_class timer_sysclass = { | 136 | static struct syscore_ops timer_syscore_ops = { |
143 | .name = "timer", | ||
144 | .suspend = timer_suspend, | 137 | .suspend = timer_suspend, |
145 | .resume = timer_resume, | 138 | .resume = timer_resume, |
146 | }; | 139 | }; |
147 | 140 | ||
148 | static int __init timer_init_sysfs(void) | 141 | static int __init timer_init_syscore_ops(void) |
149 | { | 142 | { |
150 | int ret = sysdev_class_register(&timer_sysclass); | 143 | register_syscore_ops(&timer_syscore_ops); |
151 | if (ret == 0) { | ||
152 | system_timer->dev.cls = &timer_sysclass; | ||
153 | ret = sysdev_register(&system_timer->dev); | ||
154 | } | ||
155 | 144 | ||
156 | return ret; | 145 | return 0; |
157 | } | 146 | } |
158 | 147 | ||
159 | device_initcall(timer_init_sysfs); | 148 | device_initcall(timer_init_syscore_ops); |
160 | 149 | ||
161 | void __init time_init(void) | 150 | void __init time_init(void) |
162 | { | 151 | { |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 3b54ad19d489..d52eec268b47 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -234,7 +234,6 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt | |||
234 | 234 | ||
235 | printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", | 235 | printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", |
236 | str, err, ++die_counter); | 236 | str, err, ++die_counter); |
237 | sysfs_printk_last_file(); | ||
238 | 237 | ||
239 | /* trap and error numbers are mostly meaningless on ARM */ | 238 | /* trap and error numbers are mostly meaningless on ARM */ |
240 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); | 239 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); |
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c index 0a95be1512bb..41669ecc1f91 100644 --- a/arch/arm/mach-davinci/cpufreq.c +++ b/arch/arm/mach-davinci/cpufreq.c | |||
@@ -94,9 +94,7 @@ static int davinci_target(struct cpufreq_policy *policy, | |||
94 | if (freqs.old == freqs.new) | 94 | if (freqs.old == freqs.new) |
95 | return ret; | 95 | return ret; |
96 | 96 | ||
97 | cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, | 97 | dev_dbg(&cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); |
98 | dev_driver_string(cpufreq.dev), | ||
99 | "transition: %u --> %u\n", freqs.old, freqs.new); | ||
100 | 98 | ||
101 | ret = cpufreq_frequency_table_target(policy, pdata->freq_table, | 99 | ret = cpufreq_frequency_table_target(policy, pdata->freq_table, |
102 | freqs.new, relation, &idx); | 100 | freqs.new, relation, &idx); |
diff --git a/arch/arm/mach-exynos4/pm.c b/arch/arm/mach-exynos4/pm.c index 10d917d9e3ad..8755ca8dd48d 100644 --- a/arch/arm/mach-exynos4/pm.c +++ b/arch/arm/mach-exynos4/pm.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/suspend.h> | 18 | #include <linux/suspend.h> |
19 | #include <linux/syscore_ops.h> | ||
19 | #include <linux/io.h> | 20 | #include <linux/io.h> |
20 | 21 | ||
21 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
@@ -372,7 +373,27 @@ void exynos4_scu_enable(void __iomem *scu_base) | |||
372 | flush_cache_all(); | 373 | flush_cache_all(); |
373 | } | 374 | } |
374 | 375 | ||
375 | static int exynos4_pm_resume(struct sys_device *dev) | 376 | static struct sysdev_driver exynos4_pm_driver = { |
377 | .add = exynos4_pm_add, | ||
378 | }; | ||
379 | |||
380 | static __init int exynos4_pm_drvinit(void) | ||
381 | { | ||
382 | unsigned int tmp; | ||
383 | |||
384 | s3c_pm_init(); | ||
385 | |||
386 | /* All wakeup disable */ | ||
387 | |||
388 | tmp = __raw_readl(S5P_WAKEUP_MASK); | ||
389 | tmp |= ((0xFF << 8) | (0x1F << 1)); | ||
390 | __raw_writel(tmp, S5P_WAKEUP_MASK); | ||
391 | |||
392 | return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver); | ||
393 | } | ||
394 | arch_initcall(exynos4_pm_drvinit); | ||
395 | |||
396 | static void exynos4_pm_resume(void) | ||
376 | { | 397 | { |
377 | /* For release retention */ | 398 | /* For release retention */ |
378 | 399 | ||
@@ -394,27 +415,15 @@ static int exynos4_pm_resume(struct sys_device *dev) | |||
394 | /* enable L2X0*/ | 415 | /* enable L2X0*/ |
395 | writel_relaxed(1, S5P_VA_L2CC + L2X0_CTRL); | 416 | writel_relaxed(1, S5P_VA_L2CC + L2X0_CTRL); |
396 | #endif | 417 | #endif |
397 | |||
398 | return 0; | ||
399 | } | 418 | } |
400 | 419 | ||
401 | static struct sysdev_driver exynos4_pm_driver = { | 420 | static struct syscore_ops exynos4_pm_syscore_ops = { |
402 | .add = exynos4_pm_add, | ||
403 | .resume = exynos4_pm_resume, | 421 | .resume = exynos4_pm_resume, |
404 | }; | 422 | }; |
405 | 423 | ||
406 | static __init int exynos4_pm_drvinit(void) | 424 | static __init int exynos4_pm_syscore_init(void) |
407 | { | 425 | { |
408 | unsigned int tmp; | 426 | register_syscore_ops(&exynos4_pm_syscore_ops); |
409 | 427 | return 0; | |
410 | s3c_pm_init(); | ||
411 | |||
412 | /* All wakeup disable */ | ||
413 | |||
414 | tmp = __raw_readl(S5P_WAKEUP_MASK); | ||
415 | tmp |= ((0xFF << 8) | (0x1F << 1)); | ||
416 | __raw_writel(tmp, S5P_WAKEUP_MASK); | ||
417 | |||
418 | return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver); | ||
419 | } | 428 | } |
420 | arch_initcall(exynos4_pm_drvinit); | 429 | arch_initcall(exynos4_pm_syscore_init); |
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index bdd257921cfb..46adca068f2c 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig | |||
@@ -4,6 +4,7 @@ menu "Footbridge Implementations" | |||
4 | 4 | ||
5 | config ARCH_CATS | 5 | config ARCH_CATS |
6 | bool "CATS" | 6 | bool "CATS" |
7 | select CLKSRC_I8253 | ||
7 | select FOOTBRIDGE_HOST | 8 | select FOOTBRIDGE_HOST |
8 | select ISA | 9 | select ISA |
9 | select ISA_DMA | 10 | select ISA_DMA |
@@ -59,6 +60,7 @@ config ARCH_EBSA285_HOST | |||
59 | 60 | ||
60 | config ARCH_NETWINDER | 61 | config ARCH_NETWINDER |
61 | bool "NetWinder" | 62 | bool "NetWinder" |
63 | select CLKSRC_I8253 | ||
62 | select FOOTBRIDGE_HOST | 64 | select FOOTBRIDGE_HOST |
63 | select ISA | 65 | select ISA |
64 | select ISA_DMA | 66 | select ISA_DMA |
diff --git a/arch/arm/mach-footbridge/isa-timer.c b/arch/arm/mach-footbridge/isa-timer.c index 441c6ce0d555..7020f1a3feca 100644 --- a/arch/arm/mach-footbridge/isa-timer.c +++ b/arch/arm/mach-footbridge/isa-timer.c | |||
@@ -10,53 +10,16 @@ | |||
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
12 | #include <linux/io.h> | 12 | #include <linux/io.h> |
13 | #include <linux/spinlock.h> | ||
13 | #include <linux/timex.h> | 14 | #include <linux/timex.h> |
14 | 15 | ||
15 | #include <asm/irq.h> | 16 | #include <asm/irq.h> |
16 | 17 | #include <asm/i8253.h> | |
17 | #include <asm/mach/time.h> | 18 | #include <asm/mach/time.h> |
18 | 19 | ||
19 | #include "common.h" | 20 | #include "common.h" |
20 | 21 | ||
21 | #define PIT_MODE 0x43 | 22 | DEFINE_RAW_SPINLOCK(i8253_lock); |
22 | #define PIT_CH0 0x40 | ||
23 | |||
24 | #define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) | ||
25 | |||
26 | static cycle_t pit_read(struct clocksource *cs) | ||
27 | { | ||
28 | unsigned long flags; | ||
29 | static int old_count; | ||
30 | static u32 old_jifs; | ||
31 | int count; | ||
32 | u32 jifs; | ||
33 | |||
34 | raw_local_irq_save(flags); | ||
35 | |||
36 | jifs = jiffies; | ||
37 | outb_p(0x00, PIT_MODE); /* latch the count */ | ||
38 | count = inb_p(PIT_CH0); /* read the latched count */ | ||
39 | count |= inb_p(PIT_CH0) << 8; | ||
40 | |||
41 | if (count > old_count && jifs == old_jifs) | ||
42 | count = old_count; | ||
43 | |||
44 | old_count = count; | ||
45 | old_jifs = jifs; | ||
46 | |||
47 | raw_local_irq_restore(flags); | ||
48 | |||
49 | count = (PIT_LATCH - 1) - count; | ||
50 | |||
51 | return (cycle_t)(jifs * PIT_LATCH) + count; | ||
52 | } | ||
53 | |||
54 | static struct clocksource pit_cs = { | ||
55 | .name = "pit", | ||
56 | .rating = 110, | ||
57 | .read = pit_read, | ||
58 | .mask = CLOCKSOURCE_MASK(32), | ||
59 | }; | ||
60 | 23 | ||
61 | static void pit_set_mode(enum clock_event_mode mode, | 24 | static void pit_set_mode(enum clock_event_mode mode, |
62 | struct clock_event_device *evt) | 25 | struct clock_event_device *evt) |
@@ -121,7 +84,7 @@ static void __init isa_timer_init(void) | |||
121 | pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce); | 84 | pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce); |
122 | pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce); | 85 | pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce); |
123 | 86 | ||
124 | clocksource_register_hz(&pit_cs, PIT_TICK_RATE); | 87 | clocksource_i8253_init(); |
125 | 88 | ||
126 | setup_irq(pit_ce.irq, &pit_timer_irq); | 89 | setup_irq(pit_ce.irq, &pit_timer_irq); |
127 | clockevents_register_device(&pit_ce); | 90 | clockevents_register_device(&pit_ce); |
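With CLKSRC_I8253 selected in the Kconfig hunk above, the footbridge platforms stop carrying a private pit_read()/pit_cs implementation and reuse the common i8253 clocksource driver; what remains platform-side is defining the shared i8253_lock and making one init call. A sketch of that hookup, assuming the Kconfig select is in place:

#include <linux/spinlock.h>
#include <linux/clocksource.h>
#include <asm/i8253.h>

DEFINE_RAW_SPINLOCK(i8253_lock);	/* shared with the common PIT code */

static void __init demo_timer_init(void)
{
	clocksource_i8253_init();	/* registers the common "pit" clocksource */
	/* clockevent setup remains platform-specific, as above */
}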
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index 980803ff348c..d3e96451529c 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | #include <linux/sysdev.h> | 27 | #include <linux/syscore_ops.h> |
28 | #include <linux/amba/bus.h> | 28 | #include <linux/amba/bus.h> |
29 | #include <linux/amba/kmi.h> | 29 | #include <linux/amba/kmi.h> |
30 | #include <linux/clocksource.h> | 30 | #include <linux/clocksource.h> |
@@ -180,13 +180,13 @@ static void __init ap_init_irq(void) | |||
180 | #ifdef CONFIG_PM | 180 | #ifdef CONFIG_PM |
181 | static unsigned long ic_irq_enable; | 181 | static unsigned long ic_irq_enable; |
182 | 182 | ||
183 | static int irq_suspend(struct sys_device *dev, pm_message_t state) | 183 | static int irq_suspend(void) |
184 | { | 184 | { |
185 | ic_irq_enable = readl(VA_IC_BASE + IRQ_ENABLE); | 185 | ic_irq_enable = readl(VA_IC_BASE + IRQ_ENABLE); |
186 | return 0; | 186 | return 0; |
187 | } | 187 | } |
188 | 188 | ||
189 | static int irq_resume(struct sys_device *dev) | 189 | static void irq_resume(void) |
190 | { | 190 | { |
191 | /* disable all irq sources */ | 191 | /* disable all irq sources */ |
192 | writel(-1, VA_CMIC_BASE + IRQ_ENABLE_CLEAR); | 192 | writel(-1, VA_CMIC_BASE + IRQ_ENABLE_CLEAR); |
@@ -194,33 +194,25 @@ static int irq_resume(struct sys_device *dev) | |||
194 | writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR); | 194 | writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR); |
195 | 195 | ||
196 | writel(ic_irq_enable, VA_IC_BASE + IRQ_ENABLE_SET); | 196 | writel(ic_irq_enable, VA_IC_BASE + IRQ_ENABLE_SET); |
197 | return 0; | ||
198 | } | 197 | } |
199 | #else | 198 | #else |
200 | #define irq_suspend NULL | 199 | #define irq_suspend NULL |
201 | #define irq_resume NULL | 200 | #define irq_resume NULL |
202 | #endif | 201 | #endif |
203 | 202 | ||
204 | static struct sysdev_class irq_class = { | 203 | static struct syscore_ops irq_syscore_ops = { |
205 | .name = "irq", | ||
206 | .suspend = irq_suspend, | 204 | .suspend = irq_suspend, |
207 | .resume = irq_resume, | 205 | .resume = irq_resume, |
208 | }; | 206 | }; |
209 | 207 | ||
210 | static struct sys_device irq_device = { | 208 | static int __init irq_syscore_init(void) |
211 | .id = 0, | ||
212 | .cls = &irq_class, | ||
213 | }; | ||
214 | |||
215 | static int __init irq_init_sysfs(void) | ||
216 | { | 209 | { |
217 | int ret = sysdev_class_register(&irq_class); | 210 | register_syscore_ops(&irq_syscore_ops); |
218 | if (ret == 0) | 211 | |
219 | ret = sysdev_register(&irq_device); | 212 | return 0; |
220 | return ret; | ||
221 | } | 213 | } |
222 | 214 | ||
223 | device_initcall(irq_init_sysfs); | 215 | device_initcall(irq_syscore_init); |
224 | 216 | ||
225 | /* | 217 | /* |
226 | * Flash handling. | 218 | * Flash handling. |
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c index 6588c22b8a64..fe31d933f0ed 100644 --- a/arch/arm/mach-omap1/pm_bus.c +++ b/arch/arm/mach-omap1/pm_bus.c | |||
@@ -24,75 +24,50 @@ | |||
24 | #ifdef CONFIG_PM_RUNTIME | 24 | #ifdef CONFIG_PM_RUNTIME |
25 | static int omap1_pm_runtime_suspend(struct device *dev) | 25 | static int omap1_pm_runtime_suspend(struct device *dev) |
26 | { | 26 | { |
27 | struct clk *iclk, *fclk; | 27 | int ret; |
28 | int ret = 0; | ||
29 | 28 | ||
30 | dev_dbg(dev, "%s\n", __func__); | 29 | dev_dbg(dev, "%s\n", __func__); |
31 | 30 | ||
32 | ret = pm_generic_runtime_suspend(dev); | 31 | ret = pm_generic_runtime_suspend(dev); |
32 | if (ret) | ||
33 | return ret; | ||
33 | 34 | ||
34 | fclk = clk_get(dev, "fck"); | 35 | ret = pm_runtime_clk_suspend(dev); |
35 | if (!IS_ERR(fclk)) { | 36 | if (ret) { |
36 | clk_disable(fclk); | 37 | pm_generic_runtime_resume(dev); |
37 | clk_put(fclk); | 38 | return ret; |
38 | } | ||
39 | |||
40 | iclk = clk_get(dev, "ick"); | ||
41 | if (!IS_ERR(iclk)) { | ||
42 | clk_disable(iclk); | ||
43 | clk_put(iclk); | ||
44 | } | 39 | } |
45 | 40 | ||
46 | return 0; | 41 | return 0; |
47 | }; | 42 | } |
48 | 43 | ||
49 | static int omap1_pm_runtime_resume(struct device *dev) | 44 | static int omap1_pm_runtime_resume(struct device *dev) |
50 | { | 45 | { |
51 | struct clk *iclk, *fclk; | ||
52 | |||
53 | dev_dbg(dev, "%s\n", __func__); | 46 | dev_dbg(dev, "%s\n", __func__); |
54 | 47 | ||
55 | iclk = clk_get(dev, "ick"); | 48 | pm_runtime_clk_resume(dev); |
56 | if (!IS_ERR(iclk)) { | 49 | return pm_generic_runtime_resume(dev); |
57 | clk_enable(iclk); | 50 | } |
58 | clk_put(iclk); | ||
59 | } | ||
60 | 51 | ||
61 | fclk = clk_get(dev, "fck"); | 52 | static struct dev_power_domain default_power_domain = { |
62 | if (!IS_ERR(fclk)) { | 53 | .ops = { |
63 | clk_enable(fclk); | 54 | .runtime_suspend = omap1_pm_runtime_suspend, |
64 | clk_put(fclk); | 55 | .runtime_resume = omap1_pm_runtime_resume, |
65 | } | 56 | USE_PLATFORM_PM_SLEEP_OPS |
57 | }, | ||
58 | }; | ||
66 | 59 | ||
67 | return pm_generic_runtime_resume(dev); | 60 | static struct pm_clk_notifier_block platform_bus_notifier = { |
61 | .pwr_domain = &default_power_domain, | ||
62 | .con_ids = { "ick", "fck", NULL, }, | ||
68 | }; | 63 | }; |
69 | 64 | ||
70 | static int __init omap1_pm_runtime_init(void) | 65 | static int __init omap1_pm_runtime_init(void) |
71 | { | 66 | { |
72 | const struct dev_pm_ops *pm; | ||
73 | struct dev_pm_ops *omap_pm; | ||
74 | |||
75 | if (!cpu_class_is_omap1()) | 67 | if (!cpu_class_is_omap1()) |
76 | return -ENODEV; | 68 | return -ENODEV; |
77 | 69 | ||
78 | pm = platform_bus_get_pm_ops(); | 70 | pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); |
79 | if (!pm) { | ||
80 | pr_err("%s: unable to get dev_pm_ops from platform_bus\n", | ||
81 | __func__); | ||
82 | return -ENODEV; | ||
83 | } | ||
84 | |||
85 | omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL); | ||
86 | if (!omap_pm) { | ||
87 | pr_err("%s: unable to alloc memory for new dev_pm_ops\n", | ||
88 | __func__); | ||
89 | return -ENOMEM; | ||
90 | } | ||
91 | |||
92 | omap_pm->runtime_suspend = omap1_pm_runtime_suspend; | ||
93 | omap_pm->runtime_resume = omap1_pm_runtime_resume; | ||
94 | |||
95 | platform_bus_set_pm_ops(omap_pm); | ||
96 | 71 | ||
97 | return 0; | 72 | return 0; |
98 | } | 73 | } |
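Instead of duplicating the platform bus dev_pm_ops with kmemdup() and patching two callbacks, OMAP1 now states its policy declaratively: a dev_power_domain whose runtime callbacks wrap the generic ones with pm_runtime_clk_suspend()/pm_runtime_clk_resume(), bound to every platform device by a clock notifier keyed on the "ick" and "fck" connection IDs. Drivers are unaffected; an illustrative probe showing the clocks being managed implicitly:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);	/* "ick"/"fck" enabled here */
	/* ... touch the hardware ... */
	pm_runtime_put_sync(&pdev->dev);	/* clocks gated again */
	return 0;
}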
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 512b15204450..66dfbccacd25 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile | |||
@@ -59,10 +59,10 @@ endif | |||
59 | # Power Management | 59 | # Power Management |
60 | ifeq ($(CONFIG_PM),y) | 60 | ifeq ($(CONFIG_PM),y) |
61 | obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o | 61 | obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o |
62 | obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o pm_bus.o | 62 | obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o |
63 | obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \ | 63 | obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \ |
64 | cpuidle34xx.o pm_bus.o | 64 | cpuidle34xx.o |
65 | obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o pm_bus.o | 65 | obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o |
66 | obj-$(CONFIG_PM_DEBUG) += pm-debug.o | 66 | obj-$(CONFIG_PM_DEBUG) += pm-debug.o |
67 | obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o | 67 | obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o |
68 | obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o | 68 | obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o |
diff --git a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c index b2b1e37bb6bb..d6e34dd9e7e7 100644 --- a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c +++ b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c | |||
@@ -115,6 +115,7 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate) | |||
115 | sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla, | 115 | sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla, |
116 | sdrc_cs0->actim_ctrlb, sdrc_cs0->mr, | 116 | sdrc_cs0->actim_ctrlb, sdrc_cs0->mr, |
117 | 0, 0, 0, 0); | 117 | 0, 0, 0, 0); |
118 | clk->rate = rate; | ||
118 | 119 | ||
119 | return 0; | 120 | return 0; |
120 | } | 121 | } |
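The one-line clkt34xx fix records the newly programmed frequency in clk->rate; without it, clk_get_rate() keeps reporting the pre-switch frequency after a successful CORE DPLL M2 reprogram. The general contract, sketched for a hypothetical clock (assuming an OMAP-style struct clk with a cached rate field and a made-up program_dividers() helper):

static int demo_clk_set_rate(struct clk *clk, unsigned long rate)
{
	if (program_dividers(rate))	/* hypothetical hardware poke */
		return -EINVAL;
	clk->rate = rate;		/* keep clk_get_rate() truthful */
	return 0;
}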
diff --git a/arch/arm/mach-omap2/pm_bus.c b/arch/arm/mach-omap2/pm_bus.c deleted file mode 100644 index 5acd2ab298b1..000000000000 --- a/arch/arm/mach-omap2/pm_bus.c +++ /dev/null | |||
@@ -1,85 +0,0 @@ | |||
1 | /* | ||
2 | * Runtime PM support code for OMAP | ||
3 | * | ||
4 | * Author: Kevin Hilman, Deep Root Systems, LLC | ||
5 | * | ||
6 | * Copyright (C) 2010 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This file is licensed under the terms of the GNU General Public | ||
9 | * License version 2. This program is licensed "as is" without any | ||
10 | * warranty of any kind, whether express or implied. | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/pm_runtime.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/mutex.h> | ||
18 | |||
19 | #include <plat/omap_device.h> | ||
20 | #include <plat/omap-pm.h> | ||
21 | |||
22 | #ifdef CONFIG_PM_RUNTIME | ||
23 | static int omap_pm_runtime_suspend(struct device *dev) | ||
24 | { | ||
25 | struct platform_device *pdev = to_platform_device(dev); | ||
26 | int r, ret = 0; | ||
27 | |||
28 | dev_dbg(dev, "%s\n", __func__); | ||
29 | |||
30 | ret = pm_generic_runtime_suspend(dev); | ||
31 | |||
32 | if (!ret && dev->parent == &omap_device_parent) { | ||
33 | r = omap_device_idle(pdev); | ||
34 | WARN_ON(r); | ||
35 | } | ||
36 | |||
37 | return ret; | ||
38 | }; | ||
39 | |||
40 | static int omap_pm_runtime_resume(struct device *dev) | ||
41 | { | ||
42 | struct platform_device *pdev = to_platform_device(dev); | ||
43 | int r; | ||
44 | |||
45 | dev_dbg(dev, "%s\n", __func__); | ||
46 | |||
47 | if (dev->parent == &omap_device_parent) { | ||
48 | r = omap_device_enable(pdev); | ||
49 | WARN_ON(r); | ||
50 | } | ||
51 | |||
52 | return pm_generic_runtime_resume(dev); | ||
53 | }; | ||
54 | #else | ||
55 | #define omap_pm_runtime_suspend NULL | ||
56 | #define omap_pm_runtime_resume NULL | ||
57 | #endif /* CONFIG_PM_RUNTIME */ | ||
58 | |||
59 | static int __init omap_pm_runtime_init(void) | ||
60 | { | ||
61 | const struct dev_pm_ops *pm; | ||
62 | struct dev_pm_ops *omap_pm; | ||
63 | |||
64 | pm = platform_bus_get_pm_ops(); | ||
65 | if (!pm) { | ||
66 | pr_err("%s: unable to get dev_pm_ops from platform_bus\n", | ||
67 | __func__); | ||
68 | return -ENODEV; | ||
69 | } | ||
70 | |||
71 | omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL); | ||
72 | if (!omap_pm) { | ||
73 | pr_err("%s: unable to alloc memory for new dev_pm_ops\n", | ||
74 | __func__); | ||
75 | return -ENOMEM; | ||
76 | } | ||
77 | |||
78 | omap_pm->runtime_suspend = omap_pm_runtime_suspend; | ||
79 | omap_pm->runtime_resume = omap_pm_runtime_resume; | ||
80 | |||
81 | platform_bus_set_pm_ops(omap_pm); | ||
82 | |||
83 | return 0; | ||
84 | } | ||
85 | core_initcall(omap_pm_runtime_init); | ||
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c index bfbecec6d05f..810a982a66f8 100644 --- a/arch/arm/mach-pxa/balloon3.c +++ b/arch/arm/mach-pxa/balloon3.c | |||
@@ -15,7 +15,6 @@ | |||
15 | 15 | ||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/sysdev.h> | ||
19 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
20 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
21 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
diff --git a/arch/arm/mach-pxa/clock-pxa2xx.c b/arch/arm/mach-pxa/clock-pxa2xx.c index 1ce090448493..1d5859d9a0e3 100644 --- a/arch/arm/mach-pxa/clock-pxa2xx.c +++ b/arch/arm/mach-pxa/clock-pxa2xx.c | |||
@@ -9,7 +9,7 @@ | |||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/sysdev.h> | 12 | #include <linux/syscore_ops.h> |
13 | 13 | ||
14 | #include <mach/pxa2xx-regs.h> | 14 | #include <mach/pxa2xx-regs.h> |
15 | 15 | ||
@@ -33,32 +33,22 @@ const struct clkops clk_pxa2xx_cken_ops = { | |||
33 | #ifdef CONFIG_PM | 33 | #ifdef CONFIG_PM |
34 | static uint32_t saved_cken; | 34 | static uint32_t saved_cken; |
35 | 35 | ||
36 | static int pxa2xx_clock_suspend(struct sys_device *d, pm_message_t state) | 36 | static int pxa2xx_clock_suspend(void) |
37 | { | 37 | { |
38 | saved_cken = CKEN; | 38 | saved_cken = CKEN; |
39 | return 0; | 39 | return 0; |
40 | } | 40 | } |
41 | 41 | ||
42 | static int pxa2xx_clock_resume(struct sys_device *d) | 42 | static void pxa2xx_clock_resume(void) |
43 | { | 43 | { |
44 | CKEN = saved_cken; | 44 | CKEN = saved_cken; |
45 | return 0; | ||
46 | } | 45 | } |
47 | #else | 46 | #else |
48 | #define pxa2xx_clock_suspend NULL | 47 | #define pxa2xx_clock_suspend NULL |
49 | #define pxa2xx_clock_resume NULL | 48 | #define pxa2xx_clock_resume NULL |
50 | #endif | 49 | #endif |
51 | 50 | ||
52 | struct sysdev_class pxa2xx_clock_sysclass = { | 51 | struct syscore_ops pxa2xx_clock_syscore_ops = { |
53 | .name = "pxa2xx-clock", | ||
54 | .suspend = pxa2xx_clock_suspend, | 52 | .suspend = pxa2xx_clock_suspend, |
55 | .resume = pxa2xx_clock_resume, | 53 | .resume = pxa2xx_clock_resume, |
56 | }; | 54 | }; |
57 | |||
58 | static int __init pxa2xx_clock_init(void) | ||
59 | { | ||
60 | if (cpu_is_pxa2xx()) | ||
61 | return sysdev_class_register(&pxa2xx_clock_sysclass); | ||
62 | return 0; | ||
63 | } | ||
64 | postcore_initcall(pxa2xx_clock_init); | ||
diff --git a/arch/arm/mach-pxa/clock-pxa3xx.c b/arch/arm/mach-pxa/clock-pxa3xx.c index 3f864cd0bd28..2a37a9a8f621 100644 --- a/arch/arm/mach-pxa/clock-pxa3xx.c +++ b/arch/arm/mach-pxa/clock-pxa3xx.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/io.h> | 12 | #include <linux/io.h> |
13 | #include <linux/syscore_ops.h> | ||
13 | 14 | ||
14 | #include <mach/smemc.h> | 15 | #include <mach/smemc.h> |
15 | #include <mach/pxa3xx-regs.h> | 16 | #include <mach/pxa3xx-regs.h> |
@@ -182,7 +183,7 @@ const struct clkops clk_pxa3xx_pout_ops = { | |||
182 | static uint32_t cken[2]; | 183 | static uint32_t cken[2]; |
183 | static uint32_t accr; | 184 | static uint32_t accr; |
184 | 185 | ||
185 | static int pxa3xx_clock_suspend(struct sys_device *d, pm_message_t state) | 186 | static int pxa3xx_clock_suspend(void) |
186 | { | 187 | { |
187 | cken[0] = CKENA; | 188 | cken[0] = CKENA; |
188 | cken[1] = CKENB; | 189 | cken[1] = CKENB; |
@@ -190,28 +191,18 @@ static int pxa3xx_clock_suspend(struct sys_device *d, pm_message_t state) | |||
190 | return 0; | 191 | return 0; |
191 | } | 192 | } |
192 | 193 | ||
193 | static int pxa3xx_clock_resume(struct sys_device *d) | 194 | static void pxa3xx_clock_resume(void) |
194 | { | 195 | { |
195 | ACCR = accr; | 196 | ACCR = accr; |
196 | CKENA = cken[0]; | 197 | CKENA = cken[0]; |
197 | CKENB = cken[1]; | 198 | CKENB = cken[1]; |
198 | return 0; | ||
199 | } | 199 | } |
200 | #else | 200 | #else |
201 | #define pxa3xx_clock_suspend NULL | 201 | #define pxa3xx_clock_suspend NULL |
202 | #define pxa3xx_clock_resume NULL | 202 | #define pxa3xx_clock_resume NULL |
203 | #endif | 203 | #endif |
204 | 204 | ||
205 | struct sysdev_class pxa3xx_clock_sysclass = { | 205 | struct syscore_ops pxa3xx_clock_syscore_ops = { |
206 | .name = "pxa3xx-clock", | ||
207 | .suspend = pxa3xx_clock_suspend, | 206 | .suspend = pxa3xx_clock_suspend, |
208 | .resume = pxa3xx_clock_resume, | 207 | .resume = pxa3xx_clock_resume, |
209 | }; | 208 | }; |
210 | |||
211 | static int __init pxa3xx_clock_init(void) | ||
212 | { | ||
213 | if (cpu_is_pxa3xx() || cpu_is_pxa95x()) | ||
214 | return sysdev_class_register(&pxa3xx_clock_sysclass); | ||
215 | return 0; | ||
216 | } | ||
217 | postcore_initcall(pxa3xx_clock_init); | ||
diff --git a/arch/arm/mach-pxa/clock.h b/arch/arm/mach-pxa/clock.h index f9f349a21b54..1f2fb9c43f06 100644 --- a/arch/arm/mach-pxa/clock.h +++ b/arch/arm/mach-pxa/clock.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/clkdev.h> | 1 | #include <linux/clkdev.h> |
2 | #include <linux/sysdev.h> | 2 | #include <linux/syscore_ops.h> |
3 | 3 | ||
4 | struct clkops { | 4 | struct clkops { |
5 | void (*enable)(struct clk *); | 5 | void (*enable)(struct clk *); |
@@ -54,7 +54,7 @@ extern const struct clkops clk_pxa2xx_cken_ops; | |||
54 | void clk_pxa2xx_cken_enable(struct clk *clk); | 54 | void clk_pxa2xx_cken_enable(struct clk *clk); |
55 | void clk_pxa2xx_cken_disable(struct clk *clk); | 55 | void clk_pxa2xx_cken_disable(struct clk *clk); |
56 | 56 | ||
57 | extern struct sysdev_class pxa2xx_clock_sysclass; | 57 | extern struct syscore_ops pxa2xx_clock_syscore_ops; |
58 | 58 | ||
59 | #if defined(CONFIG_PXA3xx) || defined(CONFIG_PXA95x) | 59 | #if defined(CONFIG_PXA3xx) || defined(CONFIG_PXA95x) |
60 | #define DEFINE_PXA3_CKEN(_name, _cken, _rate, _delay) \ | 60 | #define DEFINE_PXA3_CKEN(_name, _cken, _rate, _delay) \ |
@@ -74,5 +74,6 @@ extern const struct clkops clk_pxa3xx_smemc_ops; | |||
74 | extern void clk_pxa3xx_cken_enable(struct clk *); | 74 | extern void clk_pxa3xx_cken_enable(struct clk *); |
75 | extern void clk_pxa3xx_cken_disable(struct clk *); | 75 | extern void clk_pxa3xx_cken_disable(struct clk *); |
76 | 76 | ||
77 | extern struct sysdev_class pxa3xx_clock_sysclass; | 77 | extern struct syscore_ops pxa3xx_clock_syscore_ops; |
78 | |||
78 | #endif | 79 | #endif |
diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c index b88d601a8090..13518a705399 100644 --- a/arch/arm/mach-pxa/cm-x270.c +++ b/arch/arm/mach-pxa/cm-x270.c | |||
@@ -10,7 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
13 | #include <linux/sysdev.h> | ||
14 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
15 | #include <linux/gpio.h> | 14 | #include <linux/gpio.h> |
16 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c index 8225e2e58c6e..a10996782476 100644 --- a/arch/arm/mach-pxa/cm-x2xx.c +++ b/arch/arm/mach-pxa/cm-x2xx.c | |||
@@ -10,7 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
13 | #include <linux/sysdev.h> | 13 | #include <linux/syscore_ops.h> |
14 | #include <linux/irq.h> | 14 | #include <linux/irq.h> |
15 | #include <linux/gpio.h> | 15 | #include <linux/gpio.h> |
16 | 16 | ||
@@ -388,7 +388,7 @@ static inline void cmx2xx_init_display(void) {} | |||
388 | #ifdef CONFIG_PM | 388 | #ifdef CONFIG_PM |
389 | static unsigned long sleep_save_msc[10]; | 389 | static unsigned long sleep_save_msc[10]; |
390 | 390 | ||
391 | static int cmx2xx_suspend(struct sys_device *dev, pm_message_t state) | 391 | static int cmx2xx_suspend(void) |
392 | { | 392 | { |
393 | cmx2xx_pci_suspend(); | 393 | cmx2xx_pci_suspend(); |
394 | 394 | ||
@@ -412,7 +412,7 @@ static int cmx2xx_suspend(struct sys_device *dev, pm_message_t state) | |||
412 | return 0; | 412 | return 0; |
413 | } | 413 | } |
414 | 414 | ||
415 | static int cmx2xx_resume(struct sys_device *dev) | 415 | static void cmx2xx_resume(void) |
416 | { | 416 | { |
417 | cmx2xx_pci_resume(); | 417 | cmx2xx_pci_resume(); |
418 | 418 | ||
@@ -420,27 +420,18 @@ static int cmx2xx_resume(struct sys_device *dev) | |||
420 | __raw_writel(sleep_save_msc[0], MSC0); | 420 | __raw_writel(sleep_save_msc[0], MSC0); |
421 | __raw_writel(sleep_save_msc[1], MSC1); | 421 | __raw_writel(sleep_save_msc[1], MSC1); |
422 | __raw_writel(sleep_save_msc[2], MSC2); | 422 | __raw_writel(sleep_save_msc[2], MSC2); |
423 | |||
424 | return 0; | ||
425 | } | 423 | } |
426 | 424 | ||
427 | static struct sysdev_class cmx2xx_pm_sysclass = { | 425 | static struct syscore_ops cmx2xx_pm_syscore_ops = { |
428 | .name = "pm", | ||
429 | .resume = cmx2xx_resume, | 426 | .resume = cmx2xx_resume, |
430 | .suspend = cmx2xx_suspend, | 427 | .suspend = cmx2xx_suspend, |
431 | }; | 428 | }; |
432 | 429 | ||
433 | static struct sys_device cmx2xx_pm_device = { | ||
434 | .cls = &cmx2xx_pm_sysclass, | ||
435 | }; | ||
436 | |||
437 | static int __init cmx2xx_pm_init(void) | 430 | static int __init cmx2xx_pm_init(void) |
438 | { | 431 | { |
439 | int error; | 432 | register_syscore_ops(&cmx2xx_pm_syscore_ops); |
440 | error = sysdev_class_register(&cmx2xx_pm_sysclass); | 433 | |
441 | if (error == 0) | 434 | return 0; |
442 | error = sysdev_register(&cmx2xx_pm_device); | ||
443 | return error; | ||
444 | } | 435 | } |
445 | #else | 436 | #else |
446 | static int __init cmx2xx_pm_init(void) { return 0; } | 437 | static int __init cmx2xx_pm_init(void) { return 0; } |
diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c index 81c3c433e2d6..d28e802e2448 100644 --- a/arch/arm/mach-pxa/colibri-evalboard.c +++ b/arch/arm/mach-pxa/colibri-evalboard.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/sysdev.h> | ||
17 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
18 | #include <linux/gpio.h> | 17 | #include <linux/gpio.h> |
19 | #include <asm/mach-types.h> | 18 | #include <asm/mach-types.h> |
diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c index 44c1b77ece67..80538b8806ed 100644 --- a/arch/arm/mach-pxa/colibri-pxa270-income.c +++ b/arch/arm/mach-pxa/colibri-pxa270-income.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/pwm_backlight.h> | 23 | #include <linux/pwm_backlight.h> |
24 | #include <linux/i2c/pxa-i2c.h> | 24 | #include <linux/i2c/pxa-i2c.h> |
25 | #include <linux/sysdev.h> | ||
26 | 25 | ||
27 | #include <asm/irq.h> | 26 | #include <asm/irq.h> |
28 | #include <asm/mach-types.h> | 27 | #include <asm/mach-types.h> |
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c index 6fc5d328ba7f..7545a48ed88b 100644 --- a/arch/arm/mach-pxa/colibri-pxa270.c +++ b/arch/arm/mach-pxa/colibri-pxa270.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/mtd/partitions.h> | 17 | #include <linux/mtd/partitions.h> |
18 | #include <linux/mtd/physmap.h> | 18 | #include <linux/mtd/physmap.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/sysdev.h> | ||
21 | #include <linux/ucb1400.h> | 20 | #include <linux/ucb1400.h> |
22 | 21 | ||
23 | #include <asm/mach/arch.h> | 22 | #include <asm/mach/arch.h> |
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h index a079d8baa45a..e6c9344a95ae 100644 --- a/arch/arm/mach-pxa/generic.h +++ b/arch/arm/mach-pxa/generic.h | |||
@@ -61,10 +61,10 @@ extern unsigned pxa3xx_get_clk_frequency_khz(int); | |||
61 | #define pxa3xx_get_clk_frequency_khz(x) (0) | 61 | #define pxa3xx_get_clk_frequency_khz(x) (0) |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | extern struct sysdev_class pxa_irq_sysclass; | 64 | extern struct syscore_ops pxa_irq_syscore_ops; |
65 | extern struct sysdev_class pxa_gpio_sysclass; | 65 | extern struct syscore_ops pxa_gpio_syscore_ops; |
66 | extern struct sysdev_class pxa2xx_mfp_sysclass; | 66 | extern struct syscore_ops pxa2xx_mfp_syscore_ops; |
67 | extern struct sysdev_class pxa3xx_mfp_sysclass; | 67 | extern struct syscore_ops pxa3xx_mfp_syscore_ops; |
68 | 68 | ||
69 | void __init pxa_set_ffuart_info(void *info); | 69 | void __init pxa_set_ffuart_info(void *info); |
70 | void __init pxa_set_btuart_info(void *info); | 70 | void __init pxa_set_btuart_info(void *info); |
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 6251e3f5c62c..32ed551bf9c5 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/sysdev.h> | 18 | #include <linux/syscore_ops.h> |
19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | #include <linux/irq.h> | 20 | #include <linux/irq.h> |
21 | 21 | ||
@@ -183,7 +183,7 @@ void __init pxa_init_irq(int irq_nr, set_wake_t fn) | |||
183 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; | 183 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; |
184 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; | 184 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; |
185 | 185 | ||
186 | static int pxa_irq_suspend(struct sys_device *dev, pm_message_t state) | 186 | static int pxa_irq_suspend(void) |
187 | { | 187 | { |
188 | int i; | 188 | int i; |
189 | 189 | ||
@@ -202,7 +202,7 @@ static int pxa_irq_suspend(struct sys_device *dev, pm_message_t state) | |||
202 | return 0; | 202 | return 0; |
203 | } | 203 | } |
204 | 204 | ||
205 | static int pxa_irq_resume(struct sys_device *dev) | 205 | static void pxa_irq_resume(void) |
206 | { | 206 | { |
207 | int i; | 207 | int i; |
208 | 208 | ||
@@ -218,22 +218,13 @@ static int pxa_irq_resume(struct sys_device *dev) | |||
218 | __raw_writel(saved_ipr[i], IRQ_BASE + IPR(i)); | 218 | __raw_writel(saved_ipr[i], IRQ_BASE + IPR(i)); |
219 | 219 | ||
220 | __raw_writel(1, IRQ_BASE + ICCR); | 220 | __raw_writel(1, IRQ_BASE + ICCR); |
221 | return 0; | ||
222 | } | 221 | } |
223 | #else | 222 | #else |
224 | #define pxa_irq_suspend NULL | 223 | #define pxa_irq_suspend NULL |
225 | #define pxa_irq_resume NULL | 224 | #define pxa_irq_resume NULL |
226 | #endif | 225 | #endif |
227 | 226 | ||
228 | struct sysdev_class pxa_irq_sysclass = { | 227 | struct syscore_ops pxa_irq_syscore_ops = { |
229 | .name = "irq", | ||
230 | .suspend = pxa_irq_suspend, | 228 | .suspend = pxa_irq_suspend, |
231 | .resume = pxa_irq_resume, | 229 | .resume = pxa_irq_resume, |
232 | }; | 230 | }; |
233 | |||
234 | static int __init pxa_irq_init(void) | ||
235 | { | ||
236 | return sysdev_class_register(&pxa_irq_sysclass); | ||
237 | } | ||
238 | |||
239 | core_initcall(pxa_irq_init); | ||
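The irq.c conversion above is the template for every hunk that follows: the suspend hook loses its struct sys_device and pm_message_t arguments, the resume hook additionally loses its return value (a syscore resume is not allowed to fail), and the sysdev class plus its private registration initcall collapse into a single struct syscore_ops. A minimal sketch of the resulting pattern, assuming the 2.6.39-era <linux/syscore_ops.h> interface; the names below are illustrative, not taken from the patch:

    #include <linux/init.h>
    #include <linux/syscore_ops.h>

    static unsigned long demo_saved_state;     /* stand-in for saved registers */

    static int demo_suspend(void)              /* non-zero return aborts suspend */
    {
            demo_saved_state = 0;              /* save hardware state here */
            return 0;
    }

    static void demo_resume(void)              /* void: resume may not fail */
    {
            /* restore hardware state from demo_saved_state here */
    }

    static struct syscore_ops demo_syscore_ops = {
            .suspend = demo_suspend,
            .resume  = demo_resume,
    };

    static int __init demo_init(void)
    {
            /* No class and no device object: registration just links
             * the ops onto a global list. */
            register_syscore_ops(&demo_syscore_ops);
            return 0;
    }
    core_initcall(demo_init);

Syscore callbacks run late in the suspend sequence, with interrupts disabled and only one CPU online, which is why a single global instance with no device argument is enough.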
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index f5de541725b1..6cf8180bf5bd 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/sysdev.h> | 18 | #include <linux/syscore_ops.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/bitops.h> | 21 | #include <linux/bitops.h> |
@@ -159,30 +159,22 @@ static void __init lpd270_init_irq(void) | |||
159 | 159 | ||
160 | 160 | ||
161 | #ifdef CONFIG_PM | 161 | #ifdef CONFIG_PM |
162 | static int lpd270_irq_resume(struct sys_device *dev) | 162 | static void lpd270_irq_resume(void) |
163 | { | 163 | { |
164 | __raw_writew(lpd270_irq_enabled, LPD270_INT_MASK); | 164 | __raw_writew(lpd270_irq_enabled, LPD270_INT_MASK); |
165 | return 0; | ||
166 | } | 165 | } |
167 | 166 | ||
168 | static struct sysdev_class lpd270_irq_sysclass = { | 167 | static struct syscore_ops lpd270_irq_syscore_ops = { |
169 | .name = "cpld_irq", | ||
170 | .resume = lpd270_irq_resume, | 168 | .resume = lpd270_irq_resume, |
171 | }; | 169 | }; |
172 | 170 | ||
173 | static struct sys_device lpd270_irq_device = { | ||
174 | .cls = &lpd270_irq_sysclass, | ||
175 | }; | ||
176 | |||
177 | static int __init lpd270_irq_device_init(void) | 171 | static int __init lpd270_irq_device_init(void) |
178 | { | 172 | { |
179 | int ret = -ENODEV; | ||
180 | if (machine_is_logicpd_pxa270()) { | 173 | if (machine_is_logicpd_pxa270()) { |
181 | ret = sysdev_class_register(&lpd270_irq_sysclass); | 174 | register_syscore_ops(&lpd270_irq_syscore_ops); |
182 | if (ret == 0) | 175 | return 0; |
183 | ret = sysdev_register(&lpd270_irq_device); | ||
184 | } | 176 | } |
185 | return ret; | 177 | return -ENODEV; |
186 | } | 178 | } |
187 | 179 | ||
188 | device_initcall(lpd270_irq_device_init); | 180 | device_initcall(lpd270_irq_device_init); |
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index 3ede978c83d9..e10ddb827147 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/sysdev.h> | 18 | #include <linux/syscore_ops.h> |
19 | #include <linux/major.h> | 19 | #include <linux/major.h> |
20 | #include <linux/fb.h> | 20 | #include <linux/fb.h> |
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
@@ -176,31 +176,22 @@ static void __init lubbock_init_irq(void) | |||
176 | 176 | ||
177 | #ifdef CONFIG_PM | 177 | #ifdef CONFIG_PM |
178 | 178 | ||
179 | static int lubbock_irq_resume(struct sys_device *dev) | 179 | static void lubbock_irq_resume(void) |
180 | { | 180 | { |
181 | LUB_IRQ_MASK_EN = lubbock_irq_enabled; | 181 | LUB_IRQ_MASK_EN = lubbock_irq_enabled; |
182 | return 0; | ||
183 | } | 182 | } |
184 | 183 | ||
185 | static struct sysdev_class lubbock_irq_sysclass = { | 184 | static struct syscore_ops lubbock_irq_syscore_ops = { |
186 | .name = "cpld_irq", | ||
187 | .resume = lubbock_irq_resume, | 185 | .resume = lubbock_irq_resume, |
188 | }; | 186 | }; |
189 | 187 | ||
190 | static struct sys_device lubbock_irq_device = { | ||
191 | .cls = &lubbock_irq_sysclass, | ||
192 | }; | ||
193 | |||
194 | static int __init lubbock_irq_device_init(void) | 188 | static int __init lubbock_irq_device_init(void) |
195 | { | 189 | { |
196 | int ret = -ENODEV; | ||
197 | |||
198 | if (machine_is_lubbock()) { | 190 | if (machine_is_lubbock()) { |
199 | ret = sysdev_class_register(&lubbock_irq_sysclass); | 191 | register_syscore_ops(&lubbock_irq_syscore_ops); |
200 | if (ret == 0) | 192 | return 0; |
201 | ret = sysdev_register(&lubbock_irq_device); | ||
202 | } | 193 | } |
203 | return ret; | 194 | return -ENODEV; |
204 | } | 195 | } |
205 | 196 | ||
206 | device_initcall(lubbock_irq_device_init); | 197 | device_initcall(lubbock_irq_device_init); |
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index 95163baca29e..3479e2b3b511 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/sysdev.h> | 18 | #include <linux/syscore_ops.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/bitops.h> | 21 | #include <linux/bitops.h> |
@@ -185,31 +185,21 @@ static void __init mainstone_init_irq(void) | |||
185 | 185 | ||
186 | #ifdef CONFIG_PM | 186 | #ifdef CONFIG_PM |
187 | 187 | ||
188 | static int mainstone_irq_resume(struct sys_device *dev) | 188 | static void mainstone_irq_resume(void) |
189 | { | 189 | { |
190 | MST_INTMSKENA = mainstone_irq_enabled; | 190 | MST_INTMSKENA = mainstone_irq_enabled; |
191 | return 0; | ||
192 | } | 191 | } |
193 | 192 | ||
194 | static struct sysdev_class mainstone_irq_sysclass = { | 193 | static struct syscore_ops mainstone_irq_syscore_ops = { |
195 | .name = "cpld_irq", | ||
196 | .resume = mainstone_irq_resume, | 194 | .resume = mainstone_irq_resume, |
197 | }; | 195 | }; |
198 | 196 | ||
199 | static struct sys_device mainstone_irq_device = { | ||
200 | .cls = &mainstone_irq_sysclass, | ||
201 | }; | ||
202 | |||
203 | static int __init mainstone_irq_device_init(void) | 197 | static int __init mainstone_irq_device_init(void) |
204 | { | 198 | { |
205 | int ret = -ENODEV; | 199 | if (machine_is_mainstone()) |
200 | register_syscore_ops(&mainstone_irq_syscore_ops); | ||
206 | 201 | ||
207 | if (machine_is_mainstone()) { | 202 | return 0; |
208 | ret = sysdev_class_register(&mainstone_irq_sysclass); | ||
209 | if (ret == 0) | ||
210 | ret = sysdev_register(&mainstone_irq_device); | ||
211 | } | ||
212 | return ret; | ||
213 | } | 203 | } |
214 | 204 | ||
215 | device_initcall(mainstone_irq_device_init); | 205 | device_initcall(mainstone_irq_device_init); |
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c index 1d1419b73457..87ae3129f4f7 100644 --- a/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/arch/arm/mach-pxa/mfp-pxa2xx.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/sysdev.h> | 19 | #include <linux/syscore_ops.h> |
20 | 20 | ||
21 | #include <mach/gpio.h> | 21 | #include <mach/gpio.h> |
22 | #include <mach/pxa2xx-regs.h> | 22 | #include <mach/pxa2xx-regs.h> |
@@ -338,7 +338,7 @@ static unsigned long saved_gafr[2][4]; | |||
338 | static unsigned long saved_gpdr[4]; | 338 | static unsigned long saved_gpdr[4]; |
339 | static unsigned long saved_pgsr[4]; | 339 | static unsigned long saved_pgsr[4]; |
340 | 340 | ||
341 | static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state) | 341 | static int pxa2xx_mfp_suspend(void) |
342 | { | 342 | { |
343 | int i; | 343 | int i; |
344 | 344 | ||
@@ -365,7 +365,7 @@ static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state) | |||
365 | return 0; | 365 | return 0; |
366 | } | 366 | } |
367 | 367 | ||
368 | static int pxa2xx_mfp_resume(struct sys_device *d) | 368 | static void pxa2xx_mfp_resume(void) |
369 | { | 369 | { |
370 | int i; | 370 | int i; |
371 | 371 | ||
@@ -376,15 +376,13 @@ static int pxa2xx_mfp_resume(struct sys_device *d) | |||
376 | PGSR(i) = saved_pgsr[i]; | 376 | PGSR(i) = saved_pgsr[i]; |
377 | } | 377 | } |
378 | PSSR = PSSR_RDH | PSSR_PH; | 378 | PSSR = PSSR_RDH | PSSR_PH; |
379 | return 0; | ||
380 | } | 379 | } |
381 | #else | 380 | #else |
382 | #define pxa2xx_mfp_suspend NULL | 381 | #define pxa2xx_mfp_suspend NULL |
383 | #define pxa2xx_mfp_resume NULL | 382 | #define pxa2xx_mfp_resume NULL |
384 | #endif | 383 | #endif |
385 | 384 | ||
386 | struct sysdev_class pxa2xx_mfp_sysclass = { | 385 | struct syscore_ops pxa2xx_mfp_syscore_ops = { |
387 | .name = "mfp", | ||
388 | .suspend = pxa2xx_mfp_suspend, | 386 | .suspend = pxa2xx_mfp_suspend, |
389 | .resume = pxa2xx_mfp_resume, | 387 | .resume = pxa2xx_mfp_resume, |
390 | }; | 388 | }; |
@@ -409,6 +407,6 @@ static int __init pxa2xx_mfp_init(void) | |||
409 | for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) | 407 | for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) |
410 | gpdr_lpm[i] = GPDR(i * 32); | 408 | gpdr_lpm[i] = GPDR(i * 32); |
411 | 409 | ||
412 | return sysdev_class_register(&pxa2xx_mfp_sysclass); | 410 | return 0; |
413 | } | 411 | } |
414 | postcore_initcall(pxa2xx_mfp_init); | 412 | postcore_initcall(pxa2xx_mfp_init); |
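The mfp-pxa2xx hunk shows the complementary half of the rework: the postcore initcall survives, but only for the work it genuinely owns (snapshotting the GPDR low-power state), while the sysdev_class_register() tail becomes a plain return 0 and the actual registration of pxa2xx_mfp_syscore_ops moves into the per-SoC init functions further down (pxa25x_init and pxa27x_init). A sketch of that split, with hypothetical names:

    #include <linux/init.h>
    #include <linux/syscore_ops.h>

    struct syscore_ops demo_mfp_syscore_ops;   /* defined next to the driver */

    static int __init demo_mfp_init(void)
    {
            /* setup only: snapshot state here, register nothing */
            return 0;
    }
    postcore_initcall(demo_mfp_init);

    static int __init demo_soc_init(void)
    {
            /* the SoC init knows which ops apply to the running CPU
             * and registers exactly those */
            register_syscore_ops(&demo_mfp_syscore_ops);
            return 0;
    }
    arch_initcall(demo_soc_init);

The same reasoning lets the next hunk delete mfp-pxa3xx.c's mfp_init_devicefs() initcall outright: the cpu_is_pxa3xx() check it encoded now lives in pxa3xx_init().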
diff --git a/arch/arm/mach-pxa/mfp-pxa3xx.c b/arch/arm/mach-pxa/mfp-pxa3xx.c index 7a270eecd480..89863a01ecd7 100644 --- a/arch/arm/mach-pxa/mfp-pxa3xx.c +++ b/arch/arm/mach-pxa/mfp-pxa3xx.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | #include <linux/sysdev.h> | 20 | #include <linux/syscore_ops.h> |
21 | 21 | ||
22 | #include <mach/hardware.h> | 22 | #include <mach/hardware.h> |
23 | #include <mach/mfp-pxa3xx.h> | 23 | #include <mach/mfp-pxa3xx.h> |
@@ -31,13 +31,13 @@ | |||
31 | * a pull-down mode if they're an active low chip select, and we're | 31 | * a pull-down mode if they're an active low chip select, and we're |
32 | * just entering standby. | 32 | * just entering standby. |
33 | */ | 33 | */ |
34 | static int pxa3xx_mfp_suspend(struct sys_device *d, pm_message_t state) | 34 | static int pxa3xx_mfp_suspend(void) |
35 | { | 35 | { |
36 | mfp_config_lpm(); | 36 | mfp_config_lpm(); |
37 | return 0; | 37 | return 0; |
38 | } | 38 | } |
39 | 39 | ||
40 | static int pxa3xx_mfp_resume(struct sys_device *d) | 40 | static void pxa3xx_mfp_resume(void) |
41 | { | 41 | { |
42 | mfp_config_run(); | 42 | mfp_config_run(); |
43 | 43 | ||
@@ -47,24 +47,13 @@ static int pxa3xx_mfp_resume(struct sys_device *d) | |||
47 | * preserve them here in case they will be referenced later | 47 | * preserve them here in case they will be referenced later |
48 | */ | 48 | */ |
49 | ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); | 49 | ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); |
50 | return 0; | ||
51 | } | 50 | } |
52 | #else | 51 | #else |
53 | #define pxa3xx_mfp_suspend NULL | 52 | #define pxa3xx_mfp_suspend NULL |
54 | #define pxa3xx_mfp_resume NULL | 53 | #define pxa3xx_mfp_resume NULL |
55 | #endif | 54 | #endif |
56 | 55 | ||
57 | struct sysdev_class pxa3xx_mfp_sysclass = { | 56 | struct syscore_ops pxa3xx_mfp_syscore_ops = { |
58 | .name = "mfp", | ||
59 | .suspend = pxa3xx_mfp_suspend, | 57 | .suspend = pxa3xx_mfp_suspend, |
60 | .resume = pxa3xx_mfp_resume, | 58 | .resume = pxa3xx_mfp_resume, |
61 | }; | 59 | }; |
62 | |||
63 | static int __init mfp_init_devicefs(void) | ||
64 | { | ||
65 | if (cpu_is_pxa3xx()) | ||
66 | return sysdev_class_register(&pxa3xx_mfp_sysclass); | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | postcore_initcall(mfp_init_devicefs); | ||
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c index 23925db8ff74..e3470137c934 100644 --- a/arch/arm/mach-pxa/mioa701.c +++ b/arch/arm/mach-pxa/mioa701.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/sysdev.h> | 25 | #include <linux/syscore_ops.h> |
26 | #include <linux/input.h> | 26 | #include <linux/input.h> |
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | #include <linux/gpio_keys.h> | 28 | #include <linux/gpio_keys.h> |
@@ -488,7 +488,7 @@ static void install_bootstrap(void) | |||
488 | } | 488 | } |
489 | 489 | ||
490 | 490 | ||
491 | static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state) | 491 | static int mioa701_sys_suspend(void) |
492 | { | 492 | { |
493 | int i = 0, is_bt_on; | 493 | int i = 0, is_bt_on; |
494 | u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); | 494 | u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); |
@@ -514,7 +514,7 @@ static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state) | |||
514 | return 0; | 514 | return 0; |
515 | } | 515 | } |
516 | 516 | ||
517 | static int mioa701_sys_resume(struct sys_device *sysdev) | 517 | static void mioa701_sys_resume(void) |
518 | { | 518 | { |
519 | int i = 0; | 519 | int i = 0; |
520 | u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); | 520 | u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); |
@@ -527,43 +527,18 @@ static int mioa701_sys_resume(struct sys_device *sysdev) | |||
527 | *mem_resume_enabler = save_buffer[i++]; | 527 | *mem_resume_enabler = save_buffer[i++]; |
528 | *mem_resume_bt = save_buffer[i++]; | 528 | *mem_resume_bt = save_buffer[i++]; |
529 | *mem_resume_unknown = save_buffer[i++]; | 529 | *mem_resume_unknown = save_buffer[i++]; |
530 | |||
531 | return 0; | ||
532 | } | 530 | } |
533 | 531 | ||
534 | static struct sysdev_class mioa701_sysclass = { | 532 | static struct syscore_ops mioa701_syscore_ops = { |
535 | .name = "mioa701", | 533 | .suspend = mioa701_sys_suspend, |
536 | }; | 534 | .resume = mioa701_sys_resume, |
537 | |||
538 | static struct sys_device sysdev_bootstrap = { | ||
539 | .cls = &mioa701_sysclass, | ||
540 | }; | ||
541 | |||
542 | static struct sysdev_driver driver_bootstrap = { | ||
543 | .suspend = &mioa701_sys_suspend, | ||
544 | .resume = &mioa701_sys_resume, | ||
545 | }; | 535 | }; |
546 | 536 | ||
547 | static int __init bootstrap_init(void) | 537 | static int __init bootstrap_init(void) |
548 | { | 538 | { |
549 | int rc; | ||
550 | int save_size = mioa701_bootstrap_lg + (sizeof(u32) * 3); | 539 | int save_size = mioa701_bootstrap_lg + (sizeof(u32) * 3); |
551 | 540 | ||
552 | rc = sysdev_class_register(&mioa701_sysclass); | 541 | register_syscore_ops(&mioa701_syscore_ops); |
553 | if (rc) { | ||
554 | printk(KERN_ERR "Failed registering mioa701 sys class\n"); | ||
555 | return -ENODEV; | ||
556 | } | ||
557 | rc = sysdev_register(&sysdev_bootstrap); | ||
558 | if (rc) { | ||
559 | printk(KERN_ERR "Failed registering mioa701 sys device\n"); | ||
560 | return -ENODEV; | ||
561 | } | ||
562 | rc = sysdev_driver_register(&mioa701_sysclass, &driver_bootstrap); | ||
563 | if (rc) { | ||
564 | printk(KERN_ERR "Failed registering PMU sys driver\n"); | ||
565 | return -ENODEV; | ||
566 | } | ||
567 | 542 | ||
568 | save_buffer = kmalloc(save_size, GFP_KERNEL); | 543 | save_buffer = kmalloc(save_size, GFP_KERNEL); |
569 | if (!save_buffer) | 544 | if (!save_buffer) |
@@ -576,9 +551,7 @@ static int __init bootstrap_init(void) | |||
576 | static void bootstrap_exit(void) | 551 | static void bootstrap_exit(void) |
577 | { | 552 | { |
578 | kfree(save_buffer); | 553 | kfree(save_buffer); |
579 | sysdev_driver_unregister(&mioa701_sysclass, &driver_bootstrap); | 554 | unregister_syscore_ops(&mioa701_syscore_ops); |
580 | sysdev_unregister(&sysdev_bootstrap); | ||
581 | sysdev_class_unregister(&mioa701_sysclass); | ||
582 | 555 | ||
583 | printk(KERN_CRIT "Unregistering mioa701 suspend will hang next " | 556 | printk(KERN_CRIT "Unregistering mioa701 suspend will hang next " |
584 | "resume !!!\n"); | 557 | "resume !!!\n"); |
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c index a6f898cbfac9..4061ecddee70 100644 --- a/arch/arm/mach-pxa/palmld.c +++ b/arch/arm/mach-pxa/palmld.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/gpio.h> | 24 | #include <linux/gpio.h> |
25 | #include <linux/wm97xx.h> | 25 | #include <linux/wm97xx.h> |
26 | #include <linux/power_supply.h> | 26 | #include <linux/power_supply.h> |
27 | #include <linux/sysdev.h> | ||
28 | #include <linux/mtd/mtd.h> | 27 | #include <linux/mtd/mtd.h> |
29 | #include <linux/mtd/partitions.h> | 28 | #include <linux/mtd/partitions.h> |
30 | #include <linux/mtd/physmap.h> | 29 | #include <linux/mtd/physmap.h> |
diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c index 8aadad55fbe4..20d1b18b1733 100644 --- a/arch/arm/mach-pxa/palmtreo.c +++ b/arch/arm/mach-pxa/palmtreo.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/pwm_backlight.h> | 25 | #include <linux/pwm_backlight.h> |
26 | #include <linux/gpio.h> | 26 | #include <linux/gpio.h> |
27 | #include <linux/power_supply.h> | 27 | #include <linux/power_supply.h> |
28 | #include <linux/sysdev.h> | ||
29 | #include <linux/w1-gpio.h> | 28 | #include <linux/w1-gpio.h> |
30 | 29 | ||
31 | #include <asm/mach-types.h> | 30 | #include <asm/mach-types.h> |
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index 3b8a4f37dbbe..65f24f0b77e8 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c | |||
@@ -19,7 +19,7 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/sysdev.h> | 22 | #include <linux/syscore_ops.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/gpio_keys.h> | 25 | #include <linux/gpio_keys.h> |
@@ -233,9 +233,9 @@ static struct palmz72_resume_info palmz72_resume_info = { | |||
233 | 233 | ||
234 | static unsigned long store_ptr; | 234 | static unsigned long store_ptr; |
235 | 235 | ||
236 | /* sys_device for Palm Zire 72 PM */ | 236 | /* syscore_ops for Palm Zire 72 PM */ |
237 | 237 | ||
238 | static int palmz72_pm_suspend(struct sys_device *dev, pm_message_t msg) | 238 | static int palmz72_pm_suspend(void) |
239 | { | 239 | { |
240 | /* setup the resume_info struct for the original bootloader */ | 240 | /* setup the resume_info struct for the original bootloader */ |
241 | palmz72_resume_info.resume_addr = (u32) cpu_resume; | 241 | palmz72_resume_info.resume_addr = (u32) cpu_resume; |
@@ -249,31 +249,23 @@ static int palmz72_pm_suspend(struct sys_device *dev, pm_message_t msg) | |||
249 | return 0; | 249 | return 0; |
250 | } | 250 | } |
251 | 251 | ||
252 | static int palmz72_pm_resume(struct sys_device *dev) | 252 | static void palmz72_pm_resume(void) |
253 | { | 253 | { |
254 | *PALMZ72_SAVE_DWORD = store_ptr; | 254 | *PALMZ72_SAVE_DWORD = store_ptr; |
255 | return 0; | ||
256 | } | 255 | } |
257 | 256 | ||
258 | static struct sysdev_class palmz72_pm_sysclass = { | 257 | static struct syscore_ops palmz72_pm_syscore_ops = { |
259 | .name = "palmz72_pm", | ||
260 | .suspend = palmz72_pm_suspend, | 258 | .suspend = palmz72_pm_suspend, |
261 | .resume = palmz72_pm_resume, | 259 | .resume = palmz72_pm_resume, |
262 | }; | 260 | }; |
263 | 261 | ||
264 | static struct sys_device palmz72_pm_device = { | ||
265 | .cls = &palmz72_pm_sysclass, | ||
266 | }; | ||
267 | |||
268 | static int __init palmz72_pm_init(void) | 262 | static int __init palmz72_pm_init(void) |
269 | { | 263 | { |
270 | int ret = -ENODEV; | ||
271 | if (machine_is_palmz72()) { | 264 | if (machine_is_palmz72()) { |
272 | ret = sysdev_class_register(&palmz72_pm_sysclass); | 265 | register_syscore_ops(&palmz72_pm_syscore_ops); |
273 | if (ret == 0) | 266 | return 0; |
274 | ret = sysdev_register(&palmz72_pm_device); | ||
275 | } | 267 | } |
276 | return ret; | 268 | return -ENODEV; |
277 | } | 269 | } |
278 | 270 | ||
279 | device_initcall(palmz72_pm_init); | 271 | device_initcall(palmz72_pm_init); |
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index a4af8c52d7ee..fed363cec9c6 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/suspend.h> | 23 | #include <linux/suspend.h> |
24 | #include <linux/sysdev.h> | 24 | #include <linux/syscore_ops.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | 26 | ||
27 | #include <asm/mach/map.h> | 27 | #include <asm/mach/map.h> |
@@ -350,21 +350,9 @@ static struct platform_device *pxa25x_devices[] __initdata = { | |||
350 | &pxa_device_asoc_platform, | 350 | &pxa_device_asoc_platform, |
351 | }; | 351 | }; |
352 | 352 | ||
353 | static struct sys_device pxa25x_sysdev[] = { | ||
354 | { | ||
355 | .cls = &pxa_irq_sysclass, | ||
356 | }, { | ||
357 | .cls = &pxa2xx_mfp_sysclass, | ||
358 | }, { | ||
359 | .cls = &pxa_gpio_sysclass, | ||
360 | }, { | ||
361 | .cls = &pxa2xx_clock_sysclass, | ||
362 | } | ||
363 | }; | ||
364 | |||
365 | static int __init pxa25x_init(void) | 353 | static int __init pxa25x_init(void) |
366 | { | 354 | { |
367 | int i, ret = 0; | 355 | int ret = 0; |
368 | 356 | ||
369 | if (cpu_is_pxa25x()) { | 357 | if (cpu_is_pxa25x()) { |
370 | 358 | ||
@@ -377,11 +365,10 @@ static int __init pxa25x_init(void) | |||
377 | 365 | ||
378 | pxa25x_init_pm(); | 366 | pxa25x_init_pm(); |
379 | 367 | ||
380 | for (i = 0; i < ARRAY_SIZE(pxa25x_sysdev); i++) { | 368 | register_syscore_ops(&pxa_irq_syscore_ops); |
381 | ret = sysdev_register(&pxa25x_sysdev[i]); | 369 | register_syscore_ops(&pxa2xx_mfp_syscore_ops); |
382 | if (ret) | 370 | register_syscore_ops(&pxa_gpio_syscore_ops); |
383 | pr_err("failed to register sysdev[%d]\n", i); | 371 | register_syscore_ops(&pxa2xx_clock_syscore_ops); |
384 | } | ||
385 | 372 | ||
386 | ret = platform_add_devices(pxa25x_devices, | 373 | ret = platform_add_devices(pxa25x_devices, |
387 | ARRAY_SIZE(pxa25x_devices)); | 374 | ARRAY_SIZE(pxa25x_devices)); |
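The pxa25x init hunk, repeated below for pxa27x, pxa3xx and pxa95x, replaces the sys_device array and its error-reporting registration loop with straight-line register_syscore_ops() calls and no error handling at all. That is correct rather than careless: registration cannot fail. For reference, the interface shape these hunks assume (a sketch of the 2.6.39-era header, not a quotation):

    /* assumed shape of include/linux/syscore_ops.h */
    struct syscore_ops {
            struct list_head node;             /* linkage in the global ops list */
            int (*suspend)(void);
            void (*resume)(void);
            void (*shutdown)(void);
    };

    void register_syscore_ops(struct syscore_ops *ops);    /* cannot fail */
    void unregister_syscore_ops(struct syscore_ops *ops);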
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 909756eaf4b7..2fecbec58d88 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/suspend.h> | 17 | #include <linux/suspend.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/sysdev.h> | 19 | #include <linux/syscore_ops.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
22 | #include <linux/i2c/pxa-i2c.h> | 22 | #include <linux/i2c/pxa-i2c.h> |
@@ -428,21 +428,9 @@ static struct platform_device *devices[] __initdata = { | |||
428 | &pxa27x_device_pwm1, | 428 | &pxa27x_device_pwm1, |
429 | }; | 429 | }; |
430 | 430 | ||
431 | static struct sys_device pxa27x_sysdev[] = { | ||
432 | { | ||
433 | .cls = &pxa_irq_sysclass, | ||
434 | }, { | ||
435 | .cls = &pxa2xx_mfp_sysclass, | ||
436 | }, { | ||
437 | .cls = &pxa_gpio_sysclass, | ||
438 | }, { | ||
439 | .cls = &pxa2xx_clock_sysclass, | ||
440 | } | ||
441 | }; | ||
442 | |||
443 | static int __init pxa27x_init(void) | 431 | static int __init pxa27x_init(void) |
444 | { | 432 | { |
445 | int i, ret = 0; | 433 | int ret = 0; |
446 | 434 | ||
447 | if (cpu_is_pxa27x()) { | 435 | if (cpu_is_pxa27x()) { |
448 | 436 | ||
@@ -455,11 +443,10 @@ static int __init pxa27x_init(void) | |||
455 | 443 | ||
456 | pxa27x_init_pm(); | 444 | pxa27x_init_pm(); |
457 | 445 | ||
458 | for (i = 0; i < ARRAY_SIZE(pxa27x_sysdev); i++) { | 446 | register_syscore_ops(&pxa_irq_syscore_ops); |
459 | ret = sysdev_register(&pxa27x_sysdev[i]); | 447 | register_syscore_ops(&pxa2xx_mfp_syscore_ops); |
460 | if (ret) | 448 | register_syscore_ops(&pxa_gpio_syscore_ops); |
461 | pr_err("failed to register sysdev[%d]\n", i); | 449 | register_syscore_ops(&pxa2xx_clock_syscore_ops); |
462 | } | ||
463 | 450 | ||
464 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); | 451 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); |
465 | } | 452 | } |
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 8dd107391157..8521d7d6f1da 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/sysdev.h> | 23 | #include <linux/syscore_ops.h> |
24 | #include <linux/i2c/pxa-i2c.h> | 24 | #include <linux/i2c/pxa-i2c.h> |
25 | 25 | ||
26 | #include <asm/mach/map.h> | 26 | #include <asm/mach/map.h> |
@@ -427,21 +427,9 @@ static struct platform_device *devices[] __initdata = { | |||
427 | &pxa27x_device_pwm1, | 427 | &pxa27x_device_pwm1, |
428 | }; | 428 | }; |
429 | 429 | ||
430 | static struct sys_device pxa3xx_sysdev[] = { | ||
431 | { | ||
432 | .cls = &pxa_irq_sysclass, | ||
433 | }, { | ||
434 | .cls = &pxa3xx_mfp_sysclass, | ||
435 | }, { | ||
436 | .cls = &pxa_gpio_sysclass, | ||
437 | }, { | ||
438 | .cls = &pxa3xx_clock_sysclass, | ||
439 | } | ||
440 | }; | ||
441 | |||
442 | static int __init pxa3xx_init(void) | 430 | static int __init pxa3xx_init(void) |
443 | { | 431 | { |
444 | int i, ret = 0; | 432 | int ret = 0; |
445 | 433 | ||
446 | if (cpu_is_pxa3xx()) { | 434 | if (cpu_is_pxa3xx()) { |
447 | 435 | ||
@@ -462,11 +450,10 @@ static int __init pxa3xx_init(void) | |||
462 | 450 | ||
463 | pxa3xx_init_pm(); | 451 | pxa3xx_init_pm(); |
464 | 452 | ||
465 | for (i = 0; i < ARRAY_SIZE(pxa3xx_sysdev); i++) { | 453 | register_syscore_ops(&pxa_irq_syscore_ops); |
466 | ret = sysdev_register(&pxa3xx_sysdev[i]); | 454 | register_syscore_ops(&pxa3xx_mfp_syscore_ops); |
467 | if (ret) | 455 | register_syscore_ops(&pxa_gpio_syscore_ops); |
468 | pr_err("failed to register sysdev[%d]\n", i); | 456 | register_syscore_ops(&pxa3xx_clock_syscore_ops); |
469 | } | ||
470 | 457 | ||
471 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); | 458 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); |
472 | } | 459 | } |
diff --git a/arch/arm/mach-pxa/pxa95x.c b/arch/arm/mach-pxa/pxa95x.c index 23b229bd06e9..ecc82a330fad 100644 --- a/arch/arm/mach-pxa/pxa95x.c +++ b/arch/arm/mach-pxa/pxa95x.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/i2c/pxa-i2c.h> | 18 | #include <linux/i2c/pxa-i2c.h> |
19 | #include <linux/irq.h> | 19 | #include <linux/irq.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/syscore_ops.h> |
22 | 22 | ||
23 | #include <mach/hardware.h> | 23 | #include <mach/hardware.h> |
24 | #include <mach/gpio.h> | 24 | #include <mach/gpio.h> |
@@ -260,16 +260,6 @@ static struct platform_device *devices[] __initdata = { | |||
260 | &pxa27x_device_pwm1, | 260 | &pxa27x_device_pwm1, |
261 | }; | 261 | }; |
262 | 262 | ||
263 | static struct sys_device pxa95x_sysdev[] = { | ||
264 | { | ||
265 | .cls = &pxa_irq_sysclass, | ||
266 | }, { | ||
267 | .cls = &pxa_gpio_sysclass, | ||
268 | }, { | ||
269 | .cls = &pxa3xx_clock_sysclass, | ||
270 | } | ||
271 | }; | ||
272 | |||
273 | static int __init pxa95x_init(void) | 263 | static int __init pxa95x_init(void) |
274 | { | 264 | { |
275 | int ret = 0, i; | 265 | int ret = 0, i; |
@@ -293,11 +283,9 @@ static int __init pxa95x_init(void) | |||
293 | if ((ret = pxa_init_dma(IRQ_DMA, 32))) | 283 | if ((ret = pxa_init_dma(IRQ_DMA, 32))) |
294 | return ret; | 284 | return ret; |
295 | 285 | ||
296 | for (i = 0; i < ARRAY_SIZE(pxa95x_sysdev); i++) { | 286 | register_syscore_ops(&pxa_irq_syscore_ops); |
297 | ret = sysdev_register(&pxa95x_sysdev[i]); | 287 | register_syscore_ops(&pxa_gpio_syscore_ops); |
298 | if (ret) | 288 | register_syscore_ops(&pxa3xx_clock_syscore_ops); |
299 | pr_err("failed to register sysdev[%d]\n", i); | ||
300 | } | ||
301 | 289 | ||
302 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); | 290 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); |
303 | } | 291 | } |
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index cd1861351f75..d130f77b6d11 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c | |||
@@ -18,7 +18,6 @@ | |||
18 | 18 | ||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/sysdev.h> | ||
22 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
23 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
24 | #include <linux/gpio.h> | 23 | #include <linux/gpio.h> |
diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c index 232b7316ec08..79923058d10f 100644 --- a/arch/arm/mach-pxa/smemc.c +++ b/arch/arm/mach-pxa/smemc.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/io.h> | 8 | #include <linux/io.h> |
9 | #include <linux/sysdev.h> | 9 | #include <linux/syscore_ops.h> |
10 | 10 | ||
11 | #include <mach/hardware.h> | 11 | #include <mach/hardware.h> |
12 | #include <mach/smemc.h> | 12 | #include <mach/smemc.h> |
@@ -16,7 +16,7 @@ static unsigned long msc[2]; | |||
16 | static unsigned long sxcnfg, memclkcfg; | 16 | static unsigned long sxcnfg, memclkcfg; |
17 | static unsigned long csadrcfg[4]; | 17 | static unsigned long csadrcfg[4]; |
18 | 18 | ||
19 | static int pxa3xx_smemc_suspend(struct sys_device *dev, pm_message_t state) | 19 | static int pxa3xx_smemc_suspend(void) |
20 | { | 20 | { |
21 | msc[0] = __raw_readl(MSC0); | 21 | msc[0] = __raw_readl(MSC0); |
22 | msc[1] = __raw_readl(MSC1); | 22 | msc[1] = __raw_readl(MSC1); |
@@ -30,7 +30,7 @@ static int pxa3xx_smemc_suspend(struct sys_device *dev, pm_message_t state) | |||
30 | return 0; | 30 | return 0; |
31 | } | 31 | } |
32 | 32 | ||
33 | static int pxa3xx_smemc_resume(struct sys_device *dev) | 33 | static void pxa3xx_smemc_resume(void) |
34 | { | 34 | { |
35 | __raw_writel(msc[0], MSC0); | 35 | __raw_writel(msc[0], MSC0); |
36 | __raw_writel(msc[1], MSC1); | 36 | __raw_writel(msc[1], MSC1); |
@@ -40,34 +40,19 @@ static int pxa3xx_smemc_resume(struct sys_device *dev) | |||
40 | __raw_writel(csadrcfg[1], CSADRCFG1); | 40 | __raw_writel(csadrcfg[1], CSADRCFG1); |
41 | __raw_writel(csadrcfg[2], CSADRCFG2); | 41 | __raw_writel(csadrcfg[2], CSADRCFG2); |
42 | __raw_writel(csadrcfg[3], CSADRCFG3); | 42 | __raw_writel(csadrcfg[3], CSADRCFG3); |
43 | |||
44 | return 0; | ||
45 | } | 43 | } |
46 | 44 | ||
47 | static struct sysdev_class smemc_sysclass = { | 45 | static struct syscore_ops smemc_syscore_ops = { |
48 | .name = "smemc", | ||
49 | .suspend = pxa3xx_smemc_suspend, | 46 | .suspend = pxa3xx_smemc_suspend, |
50 | .resume = pxa3xx_smemc_resume, | 47 | .resume = pxa3xx_smemc_resume, |
51 | }; | 48 | }; |
52 | 49 | ||
53 | static struct sys_device smemc_sysdev = { | ||
54 | .id = 0, | ||
55 | .cls = &smemc_sysclass, | ||
56 | }; | ||
57 | |||
58 | static int __init smemc_init(void) | 50 | static int __init smemc_init(void) |
59 | { | 51 | { |
60 | int ret = 0; | 52 | if (cpu_is_pxa3xx()) |
53 | register_syscore_ops(&smemc_syscore_ops); | ||
61 | 54 | ||
62 | if (cpu_is_pxa3xx()) { | 55 | return 0; |
63 | ret = sysdev_class_register(&smemc_sysclass); | ||
64 | if (ret) | ||
65 | return ret; | ||
66 | |||
67 | ret = sysdev_register(&smemc_sysdev); | ||
68 | } | ||
69 | |||
70 | return ret; | ||
71 | } | 56 | } |
72 | subsys_initcall(smemc_init); | 57 | subsys_initcall(smemc_init); |
73 | #endif | 58 | #endif |
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c index b9cfbebdfe9c..687417a93698 100644 --- a/arch/arm/mach-pxa/trizeps4.c +++ b/arch/arm/mach-pxa/trizeps4.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/sysdev.h> | ||
19 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
20 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
21 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index b523f119e0f0..903218eab56d 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/mtd/mtd.h> | 44 | #include <linux/mtd/mtd.h> |
45 | #include <linux/mtd/partitions.h> | 45 | #include <linux/mtd/partitions.h> |
46 | #include <linux/mtd/physmap.h> | 46 | #include <linux/mtd/physmap.h> |
47 | #include <linux/syscore_ops.h> | ||
47 | 48 | ||
48 | #include <mach/pxa25x.h> | 49 | #include <mach/pxa25x.h> |
49 | #include <mach/audio.h> | 50 | #include <mach/audio.h> |
@@ -130,20 +131,19 @@ static u8 viper_hw_version(void) | |||
130 | return v1; | 131 | return v1; |
131 | } | 132 | } |
132 | 133 | ||
133 | /* CPU sysdev */ | 134 | /* CPU system core operations. */ |
134 | static int viper_cpu_suspend(struct sys_device *sysdev, pm_message_t state) | 135 | static int viper_cpu_suspend(void) |
135 | { | 136 | { |
136 | viper_icr_set_bit(VIPER_ICR_R_DIS); | 137 | viper_icr_set_bit(VIPER_ICR_R_DIS); |
137 | return 0; | 138 | return 0; |
138 | } | 139 | } |
139 | 140 | ||
140 | static int viper_cpu_resume(struct sys_device *sysdev) | 141 | static void viper_cpu_resume(void) |
141 | { | 142 | { |
142 | viper_icr_clear_bit(VIPER_ICR_R_DIS); | 143 | viper_icr_clear_bit(VIPER_ICR_R_DIS); |
143 | return 0; | ||
144 | } | 144 | } |
145 | 145 | ||
146 | static struct sysdev_driver viper_cpu_sysdev_driver = { | 146 | static struct syscore_ops viper_cpu_syscore_ops = { |
147 | .suspend = viper_cpu_suspend, | 147 | .suspend = viper_cpu_suspend, |
148 | .resume = viper_cpu_resume, | 148 | .resume = viper_cpu_resume, |
149 | }; | 149 | }; |
@@ -945,7 +945,7 @@ static void __init viper_init(void) | |||
945 | viper_init_vcore_gpios(); | 945 | viper_init_vcore_gpios(); |
946 | viper_init_cpufreq(); | 946 | viper_init_cpufreq(); |
947 | 947 | ||
948 | sysdev_driver_register(&cpu_sysdev_class, &viper_cpu_sysdev_driver); | 948 | register_syscore_ops(&viper_cpu_syscore_ops); |
949 | 949 | ||
950 | if (version) { | 950 | if (version) { |
951 | pr_info("viper: hardware v%di%d detected. " | 951 | pr_info("viper: hardware v%di%d detected. " |
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c index f71d377c8640..67bd41488bf8 100644 --- a/arch/arm/mach-pxa/vpac270.c +++ b/arch/arm/mach-pxa/vpac270.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/gpio_keys.h> | 16 | #include <linux/gpio_keys.h> |
17 | #include <linux/input.h> | 17 | #include <linux/input.h> |
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | #include <linux/sysdev.h> | ||
20 | #include <linux/usb/gpio_vbus.h> | 19 | #include <linux/usb/gpio_vbus.h> |
21 | #include <linux/mtd/mtd.h> | 20 | #include <linux/mtd/mtd.h> |
22 | #include <linux/mtd/partitions.h> | 21 | #include <linux/mtd/partitions.h> |
diff --git a/arch/arm/mach-realview/include/mach/barriers.h b/arch/arm/mach-realview/include/mach/barriers.h index 0c5d749d7b5f..9a732195aa1c 100644 --- a/arch/arm/mach-realview/include/mach/barriers.h +++ b/arch/arm/mach-realview/include/mach/barriers.h | |||
@@ -4,5 +4,5 @@ | |||
4 | * operation to deadlock the system. | 4 | * operation to deadlock the system. |
5 | */ | 5 | */ |
6 | #define mb() dsb() | 6 | #define mb() dsb() |
7 | #define rmb() dmb() | 7 | #define rmb() dsb() |
8 | #define wmb() mb() | 8 | #define wmb() mb() |
diff --git a/arch/arm/mach-s3c2410/irq.c b/arch/arm/mach-s3c2410/irq.c index 5e2f35332056..2854129f8cc7 100644 --- a/arch/arm/mach-s3c2410/irq.c +++ b/arch/arm/mach-s3c2410/irq.c | |||
@@ -23,38 +23,12 @@ | |||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/ioport.h> | 25 | #include <linux/ioport.h> |
26 | #include <linux/sysdev.h> | 26 | #include <linux/syscore_ops.h> |
27 | 27 | ||
28 | #include <plat/cpu.h> | 28 | #include <plat/cpu.h> |
29 | #include <plat/pm.h> | 29 | #include <plat/pm.h> |
30 | 30 | ||
31 | static int s3c2410_irq_add(struct sys_device *sysdev) | 31 | struct syscore_ops s3c24xx_irq_syscore_ops = { |
32 | { | ||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | static struct sysdev_driver s3c2410_irq_driver = { | ||
37 | .add = s3c2410_irq_add, | ||
38 | .suspend = s3c24xx_irq_suspend, | 32 | .suspend = s3c24xx_irq_suspend, |
39 | .resume = s3c24xx_irq_resume, | 33 | .resume = s3c24xx_irq_resume, |
40 | }; | 34 | }; |
41 | |||
42 | static int __init s3c2410_irq_init(void) | ||
43 | { | ||
44 | return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_irq_driver); | ||
45 | } | ||
46 | |||
47 | arch_initcall(s3c2410_irq_init); | ||
48 | |||
49 | static struct sysdev_driver s3c2410a_irq_driver = { | ||
50 | .add = s3c2410_irq_add, | ||
51 | .suspend = s3c24xx_irq_suspend, | ||
52 | .resume = s3c24xx_irq_resume, | ||
53 | }; | ||
54 | |||
55 | static int __init s3c2410a_irq_init(void) | ||
56 | { | ||
57 | return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_irq_driver); | ||
58 | } | ||
59 | |||
60 | arch_initcall(s3c2410a_irq_init); | ||
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c index 2970ea9f7c2b..1e2d536adda9 100644 --- a/arch/arm/mach-s3c2410/mach-bast.c +++ b/arch/arm/mach-s3c2410/mach-bast.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
20 | #include <linux/sysdev.h> | 20 | #include <linux/syscore_ops.h> |
21 | #include <linux/serial_core.h> | 21 | #include <linux/serial_core.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/dm9000.h> | 23 | #include <linux/dm9000.h> |
@@ -214,17 +214,16 @@ static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = { | |||
214 | /* NAND Flash on BAST board */ | 214 | /* NAND Flash on BAST board */ |
215 | 215 | ||
216 | #ifdef CONFIG_PM | 216 | #ifdef CONFIG_PM |
217 | static int bast_pm_suspend(struct sys_device *sd, pm_message_t state) | 217 | static int bast_pm_suspend(void) |
218 | { | 218 | { |
219 | /* ensure that an nRESET is not generated on resume. */ | 219 | /* ensure that an nRESET is not generated on resume. */ |
220 | gpio_direction_output(S3C2410_GPA(21), 1); | 220 | gpio_direction_output(S3C2410_GPA(21), 1); |
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
223 | 223 | ||
224 | static int bast_pm_resume(struct sys_device *sd) | 224 | static void bast_pm_resume(void) |
225 | { | 225 | { |
226 | s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); | 226 | s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); |
227 | return 0; | ||
228 | } | 227 | } |
229 | 228 | ||
230 | #else | 229 | #else |
@@ -232,16 +231,11 @@ static int bast_pm_resume(struct sys_device *sd) | |||
232 | #define bast_pm_resume NULL | 231 | #define bast_pm_resume NULL |
233 | #endif | 232 | #endif |
234 | 233 | ||
235 | static struct sysdev_class bast_pm_sysclass = { | 234 | static struct syscore_ops bast_pm_syscore_ops = { |
236 | .name = "mach-bast", | ||
237 | .suspend = bast_pm_suspend, | 235 | .suspend = bast_pm_suspend, |
238 | .resume = bast_pm_resume, | 236 | .resume = bast_pm_resume, |
239 | }; | 237 | }; |
240 | 238 | ||
241 | static struct sys_device bast_pm_sysdev = { | ||
242 | .cls = &bast_pm_sysclass, | ||
243 | }; | ||
244 | |||
245 | static int smartmedia_map[] = { 0 }; | 239 | static int smartmedia_map[] = { 0 }; |
246 | static int chip0_map[] = { 1 }; | 240 | static int chip0_map[] = { 1 }; |
247 | static int chip1_map[] = { 2 }; | 241 | static int chip1_map[] = { 2 }; |
@@ -642,8 +636,7 @@ static void __init bast_map_io(void) | |||
642 | 636 | ||
643 | static void __init bast_init(void) | 637 | static void __init bast_init(void) |
644 | { | 638 | { |
645 | sysdev_class_register(&bast_pm_sysclass); | 639 | register_syscore_ops(&bast_pm_syscore_ops); |
646 | sysdev_register(&bast_pm_sysdev); | ||
647 | 640 | ||
648 | s3c_i2c0_set_platdata(&bast_i2c_info); | 641 | s3c_i2c0_set_platdata(&bast_i2c_info); |
649 | s3c_nand_set_platdata(&bast_nand_info); | 642 | s3c_nand_set_platdata(&bast_nand_info); |
diff --git a/arch/arm/mach-s3c2410/pm.c b/arch/arm/mach-s3c2410/pm.c index 725636fc4dc3..4728f9aa7df1 100644 --- a/arch/arm/mach-s3c2410/pm.c +++ b/arch/arm/mach-s3c2410/pm.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/time.h> | 26 | #include <linux/time.h> |
27 | #include <linux/sysdev.h> | 27 | #include <linux/sysdev.h> |
28 | #include <linux/syscore_ops.h> | ||
28 | #include <linux/gpio.h> | 29 | #include <linux/gpio.h> |
29 | #include <linux/io.h> | 30 | #include <linux/io.h> |
30 | 31 | ||
@@ -92,7 +93,7 @@ static void s3c2410_pm_prepare(void) | |||
92 | } | 93 | } |
93 | } | 94 | } |
94 | 95 | ||
95 | static int s3c2410_pm_resume(struct sys_device *dev) | 96 | static void s3c2410_pm_resume(void) |
96 | { | 97 | { |
97 | unsigned long tmp; | 98 | unsigned long tmp; |
98 | 99 | ||
@@ -104,10 +105,12 @@ static int s3c2410_pm_resume(struct sys_device *dev) | |||
104 | 105 | ||
105 | if ( machine_is_aml_m5900() ) | 106 | if ( machine_is_aml_m5900() ) |
106 | s3c2410_gpio_setpin(S3C2410_GPF(2), 0); | 107 | s3c2410_gpio_setpin(S3C2410_GPF(2), 0); |
107 | |||
108 | return 0; | ||
109 | } | 108 | } |
110 | 109 | ||
110 | struct syscore_ops s3c2410_pm_syscore_ops = { | ||
111 | .resume = s3c2410_pm_resume, | ||
112 | }; | ||
113 | |||
111 | static int s3c2410_pm_add(struct sys_device *dev) | 114 | static int s3c2410_pm_add(struct sys_device *dev) |
112 | { | 115 | { |
113 | pm_cpu_prep = s3c2410_pm_prepare; | 116 | pm_cpu_prep = s3c2410_pm_prepare; |
@@ -119,7 +122,6 @@ static int s3c2410_pm_add(struct sys_device *dev) | |||
119 | #if defined(CONFIG_CPU_S3C2410) | 122 | #if defined(CONFIG_CPU_S3C2410) |
120 | static struct sysdev_driver s3c2410_pm_driver = { | 123 | static struct sysdev_driver s3c2410_pm_driver = { |
121 | .add = s3c2410_pm_add, | 124 | .add = s3c2410_pm_add, |
122 | .resume = s3c2410_pm_resume, | ||
123 | }; | 125 | }; |
124 | 126 | ||
125 | /* register ourselves */ | 127 | /* register ourselves */ |
@@ -133,7 +135,6 @@ arch_initcall(s3c2410_pm_drvinit); | |||
133 | 135 | ||
134 | static struct sysdev_driver s3c2410a_pm_driver = { | 136 | static struct sysdev_driver s3c2410a_pm_driver = { |
135 | .add = s3c2410_pm_add, | 137 | .add = s3c2410_pm_add, |
136 | .resume = s3c2410_pm_resume, | ||
137 | }; | 138 | }; |
138 | 139 | ||
139 | static int __init s3c2410a_pm_drvinit(void) | 140 | static int __init s3c2410a_pm_drvinit(void) |
@@ -147,7 +148,6 @@ arch_initcall(s3c2410a_pm_drvinit); | |||
147 | #if defined(CONFIG_CPU_S3C2440) | 148 | #if defined(CONFIG_CPU_S3C2440) |
148 | static struct sysdev_driver s3c2440_pm_driver = { | 149 | static struct sysdev_driver s3c2440_pm_driver = { |
149 | .add = s3c2410_pm_add, | 150 | .add = s3c2410_pm_add, |
150 | .resume = s3c2410_pm_resume, | ||
151 | }; | 151 | }; |
152 | 152 | ||
153 | static int __init s3c2440_pm_drvinit(void) | 153 | static int __init s3c2440_pm_drvinit(void) |
@@ -161,7 +161,6 @@ arch_initcall(s3c2440_pm_drvinit); | |||
161 | #if defined(CONFIG_CPU_S3C2442) | 161 | #if defined(CONFIG_CPU_S3C2442) |
162 | static struct sysdev_driver s3c2442_pm_driver = { | 162 | static struct sysdev_driver s3c2442_pm_driver = { |
163 | .add = s3c2410_pm_add, | 163 | .add = s3c2410_pm_add, |
164 | .resume = s3c2410_pm_resume, | ||
165 | }; | 164 | }; |
166 | 165 | ||
167 | static int __init s3c2442_pm_drvinit(void) | 166 | static int __init s3c2442_pm_drvinit(void) |
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c index adc90a3c5890..f1d3bd8f6f17 100644 --- a/arch/arm/mach-s3c2410/s3c2410.c +++ b/arch/arm/mach-s3c2410/s3c2410.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
20 | #include <linux/clk.h> | 20 | #include <linux/clk.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
22 | #include <linux/syscore_ops.h> | ||
22 | #include <linux/serial_core.h> | 23 | #include <linux/serial_core.h> |
23 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
24 | #include <linux/io.h> | 25 | #include <linux/io.h> |
@@ -40,6 +41,7 @@ | |||
40 | #include <plat/devs.h> | 41 | #include <plat/devs.h> |
41 | #include <plat/clock.h> | 42 | #include <plat/clock.h> |
42 | #include <plat/pll.h> | 43 | #include <plat/pll.h> |
44 | #include <plat/pm.h> | ||
43 | 45 | ||
44 | #include <plat/gpio-core.h> | 46 | #include <plat/gpio-core.h> |
45 | #include <plat/gpio-cfg.h> | 47 | #include <plat/gpio-cfg.h> |
@@ -168,6 +170,9 @@ int __init s3c2410_init(void) | |||
168 | { | 170 | { |
169 | printk("S3C2410: Initialising architecture\n"); | 171 | printk("S3C2410: Initialising architecture\n"); |
170 | 172 | ||
173 | register_syscore_ops(&s3c2410_pm_syscore_ops); | ||
174 | register_syscore_ops(&s3c24xx_irq_syscore_ops); | ||
175 | |||
171 | return sysdev_register(&s3c2410_sysdev); | 176 | return sysdev_register(&s3c2410_sysdev); |
172 | } | 177 | } |
173 | 178 | ||
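On the Samsung side the same conversion is split across files: mach-s3c2410/irq.c turns its two duplicate sysdev_driver registrations (s3c2410 and s3c2410a, each carrying a do-nothing .add hook) into one exported s3c24xx_irq_syscore_ops, pm.c exports s3c2410_pm_syscore_ops while its four per-CPU sysdev drivers keep only their .add hooks, and s3c2410_init() now registers both in one place. The newly added #include <plat/pm.h> suggests that is where the extern declarations live; assuming so, the remaining plumbing is just:

    #include <linux/syscore_ops.h>

    /* presumed declarations in <plat/pm.h> (the include added above) */
    extern struct syscore_ops s3c2410_pm_syscore_ops;
    extern struct syscore_ops s3c24xx_irq_syscore_ops;

    int __init demo_s3c2410_init(void)         /* hypothetical wrapper */
    {
            register_syscore_ops(&s3c2410_pm_syscore_ops);
            register_syscore_ops(&s3c24xx_irq_syscore_ops);
            return 0;
    }

Syscore suspend callbacks appear to run in reverse registration order, with resume in forward order, so relative registration order is the one thing these init functions still control. The s3c2412 and s3c2416 hunks below repeat the identical pattern.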
diff --git a/arch/arm/mach-s3c2412/irq.c b/arch/arm/mach-s3c2412/irq.c index f3355d2ec634..1a1aa220972b 100644 --- a/arch/arm/mach-s3c2412/irq.c +++ b/arch/arm/mach-s3c2412/irq.c | |||
@@ -202,8 +202,6 @@ static int s3c2412_irq_add(struct sys_device *sysdev) | |||
202 | 202 | ||
203 | static struct sysdev_driver s3c2412_irq_driver = { | 203 | static struct sysdev_driver s3c2412_irq_driver = { |
204 | .add = s3c2412_irq_add, | 204 | .add = s3c2412_irq_add, |
205 | .suspend = s3c24xx_irq_suspend, | ||
206 | .resume = s3c24xx_irq_resume, | ||
207 | }; | 205 | }; |
208 | 206 | ||
209 | static int s3c2412_irq_init(void) | 207 | static int s3c2412_irq_init(void) |
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c index 923e01bdf017..85dcaeb9e62f 100644 --- a/arch/arm/mach-s3c2412/mach-jive.c +++ b/arch/arm/mach-s3c2412/mach-jive.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
20 | #include <linux/sysdev.h> | 20 | #include <linux/syscore_ops.h> |
21 | #include <linux/serial_core.h> | 21 | #include <linux/serial_core.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/i2c.h> | 23 | #include <linux/i2c.h> |
@@ -486,7 +486,7 @@ static struct s3c2410_udc_mach_info jive_udc_cfg __initdata = { | |||
486 | /* Jive power management device */ | 486 | /* Jive power management device */ |
487 | 487 | ||
488 | #ifdef CONFIG_PM | 488 | #ifdef CONFIG_PM |
489 | static int jive_pm_suspend(struct sys_device *sd, pm_message_t state) | 489 | static int jive_pm_suspend(void) |
490 | { | 490 | { |
491 | /* Write the magic value u-boot uses to check for resume into | 491 | /* Write the magic value u-boot uses to check for resume into |
492 | * the INFORM0 register, and ensure INFORM1 is set to the | 492 | * the INFORM0 register, and ensure INFORM1 is set to the |
@@ -498,10 +498,9 @@ static int jive_pm_suspend(struct sys_device *sd, pm_message_t state) | |||
498 | return 0; | 498 | return 0; |
499 | } | 499 | } |
500 | 500 | ||
501 | static int jive_pm_resume(struct sys_device *sd) | 501 | static void jive_pm_resume(void) |
502 | { | 502 | { |
503 | __raw_writel(0x0, S3C2412_INFORM0); | 503 | __raw_writel(0x0, S3C2412_INFORM0); |
504 | return 0; | ||
505 | } | 504 | } |
506 | 505 | ||
507 | #else | 506 | #else |
@@ -509,16 +508,11 @@ static int jive_pm_resume(struct sys_device *sd) | |||
509 | #define jive_pm_resume NULL | 508 | #define jive_pm_resume NULL |
510 | #endif | 509 | #endif |
511 | 510 | ||
512 | static struct sysdev_class jive_pm_sysclass = { | 511 | static struct syscore_ops jive_pm_syscore_ops = { |
513 | .name = "jive-pm", | ||
514 | .suspend = jive_pm_suspend, | 512 | .suspend = jive_pm_suspend, |
515 | .resume = jive_pm_resume, | 513 | .resume = jive_pm_resume, |
516 | }; | 514 | }; |
517 | 515 | ||
518 | static struct sys_device jive_pm_sysdev = { | ||
519 | .cls = &jive_pm_sysclass, | ||
520 | }; | ||
521 | |||
522 | static void __init jive_map_io(void) | 516 | static void __init jive_map_io(void) |
523 | { | 517 | { |
524 | s3c24xx_init_io(jive_iodesc, ARRAY_SIZE(jive_iodesc)); | 518 | s3c24xx_init_io(jive_iodesc, ARRAY_SIZE(jive_iodesc)); |
@@ -536,10 +530,9 @@ static void jive_power_off(void) | |||
536 | 530 | ||
537 | static void __init jive_machine_init(void) | 531 | static void __init jive_machine_init(void) |
538 | { | 532 | { |
539 | /* register system devices for managing low level suspend */ | 533 | /* register system core operations for managing low level suspend */ |
540 | 534 | ||
541 | sysdev_class_register(&jive_pm_sysclass); | 535 | register_syscore_ops(&jive_pm_syscore_ops); |
542 | sysdev_register(&jive_pm_sysdev); | ||
543 | 536 | ||
544 | /* write our sleep configurations for the IO. Pull down all unused | 537 | /* write our sleep configurations for the IO. Pull down all unused |
545 | * IO, ensure that we have turned off all peripherals we do not | 538 | * IO, ensure that we have turned off all peripherals we do not |
diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c index a7417c479ffe..752b13a7b3db 100644 --- a/arch/arm/mach-s3c2412/pm.c +++ b/arch/arm/mach-s3c2412/pm.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/sysdev.h> | 19 | #include <linux/sysdev.h> |
20 | #include <linux/syscore_ops.h> | ||
20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
21 | #include <linux/io.h> | 22 | #include <linux/io.h> |
22 | 23 | ||
@@ -86,13 +87,24 @@ static struct sleep_save s3c2412_sleep[] = { | |||
86 | SAVE_ITEM(S3C2413_GPJSLPCON), | 87 | SAVE_ITEM(S3C2413_GPJSLPCON), |
87 | }; | 88 | }; |
88 | 89 | ||
89 | static int s3c2412_pm_suspend(struct sys_device *dev, pm_message_t state) | 90 | static struct sysdev_driver s3c2412_pm_driver = { |
91 | .add = s3c2412_pm_add, | ||
92 | }; | ||
93 | |||
94 | static __init int s3c2412_pm_init(void) | ||
95 | { | ||
96 | return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver); | ||
97 | } | ||
98 | |||
99 | arch_initcall(s3c2412_pm_init); | ||
100 | |||
101 | static int s3c2412_pm_suspend(void) | ||
90 | { | 102 | { |
91 | s3c_pm_do_save(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); | 103 | s3c_pm_do_save(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); |
92 | return 0; | 104 | return 0; |
93 | } | 105 | } |
94 | 106 | ||
95 | static int s3c2412_pm_resume(struct sys_device *dev) | 107 | static void s3c2412_pm_resume(void) |
96 | { | 108 | { |
97 | unsigned long tmp; | 109 | unsigned long tmp; |
98 | 110 | ||
@@ -102,18 +114,9 @@ static int s3c2412_pm_resume(struct sys_device *dev) | |||
102 | __raw_writel(tmp, S3C2412_PWRCFG); | 114 | __raw_writel(tmp, S3C2412_PWRCFG); |
103 | 115 | ||
104 | s3c_pm_do_restore(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); | 116 | s3c_pm_do_restore(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); |
105 | return 0; | ||
106 | } | 117 | } |
107 | 118 | ||
108 | static struct sysdev_driver s3c2412_pm_driver = { | 119 | struct syscore_ops s3c2412_pm_syscore_ops = { |
109 | .add = s3c2412_pm_add, | ||
110 | .suspend = s3c2412_pm_suspend, | 120 | .suspend = s3c2412_pm_suspend, |
111 | .resume = s3c2412_pm_resume, | 121 | .resume = s3c2412_pm_resume, |
112 | }; | 122 | }; |
113 | |||
114 | static __init int s3c2412_pm_init(void) | ||
115 | { | ||
116 | return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver); | ||
117 | } | ||
118 | |||
119 | arch_initcall(s3c2412_pm_init); | ||
diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c index 4c6df51ddf33..ef0958d3e5c6 100644 --- a/arch/arm/mach-s3c2412/s3c2412.c +++ b/arch/arm/mach-s3c2412/s3c2412.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
22 | #include <linux/syscore_ops.h> | ||
22 | #include <linux/serial_core.h> | 23 | #include <linux/serial_core.h> |
23 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
24 | #include <linux/io.h> | 25 | #include <linux/io.h> |
@@ -244,5 +245,8 @@ int __init s3c2412_init(void) | |||
244 | { | 245 | { |
245 | printk("S3C2412: Initialising architecture\n"); | 246 | printk("S3C2412: Initialising architecture\n"); |
246 | 247 | ||
248 | register_syscore_ops(&s3c2412_pm_syscore_ops); | ||
249 | register_syscore_ops(&s3c24xx_irq_syscore_ops); | ||
250 | |||
247 | return sysdev_register(&s3c2412_sysdev); | 251 | return sysdev_register(&s3c2412_sysdev); |
248 | } | 252 | } |
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c index 77b38f2381c1..28ad20d42445 100644 --- a/arch/arm/mach-s3c2416/irq.c +++ b/arch/arm/mach-s3c2416/irq.c | |||
@@ -236,8 +236,6 @@ static int __init s3c2416_irq_add(struct sys_device *sysdev) | |||
236 | 236 | ||
237 | static struct sysdev_driver s3c2416_irq_driver = { | 237 | static struct sysdev_driver s3c2416_irq_driver = { |
238 | .add = s3c2416_irq_add, | 238 | .add = s3c2416_irq_add, |
239 | .suspend = s3c24xx_irq_suspend, | ||
240 | .resume = s3c24xx_irq_resume, | ||
241 | }; | 239 | }; |
242 | 240 | ||
243 | static int __init s3c2416_irq_init(void) | 241 | static int __init s3c2416_irq_init(void) |
diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c index 4a04205b04d5..41db2b21e213 100644 --- a/arch/arm/mach-s3c2416/pm.c +++ b/arch/arm/mach-s3c2416/pm.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/sysdev.h> | 13 | #include <linux/sysdev.h> |
14 | #include <linux/syscore_ops.h> | ||
14 | #include <linux/io.h> | 15 | #include <linux/io.h> |
15 | 16 | ||
16 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
@@ -55,30 +56,26 @@ static int s3c2416_pm_add(struct sys_device *sysdev) | |||
55 | return 0; | 56 | return 0; |
56 | } | 57 | } |
57 | 58 | ||
58 | static int s3c2416_pm_suspend(struct sys_device *dev, pm_message_t state) | 59 | static struct sysdev_driver s3c2416_pm_driver = { |
60 | .add = s3c2416_pm_add, | ||
61 | }; | ||
62 | |||
63 | static __init int s3c2416_pm_init(void) | ||
59 | { | 64 | { |
60 | return 0; | 65 | return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver); |
61 | } | 66 | } |
62 | 67 | ||
63 | static int s3c2416_pm_resume(struct sys_device *dev) | 68 | arch_initcall(s3c2416_pm_init); |
69 | |||
70 | |||
71 | static void s3c2416_pm_resume(void) | ||
64 | { | 72 | { |
65 | /* unset the return-from-sleep and inform flags */ | 73 | /* unset the return-from-sleep and inform flags */ |
66 | __raw_writel(0x0, S3C2443_PWRMODE); | 74 | __raw_writel(0x0, S3C2443_PWRMODE); |
67 | __raw_writel(0x0, S3C2412_INFORM0); | 75 | __raw_writel(0x0, S3C2412_INFORM0); |
68 | __raw_writel(0x0, S3C2412_INFORM1); | 76 | __raw_writel(0x0, S3C2412_INFORM1); |
69 | |||
70 | return 0; | ||
71 | } | 77 | } |
72 | 78 | ||
73 | static struct sysdev_driver s3c2416_pm_driver = { | 79 | struct syscore_ops s3c2416_pm_syscore_ops = { |
74 | .add = s3c2416_pm_add, | ||
75 | .suspend = s3c2416_pm_suspend, | ||
76 | .resume = s3c2416_pm_resume, | 80 | .resume = s3c2416_pm_resume, |
77 | }; | 81 | }; |
78 | |||
79 | static __init int s3c2416_pm_init(void) | ||
80 | { | ||
81 | return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver); | ||
82 | } | ||
83 | |||
84 | arch_initcall(s3c2416_pm_init); | ||
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c index ba7fd8737434..494ce913dc95 100644 --- a/arch/arm/mach-s3c2416/s3c2416.c +++ b/arch/arm/mach-s3c2416/s3c2416.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
33 | #include <linux/serial_core.h> | 33 | #include <linux/serial_core.h> |
34 | #include <linux/sysdev.h> | 34 | #include <linux/sysdev.h> |
35 | #include <linux/syscore_ops.h> | ||
35 | #include <linux/clk.h> | 36 | #include <linux/clk.h> |
36 | #include <linux/io.h> | 37 | #include <linux/io.h> |
37 | 38 | ||
@@ -54,6 +55,7 @@ | |||
54 | #include <plat/devs.h> | 55 | #include <plat/devs.h> |
55 | #include <plat/cpu.h> | 56 | #include <plat/cpu.h> |
56 | #include <plat/sdhci.h> | 57 | #include <plat/sdhci.h> |
58 | #include <plat/pm.h> | ||
57 | 59 | ||
58 | #include <plat/iic-core.h> | 60 | #include <plat/iic-core.h> |
59 | #include <plat/fb-core.h> | 61 | #include <plat/fb-core.h> |
@@ -95,6 +97,9 @@ int __init s3c2416_init(void) | |||
95 | 97 | ||
96 | s3c_fb_setname("s3c2443-fb"); | 98 | s3c_fb_setname("s3c2443-fb"); |
97 | 99 | ||
100 | register_syscore_ops(&s3c2416_pm_syscore_ops); | ||
101 | register_syscore_ops(&s3c24xx_irq_syscore_ops); | ||
102 | |||
98 | return sysdev_register(&s3c2416_sysdev); | 103 | return sysdev_register(&s3c2416_sysdev); |
99 | } | 104 | } |
100 | 105 | ||
diff --git a/arch/arm/mach-s3c2440/mach-osiris.c b/arch/arm/mach-s3c2440/mach-osiris.c index 14dc67897757..d88536393310 100644 --- a/arch/arm/mach-s3c2440/mach-osiris.c +++ b/arch/arm/mach-s3c2440/mach-osiris.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | #include <linux/device.h> | 19 | #include <linux/device.h> |
20 | #include <linux/sysdev.h> | 20 | #include <linux/syscore_ops.h> |
21 | #include <linux/serial_core.h> | 21 | #include <linux/serial_core.h> |
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/i2c.h> | 23 | #include <linux/i2c.h> |
@@ -284,7 +284,7 @@ static struct platform_device osiris_pcmcia = { | |||
284 | #ifdef CONFIG_PM | 284 | #ifdef CONFIG_PM |
285 | static unsigned char pm_osiris_ctrl0; | 285 | static unsigned char pm_osiris_ctrl0; |
286 | 286 | ||
287 | static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state) | 287 | static int osiris_pm_suspend(void) |
288 | { | 288 | { |
289 | unsigned int tmp; | 289 | unsigned int tmp; |
290 | 290 | ||
@@ -304,7 +304,7 @@ static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state) | |||
304 | return 0; | 304 | return 0; |
305 | } | 305 | } |
306 | 306 | ||
307 | static int osiris_pm_resume(struct sys_device *sd) | 307 | static void osiris_pm_resume(void) |
308 | { | 308 | { |
309 | if (pm_osiris_ctrl0 & OSIRIS_CTRL0_FIX8) | 309 | if (pm_osiris_ctrl0 & OSIRIS_CTRL0_FIX8) |
310 | __raw_writeb(OSIRIS_CTRL1_FIX8, OSIRIS_VA_CTRL1); | 310 | __raw_writeb(OSIRIS_CTRL1_FIX8, OSIRIS_VA_CTRL1); |
@@ -312,8 +312,6 @@ static int osiris_pm_resume(struct sys_device *sd) | |||
312 | __raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0); | 312 | __raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0); |
313 | 313 | ||
314 | s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); | 314 | s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); |
315 | |||
316 | return 0; | ||
317 | } | 315 | } |
318 | 316 | ||
319 | #else | 317 | #else |
@@ -321,16 +319,11 @@ static int osiris_pm_resume(struct sys_device *sd) | |||
321 | #define osiris_pm_resume NULL | 319 | #define osiris_pm_resume NULL |
322 | #endif | 320 | #endif |
323 | 321 | ||
324 | static struct sysdev_class osiris_pm_sysclass = { | 322 | static struct syscore_ops osiris_pm_syscore_ops = { |
325 | .name = "mach-osiris", | ||
326 | .suspend = osiris_pm_suspend, | 323 | .suspend = osiris_pm_suspend, |
327 | .resume = osiris_pm_resume, | 324 | .resume = osiris_pm_resume, |
328 | }; | 325 | }; |
329 | 326 | ||
330 | static struct sys_device osiris_pm_sysdev = { | ||
331 | .cls = &osiris_pm_sysclass, | ||
332 | }; | ||
333 | |||
334 | /* Link for DVS driver to TPS65011 */ | 327 | /* Link for DVS driver to TPS65011 */ |
335 | 328 | ||
336 | static void osiris_tps_release(struct device *dev) | 329 | static void osiris_tps_release(struct device *dev) |
@@ -439,8 +432,7 @@ static void __init osiris_map_io(void) | |||
439 | 432 | ||
440 | static void __init osiris_init(void) | 433 | static void __init osiris_init(void) |
441 | { | 434 | { |
442 | sysdev_class_register(&osiris_pm_sysclass); | 435 | register_syscore_ops(&osiris_pm_syscore_ops); |
443 | sysdev_register(&osiris_pm_sysdev); | ||
444 | 436 | ||
445 | s3c_i2c0_set_platdata(NULL); | 437 | s3c_i2c0_set_platdata(NULL); |
446 | s3c_nand_set_platdata(&osiris_nand_info); | 438 | s3c_nand_set_platdata(&osiris_nand_info); |
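
Editor's note: board files keep the usual #ifdef CONFIG_PM guard with NULL fallbacks; only the callback signatures change, since a syscore suspend still reports errors by returning int while resume becomes void. A hedged sketch of that idiom (the board_* names are illustrative only):

#include <linux/syscore_ops.h>

#ifdef CONFIG_PM
static int board_pm_suspend(void)
{
	/* latch board-level control registers before sleep */
	return 0;
}

static void board_pm_resume(void)
{
	/* re-drive the latched values after wakeup */
}
#else
#define board_pm_suspend NULL
#define board_pm_resume  NULL
#endif

static struct syscore_ops board_pm_syscore_ops = {
	.suspend = board_pm_suspend,
	.resume  = board_pm_resume,
};
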
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c index f7663f731ea0..ce99ff72838d 100644 --- a/arch/arm/mach-s3c2440/s3c2440.c +++ b/arch/arm/mach-s3c2440/s3c2440.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/serial_core.h> | 20 | #include <linux/serial_core.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
22 | #include <linux/syscore_ops.h> | ||
22 | #include <linux/gpio.h> | 23 | #include <linux/gpio.h> |
23 | #include <linux/clk.h> | 24 | #include <linux/clk.h> |
24 | #include <linux/io.h> | 25 | #include <linux/io.h> |
@@ -33,6 +34,7 @@ | |||
33 | #include <plat/devs.h> | 34 | #include <plat/devs.h> |
34 | #include <plat/cpu.h> | 35 | #include <plat/cpu.h> |
35 | #include <plat/s3c244x.h> | 36 | #include <plat/s3c244x.h> |
37 | #include <plat/pm.h> | ||
36 | 38 | ||
37 | #include <plat/gpio-core.h> | 39 | #include <plat/gpio-core.h> |
38 | #include <plat/gpio-cfg.h> | 40 | #include <plat/gpio-cfg.h> |
@@ -51,6 +53,12 @@ int __init s3c2440_init(void) | |||
51 | s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT; | 53 | s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT; |
52 | s3c_device_wdt.resource[1].end = IRQ_S3C2440_WDT; | 54 | s3c_device_wdt.resource[1].end = IRQ_S3C2440_WDT; |
53 | 55 | ||
56 | /* register suspend/resume handlers */ | ||
57 | |||
58 | register_syscore_ops(&s3c2410_pm_syscore_ops); | ||
59 | register_syscore_ops(&s3c244x_pm_syscore_ops); | ||
60 | register_syscore_ops(&s3c24xx_irq_syscore_ops); | ||
61 | |||
54 | /* register our system device for everything else */ | 62 | /* register our system device for everything else */ |
55 | 63 | ||
56 | return sysdev_register(&s3c2440_sysdev); | 64 | return sysdev_register(&s3c2440_sysdev); |
diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c index ecf813546554..6224bad4d604 100644 --- a/arch/arm/mach-s3c2440/s3c2442.c +++ b/arch/arm/mach-s3c2440/s3c2442.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/err.h> | 29 | #include <linux/err.h> |
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | #include <linux/sysdev.h> | 31 | #include <linux/sysdev.h> |
32 | #include <linux/syscore_ops.h> | ||
32 | #include <linux/interrupt.h> | 33 | #include <linux/interrupt.h> |
33 | #include <linux/ioport.h> | 34 | #include <linux/ioport.h> |
34 | #include <linux/mutex.h> | 35 | #include <linux/mutex.h> |
@@ -45,6 +46,7 @@ | |||
45 | #include <plat/clock.h> | 46 | #include <plat/clock.h> |
46 | #include <plat/cpu.h> | 47 | #include <plat/cpu.h> |
47 | #include <plat/s3c244x.h> | 48 | #include <plat/s3c244x.h> |
49 | #include <plat/pm.h> | ||
48 | 50 | ||
49 | #include <plat/gpio-core.h> | 51 | #include <plat/gpio-core.h> |
50 | #include <plat/gpio-cfg.h> | 52 | #include <plat/gpio-cfg.h> |
@@ -167,6 +169,10 @@ int __init s3c2442_init(void) | |||
167 | { | 169 | { |
168 | printk("S3C2442: Initialising architecture\n"); | 170 | printk("S3C2442: Initialising architecture\n"); |
169 | 171 | ||
172 | register_syscore_ops(&s3c2410_pm_syscore_ops); | ||
173 | register_syscore_ops(&s3c244x_pm_syscore_ops); | ||
174 | register_syscore_ops(&s3c24xx_irq_syscore_ops); | ||
175 | |||
170 | return sysdev_register(&s3c2442_sysdev); | 176 | return sysdev_register(&s3c2442_sysdev); |
171 | } | 177 | } |
172 | 178 | ||
diff --git a/arch/arm/mach-s3c2440/s3c244x-irq.c b/arch/arm/mach-s3c2440/s3c244x-irq.c index de07c2feaa32..c63e8f26d901 100644 --- a/arch/arm/mach-s3c2440/s3c244x-irq.c +++ b/arch/arm/mach-s3c2440/s3c244x-irq.c | |||
@@ -116,8 +116,6 @@ static int s3c244x_irq_add(struct sys_device *sysdev) | |||
116 | 116 | ||
117 | static struct sysdev_driver s3c2440_irq_driver = { | 117 | static struct sysdev_driver s3c2440_irq_driver = { |
118 | .add = s3c244x_irq_add, | 118 | .add = s3c244x_irq_add, |
119 | .suspend = s3c24xx_irq_suspend, | ||
120 | .resume = s3c24xx_irq_resume, | ||
121 | }; | 119 | }; |
122 | 120 | ||
123 | static int s3c2440_irq_init(void) | 121 | static int s3c2440_irq_init(void) |
@@ -129,8 +127,6 @@ arch_initcall(s3c2440_irq_init); | |||
129 | 127 | ||
130 | static struct sysdev_driver s3c2442_irq_driver = { | 128 | static struct sysdev_driver s3c2442_irq_driver = { |
131 | .add = s3c244x_irq_add, | 129 | .add = s3c244x_irq_add, |
132 | .suspend = s3c24xx_irq_suspend, | ||
133 | .resume = s3c24xx_irq_resume, | ||
134 | }; | 130 | }; |
135 | 131 | ||
136 | 132 | ||
diff --git a/arch/arm/mach-s3c2440/s3c244x.c b/arch/arm/mach-s3c2440/s3c244x.c index 90c1707b9c95..7e8a23d2098a 100644 --- a/arch/arm/mach-s3c2440/s3c244x.c +++ b/arch/arm/mach-s3c2440/s3c244x.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/serial_core.h> | 19 | #include <linux/serial_core.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
22 | #include <linux/syscore_ops.h> | ||
22 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
23 | #include <linux/io.h> | 24 | #include <linux/io.h> |
24 | 25 | ||
@@ -134,45 +135,14 @@ void __init s3c244x_init_clocks(int xtal) | |||
134 | s3c2410_baseclk_add(); | 135 | s3c2410_baseclk_add(); |
135 | } | 136 | } |
136 | 137 | ||
137 | #ifdef CONFIG_PM | ||
138 | |||
139 | static struct sleep_save s3c244x_sleep[] = { | ||
140 | SAVE_ITEM(S3C2440_DSC0), | ||
141 | SAVE_ITEM(S3C2440_DSC1), | ||
142 | SAVE_ITEM(S3C2440_GPJDAT), | ||
143 | SAVE_ITEM(S3C2440_GPJCON), | ||
144 | SAVE_ITEM(S3C2440_GPJUP) | ||
145 | }; | ||
146 | |||
147 | static int s3c244x_suspend(struct sys_device *dev, pm_message_t state) | ||
148 | { | ||
149 | s3c_pm_do_save(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); | ||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static int s3c244x_resume(struct sys_device *dev) | ||
154 | { | ||
155 | s3c_pm_do_restore(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | #else | ||
160 | #define s3c244x_suspend NULL | ||
161 | #define s3c244x_resume NULL | ||
162 | #endif | ||
163 | |||
164 | /* Since the S3C2442 and S3C2440 share items, put both sysclasses here */ | 138 | /* Since the S3C2442 and S3C2440 share items, put both sysclasses here */ |
165 | 139 | ||
166 | struct sysdev_class s3c2440_sysclass = { | 140 | struct sysdev_class s3c2440_sysclass = { |
167 | .name = "s3c2440-core", | 141 | .name = "s3c2440-core", |
168 | .suspend = s3c244x_suspend, | ||
169 | .resume = s3c244x_resume | ||
170 | }; | 142 | }; |
171 | 143 | ||
172 | struct sysdev_class s3c2442_sysclass = { | 144 | struct sysdev_class s3c2442_sysclass = { |
173 | .name = "s3c2442-core", | 145 | .name = "s3c2442-core", |
174 | .suspend = s3c244x_suspend, | ||
175 | .resume = s3c244x_resume | ||
176 | }; | 146 | }; |
177 | 147 | ||
178 | /* need to register class before we actually register the device, and | 148 | /* need to register class before we actually register the device, and |
@@ -194,3 +164,33 @@ static int __init s3c2442_core_init(void) | |||
194 | } | 164 | } |
195 | 165 | ||
196 | core_initcall(s3c2442_core_init); | 166 | core_initcall(s3c2442_core_init); |
167 | |||
168 | |||
169 | #ifdef CONFIG_PM | ||
170 | static struct sleep_save s3c244x_sleep[] = { | ||
171 | SAVE_ITEM(S3C2440_DSC0), | ||
172 | SAVE_ITEM(S3C2440_DSC1), | ||
173 | SAVE_ITEM(S3C2440_GPJDAT), | ||
174 | SAVE_ITEM(S3C2440_GPJCON), | ||
175 | SAVE_ITEM(S3C2440_GPJUP) | ||
176 | }; | ||
177 | |||
178 | static int s3c244x_suspend(void) | ||
179 | { | ||
180 | s3c_pm_do_save(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); | ||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static void s3c244x_resume(void) | ||
185 | { | ||
186 | s3c_pm_do_restore(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); | ||
187 | } | ||
188 | #else | ||
189 | #define s3c244x_suspend NULL | ||
190 | #define s3c244x_resume NULL | ||
191 | #endif | ||
192 | |||
193 | struct syscore_ops s3c244x_pm_syscore_ops = { | ||
194 | .suspend = s3c244x_suspend, | ||
195 | .resume = s3c244x_resume, | ||
196 | }; | ||
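
Editor's note: the s3c244x sleep table moves below the initcalls but is otherwise untouched. SAVE_ITEM() only records a register address; the save/restore helpers fill in and write back the values. Roughly the shape of those helpers, simplified from plat/pm.h (exact details may differ):

#include <linux/io.h>

/* simplified: one entry per register to preserve across sleep */
struct sleep_save {
	void __iomem	*reg;
	unsigned long	val;
};

#define SAVE_ITEM(x)	{ .reg = (x) }

static void pm_do_save(struct sleep_save *ptr, int count)
{
	for (; count > 0; count--, ptr++)
		ptr->val = __raw_readl(ptr->reg);
}

static void pm_do_restore(struct sleep_save *ptr, int count)
{
	for (; count > 0; count--, ptr++)
		__raw_writel(ptr->val, ptr->reg);
}
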
diff --git a/arch/arm/mach-s3c64xx/irq-pm.c b/arch/arm/mach-s3c64xx/irq-pm.c index da1bec64b9da..8bec61e242c7 100644 --- a/arch/arm/mach-s3c64xx/irq-pm.c +++ b/arch/arm/mach-s3c64xx/irq-pm.c | |||
@@ -13,7 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/sysdev.h> | 16 | #include <linux/syscore_ops.h> |
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/serial_core.h> | 18 | #include <linux/serial_core.h> |
19 | #include <linux/irq.h> | 19 | #include <linux/irq.h> |
@@ -54,7 +54,7 @@ static struct irq_grp_save { | |||
54 | 54 | ||
55 | static u32 irq_uart_mask[CONFIG_SERIAL_SAMSUNG_UARTS]; | 55 | static u32 irq_uart_mask[CONFIG_SERIAL_SAMSUNG_UARTS]; |
56 | 56 | ||
57 | static int s3c64xx_irq_pm_suspend(struct sys_device *dev, pm_message_t state) | 57 | static int s3c64xx_irq_pm_suspend(void) |
58 | { | 58 | { |
59 | struct irq_grp_save *grp = eint_grp_save; | 59 | struct irq_grp_save *grp = eint_grp_save; |
60 | int i; | 60 | int i; |
@@ -75,7 +75,7 @@ static int s3c64xx_irq_pm_suspend(struct sys_device *dev, pm_message_t state) | |||
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | 77 | ||
78 | static int s3c64xx_irq_pm_resume(struct sys_device *dev) | 78 | static void s3c64xx_irq_pm_resume(void) |
79 | { | 79 | { |
80 | struct irq_grp_save *grp = eint_grp_save; | 80 | struct irq_grp_save *grp = eint_grp_save; |
81 | int i; | 81 | int i; |
@@ -94,18 +94,18 @@ static int s3c64xx_irq_pm_resume(struct sys_device *dev) | |||
94 | } | 94 | } |
95 | 95 | ||
96 | S3C_PMDBG("%s: IRQ configuration restored\n", __func__); | 96 | S3C_PMDBG("%s: IRQ configuration restored\n", __func__); |
97 | return 0; | ||
98 | } | 97 | } |
99 | 98 | ||
100 | static struct sysdev_driver s3c64xx_irq_driver = { | 99 | struct syscore_ops s3c64xx_irq_syscore_ops = { |
101 | .suspend = s3c64xx_irq_pm_suspend, | 100 | .suspend = s3c64xx_irq_pm_suspend, |
102 | .resume = s3c64xx_irq_pm_resume, | 101 | .resume = s3c64xx_irq_pm_resume, |
103 | }; | 102 | }; |
104 | 103 | ||
105 | static int __init s3c64xx_irq_pm_init(void) | 104 | static __init int s3c64xx_syscore_init(void) |
106 | { | 105 | { |
107 | return sysdev_driver_register(&s3c64xx_sysclass, &s3c64xx_irq_driver); | 106 | register_syscore_ops(&s3c64xx_irq_syscore_ops); |
108 | } | ||
109 | 107 | ||
110 | arch_initcall(s3c64xx_irq_pm_init); | 108 | return 0; |
109 | } | ||
111 | 110 | ||
111 | core_initcall(s3c64xx_syscore_init); | ||
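
Editor's note: registration order matters here more than the initcall level — syscore suspend walks the ops list newest-first and resume walks it oldest-first, so ops registered earlier are suspended later and resumed earlier. A rough sketch of the core's behaviour, simplified from drivers/base/syscore.c (the real code adds locking, error unwinding, and debug checks):

#include <linux/list.h>
#include <linux/syscore_ops.h>

static LIST_HEAD(syscore_ops_list);

int syscore_suspend(void)
{
	struct syscore_ops *ops;
	int ret;

	/* last registered suspends first */
	list_for_each_entry_reverse(ops, &syscore_ops_list, node)
		if (ops->suspend) {
			ret = ops->suspend();
			if (ret)
				return ret; /* real code resumes what already suspended */
		}
	return 0;
}

void syscore_resume(void)
{
	struct syscore_ops *ops;

	/* first registered resumes first */
	list_for_each_entry(ops, &syscore_ops_list, node)
		if (ops->resume)
			ops->resume();
}
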
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c index 549d7924fd4c..24febae3d4c0 100644 --- a/arch/arm/mach-s5pv210/pm.c +++ b/arch/arm/mach-s5pv210/pm.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/suspend.h> | 18 | #include <linux/suspend.h> |
19 | #include <linux/syscore_ops.h> | ||
19 | #include <linux/io.h> | 20 | #include <linux/io.h> |
20 | 21 | ||
21 | #include <plat/cpu.h> | 22 | #include <plat/cpu.h> |
@@ -140,7 +141,17 @@ static int s5pv210_pm_add(struct sys_device *sysdev) | |||
140 | return 0; | 141 | return 0; |
141 | } | 142 | } |
142 | 143 | ||
143 | static int s5pv210_pm_resume(struct sys_device *dev) | 144 | static struct sysdev_driver s5pv210_pm_driver = { |
145 | .add = s5pv210_pm_add, | ||
146 | }; | ||
147 | |||
148 | static __init int s5pv210_pm_drvinit(void) | ||
149 | { | ||
150 | return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver); | ||
151 | } | ||
152 | arch_initcall(s5pv210_pm_drvinit); | ||
153 | |||
154 | static void s5pv210_pm_resume(void) | ||
144 | { | 155 | { |
145 | u32 tmp; | 156 | u32 tmp; |
146 | 157 | ||
@@ -150,17 +161,15 @@ static int s5pv210_pm_resume(struct sys_device *dev) | |||
150 | __raw_writel(tmp , S5P_OTHERS); | 161 | __raw_writel(tmp , S5P_OTHERS); |
151 | 162 | ||
152 | s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); | 163 | s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); |
153 | |||
154 | return 0; | ||
155 | } | 164 | } |
156 | 165 | ||
157 | static struct sysdev_driver s5pv210_pm_driver = { | 166 | static struct syscore_ops s5pv210_pm_syscore_ops = { |
158 | .add = s5pv210_pm_add, | ||
159 | .resume = s5pv210_pm_resume, | 167 | .resume = s5pv210_pm_resume, |
160 | }; | 168 | }; |
161 | 169 | ||
162 | static __init int s5pv210_pm_drvinit(void) | 170 | static __init int s5pv210_pm_syscore_init(void) |
163 | { | 171 | { |
164 | return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver); | 172 | register_syscore_ops(&s5pv210_pm_syscore_ops); |
173 | return 0; | ||
165 | } | 174 | } |
166 | arch_initcall(s5pv210_pm_drvinit); | 175 | arch_initcall(s5pv210_pm_syscore_init); |
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c index 423ddb3d65e9..dfbf824a69fa 100644 --- a/arch/arm/mach-sa1100/irq.c +++ b/arch/arm/mach-sa1100/irq.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
16 | #include <linux/ioport.h> | 16 | #include <linux/ioport.h> |
17 | #include <linux/sysdev.h> | 17 | #include <linux/syscore_ops.h> |
18 | 18 | ||
19 | #include <mach/hardware.h> | 19 | #include <mach/hardware.h> |
20 | #include <asm/mach/irq.h> | 20 | #include <asm/mach/irq.h> |
@@ -234,7 +234,7 @@ static struct sa1100irq_state { | |||
234 | unsigned int iccr; | 234 | unsigned int iccr; |
235 | } sa1100irq_state; | 235 | } sa1100irq_state; |
236 | 236 | ||
237 | static int sa1100irq_suspend(struct sys_device *dev, pm_message_t state) | 237 | static int sa1100irq_suspend(void) |
238 | { | 238 | { |
239 | struct sa1100irq_state *st = &sa1100irq_state; | 239 | struct sa1100irq_state *st = &sa1100irq_state; |
240 | 240 | ||
@@ -264,7 +264,7 @@ static int sa1100irq_suspend(struct sys_device *dev, pm_message_t state) | |||
264 | return 0; | 264 | return 0; |
265 | } | 265 | } |
266 | 266 | ||
267 | static int sa1100irq_resume(struct sys_device *dev) | 267 | static void sa1100irq_resume(void) |
268 | { | 268 | { |
269 | struct sa1100irq_state *st = &sa1100irq_state; | 269 | struct sa1100irq_state *st = &sa1100irq_state; |
270 | 270 | ||
@@ -277,24 +277,17 @@ static int sa1100irq_resume(struct sys_device *dev) | |||
277 | 277 | ||
278 | ICMR = st->icmr; | 278 | ICMR = st->icmr; |
279 | } | 279 | } |
280 | return 0; | ||
281 | } | 280 | } |
282 | 281 | ||
283 | static struct sysdev_class sa1100irq_sysclass = { | 282 | static struct syscore_ops sa1100irq_syscore_ops = { |
284 | .name = "sa11x0-irq", | ||
285 | .suspend = sa1100irq_suspend, | 283 | .suspend = sa1100irq_suspend, |
286 | .resume = sa1100irq_resume, | 284 | .resume = sa1100irq_resume, |
287 | }; | 285 | }; |
288 | 286 | ||
289 | static struct sys_device sa1100irq_device = { | ||
290 | .id = 0, | ||
291 | .cls = &sa1100irq_sysclass, | ||
292 | }; | ||
293 | |||
294 | static int __init sa1100irq_init_devicefs(void) | 287 | static int __init sa1100irq_init_devicefs(void) |
295 | { | 288 | { |
296 | sysdev_class_register(&sa1100irq_sysclass); | 289 | register_syscore_ops(&sa1100irq_syscore_ops); |
297 | return sysdev_register(&sa1100irq_device); | 290 | return 0; |
298 | } | 291 | } |
299 | 292 | ||
300 | device_initcall(sa1100irq_init_devicefs); | 293 | device_initcall(sa1100irq_init_devicefs); |
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c index 94912d3944d3..2d1b67a59e4a 100644 --- a/arch/arm/mach-shmobile/pm_runtime.c +++ b/arch/arm/mach-shmobile/pm_runtime.c | |||
@@ -18,152 +18,41 @@ | |||
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/sh_clk.h> | 19 | #include <linux/sh_clk.h> |
20 | #include <linux/bitmap.h> | 20 | #include <linux/bitmap.h> |
21 | #include <linux/slab.h> | ||
21 | 22 | ||
22 | #ifdef CONFIG_PM_RUNTIME | 23 | #ifdef CONFIG_PM_RUNTIME |
23 | #define BIT_ONCE 0 | ||
24 | #define BIT_ACTIVE 1 | ||
25 | #define BIT_CLK_ENABLED 2 | ||
26 | 24 | ||
27 | struct pm_runtime_data { | 25 | static int default_platform_runtime_idle(struct device *dev) |
28 | unsigned long flags; | ||
29 | struct clk *clk; | ||
30 | }; | ||
31 | |||
32 | static void __devres_release(struct device *dev, void *res) | ||
33 | { | ||
34 | struct pm_runtime_data *prd = res; | ||
35 | |||
36 | dev_dbg(dev, "__devres_release()\n"); | ||
37 | |||
38 | if (test_bit(BIT_CLK_ENABLED, &prd->flags)) | ||
39 | clk_disable(prd->clk); | ||
40 | |||
41 | if (test_bit(BIT_ACTIVE, &prd->flags)) | ||
42 | clk_put(prd->clk); | ||
43 | } | ||
44 | |||
45 | static struct pm_runtime_data *__to_prd(struct device *dev) | ||
46 | { | ||
47 | return devres_find(dev, __devres_release, NULL, NULL); | ||
48 | } | ||
49 | |||
50 | static void platform_pm_runtime_init(struct device *dev, | ||
51 | struct pm_runtime_data *prd) | ||
52 | { | ||
53 | if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) { | ||
54 | prd->clk = clk_get(dev, NULL); | ||
55 | if (!IS_ERR(prd->clk)) { | ||
56 | set_bit(BIT_ACTIVE, &prd->flags); | ||
57 | dev_info(dev, "clocks managed by runtime pm\n"); | ||
58 | } | ||
59 | } | ||
60 | } | ||
61 | |||
62 | static void platform_pm_runtime_bug(struct device *dev, | ||
63 | struct pm_runtime_data *prd) | ||
64 | { | ||
65 | if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) | ||
66 | dev_err(dev, "runtime pm suspend before resume\n"); | ||
67 | } | ||
68 | |||
69 | int platform_pm_runtime_suspend(struct device *dev) | ||
70 | { | ||
71 | struct pm_runtime_data *prd = __to_prd(dev); | ||
72 | |||
73 | dev_dbg(dev, "platform_pm_runtime_suspend()\n"); | ||
74 | |||
75 | platform_pm_runtime_bug(dev, prd); | ||
76 | |||
77 | if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { | ||
78 | clk_disable(prd->clk); | ||
79 | clear_bit(BIT_CLK_ENABLED, &prd->flags); | ||
80 | } | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | int platform_pm_runtime_resume(struct device *dev) | ||
86 | { | ||
87 | struct pm_runtime_data *prd = __to_prd(dev); | ||
88 | |||
89 | dev_dbg(dev, "platform_pm_runtime_resume()\n"); | ||
90 | |||
91 | platform_pm_runtime_init(dev, prd); | ||
92 | |||
93 | if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { | ||
94 | clk_enable(prd->clk); | ||
95 | set_bit(BIT_CLK_ENABLED, &prd->flags); | ||
96 | } | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | int platform_pm_runtime_idle(struct device *dev) | ||
102 | { | 26 | { |
103 | /* suspend synchronously to disable clocks immediately */ | 27 | /* suspend synchronously to disable clocks immediately */ |
104 | return pm_runtime_suspend(dev); | 28 | return pm_runtime_suspend(dev); |
105 | } | 29 | } |
106 | 30 | ||
107 | static int platform_bus_notify(struct notifier_block *nb, | 31 | static struct dev_power_domain default_power_domain = { |
108 | unsigned long action, void *data) | 32 | .ops = { |
109 | { | 33 | .runtime_suspend = pm_runtime_clk_suspend, |
110 | struct device *dev = data; | 34 | .runtime_resume = pm_runtime_clk_resume, |
111 | struct pm_runtime_data *prd; | 35 | .runtime_idle = default_platform_runtime_idle, |
112 | 36 | USE_PLATFORM_PM_SLEEP_OPS | |
113 | dev_dbg(dev, "platform_bus_notify() %ld !\n", action); | 37 | }, |
114 | 38 | }; | |
115 | if (action == BUS_NOTIFY_BIND_DRIVER) { | ||
116 | prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL); | ||
117 | if (prd) | ||
118 | devres_add(dev, prd); | ||
119 | else | ||
120 | dev_err(dev, "unable to alloc memory for runtime pm\n"); | ||
121 | } | ||
122 | |||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | #else /* CONFIG_PM_RUNTIME */ | ||
127 | |||
128 | static int platform_bus_notify(struct notifier_block *nb, | ||
129 | unsigned long action, void *data) | ||
130 | { | ||
131 | struct device *dev = data; | ||
132 | struct clk *clk; | ||
133 | 39 | ||
134 | dev_dbg(dev, "platform_bus_notify() %ld !\n", action); | 40 | #define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain) |
135 | 41 | ||
136 | switch (action) { | 42 | #else |
137 | case BUS_NOTIFY_BIND_DRIVER: | ||
138 | clk = clk_get(dev, NULL); | ||
139 | if (!IS_ERR(clk)) { | ||
140 | clk_enable(clk); | ||
141 | clk_put(clk); | ||
142 | dev_info(dev, "runtime pm disabled, clock forced on\n"); | ||
143 | } | ||
144 | break; | ||
145 | case BUS_NOTIFY_UNBOUND_DRIVER: | ||
146 | clk = clk_get(dev, NULL); | ||
147 | if (!IS_ERR(clk)) { | ||
148 | clk_disable(clk); | ||
149 | clk_put(clk); | ||
150 | dev_info(dev, "runtime pm disabled, clock forced off\n"); | ||
151 | } | ||
152 | break; | ||
153 | } | ||
154 | 43 | ||
155 | return 0; | 44 | #define DEFAULT_PWR_DOMAIN_PTR NULL |
156 | } | ||
157 | 45 | ||
158 | #endif /* CONFIG_PM_RUNTIME */ | 46 | #endif /* CONFIG_PM_RUNTIME */ |
159 | 47 | ||
160 | static struct notifier_block platform_bus_notifier = { | 48 | static struct pm_clk_notifier_block platform_bus_notifier = { |
161 | .notifier_call = platform_bus_notify | 49 | .pwr_domain = DEFAULT_PWR_DOMAIN_PTR, |
50 | .con_ids = { NULL, }, | ||
162 | }; | 51 | }; |
163 | 52 | ||
164 | static int __init sh_pm_runtime_init(void) | 53 | static int __init sh_pm_runtime_init(void) |
165 | { | 54 | { |
166 | bus_register_notifier(&platform_bus_type, &platform_bus_notifier); | 55 | pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); |
167 | return 0; | 56 | return 0; |
168 | } | 57 | } |
169 | core_initcall(sh_pm_runtime_init); | 58 | core_initcall(sh_pm_runtime_init); |
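
Editor's note: with the open-coded devres and clock bookkeeping gone, the bus-wide dev_power_domain routes runtime PM through the common pm_runtime_clk_* helpers, and drivers see only the generic runtime PM API. A sketch of the driver-side effect, assuming the default power domain above is in place (the foo_* names are hypothetical):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);

	/* pm_runtime_clk_resume() enables the device clock behind this */
	pm_runtime_get_sync(&pdev->dev);

	/* ... hardware access ... */

	/* default_platform_runtime_idle() suspends synchronously, so the
	 * clock is gated again as soon as the usage count drops */
	pm_runtime_put_sync(&pdev->dev);
	return 0;
}
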
diff --git a/arch/arm/mach-tegra/include/mach/barriers.h b/arch/arm/mach-tegra/include/mach/barriers.h index cc115174899b..425b42e91ef6 100644 --- a/arch/arm/mach-tegra/include/mach/barriers.h +++ b/arch/arm/mach-tegra/include/mach/barriers.h | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | #include <asm/outercache.h> | 24 | #include <asm/outercache.h> |
25 | 25 | ||
26 | #define rmb() dmb() | 26 | #define rmb() dsb() |
27 | #define wmb() do { dsb(); outer_sync(); } while (0) | 27 | #define wmb() do { dsb(); outer_sync(); } while (0) |
28 | #define mb() wmb() | 28 | #define mb() wmb() |
29 | 29 | ||
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index e5f6fc428348..e591513bb53e 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -392,7 +392,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) | |||
392 | * Convert start_pfn/end_pfn to a struct page pointer. | 392 | * Convert start_pfn/end_pfn to a struct page pointer. |
393 | */ | 393 | */ |
394 | start_pg = pfn_to_page(start_pfn - 1) + 1; | 394 | start_pg = pfn_to_page(start_pfn - 1) + 1; |
395 | end_pg = pfn_to_page(end_pfn); | 395 | end_pg = pfn_to_page(end_pfn - 1) + 1; |
396 | 396 | ||
397 | /* | 397 | /* |
398 | * Convert to physical addresses, and | 398 | * Convert to physical addresses, and |
@@ -426,6 +426,14 @@ static void __init free_unused_memmap(struct meminfo *mi) | |||
426 | 426 | ||
427 | bank_start = bank_pfn_start(bank); | 427 | bank_start = bank_pfn_start(bank); |
428 | 428 | ||
429 | #ifdef CONFIG_SPARSEMEM | ||
430 | /* | ||
431 | * Take care not to free memmap entries that don't exist | ||
432 | * due to SPARSEMEM sections which aren't present. | ||
433 | */ | ||
434 | bank_start = min(bank_start, | ||
435 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); | ||
436 | #endif | ||
429 | /* | 437 | /* |
430 | * If we had a previous bank, and there is a space | 438 | * If we had a previous bank, and there is a space |
431 | * between the current bank and the previous, free it. | 439 | * between the current bank and the previous, free it. |
@@ -440,6 +448,12 @@ static void __init free_unused_memmap(struct meminfo *mi) | |||
440 | */ | 448 | */ |
441 | prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); | 449 | prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); |
442 | } | 450 | } |
451 | |||
452 | #ifdef CONFIG_SPARSEMEM | ||
453 | if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) | ||
454 | free_memmap(prev_bank_end, | ||
455 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); | ||
456 | #endif | ||
443 | } | 457 | } |
444 | 458 | ||
445 | static void __init free_highpages(void) | 459 | static void __init free_highpages(void) |
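
Editor's note: with SPARSEMEM the memmap exists only for sections that are present, so freeing must be clamped to section boundaries on both sides of a hole. A worked example with hypothetical values (assuming PAGES_PER_SECTION = 0x10000):

/*
 * prev_bank_end = 0x2f000  (pfn just past the previous bank)
 * bank_start    = 0x48000  (first pfn of the next bank)
 *
 * ALIGN(0x2f000, 0x10000) == 0x30000, so:
 *   bank_start = min(0x48000, 0x30000) == 0x30000
 *
 * free_memmap(0x2f000, 0x30000) then releases only the unused tail
 * of the section that is actually present; the memmap for the range
 * 0x30000..0x48000 was never allocated, and touching it would fault.
 * The final !IS_ALIGNED() check applies the same clamp after the
 * last bank.
 */
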
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index d2adcdda23cf..bd9e32187eab 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/sysdev.h> | 20 | #include <linux/syscore_ops.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/io.h> | 23 | #include <linux/io.h> |
@@ -1372,9 +1372,7 @@ static const struct dev_pm_ops omap_mpuio_dev_pm_ops = { | |||
1372 | .resume_noirq = omap_mpuio_resume_noirq, | 1372 | .resume_noirq = omap_mpuio_resume_noirq, |
1373 | }; | 1373 | }; |
1374 | 1374 | ||
1375 | /* use platform_driver for this, now that there's no longer any | 1375 | /* use platform_driver for this. */ |
1376 | * point to sys_device (other than not disturbing old code). | ||
1377 | */ | ||
1378 | static struct platform_driver omap_mpuio_driver = { | 1376 | static struct platform_driver omap_mpuio_driver = { |
1379 | .driver = { | 1377 | .driver = { |
1380 | .name = "mpuio", | 1378 | .name = "mpuio", |
@@ -1745,7 +1743,7 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev) | |||
1745 | } | 1743 | } |
1746 | 1744 | ||
1747 | #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) | 1745 | #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) |
1748 | static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg) | 1746 | static int omap_gpio_suspend(void) |
1749 | { | 1747 | { |
1750 | int i; | 1748 | int i; |
1751 | 1749 | ||
@@ -1795,12 +1793,12 @@ static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg) | |||
1795 | return 0; | 1793 | return 0; |
1796 | } | 1794 | } |
1797 | 1795 | ||
1798 | static int omap_gpio_resume(struct sys_device *dev) | 1796 | static void omap_gpio_resume(void) |
1799 | { | 1797 | { |
1800 | int i; | 1798 | int i; |
1801 | 1799 | ||
1802 | if (!cpu_class_is_omap2() && !cpu_is_omap16xx()) | 1800 | if (!cpu_class_is_omap2() && !cpu_is_omap16xx()) |
1803 | return 0; | 1801 | return; |
1804 | 1802 | ||
1805 | for (i = 0; i < gpio_bank_count; i++) { | 1803 | for (i = 0; i < gpio_bank_count; i++) { |
1806 | struct gpio_bank *bank = &gpio_bank[i]; | 1804 | struct gpio_bank *bank = &gpio_bank[i]; |
@@ -1836,21 +1834,13 @@ static int omap_gpio_resume(struct sys_device *dev) | |||
1836 | __raw_writel(bank->saved_wakeup, wake_set); | 1834 | __raw_writel(bank->saved_wakeup, wake_set); |
1837 | spin_unlock_irqrestore(&bank->lock, flags); | 1835 | spin_unlock_irqrestore(&bank->lock, flags); |
1838 | } | 1836 | } |
1839 | |||
1840 | return 0; | ||
1841 | } | 1837 | } |
1842 | 1838 | ||
1843 | static struct sysdev_class omap_gpio_sysclass = { | 1839 | static struct syscore_ops omap_gpio_syscore_ops = { |
1844 | .name = "gpio", | ||
1845 | .suspend = omap_gpio_suspend, | 1840 | .suspend = omap_gpio_suspend, |
1846 | .resume = omap_gpio_resume, | 1841 | .resume = omap_gpio_resume, |
1847 | }; | 1842 | }; |
1848 | 1843 | ||
1849 | static struct sys_device omap_gpio_device = { | ||
1850 | .id = 0, | ||
1851 | .cls = &omap_gpio_sysclass, | ||
1852 | }; | ||
1853 | |||
1854 | #endif | 1844 | #endif |
1855 | 1845 | ||
1856 | #ifdef CONFIG_ARCH_OMAP2PLUS | 1846 | #ifdef CONFIG_ARCH_OMAP2PLUS |
@@ -2108,21 +2098,14 @@ postcore_initcall(omap_gpio_drv_reg); | |||
2108 | 2098 | ||
2109 | static int __init omap_gpio_sysinit(void) | 2099 | static int __init omap_gpio_sysinit(void) |
2110 | { | 2100 | { |
2111 | int ret = 0; | ||
2112 | |||
2113 | mpuio_init(); | 2101 | mpuio_init(); |
2114 | 2102 | ||
2115 | #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) | 2103 | #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) |
2116 | if (cpu_is_omap16xx() || cpu_class_is_omap2()) { | 2104 | if (cpu_is_omap16xx() || cpu_class_is_omap2()) |
2117 | if (ret == 0) { | 2105 | register_syscore_ops(&omap_gpio_syscore_ops); |
2118 | ret = sysdev_class_register(&omap_gpio_sysclass); | ||
2119 | if (ret == 0) | ||
2120 | ret = sysdev_register(&omap_gpio_device); | ||
2121 | } | ||
2122 | } | ||
2123 | #endif | 2106 | #endif |
2124 | 2107 | ||
2125 | return ret; | 2108 | return 0; |
2126 | } | 2109 | } |
2127 | 2110 | ||
2128 | arch_initcall(omap_gpio_sysinit); | 2111 | arch_initcall(omap_gpio_sysinit); |
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c index 8a51fd58f656..34fc31ee9081 100644 --- a/arch/arm/plat-omap/iommu.c +++ b/arch/arm/plat-omap/iommu.c | |||
@@ -793,6 +793,8 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) | |||
793 | clk_enable(obj->clk); | 793 | clk_enable(obj->clk); |
794 | errs = iommu_report_fault(obj, &da); | 794 | errs = iommu_report_fault(obj, &da); |
795 | clk_disable(obj->clk); | 795 | clk_disable(obj->clk); |
796 | if (errs == 0) | ||
797 | return IRQ_HANDLED; | ||
796 | 798 | ||
797 | /* Fault callback or TLB/PTE Dynamic loading */ | 799 | /* Fault callback or TLB/PTE Dynamic loading */ |
798 | if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) | 800 | if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) |
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index 9bbda9acb73b..a37b8eb65b76 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c | |||
@@ -536,6 +536,28 @@ int omap_early_device_register(struct omap_device *od) | |||
536 | return 0; | 536 | return 0; |
537 | } | 537 | } |
538 | 538 | ||
539 | static int _od_runtime_suspend(struct device *dev) | ||
540 | { | ||
541 | struct platform_device *pdev = to_platform_device(dev); | ||
542 | |||
543 | return omap_device_idle(pdev); | ||
544 | } | ||
545 | |||
546 | static int _od_runtime_resume(struct device *dev) | ||
547 | { | ||
548 | struct platform_device *pdev = to_platform_device(dev); | ||
549 | |||
550 | return omap_device_enable(pdev); | ||
551 | } | ||
552 | |||
553 | static struct dev_power_domain omap_device_power_domain = { | ||
554 | .ops = { | ||
555 | .runtime_suspend = _od_runtime_suspend, | ||
556 | .runtime_resume = _od_runtime_resume, | ||
557 | USE_PLATFORM_PM_SLEEP_OPS | ||
558 | } | ||
559 | }; | ||
560 | |||
539 | /** | 561 | /** |
540 | * omap_device_register - register an omap_device with one omap_hwmod | 562 | * omap_device_register - register an omap_device with one omap_hwmod |
541 | * @od: struct omap_device * to register | 563 | * @od: struct omap_device * to register |
@@ -549,6 +571,7 @@ int omap_device_register(struct omap_device *od) | |||
549 | pr_debug("omap_device: %s: registering\n", od->pdev.name); | 571 | pr_debug("omap_device: %s: registering\n", od->pdev.name); |
550 | 572 | ||
551 | od->pdev.dev.parent = &omap_device_parent; | 573 | od->pdev.dev.parent = &omap_device_parent; |
574 | od->pdev.dev.pwr_domain = &omap_device_power_domain; | ||
552 | return platform_device_register(&od->pdev); | 575 | return platform_device_register(&od->pdev); |
553 | } | 576 | } |
554 | 577 | ||
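
Editor's note: setting dev.pwr_domain means the runtime PM core calls these ops instead of the platform bus defaults, so a driver's pm_runtime_get_sync() now lands in omap_device_enable(). Conceptually, and much simplified from drivers/base/power/runtime.c of this era, the dispatch looks like:

/* conceptual sketch only: the power domain, when present, overrides
 * the bus PM ops for the runtime callbacks */
static int rpm_suspend_dispatch(struct device *dev)
{
	if (dev->pwr_domain)
		return dev->pwr_domain->ops.runtime_suspend(dev);
	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
		return dev->bus->pm->runtime_suspend(dev);
	return -ENOSYS;
}
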
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c index dce088f45678..48ebb9479b61 100644 --- a/arch/arm/plat-pxa/gpio.c +++ b/arch/arm/plat-pxa/gpio.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/irq.h> | 16 | #include <linux/irq.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/sysdev.h> | 18 | #include <linux/syscore_ops.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | 20 | ||
21 | #include <mach/gpio.h> | 21 | #include <mach/gpio.h> |
@@ -295,7 +295,7 @@ void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn) | |||
295 | } | 295 | } |
296 | 296 | ||
297 | #ifdef CONFIG_PM | 297 | #ifdef CONFIG_PM |
298 | static int pxa_gpio_suspend(struct sys_device *dev, pm_message_t state) | 298 | static int pxa_gpio_suspend(void) |
299 | { | 299 | { |
300 | struct pxa_gpio_chip *c; | 300 | struct pxa_gpio_chip *c; |
301 | int gpio; | 301 | int gpio; |
@@ -312,7 +312,7 @@ static int pxa_gpio_suspend(struct sys_device *dev, pm_message_t state) | |||
312 | return 0; | 312 | return 0; |
313 | } | 313 | } |
314 | 314 | ||
315 | static int pxa_gpio_resume(struct sys_device *dev) | 315 | static void pxa_gpio_resume(void) |
316 | { | 316 | { |
317 | struct pxa_gpio_chip *c; | 317 | struct pxa_gpio_chip *c; |
318 | int gpio; | 318 | int gpio; |
@@ -326,22 +326,13 @@ static int pxa_gpio_resume(struct sys_device *dev) | |||
326 | __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET); | 326 | __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET); |
327 | __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET); | 327 | __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET); |
328 | } | 328 | } |
329 | return 0; | ||
330 | } | 329 | } |
331 | #else | 330 | #else |
332 | #define pxa_gpio_suspend NULL | 331 | #define pxa_gpio_suspend NULL |
333 | #define pxa_gpio_resume NULL | 332 | #define pxa_gpio_resume NULL |
334 | #endif | 333 | #endif |
335 | 334 | ||
336 | struct sysdev_class pxa_gpio_sysclass = { | 335 | struct syscore_ops pxa_gpio_syscore_ops = { |
337 | .name = "gpio", | ||
338 | .suspend = pxa_gpio_suspend, | 336 | .suspend = pxa_gpio_suspend, |
339 | .resume = pxa_gpio_resume, | 337 | .resume = pxa_gpio_resume, |
340 | }; | 338 | }; |
341 | |||
342 | static int __init pxa_gpio_init(void) | ||
343 | { | ||
344 | return sysdev_class_register(&pxa_gpio_sysclass); | ||
345 | } | ||
346 | |||
347 | core_initcall(pxa_gpio_init); | ||
diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c index a9aa5ad3f4eb..be12eadcce20 100644 --- a/arch/arm/plat-pxa/mfp.c +++ b/arch/arm/plat-pxa/mfp.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | #include <linux/sysdev.h> | ||
21 | 20 | ||
22 | #include <plat/mfp.h> | 21 | #include <plat/mfp.h> |
23 | 22 | ||
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index 27ea852e3370..c10d10c56e2e 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/sysdev.h> | 25 | #include <linux/syscore_ops.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
28 | #include <linux/io.h> | 28 | #include <linux/io.h> |
@@ -1195,19 +1195,12 @@ int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *d | |||
1195 | 1195 | ||
1196 | EXPORT_SYMBOL(s3c2410_dma_getposition); | 1196 | EXPORT_SYMBOL(s3c2410_dma_getposition); |
1197 | 1197 | ||
1198 | static inline struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev) | 1198 | /* system core operations */ |
1199 | { | ||
1200 | return container_of(dev, struct s3c2410_dma_chan, dev); | ||
1201 | } | ||
1202 | |||
1203 | /* system device class */ | ||
1204 | 1199 | ||
1205 | #ifdef CONFIG_PM | 1200 | #ifdef CONFIG_PM |
1206 | 1201 | ||
1207 | static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state) | 1202 | static void s3c2410_dma_suspend_chan(struct s3c2410_dma_chan *cp) |
1208 | { | 1203 | { |
1209 | struct s3c2410_dma_chan *cp = to_dma_chan(dev); | ||
1210 | |||
1211 | printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); | 1204 | printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); |
1212 | 1205 | ||
1213 | if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) { | 1206 | if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) { |
@@ -1222,13 +1215,21 @@ static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state) | |||
1222 | 1215 | ||
1223 | s3c2410_dma_dostop(cp); | 1216 | s3c2410_dma_dostop(cp); |
1224 | } | 1217 | } |
1218 | } | ||
1219 | |||
1220 | static int s3c2410_dma_suspend(void) | ||
1221 | { | ||
1222 | struct s3c2410_dma_chan *cp = s3c2410_chans; | ||
1223 | int channel; | ||
1224 | |||
1225 | for (channel = 0; channel < dma_channels; cp++, channel++) | ||
1226 | s3c2410_dma_suspend_chan(cp); | ||
1225 | 1227 | ||
1226 | return 0; | 1228 | return 0; |
1227 | } | 1229 | } |
1228 | 1230 | ||
1229 | static int s3c2410_dma_resume(struct sys_device *dev) | 1231 | static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) |
1230 | { | 1232 | { |
1231 | struct s3c2410_dma_chan *cp = to_dma_chan(dev); | ||
1232 | unsigned int no = cp->number | DMACH_LOW_LEVEL; | 1233 | unsigned int no = cp->number | DMACH_LOW_LEVEL; |
1233 | 1234 | ||
1234 | /* restore channel's hardware configuration */ | 1235 | /* restore channel's hardware configuration */ |
@@ -1249,13 +1250,21 @@ static int s3c2410_dma_resume(struct sys_device *dev) | |||
1249 | return 0; | 1250 | return 0; |
1250 | } | 1251 | } |
1251 | 1252 | ||
1253 | static void s3c2410_dma_resume(void) | ||
1254 | { | ||
1255 | struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1; | ||
1256 | int channel; | ||
1257 | |||
1258 | for (channel = dma_channels - 1; channel >= 0; cp--, channel--) | ||
1259 | s3c2410_dma_resume_chan(cp); | ||
1260 | } | ||
1261 | |||
1252 | #else | 1262 | #else |
1253 | #define s3c2410_dma_suspend NULL | 1263 | #define s3c2410_dma_suspend NULL |
1254 | #define s3c2410_dma_resume NULL | 1264 | #define s3c2410_dma_resume NULL |
1255 | #endif /* CONFIG_PM */ | 1265 | #endif /* CONFIG_PM */ |
1256 | 1266 | ||
1257 | struct sysdev_class dma_sysclass = { | 1267 | struct syscore_ops dma_syscore_ops = { |
1258 | .name = "s3c24xx-dma", | ||
1259 | .suspend = s3c2410_dma_suspend, | 1268 | .suspend = s3c2410_dma_suspend, |
1260 | .resume = s3c2410_dma_resume, | 1269 | .resume = s3c2410_dma_resume, |
1261 | }; | 1270 | }; |
@@ -1269,39 +1278,14 @@ static void s3c2410_dma_cache_ctor(void *p) | |||
1269 | 1278 | ||
1270 | /* initialisation code */ | 1279 | /* initialisation code */ |
1271 | 1280 | ||
1272 | static int __init s3c24xx_dma_sysclass_init(void) | 1281 | static int __init s3c24xx_dma_syscore_init(void) |
1273 | { | 1282 | { |
1274 | int ret = sysdev_class_register(&dma_sysclass); | 1283 | register_syscore_ops(&dma_syscore_ops); |
1275 | |||
1276 | if (ret != 0) | ||
1277 | printk(KERN_ERR "dma sysclass registration failed\n"); | ||
1278 | |||
1279 | return ret; | ||
1280 | } | ||
1281 | |||
1282 | core_initcall(s3c24xx_dma_sysclass_init); | ||
1283 | |||
1284 | static int __init s3c24xx_dma_sysdev_register(void) | ||
1285 | { | ||
1286 | struct s3c2410_dma_chan *cp = s3c2410_chans; | ||
1287 | int channel, ret; | ||
1288 | |||
1289 | for (channel = 0; channel < dma_channels; cp++, channel++) { | ||
1290 | cp->dev.cls = &dma_sysclass; | ||
1291 | cp->dev.id = channel; | ||
1292 | ret = sysdev_register(&cp->dev); | ||
1293 | |||
1294 | if (ret) { | ||
1295 | printk(KERN_ERR "error registering dev for dma %d\n", | ||
1296 | channel); | ||
1297 | return ret; | ||
1298 | } | ||
1299 | } | ||
1300 | 1284 | ||
1301 | return 0; | 1285 | return 0; |
1302 | } | 1286 | } |
1303 | 1287 | ||
1304 | late_initcall(s3c24xx_dma_sysdev_register); | 1288 | late_initcall(s3c24xx_dma_syscore_init); |
1305 | 1289 | ||
1306 | int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq, | 1290 | int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq, |
1307 | unsigned int stride) | 1291 | unsigned int stride) |
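
Editor's note: because syscore callbacks receive no device pointer, the per-channel sysdevs are replaced by loops over the static channel table, suspending channels in index order and resuming them in reverse. A sketch of the idea (foo_* names are placeholders; dma_channels stands in for the channel count from the code above):

static int foo_dma_suspend(void)
{
	int i;

	for (i = 0; i < dma_channels; i++)
		foo_dma_suspend_chan(&foo_chans[i]);
	return 0;
}

static void foo_dma_resume(void)
{
	int i;

	/* undo in the reverse order of suspend */
	for (i = dma_channels - 1; i >= 0; i--)
		foo_dma_resume_chan(&foo_chans[i]);
}
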
diff --git a/arch/arm/plat-s3c24xx/irq-pm.c b/arch/arm/plat-s3c24xx/irq-pm.c index c3624d898630..0efb2e2848c8 100644 --- a/arch/arm/plat-s3c24xx/irq-pm.c +++ b/arch/arm/plat-s3c24xx/irq-pm.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/sysdev.h> | ||
18 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
19 | 18 | ||
20 | #include <plat/cpu.h> | 19 | #include <plat/cpu.h> |
@@ -65,7 +64,7 @@ static unsigned long save_extint[3]; | |||
65 | static unsigned long save_eintflt[4]; | 64 | static unsigned long save_eintflt[4]; |
66 | static unsigned long save_eintmask; | 65 | static unsigned long save_eintmask; |
67 | 66 | ||
68 | int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) | 67 | int s3c24xx_irq_suspend(void) |
69 | { | 68 | { |
70 | unsigned int i; | 69 | unsigned int i; |
71 | 70 | ||
@@ -81,7 +80,7 @@ int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) | |||
81 | return 0; | 80 | return 0; |
82 | } | 81 | } |
83 | 82 | ||
84 | int s3c24xx_irq_resume(struct sys_device *dev) | 83 | void s3c24xx_irq_resume(void) |
85 | { | 84 | { |
86 | unsigned int i; | 85 | unsigned int i; |
87 | 86 | ||
@@ -93,6 +92,4 @@ int s3c24xx_irq_resume(struct sys_device *dev) | |||
93 | 92 | ||
94 | s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save)); | 93 | s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save)); |
95 | __raw_writel(save_eintmask, S3C24XX_EINTMASK); | 94 | __raw_writel(save_eintmask, S3C24XX_EINTMASK); |
96 | |||
97 | return 0; | ||
98 | } | 95 | } |
diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-s5p/irq-pm.c index 5259ad458bc8..327acb3a4464 100644 --- a/arch/arm/plat-s5p/irq-pm.c +++ b/arch/arm/plat-s5p/irq-pm.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <linux/sysdev.h> | ||
20 | 19 | ||
21 | #include <plat/cpu.h> | 20 | #include <plat/cpu.h> |
22 | #include <plat/irqs.h> | 21 | #include <plat/irqs.h> |
@@ -77,17 +76,15 @@ static struct sleep_save eint_save[] = { | |||
77 | SAVE_ITEM(S5P_EINT_MASK(3)), | 76 | SAVE_ITEM(S5P_EINT_MASK(3)), |
78 | }; | 77 | }; |
79 | 78 | ||
80 | int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) | 79 | int s3c24xx_irq_suspend(void) |
81 | { | 80 | { |
82 | s3c_pm_do_save(eint_save, ARRAY_SIZE(eint_save)); | 81 | s3c_pm_do_save(eint_save, ARRAY_SIZE(eint_save)); |
83 | 82 | ||
84 | return 0; | 83 | return 0; |
85 | } | 84 | } |
86 | 85 | ||
87 | int s3c24xx_irq_resume(struct sys_device *dev) | 86 | void s3c24xx_irq_resume(void) |
88 | { | 87 | { |
89 | s3c_pm_do_restore(eint_save, ARRAY_SIZE(eint_save)); | 88 | s3c_pm_do_restore(eint_save, ARRAY_SIZE(eint_save)); |
90 | |||
91 | return 0; | ||
92 | } | 89 | } |
93 | 90 | ||
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h index cedfff51c82b..3aedac0034ba 100644 --- a/arch/arm/plat-samsung/include/plat/cpu.h +++ b/arch/arm/plat-samsung/include/plat/cpu.h | |||
@@ -68,6 +68,12 @@ extern void s3c24xx_init_uartdevs(char *name, | |||
68 | struct sys_timer; | 68 | struct sys_timer; |
69 | extern struct sys_timer s3c24xx_timer; | 69 | extern struct sys_timer s3c24xx_timer; |
70 | 70 | ||
71 | extern struct syscore_ops s3c2410_pm_syscore_ops; | ||
72 | extern struct syscore_ops s3c2412_pm_syscore_ops; | ||
73 | extern struct syscore_ops s3c2416_pm_syscore_ops; | ||
74 | extern struct syscore_ops s3c244x_pm_syscore_ops; | ||
75 | extern struct syscore_ops s3c64xx_irq_syscore_ops; | ||
76 | |||
71 | /* system device classes */ | 77 | /* system device classes */ |
72 | 78 | ||
73 | extern struct sysdev_class s3c2410_sysclass; | 79 | extern struct sysdev_class s3c2410_sysclass; |
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h index 937cc2ace517..7fb6f6be8c81 100644 --- a/arch/arm/plat-samsung/include/plat/pm.h +++ b/arch/arm/plat-samsung/include/plat/pm.h | |||
@@ -103,14 +103,16 @@ extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count); | |||
103 | 103 | ||
104 | #ifdef CONFIG_PM | 104 | #ifdef CONFIG_PM |
105 | extern int s3c_irqext_wake(struct irq_data *data, unsigned int state); | 105 | extern int s3c_irqext_wake(struct irq_data *data, unsigned int state); |
106 | extern int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state); | 106 | extern int s3c24xx_irq_suspend(void); |
107 | extern int s3c24xx_irq_resume(struct sys_device *dev); | 107 | extern void s3c24xx_irq_resume(void); |
108 | #else | 108 | #else |
109 | #define s3c_irqext_wake NULL | 109 | #define s3c_irqext_wake NULL |
110 | #define s3c24xx_irq_suspend NULL | 110 | #define s3c24xx_irq_suspend NULL |
111 | #define s3c24xx_irq_resume NULL | 111 | #define s3c24xx_irq_resume NULL |
112 | #endif | 112 | #endif |
113 | 113 | ||
114 | extern struct syscore_ops s3c24xx_irq_syscore_ops; | ||
115 | |||
114 | /* PM debug functions */ | 116 | /* PM debug functions */ |
115 | 117 | ||
116 | #ifdef CONFIG_SAMSUNG_PM_DEBUG | 118 | #ifdef CONFIG_SAMSUNG_PM_DEBUG |
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index f74695075e64..f25e7ec89416 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c | |||
@@ -398,9 +398,9 @@ static void vfp_enable(void *unused) | |||
398 | } | 398 | } |
399 | 399 | ||
400 | #ifdef CONFIG_PM | 400 | #ifdef CONFIG_PM |
401 | #include <linux/sysdev.h> | 401 | #include <linux/syscore_ops.h> |
402 | 402 | ||
403 | static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) | 403 | static int vfp_pm_suspend(void) |
404 | { | 404 | { |
405 | struct thread_info *ti = current_thread_info(); | 405 | struct thread_info *ti = current_thread_info(); |
406 | u32 fpexc = fmrx(FPEXC); | 406 | u32 fpexc = fmrx(FPEXC); |
@@ -420,34 +420,25 @@ static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) | |||
420 | return 0; | 420 | return 0; |
421 | } | 421 | } |
422 | 422 | ||
423 | static int vfp_pm_resume(struct sys_device *dev) | 423 | static void vfp_pm_resume(void) |
424 | { | 424 | { |
425 | /* ensure we have access to the vfp */ | 425 | /* ensure we have access to the vfp */ |
426 | vfp_enable(NULL); | 426 | vfp_enable(NULL); |
427 | 427 | ||
428 | /* and disable it to ensure the next usage restores the state */ | 428 | /* and disable it to ensure the next usage restores the state */ |
429 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | 429 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); |
430 | |||
431 | return 0; | ||
432 | } | 430 | } |
433 | 431 | ||
434 | static struct sysdev_class vfp_pm_sysclass = { | 432 | static struct syscore_ops vfp_pm_syscore_ops = { |
435 | .name = "vfp", | ||
436 | .suspend = vfp_pm_suspend, | 433 | .suspend = vfp_pm_suspend, |
437 | .resume = vfp_pm_resume, | 434 | .resume = vfp_pm_resume, |
438 | }; | 435 | }; |
439 | 436 | ||
440 | static struct sys_device vfp_pm_sysdev = { | ||
441 | .cls = &vfp_pm_sysclass, | ||
442 | }; | ||
443 | |||
444 | static void vfp_pm_init(void) | 437 | static void vfp_pm_init(void) |
445 | { | 438 | { |
446 | sysdev_class_register(&vfp_pm_sysclass); | 439 | register_syscore_ops(&vfp_pm_syscore_ops); |
447 | sysdev_register(&vfp_pm_sysdev); | ||
448 | } | 440 | } |
449 | 441 | ||
450 | |||
451 | #else | 442 | #else |
452 | static inline void vfp_pm_init(void) { } | 443 | static inline void vfp_pm_init(void) { } |
453 | #endif /* CONFIG_PM */ | 444 | #endif /* CONFIG_PM */ |
diff --git a/arch/avr32/mach-at32ap/intc.c b/arch/avr32/mach-at32ap/intc.c index 21ce35f33aa5..3e3646186c9f 100644 --- a/arch/avr32/mach-at32ap/intc.c +++ b/arch/avr32/mach-at32ap/intc.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/sysdev.h> | 15 | #include <linux/syscore_ops.h> |
16 | 16 | ||
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
@@ -21,7 +21,6 @@ | |||
21 | struct intc { | 21 | struct intc { |
22 | void __iomem *regs; | 22 | void __iomem *regs; |
23 | struct irq_chip chip; | 23 | struct irq_chip chip; |
24 | struct sys_device sysdev; | ||
25 | #ifdef CONFIG_PM | 24 | #ifdef CONFIG_PM |
26 | unsigned long suspend_ipr; | 25 | unsigned long suspend_ipr; |
27 | unsigned long saved_ipr[64]; | 26 | unsigned long saved_ipr[64]; |
@@ -146,9 +145,8 @@ void intc_set_suspend_handler(unsigned long offset) | |||
146 | intc0.suspend_ipr = offset; | 145 | intc0.suspend_ipr = offset; |
147 | } | 146 | } |
148 | 147 | ||
149 | static int intc_suspend(struct sys_device *sdev, pm_message_t state) | 148 | static int intc_suspend(void) |
150 | { | 149 | { |
151 | struct intc *intc = container_of(sdev, struct intc, sysdev); | ||
152 | int i; | 150 | int i; |
153 | 151 | ||
154 | if (unlikely(!irqs_disabled())) { | 152 | if (unlikely(!irqs_disabled())) { |
@@ -156,28 +154,25 @@ static int intc_suspend(struct sys_device *sdev, pm_message_t state) | |||
156 | return -EINVAL; | 154 | return -EINVAL; |
157 | } | 155 | } |
158 | 156 | ||
159 | if (unlikely(!intc->suspend_ipr)) { | 157 | if (unlikely(!intc0.suspend_ipr)) { |
160 | pr_err("intc_suspend: suspend_ipr not initialized\n"); | 158 | pr_err("intc_suspend: suspend_ipr not initialized\n"); |
161 | return -EINVAL; | 159 | return -EINVAL; |
162 | } | 160 | } |
163 | 161 | ||
164 | for (i = 0; i < 64; i++) { | 162 | for (i = 0; i < 64; i++) { |
165 | intc->saved_ipr[i] = intc_readl(intc, INTPR0 + 4 * i); | 163 | intc0.saved_ipr[i] = intc_readl(&intc0, INTPR0 + 4 * i); |
166 | intc_writel(intc, INTPR0 + 4 * i, intc->suspend_ipr); | 164 | intc_writel(&intc0, INTPR0 + 4 * i, intc0.suspend_ipr); |
167 | } | 165 | } |
168 | 166 | ||
169 | return 0; | 167 | return 0; |
170 | } | 168 | } |
171 | 169 | ||
172 | static int intc_resume(struct sys_device *sdev) | 170 | static int intc_resume(void) |
173 | { | 171 | { |
174 | struct intc *intc = container_of(sdev, struct intc, sysdev); | ||
175 | int i; | 172 | int i; |
176 | 173 | ||
177 | WARN_ON(!irqs_disabled()); | ||
178 | |||
179 | for (i = 0; i < 64; i++) | 174 | for (i = 0; i < 64; i++) |
180 | intc_writel(intc, INTPR0 + 4 * i, intc->saved_ipr[i]); | 175 | intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]); |
181 | 176 | ||
182 | return 0; | 177 | return 0; |
183 | } | 178 | } |
@@ -186,27 +181,18 @@ static int intc_resume(struct sys_device *sdev) | |||
186 | #define intc_resume NULL | 181 | #define intc_resume NULL |
187 | #endif | 182 | #endif |
188 | 183 | ||
189 | static struct sysdev_class intc_class = { | 184 | static struct syscore_ops intc_syscore_ops = { |
190 | .name = "intc", | ||
191 | .suspend = intc_suspend, | 185 | .suspend = intc_suspend, |
192 | .resume = intc_resume, | 186 | .resume = intc_resume, |
193 | }; | 187 | }; |
194 | 188 | ||
195 | static int __init intc_init_sysdev(void) | 189 | static int __init intc_init_syscore(void) |
196 | { | 190 | { |
197 | int ret; | 191 | register_syscore_ops(&intc_syscore_ops); |
198 | |||
199 | ret = sysdev_class_register(&intc_class); | ||
200 | if (ret) | ||
201 | return ret; | ||
202 | 192 | ||
203 | intc0.sysdev.id = 0; | 193 | return 0; |
204 | intc0.sysdev.cls = &intc_class; | ||
205 | ret = sysdev_register(&intc0.sysdev); | ||
206 | |||
207 | return ret; | ||
208 | } | 194 | } |
209 | device_initcall(intc_init_sysdev); | 195 | device_initcall(intc_init_syscore); |
210 | 196 | ||
211 | unsigned long intc_get_pending(unsigned int group) | 197 | unsigned long intc_get_pending(unsigned int group) |
212 | { | 198 | { |
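
The VFP and intc conversions above follow the common sysdev-to-syscore migration in this series: the sysdev_class/sys_device pair and its two-step registration collapse into one struct and one call, and the callbacks lose their struct sys_device argument. In struct syscore_ops, suspend still returns int while resume is declared void (the Blackfin watchdog hunk below shows the signature change). A minimal sketch of the target pattern, with hypothetical foo_* names:

#include <linux/syscore_ops.h>

static int foo_suspend(void)            /* called with IRQs off, one instance system-wide */
{
        /* save hardware state here */
        return 0;
}

static void foo_resume(void)            /* note: void, unlike the old sysdev resume */
{
        /* restore hardware state here */
}

static struct syscore_ops foo_syscore_ops = {
        .suspend = foo_suspend,
        .resume  = foo_resume,
};

static int __init foo_init_syscore(void)
{
        register_syscore_ops(&foo_syscore_ops); /* returns void: cannot fail */
        return 0;
}
device_initcall(foo_init_syscore);
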
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c index 0b5f72f17fd0..401eb1d8e3b4 100644 --- a/arch/blackfin/kernel/nmi.c +++ b/arch/blackfin/kernel/nmi.c | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/bitops.h> | 13 | #include <linux/bitops.h> |
14 | #include <linux/hardirq.h> | 14 | #include <linux/hardirq.h> |
15 | #include <linux/sysdev.h> | 15 | #include <linux/syscore_ops.h> |
16 | #include <linux/pm.h> | 16 | #include <linux/pm.h> |
17 | #include <linux/nmi.h> | 17 | #include <linux/nmi.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
@@ -196,43 +196,31 @@ void touch_nmi_watchdog(void) | |||
196 | 196 | ||
197 | /* Suspend/resume support */ | 197 | /* Suspend/resume support */ |
198 | #ifdef CONFIG_PM | 198 | #ifdef CONFIG_PM |
199 | static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state) | 199 | static int nmi_wdt_suspend(void) |
200 | { | 200 | { |
201 | nmi_wdt_stop(); | 201 | nmi_wdt_stop(); |
202 | return 0; | 202 | return 0; |
203 | } | 203 | } |
204 | 204 | ||
205 | static int nmi_wdt_resume(struct sys_device *dev) | 205 | static void nmi_wdt_resume(void) |
206 | { | 206 | { |
207 | if (nmi_active) | 207 | if (nmi_active) |
208 | nmi_wdt_start(); | 208 | nmi_wdt_start(); |
209 | return 0; | ||
210 | } | 209 | } |
211 | 210 | ||
212 | static struct sysdev_class nmi_sysclass = { | 211 | static struct syscore_ops nmi_syscore_ops = { |
213 | .name = DRV_NAME, | ||
214 | .resume = nmi_wdt_resume, | 212 | .resume = nmi_wdt_resume, |
215 | .suspend = nmi_wdt_suspend, | 213 | .suspend = nmi_wdt_suspend, |
216 | }; | 214 | }; |
217 | 215 | ||
218 | static struct sys_device device_nmi_wdt = { | 216 | static int __init init_nmi_wdt_syscore(void) |
219 | .id = 0, | ||
220 | .cls = &nmi_sysclass, | ||
221 | }; | ||
222 | |||
223 | static int __init init_nmi_wdt_sysfs(void) | ||
224 | { | 217 | { |
225 | int error; | 218 | if (nmi_active) |
226 | 219 | register_syscore_ops(&nmi_syscore_ops); | |
227 | if (!nmi_active) | ||
228 | return 0; | ||
229 | 220 | ||
230 | error = sysdev_class_register(&nmi_sysclass); | 221 | return 0; |
231 | if (!error) | ||
232 | error = sysdev_register(&device_nmi_wdt); | ||
233 | return error; | ||
234 | } | 222 | } |
235 | late_initcall(init_nmi_wdt_sysfs); | 223 | late_initcall(init_nmi_wdt_syscore); |
236 | 224 | ||
237 | #endif /* CONFIG_PM */ | 225 | #endif /* CONFIG_PM */ |
238 | 226 | ||
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index cdb4beb6bc8f..9e9b60d969dc 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c | |||
@@ -23,29 +23,6 @@ | |||
23 | #include <asm/gptimers.h> | 23 | #include <asm/gptimers.h> |
24 | #include <asm/nmi.h> | 24 | #include <asm/nmi.h> |
25 | 25 | ||
26 | /* Accelerators for sched_clock() | ||
27 | * convert from cycles(64bits) => nanoseconds (64bits) | ||
28 | * basic equation: | ||
29 | * ns = cycles / (freq / ns_per_sec) | ||
30 | * ns = cycles * (ns_per_sec / freq) | ||
31 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) | ||
32 | * ns = cycles * (10^6 / cpu_khz) | ||
33 | * | ||
34 | * Then we use scaling math (suggested by george@mvista.com) to get: | ||
35 | * ns = cycles * (10^6 * SC / cpu_khz) / SC | ||
36 | * ns = cycles * cyc2ns_scale / SC | ||
37 | * | ||
38 | * And since SC is a constant power of two, we can convert the div | ||
39 | * into a shift. | ||
40 | * | ||
41 | * We can use khz divisor instead of mhz to keep a better precision, since | ||
42 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | ||
43 | * (mathieu.desnoyers@polymtl.ca) | ||
44 | * | ||
45 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | ||
46 | */ | ||
47 | |||
48 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | ||
49 | 26 | ||
50 | #if defined(CONFIG_CYCLES_CLOCKSOURCE) | 27 | #if defined(CONFIG_CYCLES_CLOCKSOURCE) |
51 | 28 | ||
@@ -63,7 +40,6 @@ static struct clocksource bfin_cs_cycles = { | |||
63 | .rating = 400, | 40 | .rating = 400, |
64 | .read = bfin_read_cycles, | 41 | .read = bfin_read_cycles, |
65 | .mask = CLOCKSOURCE_MASK(64), | 42 | .mask = CLOCKSOURCE_MASK(64), |
66 | .shift = CYC2NS_SCALE_FACTOR, | ||
67 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 43 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
68 | }; | 44 | }; |
69 | 45 | ||
@@ -75,10 +51,7 @@ static inline unsigned long long bfin_cs_cycles_sched_clock(void) | |||
75 | 51 | ||
76 | static int __init bfin_cs_cycles_init(void) | 52 | static int __init bfin_cs_cycles_init(void) |
77 | { | 53 | { |
78 | bfin_cs_cycles.mult = \ | 54 | if (clocksource_register_hz(&bfin_cs_cycles, get_cclk())) |
79 | clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift); | ||
80 | |||
81 | if (clocksource_register(&bfin_cs_cycles)) | ||
82 | panic("failed to register clocksource"); | 55 | panic("failed to register clocksource"); |
83 | 56 | ||
84 | return 0; | 57 | return 0; |
@@ -111,7 +84,6 @@ static struct clocksource bfin_cs_gptimer0 = { | |||
111 | .rating = 350, | 84 | .rating = 350, |
112 | .read = bfin_read_gptimer0, | 85 | .read = bfin_read_gptimer0, |
113 | .mask = CLOCKSOURCE_MASK(32), | 86 | .mask = CLOCKSOURCE_MASK(32), |
114 | .shift = CYC2NS_SCALE_FACTOR, | ||
115 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 87 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
116 | }; | 88 | }; |
117 | 89 | ||
@@ -125,10 +97,7 @@ static int __init bfin_cs_gptimer0_init(void) | |||
125 | { | 97 | { |
126 | setup_gptimer0(); | 98 | setup_gptimer0(); |
127 | 99 | ||
128 | bfin_cs_gptimer0.mult = \ | 100 | if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk())) |
129 | clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift); | ||
130 | |||
131 | if (clocksource_register(&bfin_cs_gptimer0)) | ||
132 | panic("failed to register clocksource"); | 101 | panic("failed to register clocksource"); |
133 | 102 | ||
134 | return 0; | 103 | return 0; |
diff --git a/arch/blackfin/mach-common/dpmc.c b/arch/blackfin/mach-common/dpmc.c index 382099fd5561..5e4112e518a9 100644 --- a/arch/blackfin/mach-common/dpmc.c +++ b/arch/blackfin/mach-common/dpmc.c | |||
@@ -19,9 +19,6 @@ | |||
19 | 19 | ||
20 | #define DRIVER_NAME "bfin dpmc" | 20 | #define DRIVER_NAME "bfin dpmc" |
21 | 21 | ||
22 | #define dprintk(msg...) \ | ||
23 | cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, DRIVER_NAME, msg) | ||
24 | |||
25 | struct bfin_dpmc_platform_data *pdata; | 22 | struct bfin_dpmc_platform_data *pdata; |
26 | 23 | ||
27 | /** | 24 | /** |
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 8bce5ed031e4..1fbd94c44457 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c | |||
@@ -177,6 +177,9 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) | |||
177 | while (msg_queue->count) { | 177 | while (msg_queue->count) { |
178 | msg = &msg_queue->ipi_message[msg_queue->head]; | 178 | msg = &msg_queue->ipi_message[msg_queue->head]; |
179 | switch (msg->type) { | 179 | switch (msg->type) { |
180 | case BFIN_IPI_RESCHEDULE: | ||
181 | scheduler_ipi(); | ||
182 | break; | ||
180 | case BFIN_IPI_CALL_FUNC: | 183 | case BFIN_IPI_CALL_FUNC: |
181 | spin_unlock_irqrestore(&msg_queue->lock, flags); | 184 | spin_unlock_irqrestore(&msg_queue->lock, flags); |
182 | ipi_call_function(cpu, msg); | 185 | ipi_call_function(cpu, msg); |
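
This hunk and the following IPI hunks (cris, ia64, m32r, xen) wire the reschedule IPI to the new scheduler_ipi() entry point instead of treating it as a no-op that relies on the interrupt-return path. Sketched for a hypothetical per-arch handler; get_pending_ipis() and the IPI_* bits are placeholders, not real kernel symbols:

#include <linux/interrupt.h>
#include <linux/sched.h>        /* scheduler_ipi() */
#include <linux/smp.h>

static irqreturn_t ipi_handler(int irq, void *dev_id)
{
        unsigned long pending = get_pending_ipis();     /* placeholder helper */

        if (pending & IPI_RESCHEDULE)                   /* placeholder bit */
                scheduler_ipi();        /* lets the scheduler account the wakeup */
        if (pending & IPI_CALL_FUNC)                    /* placeholder bit */
                generic_smp_call_function_interrupt();

        return IRQ_HANDLED;
}
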
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c index 4c9e3e1ba5d1..66cc75657e2f 100644 --- a/arch/cris/arch-v32/kernel/smp.c +++ b/arch/cris/arch-v32/kernel/smp.c | |||
@@ -342,15 +342,18 @@ irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id) | |||
342 | 342 | ||
343 | ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi); | 343 | ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi); |
344 | 344 | ||
345 | if (ipi.vector & IPI_SCHEDULE) { | ||
346 | scheduler_ipi(); | ||
347 | } | ||
345 | if (ipi.vector & IPI_CALL) { | 348 | if (ipi.vector & IPI_CALL) { |
346 | func(info); | 349 | func(info); |
347 | } | 350 | } |
348 | if (ipi.vector & IPI_FLUSH_TLB) { | 351 | if (ipi.vector & IPI_FLUSH_TLB) { |
349 | if (flush_mm == FLUSH_ALL) | 352 | if (flush_mm == FLUSH_ALL) |
350 | __flush_tlb_all(); | 353 | __flush_tlb_all(); |
351 | else if (flush_vma == FLUSH_ALL) | 354 | else if (flush_vma == FLUSH_ALL) |
352 | __flush_tlb_mm(flush_mm); | 355 | __flush_tlb_mm(flush_mm); |
353 | else | 356 | else |
354 | __flush_tlb_page(flush_vma, flush_addr); | 357 | __flush_tlb_page(flush_vma, flush_addr); |
355 | } | 358 | } |
356 | 359 | ||
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c index 22f61526a8e1..f09b174244d5 100644 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c | |||
@@ -23,8 +23,6 @@ | |||
23 | #include <linux/acpi.h> | 23 | #include <linux/acpi.h> |
24 | #include <acpi/processor.h> | 24 | #include <acpi/processor.h> |
25 | 25 | ||
26 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) | ||
27 | |||
28 | MODULE_AUTHOR("Venkatesh Pallipadi"); | 26 | MODULE_AUTHOR("Venkatesh Pallipadi"); |
29 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); | 27 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); |
30 | MODULE_LICENSE("GPL"); | 28 | MODULE_LICENSE("GPL"); |
@@ -47,12 +45,12 @@ processor_set_pstate ( | |||
47 | { | 45 | { |
48 | s64 retval; | 46 | s64 retval; |
49 | 47 | ||
50 | dprintk("processor_set_pstate\n"); | 48 | pr_debug("processor_set_pstate\n"); |
51 | 49 | ||
52 | retval = ia64_pal_set_pstate((u64)value); | 50 | retval = ia64_pal_set_pstate((u64)value); |
53 | 51 | ||
54 | if (retval) { | 52 | if (retval) { |
55 | dprintk("Failed to set freq to 0x%x, with error 0x%lx\n", | 53 | pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", |
56 | value, retval); | 54 | value, retval); |
57 | return -ENODEV; | 55 | return -ENODEV; |
58 | } | 56 | } |
@@ -67,14 +65,14 @@ processor_get_pstate ( | |||
67 | u64 pstate_index = 0; | 65 | u64 pstate_index = 0; |
68 | s64 retval; | 66 | s64 retval; |
69 | 67 | ||
70 | dprintk("processor_get_pstate\n"); | 68 | pr_debug("processor_get_pstate\n"); |
71 | 69 | ||
72 | retval = ia64_pal_get_pstate(&pstate_index, | 70 | retval = ia64_pal_get_pstate(&pstate_index, |
73 | PAL_GET_PSTATE_TYPE_INSTANT); | 71 | PAL_GET_PSTATE_TYPE_INSTANT); |
74 | *value = (u32) pstate_index; | 72 | *value = (u32) pstate_index; |
75 | 73 | ||
76 | if (retval) | 74 | if (retval) |
77 | dprintk("Failed to get current freq with " | 75 | pr_debug("Failed to get current freq with " |
78 | "error 0x%lx, idx 0x%x\n", retval, *value); | 76 | "error 0x%lx, idx 0x%x\n", retval, *value); |
79 | 77 | ||
80 | return (int)retval; | 78 | return (int)retval; |
@@ -90,7 +88,7 @@ extract_clock ( | |||
90 | { | 88 | { |
91 | unsigned long i; | 89 | unsigned long i; |
92 | 90 | ||
93 | dprintk("extract_clock\n"); | 91 | pr_debug("extract_clock\n"); |
94 | 92 | ||
95 | for (i = 0; i < data->acpi_data.state_count; i++) { | 93 | for (i = 0; i < data->acpi_data.state_count; i++) { |
96 | if (value == data->acpi_data.states[i].status) | 94 | if (value == data->acpi_data.states[i].status) |
@@ -110,7 +108,7 @@ processor_get_freq ( | |||
110 | cpumask_t saved_mask; | 108 | cpumask_t saved_mask; |
111 | unsigned long clock_freq; | 109 | unsigned long clock_freq; |
112 | 110 | ||
113 | dprintk("processor_get_freq\n"); | 111 | pr_debug("processor_get_freq\n"); |
114 | 112 | ||
115 | saved_mask = current->cpus_allowed; | 113 | saved_mask = current->cpus_allowed; |
116 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | 114 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
@@ -148,7 +146,7 @@ processor_set_freq ( | |||
148 | cpumask_t saved_mask; | 146 | cpumask_t saved_mask; |
149 | int retval; | 147 | int retval; |
150 | 148 | ||
151 | dprintk("processor_set_freq\n"); | 149 | pr_debug("processor_set_freq\n"); |
152 | 150 | ||
153 | saved_mask = current->cpus_allowed; | 151 | saved_mask = current->cpus_allowed; |
154 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | 152 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
@@ -159,16 +157,16 @@ processor_set_freq ( | |||
159 | 157 | ||
160 | if (state == data->acpi_data.state) { | 158 | if (state == data->acpi_data.state) { |
161 | if (unlikely(data->resume)) { | 159 | if (unlikely(data->resume)) { |
162 | dprintk("Called after resume, resetting to P%d\n", state); | 160 | pr_debug("Called after resume, resetting to P%d\n", state); |
163 | data->resume = 0; | 161 | data->resume = 0; |
164 | } else { | 162 | } else { |
165 | dprintk("Already at target state (P%d)\n", state); | 163 | pr_debug("Already at target state (P%d)\n", state); |
166 | retval = 0; | 164 | retval = 0; |
167 | goto migrate_end; | 165 | goto migrate_end; |
168 | } | 166 | } |
169 | } | 167 | } |
170 | 168 | ||
171 | dprintk("Transitioning from P%d to P%d\n", | 169 | pr_debug("Transitioning from P%d to P%d\n", |
172 | data->acpi_data.state, state); | 170 | data->acpi_data.state, state); |
173 | 171 | ||
174 | /* cpufreq frequency struct */ | 172 | /* cpufreq frequency struct */ |
@@ -186,7 +184,7 @@ processor_set_freq ( | |||
186 | 184 | ||
187 | value = (u32) data->acpi_data.states[state].control; | 185 | value = (u32) data->acpi_data.states[state].control; |
188 | 186 | ||
189 | dprintk("Transitioning to state: 0x%08x\n", value); | 187 | pr_debug("Transitioning to state: 0x%08x\n", value); |
190 | 188 | ||
191 | ret = processor_set_pstate(value); | 189 | ret = processor_set_pstate(value); |
192 | if (ret) { | 190 | if (ret) { |
@@ -219,7 +217,7 @@ acpi_cpufreq_get ( | |||
219 | { | 217 | { |
220 | struct cpufreq_acpi_io *data = acpi_io_data[cpu]; | 218 | struct cpufreq_acpi_io *data = acpi_io_data[cpu]; |
221 | 219 | ||
222 | dprintk("acpi_cpufreq_get\n"); | 220 | pr_debug("acpi_cpufreq_get\n"); |
223 | 221 | ||
224 | return processor_get_freq(data, cpu); | 222 | return processor_get_freq(data, cpu); |
225 | } | 223 | } |
@@ -235,7 +233,7 @@ acpi_cpufreq_target ( | |||
235 | unsigned int next_state = 0; | 233 | unsigned int next_state = 0; |
236 | unsigned int result = 0; | 234 | unsigned int result = 0; |
237 | 235 | ||
238 | dprintk("acpi_cpufreq_setpolicy\n"); | 236 | pr_debug("acpi_cpufreq_setpolicy\n"); |
239 | 237 | ||
240 | result = cpufreq_frequency_table_target(policy, | 238 | result = cpufreq_frequency_table_target(policy, |
241 | data->freq_table, target_freq, relation, &next_state); | 239 | data->freq_table, target_freq, relation, &next_state); |
@@ -255,7 +253,7 @@ acpi_cpufreq_verify ( | |||
255 | unsigned int result = 0; | 253 | unsigned int result = 0; |
256 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | 254 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; |
257 | 255 | ||
258 | dprintk("acpi_cpufreq_verify\n"); | 256 | pr_debug("acpi_cpufreq_verify\n"); |
259 | 257 | ||
260 | result = cpufreq_frequency_table_verify(policy, | 258 | result = cpufreq_frequency_table_verify(policy, |
261 | data->freq_table); | 259 | data->freq_table); |
@@ -273,7 +271,7 @@ acpi_cpufreq_cpu_init ( | |||
273 | struct cpufreq_acpi_io *data; | 271 | struct cpufreq_acpi_io *data; |
274 | unsigned int result = 0; | 272 | unsigned int result = 0; |
275 | 273 | ||
276 | dprintk("acpi_cpufreq_cpu_init\n"); | 274 | pr_debug("acpi_cpufreq_cpu_init\n"); |
277 | 275 | ||
278 | data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); | 276 | data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); |
279 | if (!data) | 277 | if (!data) |
@@ -288,7 +286,7 @@ acpi_cpufreq_cpu_init ( | |||
288 | 286 | ||
289 | /* capability check */ | 287 | /* capability check */ |
290 | if (data->acpi_data.state_count <= 1) { | 288 | if (data->acpi_data.state_count <= 1) { |
291 | dprintk("No P-States\n"); | 289 | pr_debug("No P-States\n"); |
292 | result = -ENODEV; | 290 | result = -ENODEV; |
293 | goto err_unreg; | 291 | goto err_unreg; |
294 | } | 292 | } |
@@ -297,7 +295,7 @@ acpi_cpufreq_cpu_init ( | |||
297 | ACPI_ADR_SPACE_FIXED_HARDWARE) || | 295 | ACPI_ADR_SPACE_FIXED_HARDWARE) || |
298 | (data->acpi_data.status_register.space_id != | 296 | (data->acpi_data.status_register.space_id != |
299 | ACPI_ADR_SPACE_FIXED_HARDWARE)) { | 297 | ACPI_ADR_SPACE_FIXED_HARDWARE)) { |
300 | dprintk("Unsupported address space [%d, %d]\n", | 298 | pr_debug("Unsupported address space [%d, %d]\n", |
301 | (u32) (data->acpi_data.control_register.space_id), | 299 | (u32) (data->acpi_data.control_register.space_id), |
302 | (u32) (data->acpi_data.status_register.space_id)); | 300 | (u32) (data->acpi_data.status_register.space_id)); |
303 | result = -ENODEV; | 301 | result = -ENODEV; |
@@ -348,7 +346,7 @@ acpi_cpufreq_cpu_init ( | |||
348 | "activated.\n", cpu); | 346 | "activated.\n", cpu); |
349 | 347 | ||
350 | for (i = 0; i < data->acpi_data.state_count; i++) | 348 | for (i = 0; i < data->acpi_data.state_count; i++) |
351 | dprintk(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", | 349 | pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", |
352 | (i == data->acpi_data.state?'*':' '), i, | 350 | (i == data->acpi_data.state?'*':' '), i, |
353 | (u32) data->acpi_data.states[i].core_frequency, | 351 | (u32) data->acpi_data.states[i].core_frequency, |
354 | (u32) data->acpi_data.states[i].power, | 352 | (u32) data->acpi_data.states[i].power, |
@@ -383,7 +381,7 @@ acpi_cpufreq_cpu_exit ( | |||
383 | { | 381 | { |
384 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | 382 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; |
385 | 383 | ||
386 | dprintk("acpi_cpufreq_cpu_exit\n"); | 384 | pr_debug("acpi_cpufreq_cpu_exit\n"); |
387 | 385 | ||
388 | if (data) { | 386 | if (data) { |
389 | cpufreq_frequency_table_put_attr(policy->cpu); | 387 | cpufreq_frequency_table_put_attr(policy->cpu); |
@@ -418,7 +416,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = { | |||
418 | static int __init | 416 | static int __init |
419 | acpi_cpufreq_init (void) | 417 | acpi_cpufreq_init (void) |
420 | { | 418 | { |
421 | dprintk("acpi_cpufreq_init\n"); | 419 | pr_debug("acpi_cpufreq_init\n"); |
422 | 420 | ||
423 | return cpufreq_register_driver(&acpi_cpufreq_driver); | 421 | return cpufreq_register_driver(&acpi_cpufreq_driver); |
424 | } | 422 | } |
@@ -427,7 +425,7 @@ acpi_cpufreq_init (void) | |||
427 | static void __exit | 425 | static void __exit |
428 | acpi_cpufreq_exit (void) | 426 | acpi_cpufreq_exit (void) |
429 | { | 427 | { |
430 | dprintk("acpi_cpufreq_exit\n"); | 428 | pr_debug("acpi_cpufreq_exit\n"); |
431 | 429 | ||
432 | cpufreq_unregister_driver(&acpi_cpufreq_driver); | 430 | cpufreq_unregister_driver(&acpi_cpufreq_driver); |
433 | return; | 431 | return; |
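
The ia64 cpufreq hunks above (like the Blackfin dpmc.c one) drop the driver-local dprintk() wrapper around cpufreq_debug_printk() in favor of plain pr_debug(), which compiles to nothing unless DEBUG is defined or dynamic debug enables the call site. If a driver wants to keep a per-driver prefix, the usual idiom is a pr_fmt define; illustrative only, this patch does not add one:

/* Must precede the printk.h include chain to take effect. */
#define pr_fmt(fmt) "acpi-cpufreq: " fmt

#include <linux/kernel.h>

static void example(void)
{
        /* Emitted only with -DDEBUG or a matching dynamic-debug rule. */
        pr_debug("processor_set_pstate\n");
}
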
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c index 1b811c61bdc6..f64097b5118a 100644 --- a/arch/ia64/kernel/cyclone.c +++ b/arch/ia64/kernel/cyclone.c | |||
@@ -31,8 +31,6 @@ static struct clocksource clocksource_cyclone = { | |||
31 | .rating = 300, | 31 | .rating = 300, |
32 | .read = read_cyclone, | 32 | .read = read_cyclone, |
33 | .mask = (1LL << 40) - 1, | 33 | .mask = (1LL << 40) - 1, |
34 | .mult = 0, /*to be calculated*/ | ||
35 | .shift = 16, | ||
36 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 34 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
37 | }; | 35 | }; |
38 | 36 | ||
@@ -118,9 +116,7 @@ int __init init_cyclone_clock(void) | |||
118 | /* initialize last tick */ | 116 | /* initialize last tick */ |
119 | cyclone_mc = cyclone_timer; | 117 | cyclone_mc = cyclone_timer; |
120 | clocksource_cyclone.fsys_mmio = cyclone_timer; | 118 | clocksource_cyclone.fsys_mmio = cyclone_timer; |
121 | clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, | 119 | clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ); |
122 | clocksource_cyclone.shift); | ||
123 | clocksource_register(&clocksource_cyclone); | ||
124 | 120 | ||
125 | return 0; | 121 | return 0; |
126 | } | 122 | } |
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 5b704740f160..782c3a357f24 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/irq.h> | 31 | #include <linux/irq.h> |
32 | #include <linux/ratelimit.h> | 32 | #include <linux/ratelimit.h> |
33 | #include <linux/acpi.h> | 33 | #include <linux/acpi.h> |
34 | #include <linux/sched.h> | ||
34 | 35 | ||
35 | #include <asm/delay.h> | 36 | #include <asm/delay.h> |
36 | #include <asm/intrinsics.h> | 37 | #include <asm/intrinsics.h> |
@@ -496,6 +497,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) | |||
496 | smp_local_flush_tlb(); | 497 | smp_local_flush_tlb(); |
497 | kstat_incr_irqs_this_cpu(irq, desc); | 498 | kstat_incr_irqs_this_cpu(irq, desc); |
498 | } else if (unlikely(IS_RESCHEDULE(vector))) { | 499 | } else if (unlikely(IS_RESCHEDULE(vector))) { |
500 | scheduler_ipi(); | ||
499 | kstat_incr_irqs_this_cpu(irq, desc); | 501 | kstat_incr_irqs_this_cpu(irq, desc); |
500 | } else { | 502 | } else { |
501 | ia64_setreg(_IA64_REG_CR_TPR, vector); | 503 | ia64_setreg(_IA64_REG_CR_TPR, vector); |
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 156ad803d5b7..04440cc09b40 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -73,8 +73,6 @@ static struct clocksource clocksource_itc = { | |||
73 | .rating = 350, | 73 | .rating = 350, |
74 | .read = itc_get_cycles, | 74 | .read = itc_get_cycles, |
75 | .mask = CLOCKSOURCE_MASK(64), | 75 | .mask = CLOCKSOURCE_MASK(64), |
76 | .mult = 0, /*to be calculated*/ | ||
77 | .shift = 16, | ||
78 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 76 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
79 | #ifdef CONFIG_PARAVIRT | 77 | #ifdef CONFIG_PARAVIRT |
80 | .resume = paravirt_clocksource_resume, | 78 | .resume = paravirt_clocksource_resume, |
@@ -365,11 +363,8 @@ ia64_init_itm (void) | |||
365 | ia64_cpu_local_tick(); | 363 | ia64_cpu_local_tick(); |
366 | 364 | ||
367 | if (!itc_clocksource) { | 365 | if (!itc_clocksource) { |
368 | /* Sort out mult/shift values: */ | 366 | clocksource_register_hz(&clocksource_itc, |
369 | clocksource_itc.mult = | 367 | local_cpu_data->itc_freq); |
370 | clocksource_hz2mult(local_cpu_data->itc_freq, | ||
371 | clocksource_itc.shift); | ||
372 | clocksource_register(&clocksource_itc); | ||
373 | itc_clocksource = &clocksource_itc; | 368 | itc_clocksource = &clocksource_itc; |
374 | } | 369 | } |
375 | } | 370 | } |
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c index 21d6f09e3447..c34efda122e1 100644 --- a/arch/ia64/sn/kernel/sn2/timer.c +++ b/arch/ia64/sn/kernel/sn2/timer.c | |||
@@ -33,8 +33,6 @@ static struct clocksource clocksource_sn2 = { | |||
33 | .rating = 450, | 33 | .rating = 450, |
34 | .read = read_sn2, | 34 | .read = read_sn2, |
35 | .mask = (1LL << 55) - 1, | 35 | .mask = (1LL << 55) - 1, |
36 | .mult = 0, | ||
37 | .shift = 10, | ||
38 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 36 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
39 | }; | 37 | }; |
40 | 38 | ||
@@ -57,9 +55,7 @@ ia64_sn_udelay (unsigned long usecs) | |||
57 | void __init sn_timer_init(void) | 55 | void __init sn_timer_init(void) |
58 | { | 56 | { |
59 | clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR; | 57 | clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR; |
60 | clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second, | 58 | clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second); |
61 | clocksource_sn2.shift); | ||
62 | clocksource_register(&clocksource_sn2); | ||
63 | 59 | ||
64 | ia64_udelay = &ia64_sn_udelay; | 60 | ia64_udelay = &ia64_sn_udelay; |
65 | } | 61 | } |
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c index 108bb858acf2..b279e142c633 100644 --- a/arch/ia64/xen/irq_xen.c +++ b/arch/ia64/xen/irq_xen.c | |||
@@ -92,6 +92,8 @@ static unsigned short saved_irq_cnt; | |||
92 | static int xen_slab_ready; | 92 | static int xen_slab_ready; |
93 | 93 | ||
94 | #ifdef CONFIG_SMP | 94 | #ifdef CONFIG_SMP |
95 | #include <linux/sched.h> | ||
96 |||
95 | /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ, | 97 | /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ, |
96 | * it ends up to issue several memory accesses upon percpu data and | 98 | * it ends up to issue several memory accesses upon percpu data and |
97 | * thus adds unnecessary traffic to other paths. | 99 | * thus adds unnecessary traffic to other paths. |
@@ -99,7 +101,13 @@ static int xen_slab_ready; | |||
99 | static irqreturn_t | 101 | static irqreturn_t |
100 | xen_dummy_handler(int irq, void *dev_id) | 102 | xen_dummy_handler(int irq, void *dev_id) |
101 | { | 103 | { |
104 | return IRQ_HANDLED; | ||
105 | } | ||
102 | 106 | ||
107 | static irqreturn_t | ||
108 | xen_resched_handler(int irq, void *dev_id) | ||
109 | { | ||
110 | scheduler_ipi(); | ||
103 | return IRQ_HANDLED; | 111 | return IRQ_HANDLED; |
104 | } | 112 | } |
105 | 113 | ||
@@ -110,7 +118,7 @@ static struct irqaction xen_ipi_irqaction = { | |||
110 | }; | 118 | }; |
111 | 119 | ||
112 | static struct irqaction xen_resched_irqaction = { | 120 | static struct irqaction xen_resched_irqaction = { |
113 | .handler = xen_dummy_handler, | 121 | .handler = xen_resched_handler, |
114 | .flags = IRQF_DISABLED, | 122 | .flags = IRQF_DISABLED, |
115 | .name = "resched" | 123 | .name = "resched" |
116 | }; | 124 | }; |
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index 31cef20b2996..fc10b39893d4 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c | |||
@@ -122,8 +122,6 @@ void smp_send_reschedule(int cpu_id) | |||
122 | * | 122 | * |
123 | * Description: This routine executes on CPU which received | 123 | * Description: This routine executes on CPU which received |
124 | * 'RESCHEDULE_IPI'. | 124 | * 'RESCHEDULE_IPI'. |
125 | * Rescheduling is processed at the exit of interrupt | ||
126 | * operation. | ||
127 | * | 125 | * |
128 | * Born on Date: 2002.02.05 | 126 | * Born on Date: 2002.02.05 |
129 | * | 127 | * |
@@ -138,7 +136,7 @@ void smp_send_reschedule(int cpu_id) | |||
138 | *==========================================================================*/ | 136 | *==========================================================================*/ |
139 | void smp_reschedule_interrupt(void) | 137 | void smp_reschedule_interrupt(void) |
140 | { | 138 | { |
141 | /* nothing to do */ | 139 | scheduler_ipi(); |
142 | } | 140 | } |
143 | 141 | ||
144 | /*==========================================================================* | 142 | /*==========================================================================* |
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c index b995513d527f..95022b04b62d 100644 --- a/arch/m68k/atari/atakeyb.c +++ b/arch/m68k/atari/atakeyb.c | |||
@@ -36,13 +36,10 @@ | |||
36 | 36 | ||
37 | /* Hook for MIDI serial driver */ | 37 | /* Hook for MIDI serial driver */ |
38 | void (*atari_MIDI_interrupt_hook) (void); | 38 | void (*atari_MIDI_interrupt_hook) (void); |
39 | /* Hook for mouse driver */ | ||
40 | void (*atari_mouse_interrupt_hook) (char *); | ||
41 | /* Hook for keyboard inputdev driver */ | 39 | /* Hook for keyboard inputdev driver */ |
42 | void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); | 40 | void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); |
43 | /* Hook for mouse inputdev driver */ | 41 | /* Hook for mouse inputdev driver */ |
44 | void (*atari_input_mouse_interrupt_hook) (char *); | 42 | void (*atari_input_mouse_interrupt_hook) (char *); |
45 | EXPORT_SYMBOL(atari_mouse_interrupt_hook); | ||
46 | EXPORT_SYMBOL(atari_input_keyboard_interrupt_hook); | 43 | EXPORT_SYMBOL(atari_input_keyboard_interrupt_hook); |
47 | EXPORT_SYMBOL(atari_input_mouse_interrupt_hook); | 44 | EXPORT_SYMBOL(atari_input_mouse_interrupt_hook); |
48 | 45 | ||
@@ -263,8 +260,8 @@ repeat: | |||
263 | kb_state.buf[kb_state.len++] = scancode; | 260 | kb_state.buf[kb_state.len++] = scancode; |
264 | if (kb_state.len == 3) { | 261 | if (kb_state.len == 3) { |
265 | kb_state.state = KEYBOARD; | 262 | kb_state.state = KEYBOARD; |
266 | if (atari_mouse_interrupt_hook) | 263 | if (atari_input_mouse_interrupt_hook) |
267 | atari_mouse_interrupt_hook(kb_state.buf); | 264 | atari_input_mouse_interrupt_hook(kb_state.buf); |
268 | } | 265 | } |
269 | break; | 266 | break; |
270 | 267 | ||
@@ -575,7 +572,7 @@ int atari_keyb_init(void) | |||
575 | kb_state.len = 0; | 572 | kb_state.len = 0; |
576 | 573 | ||
577 | error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, | 574 | error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, |
578 | IRQ_TYPE_SLOW, "keyboard/mouse/MIDI", | 575 | IRQ_TYPE_SLOW, "keyboard,mouse,MIDI", |
579 | atari_keyboard_interrupt); | 576 | atari_keyboard_interrupt); |
580 | if (error) | 577 | if (error) |
581 | return error; | 578 | return error; |
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c index 604329fafbb8..ddbf43ca8858 100644 --- a/arch/m68k/atari/stdma.c +++ b/arch/m68k/atari/stdma.c | |||
@@ -180,7 +180,7 @@ void __init stdma_init(void) | |||
180 | { | 180 | { |
181 | stdma_isr = NULL; | 181 | stdma_isr = NULL; |
182 | if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED, | 182 | if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED, |
183 | "ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int)) | 183 | "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int)) |
184 | pr_err("Couldn't register ST-DMA interrupt\n"); | 184 | pr_err("Couldn't register ST-DMA interrupt\n"); |
185 | } | 185 | } |
186 | 186 | ||
diff --git a/arch/m68k/include/asm/atarikb.h b/arch/m68k/include/asm/atarikb.h index 546e7da5804f..68f3622bf591 100644 --- a/arch/m68k/include/asm/atarikb.h +++ b/arch/m68k/include/asm/atarikb.h | |||
@@ -34,8 +34,6 @@ void ikbd_joystick_disable(void); | |||
34 | 34 | ||
35 | /* Hook for MIDI serial driver */ | 35 | /* Hook for MIDI serial driver */ |
36 | extern void (*atari_MIDI_interrupt_hook) (void); | 36 | extern void (*atari_MIDI_interrupt_hook) (void); |
37 | /* Hook for mouse driver */ | ||
38 | extern void (*atari_mouse_interrupt_hook) (char *); | ||
39 | /* Hook for keyboard inputdev driver */ | 37 | /* Hook for keyboard inputdev driver */ |
40 | extern void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); | 38 | extern void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); |
41 | /* Hook for mouse inputdev driver */ | 39 | /* Hook for mouse inputdev driver */ |
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h index 9d69f6e62365..e9020f88a748 100644 --- a/arch/m68k/include/asm/bitops_mm.h +++ b/arch/m68k/include/asm/bitops_mm.h | |||
@@ -181,14 +181,15 @@ static inline int find_first_zero_bit(const unsigned long *vaddr, | |||
181 | { | 181 | { |
182 | const unsigned long *p = vaddr; | 182 | const unsigned long *p = vaddr; |
183 | int res = 32; | 183 | int res = 32; |
184 | unsigned int words; | ||
184 | unsigned long num; | 185 | unsigned long num; |
185 | 186 | ||
186 | if (!size) | 187 | if (!size) |
187 | return 0; | 188 | return 0; |
188 | 189 | ||
189 | size = (size + 31) >> 5; | 190 | words = (size + 31) >> 5; |
190 | while (!(num = ~*p++)) { | 191 | while (!(num = ~*p++)) { |
191 | if (!--size) | 192 | if (!--words) |
192 | goto out; | 193 | goto out; |
193 | } | 194 | } |
194 | 195 | ||
@@ -196,7 +197,8 @@ static inline int find_first_zero_bit(const unsigned long *vaddr, | |||
196 | : "=d" (res) : "d" (num & -num)); | 197 | : "=d" (res) : "d" (num & -num)); |
197 | res ^= 31; | 198 | res ^= 31; |
198 | out: | 199 | out: |
199 | return ((long)p - (long)vaddr - 4) * 8 + res; | 200 | res += ((long)p - (long)vaddr - 4) * 8; |
201 | return res < size ? res : size; | ||
200 | } | 202 | } |
201 | 203 | ||
202 | static inline int find_next_zero_bit(const unsigned long *vaddr, int size, | 204 | static inline int find_next_zero_bit(const unsigned long *vaddr, int size, |
@@ -215,27 +217,32 @@ static inline int find_next_zero_bit(const unsigned long *vaddr, int size, | |||
215 | /* Look for zero in first longword */ | 217 | /* Look for zero in first longword */ |
216 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 218 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
217 | : "=d" (res) : "d" (num & -num)); | 219 | : "=d" (res) : "d" (num & -num)); |
218 | if (res < 32) | 220 | if (res < 32) { |
219 | return offset + (res ^ 31); | 221 | offset += res ^ 31; |
222 | return offset < size ? offset : size; | ||
223 | } | ||
220 | offset += 32; | 224 | offset += 32; |
225 | |||
226 | if (offset >= size) | ||
227 | return size; | ||
221 | } | 228 | } |
222 | /* No zero yet, search remaining full bytes for a zero */ | 229 | /* No zero yet, search remaining full bytes for a zero */ |
223 | res = find_first_zero_bit(p, size - ((long)p - (long)vaddr) * 8); | 230 | return offset + find_first_zero_bit(p, size - offset); |
224 | return offset + res; | ||
225 | } | 231 | } |
226 | 232 | ||
227 | static inline int find_first_bit(const unsigned long *vaddr, unsigned size) | 233 | static inline int find_first_bit(const unsigned long *vaddr, unsigned size) |
228 | { | 234 | { |
229 | const unsigned long *p = vaddr; | 235 | const unsigned long *p = vaddr; |
230 | int res = 32; | 236 | int res = 32; |
237 | unsigned int words; | ||
231 | unsigned long num; | 238 | unsigned long num; |
232 | 239 | ||
233 | if (!size) | 240 | if (!size) |
234 | return 0; | 241 | return 0; |
235 | 242 | ||
236 | size = (size + 31) >> 5; | 243 | words = (size + 31) >> 5; |
237 | while (!(num = *p++)) { | 244 | while (!(num = *p++)) { |
238 | if (!--size) | 245 | if (!--words) |
239 | goto out; | 246 | goto out; |
240 | } | 247 | } |
241 | 248 | ||
@@ -243,7 +250,8 @@ static inline int find_first_bit(const unsigned long *vaddr, unsigned size) | |||
243 | : "=d" (res) : "d" (num & -num)); | 250 | : "=d" (res) : "d" (num & -num)); |
244 | res ^= 31; | 251 | res ^= 31; |
245 | out: | 252 | out: |
246 | return ((long)p - (long)vaddr - 4) * 8 + res; | 253 | res += ((long)p - (long)vaddr - 4) * 8; |
254 | return res < size ? res : size; | ||
247 | } | 255 | } |
248 | 256 | ||
249 | static inline int find_next_bit(const unsigned long *vaddr, int size, | 257 | static inline int find_next_bit(const unsigned long *vaddr, int size, |
@@ -262,13 +270,17 @@ static inline int find_next_bit(const unsigned long *vaddr, int size, | |||
262 | /* Look for one in first longword */ | 270 | /* Look for one in first longword */ |
263 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 271 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
264 | : "=d" (res) : "d" (num & -num)); | 272 | : "=d" (res) : "d" (num & -num)); |
265 | if (res < 32) | 273 | if (res < 32) { |
266 | return offset + (res ^ 31); | 274 | offset += res ^ 31; |
275 | return offset < size ? offset : size; | ||
276 | } | ||
267 | offset += 32; | 277 | offset += 32; |
278 | |||
279 | if (offset >= size) | ||
280 | return size; | ||
268 | } | 281 | } |
269 | /* No one yet, search remaining full bytes for a one */ | 282 | /* No one yet, search remaining full bytes for a one */ |
270 | res = find_first_bit(p, size - ((long)p - (long)vaddr) * 8); | 283 | return offset + find_first_bit(p, size - offset); |
271 | return offset + res; | ||
272 | } | 284 | } |
273 | 285 | ||
274 | /* | 286 | /* |
@@ -366,23 +378,25 @@ static inline int test_bit_le(int nr, const void *vaddr) | |||
366 | static inline int find_first_zero_bit_le(const void *vaddr, unsigned size) | 378 | static inline int find_first_zero_bit_le(const void *vaddr, unsigned size) |
367 | { | 379 | { |
368 | const unsigned long *p = vaddr, *addr = vaddr; | 380 | const unsigned long *p = vaddr, *addr = vaddr; |
369 | int res; | 381 | int res = 0; |
382 | unsigned int words; | ||
370 | 383 | ||
371 | if (!size) | 384 | if (!size) |
372 | return 0; | 385 | return 0; |
373 | 386 | ||
374 | size = (size >> 5) + ((size & 31) > 0); | 387 | words = (size >> 5) + ((size & 31) > 0); |
375 | while (*p++ == ~0UL) | 388 | while (*p++ == ~0UL) { |
376 | { | 389 | if (--words == 0) |
377 | if (--size == 0) | 390 | goto out; |
378 | return (p - addr) << 5; | ||
379 | } | 391 | } |
380 | 392 | ||
381 | --p; | 393 | --p; |
382 | for (res = 0; res < 32; res++) | 394 | for (res = 0; res < 32; res++) |
383 | if (!test_bit_le(res, p)) | 395 | if (!test_bit_le(res, p)) |
384 | break; | 396 | break; |
385 | return (p - addr) * 32 + res; | 397 | out: |
398 | res += (p - addr) * 32; | ||
399 | return res < size ? res : size; | ||
386 | } | 400 | } |
387 | 401 | ||
388 | static inline unsigned long find_next_zero_bit_le(const void *addr, | 402 | static inline unsigned long find_next_zero_bit_le(const void *addr, |
@@ -400,10 +414,15 @@ static inline unsigned long find_next_zero_bit_le(const void *addr, | |||
400 | offset -= bit; | 414 | offset -= bit; |
401 | /* Look for zero in first longword */ | 415 | /* Look for zero in first longword */ |
402 | for (res = bit; res < 32; res++) | 416 | for (res = bit; res < 32; res++) |
403 | if (!test_bit_le(res, p)) | 417 | if (!test_bit_le(res, p)) { |
404 | return offset + res; | 418 | offset += res; |
419 | return offset < size ? offset : size; | ||
420 | } | ||
405 | p++; | 421 | p++; |
406 | offset += 32; | 422 | offset += 32; |
423 | |||
424 | if (offset >= size) | ||
425 | return size; | ||
407 | } | 426 | } |
408 | /* No zero yet, search remaining full bytes for a zero */ | 427 | /* No zero yet, search remaining full bytes for a zero */ |
409 | return offset + find_first_zero_bit_le(p, size - offset); | 428 | return offset + find_first_zero_bit_le(p, size - offset); |
@@ -412,22 +431,25 @@ static inline unsigned long find_next_zero_bit_le(const void *addr, | |||
412 | static inline int find_first_bit_le(const void *vaddr, unsigned size) | 431 | static inline int find_first_bit_le(const void *vaddr, unsigned size) |
413 | { | 432 | { |
414 | const unsigned long *p = vaddr, *addr = vaddr; | 433 | const unsigned long *p = vaddr, *addr = vaddr; |
415 | int res; | 434 | int res = 0; |
435 | unsigned int words; | ||
416 | 436 | ||
417 | if (!size) | 437 | if (!size) |
418 | return 0; | 438 | return 0; |
419 | 439 | ||
420 | size = (size >> 5) + ((size & 31) > 0); | 440 | words = (size >> 5) + ((size & 31) > 0); |
421 | while (*p++ == 0UL) { | 441 | while (*p++ == 0UL) { |
422 | if (--size == 0) | 442 | if (--words == 0) |
423 | return (p - addr) << 5; | 443 | goto out; |
424 | } | 444 | } |
425 | 445 | ||
426 | --p; | 446 | --p; |
427 | for (res = 0; res < 32; res++) | 447 | for (res = 0; res < 32; res++) |
428 | if (test_bit_le(res, p)) | 448 | if (test_bit_le(res, p)) |
429 | break; | 449 | break; |
430 | return (p - addr) * 32 + res; | 450 | out: |
451 | res += (p - addr) * 32; | ||
452 | return res < size ? res : size; | ||
431 | } | 453 | } |
432 | 454 | ||
433 | static inline unsigned long find_next_bit_le(const void *addr, | 455 | static inline unsigned long find_next_bit_le(const void *addr, |
@@ -445,10 +467,15 @@ static inline unsigned long find_next_bit_le(const void *addr, | |||
445 | offset -= bit; | 467 | offset -= bit; |
446 | /* Look for one in first longword */ | 468 | /* Look for one in first longword */ |
447 | for (res = bit; res < 32; res++) | 469 | for (res = bit; res < 32; res++) |
448 | if (test_bit_le(res, p)) | 470 | if (test_bit_le(res, p)) { |
449 | return offset + res; | 471 | offset += res; |
472 | return offset < size ? offset : size; | ||
473 | } | ||
450 | p++; | 474 | p++; |
451 | offset += 32; | 475 | offset += 32; |
476 | |||
477 | if (offset >= size) | ||
478 | return size; | ||
452 | } | 479 | } |
453 | /* No set bit yet, search remaining full bytes for a set bit */ | 480 | /* No set bit yet, search remaining full bytes for a set bit */ |
454 | return offset + find_first_bit_le(p, size - offset); | 481 | return offset + find_first_bit_le(p, size - offset); |
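
The bitops hunks above make two fixes: the word counter no longer reuses (and destroys) the caller's bit count, and every exit path now clamps its result so a search that finds nothing returns exactly size, as the generic bitops contract requires. A small user-space reference model of that clamped contract (bit numbering here is the generic LSB-first order, not m68k's native bfffo order):

#include <stdio.h>

#define BITS_PER_LONG (8 * (unsigned int)sizeof(unsigned long))

/* Reference: lowest set bit in [0, size), or size if none is set. */
static unsigned int ref_find_first_bit(const unsigned long *map,
                                       unsigned int size)
{
        unsigned int i;

        for (i = 0; i < size; i++)
                if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                        return i;
        return size;            /* the clamped result the fixed code now matches */
}

int main(void)
{
        unsigned long map[2] = { 0, 0 };

        printf("%u\n", ref_find_first_bit(map, 40));    /* 40: nothing found */
        map[0] = 1UL << 5;
        printf("%u\n", ref_find_first_bit(map, 40));    /* 5 */
        return 0;
}
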
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 29e17907d9f2..f3b649de2a1b 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
@@ -22,7 +22,7 @@ | |||
22 | #define __NR_mknod 14 | 22 | #define __NR_mknod 14 |
23 | #define __NR_chmod 15 | 23 | #define __NR_chmod 15 |
24 | #define __NR_chown 16 | 24 | #define __NR_chown 16 |
25 | #define __NR_break 17 | 25 | /*#define __NR_break 17*/ |
26 | #define __NR_oldstat 18 | 26 | #define __NR_oldstat 18 |
27 | #define __NR_lseek 19 | 27 | #define __NR_lseek 19 |
28 | #define __NR_getpid 20 | 28 | #define __NR_getpid 20 |
@@ -36,11 +36,11 @@ | |||
36 | #define __NR_oldfstat 28 | 36 | #define __NR_oldfstat 28 |
37 | #define __NR_pause 29 | 37 | #define __NR_pause 29 |
38 | #define __NR_utime 30 | 38 | #define __NR_utime 30 |
39 | #define __NR_stty 31 | 39 | /*#define __NR_stty 31*/ |
40 | #define __NR_gtty 32 | 40 | /*#define __NR_gtty 32*/ |
41 | #define __NR_access 33 | 41 | #define __NR_access 33 |
42 | #define __NR_nice 34 | 42 | #define __NR_nice 34 |
43 | #define __NR_ftime 35 | 43 | /*#define __NR_ftime 35*/ |
44 | #define __NR_sync 36 | 44 | #define __NR_sync 36 |
45 | #define __NR_kill 37 | 45 | #define __NR_kill 37 |
46 | #define __NR_rename 38 | 46 | #define __NR_rename 38 |
@@ -49,7 +49,7 @@ | |||
49 | #define __NR_dup 41 | 49 | #define __NR_dup 41 |
50 | #define __NR_pipe 42 | 50 | #define __NR_pipe 42 |
51 | #define __NR_times 43 | 51 | #define __NR_times 43 |
52 | #define __NR_prof 44 | 52 | /*#define __NR_prof 44*/ |
53 | #define __NR_brk 45 | 53 | #define __NR_brk 45 |
54 | #define __NR_setgid 46 | 54 | #define __NR_setgid 46 |
55 | #define __NR_getgid 47 | 55 | #define __NR_getgid 47 |
@@ -58,13 +58,13 @@ | |||
58 | #define __NR_getegid 50 | 58 | #define __NR_getegid 50 |
59 | #define __NR_acct 51 | 59 | #define __NR_acct 51 |
60 | #define __NR_umount2 52 | 60 | #define __NR_umount2 52 |
61 | #define __NR_lock 53 | 61 | /*#define __NR_lock 53*/ |
62 | #define __NR_ioctl 54 | 62 | #define __NR_ioctl 54 |
63 | #define __NR_fcntl 55 | 63 | #define __NR_fcntl 55 |
64 | #define __NR_mpx 56 | 64 | /*#define __NR_mpx 56*/ |
65 | #define __NR_setpgid 57 | 65 | #define __NR_setpgid 57 |
66 | #define __NR_ulimit 58 | 66 | /*#define __NR_ulimit 58*/ |
67 | #define __NR_oldolduname 59 | 67 | /*#define __NR_oldolduname 59*/ |
68 | #define __NR_umask 60 | 68 | #define __NR_umask 60 |
69 | #define __NR_chroot 61 | 69 | #define __NR_chroot 61 |
70 | #define __NR_ustat 62 | 70 | #define __NR_ustat 62 |
@@ -103,10 +103,10 @@ | |||
103 | #define __NR_fchown 95 | 103 | #define __NR_fchown 95 |
104 | #define __NR_getpriority 96 | 104 | #define __NR_getpriority 96 |
105 | #define __NR_setpriority 97 | 105 | #define __NR_setpriority 97 |
106 | #define __NR_profil 98 | 106 | /*#define __NR_profil 98*/ |
107 | #define __NR_statfs 99 | 107 | #define __NR_statfs 99 |
108 | #define __NR_fstatfs 100 | 108 | #define __NR_fstatfs 100 |
109 | #define __NR_ioperm 101 | 109 | /*#define __NR_ioperm 101*/ |
110 | #define __NR_socketcall 102 | 110 | #define __NR_socketcall 102 |
111 | #define __NR_syslog 103 | 111 | #define __NR_syslog 103 |
112 | #define __NR_setitimer 104 | 112 | #define __NR_setitimer 104 |
@@ -114,11 +114,11 @@ | |||
114 | #define __NR_stat 106 | 114 | #define __NR_stat 106 |
115 | #define __NR_lstat 107 | 115 | #define __NR_lstat 107 |
116 | #define __NR_fstat 108 | 116 | #define __NR_fstat 108 |
117 | #define __NR_olduname 109 | 117 | /*#define __NR_olduname 109*/ |
118 | #define __NR_iopl /* 110 */ not supported | 118 | /*#define __NR_iopl 110*/ /* not supported */ |
119 | #define __NR_vhangup 111 | 119 | #define __NR_vhangup 111 |
120 | #define __NR_idle /* 112 */ Obsolete | 120 | /*#define __NR_idle 112*/ /* Obsolete */ |
121 | #define __NR_vm86 /* 113 */ not supported | 121 | /*#define __NR_vm86 113*/ /* not supported */ |
122 | #define __NR_wait4 114 | 122 | #define __NR_wait4 114 |
123 | #define __NR_swapoff 115 | 123 | #define __NR_swapoff 115 |
124 | #define __NR_sysinfo 116 | 124 | #define __NR_sysinfo 116 |
@@ -132,17 +132,17 @@ | |||
132 | #define __NR_adjtimex 124 | 132 | #define __NR_adjtimex 124 |
133 | #define __NR_mprotect 125 | 133 | #define __NR_mprotect 125 |
134 | #define __NR_sigprocmask 126 | 134 | #define __NR_sigprocmask 126 |
135 | #define __NR_create_module 127 | 135 | /*#define __NR_create_module 127*/ |
136 | #define __NR_init_module 128 | 136 | #define __NR_init_module 128 |
137 | #define __NR_delete_module 129 | 137 | #define __NR_delete_module 129 |
138 | #define __NR_get_kernel_syms 130 | 138 | /*#define __NR_get_kernel_syms 130*/ |
139 | #define __NR_quotactl 131 | 139 | #define __NR_quotactl 131 |
140 | #define __NR_getpgid 132 | 140 | #define __NR_getpgid 132 |
141 | #define __NR_fchdir 133 | 141 | #define __NR_fchdir 133 |
142 | #define __NR_bdflush 134 | 142 | #define __NR_bdflush 134 |
143 | #define __NR_sysfs 135 | 143 | #define __NR_sysfs 135 |
144 | #define __NR_personality 136 | 144 | #define __NR_personality 136 |
145 | #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ | 145 | /*#define __NR_afs_syscall 137*/ /* Syscall for Andrew File System */ |
146 | #define __NR_setfsuid 138 | 146 | #define __NR_setfsuid 138 |
147 | #define __NR_setfsgid 139 | 147 | #define __NR_setfsgid 139 |
148 | #define __NR__llseek 140 | 148 | #define __NR__llseek 140 |
@@ -172,7 +172,7 @@ | |||
172 | #define __NR_setresuid 164 | 172 | #define __NR_setresuid 164 |
173 | #define __NR_getresuid 165 | 173 | #define __NR_getresuid 165 |
174 | #define __NR_getpagesize 166 | 174 | #define __NR_getpagesize 166 |
175 | #define __NR_query_module 167 | 175 | /*#define __NR_query_module 167*/ |
176 | #define __NR_poll 168 | 176 | #define __NR_poll 168 |
177 | #define __NR_nfsservctl 169 | 177 | #define __NR_nfsservctl 169 |
178 | #define __NR_setresgid 170 | 178 | #define __NR_setresgid 170 |
@@ -193,8 +193,8 @@ | |||
193 | #define __NR_capset 185 | 193 | #define __NR_capset 185 |
194 | #define __NR_sigaltstack 186 | 194 | #define __NR_sigaltstack 186 |
195 | #define __NR_sendfile 187 | 195 | #define __NR_sendfile 187 |
196 | #define __NR_getpmsg 188 /* some people actually want streams */ | 196 | /*#define __NR_getpmsg 188*/ /* some people actually want streams */ |
197 | #define __NR_putpmsg 189 /* some people actually want streams */ | 197 | /*#define __NR_putpmsg 189*/ /* some people actually want streams */ |
198 | #define __NR_vfork 190 | 198 | #define __NR_vfork 190 |
199 | #define __NR_ugetrlimit 191 | 199 | #define __NR_ugetrlimit 191 |
200 | #define __NR_mmap2 192 | 200 | #define __NR_mmap2 192 |
@@ -223,6 +223,8 @@ | |||
223 | #define __NR_setfsuid32 215 | 223 | #define __NR_setfsuid32 215 |
224 | #define __NR_setfsgid32 216 | 224 | #define __NR_setfsgid32 216 |
225 | #define __NR_pivot_root 217 | 225 | #define __NR_pivot_root 217 |
226 | /* 218*/ | ||
227 | /* 219*/ | ||
226 | #define __NR_getdents64 220 | 228 | #define __NR_getdents64 220 |
227 | #define __NR_gettid 221 | 229 | #define __NR_gettid 221 |
228 | #define __NR_tkill 222 | 230 | #define __NR_tkill 222 |
@@ -281,7 +283,7 @@ | |||
281 | #define __NR_mq_notify 275 | 283 | #define __NR_mq_notify 275 |
282 | #define __NR_mq_getsetattr 276 | 284 | #define __NR_mq_getsetattr 276 |
283 | #define __NR_waitid 277 | 285 | #define __NR_waitid 277 |
284 | #define __NR_vserver 278 | 286 | /*#define __NR_vserver 278*/ |
285 | #define __NR_add_key 279 | 287 | #define __NR_add_key 279 |
286 | #define __NR_request_key 280 | 288 | #define __NR_request_key 280 |
287 | #define __NR_keyctl 281 | 289 | #define __NR_keyctl 281 |
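
Commenting these numbers out only hides them from userspace; the corresponding slots in the kernel's dispatch table (see the entry_mm.S changes below) stay populated with sys_ni_syscall, whose generic definition in kernel/sys_ni.c is essentially:

asmlinkage long sys_ni_syscall(void)
{
        return -ENOSYS;         /* "function not implemented" */
}
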
diff --git a/arch/m68k/kernel/Makefile_mm b/arch/m68k/kernel/Makefile_mm index 55d5d6b680a2..aced67804579 100644 --- a/arch/m68k/kernel/Makefile_mm +++ b/arch/m68k/kernel/Makefile_mm | |||
@@ -10,7 +10,7 @@ endif | |||
10 | extra-y += vmlinux.lds | 10 | extra-y += vmlinux.lds |
11 | 11 | ||
12 | obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ | 12 | obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ |
13 | sys_m68k.o time.o setup.o m68k_ksyms.o devres.o | 13 | sys_m68k.o time.o setup.o m68k_ksyms.o devres.o syscalltable.o |
14 | 14 | ||
15 | devres-y = ../../../kernel/irq/devres.o | 15 | devres-y = ../../../kernel/irq/devres.o |
16 | 16 | ||
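
Paired with the entry_mm.S deletion below, this Makefile hunk moves the hand-maintained table out of the MMU entry code into a separate syscalltable object that both m68k flavours can share. Conceptually the table is just an array of handler addresses indexed by syscall number; a loosely typed C sketch only, since the real file stays assembly with one .long per slot and retired slots pointing at sys_ni_syscall:

typedef long (*syscall_fn)(void);       /* real signatures vary; collapsed here */

extern long sys_ni_syscall(void);       /* the catch-all shown above */

/* Shape only: gaps in the real table are filled, never left NULL. */
static const syscall_fn sys_call_table_sketch[] = {
        [17] = sys_ni_syscall,          /* e.g. the retired "break" slot */
        [44] = sys_ni_syscall,          /* the retired "prof" slot */
};
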
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S index 1359ee659574..bd0ec05263b2 100644 --- a/arch/m68k/kernel/entry_mm.S +++ b/arch/m68k/kernel/entry_mm.S | |||
@@ -407,351 +407,3 @@ resume: | |||
407 | 407 | ||
408 | rts | 408 | rts |
409 | 409 | ||
410 | .data | ||
411 | ALIGN | ||
412 | sys_call_table: | ||
413 | .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ | ||
414 | .long sys_exit | ||
415 | .long sys_fork | ||
416 | .long sys_read | ||
417 | .long sys_write | ||
418 | .long sys_open /* 5 */ | ||
419 | .long sys_close | ||
420 | .long sys_waitpid | ||
421 | .long sys_creat | ||
422 | .long sys_link | ||
423 | .long sys_unlink /* 10 */ | ||
424 | .long sys_execve | ||
425 | .long sys_chdir | ||
426 | .long sys_time | ||
427 | .long sys_mknod | ||
428 | .long sys_chmod /* 15 */ | ||
429 | .long sys_chown16 | ||
430 | .long sys_ni_syscall /* old break syscall holder */ | ||
431 | .long sys_stat | ||
432 | .long sys_lseek | ||
433 | .long sys_getpid /* 20 */ | ||
434 | .long sys_mount | ||
435 | .long sys_oldumount | ||
436 | .long sys_setuid16 | ||
437 | .long sys_getuid16 | ||
438 | .long sys_stime /* 25 */ | ||
439 | .long sys_ptrace | ||
440 | .long sys_alarm | ||
441 | .long sys_fstat | ||
442 | .long sys_pause | ||
443 | .long sys_utime /* 30 */ | ||
444 | .long sys_ni_syscall /* old stty syscall holder */ | ||
445 | .long sys_ni_syscall /* old gtty syscall holder */ | ||
446 | .long sys_access | ||
447 | .long sys_nice | ||
448 | .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ | ||
449 | .long sys_sync | ||
450 | .long sys_kill | ||
451 | .long sys_rename | ||
452 | .long sys_mkdir | ||
453 | .long sys_rmdir /* 40 */ | ||
454 | .long sys_dup | ||
455 | .long sys_pipe | ||
456 | .long sys_times | ||
457 | .long sys_ni_syscall /* old prof syscall holder */ | ||
458 | .long sys_brk /* 45 */ | ||
459 | .long sys_setgid16 | ||
460 | .long sys_getgid16 | ||
461 | .long sys_signal | ||
462 | .long sys_geteuid16 | ||
463 | .long sys_getegid16 /* 50 */ | ||
464 | .long sys_acct | ||
465 | .long sys_umount /* recycled never used phys() */ | ||
466 | .long sys_ni_syscall /* old lock syscall holder */ | ||
467 | .long sys_ioctl | ||
468 | .long sys_fcntl /* 55 */ | ||
469 | .long sys_ni_syscall /* old mpx syscall holder */ | ||
470 | .long sys_setpgid | ||
471 | .long sys_ni_syscall /* old ulimit syscall holder */ | ||
472 | .long sys_ni_syscall | ||
473 | .long sys_umask /* 60 */ | ||
474 | .long sys_chroot | ||
475 | .long sys_ustat | ||
476 | .long sys_dup2 | ||
477 | .long sys_getppid | ||
478 | .long sys_getpgrp /* 65 */ | ||
479 | .long sys_setsid | ||
480 | .long sys_sigaction | ||
481 | .long sys_sgetmask | ||
482 | .long sys_ssetmask | ||
483 | .long sys_setreuid16 /* 70 */ | ||
484 | .long sys_setregid16 | ||
485 | .long sys_sigsuspend | ||
486 | .long sys_sigpending | ||
487 | .long sys_sethostname | ||
488 | .long sys_setrlimit /* 75 */ | ||
489 | .long sys_old_getrlimit | ||
490 | .long sys_getrusage | ||
491 | .long sys_gettimeofday | ||
492 | .long sys_settimeofday | ||
493 | .long sys_getgroups16 /* 80 */ | ||
494 | .long sys_setgroups16 | ||
495 | .long sys_old_select | ||
496 | .long sys_symlink | ||
497 | .long sys_lstat | ||
498 | .long sys_readlink /* 85 */ | ||
499 | .long sys_uselib | ||
500 | .long sys_swapon | ||
501 | .long sys_reboot | ||
502 | .long sys_old_readdir | ||
503 | .long sys_old_mmap /* 90 */ | ||
504 | .long sys_munmap | ||
505 | .long sys_truncate | ||
506 | .long sys_ftruncate | ||
507 | .long sys_fchmod | ||
508 | .long sys_fchown16 /* 95 */ | ||
509 | .long sys_getpriority | ||
510 | .long sys_setpriority | ||
511 | .long sys_ni_syscall /* old profil syscall holder */ | ||
512 | .long sys_statfs | ||
513 | .long sys_fstatfs /* 100 */ | ||
514 | .long sys_ni_syscall /* ioperm for i386 */ | ||
515 | .long sys_socketcall | ||
516 | .long sys_syslog | ||
517 | .long sys_setitimer | ||
518 | .long sys_getitimer /* 105 */ | ||
519 | .long sys_newstat | ||
520 | .long sys_newlstat | ||
521 | .long sys_newfstat | ||
522 | .long sys_ni_syscall | ||
523 | .long sys_ni_syscall /* 110 */ /* iopl for i386 */ | ||
524 | .long sys_vhangup | ||
525 | .long sys_ni_syscall /* obsolete idle() syscall */ | ||
526 | .long sys_ni_syscall /* vm86old for i386 */ | ||
527 | .long sys_wait4 | ||
528 | .long sys_swapoff /* 115 */ | ||
529 | .long sys_sysinfo | ||
530 | .long sys_ipc | ||
531 | .long sys_fsync | ||
532 | .long sys_sigreturn | ||
533 | .long sys_clone /* 120 */ | ||
534 | .long sys_setdomainname | ||
535 | .long sys_newuname | ||
536 | .long sys_cacheflush /* modify_ldt for i386 */ | ||
537 | .long sys_adjtimex | ||
538 | .long sys_mprotect /* 125 */ | ||
539 | .long sys_sigprocmask | ||
540 | .long sys_ni_syscall /* old "create_module" */ | ||
541 | .long sys_init_module | ||
542 | .long sys_delete_module | ||
543 | .long sys_ni_syscall /* 130 - old "get_kernel_syms" */ | ||
544 | .long sys_quotactl | ||
545 | .long sys_getpgid | ||
546 | .long sys_fchdir | ||
547 | .long sys_bdflush | ||
548 | .long sys_sysfs /* 135 */ | ||
549 | .long sys_personality | ||
550 | .long sys_ni_syscall /* for afs_syscall */ | ||
551 | .long sys_setfsuid16 | ||
552 | .long sys_setfsgid16 | ||
553 | .long sys_llseek /* 140 */ | ||
554 | .long sys_getdents | ||
555 | .long sys_select | ||
556 | .long sys_flock | ||
557 | .long sys_msync | ||
558 | .long sys_readv /* 145 */ | ||
559 | .long sys_writev | ||
560 | .long sys_getsid | ||
561 | .long sys_fdatasync | ||
562 | .long sys_sysctl | ||
563 | .long sys_mlock /* 150 */ | ||
564 | .long sys_munlock | ||
565 | .long sys_mlockall | ||
566 | .long sys_munlockall | ||
567 | .long sys_sched_setparam | ||
568 | .long sys_sched_getparam /* 155 */ | ||
569 | .long sys_sched_setscheduler | ||
570 | .long sys_sched_getscheduler | ||
571 | .long sys_sched_yield | ||
572 | .long sys_sched_get_priority_max | ||
573 | .long sys_sched_get_priority_min /* 160 */ | ||
574 | .long sys_sched_rr_get_interval | ||
575 | .long sys_nanosleep | ||
576 | .long sys_mremap | ||
577 | .long sys_setresuid16 | ||
578 | .long sys_getresuid16 /* 165 */ | ||
579 | .long sys_getpagesize | ||
580 | .long sys_ni_syscall /* old sys_query_module */ | ||
581 | .long sys_poll | ||
582 | .long sys_nfsservctl | ||
583 | .long sys_setresgid16 /* 170 */ | ||
584 | .long sys_getresgid16 | ||
585 | .long sys_prctl | ||
586 | .long sys_rt_sigreturn | ||
587 | .long sys_rt_sigaction | ||
588 | .long sys_rt_sigprocmask /* 175 */ | ||
589 | .long sys_rt_sigpending | ||
590 | .long sys_rt_sigtimedwait | ||
591 | .long sys_rt_sigqueueinfo | ||
592 | .long sys_rt_sigsuspend | ||
593 | .long sys_pread64 /* 180 */ | ||
594 | .long sys_pwrite64 | ||
595 | .long sys_lchown16; | ||
596 | .long sys_getcwd | ||
597 | .long sys_capget | ||
598 | .long sys_capset /* 185 */ | ||
599 | .long sys_sigaltstack | ||
600 | .long sys_sendfile | ||
601 | .long sys_ni_syscall /* streams1 */ | ||
602 | .long sys_ni_syscall /* streams2 */ | ||
603 | .long sys_vfork /* 190 */ | ||
604 | .long sys_getrlimit | ||
605 | .long sys_mmap2 | ||
606 | .long sys_truncate64 | ||
607 | .long sys_ftruncate64 | ||
608 | .long sys_stat64 /* 195 */ | ||
609 | .long sys_lstat64 | ||
610 | .long sys_fstat64 | ||
611 | .long sys_chown | ||
612 | .long sys_getuid | ||
613 | .long sys_getgid /* 200 */ | ||
614 | .long sys_geteuid | ||
615 | .long sys_getegid | ||
616 | .long sys_setreuid | ||
617 | .long sys_setregid | ||
618 | .long sys_getgroups /* 205 */ | ||
619 | .long sys_setgroups | ||
620 | .long sys_fchown | ||
621 | .long sys_setresuid | ||
622 | .long sys_getresuid | ||
623 | .long sys_setresgid /* 210 */ | ||
624 | .long sys_getresgid | ||
625 | .long sys_lchown | ||
626 | .long sys_setuid | ||
627 | .long sys_setgid | ||
628 | .long sys_setfsuid /* 215 */ | ||
629 | .long sys_setfsgid | ||
630 | .long sys_pivot_root | ||
631 | .long sys_ni_syscall | ||
632 | .long sys_ni_syscall | ||
633 | .long sys_getdents64 /* 220 */ | ||
634 | .long sys_gettid | ||
635 | .long sys_tkill | ||
636 | .long sys_setxattr | ||
637 | .long sys_lsetxattr | ||
638 | .long sys_fsetxattr /* 225 */ | ||
639 | .long sys_getxattr | ||
640 | .long sys_lgetxattr | ||
641 | .long sys_fgetxattr | ||
642 | .long sys_listxattr | ||
643 | .long sys_llistxattr /* 230 */ | ||
644 | .long sys_flistxattr | ||
645 | .long sys_removexattr | ||
646 | .long sys_lremovexattr | ||
647 | .long sys_fremovexattr | ||
648 | .long sys_futex /* 235 */ | ||
649 | .long sys_sendfile64 | ||
650 | .long sys_mincore | ||
651 | .long sys_madvise | ||
652 | .long sys_fcntl64 | ||
653 | .long sys_readahead /* 240 */ | ||
654 | .long sys_io_setup | ||
655 | .long sys_io_destroy | ||
656 | .long sys_io_getevents | ||
657 | .long sys_io_submit | ||
658 | .long sys_io_cancel /* 245 */ | ||
659 | .long sys_fadvise64 | ||
660 | .long sys_exit_group | ||
661 | .long sys_lookup_dcookie | ||
662 | .long sys_epoll_create | ||
663 | .long sys_epoll_ctl /* 250 */ | ||
664 | .long sys_epoll_wait | ||
665 | .long sys_remap_file_pages | ||
666 | .long sys_set_tid_address | ||
667 | .long sys_timer_create | ||
668 | .long sys_timer_settime /* 255 */ | ||
669 | .long sys_timer_gettime | ||
670 | .long sys_timer_getoverrun | ||
671 | .long sys_timer_delete | ||
672 | .long sys_clock_settime | ||
673 | .long sys_clock_gettime /* 260 */ | ||
674 | .long sys_clock_getres | ||
675 | .long sys_clock_nanosleep | ||
676 | .long sys_statfs64 | ||
677 | .long sys_fstatfs64 | ||
678 | .long sys_tgkill /* 265 */ | ||
679 | .long sys_utimes | ||
680 | .long sys_fadvise64_64 | ||
681 | .long sys_mbind | ||
682 | .long sys_get_mempolicy | ||
683 | .long sys_set_mempolicy /* 270 */ | ||
684 | .long sys_mq_open | ||
685 | .long sys_mq_unlink | ||
686 | .long sys_mq_timedsend | ||
687 | .long sys_mq_timedreceive | ||
688 | .long sys_mq_notify /* 275 */ | ||
689 | .long sys_mq_getsetattr | ||
690 | .long sys_waitid | ||
691 | .long sys_ni_syscall /* for sys_vserver */ | ||
692 | .long sys_add_key | ||
693 | .long sys_request_key /* 280 */ | ||
694 | .long sys_keyctl | ||
695 | .long sys_ioprio_set | ||
696 | .long sys_ioprio_get | ||
697 | .long sys_inotify_init | ||
698 | .long sys_inotify_add_watch /* 285 */ | ||
699 | .long sys_inotify_rm_watch | ||
700 | .long sys_migrate_pages | ||
701 | .long sys_openat | ||
702 | .long sys_mkdirat | ||
703 | .long sys_mknodat /* 290 */ | ||
704 | .long sys_fchownat | ||
705 | .long sys_futimesat | ||
706 | .long sys_fstatat64 | ||
707 | .long sys_unlinkat | ||
708 | .long sys_renameat /* 295 */ | ||
709 | .long sys_linkat | ||
710 | .long sys_symlinkat | ||
711 | .long sys_readlinkat | ||
712 | .long sys_fchmodat | ||
713 | .long sys_faccessat /* 300 */ | ||
714 | .long sys_ni_syscall /* Reserved for pselect6 */ | ||
715 | .long sys_ni_syscall /* Reserved for ppoll */ | ||
716 | .long sys_unshare | ||
717 | .long sys_set_robust_list | ||
718 | .long sys_get_robust_list /* 305 */ | ||
719 | .long sys_splice | ||
720 | .long sys_sync_file_range | ||
721 | .long sys_tee | ||
722 | .long sys_vmsplice | ||
723 | .long sys_move_pages /* 310 */ | ||
724 | .long sys_sched_setaffinity | ||
725 | .long sys_sched_getaffinity | ||
726 | .long sys_kexec_load | ||
727 | .long sys_getcpu | ||
728 | .long sys_epoll_pwait /* 315 */ | ||
729 | .long sys_utimensat | ||
730 | .long sys_signalfd | ||
731 | .long sys_timerfd_create | ||
732 | .long sys_eventfd | ||
733 | .long sys_fallocate /* 320 */ | ||
734 | .long sys_timerfd_settime | ||
735 | .long sys_timerfd_gettime | ||
736 | .long sys_signalfd4 | ||
737 | .long sys_eventfd2 | ||
738 | .long sys_epoll_create1 /* 325 */ | ||
739 | .long sys_dup3 | ||
740 | .long sys_pipe2 | ||
741 | .long sys_inotify_init1 | ||
742 | .long sys_preadv | ||
743 | .long sys_pwritev /* 330 */ | ||
744 | .long sys_rt_tgsigqueueinfo | ||
745 | .long sys_perf_event_open | ||
746 | .long sys_get_thread_area | ||
747 | .long sys_set_thread_area | ||
748 | .long sys_atomic_cmpxchg_32 /* 335 */ | ||
749 | .long sys_atomic_barrier | ||
750 | .long sys_fanotify_init | ||
751 | .long sys_fanotify_mark | ||
752 | .long sys_prlimit64 | ||
753 | .long sys_name_to_handle_at /* 340 */ | ||
754 | .long sys_open_by_handle_at | ||
755 | .long sys_clock_adjtime | ||
756 | .long sys_syncfs | ||
757 | |||
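Every sys_ni_syscall slot in the table above is the kernel's "not implemented" stub, which simply reports -ENOSYS to user space; reserved, obsolete, and foreign-architecture slots (iopl, vm86old, afs_syscall, ...) all point at it. A minimal sketch of that stub, matching its definition in kernel/sys_ni.c:

    #include <linux/errno.h>
    #include <linux/linkage.h>

    /* Placeholder for unwired syscall table slots: always fails. */
    asmlinkage long sys_ni_syscall(void)
    {
            return -ENOSYS;
    }
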
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 9b8393d8adb8..5909e392cb1e 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/m68knommu/kernel/syscalltable.S | ||
3 | * | ||
4 | * Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com) | 2 | * Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com) |
5 | * | 3 | * |
6 | * Based on older entry.S files, the following copyrights apply: | 4 | * Based on older entry.S files, the following copyrights apply: |
@@ -9,171 +7,176 @@ | |||
9 | * Kenneth Albanowski <kjahds@kjahds.com>, | 7 | * Kenneth Albanowski <kjahds@kjahds.com>, |
10 | * Copyright (C) 2000 Lineo Inc. (www.lineo.com) | 8 | * Copyright (C) 2000 Lineo Inc. (www.lineo.com) |
11 | * Copyright (C) 1991, 1992 Linus Torvalds | 9 | * Copyright (C) 1991, 1992 Linus Torvalds |
10 | * | ||
11 | * Linux/m68k support by Hamish Macdonald | ||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/sys.h> | 14 | #include <linux/sys.h> |
15 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
16 | #include <asm/unistd.h> | ||
17 | 16 | ||
18 | .text | 17 | #ifndef CONFIG_MMU |
18 | #define sys_mmap2 sys_mmap_pgoff | ||
19 | #endif | ||
20 | |||
21 | .section .rodata | ||
19 | ALIGN | 22 | ALIGN |
20 | ENTRY(sys_call_table) | 23 | ENTRY(sys_call_table) |
21 | .long sys_restart_syscall /* 0 - old "setup()" system call */ | 24 | .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ |
22 | .long sys_exit | 25 | .long sys_exit |
23 | .long sys_fork | 26 | .long sys_fork |
24 | .long sys_read | 27 | .long sys_read |
25 | .long sys_write | 28 | .long sys_write |
26 | .long sys_open /* 5 */ | 29 | .long sys_open /* 5 */ |
27 | .long sys_close | 30 | .long sys_close |
28 | .long sys_waitpid | 31 | .long sys_waitpid |
29 | .long sys_creat | 32 | .long sys_creat |
30 | .long sys_link | 33 | .long sys_link |
31 | .long sys_unlink /* 10 */ | 34 | .long sys_unlink /* 10 */ |
32 | .long sys_execve | 35 | .long sys_execve |
33 | .long sys_chdir | 36 | .long sys_chdir |
34 | .long sys_time | 37 | .long sys_time |
35 | .long sys_mknod | 38 | .long sys_mknod |
36 | .long sys_chmod /* 15 */ | 39 | .long sys_chmod /* 15 */ |
37 | .long sys_chown16 | 40 | .long sys_chown16 |
38 | .long sys_ni_syscall /* old break syscall holder */ | 41 | .long sys_ni_syscall /* old break syscall holder */ |
39 | .long sys_stat | 42 | .long sys_stat |
40 | .long sys_lseek | 43 | .long sys_lseek |
41 | .long sys_getpid /* 20 */ | 44 | .long sys_getpid /* 20 */ |
42 | .long sys_mount | 45 | .long sys_mount |
43 | .long sys_oldumount | 46 | .long sys_oldumount |
44 | .long sys_setuid16 | 47 | .long sys_setuid16 |
45 | .long sys_getuid16 | 48 | .long sys_getuid16 |
46 | .long sys_stime /* 25 */ | 49 | .long sys_stime /* 25 */ |
47 | .long sys_ptrace | 50 | .long sys_ptrace |
48 | .long sys_alarm | 51 | .long sys_alarm |
49 | .long sys_fstat | 52 | .long sys_fstat |
50 | .long sys_pause | 53 | .long sys_pause |
51 | .long sys_utime /* 30 */ | 54 | .long sys_utime /* 30 */ |
52 | .long sys_ni_syscall /* old stty syscall holder */ | 55 | .long sys_ni_syscall /* old stty syscall holder */ |
53 | .long sys_ni_syscall /* old gtty syscall holder */ | 56 | .long sys_ni_syscall /* old gtty syscall holder */ |
54 | .long sys_access | 57 | .long sys_access |
55 | .long sys_nice | 58 | .long sys_nice |
56 | .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ | 59 | .long sys_ni_syscall /* 35 - old ftime syscall holder */ |
57 | .long sys_sync | 60 | .long sys_sync |
58 | .long sys_kill | 61 | .long sys_kill |
59 | .long sys_rename | 62 | .long sys_rename |
60 | .long sys_mkdir | 63 | .long sys_mkdir |
61 | .long sys_rmdir /* 40 */ | 64 | .long sys_rmdir /* 40 */ |
62 | .long sys_dup | 65 | .long sys_dup |
63 | .long sys_pipe | 66 | .long sys_pipe |
64 | .long sys_times | 67 | .long sys_times |
65 | .long sys_ni_syscall /* old prof syscall holder */ | 68 | .long sys_ni_syscall /* old prof syscall holder */ |
66 | .long sys_brk /* 45 */ | 69 | .long sys_brk /* 45 */ |
67 | .long sys_setgid16 | 70 | .long sys_setgid16 |
68 | .long sys_getgid16 | 71 | .long sys_getgid16 |
69 | .long sys_signal | 72 | .long sys_signal |
70 | .long sys_geteuid16 | 73 | .long sys_geteuid16 |
71 | .long sys_getegid16 /* 50 */ | 74 | .long sys_getegid16 /* 50 */ |
72 | .long sys_acct | 75 | .long sys_acct |
73 | .long sys_umount /* recycled never used phys() */ | 76 | .long sys_umount /* recycled never used phys() */ |
74 | .long sys_ni_syscall /* old lock syscall holder */ | 77 | .long sys_ni_syscall /* old lock syscall holder */ |
75 | .long sys_ioctl | 78 | .long sys_ioctl |
76 | .long sys_fcntl /* 55 */ | 79 | .long sys_fcntl /* 55 */ |
77 | .long sys_ni_syscall /* old mpx syscall holder */ | 80 | .long sys_ni_syscall /* old mpx syscall holder */ |
78 | .long sys_setpgid | 81 | .long sys_setpgid |
79 | .long sys_ni_syscall /* old ulimit syscall holder */ | 82 | .long sys_ni_syscall /* old ulimit syscall holder */ |
80 | .long sys_ni_syscall | 83 | .long sys_ni_syscall |
81 | .long sys_umask /* 60 */ | 84 | .long sys_umask /* 60 */ |
82 | .long sys_chroot | 85 | .long sys_chroot |
83 | .long sys_ustat | 86 | .long sys_ustat |
84 | .long sys_dup2 | 87 | .long sys_dup2 |
85 | .long sys_getppid | 88 | .long sys_getppid |
86 | .long sys_getpgrp /* 65 */ | 89 | .long sys_getpgrp /* 65 */ |
87 | .long sys_setsid | 90 | .long sys_setsid |
88 | .long sys_sigaction | 91 | .long sys_sigaction |
89 | .long sys_sgetmask | 92 | .long sys_sgetmask |
90 | .long sys_ssetmask | 93 | .long sys_ssetmask |
91 | .long sys_setreuid16 /* 70 */ | 94 | .long sys_setreuid16 /* 70 */ |
92 | .long sys_setregid16 | 95 | .long sys_setregid16 |
93 | .long sys_sigsuspend | 96 | .long sys_sigsuspend |
94 | .long sys_sigpending | 97 | .long sys_sigpending |
95 | .long sys_sethostname | 98 | .long sys_sethostname |
96 | .long sys_setrlimit /* 75 */ | 99 | .long sys_setrlimit /* 75 */ |
97 | .long sys_old_getrlimit | 100 | .long sys_old_getrlimit |
98 | .long sys_getrusage | 101 | .long sys_getrusage |
99 | .long sys_gettimeofday | 102 | .long sys_gettimeofday |
100 | .long sys_settimeofday | 103 | .long sys_settimeofday |
101 | .long sys_getgroups16 /* 80 */ | 104 | .long sys_getgroups16 /* 80 */ |
102 | .long sys_setgroups16 | 105 | .long sys_setgroups16 |
103 | .long sys_old_select | 106 | .long sys_old_select |
104 | .long sys_symlink | 107 | .long sys_symlink |
105 | .long sys_lstat | 108 | .long sys_lstat |
106 | .long sys_readlink /* 85 */ | 109 | .long sys_readlink /* 85 */ |
107 | .long sys_uselib | 110 | .long sys_uselib |
108 | .long sys_ni_syscall /* sys_swapon */ | 111 | .long sys_swapon |
109 | .long sys_reboot | 112 | .long sys_reboot |
110 | .long sys_old_readdir | 113 | .long sys_old_readdir |
111 | .long sys_old_mmap /* 90 */ | 114 | .long sys_old_mmap /* 90 */ |
112 | .long sys_munmap | 115 | .long sys_munmap |
113 | .long sys_truncate | 116 | .long sys_truncate |
114 | .long sys_ftruncate | 117 | .long sys_ftruncate |
115 | .long sys_fchmod | 118 | .long sys_fchmod |
116 | .long sys_fchown16 /* 95 */ | 119 | .long sys_fchown16 /* 95 */ |
117 | .long sys_getpriority | 120 | .long sys_getpriority |
118 | .long sys_setpriority | 121 | .long sys_setpriority |
119 | .long sys_ni_syscall /* old profil syscall holder */ | 122 | .long sys_ni_syscall /* old profil syscall holder */ |
120 | .long sys_statfs | 123 | .long sys_statfs |
121 | .long sys_fstatfs /* 100 */ | 124 | .long sys_fstatfs /* 100 */ |
122 | .long sys_ni_syscall /* ioperm for i386 */ | 125 | .long sys_ni_syscall /* ioperm for i386 */ |
123 | .long sys_socketcall | 126 | .long sys_socketcall |
124 | .long sys_syslog | 127 | .long sys_syslog |
125 | .long sys_setitimer | 128 | .long sys_setitimer |
126 | .long sys_getitimer /* 105 */ | 129 | .long sys_getitimer /* 105 */ |
127 | .long sys_newstat | 130 | .long sys_newstat |
128 | .long sys_newlstat | 131 | .long sys_newlstat |
129 | .long sys_newfstat | 132 | .long sys_newfstat |
130 | .long sys_ni_syscall | 133 | .long sys_ni_syscall |
131 | .long sys_ni_syscall /* iopl for i386 */ /* 110 */ | 134 | .long sys_ni_syscall /* 110 - iopl for i386 */ |
132 | .long sys_vhangup | 135 | .long sys_vhangup |
133 | .long sys_ni_syscall /* obsolete idle() syscall */ | 136 | .long sys_ni_syscall /* obsolete idle() syscall */ |
134 | .long sys_ni_syscall /* vm86old for i386 */ | 137 | .long sys_ni_syscall /* vm86old for i386 */ |
135 | .long sys_wait4 | 138 | .long sys_wait4 |
136 | .long sys_ni_syscall /* 115 */ /* sys_swapoff */ | 139 | .long sys_swapoff /* 115 */ |
137 | .long sys_sysinfo | 140 | .long sys_sysinfo |
138 | .long sys_ipc | 141 | .long sys_ipc |
139 | .long sys_fsync | 142 | .long sys_fsync |
140 | .long sys_sigreturn | 143 | .long sys_sigreturn |
141 | .long sys_clone /* 120 */ | 144 | .long sys_clone /* 120 */ |
142 | .long sys_setdomainname | 145 | .long sys_setdomainname |
143 | .long sys_newuname | 146 | .long sys_newuname |
144 | .long sys_cacheflush /* modify_ldt for i386 */ | 147 | .long sys_cacheflush /* modify_ldt for i386 */ |
145 | .long sys_adjtimex | 148 | .long sys_adjtimex |
146 | .long sys_ni_syscall /* 125 */ /* sys_mprotect */ | 149 | .long sys_mprotect /* 125 */ |
147 | .long sys_sigprocmask | 150 | .long sys_sigprocmask |
148 | .long sys_ni_syscall /* old "creat_module" */ | 151 | .long sys_ni_syscall /* old "create_module" */ |
149 | .long sys_init_module | 152 | .long sys_init_module |
150 | .long sys_delete_module | 153 | .long sys_delete_module |
151 | .long sys_ni_syscall /* 130: old "get_kernel_syms" */ | 154 | .long sys_ni_syscall /* 130 - old "get_kernel_syms" */ |
152 | .long sys_quotactl | 155 | .long sys_quotactl |
153 | .long sys_getpgid | 156 | .long sys_getpgid |
154 | .long sys_fchdir | 157 | .long sys_fchdir |
155 | .long sys_bdflush | 158 | .long sys_bdflush |
156 | .long sys_sysfs /* 135 */ | 159 | .long sys_sysfs /* 135 */ |
157 | .long sys_personality | 160 | .long sys_personality |
158 | .long sys_ni_syscall /* for afs_syscall */ | 161 | .long sys_ni_syscall /* for afs_syscall */ |
159 | .long sys_setfsuid16 | 162 | .long sys_setfsuid16 |
160 | .long sys_setfsgid16 | 163 | .long sys_setfsgid16 |
161 | .long sys_llseek /* 140 */ | 164 | .long sys_llseek /* 140 */ |
162 | .long sys_getdents | 165 | .long sys_getdents |
163 | .long sys_select | 166 | .long sys_select |
164 | .long sys_flock | 167 | .long sys_flock |
165 | .long sys_ni_syscall /* sys_msync */ | 168 | .long sys_msync |
166 | .long sys_readv /* 145 */ | 169 | .long sys_readv /* 145 */ |
167 | .long sys_writev | 170 | .long sys_writev |
168 | .long sys_getsid | 171 | .long sys_getsid |
169 | .long sys_fdatasync | 172 | .long sys_fdatasync |
170 | .long sys_sysctl | 173 | .long sys_sysctl |
171 | .long sys_ni_syscall /* 150 */ /* sys_mlock */ | 174 | .long sys_mlock /* 150 */ |
172 | .long sys_ni_syscall /* sys_munlock */ | 175 | .long sys_munlock |
173 | .long sys_ni_syscall /* sys_mlockall */ | 176 | .long sys_mlockall |
174 | .long sys_ni_syscall /* sys_munlockall */ | 177 | .long sys_munlockall |
175 | .long sys_sched_setparam | 178 | .long sys_sched_setparam |
176 | .long sys_sched_getparam /* 155 */ | 179 | .long sys_sched_getparam /* 155 */ |
177 | .long sys_sched_setscheduler | 180 | .long sys_sched_setscheduler |
178 | .long sys_sched_getscheduler | 181 | .long sys_sched_getscheduler |
179 | .long sys_sched_yield | 182 | .long sys_sched_yield |
@@ -181,124 +184,124 @@ ENTRY(sys_call_table) | |||
181 | .long sys_sched_get_priority_min /* 160 */ | 184 | .long sys_sched_get_priority_min /* 160 */ |
182 | .long sys_sched_rr_get_interval | 185 | .long sys_sched_rr_get_interval |
183 | .long sys_nanosleep | 186 | .long sys_nanosleep |
184 | .long sys_ni_syscall /* sys_mremap */ | 187 | .long sys_mremap |
185 | .long sys_setresuid16 | 188 | .long sys_setresuid16 |
186 | .long sys_getresuid16 /* 165 */ | 189 | .long sys_getresuid16 /* 165 */ |
187 | .long sys_getpagesize /* sys_getpagesize */ | 190 | .long sys_getpagesize |
188 | .long sys_ni_syscall /* old "query_module" */ | 191 | .long sys_ni_syscall /* old "query_module" */ |
189 | .long sys_poll | 192 | .long sys_poll |
190 | .long sys_ni_syscall /* sys_nfsservctl */ | 193 | .long sys_nfsservctl |
191 | .long sys_setresgid16 /* 170 */ | 194 | .long sys_setresgid16 /* 170 */ |
192 | .long sys_getresgid16 | 195 | .long sys_getresgid16 |
193 | .long sys_prctl | 196 | .long sys_prctl |
194 | .long sys_rt_sigreturn | 197 | .long sys_rt_sigreturn |
195 | .long sys_rt_sigaction | 198 | .long sys_rt_sigaction |
196 | .long sys_rt_sigprocmask /* 175 */ | 199 | .long sys_rt_sigprocmask /* 175 */ |
197 | .long sys_rt_sigpending | 200 | .long sys_rt_sigpending |
198 | .long sys_rt_sigtimedwait | 201 | .long sys_rt_sigtimedwait |
199 | .long sys_rt_sigqueueinfo | 202 | .long sys_rt_sigqueueinfo |
200 | .long sys_rt_sigsuspend | 203 | .long sys_rt_sigsuspend |
201 | .long sys_pread64 /* 180 */ | 204 | .long sys_pread64 /* 180 */ |
202 | .long sys_pwrite64 | 205 | .long sys_pwrite64 |
203 | .long sys_lchown16 | 206 | .long sys_lchown16 |
204 | .long sys_getcwd | 207 | .long sys_getcwd |
205 | .long sys_capget | 208 | .long sys_capget |
206 | .long sys_capset /* 185 */ | 209 | .long sys_capset /* 185 */ |
207 | .long sys_sigaltstack | 210 | .long sys_sigaltstack |
208 | .long sys_sendfile | 211 | .long sys_sendfile |
209 | .long sys_ni_syscall /* streams1 */ | 212 | .long sys_ni_syscall /* streams1 */ |
210 | .long sys_ni_syscall /* streams2 */ | 213 | .long sys_ni_syscall /* streams2 */ |
211 | .long sys_vfork /* 190 */ | 214 | .long sys_vfork /* 190 */ |
212 | .long sys_getrlimit | 215 | .long sys_getrlimit |
213 | .long sys_mmap_pgoff | 216 | .long sys_mmap2 |
214 | .long sys_truncate64 | 217 | .long sys_truncate64 |
215 | .long sys_ftruncate64 | 218 | .long sys_ftruncate64 |
216 | .long sys_stat64 /* 195 */ | 219 | .long sys_stat64 /* 195 */ |
217 | .long sys_lstat64 | 220 | .long sys_lstat64 |
218 | .long sys_fstat64 | 221 | .long sys_fstat64 |
219 | .long sys_chown | 222 | .long sys_chown |
220 | .long sys_getuid | 223 | .long sys_getuid |
221 | .long sys_getgid /* 200 */ | 224 | .long sys_getgid /* 200 */ |
222 | .long sys_geteuid | 225 | .long sys_geteuid |
223 | .long sys_getegid | 226 | .long sys_getegid |
224 | .long sys_setreuid | 227 | .long sys_setreuid |
225 | .long sys_setregid | 228 | .long sys_setregid |
226 | .long sys_getgroups /* 205 */ | 229 | .long sys_getgroups /* 205 */ |
227 | .long sys_setgroups | 230 | .long sys_setgroups |
228 | .long sys_fchown | 231 | .long sys_fchown |
229 | .long sys_setresuid | 232 | .long sys_setresuid |
230 | .long sys_getresuid | 233 | .long sys_getresuid |
231 | .long sys_setresgid /* 210 */ | 234 | .long sys_setresgid /* 210 */ |
232 | .long sys_getresgid | 235 | .long sys_getresgid |
233 | .long sys_lchown | 236 | .long sys_lchown |
234 | .long sys_setuid | 237 | .long sys_setuid |
235 | .long sys_setgid | 238 | .long sys_setgid |
236 | .long sys_setfsuid /* 215 */ | 239 | .long sys_setfsuid /* 215 */ |
237 | .long sys_setfsgid | 240 | .long sys_setfsgid |
238 | .long sys_pivot_root | 241 | .long sys_pivot_root |
239 | .long sys_ni_syscall | 242 | .long sys_ni_syscall |
240 | .long sys_ni_syscall | 243 | .long sys_ni_syscall |
241 | .long sys_getdents64 /* 220 */ | 244 | .long sys_getdents64 /* 220 */ |
242 | .long sys_gettid | 245 | .long sys_gettid |
243 | .long sys_tkill | 246 | .long sys_tkill |
244 | .long sys_setxattr | 247 | .long sys_setxattr |
245 | .long sys_lsetxattr | 248 | .long sys_lsetxattr |
246 | .long sys_fsetxattr /* 225 */ | 249 | .long sys_fsetxattr /* 225 */ |
247 | .long sys_getxattr | 250 | .long sys_getxattr |
248 | .long sys_lgetxattr | 251 | .long sys_lgetxattr |
249 | .long sys_fgetxattr | 252 | .long sys_fgetxattr |
250 | .long sys_listxattr | 253 | .long sys_listxattr |
251 | .long sys_llistxattr /* 230 */ | 254 | .long sys_llistxattr /* 230 */ |
252 | .long sys_flistxattr | 255 | .long sys_flistxattr |
253 | .long sys_removexattr | 256 | .long sys_removexattr |
254 | .long sys_lremovexattr | 257 | .long sys_lremovexattr |
255 | .long sys_fremovexattr | 258 | .long sys_fremovexattr |
256 | .long sys_futex /* 235 */ | 259 | .long sys_futex /* 235 */ |
257 | .long sys_sendfile64 | 260 | .long sys_sendfile64 |
258 | .long sys_ni_syscall /* sys_mincore */ | 261 | .long sys_mincore |
259 | .long sys_ni_syscall /* sys_madvise */ | 262 | .long sys_madvise |
260 | .long sys_fcntl64 | 263 | .long sys_fcntl64 |
261 | .long sys_readahead /* 240 */ | 264 | .long sys_readahead /* 240 */ |
262 | .long sys_io_setup | 265 | .long sys_io_setup |
263 | .long sys_io_destroy | 266 | .long sys_io_destroy |
264 | .long sys_io_getevents | 267 | .long sys_io_getevents |
265 | .long sys_io_submit | 268 | .long sys_io_submit |
266 | .long sys_io_cancel /* 245 */ | 269 | .long sys_io_cancel /* 245 */ |
267 | .long sys_fadvise64 | 270 | .long sys_fadvise64 |
268 | .long sys_exit_group | 271 | .long sys_exit_group |
269 | .long sys_lookup_dcookie | 272 | .long sys_lookup_dcookie |
270 | .long sys_epoll_create | 273 | .long sys_epoll_create |
271 | .long sys_epoll_ctl /* 250 */ | 274 | .long sys_epoll_ctl /* 250 */ |
272 | .long sys_epoll_wait | 275 | .long sys_epoll_wait |
273 | .long sys_ni_syscall /* sys_remap_file_pages */ | 276 | .long sys_remap_file_pages |
274 | .long sys_set_tid_address | 277 | .long sys_set_tid_address |
275 | .long sys_timer_create | 278 | .long sys_timer_create |
276 | .long sys_timer_settime /* 255 */ | 279 | .long sys_timer_settime /* 255 */ |
277 | .long sys_timer_gettime | 280 | .long sys_timer_gettime |
278 | .long sys_timer_getoverrun | 281 | .long sys_timer_getoverrun |
279 | .long sys_timer_delete | 282 | .long sys_timer_delete |
280 | .long sys_clock_settime | 283 | .long sys_clock_settime |
281 | .long sys_clock_gettime /* 260 */ | 284 | .long sys_clock_gettime /* 260 */ |
282 | .long sys_clock_getres | 285 | .long sys_clock_getres |
283 | .long sys_clock_nanosleep | 286 | .long sys_clock_nanosleep |
284 | .long sys_statfs64 | 287 | .long sys_statfs64 |
285 | .long sys_fstatfs64 | 288 | .long sys_fstatfs64 |
286 | .long sys_tgkill /* 265 */ | 289 | .long sys_tgkill /* 265 */ |
287 | .long sys_utimes | 290 | .long sys_utimes |
288 | .long sys_fadvise64_64 | 291 | .long sys_fadvise64_64 |
289 | .long sys_mbind | 292 | .long sys_mbind |
290 | .long sys_get_mempolicy | 293 | .long sys_get_mempolicy |
291 | .long sys_set_mempolicy /* 270 */ | 294 | .long sys_set_mempolicy /* 270 */ |
292 | .long sys_mq_open | 295 | .long sys_mq_open |
293 | .long sys_mq_unlink | 296 | .long sys_mq_unlink |
294 | .long sys_mq_timedsend | 297 | .long sys_mq_timedsend |
295 | .long sys_mq_timedreceive | 298 | .long sys_mq_timedreceive |
296 | .long sys_mq_notify /* 275 */ | 299 | .long sys_mq_notify /* 275 */ |
297 | .long sys_mq_getsetattr | 300 | .long sys_mq_getsetattr |
298 | .long sys_waitid | 301 | .long sys_waitid |
299 | .long sys_ni_syscall /* for sys_vserver */ | 302 | .long sys_ni_syscall /* for sys_vserver */ |
300 | .long sys_add_key | 303 | .long sys_add_key |
301 | .long sys_request_key /* 280 */ | 304 | .long sys_request_key /* 280 */ |
302 | .long sys_keyctl | 305 | .long sys_keyctl |
303 | .long sys_ioprio_set | 306 | .long sys_ioprio_set |
304 | .long sys_ioprio_get | 307 | .long sys_ioprio_get |
@@ -319,8 +322,8 @@ ENTRY(sys_call_table) | |||
319 | .long sys_readlinkat | 322 | .long sys_readlinkat |
320 | .long sys_fchmodat | 323 | .long sys_fchmodat |
321 | .long sys_faccessat /* 300 */ | 324 | .long sys_faccessat /* 300 */ |
322 | .long sys_ni_syscall /* Reserved for pselect6 */ | 325 | .long sys_pselect6 |
323 | .long sys_ni_syscall /* Reserved for ppoll */ | 326 | .long sys_ppoll |
324 | .long sys_unshare | 327 | .long sys_unshare |
325 | .long sys_set_robust_list | 328 | .long sys_set_robust_list |
326 | .long sys_get_robust_list /* 305 */ | 329 | .long sys_get_robust_list /* 305 */ |
@@ -363,7 +366,3 @@ ENTRY(sys_call_table) | |||
363 | .long sys_clock_adjtime | 366 | .long sys_clock_adjtime |
364 | .long sys_syncfs | 367 | .long sys_syncfs |
365 | 368 | ||
366 | .rept NR_syscalls-(.-sys_call_table)/4 | ||
367 | .long sys_ni_syscall | ||
368 | .endr | ||
369 | |||
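The entry code indexes sys_call_table by syscall number, scaled by 4 because every slot is one 32-bit .long. The #ifndef CONFIG_MMU alias at the top of the file lets a single table serve both m68k configurations by routing mmap2 to sys_mmap_pgoff on no-MMU builds. The deleted trailer, .rept NR_syscalls-(.-sys_call_table)/4, padded any unfilled tail of the table with sys_ni_syscall ((. - sys_call_table)/4 is the number of entries emitted so far); with the table fully populated through sys_syncfs, and asm/unistd.h no longer included, the padding is unnecessary. A rough C model of the lookup, with hypothetical names and argument passing omitted:

    #include <linux/errno.h>

    #define MY_NR_SYSCALLS 344              /* illustrative: sys_syncfs is 343 above */

    typedef long (*syscall_fn)(void);       /* argument marshalling omitted */
    extern syscall_fn my_sys_call_table[MY_NR_SYSCALLS];

    static long my_dispatch(unsigned int nr)
    {
            if (nr >= MY_NR_SYSCALLS)       /* out of range behaves like a hole */
                    return -ENOSYS;
            return my_sys_call_table[nr]();
    }
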
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index d8a214f11ac2..e5550ce4e0eb 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c | |||
@@ -217,16 +217,12 @@ static struct clocksource clocksource_microblaze = { | |||
217 | .rating = 300, | 217 | .rating = 300, |
218 | .read = microblaze_read, | 218 | .read = microblaze_read, |
219 | .mask = CLOCKSOURCE_MASK(32), | 219 | .mask = CLOCKSOURCE_MASK(32), |
220 | .shift = 8, /* I can shift it */ | ||
221 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 220 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
222 | }; | 221 | }; |
223 | 222 | ||
224 | static int __init microblaze_clocksource_init(void) | 223 | static int __init microblaze_clocksource_init(void) |
225 | { | 224 | { |
226 | clocksource_microblaze.mult = | 225 | if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq)) |
227 | clocksource_hz2mult(timer_clock_freq, | ||
228 | clocksource_microblaze.shift); | ||
229 | if (clocksource_register(&clocksource_microblaze)) | ||
230 | panic("failed to register clocksource"); | 226 | panic("failed to register clocksource"); |
231 | 227 | ||
232 | /* stop timer1 */ | 228 | /* stop timer1 */ |
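The microblaze hunk above is a straight API migration: instead of hard-coding a shift of 8 and deriving mult with clocksource_hz2mult() before calling clocksource_register(), the driver hands the raw counter frequency to clocksource_register_hz() and lets the core pick a (mult, shift) pair with better precision and overflow behavior. Either way, the pair feeds the same conversion (cf. clocksource_cyc2ns()); a hedged sketch:

    #include <linux/types.h>

    /*
     * mult and shift are chosen so that (mult >> shift) approximates
     * NSEC_PER_SEC / freq; a raw counter delta then becomes nanoseconds.
     */
    static inline u64 cycles_to_ns(u64 cycles, u32 mult, u32 shift)
    {
            return (cycles * mult) >> shift;
    }
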
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms index 7ff9b5492041..aef6c917b45a 100644 --- a/arch/mips/Kbuild.platforms +++ b/arch/mips/Kbuild.platforms | |||
@@ -11,6 +11,7 @@ platforms += dec | |||
11 | platforms += emma | 11 | platforms += emma |
12 | platforms += jazz | 12 | platforms += jazz |
13 | platforms += jz4740 | 13 | platforms += jz4740 |
14 | platforms += lantiq | ||
14 | platforms += lasat | 15 | platforms += lasat |
15 | platforms += loongson | 16 | platforms += loongson |
16 | platforms += mipssim | 17 | platforms += mipssim |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 8e256cc5dcd9..cef1a854487d 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -212,6 +212,24 @@ config MACH_JZ4740 | |||
212 | select HAVE_PWM | 212 | select HAVE_PWM |
213 | select HAVE_CLK | 213 | select HAVE_CLK |
214 | 214 | ||
215 | config LANTIQ | ||
216 | bool "Lantiq based platforms" | ||
217 | select DMA_NONCOHERENT | ||
218 | select IRQ_CPU | ||
219 | select CEVT_R4K | ||
220 | select CSRC_R4K | ||
221 | select SYS_HAS_CPU_MIPS32_R1 | ||
222 | select SYS_HAS_CPU_MIPS32_R2 | ||
223 | select SYS_SUPPORTS_BIG_ENDIAN | ||
224 | select SYS_SUPPORTS_32BIT_KERNEL | ||
225 | select SYS_SUPPORTS_MULTITHREADING | ||
226 | select SYS_HAS_EARLY_PRINTK | ||
227 | select ARCH_REQUIRE_GPIOLIB | ||
228 | select SWAP_IO_SPACE | ||
229 | select BOOT_RAW | ||
230 | select HAVE_CLK | ||
231 | select MIPS_MACHINE | ||
232 | |||
215 | config LASAT | 233 | config LASAT |
216 | bool "LASAT Networks platforms" | 234 | bool "LASAT Networks platforms" |
217 | select CEVT_R4K | 235 | select CEVT_R4K |
@@ -736,6 +754,33 @@ config CAVIUM_OCTEON_REFERENCE_BOARD | |||
736 | Hikari | 754 | Hikari |
737 | Say Y here for most Octeon reference boards. | 755 | Say Y here for most Octeon reference boards. |
738 | 756 | ||
757 | config NLM_XLR_BOARD | ||
758 | bool "Netlogic XLR/XLS based systems" | ||
759 | depends on EXPERIMENTAL | ||
760 | select BOOT_ELF32 | ||
761 | select NLM_COMMON | ||
762 | select NLM_XLR | ||
763 | select SYS_HAS_CPU_XLR | ||
764 | select SYS_SUPPORTS_SMP | ||
765 | select HW_HAS_PCI | ||
766 | select SWAP_IO_SPACE | ||
767 | select SYS_SUPPORTS_32BIT_KERNEL | ||
768 | select SYS_SUPPORTS_64BIT_KERNEL | ||
769 | select 64BIT_PHYS_ADDR | ||
770 | select SYS_SUPPORTS_BIG_ENDIAN | ||
771 | select SYS_SUPPORTS_HIGHMEM | ||
772 | select DMA_COHERENT | ||
773 | select NR_CPUS_DEFAULT_32 | ||
774 | select CEVT_R4K | ||
775 | select CSRC_R4K | ||
776 | select IRQ_CPU | ||
777 | select ZONE_DMA if 64BIT | ||
778 | select SYNC_R4K | ||
779 | select SYS_HAS_EARLY_PRINTK | ||
780 | help | ||
781 | Support for systems based on Netlogic XLR and XLS processors. | ||
782 | Say Y here if you have an XLR or XLS based board. | ||
783 | |||
739 | endchoice | 784 | endchoice |
740 | 785 | ||
741 | source "arch/mips/alchemy/Kconfig" | 786 | source "arch/mips/alchemy/Kconfig" |
@@ -743,6 +788,7 @@ source "arch/mips/ath79/Kconfig" | |||
743 | source "arch/mips/bcm63xx/Kconfig" | 788 | source "arch/mips/bcm63xx/Kconfig" |
744 | source "arch/mips/jazz/Kconfig" | 789 | source "arch/mips/jazz/Kconfig" |
745 | source "arch/mips/jz4740/Kconfig" | 790 | source "arch/mips/jz4740/Kconfig" |
791 | source "arch/mips/lantiq/Kconfig" | ||
746 | source "arch/mips/lasat/Kconfig" | 792 | source "arch/mips/lasat/Kconfig" |
747 | source "arch/mips/pmc-sierra/Kconfig" | 793 | source "arch/mips/pmc-sierra/Kconfig" |
748 | source "arch/mips/powertv/Kconfig" | 794 | source "arch/mips/powertv/Kconfig" |
@@ -752,6 +798,7 @@ source "arch/mips/txx9/Kconfig" | |||
752 | source "arch/mips/vr41xx/Kconfig" | 798 | source "arch/mips/vr41xx/Kconfig" |
753 | source "arch/mips/cavium-octeon/Kconfig" | 799 | source "arch/mips/cavium-octeon/Kconfig" |
754 | source "arch/mips/loongson/Kconfig" | 800 | source "arch/mips/loongson/Kconfig" |
801 | source "arch/mips/netlogic/Kconfig" | ||
755 | 802 | ||
756 | endmenu | 803 | endmenu |
757 | 804 | ||
@@ -997,9 +1044,6 @@ config IRQ_GT641XX | |||
997 | config IRQ_GIC | 1044 | config IRQ_GIC |
998 | bool | 1045 | bool |
999 | 1046 | ||
1000 | config IRQ_CPU_OCTEON | ||
1001 | bool | ||
1002 | |||
1003 | config MIPS_BOARDS_GEN | 1047 | config MIPS_BOARDS_GEN |
1004 | bool | 1048 | bool |
1005 | 1049 | ||
@@ -1359,8 +1403,6 @@ config CPU_SB1 | |||
1359 | config CPU_CAVIUM_OCTEON | 1403 | config CPU_CAVIUM_OCTEON |
1360 | bool "Cavium Octeon processor" | 1404 | bool "Cavium Octeon processor" |
1361 | depends on SYS_HAS_CPU_CAVIUM_OCTEON | 1405 | depends on SYS_HAS_CPU_CAVIUM_OCTEON |
1362 | select IRQ_CPU | ||
1363 | select IRQ_CPU_OCTEON | ||
1364 | select CPU_HAS_PREFETCH | 1406 | select CPU_HAS_PREFETCH |
1365 | select CPU_SUPPORTS_64BIT_KERNEL | 1407 | select CPU_SUPPORTS_64BIT_KERNEL |
1366 | select SYS_SUPPORTS_SMP | 1408 | select SYS_SUPPORTS_SMP |
@@ -1425,6 +1467,17 @@ config CPU_BMIPS5000 | |||
1425 | help | 1467 | help |
1426 | Broadcom BMIPS5000 processors. | 1468 | Broadcom BMIPS5000 processors. |
1427 | 1469 | ||
1470 | config CPU_XLR | ||
1471 | bool "Netlogic XLR SoC" | ||
1472 | depends on SYS_HAS_CPU_XLR | ||
1473 | select CPU_SUPPORTS_32BIT_KERNEL | ||
1474 | select CPU_SUPPORTS_64BIT_KERNEL | ||
1475 | select CPU_SUPPORTS_HIGHMEM | ||
1476 | select WEAK_ORDERING | ||
1477 | select WEAK_REORDERING_BEYOND_LLSC | ||
1478 | select CPU_SUPPORTS_HUGEPAGES | ||
1479 | help | ||
1480 | Netlogic Microsystems XLR/XLS processors. | ||
1428 | endchoice | 1481 | endchoice |
1429 | 1482 | ||
1430 | if CPU_LOONGSON2F | 1483 | if CPU_LOONGSON2F |
@@ -1555,6 +1608,9 @@ config SYS_HAS_CPU_BMIPS4380 | |||
1555 | config SYS_HAS_CPU_BMIPS5000 | 1608 | config SYS_HAS_CPU_BMIPS5000 |
1556 | bool | 1609 | bool |
1557 | 1610 | ||
1611 | config SYS_HAS_CPU_XLR | ||
1612 | bool | ||
1613 | |||
1558 | # | 1614 | # |
1559 | # CPU may reorder R->R, R->W, W->R, W->W | 1615 | # CPU may reorder R->R, R->W, W->R, W->W |
1560 | # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC | 1616 | # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC |
@@ -2339,6 +2395,7 @@ config MMU | |||
2339 | 2395 | ||
2340 | config I8253 | 2396 | config I8253 |
2341 | bool | 2397 | bool |
2398 | select CLKSRC_I8253 | ||
2342 | select MIPS_EXTERNAL_TIMER | 2399 | select MIPS_EXTERNAL_TIMER |
2343 | 2400 | ||
2344 | config ZONE_DMA32 | 2401 | config ZONE_DMA32 |
diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 53e3514ba10e..884819cd0607 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile | |||
@@ -191,6 +191,18 @@ endif | |||
191 | # | 191 | # |
192 | include $(srctree)/arch/mips/Kbuild.platforms | 192 | include $(srctree)/arch/mips/Kbuild.platforms |
193 | 193 | ||
194 | # | ||
195 | # NETLOGIC SOC Common (common) | ||
196 | # | ||
197 | cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic | ||
198 | cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic | ||
199 | |||
200 | # | ||
201 | # NETLOGIC XLR/XLS SoC, Simulator and boards | ||
202 | # | ||
203 | core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/ | ||
204 | load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000 | ||
205 | |||
194 | cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic | 206 | cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic |
195 | drivers-$(CONFIG_PCI) += arch/mips/pci/ | 207 | drivers-$(CONFIG_PCI) += arch/mips/pci/ |
196 | 208 | ||
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c index ca0506a8585a..3a5abb54d505 100644 --- a/arch/mips/alchemy/common/dbdma.c +++ b/arch/mips/alchemy/common/dbdma.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/interrupt.h> | 37 | #include <linux/interrupt.h> |
38 | #include <linux/module.h> | 38 | #include <linux/module.h> |
39 | #include <linux/sysdev.h> | 39 | #include <linux/syscore_ops.h> |
40 | #include <asm/mach-au1x00/au1000.h> | 40 | #include <asm/mach-au1x00/au1000.h> |
41 | #include <asm/mach-au1x00/au1xxx_dbdma.h> | 41 | #include <asm/mach-au1x00/au1xxx_dbdma.h> |
42 | 42 | ||
@@ -58,7 +58,8 @@ static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock); | |||
58 | /* I couldn't find a macro that did this... */ | 58 | /* I couldn't find a macro that did this... */ |
59 | #define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1)) | 59 | #define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1)) |
60 | 60 | ||
61 | static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE; | 61 | static dbdma_global_t *dbdma_gptr = |
62 | (dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); | ||
62 | static int dbdma_initialized; | 63 | static int dbdma_initialized; |
63 | 64 | ||
64 | static dbdev_tab_t dbdev_tab[] = { | 65 | static dbdev_tab_t dbdev_tab[] = { |
@@ -299,7 +300,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, | |||
299 | if (ctp != NULL) { | 300 | if (ctp != NULL) { |
300 | memset(ctp, 0, sizeof(chan_tab_t)); | 301 | memset(ctp, 0, sizeof(chan_tab_t)); |
301 | ctp->chan_index = chan = i; | 302 | ctp->chan_index = chan = i; |
302 | dcp = DDMA_CHANNEL_BASE; | 303 | dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); |
303 | dcp += (0x0100 * chan); | 304 | dcp += (0x0100 * chan); |
304 | ctp->chan_ptr = (au1x_dma_chan_t *)dcp; | 305 | ctp->chan_ptr = (au1x_dma_chan_t *)dcp; |
305 | cp = (au1x_dma_chan_t *)dcp; | 306 | cp = (au1x_dma_chan_t *)dcp; |
@@ -958,105 +959,75 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr) | |||
958 | } | 959 | } |
959 | 960 | ||
960 | 961 | ||
961 | struct alchemy_dbdma_sysdev { | 962 | static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6]; |
962 | struct sys_device sysdev; | ||
963 | u32 pm_regs[NUM_DBDMA_CHANS + 1][6]; | ||
964 | }; | ||
965 | 963 | ||
966 | static int alchemy_dbdma_suspend(struct sys_device *dev, | 964 | static int alchemy_dbdma_suspend(void) |
967 | pm_message_t state) | ||
968 | { | 965 | { |
969 | struct alchemy_dbdma_sysdev *sdev = | ||
970 | container_of(dev, struct alchemy_dbdma_sysdev, sysdev); | ||
971 | int i; | 966 | int i; |
972 | u32 addr; | 967 | void __iomem *addr; |
973 | 968 | ||
974 | addr = DDMA_GLOBAL_BASE; | 969 | addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); |
975 | sdev->pm_regs[0][0] = au_readl(addr + 0x00); | 970 | alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00); |
976 | sdev->pm_regs[0][1] = au_readl(addr + 0x04); | 971 | alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04); |
977 | sdev->pm_regs[0][2] = au_readl(addr + 0x08); | 972 | alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08); |
978 | sdev->pm_regs[0][3] = au_readl(addr + 0x0c); | 973 | alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c); |
979 | 974 | ||
980 | /* save channel configurations */ | 975 | /* save channel configurations */ |
981 | for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) { | 976 | addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); |
982 | sdev->pm_regs[i][0] = au_readl(addr + 0x00); | 977 | for (i = 1; i <= NUM_DBDMA_CHANS; i++) { |
983 | sdev->pm_regs[i][1] = au_readl(addr + 0x04); | 978 | alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00); |
984 | sdev->pm_regs[i][2] = au_readl(addr + 0x08); | 979 | alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04); |
985 | sdev->pm_regs[i][3] = au_readl(addr + 0x0c); | 980 | alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08); |
986 | sdev->pm_regs[i][4] = au_readl(addr + 0x10); | 981 | alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c); |
987 | sdev->pm_regs[i][5] = au_readl(addr + 0x14); | 982 | alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10); |
983 | alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14); | ||
988 | 984 | ||
989 | /* halt channel */ | 985 | /* halt channel */ |
990 | au_writel(sdev->pm_regs[i][0] & ~1, addr + 0x00); | 986 | __raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00); |
991 | au_sync(); | 987 | wmb(); |
992 | while (!(au_readl(addr + 0x14) & 1)) | 988 | while (!(__raw_readl(addr + 0x14) & 1)) |
993 | au_sync(); | 989 | wmb(); |
994 | 990 | ||
995 | addr += 0x100; /* next channel base */ | 991 | addr += 0x100; /* next channel base */ |
996 | } | 992 | } |
997 | /* disable channel interrupts */ | 993 | /* disable channel interrupts */ |
998 | au_writel(0, DDMA_GLOBAL_BASE + 0x0c); | 994 | addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); |
999 | au_sync(); | 995 | __raw_writel(0, addr + 0x0c); |
996 | wmb(); | ||
1000 | 997 | ||
1001 | return 0; | 998 | return 0; |
1002 | } | 999 | } |
1003 | 1000 | ||
1004 | static int alchemy_dbdma_resume(struct sys_device *dev) | 1001 | static void alchemy_dbdma_resume(void) |
1005 | { | 1002 | { |
1006 | struct alchemy_dbdma_sysdev *sdev = | ||
1007 | container_of(dev, struct alchemy_dbdma_sysdev, sysdev); | ||
1008 | int i; | 1003 | int i; |
1009 | u32 addr; | 1004 | void __iomem *addr; |
1010 | 1005 | ||
1011 | addr = DDMA_GLOBAL_BASE; | 1006 | addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); |
1012 | au_writel(sdev->pm_regs[0][0], addr + 0x00); | 1007 | __raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00); |
1013 | au_writel(sdev->pm_regs[0][1], addr + 0x04); | 1008 | __raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04); |
1014 | au_writel(sdev->pm_regs[0][2], addr + 0x08); | 1009 | __raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08); |
1015 | au_writel(sdev->pm_regs[0][3], addr + 0x0c); | 1010 | __raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c); |
1016 | 1011 | ||
1017 | /* restore channel configurations */ | 1012 | /* restore channel configurations */ |
1018 | for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) { | 1013 | addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); |
1019 | au_writel(sdev->pm_regs[i][0], addr + 0x00); | 1014 | for (i = 1; i <= NUM_DBDMA_CHANS; i++) { |
1020 | au_writel(sdev->pm_regs[i][1], addr + 0x04); | 1015 | __raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00); |
1021 | au_writel(sdev->pm_regs[i][2], addr + 0x08); | 1016 | __raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04); |
1022 | au_writel(sdev->pm_regs[i][3], addr + 0x0c); | 1017 | __raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08); |
1023 | au_writel(sdev->pm_regs[i][4], addr + 0x10); | 1018 | __raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c); |
1024 | au_writel(sdev->pm_regs[i][5], addr + 0x14); | 1019 | __raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10); |
1025 | au_sync(); | 1020 | __raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14); |
1021 | wmb(); | ||
1026 | addr += 0x100; /* next channel base */ | 1022 | addr += 0x100; /* next channel base */ |
1027 | } | 1023 | } |
1028 | |||
1029 | return 0; | ||
1030 | } | 1024 | } |
1031 | 1025 | ||
1032 | static struct sysdev_class alchemy_dbdma_sysdev_class = { | 1026 | static struct syscore_ops alchemy_dbdma_syscore_ops = { |
1033 | .name = "dbdma", | ||
1034 | .suspend = alchemy_dbdma_suspend, | 1027 | .suspend = alchemy_dbdma_suspend, |
1035 | .resume = alchemy_dbdma_resume, | 1028 | .resume = alchemy_dbdma_resume, |
1036 | }; | 1029 | }; |
1037 | 1030 | ||
1038 | static int __init alchemy_dbdma_sysdev_init(void) | ||
1039 | { | ||
1040 | struct alchemy_dbdma_sysdev *sdev; | ||
1041 | int ret; | ||
1042 | |||
1043 | ret = sysdev_class_register(&alchemy_dbdma_sysdev_class); | ||
1044 | if (ret) | ||
1045 | return ret; | ||
1046 | |||
1047 | sdev = kzalloc(sizeof(struct alchemy_dbdma_sysdev), GFP_KERNEL); | ||
1048 | if (!sdev) | ||
1049 | return -ENOMEM; | ||
1050 | |||
1051 | sdev->sysdev.id = -1; | ||
1052 | sdev->sysdev.cls = &alchemy_dbdma_sysdev_class; | ||
1053 | ret = sysdev_register(&sdev->sysdev); | ||
1054 | if (ret) | ||
1055 | kfree(sdev); | ||
1056 | |||
1057 | return ret; | ||
1058 | } | ||
1059 | |||
1060 | static int __init au1xxx_dbdma_init(void) | 1031 | static int __init au1xxx_dbdma_init(void) |
1061 | { | 1032 | { |
1062 | int irq_nr, ret; | 1033 | int irq_nr, ret; |
@@ -1084,11 +1055,7 @@ static int __init au1xxx_dbdma_init(void) | |||
1084 | else { | 1055 | else { |
1085 | dbdma_initialized = 1; | 1056 | dbdma_initialized = 1; |
1086 | printk(KERN_INFO "Alchemy DBDMA initialized\n"); | 1057 | printk(KERN_INFO "Alchemy DBDMA initialized\n"); |
1087 | ret = alchemy_dbdma_sysdev_init(); | 1058 | register_syscore_ops(&alchemy_dbdma_syscore_ops); |
1088 | if (ret) { | ||
1089 | printk(KERN_ERR "DBDMA PM init failed\n"); | ||
1090 | ret = 0; | ||
1091 | } | ||
1092 | } | 1059 | } |
1093 | 1060 | ||
1094 | return ret; | 1061 | return ret; |
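The dbdma conversion swaps the deprecated sysdev machinery for syscore_ops: the suspend/resume callbacks take no device argument and run with interrupts disabled late in the suspend path, so the kmalloc'd wrapper struct, the sysdev class, and the can-fail registration dance all give way to a static state buffer and one register_syscore_ops() call. A minimal sketch of the pattern, with hypothetical read_hw_state()/write_hw_state() helpers standing in for the register save/restore loops:

    #include <linux/syscore_ops.h>

    static unsigned long saved_state;

    static int foo_suspend(void)            /* runs with IRQs off */
    {
            saved_state = read_hw_state();  /* hypothetical helper */
            return 0;
    }

    static void foo_resume(void)            /* note: returns void */
    {
            write_hw_state(saved_state);    /* hypothetical helper */
    }

    static struct syscore_ops foo_syscore_ops = {
            .suspend = foo_suspend,
            .resume  = foo_resume,
    };

    /* in the driver's init path: register_syscore_ops(&foo_syscore_ops); */
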
diff --git a/arch/mips/alchemy/common/dma.c b/arch/mips/alchemy/common/dma.c index d5278877891d..347980e79a89 100644 --- a/arch/mips/alchemy/common/dma.c +++ b/arch/mips/alchemy/common/dma.c | |||
@@ -58,6 +58,9 @@ | |||
58 | * returned from request_dma. | 58 | * returned from request_dma. |
59 | */ | 59 | */ |
60 | 60 | ||
61 | /* DMA Channel register block spacing */ | ||
62 | #define DMA_CHANNEL_LEN 0x00000100 | ||
63 | |||
61 | DEFINE_SPINLOCK(au1000_dma_spin_lock); | 64 | DEFINE_SPINLOCK(au1000_dma_spin_lock); |
62 | 65 | ||
63 | struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = { | 66 | struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = { |
@@ -77,22 +80,23 @@ static const struct dma_dev { | |||
77 | unsigned int fifo_addr; | 80 | unsigned int fifo_addr; |
78 | unsigned int dma_mode; | 81 | unsigned int dma_mode; |
79 | } dma_dev_table[DMA_NUM_DEV] = { | 82 | } dma_dev_table[DMA_NUM_DEV] = { |
80 | {UART0_ADDR + UART_TX, 0}, | 83 | { AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 }, /* UART0_TX */ |
81 | {UART0_ADDR + UART_RX, 0}, | 84 | { AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR }, /* UART0_RX */ |
82 | {0, 0}, | 85 | { 0, 0 }, /* DMA_REQ0 */ |
83 | {0, 0}, | 86 | { 0, 0 }, /* DMA_REQ1 */ |
84 | {AC97C_DATA, DMA_DW16 }, /* coherent */ | 87 | { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 }, /* AC97 TX c */ |
85 | {AC97C_DATA, DMA_DR | DMA_DW16 }, /* coherent */ | 88 | { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR }, /* AC97 RX c */ |
86 | {UART3_ADDR + UART_TX, DMA_DW8 | DMA_NC}, | 89 | { AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* UART3_TX */ |
87 | {UART3_ADDR + UART_RX, DMA_DR | DMA_DW8 | DMA_NC}, | 90 | { AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */ |
88 | {USBD_EP0RD, DMA_DR | DMA_DW8 | DMA_NC}, | 91 | { AU1000_USBD_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */ |
89 | {USBD_EP0WR, DMA_DW8 | DMA_NC}, | 92 | { AU1000_USBD_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */ |
90 | {USBD_EP2WR, DMA_DW8 | DMA_NC}, | 93 | { AU1000_USBD_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */ |
91 | {USBD_EP3WR, DMA_DW8 | DMA_NC}, | 94 | { AU1000_USBD_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */ |
92 | {USBD_EP4RD, DMA_DR | DMA_DW8 | DMA_NC}, | 95 | { AU1000_USBD_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */ |
93 | {USBD_EP5RD, DMA_DR | DMA_DW8 | DMA_NC}, | 96 | { AU1000_USBD_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */ |
94 | {I2S_DATA, DMA_DW32 | DMA_NC}, | 97 | /* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */ |
95 | {I2S_DATA, DMA_DR | DMA_DW32 | DMA_NC} | 98 | { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC}, /* I2S TX */ |
99 | { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */ | ||
96 | }; | 100 | }; |
97 | 101 | ||
98 | int au1000_dma_read_proc(char *buf, char **start, off_t fpos, | 102 | int au1000_dma_read_proc(char *buf, char **start, off_t fpos, |
@@ -123,10 +127,10 @@ int au1000_dma_read_proc(char *buf, char **start, off_t fpos, | |||
123 | 127 | ||
124 | /* Device FIFO addresses and default DMA modes - 2nd bank */ | 128 | /* Device FIFO addresses and default DMA modes - 2nd bank */ |
125 | static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = { | 129 | static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = { |
126 | { SD0_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */ | 130 | { AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */ |
127 | { SD0_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 }, /* coherent */ | 131 | { AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }, /* coherent */ |
128 | { SD1_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */ | 132 | { AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */ |
129 | { SD1_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 } /* coherent */ | 133 | { AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR } /* coherent */ |
130 | }; | 134 | }; |
131 | 135 | ||
132 | void dump_au1000_dma_channel(unsigned int dmanr) | 136 | void dump_au1000_dma_channel(unsigned int dmanr) |
@@ -202,7 +206,7 @@ int request_au1000_dma(int dev_id, const char *dev_str, | |||
202 | } | 206 | } |
203 | 207 | ||
204 | /* fill it in */ | 208 | /* fill it in */ |
205 | chan->io = DMA_CHANNEL_BASE + i * DMA_CHANNEL_LEN; | 209 | chan->io = KSEG1ADDR(AU1000_DMA_PHYS_ADDR) + i * DMA_CHANNEL_LEN; |
206 | chan->dev_id = dev_id; | 210 | chan->dev_id = dev_id; |
207 | chan->dev_str = dev_str; | 211 | chan->dev_str = dev_str; |
208 | chan->fifo_addr = dev->fifo_addr; | 212 | chan->fifo_addr = dev->fifo_addr; |
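The dma.c table now stores physical FIFO addresses (AU1000_*_PHYS_ADDR plus a register offset) and translates them at use time with KSEG1ADDR(). On 32-bit MIPS, KSEG1 is the fixed uncached window onto the low 512 MB of physical address space, so the translation is a mask-and-or rather than an ioremap(); essentially what asm/addrspace.h defines, sketched here with illustrative names:

    /* KSEG1: uncached, unmapped kernel segment on MIPS32. */
    #define MY_KSEG1                0xa0000000UL
    #define MY_CPHYSADDR(a)         ((unsigned long)(a) & 0x1fffffffUL)
    #define MY_KSEG1ADDR(a)         (MY_CPHYSADDR(a) | MY_KSEG1)

    /* e.g. physical 0x10100000 becomes 0xb0100000: the same device,
     * reached through the uncached window. */
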
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c index 55dd7c888517..8b60ba0675e2 100644 --- a/arch/mips/alchemy/common/irq.c +++ b/arch/mips/alchemy/common/irq.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
31 | #include <linux/irq.h> | 31 | #include <linux/irq.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/sysdev.h> | 33 | #include <linux/syscore_ops.h> |
34 | 34 | ||
35 | #include <asm/irq_cpu.h> | 35 | #include <asm/irq_cpu.h> |
36 | #include <asm/mipsregs.h> | 36 | #include <asm/mipsregs.h> |
@@ -39,6 +39,36 @@ | |||
39 | #include <asm/mach-pb1x00/pb1000.h> | 39 | #include <asm/mach-pb1x00/pb1000.h> |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | /* Interrupt Controller register offsets */ | ||
43 | #define IC_CFG0RD 0x40 | ||
44 | #define IC_CFG0SET 0x40 | ||
45 | #define IC_CFG0CLR 0x44 | ||
46 | #define IC_CFG1RD 0x48 | ||
47 | #define IC_CFG1SET 0x48 | ||
48 | #define IC_CFG1CLR 0x4C | ||
49 | #define IC_CFG2RD 0x50 | ||
50 | #define IC_CFG2SET 0x50 | ||
51 | #define IC_CFG2CLR 0x54 | ||
52 | #define IC_REQ0INT 0x54 | ||
53 | #define IC_SRCRD 0x58 | ||
54 | #define IC_SRCSET 0x58 | ||
55 | #define IC_SRCCLR 0x5C | ||
56 | #define IC_REQ1INT 0x5C | ||
57 | #define IC_ASSIGNRD 0x60 | ||
58 | #define IC_ASSIGNSET 0x60 | ||
59 | #define IC_ASSIGNCLR 0x64 | ||
60 | #define IC_WAKERD 0x68 | ||
61 | #define IC_WAKESET 0x68 | ||
62 | #define IC_WAKECLR 0x6C | ||
63 | #define IC_MASKRD 0x70 | ||
64 | #define IC_MASKSET 0x70 | ||
65 | #define IC_MASKCLR 0x74 | ||
66 | #define IC_RISINGRD 0x78 | ||
67 | #define IC_RISINGCLR 0x78 | ||
68 | #define IC_FALLINGRD 0x7C | ||
69 | #define IC_FALLINGCLR 0x7C | ||
70 | #define IC_TESTBIT 0x80 | ||
71 | |||
42 | static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type); | 72 | static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type); |
43 | 73 | ||
44 | /* NOTE on interrupt priorities: The original writers of this code said: | 74 | /* NOTE on interrupt priorities: The original writers of this code said: |
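The new IC_* offsets make the controller's register scheme explicit: most registers come in read/set/clear triples where the read and set functions share one offset (writing 1-bits sets them, reading returns current state) and the paired +4 offset clears the written bits, so individual interrupt lines can be flipped without a read-modify-write race. A hedged sketch of the idiom against a KSEG1-mapped base:

    #include <linux/io.h>

    static void ic_cfg0_set(void __iomem *base, unsigned int bit)
    {
            __raw_writel(1 << bit, base + 0x40);    /* IC_CFG0SET: sets only this bit */
            wmb();
    }

    static void ic_cfg0_clear(void __iomem *base, unsigned int bit)
    {
            __raw_writel(1 << bit, base + 0x44);    /* IC_CFG0CLR: clears only this bit */
            wmb();
    }
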
@@ -221,89 +251,101 @@ struct au1xxx_irqmap au1200_irqmap[] __initdata = { | |||
221 | static void au1x_ic0_unmask(struct irq_data *d) | 251 | static void au1x_ic0_unmask(struct irq_data *d) |
222 | { | 252 | { |
223 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; | 253 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; |
224 | au_writel(1 << bit, IC0_MASKSET); | 254 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); |
225 | au_writel(1 << bit, IC0_WAKESET); | 255 | |
226 | au_sync(); | 256 | __raw_writel(1 << bit, base + IC_MASKSET); |
257 | __raw_writel(1 << bit, base + IC_WAKESET); | ||
258 | wmb(); | ||
227 | } | 259 | } |
228 | 260 | ||
229 | static void au1x_ic1_unmask(struct irq_data *d) | 261 | static void au1x_ic1_unmask(struct irq_data *d) |
230 | { | 262 | { |
231 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; | 263 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; |
232 | au_writel(1 << bit, IC1_MASKSET); | 264 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); |
233 | au_writel(1 << bit, IC1_WAKESET); | 265 | |
266 | __raw_writel(1 << bit, base + IC_MASKSET); | ||
267 | __raw_writel(1 << bit, base + IC_WAKESET); | ||
234 | 268 | ||
235 | /* very hacky. does the pb1000 cpld auto-disable this int? | 269 | /* very hacky. does the pb1000 cpld auto-disable this int? |
236 | * nowhere in the current kernel sources is it disabled. --mlau | 270 | * nowhere in the current kernel sources is it disabled. --mlau |
237 | */ | 271 | */ |
238 | #if defined(CONFIG_MIPS_PB1000) | 272 | #if defined(CONFIG_MIPS_PB1000) |
239 | if (d->irq == AU1000_GPIO15_INT) | 273 | if (d->irq == AU1000_GPIO15_INT) |
240 | au_writel(0x4000, PB1000_MDR); /* enable int */ | 274 | __raw_writel(0x4000, (void __iomem *)PB1000_MDR); /* enable int */ |
241 | #endif | 275 | #endif |
242 | au_sync(); | 276 | wmb(); |
243 | } | 277 | } |
244 | 278 | ||
245 | static void au1x_ic0_mask(struct irq_data *d) | 279 | static void au1x_ic0_mask(struct irq_data *d) |
246 | { | 280 | { |
247 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; | 281 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; |
248 | au_writel(1 << bit, IC0_MASKCLR); | 282 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); |
249 | au_writel(1 << bit, IC0_WAKECLR); | 283 | |
250 | au_sync(); | 284 | __raw_writel(1 << bit, base + IC_MASKCLR); |
285 | __raw_writel(1 << bit, base + IC_WAKECLR); | ||
286 | wmb(); | ||
251 | } | 287 | } |
252 | 288 | ||
253 | static void au1x_ic1_mask(struct irq_data *d) | 289 | static void au1x_ic1_mask(struct irq_data *d) |
254 | { | 290 | { |
255 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; | 291 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; |
256 | au_writel(1 << bit, IC1_MASKCLR); | 292 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); |
257 | au_writel(1 << bit, IC1_WAKECLR); | 293 | |
258 | au_sync(); | 294 | __raw_writel(1 << bit, base + IC_MASKCLR); |
295 | __raw_writel(1 << bit, base + IC_WAKECLR); | ||
296 | wmb(); | ||
259 | } | 297 | } |
260 | 298 | ||
261 | static void au1x_ic0_ack(struct irq_data *d) | 299 | static void au1x_ic0_ack(struct irq_data *d) |
262 | { | 300 | { |
263 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; | 301 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; |
302 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); | ||
264 | 303 | ||
265 | /* | 304 | /* |
266 | * This may assume that we don't get interrupts from | 305 | * This may assume that we don't get interrupts from |
267 | * both edges at once, or if we do, that we don't care. | 306 | * both edges at once, or if we do, that we don't care. |
268 | */ | 307 | */ |
269 | au_writel(1 << bit, IC0_FALLINGCLR); | 308 | __raw_writel(1 << bit, base + IC_FALLINGCLR); |
270 | au_writel(1 << bit, IC0_RISINGCLR); | 309 | __raw_writel(1 << bit, base + IC_RISINGCLR); |
271 | au_sync(); | 310 | wmb(); |
272 | } | 311 | } |
273 | 312 | ||
274 | static void au1x_ic1_ack(struct irq_data *d) | 313 | static void au1x_ic1_ack(struct irq_data *d) |
275 | { | 314 | { |
276 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; | 315 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; |
316 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); | ||
277 | 317 | ||
278 | /* | 318 | /* |
279 | * This may assume that we don't get interrupts from | 319 | * This may assume that we don't get interrupts from |
280 | * both edges at once, or if we do, that we don't care. | 320 | * both edges at once, or if we do, that we don't care. |
281 | */ | 321 | */ |
282 | au_writel(1 << bit, IC1_FALLINGCLR); | 322 | __raw_writel(1 << bit, base + IC_FALLINGCLR); |
283 | au_writel(1 << bit, IC1_RISINGCLR); | 323 | __raw_writel(1 << bit, base + IC_RISINGCLR); |
284 | au_sync(); | 324 | wmb(); |
285 | } | 325 | } |
286 | 326 | ||
287 | static void au1x_ic0_maskack(struct irq_data *d) | 327 | static void au1x_ic0_maskack(struct irq_data *d) |
288 | { | 328 | { |
289 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; | 329 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; |
330 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); | ||
290 | 331 | ||
291 | au_writel(1 << bit, IC0_WAKECLR); | 332 | __raw_writel(1 << bit, base + IC_WAKECLR); |
292 | au_writel(1 << bit, IC0_MASKCLR); | 333 | __raw_writel(1 << bit, base + IC_MASKCLR); |
293 | au_writel(1 << bit, IC0_RISINGCLR); | 334 | __raw_writel(1 << bit, base + IC_RISINGCLR); |
294 | au_writel(1 << bit, IC0_FALLINGCLR); | 335 | __raw_writel(1 << bit, base + IC_FALLINGCLR); |
295 | au_sync(); | 336 | wmb(); |
296 | } | 337 | } |
297 | 338 | ||
298 | static void au1x_ic1_maskack(struct irq_data *d) | 339 | static void au1x_ic1_maskack(struct irq_data *d) |
299 | { | 340 | { |
300 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; | 341 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; |
342 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); | ||
301 | 343 | ||
302 | au_writel(1 << bit, IC1_WAKECLR); | 344 | __raw_writel(1 << bit, base + IC_WAKECLR); |
303 | au_writel(1 << bit, IC1_MASKCLR); | 345 | __raw_writel(1 << bit, base + IC_MASKCLR); |
304 | au_writel(1 << bit, IC1_RISINGCLR); | 346 | __raw_writel(1 << bit, base + IC_RISINGCLR); |
305 | au_writel(1 << bit, IC1_FALLINGCLR); | 347 | __raw_writel(1 << bit, base + IC_FALLINGCLR); |
306 | au_sync(); | 348 | wmb(); |
307 | } | 349 | } |
308 | 350 | ||
309 | static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) | 351 | static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) |
@@ -318,13 +360,13 @@ static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) | |||
318 | return -EINVAL; | 360 | return -EINVAL; |
319 | 361 | ||
320 | local_irq_save(flags); | 362 | local_irq_save(flags); |
321 | wakemsk = au_readl(SYS_WAKEMSK); | 363 | wakemsk = __raw_readl((void __iomem *)SYS_WAKEMSK); |
322 | if (on) | 364 | if (on) |
323 | wakemsk |= 1 << bit; | 365 | wakemsk |= 1 << bit; |
324 | else | 366 | else |
325 | wakemsk &= ~(1 << bit); | 367 | wakemsk &= ~(1 << bit); |
326 | au_writel(wakemsk, SYS_WAKEMSK); | 368 | __raw_writel(wakemsk, (void __iomem *)SYS_WAKEMSK); |
327 | au_sync(); | 369 | wmb(); |
328 | local_irq_restore(flags); | 370 | local_irq_restore(flags); |
329 | 371 | ||
330 | return 0; | 372 | return 0; |
@@ -356,81 +398,74 @@ static struct irq_chip au1x_ic1_chip = { | |||
356 | static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type) | 398 | static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type) |
357 | { | 399 | { |
358 | struct irq_chip *chip; | 400 | struct irq_chip *chip; |
359 | unsigned long icr[6]; | 401 | unsigned int bit, irq = d->irq; |
360 | unsigned int bit, ic, irq = d->irq; | ||
361 | irq_flow_handler_t handler = NULL; | 402 | irq_flow_handler_t handler = NULL; |
362 | unsigned char *name = NULL; | 403 | unsigned char *name = NULL; |
404 | void __iomem *base; | ||
363 | int ret; | 405 | int ret; |
364 | 406 | ||
365 | if (irq >= AU1000_INTC1_INT_BASE) { | 407 | if (irq >= AU1000_INTC1_INT_BASE) { |
366 | bit = irq - AU1000_INTC1_INT_BASE; | 408 | bit = irq - AU1000_INTC1_INT_BASE; |
367 | chip = &au1x_ic1_chip; | 409 | chip = &au1x_ic1_chip; |
368 | ic = 1; | 410 | base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); |
369 | } else { | 411 | } else { |
370 | bit = irq - AU1000_INTC0_INT_BASE; | 412 | bit = irq - AU1000_INTC0_INT_BASE; |
371 | chip = &au1x_ic0_chip; | 413 | chip = &au1x_ic0_chip; |
372 | ic = 0; | 414 | base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); |
373 | } | 415 | } |
374 | 416 | ||
375 | if (bit > 31) | 417 | if (bit > 31) |
376 | return -EINVAL; | 418 | return -EINVAL; |
377 | 419 | ||
378 | icr[0] = ic ? IC1_CFG0SET : IC0_CFG0SET; | ||
379 | icr[1] = ic ? IC1_CFG1SET : IC0_CFG1SET; | ||
380 | icr[2] = ic ? IC1_CFG2SET : IC0_CFG2SET; | ||
381 | icr[3] = ic ? IC1_CFG0CLR : IC0_CFG0CLR; | ||
382 | icr[4] = ic ? IC1_CFG1CLR : IC0_CFG1CLR; | ||
383 | icr[5] = ic ? IC1_CFG2CLR : IC0_CFG2CLR; | ||
384 | |||
385 | ret = 0; | 420 | ret = 0; |
386 | 421 | ||
387 | switch (flow_type) { /* cfgregs 2:1:0 */ | 422 | switch (flow_type) { /* cfgregs 2:1:0 */ |
388 | case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */ | 423 | case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */ |
389 | au_writel(1 << bit, icr[5]); | 424 | __raw_writel(1 << bit, base + IC_CFG2CLR); |
390 | au_writel(1 << bit, icr[4]); | 425 | __raw_writel(1 << bit, base + IC_CFG1CLR); |
391 | au_writel(1 << bit, icr[0]); | 426 | __raw_writel(1 << bit, base + IC_CFG0SET); |
392 | handler = handle_edge_irq; | 427 | handler = handle_edge_irq; |
393 | name = "riseedge"; | 428 | name = "riseedge"; |
394 | break; | 429 | break; |
395 | case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */ | 430 | case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */ |
396 | au_writel(1 << bit, icr[5]); | 431 | __raw_writel(1 << bit, base + IC_CFG2CLR); |
397 | au_writel(1 << bit, icr[1]); | 432 | __raw_writel(1 << bit, base + IC_CFG1SET); |
398 | au_writel(1 << bit, icr[3]); | 433 | __raw_writel(1 << bit, base + IC_CFG0CLR); |
399 | handler = handle_edge_irq; | 434 | handler = handle_edge_irq; |
400 | name = "falledge"; | 435 | name = "falledge"; |
401 | break; | 436 | break; |
402 | case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */ | 437 | case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */ |
403 | au_writel(1 << bit, icr[5]); | 438 | __raw_writel(1 << bit, base + IC_CFG2CLR); |
404 | au_writel(1 << bit, icr[1]); | 439 | __raw_writel(1 << bit, base + IC_CFG1SET); |
405 | au_writel(1 << bit, icr[0]); | 440 | __raw_writel(1 << bit, base + IC_CFG0SET); |
406 | handler = handle_edge_irq; | 441 | handler = handle_edge_irq; |
407 | name = "bothedge"; | 442 | name = "bothedge"; |
408 | break; | 443 | break; |
409 | case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */ | 444 | case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */ |
410 | au_writel(1 << bit, icr[2]); | 445 | __raw_writel(1 << bit, base + IC_CFG2SET); |
411 | au_writel(1 << bit, icr[4]); | 446 | __raw_writel(1 << bit, base + IC_CFG1CLR); |
412 | au_writel(1 << bit, icr[0]); | 447 | __raw_writel(1 << bit, base + IC_CFG0SET); |
413 | handler = handle_level_irq; | 448 | handler = handle_level_irq; |
414 | name = "hilevel"; | 449 | name = "hilevel"; |
415 | break; | 450 | break; |
416 | case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */ | 451 | case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */ |
417 | au_writel(1 << bit, icr[2]); | 452 | __raw_writel(1 << bit, base + IC_CFG2SET); |
418 | au_writel(1 << bit, icr[1]); | 453 | __raw_writel(1 << bit, base + IC_CFG1SET); |
419 | au_writel(1 << bit, icr[3]); | 454 | __raw_writel(1 << bit, base + IC_CFG0CLR); |
420 | handler = handle_level_irq; | 455 | handler = handle_level_irq; |
421 | name = "lowlevel"; | 456 | name = "lowlevel"; |
422 | break; | 457 | break; |
423 | case IRQ_TYPE_NONE: /* 0:0:0 */ | 458 | case IRQ_TYPE_NONE: /* 0:0:0 */ |
424 | au_writel(1 << bit, icr[5]); | 459 | __raw_writel(1 << bit, base + IC_CFG2CLR); |
425 | au_writel(1 << bit, icr[4]); | 460 | __raw_writel(1 << bit, base + IC_CFG1CLR); |
426 | au_writel(1 << bit, icr[3]); | 461 | __raw_writel(1 << bit, base + IC_CFG0CLR); |
427 | break; | 462 | break; |
428 | default: | 463 | default: |
429 | ret = -EINVAL; | 464 | ret = -EINVAL; |
430 | } | 465 | } |
431 | __irq_set_chip_handler_name_locked(d->irq, chip, handler, name); | 466 | __irq_set_chip_handler_name_locked(d->irq, chip, handler, name); |
432 | 467 | ||
433 | au_sync(); | 468 | wmb(); |
434 | 469 | ||
435 | return ret; | 470 | return ret; |
436 | } | 471 | } |
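
For reference, each case above drives the per-source config bits CFG2:CFG1:CFG0 to the value named in its comment, writing the SET register for bits that must become 1 and the CLR register for bits that must become 0, so the result is independent of whatever state was programmed before. A small sketch of the resulting encoding (the table and its names are illustrative, not part of the driver):

	/* Illustrative only: the 3-bit type code formed by the CFG2/CFG1/CFG0
	 * registers for one interrupt source, per the comments above. */
	static const struct {
		unsigned char cfg2, cfg1, cfg0;
		const char *type;
	} au1x_ic_typecode_sketch[] = {
		{ 0, 0, 1, "rising edge"  },	/* IRQ_TYPE_EDGE_RISING  */
		{ 0, 1, 0, "falling edge" },	/* IRQ_TYPE_EDGE_FALLING */
		{ 0, 1, 1, "both edges"   },	/* IRQ_TYPE_EDGE_BOTH    */
		{ 1, 0, 1, "high level"   },	/* IRQ_TYPE_LEVEL_HIGH   */
		{ 1, 1, 0, "low level"    },	/* IRQ_TYPE_LEVEL_LOW    */
		{ 0, 0, 0, "disabled"     },	/* IRQ_TYPE_NONE         */
	};
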
@@ -444,21 +479,21 @@ asmlinkage void plat_irq_dispatch(void) | |||
444 | off = MIPS_CPU_IRQ_BASE + 7; | 479 | off = MIPS_CPU_IRQ_BASE + 7; |
445 | goto handle; | 480 | goto handle; |
446 | } else if (pending & CAUSEF_IP2) { | 481 | } else if (pending & CAUSEF_IP2) { |
447 | s = IC0_REQ0INT; | 482 | s = KSEG1ADDR(AU1000_IC0_PHYS_ADDR) + IC_REQ0INT; |
448 | off = AU1000_INTC0_INT_BASE; | 483 | off = AU1000_INTC0_INT_BASE; |
449 | } else if (pending & CAUSEF_IP3) { | 484 | } else if (pending & CAUSEF_IP3) { |
450 | s = IC0_REQ1INT; | 485 | s = KSEG1ADDR(AU1000_IC0_PHYS_ADDR) + IC_REQ1INT; |
451 | off = AU1000_INTC0_INT_BASE; | 486 | off = AU1000_INTC0_INT_BASE; |
452 | } else if (pending & CAUSEF_IP4) { | 487 | } else if (pending & CAUSEF_IP4) { |
453 | s = IC1_REQ0INT; | 488 | s = KSEG1ADDR(AU1000_IC1_PHYS_ADDR) + IC_REQ0INT; |
454 | off = AU1000_INTC1_INT_BASE; | 489 | off = AU1000_INTC1_INT_BASE; |
455 | } else if (pending & CAUSEF_IP5) { | 490 | } else if (pending & CAUSEF_IP5) { |
456 | s = IC1_REQ1INT; | 491 | s = KSEG1ADDR(AU1000_IC1_PHYS_ADDR) + IC_REQ1INT; |
457 | off = AU1000_INTC1_INT_BASE; | 492 | off = AU1000_INTC1_INT_BASE; |
458 | } else | 493 | } else |
459 | goto spurious; | 494 | goto spurious; |
460 | 495 | ||
461 | s = au_readl(s); | 496 | s = __raw_readl((void __iomem *)s); |
462 | if (unlikely(!s)) { | 497 | if (unlikely(!s)) { |
463 | spurious: | 498 | spurious: |
464 | spurious_interrupt(); | 499 | spurious_interrupt(); |
@@ -469,48 +504,42 @@ handle: | |||
469 | do_IRQ(off); | 504 | do_IRQ(off); |
470 | } | 505 | } |
471 | 506 | ||
507 | |||
508 | static inline void ic_init(void __iomem *base) | ||
509 | { | ||
510 | /* initialize interrupt controller to a safe state */ | ||
511 | __raw_writel(0xffffffff, base + IC_CFG0CLR); | ||
512 | __raw_writel(0xffffffff, base + IC_CFG1CLR); | ||
513 | __raw_writel(0xffffffff, base + IC_CFG2CLR); | ||
514 | __raw_writel(0xffffffff, base + IC_MASKCLR); | ||
515 | __raw_writel(0xffffffff, base + IC_ASSIGNCLR); | ||
516 | __raw_writel(0xffffffff, base + IC_WAKECLR); | ||
517 | __raw_writel(0xffffffff, base + IC_SRCSET); | ||
518 | __raw_writel(0xffffffff, base + IC_FALLINGCLR); | ||
519 | __raw_writel(0xffffffff, base + IC_RISINGCLR); | ||
520 | __raw_writel(0x00000000, base + IC_TESTBIT); | ||
521 | wmb(); | ||
522 | } | ||
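
ic_init() leans on the controller's paired SET/CLR register convention, which the usage throughout this file implies: a write applies only the bits set to 1 and leaves the rest untouched, so a single 0xffffffff write clears a whole register with no read-modify-write cycle and no locking. A minimal sketch of that convention (register names are the ones used in this file; the usage is illustrative):

	/* Sketch, illustrative: write-1-to-apply semantics of the SET/CLR
	 * register pairs; current state is read back via the RD register. */
	__raw_writel(1 << bit, base + IC_MASKSET);	/* unmask one source */
	__raw_writel(1 << bit, base + IC_MASKCLR);	/* mask it again     */
	mask = __raw_readl(base + IC_MASKRD);		/* current mask      */
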
523 | |||
472 | static void __init au1000_init_irq(struct au1xxx_irqmap *map) | 524 | static void __init au1000_init_irq(struct au1xxx_irqmap *map) |
473 | { | 525 | { |
474 | unsigned int bit, irq_nr; | 526 | unsigned int bit, irq_nr; |
475 | int i; | 527 | void __iomem *base; |
476 | |||
477 | /* | ||
478 | * Initialize interrupt controllers to a safe state. | ||
479 | */ | ||
480 | au_writel(0xffffffff, IC0_CFG0CLR); | ||
481 | au_writel(0xffffffff, IC0_CFG1CLR); | ||
482 | au_writel(0xffffffff, IC0_CFG2CLR); | ||
483 | au_writel(0xffffffff, IC0_MASKCLR); | ||
484 | au_writel(0xffffffff, IC0_ASSIGNCLR); | ||
485 | au_writel(0xffffffff, IC0_WAKECLR); | ||
486 | au_writel(0xffffffff, IC0_SRCSET); | ||
487 | au_writel(0xffffffff, IC0_FALLINGCLR); | ||
488 | au_writel(0xffffffff, IC0_RISINGCLR); | ||
489 | au_writel(0x00000000, IC0_TESTBIT); | ||
490 | |||
491 | au_writel(0xffffffff, IC1_CFG0CLR); | ||
492 | au_writel(0xffffffff, IC1_CFG1CLR); | ||
493 | au_writel(0xffffffff, IC1_CFG2CLR); | ||
494 | au_writel(0xffffffff, IC1_MASKCLR); | ||
495 | au_writel(0xffffffff, IC1_ASSIGNCLR); | ||
496 | au_writel(0xffffffff, IC1_WAKECLR); | ||
497 | au_writel(0xffffffff, IC1_SRCSET); | ||
498 | au_writel(0xffffffff, IC1_FALLINGCLR); | ||
499 | au_writel(0xffffffff, IC1_RISINGCLR); | ||
500 | au_writel(0x00000000, IC1_TESTBIT); | ||
501 | 528 | ||
529 | ic_init((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR)); | ||
530 | ic_init((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR)); | ||
502 | mips_cpu_irq_init(); | 531 | mips_cpu_irq_init(); |
503 | 532 | ||
504 | /* register all 64 possible IC0+IC1 irq sources as type "none". | 533 | /* register all 64 possible IC0+IC1 irq sources as type "none". |
505 | * Use set_irq_type() to set edge/level behaviour at runtime. | 534 | * Use set_irq_type() to set edge/level behaviour at runtime. |
506 | */ | 535 | */ |
507 | for (i = AU1000_INTC0_INT_BASE; | 536 | for (irq_nr = AU1000_INTC0_INT_BASE; |
508 | (i < AU1000_INTC0_INT_BASE + 32); i++) | 537 | (irq_nr < AU1000_INTC0_INT_BASE + 32); irq_nr++) |
509 | au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE); | 538 | au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE); |
510 | 539 | ||
511 | for (i = AU1000_INTC1_INT_BASE; | 540 | for (irq_nr = AU1000_INTC1_INT_BASE; |
512 | (i < AU1000_INTC1_INT_BASE + 32); i++) | 541 | (irq_nr < AU1000_INTC1_INT_BASE + 32); irq_nr++) |
513 | au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE); | 542 | au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE); |
514 | 543 | ||
515 | /* | 544 | /* |
516 | * Initialize IC0, which is fixed per processor. | 545 | * Initialize IC0, which is fixed per processor. |
@@ -520,13 +549,13 @@ static void __init au1000_init_irq(struct au1xxx_irqmap *map) | |||
520 | 549 | ||
521 | if (irq_nr >= AU1000_INTC1_INT_BASE) { | 550 | if (irq_nr >= AU1000_INTC1_INT_BASE) { |
522 | bit = irq_nr - AU1000_INTC1_INT_BASE; | 551 | bit = irq_nr - AU1000_INTC1_INT_BASE; |
523 | if (map->im_request) | 552 | base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); |
524 | au_writel(1 << bit, IC1_ASSIGNSET); | ||
525 | } else { | 553 | } else { |
526 | bit = irq_nr - AU1000_INTC0_INT_BASE; | 554 | bit = irq_nr - AU1000_INTC0_INT_BASE; |
527 | if (map->im_request) | 555 | base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); |
528 | au_writel(1 << bit, IC0_ASSIGNSET); | ||
529 | } | 556 | } |
557 | if (map->im_request) | ||
558 | __raw_writel(1 << bit, base + IC_ASSIGNSET); | ||
530 | 559 | ||
531 | au1x_ic_settype(irq_get_irq_data(irq_nr), map->im_type); | 560 | au1x_ic_settype(irq_get_irq_data(irq_nr), map->im_type); |
532 | ++map; | 561 | ++map; |
@@ -556,90 +585,62 @@ void __init arch_init_irq(void) | |||
556 | } | 585 | } |
557 | } | 586 | } |
558 | 587 | ||
559 | struct alchemy_ic_sysdev { | ||
560 | struct sys_device sysdev; | ||
561 | void __iomem *base; | ||
562 | unsigned long pmdata[7]; | ||
563 | }; | ||
564 | 588 | ||
565 | static int alchemy_ic_suspend(struct sys_device *dev, pm_message_t state) | 589 | static unsigned long alchemy_ic_pmdata[7 * 2]; |
566 | { | ||
567 | struct alchemy_ic_sysdev *icdev = | ||
568 | container_of(dev, struct alchemy_ic_sysdev, sysdev); | ||
569 | 590 | ||
570 | icdev->pmdata[0] = __raw_readl(icdev->base + IC_CFG0RD); | 591 | static inline void alchemy_ic_suspend_one(void __iomem *base, unsigned long *d) |
571 | icdev->pmdata[1] = __raw_readl(icdev->base + IC_CFG1RD); | 592 | { |
572 | icdev->pmdata[2] = __raw_readl(icdev->base + IC_CFG2RD); | 593 | d[0] = __raw_readl(base + IC_CFG0RD); |
573 | icdev->pmdata[3] = __raw_readl(icdev->base + IC_SRCRD); | 594 | d[1] = __raw_readl(base + IC_CFG1RD); |
574 | icdev->pmdata[4] = __raw_readl(icdev->base + IC_ASSIGNRD); | 595 | d[2] = __raw_readl(base + IC_CFG2RD); |
575 | icdev->pmdata[5] = __raw_readl(icdev->base + IC_WAKERD); | 596 | d[3] = __raw_readl(base + IC_SRCRD); |
576 | icdev->pmdata[6] = __raw_readl(icdev->base + IC_MASKRD); | 597 | d[4] = __raw_readl(base + IC_ASSIGNRD); |
577 | 598 | d[5] = __raw_readl(base + IC_WAKERD); | |
578 | return 0; | 599 | d[6] = __raw_readl(base + IC_MASKRD); |
600 | ic_init(base); /* shut it up too while at it */ | ||
579 | } | 601 | } |
580 | 602 | ||
581 | static int alchemy_ic_resume(struct sys_device *dev) | 603 | static inline void alchemy_ic_resume_one(void __iomem *base, unsigned long *d) |
582 | { | 604 | { |
583 | struct alchemy_ic_sysdev *icdev = | 605 | ic_init(base); |
584 | container_of(dev, struct alchemy_ic_sysdev, sysdev); | 606 | |
585 | 607 | __raw_writel(d[0], base + IC_CFG0SET); | |
586 | __raw_writel(0xffffffff, icdev->base + IC_MASKCLR); | 608 | __raw_writel(d[1], base + IC_CFG1SET); |
587 | __raw_writel(0xffffffff, icdev->base + IC_CFG0CLR); | 609 | __raw_writel(d[2], base + IC_CFG2SET); |
588 | __raw_writel(0xffffffff, icdev->base + IC_CFG1CLR); | 610 | __raw_writel(d[3], base + IC_SRCSET); |
589 | __raw_writel(0xffffffff, icdev->base + IC_CFG2CLR); | 611 | __raw_writel(d[4], base + IC_ASSIGNSET); |
590 | __raw_writel(0xffffffff, icdev->base + IC_SRCCLR); | 612 | __raw_writel(d[5], base + IC_WAKESET); |
591 | __raw_writel(0xffffffff, icdev->base + IC_ASSIGNCLR); | ||
592 | __raw_writel(0xffffffff, icdev->base + IC_WAKECLR); | ||
593 | __raw_writel(0xffffffff, icdev->base + IC_RISINGCLR); | ||
594 | __raw_writel(0xffffffff, icdev->base + IC_FALLINGCLR); | ||
595 | __raw_writel(0x00000000, icdev->base + IC_TESTBIT); | ||
596 | wmb(); | ||
597 | __raw_writel(icdev->pmdata[0], icdev->base + IC_CFG0SET); | ||
598 | __raw_writel(icdev->pmdata[1], icdev->base + IC_CFG1SET); | ||
599 | __raw_writel(icdev->pmdata[2], icdev->base + IC_CFG2SET); | ||
600 | __raw_writel(icdev->pmdata[3], icdev->base + IC_SRCSET); | ||
601 | __raw_writel(icdev->pmdata[4], icdev->base + IC_ASSIGNSET); | ||
602 | __raw_writel(icdev->pmdata[5], icdev->base + IC_WAKESET); | ||
603 | wmb(); | 613 | wmb(); |
604 | 614 | ||
605 | __raw_writel(icdev->pmdata[6], icdev->base + IC_MASKSET); | 615 | __raw_writel(d[6], base + IC_MASKSET); |
606 | wmb(); | 616 | wmb(); |
617 | } | ||
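
The restore order above is deliberate: type, source, assign and wake configuration is written first, a write barrier makes it visible, and only then is the mask restored, so no interrupt can be delivered against half-restored settings. The fence-then-unmask pattern in isolation (a sketch; cfg and mask stand for previously saved values):

	__raw_writel(cfg, base + IC_CFG0SET);	/* ...restore all config regs... */
	wmb();					/* config must land before unmask */
	__raw_writel(mask, base + IC_MASKSET);	/* re-enable sources last */
	wmb();
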
607 | 618 | ||
619 | static int alchemy_ic_suspend(void) | ||
620 | { | ||
621 | alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR), | ||
622 | alchemy_ic_pmdata); | ||
623 | alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR), | ||
624 | &alchemy_ic_pmdata[7]); | ||
608 | return 0; | 625 | return 0; |
609 | } | 626 | } |
610 | 627 | ||
611 | static struct sysdev_class alchemy_ic_sysdev_class = { | 628 | static void alchemy_ic_resume(void) |
612 | .name = "ic", | 629 | { |
630 | alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR), | ||
631 | &alchemy_ic_pmdata[7]); | ||
632 | alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR), | ||
633 | alchemy_ic_pmdata); | ||
634 | } | ||
635 | |||
636 | static struct syscore_ops alchemy_ic_syscore_ops = { | ||
613 | .suspend = alchemy_ic_suspend, | 637 | .suspend = alchemy_ic_suspend, |
614 | .resume = alchemy_ic_resume, | 638 | .resume = alchemy_ic_resume, |
615 | }; | 639 | }; |
616 | 640 | ||
617 | static int __init alchemy_ic_sysdev_init(void) | 641 | static int __init alchemy_ic_pm_init(void) |
618 | { | 642 | { |
619 | struct alchemy_ic_sysdev *icdev; | 643 | register_syscore_ops(&alchemy_ic_syscore_ops); |
620 | unsigned long icbase[2] = { IC0_PHYS_ADDR, IC1_PHYS_ADDR }; | ||
621 | int err, i; | ||
622 | |||
623 | err = sysdev_class_register(&alchemy_ic_sysdev_class); | ||
624 | if (err) | ||
625 | return err; | ||
626 | |||
627 | for (i = 0; i < 2; i++) { | ||
628 | icdev = kzalloc(sizeof(struct alchemy_ic_sysdev), GFP_KERNEL); | ||
629 | if (!icdev) | ||
630 | return -ENOMEM; | ||
631 | |||
632 | icdev->base = ioremap(icbase[i], 0x1000); | ||
633 | |||
634 | icdev->sysdev.id = i; | ||
635 | icdev->sysdev.cls = &alchemy_ic_sysdev_class; | ||
636 | err = sysdev_register(&icdev->sysdev); | ||
637 | if (err) { | ||
638 | kfree(icdev); | ||
639 | return err; | ||
640 | } | ||
641 | } | ||
642 | |||
643 | return 0; | 644 | return 0; |
644 | } | 645 | } |
645 | device_initcall(alchemy_ic_sysdev_init); | 646 | device_initcall(alchemy_ic_pm_init); |
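
The sysdev class and per-controller sys_device objects are replaced here by syscore ops, which run with interrupts disabled in the late-suspend/early-resume phase and need no device-model bookkeeping or allocations. The bare registration pattern looks like this (a sketch; the my_* names are placeholders, the API is from <linux/syscore_ops.h>):

	#include <linux/init.h>
	#include <linux/syscore_ops.h>

	static int my_suspend(void)		/* int return; IRQs are off */
	{
		/* save hardware state */
		return 0;
	}

	static void my_resume(void)		/* void return; IRQs still off */
	{
		/* restore hardware state */
	}

	static struct syscore_ops my_syscore_ops = {
		.suspend = my_suspend,
		.resume  = my_resume,
	};

	static int __init my_pm_init(void)
	{
		register_syscore_ops(&my_syscore_ops);
		return 0;
	}
	device_initcall(my_pm_init);
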
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c index 9e7814db3d03..3b2c18b14341 100644 --- a/arch/mips/alchemy/common/platform.c +++ b/arch/mips/alchemy/common/platform.c | |||
@@ -13,9 +13,10 @@ | |||
13 | 13 | ||
14 | #include <linux/dma-mapping.h> | 14 | #include <linux/dma-mapping.h> |
15 | #include <linux/etherdevice.h> | 15 | #include <linux/etherdevice.h> |
16 | #include <linux/init.h> | ||
16 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
17 | #include <linux/serial_8250.h> | 18 | #include <linux/serial_8250.h> |
18 | #include <linux/init.h> | 19 | #include <linux/slab.h> |
19 | 20 | ||
20 | #include <asm/mach-au1x00/au1xxx.h> | 21 | #include <asm/mach-au1x00/au1xxx.h> |
21 | #include <asm/mach-au1x00/au1xxx_dbdma.h> | 22 | #include <asm/mach-au1x00/au1xxx_dbdma.h> |
@@ -30,21 +31,12 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state, | |||
30 | #ifdef CONFIG_SERIAL_8250 | 31 | #ifdef CONFIG_SERIAL_8250 |
31 | switch (state) { | 32 | switch (state) { |
32 | case 0: | 33 | case 0: |
33 | if ((__raw_readl(port->membase + UART_MOD_CNTRL) & 3) != 3) { | 34 | alchemy_uart_enable(CPHYSADDR(port->membase)); |
34 | /* power-on sequence as suggested in the databooks */ | ||
35 | __raw_writel(0, port->membase + UART_MOD_CNTRL); | ||
36 | wmb(); | ||
37 | __raw_writel(1, port->membase + UART_MOD_CNTRL); | ||
38 | wmb(); | ||
39 | } | ||
40 | __raw_writel(3, port->membase + UART_MOD_CNTRL); /* full on */ | ||
41 | wmb(); | ||
42 | serial8250_do_pm(port, state, old_state); | 35 | serial8250_do_pm(port, state, old_state); |
43 | break; | 36 | break; |
44 | case 3: /* power off */ | 37 | case 3: /* power off */ |
45 | serial8250_do_pm(port, state, old_state); | 38 | serial8250_do_pm(port, state, old_state); |
46 | __raw_writel(0, port->membase + UART_MOD_CNTRL); | 39 | alchemy_uart_disable(CPHYSADDR(port->membase)); |
47 | wmb(); | ||
48 | break; | 40 | break; |
49 | default: | 41 | default: |
50 | serial8250_do_pm(port, state, old_state); | 42 | serial8250_do_pm(port, state, old_state); |
@@ -65,38 +57,60 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state, | |||
65 | .pm = alchemy_8250_pm, \ | 57 | .pm = alchemy_8250_pm, \ |
66 | } | 58 | } |
67 | 59 | ||
68 | static struct plat_serial8250_port au1x00_uart_data[] = { | 60 | static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = { |
69 | #if defined(CONFIG_SOC_AU1000) | 61 | [ALCHEMY_CPU_AU1000] = { |
70 | PORT(UART0_PHYS_ADDR, AU1000_UART0_INT), | 62 | PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT), |
71 | PORT(UART1_PHYS_ADDR, AU1000_UART1_INT), | 63 | PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT), |
72 | PORT(UART2_PHYS_ADDR, AU1000_UART2_INT), | 64 | PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT), |
73 | PORT(UART3_PHYS_ADDR, AU1000_UART3_INT), | 65 | PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT), |
74 | #elif defined(CONFIG_SOC_AU1500) | 66 | }, |
75 | PORT(UART0_PHYS_ADDR, AU1500_UART0_INT), | 67 | [ALCHEMY_CPU_AU1500] = { |
76 | PORT(UART3_PHYS_ADDR, AU1500_UART3_INT), | 68 | PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT), |
77 | #elif defined(CONFIG_SOC_AU1100) | 69 | PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT), |
78 | PORT(UART0_PHYS_ADDR, AU1100_UART0_INT), | 70 | }, |
79 | PORT(UART1_PHYS_ADDR, AU1100_UART1_INT), | 71 | [ALCHEMY_CPU_AU1100] = { |
80 | PORT(UART3_PHYS_ADDR, AU1100_UART3_INT), | 72 | PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT), |
81 | #elif defined(CONFIG_SOC_AU1550) | 73 | PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT), |
82 | PORT(UART0_PHYS_ADDR, AU1550_UART0_INT), | 74 | PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT), |
83 | PORT(UART1_PHYS_ADDR, AU1550_UART1_INT), | 75 | }, |
84 | PORT(UART3_PHYS_ADDR, AU1550_UART3_INT), | 76 | [ALCHEMY_CPU_AU1550] = { |
85 | #elif defined(CONFIG_SOC_AU1200) | 77 | PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT), |
86 | PORT(UART0_PHYS_ADDR, AU1200_UART0_INT), | 78 | PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT), |
87 | PORT(UART1_PHYS_ADDR, AU1200_UART1_INT), | 79 | PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT), |
88 | #endif | 80 | }, |
89 | { }, | 81 | [ALCHEMY_CPU_AU1200] = { |
82 | PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT), | ||
83 | PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT), | ||
84 | }, | ||
90 | }; | 85 | }; |
91 | 86 | ||
92 | static struct platform_device au1xx0_uart_device = { | 87 | static struct platform_device au1xx0_uart_device = { |
93 | .name = "serial8250", | 88 | .name = "serial8250", |
94 | .id = PLAT8250_DEV_AU1X00, | 89 | .id = PLAT8250_DEV_AU1X00, |
95 | .dev = { | ||
96 | .platform_data = au1x00_uart_data, | ||
97 | }, | ||
98 | }; | 90 | }; |
99 | 91 | ||
92 | static void __init alchemy_setup_uarts(int ctype) | ||
93 | { | ||
94 | unsigned int uartclk = get_au1x00_uart_baud_base() * 16; | ||
95 | int s = sizeof(struct plat_serial8250_port); | ||
96 | int c = alchemy_get_uarts(ctype); | ||
97 | struct plat_serial8250_port *ports; | ||
98 | |||
99 | ports = kzalloc(s * (c + 1), GFP_KERNEL); | ||
100 | if (!ports) { | ||
101 | printk(KERN_INFO "Alchemy: no memory for UART data\n"); | ||
102 | return; | ||
103 | } | ||
104 | memcpy(ports, au1x00_uart_data[ctype], s * c); | ||
105 | au1xx0_uart_device.dev.platform_data = ports; | ||
106 | |||
107 | /* Fill up uartclk. */ | ||
108 | for (s = 0; s < c; s++) | ||
109 | ports[s].uartclk = uartclk; | ||
110 | if (platform_device_register(&au1xx0_uart_device)) | ||
111 | printk(KERN_INFO "Alchemy: failed to register UARTs\n"); | ||
112 | } | ||
113 | |||
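
alchemy_setup_uarts() allocates one element beyond the per-chip port count on purpose: the 8250 platform probe walks its platform data until it reaches an entry whose .flags field is zero, so the extra kzalloc'ed (and therefore zeroed) slot acts as the list terminator, the same convention the removed uartclk loop below relied on. A sketch of the consumer side (illustrative):

	/* Sketch: walking a sentinel-terminated plat_serial8250_port array;
	 * the zeroed extra element ends the loop. */
	struct plat_serial8250_port *p = ports;
	while (p->flags != 0) {
		/* register/describe one UART here */
		p++;
	}
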
100 | /* OHCI (USB full speed host controller) */ | 114 | /* OHCI (USB full speed host controller) */ |
101 | static struct resource au1xxx_usb_ohci_resources[] = { | 115 | static struct resource au1xxx_usb_ohci_resources[] = { |
102 | [0] = { | 116 | [0] = { |
@@ -269,8 +283,8 @@ extern struct au1xmmc_platform_data au1xmmc_platdata[2]; | |||
269 | 283 | ||
270 | static struct resource au1200_mmc0_resources[] = { | 284 | static struct resource au1200_mmc0_resources[] = { |
271 | [0] = { | 285 | [0] = { |
272 | .start = SD0_PHYS_ADDR, | 286 | .start = AU1100_SD0_PHYS_ADDR, |
273 | .end = SD0_PHYS_ADDR + 0x7ffff, | 287 | .end = AU1100_SD0_PHYS_ADDR + 0xfff, |
274 | .flags = IORESOURCE_MEM, | 288 | .flags = IORESOURCE_MEM, |
275 | }, | 289 | }, |
276 | [1] = { | 290 | [1] = { |
@@ -305,8 +319,8 @@ static struct platform_device au1200_mmc0_device = { | |||
305 | #ifndef CONFIG_MIPS_DB1200 | 319 | #ifndef CONFIG_MIPS_DB1200 |
306 | static struct resource au1200_mmc1_resources[] = { | 320 | static struct resource au1200_mmc1_resources[] = { |
307 | [0] = { | 321 | [0] = { |
308 | .start = SD1_PHYS_ADDR, | 322 | .start = AU1100_SD1_PHYS_ADDR, |
309 | .end = SD1_PHYS_ADDR + 0x7ffff, | 323 | .end = AU1100_SD1_PHYS_ADDR + 0xfff, |
310 | .flags = IORESOURCE_MEM, | 324 | .flags = IORESOURCE_MEM, |
311 | }, | 325 | }, |
312 | [1] = { | 326 | [1] = { |
@@ -359,15 +373,16 @@ static struct platform_device pbdb_smbus_device = { | |||
359 | #endif | 373 | #endif |
360 | 374 | ||
361 | /* Macro to help defining the Ethernet MAC resources */ | 375 | /* Macro to help defining the Ethernet MAC resources */ |
376 | #define MAC_RES_COUNT 3 /* MAC regs base, MAC enable reg, MAC INT */ | ||
362 | #define MAC_RES(_base, _enable, _irq) \ | 377 | #define MAC_RES(_base, _enable, _irq) \ |
363 | { \ | 378 | { \ |
364 | .start = CPHYSADDR(_base), \ | 379 | .start = _base, \ |
365 | .end = CPHYSADDR(_base + 0xffff), \ | 380 | .end = _base + 0xffff, \ |
366 | .flags = IORESOURCE_MEM, \ | 381 | .flags = IORESOURCE_MEM, \ |
367 | }, \ | 382 | }, \ |
368 | { \ | 383 | { \ |
369 | .start = CPHYSADDR(_enable), \ | 384 | .start = _enable, \ |
370 | .end = CPHYSADDR(_enable + 0x3), \ | 385 | .end = _enable + 0x3, \ |
371 | .flags = IORESOURCE_MEM, \ | 386 | .flags = IORESOURCE_MEM, \ |
372 | }, \ | 387 | }, \ |
373 | { \ | 388 | { \ |
@@ -376,19 +391,29 @@ static struct platform_device pbdb_smbus_device = { | |||
376 | .flags = IORESOURCE_IRQ \ | 391 | .flags = IORESOURCE_IRQ \ |
377 | } | 392 | } |
378 | 393 | ||
379 | static struct resource au1xxx_eth0_resources[] = { | 394 | static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = { |
380 | #if defined(CONFIG_SOC_AU1000) | 395 | [ALCHEMY_CPU_AU1000] = { |
381 | MAC_RES(AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT), | 396 | MAC_RES(AU1000_MAC0_PHYS_ADDR, |
382 | #elif defined(CONFIG_SOC_AU1100) | 397 | AU1000_MACEN_PHYS_ADDR, |
383 | MAC_RES(AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT), | 398 | AU1000_MAC0_DMA_INT) |
384 | #elif defined(CONFIG_SOC_AU1550) | 399 | }, |
385 | MAC_RES(AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT), | 400 | [ALCHEMY_CPU_AU1500] = { |
386 | #elif defined(CONFIG_SOC_AU1500) | 401 | MAC_RES(AU1500_MAC0_PHYS_ADDR, |
387 | MAC_RES(AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT), | 402 | AU1500_MACEN_PHYS_ADDR, |
388 | #endif | 403 | AU1500_MAC0_DMA_INT) |
404 | }, | ||
405 | [ALCHEMY_CPU_AU1100] = { | ||
406 | MAC_RES(AU1000_MAC0_PHYS_ADDR, | ||
407 | AU1000_MACEN_PHYS_ADDR, | ||
408 | AU1100_MAC0_DMA_INT) | ||
409 | }, | ||
410 | [ALCHEMY_CPU_AU1550] = { | ||
411 | MAC_RES(AU1000_MAC0_PHYS_ADDR, | ||
412 | AU1000_MACEN_PHYS_ADDR, | ||
413 | AU1550_MAC0_DMA_INT) | ||
414 | }, | ||
389 | }; | 415 | }; |
390 | 416 | ||
391 | |||
392 | static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { | 417 | static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { |
393 | .phy1_search_mac0 = 1, | 418 | .phy1_search_mac0 = 1, |
394 | }; | 419 | }; |
@@ -396,20 +421,26 @@ static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { | |||
396 | static struct platform_device au1xxx_eth0_device = { | 421 | static struct platform_device au1xxx_eth0_device = { |
397 | .name = "au1000-eth", | 422 | .name = "au1000-eth", |
398 | .id = 0, | 423 | .id = 0, |
399 | .num_resources = ARRAY_SIZE(au1xxx_eth0_resources), | 424 | .num_resources = MAC_RES_COUNT, |
400 | .resource = au1xxx_eth0_resources, | ||
401 | .dev.platform_data = &au1xxx_eth0_platform_data, | 425 | .dev.platform_data = &au1xxx_eth0_platform_data, |
402 | }; | 426 | }; |
403 | 427 | ||
404 | #ifndef CONFIG_SOC_AU1100 | 428 | static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = { |
405 | static struct resource au1xxx_eth1_resources[] = { | 429 | [ALCHEMY_CPU_AU1000] = { |
406 | #if defined(CONFIG_SOC_AU1000) | 430 | MAC_RES(AU1000_MAC1_PHYS_ADDR, |
407 | MAC_RES(AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT), | 431 | AU1000_MACEN_PHYS_ADDR + 4, |
408 | #elif defined(CONFIG_SOC_AU1550) | 432 | AU1000_MAC1_DMA_INT) |
409 | MAC_RES(AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT), | 433 | }, |
410 | #elif defined(CONFIG_SOC_AU1500) | 434 | [ALCHEMY_CPU_AU1500] = { |
411 | MAC_RES(AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT), | 435 | MAC_RES(AU1500_MAC1_PHYS_ADDR, |
412 | #endif | 436 | AU1500_MACEN_PHYS_ADDR + 4, |
437 | AU1500_MAC1_DMA_INT) | ||
438 | }, | ||
439 | [ALCHEMY_CPU_AU1550] = { | ||
440 | MAC_RES(AU1000_MAC1_PHYS_ADDR, | ||
441 | AU1000_MACEN_PHYS_ADDR + 4, | ||
442 | AU1550_MAC1_DMA_INT) | ||
443 | }, | ||
413 | }; | 444 | }; |
414 | 445 | ||
415 | static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { | 446 | static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { |
@@ -419,11 +450,9 @@ static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { | |||
419 | static struct platform_device au1xxx_eth1_device = { | 450 | static struct platform_device au1xxx_eth1_device = { |
420 | .name = "au1000-eth", | 451 | .name = "au1000-eth", |
421 | .id = 1, | 452 | .id = 1, |
422 | .num_resources = ARRAY_SIZE(au1xxx_eth1_resources), | 453 | .num_resources = MAC_RES_COUNT, |
423 | .resource = au1xxx_eth1_resources, | ||
424 | .dev.platform_data = &au1xxx_eth1_platform_data, | 454 | .dev.platform_data = &au1xxx_eth1_platform_data, |
425 | }; | 455 | }; |
426 | #endif | ||
427 | 456 | ||
428 | void __init au1xxx_override_eth_cfg(unsigned int port, | 457 | void __init au1xxx_override_eth_cfg(unsigned int port, |
429 | struct au1000_eth_platform_data *eth_data) | 458 | struct au1000_eth_platform_data *eth_data) |
@@ -434,15 +463,65 @@ void __init au1xxx_override_eth_cfg(unsigned int port, | |||
434 | if (port == 0) | 463 | if (port == 0) |
435 | memcpy(&au1xxx_eth0_platform_data, eth_data, | 464 | memcpy(&au1xxx_eth0_platform_data, eth_data, |
436 | sizeof(struct au1000_eth_platform_data)); | 465 | sizeof(struct au1000_eth_platform_data)); |
437 | #ifndef CONFIG_SOC_AU1100 | ||
438 | else | 466 | else |
439 | memcpy(&au1xxx_eth1_platform_data, eth_data, | 467 | memcpy(&au1xxx_eth1_platform_data, eth_data, |
440 | sizeof(struct au1000_eth_platform_data)); | 468 | sizeof(struct au1000_eth_platform_data)); |
441 | #endif | 469 | } |
470 | |||
471 | static void __init alchemy_setup_macs(int ctype) | ||
472 | { | ||
473 | int ret, i; | ||
474 | unsigned char ethaddr[6]; | ||
475 | struct resource *macres; | ||
476 | |||
477 | /* Handle 1st MAC */ | ||
478 | if (alchemy_get_macs(ctype) < 1) | ||
479 | return; | ||
480 | |||
481 | macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); | ||
482 | if (!macres) { | ||
483 | printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n"); | ||
484 | return; | ||
485 | } | ||
486 | memcpy(macres, au1xxx_eth0_resources[ctype], | ||
487 | sizeof(struct resource) * MAC_RES_COUNT); | ||
488 | au1xxx_eth0_device.resource = macres; | ||
489 | |||
490 | i = prom_get_ethernet_addr(ethaddr); | ||
491 | if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac)) | ||
492 | memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); | ||
493 | |||
494 | ret = platform_device_register(&au1xxx_eth0_device); | ||
495 | if (ret) | ||
496 | printk(KERN_INFO "Alchemy: failed to register MAC0\n"); | ||
497 | |||
498 | |||
499 | /* Handle 2nd MAC */ | ||
500 | if (alchemy_get_macs(ctype) < 2) | ||
501 | return; | ||
502 | |||
503 | macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); | ||
504 | if (!macres) { | ||
505 | printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n"); | ||
506 | return; | ||
507 | } | ||
508 | memcpy(macres, au1xxx_eth1_resources[ctype], | ||
509 | sizeof(struct resource) * MAC_RES_COUNT); | ||
510 | au1xxx_eth1_device.resource = macres; | ||
511 | |||
512 | ethaddr[5] += 1; /* next addr for 2nd MAC */ | ||
513 | if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac)) | ||
514 | memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); | ||
515 | |||
516 | /* Register second MAC if enabled in pinfunc */ | ||
517 | if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) { | ||
518 | ret = platform_device_register(&au1xxx_eth1_device); | ||
519 | if (ret) | ||
520 | printk(KERN_INFO "Alchemy: failed to register MAC1\n"); | ||
521 | } | ||
442 | } | 522 | } |
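
For the second interface the code reuses the firmware-provided address with the last octet incremented, and registers the device only when the pin-function register shows the second NIC's pins have not been repurposed. The address derivation in isolation (a sketch; assumes prom_get_ethernet_addr() returned 0 so ethaddr[] holds a valid address):

	u8 mac1[6];

	memcpy(mac1, ethaddr, 6);
	mac1[5] += 1;			/* consecutive address for the 2nd MAC */
	if (is_valid_ether_addr(mac1))
		/* hand mac1 to the platform data, as done above */;
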
443 | 523 | ||
444 | static struct platform_device *au1xxx_platform_devices[] __initdata = { | 524 | static struct platform_device *au1xxx_platform_devices[] __initdata = { |
445 | &au1xx0_uart_device, | ||
446 | &au1xxx_usb_ohci_device, | 525 | &au1xxx_usb_ohci_device, |
447 | #ifdef CONFIG_FB_AU1100 | 526 | #ifdef CONFIG_FB_AU1100 |
448 | &au1100_lcd_device, | 527 | &au1100_lcd_device, |
@@ -460,36 +539,17 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = { | |||
460 | #ifdef SMBUS_PSC_BASE | 539 | #ifdef SMBUS_PSC_BASE |
461 | &pbdb_smbus_device, | 540 | &pbdb_smbus_device, |
462 | #endif | 541 | #endif |
463 | &au1xxx_eth0_device, | ||
464 | }; | 542 | }; |
465 | 543 | ||
466 | static int __init au1xxx_platform_init(void) | 544 | static int __init au1xxx_platform_init(void) |
467 | { | 545 | { |
468 | unsigned int uartclk = get_au1x00_uart_baud_base() * 16; | 546 | int err, ctype = alchemy_get_cputype(); |
469 | int err, i; | ||
470 | unsigned char ethaddr[6]; | ||
471 | 547 | ||
472 | /* Fill up uartclk. */ | 548 | alchemy_setup_uarts(ctype); |
473 | for (i = 0; au1x00_uart_data[i].flags; i++) | 549 | alchemy_setup_macs(ctype); |
474 | au1x00_uart_data[i].uartclk = uartclk; | ||
475 | |||
476 | /* use firmware-provided mac addr if available and necessary */ | ||
477 | i = prom_get_ethernet_addr(ethaddr); | ||
478 | if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac)) | ||
479 | memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); | ||
480 | 550 | ||
481 | err = platform_add_devices(au1xxx_platform_devices, | 551 | err = platform_add_devices(au1xxx_platform_devices, |
482 | ARRAY_SIZE(au1xxx_platform_devices)); | 552 | ARRAY_SIZE(au1xxx_platform_devices)); |
483 | #ifndef CONFIG_SOC_AU1100 | ||
484 | ethaddr[5] += 1; /* next addr for 2nd MAC */ | ||
485 | if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac)) | ||
486 | memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); | ||
487 | |||
488 | /* Register second MAC if enabled in pinfunc */ | ||
489 | if (!err && !(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) | ||
490 | err = platform_device_register(&au1xxx_eth1_device); | ||
491 | #endif | ||
492 | |||
493 | return err; | 553 | return err; |
494 | } | 554 | } |
495 | 555 | ||
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c index 561e5da2658b..1b887c868417 100644 --- a/arch/mips/alchemy/common/setup.c +++ b/arch/mips/alchemy/common/setup.c | |||
@@ -52,8 +52,6 @@ void __init plat_mem_setup(void) | |||
52 | /* this is faster than wasting cycles trying to approximate it */ | 52 | /* this is faster than wasting cycles trying to approximate it */ |
53 | preset_lpj = (est_freq >> 1) / HZ; | 53 | preset_lpj = (est_freq >> 1) / HZ; |
54 | 54 | ||
55 | board_setup(); /* board specific setup */ | ||
56 | |||
57 | if (au1xxx_cpu_needs_config_od()) | 55 | if (au1xxx_cpu_needs_config_od()) |
58 | /* Various early Au1xx0 errata corrected by this */ | 56 | /* Various early Au1xx0 errata corrected by this */ |
59 | set_c0_config(1 << 19); /* Set Config[OD] */ | 57 | set_c0_config(1 << 19); /* Set Config[OD] */ |
@@ -61,6 +59,8 @@ void __init plat_mem_setup(void) | |||
61 | /* Clear to obtain best system bus performance */ | 59 | /* Clear to obtain best system bus performance */ |
62 | clear_c0_config(1 << 19); /* Clear Config[OD] */ | 60 | clear_c0_config(1 << 19); /* Clear Config[OD] */ |
63 | 61 | ||
62 | board_setup(); /* board specific setup */ | ||
63 | |||
64 | /* IO/MEM resources. */ | 64 | /* IO/MEM resources. */ |
65 | set_io_port_base(0); | 65 | set_io_port_base(0); |
66 | ioport_resource.start = IOPORT_RESOURCE_START; | 66 | ioport_resource.start = IOPORT_RESOURCE_START; |
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c index 2aecb2fdf982..d5da6adbf634 100644 --- a/arch/mips/alchemy/common/time.c +++ b/arch/mips/alchemy/common/time.c | |||
@@ -141,8 +141,7 @@ static int __init alchemy_time_init(unsigned int m2int) | |||
141 | goto cntr_err; | 141 | goto cntr_err; |
142 | 142 | ||
143 | /* register counter1 clocksource and event device */ | 143 | /* register counter1 clocksource and event device */ |
144 | clocksource_set_clock(&au1x_counter1_clocksource, 32768); | 144 | clocksource_register_hz(&au1x_counter1_clocksource, 32768); |
145 | clocksource_register(&au1x_counter1_clocksource); | ||
146 | 145 | ||
147 | cd->shift = 32; | 146 | cd->shift = 32; |
148 | cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift); | 147 | cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift); |
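
clocksource_register_hz() folds the old two-step sequence into one call: it computes mult/shift for the given fixed rate (here the 32.768 kHz counter) internally before registering. The equivalent call with its return value checked (a sketch; the error string is illustrative):

	if (clocksource_register_hz(&au1x_counter1_clocksource, 32768))
		pr_err("alchemy: counter1 clocksource registration failed\n");
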
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c index 4a8980027ecf..1dac4f27d334 100644 --- a/arch/mips/alchemy/devboards/db1200/setup.c +++ b/arch/mips/alchemy/devboards/db1200/setup.c | |||
@@ -23,6 +23,13 @@ void __init board_setup(void) | |||
23 | unsigned long freq0, clksrc, div, pfc; | 23 | unsigned long freq0, clksrc, div, pfc; |
24 | unsigned short whoami; | 24 | unsigned short whoami; |
25 | 25 | ||
26 | /* Set Config[OD] (disable overlapping bus transaction): | ||
27 | * This gets rid of a _lot_ of spurious interrupts (especially | ||
28 | * wrt. IDE); but incurs ~10% performance hit in some | ||
29 | * cpu-bound applications. | ||
30 | */ | ||
31 | set_c0_config(1 << 19); | ||
32 | |||
26 | bcsr_init(DB1200_BCSR_PHYS_ADDR, | 33 | bcsr_init(DB1200_BCSR_PHYS_ADDR, |
27 | DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS); | 34 | DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS); |
28 | 35 | ||
diff --git a/arch/mips/alchemy/devboards/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c index 05f120ff90f9..5c956fe8760f 100644 --- a/arch/mips/alchemy/devboards/db1x00/board_setup.c +++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c | |||
@@ -127,13 +127,10 @@ const char *get_system_type(void) | |||
127 | void __init board_setup(void) | 127 | void __init board_setup(void) |
128 | { | 128 | { |
129 | unsigned long bcsr1, bcsr2; | 129 | unsigned long bcsr1, bcsr2; |
130 | u32 pin_func; | ||
131 | 130 | ||
132 | bcsr1 = DB1000_BCSR_PHYS_ADDR; | 131 | bcsr1 = DB1000_BCSR_PHYS_ADDR; |
133 | bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS; | 132 | bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS; |
134 | 133 | ||
135 | pin_func = 0; | ||
136 | |||
137 | #ifdef CONFIG_MIPS_DB1000 | 134 | #ifdef CONFIG_MIPS_DB1000 |
138 | printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n"); | 135 | printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n"); |
139 | #endif | 136 | #endif |
@@ -164,12 +161,16 @@ void __init board_setup(void) | |||
164 | /* Not valid for Au1550 */ | 161 | /* Not valid for Au1550 */ |
165 | #if defined(CONFIG_IRDA) && \ | 162 | #if defined(CONFIG_IRDA) && \ |
166 | (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) | 163 | (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) |
167 | /* Set IRFIRSEL instead of GPIO15 */ | 164 | { |
168 | pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; | 165 | u32 pin_func; |
169 | au_writel(pin_func, SYS_PINFUNC); | 166 | |
170 | /* Power off until the driver is in use */ | 167 | /* Set IRFIRSEL instead of GPIO15 */ |
171 | bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, | 168 | pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; |
172 | BCSR_RESETS_IRDA_MODE_OFF); | 169 | au_writel(pin_func, SYS_PINFUNC); |
170 | /* Power off until the driver is in use */ | ||
171 | bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, | ||
172 | BCSR_RESETS_IRDA_MODE_OFF); | ||
173 | } | ||
173 | #endif | 174 | #endif |
174 | bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */ | 175 | bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */ |
175 | 176 | ||
@@ -177,31 +178,35 @@ void __init board_setup(void) | |||
177 | alchemy_gpio1_input_enable(); | 178 | alchemy_gpio1_input_enable(); |
178 | 179 | ||
179 | #ifdef CONFIG_MIPS_MIRAGE | 180 | #ifdef CONFIG_MIPS_MIRAGE |
180 | /* GPIO[20] is output */ | 181 | { |
181 | alchemy_gpio_direction_output(20, 0); | 182 | u32 pin_func; |
182 | 183 | ||
183 | /* Set GPIO[210:208] instead of SSI_0 */ | 184 | /* GPIO[20] is output */ |
184 | pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; | 185 | alchemy_gpio_direction_output(20, 0); |
185 | 186 | ||
186 | /* Set GPIO[215:211] for LEDs */ | 187 | /* Set GPIO[210:208] instead of SSI_0 */ |
187 | pin_func |= 5 << 2; | 188 | pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; |
188 | 189 | ||
189 | /* Set GPIO[214:213] for more LEDs */ | 190 | /* Set GPIO[215:211] for LEDs */ |
190 | pin_func |= 5 << 12; | 191 | pin_func |= 5 << 2; |
191 | 192 | ||
192 | /* Set GPIO[207:200] instead of PCMCIA/LCD */ | 193 | /* Set GPIO[214:213] for more LEDs */ |
193 | pin_func |= SYS_PF_LCD | SYS_PF_PC; | 194 | pin_func |= 5 << 12; |
194 | au_writel(pin_func, SYS_PINFUNC); | ||
195 | 195 | ||
196 | /* | 196 | /* Set GPIO[207:200] instead of PCMCIA/LCD */ |
197 | * Enable speaker amplifier. This should | 197 | pin_func |= SYS_PF_LCD | SYS_PF_PC; |
198 | * be part of the audio driver. | 198 | au_writel(pin_func, SYS_PINFUNC); |
199 | */ | ||
200 | alchemy_gpio_direction_output(209, 1); | ||
201 | 199 | ||
202 | pm_power_off = mirage_power_off; | 200 | /* |
203 | _machine_halt = mirage_power_off; | 201 | * Enable speaker amplifier. This should |
204 | _machine_restart = (void(*)(char *))mips_softreset; | 202 | * be part of the audio driver. |
203 | */ | ||
204 | alchemy_gpio_direction_output(209, 1); | ||
205 | |||
206 | pm_power_off = mirage_power_off; | ||
207 | _machine_halt = mirage_power_off; | ||
208 | _machine_restart = (void(*)(char *))mips_softreset; | ||
209 | } | ||
205 | #endif | 210 | #endif |
206 | 211 | ||
207 | #ifdef CONFIG_MIPS_BOSPORUS | 212 | #ifdef CONFIG_MIPS_BOSPORUS |
diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c index 2d85c4b5be09..e64fdcbf75d0 100644 --- a/arch/mips/alchemy/devboards/pb1000/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c | |||
@@ -65,7 +65,7 @@ void __init board_setup(void) | |||
65 | 65 | ||
66 | /* Set AUX clock to 12 MHz * 8 = 96 MHz */ | 66 | /* Set AUX clock to 12 MHz * 8 = 96 MHz */ |
67 | au_writel(8, SYS_AUXPLL); | 67 | au_writel(8, SYS_AUXPLL); |
68 | au_writel(0, SYS_PINSTATERD); | 68 | alchemy_gpio1_input_enable(); |
69 | udelay(100); | 69 | udelay(100); |
70 | 70 | ||
71 | #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) | 71 | #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) |
diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c index 83f46215eb0c..3b4fa3206969 100644 --- a/arch/mips/alchemy/devboards/pb1500/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c | |||
@@ -56,7 +56,7 @@ void __init board_setup(void) | |||
56 | sys_clksrc = sys_freqctrl = pin_func = 0; | 56 | sys_clksrc = sys_freqctrl = pin_func = 0; |
57 | /* Set AUX clock to 12 MHz * 8 = 96 MHz */ | 57 | /* Set AUX clock to 12 MHz * 8 = 96 MHz */ |
58 | au_writel(8, SYS_AUXPLL); | 58 | au_writel(8, SYS_AUXPLL); |
59 | au_writel(0, SYS_PINSTATERD); | 59 | alchemy_gpio1_input_enable(); |
60 | udelay(100); | 60 | udelay(100); |
61 | 61 | ||
62 | /* GPIO201 is input for PCMCIA card detect */ | 62 | /* GPIO201 is input for PCMCIA card detect */ |
diff --git a/arch/mips/alchemy/devboards/prom.c b/arch/mips/alchemy/devboards/prom.c index baeb21385058..e5306b56da6d 100644 --- a/arch/mips/alchemy/devboards/prom.c +++ b/arch/mips/alchemy/devboards/prom.c | |||
@@ -62,5 +62,5 @@ void __init prom_init(void) | |||
62 | 62 | ||
63 | void prom_putchar(unsigned char c) | 63 | void prom_putchar(unsigned char c) |
64 | { | 64 | { |
65 | alchemy_uart_putchar(UART0_PHYS_ADDR, c); | 65 | alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); |
66 | } | 66 | } |
diff --git a/arch/mips/alchemy/gpr/board_setup.c b/arch/mips/alchemy/gpr/board_setup.c index ad2e3f137933..5f8f0691ed2d 100644 --- a/arch/mips/alchemy/gpr/board_setup.c +++ b/arch/mips/alchemy/gpr/board_setup.c | |||
@@ -36,9 +36,6 @@ | |||
36 | 36 | ||
37 | #include <prom.h> | 37 | #include <prom.h> |
38 | 38 | ||
39 | #define UART1_ADDR KSEG1ADDR(UART1_PHYS_ADDR) | ||
40 | #define UART3_ADDR KSEG1ADDR(UART3_PHYS_ADDR) | ||
41 | |||
42 | char irq_tab_alchemy[][5] __initdata = { | 39 | char irq_tab_alchemy[][5] __initdata = { |
43 | [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, | 40 | [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, |
44 | }; | 41 | }; |
@@ -67,18 +64,15 @@ static void gpr_power_off(void) | |||
67 | 64 | ||
68 | void __init board_setup(void) | 65 | void __init board_setup(void) |
69 | { | 66 | { |
70 | printk(KERN_INFO "Tarpeze ITS GPR board\n"); | 67 | printk(KERN_INFO "Trapeze ITS GPR board\n"); |
71 | 68 | ||
72 | pm_power_off = gpr_power_off; | 69 | pm_power_off = gpr_power_off; |
73 | _machine_halt = gpr_power_off; | 70 | _machine_halt = gpr_power_off; |
74 | _machine_restart = gpr_reset; | 71 | _machine_restart = gpr_reset; |
75 | 72 | ||
76 | /* Enable UART3 */ | 73 | /* Enable UART1/3 */ |
77 | au_writel(0x1, UART3_ADDR + UART_MOD_CNTRL);/* clock enable (CE) */ | 74 | alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); |
78 | au_writel(0x3, UART3_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ | 75 | alchemy_uart_enable(AU1000_UART1_PHYS_ADDR); |
79 | /* Enable UART1 */ | ||
80 | au_writel(0x1, UART1_ADDR + UART_MOD_CNTRL); /* clock enable (CE) */ | ||
81 | au_writel(0x3, UART1_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ | ||
82 | 76 | ||
83 | /* Take away Reset of UMTS-card */ | 77 | /* Take away Reset of UMTS-card */ |
84 | alchemy_gpio_direction_output(215, 1); | 78 | alchemy_gpio_direction_output(215, 1); |
diff --git a/arch/mips/alchemy/gpr/init.c b/arch/mips/alchemy/gpr/init.c index f044f4c541d7..229aafae680c 100644 --- a/arch/mips/alchemy/gpr/init.c +++ b/arch/mips/alchemy/gpr/init.c | |||
@@ -59,5 +59,5 @@ void __init prom_init(void) | |||
59 | 59 | ||
60 | void prom_putchar(unsigned char c) | 60 | void prom_putchar(unsigned char c) |
61 | { | 61 | { |
62 | alchemy_uart_putchar(UART0_PHYS_ADDR, c); | 62 | alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); |
63 | } | 63 | } |
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c index cf436ab679ae..3ae984cf98cf 100644 --- a/arch/mips/alchemy/mtx-1/board_setup.c +++ b/arch/mips/alchemy/mtx-1/board_setup.c | |||
@@ -87,7 +87,7 @@ void __init board_setup(void) | |||
87 | au_writel(SYS_PF_NI2, SYS_PINFUNC); | 87 | au_writel(SYS_PF_NI2, SYS_PINFUNC); |
88 | 88 | ||
89 | /* Initialize GPIO */ | 89 | /* Initialize GPIO */ |
90 | au_writel(0xFFFFFFFF, SYS_TRIOUTCLR); | 90 | au_writel(~0, KSEG1ADDR(AU1000_SYS_PHYS_ADDR) + SYS_TRIOUTCLR); |
91 | alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ | 91 | alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ |
92 | alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ | 92 | alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ |
93 | alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ | 93 | alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ |
diff --git a/arch/mips/alchemy/mtx-1/init.c b/arch/mips/alchemy/mtx-1/init.c index f8d25575fa05..2e81cc7f3422 100644 --- a/arch/mips/alchemy/mtx-1/init.c +++ b/arch/mips/alchemy/mtx-1/init.c | |||
@@ -62,5 +62,5 @@ void __init prom_init(void) | |||
62 | 62 | ||
63 | void prom_putchar(unsigned char c) | 63 | void prom_putchar(unsigned char c) |
64 | { | 64 | { |
65 | alchemy_uart_putchar(UART0_PHYS_ADDR, c); | 65 | alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); |
66 | } | 66 | } |
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c index 956f946218c5..55628e390fd7 100644 --- a/arch/mips/alchemy/mtx-1/platform.c +++ b/arch/mips/alchemy/mtx-1/platform.c | |||
@@ -53,8 +53,8 @@ static struct platform_device mtx1_button = { | |||
53 | 53 | ||
54 | static struct resource mtx1_wdt_res[] = { | 54 | static struct resource mtx1_wdt_res[] = { |
55 | [0] = { | 55 | [0] = { |
56 | .start = 15, | 56 | .start = 215, |
57 | .end = 15, | 57 | .end = 215, |
58 | .name = "mtx1-wdt-gpio", | 58 | .name = "mtx1-wdt-gpio", |
59 | .flags = IORESOURCE_IRQ, | 59 | .flags = IORESOURCE_IRQ, |
60 | } | 60 | } |
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c index febfb0fb0896..81e57fad07ab 100644 --- a/arch/mips/alchemy/xxs1500/board_setup.c +++ b/arch/mips/alchemy/xxs1500/board_setup.c | |||
@@ -66,13 +66,10 @@ void __init board_setup(void) | |||
66 | au_writel(pin_func, SYS_PINFUNC); | 66 | au_writel(pin_func, SYS_PINFUNC); |
67 | 67 | ||
68 | /* Enable UART */ | 68 | /* Enable UART */ |
69 | au_writel(0x01, UART3_ADDR + UART_MOD_CNTRL); /* clock enable (CE) */ | 69 | alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); |
70 | mdelay(10); | 70 | /* Enable DTR (MCR bit 0) = USB power up */ |
71 | au_writel(0x03, UART3_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ | 71 | __raw_writel(1, (void __iomem *)KSEG1ADDR(AU1000_UART3_PHYS_ADDR + 0x18)); |
72 | mdelay(10); | 72 | wmb(); |
73 | |||
74 | /* Enable DTR = USB power up */ | ||
75 | au_writel(0x01, UART3_ADDR + UART_MCR); /* UART_MCR_DTR is 0x01??? */ | ||
76 | 73 | ||
77 | #ifdef CONFIG_PCI | 74 | #ifdef CONFIG_PCI |
78 | #if defined(__MIPSEB__) | 75 | #if defined(__MIPSEB__) |
diff --git a/arch/mips/alchemy/xxs1500/init.c b/arch/mips/alchemy/xxs1500/init.c index 15125c2fda7d..0ee02cfa989d 100644 --- a/arch/mips/alchemy/xxs1500/init.c +++ b/arch/mips/alchemy/xxs1500/init.c | |||
@@ -51,14 +51,13 @@ void __init prom_init(void) | |||
51 | prom_init_cmdline(); | 51 | prom_init_cmdline(); |
52 | 52 | ||
53 | memsize_str = prom_getenv("memsize"); | 53 | memsize_str = prom_getenv("memsize"); |
54 | if (!memsize_str) | 54 | if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize)) |
55 | memsize = 0x04000000; | 55 | memsize = 0x04000000; |
56 | else | 56 | |
57 | strict_strtoul(memsize_str, 0, &memsize); | ||
58 | add_memory_region(0, memsize, BOOT_MEM_RAM); | 57 | add_memory_region(0, memsize, BOOT_MEM_RAM); |
59 | } | 58 | } |
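
The rewritten condition folds both failure modes into one test: strict_strtoul() returns 0 on success, so the default kicks in when the memsize variable is either absent or unparsable. The same logic spelled out (sketch):

	/* Sketch: fall back to 64 MB if "memsize" is absent or malformed. */
	if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize) != 0)
		memsize = 0x04000000;	/* 64 MB */
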
60 | 59 | ||
61 | void prom_putchar(unsigned char c) | 60 | void prom_putchar(unsigned char c) |
62 | { | 61 | { |
63 | alchemy_uart_putchar(UART0_PHYS_ADDR, c); | 62 | alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); |
64 | } | 63 | } |
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c index 425dfa5d6e12..bb571bcdb8f2 100644 --- a/arch/mips/ar7/gpio.c +++ b/arch/mips/ar7/gpio.c | |||
@@ -325,9 +325,7 @@ int __init ar7_gpio_init(void) | |||
325 | size = 0x1f; | 325 | size = 0x1f; |
326 | } | 326 | } |
327 | 327 | ||
328 | gpch->regs = ioremap_nocache(AR7_REGS_GPIO, | 328 | gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size); |
329 | AR7_REGS_GPIO + 0x10); | ||
330 | |||
331 | if (!gpch->regs) { | 329 | if (!gpch->regs) { |
332 | printk(KERN_ERR "%s: failed to ioremap regs\n", | 330 | printk(KERN_ERR "%s: failed to ioremap regs\n", |
333 | gpch->chip.label); | 331 | gpch->chip.label); |
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c index e5b6615731e5..54db815bc86c 100644 --- a/arch/mips/bcm47xx/nvram.c +++ b/arch/mips/bcm47xx/nvram.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2005 Broadcom Corporation | 4 | * Copyright (C) 2005 Broadcom Corporation |
5 | * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> | 5 | * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> |
6 | * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de> | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License as published by the | 9 | * under the terms of the GNU General Public License as published by the |
@@ -23,7 +24,7 @@ | |||
23 | static char nvram_buf[NVRAM_SPACE]; | 24 | static char nvram_buf[NVRAM_SPACE]; |
24 | 25 | ||
25 | /* Probe for NVRAM header */ | 26 | /* Probe for NVRAM header */ |
26 | static void __init early_nvram_init(void) | 27 | static void early_nvram_init(void) |
27 | { | 28 | { |
28 | struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore; | 29 | struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore; |
29 | struct nvram_header *header; | 30 | struct nvram_header *header; |
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index c95f90bf734c..73b529b57433 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> | 3 | * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> |
4 | * Copyright (C) 2006 Michael Buesch <mb@bu3sch.de> | 4 | * Copyright (C) 2006 Michael Buesch <mb@bu3sch.de> |
5 | * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org> | 5 | * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org> |
6 | * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de> | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License as published by the | 9 | * under the terms of the GNU General Public License as published by the |
@@ -57,10 +58,49 @@ static void bcm47xx_machine_halt(void) | |||
57 | } | 58 | } |
58 | 59 | ||
59 | #define READ_FROM_NVRAM(_outvar, name, buf) \ | 60 | #define READ_FROM_NVRAM(_outvar, name, buf) \ |
60 | if (nvram_getenv(name, buf, sizeof(buf)) >= 0)\ | 61 | if (nvram_getprefix(prefix, name, buf, sizeof(buf)) >= 0)\ |
61 | sprom->_outvar = simple_strtoul(buf, NULL, 0); | 62 | sprom->_outvar = simple_strtoul(buf, NULL, 0); |
62 | 63 | ||
63 | static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) | 64 | #define READ_FROM_NVRAM2(_outvar, name1, name2, buf) \ |
65 | if (nvram_getprefix(prefix, name1, buf, sizeof(buf)) >= 0 || \ | ||
66 | nvram_getprefix(prefix, name2, buf, sizeof(buf)) >= 0)\ | ||
67 | sprom->_outvar = simple_strtoul(buf, NULL, 0); | ||
68 | |||
69 | static inline int nvram_getprefix(const char *prefix, char *name, | ||
70 | char *buf, int len) | ||
71 | { | ||
72 | if (prefix) { | ||
73 | char key[100]; | ||
74 | |||
75 | snprintf(key, sizeof(key), "%s%s", prefix, name); | ||
76 | return nvram_getenv(key, buf, len); | ||
77 | } | ||
78 | |||
79 | return nvram_getenv(name, buf, len); | ||
80 | } | ||
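
nvram_getprefix() simply concatenates prefix and name before the lookup, and a NULL prefix falls through to the bare variable name; nvram_getenv() returns a non-negative value when the variable exists, as the macros above check. Example use (the prefix value "sb/1/" is hypothetical, chosen only to show the resulting key "sb/1/sromrev"):

	char buf[100];

	/* looks up NVRAM key "sb/1/sromrev"; >= 0 means the variable exists */
	if (nvram_getprefix("sb/1/", "sromrev", buf, sizeof(buf)) >= 0)
		sprom->revision = simple_strtoul(buf, NULL, 0);
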
81 | |||
82 | static u32 nvram_getu32(const char *name, char *buf, int len) | ||
83 | { | ||
84 | int rv; | ||
85 | char key[100]; | ||
86 | u16 var0, var1; | ||
87 | |||
88 | snprintf(key, sizeof(key), "%s0", name); | ||
89 | rv = nvram_getenv(key, buf, len); | ||
90 | /* return 0 here so this looks like unset */ | ||
91 | if (rv < 0) | ||
92 | return 0; | ||
93 | var0 = simple_strtoul(buf, NULL, 0); | ||
94 | |||
95 | snprintf(key, sizeof(key), "%s1", name); | ||
96 | rv = nvram_getenv(key, buf, len); | ||
97 | if (rv < 0) | ||
98 | return 0; | ||
99 | var1 = simple_strtoul(buf, NULL, 0); | ||
100 | return var1 << 16 | var0; | ||
101 | } | ||
102 | |||
103 | static void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix) | ||
64 | { | 104 | { |
65 | char buf[100]; | 105 | char buf[100]; |
66 | u32 boardflags; | 106 | u32 boardflags; |
@@ -69,11 +109,12 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) | |||
69 | 109 | ||
70 | sprom->revision = 1; /* Fallback: Old hardware does not define this. */ | 110 | sprom->revision = 1; /* Fallback: Old hardware does not define this. */ |
71 | READ_FROM_NVRAM(revision, "sromrev", buf); | 111 | READ_FROM_NVRAM(revision, "sromrev", buf); |
72 | if (nvram_getenv("il0macaddr", buf, sizeof(buf)) >= 0) | 112 | if (nvram_getprefix(prefix, "il0macaddr", buf, sizeof(buf)) >= 0 || |
113 | nvram_getprefix(prefix, "macaddr", buf, sizeof(buf)) >= 0) | ||
73 | nvram_parse_macaddr(buf, sprom->il0mac); | 114 | nvram_parse_macaddr(buf, sprom->il0mac); |
74 | if (nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0) | 115 | if (nvram_getprefix(prefix, "et0macaddr", buf, sizeof(buf)) >= 0) |
75 | nvram_parse_macaddr(buf, sprom->et0mac); | 116 | nvram_parse_macaddr(buf, sprom->et0mac); |
76 | if (nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0) | 117 | if (nvram_getprefix(prefix, "et1macaddr", buf, sizeof(buf)) >= 0) |
77 | nvram_parse_macaddr(buf, sprom->et1mac); | 118 | nvram_parse_macaddr(buf, sprom->et1mac); |
78 | READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf); | 119 | READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf); |
79 | READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf); | 120 | READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf); |
@@ -95,20 +136,36 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) | |||
95 | READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf); | 136 | READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf); |
96 | READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf); | 137 | READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf); |
97 | READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf); | 138 | READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf); |
98 | READ_FROM_NVRAM(gpio0, "wl0gpio0", buf); | 139 | READ_FROM_NVRAM2(gpio0, "ledbh0", "wl0gpio0", buf); |
99 | READ_FROM_NVRAM(gpio1, "wl0gpio1", buf); | 140 | READ_FROM_NVRAM2(gpio1, "ledbh1", "wl0gpio1", buf); |
100 | READ_FROM_NVRAM(gpio2, "wl0gpio2", buf); | 141 | READ_FROM_NVRAM2(gpio2, "ledbh2", "wl0gpio2", buf); |
101 | READ_FROM_NVRAM(gpio3, "wl0gpio3", buf); | 142 | READ_FROM_NVRAM2(gpio3, "ledbh3", "wl0gpio3", buf); |
102 | READ_FROM_NVRAM(maxpwr_bg, "pa0maxpwr", buf); | 143 | READ_FROM_NVRAM2(maxpwr_bg, "maxp2ga0", "pa0maxpwr", buf); |
103 | READ_FROM_NVRAM(maxpwr_al, "pa1lomaxpwr", buf); | 144 | READ_FROM_NVRAM2(maxpwr_al, "maxp5gla0", "pa1lomaxpwr", buf); |
104 | READ_FROM_NVRAM(maxpwr_a, "pa1maxpwr", buf); | 145 | READ_FROM_NVRAM2(maxpwr_a, "maxp5ga0", "pa1maxpwr", buf); |
105 | READ_FROM_NVRAM(maxpwr_ah, "pa1himaxpwr", buf); | 146 | READ_FROM_NVRAM2(maxpwr_ah, "maxp5gha0", "pa1himaxpwr", buf); |
106 | READ_FROM_NVRAM(itssi_a, "pa1itssit", buf); | 147 | READ_FROM_NVRAM2(itssi_bg, "itt5ga0", "pa0itssit", buf); |
107 | READ_FROM_NVRAM(itssi_bg, "pa0itssit", buf); | 148 | READ_FROM_NVRAM2(itssi_a, "itt2ga0", "pa1itssit", buf); |
108 | READ_FROM_NVRAM(tri2g, "tri2g", buf); | 149 | READ_FROM_NVRAM(tri2g, "tri2g", buf); |
109 | READ_FROM_NVRAM(tri5gl, "tri5gl", buf); | 150 | READ_FROM_NVRAM(tri5gl, "tri5gl", buf); |
110 | READ_FROM_NVRAM(tri5g, "tri5g", buf); | 151 | READ_FROM_NVRAM(tri5g, "tri5g", buf); |
111 | READ_FROM_NVRAM(tri5gh, "tri5gh", buf); | 152 | READ_FROM_NVRAM(tri5gh, "tri5gh", buf); |
153 | READ_FROM_NVRAM(txpid2g[0], "txpid2ga0", buf); | ||
154 | READ_FROM_NVRAM(txpid2g[1], "txpid2ga1", buf); | ||
155 | READ_FROM_NVRAM(txpid2g[2], "txpid2ga2", buf); | ||
156 | READ_FROM_NVRAM(txpid2g[3], "txpid2ga3", buf); | ||
157 | READ_FROM_NVRAM(txpid5g[0], "txpid5ga0", buf); | ||
158 | READ_FROM_NVRAM(txpid5g[1], "txpid5ga1", buf); | ||
159 | READ_FROM_NVRAM(txpid5g[2], "txpid5ga2", buf); | ||
160 | READ_FROM_NVRAM(txpid5g[3], "txpid5ga3", buf); | ||
161 | READ_FROM_NVRAM(txpid5gl[0], "txpid5gla0", buf); | ||
162 | READ_FROM_NVRAM(txpid5gl[1], "txpid5gla1", buf); | ||
163 | READ_FROM_NVRAM(txpid5gl[2], "txpid5gla2", buf); | ||
164 | READ_FROM_NVRAM(txpid5gl[3], "txpid5gla3", buf); | ||
165 | READ_FROM_NVRAM(txpid5gh[0], "txpid5gha0", buf); | ||
166 | READ_FROM_NVRAM(txpid5gh[1], "txpid5gha1", buf); | ||
167 | READ_FROM_NVRAM(txpid5gh[2], "txpid5gha2", buf); | ||
168 | READ_FROM_NVRAM(txpid5gh[3], "txpid5gha3", buf); | ||
112 | READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf); | 169 | READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf); |
113 | READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf); | 170 | READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf); |
114 | READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf); | 171 | READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf); |
@@ -120,19 +177,27 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) | |||
120 | READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf); | 177 | READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf); |
121 | READ_FROM_NVRAM(bxa5g, "bxa5g", buf); | 178 | READ_FROM_NVRAM(bxa5g, "bxa5g", buf); |
122 | READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf); | 179 | READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf); |
123 | READ_FROM_NVRAM(ofdm2gpo, "ofdm2gpo", buf); | ||
124 | READ_FROM_NVRAM(ofdm5glpo, "ofdm5glpo", buf); | ||
125 | READ_FROM_NVRAM(ofdm5gpo, "ofdm5gpo", buf); | ||
126 | READ_FROM_NVRAM(ofdm5ghpo, "ofdm5ghpo", buf); | ||
127 | 180 | ||
128 | if (nvram_getenv("boardflags", buf, sizeof(buf)) >= 0) { | 181 | sprom->ofdm2gpo = nvram_getu32("ofdm2gpo", buf, sizeof(buf)); |
182 | sprom->ofdm5glpo = nvram_getu32("ofdm5glpo", buf, sizeof(buf)); | ||
183 | sprom->ofdm5gpo = nvram_getu32("ofdm5gpo", buf, sizeof(buf)); | ||
184 | sprom->ofdm5ghpo = nvram_getu32("ofdm5ghpo", buf, sizeof(buf)); | ||
185 | |||
186 | READ_FROM_NVRAM(antenna_gain.ghz24.a0, "ag0", buf); | ||
187 | READ_FROM_NVRAM(antenna_gain.ghz24.a1, "ag1", buf); | ||
188 | READ_FROM_NVRAM(antenna_gain.ghz24.a2, "ag2", buf); | ||
189 | READ_FROM_NVRAM(antenna_gain.ghz24.a3, "ag3", buf); | ||
190 | memcpy(&sprom->antenna_gain.ghz5, &sprom->antenna_gain.ghz24, | ||
191 | sizeof(sprom->antenna_gain.ghz5)); | ||
192 | |||
193 | if (nvram_getprefix(prefix, "boardflags", buf, sizeof(buf)) >= 0) { | ||
129 | boardflags = simple_strtoul(buf, NULL, 0); | 194 | boardflags = simple_strtoul(buf, NULL, 0); |
130 | if (boardflags) { | 195 | if (boardflags) { |
131 | sprom->boardflags_lo = (boardflags & 0x0000FFFFU); | 196 | sprom->boardflags_lo = (boardflags & 0x0000FFFFU); |
132 | sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16; | 197 | sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16; |
133 | } | 198 | } |
134 | } | 199 | } |
135 | if (nvram_getenv("boardflags2", buf, sizeof(buf)) >= 0) { | 200 | if (nvram_getprefix(prefix, "boardflags2", buf, sizeof(buf)) >= 0) { |
136 | boardflags = simple_strtoul(buf, NULL, 0); | 201 | boardflags = simple_strtoul(buf, NULL, 0); |
137 | if (boardflags) { | 202 | if (boardflags) { |
138 | sprom->boardflags2_lo = (boardflags & 0x0000FFFFU); | 203 | sprom->boardflags2_lo = (boardflags & 0x0000FFFFU); |
@@ -141,6 +206,22 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) | |||
141 | } | 206 | } |
142 | } | 207 | } |
143 | 208 | ||
209 | int bcm47xx_get_sprom(struct ssb_bus *bus, struct ssb_sprom *out) | ||
210 | { | ||
211 | char prefix[10]; | ||
212 | |||
213 | if (bus->bustype == SSB_BUSTYPE_PCI) { | ||
214 | snprintf(prefix, sizeof(prefix), "pci/%u/%u/", | ||
215 | bus->host_pci->bus->number + 1, | ||
216 | PCI_SLOT(bus->host_pci->devfn)); | ||
217 | bcm47xx_fill_sprom(out, prefix); | ||
218 | return 0; | ||
219 | } else { | ||
220 | printk(KERN_WARNING "bcm47xx: unable to fill SPROM for given bustype.\n"); | ||
221 | return -EINVAL; | ||
222 | } | ||
223 | } | ||
224 | |||
144 | static int bcm47xx_get_invariants(struct ssb_bus *bus, | 225 | static int bcm47xx_get_invariants(struct ssb_bus *bus, |
145 | struct ssb_init_invariants *iv) | 226 | struct ssb_init_invariants *iv) |
146 | { | 227 | { |
@@ -158,7 +239,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus, | |||
158 | if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0) | 239 | if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0) |
159 | iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0); | 240 | iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0); |
160 | 241 | ||
161 | bcm47xx_fill_sprom(&iv->sprom); | 242 | bcm47xx_fill_sprom(&iv->sprom, NULL); |
162 | 243 | ||
163 | if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) | 244 | if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) |
164 | iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); | 245 | iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); |
@@ -172,6 +253,11 @@ void __init plat_mem_setup(void) | |||
172 | char buf[100]; | 253 | char buf[100]; |
173 | struct ssb_mipscore *mcore; | 254 | struct ssb_mipscore *mcore; |
174 | 255 | ||
256 | err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom); | ||
257 | if (err) | ||
258 | printk(KERN_WARNING "bcm47xx: someone else already registered" | ||
259 | " a ssb SPROM callback handler (err %d)\n", err); | ||
260 | |||
175 | err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE, | 261 | err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE, |
176 | bcm47xx_get_invariants); | 262 | bcm47xx_get_invariants); |
177 | if (err) | 263 | if (err) |
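
Two lookup layers are introduced above: nvram_getprefix() prepends an optional per-device prefix (the "pci/1/1/" strings built in bcm47xx_get_sprom()) to a key before the plain nvram_getenv() lookup, and nvram_getu32() reassembles a 32-bit value from two 16-bit variables named "<name>0" and "<name>1". A minimal user-space sketch of that logic, with a hypothetical getenv-backed store standing in for the real NVRAM API:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static const char *nvram_get(const char *key)
{
	return getenv(key);		/* stand-in backend, not the kernel API */
}

static uint32_t nvram_getu32_sketch(const char *prefix, const char *name)
{
	char key[100];
	const char *v0, *v1;

	/* e.g. "pci/1/1/ofdm2gpo0" holds the low half, "...gpo1" the high */
	snprintf(key, sizeof(key), "%s%s0", prefix ? prefix : "", name);
	v0 = nvram_get(key);
	snprintf(key, sizeof(key), "%s%s1", prefix ? prefix : "", name);
	v1 = nvram_get(key);
	if (!v0 || !v1)
		return 0;		/* reads as "unset", as in the patch */

	return ((uint32_t)strtoul(v1, NULL, 0) << 16) |
	       (uint16_t)strtoul(v0, NULL, 0);
}

int main(void)
{
	setenv("pci/1/1/ofdm2gpo0", "0x1234", 1);
	setenv("pci/1/1/ofdm2gpo1", "0xabcd", 1);
	printf("ofdm2gpo = 0x%08x\n",
	       nvram_getu32_sketch("pci/1/1/", "ofdm2gpo"));	/* 0xabcd1234 */
	return 0;
}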
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index 8dba8cfb752f..40b223b603be 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c | |||
@@ -643,6 +643,17 @@ static struct ssb_sprom bcm63xx_sprom = { | |||
643 | .boardflags_lo = 0x2848, | 643 | .boardflags_lo = 0x2848, |
644 | .boardflags_hi = 0x0000, | 644 | .boardflags_hi = 0x0000, |
645 | }; | 645 | }; |
646 | |||
647 | int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out) | ||
648 | { | ||
649 | if (bus->bustype == SSB_BUSTYPE_PCI) { | ||
650 | memcpy(out, &bcm63xx_sprom, sizeof(struct ssb_sprom)); | ||
651 | return 0; | ||
652 | } else { | ||
653 | printk(KERN_ERR PFX "unable to fill SPROM for given bustype.\n"); | ||
654 | return -EINVAL; | ||
655 | } | ||
656 | } | ||
646 | #endif | 657 | #endif |
647 | 658 | ||
648 | /* | 659 | /* |
@@ -793,8 +804,9 @@ void __init board_prom_init(void) | |||
793 | if (!board_get_mac_address(bcm63xx_sprom.il0mac)) { | 804 | if (!board_get_mac_address(bcm63xx_sprom.il0mac)) { |
794 | memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); | 805 | memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); |
795 | memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); | 806 | memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); |
796 | if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0) | 807 | if (ssb_arch_register_fallback_sprom( |
797 | printk(KERN_ERR "failed to register fallback SPROM\n"); | 808 | &bcm63xx_get_fallback_sprom) < 0) |
809 | printk(KERN_ERR PFX "failed to register fallback SPROM\n"); | ||
798 | } | 810 | } |
799 | #endif | 811 | #endif |
800 | } | 812 | } |
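
Both this port and bcm47xx above now hand the ssb core a callback instead of a pre-filled structure, so fallback SPROM contents are produced only when a bus without a hardware SPROM actually asks for them. A sketch of that contract under the same assumption; my_sprom, my_get_fallback_sprom and my_sprom_setup are hypothetical names:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ssb/ssb.h>

static struct ssb_sprom my_sprom;	/* filled in elsewhere by board code */

/* Invoked by the ssb core only for buses lacking an on-chip SPROM. */
static int my_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
{
	if (bus->bustype != SSB_BUSTYPE_PCI)
		return -EINVAL;		/* decline; ssb reports the failure */
	memcpy(out, &my_sprom, sizeof(*out));
	return 0;
}

static int __init my_sprom_setup(void)
{
	/* Only one handler may be registered system-wide, which is why
	 * bcm47xx merely warns when this call fails. */
	return ssb_arch_register_fallback_sprom(&my_get_fallback_sprom);
}
arch_initcall(my_sprom_setup);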
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c index 88c9d963be88..9a6243676e22 100644 --- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c +++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c | |||
@@ -16,8 +16,8 @@ | |||
16 | 16 | ||
17 | int main(int argc, char *argv[]) | 17 | int main(int argc, char *argv[]) |
18 | { | 18 | { |
19 | unsigned long long vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; | ||
19 | struct stat sb; | 20 | struct stat sb; |
20 | uint64_t vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; | ||
21 | 21 | ||
22 | if (argc != 3) { | 22 | if (argc != 3) { |
23 | fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n", | 23 | fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n", |
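
The type change above is about printf/scanf conversions: the tool parses and prints the load address with "%llx", which the C standard ties to unsigned long long. A uint64_t may instead be unsigned long on 64-bit hosts and then needs the <inttypes.h> macros to stay correct, as this small example shows:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	unsigned long long a;
	uint64_t b;

	sscanf("0xffffffff80100000", "%llx", &a);	/* always matches the type */
	sscanf("0xffffffff80100000", "%" SCNx64, &b);	/* needed for uint64_t */
	printf("%llx %" PRIx64 "\n", a, b);
	return 0;
}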
diff --git a/arch/mips/boot/compressed/uart-alchemy.c b/arch/mips/boot/compressed/uart-alchemy.c index 1bff22fa089b..eb063e6dead9 100644 --- a/arch/mips/boot/compressed/uart-alchemy.c +++ b/arch/mips/boot/compressed/uart-alchemy.c | |||
@@ -3,5 +3,5 @@ | |||
3 | void putc(char c) | 3 | void putc(char c) |
4 | { | 4 | { |
5 | /* all current (Jan. 2010) in-kernel boards */ | 5 | /* all current (Jan. 2010) in-kernel boards */ |
6 | alchemy_uart_putchar(UART0_PHYS_ADDR, c); | 6 | alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); |
7 | } | 7 | } |
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index caae22858163..cad555ebeca3 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig | |||
@@ -1,11 +1,7 @@ | |||
1 | config CAVIUM_OCTEON_SPECIFIC_OPTIONS | 1 | if CPU_CAVIUM_OCTEON |
2 | bool "Enable Octeon specific options" | ||
3 | depends on CPU_CAVIUM_OCTEON | ||
4 | default "y" | ||
5 | 2 | ||
6 | config CAVIUM_CN63XXP1 | 3 | config CAVIUM_CN63XXP1 |
7 | bool "Enable CN63XXP1 errata worarounds" | 4 | bool "Enable CN63XXP1 errata worarounds" |
8 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
9 | default "n" | 5 | default "n" |
10 | help | 6 | help |
11 | The CN63XXP1 chip requires build time workarounds to | 7 | The CN63XXP1 chip requires build time workarounds to |
@@ -16,7 +12,6 @@ config CAVIUM_CN63XXP1 | |||
16 | 12 | ||
17 | config CAVIUM_OCTEON_2ND_KERNEL | 13 | config CAVIUM_OCTEON_2ND_KERNEL |
18 | bool "Build the kernel to be used as a 2nd kernel on the same chip" | 14 | bool "Build the kernel to be used as a 2nd kernel on the same chip" |
19 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
20 | default "n" | 15 | default "n" |
21 | help | 16 | help |
22 | This option configures this kernel to be linked at a different | 17 | This option configures this kernel to be linked at a different |
@@ -26,7 +21,6 @@ config CAVIUM_OCTEON_2ND_KERNEL | |||
26 | 21 | ||
27 | config CAVIUM_OCTEON_HW_FIX_UNALIGNED | 22 | config CAVIUM_OCTEON_HW_FIX_UNALIGNED |
28 | bool "Enable hardware fixups of unaligned loads and stores" | 23 | bool "Enable hardware fixups of unaligned loads and stores" |
29 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
30 | default "y" | 24 | default "y" |
31 | help | 25 | help |
32 | Configure the Octeon hardware to automatically fix unaligned loads | 26 | Configure the Octeon hardware to automatically fix unaligned loads |
@@ -38,7 +32,6 @@ config CAVIUM_OCTEON_HW_FIX_UNALIGNED | |||
38 | 32 | ||
39 | config CAVIUM_OCTEON_CVMSEG_SIZE | 33 | config CAVIUM_OCTEON_CVMSEG_SIZE |
40 | int "Number of L1 cache lines reserved for CVMSEG memory" | 34 | int "Number of L1 cache lines reserved for CVMSEG memory" |
41 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
42 | range 0 54 | 35 | range 0 54 |
43 | default 1 | 36 | default 1 |
44 | help | 37 | help |
@@ -50,7 +43,6 @@ config CAVIUM_OCTEON_CVMSEG_SIZE | |||
50 | 43 | ||
51 | config CAVIUM_OCTEON_LOCK_L2 | 44 | config CAVIUM_OCTEON_LOCK_L2 |
52 | bool "Lock often used kernel code in the L2" | 45 | bool "Lock often used kernel code in the L2" |
53 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
54 | default "y" | 46 | default "y" |
55 | help | 47 | help |
56 | Enable locking parts of the kernel into the L2 cache. | 48 | Enable locking parts of the kernel into the L2 cache. |
@@ -93,7 +85,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY | |||
93 | config ARCH_SPARSEMEM_ENABLE | 85 | config ARCH_SPARSEMEM_ENABLE |
94 | def_bool y | 86 | def_bool y |
95 | select SPARSEMEM_STATIC | 87 | select SPARSEMEM_STATIC |
96 | depends on CPU_CAVIUM_OCTEON | ||
97 | 88 | ||
98 | config CAVIUM_OCTEON_HELPER | 89 | config CAVIUM_OCTEON_HELPER |
99 | def_bool y | 90 | def_bool y |
@@ -107,6 +98,8 @@ config NEED_SG_DMA_LENGTH | |||
107 | 98 | ||
108 | config SWIOTLB | 99 | config SWIOTLB |
109 | def_bool y | 100 | def_bool y |
110 | depends on CPU_CAVIUM_OCTEON | ||
111 | select IOMMU_HELPER | 101 | select IOMMU_HELPER |
112 | select NEED_SG_DMA_LENGTH | 102 | select NEED_SG_DMA_LENGTH |
103 | |||
104 | |||
105 | endif # CPU_CAVIUM_OCTEON | ||
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c index 26bf71130bf8..29d56afbb02d 100644 --- a/arch/mips/cavium-octeon/csrc-octeon.c +++ b/arch/mips/cavium-octeon/csrc-octeon.c | |||
@@ -105,8 +105,7 @@ unsigned long long notrace sched_clock(void) | |||
105 | void __init plat_time_init(void) | 105 | void __init plat_time_init(void) |
106 | { | 106 | { |
107 | clocksource_mips.rating = 300; | 107 | clocksource_mips.rating = 300; |
108 | clocksource_set_clock(&clocksource_mips, octeon_get_clock_rate()); | 108 | clocksource_register_hz(&clocksource_mips, octeon_get_clock_rate()); |
109 | clocksource_register(&clocksource_mips); | ||
110 | } | 109 | } |
111 | 110 | ||
112 | static u64 octeon_udelay_factor; | 111 | static u64 octeon_udelay_factor; |
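
clocksource_register_hz() replaces the open-coded set-clock-then-register pair: internally it derives a mult/shift pair such that ns = (cycles * mult) >> shift for a counter running at the given rate, then registers the clocksource. A simplified user-space sketch of that derivation (the kernel version also bounds the usable conversion range, which this toy ignores):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static void calc_mult_shift_sketch(uint32_t *mult, uint32_t *shift, uint32_t hz)
{
	uint32_t s = 32;
	uint64_t m;

	/* Largest shift whose mult still fits in 32 bits. */
	do {
		s--;
		m = (NSEC_PER_SEC << s) / hz;
	} while (m > 0xffffffffULL);

	*mult = (uint32_t)m;
	*shift = s;
}

int main(void)
{
	uint32_t mult, shift;

	calc_mult_shift_sketch(&mult, &shift, 500000000);	/* 500 MHz core */
	printf("mult=%u shift=%u\n", mult, shift);	/* ns = cycles*2 here */
	return 0;
}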
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 0707fae3f0ee..2d9028f1474c 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -288,7 +288,6 @@ void octeon_user_io_init(void) | |||
288 | union octeon_cvmemctl cvmmemctl; | 288 | union octeon_cvmemctl cvmmemctl; |
289 | union cvmx_iob_fau_timeout fau_timeout; | 289 | union cvmx_iob_fau_timeout fau_timeout; |
290 | union cvmx_pow_nw_tim nm_tim; | 290 | union cvmx_pow_nw_tim nm_tim; |
291 | uint64_t cvmctl; | ||
292 | 291 | ||
293 | /* Get the current settings for CP0_CVMMEMCTL_REG */ | 292 | /* Get the current settings for CP0_CVMMEMCTL_REG */ |
294 | cvmmemctl.u64 = read_c0_cvmmemctl(); | 293 | cvmmemctl.u64 = read_c0_cvmmemctl(); |
@@ -392,12 +391,6 @@ void octeon_user_io_init(void) | |||
392 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, | 391 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, |
393 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128); | 392 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128); |
394 | 393 | ||
395 | /* Move the performance counter interrupts to IRQ 6 */ | ||
396 | cvmctl = read_c0_cvmctl(); | ||
397 | cvmctl &= ~(7 << 7); | ||
398 | cvmctl |= 6 << 7; | ||
399 | write_c0_cvmctl(cvmctl); | ||
400 | |||
401 | /* Set a default for the hardware timeouts */ | 394 | /* Set a default for the hardware timeouts */ |
402 | fau_timeout.u64 = 0; | 395 | fau_timeout.u64 = 0; |
403 | fau_timeout.s.tout_val = 0xfff; | 396 | fau_timeout.s.tout_val = 0xfff; |
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index ba78b21cc8d0..8b606423bbd7 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c | |||
@@ -37,13 +37,15 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id) | |||
37 | uint64_t action; | 37 | uint64_t action; |
38 | 38 | ||
39 | /* Load the mailbox register to figure out what we're supposed to do */ | 39 | /* Load the mailbox register to figure out what we're supposed to do */ |
40 | action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)); | 40 | action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff; |
41 | 41 | ||
42 | /* Clear the mailbox to clear the interrupt */ | 42 | /* Clear the mailbox to clear the interrupt */ |
43 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); | 43 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); |
44 | 44 | ||
45 | if (action & SMP_CALL_FUNCTION) | 45 | if (action & SMP_CALL_FUNCTION) |
46 | smp_call_function_interrupt(); | 46 | smp_call_function_interrupt(); |
47 | if (action & SMP_RESCHEDULE_YOURSELF) | ||
48 | scheduler_ipi(); | ||
47 | 49 | ||
48 | /* Check if we've been told to flush the icache */ | 50 | /* Check if we've been told to flush the icache */ |
49 | if (action & SMP_ICACHE_FLUSH) | 51 | if (action & SMP_ICACHE_FLUSH) |
@@ -200,16 +202,15 @@ void octeon_prepare_cpus(unsigned int max_cpus) | |||
200 | if (labi->labi_signature != LABI_SIGNATURE) | 202 | if (labi->labi_signature != LABI_SIGNATURE) |
201 | panic("The bootloader version on this board is incorrect."); | 203 | panic("The bootloader version on this board is incorrect."); |
202 | #endif | 204 | #endif |
203 | 205 | /* | |
204 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); | 206 | * Only the low order mailbox bits are used for IPIs, leave |
207 | * the other bits alone. | ||
208 | */ | ||
209 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff); | ||
205 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, | 210 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, |
206 | "mailbox0", mailbox_interrupt)) { | 211 | "SMP-IPI", mailbox_interrupt)) { |
207 | panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); | 212 | panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); |
208 | } | 213 | } |
209 | if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_DISABLED, | ||
210 | "mailbox1", mailbox_interrupt)) { | ||
211 | panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n"); | ||
212 | } | ||
213 | } | 214 | } |
214 | 215 | ||
215 | /** | 216 | /** |
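
Because only bits 0..15 of the mailbox are reserved for IPIs now, the handler must mask what it reads and acknowledge exactly those bits, so any future use of the high bits survives. A runnable toy of that dispatch pattern; the mbox_* helpers and IPI_* names are stand-ins for the cvmx CSR accessors and SMP_* constants:

#include <stdio.h>
#include <stdint.h>

#define IPI_CALL_FUNCTION	(1u << 0)	/* bit layout is illustrative */
#define IPI_RESCHEDULE		(1u << 1)
#define IPI_ICACHE_FLUSH	(1u << 2)

static uint64_t mbox_reg;	/* stands in for the per-core mailbox CSR */

static uint64_t mbox_read(void) { return mbox_reg; }
static void mbox_ack(uint64_t bits) { mbox_reg &= ~bits; }	/* W1C semantics */

static void mailbox_ipi(void)
{
	/* Mask to the IPI field; bits 16..63 are not ours to touch. */
	uint64_t action = mbox_read() & 0xffff;

	/* Ack only the bits we consumed. */
	mbox_ack(action);

	if (action & IPI_CALL_FUNCTION)
		puts("call-function IPI");
	if (action & IPI_RESCHEDULE)
		puts("reschedule IPI");
	if (action & IPI_ICACHE_FLUSH)
		puts("icache-flush IPI");
}

int main(void)
{
	mbox_reg = (1ull << 20) | IPI_RESCHEDULE;	/* high bit must survive */
	mailbox_ipi();
	printf("mbox after ack: %#llx\n", (unsigned long long)mbox_reg);
	return 0;
}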
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig index 167c1d07b809..b6acd2f256b6 100644 --- a/arch/mips/configs/lemote2f_defconfig +++ b/arch/mips/configs/lemote2f_defconfig | |||
@@ -86,8 +86,8 @@ CONFIG_NET_SCHED=y | |||
86 | CONFIG_NET_EMATCH=y | 86 | CONFIG_NET_EMATCH=y |
87 | CONFIG_NET_CLS_ACT=y | 87 | CONFIG_NET_CLS_ACT=y |
88 | CONFIG_BT=m | 88 | CONFIG_BT=m |
89 | CONFIG_BT_L2CAP=m | 89 | CONFIG_BT_L2CAP=y |
90 | CONFIG_BT_SCO=m | 90 | CONFIG_BT_SCO=y |
91 | CONFIG_BT_RFCOMM=m | 91 | CONFIG_BT_RFCOMM=m |
92 | CONFIG_BT_RFCOMM_TTY=y | 92 | CONFIG_BT_RFCOMM_TTY=y |
93 | CONFIG_BT_BNEP=m | 93 | CONFIG_BT_BNEP=m |
@@ -329,7 +329,7 @@ CONFIG_USB_LED=m | |||
329 | CONFIG_USB_GADGET=m | 329 | CONFIG_USB_GADGET=m |
330 | CONFIG_USB_GADGET_M66592=y | 330 | CONFIG_USB_GADGET_M66592=y |
331 | CONFIG_MMC=m | 331 | CONFIG_MMC=m |
332 | CONFIG_LEDS_CLASS=m | 332 | CONFIG_LEDS_CLASS=y |
333 | CONFIG_STAGING=y | 333 | CONFIG_STAGING=y |
334 | # CONFIG_STAGING_EXCLUDE_BUILD is not set | 334 | # CONFIG_STAGING_EXCLUDE_BUILD is not set |
335 | CONFIG_FB_SM7XX=y | 335 | CONFIG_FB_SM7XX=y |
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index 7270f3183bda..5527abbb7dea 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig | |||
@@ -374,7 +374,7 @@ CONFIG_FB_CIRRUS=y | |||
374 | # CONFIG_VGA_CONSOLE is not set | 374 | # CONFIG_VGA_CONSOLE is not set |
375 | CONFIG_FRAMEBUFFER_CONSOLE=y | 375 | CONFIG_FRAMEBUFFER_CONSOLE=y |
376 | CONFIG_HID=m | 376 | CONFIG_HID=m |
377 | CONFIG_LEDS_CLASS=m | 377 | CONFIG_LEDS_CLASS=y |
378 | CONFIG_LEDS_TRIGGER_TIMER=m | 378 | CONFIG_LEDS_TRIGGER_TIMER=m |
379 | CONFIG_LEDS_TRIGGER_IDE_DISK=y | 379 | CONFIG_LEDS_TRIGGER_IDE_DISK=y |
380 | CONFIG_LEDS_TRIGGER_HEARTBEAT=m | 380 | CONFIG_LEDS_TRIGGER_HEARTBEAT=m |
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index a97a42c6b2c8..37862b2ce363 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig | |||
@@ -225,8 +225,8 @@ CONFIG_TOSHIBA_FIR=m | |||
225 | CONFIG_VLSI_FIR=m | 225 | CONFIG_VLSI_FIR=m |
226 | CONFIG_MCS_FIR=m | 226 | CONFIG_MCS_FIR=m |
227 | CONFIG_BT=m | 227 | CONFIG_BT=m |
228 | CONFIG_BT_L2CAP=m | 228 | CONFIG_BT_L2CAP=y |
229 | CONFIG_BT_SCO=m | 229 | CONFIG_BT_SCO=y |
230 | CONFIG_BT_RFCOMM=m | 230 | CONFIG_BT_RFCOMM=m |
231 | CONFIG_BT_RFCOMM_TTY=y | 231 | CONFIG_BT_RFCOMM_TTY=y |
232 | CONFIG_BT_BNEP=m | 232 | CONFIG_BT_BNEP=m |
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig new file mode 100644 index 000000000000..e4b399fdaa61 --- /dev/null +++ b/arch/mips/configs/nlm_xlr_defconfig | |||
@@ -0,0 +1,574 @@ | |||
1 | CONFIG_NLM_XLR_BOARD=y | ||
2 | CONFIG_HIGHMEM=y | ||
3 | CONFIG_KSM=y | ||
4 | CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 | ||
5 | CONFIG_SMP=y | ||
6 | CONFIG_NO_HZ=y | ||
7 | CONFIG_HIGH_RES_TIMERS=y | ||
8 | CONFIG_PREEMPT_VOLUNTARY=y | ||
9 | CONFIG_KEXEC=y | ||
10 | CONFIG_EXPERIMENTAL=y | ||
11 | CONFIG_CROSS_COMPILE="mips64-unknown-linux-gnu-" | ||
12 | # CONFIG_LOCALVERSION_AUTO is not set | ||
13 | CONFIG_SYSVIPC=y | ||
14 | CONFIG_POSIX_MQUEUE=y | ||
15 | CONFIG_BSD_PROCESS_ACCT=y | ||
16 | CONFIG_BSD_PROCESS_ACCT_V3=y | ||
17 | CONFIG_TASKSTATS=y | ||
18 | CONFIG_TASK_DELAY_ACCT=y | ||
19 | CONFIG_TASK_XACCT=y | ||
20 | CONFIG_TASK_IO_ACCOUNTING=y | ||
21 | CONFIG_AUDIT=y | ||
22 | CONFIG_NAMESPACES=y | ||
23 | CONFIG_SCHED_AUTOGROUP=y | ||
24 | CONFIG_BLK_DEV_INITRD=y | ||
25 | CONFIG_INITRAMFS_SOURCE="usr/dev_file_list usr/rootfs" | ||
26 | CONFIG_RD_BZIP2=y | ||
27 | CONFIG_RD_LZMA=y | ||
28 | CONFIG_INITRAMFS_COMPRESSION_GZIP=y | ||
29 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
30 | CONFIG_EXPERT=y | ||
31 | CONFIG_KALLSYMS_ALL=y | ||
32 | # CONFIG_ELF_CORE is not set | ||
33 | # CONFIG_PCSPKR_PLATFORM is not set | ||
34 | # CONFIG_PERF_EVENTS is not set | ||
35 | # CONFIG_COMPAT_BRK is not set | ||
36 | CONFIG_PROFILING=y | ||
37 | CONFIG_MODULES=y | ||
38 | CONFIG_MODULE_UNLOAD=y | ||
39 | CONFIG_MODVERSIONS=y | ||
40 | CONFIG_MODULE_SRCVERSION_ALL=y | ||
41 | CONFIG_BLK_DEV_INTEGRITY=y | ||
42 | CONFIG_BINFMT_MISC=m | ||
43 | CONFIG_PM_RUNTIME=y | ||
44 | CONFIG_PM_DEBUG=y | ||
45 | CONFIG_PACKET=y | ||
46 | CONFIG_UNIX=y | ||
47 | CONFIG_XFRM_USER=m | ||
48 | CONFIG_NET_KEY=m | ||
49 | CONFIG_INET=y | ||
50 | CONFIG_IP_MULTICAST=y | ||
51 | CONFIG_IP_ADVANCED_ROUTER=y | ||
52 | CONFIG_IP_MULTIPLE_TABLES=y | ||
53 | CONFIG_IP_ROUTE_MULTIPATH=y | ||
54 | CONFIG_IP_ROUTE_VERBOSE=y | ||
55 | CONFIG_NET_IPIP=m | ||
56 | CONFIG_IP_MROUTE=y | ||
57 | CONFIG_IP_PIMSM_V1=y | ||
58 | CONFIG_IP_PIMSM_V2=y | ||
59 | CONFIG_SYN_COOKIES=y | ||
60 | CONFIG_INET_AH=m | ||
61 | CONFIG_INET_ESP=m | ||
62 | CONFIG_INET_IPCOMP=m | ||
63 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | ||
64 | CONFIG_INET_XFRM_MODE_TUNNEL=m | ||
65 | CONFIG_INET_XFRM_MODE_BEET=m | ||
66 | CONFIG_TCP_CONG_ADVANCED=y | ||
67 | CONFIG_TCP_CONG_HSTCP=m | ||
68 | CONFIG_TCP_CONG_HYBLA=m | ||
69 | CONFIG_TCP_CONG_SCALABLE=m | ||
70 | CONFIG_TCP_CONG_LP=m | ||
71 | CONFIG_TCP_CONG_VENO=m | ||
72 | CONFIG_TCP_CONG_YEAH=m | ||
73 | CONFIG_TCP_CONG_ILLINOIS=m | ||
74 | CONFIG_TCP_MD5SIG=y | ||
75 | CONFIG_IPV6=y | ||
76 | CONFIG_IPV6_PRIVACY=y | ||
77 | CONFIG_INET6_AH=m | ||
78 | CONFIG_INET6_ESP=m | ||
79 | CONFIG_INET6_IPCOMP=m | ||
80 | CONFIG_INET6_XFRM_MODE_TRANSPORT=m | ||
81 | CONFIG_INET6_XFRM_MODE_TUNNEL=m | ||
82 | CONFIG_INET6_XFRM_MODE_BEET=m | ||
83 | CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m | ||
84 | CONFIG_IPV6_SIT=m | ||
85 | CONFIG_IPV6_TUNNEL=m | ||
86 | CONFIG_IPV6_MULTIPLE_TABLES=y | ||
87 | CONFIG_NETLABEL=y | ||
88 | CONFIG_NETFILTER=y | ||
89 | CONFIG_NF_CONNTRACK=m | ||
90 | CONFIG_NF_CONNTRACK_SECMARK=y | ||
91 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
92 | CONFIG_NF_CT_PROTO_UDPLITE=m | ||
93 | CONFIG_NF_CONNTRACK_AMANDA=m | ||
94 | CONFIG_NF_CONNTRACK_FTP=m | ||
95 | CONFIG_NF_CONNTRACK_H323=m | ||
96 | CONFIG_NF_CONNTRACK_IRC=m | ||
97 | CONFIG_NF_CONNTRACK_NETBIOS_NS=m | ||
98 | CONFIG_NF_CONNTRACK_PPTP=m | ||
99 | CONFIG_NF_CONNTRACK_SANE=m | ||
100 | CONFIG_NF_CONNTRACK_SIP=m | ||
101 | CONFIG_NF_CONNTRACK_TFTP=m | ||
102 | CONFIG_NF_CT_NETLINK=m | ||
103 | CONFIG_NETFILTER_TPROXY=m | ||
104 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | ||
105 | CONFIG_NETFILTER_XT_TARGET_CONNMARK=m | ||
106 | CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m | ||
107 | CONFIG_NETFILTER_XT_TARGET_DSCP=m | ||
108 | CONFIG_NETFILTER_XT_TARGET_MARK=m | ||
109 | CONFIG_NETFILTER_XT_TARGET_NFLOG=m | ||
110 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | ||
111 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | ||
112 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
113 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | ||
114 | CONFIG_NETFILTER_XT_TARGET_SECMARK=m | ||
115 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | ||
116 | CONFIG_NETFILTER_XT_MATCH_CLUSTER=m | ||
117 | CONFIG_NETFILTER_XT_MATCH_COMMENT=m | ||
118 | CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m | ||
119 | CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m | ||
120 | CONFIG_NETFILTER_XT_MATCH_CONNMARK=m | ||
121 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m | ||
122 | CONFIG_NETFILTER_XT_MATCH_DSCP=m | ||
123 | CONFIG_NETFILTER_XT_MATCH_ESP=m | ||
124 | CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m | ||
125 | CONFIG_NETFILTER_XT_MATCH_HELPER=m | ||
126 | CONFIG_NETFILTER_XT_MATCH_IPRANGE=m | ||
127 | CONFIG_NETFILTER_XT_MATCH_LENGTH=m | ||
128 | CONFIG_NETFILTER_XT_MATCH_LIMIT=m | ||
129 | CONFIG_NETFILTER_XT_MATCH_MAC=m | ||
130 | CONFIG_NETFILTER_XT_MATCH_MARK=m | ||
131 | CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m | ||
132 | CONFIG_NETFILTER_XT_MATCH_OSF=m | ||
133 | CONFIG_NETFILTER_XT_MATCH_OWNER=m | ||
134 | CONFIG_NETFILTER_XT_MATCH_POLICY=m | ||
135 | CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m | ||
136 | CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m | ||
137 | CONFIG_NETFILTER_XT_MATCH_QUOTA=m | ||
138 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | ||
139 | CONFIG_NETFILTER_XT_MATCH_REALM=m | ||
140 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | ||
141 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
142 | CONFIG_NETFILTER_XT_MATCH_STATE=m | ||
143 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | ||
144 | CONFIG_NETFILTER_XT_MATCH_STRING=m | ||
145 | CONFIG_NETFILTER_XT_MATCH_TCPMSS=m | ||
146 | CONFIG_NETFILTER_XT_MATCH_TIME=m | ||
147 | CONFIG_NETFILTER_XT_MATCH_U32=m | ||
148 | CONFIG_IP_VS=m | ||
149 | CONFIG_IP_VS_IPV6=y | ||
150 | CONFIG_IP_VS_PROTO_TCP=y | ||
151 | CONFIG_IP_VS_PROTO_UDP=y | ||
152 | CONFIG_IP_VS_PROTO_ESP=y | ||
153 | CONFIG_IP_VS_PROTO_AH=y | ||
154 | CONFIG_IP_VS_RR=m | ||
155 | CONFIG_IP_VS_WRR=m | ||
156 | CONFIG_IP_VS_LC=m | ||
157 | CONFIG_IP_VS_WLC=m | ||
158 | CONFIG_IP_VS_LBLC=m | ||
159 | CONFIG_IP_VS_LBLCR=m | ||
160 | CONFIG_IP_VS_DH=m | ||
161 | CONFIG_IP_VS_SH=m | ||
162 | CONFIG_IP_VS_SED=m | ||
163 | CONFIG_IP_VS_NQ=m | ||
164 | CONFIG_IP_VS_FTP=m | ||
165 | CONFIG_NF_CONNTRACK_IPV4=m | ||
166 | CONFIG_IP_NF_QUEUE=m | ||
167 | CONFIG_IP_NF_IPTABLES=m | ||
168 | CONFIG_IP_NF_MATCH_AH=m | ||
169 | CONFIG_IP_NF_MATCH_ECN=m | ||
170 | CONFIG_IP_NF_MATCH_TTL=m | ||
171 | CONFIG_IP_NF_FILTER=m | ||
172 | CONFIG_IP_NF_TARGET_REJECT=m | ||
173 | CONFIG_IP_NF_TARGET_LOG=m | ||
174 | CONFIG_IP_NF_TARGET_ULOG=m | ||
175 | CONFIG_NF_NAT=m | ||
176 | CONFIG_IP_NF_TARGET_MASQUERADE=m | ||
177 | CONFIG_IP_NF_TARGET_NETMAP=m | ||
178 | CONFIG_IP_NF_TARGET_REDIRECT=m | ||
179 | CONFIG_IP_NF_MANGLE=m | ||
180 | CONFIG_IP_NF_TARGET_CLUSTERIP=m | ||
181 | CONFIG_IP_NF_TARGET_ECN=m | ||
182 | CONFIG_IP_NF_TARGET_TTL=m | ||
183 | CONFIG_IP_NF_RAW=m | ||
184 | CONFIG_IP_NF_SECURITY=m | ||
185 | CONFIG_IP_NF_ARPTABLES=m | ||
186 | CONFIG_IP_NF_ARPFILTER=m | ||
187 | CONFIG_IP_NF_ARP_MANGLE=m | ||
188 | CONFIG_NF_CONNTRACK_IPV6=m | ||
189 | CONFIG_IP6_NF_QUEUE=m | ||
190 | CONFIG_IP6_NF_IPTABLES=m | ||
191 | CONFIG_IP6_NF_MATCH_AH=m | ||
192 | CONFIG_IP6_NF_MATCH_EUI64=m | ||
193 | CONFIG_IP6_NF_MATCH_FRAG=m | ||
194 | CONFIG_IP6_NF_MATCH_OPTS=m | ||
195 | CONFIG_IP6_NF_MATCH_HL=m | ||
196 | CONFIG_IP6_NF_MATCH_IPV6HEADER=m | ||
197 | CONFIG_IP6_NF_MATCH_MH=m | ||
198 | CONFIG_IP6_NF_MATCH_RT=m | ||
199 | CONFIG_IP6_NF_TARGET_HL=m | ||
200 | CONFIG_IP6_NF_TARGET_LOG=m | ||
201 | CONFIG_IP6_NF_FILTER=m | ||
202 | CONFIG_IP6_NF_TARGET_REJECT=m | ||
203 | CONFIG_IP6_NF_MANGLE=m | ||
204 | CONFIG_IP6_NF_RAW=m | ||
205 | CONFIG_IP6_NF_SECURITY=m | ||
206 | CONFIG_DECNET_NF_GRABULATOR=m | ||
207 | CONFIG_BRIDGE_NF_EBTABLES=m | ||
208 | CONFIG_BRIDGE_EBT_BROUTE=m | ||
209 | CONFIG_BRIDGE_EBT_T_FILTER=m | ||
210 | CONFIG_BRIDGE_EBT_T_NAT=m | ||
211 | CONFIG_BRIDGE_EBT_802_3=m | ||
212 | CONFIG_BRIDGE_EBT_AMONG=m | ||
213 | CONFIG_BRIDGE_EBT_ARP=m | ||
214 | CONFIG_BRIDGE_EBT_IP=m | ||
215 | CONFIG_BRIDGE_EBT_IP6=m | ||
216 | CONFIG_BRIDGE_EBT_LIMIT=m | ||
217 | CONFIG_BRIDGE_EBT_MARK=m | ||
218 | CONFIG_BRIDGE_EBT_PKTTYPE=m | ||
219 | CONFIG_BRIDGE_EBT_STP=m | ||
220 | CONFIG_BRIDGE_EBT_VLAN=m | ||
221 | CONFIG_BRIDGE_EBT_ARPREPLY=m | ||
222 | CONFIG_BRIDGE_EBT_DNAT=m | ||
223 | CONFIG_BRIDGE_EBT_MARK_T=m | ||
224 | CONFIG_BRIDGE_EBT_REDIRECT=m | ||
225 | CONFIG_BRIDGE_EBT_SNAT=m | ||
226 | CONFIG_BRIDGE_EBT_LOG=m | ||
227 | CONFIG_BRIDGE_EBT_ULOG=m | ||
228 | CONFIG_BRIDGE_EBT_NFLOG=m | ||
229 | CONFIG_IP_DCCP=m | ||
230 | CONFIG_RDS=m | ||
231 | CONFIG_RDS_TCP=m | ||
232 | CONFIG_TIPC=m | ||
233 | CONFIG_ATM=m | ||
234 | CONFIG_ATM_CLIP=m | ||
235 | CONFIG_ATM_LANE=m | ||
236 | CONFIG_ATM_MPOA=m | ||
237 | CONFIG_ATM_BR2684=m | ||
238 | CONFIG_BRIDGE=m | ||
239 | CONFIG_VLAN_8021Q=m | ||
240 | CONFIG_VLAN_8021Q_GVRP=y | ||
241 | CONFIG_DECNET=m | ||
242 | CONFIG_LLC2=m | ||
243 | CONFIG_IPX=m | ||
244 | CONFIG_ATALK=m | ||
245 | CONFIG_DEV_APPLETALK=m | ||
246 | CONFIG_IPDDP=m | ||
247 | CONFIG_IPDDP_ENCAP=y | ||
248 | CONFIG_IPDDP_DECAP=y | ||
249 | CONFIG_X25=m | ||
250 | CONFIG_LAPB=m | ||
251 | CONFIG_ECONET=m | ||
252 | CONFIG_ECONET_AUNUDP=y | ||
253 | CONFIG_ECONET_NATIVE=y | ||
254 | CONFIG_WAN_ROUTER=m | ||
255 | CONFIG_PHONET=m | ||
256 | CONFIG_IEEE802154=m | ||
257 | CONFIG_NET_SCHED=y | ||
258 | CONFIG_NET_SCH_CBQ=m | ||
259 | CONFIG_NET_SCH_HTB=m | ||
260 | CONFIG_NET_SCH_HFSC=m | ||
261 | CONFIG_NET_SCH_ATM=m | ||
262 | CONFIG_NET_SCH_PRIO=m | ||
263 | CONFIG_NET_SCH_MULTIQ=m | ||
264 | CONFIG_NET_SCH_RED=m | ||
265 | CONFIG_NET_SCH_SFQ=m | ||
266 | CONFIG_NET_SCH_TEQL=m | ||
267 | CONFIG_NET_SCH_TBF=m | ||
268 | CONFIG_NET_SCH_GRED=m | ||
269 | CONFIG_NET_SCH_DSMARK=m | ||
270 | CONFIG_NET_SCH_NETEM=m | ||
271 | CONFIG_NET_SCH_DRR=m | ||
272 | CONFIG_NET_SCH_INGRESS=m | ||
273 | CONFIG_NET_CLS_BASIC=m | ||
274 | CONFIG_NET_CLS_TCINDEX=m | ||
275 | CONFIG_NET_CLS_ROUTE4=m | ||
276 | CONFIG_NET_CLS_FW=m | ||
277 | CONFIG_NET_CLS_U32=m | ||
278 | CONFIG_CLS_U32_MARK=y | ||
279 | CONFIG_NET_CLS_RSVP=m | ||
280 | CONFIG_NET_CLS_RSVP6=m | ||
281 | CONFIG_NET_CLS_FLOW=m | ||
282 | CONFIG_NET_EMATCH=y | ||
283 | CONFIG_NET_EMATCH_CMP=m | ||
284 | CONFIG_NET_EMATCH_NBYTE=m | ||
285 | CONFIG_NET_EMATCH_U32=m | ||
286 | CONFIG_NET_EMATCH_META=m | ||
287 | CONFIG_NET_EMATCH_TEXT=m | ||
288 | CONFIG_NET_CLS_ACT=y | ||
289 | CONFIG_NET_ACT_POLICE=m | ||
290 | CONFIG_NET_ACT_GACT=m | ||
291 | CONFIG_GACT_PROB=y | ||
292 | CONFIG_NET_ACT_MIRRED=m | ||
293 | CONFIG_NET_ACT_IPT=m | ||
294 | CONFIG_NET_ACT_NAT=m | ||
295 | CONFIG_NET_ACT_PEDIT=m | ||
296 | CONFIG_NET_ACT_SIMP=m | ||
297 | CONFIG_NET_ACT_SKBEDIT=m | ||
298 | CONFIG_DCB=y | ||
299 | CONFIG_NET_PKTGEN=m | ||
300 | # CONFIG_WIRELESS is not set | ||
301 | CONFIG_DEVTMPFS=y | ||
302 | CONFIG_DEVTMPFS_MOUNT=y | ||
303 | # CONFIG_STANDALONE is not set | ||
304 | CONFIG_CONNECTOR=y | ||
305 | CONFIG_MTD=m | ||
306 | CONFIG_BLK_DEV_LOOP=y | ||
307 | CONFIG_BLK_DEV_CRYPTOLOOP=m | ||
308 | CONFIG_BLK_DEV_NBD=m | ||
309 | CONFIG_BLK_DEV_OSD=m | ||
310 | CONFIG_BLK_DEV_RAM=y | ||
311 | CONFIG_BLK_DEV_RAM_SIZE=65536 | ||
312 | CONFIG_CDROM_PKTCDVD=y | ||
313 | CONFIG_MISC_DEVICES=y | ||
314 | CONFIG_RAID_ATTRS=m | ||
315 | CONFIG_SCSI=y | ||
316 | CONFIG_SCSI_TGT=m | ||
317 | CONFIG_BLK_DEV_SD=y | ||
318 | CONFIG_CHR_DEV_ST=m | ||
319 | CONFIG_CHR_DEV_OSST=m | ||
320 | CONFIG_BLK_DEV_SR=y | ||
321 | CONFIG_CHR_DEV_SG=y | ||
322 | CONFIG_CHR_DEV_SCH=m | ||
323 | CONFIG_SCSI_MULTI_LUN=y | ||
324 | CONFIG_SCSI_CONSTANTS=y | ||
325 | CONFIG_SCSI_LOGGING=y | ||
326 | CONFIG_SCSI_SCAN_ASYNC=y | ||
327 | CONFIG_SCSI_SPI_ATTRS=m | ||
328 | CONFIG_SCSI_FC_TGT_ATTRS=y | ||
329 | CONFIG_SCSI_SAS_LIBSAS=m | ||
330 | CONFIG_SCSI_SRP_ATTRS=m | ||
331 | CONFIG_SCSI_SRP_TGT_ATTRS=y | ||
332 | CONFIG_ISCSI_TCP=m | ||
333 | CONFIG_LIBFCOE=m | ||
334 | CONFIG_SCSI_DEBUG=m | ||
335 | CONFIG_SCSI_DH=y | ||
336 | CONFIG_SCSI_DH_RDAC=m | ||
337 | CONFIG_SCSI_DH_HP_SW=m | ||
338 | CONFIG_SCSI_DH_EMC=m | ||
339 | CONFIG_SCSI_DH_ALUA=m | ||
340 | CONFIG_SCSI_OSD_INITIATOR=m | ||
341 | CONFIG_SCSI_OSD_ULD=m | ||
342 | # CONFIG_INPUT_MOUSEDEV is not set | ||
343 | CONFIG_INPUT_EVDEV=y | ||
344 | CONFIG_INPUT_EVBUG=m | ||
345 | # CONFIG_INPUT_KEYBOARD is not set | ||
346 | # CONFIG_INPUT_MOUSE is not set | ||
347 | # CONFIG_SERIO_I8042 is not set | ||
348 | CONFIG_SERIO_SERPORT=m | ||
349 | CONFIG_SERIO_LIBPS2=y | ||
350 | CONFIG_SERIO_RAW=m | ||
351 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
352 | CONFIG_DEVPTS_MULTIPLE_INSTANCES=y | ||
353 | CONFIG_LEGACY_PTY_COUNT=0 | ||
354 | CONFIG_SERIAL_NONSTANDARD=y | ||
355 | CONFIG_N_HDLC=m | ||
356 | # CONFIG_DEVKMEM is not set | ||
357 | CONFIG_STALDRV=y | ||
358 | CONFIG_SERIAL_8250=y | ||
359 | CONFIG_SERIAL_8250_CONSOLE=y | ||
360 | CONFIG_SERIAL_8250_NR_UARTS=48 | ||
361 | CONFIG_SERIAL_8250_EXTENDED=y | ||
362 | CONFIG_SERIAL_8250_MANY_PORTS=y | ||
363 | CONFIG_SERIAL_8250_SHARE_IRQ=y | ||
364 | CONFIG_SERIAL_8250_RSA=y | ||
365 | CONFIG_HW_RANDOM=y | ||
366 | CONFIG_HW_RANDOM_TIMERIOMEM=m | ||
367 | CONFIG_RAW_DRIVER=m | ||
368 | # CONFIG_HWMON is not set | ||
369 | # CONFIG_VGA_CONSOLE is not set | ||
370 | # CONFIG_HID_SUPPORT is not set | ||
371 | # CONFIG_USB_SUPPORT is not set | ||
372 | CONFIG_UIO=y | ||
373 | CONFIG_UIO_PDRV=m | ||
374 | CONFIG_UIO_PDRV_GENIRQ=m | ||
375 | CONFIG_EXT2_FS=y | ||
376 | CONFIG_EXT2_FS_XATTR=y | ||
377 | CONFIG_EXT2_FS_POSIX_ACL=y | ||
378 | CONFIG_EXT2_FS_SECURITY=y | ||
379 | CONFIG_EXT3_FS=y | ||
380 | CONFIG_EXT3_FS_POSIX_ACL=y | ||
381 | CONFIG_EXT3_FS_SECURITY=y | ||
382 | CONFIG_EXT4_FS=y | ||
383 | CONFIG_EXT4_FS_POSIX_ACL=y | ||
384 | CONFIG_EXT4_FS_SECURITY=y | ||
385 | CONFIG_GFS2_FS=m | ||
386 | CONFIG_GFS2_FS_LOCKING_DLM=y | ||
387 | CONFIG_OCFS2_FS=m | ||
388 | CONFIG_BTRFS_FS=m | ||
389 | CONFIG_BTRFS_FS_POSIX_ACL=y | ||
390 | CONFIG_NILFS2_FS=m | ||
391 | CONFIG_QUOTA_NETLINK_INTERFACE=y | ||
392 | # CONFIG_PRINT_QUOTA_WARNING is not set | ||
393 | CONFIG_QFMT_V1=m | ||
394 | CONFIG_QFMT_V2=m | ||
395 | CONFIG_AUTOFS4_FS=m | ||
396 | CONFIG_FUSE_FS=y | ||
397 | CONFIG_CUSE=m | ||
398 | CONFIG_FSCACHE=m | ||
399 | CONFIG_FSCACHE_STATS=y | ||
400 | CONFIG_FSCACHE_HISTOGRAM=y | ||
401 | CONFIG_CACHEFILES=m | ||
402 | CONFIG_ISO9660_FS=m | ||
403 | CONFIG_JOLIET=y | ||
404 | CONFIG_ZISOFS=y | ||
405 | CONFIG_UDF_FS=m | ||
406 | CONFIG_MSDOS_FS=m | ||
407 | CONFIG_VFAT_FS=m | ||
408 | CONFIG_NTFS_FS=m | ||
409 | CONFIG_PROC_KCORE=y | ||
410 | CONFIG_TMPFS=y | ||
411 | CONFIG_TMPFS_POSIX_ACL=y | ||
412 | CONFIG_CONFIGFS_FS=y | ||
413 | CONFIG_ADFS_FS=m | ||
414 | CONFIG_AFFS_FS=m | ||
415 | CONFIG_ECRYPT_FS=y | ||
416 | CONFIG_HFS_FS=m | ||
417 | CONFIG_HFSPLUS_FS=m | ||
418 | CONFIG_BEFS_FS=m | ||
419 | CONFIG_BFS_FS=m | ||
420 | CONFIG_EFS_FS=m | ||
421 | CONFIG_CRAMFS=m | ||
422 | CONFIG_SQUASHFS=m | ||
423 | CONFIG_VXFS_FS=m | ||
424 | CONFIG_MINIX_FS=m | ||
425 | CONFIG_OMFS_FS=m | ||
426 | CONFIG_HPFS_FS=m | ||
427 | CONFIG_QNX4FS_FS=m | ||
428 | CONFIG_ROMFS_FS=m | ||
429 | CONFIG_SYSV_FS=m | ||
430 | CONFIG_UFS_FS=m | ||
431 | CONFIG_EXOFS_FS=m | ||
432 | CONFIG_NFS_FS=m | ||
433 | CONFIG_NFS_V3=y | ||
434 | CONFIG_NFS_V3_ACL=y | ||
435 | CONFIG_NFS_V4=y | ||
436 | CONFIG_NFS_FSCACHE=y | ||
437 | CONFIG_NFSD=m | ||
438 | CONFIG_NFSD_V3_ACL=y | ||
439 | CONFIG_NFSD_V4=y | ||
440 | CONFIG_CIFS=m | ||
441 | CONFIG_CIFS_WEAK_PW_HASH=y | ||
442 | CONFIG_CIFS_UPCALL=y | ||
443 | CONFIG_CIFS_XATTR=y | ||
444 | CONFIG_CIFS_POSIX=y | ||
445 | CONFIG_CIFS_DFS_UPCALL=y | ||
446 | CONFIG_CIFS_EXPERIMENTAL=y | ||
447 | CONFIG_NCP_FS=m | ||
448 | CONFIG_NCPFS_PACKET_SIGNING=y | ||
449 | CONFIG_NCPFS_IOCTL_LOCKING=y | ||
450 | CONFIG_NCPFS_STRONG=y | ||
451 | CONFIG_NCPFS_NFS_NS=y | ||
452 | CONFIG_NCPFS_OS2_NS=y | ||
453 | CONFIG_NCPFS_NLS=y | ||
454 | CONFIG_NCPFS_EXTRAS=y | ||
455 | CONFIG_CODA_FS=m | ||
456 | CONFIG_AFS_FS=m | ||
457 | CONFIG_PARTITION_ADVANCED=y | ||
458 | CONFIG_ACORN_PARTITION=y | ||
459 | CONFIG_ACORN_PARTITION_ICS=y | ||
460 | CONFIG_ACORN_PARTITION_RISCIX=y | ||
461 | CONFIG_OSF_PARTITION=y | ||
462 | CONFIG_AMIGA_PARTITION=y | ||
463 | CONFIG_ATARI_PARTITION=y | ||
464 | CONFIG_MAC_PARTITION=y | ||
465 | CONFIG_BSD_DISKLABEL=y | ||
466 | CONFIG_MINIX_SUBPARTITION=y | ||
467 | CONFIG_SOLARIS_X86_PARTITION=y | ||
468 | CONFIG_UNIXWARE_DISKLABEL=y | ||
469 | CONFIG_LDM_PARTITION=y | ||
470 | CONFIG_SGI_PARTITION=y | ||
471 | CONFIG_ULTRIX_PARTITION=y | ||
472 | CONFIG_SUN_PARTITION=y | ||
473 | CONFIG_KARMA_PARTITION=y | ||
474 | CONFIG_EFI_PARTITION=y | ||
475 | CONFIG_SYSV68_PARTITION=y | ||
476 | CONFIG_NLS=y | ||
477 | CONFIG_NLS_DEFAULT="cp437" | ||
478 | CONFIG_NLS_CODEPAGE_437=m | ||
479 | CONFIG_NLS_CODEPAGE_737=m | ||
480 | CONFIG_NLS_CODEPAGE_775=m | ||
481 | CONFIG_NLS_CODEPAGE_850=m | ||
482 | CONFIG_NLS_CODEPAGE_852=m | ||
483 | CONFIG_NLS_CODEPAGE_855=m | ||
484 | CONFIG_NLS_CODEPAGE_857=m | ||
485 | CONFIG_NLS_CODEPAGE_860=m | ||
486 | CONFIG_NLS_CODEPAGE_861=m | ||
487 | CONFIG_NLS_CODEPAGE_862=m | ||
488 | CONFIG_NLS_CODEPAGE_863=m | ||
489 | CONFIG_NLS_CODEPAGE_864=m | ||
490 | CONFIG_NLS_CODEPAGE_865=m | ||
491 | CONFIG_NLS_CODEPAGE_866=m | ||
492 | CONFIG_NLS_CODEPAGE_869=m | ||
493 | CONFIG_NLS_CODEPAGE_936=m | ||
494 | CONFIG_NLS_CODEPAGE_950=m | ||
495 | CONFIG_NLS_CODEPAGE_932=m | ||
496 | CONFIG_NLS_CODEPAGE_949=m | ||
497 | CONFIG_NLS_CODEPAGE_874=m | ||
498 | CONFIG_NLS_ISO8859_8=m | ||
499 | CONFIG_NLS_CODEPAGE_1250=m | ||
500 | CONFIG_NLS_CODEPAGE_1251=m | ||
501 | CONFIG_NLS_ASCII=m | ||
502 | CONFIG_NLS_ISO8859_1=m | ||
503 | CONFIG_NLS_ISO8859_2=m | ||
504 | CONFIG_NLS_ISO8859_3=m | ||
505 | CONFIG_NLS_ISO8859_4=m | ||
506 | CONFIG_NLS_ISO8859_5=m | ||
507 | CONFIG_NLS_ISO8859_6=m | ||
508 | CONFIG_NLS_ISO8859_7=m | ||
509 | CONFIG_NLS_ISO8859_9=m | ||
510 | CONFIG_NLS_ISO8859_13=m | ||
511 | CONFIG_NLS_ISO8859_14=m | ||
512 | CONFIG_NLS_ISO8859_15=m | ||
513 | CONFIG_NLS_KOI8_R=m | ||
514 | CONFIG_NLS_KOI8_U=m | ||
515 | CONFIG_PRINTK_TIME=y | ||
516 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | ||
517 | # CONFIG_ENABLE_MUST_CHECK is not set | ||
518 | CONFIG_UNUSED_SYMBOLS=y | ||
519 | CONFIG_DEBUG_KERNEL=y | ||
520 | CONFIG_DETECT_HUNG_TASK=y | ||
521 | CONFIG_SCHEDSTATS=y | ||
522 | CONFIG_TIMER_STATS=y | ||
523 | CONFIG_DEBUG_INFO=y | ||
524 | CONFIG_DEBUG_MEMORY_INIT=y | ||
525 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
526 | CONFIG_SCHED_TRACER=y | ||
527 | CONFIG_BLK_DEV_IO_TRACE=y | ||
528 | CONFIG_KGDB=y | ||
529 | CONFIG_SECURITY=y | ||
530 | CONFIG_SECURITY_NETWORK=y | ||
531 | CONFIG_LSM_MMAP_MIN_ADDR=0 | ||
532 | CONFIG_SECURITY_SELINUX=y | ||
533 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y | ||
534 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 | ||
535 | CONFIG_SECURITY_SELINUX_DISABLE=y | ||
536 | CONFIG_SECURITY_SMACK=y | ||
537 | CONFIG_SECURITY_TOMOYO=y | ||
538 | CONFIG_CRYPTO_NULL=m | ||
539 | CONFIG_CRYPTO_CRYPTD=m | ||
540 | CONFIG_CRYPTO_TEST=m | ||
541 | CONFIG_CRYPTO_CCM=m | ||
542 | CONFIG_CRYPTO_GCM=m | ||
543 | CONFIG_CRYPTO_CTS=m | ||
544 | CONFIG_CRYPTO_LRW=m | ||
545 | CONFIG_CRYPTO_PCBC=m | ||
546 | CONFIG_CRYPTO_XTS=m | ||
547 | CONFIG_CRYPTO_HMAC=y | ||
548 | CONFIG_CRYPTO_XCBC=m | ||
549 | CONFIG_CRYPTO_VMAC=m | ||
550 | CONFIG_CRYPTO_MICHAEL_MIC=m | ||
551 | CONFIG_CRYPTO_RMD128=m | ||
552 | CONFIG_CRYPTO_RMD160=m | ||
553 | CONFIG_CRYPTO_RMD256=m | ||
554 | CONFIG_CRYPTO_RMD320=m | ||
555 | CONFIG_CRYPTO_SHA256=m | ||
556 | CONFIG_CRYPTO_SHA512=m | ||
557 | CONFIG_CRYPTO_TGR192=m | ||
558 | CONFIG_CRYPTO_WP512=m | ||
559 | CONFIG_CRYPTO_ANUBIS=m | ||
560 | CONFIG_CRYPTO_BLOWFISH=m | ||
561 | CONFIG_CRYPTO_CAMELLIA=m | ||
562 | CONFIG_CRYPTO_CAST5=m | ||
563 | CONFIG_CRYPTO_CAST6=m | ||
564 | CONFIG_CRYPTO_FCRYPT=m | ||
565 | CONFIG_CRYPTO_KHAZAD=m | ||
566 | CONFIG_CRYPTO_SALSA20=m | ||
567 | CONFIG_CRYPTO_SEED=m | ||
568 | CONFIG_CRYPTO_SERPENT=m | ||
569 | CONFIG_CRYPTO_TEA=m | ||
570 | CONFIG_CRYPTO_TWOFISH=m | ||
571 | CONFIG_CRYPTO_ZLIB=m | ||
572 | CONFIG_CRYPTO_LZO=m | ||
573 | CONFIG_CRC_CCITT=m | ||
574 | CONFIG_CRC7=m | ||
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h index 650ac9ba734c..b4db69fbc40c 100644 --- a/arch/mips/include/asm/cache.h +++ b/arch/mips/include/asm/cache.h | |||
@@ -17,6 +17,6 @@ | |||
17 | #define SMP_CACHE_SHIFT L1_CACHE_SHIFT | 17 | #define SMP_CACHE_SHIFT L1_CACHE_SHIFT |
18 | #define SMP_CACHE_BYTES L1_CACHE_BYTES | 18 | #define SMP_CACHE_BYTES L1_CACHE_BYTES |
19 | 19 | ||
20 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) | 20 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) |
21 | 21 | ||
22 | #endif /* _ASM_CACHE_H */ | 22 | #endif /* _ASM_CACHE_H */ |
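
The extra dot follows the tree-wide convention for kernel-defined sections (.data..read_mostly, .data..cacheline_aligned, and friends), which keeps them from colliding with the .data.<symbol> names gcc itself emits under -fdata-sections. Usage of the annotation is unchanged; the variable name below is just illustrative:

/* Written rarely (probe/init time), read on hot paths: grouping such
 * variables in .data..read_mostly keeps them off write-hot cachelines. */
static unsigned int my_clock_rate __read_mostly;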
diff --git a/arch/mips/include/asm/cevt-r4k.h b/arch/mips/include/asm/cevt-r4k.h index fa4328f9124f..65f9bdd02f1f 100644 --- a/arch/mips/include/asm/cevt-r4k.h +++ b/arch/mips/include/asm/cevt-r4k.h | |||
@@ -14,6 +14,9 @@ | |||
14 | #ifndef __ASM_CEVT_R4K_H | 14 | #ifndef __ASM_CEVT_R4K_H |
15 | #define __ASM_CEVT_R4K_H | 15 | #define __ASM_CEVT_R4K_H |
16 | 16 | ||
17 | #include <linux/clockchips.h> | ||
18 | #include <asm/time.h> | ||
19 | |||
17 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); | 20 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
18 | 21 | ||
19 | void mips_event_handler(struct clock_event_device *dev); | 22 | void mips_event_handler(struct clock_event_device *dev); |
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 86877539c6e8..34c0d3cb116f 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #define PRID_COMP_TOSHIBA 0x070000 | 33 | #define PRID_COMP_TOSHIBA 0x070000 |
34 | #define PRID_COMP_LSI 0x080000 | 34 | #define PRID_COMP_LSI 0x080000 |
35 | #define PRID_COMP_LEXRA 0x0b0000 | 35 | #define PRID_COMP_LEXRA 0x0b0000 |
36 | #define PRID_COMP_NETLOGIC 0x0c0000 | ||
36 | #define PRID_COMP_CAVIUM 0x0d0000 | 37 | #define PRID_COMP_CAVIUM 0x0d0000 |
37 | #define PRID_COMP_INGENIC 0xd00000 | 38 | #define PRID_COMP_INGENIC 0xd00000 |
38 | 39 | ||
@@ -142,6 +143,31 @@ | |||
142 | #define PRID_IMP_JZRISC 0x0200 | 143 | #define PRID_IMP_JZRISC 0x0200 |
143 | 144 | ||
144 | /* | 145 | /* |
146 | * These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC | ||
147 | */ | ||
148 | #define PRID_IMP_NETLOGIC_XLR732 0x0000 | ||
149 | #define PRID_IMP_NETLOGIC_XLR716 0x0200 | ||
150 | #define PRID_IMP_NETLOGIC_XLR532 0x0900 | ||
151 | #define PRID_IMP_NETLOGIC_XLR308 0x0600 | ||
152 | #define PRID_IMP_NETLOGIC_XLR532C 0x0800 | ||
153 | #define PRID_IMP_NETLOGIC_XLR516C 0x0a00 | ||
154 | #define PRID_IMP_NETLOGIC_XLR508C 0x0b00 | ||
155 | #define PRID_IMP_NETLOGIC_XLR308C 0x0f00 | ||
156 | #define PRID_IMP_NETLOGIC_XLS608 0x8000 | ||
157 | #define PRID_IMP_NETLOGIC_XLS408 0x8800 | ||
158 | #define PRID_IMP_NETLOGIC_XLS404 0x8c00 | ||
159 | #define PRID_IMP_NETLOGIC_XLS208 0x8e00 | ||
160 | #define PRID_IMP_NETLOGIC_XLS204 0x8f00 | ||
161 | #define PRID_IMP_NETLOGIC_XLS108 0xce00 | ||
162 | #define PRID_IMP_NETLOGIC_XLS104 0xcf00 | ||
163 | #define PRID_IMP_NETLOGIC_XLS616B 0x4000 | ||
164 | #define PRID_IMP_NETLOGIC_XLS608B 0x4a00 | ||
165 | #define PRID_IMP_NETLOGIC_XLS416B 0x4400 | ||
166 | #define PRID_IMP_NETLOGIC_XLS412B 0x4c00 | ||
167 | #define PRID_IMP_NETLOGIC_XLS408B 0x4e00 | ||
168 | #define PRID_IMP_NETLOGIC_XLS404B 0x4f00 | ||
169 | |||
170 | /* | ||
145 | * Definitions for 7:0 on legacy processors | 171 | * Definitions for 7:0 on legacy processors |
146 | */ | 172 | */ |
147 | 173 | ||
@@ -234,6 +260,7 @@ enum cpu_type_enum { | |||
234 | */ | 260 | */ |
235 | CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2, | 261 | CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2, |
236 | CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, | 262 | CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, |
263 | CPU_XLR, | ||
237 | 264 | ||
238 | CPU_LAST | 265 | CPU_LAST |
239 | }; | 266 | }; |
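
For reference when reading the new table: bits 23:16 of the PRId register select the company and bits 15:8 the implementation, so probe code keys on those two masks. A small self-contained decode example with a faked PRId value:

#include <stdio.h>
#include <stdint.h>

#define PRID_COMP_NETLOGIC		0x0c0000
#define PRID_IMP_NETLOGIC_XLR732	0x0000
#define PRID_IMP_NETLOGIC_XLS408	0x8800

static uint32_t read_prid(void) { return 0x0c8803; }	/* faked XLS408 PRId */

int main(void)
{
	uint32_t prid = read_prid();

	if ((prid & 0xff0000) == PRID_COMP_NETLOGIC) {
		switch (prid & 0xff00) {
		case PRID_IMP_NETLOGIC_XLR732:
			puts("XLR732");
			break;
		case PRID_IMP_NETLOGIC_XLS408:
			puts("XLS408");
			break;
		default:
			puts("unknown Netlogic part");
		}
	}
	return 0;
}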
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 655f849bd08d..7aa37ddfca4b 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h | |||
@@ -5,7 +5,9 @@ | |||
5 | #include <asm/cache.h> | 5 | #include <asm/cache.h> |
6 | #include <asm-generic/dma-coherent.h> | 6 | #include <asm-generic/dma-coherent.h> |
7 | 7 | ||
8 | #ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */ | ||
8 | #include <dma-coherence.h> | 9 | #include <dma-coherence.h> |
10 | #endif | ||
9 | 11 | ||
10 | extern struct dma_map_ops *mips_dma_map_ops; | 12 | extern struct dma_map_ops *mips_dma_map_ops; |
11 | 13 | ||
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index f5e856015329..c565b7c3f0b5 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h | |||
@@ -70,6 +70,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | |||
70 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | 70 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, |
71 | unsigned long addr, pte_t *ptep) | 71 | unsigned long addr, pte_t *ptep) |
72 | { | 72 | { |
73 | flush_tlb_mm(vma->vm_mm); | ||
73 | } | 74 | } |
74 | 75 | ||
75 | static inline int huge_pte_none(pte_t pte) | 76 | static inline int huge_pte_none(pte_t pte) |
diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h index 48bb82372994..9ad011366f73 100644 --- a/arch/mips/include/asm/i8253.h +++ b/arch/mips/include/asm/i8253.h | |||
@@ -12,8 +12,13 @@ | |||
12 | #define PIT_CH0 0x40 | 12 | #define PIT_CH0 0x40 |
13 | #define PIT_CH2 0x42 | 13 | #define PIT_CH2 0x42 |
14 | 14 | ||
15 | #define PIT_LATCH LATCH | ||
16 | |||
15 | extern raw_spinlock_t i8253_lock; | 17 | extern raw_spinlock_t i8253_lock; |
16 | 18 | ||
17 | extern void setup_pit_timer(void); | 19 | extern void setup_pit_timer(void); |
18 | 20 | ||
21 | #define inb_pit inb_p | ||
22 | #define outb_pit outb_p | ||
23 | |||
19 | #endif /* __ASM_I8253_H */ | 24 | #endif /* __ASM_I8253_H */ |
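
PIT_LATCH and the inb_pit/outb_pit aliases exist so the shared i8253 clocksource/clockevent code can be compiled unchanged on MIPS. The counter read that shared code performs looks roughly like the sketch below; port I/O is stubbed here, and the real code also holds i8253_lock around the sequence:

#include <stdio.h>

#define PIT_MODE 0x43
#define PIT_CH0  0x40

/* Stand-ins for the inb_pit/outb_pit aliases; real code touches ports. */
static unsigned char port[0x100];
static void outb_pit(unsigned char v, int p) { port[p] = v; }
static unsigned char inb_pit(int p) { return port[p]; }

static unsigned int pit_read_count(void)
{
	unsigned int count;

	outb_pit(0x00, PIT_MODE);	/* latch channel 0 count */
	count = inb_pit(PIT_CH0);	/* low byte */
	count |= inb_pit(PIT_CH0) << 8;	/* high byte */
	return count;
}

int main(void)
{
	printf("count=%u\n", pit_read_count());	/* 0 with the stubbed ports */
	return 0;
}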
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h index a6976619160a..f260ebed713b 100644 --- a/arch/mips/include/asm/mach-au1x00/au1000.h +++ b/arch/mips/include/asm/mach-au1x00/au1000.h | |||
@@ -161,6 +161,45 @@ static inline int alchemy_get_cputype(void) | |||
161 | return ALCHEMY_CPU_UNKNOWN; | 161 | return ALCHEMY_CPU_UNKNOWN; |
162 | } | 162 | } |
163 | 163 | ||
164 | /* return number of uarts on a given cputype */ | ||
165 | static inline int alchemy_get_uarts(int type) | ||
166 | { | ||
167 | switch (type) { | ||
168 | case ALCHEMY_CPU_AU1000: | ||
169 | return 4; | ||
170 | case ALCHEMY_CPU_AU1500: | ||
171 | case ALCHEMY_CPU_AU1200: | ||
172 | return 2; | ||
173 | case ALCHEMY_CPU_AU1100: | ||
174 | case ALCHEMY_CPU_AU1550: | ||
175 | return 3; | ||
176 | } | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | /* enable a UART block if it isn't already enabled */ ||
181 | static inline void alchemy_uart_enable(u32 uart_phys) | ||
182 | { | ||
183 | void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys); | ||
184 | |||
185 | /* reset, enable clock, deassert reset */ | ||
186 | if ((__raw_readl(addr + 0x100) & 3) != 3) { | ||
187 | __raw_writel(0, addr + 0x100); | ||
188 | wmb(); | ||
189 | __raw_writel(1, addr + 0x100); | ||
190 | wmb(); | ||
191 | } | ||
192 | __raw_writel(3, addr + 0x100); | ||
193 | wmb(); | ||
194 | } | ||
195 | |||
196 | static inline void alchemy_uart_disable(u32 uart_phys) | ||
197 | { | ||
198 | void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys); | ||
199 | __raw_writel(0, addr + 0x100); /* UART_MOD_CNTRL */ | ||
200 | wmb(); | ||
201 | } | ||
202 | |||
164 | static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) | 203 | static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) |
165 | { | 204 | { |
166 | void __iomem *base = (void __iomem *)KSEG1ADDR(uart_phys); | 205 | void __iomem *base = (void __iomem *)KSEG1ADDR(uart_phys); |
@@ -180,6 +219,20 @@ static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) | |||
180 | wmb(); | 219 | wmb(); |
181 | } | 220 | } |
182 | 221 | ||
222 | /* return number of ethernet MACs on a given cputype */ | ||
223 | static inline int alchemy_get_macs(int type) | ||
224 | { | ||
225 | switch (type) { | ||
226 | case ALCHEMY_CPU_AU1000: | ||
227 | case ALCHEMY_CPU_AU1500: | ||
228 | case ALCHEMY_CPU_AU1550: | ||
229 | return 2; | ||
230 | case ALCHEMY_CPU_AU1100: | ||
231 | return 1; | ||
232 | } | ||
233 | return 0; | ||
234 | } | ||
235 | |||
183 | /* arch/mips/au1000/common/clocks.c */ | 236 | /* arch/mips/au1000/common/clocks.c */ |
184 | extern void set_au1x00_speed(unsigned int new_freq); | 237 | extern void set_au1x00_speed(unsigned int new_freq); |
185 | extern unsigned int get_au1x00_speed(void); | 238 | extern unsigned int get_au1x00_speed(void); |
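
The new helpers let board code ask the detected CPU what it has instead of keying on per-SoC Kconfig symbols. A kernel-context fragment of the intended use, bringing the boot-console UART out of reset; the function name is illustrative:

#include <linux/init.h>
#include <asm/mach-au1x00/au1000.h>

static void __init board_console_setup(void)
{
	/* UART0 exists on every Alchemy part; parts differ in which of
	 * UART1..UART3 they provide, so real code pairs the count from
	 * alchemy_get_uarts() with a per-SoC table of base addresses. */
	if (alchemy_get_uarts(alchemy_get_cputype()) > 0)
		alchemy_uart_enable(AU1000_UART0_PHYS_ADDR);
}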
@@ -630,38 +683,42 @@ enum soc_au1200_ints { | |||
630 | 683 | ||
631 | /* | 684 | /* |
632 | * Physical base addresses for integrated peripherals | 685 | * Physical base addresses for integrated peripherals |
686 | * 0..au1000 1..au1500 2..au1100 3..au1550 4..au1200 | ||
633 | */ | 687 | */ |
634 | 688 | ||
689 | #define AU1000_AC97_PHYS_ADDR 0x10000000 /* 012 */ | ||
690 | #define AU1000_USBD_PHYS_ADDR 0x10200000 /* 0123 */ | ||
691 | #define AU1000_IC0_PHYS_ADDR 0x10400000 /* 01234 */ | ||
692 | #define AU1000_MAC0_PHYS_ADDR 0x10500000 /* 023 */ | ||
693 | #define AU1000_MAC1_PHYS_ADDR 0x10510000 /* 023 */ | ||
694 | #define AU1000_MACEN_PHYS_ADDR 0x10520000 /* 023 */ | ||
695 | #define AU1100_SD0_PHYS_ADDR 0x10600000 /* 24 */ | ||
696 | #define AU1100_SD1_PHYS_ADDR 0x10680000 /* 24 */ | ||
697 | #define AU1000_I2S_PHYS_ADDR 0x11000000 /* 02 */ | ||
698 | #define AU1500_MAC0_PHYS_ADDR 0x11500000 /* 1 */ | ||
699 | #define AU1500_MAC1_PHYS_ADDR 0x11510000 /* 1 */ | ||
700 | #define AU1500_MACEN_PHYS_ADDR 0x11520000 /* 1 */ | ||
701 | #define AU1000_UART0_PHYS_ADDR 0x11100000 /* 01234 */ | ||
702 | #define AU1000_UART1_PHYS_ADDR 0x11200000 /* 0234 */ | ||
703 | #define AU1000_UART2_PHYS_ADDR 0x11300000 /* 0 */ | ||
704 | #define AU1000_UART3_PHYS_ADDR 0x11400000 /* 0123 */ | ||
705 | #define AU1500_GPIO2_PHYS_ADDR 0x11700000 /* 1234 */ | ||
706 | #define AU1000_IC1_PHYS_ADDR 0x11800000 /* 01234 */ | ||
707 | #define AU1000_SYS_PHYS_ADDR 0x11900000 /* 01234 */ | ||
708 | #define AU1000_DMA_PHYS_ADDR 0x14002000 /* 012 */ | ||
709 | #define AU1550_DBDMA_PHYS_ADDR 0x14002000 /* 34 */ | ||
710 | #define AU1550_DBDMA_CONF_PHYS_ADDR 0x14003000 /* 34 */ | ||
711 | #define AU1000_MACDMA0_PHYS_ADDR 0x14004000 /* 0123 */ | ||
712 | #define AU1000_MACDMA1_PHYS_ADDR 0x14004200 /* 0123 */ | ||
713 | |||
714 | |||
635 | #ifdef CONFIG_SOC_AU1000 | 715 | #ifdef CONFIG_SOC_AU1000 |
636 | #define MEM_PHYS_ADDR 0x14000000 | 716 | #define MEM_PHYS_ADDR 0x14000000 |
637 | #define STATIC_MEM_PHYS_ADDR 0x14001000 | 717 | #define STATIC_MEM_PHYS_ADDR 0x14001000 |
638 | #define DMA0_PHYS_ADDR 0x14002000 | ||
639 | #define DMA1_PHYS_ADDR 0x14002100 | ||
640 | #define DMA2_PHYS_ADDR 0x14002200 | ||
641 | #define DMA3_PHYS_ADDR 0x14002300 | ||
642 | #define DMA4_PHYS_ADDR 0x14002400 | ||
643 | #define DMA5_PHYS_ADDR 0x14002500 | ||
644 | #define DMA6_PHYS_ADDR 0x14002600 | ||
645 | #define DMA7_PHYS_ADDR 0x14002700 | ||
646 | #define IC0_PHYS_ADDR 0x10400000 | ||
647 | #define IC1_PHYS_ADDR 0x11800000 | ||
648 | #define AC97_PHYS_ADDR 0x10000000 | ||
649 | #define USBH_PHYS_ADDR 0x10100000 | 718 | #define USBH_PHYS_ADDR 0x10100000 |
650 | #define USBD_PHYS_ADDR 0x10200000 | ||
651 | #define IRDA_PHYS_ADDR 0x10300000 | 719 | #define IRDA_PHYS_ADDR 0x10300000 |
652 | #define MAC0_PHYS_ADDR 0x10500000 | ||
653 | #define MAC1_PHYS_ADDR 0x10510000 | ||
654 | #define MACEN_PHYS_ADDR 0x10520000 | ||
655 | #define MACDMA0_PHYS_ADDR 0x14004000 | ||
656 | #define MACDMA1_PHYS_ADDR 0x14004200 | ||
657 | #define I2S_PHYS_ADDR 0x11000000 | ||
658 | #define UART0_PHYS_ADDR 0x11100000 | ||
659 | #define UART1_PHYS_ADDR 0x11200000 | ||
660 | #define UART2_PHYS_ADDR 0x11300000 | ||
661 | #define UART3_PHYS_ADDR 0x11400000 | ||
662 | #define SSI0_PHYS_ADDR 0x11600000 | 720 | #define SSI0_PHYS_ADDR 0x11600000 |
663 | #define SSI1_PHYS_ADDR 0x11680000 | 721 | #define SSI1_PHYS_ADDR 0x11680000 |
664 | #define SYS_PHYS_ADDR 0x11900000 | ||
665 | #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL | 722 | #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL |
666 | #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL | 723 | #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL |
667 | #define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL | 724 | #define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL |
@@ -672,30 +729,8 @@ enum soc_au1200_ints { | |||
672 | #ifdef CONFIG_SOC_AU1500 | 729 | #ifdef CONFIG_SOC_AU1500 |
673 | #define MEM_PHYS_ADDR 0x14000000 | 730 | #define MEM_PHYS_ADDR 0x14000000 |
674 | #define STATIC_MEM_PHYS_ADDR 0x14001000 | 731 | #define STATIC_MEM_PHYS_ADDR 0x14001000 |
675 | #define DMA0_PHYS_ADDR 0x14002000 | ||
676 | #define DMA1_PHYS_ADDR 0x14002100 | ||
677 | #define DMA2_PHYS_ADDR 0x14002200 | ||
678 | #define DMA3_PHYS_ADDR 0x14002300 | ||
679 | #define DMA4_PHYS_ADDR 0x14002400 | ||
680 | #define DMA5_PHYS_ADDR 0x14002500 | ||
681 | #define DMA6_PHYS_ADDR 0x14002600 | ||
682 | #define DMA7_PHYS_ADDR 0x14002700 | ||
683 | #define IC0_PHYS_ADDR 0x10400000 | ||
684 | #define IC1_PHYS_ADDR 0x11800000 | ||
685 | #define AC97_PHYS_ADDR 0x10000000 | ||
686 | #define USBH_PHYS_ADDR 0x10100000 | 732 | #define USBH_PHYS_ADDR 0x10100000 |
687 | #define USBD_PHYS_ADDR 0x10200000 | ||
688 | #define PCI_PHYS_ADDR 0x14005000 | 733 | #define PCI_PHYS_ADDR 0x14005000 |
689 | #define MAC0_PHYS_ADDR 0x11500000 | ||
690 | #define MAC1_PHYS_ADDR 0x11510000 | ||
691 | #define MACEN_PHYS_ADDR 0x11520000 | ||
692 | #define MACDMA0_PHYS_ADDR 0x14004000 | ||
693 | #define MACDMA1_PHYS_ADDR 0x14004200 | ||
694 | #define I2S_PHYS_ADDR 0x11000000 | ||
695 | #define UART0_PHYS_ADDR 0x11100000 | ||
696 | #define UART3_PHYS_ADDR 0x11400000 | ||
697 | #define GPIO2_PHYS_ADDR 0x11700000 | ||
698 | #define SYS_PHYS_ADDR 0x11900000 | ||
699 | #define PCI_MEM_PHYS_ADDR 0x400000000ULL | 734 | #define PCI_MEM_PHYS_ADDR 0x400000000ULL |
700 | #define PCI_IO_PHYS_ADDR 0x500000000ULL | 735 | #define PCI_IO_PHYS_ADDR 0x500000000ULL |
701 | #define PCI_CONFIG0_PHYS_ADDR 0x600000000ULL | 736 | #define PCI_CONFIG0_PHYS_ADDR 0x600000000ULL |
@@ -710,34 +745,10 @@ enum soc_au1200_ints { | |||
710 | #ifdef CONFIG_SOC_AU1100 | 745 | #ifdef CONFIG_SOC_AU1100 |
711 | #define MEM_PHYS_ADDR 0x14000000 | 746 | #define MEM_PHYS_ADDR 0x14000000 |
712 | #define STATIC_MEM_PHYS_ADDR 0x14001000 | 747 | #define STATIC_MEM_PHYS_ADDR 0x14001000 |
713 | #define DMA0_PHYS_ADDR 0x14002000 | ||
714 | #define DMA1_PHYS_ADDR 0x14002100 | ||
715 | #define DMA2_PHYS_ADDR 0x14002200 | ||
716 | #define DMA3_PHYS_ADDR 0x14002300 | ||
717 | #define DMA4_PHYS_ADDR 0x14002400 | ||
718 | #define DMA5_PHYS_ADDR 0x14002500 | ||
719 | #define DMA6_PHYS_ADDR 0x14002600 | ||
720 | #define DMA7_PHYS_ADDR 0x14002700 | ||
721 | #define IC0_PHYS_ADDR 0x10400000 | ||
722 | #define SD0_PHYS_ADDR 0x10600000 | ||
723 | #define SD1_PHYS_ADDR 0x10680000 | ||
724 | #define IC1_PHYS_ADDR 0x11800000 | ||
725 | #define AC97_PHYS_ADDR 0x10000000 | ||
726 | #define USBH_PHYS_ADDR 0x10100000 | 748 | #define USBH_PHYS_ADDR 0x10100000 |
727 | #define USBD_PHYS_ADDR 0x10200000 | ||
728 | #define IRDA_PHYS_ADDR 0x10300000 | 749 | #define IRDA_PHYS_ADDR 0x10300000 |
729 | #define MAC0_PHYS_ADDR 0x10500000 | ||
730 | #define MACEN_PHYS_ADDR 0x10520000 | ||
731 | #define MACDMA0_PHYS_ADDR 0x14004000 | ||
732 | #define MACDMA1_PHYS_ADDR 0x14004200 | ||
733 | #define I2S_PHYS_ADDR 0x11000000 | ||
734 | #define UART0_PHYS_ADDR 0x11100000 | ||
735 | #define UART1_PHYS_ADDR 0x11200000 | ||
736 | #define UART3_PHYS_ADDR 0x11400000 | ||
737 | #define SSI0_PHYS_ADDR 0x11600000 | 750 | #define SSI0_PHYS_ADDR 0x11600000 |
738 | #define SSI1_PHYS_ADDR 0x11680000 | 751 | #define SSI1_PHYS_ADDR 0x11680000 |
739 | #define GPIO2_PHYS_ADDR 0x11700000 | ||
740 | #define SYS_PHYS_ADDR 0x11900000 | ||
741 | #define LCD_PHYS_ADDR 0x15000000 | 752 | #define LCD_PHYS_ADDR 0x15000000 |
742 | #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL | 753 | #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL |
743 | #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL | 754 | #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL |
@@ -749,22 +760,8 @@ enum soc_au1200_ints { | |||
749 | #ifdef CONFIG_SOC_AU1550 | 760 | #ifdef CONFIG_SOC_AU1550 |
750 | #define MEM_PHYS_ADDR 0x14000000 | 761 | #define MEM_PHYS_ADDR 0x14000000 |
751 | #define STATIC_MEM_PHYS_ADDR 0x14001000 | 762 | #define STATIC_MEM_PHYS_ADDR 0x14001000 |
752 | #define IC0_PHYS_ADDR 0x10400000 | ||
753 | #define IC1_PHYS_ADDR 0x11800000 | ||
754 | #define USBH_PHYS_ADDR 0x14020000 | 763 | #define USBH_PHYS_ADDR 0x14020000 |
755 | #define USBD_PHYS_ADDR 0x10200000 | ||
756 | #define PCI_PHYS_ADDR 0x14005000 | 764 | #define PCI_PHYS_ADDR 0x14005000 |
757 | #define MAC0_PHYS_ADDR 0x10500000 | ||
758 | #define MAC1_PHYS_ADDR 0x10510000 | ||
759 | #define MACEN_PHYS_ADDR 0x10520000 | ||
760 | #define MACDMA0_PHYS_ADDR 0x14004000 | ||
761 | #define MACDMA1_PHYS_ADDR 0x14004200 | ||
762 | #define UART0_PHYS_ADDR 0x11100000 | ||
763 | #define UART1_PHYS_ADDR 0x11200000 | ||
764 | #define UART3_PHYS_ADDR 0x11400000 | ||
765 | #define GPIO2_PHYS_ADDR 0x11700000 | ||
766 | #define SYS_PHYS_ADDR 0x11900000 | ||
767 | #define DDMA_PHYS_ADDR 0x14002000 | ||
768 | #define PE_PHYS_ADDR 0x14008000 | 765 | #define PE_PHYS_ADDR 0x14008000 |
769 | #define PSC0_PHYS_ADDR 0x11A00000 | 766 | #define PSC0_PHYS_ADDR 0x11A00000 |
770 | #define PSC1_PHYS_ADDR 0x11B00000 | 767 | #define PSC1_PHYS_ADDR 0x11B00000 |
@@ -786,19 +783,10 @@ enum soc_au1200_ints { | |||
786 | #define STATIC_MEM_PHYS_ADDR 0x14001000 | 783 | #define STATIC_MEM_PHYS_ADDR 0x14001000 |
787 | #define AES_PHYS_ADDR 0x10300000 | 784 | #define AES_PHYS_ADDR 0x10300000 |
788 | #define CIM_PHYS_ADDR 0x14004000 | 785 | #define CIM_PHYS_ADDR 0x14004000 |
789 | #define IC0_PHYS_ADDR 0x10400000 | ||
790 | #define IC1_PHYS_ADDR 0x11800000 | ||
791 | #define USBM_PHYS_ADDR 0x14020000 | 786 | #define USBM_PHYS_ADDR 0x14020000 |
792 | #define USBH_PHYS_ADDR 0x14020100 | 787 | #define USBH_PHYS_ADDR 0x14020100 |
793 | #define UART0_PHYS_ADDR 0x11100000 | ||
794 | #define UART1_PHYS_ADDR 0x11200000 | ||
795 | #define GPIO2_PHYS_ADDR 0x11700000 | ||
796 | #define SYS_PHYS_ADDR 0x11900000 | ||
797 | #define DDMA_PHYS_ADDR 0x14002000 | ||
798 | #define PSC0_PHYS_ADDR 0x11A00000 | 788 | #define PSC0_PHYS_ADDR 0x11A00000 |
799 | #define PSC1_PHYS_ADDR 0x11B00000 | 789 | #define PSC1_PHYS_ADDR 0x11B00000 |
800 | #define SD0_PHYS_ADDR 0x10600000 | ||
801 | #define SD1_PHYS_ADDR 0x10680000 | ||
802 | #define LCD_PHYS_ADDR 0x15000000 | 790 | #define LCD_PHYS_ADDR 0x15000000 |
803 | #define SWCNT_PHYS_ADDR 0x1110010C | 791 | #define SWCNT_PHYS_ADDR 0x1110010C |
804 | #define MAEFE_PHYS_ADDR 0x14012000 | 792 | #define MAEFE_PHYS_ADDR 0x14012000 |
@@ -835,183 +823,43 @@ enum soc_au1200_ints { | |||
835 | #endif | 823 | #endif |
836 | 824 | ||
837 | 825 | ||
838 | /* Interrupt Controller register offsets */ | ||
839 | #define IC_CFG0RD 0x40 | ||
840 | #define IC_CFG0SET 0x40 | ||
841 | #define IC_CFG0CLR 0x44 | ||
842 | #define IC_CFG1RD 0x48 | ||
843 | #define IC_CFG1SET 0x48 | ||
844 | #define IC_CFG1CLR 0x4C | ||
845 | #define IC_CFG2RD 0x50 | ||
846 | #define IC_CFG2SET 0x50 | ||
847 | #define IC_CFG2CLR 0x54 | ||
848 | #define IC_REQ0INT 0x54 | ||
849 | #define IC_SRCRD 0x58 | ||
850 | #define IC_SRCSET 0x58 | ||
851 | #define IC_SRCCLR 0x5C | ||
852 | #define IC_REQ1INT 0x5C | ||
853 | #define IC_ASSIGNRD 0x60 | ||
854 | #define IC_ASSIGNSET 0x60 | ||
855 | #define IC_ASSIGNCLR 0x64 | ||
856 | #define IC_WAKERD 0x68 | ||
857 | #define IC_WAKESET 0x68 | ||
858 | #define IC_WAKECLR 0x6C | ||
859 | #define IC_MASKRD 0x70 | ||
860 | #define IC_MASKSET 0x70 | ||
861 | #define IC_MASKCLR 0x74 | ||
862 | #define IC_RISINGRD 0x78 | ||
863 | #define IC_RISINGCLR 0x78 | ||
864 | #define IC_FALLINGRD 0x7C | ||
865 | #define IC_FALLINGCLR 0x7C | ||
866 | #define IC_TESTBIT 0x80 | ||
867 | |||
868 | |||
869 | /* Interrupt Controller 0 */ | ||
870 | #define IC0_CFG0RD 0xB0400040 | ||
871 | #define IC0_CFG0SET 0xB0400040 | ||
872 | #define IC0_CFG0CLR 0xB0400044 | ||
873 | |||
874 | #define IC0_CFG1RD 0xB0400048 | ||
875 | #define IC0_CFG1SET 0xB0400048 | ||
876 | #define IC0_CFG1CLR 0xB040004C | ||
877 | |||
878 | #define IC0_CFG2RD 0xB0400050 | ||
879 | #define IC0_CFG2SET 0xB0400050 | ||
880 | #define IC0_CFG2CLR 0xB0400054 | ||
881 | |||
882 | #define IC0_REQ0INT 0xB0400054 | ||
883 | #define IC0_SRCRD 0xB0400058 | ||
884 | #define IC0_SRCSET 0xB0400058 | ||
885 | #define IC0_SRCCLR 0xB040005C | ||
886 | #define IC0_REQ1INT 0xB040005C | ||
887 | |||
888 | #define IC0_ASSIGNRD 0xB0400060 | ||
889 | #define IC0_ASSIGNSET 0xB0400060 | ||
890 | #define IC0_ASSIGNCLR 0xB0400064 | ||
891 | |||
892 | #define IC0_WAKERD 0xB0400068 | ||
893 | #define IC0_WAKESET 0xB0400068 | ||
894 | #define IC0_WAKECLR 0xB040006C | ||
895 | |||
896 | #define IC0_MASKRD 0xB0400070 | ||
897 | #define IC0_MASKSET 0xB0400070 | ||
898 | #define IC0_MASKCLR 0xB0400074 | ||
899 | |||
900 | #define IC0_RISINGRD 0xB0400078 | ||
901 | #define IC0_RISINGCLR 0xB0400078 | ||
902 | #define IC0_FALLINGRD 0xB040007C | ||
903 | #define IC0_FALLINGCLR 0xB040007C | ||
904 | |||
905 | #define IC0_TESTBIT 0xB0400080 | ||
906 | |||
907 | /* Interrupt Controller 1 */ | ||
908 | #define IC1_CFG0RD 0xB1800040 | ||
909 | #define IC1_CFG0SET 0xB1800040 | ||
910 | #define IC1_CFG0CLR 0xB1800044 | ||
911 | |||
912 | #define IC1_CFG1RD 0xB1800048 | ||
913 | #define IC1_CFG1SET 0xB1800048 | ||
914 | #define IC1_CFG1CLR 0xB180004C | ||
915 | |||
916 | #define IC1_CFG2RD 0xB1800050 | ||
917 | #define IC1_CFG2SET 0xB1800050 | ||
918 | #define IC1_CFG2CLR 0xB1800054 | ||
919 | |||
920 | #define IC1_REQ0INT 0xB1800054 | ||
921 | #define IC1_SRCRD 0xB1800058 | ||
922 | #define IC1_SRCSET 0xB1800058 | ||
923 | #define IC1_SRCCLR 0xB180005C | ||
924 | #define IC1_REQ1INT 0xB180005C | ||
925 | |||
926 | #define IC1_ASSIGNRD 0xB1800060 | ||
927 | #define IC1_ASSIGNSET 0xB1800060 | ||
928 | #define IC1_ASSIGNCLR 0xB1800064 | ||
929 | |||
930 | #define IC1_WAKERD 0xB1800068 | ||
931 | #define IC1_WAKESET 0xB1800068 | ||
932 | #define IC1_WAKECLR 0xB180006C | ||
933 | |||
934 | #define IC1_MASKRD 0xB1800070 | ||
935 | #define IC1_MASKSET 0xB1800070 | ||
936 | #define IC1_MASKCLR 0xB1800074 | ||
937 | |||
938 | #define IC1_RISINGRD 0xB1800078 | ||
939 | #define IC1_RISINGCLR 0xB1800078 | ||
940 | #define IC1_FALLINGRD 0xB180007C | ||
941 | #define IC1_FALLINGCLR 0xB180007C | ||
942 | |||
943 | #define IC1_TESTBIT 0xB1800080 | ||
944 | 826 | ||
945 | 827 | ||
946 | /* Au1000 */ | 828 | /* Au1000 */ |
947 | #ifdef CONFIG_SOC_AU1000 | 829 | #ifdef CONFIG_SOC_AU1000 |
948 | 830 | ||
949 | #define UART0_ADDR 0xB1100000 | ||
950 | #define UART3_ADDR 0xB1400000 | ||
951 | |||
952 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ | 831 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ |
953 | #define USB_HOST_CONFIG 0xB017FFFC | 832 | #define USB_HOST_CONFIG 0xB017FFFC |
954 | #define FOR_PLATFORM_C_USB_HOST_INT AU1000_USB_HOST_INT | 833 | #define FOR_PLATFORM_C_USB_HOST_INT AU1000_USB_HOST_INT |
955 | |||
956 | #define AU1000_ETH0_BASE 0xB0500000 | ||
957 | #define AU1000_ETH1_BASE 0xB0510000 | ||
958 | #define AU1000_MAC0_ENABLE 0xB0520000 | ||
959 | #define AU1000_MAC1_ENABLE 0xB0520004 | ||
960 | #define NUM_ETH_INTERFACES 2 | ||
961 | #endif /* CONFIG_SOC_AU1000 */ | 834 | #endif /* CONFIG_SOC_AU1000 */ |
962 | 835 | ||
963 | /* Au1500 */ | 836 | /* Au1500 */ |
964 | #ifdef CONFIG_SOC_AU1500 | 837 | #ifdef CONFIG_SOC_AU1500 |
965 | 838 | ||
966 | #define UART0_ADDR 0xB1100000 | ||
967 | #define UART3_ADDR 0xB1400000 | ||
968 | |||
969 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ | 839 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ |
970 | #define USB_HOST_CONFIG 0xB017fffc | 840 | #define USB_HOST_CONFIG 0xB017fffc |
971 | #define FOR_PLATFORM_C_USB_HOST_INT AU1500_USB_HOST_INT | 841 | #define FOR_PLATFORM_C_USB_HOST_INT AU1500_USB_HOST_INT |
972 | |||
973 | #define AU1500_ETH0_BASE 0xB1500000 | ||
974 | #define AU1500_ETH1_BASE 0xB1510000 | ||
975 | #define AU1500_MAC0_ENABLE 0xB1520000 | ||
976 | #define AU1500_MAC1_ENABLE 0xB1520004 | ||
977 | #define NUM_ETH_INTERFACES 2 | ||
978 | #endif /* CONFIG_SOC_AU1500 */ | 842 | #endif /* CONFIG_SOC_AU1500 */ |
979 | 843 | ||
980 | /* Au1100 */ | 844 | /* Au1100 */ |
981 | #ifdef CONFIG_SOC_AU1100 | 845 | #ifdef CONFIG_SOC_AU1100 |
982 | 846 | ||
983 | #define UART0_ADDR 0xB1100000 | ||
984 | #define UART3_ADDR 0xB1400000 | ||
985 | |||
986 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ | 847 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ |
987 | #define USB_HOST_CONFIG 0xB017FFFC | 848 | #define USB_HOST_CONFIG 0xB017FFFC |
988 | #define FOR_PLATFORM_C_USB_HOST_INT AU1100_USB_HOST_INT | 849 | #define FOR_PLATFORM_C_USB_HOST_INT AU1100_USB_HOST_INT |
989 | |||
990 | #define AU1100_ETH0_BASE 0xB0500000 | ||
991 | #define AU1100_MAC0_ENABLE 0xB0520000 | ||
992 | #define NUM_ETH_INTERFACES 1 | ||
993 | #endif /* CONFIG_SOC_AU1100 */ | 850 | #endif /* CONFIG_SOC_AU1100 */ |
994 | 851 | ||
995 | #ifdef CONFIG_SOC_AU1550 | 852 | #ifdef CONFIG_SOC_AU1550 |
996 | #define UART0_ADDR 0xB1100000 | ||
997 | 853 | ||
998 | #define USB_OHCI_BASE 0x14020000 /* phys addr for ioremap */ | 854 | #define USB_OHCI_BASE 0x14020000 /* phys addr for ioremap */ |
999 | #define USB_OHCI_LEN 0x00060000 | 855 | #define USB_OHCI_LEN 0x00060000 |
1000 | #define USB_HOST_CONFIG 0xB4027ffc | 856 | #define USB_HOST_CONFIG 0xB4027ffc |
1001 | #define FOR_PLATFORM_C_USB_HOST_INT AU1550_USB_HOST_INT | 857 | #define FOR_PLATFORM_C_USB_HOST_INT AU1550_USB_HOST_INT |
1002 | |||
1003 | #define AU1550_ETH0_BASE 0xB0500000 | ||
1004 | #define AU1550_ETH1_BASE 0xB0510000 | ||
1005 | #define AU1550_MAC0_ENABLE 0xB0520000 | ||
1006 | #define AU1550_MAC1_ENABLE 0xB0520004 | ||
1007 | #define NUM_ETH_INTERFACES 2 | ||
1008 | #endif /* CONFIG_SOC_AU1550 */ | 858 | #endif /* CONFIG_SOC_AU1550 */ |
1009 | 859 | ||
1010 | 860 | ||
1011 | #ifdef CONFIG_SOC_AU1200 | 861 | #ifdef CONFIG_SOC_AU1200 |
1012 | 862 | ||
1013 | #define UART0_ADDR 0xB1100000 | ||
1014 | |||
1015 | #define USB_UOC_BASE 0x14020020 | 863 | #define USB_UOC_BASE 0x14020020 |
1016 | #define USB_UOC_LEN 0x20 | 864 | #define USB_UOC_LEN 0x20 |
1017 | #define USB_OHCI_BASE 0x14020100 | 865 | #define USB_OHCI_BASE 0x14020100 |
@@ -1504,22 +1352,6 @@ enum soc_au1200_ints { | |||
1504 | #define SYS_PINFUNC_S1B (1 << 2) | 1352 | #define SYS_PINFUNC_S1B (1 << 2) |
1505 | #endif | 1353 | #endif |
1506 | 1354 | ||
1507 | #define SYS_TRIOUTRD 0xB1900100 | ||
1508 | #define SYS_TRIOUTCLR 0xB1900100 | ||
1509 | #define SYS_OUTPUTRD 0xB1900108 | ||
1510 | #define SYS_OUTPUTSET 0xB1900108 | ||
1511 | #define SYS_OUTPUTCLR 0xB190010C | ||
1512 | #define SYS_PINSTATERD 0xB1900110 | ||
1513 | #define SYS_PININPUTEN 0xB1900110 | ||
1514 | |||
1515 | /* GPIO2, Au1500, Au1550 only */ | ||
1516 | #define GPIO2_BASE 0xB1700000 | ||
1517 | #define GPIO2_DIR (GPIO2_BASE + 0) | ||
1518 | #define GPIO2_OUTPUT (GPIO2_BASE + 8) | ||
1519 | #define GPIO2_PINSTATE (GPIO2_BASE + 0xC) | ||
1520 | #define GPIO2_INTENABLE (GPIO2_BASE + 0x10) | ||
1521 | #define GPIO2_ENABLE (GPIO2_BASE + 0x14) | ||
1522 | |||
1523 | /* Power Management */ | 1355 | /* Power Management */ |
1524 | #define SYS_SCRATCH0 0xB1900018 | 1356 | #define SYS_SCRATCH0 0xB1900018 |
1525 | #define SYS_SCRATCH1 0xB190001C | 1357 | #define SYS_SCRATCH1 0xB190001C |
@@ -1635,12 +1467,6 @@ enum soc_au1200_ints { | |||
1635 | # define AC97C_RS (1 << 1) | 1467 | # define AC97C_RS (1 << 1) |
1636 | # define AC97C_CE (1 << 0) | 1468 | # define AC97C_CE (1 << 0) |
1637 | 1469 | ||
1638 | /* Secure Digital (SD) Controller */ | ||
1639 | #define SD0_XMIT_FIFO 0xB0600000 | ||
1640 | #define SD0_RECV_FIFO 0xB0600004 | ||
1641 | #define SD1_XMIT_FIFO 0xB0680000 | ||
1642 | #define SD1_RECV_FIFO 0xB0680004 | ||
1643 | |||
1644 | #if defined(CONFIG_SOC_AU1500) || defined(CONFIG_SOC_AU1550) | 1470 | #if defined(CONFIG_SOC_AU1500) || defined(CONFIG_SOC_AU1550) |
1645 | /* Au1500 PCI Controller */ | 1471 | /* Au1500 PCI Controller */ |
1646 | #define Au1500_CFG_BASE 0xB4005000 /* virtual, KSEG1 addr */ | 1472 | #define Au1500_CFG_BASE 0xB4005000 /* virtual, KSEG1 addr */ |
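The IC0_*/IC1_* constants removed above were absolute KSEG1 addresses duplicating the relative IC_* offsets, which this patch also drops from the header (the interrupt-controller accessors move into platform code). The layout they encode is worth noting: the read, set and clear registers share offsets, so writing a mask of 1-bits to a SET or CLR offset changes only those bits, with no read-modify-write cycle. A minimal sketch of that write-1-to-set/clear idiom, with the two mask offsets (0x70/0x74, taken from the removed block) defined locally; "base" is assumed to be the controller's KSEG1-mapped address, and whether a set bit means enabled or masked is defined by the SoC manual:

#define IC_MASKSET	0x70	/* writing 1-bits sets those mask bits */
#define IC_MASKCLR	0x74	/* writing 1-bits clears those mask bits */

static inline void ic_mask_set(void __iomem *base, int src)
{
	__raw_writel(1 << src, base + IC_MASKSET);
	wmb();
}

static inline void ic_mask_clear(void __iomem *base, int src)
{
	__raw_writel(1 << src, base + IC_MASKCLR);
	wmb();
}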
diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h index c333b4e1cd44..59f5b55b2200 100644 --- a/arch/mips/include/asm/mach-au1x00/au1000_dma.h +++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h | |||
@@ -37,10 +37,6 @@ | |||
37 | 37 | ||
38 | #define NUM_AU1000_DMA_CHANNELS 8 | 38 | #define NUM_AU1000_DMA_CHANNELS 8 |
39 | 39 | ||
40 | /* DMA Channel Base Addresses */ | ||
41 | #define DMA_CHANNEL_BASE 0xB4002000 | ||
42 | #define DMA_CHANNEL_LEN 0x00000100 | ||
43 | |||
44 | /* DMA Channel Register Offsets */ | 40 | /* DMA Channel Register Offsets */ |
45 | #define DMA_MODE_SET 0x00000000 | 41 | #define DMA_MODE_SET 0x00000000 |
46 | #define DMA_MODE_READ DMA_MODE_SET | 42 | #define DMA_MODE_READ DMA_MODE_SET |
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h index c8a553a36ba4..2fdacfe85e23 100644 --- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h +++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h | |||
@@ -37,14 +37,6 @@ | |||
37 | 37 | ||
38 | #ifndef _LANGUAGE_ASSEMBLY | 38 | #ifndef _LANGUAGE_ASSEMBLY |
39 | 39 | ||
40 | /* | ||
41 | * The DMA base addresses. | ||
42 | * The channels are every 256 bytes (0x0100) from the channel 0 base. | ||
43 | * Interrupt status/enable is bits 15:0 for channels 15 to zero. | ||
44 | */ | ||
45 | #define DDMA_GLOBAL_BASE 0xb4003000 | ||
46 | #define DDMA_CHANNEL_BASE 0xb4002000 | ||
47 | |||
48 | typedef volatile struct dbdma_global { | 40 | typedef volatile struct dbdma_global { |
49 | u32 ddma_config; | 41 | u32 ddma_config; |
50 | u32 ddma_intstat; | 42 | u32 ddma_intstat; |
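The removed DBDMA comment carried the only layout information: channel register blocks sit every 0x0100 bytes above the channel 0 base, and DDMA_CHANNEL_BASE (0xb4002000) was the old fixed KSEG1 address. A sketch of the address computation that now has to happen against a runtime-mapped base:

static inline void __iomem *dbdma_chan_regs(void __iomem *chan0_base, int ch)
{
	/* per the removed comment: one 256-byte block per channel */
	return chan0_base + ch * 0x100;
}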
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h index 62d2f136d941..1f41a522906d 100644 --- a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h +++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h | |||
@@ -24,6 +24,23 @@ | |||
24 | 24 | ||
25 | #define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off)) | 25 | #define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off)) |
26 | 26 | ||
27 | /* GPIO1 registers within SYS_ area */ | ||
28 | #define SYS_TRIOUTRD 0x100 | ||
29 | #define SYS_TRIOUTCLR 0x100 | ||
30 | #define SYS_OUTPUTRD 0x108 | ||
31 | #define SYS_OUTPUTSET 0x108 | ||
32 | #define SYS_OUTPUTCLR 0x10C | ||
33 | #define SYS_PINSTATERD 0x110 | ||
34 | #define SYS_PININPUTEN 0x110 | ||
35 | |||
36 | /* register offsets within GPIO2 block */ | ||
37 | #define GPIO2_DIR 0x00 | ||
38 | #define GPIO2_OUTPUT 0x08 | ||
39 | #define GPIO2_PINSTATE 0x0C | ||
40 | #define GPIO2_INTENABLE 0x10 | ||
41 | #define GPIO2_ENABLE 0x14 | ||
42 | |||
43 | struct gpio; | ||
27 | 44 | ||
28 | static inline int au1000_gpio1_to_irq(int gpio) | 45 | static inline int au1000_gpio1_to_irq(int gpio) |
29 | { | 46 | { |
@@ -200,23 +217,26 @@ static inline int au1200_irq_to_gpio(int irq) | |||
200 | */ | 217 | */ |
201 | static inline void alchemy_gpio1_set_value(int gpio, int v) | 218 | static inline void alchemy_gpio1_set_value(int gpio, int v) |
202 | { | 219 | { |
220 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); | ||
203 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); | 221 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); |
204 | unsigned long r = v ? SYS_OUTPUTSET : SYS_OUTPUTCLR; | 222 | unsigned long r = v ? SYS_OUTPUTSET : SYS_OUTPUTCLR; |
205 | au_writel(mask, r); | 223 | __raw_writel(mask, base + r); |
206 | au_sync(); | 224 | wmb(); |
207 | } | 225 | } |
208 | 226 | ||
209 | static inline int alchemy_gpio1_get_value(int gpio) | 227 | static inline int alchemy_gpio1_get_value(int gpio) |
210 | { | 228 | { |
229 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); | ||
211 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); | 230 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); |
212 | return au_readl(SYS_PINSTATERD) & mask; | 231 | return __raw_readl(base + SYS_PINSTATERD) & mask; |
213 | } | 232 | } |
214 | 233 | ||
215 | static inline int alchemy_gpio1_direction_input(int gpio) | 234 | static inline int alchemy_gpio1_direction_input(int gpio) |
216 | { | 235 | { |
236 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); | ||
217 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); | 237 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); |
218 | au_writel(mask, SYS_TRIOUTCLR); | 238 | __raw_writel(mask, base + SYS_TRIOUTCLR); |
219 | au_sync(); | 239 | wmb(); |
220 | return 0; | 240 | return 0; |
221 | } | 241 | } |
222 | 242 | ||
@@ -257,27 +277,31 @@ static inline int alchemy_gpio1_to_irq(int gpio) | |||
257 | */ | 277 | */ |
258 | static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out) | 278 | static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out) |
259 | { | 279 | { |
280 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); | ||
260 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE); | 281 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE); |
261 | unsigned long d = au_readl(GPIO2_DIR); | 282 | unsigned long d = __raw_readl(base + GPIO2_DIR); |
283 | |||
262 | if (to_out) | 284 | if (to_out) |
263 | d |= mask; | 285 | d |= mask; |
264 | else | 286 | else |
265 | d &= ~mask; | 287 | d &= ~mask; |
266 | au_writel(d, GPIO2_DIR); | 288 | __raw_writel(d, base + GPIO2_DIR); |
267 | au_sync(); | 289 | wmb(); |
268 | } | 290 | } |
269 | 291 | ||
270 | static inline void alchemy_gpio2_set_value(int gpio, int v) | 292 | static inline void alchemy_gpio2_set_value(int gpio, int v) |
271 | { | 293 | { |
294 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); | ||
272 | unsigned long mask; | 295 | unsigned long mask; |
273 | mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE); | 296 | mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE); |
274 | au_writel(mask, GPIO2_OUTPUT); | 297 | __raw_writel(mask, base + GPIO2_OUTPUT); |
275 | au_sync(); | 298 | wmb(); |
276 | } | 299 | } |
277 | 300 | ||
278 | static inline int alchemy_gpio2_get_value(int gpio) | 301 | static inline int alchemy_gpio2_get_value(int gpio) |
279 | { | 302 | { |
280 | return au_readl(GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); | 303 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); |
304 | return __raw_readl(base + GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); | ||
281 | } | 305 | } |
282 | 306 | ||
283 | static inline int alchemy_gpio2_direction_input(int gpio) | 307 | static inline int alchemy_gpio2_direction_input(int gpio) |
@@ -329,21 +353,23 @@ static inline int alchemy_gpio2_to_irq(int gpio) | |||
329 | */ | 353 | */ |
330 | static inline void alchemy_gpio1_input_enable(void) | 354 | static inline void alchemy_gpio1_input_enable(void) |
331 | { | 355 | { |
332 | au_writel(0, SYS_PININPUTEN); /* the write op is key */ | 356 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); |
333 | au_sync(); | 357 | __raw_writel(0, base + SYS_PININPUTEN); /* the write op is key */ |
358 | wmb(); | ||
334 | } | 359 | } |
335 | 360 | ||
336 | /* GPIO2 shared interrupts and control */ | 361 | /* GPIO2 shared interrupts and control */ |
337 | 362 | ||
338 | static inline void __alchemy_gpio2_mod_int(int gpio2, int en) | 363 | static inline void __alchemy_gpio2_mod_int(int gpio2, int en) |
339 | { | 364 | { |
340 | unsigned long r = au_readl(GPIO2_INTENABLE); | 365 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); |
366 | unsigned long r = __raw_readl(base + GPIO2_INTENABLE); | ||
341 | if (en) | 367 | if (en) |
342 | r |= 1 << gpio2; | 368 | r |= 1 << gpio2; |
343 | else | 369 | else |
344 | r &= ~(1 << gpio2); | 370 | r &= ~(1 << gpio2); |
345 | au_writel(r, GPIO2_INTENABLE); | 371 | __raw_writel(r, base + GPIO2_INTENABLE); |
346 | au_sync(); | 372 | wmb(); |
347 | } | 373 | } |
348 | 374 | ||
349 | /** | 375 | /** |
@@ -418,10 +444,11 @@ static inline void alchemy_gpio2_disable_int(int gpio2) | |||
418 | */ | 444 | */ |
419 | static inline void alchemy_gpio2_enable(void) | 445 | static inline void alchemy_gpio2_enable(void) |
420 | { | 446 | { |
421 | au_writel(3, GPIO2_ENABLE); /* reset, clock enabled */ | 447 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); |
422 | au_sync(); | 448 | __raw_writel(3, base + GPIO2_ENABLE); /* reset, clock enabled */ |
423 | au_writel(1, GPIO2_ENABLE); /* clock enabled */ | 449 | wmb(); |
424 | au_sync(); | 450 | __raw_writel(1, base + GPIO2_ENABLE); /* clock enabled */ |
451 | wmb(); | ||
425 | } | 452 | } |
426 | 453 | ||
427 | /** | 454 | /** |
@@ -431,8 +458,9 @@ static inline void alchemy_gpio2_enable(void) | |||
431 | */ | 458 | */ |
432 | static inline void alchemy_gpio2_disable(void) | 459 | static inline void alchemy_gpio2_disable(void) |
433 | { | 460 | { |
434 | au_writel(2, GPIO2_ENABLE); /* reset, clock disabled */ | 461 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); |
435 | au_sync(); | 462 | __raw_writel(2, base + GPIO2_ENABLE); /* reset, clock disabled */ |
463 | wmb(); | ||
436 | } | 464 | } |
437 | 465 | ||
438 | /**********************************************************************/ | 466 | /**********************************************************************/ |
@@ -556,6 +584,16 @@ static inline void gpio_set_value(int gpio, int v) | |||
556 | alchemy_gpio_set_value(gpio, v); | 584 | alchemy_gpio_set_value(gpio, v); |
557 | } | 585 | } |
558 | 586 | ||
587 | static inline int gpio_get_value_cansleep(unsigned gpio) | ||
588 | { | ||
589 | return gpio_get_value(gpio); | ||
590 | } | ||
591 | |||
592 | static inline void gpio_set_value_cansleep(unsigned gpio, int value) | ||
593 | { | ||
594 | gpio_set_value(gpio, value); | ||
595 | } | ||
596 | |||
559 | static inline int gpio_is_valid(int gpio) | 597 | static inline int gpio_is_valid(int gpio) |
560 | { | 598 | { |
561 | return alchemy_gpio_is_valid(gpio); | 599 | return alchemy_gpio_is_valid(gpio); |
@@ -581,10 +619,50 @@ static inline int gpio_request(unsigned gpio, const char *label) | |||
581 | return 0; | 619 | return 0; |
582 | } | 620 | } |
583 | 621 | ||
622 | static inline int gpio_request_one(unsigned gpio, | ||
623 | unsigned long flags, const char *label) | ||
624 | { | ||
625 | return 0; | ||
626 | } | ||
627 | |||
628 | static inline int gpio_request_array(struct gpio *array, size_t num) | ||
629 | { | ||
630 | return 0; | ||
631 | } | ||
632 | |||
584 | static inline void gpio_free(unsigned gpio) | 633 | static inline void gpio_free(unsigned gpio) |
585 | { | 634 | { |
586 | } | 635 | } |
587 | 636 | ||
637 | static inline void gpio_free_array(struct gpio *array, size_t num) | ||
638 | { | ||
639 | } | ||
640 | |||
641 | static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) | ||
642 | { | ||
643 | return -ENOSYS; | ||
644 | } | ||
645 | |||
646 | static inline int gpio_export(unsigned gpio, bool direction_may_change) | ||
647 | { | ||
648 | return -ENOSYS; | ||
649 | } | ||
650 | |||
651 | static inline int gpio_export_link(struct device *dev, const char *name, | ||
652 | unsigned gpio) | ||
653 | { | ||
654 | return -ENOSYS; | ||
655 | } | ||
656 | |||
657 | static inline int gpio_sysfs_set_active_low(unsigned gpio, int value) | ||
658 | { | ||
659 | return -ENOSYS; | ||
660 | } | ||
661 | |||
662 | static inline void gpio_unexport(unsigned gpio) | ||
663 | { | ||
664 | } | ||
665 | |||
588 | #endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ | 666 | #endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ |
589 | 667 | ||
590 | 668 | ||
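Every converted accessor above follows the same pattern: the old absolute-KSEG1 au_writel()/au_sync() pair becomes a physical base run through KSEG1ADDR(), a relative register offset, __raw_writel(), and an explicit write barrier. Condensed into one helper for illustration only (the real functions inline the same steps):

static inline void au1000_sys_write(unsigned long offs, unsigned long val)
{
	void __iomem *base =
		(void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);

	__raw_writel(val, base + offs);
	wmb();	/* order the MMIO write, replacing au_sync() */
}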
diff --git a/arch/mips/include/asm/mach-bcm47xx/nvram.h b/arch/mips/include/asm/mach-bcm47xx/nvram.h index 9759588ba3cf..184d5ecb5f51 100644 --- a/arch/mips/include/asm/mach-bcm47xx/nvram.h +++ b/arch/mips/include/asm/mach-bcm47xx/nvram.h | |||
@@ -39,8 +39,16 @@ extern int nvram_getenv(char *name, char *val, size_t val_len); | |||
39 | 39 | ||
40 | static inline void nvram_parse_macaddr(char *buf, u8 *macaddr) | 40 | static inline void nvram_parse_macaddr(char *buf, u8 *macaddr) |
41 | { | 41 | { |
42 | sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], &macaddr[1], | 42 | if (strchr(buf, ':')) |
43 | &macaddr[2], &macaddr[3], &macaddr[4], &macaddr[5]); | 43 | sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], |
44 | &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], | ||
45 | &macaddr[5]); | ||
46 | else if (strchr(buf, '-')) | ||
47 | sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0], | ||
48 | &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], | ||
49 | &macaddr[5]); | ||
50 | else | ||
51 | printk(KERN_WARNING "Cannot parse MAC address: %s\n", buf); | ||
44 | } | 52 | } |
45 | 53 | ||
46 | #endif | 54 | #endif |
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h index 32978d32561a..ed72e6a26b73 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h | |||
@@ -88,7 +88,7 @@ struct bcm_tag { | |||
88 | char kernel_crc[CRC_LEN]; | 88 | char kernel_crc[CRC_LEN]; |
89 | /* 228-235: Unused at present */ | 89 | /* 228-235: Unused at present */ |
90 | char reserved1[8]; | 90 | char reserved1[8]; |
91 | /* 236-239: CRC32 of header excluding tagVersion */ | 91 | /* 236-239: CRC32 of header excluding last 20 bytes */ |
92 | char header_crc[CRC_LEN]; | 92 | char header_crc[CRC_LEN]; |
93 | /* 240-255: Unused at present */ | 93 | /* 240-255: Unused at present */ |
94 | char reserved2[16]; | 94 | char reserved2[16]; |
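The corrected comment pins down what header_crc covers: the first 236 bytes of the 256-byte tag, i.e. everything before header_crc itself and the trailing reserved2 field (20 bytes in total). A verification sketch, assuming the usual CFE image-tag conventions of a 0xFFFFFFFF seed and a natively-ordered stored value (both are assumptions, not taken from this patch):

#include <linux/types.h>
#include <linux/crc32.h>
#include <linux/string.h>

static bool bcm_tag_header_crc_ok(const struct bcm_tag *tag)
{
	u32 crc = crc32_le(0xFFFFFFFF, (const u8 *)tag, 236);
	u32 stored;

	memcpy(&stored, tag->header_crc, sizeof(stored));
	return stored == crc;	/* byte order assumed native */
}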
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index 0b2b5eb22e9b..dedef7d2b01f 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h | |||
@@ -63,6 +63,11 @@ | |||
63 | # CN30XX Disable instruction prefetching | 63 | # CN30XX Disable instruction prefetching |
64 | or v0, v0, 0x2000 | 64 | or v0, v0, 0x2000 |
65 | skip: | 65 | skip: |
66 | # First clear the CvmCtl[IPPCI] field and move the performance | ||
67 | # counters interrupt to IRQ 6 | ||
68 | li v1, ~(7 << 7) | ||
69 | and v0, v0, v1 | ||
70 | ori v0, v0, (6 << 7) | ||
66 | # Write the cavium control register | 71 | # Write the cavium control register |
67 | dmtc0 v0, CP0_CVMCTL_REG | 72 | dmtc0 v0, CP0_CVMCTL_REG |
68 | sync | 73 | sync |
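The five added instructions implement a read-modify-write on the 3-bit IPPCI field at bits [9:7] of CvmCtl: mask the field out, then OR in 6 so the performance-counter interrupt lands on IRQ 6. The same arithmetic in C, for readability only:

#include <stdint.h>

static inline uint64_t cvmctl_route_perf_to_irq6(uint64_t cvmctl)
{
	cvmctl &= ~(7ULL << 7);	/* li v1, ~(7 << 7); and v0, v0, v1 */
	cvmctl |= 6ULL << 7;	/* ori v0, v0, (6 << 7) */
	return cvmctl;
}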
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h new file mode 100644 index 000000000000..ce2f02929d22 --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/lantiq.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | #ifndef _LANTIQ_H__ | ||
9 | #define _LANTIQ_H__ | ||
10 | |||
11 | #include <linux/irq.h> | ||
12 | |||
13 | /* generic reg access functions */ | ||
14 | #define ltq_r32(reg) __raw_readl(reg) | ||
15 | #define ltq_w32(val, reg) __raw_writel(val, reg) | ||
16 | #define ltq_w32_mask(clear, set, reg) \ | ||
17 | ltq_w32((ltq_r32(reg) & ~(clear)) | (set), reg) | ||
18 | #define ltq_r8(reg) __raw_readb(reg) | ||
19 | #define ltq_w8(val, reg) __raw_writeb(val, reg) | ||
20 | |||
21 | /* register access macros for EBU and CGU */ | ||
22 | #define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y)) | ||
23 | #define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x)) | ||
24 | #define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y)) | ||
25 | #define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x)) | ||
26 | |||
27 | extern __iomem void *ltq_ebu_membase; | ||
28 | extern __iomem void *ltq_cgu_membase; | ||
29 | |||
30 | extern unsigned int ltq_get_cpu_ver(void); | ||
31 | extern unsigned int ltq_get_soc_type(void); | ||
32 | |||
33 | /* clock speeds */ | ||
34 | #define CLOCK_60M 60000000 | ||
35 | #define CLOCK_83M 83333333 | ||
36 | #define CLOCK_111M 111111111 | ||
37 | #define CLOCK_133M 133333333 | ||
38 | #define CLOCK_167M 166666667 | ||
39 | #define CLOCK_200M 200000000 | ||
40 | #define CLOCK_266M 266666666 | ||
41 | #define CLOCK_333M 333333333 | ||
42 | #define CLOCK_400M 400000000 | ||
43 | |||
44 | /* spinlock serializing all EBU I/O */ | ||
45 | extern spinlock_t ebu_lock; | ||
46 | |||
47 | /* some irq helpers */ | ||
48 | extern void ltq_disable_irq(struct irq_data *data); | ||
49 | extern void ltq_mask_and_ack_irq(struct irq_data *data); | ||
50 | extern void ltq_enable_irq(struct irq_data *data); | ||
51 | |||
52 | /* find out what caused the last cpu reset */ | ||
53 | extern int ltq_reset_cause(void); | ||
54 | #define LTQ_RST_CAUSE_WDTRST 0x20 | ||
55 | |||
56 | #define IOPORT_RESOURCE_START 0x10000000 | ||
57 | #define IOPORT_RESOURCE_END 0xffffffff | ||
58 | #define IOMEM_RESOURCE_START 0x10000000 | ||
59 | #define IOMEM_RESOURCE_END 0xffffffff | ||
60 | #define LTQ_FLASH_START 0x10000000 | ||
61 | #define LTQ_FLASH_MAX 0x04000000 | ||
62 | |||
63 | #endif | ||
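ltq_w32_mask() is this header's read-modify-write primitive, and the comment above requires every EBU access to hold ebu_lock. A sketch combining the two (EBU_WRDIS and LTQ_EBU_BUSCON0 come from the lantiq_soc.h header added later in this patch; clearing the write-disable bit is just an illustrative choice):

static void ebu_allow_writes(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ebu_lock, flags);
	ltq_w32_mask(EBU_WRDIS, 0, ltq_ebu_membase + LTQ_EBU_BUSCON0);
	spin_unlock_irqrestore(&ebu_lock, flags);
}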
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h new file mode 100644 index 000000000000..a305f1d0259e --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LANTIQ_PLATFORM_H__ | ||
10 | #define _LANTIQ_PLATFORM_H__ | ||
11 | |||
12 | #include <linux/mtd/partitions.h> | ||
13 | #include <linux/socket.h> | ||
14 | |||
15 | /* struct used to pass info to the pci core */ | ||
16 | enum { | ||
17 | PCI_CLOCK_INT = 0, | ||
18 | PCI_CLOCK_EXT | ||
19 | }; | ||
20 | |||
21 | #define PCI_EXIN0 0x0001 | ||
22 | #define PCI_EXIN1 0x0002 | ||
23 | #define PCI_EXIN2 0x0004 | ||
24 | #define PCI_EXIN3 0x0008 | ||
25 | #define PCI_EXIN4 0x0010 | ||
26 | #define PCI_EXIN5 0x0020 | ||
27 | #define PCI_EXIN_MAX 6 | ||
28 | |||
29 | #define PCI_GNT1 0x0040 | ||
30 | #define PCI_GNT2 0x0080 | ||
31 | #define PCI_GNT3 0x0100 | ||
32 | #define PCI_GNT4 0x0200 | ||
33 | |||
34 | #define PCI_REQ1 0x0400 | ||
35 | #define PCI_REQ2 0x0800 | ||
36 | #define PCI_REQ3 0x1000 | ||
37 | #define PCI_REQ4 0x2000 | ||
38 | #define PCI_REQ_SHIFT 10 | ||
39 | #define PCI_REQ_MASK 0xf | ||
40 | |||
41 | struct ltq_pci_data { | ||
42 | int clock; | ||
43 | int gpio; | ||
44 | int irq[16]; | ||
45 | }; | ||
46 | |||
47 | /* struct used to pass info to network drivers */ | ||
48 | struct ltq_eth_data { | ||
49 | struct sockaddr mac; | ||
50 | int mii_mode; | ||
51 | }; | ||
52 | |||
53 | #endif | ||
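struct ltq_eth_data reuses struct sockaddr purely as a carrier for the six MAC bytes in sa_data. A board-code sketch filling it in (the mii_mode value is a placeholder; a real board would use its PHY's interface mode):

static struct ltq_eth_data board_eth_data = {
	.mii_mode = 1,	/* placeholder, board-specific */
};

static void __init board_set_mac(const u8 *mac)
{
	memcpy(board_eth_data.mac.sa_data, mac, 6);
}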
diff --git a/arch/mips/include/asm/mach-lantiq/war.h b/arch/mips/include/asm/mach-lantiq/war.h new file mode 100644 index 000000000000..01b08ef368d1 --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/war.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | */ | ||
7 | #ifndef __ASM_MIPS_MACH_LANTIQ_WAR_H | ||
8 | #define __ASM_MIPS_MACH_LANTIQ_WAR_H | ||
9 | |||
10 | #define R4600_V1_INDEX_ICACHEOP_WAR 0 | ||
11 | #define R4600_V1_HIT_CACHEOP_WAR 0 | ||
12 | #define R4600_V2_HIT_CACHEOP_WAR 0 | ||
13 | #define R5432_CP0_INTERRUPT_WAR 0 | ||
14 | #define BCM1250_M3_WAR 0 | ||
15 | #define SIBYTE_1956_WAR 0 | ||
16 | #define MIPS4K_ICACHE_REFILL_WAR 0 | ||
17 | #define MIPS_CACHE_SYNC_WAR 0 | ||
18 | #define TX49XX_ICACHE_INDEX_INV_WAR 0 | ||
19 | #define RM9000_CDEX_SMP_WAR 0 | ||
20 | #define ICACHE_REFILLS_WORKAROUND_WAR 0 | ||
21 | #define R10000_LLSC_WAR 0 | ||
22 | #define MIPS34K_MISSED_ITLB_WAR 0 | ||
23 | |||
24 | #endif | ||
diff --git a/arch/mips/include/asm/mach-lantiq/xway/irq.h b/arch/mips/include/asm/mach-lantiq/xway/irq.h new file mode 100644 index 000000000000..a1471d2dd0d2 --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/xway/irq.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef __LANTIQ_IRQ_H | ||
10 | #define __LANTIQ_IRQ_H | ||
11 | |||
12 | #include <lantiq_irq.h> | ||
13 | |||
14 | #define NR_IRQS 256 | ||
15 | |||
16 | #include_next <irq.h> | ||
17 | |||
18 | #endif | ||
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h new file mode 100644 index 000000000000..b4465a888e20 --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LANTIQ_XWAY_IRQ_H__ | ||
10 | #define _LANTIQ_XWAY_IRQ_H__ | ||
11 | |||
12 | #define INT_NUM_IRQ0 8 | ||
13 | #define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0) | ||
14 | #define INT_NUM_IM1_IRL0 (INT_NUM_IRQ0 + 32) | ||
15 | #define INT_NUM_IM2_IRL0 (INT_NUM_IRQ0 + 64) | ||
16 | #define INT_NUM_IM3_IRL0 (INT_NUM_IRQ0 + 96) | ||
17 | #define INT_NUM_IM4_IRL0 (INT_NUM_IRQ0 + 128) | ||
18 | #define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) | ||
19 | |||
20 | #define LTQ_ASC_TIR(x) (INT_NUM_IM3_IRL0 + (x * 8)) | ||
21 | #define LTQ_ASC_RIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 1) | ||
22 | #define LTQ_ASC_EIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 2) | ||
23 | |||
24 | #define LTQ_ASC_ASE_TIR INT_NUM_IM2_IRL0 | ||
25 | #define LTQ_ASC_ASE_RIR (INT_NUM_IM2_IRL0 + 2) | ||
26 | #define LTQ_ASC_ASE_EIR (INT_NUM_IM2_IRL0 + 3) | ||
27 | |||
28 | #define LTQ_SSC_TIR (INT_NUM_IM0_IRL0 + 15) | ||
29 | #define LTQ_SSC_RIR (INT_NUM_IM0_IRL0 + 14) | ||
30 | #define LTQ_SSC_EIR (INT_NUM_IM0_IRL0 + 16) | ||
31 | |||
32 | #define LTQ_MEI_DYING_GASP_INT (INT_NUM_IM1_IRL0 + 21) | ||
33 | #define LTQ_MEI_INT (INT_NUM_IM1_IRL0 + 23) | ||
34 | |||
35 | #define LTQ_TIMER6_INT (INT_NUM_IM1_IRL0 + 23) | ||
36 | #define LTQ_USB_INT (INT_NUM_IM1_IRL0 + 22) | ||
37 | #define LTQ_USB_OC_INT (INT_NUM_IM4_IRL0 + 23) | ||
38 | |||
39 | #define MIPS_CPU_TIMER_IRQ 7 | ||
40 | |||
41 | #define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) | ||
42 | #define LTQ_DMA_CH1_INT (INT_NUM_IM2_IRL0 + 1) | ||
43 | #define LTQ_DMA_CH2_INT (INT_NUM_IM2_IRL0 + 2) | ||
44 | #define LTQ_DMA_CH3_INT (INT_NUM_IM2_IRL0 + 3) | ||
45 | #define LTQ_DMA_CH4_INT (INT_NUM_IM2_IRL0 + 4) | ||
46 | #define LTQ_DMA_CH5_INT (INT_NUM_IM2_IRL0 + 5) | ||
47 | #define LTQ_DMA_CH6_INT (INT_NUM_IM2_IRL0 + 6) | ||
48 | #define LTQ_DMA_CH7_INT (INT_NUM_IM2_IRL0 + 7) | ||
49 | #define LTQ_DMA_CH8_INT (INT_NUM_IM2_IRL0 + 8) | ||
50 | #define LTQ_DMA_CH9_INT (INT_NUM_IM2_IRL0 + 9) | ||
51 | #define LTQ_DMA_CH10_INT (INT_NUM_IM2_IRL0 + 10) | ||
52 | #define LTQ_DMA_CH11_INT (INT_NUM_IM2_IRL0 + 11) | ||
53 | #define LTQ_DMA_CH12_INT (INT_NUM_IM2_IRL0 + 25) | ||
54 | #define LTQ_DMA_CH13_INT (INT_NUM_IM2_IRL0 + 26) | ||
55 | #define LTQ_DMA_CH14_INT (INT_NUM_IM2_IRL0 + 27) | ||
56 | #define LTQ_DMA_CH15_INT (INT_NUM_IM2_IRL0 + 28) | ||
57 | #define LTQ_DMA_CH16_INT (INT_NUM_IM2_IRL0 + 29) | ||
58 | #define LTQ_DMA_CH17_INT (INT_NUM_IM2_IRL0 + 30) | ||
59 | #define LTQ_DMA_CH18_INT (INT_NUM_IM2_IRL0 + 16) | ||
60 | #define LTQ_DMA_CH19_INT (INT_NUM_IM2_IRL0 + 21) | ||
61 | |||
62 | #define LTQ_PPE_MBOX_INT (INT_NUM_IM2_IRL0 + 24) | ||
63 | |||
64 | #define INT_NUM_IM4_IRL14 (INT_NUM_IM4_IRL0 + 14) | ||
65 | |||
66 | #endif | ||
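The numbering scheme above packs five interrupt modules (IM0-IM4) of 32 lines each into the flat Linux IRQ space, offset by INT_NUM_IRQ0 = 8 to leave room for the MIPS CPU interrupts. A helper showing the mapping (illustration only; the per-peripheral defines above are the authoritative values):

static inline int ltq_im_line_to_irq(int module, int line)
{
	return INT_NUM_IM0_IRL0 + module * INT_NUM_IM_OFFSET + line;
}

/* e.g. ltq_im_line_to_irq(2, 24) == LTQ_PPE_MBOX_INT */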
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h new file mode 100644 index 000000000000..8a3c6be669d2 --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_XWAY_H__ | ||
10 | #define _LTQ_XWAY_H__ | ||
11 | |||
12 | #ifdef CONFIG_SOC_TYPE_XWAY | ||
13 | |||
14 | #include <lantiq.h> | ||
15 | |||
16 | /* Chip IDs */ | ||
17 | #define SOC_ID_DANUBE1 0x129 | ||
18 | #define SOC_ID_DANUBE2 0x12B | ||
19 | #define SOC_ID_TWINPASS 0x12D | ||
20 | #define SOC_ID_AMAZON_SE 0x152 | ||
21 | #define SOC_ID_ARX188 0x16C | ||
22 | #define SOC_ID_ARX168 0x16D | ||
23 | #define SOC_ID_ARX182 0x16F | ||
24 | |||
25 | /* SoC Types */ | ||
26 | #define SOC_TYPE_DANUBE 0x01 | ||
27 | #define SOC_TYPE_TWINPASS 0x02 | ||
28 | #define SOC_TYPE_AR9 0x03 | ||
29 | #define SOC_TYPE_VR9 0x04 | ||
30 | #define SOC_TYPE_AMAZON_SE 0x05 | ||
31 | |||
32 | /* ASC0/1 - serial port */ | ||
33 | #define LTQ_ASC0_BASE_ADDR 0x1E100400 | ||
34 | #define LTQ_ASC1_BASE_ADDR 0x1E100C00 | ||
35 | #define LTQ_ASC_SIZE 0x400 | ||
36 | |||
37 | /* RCU - reset control unit */ | ||
38 | #define LTQ_RCU_BASE_ADDR 0x1F203000 | ||
39 | #define LTQ_RCU_SIZE 0x1000 | ||
40 | |||
41 | /* GPTU - general purpose timer unit */ | ||
42 | #define LTQ_GPTU_BASE_ADDR 0x18000300 | ||
43 | #define LTQ_GPTU_SIZE 0x100 | ||
44 | |||
45 | /* EBU - external bus unit */ | ||
46 | #define LTQ_EBU_GPIO_START 0x14000000 | ||
47 | #define LTQ_EBU_GPIO_SIZE 0x1000 | ||
48 | |||
49 | #define LTQ_EBU_BASE_ADDR 0x1E105300 | ||
50 | #define LTQ_EBU_SIZE 0x100 | ||
51 | |||
52 | #define LTQ_EBU_BUSCON0 0x0060 | ||
53 | #define LTQ_EBU_PCC_CON 0x0090 | ||
54 | #define LTQ_EBU_PCC_IEN 0x00A4 | ||
55 | #define LTQ_EBU_PCC_ISTAT 0x00A0 | ||
56 | #define LTQ_EBU_BUSCON1 0x0064 | ||
57 | #define LTQ_EBU_ADDRSEL1 0x0024 | ||
58 | #define EBU_WRDIS 0x80000000 | ||
59 | |||
60 | /* CGU - clock generation unit */ | ||
61 | #define LTQ_CGU_BASE_ADDR 0x1F103000 | ||
62 | #define LTQ_CGU_SIZE 0x1000 | ||
63 | |||
64 | /* ICU - interrupt control unit */ | ||
65 | #define LTQ_ICU_BASE_ADDR 0x1F880200 | ||
66 | #define LTQ_ICU_SIZE 0x100 | ||
67 | |||
68 | /* EIU - external interrupt unit */ | ||
69 | #define LTQ_EIU_BASE_ADDR 0x1F101000 | ||
70 | #define LTQ_EIU_SIZE 0x1000 | ||
71 | |||
72 | /* PMU - power management unit */ | ||
73 | #define LTQ_PMU_BASE_ADDR 0x1F102000 | ||
74 | #define LTQ_PMU_SIZE 0x1000 | ||
75 | |||
76 | #define PMU_DMA 0x0020 | ||
77 | #define PMU_USB 0x8041 | ||
78 | #define PMU_LED 0x0800 | ||
79 | #define PMU_GPT 0x1000 | ||
80 | #define PMU_PPE 0x2000 | ||
81 | #define PMU_FPI 0x4000 | ||
82 | #define PMU_SWITCH 0x10000000 | ||
83 | |||
84 | /* ETOP - ethernet */ | ||
85 | #define LTQ_ETOP_BASE_ADDR 0x1E180000 | ||
86 | #define LTQ_ETOP_SIZE 0x40000 | ||
87 | |||
88 | /* DMA */ | ||
89 | #define LTQ_DMA_BASE_ADDR 0x1E104100 | ||
90 | #define LTQ_DMA_SIZE 0x800 | ||
91 | |||
92 | /* PCI */ | ||
93 | #define PCI_CR_BASE_ADDR 0x1E105400 | ||
94 | #define PCI_CR_SIZE 0x400 | ||
95 | |||
96 | /* WDT */ | ||
97 | #define LTQ_WDT_BASE_ADDR 0x1F8803F0 | ||
98 | #define LTQ_WDT_SIZE 0x10 | ||
99 | |||
100 | /* STP - serial to parallel conversion unit */ | ||
101 | #define LTQ_STP_BASE_ADDR 0x1E100BB0 | ||
102 | #define LTQ_STP_SIZE 0x40 | ||
103 | |||
104 | /* GPIO */ | ||
105 | #define LTQ_GPIO0_BASE_ADDR 0x1E100B10 | ||
106 | #define LTQ_GPIO1_BASE_ADDR 0x1E100B40 | ||
107 | #define LTQ_GPIO2_BASE_ADDR 0x1E100B70 | ||
108 | #define LTQ_GPIO_SIZE 0x30 | ||
109 | |||
110 | /* SSC */ | ||
111 | #define LTQ_SSC_BASE_ADDR 0x1e100800 | ||
112 | #define LTQ_SSC_SIZE 0x100 | ||
113 | |||
114 | /* MEI - DSL core */ | ||
115 | #define LTQ_MEI_BASE_ADDR 0x1E116000 | ||
116 | |||
117 | /* DEU - data encryption unit */ | ||
118 | #define LTQ_DEU_BASE_ADDR 0x1E103100 | ||
119 | |||
120 | /* MPS - multi processor unit (voice) */ | ||
121 | #define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000) | ||
122 | #define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344)) | ||
123 | |||
124 | /* request a pin for a non-GPIO (alternate) function and set the PIO config */ | ||
125 | extern int ltq_gpio_request(unsigned int pin, unsigned int alt0, | ||
126 | unsigned int alt1, unsigned int dir, const char *name); | ||
127 | extern void ltq_pmu_enable(unsigned int module); | ||
128 | extern void ltq_pmu_disable(unsigned int module); | ||
129 | |||
130 | static inline int ltq_is_ar9(void) | ||
131 | { | ||
132 | return (ltq_get_soc_type() == SOC_TYPE_AR9); | ||
133 | } | ||
134 | |||
135 | static inline int ltq_is_vr9(void) | ||
136 | { | ||
137 | return (ltq_get_soc_type() == SOC_TYPE_VR9); | ||
138 | } | ||
139 | |||
140 | #endif /* CONFIG_SOC_TYPE_XWAY */ | ||
141 | #endif /* _LTQ_XWAY_H__ */ | ||
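The PMU_* values above are bit masks for the power management unit, gated on and off through the two helpers declared in this header. A usage sketch for a DMA user (error handling omitted):

static int __init board_dma_init(void)
{
	ltq_pmu_enable(PMU_DMA);	/* ungate the DMA block */
	/* ... set up and use the DMA engine ... */
	return 0;
}

static void board_dma_exit(void)
{
	ltq_pmu_disable(PMU_DMA);	/* gate it again when idle */
}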
diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h new file mode 100644 index 000000000000..872943a4b90e --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. | ||
14 | * | ||
15 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
16 | */ | ||
17 | |||
18 | #ifndef LTQ_DMA_H__ | ||
19 | #define LTQ_DMA_H__ | ||
20 | |||
21 | #define LTQ_DESC_SIZE 0x08 /* each descriptor is 64 bits */ | ||
22 | #define LTQ_DESC_NUM 0x40 /* 64 descriptors / channel */ | ||
23 | |||
24 | #define LTQ_DMA_OWN BIT(31) /* owner bit */ | ||
25 | #define LTQ_DMA_C BIT(30) /* complete bit */ | ||
26 | #define LTQ_DMA_SOP BIT(29) /* start of packet */ | ||
27 | #define LTQ_DMA_EOP BIT(28) /* end of packet */ | ||
28 | #define LTQ_DMA_TX_OFFSET(x) ((x & 0x1f) << 23) /* data bytes offset */ | ||
29 | #define LTQ_DMA_RX_OFFSET(x) ((x & 0x7) << 23) /* data bytes offset */ | ||
30 | #define LTQ_DMA_SIZE_MASK (0xffff) /* the size field is 16 bit */ | ||
31 | |||
32 | struct ltq_dma_desc { | ||
33 | u32 ctl; | ||
34 | u32 addr; | ||
35 | }; | ||
36 | |||
37 | struct ltq_dma_channel { | ||
38 | int nr; /* the channel number */ | ||
39 | int irq; /* the mapped irq */ | ||
40 | int desc; /* the current descriptor */ | ||
41 | struct ltq_dma_desc *desc_base; /* the descriptor base */ | ||
42 | int phys; /* physical addr */ | ||
43 | }; | ||
44 | |||
45 | enum { | ||
46 | DMA_PORT_ETOP = 0, | ||
47 | DMA_PORT_DEU, | ||
48 | }; | ||
49 | |||
50 | extern void ltq_dma_enable_irq(struct ltq_dma_channel *ch); | ||
51 | extern void ltq_dma_disable_irq(struct ltq_dma_channel *ch); | ||
52 | extern void ltq_dma_ack_irq(struct ltq_dma_channel *ch); | ||
53 | extern void ltq_dma_open(struct ltq_dma_channel *ch); | ||
54 | extern void ltq_dma_close(struct ltq_dma_channel *ch); | ||
55 | extern void ltq_dma_alloc_tx(struct ltq_dma_channel *ch); | ||
56 | extern void ltq_dma_alloc_rx(struct ltq_dma_channel *ch); | ||
57 | extern void ltq_dma_free(struct ltq_dma_channel *ch); | ||
58 | extern void ltq_dma_init_port(int p); | ||
59 | |||
60 | #endif | ||
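Each descriptor is two 32-bit words, and ctl packs ownership, the start/end-of-packet flags, a byte offset, and the 16-bit size into one word. A sketch composing the control word of a single-buffer transmit descriptor from the definitions above:

static u32 ltq_dma_tx_ctl(unsigned int len)
{
	/* hand the buffer to hardware, whole packet in one descriptor,
	 * no leading byte offset, size clamped to the 16-bit field */
	return LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
	       LTQ_DMA_TX_OFFSET(0) | (len & LTQ_DMA_SIZE_MASK);
}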
diff --git a/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h new file mode 100644 index 000000000000..3b728275b9b0 --- /dev/null +++ b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 Netlogic Microsystems | ||
7 | * Copyright (C) 2003 Ralf Baechle | ||
8 | */ | ||
9 | #ifndef __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H | ||
10 | #define __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H | ||
11 | |||
12 | #define cpu_has_4kex 1 | ||
13 | #define cpu_has_4k_cache 1 | ||
14 | #define cpu_has_watch 1 | ||
15 | #define cpu_has_mips16 0 | ||
16 | #define cpu_has_counter 1 | ||
17 | #define cpu_has_divec 1 | ||
18 | #define cpu_has_vce 0 | ||
19 | #define cpu_has_cache_cdex_p 0 | ||
20 | #define cpu_has_cache_cdex_s 0 | ||
21 | #define cpu_has_prefetch 1 | ||
22 | #define cpu_has_mcheck 1 | ||
23 | #define cpu_has_ejtag 1 | ||
24 | |||
25 | #define cpu_has_llsc 1 | ||
26 | #define cpu_has_vtag_icache 0 | ||
27 | #define cpu_has_dc_aliases 0 | ||
28 | #define cpu_has_ic_fills_f_dc 0 | ||
29 | #define cpu_has_dsp 0 | ||
30 | #define cpu_has_mipsmt 0 | ||
31 | #define cpu_has_userlocal 0 | ||
32 | #define cpu_icache_snoops_remote_store 0 | ||
33 | |||
34 | #define cpu_has_nofpuex 0 | ||
35 | #define cpu_has_64bits 1 | ||
36 | |||
37 | #define cpu_has_mips32r1 1 | ||
38 | #define cpu_has_mips32r2 0 | ||
39 | #define cpu_has_mips64r1 1 | ||
40 | #define cpu_has_mips64r2 0 | ||
41 | |||
42 | #define cpu_has_inclusive_pcaches 0 | ||
43 | |||
44 | #define cpu_dcache_line_size() 32 | ||
45 | #define cpu_icache_line_size() 32 | ||
46 | |||
47 | #endif /* __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H */ | ||
diff --git a/arch/mips/include/asm/mach-netlogic/irq.h b/arch/mips/include/asm/mach-netlogic/irq.h new file mode 100644 index 000000000000..b5902458e7c1 --- /dev/null +++ b/arch/mips/include/asm/mach-netlogic/irq.h | |||
@@ -0,0 +1,14 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 Netlogic Microsystems. | ||
7 | */ | ||
8 | #ifndef __ASM_NETLOGIC_IRQ_H | ||
9 | #define __ASM_NETLOGIC_IRQ_H | ||
10 | |||
11 | #define NR_IRQS 64 | ||
12 | #define MIPS_CPU_IRQ_BASE 0 | ||
13 | |||
14 | #endif /* __ASM_NETLOGIC_IRQ_H */ | ||
diff --git a/arch/mips/include/asm/mach-netlogic/war.h b/arch/mips/include/asm/mach-netlogic/war.h new file mode 100644 index 000000000000..22da89327352 --- /dev/null +++ b/arch/mips/include/asm/mach-netlogic/war.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 Netlogic Microsystems. | ||
7 | * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> | ||
8 | */ | ||
9 | #ifndef __ASM_MIPS_MACH_NLM_WAR_H | ||
10 | #define __ASM_MIPS_MACH_NLM_WAR_H | ||
11 | |||
12 | #define R4600_V1_INDEX_ICACHEOP_WAR 0 | ||
13 | #define R4600_V1_HIT_CACHEOP_WAR 0 | ||
14 | #define R4600_V2_HIT_CACHEOP_WAR 0 | ||
15 | #define R5432_CP0_INTERRUPT_WAR 0 | ||
16 | #define BCM1250_M3_WAR 0 | ||
17 | #define SIBYTE_1956_WAR 0 | ||
18 | #define MIPS4K_ICACHE_REFILL_WAR 0 | ||
19 | #define MIPS_CACHE_SYNC_WAR 0 | ||
20 | #define TX49XX_ICACHE_INDEX_INV_WAR 0 | ||
21 | #define RM9000_CDEX_SMP_WAR 0 | ||
22 | #define ICACHE_REFILLS_WORKAROUND_WAR 0 | ||
23 | #define R10000_LLSC_WAR 0 | ||
24 | #define MIPS34K_MISSED_ITLB_WAR 0 | ||
25 | |||
26 | #endif /* __ASM_MIPS_MACH_NLM_WAR_H */ | ||
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h index d94085a3eafb..bc01a02cacd8 100644 --- a/arch/mips/include/asm/module.h +++ b/arch/mips/include/asm/module.h | |||
@@ -118,6 +118,8 @@ search_module_dbetables(unsigned long addr) | |||
118 | #define MODULE_PROC_FAMILY "LOONGSON2 " | 118 | #define MODULE_PROC_FAMILY "LOONGSON2 " |
119 | #elif defined CONFIG_CPU_CAVIUM_OCTEON | 119 | #elif defined CONFIG_CPU_CAVIUM_OCTEON |
120 | #define MODULE_PROC_FAMILY "OCTEON " | 120 | #define MODULE_PROC_FAMILY "OCTEON " |
121 | #elif defined CONFIG_CPU_XLR | ||
122 | #define MODULE_PROC_FAMILY "XLR " | ||
121 | #else | 123 | #else |
122 | #error MODULE_PROC_FAMILY undefined for your processor configuration | 124 | #error MODULE_PROC_FAMILY undefined for your processor configuration |
123 | #endif | 125 | #endif |
diff --git a/arch/mips/include/asm/netlogic/interrupt.h b/arch/mips/include/asm/netlogic/interrupt.h new file mode 100644 index 000000000000..a85aadb6cfd7 --- /dev/null +++ b/arch/mips/include/asm/netlogic/interrupt.h | |||
@@ -0,0 +1,45 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_INTERRUPT_H | ||
36 | #define _ASM_NLM_INTERRUPT_H | ||
37 | |||
38 | /* Defines for the IRQ numbers */ | ||
39 | |||
40 | #define IRQ_IPI_SMP_FUNCTION 3 | ||
41 | #define IRQ_IPI_SMP_RESCHEDULE 4 | ||
42 | #define IRQ_MSGRING 6 | ||
43 | #define IRQ_TIMER 7 | ||
44 | |||
45 | #endif | ||
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h new file mode 100644 index 000000000000..8c53d0ba4bf2 --- /dev/null +++ b/arch/mips/include/asm/netlogic/mips-extns.h | |||
@@ -0,0 +1,76 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_MIPS_EXTS_H | ||
36 | #define _ASM_NLM_MIPS_EXTS_H | ||
37 | |||
38 | /* | ||
39 | * XLR and XLP interrupt request and interrupt mask registers | ||
40 | */ | ||
41 | #define read_c0_eirr() __read_64bit_c0_register($9, 6) | ||
42 | #define read_c0_eimr() __read_64bit_c0_register($9, 7) | ||
43 | #define write_c0_eirr(val) __write_64bit_c0_register($9, 6, val) | ||
44 | |||
45 | /* | ||
46 | * Writing EIMR in 32 bit is a special case, the lower 8 bit of the | ||
47 | * EIMR is shadowed in the status register, so we cannot save and | ||
48 | * restore status register for split read. | ||
49 | */ | ||
50 | #define write_c0_eimr(val) \ | ||
51 | do { \ | ||
52 | if (sizeof(unsigned long) == 4) { \ | ||
53 | unsigned long __flags; \ | ||
54 | \ | ||
55 | local_irq_save(__flags); \ | ||
56 | __asm__ __volatile__( \ | ||
57 | ".set\tmips64\n\t" \ | ||
58 | "dsll\t%L0, %L0, 32\n\t" \ | ||
59 | "dsrl\t%L0, %L0, 32\n\t" \ | ||
60 | "dsll\t%M0, %M0, 32\n\t" \ | ||
61 | "or\t%L0, %L0, %M0\n\t" \ | ||
62 | "dmtc0\t%L0, $9, 7\n\t" \ | ||
63 | ".set\tmips0" \ | ||
64 | : : "r" (val)); \ | ||
65 | __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\ | ||
66 | local_irq_restore(__flags); \ | ||
67 | } else \ | ||
68 | __write_64bit_c0_register($9, 7, (val)); \ | ||
69 | } while (0) | ||
70 | |||
71 | static inline int hard_smp_processor_id(void) | ||
72 | { | ||
73 | return __read_32bit_c0_register($15, 1) & 0x3ff; | ||
74 | } | ||
75 | |||
76 | #endif /*_ASM_NLM_MIPS_EXTS_H */ | ||
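On a 32-bit kernel the 64-bit EIMR value arrives in a register pair, so the asm in write_c0_eimr() zero-extends the low word, shifts the high word up, and ORs them before the dmtc0 — i.e. ((u64)hi << 32) | (u32)lo — and then patches EIMR[7:0] into Status[15:8] of the saved flags by hand, because those bits shadow the status register and the restore would otherwise write back stale values. The value composition in plain C, for readability only (the real sequence must stay inside one uninterruptible asm block):

#include <stdint.h>

static inline uint64_t eimr_compose(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}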
diff --git a/arch/mips/include/asm/netlogic/psb-bootinfo.h b/arch/mips/include/asm/netlogic/psb-bootinfo.h new file mode 100644 index 000000000000..6878307f0ee6 --- /dev/null +++ b/arch/mips/include/asm/netlogic/psb-bootinfo.h | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NETLOGIC_BOOTINFO_H | ||
36 | #define _ASM_NETLOGIC_BOOTINFO_H | ||
37 | |||
38 | struct psb_info { | ||
39 | uint64_t boot_level; | ||
40 | uint64_t io_base; | ||
41 | uint64_t output_device; | ||
42 | uint64_t uart_print; | ||
43 | uint64_t led_output; | ||
44 | uint64_t init; | ||
45 | uint64_t exit; | ||
46 | uint64_t warm_reset; | ||
47 | uint64_t wakeup; | ||
48 | uint64_t online_cpu_map; | ||
49 | uint64_t master_reentry_sp; | ||
50 | uint64_t master_reentry_gp; | ||
51 | uint64_t master_reentry_fn; | ||
52 | uint64_t slave_reentry_fn; | ||
53 | uint64_t magic_dword; | ||
54 | uint64_t uart_putchar; | ||
55 | uint64_t size; | ||
56 | uint64_t uart_getchar; | ||
57 | uint64_t nmi_handler; | ||
58 | uint64_t psb_version; | ||
59 | uint64_t mac_addr; | ||
60 | uint64_t cpu_frequency; | ||
61 | uint64_t board_version; | ||
62 | uint64_t malloc; | ||
63 | uint64_t free; | ||
64 | uint64_t global_shmem_addr; | ||
65 | uint64_t global_shmem_size; | ||
66 | uint64_t psb_os_cpu_map; | ||
67 | uint64_t userapp_cpu_map; | ||
68 | uint64_t wakeup_os; | ||
69 | uint64_t psb_mem_map; | ||
70 | uint64_t board_major_version; | ||
71 | uint64_t board_minor_version; | ||
72 | uint64_t board_manf_revision; | ||
73 | uint64_t board_serial_number; | ||
74 | uint64_t psb_physaddr_map; | ||
75 | uint64_t xlr_loaderip_config; | ||
76 | uint64_t bldr_envp; | ||
77 | uint64_t avail_mem_map; | ||
78 | }; | ||
79 | |||
80 | enum { | ||
81 | NETLOGIC_IO_SPACE = 0x10, | ||
82 | PCIX_IO_SPACE, | ||
83 | PCIX_CFG_SPACE, | ||
84 | PCIX_MEMORY_SPACE, | ||
85 | HT_IO_SPACE, | ||
86 | HT_CFG_SPACE, | ||
87 | HT_MEMORY_SPACE, | ||
88 | SRAM_SPACE, | ||
89 | FLASH_CONTROLLER_SPACE | ||
90 | }; | ||
91 | |||
92 | #define NLM_MAX_ARGS 64 | ||
93 | #define NLM_MAX_ENVS 32 | ||
94 | |||
95 | /* This is what netlboot passes; the Linux boot_mem_map is subtly different */ | ||
96 | #define NLM_BOOT_MEM_MAP_MAX 32 | ||
97 | struct nlm_boot_mem_map { | ||
98 | int nr_map; | ||
99 | struct nlm_boot_mem_map_entry { | ||
100 | uint64_t addr; /* start of memory segment */ | ||
101 | uint64_t size; /* size of memory segment */ | ||
102 | uint32_t type; /* type of memory segment */ | ||
103 | } map[NLM_BOOT_MEM_MAP_MAX]; | ||
104 | }; | ||
105 | |||
106 | /* Pointer to saved boot loader info */ | ||
107 | extern struct psb_info nlm_prom_info; | ||
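
To show how this bootloader-supplied state is typically consumed, here is a minimal sketch under stated assumptions: the helper name is hypothetical, psb_mem_map is assumed to point at a struct nlm_boot_mem_map as laid out above, and the entry type values are assumed compatible with the MIPS BOOT_MEM_* codes.

    /* Hypothetical sketch: walk the netlboot memory map saved by the
     * boot loader and hand each segment to the MIPS boot memory code. */
    static void __init nlm_setup_prom_memory(void)
    {
            struct nlm_boot_mem_map *map;
            int i;

            map = (struct nlm_boot_mem_map *)(unsigned long)nlm_prom_info.psb_mem_map;
            for (i = 0; i < map->nr_map; i++)
                    add_memory_region(map->map[i].addr, map->map[i].size,
                                      map->map[i].type);
    }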
108 | |||
109 | #endif | ||
diff --git a/arch/mips/include/asm/netlogic/xlr/gpio.h b/arch/mips/include/asm/netlogic/xlr/gpio.h new file mode 100644 index 000000000000..51f6ad4aeb14 --- /dev/null +++ b/arch/mips/include/asm/netlogic/xlr/gpio.h | |||
@@ -0,0 +1,73 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_GPIO_H | ||
36 | #define _ASM_NLM_GPIO_H | ||
37 | |||
38 | #define NETLOGIC_GPIO_INT_EN_REG 0 | ||
39 | #define NETLOGIC_GPIO_INPUT_INVERSION_REG 1 | ||
40 | #define NETLOGIC_GPIO_IO_DIR_REG 2 | ||
41 | #define NETLOGIC_GPIO_IO_DATA_WR_REG 3 | ||
42 | #define NETLOGIC_GPIO_IO_DATA_RD_REG 4 | ||
43 | |||
44 | #define NETLOGIC_GPIO_SWRESET_REG 8 | ||
45 | #define NETLOGIC_GPIO_DRAM1_CNTRL_REG 9 | ||
46 | #define NETLOGIC_GPIO_DRAM1_RATIO_REG 10 | ||
47 | #define NETLOGIC_GPIO_DRAM1_RESET_REG 11 | ||
48 | #define NETLOGIC_GPIO_DRAM1_STATUS_REG 12 | ||
49 | #define NETLOGIC_GPIO_DRAM2_CNTRL_REG 13 | ||
50 | #define NETLOGIC_GPIO_DRAM2_RATIO_REG 14 | ||
51 | #define NETLOGIC_GPIO_DRAM2_RESET_REG 15 | ||
52 | #define NETLOGIC_GPIO_DRAM2_STATUS_REG 16 | ||
53 | |||
54 | #define NETLOGIC_GPIO_PWRON_RESET_CFG_REG 21 | ||
55 | #define NETLOGIC_GPIO_BIST_ALL_GO_STATUS_REG 24 | ||
56 | #define NETLOGIC_GPIO_BIST_CPU_GO_STATUS_REG 25 | ||
57 | #define NETLOGIC_GPIO_BIST_DEV_GO_STATUS_REG 26 | ||
58 | |||
59 | #define NETLOGIC_GPIO_FUSE_BANK_REG 35 | ||
60 | #define NETLOGIC_GPIO_CPU_RESET_REG 40 | ||
61 | #define NETLOGIC_GPIO_RNG_REG 43 | ||
62 | |||
63 | #define NETLOGIC_PWRON_RESET_PCMCIA_BOOT 17 | ||
64 | #define NETLOGIC_GPIO_LED_BITMAP 0x1700000 | ||
65 | #define NETLOGIC_GPIO_LED_0_SHIFT 20 | ||
66 | #define NETLOGIC_GPIO_LED_1_SHIFT 24 | ||
67 | |||
68 | #define NETLOGIC_GPIO_LED_OUTPUT_CODE_RESET 0x01 | ||
69 | #define NETLOGIC_GPIO_LED_OUTPUT_CODE_HARD_RESET 0x02 | ||
70 | #define NETLOGIC_GPIO_LED_OUTPUT_CODE_SOFT_RESET 0x03 | ||
71 | #define NETLOGIC_GPIO_LED_OUTPUT_CODE_MAIN 0x04 | ||
72 | |||
73 | #endif | ||
diff --git a/arch/mips/include/asm/netlogic/xlr/iomap.h b/arch/mips/include/asm/netlogic/xlr/iomap.h new file mode 100644 index 000000000000..2e3a4dd53045 --- /dev/null +++ b/arch/mips/include/asm/netlogic/xlr/iomap.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_IOMAP_H | ||
36 | #define _ASM_NLM_IOMAP_H | ||
37 | |||
38 | #define DEFAULT_NETLOGIC_IO_BASE CKSEG1ADDR(0x1ef00000) | ||
39 | #define NETLOGIC_IO_DDR2_CHN0_OFFSET 0x01000 | ||
40 | #define NETLOGIC_IO_DDR2_CHN1_OFFSET 0x02000 | ||
41 | #define NETLOGIC_IO_DDR2_CHN2_OFFSET 0x03000 | ||
42 | #define NETLOGIC_IO_DDR2_CHN3_OFFSET 0x04000 | ||
43 | #define NETLOGIC_IO_PIC_OFFSET 0x08000 | ||
44 | #define NETLOGIC_IO_UART_0_OFFSET 0x14000 | ||
45 | #define NETLOGIC_IO_UART_1_OFFSET 0x15100 | ||
46 | |||
47 | #define NETLOGIC_IO_SIZE 0x1000 | ||
48 | |||
49 | #define NETLOGIC_IO_BRIDGE_OFFSET 0x00000 | ||
50 | |||
51 | #define NETLOGIC_IO_RLD2_CHN0_OFFSET 0x05000 | ||
52 | #define NETLOGIC_IO_RLD2_CHN1_OFFSET 0x06000 | ||
53 | |||
54 | #define NETLOGIC_IO_SRAM_OFFSET 0x07000 | ||
55 | |||
56 | #define NETLOGIC_IO_PCIX_OFFSET 0x09000 | ||
57 | #define NETLOGIC_IO_HT_OFFSET 0x0A000 | ||
58 | |||
59 | #define NETLOGIC_IO_SECURITY_OFFSET 0x0B000 | ||
60 | |||
61 | #define NETLOGIC_IO_GMAC_0_OFFSET 0x0C000 | ||
62 | #define NETLOGIC_IO_GMAC_1_OFFSET 0x0D000 | ||
63 | #define NETLOGIC_IO_GMAC_2_OFFSET 0x0E000 | ||
64 | #define NETLOGIC_IO_GMAC_3_OFFSET 0x0F000 | ||
65 | |||
66 | /* XLS devices */ | ||
67 | #define NETLOGIC_IO_GMAC_4_OFFSET 0x20000 | ||
68 | #define NETLOGIC_IO_GMAC_5_OFFSET 0x21000 | ||
69 | #define NETLOGIC_IO_GMAC_6_OFFSET 0x22000 | ||
70 | #define NETLOGIC_IO_GMAC_7_OFFSET 0x23000 | ||
71 | |||
72 | #define NETLOGIC_IO_PCIE_0_OFFSET 0x1E000 | ||
73 | #define NETLOGIC_IO_PCIE_1_OFFSET 0x1F000 | ||
74 | #define NETLOGIC_IO_SRIO_0_OFFSET 0x1E000 | ||
75 | #define NETLOGIC_IO_SRIO_1_OFFSET 0x1F000 | ||
76 | |||
77 | #define NETLOGIC_IO_USB_0_OFFSET 0x24000 | ||
78 | #define NETLOGIC_IO_USB_1_OFFSET 0x25000 | ||
79 | |||
80 | #define NETLOGIC_IO_COMP_OFFSET 0x1D000 | ||
81 | /* end XLS devices */ | ||
82 | |||
83 | /* XLR devices */ | ||
84 | #define NETLOGIC_IO_SPI4_0_OFFSET 0x10000 | ||
85 | #define NETLOGIC_IO_XGMAC_0_OFFSET 0x11000 | ||
86 | #define NETLOGIC_IO_SPI4_1_OFFSET 0x12000 | ||
87 | #define NETLOGIC_IO_XGMAC_1_OFFSET 0x13000 | ||
88 | /* end XLR devices */ | ||
89 | |||
90 | #define NETLOGIC_IO_I2C_0_OFFSET 0x16000 | ||
91 | #define NETLOGIC_IO_I2C_1_OFFSET 0x17000 | ||
92 | |||
93 | #define NETLOGIC_IO_GPIO_OFFSET 0x18000 | ||
94 | #define NETLOGIC_IO_FLASH_OFFSET 0x19000 | ||
95 | #define NETLOGIC_IO_TB_OFFSET 0x1C000 | ||
96 | |||
97 | #define NETLOGIC_CPLD_OFFSET KSEG1ADDR(0x1d840000) | ||
98 | |||
99 | /* | ||
100 | * Base Address (Virtual) of the PCI Config address space | ||
101 | * For now, choose 256M phys in kseg1 = 0xA0000000 + (1<<28) | ||
102 | * Config space spans 256 buses * 256 device/function slots * 256 bytes | ||
103 | * each, i.e. 1 << 24 = 16M. | ||
104 | */ | ||
105 | #define DEFAULT_PCI_CONFIG_BASE 0x18000000 | ||
106 | #define DEFAULT_HT_TYPE0_CFG_BASE 0x16000000 | ||
107 | #define DEFAULT_HT_TYPE1_CFG_BASE 0x17000000 | ||
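
As a worked example of the layout in the comment above: with 256 buses, 256 device/function slots and 256 bytes of registers each, the config address of (bus, devfn, reg) is base + bus * 2^16 + devfn * 2^8 + reg. A minimal sketch with a hypothetical helper name:

    /* Hypothetical sketch: physical address of one PCI config register
     * under the 256 * 256 * 256-byte layout described above. */
    static inline unsigned long nlm_pci_cfg_paddr(int bus, int devfn, int reg)
    {
            return DEFAULT_PCI_CONFIG_BASE + ((unsigned long)bus << 16) +
                    ((unsigned long)devfn << 8) + reg;
    }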
108 | |||
109 | #ifndef __ASSEMBLY__ | ||
110 | #include <linux/types.h> | ||
111 | #include <asm/byteorder.h> | ||
112 | |||
113 | typedef volatile __u32 nlm_reg_t; | ||
114 | extern unsigned long netlogic_io_base; | ||
115 | |||
116 | /* FIXME: read once in write_reg */ | ||
117 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
118 | #define netlogic_read_reg(base, offset) ((base)[(offset)]) | ||
119 | #define netlogic_write_reg(base, offset, value) ((base)[(offset)] = (value)) | ||
120 | #else | ||
121 | #define netlogic_read_reg(base, offset) (be32_to_cpu((base)[(offset)])) | ||
122 | #define netlogic_write_reg(base, offset, value) \ | ||
123 | ((base)[(offset)] = cpu_to_be32((value))) | ||
124 | #endif | ||
125 | |||
126 | #define netlogic_read_reg_le32(base, offset) (le32_to_cpu((base)[(offset)])) | ||
127 | #define netlogic_write_reg_le32(base, offset, value) \ | ||
128 | ((base)[(offset)] = cpu_to_le32((value))) | ||
129 | #define netlogic_io_mmio(offset) ((nlm_reg_t *)(netlogic_io_base+(offset))) | ||
130 | #endif /* __ASSEMBLY__ */ | ||
131 | #endif | ||
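
A short usage sketch of the accessors above, assuming the GPIO register indices from xlr/gpio.h: since nlm_reg_t is a 32-bit type, the offset passed to netlogic_read_reg()/netlogic_write_reg() is a word index, not a byte offset. The helper name is hypothetical, and whether setting a direction bit makes the pin an output is an assumption for illustration.

    /* Sketch only: read-modify-write one register through the
     * byte-order-aware accessors. */
    static inline void nlm_gpio_set_output(int gpio)
    {
            nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_GPIO_OFFSET);
            u32 dir = netlogic_read_reg(mmio, NETLOGIC_GPIO_IO_DIR_REG);

            netlogic_write_reg(mmio, NETLOGIC_GPIO_IO_DIR_REG, dir | (1 << gpio));
    }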
diff --git a/arch/mips/include/asm/netlogic/xlr/pic.h b/arch/mips/include/asm/netlogic/xlr/pic.h new file mode 100644 index 000000000000..5cceb746f080 --- /dev/null +++ b/arch/mips/include/asm/netlogic/xlr/pic.h | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_XLR_PIC_H | ||
36 | #define _ASM_NLM_XLR_PIC_H | ||
37 | |||
38 | #define PIC_CLKS_PER_SEC 66666666ULL | ||
39 | /* PIC hardware interrupt numbers */ | ||
40 | #define PIC_IRT_WD_INDEX 0 | ||
41 | #define PIC_IRT_TIMER_0_INDEX 1 | ||
42 | #define PIC_IRT_TIMER_1_INDEX 2 | ||
43 | #define PIC_IRT_TIMER_2_INDEX 3 | ||
44 | #define PIC_IRT_TIMER_3_INDEX 4 | ||
45 | #define PIC_IRT_TIMER_4_INDEX 5 | ||
46 | #define PIC_IRT_TIMER_5_INDEX 6 | ||
47 | #define PIC_IRT_TIMER_6_INDEX 7 | ||
48 | #define PIC_IRT_TIMER_7_INDEX 8 | ||
49 | #define PIC_IRT_CLOCK_INDEX PIC_IRT_TIMER_7_INDEX | ||
50 | #define PIC_IRT_UART_0_INDEX 9 | ||
51 | #define PIC_IRT_UART_1_INDEX 10 | ||
52 | #define PIC_IRT_I2C_0_INDEX 11 | ||
53 | #define PIC_IRT_I2C_1_INDEX 12 | ||
54 | #define PIC_IRT_PCMCIA_INDEX 13 | ||
55 | #define PIC_IRT_GPIO_INDEX 14 | ||
56 | #define PIC_IRT_HYPER_INDEX 15 | ||
57 | #define PIC_IRT_PCIX_INDEX 16 | ||
58 | /* XLS */ | ||
59 | #define PIC_IRT_CDE_INDEX 15 | ||
60 | #define PIC_IRT_BRIDGE_TB_XLS_INDEX 16 | ||
61 | /* XLS */ | ||
62 | #define PIC_IRT_GMAC0_INDEX 17 | ||
63 | #define PIC_IRT_GMAC1_INDEX 18 | ||
64 | #define PIC_IRT_GMAC2_INDEX 19 | ||
65 | #define PIC_IRT_GMAC3_INDEX 20 | ||
66 | #define PIC_IRT_XGS0_INDEX 21 | ||
67 | #define PIC_IRT_XGS1_INDEX 22 | ||
68 | #define PIC_IRT_HYPER_FATAL_INDEX 23 | ||
69 | #define PIC_IRT_PCIX_FATAL_INDEX 24 | ||
70 | #define PIC_IRT_BRIDGE_AERR_INDEX 25 | ||
71 | #define PIC_IRT_BRIDGE_BERR_INDEX 26 | ||
72 | #define PIC_IRT_BRIDGE_TB_XLR_INDEX 27 | ||
73 | #define PIC_IRT_BRIDGE_AERR_NMI_INDEX 28 | ||
74 | /* XLS */ | ||
75 | #define PIC_IRT_GMAC4_INDEX 21 | ||
76 | #define PIC_IRT_GMAC5_INDEX 22 | ||
77 | #define PIC_IRT_GMAC6_INDEX 23 | ||
78 | #define PIC_IRT_GMAC7_INDEX 24 | ||
79 | #define PIC_IRT_BRIDGE_ERR_INDEX 25 | ||
80 | #define PIC_IRT_PCIE_LINK0_INDEX 26 | ||
81 | #define PIC_IRT_PCIE_LINK1_INDEX 27 | ||
82 | #define PIC_IRT_PCIE_LINK2_INDEX 23 | ||
83 | #define PIC_IRT_PCIE_LINK3_INDEX 24 | ||
84 | #define PIC_IRT_PCIE_XLSB0_LINK2_INDEX 28 | ||
85 | #define PIC_IRT_PCIE_XLSB0_LINK3_INDEX 29 | ||
86 | #define PIC_IRT_SRIO_LINK0_INDEX 26 | ||
87 | #define PIC_IRT_SRIO_LINK1_INDEX 27 | ||
88 | #define PIC_IRT_SRIO_LINK2_INDEX 28 | ||
89 | #define PIC_IRT_SRIO_LINK3_INDEX 29 | ||
90 | #define PIC_IRT_PCIE_INT_INDEX 28 | ||
91 | #define PIC_IRT_PCIE_FATAL_INDEX 29 | ||
92 | #define PIC_IRT_GPIO_B_INDEX 30 | ||
93 | #define PIC_IRT_USB_INDEX 31 | ||
94 | /* XLS */ | ||
95 | #define PIC_NUM_IRTS 32 | ||
96 | |||
97 | |||
98 | #define PIC_CLOCK_TIMER 7 | ||
99 | |||
100 | /* PIC Registers */ | ||
101 | #define PIC_CTRL 0x00 | ||
102 | #define PIC_IPI 0x04 | ||
103 | #define PIC_INT_ACK 0x06 | ||
104 | |||
105 | #define WD_MAX_VAL_0 0x08 | ||
106 | #define WD_MAX_VAL_1 0x09 | ||
107 | #define WD_MASK_0 0x0a | ||
108 | #define WD_MASK_1 0x0b | ||
109 | #define WD_HEARBEAT_0 0x0c | ||
110 | #define WD_HEARBEAT_1 0x0d | ||
111 | |||
112 | #define PIC_IRT_0_BASE 0x40 | ||
113 | #define PIC_IRT_1_BASE 0x80 | ||
114 | #define PIC_TIMER_MAXVAL_0_BASE 0x100 | ||
115 | #define PIC_TIMER_MAXVAL_1_BASE 0x110 | ||
116 | #define PIC_TIMER_COUNT_0_BASE 0x120 | ||
117 | #define PIC_TIMER_COUNT_1_BASE 0x130 | ||
118 | |||
119 | #define PIC_IRT_0(picintr) (PIC_IRT_0_BASE + (picintr)) | ||
120 | #define PIC_IRT_1(picintr) (PIC_IRT_1_BASE + (picintr)) | ||
121 | |||
122 | #define PIC_TIMER_MAXVAL_0(i) (PIC_TIMER_MAXVAL_0_BASE + (i)) | ||
123 | #define PIC_TIMER_MAXVAL_1(i) (PIC_TIMER_MAXVAL_1_BASE + (i)) | ||
124 | #define PIC_TIMER_COUNT_0(i) (PIC_TIMER_COUNT_0_BASE + (i)) | ||
125 | #define PIC_TIMER_COUNT_1(i) (PIC_TIMER_COUNT_1_BASE + (i)) | ||
126 | |||
127 | /* | ||
128 | * Mapping between hardware interrupt numbers and IRQs on the CPU: | ||
129 | * we use a simple scheme to map PIC interrupts 0-31 to IRQs | ||
130 | * 8-39. This leaves IRQs 0-7 for CPU interrupts such as | ||
131 | * count/compare and the FMN. | ||
132 | */ | ||
133 | #define PIC_IRQ_BASE 8 | ||
134 | #define PIC_INTR_TO_IRQ(i) (PIC_IRQ_BASE + (i)) | ||
135 | #define PIC_IRQ_TO_INTR(i) ((i) - PIC_IRQ_BASE) | ||
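
To make the scheme concrete: with PIC_IRQ_BASE = 8, the UART 0 interrupt at IRT index 9 maps to Linux IRQ PIC_INTR_TO_IRQ(9) = 8 + 9 = 17, and PIC_IRQ_TO_INTR(17) recovers index 9.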
136 | |||
137 | #define PIC_IRT_FIRST_IRQ PIC_IRQ_BASE | ||
138 | #define PIC_WD_IRQ PIC_INTR_TO_IRQ(PIC_IRT_WD_INDEX) | ||
139 | #define PIC_TIMER_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_0_INDEX) | ||
140 | #define PIC_TIMER_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_1_INDEX) | ||
141 | #define PIC_TIMER_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_2_INDEX) | ||
142 | #define PIC_TIMER_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_3_INDEX) | ||
143 | #define PIC_TIMER_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_4_INDEX) | ||
144 | #define PIC_TIMER_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_5_INDEX) | ||
145 | #define PIC_TIMER_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_6_INDEX) | ||
146 | #define PIC_TIMER_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_7_INDEX) | ||
147 | #define PIC_CLOCK_IRQ (PIC_TIMER_7_IRQ) | ||
148 | #define PIC_UART_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_0_INDEX) | ||
149 | #define PIC_UART_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_1_INDEX) | ||
150 | #define PIC_I2C_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_0_INDEX) | ||
151 | #define PIC_I2C_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_1_INDEX) | ||
152 | #define PIC_PCMCIA_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCMCIA_INDEX) | ||
153 | #define PIC_GPIO_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_INDEX) | ||
154 | #define PIC_HYPER_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_INDEX) | ||
155 | #define PIC_PCIX_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_INDEX) | ||
156 | /* XLS */ | ||
157 | #define PIC_CDE_IRQ PIC_INTR_TO_IRQ(PIC_IRT_CDE_INDEX) | ||
158 | #define PIC_BRIDGE_TB_XLS_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLS_INDEX) | ||
159 | /* end XLS */ | ||
160 | #define PIC_GMAC_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC0_INDEX) | ||
161 | #define PIC_GMAC_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC1_INDEX) | ||
162 | #define PIC_GMAC_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC2_INDEX) | ||
163 | #define PIC_GMAC_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC3_INDEX) | ||
164 | #define PIC_XGS_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS0_INDEX) | ||
165 | #define PIC_XGS_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS1_INDEX) | ||
166 | #define PIC_HYPER_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_FATAL_INDEX) | ||
167 | #define PIC_PCIX_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_FATAL_INDEX) | ||
168 | #define PIC_BRIDGE_AERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_INDEX) | ||
169 | #define PIC_BRIDGE_BERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_BERR_INDEX) | ||
170 | #define PIC_BRIDGE_TB_XLR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLR_INDEX) | ||
171 | #define PIC_BRIDGE_AERR_NMI_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX) | ||
172 | /* XLS defines */ | ||
173 | #define PIC_GMAC_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC4_INDEX) | ||
174 | #define PIC_GMAC_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC5_INDEX) | ||
175 | #define PIC_GMAC_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC6_INDEX) | ||
176 | #define PIC_GMAC_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC7_INDEX) | ||
177 | #define PIC_BRIDGE_ERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_ERR_INDEX) | ||
178 | #define PIC_PCIE_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK0_INDEX) | ||
179 | #define PIC_PCIE_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK1_INDEX) | ||
180 | #define PIC_PCIE_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK2_INDEX) | ||
181 | #define PIC_PCIE_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK3_INDEX) | ||
182 | #define PIC_PCIE_XLSB0_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK2_INDEX) | ||
183 | #define PIC_PCIE_XLSB0_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK3_INDEX) | ||
184 | #define PIC_SRIO_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK0_INDEX) | ||
185 | #define PIC_SRIO_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK1_INDEX) | ||
186 | #define PIC_SRIO_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK2_INDEX) | ||
187 | #define PIC_SRIO_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK3_INDEX) | ||
188 | #define PIC_PCIE_INT_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_INT_INDEX) | ||
189 | #define PIC_PCIE_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_FATAL_INDEX) | ||
190 | #define PIC_GPIO_B_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_B_INDEX) | ||
191 | #define PIC_USB_IRQ PIC_INTR_TO_IRQ(PIC_IRT_USB_INDEX) | ||
192 | #define PIC_IRT_LAST_IRQ PIC_USB_IRQ | ||
193 | /* end XLS */ | ||
194 | |||
195 | #ifndef __ASSEMBLY__ | ||
196 | static inline void pic_send_ipi(u32 ipi) | ||
197 | { | ||
198 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
199 | |||
200 | netlogic_write_reg(mmio, PIC_IPI, ipi); | ||
201 | } | ||
202 | |||
203 | static inline u32 pic_read_control(void) | ||
204 | { | ||
205 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
206 | |||
207 | return netlogic_read_reg(mmio, PIC_CTRL); | ||
208 | } | ||
209 | |||
210 | static inline void pic_write_control(u32 control) | ||
211 | { | ||
212 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
213 | |||
214 | netlogic_write_reg(mmio, PIC_CTRL, control); | ||
215 | } | ||
216 | |||
217 | static inline void pic_update_control(u32 control) | ||
218 | { | ||
219 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
220 | |||
221 | netlogic_write_reg(mmio, PIC_CTRL, | ||
222 | (control | netlogic_read_reg(mmio, PIC_CTRL))); | ||
223 | } | ||
224 | |||
225 | #define PIC_IRQ_IS_EDGE_TRIGGERED(irq) (((irq) >= PIC_TIMER_0_IRQ) && \ | ||
226 | ((irq) <= PIC_TIMER_7_IRQ)) | ||
227 | #define PIC_IRQ_IS_IRT(irq) (((irq) >= PIC_IRT_FIRST_IRQ) && \ | ||
228 | ((irq) <= PIC_IRT_LAST_IRQ)) | ||
229 | #endif | ||
230 | |||
231 | #endif /* _ASM_NLM_XLR_PIC_H */ | ||
diff --git a/arch/mips/include/asm/netlogic/xlr/xlr.h b/arch/mips/include/asm/netlogic/xlr/xlr.h new file mode 100644 index 000000000000..3e6372692a04 --- /dev/null +++ b/arch/mips/include/asm/netlogic/xlr/xlr.h | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_XLR_H | ||
36 | #define _ASM_NLM_XLR_H | ||
37 | |||
38 | /* Platform UART functions */ | ||
39 | struct uart_port; | ||
40 | unsigned int nlm_xlr_uart_in(struct uart_port *, int); | ||
41 | void nlm_xlr_uart_out(struct uart_port *, int, int); | ||
42 | |||
43 | /* SMP support functions */ | ||
44 | struct irq_desc; | ||
45 | void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc); | ||
46 | void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc); | ||
47 | int nlm_wakeup_secondary_cpus(u32 wakeup_mask); | ||
48 | void nlm_smp_irq_init(void); | ||
49 | void nlm_boot_smp_nmi(void); | ||
50 | void prom_pre_boot_secondary_cpus(void); | ||
51 | |||
52 | extern struct plat_smp_ops nlm_smp_ops; | ||
53 | extern unsigned long nlm_common_ebase; | ||
54 | |||
55 | /* XLS B silicon "Rook" */ | ||
56 | static inline unsigned int nlm_chip_is_xls_b(void) | ||
57 | { | ||
58 | uint32_t prid = read_c0_prid(); | ||
59 | |||
60 | return ((prid & 0xf000) == 0x4000); | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * XLR chip types | ||
65 | */ | ||
66 | /* The XLS product line has chip version 0x4, 0x8 or 0xc in PRID bits [15:12] */ | ||
67 | static inline unsigned int nlm_chip_is_xls(void) | ||
68 | { | ||
69 | uint32_t prid = read_c0_prid(); | ||
70 | |||
71 | return ((prid & 0xf000) == 0x8000 || (prid & 0xf000) == 0x4000 || | ||
72 | (prid & 0xf000) == 0xc000); | ||
73 | } | ||
74 | |||
75 | #endif /* _ASM_NLM_XLR_H */ | ||
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index 9f1b8dba2c81..de39b1f343ea 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h | |||
@@ -141,7 +141,8 @@ extern int ptrace_set_watch_regs(struct task_struct *child, | |||
141 | #define instruction_pointer(regs) ((regs)->cp0_epc) | 141 | #define instruction_pointer(regs) ((regs)->cp0_epc) |
142 | #define profile_pc(regs) instruction_pointer(regs) | 142 | #define profile_pc(regs) instruction_pointer(regs) |
143 | 143 | ||
144 | extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); | 144 | extern asmlinkage void syscall_trace_enter(struct pt_regs *regs); |
145 | extern asmlinkage void syscall_trace_leave(struct pt_regs *regs); | ||
145 | 146 | ||
146 | extern NORET_TYPE void die(const char *, struct pt_regs *) ATTRIB_NORET; | 147 | extern NORET_TYPE void die(const char *, struct pt_regs *) ATTRIB_NORET; |
147 | 148 | ||
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index d71160de4d10..97f8bf6639e7 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h | |||
@@ -149,6 +149,9 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
149 | #define _TIF_FPUBOUND (1<<TIF_FPUBOUND) | 149 | #define _TIF_FPUBOUND (1<<TIF_FPUBOUND) |
150 | #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) | 150 | #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) |
151 | 151 | ||
152 | /* work to do in syscall_trace_leave() */ | ||
153 | #define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT) | ||
154 | |||
152 | /* work to do on interrupt/exception return */ | 155 | /* work to do on interrupt/exception return */ |
153 | #define _TIF_WORK_MASK (0x0000ffef & \ | 156 | #define _TIF_WORK_MASK (0x0000ffef & \ |
154 | ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT)) | 157 | ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT)) |
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h index c7f1bfef1574..bc14447e69b5 100644 --- a/arch/mips/include/asm/time.h +++ b/arch/mips/include/asm/time.h | |||
@@ -84,12 +84,6 @@ static inline int init_mips_clocksource(void) | |||
84 | #endif | 84 | #endif |
85 | } | 85 | } |
86 | 86 | ||
87 | static inline void clocksource_set_clock(struct clocksource *cs, | ||
88 | unsigned int clock) | ||
89 | { | ||
90 | clocksource_calc_mult_shift(cs, clock, 4); | ||
91 | } | ||
92 | |||
93 | static inline void clockevent_set_clock(struct clock_event_device *cd, | 87 | static inline void clockevent_set_clock(struct clock_event_device *cd, |
94 | unsigned int clock) | 88 | unsigned int clock) |
95 | { | 89 | { |
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index 9ce9f64cb76f..2d8e447cb828 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c | |||
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(vdma_free); | |||
211 | */ | 211 | */ |
212 | int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) | 212 | int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) |
213 | { | 213 | { |
214 | int first, pages, npages; | 214 | int first, pages; |
215 | 215 | ||
216 | if (laddr > 0xffffff) { | 216 | if (laddr > 0xffffff) { |
217 | if (vdma_debug) | 217 | if (vdma_debug) |
@@ -228,8 +228,7 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) | |||
228 | return -EINVAL; /* invalid physical address */ | 228 | return -EINVAL; /* invalid physical address */ |
229 | } | 229 | } |
230 | 230 | ||
231 | npages = pages = | 231 | pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; |
232 | (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; | ||
233 | first = laddr >> 12; | 232 | first = laddr >> 12; |
234 | if (vdma_debug) | 233 | if (vdma_debug) |
235 | printk("vdma_remap: first=%x, pages=%x\n", first, pages); | 234 | printk("vdma_remap: first=%x, pages=%x\n", first, pages); |
diff --git a/arch/mips/jz4740/dma.c b/arch/mips/jz4740/dma.c index 5ebe75a68350..d7feb898692c 100644 --- a/arch/mips/jz4740/dma.c +++ b/arch/mips/jz4740/dma.c | |||
@@ -242,9 +242,7 @@ EXPORT_SYMBOL_GPL(jz4740_dma_get_residue); | |||
242 | 242 | ||
243 | static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) | 243 | static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) |
244 | { | 244 | { |
245 | uint32_t status; | 245 | (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); |
246 | |||
247 | status = jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); | ||
248 | 246 | ||
249 | jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, | 247 | jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, |
250 | JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); | 248 | JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); |
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c index 6a9e14dab91e..d97cfbf882f5 100644 --- a/arch/mips/jz4740/setup.c +++ b/arch/mips/jz4740/setup.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> | 2 | * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> |
3 | * Copyright (C) 2011, Maarten ter Huurne <maarten@treewalker.org> | ||
3 | * JZ4740 setup code | 4 | * JZ4740 setup code |
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
@@ -14,13 +15,44 @@ | |||
14 | */ | 15 | */ |
15 | 16 | ||
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/io.h> | ||
17 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
18 | 20 | ||
21 | #include <asm/bootinfo.h> | ||
22 | |||
23 | #include <asm/mach-jz4740/base.h> | ||
24 | |||
19 | #include "reset.h" | 25 | #include "reset.h" |
20 | 26 | ||
27 | |||
28 | #define JZ4740_EMC_SDRAM_CTRL 0x80 | ||
29 | |||
30 | |||
31 | static void __init jz4740_detect_mem(void) | ||
32 | { | ||
33 | void __iomem *jz_emc_base; | ||
34 | u32 ctrl, bus, bank, rows, cols; | ||
35 | phys_t size; | ||
36 | |||
37 | jz_emc_base = ioremap(JZ4740_EMC_BASE_ADDR, 0x100); | ||
38 | ctrl = readl(jz_emc_base + JZ4740_EMC_SDRAM_CTRL); | ||
39 | bus = 2 - ((ctrl >> 31) & 1); | ||
40 | bank = 1 + ((ctrl >> 19) & 1); | ||
41 | cols = 8 + ((ctrl >> 26) & 7); | ||
42 | rows = 11 + ((ctrl >> 20) & 3); | ||
43 | printk(KERN_DEBUG | ||
44 | "SDRAM preconfigured: bus:%u bank:%u rows:%u cols:%u\n", | ||
45 | bus, bank, rows, cols); | ||
46 | iounmap(jz_emc_base); | ||
47 | |||
48 | size = 1 << (bus + bank + cols + rows); | ||
49 | add_memory_region(0, size, BOOT_MEM_RAM); | ||
50 | } | ||
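
To make the size computation concrete, here is a worked example with hypothetical register contents, following the field decoding above: a 32-bit bus (bit 31 clear, so bus = 2), four banks (bank = 2), 9 column bits (cols = 9) and 12 row bits (rows = 12) give size = 1 << (2 + 2 + 9 + 12) = 1 << 25 = 32 MiB.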
51 | |||
21 | void __init plat_mem_setup(void) | 52 | void __init plat_mem_setup(void) |
22 | { | 53 | { |
23 | jz4740_reset_init(); | 54 | jz4740_reset_init(); |
55 | jz4740_detect_mem(); | ||
24 | } | 56 | } |
25 | 57 | ||
26 | const char *get_system_type(void) | 58 | const char *get_system_type(void) |
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c index fe01678d94fd..f83c2dd07a27 100644 --- a/arch/mips/jz4740/time.c +++ b/arch/mips/jz4740/time.c | |||
@@ -89,7 +89,7 @@ static int jz4740_clockevent_set_next(unsigned long evt, | |||
89 | 89 | ||
90 | static struct clock_event_device jz4740_clockevent = { | 90 | static struct clock_event_device jz4740_clockevent = { |
91 | .name = "jz4740-timer", | 91 | .name = "jz4740-timer", |
92 | .features = CLOCK_EVT_FEAT_PERIODIC, | 92 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, |
93 | .set_next_event = jz4740_clockevent_set_next, | 93 | .set_next_event = jz4740_clockevent_set_next, |
94 | .set_mode = jz4740_clockevent_set_mode, | 94 | .set_mode = jz4740_clockevent_set_mode, |
95 | .rating = 200, | 95 | .rating = 200, |
@@ -121,8 +121,7 @@ void __init plat_time_init(void) | |||
121 | 121 | ||
122 | clockevents_register_device(&jz4740_clockevent); | 122 | clockevents_register_device(&jz4740_clockevent); |
123 | 123 | ||
124 | clocksource_set_clock(&jz4740_clocksource, clk_rate); | 124 | ret = clocksource_register_hz(&jz4740_clocksource, clk_rate); |
125 | ret = clocksource_register(&jz4740_clocksource); | ||
126 | 125 | ||
127 | if (ret) | 126 | if (ret) |
128 | printk(KERN_ERR "Failed to register clocksource: %d\n", ret); | 127 | printk(KERN_ERR "Failed to register clocksource: %d\n", ret); |
diff --git a/arch/mips/jz4740/timer.c b/arch/mips/jz4740/timer.c index b2c015129055..654d5c3900b6 100644 --- a/arch/mips/jz4740/timer.c +++ b/arch/mips/jz4740/timer.c | |||
@@ -27,11 +27,13 @@ void jz4740_timer_enable_watchdog(void) | |||
27 | { | 27 | { |
28 | writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR); | 28 | writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR); |
29 | } | 29 | } |
30 | EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog); | ||
30 | 31 | ||
31 | void jz4740_timer_disable_watchdog(void) | 32 | void jz4740_timer_disable_watchdog(void) |
32 | { | 33 | { |
33 | writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET); | 34 | writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET); |
34 | } | 35 | } |
36 | EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog); | ||
35 | 37 | ||
36 | void __init jz4740_timer_init(void) | 38 | void __init jz4740_timer_init(void) |
37 | { | 39 | { |
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index cedee2bcbd18..83bba332bbfc 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -52,6 +52,7 @@ obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o | |||
52 | obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o | 52 | obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o |
53 | obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o | 53 | obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o |
54 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o | 54 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o |
55 | obj-$(CONFIG_CPU_XLR) += r4k_fpu.o r4k_switch.o | ||
55 | 56 | ||
56 | obj-$(CONFIG_SMP) += smp.o | 57 | obj-$(CONFIG_SMP) += smp.o |
57 | obj-$(CONFIG_SMP_UP) += smp-up.o | 58 | obj-$(CONFIG_SMP_UP) += smp-up.o |
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index 0b7377361e22..f0ab92a1b057 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c | |||
@@ -51,8 +51,7 @@ void __init txx9_clocksource_init(unsigned long baseaddr, | |||
51 | { | 51 | { |
52 | struct txx9_tmr_reg __iomem *tmrptr; | 52 | struct txx9_tmr_reg __iomem *tmrptr; |
53 | 53 | ||
54 | clocksource_set_clock(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); | 54 | clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); |
55 | clocksource_register(&txx9_clocksource.cs); | ||
56 | 55 | ||
57 | tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); | 56 | tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); |
58 | __raw_writel(TCR_BASE, &tmrptr->tcr); | 57 | __raw_writel(TCR_BASE, &tmrptr->tcr); |
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index f65d4c8c65a6..bb133d10b145 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -291,6 +291,12 @@ static inline int cpu_has_confreg(void) | |||
291 | #endif | 291 | #endif |
292 | } | 292 | } |
293 | 293 | ||
294 | static inline void set_elf_platform(int cpu, const char *plat) | ||
295 | { | ||
296 | if (cpu == 0) | ||
297 | __elf_platform = plat; | ||
298 | } | ||
299 | |||
294 | /* | 300 | /* |
295 | * Get the FPU Implementation/Revision. | 301 | * Get the FPU Implementation/Revision. |
296 | */ | 302 | */ |
@@ -614,6 +620,16 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) | |||
614 | case PRID_IMP_LOONGSON2: | 620 | case PRID_IMP_LOONGSON2: |
615 | c->cputype = CPU_LOONGSON2; | 621 | c->cputype = CPU_LOONGSON2; |
616 | __cpu_name[cpu] = "ICT Loongson-2"; | 622 | __cpu_name[cpu] = "ICT Loongson-2"; |
623 | |||
624 | switch (c->processor_id & PRID_REV_MASK) { | ||
625 | case PRID_REV_LOONGSON2E: | ||
626 | set_elf_platform(cpu, "loongson2e"); | ||
627 | break; | ||
628 | case PRID_REV_LOONGSON2F: | ||
629 | set_elf_platform(cpu, "loongson2f"); | ||
630 | break; | ||
631 | } | ||
632 | |||
617 | c->isa_level = MIPS_CPU_ISA_III; | 633 | c->isa_level = MIPS_CPU_ISA_III; |
618 | c->options = R4K_OPTS | | 634 | c->options = R4K_OPTS | |
619 | MIPS_CPU_FPU | MIPS_CPU_LLSC | | 635 | MIPS_CPU_FPU | MIPS_CPU_LLSC | |
@@ -911,12 +927,14 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) | |||
911 | case PRID_IMP_BMIPS32_REV8: | 927 | case PRID_IMP_BMIPS32_REV8: |
912 | c->cputype = CPU_BMIPS32; | 928 | c->cputype = CPU_BMIPS32; |
913 | __cpu_name[cpu] = "Broadcom BMIPS32"; | 929 | __cpu_name[cpu] = "Broadcom BMIPS32"; |
930 | set_elf_platform(cpu, "bmips32"); | ||
914 | break; | 931 | break; |
915 | case PRID_IMP_BMIPS3300: | 932 | case PRID_IMP_BMIPS3300: |
916 | case PRID_IMP_BMIPS3300_ALT: | 933 | case PRID_IMP_BMIPS3300_ALT: |
917 | case PRID_IMP_BMIPS3300_BUG: | 934 | case PRID_IMP_BMIPS3300_BUG: |
918 | c->cputype = CPU_BMIPS3300; | 935 | c->cputype = CPU_BMIPS3300; |
919 | __cpu_name[cpu] = "Broadcom BMIPS3300"; | 936 | __cpu_name[cpu] = "Broadcom BMIPS3300"; |
937 | set_elf_platform(cpu, "bmips3300"); | ||
920 | break; | 938 | break; |
921 | case PRID_IMP_BMIPS43XX: { | 939 | case PRID_IMP_BMIPS43XX: { |
922 | int rev = c->processor_id & 0xff; | 940 | int rev = c->processor_id & 0xff; |
@@ -925,15 +943,18 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) | |||
925 | rev <= PRID_REV_BMIPS4380_HI) { | 943 | rev <= PRID_REV_BMIPS4380_HI) { |
926 | c->cputype = CPU_BMIPS4380; | 944 | c->cputype = CPU_BMIPS4380; |
927 | __cpu_name[cpu] = "Broadcom BMIPS4380"; | 945 | __cpu_name[cpu] = "Broadcom BMIPS4380"; |
946 | set_elf_platform(cpu, "bmips4380"); | ||
928 | } else { | 947 | } else { |
929 | c->cputype = CPU_BMIPS4350; | 948 | c->cputype = CPU_BMIPS4350; |
930 | __cpu_name[cpu] = "Broadcom BMIPS4350"; | 949 | __cpu_name[cpu] = "Broadcom BMIPS4350"; |
950 | set_elf_platform(cpu, "bmips4350"); | ||
931 | } | 951 | } |
932 | break; | 952 | break; |
933 | } | 953 | } |
934 | case PRID_IMP_BMIPS5000: | 954 | case PRID_IMP_BMIPS5000: |
935 | c->cputype = CPU_BMIPS5000; | 955 | c->cputype = CPU_BMIPS5000; |
936 | __cpu_name[cpu] = "Broadcom BMIPS5000"; | 956 | __cpu_name[cpu] = "Broadcom BMIPS5000"; |
957 | set_elf_platform(cpu, "bmips5000"); | ||
937 | c->options |= MIPS_CPU_ULRI; | 958 | c->options |= MIPS_CPU_ULRI; |
938 | break; | 959 | break; |
939 | } | 960 | } |
@@ -956,14 +977,12 @@ static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) | |||
956 | c->cputype = CPU_CAVIUM_OCTEON_PLUS; | 977 | c->cputype = CPU_CAVIUM_OCTEON_PLUS; |
957 | __cpu_name[cpu] = "Cavium Octeon+"; | 978 | __cpu_name[cpu] = "Cavium Octeon+"; |
958 | platform: | 979 | platform: |
959 | if (cpu == 0) | 980 | set_elf_platform(cpu, "octeon"); |
960 | __elf_platform = "octeon"; | ||
961 | break; | 981 | break; |
962 | case PRID_IMP_CAVIUM_CN63XX: | 982 | case PRID_IMP_CAVIUM_CN63XX: |
963 | c->cputype = CPU_CAVIUM_OCTEON2; | 983 | c->cputype = CPU_CAVIUM_OCTEON2; |
964 | __cpu_name[cpu] = "Cavium Octeon II"; | 984 | __cpu_name[cpu] = "Cavium Octeon II"; |
965 | if (cpu == 0) | 985 | set_elf_platform(cpu, "octeon2"); |
966 | __elf_platform = "octeon2"; | ||
967 | break; | 986 | break; |
968 | default: | 987 | default: |
969 | printk(KERN_INFO "Unknown Octeon chip!\n"); | 988 | printk(KERN_INFO "Unknown Octeon chip!\n"); |
@@ -988,6 +1007,59 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) | |||
988 | } | 1007 | } |
989 | } | 1008 | } |
990 | 1009 | ||
1010 | static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) | ||
1011 | { | ||
1012 | decode_configs(c); | ||
1013 | |||
1014 | c->options = (MIPS_CPU_TLB | | ||
1015 | MIPS_CPU_4KEX | | ||
1016 | MIPS_CPU_COUNTER | | ||
1017 | MIPS_CPU_DIVEC | | ||
1018 | MIPS_CPU_WATCH | | ||
1019 | MIPS_CPU_EJTAG | | ||
1020 | MIPS_CPU_LLSC); | ||
1021 | |||
1022 | switch (c->processor_id & 0xff00) { | ||
1023 | case PRID_IMP_NETLOGIC_XLR732: | ||
1024 | case PRID_IMP_NETLOGIC_XLR716: | ||
1025 | case PRID_IMP_NETLOGIC_XLR532: | ||
1026 | case PRID_IMP_NETLOGIC_XLR308: | ||
1027 | case PRID_IMP_NETLOGIC_XLR532C: | ||
1028 | case PRID_IMP_NETLOGIC_XLR516C: | ||
1029 | case PRID_IMP_NETLOGIC_XLR508C: | ||
1030 | case PRID_IMP_NETLOGIC_XLR308C: | ||
1031 | c->cputype = CPU_XLR; | ||
1032 | __cpu_name[cpu] = "Netlogic XLR"; | ||
1033 | break; | ||
1034 | |||
1035 | case PRID_IMP_NETLOGIC_XLS608: | ||
1036 | case PRID_IMP_NETLOGIC_XLS408: | ||
1037 | case PRID_IMP_NETLOGIC_XLS404: | ||
1038 | case PRID_IMP_NETLOGIC_XLS208: | ||
1039 | case PRID_IMP_NETLOGIC_XLS204: | ||
1040 | case PRID_IMP_NETLOGIC_XLS108: | ||
1041 | case PRID_IMP_NETLOGIC_XLS104: | ||
1042 | case PRID_IMP_NETLOGIC_XLS616B: | ||
1043 | case PRID_IMP_NETLOGIC_XLS608B: | ||
1044 | case PRID_IMP_NETLOGIC_XLS416B: | ||
1045 | case PRID_IMP_NETLOGIC_XLS412B: | ||
1046 | case PRID_IMP_NETLOGIC_XLS408B: | ||
1047 | case PRID_IMP_NETLOGIC_XLS404B: | ||
1048 | c->cputype = CPU_XLR; | ||
1049 | __cpu_name[cpu] = "Netlogic XLS"; | ||
1050 | break; | ||
1051 | |||
1052 | default: | ||
1053 | printk(KERN_INFO "Unknown Netlogic chip id [%02x]!\n", | ||
1054 | c->processor_id); | ||
1055 | c->cputype = CPU_XLR; | ||
1056 | break; | ||
1057 | } | ||
1058 | |||
1059 | c->isa_level = MIPS_CPU_ISA_M64R1; | ||
1060 | c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; | ||
1061 | } | ||
1062 | |||
991 | #ifdef CONFIG_64BIT | 1063 | #ifdef CONFIG_64BIT |
992 | /* For use by uaccess.h */ | 1064 | /* For use by uaccess.h */ |
993 | u64 __ua_limit; | 1065 | u64 __ua_limit; |
@@ -1035,6 +1107,9 @@ __cpuinit void cpu_probe(void) | |||
1035 | case PRID_COMP_INGENIC: | 1107 | case PRID_COMP_INGENIC: |
1036 | cpu_probe_ingenic(c, cpu); | 1108 | cpu_probe_ingenic(c, cpu); |
1037 | break; | 1109 | break; |
1110 | case PRID_COMP_NETLOGIC: | ||
1111 | cpu_probe_netlogic(c, cpu); | ||
1112 | break; | ||
1038 | } | 1113 | } |
1039 | 1114 | ||
1040 | BUG_ON(!__cpu_name[cpu]); | 1115 | BUG_ON(!__cpu_name[cpu]); |
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c index 51489f8a825e..f96f99c794a3 100644 --- a/arch/mips/kernel/csrc-bcm1480.c +++ b/arch/mips/kernel/csrc-bcm1480.c | |||
@@ -49,6 +49,5 @@ void __init sb1480_clocksource_init(void) | |||
49 | 49 | ||
50 | plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG))); | 50 | plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG))); |
51 | zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000); | 51 | zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000); |
52 | clocksource_set_clock(cs, zbbus); | 52 | clocksource_register_hz(cs, zbbus); |
53 | clocksource_register(cs); | ||
54 | } | 53 | } |
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c index 23da108506b0..46bd7fa98d6c 100644 --- a/arch/mips/kernel/csrc-ioasic.c +++ b/arch/mips/kernel/csrc-ioasic.c | |||
@@ -59,7 +59,5 @@ void __init dec_ioasic_clocksource_init(void) | |||
59 | printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq); | 59 | printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq); |
60 | 60 | ||
61 | clocksource_dec.rating = 200 + freq / 10000000; | 61 | clocksource_dec.rating = 200 + freq / 10000000; |
62 | clocksource_set_clock(&clocksource_dec, freq); | 62 | clocksource_register_hz(&clocksource_dec, freq); |
63 | |||
64 | clocksource_register(&clocksource_dec); | ||
65 | } | 63 | } |
diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c index a27c16c8690e..2e7c5232da8d 100644 --- a/arch/mips/kernel/csrc-powertv.c +++ b/arch/mips/kernel/csrc-powertv.c | |||
@@ -78,9 +78,7 @@ static void __init powertv_c0_hpt_clocksource_init(void) | |||
78 | 78 | ||
79 | clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; | 79 | clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; |
80 | 80 | ||
81 | clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); | 81 | clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); |
82 | |||
83 | clocksource_register(&clocksource_mips); | ||
84 | } | 82 | } |
85 | 83 | ||
86 | /** | 84 | /** |
@@ -130,43 +128,16 @@ static struct clocksource clocksource_tim_c = { | |||
130 | /** | 128 | /** |
131 | * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock | 129 | * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock |
132 | * | 130 | * |
133 | * The hard part here is coming up with a constant k and shift s such that | ||
134 | * the 48-bit TIM_C value multiplied by k doesn't overflow and that value, | ||
135 | * when shifted right by s, yields the corresponding number of nanoseconds. | ||
136 | * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to | 131 | * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to |
137 | * 1 / (27,000,000/8) seconds. Multiply that by a billion and you get the | 132 | * 1 / (27,000,000/8) seconds. |
138 | * number of nanoseconds. Since the TIM_C value has 48 bits and the math is | ||
139 | * done in 64 bits, avoiding an overflow means that k must be less than | ||
140 | * 64 - 48 = 16 bits. | ||
141 | */ | 133 | */ |
142 | static void __init powertv_tim_c_clocksource_init(void) | 134 | static void __init powertv_tim_c_clocksource_init(void) |
143 | { | 135 | { |
144 | int prescale; | ||
145 | unsigned long dividend; | ||
146 | unsigned long k; | ||
147 | int s; | ||
148 | const int max_k_bits = (64 - 48) - 1; | ||
149 | const unsigned long billion = 1000000000; | ||
150 | const unsigned long counts_per_second = 27000000 / 8; | 136 | const unsigned long counts_per_second = 27000000 / 8; |
151 | 137 | ||
152 | prescale = BITS_PER_LONG - ilog2(billion) - 1; | ||
153 | dividend = billion << prescale; | ||
154 | k = dividend / counts_per_second; | ||
155 | s = ilog2(k) - max_k_bits; | ||
156 | |||
157 | if (s < 0) | ||
158 | s = prescale; | ||
159 | |||
160 | else { | ||
161 | k >>= s; | ||
162 | s += prescale; | ||
163 | } | ||
164 | |||
165 | clocksource_tim_c.mult = k; | ||
166 | clocksource_tim_c.shift = s; | ||
167 | clocksource_tim_c.rating = 200; | 138 | clocksource_tim_c.rating = 200; |
168 | 139 | ||
169 | clocksource_register(&clocksource_tim_c); | 140 | clocksource_register_hz(&clocksource_tim_c, counts_per_second); |
170 | tim_c = (struct tim_c *) asic_reg_addr(tim_ch); | 141 | tim_c = (struct tim_c *) asic_reg_addr(tim_ch); |
171 | } | 142 | } |
172 | 143 | ||
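
For reference, TIM_C ticks at 27 MHz / 8 = 3.375 MHz, so one cycle is 1 / 3,375,000 s, roughly 296.3 ns; clocksource_register_hz() derives the mult/shift pair from this rate, replacing the hand-computed constants removed above.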
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c index e95a3cd48eea..decd1fa38d55 100644 --- a/arch/mips/kernel/csrc-r4k.c +++ b/arch/mips/kernel/csrc-r4k.c | |||
@@ -30,9 +30,7 @@ int __init init_r4k_clocksource(void) | |||
30 | /* Calculate a somewhat reasonable rating value */ | 30 | /* Calculate a somewhat reasonable rating value */ |
31 | clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; | 31 | clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; |
32 | 32 | ||
33 | clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); | 33 | clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); |
34 | |||
35 | clocksource_register(&clocksource_mips); | ||
36 | 34 | ||
37 | return 0; | 35 | return 0; |
38 | } | 36 | } |
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c index d14d3d1907fa..e9606d907685 100644 --- a/arch/mips/kernel/csrc-sb1250.c +++ b/arch/mips/kernel/csrc-sb1250.c | |||
@@ -65,6 +65,5 @@ void __init sb1250_clocksource_init(void) | |||
65 | IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, | 65 | IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, |
66 | R_SCD_TIMER_CFG))); | 66 | R_SCD_TIMER_CFG))); |
67 | 67 | ||
68 | clocksource_set_clock(cs, V_SCD_TIMER_FREQ); | 68 | clocksource_register_hz(cs, V_SCD_TIMER_FREQ); |
69 | clocksource_register(cs); | ||
70 | } | 69 | } |
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index ffa331029e08..37acfa036d44 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S | |||
@@ -167,14 +167,13 @@ work_notifysig: # deal with pending signals and | |||
167 | FEXPORT(syscall_exit_work_partial) | 167 | FEXPORT(syscall_exit_work_partial) |
168 | SAVE_STATIC | 168 | SAVE_STATIC |
169 | syscall_exit_work: | 169 | syscall_exit_work: |
170 | li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | 170 | li t0, _TIF_WORK_SYSCALL_EXIT |
171 | and t0, a2 # a2 is preloaded with TI_FLAGS | 171 | and t0, a2 # a2 is preloaded with TI_FLAGS |
172 | beqz t0, work_pending # trace bit set? | 172 | beqz t0, work_pending # trace bit set? |
173 | local_irq_enable # could let do_syscall_trace() | 173 | local_irq_enable # could let syscall_trace_leave() |
174 | # call schedule() instead | 174 | # call schedule() instead |
175 | move a0, sp | 175 | move a0, sp |
176 | li a1, 1 | 176 | jal syscall_trace_leave |
177 | jal do_syscall_trace | ||
178 | b resume_userspace | 177 | b resume_userspace |
179 | 178 | ||
180 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) | 179 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) |
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 94ca2b018af7..feb8021a305f 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c | |||
@@ -23,6 +23,7 @@ | |||
23 | 23 | ||
24 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ | 24 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ |
25 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ | 25 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ |
26 | #define JUMP_RANGE_MASK ((1UL << 28) - 1) | ||
26 | 27 | ||
27 | #define INSN_NOP 0x00000000 /* nop */ | 28 | #define INSN_NOP 0x00000000 /* nop */ |
28 | #define INSN_JAL(addr) \ | 29 | #define INSN_JAL(addr) \ |
@@ -44,12 +45,12 @@ static inline void ftrace_dyn_arch_init_insns(void) | |||
44 | 45 | ||
45 | /* jal (ftrace_caller + 8), jump over the first two instruction */ | 46 | /* jal (ftrace_caller + 8), jump over the first two instruction */ |
46 | buf = (u32 *)&insn_jal_ftrace_caller; | 47 | buf = (u32 *)&insn_jal_ftrace_caller; |
47 | uasm_i_jal(&buf, (FTRACE_ADDR + 8)); | 48 | uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK); |
48 | 49 | ||
49 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 50 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
50 | /* j ftrace_graph_caller */ | 51 | /* j ftrace_graph_caller */ |
51 | buf = (u32 *)&insn_j_ftrace_graph_caller; | 52 | buf = (u32 *)&insn_j_ftrace_graph_caller; |
52 | uasm_i_j(&buf, (unsigned long)ftrace_graph_caller); | 53 | uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK); |
53 | #endif | 54 | #endif |
54 | } | 55 | } |
55 | 56 | ||
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index 2392a7a296d4..391221b6a6aa 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c | |||
@@ -125,87 +125,11 @@ void __init setup_pit_timer(void) | |||
125 | setup_irq(0, &irq0); | 125 | setup_irq(0, &irq0); |
126 | } | 126 | } |
127 | 127 | ||
128 | /* | ||
129 | * Since the PIT overflows every tick, its not very useful | ||
130 | * to just read by itself. So use jiffies to emulate a free | ||
131 | * running counter: | ||
132 | */ | ||
133 | static cycle_t pit_read(struct clocksource *cs) | ||
134 | { | ||
135 | unsigned long flags; | ||
136 | int count; | ||
137 | u32 jifs; | ||
138 | static int old_count; | ||
139 | static u32 old_jifs; | ||
140 | |||
141 | raw_spin_lock_irqsave(&i8253_lock, flags); | ||
142 | /* | ||
143 | * Although our caller may have the read side of xtime_lock, | ||
144 | * this is now a seqlock, and we are cheating in this routine | ||
145 | * by having side effects on state that we cannot undo if | ||
146 | * there is a collision on the seqlock and our caller has to | ||
147 | * retry. (Namely, old_jifs and old_count.) So we must treat | ||
148 | * jiffies as volatile despite the lock. We read jiffies | ||
149 | * before latching the timer count to guarantee that although | ||
150 | * the jiffies value might be older than the count (that is, | ||
151 | * the counter may underflow between the last point where | ||
152 | * jiffies was incremented and the point where we latch the | ||
153 | * count), it cannot be newer. | ||
154 | */ | ||
155 | jifs = jiffies; | ||
156 | outb_p(0x00, PIT_MODE); /* latch the count ASAP */ | ||
157 | count = inb_p(PIT_CH0); /* read the latched count */ | ||
158 | count |= inb_p(PIT_CH0) << 8; | ||
159 | |||
160 | /* VIA686a test code... reset the latch if count > max + 1 */ | ||
161 | if (count > LATCH) { | ||
162 | outb_p(0x34, PIT_MODE); | ||
163 | outb_p(LATCH & 0xff, PIT_CH0); | ||
164 | outb(LATCH >> 8, PIT_CH0); | ||
165 | count = LATCH - 1; | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * It's possible for count to appear to go the wrong way for a | ||
170 | * couple of reasons: | ||
171 | * | ||
172 | * 1. The timer counter underflows, but we haven't handled the | ||
173 | * resulting interrupt and incremented jiffies yet. | ||
174 | * 2. Hardware problem with the timer, not giving us continuous time, | ||
175 | * the counter does small "jumps" upwards on some Pentium systems, | ||
176 | * (see c't 95/10 page 335 for Neptun bug.) | ||
177 | * | ||
178 | * Previous attempts to handle these cases intelligently were | ||
179 | * buggy, so we just do the simple thing now. | ||
180 | */ | ||
181 | if (count > old_count && jifs == old_jifs) { | ||
182 | count = old_count; | ||
183 | } | ||
184 | old_count = count; | ||
185 | old_jifs = jifs; | ||
186 | |||
187 | raw_spin_unlock_irqrestore(&i8253_lock, flags); | ||
188 | |||
189 | count = (LATCH - 1) - count; | ||
190 | |||
191 | return (cycle_t)(jifs * LATCH) + count; | ||
192 | } | ||
193 | |||
194 | static struct clocksource clocksource_pit = { | ||
195 | .name = "pit", | ||
196 | .rating = 110, | ||
197 | .read = pit_read, | ||
198 | .mask = CLOCKSOURCE_MASK(32), | ||
199 | .mult = 0, | ||
200 | .shift = 20, | ||
201 | }; | ||
202 | |||
203 | static int __init init_pit_clocksource(void) | 128 | static int __init init_pit_clocksource(void) |
204 | { | 129 | { |
205 | if (num_possible_cpus() > 1) /* PIT does not scale! */ | 130 | if (num_possible_cpus() > 1) /* PIT does not scale! */ |
206 | return 0; | 131 | return 0; |
207 | 132 | ||
208 | clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20); | 133 | return clocksource_i8253_init(); |
209 | return clocksource_register(&clocksource_pit); | ||
210 | } | 134 | } |
211 | arch_initcall(init_pit_clocksource); | 135 | arch_initcall(init_pit_clocksource); |
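The long pit_read()/clocksource_pit block removed above is not lost: this merge moves the PIT clocksource into a shared drivers/clocksource/i8253.c, so the MIPS side keeps only the init call. A sketch of what the shared helper is assumed to boil down to (simplified):

int __init clocksource_i8253_init(void)
{
	/* one registration at the PIT input frequency; mult/shift are
	 * derived by the core, as in the sb1250 change earlier */
	return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE);
}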
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index d21c388c0116..4e6ea1ffad46 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
@@ -533,15 +533,10 @@ static inline int audit_arch(void) | |||
533 | * Notification of system call entry/exit | 533 | * Notification of system call entry/exit |
534 | * - triggered by current->work.syscall_trace | 534 | * - triggered by current->work.syscall_trace |
535 | */ | 535 | */ |
536 | asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) | 536 | asmlinkage void syscall_trace_enter(struct pt_regs *regs) |
537 | { | 537 | { |
538 | /* do the secure computing check first */ | 538 | /* do the secure computing check first */ |
539 | if (!entryexit) | 539 | secure_computing(regs->regs[2]); |
540 | secure_computing(regs->regs[2]); | ||
541 | |||
542 | if (unlikely(current->audit_context) && entryexit) | ||
543 | audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]), | ||
544 | regs->regs[2]); | ||
545 | 540 | ||
546 | if (!(current->ptrace & PT_PTRACED)) | 541 | if (!(current->ptrace & PT_PTRACED)) |
547 | goto out; | 542 | goto out; |
@@ -565,8 +560,40 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) | |||
565 | } | 560 | } |
566 | 561 | ||
567 | out: | 562 | out: |
568 | if (unlikely(current->audit_context) && !entryexit) | 563 | if (unlikely(current->audit_context)) |
569 | audit_syscall_entry(audit_arch(), regs->regs[2], | 564 | audit_syscall_entry(audit_arch(), regs->regs[2], |
570 | regs->regs[4], regs->regs[5], | 565 | regs->regs[4], regs->regs[5], |
571 | regs->regs[6], regs->regs[7]); | 566 | regs->regs[6], regs->regs[7]); |
572 | } | 567 | } |
568 | |||
569 | /* | ||
570 | * Notification of system call entry/exit | ||
571 | * - triggered by current->work.syscall_trace | ||
572 | */ | ||
573 | asmlinkage void syscall_trace_leave(struct pt_regs *regs) | ||
574 | { | ||
575 | if (unlikely(current->audit_context)) | ||
576 | audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]), | ||
577 | -regs->regs[2]); | ||
578 | |||
579 | if (!(current->ptrace & PT_PTRACED)) | ||
580 | return; | ||
581 | |||
582 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | ||
583 | return; | ||
584 | |||
585 | /* The 0x80 provides a way for the tracing parent to distinguish | ||
586 | between a syscall stop and SIGTRAP delivery */ | ||
587 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? | ||
588 | 0x80 : 0)); | ||
589 | |||
590 | /* | ||
591 | * this isn't the same as continuing with a signal, but it will do | ||
592 | * for normal use. strace only continues with a signal if the | ||
593 | * stopping signal is not SIGTRAP. -brl | ||
594 | */ | ||
595 | if (current->exit_code) { | ||
596 | send_sig(current->exit_code, current, 1); | ||
597 | current->exit_code = 0; | ||
598 | } | ||
599 | } | ||
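Splitting do_syscall_trace() removes the entryexit flag (and the li a1, 0/1 setup in the assembly callers below): entry now does the seccomp check plus audit_syscall_entry(), while exit does audit_syscall_exit() plus the ptrace SIGTRAP stop. The resulting call order, as a simplified sketch rather than the actual MIPS assembly:

long traced_syscall(struct pt_regs *regs, long (*do_call)(struct pt_regs *))
{
	long ret;

	syscall_trace_enter(regs);	/* seccomp + audit_syscall_entry() */
	ret = do_call(regs);		/* the syscall itself */
	syscall_trace_leave(regs);	/* audit_syscall_exit() + ptrace stop */
	return ret;
}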
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 7f5468b38d4c..7a8e1dd7f6f2 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -88,8 +88,7 @@ syscall_trace_entry: | |||
88 | SAVE_STATIC | 88 | SAVE_STATIC |
89 | move s0, t2 | 89 | move s0, t2 |
90 | move a0, sp | 90 | move a0, sp |
91 | li a1, 0 | 91 | jal syscall_trace_enter |
92 | jal do_syscall_trace | ||
93 | 92 | ||
94 | move t0, s0 | 93 | move t0, s0 |
95 | RESTORE_STATIC | 94 | RESTORE_STATIC |
@@ -565,7 +564,7 @@ einval: li v0, -ENOSYS | |||
565 | sys sys_ioprio_get 2 /* 4315 */ | 564 | sys sys_ioprio_get 2 /* 4315 */ |
566 | sys sys_utimensat 4 | 565 | sys sys_utimensat 4 |
567 | sys sys_signalfd 3 | 566 | sys sys_signalfd 3 |
568 | sys sys_ni_syscall 0 | 567 | sys sys_ni_syscall 0 /* was timerfd */ |
569 | sys sys_eventfd 1 | 568 | sys sys_eventfd 1 |
570 | sys sys_fallocate 6 /* 4320 */ | 569 | sys sys_fallocate 6 /* 4320 */ |
571 | sys sys_timerfd_create 2 | 570 | sys sys_timerfd_create 2 |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a2e1fcbc41dc..2d31c83224f9 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -91,8 +91,7 @@ syscall_trace_entry: | |||
91 | SAVE_STATIC | 91 | SAVE_STATIC |
92 | move s0, t2 | 92 | move s0, t2 |
93 | move a0, sp | 93 | move a0, sp |
94 | li a1, 0 | 94 | jal syscall_trace_enter |
95 | jal do_syscall_trace | ||
96 | 95 | ||
97 | move t0, s0 | 96 | move t0, s0 |
98 | RESTORE_STATIC | 97 | RESTORE_STATIC |
@@ -404,7 +403,7 @@ sys_call_table: | |||
404 | PTR sys_ioprio_get | 403 | PTR sys_ioprio_get |
405 | PTR sys_utimensat /* 5275 */ | 404 | PTR sys_utimensat /* 5275 */ |
406 | PTR sys_signalfd | 405 | PTR sys_signalfd |
407 | PTR sys_ni_syscall | 406 | PTR sys_ni_syscall /* was timerfd */ |
408 | PTR sys_eventfd | 407 | PTR sys_eventfd |
409 | PTR sys_fallocate | 408 | PTR sys_fallocate |
410 | PTR sys_timerfd_create /* 5280 */ | 409 | PTR sys_timerfd_create /* 5280 */ |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index b2c7624995b8..38a0503b9a4a 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -89,8 +89,7 @@ n32_syscall_trace_entry: | |||
89 | SAVE_STATIC | 89 | SAVE_STATIC |
90 | move s0, t2 | 90 | move s0, t2 |
91 | move a0, sp | 91 | move a0, sp |
92 | li a1, 0 | 92 | jal syscall_trace_enter |
93 | jal do_syscall_trace | ||
94 | 93 | ||
95 | move t0, s0 | 94 | move t0, s0 |
96 | RESTORE_STATIC | 95 | RESTORE_STATIC |
@@ -403,7 +402,7 @@ EXPORT(sysn32_call_table) | |||
403 | PTR sys_ioprio_get | 402 | PTR sys_ioprio_get |
404 | PTR compat_sys_utimensat | 403 | PTR compat_sys_utimensat |
405 | PTR compat_sys_signalfd /* 6280 */ | 404 | PTR compat_sys_signalfd /* 6280 */ |
406 | PTR sys_ni_syscall | 405 | PTR sys_ni_syscall /* was timerfd */ |
407 | PTR sys_eventfd | 406 | PTR sys_eventfd |
408 | PTR sys_fallocate | 407 | PTR sys_fallocate |
409 | PTR sys_timerfd_create | 408 | PTR sys_timerfd_create |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 049a9c8c49a0..91ea5e4041dd 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -123,8 +123,7 @@ trace_a_syscall: | |||
123 | 123 | ||
124 | move s0, t2 # Save syscall pointer | 124 | move s0, t2 # Save syscall pointer |
125 | move a0, sp | 125 | move a0, sp |
126 | li a1, 0 | 126 | jal syscall_trace_enter |
127 | jal do_syscall_trace | ||
128 | 127 | ||
129 | move t0, s0 | 128 | move t0, s0 |
130 | RESTORE_STATIC | 129 | RESTORE_STATIC |
@@ -522,7 +521,7 @@ sys_call_table: | |||
522 | PTR sys_ioprio_get /* 4315 */ | 521 | PTR sys_ioprio_get /* 4315 */ |
523 | PTR compat_sys_utimensat | 522 | PTR compat_sys_utimensat |
524 | PTR compat_sys_signalfd | 523 | PTR compat_sys_signalfd |
525 | PTR sys_ni_syscall | 524 | PTR sys_ni_syscall /* was timerfd */ |
526 | PTR sys_eventfd | 525 | PTR sys_eventfd |
527 | PTR sys32_fallocate /* 4320 */ | 526 | PTR sys32_fallocate /* 4320 */ |
528 | PTR sys_timerfd_create | 527 | PTR sys_timerfd_create |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 5a88cc4ccd5a..cedac4633741 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -929,7 +929,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) | |||
929 | 929 | ||
930 | static void ipi_resched_interrupt(void) | 930 | static void ipi_resched_interrupt(void) |
931 | { | 931 | { |
932 | /* Return from interrupt should be enough to cause scheduler check */ | 932 | scheduler_ipi(); |
933 | } | 933 | } |
934 | 934 | ||
935 | static void ipi_call_interrupt(void) | 935 | static void ipi_call_interrupt(void) |
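The one-line smtc.c change is part of a tree-wide conversion in this merge: resched IPIs used to rely on the mere return from interrupt to trigger a reschedule, but the scheduler now queues remote wakeups and needs an explicit scheduler_ipi() callback to process them. The minimal shape of a converted handler (sketch):

static irqreturn_t resched_ipi(int irq, void *dev_id)
{
	scheduler_ipi();	/* drains remotely queued wakeups */
	return IRQ_HANDLED;
}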
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 58beabf50b3c..d02765708ddb 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -10,12 +10,9 @@ | |||
10 | #include <linux/capability.h> | 10 | #include <linux/capability.h> |
11 | #include <linux/errno.h> | 11 | #include <linux/errno.h> |
12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
13 | #include <linux/mm.h> | ||
14 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
15 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
16 | #include <linux/mman.h> | ||
17 | #include <linux/ptrace.h> | 15 | #include <linux/ptrace.h> |
18 | #include <linux/sched.h> | ||
19 | #include <linux/string.h> | 16 | #include <linux/string.h> |
20 | #include <linux/syscalls.h> | 17 | #include <linux/syscalls.h> |
21 | #include <linux/file.h> | 18 | #include <linux/file.h> |
@@ -25,11 +22,9 @@ | |||
25 | #include <linux/msg.h> | 22 | #include <linux/msg.h> |
26 | #include <linux/shm.h> | 23 | #include <linux/shm.h> |
27 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
28 | #include <linux/module.h> | ||
29 | #include <linux/ipc.h> | 25 | #include <linux/ipc.h> |
30 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
31 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
32 | #include <linux/random.h> | ||
33 | #include <linux/elf.h> | 28 | #include <linux/elf.h> |
34 | 29 | ||
35 | #include <asm/asm.h> | 30 | #include <asm/asm.h> |
@@ -66,121 +61,6 @@ out: | |||
66 | return res; | 61 | return res; |
67 | } | 62 | } |
68 | 63 | ||
69 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ | ||
70 | |||
71 | EXPORT_SYMBOL(shm_align_mask); | ||
72 | |||
73 | #define COLOUR_ALIGN(addr,pgoff) \ | ||
74 | ((((addr) + shm_align_mask) & ~shm_align_mask) + \ | ||
75 | (((pgoff) << PAGE_SHIFT) & shm_align_mask)) | ||
76 | |||
77 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
78 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
79 | { | ||
80 | struct vm_area_struct * vmm; | ||
81 | int do_color_align; | ||
82 | unsigned long task_size; | ||
83 | |||
84 | #ifdef CONFIG_32BIT | ||
85 | task_size = TASK_SIZE; | ||
86 | #else /* Must be CONFIG_64BIT*/ | ||
87 | task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE; | ||
88 | #endif | ||
89 | |||
90 | if (len > task_size) | ||
91 | return -ENOMEM; | ||
92 | |||
93 | if (flags & MAP_FIXED) { | ||
94 | /* Even MAP_FIXED mappings must reside within task_size. */ | ||
95 | if (task_size - len < addr) | ||
96 | return -EINVAL; | ||
97 | |||
98 | /* | ||
99 | * We do not accept a shared mapping if it would violate | ||
100 | * cache aliasing constraints. | ||
101 | */ | ||
102 | if ((flags & MAP_SHARED) && | ||
103 | ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) | ||
104 | return -EINVAL; | ||
105 | return addr; | ||
106 | } | ||
107 | |||
108 | do_color_align = 0; | ||
109 | if (filp || (flags & MAP_SHARED)) | ||
110 | do_color_align = 1; | ||
111 | if (addr) { | ||
112 | if (do_color_align) | ||
113 | addr = COLOUR_ALIGN(addr, pgoff); | ||
114 | else | ||
115 | addr = PAGE_ALIGN(addr); | ||
116 | vmm = find_vma(current->mm, addr); | ||
117 | if (task_size - len >= addr && | ||
118 | (!vmm || addr + len <= vmm->vm_start)) | ||
119 | return addr; | ||
120 | } | ||
121 | addr = current->mm->mmap_base; | ||
122 | if (do_color_align) | ||
123 | addr = COLOUR_ALIGN(addr, pgoff); | ||
124 | else | ||
125 | addr = PAGE_ALIGN(addr); | ||
126 | |||
127 | for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { | ||
128 | /* At this point: (!vmm || addr < vmm->vm_end). */ | ||
129 | if (task_size - len < addr) | ||
130 | return -ENOMEM; | ||
131 | if (!vmm || addr + len <= vmm->vm_start) | ||
132 | return addr; | ||
133 | addr = vmm->vm_end; | ||
134 | if (do_color_align) | ||
135 | addr = COLOUR_ALIGN(addr, pgoff); | ||
136 | } | ||
137 | } | ||
138 | |||
139 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
140 | { | ||
141 | unsigned long random_factor = 0UL; | ||
142 | |||
143 | if (current->flags & PF_RANDOMIZE) { | ||
144 | random_factor = get_random_int(); | ||
145 | random_factor = random_factor << PAGE_SHIFT; | ||
146 | if (TASK_IS_32BIT_ADDR) | ||
147 | random_factor &= 0xfffffful; | ||
148 | else | ||
149 | random_factor &= 0xffffffful; | ||
150 | } | ||
151 | |||
152 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | ||
153 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
154 | mm->unmap_area = arch_unmap_area; | ||
155 | } | ||
156 | |||
157 | static inline unsigned long brk_rnd(void) | ||
158 | { | ||
159 | unsigned long rnd = get_random_int(); | ||
160 | |||
161 | rnd = rnd << PAGE_SHIFT; | ||
162 | /* 8MB for 32bit, 256MB for 64bit */ | ||
163 | if (TASK_IS_32BIT_ADDR) | ||
164 | rnd = rnd & 0x7ffffful; | ||
165 | else | ||
166 | rnd = rnd & 0xffffffful; | ||
167 | |||
168 | return rnd; | ||
169 | } | ||
170 | |||
171 | unsigned long arch_randomize_brk(struct mm_struct *mm) | ||
172 | { | ||
173 | unsigned long base = mm->brk; | ||
174 | unsigned long ret; | ||
175 | |||
176 | ret = PAGE_ALIGN(base + brk_rnd()); | ||
177 | |||
178 | if (ret < mm->brk) | ||
179 | return mm->brk; | ||
180 | |||
181 | return ret; | ||
182 | } | ||
183 | |||
184 | SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, | 64 | SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, |
185 | unsigned long, prot, unsigned long, flags, unsigned long, | 65 | unsigned long, prot, unsigned long, flags, unsigned long, |
186 | fd, off_t, offset) | 66 | fd, off_t, offset) |
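The arch_get_unmapped_area()/brk-randomization code deleted above moves out of syscall.c as part of a consolidation; the cache-colouring logic itself is unchanged. For reference, COLOUR_ALIGN rounds a candidate address up to the aliasing boundary and then adds the file offset's colour. A worked example with a 16 KB alias mask and 4 KB pages:

/* shm_align_mask = 0x3fff, PAGE_SHIFT = 12, addr = 0x10001000, pgoff = 1:
 *   ((0x10001000 + 0x3fff) & ~0x3fff) = 0x10004000
 *   ((1 << 12) & 0x3fff)              = 0x00001000
 *   COLOUR_ALIGN(addr, pgoff)         = 0x10005000
 * so virtual aliases of the same page index the same cache lines.
 */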
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 71350f7f2d88..e9b3af27d844 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -374,7 +374,8 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
374 | unsigned long dvpret = dvpe(); | 374 | unsigned long dvpret = dvpe(); |
375 | #endif /* CONFIG_MIPS_MT_SMTC */ | 375 | #endif /* CONFIG_MIPS_MT_SMTC */ |
376 | 376 | ||
377 | notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV); | 377 | if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) |
378 | sig = 0; | ||
378 | 379 | ||
379 | console_verbose(); | 380 | console_verbose(); |
380 | spin_lock_irq(&die_lock); | 381 | spin_lock_irq(&die_lock); |
@@ -383,9 +384,6 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
383 | mips_mt_regdump(dvpret); | 384 | mips_mt_regdump(dvpret); |
384 | #endif /* CONFIG_MIPS_MT_SMTC */ | 385 | #endif /* CONFIG_MIPS_MT_SMTC */ |
385 | 386 | ||
386 | if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) | ||
387 | sig = 0; | ||
388 | |||
389 | printk("%s[#%d]:\n", str, ++die_counter); | 387 | printk("%s[#%d]:\n", str, ++die_counter); |
390 | show_registers(regs); | 388 | show_registers(regs); |
391 | add_taint(TAINT_DIE); | 389 | add_taint(TAINT_DIE); |
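Hoisting the notify_die() call means a handler returning NOTIFY_STOP (a kernel debugger, for instance) zeroes the fatal signal before console_verbose() and die_lock are touched, instead of after. A sketch of such a handler, with hypothetical names:

static int example_die_handler(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	if (action == DIE_OOPS)
		return NOTIFY_STOP;	/* claim the oops; die() sets sig = 0 */
	return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
	.notifier_call = example_die_handler,
};
/* hooked up with register_die_notifier(&example_die_nb) */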
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 832afbb87588..cd2ca544454b 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -68,12 +68,14 @@ SECTIONS | |||
68 | RODATA | 68 | RODATA |
69 | 69 | ||
70 | /* writeable */ | 70 | /* writeable */ |
71 | _sdata = .; /* Start of data section */ | ||
71 | .data : { /* Data */ | 72 | .data : { /* Data */ |
72 | . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ | 73 | . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ |
73 | 74 | ||
74 | INIT_TASK_DATA(PAGE_SIZE) | 75 | INIT_TASK_DATA(PAGE_SIZE) |
75 | NOSAVE_DATA | 76 | NOSAVE_DATA |
76 | CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) | 77 | CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) |
78 | READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) | ||
77 | DATA_DATA | 79 | DATA_DATA |
78 | CONSTRUCTORS | 80 | CONSTRUCTORS |
79 | } | 81 | } |
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig new file mode 100644 index 000000000000..3fccf2104513 --- /dev/null +++ b/arch/mips/lantiq/Kconfig | |||
@@ -0,0 +1,23 @@ | |||
1 | if LANTIQ | ||
2 | |||
3 | config SOC_TYPE_XWAY | ||
4 | bool | ||
5 | default n | ||
6 | |||
7 | choice | ||
8 | prompt "SoC Type" | ||
9 | default SOC_XWAY | ||
10 | |||
11 | config SOC_AMAZON_SE | ||
12 | bool "Amazon SE" | ||
13 | select SOC_TYPE_XWAY | ||
14 | |||
15 | config SOC_XWAY | ||
16 | bool "XWAY" | ||
17 | select SOC_TYPE_XWAY | ||
18 | select HW_HAS_PCI | ||
19 | endchoice | ||
20 | |||
21 | source "arch/mips/lantiq/xway/Kconfig" | ||
22 | |||
23 | endif | ||
diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile new file mode 100644 index 000000000000..e5dae0e24b00 --- /dev/null +++ b/arch/mips/lantiq/Makefile | |||
@@ -0,0 +1,11 @@ | |||
1 | # Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
2 | # | ||
3 | # This program is free software; you can redistribute it and/or modify it | ||
4 | # under the terms of the GNU General Public License version 2 as published | ||
5 | # by the Free Software Foundation. | ||
6 | |||
7 | obj-y := irq.o setup.o clk.o prom.o devices.o | ||
8 | |||
9 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | ||
10 | |||
11 | obj-$(CONFIG_SOC_TYPE_XWAY) += xway/ | ||
diff --git a/arch/mips/lantiq/Platform b/arch/mips/lantiq/Platform new file mode 100644 index 000000000000..f3dff05722de --- /dev/null +++ b/arch/mips/lantiq/Platform | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Lantiq | ||
3 | # | ||
4 | |||
5 | platform-$(CONFIG_LANTIQ) += lantiq/ | ||
6 | cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq | ||
7 | load-$(CONFIG_LANTIQ) = 0xffffffff80002000 | ||
8 | cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway | ||
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c new file mode 100644 index 000000000000..94560899d13e --- /dev/null +++ b/arch/mips/lantiq/clk.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com> | ||
7 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
8 | */ | ||
9 | #include <linux/io.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/clk.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/list.h> | ||
17 | |||
18 | #include <asm/time.h> | ||
19 | #include <asm/irq.h> | ||
20 | #include <asm/div64.h> | ||
21 | |||
22 | #include <lantiq_soc.h> | ||
23 | |||
24 | #include "clk.h" | ||
25 | |||
26 | struct clk { | ||
27 | const char *name; | ||
28 | unsigned long rate; | ||
29 | unsigned long (*get_rate) (void); | ||
30 | }; | ||
31 | |||
32 | static struct clk *cpu_clk; | ||
33 | static int cpu_clk_cnt; | ||
34 | |||
35 | /* Lantiq SoCs have 3 static clocks */ | ||
36 | static struct clk cpu_clk_generic[] = { | ||
37 | { | ||
38 | .name = "cpu", | ||
39 | .get_rate = ltq_get_cpu_hz, | ||
40 | }, { | ||
41 | .name = "fpi", | ||
42 | .get_rate = ltq_get_fpi_hz, | ||
43 | }, { | ||
44 | .name = "io", | ||
45 | .get_rate = ltq_get_io_region_clock, | ||
46 | }, | ||
47 | }; | ||
48 | |||
49 | static struct resource ltq_cgu_resource = { | ||
50 | .name = "cgu", | ||
51 | .start = LTQ_CGU_BASE_ADDR, | ||
52 | .end = LTQ_CGU_BASE_ADDR + LTQ_CGU_SIZE - 1, | ||
53 | .flags = IORESOURCE_MEM, | ||
54 | }; | ||
55 | |||
56 | /* remapped clock register range */ | ||
57 | void __iomem *ltq_cgu_membase; | ||
58 | |||
59 | void clk_init(void) | ||
60 | { | ||
61 | cpu_clk = cpu_clk_generic; | ||
62 | cpu_clk_cnt = ARRAY_SIZE(cpu_clk_generic); | ||
63 | } | ||
64 | |||
65 | static inline int clk_good(struct clk *clk) | ||
66 | { | ||
67 | return clk && !IS_ERR(clk); | ||
68 | } | ||
69 | |||
70 | unsigned long clk_get_rate(struct clk *clk) | ||
71 | { | ||
72 | if (unlikely(!clk_good(clk))) | ||
73 | return 0; | ||
74 | |||
75 | if (clk->rate != 0) | ||
76 | return clk->rate; | ||
77 | |||
78 | if (clk->get_rate != NULL) | ||
79 | return clk->get_rate(); | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | EXPORT_SYMBOL(clk_get_rate); | ||
84 | |||
85 | struct clk *clk_get(struct device *dev, const char *id) | ||
86 | { | ||
87 | int i; | ||
88 | |||
89 | for (i = 0; i < cpu_clk_cnt; i++) | ||
90 | if (!strcmp(id, cpu_clk[i].name)) | ||
91 | return &cpu_clk[i]; | ||
92 | BUG(); | ||
93 | return ERR_PTR(-ENOENT); | ||
94 | } | ||
95 | EXPORT_SYMBOL(clk_get); | ||
96 | |||
97 | void clk_put(struct clk *clk) | ||
98 | { | ||
99 | /* not used */ | ||
100 | } | ||
101 | EXPORT_SYMBOL(clk_put); | ||
102 | |||
103 | static inline u32 ltq_get_counter_resolution(void) | ||
104 | { | ||
105 | u32 res; | ||
106 | |||
107 | __asm__ __volatile__( | ||
108 | ".set push\n" | ||
109 | ".set mips32r2\n" | ||
110 | "rdhwr %0, $3\n" | ||
111 | ".set pop\n" | ||
112 | : "=&r" (res) | ||
113 | : /* no input */ | ||
114 | : "memory"); | ||
115 | |||
116 | return res; | ||
117 | } | ||
118 | |||
119 | void __init plat_time_init(void) | ||
120 | { | ||
121 | struct clk *clk; | ||
122 | |||
123 | if (insert_resource(&iomem_resource, <q_cgu_resource) < 0) | ||
124 | panic("Failed to insert cgu memory\n"); | ||
125 | |||
126 | if (request_mem_region(ltq_cgu_resource.start, | ||
127 | resource_size(<q_cgu_resource), "cgu") < 0) | ||
128 | panic("Failed to request cgu memory\n"); | ||
129 | |||
130 | ltq_cgu_membase = ioremap_nocache(ltq_cgu_resource.start, | ||
131 | resource_size(<q_cgu_resource)); | ||
132 | if (!ltq_cgu_membase) { | ||
133 | pr_err("Failed to remap cgu memory\n"); | ||
134 | unreachable(); | ||
135 | } | ||
136 | clk = clk_get(0, "cpu"); | ||
137 | mips_hpt_frequency = clk_get_rate(clk) / ltq_get_counter_resolution(); | ||
138 | write_c0_compare(read_c0_count()); | ||
139 | clk_put(clk); | ||
140 | } | ||
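clk.c implements just enough of the generic clk API for the three fixed SoC clocks; clk_get() matches by name and clk_put() is a no-op. Typical consumption, as plat_time_init() above does for "cpu" (sketch):

static unsigned long ltq_example_rate(const char *name)
{
	struct clk *clk = clk_get(NULL, name);	/* "cpu", "fpi" or "io" */
	unsigned long rate = clk_get_rate(clk);	/* defers to ->get_rate() */

	clk_put(clk);
	return rate;
}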
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h new file mode 100644 index 000000000000..3328925f2c3f --- /dev/null +++ b/arch/mips/lantiq/clk.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_CLK_H__ | ||
10 | #define _LTQ_CLK_H__ | ||
11 | |||
12 | extern void clk_init(void); | ||
13 | |||
14 | extern unsigned long ltq_get_cpu_hz(void); | ||
15 | extern unsigned long ltq_get_fpi_hz(void); | ||
16 | extern unsigned long ltq_get_io_region_clock(void); | ||
17 | |||
18 | #endif | ||
diff --git a/arch/mips/lantiq/devices.c b/arch/mips/lantiq/devices.c new file mode 100644 index 000000000000..7b82c34cb169 --- /dev/null +++ b/arch/mips/lantiq/devices.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/reboot.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/leds.h> | ||
17 | #include <linux/etherdevice.h> | ||
18 | #include <linux/reboot.h> | ||
19 | #include <linux/time.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/gpio.h> | ||
22 | #include <linux/leds.h> | ||
23 | |||
24 | #include <asm/bootinfo.h> | ||
25 | #include <asm/irq.h> | ||
26 | |||
27 | #include <lantiq_soc.h> | ||
28 | |||
29 | #include "devices.h" | ||
30 | |||
31 | /* nor flash */ | ||
32 | static struct resource ltq_nor_resource = { | ||
33 | .name = "nor", | ||
34 | .start = LTQ_FLASH_START, | ||
35 | .end = LTQ_FLASH_START + LTQ_FLASH_MAX - 1, | ||
36 | .flags = IORESOURCE_MEM, | ||
37 | }; | ||
38 | |||
39 | static struct platform_device ltq_nor = { | ||
40 | .name = "ltq_nor", | ||
41 | .resource = <q_nor_resource, | ||
42 | .num_resources = 1, | ||
43 | }; | ||
44 | |||
45 | void __init ltq_register_nor(struct physmap_flash_data *data) | ||
46 | { | ||
47 | ltq_nor.dev.platform_data = data; | ||
48 | platform_device_register(<q_nor); | ||
49 | } | ||
50 | |||
51 | /* watchdog */ | ||
52 | static struct resource ltq_wdt_resource = { | ||
53 | .name = "watchdog", | ||
54 | .start = LTQ_WDT_BASE_ADDR, | ||
55 | .end = LTQ_WDT_BASE_ADDR + LTQ_WDT_SIZE - 1, | ||
56 | .flags = IORESOURCE_MEM, | ||
57 | }; | ||
58 | |||
59 | void __init ltq_register_wdt(void) | ||
60 | { | ||
61 | platform_device_register_simple("ltq_wdt", 0, <q_wdt_resource, 1); | ||
62 | } | ||
63 | |||
64 | /* asc ports */ | ||
65 | static struct resource ltq_asc0_resources[] = { | ||
66 | { | ||
67 | .name = "asc0", | ||
68 | .start = LTQ_ASC0_BASE_ADDR, | ||
69 | .end = LTQ_ASC0_BASE_ADDR + LTQ_ASC_SIZE - 1, | ||
70 | .flags = IORESOURCE_MEM, | ||
71 | }, | ||
72 | IRQ_RES(tx, LTQ_ASC_TIR(0)), | ||
73 | IRQ_RES(rx, LTQ_ASC_RIR(0)), | ||
74 | IRQ_RES(err, LTQ_ASC_EIR(0)), | ||
75 | }; | ||
76 | |||
77 | static struct resource ltq_asc1_resources[] = { | ||
78 | { | ||
79 | .name = "asc1", | ||
80 | .start = LTQ_ASC1_BASE_ADDR, | ||
81 | .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1, | ||
82 | .flags = IORESOURCE_MEM, | ||
83 | }, | ||
84 | IRQ_RES(tx, LTQ_ASC_TIR(1)), | ||
85 | IRQ_RES(rx, LTQ_ASC_RIR(1)), | ||
86 | IRQ_RES(err, LTQ_ASC_EIR(1)), | ||
87 | }; | ||
88 | |||
89 | void __init ltq_register_asc(int port) | ||
90 | { | ||
91 | switch (port) { | ||
92 | case 0: | ||
93 | platform_device_register_simple("ltq_asc", 0, | ||
94 | ltq_asc0_resources, ARRAY_SIZE(ltq_asc0_resources)); | ||
95 | break; | ||
96 | case 1: | ||
97 | platform_device_register_simple("ltq_asc", 1, | ||
98 | ltq_asc1_resources, ARRAY_SIZE(ltq_asc1_resources)); | ||
99 | break; | ||
100 | default: | ||
101 | break; | ||
102 | } | ||
103 | } | ||
104 | |||
105 | #ifdef CONFIG_PCI | ||
106 | /* pci */ | ||
107 | static struct platform_device ltq_pci = { | ||
108 | .name = "ltq_pci", | ||
109 | .num_resources = 0, | ||
110 | }; | ||
111 | |||
112 | void __init ltq_register_pci(struct ltq_pci_data *data) | ||
113 | { | ||
114 | ltq_pci.dev.platform_data = data; | ||
115 | platform_device_register(<q_pci); | ||
116 | } | ||
117 | #else | ||
118 | void __init ltq_register_pci(struct ltq_pci_data *data) | ||
119 | { | ||
120 | pr_err("kernel is compiled without PCI support\n"); | ||
121 | } | ||
122 | #endif | ||
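These registration helpers are meant to be called from board-specific machine files (the mach-easy50712/mach-easy50601 objects in the xway Makefile below). A hypothetical board init illustrating the pattern; the flash width and port number are assumptions, not from this patch:

static struct physmap_flash_data example_nor_data = {
	.width	= 2,			/* 16-bit NOR, illustrative */
};

static void __init example_board_init(void)
{
	ltq_register_asc(1);		/* console UART */
	ltq_register_nor(&example_nor_data);
	ltq_register_wdt();
}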
diff --git a/arch/mips/lantiq/devices.h b/arch/mips/lantiq/devices.h new file mode 100644 index 000000000000..2947bb19a528 --- /dev/null +++ b/arch/mips/lantiq/devices.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_DEVICES_H__ | ||
10 | #define _LTQ_DEVICES_H__ | ||
11 | |||
12 | #include <lantiq_platform.h> | ||
13 | #include <linux/mtd/physmap.h> | ||
14 | |||
15 | #define IRQ_RES(resname, irq) \ | ||
16 | {.name = #resname, .start = (irq), .flags = IORESOURCE_IRQ} | ||
17 | |||
18 | extern void ltq_register_nor(struct physmap_flash_data *data); | ||
19 | extern void ltq_register_wdt(void); | ||
20 | extern void ltq_register_asc(int port); | ||
21 | extern void ltq_register_pci(struct ltq_pci_data *data); | ||
22 | |||
23 | #endif | ||
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c new file mode 100644 index 000000000000..972e05f87631 --- /dev/null +++ b/arch/mips/lantiq/early_printk.c | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/cpu.h> | ||
11 | |||
12 | #include <lantiq.h> | ||
13 | #include <lantiq_soc.h> | ||
14 | |||
15 | /* no ioremap possible at this early stage, let's use KSEG1 instead */ | ||
16 | #define LTQ_ASC_BASE KSEG1ADDR(LTQ_ASC1_BASE_ADDR) | ||
17 | #define ASC_BUF 1024 | ||
18 | #define LTQ_ASC_FSTAT ((u32 *)(LTQ_ASC_BASE + 0x0048)) | ||
19 | #define LTQ_ASC_TBUF ((u32 *)(LTQ_ASC_BASE + 0x0020)) | ||
20 | #define TXMASK 0x3F00 | ||
21 | #define TXOFFSET 8 | ||
22 | |||
23 | void prom_putchar(char c) | ||
24 | { | ||
25 | unsigned long flags; | ||
26 | |||
27 | local_irq_save(flags); | ||
28 | do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET); | ||
29 | if (c == '\n') | ||
30 | ltq_w32('\r', LTQ_ASC_TBUF); | ||
31 | ltq_w32(c, LTQ_ASC_TBUF); | ||
32 | local_irq_restore(flags); | ||
33 | } | ||
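prom_putchar() spins until the ASC TX FIFO has room, then writes the byte directly through KSEG1, which is all the generic MIPS CONFIG_EARLY_PRINTK machinery needs. A sketch of an early write routine layered on top of it:

static void early_console_write(const char *s, unsigned int count)
{
	while (count-- && *s)
		prom_putchar(*s++);	/* '\n' is expanded to "\r\n" inside */
}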
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c new file mode 100644 index 000000000000..fc89795cafdb --- /dev/null +++ b/arch/mips/lantiq/irq.c | |||
@@ -0,0 +1,326 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/ioport.h> | ||
12 | |||
13 | #include <asm/bootinfo.h> | ||
14 | #include <asm/irq_cpu.h> | ||
15 | |||
16 | #include <lantiq_soc.h> | ||
17 | #include <irq.h> | ||
18 | |||
19 | /* register definitions */ | ||
20 | #define LTQ_ICU_IM0_ISR 0x0000 | ||
21 | #define LTQ_ICU_IM0_IER 0x0008 | ||
22 | #define LTQ_ICU_IM0_IOSR 0x0010 | ||
23 | #define LTQ_ICU_IM0_IRSR 0x0018 | ||
24 | #define LTQ_ICU_IM0_IMR 0x0020 | ||
25 | #define LTQ_ICU_IM1_ISR 0x0028 | ||
26 | #define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR) | ||
27 | |||
28 | #define LTQ_EIU_EXIN_C 0x0000 | ||
29 | #define LTQ_EIU_EXIN_INIC 0x0004 | ||
30 | #define LTQ_EIU_EXIN_INEN 0x000C | ||
31 | |||
32 | /* irq numbers used by the external interrupt unit (EIU) */ | ||
33 | #define LTQ_EIU_IR0 (INT_NUM_IM4_IRL0 + 30) | ||
34 | #define LTQ_EIU_IR1 (INT_NUM_IM3_IRL0 + 31) | ||
35 | #define LTQ_EIU_IR2 (INT_NUM_IM1_IRL0 + 26) | ||
36 | #define LTQ_EIU_IR3 INT_NUM_IM1_IRL0 | ||
37 | #define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1) | ||
38 | #define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2) | ||
39 | #define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30) | ||
40 | |||
41 | #define MAX_EIU 6 | ||
42 | |||
43 | /* irqs generated by devices attached to the EBU need to be acked in | ||
44 | * a special manner | ||
45 | */ | ||
46 | #define LTQ_ICU_EBU_IRQ 22 | ||
47 | |||
48 | #define ltq_icu_w32(x, y) ltq_w32((x), ltq_icu_membase + (y)) | ||
49 | #define ltq_icu_r32(x) ltq_r32(ltq_icu_membase + (x)) | ||
50 | |||
51 | #define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y)) | ||
52 | #define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x)) | ||
53 | |||
54 | static unsigned short ltq_eiu_irq[MAX_EIU] = { | ||
55 | LTQ_EIU_IR0, | ||
56 | LTQ_EIU_IR1, | ||
57 | LTQ_EIU_IR2, | ||
58 | LTQ_EIU_IR3, | ||
59 | LTQ_EIU_IR4, | ||
60 | LTQ_EIU_IR5, | ||
61 | }; | ||
62 | |||
63 | static struct resource ltq_icu_resource = { | ||
64 | .name = "icu", | ||
65 | .start = LTQ_ICU_BASE_ADDR, | ||
66 | .end = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1, | ||
67 | .flags = IORESOURCE_MEM, | ||
68 | }; | ||
69 | |||
70 | static struct resource ltq_eiu_resource = { | ||
71 | .name = "eiu", | ||
72 | .start = LTQ_EIU_BASE_ADDR, | ||
73 | .end = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1, | ||
74 | .flags = IORESOURCE_MEM, | ||
75 | }; | ||
76 | |||
77 | static void __iomem *ltq_icu_membase; | ||
78 | static void __iomem *ltq_eiu_membase; | ||
79 | |||
80 | void ltq_disable_irq(struct irq_data *d) | ||
81 | { | ||
82 | u32 ier = LTQ_ICU_IM0_IER; | ||
83 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
84 | |||
85 | ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | ||
86 | irq_nr %= INT_NUM_IM_OFFSET; | ||
87 | ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); | ||
88 | } | ||
89 | |||
90 | void ltq_mask_and_ack_irq(struct irq_data *d) | ||
91 | { | ||
92 | u32 ier = LTQ_ICU_IM0_IER; | ||
93 | u32 isr = LTQ_ICU_IM0_ISR; | ||
94 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
95 | |||
96 | ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | ||
97 | isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | ||
98 | irq_nr %= INT_NUM_IM_OFFSET; | ||
99 | ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); | ||
100 | ltq_icu_w32((1 << irq_nr), isr); | ||
101 | } | ||
102 | |||
103 | static void ltq_ack_irq(struct irq_data *d) | ||
104 | { | ||
105 | u32 isr = LTQ_ICU_IM0_ISR; | ||
106 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
107 | |||
108 | isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | ||
109 | irq_nr %= INT_NUM_IM_OFFSET; | ||
110 | ltq_icu_w32((1 << irq_nr), isr); | ||
111 | } | ||
112 | |||
113 | void ltq_enable_irq(struct irq_data *d) | ||
114 | { | ||
115 | u32 ier = LTQ_ICU_IM0_IER; | ||
116 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
117 | |||
118 | ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | ||
119 | irq_nr %= INT_NUM_IM_OFFSET; | ||
120 | ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier); | ||
121 | } | ||
122 | |||
123 | static unsigned int ltq_startup_eiu_irq(struct irq_data *d) | ||
124 | { | ||
125 | int i; | ||
126 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
127 | |||
128 | ltq_enable_irq(d); | ||
129 | for (i = 0; i < MAX_EIU; i++) { | ||
130 | if (irq_nr == ltq_eiu_irq[i]) { | ||
131 | /* low level - we should really handle set_type */ | ||
132 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | | ||
133 | (0x6 << (i * 4)), LTQ_EIU_EXIN_C); | ||
134 | /* clear all pending */ | ||
135 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i), | ||
136 | LTQ_EIU_EXIN_INIC); | ||
137 | /* enable */ | ||
138 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i), | ||
139 | LTQ_EIU_EXIN_INEN); | ||
140 | break; | ||
141 | } | ||
142 | } | ||
143 | |||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static void ltq_shutdown_eiu_irq(struct irq_data *d) | ||
148 | { | ||
149 | int i; | ||
150 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
151 | |||
152 | ltq_disable_irq(d); | ||
153 | for (i = 0; i < MAX_EIU; i++) { | ||
154 | if (irq_nr == ltq_eiu_irq[i]) { | ||
155 | /* disable */ | ||
156 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), | ||
157 | LTQ_EIU_EXIN_INEN); | ||
158 | break; | ||
159 | } | ||
160 | } | ||
161 | } | ||
162 | |||
163 | static struct irq_chip ltq_irq_type = { | ||
164 | "icu", | ||
165 | .irq_enable = ltq_enable_irq, | ||
166 | .irq_disable = ltq_disable_irq, | ||
167 | .irq_unmask = ltq_enable_irq, | ||
168 | .irq_ack = ltq_ack_irq, | ||
169 | .irq_mask = ltq_disable_irq, | ||
170 | .irq_mask_ack = ltq_mask_and_ack_irq, | ||
171 | }; | ||
172 | |||
173 | static struct irq_chip ltq_eiu_type = { | ||
174 | "eiu", | ||
175 | .irq_startup = ltq_startup_eiu_irq, | ||
176 | .irq_shutdown = ltq_shutdown_eiu_irq, | ||
177 | .irq_enable = ltq_enable_irq, | ||
178 | .irq_disable = ltq_disable_irq, | ||
179 | .irq_unmask = ltq_enable_irq, | ||
180 | .irq_ack = ltq_ack_irq, | ||
181 | .irq_mask = ltq_disable_irq, | ||
182 | .irq_mask_ack = ltq_mask_and_ack_irq, | ||
183 | }; | ||
184 | |||
185 | static void ltq_hw_irqdispatch(int module) | ||
186 | { | ||
187 | u32 irq; | ||
188 | |||
189 | irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET)); | ||
190 | if (irq == 0) | ||
191 | return; | ||
192 | |||
193 | /* a silicon bug causes only the most significant set bit to be valid; | ||
194 | * all other bits might be bogus | ||
195 | */ | ||
196 | irq = __fls(irq); | ||
197 | do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module)); | ||
198 | |||
199 | /* if this is an EBU irq, we need to ack it or get a deadlock */ | ||
200 | if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0)) | ||
201 | ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, | ||
202 | LTQ_EBU_PCC_ISTAT); | ||
203 | } | ||
204 | |||
205 | #define DEFINE_HWx_IRQDISPATCH(x) \ | ||
206 | static void ltq_hw ## x ## _irqdispatch(void) \ | ||
207 | { \ | ||
208 | ltq_hw_irqdispatch(x); \ | ||
209 | } | ||
210 | DEFINE_HWx_IRQDISPATCH(0) | ||
211 | DEFINE_HWx_IRQDISPATCH(1) | ||
212 | DEFINE_HWx_IRQDISPATCH(2) | ||
213 | DEFINE_HWx_IRQDISPATCH(3) | ||
214 | DEFINE_HWx_IRQDISPATCH(4) | ||
215 | |||
216 | static void ltq_hw5_irqdispatch(void) | ||
217 | { | ||
218 | do_IRQ(MIPS_CPU_TIMER_IRQ); | ||
219 | } | ||
220 | |||
221 | asmlinkage void plat_irq_dispatch(void) | ||
222 | { | ||
223 | unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; | ||
224 | unsigned int i; | ||
225 | |||
226 | if (pending & CAUSEF_IP7) { | ||
227 | do_IRQ(MIPS_CPU_TIMER_IRQ); | ||
228 | goto out; | ||
229 | } else { | ||
230 | for (i = 0; i < 5; i++) { | ||
231 | if (pending & (CAUSEF_IP2 << i)) { | ||
232 | ltq_hw_irqdispatch(i); | ||
233 | goto out; | ||
234 | } | ||
235 | } | ||
236 | } | ||
237 | pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_cause()); | ||
238 | |||
239 | out: | ||
240 | return; | ||
241 | } | ||
242 | |||
243 | static struct irqaction cascade = { | ||
244 | .handler = no_action, | ||
245 | .flags = IRQF_DISABLED, | ||
246 | .name = "cascade", | ||
247 | }; | ||
248 | |||
249 | void __init arch_init_irq(void) | ||
250 | { | ||
251 | int i; | ||
252 | |||
253 | if (insert_resource(&iomem_resource, <q_icu_resource) < 0) | ||
254 | panic("Failed to insert icu memory\n"); | ||
255 | |||
256 | if (request_mem_region(ltq_icu_resource.start, | ||
257 | resource_size(<q_icu_resource), "icu") < 0) | ||
258 | panic("Failed to request icu memory\n"); | ||
259 | |||
260 | ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start, | ||
261 | resource_size(<q_icu_resource)); | ||
262 | if (!ltq_icu_membase) | ||
263 | panic("Failed to remap icu memory\n"); | ||
264 | |||
265 | if (insert_resource(&iomem_resource, <q_eiu_resource) < 0) | ||
266 | panic("Failed to insert eiu memory\n"); | ||
267 | |||
268 | if (request_mem_region(ltq_eiu_resource.start, | ||
269 | resource_size(<q_eiu_resource), "eiu") < 0) | ||
270 | panic("Failed to request eiu memory\n"); | ||
271 | |||
272 | ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start, | ||
273 | resource_size(<q_eiu_resource)); | ||
274 | if (!ltq_eiu_membase) | ||
275 | panic("Failed to remap eiu memory\n"); | ||
276 | |||
277 | /* make sure all irqs are turned off by default */ | ||
278 | for (i = 0; i < 5; i++) | ||
279 | ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET)); | ||
280 | |||
281 | /* clear all possibly pending interrupts */ | ||
282 | ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET)); | ||
283 | |||
284 | mips_cpu_irq_init(); | ||
285 | |||
286 | for (i = 2; i <= 6; i++) | ||
287 | setup_irq(i, &cascade); | ||
288 | |||
289 | if (cpu_has_vint) { | ||
290 | pr_info("Setting up vectored interrupts\n"); | ||
291 | set_vi_handler(2, ltq_hw0_irqdispatch); | ||
292 | set_vi_handler(3, ltq_hw1_irqdispatch); | ||
293 | set_vi_handler(4, ltq_hw2_irqdispatch); | ||
294 | set_vi_handler(5, ltq_hw3_irqdispatch); | ||
295 | set_vi_handler(6, ltq_hw4_irqdispatch); | ||
296 | set_vi_handler(7, ltq_hw5_irqdispatch); | ||
297 | } | ||
298 | |||
299 | for (i = INT_NUM_IRQ0; | ||
300 | i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++) | ||
301 | if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) || | ||
302 | (i == LTQ_EIU_IR2)) | ||
303 | irq_set_chip_and_handler(i, <q_eiu_type, | ||
304 | handle_level_irq); | ||
305 | /* EIU3-5 only exist on ar9 and vr9 */ | ||
306 | else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) || | ||
307 | (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9())) | ||
308 | irq_set_chip_and_handler(i, <q_eiu_type, | ||
309 | handle_level_irq); | ||
310 | else | ||
311 | irq_set_chip_and_handler(i, <q_irq_type, | ||
312 | handle_level_irq); | ||
313 | |||
314 | #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) | ||
315 | set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | | ||
316 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | ||
317 | #else | ||
318 | set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | | ||
319 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | ||
320 | #endif | ||
321 | } | ||
322 | |||
323 | unsigned int __cpuinit get_c0_compare_int(void) | ||
324 | { | ||
325 | return CP0_LEGACY_COMPARE_IRQ; | ||
326 | } | ||
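The repeated pointer arithmetic in the handlers above follows from the ICU layout: five identical interrupt modules (IM0..IM4) of INT_NUM_IM_OFFSET lines each, with each module's register block sitting LTQ_ICU_OFFSET bytes after the previous one. Factored out as a sketch:

static inline u32 ltq_icu_reg(u32 im0_reg, int irq_nr)
{
	/* pick this module's copy of an IM0-relative register ... */
	return im0_reg + LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
}
/* ... and the bit within it is (irq_nr % INT_NUM_IM_OFFSET) */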
diff --git a/arch/mips/lantiq/machtypes.h b/arch/mips/lantiq/machtypes.h new file mode 100644 index 000000000000..7e01b8c484eb --- /dev/null +++ b/arch/mips/lantiq/machtypes.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LANTIQ_MACH_H__ | ||
10 | #define _LANTIQ_MACH_H__ | ||
11 | |||
12 | #include <asm/mips_machine.h> | ||
13 | |||
14 | enum lantiq_mach_type { | ||
15 | LTQ_MACH_GENERIC = 0, | ||
16 | LTQ_MACH_EASY50712, /* Danube evaluation board */ | ||
17 | LTQ_MACH_EASY50601, /* Amazon SE evaluation board */ | ||
18 | }; | ||
19 | |||
20 | #endif | ||
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c new file mode 100644 index 000000000000..56ba007bf1e5 --- /dev/null +++ b/arch/mips/lantiq/prom.c | |||
@@ -0,0 +1,71 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/clk.h> | ||
11 | #include <asm/bootinfo.h> | ||
12 | #include <asm/time.h> | ||
13 | |||
14 | #include <lantiq.h> | ||
15 | |||
16 | #include "prom.h" | ||
17 | #include "clk.h" | ||
18 | |||
19 | static struct ltq_soc_info soc_info; | ||
20 | |||
21 | unsigned int ltq_get_cpu_ver(void) | ||
22 | { | ||
23 | return soc_info.rev; | ||
24 | } | ||
25 | EXPORT_SYMBOL(ltq_get_cpu_ver); | ||
26 | |||
27 | unsigned int ltq_get_soc_type(void) | ||
28 | { | ||
29 | return soc_info.type; | ||
30 | } | ||
31 | EXPORT_SYMBOL(ltq_get_soc_type); | ||
32 | |||
33 | const char *get_system_type(void) | ||
34 | { | ||
35 | return soc_info.sys_type; | ||
36 | } | ||
37 | |||
38 | void prom_free_prom_memory(void) | ||
39 | { | ||
40 | } | ||
41 | |||
42 | static void __init prom_init_cmdline(void) | ||
43 | { | ||
44 | int argc = fw_arg0; | ||
45 | char **argv = (char **) KSEG1ADDR(fw_arg1); | ||
46 | int i; | ||
47 | |||
48 | for (i = 0; i < argc; i++) { | ||
49 | char *p = (char *) KSEG1ADDR(argv[i]); | ||
50 | |||
51 | if (p && *p) { | ||
52 | strlcat(arcs_cmdline, p, sizeof(arcs_cmdline)); | ||
53 | strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline)); | ||
54 | } | ||
55 | } | ||
56 | } | ||
57 | |||
58 | void __init prom_init(void) | ||
59 | { | ||
60 | struct clk *clk; | ||
61 | |||
62 | ltq_soc_detect(&soc_info); | ||
63 | clk_init(); | ||
64 | clk = clk_get(0, "cpu"); | ||
65 | snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev1.%d", | ||
66 | soc_info.name, soc_info.rev); | ||
67 | clk_put(clk); | ||
68 | soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0'; | ||
69 | pr_info("SoC: %s\n", soc_info.sys_type); | ||
70 | prom_init_cmdline(); | ||
71 | } | ||
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h new file mode 100644 index 000000000000..b4229d94280f --- /dev/null +++ b/arch/mips/lantiq/prom.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_PROM_H__ | ||
10 | #define _LTQ_PROM_H__ | ||
11 | |||
12 | #define LTQ_SYS_TYPE_LEN 0x100 | ||
13 | |||
14 | struct ltq_soc_info { | ||
15 | unsigned char *name; | ||
16 | unsigned int rev; | ||
17 | unsigned int partnum; | ||
18 | unsigned int type; | ||
19 | unsigned char sys_type[LTQ_SYS_TYPE_LEN]; | ||
20 | }; | ||
21 | |||
22 | extern void ltq_soc_detect(struct ltq_soc_info *i); | ||
23 | extern void ltq_soc_setup(void); | ||
24 | |||
25 | #endif | ||
diff --git a/arch/mips/lantiq/setup.c b/arch/mips/lantiq/setup.c new file mode 100644 index 000000000000..9b8af77ed0f9 --- /dev/null +++ b/arch/mips/lantiq/setup.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/io.h> | ||
12 | #include <linux/ioport.h> | ||
13 | #include <asm/bootinfo.h> | ||
14 | |||
15 | #include <lantiq_soc.h> | ||
16 | |||
17 | #include "machtypes.h" | ||
18 | #include "devices.h" | ||
19 | #include "prom.h" | ||
20 | |||
21 | void __init plat_mem_setup(void) | ||
22 | { | ||
23 | /* assume 16M as default in case u-boot fails to pass a proper ramsize */ | ||
24 | unsigned long memsize = 16; | ||
25 | char **envp = (char **) KSEG1ADDR(fw_arg2); | ||
26 | |||
27 | ioport_resource.start = IOPORT_RESOURCE_START; | ||
28 | ioport_resource.end = IOPORT_RESOURCE_END; | ||
29 | iomem_resource.start = IOMEM_RESOURCE_START; | ||
30 | iomem_resource.end = IOMEM_RESOURCE_END; | ||
31 | |||
32 | set_io_port_base((unsigned long) KSEG1); | ||
33 | |||
34 | while (*envp) { | ||
35 | char *e = (char *)KSEG1ADDR(*envp); | ||
36 | if (!strncmp(e, "memsize=", 8)) { | ||
37 | e += 8; | ||
38 | if (strict_strtoul(e, 0, &memsize)) | ||
39 | pr_warn("bad memsize specified\n"); | ||
40 | } | ||
41 | envp++; | ||
42 | } | ||
43 | memsize *= 1024 * 1024; | ||
44 | add_memory_region(0x00000000, memsize, BOOT_MEM_RAM); | ||
45 | } | ||
46 | |||
47 | static int __init | ||
48 | lantiq_setup(void) | ||
49 | { | ||
50 | ltq_soc_setup(); | ||
51 | mips_machine_setup(); | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | arch_initcall(lantiq_setup); | ||
56 | |||
57 | static void __init | ||
58 | lantiq_generic_init(void) | ||
59 | { | ||
60 | /* Nothing to do */ | ||
61 | } | ||
62 | |||
63 | MIPS_MACHINE(LTQ_MACH_GENERIC, | ||
64 | "Generic", | ||
65 | "Generic Lantiq based board", | ||
66 | lantiq_generic_init); | ||
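plat_mem_setup() walks the u-boot environment pointer in fw_arg2 looking for a memsize= entry given in megabytes. The parse step in isolation (sketch; the example string is illustrative):

static unsigned long parse_memsize(const char *e)
{
	unsigned long mb = 16;			/* default when no memsize= given */

	if (!strncmp(e, "memsize=", 8))
		if (strict_strtoul(e + 8, 0, &mb))
			pr_warn("bad memsize specified\n");
	return mb * 1024 * 1024;		/* e.g. "memsize=32" -> 32 MB */
}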
diff --git a/arch/mips/lantiq/xway/Kconfig b/arch/mips/lantiq/xway/Kconfig new file mode 100644 index 000000000000..2b857de36620 --- /dev/null +++ b/arch/mips/lantiq/xway/Kconfig | |||
@@ -0,0 +1,23 @@ | |||
1 | if SOC_XWAY | ||
2 | |||
3 | menu "MIPS Machine" | ||
4 | |||
5 | config LANTIQ_MACH_EASY50712 | ||
6 | bool "Easy50712 - Danube" | ||
7 | default y | ||
8 | |||
9 | endmenu | ||
10 | |||
11 | endif | ||
12 | |||
13 | if SOC_AMAZON_SE | ||
14 | |||
15 | menu "MIPS Machine" | ||
16 | |||
17 | config LANTIQ_MACH_EASY50601 | ||
18 | bool "Easy50601 - Amazon SE" | ||
19 | default y | ||
20 | |||
21 | endmenu | ||
22 | |||
23 | endif | ||
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile new file mode 100644 index 000000000000..c517f2e77563 --- /dev/null +++ b/arch/mips/lantiq/xway/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | obj-y := pmu.o ebu.o reset.o gpio.o gpio_stp.o gpio_ebu.o devices.o dma.o | ||
2 | |||
3 | obj-$(CONFIG_SOC_XWAY) += clk-xway.o prom-xway.o setup-xway.o | ||
4 | obj-$(CONFIG_SOC_AMAZON_SE) += clk-ase.o prom-ase.o setup-ase.o | ||
5 | |||
6 | obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o | ||
7 | obj-$(CONFIG_LANTIQ_MACH_EASY50601) += mach-easy50601.o | ||
diff --git a/arch/mips/lantiq/xway/clk-ase.c b/arch/mips/lantiq/xway/clk-ase.c new file mode 100644 index 000000000000..22d823acd536 --- /dev/null +++ b/arch/mips/lantiq/xway/clk-ase.c | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/io.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/clk.h> | ||
13 | |||
14 | #include <asm/time.h> | ||
15 | #include <asm/irq.h> | ||
16 | #include <asm/div64.h> | ||
17 | |||
18 | #include <lantiq_soc.h> | ||
19 | |||
20 | /* cgu registers */ | ||
21 | #define LTQ_CGU_SYS 0x0010 | ||
22 | |||
23 | unsigned int ltq_get_io_region_clock(void) | ||
24 | { | ||
25 | return CLOCK_133M; | ||
26 | } | ||
27 | EXPORT_SYMBOL(ltq_get_io_region_clock); | ||
28 | |||
29 | unsigned int ltq_get_fpi_bus_clock(int fpi) | ||
30 | { | ||
31 | return CLOCK_133M; | ||
32 | } | ||
33 | EXPORT_SYMBOL(ltq_get_fpi_bus_clock); | ||
34 | |||
35 | unsigned int ltq_get_cpu_hz(void) | ||
36 | { | ||
37 | if (ltq_cgu_r32(LTQ_CGU_SYS) & (1 << 5)) | ||
38 | return CLOCK_266M; | ||
39 | else | ||
40 | return CLOCK_133M; | ||
41 | } | ||
42 | EXPORT_SYMBOL(ltq_get_cpu_hz); | ||
43 | |||
44 | unsigned int ltq_get_fpi_hz(void) | ||
45 | { | ||
46 | return CLOCK_133M; | ||
47 | } | ||
48 | EXPORT_SYMBOL(ltq_get_fpi_hz); | ||
diff --git a/arch/mips/lantiq/xway/clk-xway.c b/arch/mips/lantiq/xway/clk-xway.c new file mode 100644 index 000000000000..ddd39593c581 --- /dev/null +++ b/arch/mips/lantiq/xway/clk-xway.c | |||
@@ -0,0 +1,223 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/io.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/clk.h> | ||
13 | |||
14 | #include <asm/time.h> | ||
15 | #include <asm/irq.h> | ||
16 | #include <asm/div64.h> | ||
17 | |||
18 | #include <lantiq_soc.h> | ||
19 | |||
20 | static unsigned int ltq_ram_clocks[] = { | ||
21 | CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M }; | ||
22 | #define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3] | ||
23 | |||
24 | #define BASIC_FREQUENCY_1 35328000 | ||
25 | #define BASIC_FREQUENCY_2 36000000 | ||
26 | #define BASIS_REQUENCY_USB 12000000 | ||
27 | |||
28 | #define GET_BITS(x, msb, lsb) \ | ||
29 | (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb)) | ||
30 | |||
31 | #define LTQ_CGU_PLL0_CFG 0x0004 | ||
32 | #define LTQ_CGU_PLL1_CFG 0x0008 | ||
33 | #define LTQ_CGU_PLL2_CFG 0x000C | ||
34 | #define LTQ_CGU_SYS 0x0010 | ||
35 | #define LTQ_CGU_UPDATE 0x0014 | ||
36 | #define LTQ_CGU_IF_CLK 0x0018 | ||
37 | #define LTQ_CGU_OSC_CON 0x001C | ||
38 | #define LTQ_CGU_SMD 0x0020 | ||
39 | #define LTQ_CGU_CT1SR 0x0028 | ||
40 | #define LTQ_CGU_CT2SR 0x002C | ||
41 | #define LTQ_CGU_PCMCR 0x0030 | ||
42 | #define LTQ_CGU_PCI_CR 0x0034 | ||
43 | #define LTQ_CGU_PD_PC 0x0038 | ||
44 | #define LTQ_CGU_FMR 0x003C | ||
45 | |||
46 | #define CGU_PLL0_PHASE_DIVIDER_ENABLE \ | ||
47 | (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31)) | ||
48 | #define CGU_PLL0_BYPASS \ | ||
49 | (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30)) | ||
50 | #define CGU_PLL0_CFG_DSMSEL \ | ||
51 | (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28)) | ||
52 | #define CGU_PLL0_CFG_FRAC_EN \ | ||
53 | (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27)) | ||
54 | #define CGU_PLL1_SRC \ | ||
55 | (ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31)) | ||
56 | #define CGU_PLL2_PHASE_DIVIDER_ENABLE \ | ||
57 | (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20)) | ||
58 | #define CGU_SYS_FPI_SEL (1 << 6) | ||
59 | #define CGU_SYS_DDR_SEL 0x3 | ||
60 | #define CGU_PLL0_SRC (1 << 29) | ||
61 | |||
62 | #define CGU_PLL0_CFG_PLLK GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17) | ||
63 | #define CGU_PLL0_CFG_PLLN GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6) | ||
64 | #define CGU_PLL0_CFG_PLLM GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2) | ||
65 | #define CGU_PLL2_SRC GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17) | ||
66 | #define CGU_PLL2_CFG_INPUT_DIV GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13) | ||
67 | |||
68 | static unsigned int ltq_get_pll0_fdiv(void); | ||
69 | |||
70 | static inline unsigned int get_input_clock(int pll) | ||
71 | { | ||
72 | switch (pll) { | ||
73 | case 0: | ||
74 | if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC) | ||
75 | return BASIS_REQUENCY_USB; | ||
76 | else if (CGU_PLL0_PHASE_DIVIDER_ENABLE) | ||
77 | return BASIC_FREQUENCY_1; | ||
78 | else | ||
79 | return BASIC_FREQUENCY_2; | ||
80 | case 1: | ||
81 | if (CGU_PLL1_SRC) | ||
82 | return BASIS_REQUENCY_USB; | ||
83 | else if (CGU_PLL0_PHASE_DIVIDER_ENABLE) | ||
84 | return BASIC_FREQUENCY_1; | ||
85 | else | ||
86 | return BASIC_FREQUENCY_2; | ||
87 | case 2: | ||
88 | switch (CGU_PLL2_SRC) { | ||
89 | case 0: | ||
90 | return ltq_get_pll0_fdiv(); | ||
91 | case 1: | ||
92 | return CGU_PLL2_PHASE_DIVIDER_ENABLE ? | ||
93 | BASIC_FREQUENCY_1 : | ||
94 | BASIC_FREQUENCY_2; | ||
95 | case 2: | ||
96 | return BASIC_FREQUENCY_USB; | ||
97 | } | ||
98 | default: | ||
99 | return 0; | ||
100 | } | ||
101 | } | ||
102 | |||
103 | static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den) | ||
104 | { | ||
105 | u64 res, clock = get_input_clock(pll); | ||
106 | |||
107 | res = num * clock; | ||
108 | do_div(res, den); | ||
109 | return res; | ||
110 | } | ||
111 | |||
112 | static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N, | ||
113 | unsigned int K) | ||
114 | { | ||
115 | unsigned int num = ((N + 1) << 10) + K; | ||
116 | unsigned int den = (M + 1) << 10; | ||
117 | |||
118 | return cal_dsm(pll, num, den); | ||
119 | } | ||
120 | |||
121 | static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N, | ||
122 | unsigned int K) | ||
123 | { | ||
124 | unsigned int num = ((N + 1) << 11) + K + 512; | ||
125 | unsigned int den = (M + 1) << 11; | ||
126 | |||
127 | return cal_dsm(pll, num, den); | ||
128 | } | ||
129 | |||
130 | static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N, | ||
131 | unsigned int K) | ||
132 | { | ||
133 | unsigned int num = K >= 512 ? | ||
134 | ((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584; | ||
135 | unsigned int den = (M + 1) << 12; | ||
136 | |||
137 | return cal_dsm(pll, num, den); | ||
138 | } | ||
139 | |||
140 | static inline unsigned int dsm(int pll, unsigned int M, unsigned int N, | ||
141 | unsigned int K, unsigned int dsmsel, unsigned int phase_div_en) | ||
142 | { | ||
143 | if (!dsmsel) | ||
144 | return mash_dsm(pll, M, N, K); | ||
145 | else if (!phase_div_en) | ||
146 | return mash_dsm(pll, M, N, K); | ||
147 | else | ||
148 | return ssff_dsm_2(pll, M, N, K); | ||
149 | } | ||
150 | |||
151 | static inline unsigned int ltq_get_pll0_fosc(void) | ||
152 | { | ||
153 | if (CGU_PLL0_BYPASS) | ||
154 | return get_input_clock(0); | ||
155 | else | ||
156 | return !CGU_PLL0_CFG_FRAC_EN | ||
157 | ? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0, | ||
158 | CGU_PLL0_CFG_DSMSEL, | ||
159 | CGU_PLL0_PHASE_DIVIDER_ENABLE) | ||
160 | : dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, | ||
161 | CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL, | ||
162 | CGU_PLL0_PHASE_DIVIDER_ENABLE); | ||
163 | } | ||
164 | |||
165 | static unsigned int ltq_get_pll0_fdiv(void) | ||
166 | { | ||
167 | unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1; | ||
168 | |||
169 | return (ltq_get_pll0_fosc() + (div >> 1)) / div; | ||
170 | } | ||
171 | |||
172 | unsigned int ltq_get_io_region_clock(void) | ||
173 | { | ||
174 | unsigned int ret = ltq_get_pll0_fosc(); | ||
175 | |||
176 | switch (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) { | ||
177 | default: | ||
178 | case 0: | ||
179 | return (ret + 1) / 2; | ||
180 | case 1: | ||
181 | return (ret * 2 + 2) / 5; | ||
182 | case 2: | ||
183 | return (ret + 1) / 3; | ||
184 | case 3: | ||
185 | return (ret + 2) / 4; | ||
186 | } | ||
187 | } | ||
188 | EXPORT_SYMBOL(ltq_get_io_region_clock); | ||
189 | |||
190 | unsigned int ltq_get_fpi_bus_clock(int fpi) | ||
191 | { | ||
192 | unsigned int ret = ltq_get_io_region_clock(); | ||
193 | |||
194 | if ((fpi == 2) && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL)) | ||
195 | ret >>= 1; | ||
196 | return ret; | ||
197 | } | ||
198 | EXPORT_SYMBOL(ltq_get_fpi_bus_clock); | ||
199 | |||
200 | unsigned int ltq_get_cpu_hz(void) | ||
201 | { | ||
202 | switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) { | ||
203 | case 0: | ||
204 | return CLOCK_333M; | ||
205 | case 4: | ||
206 | return DDR_HZ; | ||
207 | case 8: | ||
208 | return DDR_HZ << 1; | ||
209 | default: | ||
210 | return DDR_HZ >> 1; | ||
211 | } | ||
212 | } | ||
213 | EXPORT_SYMBOL(ltq_get_cpu_hz); | ||
214 | |||
215 | unsigned int ltq_get_fpi_hz(void) | ||
216 | { | ||
217 | unsigned int ddr_clock = DDR_HZ; | ||
218 | |||
219 | if (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40) | ||
220 | return ddr_clock >> 1; | ||
221 | return ddr_clock; | ||
222 | } | ||
223 | EXPORT_SYMBOL(ltq_get_fpi_hz); | ||
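
The three *_dsm helpers above evaluate the same fractional-N PLL formula, f_out = f_in * ((N + 1) * S + K) / ((M + 1) * S), at different fixed-point scales: S = 2^10 for the MASH modulator, and S = 2^11 / 2^12 with a 512-step offset (and wrap handling) for the two SSFF variants. A minimal user-space sketch of the MASH case, with entirely hypothetical M/N/K values rather than real CGU register contents:

#include <stdio.h>
#include <stdint.h>

/* MASH case: f_out = f_in * ((N + 1) * 2^10 + K) / ((M + 1) * 2^10) */
static unsigned int mash_dsm_demo(uint64_t f_in, unsigned int M,
				  unsigned int N, unsigned int K)
{
	uint64_t num = ((uint64_t)(N + 1) << 10) + K;
	uint64_t den = (uint64_t)(M + 1) << 10;

	return (unsigned int)(f_in * num / den);
}

int main(void)
{
	/* hypothetical M/N/K values, not real CGU register contents */
	printf("%u Hz\n", mash_dsm_demo(36000000, 0, 47, 0));
	return 0;
}

With these inputs the sketch prints 1728000000, i.e. a plain 48x multiplication, since K = 0 contributes no fractional part.
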
diff --git a/arch/mips/lantiq/xway/devices.c b/arch/mips/lantiq/xway/devices.c new file mode 100644 index 000000000000..e09e789dfc27 --- /dev/null +++ b/arch/mips/lantiq/xway/devices.c | |||
@@ -0,0 +1,121 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/mtd/physmap.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/reboot.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/leds.h> | ||
18 | #include <linux/etherdevice.h> | ||
19 | #include <linux/reboot.h> | ||
20 | #include <linux/time.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/gpio.h> | ||
23 | #include <linux/leds.h> | ||
24 | |||
25 | #include <asm/bootinfo.h> | ||
26 | #include <asm/irq.h> | ||
27 | |||
28 | #include <lantiq_soc.h> | ||
29 | #include <lantiq_irq.h> | ||
30 | #include <lantiq_platform.h> | ||
31 | |||
32 | #include "devices.h" | ||
33 | |||
34 | /* gpio */ | ||
35 | static struct resource ltq_gpio_resource[] = { | ||
36 | { | ||
37 | .name = "gpio0", | ||
38 | .start = LTQ_GPIO0_BASE_ADDR, | ||
39 | .end = LTQ_GPIO0_BASE_ADDR + LTQ_GPIO_SIZE - 1, | ||
40 | .flags = IORESOURCE_MEM, | ||
41 | }, { | ||
42 | .name = "gpio1", | ||
43 | .start = LTQ_GPIO1_BASE_ADDR, | ||
44 | .end = LTQ_GPIO1_BASE_ADDR + LTQ_GPIO_SIZE - 1, | ||
45 | .flags = IORESOURCE_MEM, | ||
46 | }, { | ||
47 | .name = "gpio2", | ||
48 | .start = LTQ_GPIO2_BASE_ADDR, | ||
49 | .end = LTQ_GPIO2_BASE_ADDR + LTQ_GPIO_SIZE - 1, | ||
50 | .flags = IORESOURCE_MEM, | ||
51 | } | ||
52 | }; | ||
53 | |||
54 | void __init ltq_register_gpio(void) | ||
55 | { | ||
56 | platform_device_register_simple("ltq_gpio", 0, | ||
57 | <q_gpio_resource[0], 1); | ||
58 | platform_device_register_simple("ltq_gpio", 1, | ||
59 | <q_gpio_resource[1], 1); | ||
60 | |||
61 | /* AR9 and VR9 have an extra gpio block */ | ||
62 | if (ltq_is_ar9() || ltq_is_vr9()) { | ||
63 | platform_device_register_simple("ltq_gpio", 2, | ||
64 | <q_gpio_resource[2], 1); | ||
65 | } | ||
66 | } | ||
67 | |||
68 | /* serial to parallel conversion */ | ||
69 | static struct resource ltq_stp_resource = { | ||
70 | .name = "stp", | ||
71 | .start = LTQ_STP_BASE_ADDR, | ||
72 | .end = LTQ_STP_BASE_ADDR + LTQ_STP_SIZE - 1, | ||
73 | .flags = IORESOURCE_MEM, | ||
74 | }; | ||
75 | |||
76 | void __init ltq_register_gpio_stp(void) | ||
77 | { | ||
78 | platform_device_register_simple("ltq_stp", 0, <q_stp_resource, 1); | ||
79 | } | ||
80 | |||
81 | /* asc ports - amazon se has its own serial mapping */ | ||
82 | static struct resource ltq_ase_asc_resources[] = { | ||
83 | { | ||
84 | .name = "asc0", | ||
85 | .start = LTQ_ASC1_BASE_ADDR, | ||
86 | .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1, | ||
87 | .flags = IORESOURCE_MEM, | ||
88 | }, | ||
89 | IRQ_RES(tx, LTQ_ASC_ASE_TIR), | ||
90 | IRQ_RES(rx, LTQ_ASC_ASE_RIR), | ||
91 | IRQ_RES(err, LTQ_ASC_ASE_EIR), | ||
92 | }; | ||
93 | |||
94 | void __init ltq_register_ase_asc(void) | ||
95 | { | ||
96 | platform_device_register_simple("ltq_asc", 0, | ||
97 | ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources)); | ||
98 | } | ||
99 | |||
100 | /* ethernet */ | ||
101 | static struct resource ltq_etop_resources = { | ||
102 | .name = "etop", | ||
103 | .start = LTQ_ETOP_BASE_ADDR, | ||
104 | .end = LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1, | ||
105 | .flags = IORESOURCE_MEM, | ||
106 | }; | ||
107 | |||
108 | static struct platform_device ltq_etop = { | ||
109 | .name = "ltq_etop", | ||
110 | .resource = <q_etop_resources, | ||
111 | .num_resources = 1, | ||
112 | }; | ||
113 | |||
114 | void __init | ||
115 | ltq_register_etop(struct ltq_eth_data *eth) | ||
116 | { | ||
117 | if (eth) { | ||
118 | ltq_etop.dev.platform_data = eth; | ||
119 | platform_device_register(<q_etop); | ||
120 | } | ||
121 | } | ||
diff --git a/arch/mips/lantiq/xway/devices.h b/arch/mips/lantiq/xway/devices.h new file mode 100644 index 000000000000..e90493471bc1 --- /dev/null +++ b/arch/mips/lantiq/xway/devices.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_DEVICES_XWAY_H__ | ||
10 | #define _LTQ_DEVICES_XWAY_H__ | ||
11 | |||
12 | #include "../devices.h" | ||
13 | #include <linux/phy.h> | ||
14 | |||
15 | extern void ltq_register_gpio(void); | ||
16 | extern void ltq_register_gpio_stp(void); | ||
17 | extern void ltq_register_ase_asc(void); | ||
18 | extern void ltq_register_etop(struct ltq_eth_data *eth); | ||
19 | |||
20 | #endif | ||
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c new file mode 100644 index 000000000000..4278a459d6c4 --- /dev/null +++ b/arch/mips/lantiq/xway/dma.c | |||
@@ -0,0 +1,253 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. | ||
14 | * | ||
15 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
16 | */ | ||
17 | |||
18 | #include <linux/init.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/dma-mapping.h> | ||
22 | |||
23 | #include <lantiq_soc.h> | ||
24 | #include <xway_dma.h> | ||
25 | |||
26 | #define LTQ_DMA_CTRL 0x10 | ||
27 | #define LTQ_DMA_CPOLL 0x14 | ||
28 | #define LTQ_DMA_CS 0x18 | ||
29 | #define LTQ_DMA_CCTRL 0x1C | ||
30 | #define LTQ_DMA_CDBA 0x20 | ||
31 | #define LTQ_DMA_CDLEN 0x24 | ||
32 | #define LTQ_DMA_CIS 0x28 | ||
33 | #define LTQ_DMA_CIE 0x2C | ||
34 | #define LTQ_DMA_PS 0x40 | ||
35 | #define LTQ_DMA_PCTRL 0x44 | ||
36 | #define LTQ_DMA_IRNEN 0xf4 | ||
37 | |||
38 | #define DMA_DESCPT BIT(3) /* descriptor complete irq */ | ||
39 | #define DMA_TX BIT(8) /* TX channel direction */ | ||
40 | #define DMA_CHAN_ON BIT(0) /* channel on / off bit */ | ||
41 | #define DMA_PDEN BIT(6) /* enable packet drop */ | ||
42 | #define DMA_CHAN_RST BIT(1) /* channel reset bit */ | ||
43 | #define DMA_RESET BIT(0) /* global dma engine reset */ | ||
44 | #define DMA_IRQ_ACK 0x7e /* mask to ack all channel irqs */ | ||
45 | #define DMA_POLL BIT(31) /* turn on channel polling */ | ||
46 | #define DMA_CLK_DIV4 BIT(6) /* polling clock divider */ | ||
47 | #define DMA_2W_BURST BIT(1) /* 2 word burst length */ | ||
48 | #define DMA_MAX_CHANNEL 20 /* the soc has 20 channels */ | ||
49 | #define DMA_ETOP_ENDIANESS (0xf << 8) /* endianness swap for etop channels */ | ||
50 | #define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel weight */ | ||
51 | |||
52 | #define ltq_dma_r32(x) ltq_r32(ltq_dma_membase + (x)) | ||
53 | #define ltq_dma_w32(x, y) ltq_w32(x, ltq_dma_membase + (y)) | ||
54 | #define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \ | ||
55 | ltq_dma_membase + (z)) | ||
56 | |||
57 | static struct resource ltq_dma_resource = { | ||
58 | .name = "dma", | ||
59 | .start = LTQ_DMA_BASE_ADDR, | ||
60 | .end = LTQ_DMA_BASE_ADDR + LTQ_DMA_SIZE - 1, | ||
61 | .flags = IORESOURCE_MEM, | ||
62 | }; | ||
63 | |||
64 | static void __iomem *ltq_dma_membase; | ||
65 | |||
66 | void | ||
67 | ltq_dma_enable_irq(struct ltq_dma_channel *ch) | ||
68 | { | ||
69 | unsigned long flags; | ||
70 | |||
71 | local_irq_save(flags); | ||
72 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
73 | ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); | ||
74 | local_irq_restore(flags); | ||
75 | } | ||
76 | EXPORT_SYMBOL_GPL(ltq_dma_enable_irq); | ||
77 | |||
78 | void | ||
79 | ltq_dma_disable_irq(struct ltq_dma_channel *ch) | ||
80 | { | ||
81 | unsigned long flags; | ||
82 | |||
83 | local_irq_save(flags); | ||
84 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
85 | ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN); | ||
86 | local_irq_restore(flags); | ||
87 | } | ||
88 | EXPORT_SYMBOL_GPL(ltq_dma_disable_irq); | ||
89 | |||
90 | void | ||
91 | ltq_dma_ack_irq(struct ltq_dma_channel *ch) | ||
92 | { | ||
93 | unsigned long flags; | ||
94 | |||
95 | local_irq_save(flags); | ||
96 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
97 | ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS); | ||
98 | local_irq_restore(flags); | ||
99 | } | ||
100 | EXPORT_SYMBOL_GPL(ltq_dma_ack_irq); | ||
101 | |||
102 | void | ||
103 | ltq_dma_open(struct ltq_dma_channel *ch) | ||
104 | { | ||
105 | unsigned long flag; | ||
106 | |||
107 | local_irq_save(flag); | ||
108 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
109 | ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL); | ||
110 | ltq_dma_enable_irq(ch); | ||
111 | local_irq_restore(flag); | ||
112 | } | ||
113 | EXPORT_SYMBOL_GPL(ltq_dma_open); | ||
114 | |||
115 | void | ||
116 | ltq_dma_close(struct ltq_dma_channel *ch) | ||
117 | { | ||
118 | unsigned long flag; | ||
119 | |||
120 | local_irq_save(flag); | ||
121 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
122 | ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); | ||
123 | ltq_dma_disable_irq(ch); | ||
124 | local_irq_restore(flag); | ||
125 | } | ||
126 | EXPORT_SYMBOL_GPL(ltq_dma_close); | ||
127 | |||
128 | static void | ||
129 | ltq_dma_alloc(struct ltq_dma_channel *ch) | ||
130 | { | ||
131 | unsigned long flags; | ||
132 | |||
133 | ch->desc = 0; | ||
134 | ch->desc_base = dma_alloc_coherent(NULL, | ||
135 | LTQ_DESC_NUM * LTQ_DESC_SIZE, | ||
136 | &ch->phys, GFP_ATOMIC); | ||
137 | memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE); | ||
138 | |||
139 | local_irq_save(flags); | ||
140 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
141 | ltq_dma_w32(ch->phys, LTQ_DMA_CDBA); | ||
142 | ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN); | ||
143 | ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); | ||
144 | wmb(); | ||
145 | ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL); | ||
146 | while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST) | ||
147 | ; | ||
148 | local_irq_restore(flags); | ||
149 | } | ||
150 | |||
151 | void | ||
152 | ltq_dma_alloc_tx(struct ltq_dma_channel *ch) | ||
153 | { | ||
154 | unsigned long flags; | ||
155 | |||
156 | ltq_dma_alloc(ch); | ||
157 | |||
158 | local_irq_save(flags); | ||
159 | ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); | ||
160 | ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); | ||
161 | ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL); | ||
162 | local_irq_restore(flags); | ||
163 | } | ||
164 | EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx); | ||
165 | |||
166 | void | ||
167 | ltq_dma_alloc_rx(struct ltq_dma_channel *ch) | ||
168 | { | ||
169 | unsigned long flags; | ||
170 | |||
171 | ltq_dma_alloc(ch); | ||
172 | |||
173 | local_irq_save(flags); | ||
174 | ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); | ||
175 | ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); | ||
176 | ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL); | ||
177 | local_irq_restore(flags); | ||
178 | } | ||
179 | EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx); | ||
180 | |||
181 | void | ||
182 | ltq_dma_free(struct ltq_dma_channel *ch) | ||
183 | { | ||
184 | if (!ch->desc_base) | ||
185 | return; | ||
186 | ltq_dma_close(ch); | ||
187 | dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE, | ||
188 | ch->desc_base, ch->phys); | ||
189 | } | ||
190 | EXPORT_SYMBOL_GPL(ltq_dma_free); | ||
191 | |||
192 | void | ||
193 | ltq_dma_init_port(int p) | ||
194 | { | ||
195 | ltq_dma_w32(p, LTQ_DMA_PS); | ||
196 | switch (p) { | ||
197 | case DMA_PORT_ETOP: | ||
198 | /* | ||
199 | * Tell the DMA engine to swap the endianness of data frames and | ||
200 | * drop packets if the channel arbitration fails. | ||
201 | */ | ||
202 | ltq_dma_w32_mask(0, DMA_ETOP_ENDIANESS | DMA_PDEN, | ||
203 | LTQ_DMA_PCTRL); | ||
204 | break; | ||
205 | |||
206 | case DMA_PORT_DEU: | ||
207 | ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2), | ||
208 | LTQ_DMA_PCTRL); | ||
209 | break; | ||
210 | |||
211 | default: | ||
212 | break; | ||
213 | } | ||
214 | } | ||
215 | EXPORT_SYMBOL_GPL(ltq_dma_init_port); | ||
216 | |||
217 | int __init | ||
218 | ltq_dma_init(void) | ||
219 | { | ||
220 | int i; | ||
221 | |||
222 | /* insert and request the memory region */ | ||
223 | if (insert_resource(&iomem_resource, <q_dma_resource) < 0) | ||
224 | panic("Failed to insert dma memory\n"); | ||
225 | |||
226 | if (request_mem_region(ltq_dma_resource.start, | ||
227 | resource_size(<q_dma_resource), "dma") < 0) | ||
228 | panic("Failed to request dma memory\n"); | ||
229 | |||
230 | /* remap dma register range */ | ||
231 | ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start, | ||
232 | resource_size(<q_dma_resource)); | ||
233 | if (!ltq_dma_membase) | ||
234 | panic("Failed to remap dma memory\n"); | ||
235 | |||
236 | /* power up and reset the dma engine */ | ||
237 | ltq_pmu_enable(PMU_DMA); | ||
238 | ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL); | ||
239 | |||
240 | /* disable all interrupts */ | ||
241 | ltq_dma_w32(0, LTQ_DMA_IRNEN); | ||
242 | |||
243 | /* reset/configure each channel */ | ||
244 | for (i = 0; i < DMA_MAX_CHANNEL; i++) { | ||
245 | ltq_dma_w32(i, LTQ_DMA_CS); | ||
246 | ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL); | ||
247 | ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL); | ||
248 | ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); | ||
249 | } | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | postcore_initcall(ltq_dma_init); | ||
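
For context, a hedged sketch of how a client driver might drive the exported channel helpers above; the channel number, port choice, and irq plumbing below are purely illustrative and not taken from a real driver:

/* hypothetical client sketch -- channel number, port and irq plumbing
 * are illustrative only, not taken from a real driver */
#include <linux/interrupt.h>
#include <lantiq_soc.h>
#include <xway_dma.h>

static struct ltq_dma_channel demo_rx_ch = { .nr = 6 }; /* assumed channel */

static irqreturn_t demo_dma_irq(int irq, void *dev_id)
{
	ltq_dma_ack_irq(&demo_rx_ch);	/* clear the channel irq status */
	/* ... walk the completed descriptors here ... */
	return IRQ_HANDLED;
}

static int demo_dma_setup(void)
{
	ltq_dma_init_port(DMA_PORT_ETOP); /* endianness swap + packet drop */
	ltq_dma_alloc_rx(&demo_rx_ch);	/* descriptor ring + irq enable */
	ltq_dma_open(&demo_rx_ch);	/* switch the channel on */
	/* demo_dma_irq() would be hooked up with request_irq() here */
	return 0;
}
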
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c new file mode 100644 index 000000000000..66eb52fa50a1 --- /dev/null +++ b/arch/mips/lantiq/xway/ebu.c | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * EBU - the external bus unit attaches PCI, NOR and NAND | ||
7 | * | ||
8 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/version.h> | ||
14 | #include <linux/ioport.h> | ||
15 | |||
16 | #include <lantiq_soc.h> | ||
17 | |||
18 | /* all access to the ebu must be locked */ | ||
19 | DEFINE_SPINLOCK(ebu_lock); | ||
20 | EXPORT_SYMBOL_GPL(ebu_lock); | ||
21 | |||
22 | static struct resource ltq_ebu_resource = { | ||
23 | .name = "ebu", | ||
24 | .start = LTQ_EBU_BASE_ADDR, | ||
25 | .end = LTQ_EBU_BASE_ADDR + LTQ_EBU_SIZE - 1, | ||
26 | .flags = IORESOURCE_MEM, | ||
27 | }; | ||
28 | |||
29 | /* remapped base addr of the external bus unit */ | ||
30 | void __iomem *ltq_ebu_membase; | ||
31 | |||
32 | static int __init lantiq_ebu_init(void) | ||
33 | { | ||
34 | /* insert and request the memory region */ | ||
35 | if (insert_resource(&iomem_resource, <q_ebu_resource) < 0) | ||
36 | panic("Failed to insert ebu memory\n"); | ||
37 | |||
38 | if (request_mem_region(ltq_ebu_resource.start, | ||
39 | resource_size(<q_ebu_resource), "ebu") < 0) | ||
40 | panic("Failed to request ebu memory\n"); | ||
41 | |||
42 | /* remap ebu register range */ | ||
43 | ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start, | ||
44 | resource_size(<q_ebu_resource)); | ||
45 | if (!ltq_ebu_membase) | ||
46 | panic("Failed to remap ebu memory\n"); | ||
47 | |||
48 | /* make sure to unprotect the memory region where flash is located */ | ||
49 | ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | postcore_initcall(lantiq_ebu_init); | ||
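
Since ebu_lock is exported with the rule that all EBU accesses must be locked, the expected caller pattern looks roughly like the following sketch, re-using the BUSCON0 unprotect sequence above purely as an illustration:

/* minimal sketch of the locking contract: any driver touching EBU
 * registers wraps the access in ebu_lock (illustrative only) */
#include <linux/spinlock.h>
#include <lantiq_soc.h>

static void demo_ebu_unprotect(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ebu_lock, flags);
	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS,
		LTQ_EBU_BUSCON0);
	spin_unlock_irqrestore(&ebu_lock, flags);
}
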
diff --git a/arch/mips/lantiq/xway/gpio.c b/arch/mips/lantiq/xway/gpio.c new file mode 100644 index 000000000000..a321451a5455 --- /dev/null +++ b/arch/mips/lantiq/xway/gpio.c | |||
@@ -0,0 +1,195 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/slab.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/platform_device.h> | ||
12 | #include <linux/gpio.h> | ||
13 | #include <linux/ioport.h> | ||
14 | #include <linux/io.h> | ||
15 | |||
16 | #include <lantiq_soc.h> | ||
17 | |||
18 | #define LTQ_GPIO_OUT 0x00 | ||
19 | #define LTQ_GPIO_IN 0x04 | ||
20 | #define LTQ_GPIO_DIR 0x08 | ||
21 | #define LTQ_GPIO_ALTSEL0 0x0C | ||
22 | #define LTQ_GPIO_ALTSEL1 0x10 | ||
23 | #define LTQ_GPIO_OD 0x14 | ||
24 | |||
25 | #define PINS_PER_PORT 16 | ||
26 | #define MAX_PORTS 3 | ||
27 | |||
28 | #define ltq_gpio_getbit(m, r, p) (!!(ltq_r32((m) + (r)) & (1 << (p)))) | ||
29 | #define ltq_gpio_setbit(m, r, p) ltq_w32_mask(0, (1 << (p)), (m) + (r)) | ||
30 | #define ltq_gpio_clearbit(m, r, p) ltq_w32_mask((1 << (p)), 0, (m) + (r)) | ||
31 | |||
32 | struct ltq_gpio { | ||
33 | void __iomem *membase; | ||
34 | struct gpio_chip chip; | ||
35 | }; | ||
36 | |||
37 | static struct ltq_gpio ltq_gpio_port[MAX_PORTS]; | ||
38 | |||
39 | int gpio_to_irq(unsigned int gpio) | ||
40 | { | ||
41 | return -EINVAL; | ||
42 | } | ||
43 | EXPORT_SYMBOL(gpio_to_irq); | ||
44 | |||
45 | int irq_to_gpio(unsigned int gpio) | ||
46 | { | ||
47 | return -EINVAL; | ||
48 | } | ||
49 | EXPORT_SYMBOL(irq_to_gpio); | ||
50 | |||
51 | int ltq_gpio_request(unsigned int pin, unsigned int alt0, | ||
52 | unsigned int alt1, unsigned int dir, const char *name) | ||
53 | { | ||
54 | int id = 0; | ||
55 | |||
56 | if (pin >= (MAX_PORTS * PINS_PER_PORT)) | ||
57 | return -EINVAL; | ||
58 | if (gpio_request(pin, name)) { | ||
59 | pr_err("failed to setup lantiq gpio: %s\n", name); | ||
60 | return -EBUSY; | ||
61 | } | ||
62 | if (dir) | ||
63 | gpio_direction_output(pin, 1); | ||
64 | else | ||
65 | gpio_direction_input(pin); | ||
66 | while (pin >= PINS_PER_PORT) { | ||
67 | pin -= PINS_PER_PORT; | ||
68 | id++; | ||
69 | } | ||
70 | if (alt0) | ||
71 | ltq_gpio_setbit(ltq_gpio_port[id].membase, | ||
72 | LTQ_GPIO_ALTSEL0, pin); | ||
73 | else | ||
74 | ltq_gpio_clearbit(ltq_gpio_port[id].membase, | ||
75 | LTQ_GPIO_ALTSEL0, pin); | ||
76 | if (alt1) | ||
77 | ltq_gpio_setbit(ltq_gpio_port[id].membase, | ||
78 | LTQ_GPIO_ALTSEL1, pin); | ||
79 | else | ||
80 | ltq_gpio_clearbit(ltq_gpio_port[id].membase, | ||
81 | LTQ_GPIO_ALTSEL1, pin); | ||
82 | return 0; | ||
83 | } | ||
84 | EXPORT_SYMBOL(ltq_gpio_request); | ||
85 | |||
86 | static void ltq_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) | ||
87 | { | ||
88 | struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); | ||
89 | |||
90 | if (value) | ||
91 | ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset); | ||
92 | else | ||
93 | ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset); | ||
94 | } | ||
95 | |||
96 | static int ltq_gpio_get(struct gpio_chip *chip, unsigned int offset) | ||
97 | { | ||
98 | struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); | ||
99 | |||
100 | return ltq_gpio_getbit(ltq_gpio->membase, LTQ_GPIO_IN, offset); | ||
101 | } | ||
102 | |||
103 | static int ltq_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) | ||
104 | { | ||
105 | struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); | ||
106 | |||
107 | ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OD, offset); | ||
108 | ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset); | ||
109 | |||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static int ltq_gpio_direction_output(struct gpio_chip *chip, | ||
114 | unsigned int offset, int value) | ||
115 | { | ||
116 | struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); | ||
117 | |||
118 | ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OD, offset); | ||
119 | ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset); | ||
120 | ltq_gpio_set(chip, offset, value); | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static int ltq_gpio_req(struct gpio_chip *chip, unsigned offset) | ||
126 | { | ||
127 | struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); | ||
128 | |||
129 | ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL0, offset); | ||
130 | ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL1, offset); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static int ltq_gpio_probe(struct platform_device *pdev) | ||
135 | { | ||
136 | struct resource *res; | ||
137 | |||
138 | if (pdev->id >= MAX_PORTS) { | ||
139 | dev_err(&pdev->dev, "invalid gpio port %d\n", | ||
140 | pdev->id); | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
144 | if (!res) { | ||
145 | dev_err(&pdev->dev, "failed to get memory for gpio port %d\n", | ||
146 | pdev->id); | ||
147 | return -ENOENT; | ||
148 | } | ||
149 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
150 | resource_size(res), dev_name(&pdev->dev)); | ||
151 | if (!res) { | ||
152 | dev_err(&pdev->dev, | ||
153 | "failed to request memory for gpio port %d\n", | ||
154 | pdev->id); | ||
155 | return -EBUSY; | ||
156 | } | ||
157 | ltq_gpio_port[pdev->id].membase = devm_ioremap_nocache(&pdev->dev, | ||
158 | res->start, resource_size(res)); | ||
159 | if (!ltq_gpio_port[pdev->id].membase) { | ||
160 | dev_err(&pdev->dev, "failed to remap memory for gpio port %d\n", | ||
161 | pdev->id); | ||
162 | return -ENOMEM; | ||
163 | } | ||
164 | ltq_gpio_port[pdev->id].chip.label = "ltq_gpio"; | ||
165 | ltq_gpio_port[pdev->id].chip.direction_input = ltq_gpio_direction_input; | ||
166 | ltq_gpio_port[pdev->id].chip.direction_output = | ||
167 | ltq_gpio_direction_output; | ||
168 | ltq_gpio_port[pdev->id].chip.get = ltq_gpio_get; | ||
169 | ltq_gpio_port[pdev->id].chip.set = ltq_gpio_set; | ||
170 | ltq_gpio_port[pdev->id].chip.request = ltq_gpio_req; | ||
171 | ltq_gpio_port[pdev->id].chip.base = PINS_PER_PORT * pdev->id; | ||
172 | ltq_gpio_port[pdev->id].chip.ngpio = PINS_PER_PORT; | ||
173 | platform_set_drvdata(pdev, <q_gpio_port[pdev->id]); | ||
174 | return gpiochip_add(<q_gpio_port[pdev->id].chip); | ||
175 | } | ||
176 | |||
177 | static struct platform_driver | ||
178 | ltq_gpio_driver = { | ||
179 | .probe = ltq_gpio_probe, | ||
180 | .driver = { | ||
181 | .name = "ltq_gpio", | ||
182 | .owner = THIS_MODULE, | ||
183 | }, | ||
184 | }; | ||
185 | |||
186 | int __init ltq_gpio_init(void) | ||
187 | { | ||
188 | int ret = platform_driver_register(<q_gpio_driver); | ||
189 | |||
190 | if (ret) | ||
191 | pr_info("ltq_gpio : Error registering platfom driver!"); | ||
192 | return ret; | ||
193 | } | ||
194 | |||
195 | postcore_initcall(ltq_gpio_init); | ||
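
The while loop in ltq_gpio_request() splits a global pin number into a port index and a bit offset within that port (three ports of 16 pins each). A tiny stand-alone demonstration of the same arithmetic, with a hypothetical pin number:

#include <stdio.h>

#define PINS_PER_PORT 16

/* mirrors the while loop in ltq_gpio_request(): split a global pin
 * number into (port, bit); e.g. hypothetical pin 42 -> port 2, bit 10 */
int main(void)
{
	unsigned int pin = 42, id = 0;

	while (pin >= PINS_PER_PORT) {
		pin -= PINS_PER_PORT;
		id++;
	}
	printf("port %u, bit %u\n", id, pin);
	return 0;
}
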
diff --git a/arch/mips/lantiq/xway/gpio_ebu.c b/arch/mips/lantiq/xway/gpio_ebu.c new file mode 100644 index 000000000000..a479355abdb9 --- /dev/null +++ b/arch/mips/lantiq/xway/gpio_ebu.c | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/platform_device.h> | ||
13 | #include <linux/mutex.h> | ||
14 | #include <linux/gpio.h> | ||
15 | #include <linux/io.h> | ||
16 | |||
17 | #include <lantiq_soc.h> | ||
18 | |||
19 | /* | ||
20 | * By attaching hardware latches to the EBU it is possible to create | ||
21 | * output-only gpios. This driver configures a special memory address | ||
22 | * which, when written to, outputs 16 bits to the latches. | ||
23 | */ | ||
24 | |||
25 | #define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */ | ||
26 | #define LTQ_EBU_WP 0x80000000 /* write protect bit */ | ||
27 | |||
28 | /* we keep a shadow value of the last value written to the ebu */ | ||
29 | static int ltq_ebu_gpio_shadow = 0x0; | ||
30 | static void __iomem *ltq_ebu_gpio_membase; | ||
31 | |||
32 | static void ltq_ebu_apply(void) | ||
33 | { | ||
34 | unsigned long flags; | ||
35 | |||
36 | spin_lock_irqsave(&ebu_lock, flags); | ||
37 | ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1); | ||
38 | *((__u16 *)ltq_ebu_gpio_membase) = ltq_ebu_gpio_shadow; | ||
39 | ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1); | ||
40 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
41 | } | ||
42 | |||
43 | static void ltq_ebu_set(struct gpio_chip *chip, unsigned offset, int value) | ||
44 | { | ||
45 | if (value) | ||
46 | ltq_ebu_gpio_shadow |= (1 << offset); | ||
47 | else | ||
48 | ltq_ebu_gpio_shadow &= ~(1 << offset); | ||
49 | ltq_ebu_apply(); | ||
50 | } | ||
51 | |||
52 | static int ltq_ebu_direction_output(struct gpio_chip *chip, unsigned offset, | ||
53 | int value) | ||
54 | { | ||
55 | ltq_ebu_set(chip, offset, value); | ||
56 | |||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | static struct gpio_chip ltq_ebu_chip = { | ||
61 | .label = "ltq_ebu", | ||
62 | .direction_output = ltq_ebu_direction_output, | ||
63 | .set = ltq_ebu_set, | ||
64 | .base = 72, | ||
65 | .ngpio = 16, | ||
66 | .can_sleep = 1, | ||
67 | .owner = THIS_MODULE, | ||
68 | }; | ||
69 | |||
70 | static int ltq_ebu_probe(struct platform_device *pdev) | ||
71 | { | ||
72 | int ret = 0; | ||
73 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
74 | |||
75 | if (!res) { | ||
76 | dev_err(&pdev->dev, "failed to get memory resource\n"); | ||
77 | return -ENOENT; | ||
78 | } | ||
79 | |||
80 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
81 | resource_size(res), dev_name(&pdev->dev)); | ||
82 | if (!res) { | ||
83 | dev_err(&pdev->dev, "failed to request memory resource\n"); | ||
84 | return -EBUSY; | ||
85 | } | ||
86 | |||
87 | ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start, | ||
88 | resource_size(res)); | ||
89 | if (!ltq_ebu_gpio_membase) { | ||
90 | dev_err(&pdev->dev, "Failed to ioremap mem region\n"); | ||
91 | return -ENOMEM; | ||
92 | } | ||
93 | |||
94 | /* grab the default shadow value passed from the platform code */ | ||
95 | ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data; | ||
96 | |||
97 | /* tell the ebu controller which memory address we will be using */ | ||
98 | ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1); | ||
99 | |||
100 | /* write protect the region */ | ||
101 | ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1); | ||
102 | |||
103 | ret = gpiochip_add(<q_ebu_chip); | ||
104 | if (!ret) | ||
105 | ltq_ebu_apply(); | ||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | static struct platform_driver ltq_ebu_driver = { | ||
110 | .probe = ltq_ebu_probe, | ||
111 | .driver = { | ||
112 | .name = "ltq_ebu", | ||
113 | .owner = THIS_MODULE, | ||
114 | }, | ||
115 | }; | ||
116 | |||
117 | static int __init ltq_ebu_init(void) | ||
118 | { | ||
119 | int ret = platform_driver_register(<q_ebu_driver); | ||
120 | |||
121 | if (ret) | ||
122 | pr_info("ltq_ebu : Error registering platfom driver!"); | ||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | postcore_initcall(ltq_ebu_init); | ||
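
Once the chip above is registered, the 16 latch bits appear as ordinary gpios 72..87, so a consumer needs nothing EBU-specific. A hedged sketch, with the latch bit chosen arbitrarily:

/* hypothetical consumer of the latch chip: base 72 plus latch bit 5
 * gives global gpio 77 (bit choice is arbitrary) */
#include <linux/gpio.h>

static int demo_latch_output(void)
{
	int gpio = 72 + 5;
	int err = gpio_request(gpio, "demo-latch");

	if (err)
		return err;
	/* updates the shadow word, then rewrites the whole latch */
	return gpio_direction_output(gpio, 1);
}
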
diff --git a/arch/mips/lantiq/xway/gpio_stp.c b/arch/mips/lantiq/xway/gpio_stp.c new file mode 100644 index 000000000000..67d59d690340 --- /dev/null +++ b/arch/mips/lantiq/xway/gpio_stp.c | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2007 John Crispin <blogic@openwrt.org> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <linux/slab.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/mutex.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/gpio.h> | ||
18 | |||
19 | #include <lantiq_soc.h> | ||
20 | |||
21 | #define LTQ_STP_CON0 0x00 | ||
22 | #define LTQ_STP_CON1 0x04 | ||
23 | #define LTQ_STP_CPU0 0x08 | ||
24 | #define LTQ_STP_CPU1 0x0C | ||
25 | #define LTQ_STP_AR 0x10 | ||
26 | |||
27 | #define LTQ_STP_CON_SWU (1 << 31) | ||
28 | #define LTQ_STP_2HZ 0 | ||
29 | #define LTQ_STP_4HZ (1 << 23) | ||
30 | #define LTQ_STP_8HZ (2 << 23) | ||
31 | #define LTQ_STP_10HZ (3 << 23) | ||
32 | #define LTQ_STP_SPEED_MASK (0xf << 23) | ||
33 | #define LTQ_STP_UPD_FPI (1 << 31) | ||
34 | #define LTQ_STP_UPD_MASK (3 << 30) | ||
35 | #define LTQ_STP_ADSL_SRC (3 << 24) | ||
36 | |||
37 | #define LTQ_STP_GROUP0 (1 << 0) | ||
38 | |||
39 | #define LTQ_STP_RISING 0 | ||
40 | #define LTQ_STP_FALLING (1 << 26) | ||
41 | #define LTQ_STP_EDGE_MASK (1 << 26) | ||
42 | |||
43 | #define ltq_stp_r32(reg) __raw_readl(ltq_stp_membase + reg) | ||
44 | #define ltq_stp_w32(val, reg) __raw_writel(val, ltq_stp_membase + reg) | ||
45 | #define ltq_stp_w32_mask(clear, set, reg) \ | ||
46 | ltq_w32((ltq_r32(ltq_stp_membase + reg) & ~(clear)) | (set), \ | ||
47 | ltq_stp_membase + (reg)) | ||
48 | |||
49 | static int ltq_stp_shadow = 0xffff; | ||
50 | static void __iomem *ltq_stp_membase; | ||
51 | |||
52 | static void ltq_stp_set(struct gpio_chip *chip, unsigned offset, int value) | ||
53 | { | ||
54 | if (value) | ||
55 | ltq_stp_shadow |= (1 << offset); | ||
56 | else | ||
57 | ltq_stp_shadow &= ~(1 << offset); | ||
58 | ltq_stp_w32(ltq_stp_shadow, LTQ_STP_CPU0); | ||
59 | } | ||
60 | |||
61 | static int ltq_stp_direction_output(struct gpio_chip *chip, unsigned offset, | ||
62 | int value) | ||
63 | { | ||
64 | ltq_stp_set(chip, offset, value); | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static struct gpio_chip ltq_stp_chip = { | ||
70 | .label = "ltq_stp", | ||
71 | .direction_output = ltq_stp_direction_output, | ||
72 | .set = ltq_stp_set, | ||
73 | .base = 48, | ||
74 | .ngpio = 24, | ||
75 | .can_sleep = 1, | ||
76 | .owner = THIS_MODULE, | ||
77 | }; | ||
78 | |||
79 | static int ltq_stp_hw_init(void) | ||
80 | { | ||
81 | /* the 3 pins used to control the external stp */ | ||
82 | ltq_gpio_request(4, 1, 0, 1, "stp-st"); | ||
83 | ltq_gpio_request(5, 1, 0, 1, "stp-d"); | ||
84 | ltq_gpio_request(6, 1, 0, 1, "stp-sh"); | ||
85 | |||
86 | /* sane defaults */ | ||
87 | ltq_stp_w32(0, LTQ_STP_AR); | ||
88 | ltq_stp_w32(0, LTQ_STP_CPU0); | ||
89 | ltq_stp_w32(0, LTQ_STP_CPU1); | ||
90 | ltq_stp_w32(LTQ_STP_CON_SWU, LTQ_STP_CON0); | ||
91 | ltq_stp_w32(0, LTQ_STP_CON1); | ||
92 | |||
93 | /* rising or falling edge */ | ||
94 | ltq_stp_w32_mask(LTQ_STP_EDGE_MASK, LTQ_STP_FALLING, LTQ_STP_CON0); | ||
95 | |||
96 | /* by default, stp outputs 15-0 are enabled */ | ||
97 | ltq_stp_w32_mask(0, LTQ_STP_GROUP0, LTQ_STP_CON1); | ||
98 | |||
99 | /* the stp outputs are updated periodically by the FPI bus */ | ||
100 | ltq_stp_w32_mask(LTQ_STP_UPD_MASK, LTQ_STP_UPD_FPI, LTQ_STP_CON1); | ||
101 | |||
102 | /* set stp update speed */ | ||
103 | ltq_stp_w32_mask(LTQ_STP_SPEED_MASK, LTQ_STP_8HZ, LTQ_STP_CON1); | ||
104 | |||
105 | /* tell the hardware that pins (leds) 0 and 1 are controlled | ||
106 | * by the dsl arc | ||
107 | */ | ||
108 | ltq_stp_w32_mask(0, LTQ_STP_ADSL_SRC, LTQ_STP_CON0); | ||
109 | |||
110 | ltq_pmu_enable(PMU_LED); | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static int __devinit ltq_stp_probe(struct platform_device *pdev) | ||
115 | { | ||
116 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
117 | int ret = 0; | ||
118 | |||
119 | if (!res) | ||
120 | return -ENOENT; | ||
121 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
122 | resource_size(res), dev_name(&pdev->dev)); | ||
123 | if (!res) { | ||
124 | dev_err(&pdev->dev, "failed to request STP memory\n"); | ||
125 | return -EBUSY; | ||
126 | } | ||
127 | ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start, | ||
128 | resource_size(res)); | ||
129 | if (!ltq_stp_membase) { | ||
130 | dev_err(&pdev->dev, "failed to remap STP memory\n"); | ||
131 | return -ENOMEM; | ||
132 | } | ||
133 | ret = gpiochip_add(<q_stp_chip); | ||
134 | if (!ret) | ||
135 | ret = ltq_stp_hw_init(); | ||
136 | |||
137 | return ret; | ||
138 | } | ||
139 | |||
140 | static struct platform_driver ltq_stp_driver = { | ||
141 | .probe = ltq_stp_probe, | ||
142 | .driver = { | ||
143 | .name = "ltq_stp", | ||
144 | .owner = THIS_MODULE, | ||
145 | }, | ||
146 | }; | ||
147 | |||
148 | int __init ltq_stp_init(void) | ||
149 | { | ||
150 | int ret = platform_driver_register(<q_stp_driver); | ||
151 | |||
152 | if (ret) | ||
153 | pr_info("ltq_stp: error registering platfom driver"); | ||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | postcore_initcall(ltq_stp_init); | ||
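
Because the shift register is exposed as a plain gpio_chip at base 48, board code can hang standard consumers such as the generic leds-gpio driver off the STP outputs. A sketch with made-up LED names and bit assignments:

/* hypothetical board wiring: stp bit 2 (global gpio 50) drives a LED
 * through the generic leds-gpio driver */
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

static struct gpio_led demo_leds[] = {
	{ .name = "demo:green:status", .gpio = 48 + 2 },
};

static struct gpio_led_platform_data demo_led_data = {
	.num_leds	= ARRAY_SIZE(demo_leds),
	.leds		= demo_leds,
};

static struct platform_device demo_led_dev = {
	.name			= "leds-gpio",
	.id			= -1,
	.dev.platform_data	= &demo_led_data,
};
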
diff --git a/arch/mips/lantiq/xway/mach-easy50601.c b/arch/mips/lantiq/xway/mach-easy50601.c new file mode 100644 index 000000000000..d5aaf637ab19 --- /dev/null +++ b/arch/mips/lantiq/xway/mach-easy50601.c | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/platform_device.h> | ||
11 | #include <linux/mtd/mtd.h> | ||
12 | #include <linux/mtd/partitions.h> | ||
13 | #include <linux/mtd/physmap.h> | ||
14 | #include <linux/input.h> | ||
15 | |||
16 | #include <lantiq.h> | ||
17 | |||
18 | #include "../machtypes.h" | ||
19 | #include "devices.h" | ||
20 | |||
21 | static struct mtd_partition easy50601_partitions[] = { | ||
22 | { | ||
23 | .name = "uboot", | ||
24 | .offset = 0x0, | ||
25 | .size = 0x10000, | ||
26 | }, | ||
27 | { | ||
28 | .name = "uboot_env", | ||
29 | .offset = 0x10000, | ||
30 | .size = 0x10000, | ||
31 | }, | ||
32 | { | ||
33 | .name = "linux", | ||
34 | .offset = 0x20000, | ||
35 | .size = 0xE0000, | ||
36 | }, | ||
37 | { | ||
38 | .name = "rootfs", | ||
39 | .offset = 0x100000, | ||
40 | .size = 0x300000, | ||
41 | }, | ||
42 | }; | ||
43 | |||
44 | static struct physmap_flash_data easy50601_flash_data = { | ||
45 | .nr_parts = ARRAY_SIZE(easy50601_partitions), | ||
46 | .parts = easy50601_partitions, | ||
47 | }; | ||
48 | |||
49 | static void __init easy50601_init(void) | ||
50 | { | ||
51 | ltq_register_nor(&easy50601_flash_data); | ||
52 | } | ||
53 | |||
54 | MIPS_MACHINE(LTQ_MACH_EASY50601, | ||
55 | "EASY50601", | ||
56 | "EASY50601 Eval Board", | ||
57 | easy50601_init); | ||
diff --git a/arch/mips/lantiq/xway/mach-easy50712.c b/arch/mips/lantiq/xway/mach-easy50712.c new file mode 100644 index 000000000000..ea5027b3239d --- /dev/null +++ b/arch/mips/lantiq/xway/mach-easy50712.c | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/platform_device.h> | ||
11 | #include <linux/mtd/mtd.h> | ||
12 | #include <linux/mtd/partitions.h> | ||
13 | #include <linux/mtd/physmap.h> | ||
14 | #include <linux/input.h> | ||
15 | #include <linux/phy.h> | ||
16 | |||
17 | #include <lantiq_soc.h> | ||
18 | #include <irq.h> | ||
19 | |||
20 | #include "../machtypes.h" | ||
21 | #include "devices.h" | ||
22 | |||
23 | static struct mtd_partition easy50712_partitions[] = { | ||
24 | { | ||
25 | .name = "uboot", | ||
26 | .offset = 0x0, | ||
27 | .size = 0x10000, | ||
28 | }, | ||
29 | { | ||
30 | .name = "uboot_env", | ||
31 | .offset = 0x10000, | ||
32 | .size = 0x10000, | ||
33 | }, | ||
34 | { | ||
35 | .name = "linux", | ||
36 | .offset = 0x20000, | ||
37 | .size = 0xe0000, | ||
38 | }, | ||
39 | { | ||
40 | .name = "rootfs", | ||
41 | .offset = 0x100000, | ||
42 | .size = 0x300000, | ||
43 | }, | ||
44 | }; | ||
45 | |||
46 | static struct physmap_flash_data easy50712_flash_data = { | ||
47 | .nr_parts = ARRAY_SIZE(easy50712_partitions), | ||
48 | .parts = easy50712_partitions, | ||
49 | }; | ||
50 | |||
51 | static struct ltq_pci_data ltq_pci_data = { | ||
52 | .clock = PCI_CLOCK_INT, | ||
53 | .gpio = PCI_GNT1 | PCI_REQ1, | ||
54 | .irq = { | ||
55 | [14] = INT_NUM_IM0_IRL0 + 22, | ||
56 | }, | ||
57 | }; | ||
58 | |||
59 | static struct ltq_eth_data ltq_eth_data = { | ||
60 | .mii_mode = PHY_INTERFACE_MODE_MII, | ||
61 | }; | ||
62 | |||
63 | static void __init easy50712_init(void) | ||
64 | { | ||
65 | ltq_register_gpio_stp(); | ||
66 | ltq_register_nor(&easy50712_flash_data); | ||
67 | ltq_register_pci(<q_pci_data); | ||
68 | ltq_register_etop(<q_eth_data); | ||
69 | } | ||
70 | |||
71 | MIPS_MACHINE(LTQ_MACH_EASY50712, | ||
72 | "EASY50712", | ||
73 | "EASY50712 Eval Board", | ||
74 | easy50712_init); | ||
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c new file mode 100644 index 000000000000..9d69f01e352b --- /dev/null +++ b/arch/mips/lantiq/xway/pmu.c | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/version.h> | ||
12 | #include <linux/ioport.h> | ||
13 | |||
14 | #include <lantiq_soc.h> | ||
15 | |||
16 | /* PMU - the power management unit allows us to turn parts of the core | ||
17 | * on and off | ||
18 | */ | ||
19 | |||
20 | /* the enable / disable registers */ | ||
21 | #define LTQ_PMU_PWDCR 0x1C | ||
22 | #define LTQ_PMU_PWDSR 0x20 | ||
23 | |||
24 | #define ltq_pmu_w32(x, y) ltq_w32((x), ltq_pmu_membase + (y)) | ||
25 | #define ltq_pmu_r32(x) ltq_r32(ltq_pmu_membase + (x)) | ||
26 | |||
27 | static struct resource ltq_pmu_resource = { | ||
28 | .name = "pmu", | ||
29 | .start = LTQ_PMU_BASE_ADDR, | ||
30 | .end = LTQ_PMU_BASE_ADDR + LTQ_PMU_SIZE - 1, | ||
31 | .flags = IORESOURCE_MEM, | ||
32 | }; | ||
33 | |||
34 | static void __iomem *ltq_pmu_membase; | ||
35 | |||
36 | void ltq_pmu_enable(unsigned int module) | ||
37 | { | ||
38 | int err = 1000000; | ||
39 | |||
40 | ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) & ~module, LTQ_PMU_PWDCR); | ||
41 | do {} while (--err && (ltq_pmu_r32(LTQ_PMU_PWDSR) & module)); | ||
42 | |||
43 | if (!err) | ||
44 | panic("activating PMU module failed!\n"); | ||
45 | } | ||
46 | EXPORT_SYMBOL(ltq_pmu_enable); | ||
47 | |||
48 | void ltq_pmu_disable(unsigned int module) | ||
49 | { | ||
50 | ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) | module, LTQ_PMU_PWDCR); | ||
51 | } | ||
52 | EXPORT_SYMBOL(ltq_pmu_disable); | ||
53 | |||
54 | int __init ltq_pmu_init(void) | ||
55 | { | ||
56 | if (insert_resource(&iomem_resource, <q_pmu_resource) < 0) | ||
57 | panic("Failed to insert pmu memory\n"); | ||
58 | |||
59 | if (request_mem_region(ltq_pmu_resource.start, | ||
60 | resource_size(<q_pmu_resource), "pmu") < 0) | ||
61 | panic("Failed to request pmu memory\n"); | ||
62 | |||
63 | ltq_pmu_membase = ioremap_nocache(ltq_pmu_resource.start, | ||
64 | resource_size(<q_pmu_resource)); | ||
65 | if (!ltq_pmu_membase) | ||
66 | panic("Failed to remap pmu memory\n"); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | core_initcall(ltq_pmu_init); | ||
diff --git a/arch/mips/lantiq/xway/prom-ase.c b/arch/mips/lantiq/xway/prom-ase.c new file mode 100644 index 000000000000..abe49f4db57f --- /dev/null +++ b/arch/mips/lantiq/xway/prom-ase.c | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/clk.h> | ||
11 | #include <asm/bootinfo.h> | ||
12 | #include <asm/time.h> | ||
13 | |||
14 | #include <lantiq_soc.h> | ||
15 | |||
16 | #include "../prom.h" | ||
17 | |||
18 | #define SOC_AMAZON_SE "Amazon_SE" | ||
19 | |||
20 | #define PART_SHIFT 12 | ||
21 | #define PART_MASK 0x0FFFFFFF | ||
22 | #define REV_SHIFT 28 | ||
23 | #define REV_MASK 0xF0000000 | ||
24 | |||
25 | void __init ltq_soc_detect(struct ltq_soc_info *i) | ||
26 | { | ||
27 | i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT; | ||
28 | i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT; | ||
29 | switch (i->partnum) { | ||
30 | case SOC_ID_AMAZON_SE: | ||
31 | i->name = SOC_AMAZON_SE; | ||
32 | i->type = SOC_TYPE_AMAZON_SE; | ||
33 | break; | ||
34 | |||
35 | default: | ||
36 | unreachable(); | ||
37 | break; | ||
38 | } | ||
39 | } | ||
diff --git a/arch/mips/lantiq/xway/prom-xway.c b/arch/mips/lantiq/xway/prom-xway.c new file mode 100644 index 000000000000..1686692ac24d --- /dev/null +++ b/arch/mips/lantiq/xway/prom-xway.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/clk.h> | ||
11 | #include <asm/bootinfo.h> | ||
12 | #include <asm/time.h> | ||
13 | |||
14 | #include <lantiq_soc.h> | ||
15 | |||
16 | #include "../prom.h" | ||
17 | |||
18 | #define SOC_DANUBE "Danube" | ||
19 | #define SOC_TWINPASS "Twinpass" | ||
20 | #define SOC_AR9 "AR9" | ||
21 | |||
22 | #define PART_SHIFT 12 | ||
23 | #define PART_MASK 0x0FFFFFFF | ||
24 | #define REV_SHIFT 28 | ||
25 | #define REV_MASK 0xF0000000 | ||
26 | |||
27 | void __init ltq_soc_detect(struct ltq_soc_info *i) | ||
28 | { | ||
29 | i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT; | ||
30 | i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT; | ||
31 | switch (i->partnum) { | ||
32 | case SOC_ID_DANUBE1: | ||
33 | case SOC_ID_DANUBE2: | ||
34 | i->name = SOC_DANUBE; | ||
35 | i->type = SOC_TYPE_DANUBE; | ||
36 | break; | ||
37 | |||
38 | case SOC_ID_TWINPASS: | ||
39 | i->name = SOC_TWINPASS; | ||
40 | i->type = SOC_TYPE_DANUBE; | ||
41 | break; | ||
42 | |||
43 | case SOC_ID_ARX188: | ||
44 | case SOC_ID_ARX168: | ||
45 | case SOC_ID_ARX182: | ||
46 | i->name = SOC_AR9; | ||
47 | i->type = SOC_TYPE_AR9; | ||
48 | break; | ||
49 | |||
50 | default: | ||
51 | unreachable(); | ||
52 | break; | ||
53 | } | ||
54 | } | ||
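
Both SoC-detect variants decode the same MPS CHIPID layout: bits 27..12 carry the part number and bits 31..28 the revision. A stand-alone demonstration with a hypothetical register value:

#include <stdio.h>
#include <stdint.h>

#define PART_SHIFT	12
#define PART_MASK	0x0FFFFFFF
#define REV_SHIFT	28
#define REV_MASK	0xF0000000

int main(void)
{
	uint32_t chipid = 0x1016c083;	/* hypothetical register value */

	printf("partnum 0x%x, rev %u\n",
		(chipid & PART_MASK) >> PART_SHIFT,
		(chipid & REV_MASK) >> REV_SHIFT);
	return 0;
}
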
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c new file mode 100644 index 000000000000..a1be36d0e490 --- /dev/null +++ b/arch/mips/lantiq/xway/reset.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/io.h> | ||
11 | #include <linux/ioport.h> | ||
12 | #include <linux/pm.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <asm/reboot.h> | ||
15 | |||
16 | #include <lantiq_soc.h> | ||
17 | |||
18 | #define ltq_rcu_w32(x, y) ltq_w32((x), ltq_rcu_membase + (y)) | ||
19 | #define ltq_rcu_r32(x) ltq_r32(ltq_rcu_membase + (x)) | ||
20 | |||
21 | /* register definitions */ | ||
22 | #define LTQ_RCU_RST 0x0010 | ||
23 | #define LTQ_RCU_RST_ALL 0x40000000 | ||
24 | |||
25 | #define LTQ_RCU_RST_STAT 0x0014 | ||
26 | #define LTQ_RCU_STAT_SHIFT 26 | ||
27 | |||
28 | static struct resource ltq_rcu_resource = { | ||
29 | .name = "rcu", | ||
30 | .start = LTQ_RCU_BASE_ADDR, | ||
31 | .end = LTQ_RCU_BASE_ADDR + LTQ_RCU_SIZE - 1, | ||
32 | .flags = IORESOURCE_MEM, | ||
33 | }; | ||
34 | |||
35 | /* remapped base addr of the reset control unit */ | ||
36 | static void __iomem *ltq_rcu_membase; | ||
37 | |||
38 | /* This function is used by the watchdog driver */ | ||
39 | int ltq_reset_cause(void) | ||
40 | { | ||
41 | u32 val = ltq_rcu_r32(LTQ_RCU_RST_STAT); | ||
42 | return val >> LTQ_RCU_STAT_SHIFT; | ||
43 | } | ||
44 | EXPORT_SYMBOL_GPL(ltq_reset_cause); | ||
45 | |||
46 | static void ltq_machine_restart(char *command) | ||
47 | { | ||
48 | pr_notice("System restart\n"); | ||
49 | local_irq_disable(); | ||
50 | ltq_rcu_w32(ltq_rcu_r32(LTQ_RCU_RST) | LTQ_RCU_RST_ALL, LTQ_RCU_RST); | ||
51 | unreachable(); | ||
52 | } | ||
53 | |||
54 | static void ltq_machine_halt(void) | ||
55 | { | ||
56 | pr_notice("System halted.\n"); | ||
57 | local_irq_disable(); | ||
58 | unreachable(); | ||
59 | } | ||
60 | |||
61 | static void ltq_machine_power_off(void) | ||
62 | { | ||
63 | pr_notice("Please turn off the power now.\n"); | ||
64 | local_irq_disable(); | ||
65 | unreachable(); | ||
66 | } | ||
67 | |||
68 | static int __init mips_reboot_setup(void) | ||
69 | { | ||
70 | /* insert and request the memory region */ | ||
71 | if (insert_resource(&iomem_resource, <q_rcu_resource) < 0) | ||
72 | panic("Failed to insert rcu memory\n"); | ||
73 | |||
74 | if (request_mem_region(ltq_rcu_resource.start, | ||
75 | resource_size(<q_rcu_resource), "rcu") < 0) | ||
76 | panic("Failed to request rcu memory\n"); | ||
77 | |||
78 | /* remap rcu register range */ | ||
79 | ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start, | ||
80 | resource_size(<q_rcu_resource)); | ||
81 | if (!ltq_rcu_membase) | ||
82 | panic("Failed to remap rcu memory\n"); | ||
83 | |||
84 | _machine_restart = ltq_machine_restart; | ||
85 | _machine_halt = ltq_machine_halt; | ||
86 | pm_power_off = ltq_machine_power_off; | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | arch_initcall(mips_reboot_setup); | ||
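
ltq_reset_cause() above is exported for the watchdog driver; a hedged sketch of that consumer side, assuming the prototype is visible via lantiq_soc.h and leaving the SoC-specific bit meanings aside:

/* hedged watchdog-side sketch; the meaning of individual status bits
 * is SoC-specific and left out here */
#include <linux/kernel.h>
#include <lantiq_soc.h>

static void demo_report_reset_cause(void)
{
	int cause = ltq_reset_cause();

	if (cause)
		pr_info("last reboot was triggered by reset cause %x\n",
			cause);
}
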
diff --git a/arch/mips/lantiq/xway/setup-ase.c b/arch/mips/lantiq/xway/setup-ase.c new file mode 100644 index 000000000000..f6f326798a39 --- /dev/null +++ b/arch/mips/lantiq/xway/setup-ase.c | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <lantiq_soc.h> | ||
10 | |||
11 | #include "../prom.h" | ||
12 | #include "devices.h" | ||
13 | |||
14 | void __init ltq_soc_setup(void) | ||
15 | { | ||
16 | ltq_register_ase_asc(); | ||
17 | ltq_register_gpio(); | ||
18 | ltq_register_wdt(); | ||
19 | } | ||
diff --git a/arch/mips/lantiq/xway/setup-xway.c b/arch/mips/lantiq/xway/setup-xway.c new file mode 100644 index 000000000000..c292f643a858 --- /dev/null +++ b/arch/mips/lantiq/xway/setup-xway.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <lantiq_soc.h> | ||
10 | |||
11 | #include "../prom.h" | ||
12 | #include "devices.h" | ||
13 | |||
14 | void __init ltq_soc_setup(void) | ||
15 | { | ||
16 | ltq_register_asc(0); | ||
17 | ltq_register_asc(1); | ||
18 | ltq_register_gpio(); | ||
19 | ltq_register_wdt(); | ||
20 | } | ||
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index 2adead5a8a37..b2cad4fd5fc4 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile | |||
@@ -28,6 +28,7 @@ obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o | |||
28 | obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o | 28 | obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o |
29 | obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o | 29 | obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o |
30 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += dump_tlb.o | 30 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += dump_tlb.o |
31 | obj-$(CONFIG_CPU_XLR) += dump_tlb.o | ||
31 | 32 | ||
32 | # libgcc-style stuff needed in the kernel | 33 | # libgcc-style stuff needed in the kernel |
33 | obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o | 34 | obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o |
diff --git a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c index 8c807c965199..0cb1b9760e34 100644 --- a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c +++ b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c | |||
@@ -201,8 +201,6 @@ static struct clocksource clocksource_mfgpt = { | |||
201 | .rating = 120, /* Functional for real use, but not desired */ | 201 | .rating = 120, /* Functional for real use, but not desired */ |
202 | .read = mfgpt_read, | 202 | .read = mfgpt_read, |
203 | .mask = CLOCKSOURCE_MASK(32), | 203 | .mask = CLOCKSOURCE_MASK(32), |
204 | .mult = 0, | ||
205 | .shift = 22, | ||
206 | }; | 204 | }; |
207 | 205 | ||
208 | int __init init_mfgpt_clocksource(void) | 206 | int __init init_mfgpt_clocksource(void) |
@@ -210,8 +208,7 @@ int __init init_mfgpt_clocksource(void) | |||
210 | if (num_possible_cpus() > 1) /* MFGPT does not scale! */ | 208 | if (num_possible_cpus() > 1) /* MFGPT does not scale! */ |
211 | return 0; | 209 | return 0; |
212 | 210 | ||
213 | clocksource_mfgpt.mult = clocksource_hz2mult(MFGPT_TICK_RATE, 22); | 211 | return clocksource_register_hz(&clocksource_mfgpt, MFGPT_TICK_RATE); |
214 | return clocksource_register(&clocksource_mfgpt); | ||
215 | } | 212 | } |
216 | 213 | ||
217 | arch_initcall(init_mfgpt_clocksource); | 214 | arch_initcall(init_mfgpt_clocksource); |
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c index 11b193f848f8..d93830ad6113 100644 --- a/arch/mips/loongson/common/env.c +++ b/arch/mips/loongson/common/env.c | |||
@@ -29,9 +29,10 @@ unsigned long memsize, highmemsize; | |||
29 | 29 | ||
30 | #define parse_even_earlier(res, option, p) \ | 30 | #define parse_even_earlier(res, option, p) \ |
31 | do { \ | 31 | do { \ |
32 | int ret; \ | 32 | unsigned int tmp __maybe_unused; \ |
33 | \ | ||
33 | if (strncmp(option, (char *)p, strlen(option)) == 0) \ | 34 | if (strncmp(option, (char *)p, strlen(option)) == 0) \ |
34 | ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \ | 35 | tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \ |
35 | } while (0) | 36 | } while (0) |
36 | 37 | ||
37 | void __init prom_init_env(void) | 38 | void __init prom_init_env(void) |
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index d679c772d082..4d8c1623eee2 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile | |||
@@ -3,7 +3,8 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += cache.o dma-default.o extable.o fault.o \ | 5 | obj-y += cache.o dma-default.o extable.o fault.o \ |
6 | init.o tlbex.o tlbex-fault.o uasm.o page.o | 6 | init.o mmap.o tlbex.o tlbex-fault.o uasm.o \ |
7 | page.o | ||
7 | 8 | ||
8 | obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o | 9 | obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o |
9 | obj-$(CONFIG_64BIT) += pgtable-64.o | 10 | obj-$(CONFIG_64BIT) += pgtable-64.o |
@@ -29,6 +30,7 @@ obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o | |||
29 | obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o | 30 | obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o |
30 | obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o | 31 | obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o |
31 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o | 32 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o |
33 | obj-$(CONFIG_CPU_XLR) += c-r4k.o tlb-r4k.o cex-gen.o | ||
32 | 34 | ||
33 | obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o | 35 | obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o |
34 | obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o | 36 | obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o |
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index b4923a75cb4b..d9bc5d3593b6 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -1006,6 +1006,7 @@ static void __cpuinit probe_pcache(void) | |||
1006 | case CPU_25KF: | 1006 | case CPU_25KF: |
1007 | case CPU_SB1: | 1007 | case CPU_SB1: |
1008 | case CPU_SB1A: | 1008 | case CPU_SB1A: |
1009 | case CPU_XLR: | ||
1009 | c->dcache.flags |= MIPS_CACHE_PINDEX; | 1010 | c->dcache.flags |= MIPS_CACHE_PINDEX; |
1010 | break; | 1011 | break; |
1011 | 1012 | ||
@@ -1075,7 +1076,6 @@ static int __cpuinit probe_scache(void) | |||
1075 | unsigned long flags, addr, begin, end, pow2; | 1076 | unsigned long flags, addr, begin, end, pow2; |
1076 | unsigned int config = read_c0_config(); | 1077 | unsigned int config = read_c0_config(); |
1077 | struct cpuinfo_mips *c = &current_cpu_data; | 1078 | struct cpuinfo_mips *c = &current_cpu_data;
1078 | int tmp; | ||
1079 | 1079 | ||
1080 | if (config & CONF_SC) | 1080 | if (config & CONF_SC) |
1081 | return 0; | 1081 | return 0; |
@@ -1108,7 +1108,6 @@ static int __cpuinit probe_scache(void) | |||
1108 | 1108 | ||
1109 | /* Now search for the wrap around point. */ | 1109 | /* Now search for the wrap around point. */ |
1110 | pow2 = (128 * 1024); | 1110 | pow2 = (128 * 1024); |
1111 | tmp = 0; | ||
1112 | for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { | 1111 | for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { |
1113 | cache_op(Index_Load_Tag_SD, addr); | 1112 | cache_op(Index_Load_Tag_SD, addr); |
1114 | __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */ | 1113 | __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */ |
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c new file mode 100644 index 000000000000..ae3c20a9556e --- /dev/null +++ b/arch/mips/mm/mmap.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 Wind River Systems, | ||
7 | * written by Ralf Baechle <ralf@linux-mips.org> | ||
8 | */ | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/mman.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/random.h> | ||
14 | #include <linux/sched.h> | ||
15 | |||
16 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ | ||
17 | |||
18 | EXPORT_SYMBOL(shm_align_mask); | ||
19 | |||
20 | #define COLOUR_ALIGN(addr,pgoff) \ | ||
21 | ((((addr) + shm_align_mask) & ~shm_align_mask) + \ | ||
22 | (((pgoff) << PAGE_SHIFT) & shm_align_mask)) | ||
23 | |||
24 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
25 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
26 | { | ||
27 | struct vm_area_struct * vmm; | ||
28 | int do_color_align; | ||
29 | |||
30 | if (len > TASK_SIZE) | ||
31 | return -ENOMEM; | ||
32 | |||
33 | if (flags & MAP_FIXED) { | ||
34 | /* Even MAP_FIXED mappings must reside within TASK_SIZE. */ | ||
35 | if (TASK_SIZE - len < addr) | ||
36 | return -EINVAL; | ||
37 | |||
38 | /* | ||
39 | * We do not accept a shared mapping if it would violate | ||
40 | * cache aliasing constraints. | ||
41 | */ | ||
42 | if ((flags & MAP_SHARED) && | ||
43 | ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) | ||
44 | return -EINVAL; | ||
45 | return addr; | ||
46 | } | ||
47 | |||
48 | do_color_align = 0; | ||
49 | if (filp || (flags & MAP_SHARED)) | ||
50 | do_color_align = 1; | ||
51 | if (addr) { | ||
52 | if (do_color_align) | ||
53 | addr = COLOUR_ALIGN(addr, pgoff); | ||
54 | else | ||
55 | addr = PAGE_ALIGN(addr); | ||
56 | vmm = find_vma(current->mm, addr); | ||
57 | if (TASK_SIZE - len >= addr && | ||
58 | (!vmm || addr + len <= vmm->vm_start)) | ||
59 | return addr; | ||
60 | } | ||
61 | addr = current->mm->mmap_base; | ||
62 | if (do_color_align) | ||
63 | addr = COLOUR_ALIGN(addr, pgoff); | ||
64 | else | ||
65 | addr = PAGE_ALIGN(addr); | ||
66 | |||
67 | for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { | ||
68 | /* At this point: (!vmm || addr < vmm->vm_end). */ | ||
69 | if (TASK_SIZE - len < addr) | ||
70 | return -ENOMEM; | ||
71 | if (!vmm || addr + len <= vmm->vm_start) | ||
72 | return addr; | ||
73 | addr = vmm->vm_end; | ||
74 | if (do_color_align) | ||
75 | addr = COLOUR_ALIGN(addr, pgoff); | ||
76 | } | ||
77 | } | ||
78 | |||
79 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
80 | { | ||
81 | unsigned long random_factor = 0UL; | ||
82 | |||
83 | if (current->flags & PF_RANDOMIZE) { | ||
84 | random_factor = get_random_int(); | ||
85 | random_factor = random_factor << PAGE_SHIFT; | ||
86 | if (TASK_IS_32BIT_ADDR) | ||
87 | random_factor &= 0xfffffful; | ||
88 | else | ||
89 | random_factor &= 0xffffffful; | ||
90 | } | ||
91 | |||
92 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | ||
93 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
94 | mm->unmap_area = arch_unmap_area; | ||
95 | } | ||
96 | |||
97 | static inline unsigned long brk_rnd(void) | ||
98 | { | ||
99 | unsigned long rnd = get_random_int(); | ||
100 | |||
101 | rnd = rnd << PAGE_SHIFT; | ||
102 | /* 8MB for 32bit, 256MB for 64bit */ | ||
103 | if (TASK_IS_32BIT_ADDR) | ||
104 | rnd = rnd & 0x7ffffful; | ||
105 | else | ||
106 | rnd = rnd & 0xffffffful; | ||
107 | |||
108 | return rnd; | ||
109 | } | ||
110 | |||
111 | unsigned long arch_randomize_brk(struct mm_struct *mm) | ||
112 | { | ||
113 | unsigned long base = mm->brk; | ||
114 | unsigned long ret; | ||
115 | |||
116 | ret = PAGE_ALIGN(base + brk_rnd()); | ||
117 | |||
118 | if (ret < mm->brk) | ||
119 | return mm->brk; | ||
120 | |||
121 | return ret; | ||
122 | } | ||
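A worked example of the COLOUR_ALIGN() arithmetic in the new mmap.c, assuming a hypothetical 16 KB alias boundary (shm_align_mask = 0x3fff) and 4 KB pages (PAGE_SHIFT = 12):

/*
 * addr = 0x10001000, pgoff = 3:
 *
 *   round up:  (0x10001000 + 0x3fff) & ~0x3fff  = 0x10004000
 *   colour:    (3 << 12) & 0x3fff               = 0x3000
 *   result:    0x10004000 + 0x3000              = 0x10007000
 *
 * The low (colour) bits of the result match pgoff << PAGE_SHIFT, so every
 * shared mapping of a given file offset lands in the same cache set and
 * cannot alias in a virtually indexed cache.
 */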
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 5ef294fbb6e7..424ed4b92e6d 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -404,6 +404,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, | |||
404 | case CPU_5KC: | 404 | case CPU_5KC: |
405 | case CPU_TX49XX: | 405 | case CPU_TX49XX: |
406 | case CPU_PR4450: | 406 | case CPU_PR4450: |
407 | case CPU_XLR: | ||
407 | uasm_i_nop(p); | 408 | uasm_i_nop(p); |
408 | tlbw(p); | 409 | tlbw(p); |
409 | break; | 410 | break; |
@@ -1151,8 +1152,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) | |||
1151 | struct uasm_reloc *r = relocs; | 1152 | struct uasm_reloc *r = relocs; |
1152 | u32 *f; | 1153 | u32 *f; |
1153 | unsigned int final_len; | 1154 | unsigned int final_len; |
1154 | struct mips_huge_tlb_info htlb_info; | 1155 | struct mips_huge_tlb_info htlb_info __maybe_unused; |
1155 | enum vmalloc64_mode vmalloc_mode; | 1156 | enum vmalloc64_mode vmalloc_mode __maybe_unused; |
1156 | 1157 | ||
1157 | memset(tlb_handler, 0, sizeof(tlb_handler)); | 1158 | memset(tlb_handler, 0, sizeof(tlb_handler)); |
1158 | memset(labels, 0, sizeof(labels)); | 1159 | memset(labels, 0, sizeof(labels)); |
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 414f0c99b196..31180c321a1a 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c | |||
@@ -193,8 +193,6 @@ extern struct plat_smp_ops msmtc_smp_ops; | |||
193 | 193 | ||
194 | void __init prom_init(void) | 194 | void __init prom_init(void) |
195 | { | 195 | { |
196 | int result; | ||
197 | |||
198 | prom_argc = fw_arg0; | 196 | prom_argc = fw_arg0; |
199 | _prom_argv = (int *) fw_arg1; | 197 | _prom_argv = (int *) fw_arg1; |
200 | _prom_envp = (int *) fw_arg2; | 198 | _prom_envp = (int *) fw_arg2; |
@@ -360,20 +358,14 @@ void __init prom_init(void) | |||
360 | #ifdef CONFIG_SERIAL_8250_CONSOLE | 358 | #ifdef CONFIG_SERIAL_8250_CONSOLE |
361 | console_config(); | 359 | console_config(); |
362 | #endif | 360 | #endif |
363 | /* Early detection of CMP support */ | ||
364 | result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ); | ||
365 | |||
366 | #ifdef CONFIG_MIPS_CMP | 361 | #ifdef CONFIG_MIPS_CMP |
367 | if (result) | 362 | /* Early detection of CMP support */ |
363 | if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ)) | ||
368 | register_smp_ops(&cmp_smp_ops); | 364 | register_smp_ops(&cmp_smp_ops); |
365 | else | ||
369 | #endif | 366 | #endif |
370 | #ifdef CONFIG_MIPS_MT_SMP | 367 | #ifdef CONFIG_MIPS_MT_SMP |
371 | #ifdef CONFIG_MIPS_CMP | ||
372 | if (!result) | ||
373 | register_smp_ops(&vsmp_smp_ops); | 368 | register_smp_ops(&vsmp_smp_ops); |
374 | #else | ||
375 | register_smp_ops(&vsmp_smp_ops); | ||
376 | #endif | ||
377 | #endif | 369 | #endif |
378 | #ifdef CONFIG_MIPS_MT_SMTC | 370 | #ifdef CONFIG_MIPS_MT_SMTC |
379 | register_smp_ops(&msmtc_smp_ops); | 371 | register_smp_ops(&msmtc_smp_ops); |
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index 9027061f0ead..1d36c511a7a5 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c | |||
@@ -56,7 +56,6 @@ static DEFINE_RAW_SPINLOCK(mips_irq_lock); | |||
56 | static inline int mips_pcibios_iack(void) | 56 | static inline int mips_pcibios_iack(void) |
57 | { | 57 | { |
58 | int irq; | 58 | int irq; |
59 | u32 dummy; | ||
60 | 59 | ||
61 | /* | 60 | /* |
62 | * Determine highest priority pending interrupt by performing | 61 | * Determine highest priority pending interrupt by performing |
@@ -83,7 +82,7 @@ static inline int mips_pcibios_iack(void) | |||
83 | BONITO_PCIMAP_CFG = 0x20000; | 82 | BONITO_PCIMAP_CFG = 0x20000; |
84 | 83 | ||
85 | /* Flush Bonito register block */ | 84 | /* Flush Bonito register block */ |
86 | dummy = BONITO_PCIMAP_CFG; | 85 | (void) BONITO_PCIMAP_CFG; |
87 | iob(); /* sync */ | 86 | iob(); /* sync */ |
88 | 87 | ||
89 | irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg); | 88 | irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg); |
@@ -309,6 +308,8 @@ static void ipi_call_dispatch(void) | |||
309 | 308 | ||
310 | static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) | 309 | static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) |
311 | { | 310 | { |
311 | scheduler_ipi(); | ||
312 | |||
312 | return IRQ_HANDLED; | 313 | return IRQ_HANDLED; |
313 | } | 314 | } |
314 | 315 | ||
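The (void) BONITO_PCIMAP_CFG change above keeps the posted-write flush while removing the set-but-unused dummy variable. The general idiom, sketched with a hypothetical memory-mapped register:

#include <linux/types.h>

static void flush_posted_write(volatile u32 *dev_reg)
{
	*dev_reg = 0x20000;	/* write may be posted by the bus */
	(void)*dev_reg;		/* read back forces it out; result discarded */
}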
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig new file mode 100644 index 000000000000..a5ca743613f2 --- /dev/null +++ b/arch/mips/netlogic/Kconfig | |||
@@ -0,0 +1,5 @@ | |||
1 | config NLM_COMMON | ||
2 | bool | ||
3 | |||
4 | config NLM_XLR | ||
5 | bool | ||
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile new file mode 100644 index 000000000000..9bd3f731f62e --- /dev/null +++ b/arch/mips/netlogic/xlr/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | obj-y += setup.o platform.o irq.o time.o | ||
2 | obj-$(CONFIG_SMP) += smp.o smpboot.o | ||
3 | obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o | ||
4 | |||
5 | EXTRA_CFLAGS += -Werror | ||
diff --git a/arch/mips/netlogic/xlr/irq.c b/arch/mips/netlogic/xlr/irq.c new file mode 100644 index 000000000000..1446d58e364c --- /dev/null +++ b/arch/mips/netlogic/xlr/irq.c | |||
@@ -0,0 +1,300 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/init.h> | ||
37 | #include <linux/linkage.h> | ||
38 | #include <linux/interrupt.h> | ||
39 | #include <linux/spinlock.h> | ||
40 | #include <linux/mm.h> | ||
41 | |||
42 | #include <asm/mipsregs.h> | ||
43 | |||
44 | #include <asm/netlogic/xlr/iomap.h> | ||
45 | #include <asm/netlogic/xlr/pic.h> | ||
46 | #include <asm/netlogic/xlr/xlr.h> | ||
47 | |||
48 | #include <asm/netlogic/interrupt.h> | ||
49 | #include <asm/netlogic/mips-extns.h> | ||
50 | |||
51 | static u64 nlm_irq_mask; | ||
52 | static DEFINE_SPINLOCK(nlm_pic_lock); | ||
53 | |||
54 | static void xlr_pic_enable(struct irq_data *d) | ||
55 | { | ||
56 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
57 | unsigned long flags; | ||
58 | nlm_reg_t reg; | ||
59 | int irq = d->irq; | ||
60 | |||
61 | WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); | ||
62 | |||
63 | spin_lock_irqsave(&nlm_pic_lock, flags); | ||
64 | reg = netlogic_read_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE); | ||
65 | netlogic_write_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE, | ||
66 | reg | (1 << 6) | (1 << 30) | (1 << 31)); | ||
67 | spin_unlock_irqrestore(&nlm_pic_lock, flags); | ||
68 | } | ||
69 | |||
70 | static void xlr_pic_mask(struct irq_data *d) | ||
71 | { | ||
72 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
73 | unsigned long flags; | ||
74 | nlm_reg_t reg; | ||
75 | int irq = d->irq; | ||
76 | |||
77 | WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); | ||
78 | |||
79 | spin_lock_irqsave(&nlm_pic_lock, flags); | ||
80 | reg = netlogic_read_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE); | ||
81 | netlogic_write_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE, | ||
82 | reg | (1 << 6) | (1 << 30) | (0 << 31)); | ||
83 | spin_unlock_irqrestore(&nlm_pic_lock, flags); | ||
84 | } | ||
85 | |||
86 | #ifdef CONFIG_PCI | ||
87 | /* Extra ACK needed for the XLR on-chip PCI controller */ | ||
88 | static void xlr_pci_ack(struct irq_data *d) | ||
89 | { | ||
90 | nlm_reg_t *pci_mmio = netlogic_io_mmio(NETLOGIC_IO_PCIX_OFFSET); | ||
91 | |||
92 | netlogic_read_reg(pci_mmio, (0x140 >> 2)); | ||
93 | } | ||
94 | |||
95 | /* Extra ACK needed for the XLS on-chip PCIe controller */ | ||
96 | static void xls_pcie_ack(struct irq_data *d) | ||
97 | { | ||
98 | nlm_reg_t *pcie_mmio_le = netlogic_io_mmio(NETLOGIC_IO_PCIE_1_OFFSET); | ||
99 | |||
100 | switch (d->irq) { | ||
101 | case PIC_PCIE_LINK0_IRQ: | ||
102 | netlogic_write_reg(pcie_mmio_le, (0x90 >> 2), 0xffffffff); | ||
103 | break; | ||
104 | case PIC_PCIE_LINK1_IRQ: | ||
105 | netlogic_write_reg(pcie_mmio_le, (0x94 >> 2), 0xffffffff); | ||
106 | break; | ||
107 | case PIC_PCIE_LINK2_IRQ: | ||
108 | netlogic_write_reg(pcie_mmio_le, (0x190 >> 2), 0xffffffff); | ||
109 | break; | ||
110 | case PIC_PCIE_LINK3_IRQ: | ||
111 | netlogic_write_reg(pcie_mmio_le, (0x194 >> 2), 0xffffffff); | ||
112 | break; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | /* For XLS B silicon, the third and fourth PCI link interrupts differ */ | ||
117 | static void xls_pcie_ack_b(struct irq_data *d) | ||
118 | { | ||
119 | nlm_reg_t *pcie_mmio_le = netlogic_io_mmio(NETLOGIC_IO_PCIE_1_OFFSET); | ||
120 | |||
121 | switch (d->irq) { | ||
122 | case PIC_PCIE_LINK0_IRQ: | ||
123 | netlogic_write_reg(pcie_mmio_le, (0x90 >> 2), 0xffffffff); | ||
124 | break; | ||
125 | case PIC_PCIE_LINK1_IRQ: | ||
126 | netlogic_write_reg(pcie_mmio_le, (0x94 >> 2), 0xffffffff); | ||
127 | break; | ||
128 | case PIC_PCIE_XLSB0_LINK2_IRQ: | ||
129 | netlogic_write_reg(pcie_mmio_le, (0x190 >> 2), 0xffffffff); | ||
130 | break; | ||
131 | case PIC_PCIE_XLSB0_LINK3_IRQ: | ||
132 | netlogic_write_reg(pcie_mmio_le, (0x194 >> 2), 0xffffffff); | ||
133 | break; | ||
134 | } | ||
135 | } | ||
136 | #endif | ||
137 | |||
138 | static void xlr_pic_ack(struct irq_data *d) | ||
139 | { | ||
140 | unsigned long flags; | ||
141 | nlm_reg_t *mmio; | ||
142 | int irq = d->irq; | ||
143 | void *hd = irq_data_get_irq_handler_data(d); | ||
144 | |||
145 | WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); | ||
146 | |||
147 | if (hd) { | ||
148 | void (*extra_ack)(void *) = hd; | ||
149 | extra_ack(d); | ||
150 | } | ||
151 | mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
152 | spin_lock_irqsave(&nlm_pic_lock, flags); | ||
153 | netlogic_write_reg(mmio, PIC_INT_ACK, (1 << (irq - PIC_IRQ_BASE))); | ||
154 | spin_unlock_irqrestore(&nlm_pic_lock, flags); | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * This chip definition handles interrupts routed thru the XLR | ||
159 | * hardware PIC, currently IRQs 8-39 are mapped to hardware intr | ||
160 | * 0-31 wired the XLR PIC | ||
161 | */ | ||
162 | static struct irq_chip xlr_pic = { | ||
163 | .name = "XLR-PIC", | ||
164 | .irq_enable = xlr_pic_enable, | ||
165 | .irq_mask = xlr_pic_mask, | ||
166 | .irq_ack = xlr_pic_ack, | ||
167 | }; | ||
168 | |||
169 | static void rsvd_irq_handler(struct irq_data *d) | ||
170 | { | ||
171 | WARN(d->irq >= PIC_IRQ_BASE, "Bad irq %d", d->irq); | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * Chip definition for CPU-originated interrupts (timer, msg) and | ||
176 | * IPIs | ||
177 | */ | ||
178 | struct irq_chip nlm_cpu_intr = { | ||
179 | .name = "XLR-CPU-INTR", | ||
180 | .irq_enable = rsvd_irq_handler, | ||
181 | .irq_mask = rsvd_irq_handler, | ||
182 | .irq_ack = rsvd_irq_handler, | ||
183 | }; | ||
184 | |||
185 | void __init init_xlr_irqs(void) | ||
186 | { | ||
187 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
188 | uint32_t thread_mask = 1; | ||
189 | int level, i; | ||
190 | |||
191 | pr_info("Interrupt thread mask [%x]\n", thread_mask); | ||
192 | for (i = 0; i < PIC_NUM_IRTS; i++) { | ||
193 | level = PIC_IRQ_IS_EDGE_TRIGGERED(i); | ||
194 | |||
195 | /* Bind all PIC irqs to boot cpu */ | ||
196 | netlogic_write_reg(mmio, PIC_IRT_0_BASE + i, thread_mask); | ||
197 | |||
198 | /* | ||
199 | * Use local scheduling and high polarity for all IRTs | ||
200 | * Invalidate all IRTs, by default | ||
201 | */ | ||
202 | netlogic_write_reg(mmio, PIC_IRT_1_BASE + i, | ||
203 | (level << 30) | (1 << 6) | (PIC_IRQ_BASE + i)); | ||
204 | } | ||
205 | |||
206 | /* Make all IRQs level triggered by default */ | ||
207 | for (i = 0; i < NR_IRQS; i++) { | ||
208 | if (PIC_IRQ_IS_IRT(i)) | ||
209 | irq_set_chip_and_handler(i, &xlr_pic, handle_level_irq); | ||
210 | else | ||
211 | irq_set_chip_and_handler(i, &nlm_cpu_intr, | ||
212 | handle_level_irq); | ||
213 | } | ||
214 | #ifdef CONFIG_SMP | ||
215 | irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr, | ||
216 | nlm_smp_function_ipi_handler); | ||
217 | irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr, | ||
218 | nlm_smp_resched_ipi_handler); | ||
219 | nlm_irq_mask |= | ||
220 | ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE)); | ||
221 | #endif | ||
222 | |||
223 | #ifdef CONFIG_PCI | ||
224 | /* | ||
225 | * For PCI interrupts, we need to ack the PIC controller too; we | ||
226 | * overload the irq handler data to do this. | ||
227 | */ | ||
228 | if (nlm_chip_is_xls()) { | ||
229 | if (nlm_chip_is_xls_b()) { | ||
230 | irq_set_handler_data(PIC_PCIE_LINK0_IRQ, | ||
231 | xls_pcie_ack_b); | ||
232 | irq_set_handler_data(PIC_PCIE_LINK1_IRQ, | ||
233 | xls_pcie_ack_b); | ||
234 | irq_set_handler_data(PIC_PCIE_XLSB0_LINK2_IRQ, | ||
235 | xls_pcie_ack_b); | ||
236 | irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, | ||
237 | xls_pcie_ack_b); | ||
238 | } else { | ||
239 | irq_set_handler_data(PIC_PCIE_LINK0_IRQ, xls_pcie_ack); | ||
240 | irq_set_handler_data(PIC_PCIE_LINK1_IRQ, xls_pcie_ack); | ||
241 | irq_set_handler_data(PIC_PCIE_LINK2_IRQ, xls_pcie_ack); | ||
242 | irq_set_handler_data(PIC_PCIE_LINK3_IRQ, xls_pcie_ack); | ||
243 | } | ||
244 | } else { | ||
245 | /* XLR PCI controller ACK */ | ||
246 | irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack); | ||
247 | } | ||
248 | #endif | ||
249 | /* Unmask all PIC-related interrupts. If no handler is installed by a | ||
250 | * driver, it will just ack the interrupt and return. | ||
251 | */ | ||
252 | for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) | ||
253 | nlm_irq_mask |= (1ULL << i); | ||
254 | |||
255 | nlm_irq_mask |= (1ULL << IRQ_TIMER); | ||
256 | } | ||
257 | |||
258 | void __init arch_init_irq(void) | ||
259 | { | ||
260 | /* Initialize the irq descriptors */ | ||
261 | init_xlr_irqs(); | ||
262 | write_c0_eimr(nlm_irq_mask); | ||
263 | } | ||
264 | |||
265 | void __cpuinit nlm_smp_irq_init(void) | ||
266 | { | ||
267 | /* set interrupt mask for secondary (non-boot) CPUs */ | ||
268 | write_c0_eimr(nlm_irq_mask); | ||
269 | } | ||
270 | |||
271 | asmlinkage void plat_irq_dispatch(void) | ||
272 | { | ||
273 | uint64_t eirr; | ||
274 | int i; | ||
275 | |||
276 | eirr = read_c0_eirr() & read_c0_eimr(); | ||
277 | if (!eirr) | ||
278 | return; | ||
279 | |||
280 | /* no EIRR ack needed here; writing compare clears the interrupt */ | ||
281 | if (eirr & (1 << IRQ_TIMER)) { | ||
282 | do_IRQ(IRQ_TIMER); | ||
283 | return; | ||
284 | } | ||
285 | |||
286 | /* TODO: use dclz to optimize the scan below */ | ||
287 | for (i = 63; i != -1; i--) { | ||
288 | if (eirr & (1ULL << i)) | ||
289 | break; | ||
290 | } | ||
291 | if (i == -1) { | ||
292 | pr_err("no interrupt !!\n"); | ||
293 | return; | ||
294 | } | ||
295 | |||
296 | /* Ack eirr */ | ||
297 | write_c0_eirr(1ULL << i); | ||
298 | |||
299 | do_IRQ(i); | ||
300 | } | ||
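The descending bit scan in plat_irq_dispatch() above (see the dclz comment) could be expressed with fls64(), which returns the 1-based index of the highest set bit (0 if none) and compiles down to a count-leading-zeros instruction where the CPU has one. A sketch, not part of the patch:

#include <linux/bitops.h>
#include <linux/types.h>

static int highest_pending_irq(u64 eirr)
{
	int hi = fls64(eirr);

	return hi ? hi - 1 : -1;	/* -1 means no interrupt pending */
}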
diff --git a/arch/mips/netlogic/xlr/platform.c b/arch/mips/netlogic/xlr/platform.c new file mode 100644 index 000000000000..609ec2534642 --- /dev/null +++ b/arch/mips/netlogic/xlr/platform.c | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * Copyright 2011, Netlogic Microsystems. | ||
3 | * Copyright 2004, Matt Porter <mporter@kernel.crashing.org> | ||
4 | * | ||
5 | * This file is licensed under the terms of the GNU General Public | ||
6 | * License version 2. This program is licensed "as is" without any | ||
7 | * warranty of any kind, whether express or implied. | ||
8 | */ | ||
9 | |||
10 | #include <linux/device.h> | ||
11 | #include <linux/platform_device.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/resource.h> | ||
15 | #include <linux/serial_8250.h> | ||
16 | #include <linux/serial_reg.h> | ||
17 | |||
18 | #include <asm/netlogic/xlr/iomap.h> | ||
19 | #include <asm/netlogic/xlr/pic.h> | ||
20 | #include <asm/netlogic/xlr/xlr.h> | ||
21 | |||
22 | unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset) | ||
23 | { | ||
24 | nlm_reg_t *mmio; | ||
25 | unsigned int value; | ||
26 | |||
27 | /* XLR uart does not need any mapping of regs */ | ||
28 | mmio = (nlm_reg_t *)(p->membase + (offset << p->regshift)); | ||
29 | value = netlogic_read_reg(mmio, 0); | ||
30 | |||
31 | /* See XLR/XLS errata */ | ||
32 | if (offset == UART_MSR) | ||
33 | value ^= 0xF0; | ||
34 | else if (offset == UART_MCR) | ||
35 | value ^= 0x3; | ||
36 | |||
37 | return value; | ||
38 | } | ||
39 | |||
40 | void nlm_xlr_uart_out(struct uart_port *p, int offset, int value) | ||
41 | { | ||
42 | nlm_reg_t *mmio; | ||
43 | |||
44 | /* XLR uart does not need any mapping of regs */ | ||
45 | mmio = (nlm_reg_t *)(p->membase + (offset << p->regshift)); | ||
46 | |||
47 | /* See XLR/XLS errata */ | ||
48 | if (offset == UART_MSR) | ||
49 | value ^= 0xF0; | ||
50 | else if (offset == UART_MCR) | ||
51 | value ^= 0x3; | ||
52 | |||
53 | netlogic_write_reg(mmio, 0, value); | ||
54 | } | ||
55 | |||
56 | #define PORT(_irq) \ | ||
57 | { \ | ||
58 | .irq = _irq, \ | ||
59 | .regshift = 2, \ | ||
60 | .iotype = UPIO_MEM32, \ | ||
61 | .flags = (UPF_SKIP_TEST | \ | ||
62 | UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\ | ||
63 | .uartclk = PIC_CLKS_PER_SEC, \ | ||
64 | .type = PORT_16550A, \ | ||
65 | .serial_in = nlm_xlr_uart_in, \ | ||
66 | .serial_out = nlm_xlr_uart_out, \ | ||
67 | } | ||
68 | |||
69 | static struct plat_serial8250_port xlr_uart_data[] = { | ||
70 | PORT(PIC_UART_0_IRQ), | ||
71 | PORT(PIC_UART_1_IRQ), | ||
72 | {}, | ||
73 | }; | ||
74 | |||
75 | static struct platform_device uart_device = { | ||
76 | .name = "serial8250", | ||
77 | .id = PLAT8250_DEV_PLATFORM, | ||
78 | .dev = { | ||
79 | .platform_data = xlr_uart_data, | ||
80 | }, | ||
81 | }; | ||
82 | |||
83 | static int __init nlm_uart_init(void) | ||
84 | { | ||
85 | nlm_reg_t *mmio; | ||
86 | |||
87 | mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); | ||
88 | xlr_uart_data[0].membase = (void __iomem *)mmio; | ||
89 | xlr_uart_data[0].mapbase = CPHYSADDR((unsigned long)mmio); | ||
90 | |||
91 | mmio = netlogic_io_mmio(NETLOGIC_IO_UART_1_OFFSET); | ||
92 | xlr_uart_data[1].membase = (void __iomem *)mmio; | ||
93 | xlr_uart_data[1].mapbase = CPHYSADDR((unsigned long)mmio); | ||
94 | |||
95 | return platform_device_register(&uart_device); | ||
96 | } | ||
97 | |||
98 | arch_initcall(nlm_uart_init); | ||
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c new file mode 100644 index 000000000000..482802569e74 --- /dev/null +++ b/arch/mips/netlogic/xlr/setup.c | |||
@@ -0,0 +1,188 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/serial_8250.h> | ||
37 | #include <linux/pm.h> | ||
38 | |||
39 | #include <asm/reboot.h> | ||
40 | #include <asm/time.h> | ||
41 | #include <asm/bootinfo.h> | ||
42 | #include <asm/smp-ops.h> | ||
43 | |||
44 | #include <asm/netlogic/interrupt.h> | ||
45 | #include <asm/netlogic/psb-bootinfo.h> | ||
46 | |||
47 | #include <asm/netlogic/xlr/xlr.h> | ||
48 | #include <asm/netlogic/xlr/iomap.h> | ||
49 | #include <asm/netlogic/xlr/pic.h> | ||
50 | #include <asm/netlogic/xlr/gpio.h> | ||
51 | |||
52 | unsigned long netlogic_io_base = (unsigned long)(DEFAULT_NETLOGIC_IO_BASE); | ||
53 | unsigned long nlm_common_ebase = 0x0; | ||
54 | struct psb_info nlm_prom_info; | ||
55 | |||
56 | static void nlm_early_serial_setup(void) | ||
57 | { | ||
58 | struct uart_port s; | ||
59 | nlm_reg_t *uart_base; | ||
60 | |||
61 | uart_base = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); | ||
62 | memset(&s, 0, sizeof(s)); | ||
63 | s.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST; | ||
64 | s.iotype = UPIO_MEM32; | ||
65 | s.regshift = 2; | ||
66 | s.irq = PIC_UART_0_IRQ; | ||
67 | s.uartclk = PIC_CLKS_PER_SEC; | ||
68 | s.serial_in = nlm_xlr_uart_in; | ||
69 | s.serial_out = nlm_xlr_uart_out; | ||
70 | s.mapbase = (unsigned long)uart_base; | ||
71 | s.membase = (unsigned char __iomem *)uart_base; | ||
72 | early_serial_setup(&s); | ||
73 | } | ||
74 | |||
75 | static void nlm_linux_exit(void) | ||
76 | { | ||
77 | nlm_reg_t *mmio; | ||
78 | |||
79 | mmio = netlogic_io_mmio(NETLOGIC_IO_GPIO_OFFSET); | ||
80 | /* trigger a chip reset by writing 1 to GPIO_SWRESET_REG */ | ||
81 | netlogic_write_reg(mmio, NETLOGIC_GPIO_SWRESET_REG, 1); | ||
82 | for ( ; ; ) | ||
83 | cpu_wait(); | ||
84 | } | ||
85 | |||
86 | void __init plat_mem_setup(void) | ||
87 | { | ||
88 | panic_timeout = 5; | ||
89 | _machine_restart = (void (*)(char *))nlm_linux_exit; | ||
90 | _machine_halt = nlm_linux_exit; | ||
91 | pm_power_off = nlm_linux_exit; | ||
92 | } | ||
93 | |||
94 | const char *get_system_type(void) | ||
95 | { | ||
96 | return "Netlogic XLR/XLS Series"; | ||
97 | } | ||
98 | |||
99 | void __init prom_free_prom_memory(void) | ||
100 | { | ||
101 | /* Nothing yet */ | ||
102 | } | ||
103 | |||
104 | static void build_arcs_cmdline(int *argv) | ||
105 | { | ||
106 | int i, remain, len; | ||
107 | char *arg; | ||
108 | |||
109 | remain = sizeof(arcs_cmdline) - 1; | ||
110 | arcs_cmdline[0] = '\0'; | ||
111 | for (i = 0; argv[i] != 0; i++) { | ||
112 | arg = (char *)(long)argv[i]; | ||
113 | len = strlen(arg); | ||
114 | if (len + 1 > remain) | ||
115 | break; | ||
116 | strcat(arcs_cmdline, arg); | ||
117 | strcat(arcs_cmdline, " "); | ||
118 | remain -= len + 1; | ||
119 | } | ||
120 | |||
121 | /* Add the default options here */ | ||
122 | if ((strstr(arcs_cmdline, "console=")) == NULL) { | ||
123 | arg = "console=ttyS0,38400 "; | ||
124 | len = strlen(arg); | ||
125 | if (len > remain) | ||
126 | goto fail; | ||
127 | strcat(arcs_cmdline, arg); | ||
128 | remain -= len; | ||
129 | } | ||
130 | #ifdef CONFIG_BLK_DEV_INITRD | ||
131 | if ((strstr(arcs_cmdline, "rdinit=")) == NULL) { | ||
132 | arg = "rdinit=/sbin/init "; | ||
133 | len = strlen(arg); | ||
134 | if (len > remain) | ||
135 | goto fail; | ||
136 | strcat(arcs_cmdline, arg); | ||
137 | remain -= len; | ||
138 | } | ||
139 | #endif | ||
140 | return; | ||
141 | fail: | ||
142 | panic("Cannot add %s, command line too big!", arg); | ||
143 | } | ||
144 | |||
145 | static void prom_add_memory(void) | ||
146 | { | ||
147 | struct nlm_boot_mem_map *bootm; | ||
148 | u64 start, size; | ||
149 | u64 pref_backup = 512; /* avoid prefetch walking beyond end */ | ||
150 | int i; | ||
151 | |||
152 | bootm = (void *)(long)nlm_prom_info.psb_mem_map; | ||
153 | for (i = 0; i < bootm->nr_map; i++) { | ||
154 | if (bootm->map[i].type != BOOT_MEM_RAM) | ||
155 | continue; | ||
156 | start = bootm->map[i].addr; | ||
157 | size = bootm->map[i].size; | ||
158 | |||
159 | /* Workaround for reusing bootloader memory */ | ||
160 | if (i == 0 && start == 0 && size == 0x0c000000) | ||
161 | size = 0x0ff00000; | ||
162 | |||
163 | add_memory_region(start, size - pref_backup, BOOT_MEM_RAM); | ||
164 | } | ||
165 | } | ||
166 | |||
167 | void __init prom_init(void) | ||
168 | { | ||
169 | int *argv, *envp; /* passed as 32 bit ptrs */ | ||
170 | struct psb_info *prom_infop; | ||
171 | |||
172 | /* truncate to 32 bit and sign extend all args */ | ||
173 | argv = (int *)(long)(int)fw_arg1; | ||
174 | envp = (int *)(long)(int)fw_arg2; | ||
175 | prom_infop = (struct psb_info *)(long)(int)fw_arg3; | ||
176 | |||
177 | nlm_prom_info = *prom_infop; | ||
178 | |||
179 | nlm_early_serial_setup(); | ||
180 | build_arcs_cmdline(argv); | ||
181 | nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1)); | ||
182 | prom_add_memory(); | ||
183 | |||
184 | #ifdef CONFIG_SMP | ||
185 | nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map); | ||
186 | register_smp_ops(&nlm_smp_ops); | ||
187 | #endif | ||
188 | } | ||
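A worked example of the (int *)(long)(int) cast chain in prom_init() above: the bootloader passes 32-bit pointers in 64-bit registers, so truncating to int and widening back through long sign-extends them into proper 64-bit KSEG addresses.

/*
 *   u64 raw = 0x000000008c000000;    32-bit KSEG0 pointer from firmware
 *   (int)raw              = 0x8c000000          (bit 31 set, negative)
 *   (long)(int)raw        = 0xffffffff8c000000  (sign-extended)
 *   (int *)(long)(int)raw   now a valid 64-bit kernel pointer
 */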
diff --git a/arch/mips/netlogic/xlr/smp.c b/arch/mips/netlogic/xlr/smp.c new file mode 100644 index 000000000000..b495a7f1433b --- /dev/null +++ b/arch/mips/netlogic/xlr/smp.c | |||
@@ -0,0 +1,225 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/smp.h> | ||
39 | #include <linux/irq.h> | ||
40 | |||
41 | #include <asm/mmu_context.h> | ||
42 | |||
43 | #include <asm/netlogic/interrupt.h> | ||
44 | #include <asm/netlogic/mips-extns.h> | ||
45 | |||
46 | #include <asm/netlogic/xlr/iomap.h> | ||
47 | #include <asm/netlogic/xlr/pic.h> | ||
48 | #include <asm/netlogic/xlr/xlr.h> | ||
49 | |||
50 | void core_send_ipi(int logical_cpu, unsigned int action) | ||
51 | { | ||
52 | int cpu = cpu_logical_map(logical_cpu); | ||
53 | u32 tid = cpu & 0x3; | ||
54 | u32 pid = (cpu >> 2) & 0x07; | ||
55 | u32 ipi = (tid << 16) | (pid << 20); | ||
56 | |||
57 | if (action & SMP_CALL_FUNCTION) | ||
58 | ipi |= IRQ_IPI_SMP_FUNCTION; | ||
59 | else if (action & SMP_RESCHEDULE_YOURSELF) | ||
60 | ipi |= IRQ_IPI_SMP_RESCHEDULE; | ||
61 | else | ||
62 | return; | ||
63 | |||
64 | pic_send_ipi(ipi); | ||
65 | } | ||
66 | |||
67 | void nlm_send_ipi_single(int cpu, unsigned int action) | ||
68 | { | ||
69 | core_send_ipi(cpu, action); | ||
70 | } | ||
71 | |||
72 | void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action) | ||
73 | { | ||
74 | int cpu; | ||
75 | |||
76 | for_each_cpu(cpu, mask) { | ||
77 | core_send_ipi(cpu, action); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | /* IRQ_IPI_SMP_FUNCTION Handler */ | ||
82 | void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) | ||
83 | { | ||
84 | smp_call_function_interrupt(); | ||
85 | } | ||
86 | |||
87 | /* IRQ_IPI_SMP_RESCHEDULE handler */ | ||
88 | void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc) | ||
89 | { | ||
90 | set_need_resched(); | ||
91 | } | ||
92 | |||
93 | void nlm_common_ipi_handler(int irq, struct pt_regs *regs) | ||
94 | { | ||
95 | if (irq == IRQ_IPI_SMP_FUNCTION) { | ||
96 | smp_call_function_interrupt(); | ||
97 | } else { | ||
98 | /* Announce that we need rescheduling */ | ||
99 | set_need_resched(); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * Early CPU init, called before going into generic MIPS code | ||
105 | */ | ||
106 | void nlm_early_init_secondary(void) | ||
107 | { | ||
108 | write_c0_ebase((uint32_t)nlm_common_ebase); | ||
109 | /* TLB partition here later */ | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * Code to run on secondary just after probing the CPU | ||
114 | */ | ||
115 | static void __cpuinit nlm_init_secondary(void) | ||
116 | { | ||
117 | nlm_smp_irq_init(); | ||
118 | } | ||
119 | |||
120 | void nlm_smp_finish(void) | ||
121 | { | ||
122 | #ifdef notyet | ||
123 | nlm_common_msgring_cpu_init(); | ||
124 | #endif | ||
125 | } | ||
126 | |||
127 | void nlm_cpus_done(void) | ||
128 | { | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Boot all other cpus in the system, initialize them, and bring them into | ||
133 | * the boot function | ||
134 | */ | ||
135 | int nlm_cpu_unblock[NR_CPUS]; | ||
136 | int nlm_cpu_ready[NR_CPUS]; | ||
137 | unsigned long nlm_next_gp; | ||
138 | unsigned long nlm_next_sp; | ||
139 | cpumask_t phys_cpu_present_map; | ||
140 | |||
141 | void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) | ||
142 | { | ||
143 | unsigned long gp = (unsigned long)task_thread_info(idle); | ||
144 | unsigned long sp = (unsigned long)__KSTK_TOS(idle); | ||
145 | int cpu = cpu_logical_map(logical_cpu); | ||
146 | |||
147 | nlm_next_sp = sp; | ||
148 | nlm_next_gp = gp; | ||
149 | |||
150 | /* barrier */ | ||
151 | __sync(); | ||
152 | nlm_cpu_unblock[cpu] = 1; | ||
153 | } | ||
154 | |||
155 | void __init nlm_smp_setup(void) | ||
156 | { | ||
157 | unsigned int boot_cpu; | ||
158 | int num_cpus, i; | ||
159 | |||
160 | boot_cpu = hard_smp_processor_id(); | ||
161 | cpus_clear(phys_cpu_present_map); | ||
162 | |||
163 | cpu_set(boot_cpu, phys_cpu_present_map); | ||
164 | __cpu_number_map[boot_cpu] = 0; | ||
165 | __cpu_logical_map[0] = boot_cpu; | ||
166 | cpu_set(0, cpu_possible_map); | ||
167 | |||
168 | num_cpus = 1; | ||
169 | for (i = 0; i < NR_CPUS; i++) { | ||
170 | if (nlm_cpu_ready[i]) { | ||
171 | cpu_set(i, phys_cpu_present_map); | ||
172 | __cpu_number_map[i] = num_cpus; | ||
173 | __cpu_logical_map[num_cpus] = i; | ||
174 | cpu_set(num_cpus, cpu_possible_map); | ||
175 | ++num_cpus; | ||
176 | } | ||
177 | } | ||
178 | |||
179 | pr_info("Phys CPU present map: %lx, possible map %lx\n", | ||
180 | (unsigned long)phys_cpu_present_map.bits[0], | ||
181 | (unsigned long)cpu_possible_map.bits[0]); | ||
182 | |||
183 | pr_info("Detected %i Slave CPU(s)\n", num_cpus); | ||
184 | } | ||
185 | |||
186 | void nlm_prepare_cpus(unsigned int max_cpus) | ||
187 | { | ||
188 | } | ||
189 | |||
190 | struct plat_smp_ops nlm_smp_ops = { | ||
191 | .send_ipi_single = nlm_send_ipi_single, | ||
192 | .send_ipi_mask = nlm_send_ipi_mask, | ||
193 | .init_secondary = nlm_init_secondary, | ||
194 | .smp_finish = nlm_smp_finish, | ||
195 | .cpus_done = nlm_cpus_done, | ||
196 | .boot_secondary = nlm_boot_secondary, | ||
197 | .smp_setup = nlm_smp_setup, | ||
198 | .prepare_cpus = nlm_prepare_cpus, | ||
199 | }; | ||
200 | |||
201 | unsigned long secondary_entry_point; | ||
202 | |||
203 | int nlm_wakeup_secondary_cpus(u32 wakeup_mask) | ||
204 | { | ||
205 | unsigned int tid, pid, ipi, i, boot_cpu; | ||
206 | void *reset_vec; | ||
207 | |||
208 | secondary_entry_point = (unsigned long)prom_pre_boot_secondary_cpus; | ||
209 | reset_vec = (void *)CKSEG1ADDR(0x1fc00000); | ||
210 | memcpy(reset_vec, nlm_boot_smp_nmi, 0x80); | ||
211 | boot_cpu = hard_smp_processor_id(); | ||
212 | |||
213 | for (i = 0; i < NR_CPUS; i++) { | ||
214 | if (i == boot_cpu) | ||
215 | continue; | ||
216 | if (wakeup_mask & (1u << i)) { | ||
217 | tid = i & 0x3; | ||
218 | pid = (i >> 2) & 0x7; | ||
219 | ipi = (tid << 16) | (pid << 20) | (1 << 8); | ||
220 | pic_send_ipi(ipi); | ||
221 | } | ||
222 | } | ||
223 | |||
224 | return 0; | ||
225 | } | ||
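core_send_ipi() and nlm_wakeup_secondary_cpus() above both build the same PIC IPI word by hand. A sketch of that layout as a helper, assuming (as the code does) that the physical CPU number is core * 4 + thread, and that bit 8 marks an NMI wakeup:

#include <linux/types.h>

static inline u32 xlr_ipi_word(int cpu, u32 irq_or_nmi_bits)
{
	u32 tid = cpu & 0x3;		/* hardware thread within the core */
	u32 pid = (cpu >> 2) & 0x7;	/* physical core number */

	return (tid << 16) | (pid << 20) | irq_or_nmi_bits;
}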
diff --git a/arch/mips/netlogic/xlr/smpboot.S b/arch/mips/netlogic/xlr/smpboot.S new file mode 100644 index 000000000000..b8e074402c99 --- /dev/null +++ b/arch/mips/netlogic/xlr/smpboot.S | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <asm/asm.h> | ||
36 | #include <asm/asm-offsets.h> | ||
37 | #include <asm/regdef.h> | ||
38 | #include <asm/mipsregs.h> | ||
39 | |||
40 | |||
41 | /* Don't jump to a Linux function from the bootloader stack; switch | ||
42 | * stacks here. The kernel may allocate bootloader memory before all | ||
43 | * CPUs are brought up (e.g. the inode cache region), and we must not | ||
44 | * overwrite that memory. | ||
45 | */ | ||
46 | NESTED(prom_pre_boot_secondary_cpus, 16, sp) | ||
47 | .set mips64 | ||
48 | mfc0 t0, $15, 1 # read ebase | ||
49 | andi t0, 0x1f # t0 has the processor_id() | ||
50 | sll t0, 2 # offset in cpu array | ||
51 | |||
52 | PTR_LA t1, nlm_cpu_ready # mark CPU ready | ||
53 | PTR_ADDU t1, t0 | ||
54 | li t2, 1 | ||
55 | sw t2, 0(t1) | ||
56 | |||
57 | PTR_LA t1, nlm_cpu_unblock | ||
58 | PTR_ADDU t1, t0 | ||
59 | 1: lw t2, 0(t1) # wait till unblocked | ||
60 | beqz t2, 1b | ||
61 | nop | ||
62 | |||
63 | PTR_LA t1, nlm_next_sp | ||
64 | PTR_L sp, 0(t1) | ||
65 | PTR_LA t1, nlm_next_gp | ||
66 | PTR_L gp, 0(t1) | ||
67 | |||
68 | PTR_LA t0, nlm_early_init_secondary | ||
69 | jalr t0 | ||
70 | nop | ||
71 | |||
72 | PTR_LA t0, smp_bootstrap | ||
73 | jr t0 | ||
74 | nop | ||
75 | END(prom_pre_boot_secondary_cpus) | ||
76 | |||
77 | NESTED(nlm_boot_smp_nmi, 0, sp) | ||
78 | .set push | ||
79 | .set noat | ||
80 | .set mips64 | ||
81 | .set noreorder | ||
82 | |||
83 | /* Clear the NMI and BEV bits */ | ||
84 | MFC0 k0, CP0_STATUS | ||
85 | li k1, 0xffb7ffff | ||
86 | and k0, k0, k1 | ||
87 | MTC0 k0, CP0_STATUS | ||
88 | |||
89 | PTR_LA k1, secondary_entry_point | ||
90 | PTR_L k0, 0(k1) | ||
91 | jr k0 | ||
92 | nop | ||
93 | .set pop | ||
94 | END(nlm_boot_smp_nmi) | ||
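The handshake in prom_pre_boot_secondary_cpus above, restated in C for readability. This is illustrative only; the real code must stay in assembly because it switches stacks before calling into the kernel.

void secondary_handshake_sketch(int cpu)
{
	nlm_cpu_ready[cpu] = 1;		/* let nlm_smp_setup() count us */

	while (!nlm_cpu_unblock[cpu])	/* wait for nlm_boot_secondary() */
		;

	/* load sp from nlm_next_sp and gp from nlm_next_gp (assembly) */
	nlm_early_init_secondary();
	/* jump to smp_bootstrap */
}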
diff --git a/arch/mips/netlogic/xlr/time.c b/arch/mips/netlogic/xlr/time.c new file mode 100644 index 000000000000..0d81b262593c --- /dev/null +++ b/arch/mips/netlogic/xlr/time.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/init.h> | ||
36 | |||
37 | #include <asm/time.h> | ||
38 | #include <asm/netlogic/interrupt.h> | ||
39 | #include <asm/netlogic/psb-bootinfo.h> | ||
40 | |||
41 | unsigned int __cpuinit get_c0_compare_int(void) | ||
42 | { | ||
43 | return IRQ_TIMER; | ||
44 | } | ||
45 | |||
46 | void __init plat_time_init(void) | ||
47 | { | ||
48 | mips_hpt_frequency = nlm_prom_info.cpu_frequency; | ||
49 | pr_info("MIPS counter frequency [%ld]\n", | ||
50 | (unsigned long)mips_hpt_frequency); | ||
51 | } | ||
diff --git a/arch/mips/netlogic/xlr/xlr_console.c b/arch/mips/netlogic/xlr/xlr_console.c new file mode 100644 index 000000000000..759df0692201 --- /dev/null +++ b/arch/mips/netlogic/xlr/xlr_console.c | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/types.h> | ||
36 | #include <asm/netlogic/xlr/iomap.h> | ||
37 | |||
38 | void prom_putchar(char c) | ||
39 | { | ||
40 | nlm_reg_t *mmio; | ||
41 | |||
42 | mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); | ||
43 | while (netlogic_read_reg(mmio, 0x5) == 0) | ||
44 | ; | ||
45 | netlogic_write_reg(mmio, 0x0, c); | ||
46 | } | ||
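prom_putchar() above exits its poll as soon as any LSR bit reads non-zero; the conventional 8250 transmit poll tests the THRE bit specifically. A sketch using the same netlogic accessors and the standard register definitions (an assumption, since the original hardcodes offset 0x5):

#include <linux/serial_reg.h>
#include <asm/netlogic/xlr/iomap.h>

static void putchar_polled_sketch(char c)
{
	nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET);

	/* wait for transmit-holding-register-empty, not just any LSR bit */
	while (!(netlogic_read_reg(mmio, UART_LSR) & UART_LSR_THRE))
		;
	netlogic_write_reg(mmio, UART_TX, c);
}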
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index c9209ca6c8e7..4df879937446 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile | |||
@@ -41,6 +41,7 @@ obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o | |||
41 | obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o | 41 | obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o |
42 | obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o | 42 | obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o |
43 | obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o | 43 | obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o |
44 | obj-$(CONFIG_SOC_XWAY) += pci-lantiq.o ops-lantiq.o | ||
44 | obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o | 45 | obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o |
45 | obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o | 46 | obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o |
46 | obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o | 47 | obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o |
@@ -55,6 +56,7 @@ obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o | |||
55 | obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o | 56 | obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o |
56 | obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o | 57 | obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o |
57 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o | 58 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o |
59 | obj-$(CONFIG_NLM_XLR) += pci-xlr.o | ||
58 | 60 | ||
59 | ifdef CONFIG_PCI_MSI | 61 | ifdef CONFIG_PCI_MSI |
60 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o | 62 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o |
diff --git a/arch/mips/pci/ops-lantiq.c b/arch/mips/pci/ops-lantiq.c new file mode 100644 index 000000000000..1f2afb55cc71 --- /dev/null +++ b/arch/mips/pci/ops-lantiq.c | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <asm/addrspace.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | |||
18 | #include <lantiq_soc.h> | ||
19 | |||
20 | #include "pci-lantiq.h" | ||
21 | |||
22 | #define LTQ_PCI_CFG_BUSNUM_SHF 16 | ||
23 | #define LTQ_PCI_CFG_DEVNUM_SHF 11 | ||
24 | #define LTQ_PCI_CFG_FUNNUM_SHF 8 | ||
25 | |||
26 | #define PCI_ACCESS_READ 0 | ||
27 | #define PCI_ACCESS_WRITE 1 | ||
28 | |||
29 | static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus, | ||
30 | unsigned int devfn, unsigned int where, u32 *data) | ||
31 | { | ||
32 | unsigned long cfg_base; | ||
33 | unsigned long flags; | ||
34 | u32 temp; | ||
35 | |||
36 | /* we support slots 0 to 15; (devfn & 0xf8) == 0x68 (AD29) is | ||
37 | the SoC itself */ | ||
38 | if ((bus->number != 0) || ((devfn & 0xf8) > 0x78) | ||
39 | || ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68)) | ||
40 | return 1; | ||
41 | |||
42 | spin_lock_irqsave(&ebu_lock, flags); | ||
43 | |||
44 | cfg_base = (unsigned long) ltq_pci_mapped_cfg; | ||
45 | cfg_base |= (bus->number << LTQ_PCI_CFG_BUSNUM_SHF) | (devfn << | ||
46 | LTQ_PCI_CFG_FUNNUM_SHF) | (where & ~0x3); | ||
47 | |||
48 | /* Perform access */ | ||
49 | if (access_type == PCI_ACCESS_WRITE) { | ||
50 | ltq_w32(swab32(*data), ((u32 *)cfg_base)); | ||
51 | } else { | ||
52 | *data = ltq_r32(((u32 *)(cfg_base))); | ||
53 | *data = swab32(*data); | ||
54 | } | ||
55 | wmb(); | ||
56 | |||
57 | /* clear a possible master abort */ | ||
58 | cfg_base = (unsigned long) ltq_pci_mapped_cfg; | ||
59 | cfg_base |= (0x0 << LTQ_PCI_CFG_FUNNUM_SHF) + 4; | ||
60 | temp = ltq_r32(((u32 *)(cfg_base))); | ||
61 | temp = swab32(temp); | ||
62 | cfg_base = (unsigned long) ltq_pci_mapped_cfg; | ||
63 | cfg_base |= (0x68 << LTQ_PCI_CFG_FUNNUM_SHF) + 4; | ||
64 | ltq_w32(temp, ((u32 *)cfg_base)); | ||
65 | |||
66 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
67 | |||
68 | if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ)) | ||
69 | return 1; | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | int ltq_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn, | ||
75 | int where, int size, u32 *val) | ||
76 | { | ||
77 | u32 data = 0; | ||
78 | |||
79 | if (ltq_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) | ||
80 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
81 | |||
82 | if (size == 1) | ||
83 | *val = (data >> ((where & 3) << 3)) & 0xff; | ||
84 | else if (size == 2) | ||
85 | *val = (data >> ((where & 3) << 3)) & 0xffff; | ||
86 | else | ||
87 | *val = data; | ||
88 | |||
89 | return PCIBIOS_SUCCESSFUL; | ||
90 | } | ||
91 | |||
92 | int ltq_pci_write_config_dword(struct pci_bus *bus, unsigned int devfn, | ||
93 | int where, int size, u32 val) | ||
94 | { | ||
95 | u32 data = 0; | ||
96 | |||
97 | if (size == 4) { | ||
98 | data = val; | ||
99 | } else { | ||
100 | if (ltq_pci_config_access(PCI_ACCESS_READ, bus, | ||
101 | devfn, where, &data)) | ||
102 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
103 | |||
104 | if (size == 1) | ||
105 | data = (data & ~(0xff << ((where & 3) << 3))) | | ||
106 | (val << ((where & 3) << 3)); | ||
107 | else if (size == 2) | ||
108 | data = (data & ~(0xffff << ((where & 3) << 3))) | | ||
109 | (val << ((where & 3) << 3)); | ||
110 | } | ||
111 | |||
112 | if (ltq_pci_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) | ||
113 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
114 | |||
115 | return PCIBIOS_SUCCESSFUL; | ||
116 | } | ||
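The two accessors above emulate 1- and 2-byte configuration cycles on top of a 32-bit-only data path: (where & 3) << 3 turns the byte offset within the dword into a bit shift, the read path masks out the requested lanes, and the write path does a read-modify-write merge. A minimal, self-contained sketch of that merge step (the helper name cfg_merge is hypothetical, not part of the driver):

    #include <stdint.h>

    /* Merge a 1-, 2- or 4-byte value into the 32-bit dword backing
     * config offset (where & ~3); mirrors the shift/mask arithmetic
     * in ltq_pci_write_config_dword() above. */
    static uint32_t cfg_merge(uint32_t data, int where, int size, uint32_t val)
    {
            int shift = (where & 3) << 3;   /* byte offset -> bit offset */
            uint32_t mask = (size == 1) ? 0xff :
                            (size == 2) ? 0xffff : 0xffffffff;

            return (data & ~(mask << shift)) | ((val & mask) << shift);
    }

Writing one byte at where = 2, for example, gives a shift of 16 and leaves the other three byte lanes of the dword untouched.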
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c new file mode 100644 index 000000000000..603d7493e966 --- /dev/null +++ b/arch/mips/pci/pci-lantiq.c | |||
@@ -0,0 +1,297 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/vmalloc.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | |||
18 | #include <asm/pci.h> | ||
19 | #include <asm/gpio.h> | ||
20 | #include <asm/addrspace.h> | ||
21 | |||
22 | #include <lantiq_soc.h> | ||
23 | #include <lantiq_irq.h> | ||
24 | #include <lantiq_platform.h> | ||
25 | |||
26 | #include "pci-lantiq.h" | ||
27 | |||
28 | #define LTQ_PCI_CFG_BASE 0x17000000 | ||
29 | #define LTQ_PCI_CFG_SIZE 0x00008000 | ||
30 | #define LTQ_PCI_MEM_BASE 0x18000000 | ||
31 | #define LTQ_PCI_MEM_SIZE 0x02000000 | ||
32 | #define LTQ_PCI_IO_BASE 0x1AE00000 | ||
33 | #define LTQ_PCI_IO_SIZE 0x00200000 | ||
34 | |||
35 | #define PCI_CR_FCI_ADDR_MAP0 0x00C0 | ||
36 | #define PCI_CR_FCI_ADDR_MAP1 0x00C4 | ||
37 | #define PCI_CR_FCI_ADDR_MAP2 0x00C8 | ||
38 | #define PCI_CR_FCI_ADDR_MAP3 0x00CC | ||
39 | #define PCI_CR_FCI_ADDR_MAP4 0x00D0 | ||
40 | #define PCI_CR_FCI_ADDR_MAP5 0x00D4 | ||
41 | #define PCI_CR_FCI_ADDR_MAP6 0x00D8 | ||
42 | #define PCI_CR_FCI_ADDR_MAP7 0x00DC | ||
43 | #define PCI_CR_CLK_CTRL 0x0000 | ||
44 | #define PCI_CR_PCI_MOD 0x0030 | ||
45 | #define PCI_CR_PC_ARB 0x0080 | ||
46 | #define PCI_CR_FCI_ADDR_MAP11hg 0x00E4 | ||
47 | #define PCI_CR_BAR11MASK 0x0044 | ||
48 | #define PCI_CR_BAR12MASK 0x0048 | ||
49 | #define PCI_CR_BAR13MASK 0x004C | ||
50 | #define PCI_CS_BASE_ADDR1 0x0010 | ||
51 | #define PCI_CR_PCI_ADDR_MAP11 0x0064 | ||
52 | #define PCI_CR_FCI_BURST_LENGTH 0x00E8 | ||
53 | #define PCI_CR_PCI_EOI 0x002C | ||
54 | #define PCI_CS_STS_CMD 0x0004 | ||
55 | |||
56 | #define PCI_MASTER0_REQ_MASK_2BITS 8 | ||
57 | #define PCI_MASTER1_REQ_MASK_2BITS 10 | ||
58 | #define PCI_MASTER2_REQ_MASK_2BITS 12 | ||
59 | #define INTERNAL_ARB_ENABLE_BIT 0 | ||
60 | |||
61 | #define LTQ_CGU_IFCCR 0x0018 | ||
62 | #define LTQ_CGU_PCICR 0x0034 | ||
63 | |||
64 | #define ltq_pci_w32(x, y) ltq_w32((x), ltq_pci_membase + (y)) | ||
65 | #define ltq_pci_r32(x) ltq_r32(ltq_pci_membase + (x)) | ||
66 | |||
67 | #define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y)) | ||
68 | #define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x)) | ||
69 | |||
70 | struct ltq_pci_gpio_map { | ||
71 | int pin; | ||
72 | int alt0; | ||
73 | int alt1; | ||
74 | int dir; | ||
75 | char *name; | ||
76 | }; | ||
77 | |||
78 | /* the pci core can make use of the following gpios */ | ||
79 | static struct ltq_pci_gpio_map ltq_pci_gpio_map[] = { | ||
80 | { 0, 1, 0, 0, "pci-exin0" }, | ||
81 | { 1, 1, 0, 0, "pci-exin1" }, | ||
82 | { 2, 1, 0, 0, "pci-exin2" }, | ||
83 | { 39, 1, 0, 0, "pci-exin3" }, | ||
84 | { 10, 1, 0, 0, "pci-exin4" }, | ||
85 | { 9, 1, 0, 0, "pci-exin5" }, | ||
86 | { 30, 1, 0, 1, "pci-gnt1" }, | ||
87 | { 23, 1, 0, 1, "pci-gnt2" }, | ||
88 | { 19, 1, 0, 1, "pci-gnt3" }, | ||
89 | { 38, 1, 0, 1, "pci-gnt4" }, | ||
90 | { 29, 1, 0, 0, "pci-req1" }, | ||
91 | { 31, 1, 0, 0, "pci-req2" }, | ||
92 | { 3, 1, 0, 0, "pci-req3" }, | ||
93 | { 37, 1, 0, 0, "pci-req4" }, | ||
94 | }; | ||
95 | |||
96 | __iomem void *ltq_pci_mapped_cfg; | ||
97 | static __iomem void *ltq_pci_membase; | ||
98 | |||
99 | int (*ltqpci_plat_dev_init)(struct pci_dev *dev) = NULL; | ||
100 | |||
101 | /* Since the PCI REQ pins can be reused for other functionality, make it | ||
102 | possible to exclude those from interpretation by the PCI controller */ | ||
103 | static int ltq_pci_req_mask = 0xf; | ||
104 | |||
105 | static int *ltq_pci_irq_map; | ||
106 | |||
107 | struct pci_ops ltq_pci_ops = { | ||
108 | .read = ltq_pci_read_config_dword, | ||
109 | .write = ltq_pci_write_config_dword | ||
110 | }; | ||
111 | |||
112 | static struct resource pci_io_resource = { | ||
113 | .name = "pci io space", | ||
114 | .start = LTQ_PCI_IO_BASE, | ||
115 | .end = LTQ_PCI_IO_BASE + LTQ_PCI_IO_SIZE - 1, | ||
116 | .flags = IORESOURCE_IO | ||
117 | }; | ||
118 | |||
119 | static struct resource pci_mem_resource = { | ||
120 | .name = "pci memory space", | ||
121 | .start = LTQ_PCI_MEM_BASE, | ||
122 | .end = LTQ_PCI_MEM_BASE + LTQ_PCI_MEM_SIZE - 1, | ||
123 | .flags = IORESOURCE_MEM | ||
124 | }; | ||
125 | |||
126 | static struct pci_controller ltq_pci_controller = { | ||
127 | .pci_ops = <q_pci_ops, | ||
128 | .mem_resource = &pci_mem_resource, | ||
129 | .mem_offset = 0x00000000UL, | ||
130 | .io_resource = &pci_io_resource, | ||
131 | .io_offset = 0x00000000UL, | ||
132 | }; | ||
133 | |||
134 | int pcibios_plat_dev_init(struct pci_dev *dev) | ||
135 | { | ||
136 | if (ltqpci_plat_dev_init) | ||
137 | return ltqpci_plat_dev_init(dev); | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static u32 ltq_calc_bar11mask(void) | ||
143 | { | ||
144 | u32 mem, bar11mask; | ||
145 | |||
146 | /* The BAR11MASK value depends on the amount of memory in the system. */ | ||
147 | mem = num_physpages * PAGE_SIZE; | ||
148 | bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8; | ||
149 | |||
150 | return bar11mask; | ||
151 | } | ||
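To make the arithmetic concrete: on a hypothetical system with 64 MB of RAM, fls(mem) - 1 is 26, so the low 26 bits are cleared inside the 0x0ffffff0 window field before the constant 8 is ORed into the low nibble (the hardware meaning of that nibble is register-specific). A standalone sketch that reproduces the computation:

    #include <stdio.h>

    /* portable stand-in for the kernel's fls(): 1-based index of the
     * highest set bit, 0 when x is 0 */
    static int fls_sketch(unsigned int x)
    {
            int r = 0;

            while (x) {
                    r++;
                    x >>= 1;
            }
            return r;
    }

    int main(void)
    {
            unsigned int mem = 64u << 20;   /* assume 64 MB of RAM */
            unsigned int mask = (0x0ffffff0u &
                                 ~((1u << (fls_sketch(mem) - 1)) - 1)) | 8;

            printf("BAR11MASK = 0x%08x\n", mask);   /* prints 0x0c000008 */
            return 0;
    }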
152 | |||
153 | static void ltq_pci_setup_gpio(int gpio) | ||
154 | { | ||
155 | int i; | ||
156 | for (i = 0; i < ARRAY_SIZE(ltq_pci_gpio_map); i++) { | ||
157 | if (gpio & (1 << i)) { | ||
158 | ltq_gpio_request(ltq_pci_gpio_map[i].pin, | ||
159 | ltq_pci_gpio_map[i].alt0, | ||
160 | ltq_pci_gpio_map[i].alt1, | ||
161 | ltq_pci_gpio_map[i].dir, | ||
162 | ltq_pci_gpio_map[i].name); | ||
163 | } | ||
164 | } | ||
165 | ltq_gpio_request(21, 0, 0, 1, "pci-reset"); | ||
166 | ltq_pci_req_mask = (gpio >> PCI_REQ_SHIFT) & PCI_REQ_MASK; | ||
167 | } | ||
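Each set bit i in conf->gpio selects entry i of ltq_pci_gpio_map above, and the upper bits (extracted with PCI_REQ_SHIFT/PCI_REQ_MASK from the platform headers) choose which REQ lines the arbiter should honour. A hedged example of what a board's platform data might pass in (the board and its pin choices are hypothetical):

    /* hypothetical board: EXIN1 plus the GNT1/REQ1 pair */
    static struct ltq_pci_data board_pci_data = {
            .clock = 0,             /* clock selection, see ltq_pci_startup() */
            .gpio  = (1 << 1)       /* pci-exin1, map entry 1 */
                   | (1 << 6)       /* pci-gnt1,  map entry 6 */
                   | (1 << 10),     /* pci-req1,  map entry 10 */
    };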
168 | |||
169 | static int __devinit ltq_pci_startup(struct ltq_pci_data *conf) | ||
170 | { | ||
171 | u32 temp_buffer; | ||
172 | |||
173 | /* set clock to 33MHz */ | ||
174 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); | ||
175 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); | ||
176 | |||
177 | /* external or internal clock? */ | ||
178 | if (conf->clock) { | ||
179 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~(1 << 16), | ||
180 | LTQ_CGU_IFCCR); | ||
181 | ltq_cgu_w32((1 << 30), LTQ_CGU_PCICR); | ||
182 | } else { | ||
183 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | (1 << 16), | ||
184 | LTQ_CGU_IFCCR); | ||
185 | ltq_cgu_w32((1 << 31) | (1 << 30), LTQ_CGU_PCICR); | ||
186 | } | ||
187 | |||
188 | /* set up the PCI clock and the GPIOs used by PCI */ | ||
189 | ltq_pci_setup_gpio(conf->gpio); | ||
190 | |||
191 | /* enable auto-switching between PCI and EBU */ | ||
192 | ltq_pci_w32(0xa, PCI_CR_CLK_CTRL); | ||
193 | |||
194 | /* busy, i.e. configuration is not done; PCI accesses have to be retried */ | ||
195 | ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD); | ||
196 | wmb(); | ||
197 | /* BUS Master/IO/MEM access */ | ||
198 | ltq_pci_cfg_w32(ltq_pci_cfg_r32(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD); | ||
199 | |||
200 | /* enable the 2 external PCI masters */ | ||
201 | temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB); | ||
202 | temp_buffer &= (~(ltq_pci_req_mask << 16)); | ||
203 | /* enable internal arbiter */ | ||
204 | temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT); | ||
205 | /* enable internal PCI master request */ | ||
206 | temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS)); | ||
207 | |||
208 | /* enable EBU request */ | ||
209 | temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS)); | ||
210 | |||
211 | /* enable all external masters request */ | ||
212 | temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS)); | ||
213 | ltq_pci_w32(temp_buffer, PCI_CR_PC_ARB); | ||
214 | wmb(); | ||
215 | |||
216 | /* setup BAR memory regions */ | ||
217 | ltq_pci_w32(0x18000000, PCI_CR_FCI_ADDR_MAP0); | ||
218 | ltq_pci_w32(0x18400000, PCI_CR_FCI_ADDR_MAP1); | ||
219 | ltq_pci_w32(0x18800000, PCI_CR_FCI_ADDR_MAP2); | ||
220 | ltq_pci_w32(0x18c00000, PCI_CR_FCI_ADDR_MAP3); | ||
221 | ltq_pci_w32(0x19000000, PCI_CR_FCI_ADDR_MAP4); | ||
222 | ltq_pci_w32(0x19400000, PCI_CR_FCI_ADDR_MAP5); | ||
223 | ltq_pci_w32(0x19800000, PCI_CR_FCI_ADDR_MAP6); | ||
224 | ltq_pci_w32(0x19c00000, PCI_CR_FCI_ADDR_MAP7); | ||
225 | ltq_pci_w32(0x1ae00000, PCI_CR_FCI_ADDR_MAP11hg); | ||
226 | ltq_pci_w32(ltq_calc_bar11mask(), PCI_CR_BAR11MASK); | ||
227 | ltq_pci_w32(0, PCI_CR_PCI_ADDR_MAP11); | ||
228 | ltq_pci_w32(0, PCI_CS_BASE_ADDR1); | ||
229 | /* both TX and RX endian swap are enabled */ | ||
230 | ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_EOI) | 3, PCI_CR_PCI_EOI); | ||
231 | wmb(); | ||
232 | ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR12MASK) | 0x80000000, | ||
233 | PCI_CR_BAR12MASK); | ||
234 | ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR13MASK) | 0x80000000, | ||
235 | PCI_CR_BAR13MASK); | ||
236 | /* use 8 dw burst length */ | ||
237 | ltq_pci_w32(0x303, PCI_CR_FCI_BURST_LENGTH); | ||
238 | ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD); | ||
239 | wmb(); | ||
240 | |||
241 | /* setup irq line */ | ||
242 | ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_CON) | 0xc, LTQ_EBU_PCC_CON); | ||
243 | ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN); | ||
244 | |||
245 | /* toggle reset pin */ | ||
246 | __gpio_set_value(21, 0); | ||
247 | wmb(); | ||
248 | mdelay(1); | ||
249 | __gpio_set_value(21, 1); | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
254 | { | ||
255 | if (ltq_pci_irq_map[slot]) | ||
256 | return ltq_pci_irq_map[slot]; | ||
257 | printk(KERN_ERR "ltq_pci: trying to map irq for unknown slot %d\n", | ||
258 | slot); | ||
259 | |||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | static int __devinit ltq_pci_probe(struct platform_device *pdev) | ||
264 | { | ||
265 | struct ltq_pci_data *ltq_pci_data = | ||
266 | (struct ltq_pci_data *) pdev->dev.platform_data; | ||
267 | pci_probe_only = 0; | ||
268 | ltq_pci_irq_map = ltq_pci_data->irq; | ||
269 | ltq_pci_membase = ioremap_nocache(PCI_CR_BASE_ADDR, PCI_CR_SIZE); | ||
270 | ltq_pci_mapped_cfg = | ||
271 | ioremap_nocache(LTQ_PCI_CFG_BASE, LTQ_PCI_CFG_SIZE); | ||
272 | ltq_pci_controller.io_map_base = | ||
273 | (unsigned long)ioremap(LTQ_PCI_IO_BASE, LTQ_PCI_IO_SIZE - 1); | ||
274 | ltq_pci_startup(ltq_pci_data); | ||
275 | register_pci_controller(<q_pci_controller); | ||
276 | |||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | static struct platform_driver ltq_pci_driver = { | ||
282 | .probe = ltq_pci_probe, | ||
283 | .driver = { | ||
284 | .name = "ltq_pci", | ||
285 | .owner = THIS_MODULE, | ||
286 | }, | ||
287 | }; | ||
288 | |||
289 | int __init pcibios_init(void) | ||
290 | { | ||
291 | int ret = platform_driver_register(<q_pci_driver); | ||
292 | if (ret) | ||
293 | printk(KERN_INFO "ltq_pci: Error registering platform driver!\n"); | ||
294 | return ret; | ||
295 | } | ||
296 | |||
297 | arch_initcall(pcibios_init); | ||
diff --git a/arch/mips/pci/pci-lantiq.h b/arch/mips/pci/pci-lantiq.h new file mode 100644 index 000000000000..66bf6cd6be3c --- /dev/null +++ b/arch/mips/pci/pci-lantiq.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_PCI_H__ | ||
10 | #define _LTQ_PCI_H__ | ||
11 | |||
12 | extern __iomem void *ltq_pci_mapped_cfg; | ||
13 | extern int ltq_pci_read_config_dword(struct pci_bus *bus, | ||
14 | unsigned int devfn, int where, int size, u32 *val); | ||
15 | extern int ltq_pci_write_config_dword(struct pci_bus *bus, | ||
16 | unsigned int devfn, int where, int size, u32 val); | ||
17 | |||
18 | #endif | ||
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c new file mode 100644 index 000000000000..38fece16c435 --- /dev/null +++ b/arch/mips/pci/pci-xlr.c | |||
@@ -0,0 +1,214 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/types.h> | ||
36 | #include <linux/pci.h> | ||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/init.h> | ||
39 | #include <linux/mm.h> | ||
40 | #include <linux/console.h> | ||
41 | |||
42 | #include <asm/io.h> | ||
43 | |||
44 | #include <asm/netlogic/interrupt.h> | ||
45 | #include <asm/netlogic/xlr/iomap.h> | ||
46 | #include <asm/netlogic/xlr/pic.h> | ||
47 | #include <asm/netlogic/xlr/xlr.h> | ||
48 | |||
49 | static void *pci_config_base; | ||
50 | |||
51 | #define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off)) | ||
52 | |||
53 | /* PCI ops */ | ||
54 | static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn, | ||
55 | int where) | ||
56 | { | ||
57 | u32 data; | ||
58 | u32 *cfgaddr; | ||
59 | |||
60 | cfgaddr = (u32 *)(pci_config_base + | ||
61 | pci_cfg_addr(bus->number, devfn, where & ~3)); | ||
62 | data = *cfgaddr; | ||
63 | return cpu_to_le32(data); | ||
64 | } | ||
65 | |||
66 | static inline void pci_cfg_write_32bit(struct pci_bus *bus, unsigned int devfn, | ||
67 | int where, u32 data) | ||
68 | { | ||
69 | u32 *cfgaddr; | ||
70 | |||
71 | cfgaddr = (u32 *)(pci_config_base + | ||
72 | pci_cfg_addr(bus->number, devfn, where & ~3)); | ||
73 | *cfgaddr = cpu_to_le32(data); | ||
74 | } | ||
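pci_cfg_addr() linearizes (bus, devfn, register) into an offset inside the memory-mapped configuration window, ECAM-style: 8 bits of register, then 8 of devfn, then the bus number. A small sketch of the encoding with a hypothetical helper name (the standard devfn packing is device << 3 | function):

    #include <stdio.h>

    static unsigned int cfg_addr(int bus, int dev, int fn, int off)
    {
            int devfn = (dev << 3) | fn;    /* standard PCI devfn packing */

            return (bus << 16) | (devfn << 8) | off;
    }

    int main(void)
    {
            /* BAR0 (offset 0x10) of bus 1, device 2, function 0 */
            printf("window offset = 0x%06x\n", cfg_addr(1, 2, 0, 0x10));
            return 0;                       /* prints 0x011010 */
    }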
75 | |||
76 | static int nlm_pcibios_read(struct pci_bus *bus, unsigned int devfn, | ||
77 | int where, int size, u32 *val) | ||
78 | { | ||
79 | u32 data; | ||
80 | |||
81 | if ((size == 2) && (where & 1)) | ||
82 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
83 | else if ((size == 4) && (where & 3)) | ||
84 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
85 | |||
86 | data = pci_cfg_read_32bit(bus, devfn, where); | ||
87 | |||
88 | if (size == 1) | ||
89 | *val = (data >> ((where & 3) << 3)) & 0xff; | ||
90 | else if (size == 2) | ||
91 | *val = (data >> ((where & 3) << 3)) & 0xffff; | ||
92 | else | ||
93 | *val = data; | ||
94 | |||
95 | return PCIBIOS_SUCCESSFUL; | ||
96 | } | ||
97 | |||
98 | |||
99 | static int nlm_pcibios_write(struct pci_bus *bus, unsigned int devfn, | ||
100 | int where, int size, u32 val) | ||
101 | { | ||
102 | u32 data; | ||
103 | |||
104 | if ((size == 2) && (where & 1)) | ||
105 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
106 | else if ((size == 4) && (where & 3)) | ||
107 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
108 | |||
109 | data = pci_cfg_read_32bit(bus, devfn, where); | ||
110 | |||
111 | if (size == 1) | ||
112 | data = (data & ~(0xff << ((where & 3) << 3))) | | ||
113 | (val << ((where & 3) << 3)); | ||
114 | else if (size == 2) | ||
115 | data = (data & ~(0xffff << ((where & 3) << 3))) | | ||
116 | (val << ((where & 3) << 3)); | ||
117 | else | ||
118 | data = val; | ||
119 | |||
120 | pci_cfg_write_32bit(bus, devfn, where, data); | ||
121 | |||
122 | return PCIBIOS_SUCCESSFUL; | ||
123 | } | ||
124 | |||
125 | struct pci_ops nlm_pci_ops = { | ||
126 | .read = nlm_pcibios_read, | ||
127 | .write = nlm_pcibios_write | ||
128 | }; | ||
129 | |||
130 | static struct resource nlm_pci_mem_resource = { | ||
131 | .name = "XLR PCI MEM", | ||
132 | .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */ | ||
133 | .end = 0xdfffffffUL, | ||
134 | .flags = IORESOURCE_MEM, | ||
135 | }; | ||
136 | |||
137 | static struct resource nlm_pci_io_resource = { | ||
138 | .name = "XLR IO MEM", | ||
139 | .start = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */ | ||
140 | .end = 0x100fffffUL, | ||
141 | .flags = IORESOURCE_IO, | ||
142 | }; | ||
143 | |||
144 | struct pci_controller nlm_pci_controller = { | ||
145 | .index = 0, | ||
146 | .pci_ops = &nlm_pci_ops, | ||
147 | .mem_resource = &nlm_pci_mem_resource, | ||
148 | .mem_offset = 0x00000000UL, | ||
149 | .io_resource = &nlm_pci_io_resource, | ||
150 | .io_offset = 0x00000000UL, | ||
151 | }; | ||
152 | |||
153 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
154 | { | ||
155 | if (!nlm_chip_is_xls()) | ||
156 | return PIC_PCIX_IRQ; /* for XLR just one IRQ */ | ||
157 | |||
158 | /* | ||
159 | * For XLS PCIe there is an IRQ per link; find out which | ||
160 | * link the device is on to assign its interrupt | ||
161 | */ | ||
162 | if (dev->bus->self == NULL) | ||
163 | return 0; | ||
164 | |||
165 | switch (dev->bus->self->devfn) { | ||
166 | case 0x0: | ||
167 | return PIC_PCIE_LINK0_IRQ; | ||
168 | case 0x8: | ||
169 | return PIC_PCIE_LINK1_IRQ; | ||
170 | case 0x10: | ||
171 | if (nlm_chip_is_xls_b()) | ||
172 | return PIC_PCIE_XLSB0_LINK2_IRQ; | ||
173 | else | ||
174 | return PIC_PCIE_LINK2_IRQ; | ||
175 | case 0x18: | ||
176 | if (nlm_chip_is_xls_b()) | ||
177 | return PIC_PCIE_XLSB0_LINK3_IRQ; | ||
178 | else | ||
179 | return PIC_PCIE_LINK3_IRQ; | ||
180 | } | ||
181 | WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn); | ||
182 | return 0; | ||
183 | } | ||
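The devfn values in the switch are the XLS root ports themselves: devfn packs device << 3 | function, so 0x00, 0x08, 0x10 and 0x18 are devices 0 through 3, function 0, i.e. links 0 through 3. A one-line sketch of that relationship (the helper name is hypothetical):

    /* root-port devfn -> PCIe link number; equivalent to PCI_SLOT(devfn) */
    static inline int devfn_to_link(unsigned int devfn)
    {
            return devfn >> 3;
    }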
184 | |||
185 | /* Do platform-specific device initialization at pci_enable_device() time */ | ||
186 | int pcibios_plat_dev_init(struct pci_dev *dev) | ||
187 | { | ||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static int __init pcibios_init(void) | ||
192 | { | ||
193 | /* PSB assigns PCI resources */ | ||
194 | pci_probe_only = 1; | ||
195 | pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20); | ||
196 | |||
197 | /* Extend the ioport resource range for memory-mapped I/O */ | ||
198 | ioport_resource.start = 0; | ||
199 | ioport_resource.end = ~0; | ||
200 | |||
201 | set_io_port_base(CKSEG1); | ||
202 | nlm_pci_controller.io_map_base = CKSEG1; | ||
203 | |||
204 | pr_info("Registering XLR/XLS PCIX/PCIE Controller.\n"); | ||
205 | register_pci_controller(&nlm_pci_controller); | ||
206 | |||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | arch_initcall(pcibios_init); | ||
211 | |||
212 | struct pci_fixup pcibios_fixups[] = { | ||
213 | {0} | ||
214 | }; | ||
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c index f9b9dcdfa9dd..98fd0099d964 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c | |||
@@ -97,7 +97,7 @@ static int msp_per_irq_set_affinity(struct irq_data *d, | |||
97 | 97 | ||
98 | static struct irq_chip msp_per_irq_controller = { | 98 | static struct irq_chip msp_per_irq_controller = { |
99 | .name = "MSP_PER", | 99 | .name = "MSP_PER", |
100 | .irq_enable = unmask_per_irq. | 100 | .irq_enable = unmask_per_irq, |
101 | .irq_disable = mask_per_irq, | 101 | .irq_disable = mask_per_irq, |
102 | .irq_ack = msp_per_irq_ack, | 102 | .irq_ack = msp_per_irq_ack, |
103 | #ifdef CONFIG_SMP | 103 | #ifdef CONFIG_SMP |
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c index efc9e889b349..2608752898c0 100644 --- a/arch/mips/pmc-sierra/yosemite/smp.c +++ b/arch/mips/pmc-sierra/yosemite/smp.c | |||
@@ -55,6 +55,8 @@ void titan_mailbox_irq(void) | |||
55 | 55 | ||
56 | if (status & 0x2) | 56 | if (status & 0x2) |
57 | smp_call_function_interrupt(); | 57 | smp_call_function_interrupt(); |
58 | if (status & 0x4) | ||
59 | scheduler_ipi(); | ||
58 | break; | 60 | break; |
59 | 61 | ||
60 | case 1: | 62 | case 1: |
@@ -63,6 +65,8 @@ void titan_mailbox_irq(void) | |||
63 | 65 | ||
64 | if (status & 0x2) | 66 | if (status & 0x2) |
65 | smp_call_function_interrupt(); | 67 | smp_call_function_interrupt(); |
68 | if (status & 0x4) | ||
69 | scheduler_ipi(); | ||
66 | break; | 70 | break; |
67 | } | 71 | } |
68 | } | 72 | } |
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S index dbb5c7b4b70f..f8a751c03282 100644 --- a/arch/mips/power/hibernate.S +++ b/arch/mips/power/hibernate.S | |||
@@ -35,7 +35,7 @@ LEAF(swsusp_arch_resume) | |||
35 | 0: | 35 | 0: |
36 | PTR_L t1, PBE_ADDRESS(t0) /* source */ | 36 | PTR_L t1, PBE_ADDRESS(t0) /* source */ |
37 | PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ | 37 | PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ |
38 | PTR_ADDIU t3, t1, PAGE_SIZE | 38 | PTR_ADDU t3, t1, PAGE_SIZE |
39 | 1: | 39 | 1: |
40 | REG_L t8, (t1) | 40 | REG_L t8, (t1) |
41 | REG_S t8, (t2) | 41 | REG_S t8, (t2) |
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c index 37de05d595e7..6c47dfeb7be3 100644 --- a/arch/mips/rb532/gpio.c +++ b/arch/mips/rb532/gpio.c | |||
@@ -185,7 +185,7 @@ int __init rb532_gpio_init(void) | |||
185 | struct resource *r; | 185 | struct resource *r; |
186 | 186 | ||
187 | r = rb532_gpio_reg0_res; | 187 | r = rb532_gpio_reg0_res; |
188 | rb532_gpio_chip->regbase = ioremap_nocache(r->start, r->end - r->start); | 188 | rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r)); |
189 | 189 | ||
190 | if (!rb532_gpio_chip->regbase) { | 190 | if (!rb532_gpio_chip->regbase) { |
191 | printk(KERN_ERR "rb532: cannot remap GPIO register 0\n"); | 191 | printk(KERN_ERR "rb532: cannot remap GPIO register 0\n"); |
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c index deddbf0ebe5c..698904daf901 100644 --- a/arch/mips/sgi-ip22/ip22-platform.c +++ b/arch/mips/sgi-ip22/ip22-platform.c | |||
@@ -132,7 +132,7 @@ static struct platform_device eth1_device = { | |||
132 | */ | 132 | */ |
133 | static int __init sgiseeq_devinit(void) | 133 | static int __init sgiseeq_devinit(void) |
134 | { | 134 | { |
135 | unsigned int tmp; | 135 | unsigned int pbdma __maybe_unused; |
136 | int res, i; | 136 | int res, i; |
137 | 137 | ||
138 | eth0_pd.hpc = hpc3c0; | 138 | eth0_pd.hpc = hpc3c0; |
@@ -151,7 +151,7 @@ static int __init sgiseeq_devinit(void) | |||
151 | 151 | ||
152 | /* Second HPC is missing? */ | 152 | /* Second HPC is missing? */ |
153 | if (ip22_is_fullhouse() || | 153 | if (ip22_is_fullhouse() || |
154 | get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1])) | 154 | get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1])) |
155 | return 0; | 155 | return 0; |
156 | 156 | ||
157 | sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 | | 157 | sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 | |
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c index 603fc91c1030..1a94c9894188 100644 --- a/arch/mips/sgi-ip22/ip22-time.c +++ b/arch/mips/sgi-ip22/ip22-time.c | |||
@@ -32,7 +32,7 @@ | |||
32 | static unsigned long dosample(void) | 32 | static unsigned long dosample(void) |
33 | { | 33 | { |
34 | u32 ct0, ct1; | 34 | u32 ct0, ct1; |
35 | u8 msb, lsb; | 35 | u8 msb; |
36 | 36 | ||
37 | /* Start the counter. */ | 37 | /* Start the counter. */ |
38 | sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | | 38 | sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | |
@@ -46,7 +46,7 @@ static unsigned long dosample(void) | |||
46 | /* Latch and spin until top byte of counter2 is zero */ | 46 | /* Latch and spin until top byte of counter2 is zero */ |
47 | do { | 47 | do { |
48 | writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); | 48 | writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); |
49 | lsb = readb(&sgint->tcnt2); | 49 | (void) readb(&sgint->tcnt2); |
50 | msb = readb(&sgint->tcnt2); | 50 | msb = readb(&sgint->tcnt2); |
51 | ct1 = read_c0_count(); | 51 | ct1 = read_c0_count(); |
52 | } while (msb); | 52 | } while (msb); |
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c index a1fa4abb3f6a..cd0d5b06cd83 100644 --- a/arch/mips/sgi-ip27/ip27-hubio.c +++ b/arch/mips/sgi-ip27/ip27-hubio.c | |||
@@ -29,7 +29,6 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, | |||
29 | unsigned long xtalk_addr, size_t size) | 29 | unsigned long xtalk_addr, size_t size) |
30 | { | 30 | { |
31 | nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); | 31 | nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); |
32 | volatile hubreg_t junk; | ||
33 | unsigned i; | 32 | unsigned i; |
34 | 33 | ||
35 | /* use small-window mapping if possible */ | 34 | /* use small-window mapping if possible */ |
@@ -64,7 +63,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, | |||
64 | * after we write it. | 63 | * after we write it. |
65 | */ | 64 | */ |
66 | IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); | 65 | IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); |
67 | junk = HUB_L(IIO_ITTE_GET(nasid, i)); | 66 | (void) HUB_L(IIO_ITTE_GET(nasid, i)); |
68 | 67 | ||
69 | return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); | 68 | return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); |
70 | } | 69 | } |
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 0a04603d577c..b18b04e48577 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c | |||
@@ -147,8 +147,10 @@ static void ip27_do_irq_mask0(void) | |||
147 | #ifdef CONFIG_SMP | 147 | #ifdef CONFIG_SMP |
148 | if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) { | 148 | if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) { |
149 | LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ); | 149 | LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ); |
150 | scheduler_ipi(); | ||
150 | } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) { | 151 | } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) { |
151 | LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ); | 152 | LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ); |
153 | scheduler_ipi(); | ||
152 | } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { | 154 | } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { |
153 | LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); | 155 | LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); |
154 | smp_call_function_interrupt(); | 156 | smp_call_function_interrupt(); |
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c index c3d30a88daf3..1d1919a44e88 100644 --- a/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/arch/mips/sgi-ip27/ip27-klnuma.c | |||
@@ -54,11 +54,8 @@ void __init setup_replication_mask(void) | |||
54 | 54 | ||
55 | static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) | 55 | static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) |
56 | { | 56 | { |
57 | cnodeid_t client_cnode; | ||
58 | kern_vars_t *kvp; | 57 | kern_vars_t *kvp; |
59 | 58 | ||
60 | client_cnode = NASID_TO_COMPACT_NODEID(client_nasid); | ||
61 | |||
62 | kvp = &hub_data(client_nasid)->kern_vars; | 59 | kvp = &hub_data(client_nasid)->kern_vars; |
63 | 60 | ||
64 | KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp; | 61 | KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp; |
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index a152538d3c97..ef74f3267f91 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c | |||
@@ -66,18 +66,7 @@ static int rt_next_event(unsigned long delta, struct clock_event_device *evt) | |||
66 | static void rt_set_mode(enum clock_event_mode mode, | 66 | static void rt_set_mode(enum clock_event_mode mode, |
67 | struct clock_event_device *evt) | 67 | struct clock_event_device *evt) |
68 | { | 68 | { |
69 | switch (mode) { | 69 | /* Nothing to do ... */ |
70 | case CLOCK_EVT_MODE_ONESHOT: | ||
71 | /* The only mode supported */ | ||
72 | break; | ||
73 | |||
74 | case CLOCK_EVT_MODE_PERIODIC: | ||
75 | case CLOCK_EVT_MODE_UNUSED: | ||
76 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
77 | case CLOCK_EVT_MODE_RESUME: | ||
78 | /* Nothing to do */ | ||
79 | break; | ||
80 | } | ||
81 | } | 70 | } |
82 | 71 | ||
83 | int rt_timer_irq; | 72 | int rt_timer_irq; |
@@ -174,8 +163,7 @@ static void __init hub_rt_clocksource_init(void) | |||
174 | { | 163 | { |
175 | struct clocksource *cs = &hub_rt_clocksource; | 164 | struct clocksource *cs = &hub_rt_clocksource; |
176 | 165 | ||
177 | clocksource_set_clock(cs, CYCLES_PER_SEC); | 166 | clocksource_register_hz(cs, CYCLES_PER_SEC); |
178 | clocksource_register(cs); | ||
179 | } | 167 | } |
180 | 168 | ||
181 | void __init plat_time_init(void) | 169 | void __init plat_time_init(void) |
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index 47b347c992ea..d667875be564 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
22 | #include <linux/kernel_stat.h> | 22 | #include <linux/kernel_stat.h> |
23 | #include <linux/sched.h> | ||
23 | 24 | ||
24 | #include <asm/mmu_context.h> | 25 | #include <asm/mmu_context.h> |
25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
@@ -189,10 +190,8 @@ void bcm1480_mailbox_interrupt(void) | |||
189 | /* Clear the mailbox to clear the interrupt */ | 190 | /* Clear the mailbox to clear the interrupt */ |
190 | __raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]); | 191 | __raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]); |
191 | 192 | ||
192 | /* | 193 | if (action & SMP_RESCHEDULE_YOURSELF) |
193 | * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the | 194 | scheduler_ipi(); |
194 | * interrupt will do the reschedule for us | ||
195 | */ | ||
196 | 195 | ||
197 | if (action & SMP_CALL_FUNCTION) | 196 | if (action & SMP_CALL_FUNCTION) |
198 | smp_call_function_interrupt(); | 197 | smp_call_function_interrupt(); |
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index c00a5cb1128d..38e7f6bd7922 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <linux/kernel_stat.h> | 23 | #include <linux/kernel_stat.h> |
24 | #include <linux/sched.h> | ||
24 | 25 | ||
25 | #include <asm/mmu_context.h> | 26 | #include <asm/mmu_context.h> |
26 | #include <asm/io.h> | 27 | #include <asm/io.h> |
@@ -177,10 +178,8 @@ void sb1250_mailbox_interrupt(void) | |||
177 | /* Clear the mailbox to clear the interrupt */ | 178 | /* Clear the mailbox to clear the interrupt */ |
178 | ____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]); | 179 | ____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]); |
179 | 180 | ||
180 | /* | 181 | if (action & SMP_RESCHEDULE_YOURSELF) |
181 | * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the | 182 | scheduler_ipi(); |
182 | * interrupt will do the reschedule for us | ||
183 | */ | ||
184 | 183 | ||
185 | if (action & SMP_CALL_FUNCTION) | 184 | if (action & SMP_CALL_FUNCTION) |
186 | smp_call_function_interrupt(); | 185 | smp_call_function_interrupt(); |
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index c76151b56568..0904d4d30cb3 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c | |||
@@ -95,7 +95,7 @@ static void __init sni_a20r_timer_setup(void) | |||
95 | static __init unsigned long dosample(void) | 95 | static __init unsigned long dosample(void) |
96 | { | 96 | { |
97 | u32 ct0, ct1; | 97 | u32 ct0, ct1; |
98 | volatile u8 msb, lsb; | 98 | volatile u8 msb; |
99 | 99 | ||
100 | /* Start the counter. */ | 100 | /* Start the counter. */ |
101 | outb_p(0x34, 0x43); | 101 | outb_p(0x34, 0x43); |
@@ -108,7 +108,7 @@ static __init unsigned long dosample(void) | |||
108 | /* Latch and spin until top byte of counter0 is zero */ | 108 | /* Latch and spin until top byte of counter0 is zero */ |
109 | do { | 109 | do { |
110 | outb(0x00, 0x43); | 110 | outb(0x00, 0x43); |
111 | lsb = inb(0x40); | 111 | (void) inb(0x40); |
112 | msb = inb(0x40); | 112 | msb = inb(0x40); |
113 | ct1 = read_c0_count(); | 113 | ct1 = read_c0_count(); |
114 | } while (msb); | 114 | } while (msb); |
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c index 226c826a2194..83fb27912231 100644 --- a/arch/mn10300/kernel/smp.c +++ b/arch/mn10300/kernel/smp.c | |||
@@ -494,14 +494,11 @@ void smp_send_stop(void) | |||
494 | * @irq: The interrupt number. | 494 | * @irq: The interrupt number. |
495 | * @dev_id: The device ID. | 495 | * @dev_id: The device ID. |
496 | * | 496 | * |
497 | * We need do nothing here, since the scheduling will be effected on our way | ||
498 | * back through entry.S. | ||
499 | * | ||
500 | * Returns IRQ_HANDLED to indicate we handled the interrupt successfully. | 497 | * Returns IRQ_HANDLED to indicate we handled the interrupt successfully. |
501 | */ | 498 | */ |
502 | static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) | 499 | static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) |
503 | { | 500 | { |
504 | /* do nothing */ | 501 | scheduler_ipi(); |
505 | return IRQ_HANDLED; | 502 | return IRQ_HANDLED; |
506 | } | 503 | } |
507 | 504 | ||
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 69d63d354ef0..828305f19cff 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
@@ -155,10 +155,7 @@ ipi_interrupt(int irq, void *dev_id) | |||
155 | 155 | ||
156 | case IPI_RESCHEDULE: | 156 | case IPI_RESCHEDULE: |
157 | smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu); | 157 | smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu); |
158 | /* | 158 | scheduler_ipi(); |
159 | * Reschedule callback. Everything to be | ||
160 | * done is done by the interrupt return path. | ||
161 | */ | ||
162 | break; | 159 | break; |
163 | 160 | ||
164 | case IPI_CALL_FUNC: | 161 | case IPI_CALL_FUNC: |
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h index 7005ee0b074d..49baddcdd14e 100644 --- a/arch/powerpc/include/asm/mpic.h +++ b/arch/powerpc/include/asm/mpic.h | |||
@@ -3,7 +3,6 @@ | |||
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | #include <linux/irq.h> | 5 | #include <linux/irq.h> |
6 | #include <linux/sysdev.h> | ||
7 | #include <asm/dcr.h> | 6 | #include <asm/dcr.h> |
8 | #include <asm/msi_bitmap.h> | 7 | #include <asm/msi_bitmap.h> |
9 | 8 | ||
@@ -320,8 +319,6 @@ struct mpic | |||
320 | /* link */ | 319 | /* link */ |
321 | struct mpic *next; | 320 | struct mpic *next; |
322 | 321 | ||
323 | struct sys_device sysdev; | ||
324 | |||
325 | #ifdef CONFIG_PM | 322 | #ifdef CONFIG_PM |
326 | struct mpic_irq_save *save_data; | 323 | struct mpic_irq_save *save_data; |
327 | #endif | 324 | #endif |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index cbdbb14be4b0..9f9c204bef69 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -116,7 +116,7 @@ void smp_message_recv(int msg) | |||
116 | generic_smp_call_function_interrupt(); | 116 | generic_smp_call_function_interrupt(); |
117 | break; | 117 | break; |
118 | case PPC_MSG_RESCHEDULE: | 118 | case PPC_MSG_RESCHEDULE: |
119 | /* we notice need_resched on exit */ | 119 | scheduler_ipi(); |
120 | break; | 120 | break; |
121 | case PPC_MSG_CALL_FUNC_SINGLE: | 121 | case PPC_MSG_CALL_FUNC_SINGLE: |
122 | generic_smp_call_function_single_interrupt(); | 122 | generic_smp_call_function_single_interrupt(); |
@@ -146,7 +146,7 @@ static irqreturn_t call_function_action(int irq, void *data) | |||
146 | 146 | ||
147 | static irqreturn_t reschedule_action(int irq, void *data) | 147 | static irqreturn_t reschedule_action(int irq, void *data) |
148 | { | 148 | { |
149 | /* we just need the return path side effect of checking need_resched */ | 149 | scheduler_ipi(); |
150 | return IRQ_HANDLED; | 150 | return IRQ_HANDLED; |
151 | } | 151 | } |
152 | 152 | ||
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 5ddb801bc154..d782cd71c07c 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -143,7 +143,6 @@ int die(const char *str, struct pt_regs *regs, long err) | |||
143 | #endif | 143 | #endif |
144 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); | 144 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); |
145 | 145 | ||
146 | sysfs_printk_last_file(); | ||
147 | if (notify_die(DIE_OOPS, str, regs, err, 255, | 146 | if (notify_die(DIE_OOPS, str, regs, err, 255, |
148 | SIGSEGV) == NOTIFY_STOP) | 147 | SIGSEGV) == NOTIFY_STOP) |
149 | return 1; | 148 | return 1; |
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 188272934cfb..104faa8aa23c 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c | |||
@@ -318,17 +318,20 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = { | |||
318 | .end = mpc83xx_suspend_end, | 318 | .end = mpc83xx_suspend_end, |
319 | }; | 319 | }; |
320 | 320 | ||
321 | static struct of_device_id pmc_match[]; | ||
321 | static int pmc_probe(struct platform_device *ofdev) | 322 | static int pmc_probe(struct platform_device *ofdev) |
322 | { | 323 | { |
324 | const struct of_device_id *match; | ||
323 | struct device_node *np = ofdev->dev.of_node; | 325 | struct device_node *np = ofdev->dev.of_node; |
324 | struct resource res; | 326 | struct resource res; |
325 | struct pmc_type *type; | 327 | struct pmc_type *type; |
326 | int ret = 0; | 328 | int ret = 0; |
327 | 329 | ||
328 | if (!ofdev->dev.of_match) | 330 | match = of_match_device(pmc_match, &ofdev->dev); |
331 | if (!match) | ||
329 | return -EINVAL; | 332 | return -EINVAL; |
330 | 333 | ||
331 | type = ofdev->dev.of_match->data; | 334 | type = match->data; |
332 | 335 | ||
333 | if (!of_device_is_available(np)) | 336 | if (!of_device_is_available(np)) |
334 | return -ENODEV; | 337 | return -ENODEV; |
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index acfaccea5f4f..3675da73623f 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/io.h> | 32 | #include <linux/io.h> |
33 | #include <linux/mutex.h> | 33 | #include <linux/mutex.h> |
34 | #include <linux/linux_logo.h> | 34 | #include <linux/linux_logo.h> |
35 | #include <linux/syscore_ops.h> | ||
35 | #include <asm/spu.h> | 36 | #include <asm/spu.h> |
36 | #include <asm/spu_priv1.h> | 37 | #include <asm/spu_priv1.h> |
37 | #include <asm/spu_csa.h> | 38 | #include <asm/spu_csa.h> |
@@ -521,18 +522,8 @@ void spu_init_channels(struct spu *spu) | |||
521 | } | 522 | } |
522 | EXPORT_SYMBOL_GPL(spu_init_channels); | 523 | EXPORT_SYMBOL_GPL(spu_init_channels); |
523 | 524 | ||
524 | static int spu_shutdown(struct sys_device *sysdev) | ||
525 | { | ||
526 | struct spu *spu = container_of(sysdev, struct spu, sysdev); | ||
527 | |||
528 | spu_free_irqs(spu); | ||
529 | spu_destroy_spu(spu); | ||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | static struct sysdev_class spu_sysdev_class = { | 525 | static struct sysdev_class spu_sysdev_class = { |
534 | .name = "spu", | 526 | .name = "spu", |
535 | .shutdown = spu_shutdown, | ||
536 | }; | 527 | }; |
537 | 528 | ||
538 | int spu_add_sysdev_attr(struct sysdev_attribute *attr) | 529 | int spu_add_sysdev_attr(struct sysdev_attribute *attr) |
@@ -797,6 +788,22 @@ static inline void crash_register_spus(struct list_head *list) | |||
797 | } | 788 | } |
798 | #endif | 789 | #endif |
799 | 790 | ||
791 | static void spu_shutdown(void) | ||
792 | { | ||
793 | struct spu *spu; | ||
794 | |||
795 | mutex_lock(&spu_full_list_mutex); | ||
796 | list_for_each_entry(spu, &spu_full_list, full_list) { | ||
797 | spu_free_irqs(spu); | ||
798 | spu_destroy_spu(spu); | ||
799 | } | ||
800 | mutex_unlock(&spu_full_list_mutex); | ||
801 | } | ||
802 | |||
803 | static struct syscore_ops spu_syscore_ops = { | ||
804 | .shutdown = spu_shutdown, | ||
805 | }; | ||
806 | |||
800 | static int __init init_spu_base(void) | 807 | static int __init init_spu_base(void) |
801 | { | 808 | { |
802 | int i, ret = 0; | 809 | int i, ret = 0; |
@@ -830,6 +837,7 @@ static int __init init_spu_base(void) | |||
830 | crash_register_spus(&spu_full_list); | 837 | crash_register_spus(&spu_full_list); |
831 | mutex_unlock(&spu_full_list_mutex); | 838 | mutex_unlock(&spu_full_list_mutex); |
832 | spu_add_sysdev_attr(&attr_stat); | 839 | spu_add_sysdev_attr(&attr_stat); |
840 | register_syscore_ops(&spu_syscore_ops); | ||
833 | 841 | ||
834 | spu_init_affinity(); | 842 | spu_init_affinity(); |
835 | 843 | ||
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 023f24086a0a..7c18a1607d1c 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/signal.h> | 21 | #include <linux/signal.h> |
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/sysdev.h> | 24 | #include <linux/syscore_ops.h> |
25 | #include <linux/adb.h> | 25 | #include <linux/adb.h> |
26 | #include <linux/pmu.h> | 26 | #include <linux/pmu.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
@@ -677,7 +677,7 @@ not_found: | |||
677 | return viaint; | 677 | return viaint; |
678 | } | 678 | } |
679 | 679 | ||
680 | static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) | 680 | static int pmacpic_suspend(void) |
681 | { | 681 | { |
682 | int viaint = pmacpic_find_viaint(); | 682 | int viaint = pmacpic_find_viaint(); |
683 | 683 | ||
@@ -698,7 +698,7 @@ static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) | |||
698 | return 0; | 698 | return 0; |
699 | } | 699 | } |
700 | 700 | ||
701 | static int pmacpic_resume(struct sys_device *sysdev) | 701 | static void pmacpic_resume(void) |
702 | { | 702 | { |
703 | int i; | 703 | int i; |
704 | 704 | ||
@@ -709,39 +709,19 @@ static int pmacpic_resume(struct sys_device *sysdev) | |||
709 | for (i = 0; i < max_real_irqs; ++i) | 709 | for (i = 0; i < max_real_irqs; ++i) |
710 | if (test_bit(i, sleep_save_mask)) | 710 | if (test_bit(i, sleep_save_mask)) |
711 | pmac_unmask_irq(irq_get_irq_data(i)); | 711 | pmac_unmask_irq(irq_get_irq_data(i)); |
712 | |||
713 | return 0; | ||
714 | } | 712 | } |
715 | 713 | ||
716 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | 714 | static struct syscore_ops pmacpic_syscore_ops = { |
717 | 715 | .suspend = pmacpic_suspend, | |
718 | static struct sysdev_class pmacpic_sysclass = { | 716 | .resume = pmacpic_resume, |
719 | .name = "pmac_pic", | ||
720 | }; | 717 | }; |
721 | 718 | ||
722 | static struct sys_device device_pmacpic = { | 719 | static int __init init_pmacpic_syscore(void) |
723 | .id = 0, | ||
724 | .cls = &pmacpic_sysclass, | ||
725 | }; | ||
726 | |||
727 | static struct sysdev_driver driver_pmacpic = { | ||
728 | #if defined(CONFIG_PM) && defined(CONFIG_PPC32) | ||
729 | .suspend = &pmacpic_suspend, | ||
730 | .resume = &pmacpic_resume, | ||
731 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | ||
732 | }; | ||
733 | |||
734 | static int __init init_pmacpic_sysfs(void) | ||
735 | { | 720 | { |
736 | #ifdef CONFIG_PPC32 | 721 | register_syscore_ops(&pmacpic_syscore_ops); |
737 | if (max_irqs == 0) | ||
738 | return -ENODEV; | ||
739 | #endif | ||
740 | printk(KERN_DEBUG "Registering pmac pic with sysfs...\n"); | ||
741 | sysdev_class_register(&pmacpic_sysclass); | ||
742 | sysdev_register(&device_pmacpic); | ||
743 | sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic); | ||
744 | return 0; | 722 | return 0; |
745 | } | 723 | } |
746 | machine_subsys_initcall(powermac, init_pmacpic_sysfs); | ||
747 | 724 | ||
725 | machine_subsys_initcall(powermac, init_pmacpic_syscore); | ||
726 | |||
727 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | ||
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index d5679dc1e20f..01cd2f089512 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
@@ -304,8 +304,10 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi, | |||
304 | return 0; | 304 | return 0; |
305 | } | 305 | } |
306 | 306 | ||
307 | static const struct of_device_id fsl_of_msi_ids[]; | ||
307 | static int __devinit fsl_of_msi_probe(struct platform_device *dev) | 308 | static int __devinit fsl_of_msi_probe(struct platform_device *dev) |
308 | { | 309 | { |
310 | const struct of_device_id *match; | ||
309 | struct fsl_msi *msi; | 311 | struct fsl_msi *msi; |
310 | struct resource res; | 312 | struct resource res; |
311 | int err, i, j, irq_index, count; | 313 | int err, i, j, irq_index, count; |
@@ -316,9 +318,10 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev) | |||
316 | u32 offset; | 318 | u32 offset; |
317 | static const u32 all_avail[] = { 0, NR_MSI_IRQS }; | 319 | static const u32 all_avail[] = { 0, NR_MSI_IRQS }; |
318 | 320 | ||
319 | if (!dev->dev.of_match) | 321 | match = of_match_device(fsl_of_msi_ids, &dev->dev); |
322 | if (!match) | ||
320 | return -EINVAL; | 323 | return -EINVAL; |
321 | features = dev->dev.of_match->data; | 324 | features = match->data; |
322 | 325 | ||
323 | printk(KERN_DEBUG "Setting up Freescale MSI support\n"); | 326 | printk(KERN_DEBUG "Setting up Freescale MSI support\n"); |
324 | 327 | ||
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index fa438be962b7..596554a8725e 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/stddef.h> | 18 | #include <linux/stddef.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/signal.h> | 20 | #include <linux/signal.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/syscore_ops.h> |
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
@@ -902,7 +902,7 @@ static struct { | |||
902 | u32 sercr; | 902 | u32 sercr; |
903 | } ipic_saved_state; | 903 | } ipic_saved_state; |
904 | 904 | ||
905 | static int ipic_suspend(struct sys_device *sdev, pm_message_t state) | 905 | static int ipic_suspend(void) |
906 | { | 906 | { |
907 | struct ipic *ipic = primary_ipic; | 907 | struct ipic *ipic = primary_ipic; |
908 | 908 | ||
@@ -933,7 +933,7 @@ static int ipic_suspend(struct sys_device *sdev, pm_message_t state) | |||
933 | return 0; | 933 | return 0; |
934 | } | 934 | } |
935 | 935 | ||
936 | static int ipic_resume(struct sys_device *sdev) | 936 | static void ipic_resume(void) |
937 | { | 937 | { |
938 | struct ipic *ipic = primary_ipic; | 938 | struct ipic *ipic = primary_ipic; |
939 | 939 | ||
@@ -949,44 +949,26 @@ static int ipic_resume(struct sys_device *sdev) | |||
949 | ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); | 949 | ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); |
950 | ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); | 950 | ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); |
951 | ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); | 951 | ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); |
952 | |||
953 | return 0; | ||
954 | } | 952 | } |
955 | #else | 953 | #else |
956 | #define ipic_suspend NULL | 954 | #define ipic_suspend NULL |
957 | #define ipic_resume NULL | 955 | #define ipic_resume NULL |
958 | #endif | 956 | #endif |
959 | 957 | ||
960 | static struct sysdev_class ipic_sysclass = { | 958 | static struct syscore_ops ipic_syscore_ops = { |
961 | .name = "ipic", | ||
962 | .suspend = ipic_suspend, | 959 | .suspend = ipic_suspend, |
963 | .resume = ipic_resume, | 960 | .resume = ipic_resume, |
964 | }; | 961 | }; |
965 | 962 | ||
966 | static struct sys_device device_ipic = { | 963 | static int __init init_ipic_syscore(void) |
967 | .id = 0, | ||
968 | .cls = &ipic_sysclass, | ||
969 | }; | ||
970 | |||
971 | static int __init init_ipic_sysfs(void) | ||
972 | { | 964 | { |
973 | int rc; | ||
974 | |||
975 | if (!primary_ipic || !primary_ipic->regs) | 965 | if (!primary_ipic || !primary_ipic->regs) |
976 | return -ENODEV; | 966 | return -ENODEV; |
977 | printk(KERN_DEBUG "Registering ipic with sysfs...\n"); | ||
978 | 967 | ||
979 | rc = sysdev_class_register(&ipic_sysclass); | 968 | printk(KERN_DEBUG "Registering ipic system core operations\n"); |
980 | if (rc) { | 969 | register_syscore_ops(&ipic_syscore_ops); |
981 | printk(KERN_ERR "Failed registering ipic sys class\n"); | 970 | |
982 | return -ENODEV; | ||
983 | } | ||
984 | rc = sysdev_register(&device_ipic); | ||
985 | if (rc) { | ||
986 | printk(KERN_ERR "Failed registering ipic sys device\n"); | ||
987 | return -ENODEV; | ||
988 | } | ||
989 | return 0; | 971 | return 0; |
990 | } | 972 | } |
991 | 973 | ||
992 | subsys_initcall(init_ipic_sysfs); | 974 | subsys_initcall(init_ipic_syscore); |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index f91c065bed5a..7e5dc8f4984a 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/syscore_ops.h> | ||
30 | 31 | ||
31 | #include <asm/ptrace.h> | 32 | #include <asm/ptrace.h> |
32 | #include <asm/signal.h> | 33 | #include <asm/signal.h> |
@@ -1702,9 +1703,8 @@ void mpic_reset_core(int cpu) | |||
1702 | #endif /* CONFIG_SMP */ | 1703 | #endif /* CONFIG_SMP */ |
1703 | 1704 | ||
1704 | #ifdef CONFIG_PM | 1705 | #ifdef CONFIG_PM |
1705 | static int mpic_suspend(struct sys_device *dev, pm_message_t state) | 1706 | static void mpic_suspend_one(struct mpic *mpic) |
1706 | { | 1707 | { |
1707 | struct mpic *mpic = container_of(dev, struct mpic, sysdev); | ||
1708 | int i; | 1708 | int i; |
1709 | 1709 | ||
1710 | for (i = 0; i < mpic->num_sources; i++) { | 1710 | for (i = 0; i < mpic->num_sources; i++) { |
@@ -1713,13 +1713,22 @@ static int mpic_suspend(struct sys_device *dev, pm_message_t state) | |||
1713 | mpic->save_data[i].dest = | 1713 | mpic->save_data[i].dest = |
1714 | mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); | 1714 | mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); |
1715 | } | 1715 | } |
1716 | } | ||
1717 | |||
1718 | static int mpic_suspend(void) | ||
1719 | { | ||
1720 | struct mpic *mpic = mpics; | ||
1721 | |||
1722 | while (mpic) { | ||
1723 | mpic_suspend_one(mpic); | ||
1724 | mpic = mpic->next; | ||
1725 | } | ||
1716 | 1726 | ||
1717 | return 0; | 1727 | return 0; |
1718 | } | 1728 | } |
1719 | 1729 | ||
1720 | static int mpic_resume(struct sys_device *dev) | 1730 | static void mpic_resume_one(struct mpic *mpic) |
1721 | { | 1731 | { |
1722 | struct mpic *mpic = container_of(dev, struct mpic, sysdev); | ||
1723 | int i; | 1732 | int i; |
1724 | 1733 | ||
1725 | for (i = 0; i < mpic->num_sources; i++) { | 1734 | for (i = 0; i < mpic->num_sources; i++) { |
@@ -1746,33 +1755,28 @@ static int mpic_resume(struct sys_device *dev) | |||
1746 | } | 1755 | } |
1747 | #endif | 1756 | #endif |
1748 | } /* end for loop */ | 1757 | } /* end for loop */ |
1758 | } | ||
1749 | 1759 | ||
1750 | return 0; | 1760 | static void mpic_resume(void) |
1761 | { | ||
1762 | struct mpic *mpic = mpics; | ||
1763 | |||
1764 | while (mpic) { | ||
1765 | mpic_resume_one(mpic); | ||
1766 | mpic = mpic->next; | ||
1767 | } | ||
1751 | } | 1768 | } |
1752 | #endif | ||
1753 | 1769 | ||
1754 | static struct sysdev_class mpic_sysclass = { | 1770 | static struct syscore_ops mpic_syscore_ops = { |
1755 | #ifdef CONFIG_PM | ||
1756 | .resume = mpic_resume, | 1771 | .resume = mpic_resume, |
1757 | .suspend = mpic_suspend, | 1772 | .suspend = mpic_suspend, |
1758 | #endif | ||
1759 | .name = "mpic", | ||
1760 | }; | 1773 | }; |
1761 | 1774 | ||
1762 | static int mpic_init_sys(void) | 1775 | static int mpic_init_sys(void) |
1763 | { | 1776 | { |
1764 | struct mpic *mpic = mpics; | 1777 | register_syscore_ops(&mpic_syscore_ops); |
1765 | int error, id = 0; | 1778 | return 0; |
1766 | |||
1767 | error = sysdev_class_register(&mpic_sysclass); | ||
1768 | |||
1769 | while (mpic && !error) { | ||
1770 | mpic->sysdev.cls = &mpic_sysclass; | ||
1771 | mpic->sysdev.id = id++; | ||
1772 | error = sysdev_register(&mpic->sysdev); | ||
1773 | mpic = mpic->next; | ||
1774 | } | ||
1775 | return error; | ||
1776 | } | 1779 | } |
1777 | 1780 | ||
1778 | device_initcall(mpic_init_sys); | 1781 | device_initcall(mpic_init_sys); |
1782 | #endif | ||
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h index 43a5c78046db..3e20383d0921 100644 --- a/arch/s390/include/asm/cacheflush.h +++ b/arch/s390/include/asm/cacheflush.h | |||
@@ -11,5 +11,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable); | |||
11 | int set_memory_ro(unsigned long addr, int numpages); | 11 | int set_memory_ro(unsigned long addr, int numpages); |
12 | int set_memory_rw(unsigned long addr, int numpages); | 12 | int set_memory_rw(unsigned long addr, int numpages); |
13 | int set_memory_nx(unsigned long addr, int numpages); | 13 | int set_memory_nx(unsigned long addr, int numpages); |
14 | int set_memory_x(unsigned long addr, int numpages); | ||
14 | 15 | ||
15 | #endif /* _S390_CACHEFLUSH_H */ | 16 | #endif /* _S390_CACHEFLUSH_H */ |
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index 72b2e2f2d32d..7e91c58072e2 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h | |||
@@ -9,9 +9,22 @@ | |||
9 | #define _ASM_S390_DIAG_H | 9 | #define _ASM_S390_DIAG_H |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * Diagnose 10: Release pages | 12 | * Diagnose 10: Release page range |
13 | */ | 13 | */ |
14 | extern void diag10(unsigned long addr); | 14 | static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn) |
15 | { | ||
16 | unsigned long start_addr, end_addr; | ||
17 | |||
18 | start_addr = start_pfn << PAGE_SHIFT; | ||
19 | end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; | ||
20 | |||
21 | asm volatile( | ||
22 | "0: diag %0,%1,0x10\n" | ||
23 | "1:\n" | ||
24 | EX_TABLE(0b, 1b) | ||
25 | EX_TABLE(1b, 1b) | ||
26 | : : "a" (start_addr), "a" (end_addr)); | ||
27 | } | ||
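Note the interface change: the old diag10() took a single address, while the new inline releases an inclusive pfn range, converting pfns to byte addresses with PAGE_SHIFT. A hedged usage sketch (the caller and values are hypothetical, assuming 4 KB pages):

    /* give 256 pages starting at pfn 0x100 back to the hypervisor;
     * with 4 KB pages this covers bytes 0x100000 through 0x1fffff */
    diag10_range(0x100, 256);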
15 | 28 | ||
16 | /* | 29 | /* |
17 | * Diagnose 14: Input spool file manipulation | 30 | * Diagnose 14: Input spool file manipulation |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index a6f0e7cc9cde..8c277caa8d3a 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -23,7 +23,7 @@ static inline int init_new_context(struct task_struct *tsk, | |||
23 | #ifdef CONFIG_64BIT | 23 | #ifdef CONFIG_64BIT |
24 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; | 24 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; |
25 | #endif | 25 | #endif |
26 | if (current->mm->context.alloc_pgste) { | 26 | if (current->mm && current->mm->context.alloc_pgste) { |
27 | /* | 27 | /* |
28 | * alloc_pgste indicates, that any NEW context will be created | 28 | * alloc_pgste indicates, that any NEW context will be created |
29 | * with extended page tables. The old context is unchanged. The | 29 | * with extended page tables. The old context is unchanged. The |
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index c032d11da8a1..8237fc07ac79 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c | |||
@@ -9,27 +9,6 @@ | |||
9 | #include <asm/diag.h> | 9 | #include <asm/diag.h> |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * Diagnose 10: Release pages | ||
13 | */ | ||
14 | void diag10(unsigned long addr) | ||
15 | { | ||
16 | if (addr >= 0x7ff00000) | ||
17 | return; | ||
18 | asm volatile( | ||
19 | #ifdef CONFIG_64BIT | ||
20 | " sam31\n" | ||
21 | " diag %0,%0,0x10\n" | ||
22 | "0: sam64\n" | ||
23 | #else | ||
24 | " diag %0,%0,0x10\n" | ||
25 | "0:\n" | ||
26 | #endif | ||
27 | EX_TABLE(0b, 0b) | ||
28 | : : "a" (addr)); | ||
29 | } | ||
30 | EXPORT_SYMBOL(diag10); | ||
31 | |||
32 | /* | ||
33 | * Diagnose 14: Input spool file manipulation | 12 | * Diagnose 14: Input spool file manipulation |
34 | */ | 13 | */ |
35 | int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) | 14 | int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) |
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index c83726c9fe03..3d4a78fc1adc 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c | |||
@@ -672,6 +672,7 @@ static struct insn opcode_b2[] = { | |||
672 | { "rp", 0x77, INSTR_S_RD }, | 672 | { "rp", 0x77, INSTR_S_RD }, |
673 | { "stcke", 0x78, INSTR_S_RD }, | 673 | { "stcke", 0x78, INSTR_S_RD }, |
674 | { "sacf", 0x79, INSTR_S_RD }, | 674 | { "sacf", 0x79, INSTR_S_RD }, |
675 | { "spp", 0x80, INSTR_S_RD }, | ||
675 | { "stsi", 0x7d, INSTR_S_RD }, | 676 | { "stsi", 0x7d, INSTR_S_RD }, |
676 | { "srnm", 0x99, INSTR_S_RD }, | 677 | { "srnm", 0x99, INSTR_S_RD }, |
677 | { "stfpc", 0x9c, INSTR_S_RD }, | 678 | { "stfpc", 0x9c, INSTR_S_RD }, |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 648f64239a9d..1b67fc6ebdc2 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -836,7 +836,7 @@ restart_base: | |||
836 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on | 836 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on |
837 | basr %r14,0 | 837 | basr %r14,0 |
838 | l %r14,restart_addr-.(%r14) | 838 | l %r14,restart_addr-.(%r14) |
839 | br %r14 # branch to start_secondary | 839 | basr %r14,%r14 # branch to start_secondary |
840 | restart_addr: | 840 | restart_addr: |
841 | .long start_secondary | 841 | .long start_secondary |
842 | .align 8 | 842 | .align 8 |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 9d3603d6c511..9fd864563499 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -841,7 +841,7 @@ restart_base: | |||
841 | mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) | 841 | mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) |
842 | xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER | 842 | xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER |
843 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on | 843 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on |
844 | jg start_secondary | 844 | brasl %r14,start_secondary |
845 | .align 8 | 845 | .align 8 |
846 | restart_vtime: | 846 | restart_vtime: |
847 | .long 0x7fffffff,0xffffffff | 847 | .long 0x7fffffff,0xffffffff |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 63a97db83f96..63c7d9ff220d 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -165,12 +165,12 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, | |||
165 | kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++; | 165 | kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++; |
166 | /* | 166 | /* |
167 | * handle bit signal external calls | 167 | * handle bit signal external calls |
168 | * | ||
169 | * For the ec_schedule signal we have to do nothing. All the work | ||
170 | * is done automatically when we return from the interrupt. | ||
171 | */ | 168 | */ |
172 | bits = xchg(&S390_lowcore.ext_call_fast, 0); | 169 | bits = xchg(&S390_lowcore.ext_call_fast, 0); |
173 | 170 | ||
171 | if (test_bit(ec_schedule, &bits)) | ||
172 | scheduler_ipi(); | ||
173 | |||
174 | if (test_bit(ec_call_function, &bits)) | 174 | if (test_bit(ec_call_function, &bits)) |
175 | generic_smp_call_function_interrupt(); | 175 | generic_smp_call_function_interrupt(); |
176 | 176 | ||
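This s390 hunk is the first of several in this section (sh, sparc64, tile and UML follow) that make the reschedule IPI call scheduler_ipi() explicitly rather than relying on rescheduling work done on the return-from-interrupt path. The common shape, sketched with an illustrative handler name:

    #include <linux/interrupt.h>
    #include <linux/sched.h>        /* declares scheduler_ipi() */

    static irqreturn_t resched_ipi(int irq, void *dev_id)
    {
            scheduler_ipi();        /* let the scheduler see the wakeup IPI */
            return IRQ_HANDLED;
    }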
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index c66ffd8dbbb7..1f1dba9dcf58 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c | |||
@@ -91,7 +91,7 @@ static long cmm_alloc_pages(long nr, long *counter, | |||
91 | } else | 91 | } else |
92 | free_page((unsigned long) npa); | 92 | free_page((unsigned long) npa); |
93 | } | 93 | } |
94 | diag10(addr); | 94 | diag10_range(addr >> PAGE_SHIFT, 1); |
95 | pa->pages[pa->index++] = addr; | 95 | pa->pages[pa->index++] = addr; |
96 | (*counter)++; | 96 | (*counter)++; |
97 | spin_unlock(&cmm_lock); | 97 | spin_unlock(&cmm_lock); |
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 0607e4b14b27..f05edcc3beff 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c | |||
@@ -54,3 +54,8 @@ int set_memory_nx(unsigned long addr, int numpages) | |||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | EXPORT_SYMBOL_GPL(set_memory_nx); | 56 | EXPORT_SYMBOL_GPL(set_memory_nx); |
57 | |||
58 | int set_memory_x(unsigned long addr, int numpages) | ||
59 | { | ||
60 | return 0; | ||
61 | } | ||
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 4952872d6f0a..33cbd373cce4 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c | |||
@@ -1021,20 +1021,14 @@ deallocate_exit: | |||
1021 | return rc; | 1021 | return rc; |
1022 | } | 1022 | } |
1023 | 1023 | ||
1024 | long hwsampler_query_min_interval(void) | 1024 | unsigned long hwsampler_query_min_interval(void) |
1025 | { | 1025 | { |
1026 | if (min_sampler_rate) | 1026 | return min_sampler_rate; |
1027 | return min_sampler_rate; | ||
1028 | else | ||
1029 | return -EINVAL; | ||
1030 | } | 1027 | } |
1031 | 1028 | ||
1032 | long hwsampler_query_max_interval(void) | 1029 | unsigned long hwsampler_query_max_interval(void) |
1033 | { | 1030 | { |
1034 | if (max_sampler_rate) | 1031 | return max_sampler_rate; |
1035 | return max_sampler_rate; | ||
1036 | else | ||
1037 | return -EINVAL; | ||
1038 | } | 1032 | } |
1039 | 1033 | ||
1040 | unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) | 1034 | unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) |
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h index 8c72b59316b5..1912f3bb190c 100644 --- a/arch/s390/oprofile/hwsampler.h +++ b/arch/s390/oprofile/hwsampler.h | |||
@@ -102,8 +102,8 @@ int hwsampler_setup(void); | |||
102 | int hwsampler_shutdown(void); | 102 | int hwsampler_shutdown(void); |
103 | int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); | 103 | int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); |
104 | int hwsampler_deallocate(void); | 104 | int hwsampler_deallocate(void); |
105 | long hwsampler_query_min_interval(void); | 105 | unsigned long hwsampler_query_min_interval(void); |
106 | long hwsampler_query_max_interval(void); | 106 | unsigned long hwsampler_query_max_interval(void); |
107 | int hwsampler_start_all(unsigned long interval); | 107 | int hwsampler_start_all(unsigned long interval); |
108 | int hwsampler_stop_all(void); | 108 | int hwsampler_stop_all(void); |
109 | int hwsampler_deactivate(unsigned int cpu); | 109 | int hwsampler_deactivate(unsigned int cpu); |
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index c63d7e58352b..5995e9bc72d9 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c | |||
@@ -145,15 +145,11 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) | |||
145 | * create hwsampler files only if hwsampler_setup() succeeds. | 145 | * create hwsampler files only if hwsampler_setup() succeeds. |
146 | */ | 146 | */ |
147 | oprofile_min_interval = hwsampler_query_min_interval(); | 147 | oprofile_min_interval = hwsampler_query_min_interval(); |
148 | if (oprofile_min_interval < 0) { | 148 | if (oprofile_min_interval == 0) |
149 | oprofile_min_interval = 0; | ||
150 | return -ENODEV; | 149 | return -ENODEV; |
151 | } | ||
152 | oprofile_max_interval = hwsampler_query_max_interval(); | 150 | oprofile_max_interval = hwsampler_query_max_interval(); |
153 | if (oprofile_max_interval < 0) { | 151 | if (oprofile_max_interval == 0) |
154 | oprofile_max_interval = 0; | ||
155 | return -ENODEV; | 152 | return -ENODEV; |
156 | } | ||
157 | 153 | ||
158 | if (oprofile_timer_init(ops)) | 154 | if (oprofile_timer_init(ops)) |
159 | return -ENODEV; | 155 | return -ENODEV; |
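Paired with the hwsampler.c change above, this hunk moves the interval queries from a signed long that returned -EINVAL to an unsigned long where 0 is the "not available" sentinel, so callers test for zero instead of a negative value:

    unsigned long min = hwsampler_query_min_interval();

    if (min == 0)           /* 0 now means: no hardware sampling support */
            return -ENODEV; /* previously: if (min < 0) */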
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 4b89da248d17..bc439de48cd1 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -24,7 +24,6 @@ config SUPERH | |||
24 | select RTC_LIB | 24 | select RTC_LIB |
25 | select GENERIC_ATOMIC64 | 25 | select GENERIC_ATOMIC64 |
26 | select GENERIC_IRQ_SHOW | 26 | select GENERIC_IRQ_SHOW |
27 | select ARCH_NO_SYSDEV_OPS | ||
28 | help | 27 | help |
29 | The SuperH is a RISC processor targeted for use in embedded systems | 28 | The SuperH is a RISC processor targeted for use in embedded systems |
30 | and consumer electronics; it was also used in the Sega Dreamcast | 29 | and consumer electronics; it was also used in the Sega Dreamcast |
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig index e71a531f1e31..77ec0e7b8ddf 100644 --- a/arch/sh/configs/apsh4ad0a_defconfig +++ b/arch/sh/configs/apsh4ad0a_defconfig | |||
@@ -48,7 +48,6 @@ CONFIG_PREEMPT=y | |||
48 | CONFIG_BINFMT_MISC=y | 48 | CONFIG_BINFMT_MISC=y |
49 | CONFIG_PM=y | 49 | CONFIG_PM=y |
50 | CONFIG_PM_DEBUG=y | 50 | CONFIG_PM_DEBUG=y |
51 | CONFIG_PM_VERBOSE=y | ||
52 | CONFIG_PM_RUNTIME=y | 51 | CONFIG_PM_RUNTIME=y |
53 | CONFIG_CPU_IDLE=y | 52 | CONFIG_CPU_IDLE=y |
54 | CONFIG_NET=y | 53 | CONFIG_NET=y |
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig index dc4a2eb6a616..c41650572d79 100644 --- a/arch/sh/configs/sdk7786_defconfig +++ b/arch/sh/configs/sdk7786_defconfig | |||
@@ -83,7 +83,6 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y | |||
83 | CONFIG_BINFMT_MISC=y | 83 | CONFIG_BINFMT_MISC=y |
84 | CONFIG_PM=y | 84 | CONFIG_PM=y |
85 | CONFIG_PM_DEBUG=y | 85 | CONFIG_PM_DEBUG=y |
86 | CONFIG_PM_VERBOSE=y | ||
87 | CONFIG_PM_RUNTIME=y | 86 | CONFIG_PM_RUNTIME=y |
88 | CONFIG_CPU_IDLE=y | 87 | CONFIG_CPU_IDLE=y |
89 | CONFIG_NET=y | 88 | CONFIG_NET=y |
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c index 6dcb8166a64d..22db127afa7b 100644 --- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c +++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c | |||
@@ -139,7 +139,7 @@ void platform_pm_runtime_suspend_idle(void) | |||
139 | queue_work(pm_wq, &hwblk_work); | 139 | queue_work(pm_wq, &hwblk_work); |
140 | } | 140 | } |
141 | 141 | ||
142 | int platform_pm_runtime_suspend(struct device *dev) | 142 | static int default_platform_runtime_suspend(struct device *dev) |
143 | { | 143 | { |
144 | struct platform_device *pdev = to_platform_device(dev); | 144 | struct platform_device *pdev = to_platform_device(dev); |
145 | struct pdev_archdata *ad = &pdev->archdata; | 145 | struct pdev_archdata *ad = &pdev->archdata; |
@@ -147,7 +147,7 @@ int platform_pm_runtime_suspend(struct device *dev) | |||
147 | int hwblk = ad->hwblk_id; | 147 | int hwblk = ad->hwblk_id; |
148 | int ret = 0; | 148 | int ret = 0; |
149 | 149 | ||
150 | dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk); | 150 | dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); |
151 | 151 | ||
152 | /* ignore off-chip platform devices */ | 152 | /* ignore off-chip platform devices */ |
153 | if (!hwblk) | 153 | if (!hwblk) |
@@ -183,20 +183,20 @@ int platform_pm_runtime_suspend(struct device *dev) | |||
183 | mutex_unlock(&ad->mutex); | 183 | mutex_unlock(&ad->mutex); |
184 | 184 | ||
185 | out: | 185 | out: |
186 | dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n", | 186 | dev_dbg(dev, "%s() [%d] returns %d\n", |
187 | hwblk, ret); | 187 | __func__, hwblk, ret); |
188 | 188 | ||
189 | return ret; | 189 | return ret; |
190 | } | 190 | } |
191 | 191 | ||
192 | int platform_pm_runtime_resume(struct device *dev) | 192 | static int default_platform_runtime_resume(struct device *dev) |
193 | { | 193 | { |
194 | struct platform_device *pdev = to_platform_device(dev); | 194 | struct platform_device *pdev = to_platform_device(dev); |
195 | struct pdev_archdata *ad = &pdev->archdata; | 195 | struct pdev_archdata *ad = &pdev->archdata; |
196 | int hwblk = ad->hwblk_id; | 196 | int hwblk = ad->hwblk_id; |
197 | int ret = 0; | 197 | int ret = 0; |
198 | 198 | ||
199 | dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk); | 199 | dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); |
200 | 200 | ||
201 | /* ignore off-chip platform devices */ | 201 | /* ignore off-chip platform devices */ |
202 | if (!hwblk) | 202 | if (!hwblk) |
@@ -228,19 +228,19 @@ int platform_pm_runtime_resume(struct device *dev) | |||
228 | */ | 228 | */ |
229 | mutex_unlock(&ad->mutex); | 229 | mutex_unlock(&ad->mutex); |
230 | out: | 230 | out: |
231 | dev_dbg(dev, "platform_pm_runtime_resume() [%d] returns %d\n", | 231 | dev_dbg(dev, "%s() [%d] returns %d\n", |
232 | hwblk, ret); | 232 | __func__, hwblk, ret); |
233 | 233 | ||
234 | return ret; | 234 | return ret; |
235 | } | 235 | } |
236 | 236 | ||
237 | int platform_pm_runtime_idle(struct device *dev) | 237 | static int default_platform_runtime_idle(struct device *dev) |
238 | { | 238 | { |
239 | struct platform_device *pdev = to_platform_device(dev); | 239 | struct platform_device *pdev = to_platform_device(dev); |
240 | int hwblk = pdev->archdata.hwblk_id; | 240 | int hwblk = pdev->archdata.hwblk_id; |
241 | int ret = 0; | 241 | int ret = 0; |
242 | 242 | ||
243 | dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk); | 243 | dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); |
244 | 244 | ||
245 | /* ignore off-chip platform devices */ | 245 | /* ignore off-chip platform devices */ |
246 | if (!hwblk) | 246 | if (!hwblk) |
@@ -252,10 +252,19 @@ int platform_pm_runtime_idle(struct device *dev) | |||
252 | /* suspend synchronously to disable clocks immediately */ | 252 | /* suspend synchronously to disable clocks immediately */ |
253 | ret = pm_runtime_suspend(dev); | 253 | ret = pm_runtime_suspend(dev); |
254 | out: | 254 | out: |
255 | dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk); | 255 | dev_dbg(dev, "%s() [%d] done!\n", __func__, hwblk); |
256 | return ret; | 256 | return ret; |
257 | } | 257 | } |
258 | 258 | ||
259 | static struct dev_power_domain default_power_domain = { | ||
260 | .ops = { | ||
261 | .runtime_suspend = default_platform_runtime_suspend, | ||
262 | .runtime_resume = default_platform_runtime_resume, | ||
263 | .runtime_idle = default_platform_runtime_idle, | ||
264 | USE_PLATFORM_PM_SLEEP_OPS | ||
265 | }, | ||
266 | }; | ||
267 | |||
259 | static int platform_bus_notify(struct notifier_block *nb, | 268 | static int platform_bus_notify(struct notifier_block *nb, |
260 | unsigned long action, void *data) | 269 | unsigned long action, void *data) |
261 | { | 270 | { |
@@ -276,6 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb, | |||
276 | hwblk_disable(hwblk_info, hwblk); | 285 | hwblk_disable(hwblk_info, hwblk); |
277 | /* make sure driver re-inits itself once */ | 286 | /* make sure driver re-inits itself once */ |
278 | __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); | 287 | __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); |
288 | dev->pwr_domain = &default_power_domain; | ||
279 | break; | 289 | break; |
280 | /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ | 290 | /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ |
281 | case BUS_NOTIFY_BOUND_DRIVER: | 291 | case BUS_NOTIFY_BOUND_DRIVER: |
@@ -289,6 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb, | |||
289 | __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); | 299 | __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); |
290 | break; | 300 | break; |
291 | case BUS_NOTIFY_DEL_DEVICE: | 301 | case BUS_NOTIFY_DEL_DEVICE: |
302 | dev->pwr_domain = NULL; | ||
292 | break; | 303 | break; |
293 | } | 304 | } |
294 | return 0; | 305 | return 0; |
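With the callbacks now static, the SH runtime-PM hooks reach the PM core through a dev_power_domain installed from the bus notifier at BUS_NOTIFY_ADD_DEVICE and cleared at BUS_NOTIFY_DEL_DEVICE. A simplified sketch of why that works: when a device carries a power domain, the core's runtime paths prefer the domain's ops over the bus type's (paraphrased from the 2.6.39-era PM core, not code from this patch):

    if (dev->pwr_domain)
            ret = dev->pwr_domain->ops.runtime_suspend(dev);
    else if (dev->bus && dev->bus->pm)
            ret = dev->bus->pm->runtime_suspend(dev);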
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 509b36b45115..6207561ea34a 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/cpu.h> | 21 | #include <linux/cpu.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/sched.h> | ||
23 | #include <asm/atomic.h> | 24 | #include <asm/atomic.h> |
24 | #include <asm/processor.h> | 25 | #include <asm/processor.h> |
25 | #include <asm/system.h> | 26 | #include <asm/system.h> |
@@ -323,6 +324,7 @@ void smp_message_recv(unsigned int msg) | |||
323 | generic_smp_call_function_interrupt(); | 324 | generic_smp_call_function_interrupt(); |
324 | break; | 325 | break; |
325 | case SMP_MSG_RESCHEDULE: | 326 | case SMP_MSG_RESCHEDULE: |
327 | scheduler_ipi(); | ||
326 | break; | 328 | break; |
327 | case SMP_MSG_FUNCTION_SINGLE: | 329 | case SMP_MSG_FUNCTION_SINGLE: |
328 | generic_smp_call_function_single_interrupt(); | 330 | generic_smp_call_function_single_interrupt(); |
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 3484c2f65aba..b51a17104b5f 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c | |||
@@ -87,7 +87,6 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
87 | bust_spinlocks(1); | 87 | bust_spinlocks(1); |
88 | 88 | ||
89 | printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); | 89 | printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); |
90 | sysfs_printk_last_file(); | ||
91 | print_modules(); | 90 | print_modules(); |
92 | show_regs(regs); | 91 | show_regs(regs); |
93 | 92 | ||
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h index 1c79f32734a0..8b9c556d630b 100644 --- a/arch/sparc/include/asm/topology_64.h +++ b/arch/sparc/include/asm/topology_64.h | |||
@@ -65,6 +65,10 @@ static inline int pcibus_to_node(struct pci_bus *pbus) | |||
65 | #define smt_capable() (sparc64_multi_core) | 65 | #define smt_capable() (sparc64_multi_core) |
66 | #endif /* CONFIG_SMP */ | 66 | #endif /* CONFIG_SMP */ |
67 | 67 | ||
68 | #define cpu_coregroup_mask(cpu) (&cpu_core_map[cpu]) | 68 | extern cpumask_t cpu_core_map[NR_CPUS]; |
69 | static inline const struct cpumask *cpu_coregroup_mask(int cpu) | ||
70 | { | ||
71 | return &cpu_core_map[cpu]; | ||
72 | } | ||
69 | 73 | ||
70 | #endif /* _ASM_SPARC64_TOPOLOGY_H */ | 74 | #endif /* _ASM_SPARC64_TOPOLOGY_H */ |
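Turning the cpu_coregroup_mask() macro into a static inline lets the compiler type-check the cpu argument and fixes the return type to const struct cpumask *; the extern for cpu_core_map moves into the header so the inline is self-contained. For example (sketch):

    const struct cpumask *mask = cpu_coregroup_mask(smp_processor_id());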
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index f679c57644d5..1e34f29e58bb 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c | |||
@@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op) | |||
165 | return 0; | 165 | return 0; |
166 | } | 166 | } |
167 | 167 | ||
168 | static struct of_device_id __initdata apc_match[] = { | 168 | static struct of_device_id apc_match[] = { |
169 | { | 169 | { |
170 | .name = APC_OBPNAME, | 170 | .name = APC_OBPNAME, |
171 | }, | 171 | }, |
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c index 948068a083fc..d1840dbdaa2f 100644 --- a/arch/sparc/kernel/pci_sabre.c +++ b/arch/sparc/kernel/pci_sabre.c | |||
@@ -452,8 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm, | |||
452 | sabre_scan_bus(pbm, &op->dev); | 452 | sabre_scan_bus(pbm, &op->dev); |
453 | } | 453 | } |
454 | 454 | ||
455 | static const struct of_device_id sabre_match[]; | ||
455 | static int __devinit sabre_probe(struct platform_device *op) | 456 | static int __devinit sabre_probe(struct platform_device *op) |
456 | { | 457 | { |
458 | const struct of_device_id *match; | ||
457 | const struct linux_prom64_registers *pr_regs; | 459 | const struct linux_prom64_registers *pr_regs; |
458 | struct device_node *dp = op->dev.of_node; | 460 | struct device_node *dp = op->dev.of_node; |
459 | struct pci_pbm_info *pbm; | 461 | struct pci_pbm_info *pbm; |
@@ -463,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op) | |||
463 | const u32 *vdma; | 465 | const u32 *vdma; |
464 | u64 clear_irq; | 466 | u64 clear_irq; |
465 | 467 | ||
466 | hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL); | 468 | match = of_match_device(sabre_match, &op->dev); |
469 | hummingbird_p = match && (match->data != NULL); | ||
467 | if (!hummingbird_p) { | 470 | if (!hummingbird_p) { |
468 | struct device_node *cpu_dp; | 471 | struct device_node *cpu_dp; |
469 | 472 | ||
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index fecfcb2063c8..283fbc329a43 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c | |||
@@ -1458,11 +1458,15 @@ out_err: | |||
1458 | return err; | 1458 | return err; |
1459 | } | 1459 | } |
1460 | 1460 | ||
1461 | static const struct of_device_id schizo_match[]; | ||
1461 | static int __devinit schizo_probe(struct platform_device *op) | 1462 | static int __devinit schizo_probe(struct platform_device *op) |
1462 | { | 1463 | { |
1463 | if (!op->dev.of_match) | 1464 | const struct of_device_id *match; |
1465 | |||
1466 | match = of_match_device(schizo_match, &op->dev); | ||
1467 | if (!match) | ||
1464 | return -EINVAL; | 1468 | return -EINVAL; |
1465 | return __schizo_init(op, (unsigned long) op->dev.of_match->data); | 1469 | return __schizo_init(op, (unsigned long)match->data); |
1466 | } | 1470 | } |
1467 | 1471 | ||
1468 | /* The ordering of this table is very important. Some Tomatillo | 1472 | /* The ordering of this table is very important. Some Tomatillo |
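The sabre and schizo hunks follow one pattern: the cached op->dev.of_match pointer is dropped and each probe routine looks the match up itself with of_match_device(), a forward declaration letting probe sit above the match table. Sketched with illustrative foo_* names:

    /* of_match_device() comes from <linux/of_device.h> */
    static const struct of_device_id foo_match[];   /* defined after probe */

    static int __devinit foo_probe(struct platform_device *op)
    {
            const struct of_device_id *match;

            match = of_match_device(foo_match, &op->dev);
            if (!match)
                    return -EINVAL;
            /* per-variant driver data travels in match->data */
            return 0;
    }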
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c index 93d7b4465f8d..6a585d393580 100644 --- a/arch/sparc/kernel/pmc.c +++ b/arch/sparc/kernel/pmc.c | |||
@@ -69,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op) | |||
69 | return 0; | 69 | return 0; |
70 | } | 70 | } |
71 | 71 | ||
72 | static struct of_device_id __initdata pmc_match[] = { | 72 | static struct of_device_id pmc_match[] = { |
73 | { | 73 | { |
74 | .name = PMC_OBPNAME, | 74 | .name = PMC_OBPNAME, |
75 | }, | 75 | }, |
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 91c10fb70858..442286d83435 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c | |||
@@ -53,6 +53,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE; | |||
53 | void __cpuinit smp_store_cpu_info(int id) | 53 | void __cpuinit smp_store_cpu_info(int id) |
54 | { | 54 | { |
55 | int cpu_node; | 55 | int cpu_node; |
56 | int mid; | ||
56 | 57 | ||
57 | cpu_data(id).udelay_val = loops_per_jiffy; | 58 | cpu_data(id).udelay_val = loops_per_jiffy; |
58 | 59 | ||
@@ -60,10 +61,13 @@ void __cpuinit smp_store_cpu_info(int id) | |||
60 | cpu_data(id).clock_tick = prom_getintdefault(cpu_node, | 61 | cpu_data(id).clock_tick = prom_getintdefault(cpu_node, |
61 | "clock-frequency", 0); | 62 | "clock-frequency", 0); |
62 | cpu_data(id).prom_node = cpu_node; | 63 | cpu_data(id).prom_node = cpu_node; |
63 | cpu_data(id).mid = cpu_get_hwmid(cpu_node); | 64 | mid = cpu_get_hwmid(cpu_node); |
64 | 65 | ||
65 | if (cpu_data(id).mid < 0) | 66 | if (mid < 0) { |
66 | panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); | 67 | printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node); |
68 | mid = 0; | ||
69 | } | ||
70 | cpu_data(id).mid = mid; | ||
67 | } | 71 | } |
68 | 72 | ||
69 | void __init smp_cpus_done(unsigned int max_cpus) | 73 | void __init smp_cpus_done(unsigned int max_cpus) |
@@ -125,7 +129,9 @@ struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 }; | |||
125 | 129 | ||
126 | void smp_send_reschedule(int cpu) | 130 | void smp_send_reschedule(int cpu) |
127 | { | 131 | { |
128 | /* See sparc64 */ | 132 | /* |
133 | * XXX missing reschedule IPI, see scheduler_ipi() | ||
134 | */ | ||
129 | } | 135 | } |
130 | 136 | ||
131 | void smp_send_stop(void) | 137 | void smp_send_stop(void) |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 3e94a8c23238..9478da7fdb3e 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -1368,6 +1368,7 @@ void smp_send_reschedule(int cpu) | |||
1368 | void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) | 1368 | void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) |
1369 | { | 1369 | { |
1370 | clear_softint(1 << irq); | 1370 | clear_softint(1 << irq); |
1371 | scheduler_ipi(); | ||
1371 | } | 1372 | } |
1372 | 1373 | ||
1373 | /* This is a nop because we capture all other cpus | 1374 | /* This is a nop because we capture all other cpus |
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 4e236391b635..96046a4024c2 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c | |||
@@ -168,7 +168,7 @@ static int __devinit clock_probe(struct platform_device *op) | |||
168 | return 0; | 168 | return 0; |
169 | } | 169 | } |
170 | 170 | ||
171 | static struct of_device_id __initdata clock_match[] = { | 171 | static struct of_device_id clock_match[] = { |
172 | { | 172 | { |
173 | .name = "eeprom", | 173 | .name = "eeprom", |
174 | }, | 174 | }, |
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S index 3632cb34e914..0084c3361e15 100644 --- a/arch/sparc/lib/checksum_32.S +++ b/arch/sparc/lib/checksum_32.S | |||
@@ -289,10 +289,16 @@ cc_end_cruft: | |||
289 | 289 | ||
290 | /* Also, handle the alignment code out of band. */ | 290 | /* Also, handle the alignment code out of band. */ |
291 | cc_dword_align: | 291 | cc_dword_align: |
292 | cmp %g1, 6 | 292 | cmp %g1, 16 |
293 | bl,a ccte | 293 | bge 1f |
294 | srl %g1, 1, %o3 | ||
295 | 2: cmp %o3, 0 | ||
296 | be,a ccte | ||
294 | andcc %g1, 0xf, %o3 | 297 | andcc %g1, 0xf, %o3 |
295 | andcc %o0, 0x1, %g0 | 298 | andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits) |
299 | be,a 2b | ||
300 | srl %o3, 1, %o3 | ||
301 | 1: andcc %o0, 0x1, %g0 | ||
296 | bne ccslow | 302 | bne ccslow |
297 | andcc %o0, 0x2, %g0 | 303 | andcc %o0, 0x2, %g0 |
298 | be 1f | 304 | be 1f |
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c index a4293102ef81..c52224d5ed45 100644 --- a/arch/tile/kernel/smp.c +++ b/arch/tile/kernel/smp.c | |||
@@ -189,12 +189,8 @@ void flush_icache_range(unsigned long start, unsigned long end) | |||
189 | /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ | 189 | /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ |
190 | static irqreturn_t handle_reschedule_ipi(int irq, void *token) | 190 | static irqreturn_t handle_reschedule_ipi(int irq, void *token) |
191 | { | 191 | { |
192 | /* | ||
193 | * Nothing to do here; when we return from interrupt, the | ||
194 | * rescheduling will occur there. But do bump the interrupt | ||
195 | * profiler count in the meantime. | ||
196 | */ | ||
197 | __get_cpu_var(irq_stat).irq_resched_count++; | 192 | __get_cpu_var(irq_stat).irq_resched_count++; |
193 | scheduler_ipi(); | ||
198 | 194 | ||
199 | return IRQ_HANDLED; | 195 | return IRQ_HANDLED; |
200 | } | 196 | } |
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c index 106bf27e2a9a..eefb107d2d73 100644 --- a/arch/um/kernel/smp.c +++ b/arch/um/kernel/smp.c | |||
@@ -173,7 +173,7 @@ void IPI_handler(int cpu) | |||
173 | break; | 173 | break; |
174 | 174 | ||
175 | case 'R': | 175 | case 'R': |
176 | set_tsk_need_resched(current); | 176 | scheduler_ipi(); |
177 | break; | 177 | break; |
178 | 178 | ||
179 | case 'S': | 179 | case 'S': |
diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c index 6ea77979531c..42827cafa6af 100644 --- a/arch/um/os-Linux/util.c +++ b/arch/um/os-Linux/util.c | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #include <stdio.h> | 6 | #include <stdio.h> |
7 | #include <stdlib.h> | 7 | #include <stdlib.h> |
8 | #include <unistd.h> | ||
8 | #include <errno.h> | 9 | #include <errno.h> |
9 | #include <signal.h> | 10 | #include <signal.h> |
10 | #include <string.h> | 11 | #include <string.h> |
@@ -75,6 +76,26 @@ void setup_hostinfo(char *buf, int len) | |||
75 | host.release, host.version, host.machine); | 76 | host.release, host.version, host.machine); |
76 | } | 77 | } |
77 | 78 | ||
79 | /* | ||
80 | * We cannot use glibc's abort(). It makes use of tgkill() which | ||
81 | * has no effect within UML's kernel threads. | ||
82 | * After that glibc would execute an invalid instruction to kill | ||
83 | * the calling process and UML crashes with SIGSEGV. | ||
84 | */ | ||
85 | static inline void __attribute__ ((noreturn)) uml_abort(void) | ||
86 | { | ||
87 | sigset_t sig; | ||
88 | |||
89 | fflush(NULL); | ||
90 | |||
91 | if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT)) | ||
92 | sigprocmask(SIG_UNBLOCK, &sig, 0); | ||
93 | |||
94 | for (;;) | ||
95 | if (kill(getpid(), SIGABRT) < 0) | ||
96 | exit(127); | ||
97 | } | ||
98 | |||
78 | void os_dump_core(void) | 99 | void os_dump_core(void) |
79 | { | 100 | { |
80 | int pid; | 101 | int pid; |
@@ -116,5 +137,5 @@ void os_dump_core(void) | |||
116 | while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) | 137 | while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) |
117 | os_kill_ptraced_process(pid, 0); | 138 | os_kill_ptraced_process(pid, 0); |
118 | 139 | ||
119 | abort(); | 140 | uml_abort(); |
120 | } | 141 | } |
diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c index 2aa30a364bbe..d4efa7d679ff 100644 --- a/arch/unicore32/kernel/irq.c +++ b/arch/unicore32/kernel/irq.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/list.h> | 23 | #include <linux/list.h> |
24 | #include <linux/kallsyms.h> | 24 | #include <linux/kallsyms.h> |
25 | #include <linux/proc_fs.h> | 25 | #include <linux/proc_fs.h> |
26 | #include <linux/sysdev.h> | 26 | #include <linux/syscore_ops.h> |
27 | #include <linux/gpio.h> | 27 | #include <linux/gpio.h> |
28 | 28 | ||
29 | #include <asm/system.h> | 29 | #include <asm/system.h> |
@@ -237,7 +237,7 @@ static struct puv3_irq_state { | |||
237 | unsigned int iccr; | 237 | unsigned int iccr; |
238 | } puv3_irq_state; | 238 | } puv3_irq_state; |
239 | 239 | ||
240 | static int puv3_irq_suspend(struct sys_device *dev, pm_message_t state) | 240 | static int puv3_irq_suspend(void) |
241 | { | 241 | { |
242 | struct puv3_irq_state *st = &puv3_irq_state; | 242 | struct puv3_irq_state *st = &puv3_irq_state; |
243 | 243 | ||
@@ -265,7 +265,7 @@ static int puv3_irq_suspend(struct sys_device *dev, pm_message_t state) | |||
265 | return 0; | 265 | return 0; |
266 | } | 266 | } |
267 | 267 | ||
268 | static int puv3_irq_resume(struct sys_device *dev) | 268 | static void puv3_irq_resume(void) |
269 | { | 269 | { |
270 | struct puv3_irq_state *st = &puv3_irq_state; | 270 | struct puv3_irq_state *st = &puv3_irq_state; |
271 | 271 | ||
@@ -278,27 +278,20 @@ static int puv3_irq_resume(struct sys_device *dev) | |||
278 | 278 | ||
279 | writel(st->icmr, INTC_ICMR); | 279 | writel(st->icmr, INTC_ICMR); |
280 | } | 280 | } |
281 | return 0; | ||
282 | } | 281 | } |
283 | 282 | ||
284 | static struct sysdev_class puv3_irq_sysclass = { | 283 | static struct syscore_ops puv3_irq_syscore_ops = { |
285 | .name = "pkunity-irq", | ||
286 | .suspend = puv3_irq_suspend, | 284 | .suspend = puv3_irq_suspend, |
287 | .resume = puv3_irq_resume, | 285 | .resume = puv3_irq_resume, |
288 | }; | 286 | }; |
289 | 287 | ||
290 | static struct sys_device puv3_irq_device = { | 288 | static int __init puv3_irq_init_syscore(void) |
291 | .id = 0, | ||
292 | .cls = &puv3_irq_sysclass, | ||
293 | }; | ||
294 | |||
295 | static int __init puv3_irq_init_devicefs(void) | ||
296 | { | 289 | { |
297 | sysdev_class_register(&puv3_irq_sysclass); | 290 | register_syscore_ops(&puv3_irq_syscore_ops); |
298 | return sysdev_register(&puv3_irq_device); | 291 | return 0; |
299 | } | 292 | } |
300 | 293 | ||
301 | device_initcall(puv3_irq_init_devicefs); | 294 | device_initcall(puv3_irq_init_syscore); |
302 | 295 | ||
303 | void __init init_IRQ(void) | 296 | void __init init_IRQ(void) |
304 | { | 297 | { |
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index 254e36fa9513..b9a26465e728 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c | |||
@@ -192,7 +192,6 @@ static int __die(const char *str, int err, struct thread_info *thread, | |||
192 | 192 | ||
193 | printk(KERN_EMERG "Internal error: %s: %x [#%d]\n", | 193 | printk(KERN_EMERG "Internal error: %s: %x [#%d]\n", |
194 | str, err, ++die_counter); | 194 | str, err, ++die_counter); |
195 | sysfs_printk_last_file(); | ||
196 | 195 | ||
197 | /* trap and error numbers are mostly meaningless on UniCore */ | 196 | /* trap and error numbers are mostly meaningless on UniCore */ |
198 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, \ | 197 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, \ |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cc6c53a95bfd..4168e5d8632a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -8,6 +8,7 @@ config 64BIT | |||
8 | 8 | ||
9 | config X86_32 | 9 | config X86_32 |
10 | def_bool !64BIT | 10 | def_bool !64BIT |
11 | select CLKSRC_I8253 | ||
11 | 12 | ||
12 | config X86_64 | 13 | config X86_64 |
13 | def_bool 64BIT | 14 | def_bool 64BIT |
@@ -71,7 +72,6 @@ config X86 | |||
71 | select GENERIC_IRQ_SHOW | 72 | select GENERIC_IRQ_SHOW |
72 | select IRQ_FORCED_THREADING | 73 | select IRQ_FORCED_THREADING |
73 | select USE_GENERIC_SMP_HELPERS if SMP | 74 | select USE_GENERIC_SMP_HELPERS if SMP |
74 | select ARCH_NO_SYSDEV_OPS | ||
75 | 75 | ||
76 | config INSTRUCTION_DECODER | 76 | config INSTRUCTION_DECODER |
77 | def_bool (KPROBES || PERF_EVENTS) | 77 | def_bool (KPROBES || PERF_EVENTS) |
@@ -112,7 +112,14 @@ config MMU | |||
112 | def_bool y | 112 | def_bool y |
113 | 113 | ||
114 | config ZONE_DMA | 114 | config ZONE_DMA |
115 | def_bool y | 115 | bool "DMA memory allocation support" if EXPERT |
116 | default y | ||
117 | help | ||
118 | DMA memory allocation support allows devices with less than 32-bit | ||
119 | addressing to allocate within the first 16MB of address space. | ||
120 | Disable if no such devices will be used. | ||
121 | |||
122 | If unsure, say Y. | ||
116 | 123 | ||
117 | config SBUS | 124 | config SBUS |
118 | bool | 125 | bool |
@@ -365,17 +372,6 @@ config X86_UV | |||
365 | # Following is an alphabetically sorted list of 32 bit extended platforms | 372 | # Following is an alphabetically sorted list of 32 bit extended platforms |
366 | # Please maintain the alphabetic order if and when there are additions | 373 | # Please maintain the alphabetic order if and when there are additions |
367 | 374 | ||
368 | config X86_ELAN | ||
369 | bool "AMD Elan" | ||
370 | depends on X86_32 | ||
371 | depends on X86_EXTENDED_PLATFORM | ||
372 | ---help--- | ||
373 | Select this for an AMD Elan processor. | ||
374 | |||
375 | Do not use this option for K6/Athlon/Opteron processors! | ||
376 | |||
377 | If unsure, choose "PC-compatible" instead. | ||
378 | |||
379 | config X86_INTEL_CE | 375 | config X86_INTEL_CE |
380 | bool "CE4100 TV platform" | 376 | bool "CE4100 TV platform" |
381 | depends on PCI | 377 | depends on PCI |
@@ -690,6 +686,7 @@ config AMD_IOMMU | |||
690 | bool "AMD IOMMU support" | 686 | bool "AMD IOMMU support" |
691 | select SWIOTLB | 687 | select SWIOTLB |
692 | select PCI_MSI | 688 | select PCI_MSI |
689 | select PCI_IOV | ||
693 | depends on X86_64 && PCI && ACPI | 690 | depends on X86_64 && PCI && ACPI |
694 | ---help--- | 691 | ---help--- |
695 | With this option you can enable support for AMD IOMMU hardware in | 692 | With this option you can enable support for AMD IOMMU hardware in |
@@ -1174,7 +1171,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" | |||
1174 | config AMD_NUMA | 1171 | config AMD_NUMA |
1175 | def_bool y | 1172 | def_bool y |
1176 | prompt "Old style AMD Opteron NUMA detection" | 1173 | prompt "Old style AMD Opteron NUMA detection" |
1177 | depends on X86_64 && NUMA && PCI | 1174 | depends on NUMA && PCI |
1178 | ---help--- | 1175 | ---help--- |
1179 | Enable AMD NUMA node topology detection. You should say Y here if | 1176 | Enable AMD NUMA node topology detection. You should say Y here if |
1180 | you have a multi processor AMD system. This uses an old method to | 1177 | you have a multi processor AMD system. This uses an old method to |
@@ -1201,7 +1198,7 @@ config NODES_SPAN_OTHER_NODES | |||
1201 | 1198 | ||
1202 | config NUMA_EMU | 1199 | config NUMA_EMU |
1203 | bool "NUMA emulation" | 1200 | bool "NUMA emulation" |
1204 | depends on X86_64 && NUMA | 1201 | depends on NUMA |
1205 | ---help--- | 1202 | ---help--- |
1206 | Enable NUMA emulation. A flat machine will be split | 1203 | Enable NUMA emulation. A flat machine will be split |
1207 | into virtual nodes when booted with "numa=fake=N", where N is the | 1204 | into virtual nodes when booted with "numa=fake=N", where N is the |
@@ -1223,6 +1220,10 @@ config HAVE_ARCH_BOOTMEM | |||
1223 | def_bool y | 1220 | def_bool y |
1224 | depends on X86_32 && NUMA | 1221 | depends on X86_32 && NUMA |
1225 | 1222 | ||
1223 | config HAVE_ARCH_ALLOC_REMAP | ||
1224 | def_bool y | ||
1225 | depends on X86_32 && NUMA | ||
1226 | |||
1226 | config ARCH_HAVE_MEMORY_PRESENT | 1227 | config ARCH_HAVE_MEMORY_PRESENT |
1227 | def_bool y | 1228 | def_bool y |
1228 | depends on X86_32 && DISCONTIGMEM | 1229 | depends on X86_32 && DISCONTIGMEM |
@@ -1231,13 +1232,9 @@ config NEED_NODE_MEMMAP_SIZE | |||
1231 | def_bool y | 1232 | def_bool y |
1232 | depends on X86_32 && (DISCONTIGMEM || SPARSEMEM) | 1233 | depends on X86_32 && (DISCONTIGMEM || SPARSEMEM) |
1233 | 1234 | ||
1234 | config HAVE_ARCH_ALLOC_REMAP | ||
1235 | def_bool y | ||
1236 | depends on X86_32 && NUMA | ||
1237 | |||
1238 | config ARCH_FLATMEM_ENABLE | 1235 | config ARCH_FLATMEM_ENABLE |
1239 | def_bool y | 1236 | def_bool y |
1240 | depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && !NUMA | 1237 | depends on X86_32 && !NUMA |
1241 | 1238 | ||
1242 | config ARCH_DISCONTIGMEM_ENABLE | 1239 | config ARCH_DISCONTIGMEM_ENABLE |
1243 | def_bool y | 1240 | def_bool y |
@@ -1247,20 +1244,16 @@ config ARCH_DISCONTIGMEM_DEFAULT | |||
1247 | def_bool y | 1244 | def_bool y |
1248 | depends on NUMA && X86_32 | 1245 | depends on NUMA && X86_32 |
1249 | 1246 | ||
1250 | config ARCH_PROC_KCORE_TEXT | ||
1251 | def_bool y | ||
1252 | depends on X86_64 && PROC_KCORE | ||
1253 | |||
1254 | config ARCH_SPARSEMEM_DEFAULT | ||
1255 | def_bool y | ||
1256 | depends on X86_64 | ||
1257 | |||
1258 | config ARCH_SPARSEMEM_ENABLE | 1247 | config ARCH_SPARSEMEM_ENABLE |
1259 | def_bool y | 1248 | def_bool y |
1260 | depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD | 1249 | depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD |
1261 | select SPARSEMEM_STATIC if X86_32 | 1250 | select SPARSEMEM_STATIC if X86_32 |
1262 | select SPARSEMEM_VMEMMAP_ENABLE if X86_64 | 1251 | select SPARSEMEM_VMEMMAP_ENABLE if X86_64 |
1263 | 1252 | ||
1253 | config ARCH_SPARSEMEM_DEFAULT | ||
1254 | def_bool y | ||
1255 | depends on X86_64 | ||
1256 | |||
1264 | config ARCH_SELECT_MEMORY_MODEL | 1257 | config ARCH_SELECT_MEMORY_MODEL |
1265 | def_bool y | 1258 | def_bool y |
1266 | depends on ARCH_SPARSEMEM_ENABLE | 1259 | depends on ARCH_SPARSEMEM_ENABLE |
@@ -1269,6 +1262,10 @@ config ARCH_MEMORY_PROBE | |||
1269 | def_bool X86_64 | 1262 | def_bool X86_64 |
1270 | depends on MEMORY_HOTPLUG | 1263 | depends on MEMORY_HOTPLUG |
1271 | 1264 | ||
1265 | config ARCH_PROC_KCORE_TEXT | ||
1266 | def_bool y | ||
1267 | depends on X86_64 && PROC_KCORE | ||
1268 | |||
1272 | config ILLEGAL_POINTER_VALUE | 1269 | config ILLEGAL_POINTER_VALUE |
1273 | hex | 1270 | hex |
1274 | default 0 if X86_32 | 1271 | default 0 if X86_32 |
@@ -1703,10 +1700,6 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE | |||
1703 | def_bool y | 1700 | def_bool y |
1704 | depends on MEMORY_HOTPLUG | 1701 | depends on MEMORY_HOTPLUG |
1705 | 1702 | ||
1706 | config HAVE_ARCH_EARLY_PFN_TO_NID | ||
1707 | def_bool X86_64 | ||
1708 | depends on NUMA | ||
1709 | |||
1710 | config USE_PERCPU_NUMA_NODE_ID | 1703 | config USE_PERCPU_NUMA_NODE_ID |
1711 | def_bool y | 1704 | def_bool y |
1712 | depends on NUMA | 1705 | depends on NUMA |
@@ -1848,7 +1841,7 @@ config APM_ALLOW_INTS | |||
1848 | 1841 | ||
1849 | endif # APM | 1842 | endif # APM |
1850 | 1843 | ||
1851 | source "arch/x86/kernel/cpu/cpufreq/Kconfig" | 1844 | source "drivers/cpufreq/Kconfig" |
1852 | 1845 | ||
1853 | source "drivers/cpuidle/Kconfig" | 1846 | source "drivers/cpuidle/Kconfig" |
1854 | 1847 | ||
@@ -2076,7 +2069,7 @@ config OLPC | |||
2076 | depends on !X86_PAE | 2069 | depends on !X86_PAE |
2077 | select GPIOLIB | 2070 | select GPIOLIB |
2078 | select OF | 2071 | select OF |
2079 | select OF_PROMTREE if PROC_DEVICETREE | 2072 | select OF_PROMTREE |
2080 | ---help--- | 2073 | ---help--- |
2081 | Add support for detecting the unique features of the OLPC | 2074 | Add support for detecting the unique features of the OLPC |
2082 | XO hardware. | 2075 | XO hardware. |
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index d161e939df62..6a7cfdf8ff69 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
@@ -1,6 +1,4 @@ | |||
1 | # Put here option for CPU selection and depending optimization | 1 | # Put here option for CPU selection and depending optimization |
2 | if !X86_ELAN | ||
3 | |||
4 | choice | 2 | choice |
5 | prompt "Processor family" | 3 | prompt "Processor family" |
6 | default M686 if X86_32 | 4 | default M686 if X86_32 |
@@ -203,6 +201,14 @@ config MWINCHIP3D | |||
203 | stores for this CPU, which can increase performance of some | 201 | stores for this CPU, which can increase performance of some |
204 | operations. | 202 | operations. |
205 | 203 | ||
204 | config MELAN | ||
205 | bool "AMD Elan" | ||
206 | depends on X86_32 | ||
207 | ---help--- | ||
208 | Select this for an AMD Elan processor. | ||
209 | |||
210 | Do not use this option for K6/Athlon/Opteron processors! | ||
211 | |||
206 | config MGEODEGX1 | 212 | config MGEODEGX1 |
207 | bool "GeodeGX1" | 213 | bool "GeodeGX1" |
208 | depends on X86_32 | 214 | depends on X86_32 |
@@ -292,8 +298,6 @@ config X86_GENERIC | |||
292 | This is really intended for distributors who need more | 298 | This is really intended for distributors who need more |
293 | generic optimizations. | 299 | generic optimizations. |
294 | 300 | ||
295 | endif | ||
296 | |||
297 | # | 301 | # |
298 | # Define implied options from the CPU selection here | 302 | # Define implied options from the CPU selection here |
299 | config X86_INTERNODE_CACHE_SHIFT | 303 | config X86_INTERNODE_CACHE_SHIFT |
@@ -312,7 +316,7 @@ config X86_L1_CACHE_SHIFT | |||
312 | int | 316 | int |
313 | default "7" if MPENTIUM4 || MPSC | 317 | default "7" if MPENTIUM4 || MPSC |
314 | default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU | 318 | default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU |
315 | default "4" if X86_ELAN || M486 || M386 || MGEODEGX1 | 319 | default "4" if MELAN || M486 || M386 || MGEODEGX1 |
316 | default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX | 320 | default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX |
317 | 321 | ||
318 | config X86_XADD | 322 | config X86_XADD |
@@ -358,7 +362,7 @@ config X86_POPAD_OK | |||
358 | 362 | ||
359 | config X86_ALIGNMENT_16 | 363 | config X86_ALIGNMENT_16 |
360 | def_bool y | 364 | def_bool y |
361 | depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 | 365 | depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 |
362 | 366 | ||
363 | config X86_INTEL_USERCOPY | 367 | config X86_INTEL_USERCOPY |
364 | def_bool y | 368 | def_bool y |
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu index f2ee1abb1df9..86cee7b749e1 100644 --- a/arch/x86/Makefile_32.cpu +++ b/arch/x86/Makefile_32.cpu | |||
@@ -37,7 +37,7 @@ cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march= | |||
37 | $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic)) | 37 | $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic)) |
38 | 38 | ||
39 | # AMD Elan support | 39 | # AMD Elan support |
40 | cflags-$(CONFIG_X86_ELAN) += -march=i486 | 40 | cflags-$(CONFIG_MELAN) += -march=i486 |
41 | 41 | ||
42 | # Geode GX1 support | 42 | # Geode GX1 support |
43 | cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx | 43 | cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx |
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 12e0e7dd869c..416d865eae39 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h | |||
@@ -183,8 +183,6 @@ static inline void disable_acpi(void) { } | |||
183 | 183 | ||
184 | #define ARCH_HAS_POWER_INIT 1 | 184 | #define ARCH_HAS_POWER_INIT 1 |
185 | 185 | ||
186 | struct bootnode; | ||
187 | |||
188 | #ifdef CONFIG_ACPI_NUMA | 186 | #ifdef CONFIG_ACPI_NUMA |
189 | extern int acpi_numa; | 187 | extern int acpi_numa; |
190 | extern int x86_acpi_numa_init(void); | 188 | extern int x86_acpi_numa_init(void); |
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 8cdd1e247975..bf535f947e8c 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
@@ -190,12 +190,4 @@ extern void *text_poke(void *addr, const void *opcode, size_t len); | |||
190 | extern void *text_poke_smp(void *addr, const void *opcode, size_t len); | 190 | extern void *text_poke_smp(void *addr, const void *opcode, size_t len); |
191 | extern void text_poke_smp_batch(struct text_poke_param *params, int n); | 191 | extern void text_poke_smp_batch(struct text_poke_param *params, int n); |
192 | 192 | ||
193 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) | ||
194 | #define IDEAL_NOP_SIZE_5 5 | ||
195 | extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; | ||
196 | extern void arch_init_ideal_nop5(void); | ||
197 | #else | ||
198 | static inline void arch_init_ideal_nop5(void) {} | ||
199 | #endif | ||
200 | |||
201 | #endif /* _ASM_X86_ALTERNATIVE_H */ | 193 | #endif /* _ASM_X86_ALTERNATIVE_H */ |
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h index 916bc8111a01..55d95eb789b3 100644 --- a/arch/x86/include/asm/amd_iommu_proto.h +++ b/arch/x86/include/asm/amd_iommu_proto.h | |||
@@ -19,13 +19,12 @@ | |||
19 | #ifndef _ASM_X86_AMD_IOMMU_PROTO_H | 19 | #ifndef _ASM_X86_AMD_IOMMU_PROTO_H |
20 | #define _ASM_X86_AMD_IOMMU_PROTO_H | 20 | #define _ASM_X86_AMD_IOMMU_PROTO_H |
21 | 21 | ||
22 | struct amd_iommu; | 22 | #include <asm/amd_iommu_types.h> |
23 | 23 | ||
24 | extern int amd_iommu_init_dma_ops(void); | 24 | extern int amd_iommu_init_dma_ops(void); |
25 | extern int amd_iommu_init_passthrough(void); | 25 | extern int amd_iommu_init_passthrough(void); |
26 | extern irqreturn_t amd_iommu_int_thread(int irq, void *data); | ||
26 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); | 27 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); |
27 | extern void amd_iommu_flush_all_domains(void); | ||
28 | extern void amd_iommu_flush_all_devices(void); | ||
29 | extern void amd_iommu_apply_erratum_63(u16 devid); | 28 | extern void amd_iommu_apply_erratum_63(u16 devid); |
30 | extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); | 29 | extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); |
31 | extern int amd_iommu_init_devices(void); | 30 | extern int amd_iommu_init_devices(void); |
@@ -44,4 +43,12 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev) | |||
44 | (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); | 43 | (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); |
45 | } | 44 | } |
46 | 45 | ||
46 | static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) | ||
47 | { | ||
48 | if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) | ||
49 | return false; | ||
50 | |||
51 | return !!(iommu->features & f); | ||
52 | } | ||
53 | |||
47 | #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ | 54 | #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ |
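iommu_feature() checks the EFR capability bit before testing a flag from the extended-features register; the FEATURE_* masks it consumes are added to amd_iommu_types.h in the next hunk. A hedged usage sketch (the helper called is hypothetical):

    if (iommu_feature(iommu, FEATURE_GT))
            setup_guest_translation(iommu);  /* hypothetical per-feature setup */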
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index e3509fc303bf..4c9982995414 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
@@ -68,12 +68,25 @@ | |||
68 | #define MMIO_CONTROL_OFFSET 0x0018 | 68 | #define MMIO_CONTROL_OFFSET 0x0018 |
69 | #define MMIO_EXCL_BASE_OFFSET 0x0020 | 69 | #define MMIO_EXCL_BASE_OFFSET 0x0020 |
70 | #define MMIO_EXCL_LIMIT_OFFSET 0x0028 | 70 | #define MMIO_EXCL_LIMIT_OFFSET 0x0028 |
71 | #define MMIO_EXT_FEATURES 0x0030 | ||
71 | #define MMIO_CMD_HEAD_OFFSET 0x2000 | 72 | #define MMIO_CMD_HEAD_OFFSET 0x2000 |
72 | #define MMIO_CMD_TAIL_OFFSET 0x2008 | 73 | #define MMIO_CMD_TAIL_OFFSET 0x2008 |
73 | #define MMIO_EVT_HEAD_OFFSET 0x2010 | 74 | #define MMIO_EVT_HEAD_OFFSET 0x2010 |
74 | #define MMIO_EVT_TAIL_OFFSET 0x2018 | 75 | #define MMIO_EVT_TAIL_OFFSET 0x2018 |
75 | #define MMIO_STATUS_OFFSET 0x2020 | 76 | #define MMIO_STATUS_OFFSET 0x2020 |
76 | 77 | ||
78 | |||
79 | /* Extended Feature Bits */ | ||
80 | #define FEATURE_PREFETCH (1ULL<<0) | ||
81 | #define FEATURE_PPR (1ULL<<1) | ||
82 | #define FEATURE_X2APIC (1ULL<<2) | ||
83 | #define FEATURE_NX (1ULL<<3) | ||
84 | #define FEATURE_GT (1ULL<<4) | ||
85 | #define FEATURE_IA (1ULL<<6) | ||
86 | #define FEATURE_GA (1ULL<<7) | ||
87 | #define FEATURE_HE (1ULL<<8) | ||
88 | #define FEATURE_PC (1ULL<<9) | ||
89 | |||
77 | /* MMIO status bits */ | 90 | /* MMIO status bits */ |
78 | #define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 | 91 | #define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 |
79 | 92 | ||
@@ -113,7 +126,9 @@ | |||
113 | /* command specific defines */ | 126 | /* command specific defines */ |
114 | #define CMD_COMPL_WAIT 0x01 | 127 | #define CMD_COMPL_WAIT 0x01 |
115 | #define CMD_INV_DEV_ENTRY 0x02 | 128 | #define CMD_INV_DEV_ENTRY 0x02 |
116 | #define CMD_INV_IOMMU_PAGES 0x03 | 129 | #define CMD_INV_IOMMU_PAGES 0x03 |
130 | #define CMD_INV_IOTLB_PAGES 0x04 | ||
131 | #define CMD_INV_ALL 0x08 | ||
117 | 132 | ||
118 | #define CMD_COMPL_WAIT_STORE_MASK 0x01 | 133 | #define CMD_COMPL_WAIT_STORE_MASK 0x01 |
119 | #define CMD_COMPL_WAIT_INT_MASK 0x02 | 134 | #define CMD_COMPL_WAIT_INT_MASK 0x02 |
@@ -215,6 +230,8 @@ | |||
215 | #define IOMMU_PTE_IR (1ULL << 61) | 230 | #define IOMMU_PTE_IR (1ULL << 61) |
216 | #define IOMMU_PTE_IW (1ULL << 62) | 231 | #define IOMMU_PTE_IW (1ULL << 62) |
217 | 232 | ||
233 | #define DTE_FLAG_IOTLB 0x01 | ||
234 | |||
218 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) | 235 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) |
219 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) | 236 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) |
220 | #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) | 237 | #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) |
@@ -227,6 +244,7 @@ | |||
227 | /* IOMMU capabilities */ | 244 | /* IOMMU capabilities */ |
228 | #define IOMMU_CAP_IOTLB 24 | 245 | #define IOMMU_CAP_IOTLB 24 |
229 | #define IOMMU_CAP_NPCACHE 26 | 246 | #define IOMMU_CAP_NPCACHE 26 |
247 | #define IOMMU_CAP_EFR 27 | ||
230 | 248 | ||
231 | #define MAX_DOMAIN_ID 65536 | 249 | #define MAX_DOMAIN_ID 65536 |
232 | 250 | ||
@@ -249,6 +267,8 @@ extern bool amd_iommu_dump; | |||
249 | 267 | ||
250 | /* global flag if IOMMUs cache non-present entries */ | 268 | /* global flag if IOMMUs cache non-present entries */ |
251 | extern bool amd_iommu_np_cache; | 269 | extern bool amd_iommu_np_cache; |
270 | /* Only true if all IOMMUs support device IOTLBs */ | ||
271 | extern bool amd_iommu_iotlb_sup; | ||
252 | 272 | ||
253 | /* | 273 | /* |
254 | * Make iterating over all IOMMUs easier | 274 | * Make iterating over all IOMMUs easier |
@@ -371,6 +391,9 @@ struct amd_iommu { | |||
371 | /* flags read from acpi table */ | 391 | /* flags read from acpi table */ |
372 | u8 acpi_flags; | 392 | u8 acpi_flags; |
373 | 393 | ||
394 | /* Extended features */ | ||
395 | u64 features; | ||
396 | |||
374 | /* | 397 | /* |
375 | * Capability pointer. There could be more than one IOMMU per PCI | 398 | * Capability pointer. There could be more than one IOMMU per PCI |
376 | * device function if there are more than one AMD IOMMU capability | 399 | * device function if there are more than one AMD IOMMU capability |
@@ -409,9 +432,6 @@ struct amd_iommu { | |||
409 | /* if one, we need to send a completion wait command */ | 432 | /* if one, we need to send a completion wait command */ |
410 | bool need_sync; | 433 | bool need_sync; |
411 | 434 | ||
412 | /* becomes true if a command buffer reset is running */ | ||
413 | bool reset_in_progress; | ||
414 | |||
415 | /* default dma_ops domain for that IOMMU */ | 435 | /* default dma_ops domain for that IOMMU */ |
416 | struct dma_ops_domain *default_dom; | 436 | struct dma_ops_domain *default_dom; |
417 | 437 | ||
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 331682231bb4..67f87f257611 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h | |||
@@ -11,7 +11,6 @@ struct amd_nb_bus_dev_range { | |||
11 | 11 | ||
12 | extern const struct pci_device_id amd_nb_misc_ids[]; | 12 | extern const struct pci_device_id amd_nb_misc_ids[]; |
13 | extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[]; | 13 | extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[]; |
14 | struct bootnode; | ||
15 | 14 | ||
16 | extern bool early_is_amd_nb(u32 value); | 15 | extern bool early_is_amd_nb(u32 value); |
17 | extern int amd_cache_northbridges(void); | 16 | extern int amd_cache_northbridges(void); |
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 2b7d573be549..a0c46f061210 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -363,7 +363,12 @@ struct apic { | |||
363 | */ | 363 | */ |
364 | int (*x86_32_early_logical_apicid)(int cpu); | 364 | int (*x86_32_early_logical_apicid)(int cpu); |
365 | 365 | ||
366 | /* determine CPU -> NUMA node mapping */ | 366 | /* |
367 | * Optional method called from setup_local_APIC() after logical | ||
368 | * apicid is guaranteed to be known to initialize apicid -> node | ||
369 | * mapping if NUMA initialization hasn't done so already. Don't | ||
370 | * add new users. | ||
371 | */ | ||
367 | int (*x86_32_numa_cpu_node)(int cpu); | 372 | int (*x86_32_numa_cpu_node)(int cpu); |
368 | #endif | 373 | #endif |
369 | }; | 374 | }; |
@@ -537,8 +542,6 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) | |||
537 | return cpuid_apic >> index_msb; | 542 | return cpuid_apic >> index_msb; |
538 | } | 543 | } |
539 | 544 | ||
540 | extern int default_x86_32_numa_cpu_node(int cpu); | ||
541 | |||
542 | #endif | 545 | #endif |
543 | 546 | ||
544 | static inline unsigned int | 547 | static inline unsigned int |
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index d87988bacf3e..34595d5e1038 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h | |||
@@ -78,6 +78,7 @@ | |||
78 | #define APIC_DEST_LOGICAL 0x00800 | 78 | #define APIC_DEST_LOGICAL 0x00800 |
79 | #define APIC_DEST_PHYSICAL 0x00000 | 79 | #define APIC_DEST_PHYSICAL 0x00000 |
80 | #define APIC_DM_FIXED 0x00000 | 80 | #define APIC_DM_FIXED 0x00000 |
81 | #define APIC_DM_FIXED_MASK 0x00700 | ||
81 | #define APIC_DM_LOWEST 0x00100 | 82 | #define APIC_DM_LOWEST 0x00100 |
82 | #define APIC_DM_SMI 0x00200 | 83 | #define APIC_DM_SMI 0x00200 |
83 | #define APIC_DM_REMRD 0x00300 | 84 | #define APIC_DM_REMRD 0x00300 |
diff --git a/arch/x86/include/asm/bios_ebda.h b/arch/x86/include/asm/bios_ebda.h index 3c7521063d3f..aa6a3170ab5a 100644 --- a/arch/x86/include/asm/bios_ebda.h +++ b/arch/x86/include/asm/bios_ebda.h | |||
@@ -4,16 +4,40 @@ | |||
4 | #include <asm/io.h> | 4 | #include <asm/io.h> |
5 | 5 | ||
6 | /* | 6 | /* |
7 | * there is a real-mode segmented pointer pointing to the | 7 | * Returns physical address of EBDA. Returns 0 if there is no EBDA. |
8 | * 4K EBDA area at 0x40E. | ||
9 | */ | 8 | */ |
10 | static inline unsigned int get_bios_ebda(void) | 9 | static inline unsigned int get_bios_ebda(void) |
11 | { | 10 | { |
11 | /* | ||
12 | * There is a real-mode segmented pointer pointing to the | ||
13 | * 4K EBDA area at 0x40E. | ||
14 | */ | ||
12 | unsigned int address = *(unsigned short *)phys_to_virt(0x40E); | 15 | unsigned int address = *(unsigned short *)phys_to_virt(0x40E); |
13 | address <<= 4; | 16 | address <<= 4; |
14 | return address; /* 0 means none */ | 17 | return address; /* 0 means none */ |
15 | } | 18 | } |
16 | 19 | ||
20 | /* | ||
21 | * Return the sanitized length of the EBDA in bytes, if it exists. | ||
22 | */ | ||
23 | static inline unsigned int get_bios_ebda_length(void) | ||
24 | { | ||
25 | unsigned int address; | ||
26 | unsigned int length; | ||
27 | |||
28 | address = get_bios_ebda(); | ||
29 | if (!address) | ||
30 | return 0; | ||
31 | |||
32 | /* EBDA length is byte 0 of the EBDA (stored in KiB) */ | ||
33 | length = *(unsigned char *)phys_to_virt(address); | ||
34 | length <<= 10; | ||
35 | |||
36 | /* Trim the length if it extends beyond 640KiB */ | ||
37 | length = min_t(unsigned int, (640 * 1024) - address, length); | ||
38 | return length; | ||
39 | } | ||
40 | |||
17 | void reserve_ebda_region(void); | 41 | void reserve_ebda_region(void); |
18 | 42 | ||
19 | #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION | 43 | #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION |
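The new get_bios_ebda_length() above sanitizes the firmware-reported EBDA size before callers trust it. A minimal userspace sketch of the same arithmetic, using made-up firmware values (the 0x9FC0 segment word and the 1 KiB length byte are assumptions for illustration, not values the kernel hardcodes):

    #include <stdio.h>

    int main(void)
    {
            unsigned short seg = 0x9FC0;   /* word the BIOS keeps at 0x40E */
            unsigned char len_kib = 1;     /* byte 0 of the EBDA, in KiB */

            unsigned int address = (unsigned int)seg << 4;      /* 0x9FC00 */
            unsigned int length  = (unsigned int)len_kib << 10; /* 1024 bytes */

            /* trim so the EBDA never extends past the 640 KiB boundary */
            if (length > (640 * 1024) - address)
                    length = (640 * 1024) - address;

            printf("EBDA at %#x, %u bytes\n", address, length);
            return 0;
    }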
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 7f2f7b123293..5dc6acc98dbd 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -195,6 +195,7 @@ | |||
195 | 195 | ||
196 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ | 196 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ |
197 | #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ | 197 | #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ |
198 | #define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */ | ||
198 | #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ | 199 | #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ |
199 | 200 | ||
200 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | 201 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
@@ -208,8 +209,7 @@ extern const char * const x86_power_flags[32]; | |||
208 | #define test_cpu_cap(c, bit) \ | 209 | #define test_cpu_cap(c, bit) \ |
209 | test_bit(bit, (unsigned long *)((c)->x86_capability)) | 210 | test_bit(bit, (unsigned long *)((c)->x86_capability)) |
210 | 211 | ||
211 | #define cpu_has(c, bit) \ | 212 | #define REQUIRED_MASK_BIT_SET(bit) \ |
212 | (__builtin_constant_p(bit) && \ | ||
213 | ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ | 213 | ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ |
214 | (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ | 214 | (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ |
215 | (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ | 215 | (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ |
@@ -219,10 +219,16 @@ extern const char * const x86_power_flags[32]; | |||
219 | (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ | 219 | (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ |
220 | (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \ | 220 | (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \ |
221 | (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \ | 221 | (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \ |
222 | (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) ) \ | 222 | (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) ) |
223 | ? 1 : \ | 223 | |
224 | #define cpu_has(c, bit) \ | ||
225 | (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ | ||
224 | test_cpu_cap(c, bit)) | 226 | test_cpu_cap(c, bit)) |
225 | 227 | ||
228 | #define this_cpu_has(bit) \ | ||
229 | (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ | ||
230 | x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability)) | ||
231 | |||
226 | #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) | 232 | #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) |
227 | 233 | ||
228 | #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) | 234 | #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) |
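The cpu_has() rework above factors the compile-time required-mask test into REQUIRED_MASK_BIT_SET() so the new this_cpu_has() can share it. A hedged usage sketch; the feature bit and the message are chosen purely for illustration:

    /* Test a feature on the CPU we are running on, without needing a
     * struct cpuinfo_x86 pointer. If the bit is a compile-time constant
     * covered by REQUIRED_MASK*, the test folds to 1 at build time;
     * otherwise it becomes a per-cpu bit test via x86_this_cpu_test_bit(). */
    if (this_cpu_has(X86_FEATURE_ARAT))
            pr_info("local APIC timer keeps ticking in deep C-states\n");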
diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h index 057099e5faba..0bdb0c54d9a1 100644 --- a/arch/x86/include/asm/dma.h +++ b/arch/x86/include/asm/dma.h | |||
@@ -69,22 +69,18 @@ | |||
69 | 69 | ||
70 | #define MAX_DMA_CHANNELS 8 | 70 | #define MAX_DMA_CHANNELS 8 |
71 | 71 | ||
72 | #ifdef CONFIG_X86_32 | ||
73 | |||
74 | /* The maximum address that we can perform a DMA transfer to on this platform */ | ||
75 | #define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000) | ||
76 | |||
77 | #else | ||
78 | |||
79 | /* 16MB ISA DMA zone */ | 72 | /* 16MB ISA DMA zone */ |
80 | #define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT) | 73 | #define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT) |
81 | 74 | ||
82 | /* 4GB broken PCI/AGP hardware bus master zone */ | 75 | /* 4GB broken PCI/AGP hardware bus master zone */ |
83 | #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) | 76 | #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) |
84 | 77 | ||
78 | #ifdef CONFIG_X86_32 | ||
79 | /* The maximum address that we can perform a DMA transfer to on this platform */ | ||
80 | #define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000) | ||
81 | #else | ||
85 | /* Compat define for old dma zone */ | 82 | /* Compat define for old dma zone */ |
86 | #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) | 83 | #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) |
87 | |||
88 | #endif | 84 | #endif |
89 | 85 | ||
90 | /* 8237 DMA controllers */ | 86 | /* 8237 DMA controllers */ |
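For the zone limits above, a quick worked check assuming the usual 4 KiB pages (PAGE_SHIFT == 12):

    /* MAX_DMA_PFN   = 16 MiB >> 12 = 0x1000   (4096 pages)
     * MAX_DMA32_PFN = 4 GiB  >> 12 = 0x100000 (1048576 pages)
     * 32-bit keeps MAX_DMA_ADDRESS at PAGE_OFFSET + 16 MiB; 64-bit maps
     * the same 16 MiB boundary through the direct mapping via __va(). */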
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 8e4a16508d4e..7093e4a6a0bc 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -90,6 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, | |||
90 | #endif /* CONFIG_X86_32 */ | 90 | #endif /* CONFIG_X86_32 */ |
91 | 91 | ||
92 | extern int add_efi_memmap; | 92 | extern int add_efi_memmap; |
93 | extern void efi_set_executable(efi_memory_desc_t *md, bool executable); | ||
93 | extern void efi_memblock_x86_reserve_range(void); | 94 | extern void efi_memblock_x86_reserve_range(void); |
94 | extern void efi_call_phys_prelog(void); | 95 | extern void efi_call_phys_prelog(void); |
95 | extern void efi_call_phys_epilog(void); | 96 | extern void efi_call_phys_epilog(void); |
diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h index fc1f579fb965..65aaa91d5850 100644 --- a/arch/x86/include/asm/i8253.h +++ b/arch/x86/include/asm/i8253.h | |||
@@ -6,6 +6,8 @@ | |||
6 | #define PIT_CH0 0x40 | 6 | #define PIT_CH0 0x40 |
7 | #define PIT_CH2 0x42 | 7 | #define PIT_CH2 0x42 |
8 | 8 | ||
9 | #define PIT_LATCH LATCH | ||
10 | |||
9 | extern raw_spinlock_t i8253_lock; | 11 | extern raw_spinlock_t i8253_lock; |
10 | 12 | ||
11 | extern struct clock_event_device *global_clock_event; | 13 | extern struct clock_event_device *global_clock_event; |
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index eb16e94ae04f..021979a6e23f 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -142,8 +142,6 @@ static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {} | |||
142 | static inline void enable_p5_mce(void) {} | 142 | static inline void enable_p5_mce(void) {} |
143 | #endif | 143 | #endif |
144 | 144 | ||
145 | extern void (*x86_mce_decode_callback)(struct mce *m); | ||
146 | |||
147 | void mce_setup(struct mce *m); | 145 | void mce_setup(struct mce *m); |
148 | void mce_log(struct mce *m); | 146 | void mce_log(struct mce *m); |
149 | DECLARE_PER_CPU(struct sys_device, mce_dev); | 147 | DECLARE_PER_CPU(struct sys_device, mce_dev); |
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h index 91df7c51806c..5e83a416eca8 100644 --- a/arch/x86/include/asm/mmzone_32.h +++ b/arch/x86/include/asm/mmzone_32.h | |||
@@ -13,31 +13,11 @@ extern struct pglist_data *node_data[]; | |||
13 | #define NODE_DATA(nid) (node_data[nid]) | 13 | #define NODE_DATA(nid) (node_data[nid]) |
14 | 14 | ||
15 | #include <asm/numaq.h> | 15 | #include <asm/numaq.h> |
16 | /* summit or generic arch */ | ||
17 | #include <asm/srat.h> | ||
18 | |||
19 | extern int get_memcfg_numa_flat(void); | ||
20 | /* | ||
21 | * This allows any one NUMA architecture to be compiled | ||
22 | * for, and still fall back to the flat function if it | ||
23 | * fails. | ||
24 | */ | ||
25 | static inline void get_memcfg_numa(void) | ||
26 | { | ||
27 | |||
28 | if (get_memcfg_numaq()) | ||
29 | return; | ||
30 | if (get_memcfg_from_srat()) | ||
31 | return; | ||
32 | get_memcfg_numa_flat(); | ||
33 | } | ||
34 | 16 | ||
35 | extern void resume_map_numa_kva(pgd_t *pgd); | 17 | extern void resume_map_numa_kva(pgd_t *pgd); |
36 | 18 | ||
37 | #else /* !CONFIG_NUMA */ | 19 | #else /* !CONFIG_NUMA */ |
38 | 20 | ||
39 | #define get_memcfg_numa get_memcfg_numa_flat | ||
40 | |||
41 | static inline void resume_map_numa_kva(pgd_t *pgd) {} | 21 | static inline void resume_map_numa_kva(pgd_t *pgd) {} |
42 | 22 | ||
43 | #endif /* CONFIG_NUMA */ | 23 | #endif /* CONFIG_NUMA */ |
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h index 288b96f815a6..b3f88d7867c7 100644 --- a/arch/x86/include/asm/mmzone_64.h +++ b/arch/x86/include/asm/mmzone_64.h | |||
@@ -4,36 +4,13 @@ | |||
4 | #ifndef _ASM_X86_MMZONE_64_H | 4 | #ifndef _ASM_X86_MMZONE_64_H |
5 | #define _ASM_X86_MMZONE_64_H | 5 | #define _ASM_X86_MMZONE_64_H |
6 | 6 | ||
7 | |||
8 | #ifdef CONFIG_NUMA | 7 | #ifdef CONFIG_NUMA |
9 | 8 | ||
10 | #include <linux/mmdebug.h> | 9 | #include <linux/mmdebug.h> |
11 | |||
12 | #include <asm/smp.h> | 10 | #include <asm/smp.h> |
13 | 11 | ||
14 | /* Simple perfect hash to map physical addresses to node numbers */ | ||
15 | struct memnode { | ||
16 | int shift; | ||
17 | unsigned int mapsize; | ||
18 | s16 *map; | ||
19 | s16 embedded_map[64 - 8]; | ||
20 | } ____cacheline_aligned; /* total size = 128 bytes */ | ||
21 | extern struct memnode memnode; | ||
22 | #define memnode_shift memnode.shift | ||
23 | #define memnodemap memnode.map | ||
24 | #define memnodemapsize memnode.mapsize | ||
25 | |||
26 | extern struct pglist_data *node_data[]; | 12 | extern struct pglist_data *node_data[]; |
27 | 13 | ||
28 | static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) | ||
29 | { | ||
30 | unsigned nid; | ||
31 | VIRTUAL_BUG_ON(!memnodemap); | ||
32 | nid = memnodemap[addr >> memnode_shift]; | ||
33 | VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); | ||
34 | return nid; | ||
35 | } | ||
36 | |||
37 | #define NODE_DATA(nid) (node_data[nid]) | 14 | #define NODE_DATA(nid) (node_data[nid]) |
38 | 15 | ||
39 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | 16 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index 67763c5d8b4e..9eae7752ae9b 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h | |||
@@ -35,7 +35,7 @@ | |||
35 | #define MODULE_PROC_FAMILY "K7 " | 35 | #define MODULE_PROC_FAMILY "K7 " |
36 | #elif defined CONFIG_MK8 | 36 | #elif defined CONFIG_MK8 |
37 | #define MODULE_PROC_FAMILY "K8 " | 37 | #define MODULE_PROC_FAMILY "K8 " |
38 | #elif defined CONFIG_X86_ELAN | 38 | #elif defined CONFIG_MELAN |
39 | #define MODULE_PROC_FAMILY "ELAN " | 39 | #define MODULE_PROC_FAMILY "ELAN " |
40 | #elif defined CONFIG_MCRUSOE | 40 | #elif defined CONFIG_MCRUSOE |
41 | #define MODULE_PROC_FAMILY "CRUSOE " | 41 | #define MODULE_PROC_FAMILY "CRUSOE " |
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h index af788496020b..405b4032a60b 100644 --- a/arch/x86/include/asm/nops.h +++ b/arch/x86/include/asm/nops.h | |||
@@ -1,7 +1,13 @@ | |||
1 | #ifndef _ASM_X86_NOPS_H | 1 | #ifndef _ASM_X86_NOPS_H |
2 | #define _ASM_X86_NOPS_H | 2 | #define _ASM_X86_NOPS_H |
3 | 3 | ||
4 | /* Define nops for use with alternative() */ | 4 | /* |
5 | * Define nops for use with alternative() and for tracing. | ||
6 | * | ||
7 | * *_NOP5_ATOMIC must be a single instruction. | ||
8 | */ | ||
9 | |||
10 | #define NOP_DS_PREFIX 0x3e | ||
5 | 11 | ||
6 | /* generic versions from gas | 12 | /* generic versions from gas |
7 | 1: nop | 13 | 1: nop |
@@ -13,14 +19,15 @@ | |||
13 | 6: leal 0x00000000(%esi),%esi | 19 | 6: leal 0x00000000(%esi),%esi |
14 | 7: leal 0x00000000(,%esi,1),%esi | 20 | 7: leal 0x00000000(,%esi,1),%esi |
15 | */ | 21 | */ |
16 | #define GENERIC_NOP1 ".byte 0x90\n" | 22 | #define GENERIC_NOP1 0x90 |
17 | #define GENERIC_NOP2 ".byte 0x89,0xf6\n" | 23 | #define GENERIC_NOP2 0x89,0xf6 |
18 | #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" | 24 | #define GENERIC_NOP3 0x8d,0x76,0x00 |
19 | #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" | 25 | #define GENERIC_NOP4 0x8d,0x74,0x26,0x00 |
20 | #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 | 26 | #define GENERIC_NOP5 GENERIC_NOP1,GENERIC_NOP4 |
21 | #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" | 27 | #define GENERIC_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00 |
22 | #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" | 28 | #define GENERIC_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00 |
23 | #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 | 29 | #define GENERIC_NOP8 GENERIC_NOP1,GENERIC_NOP7 |
30 | #define GENERIC_NOP5_ATOMIC NOP_DS_PREFIX,GENERIC_NOP4 | ||
24 | 31 | ||
25 | /* Opteron 64bit nops | 32 | /* Opteron 64bit nops |
26 | 1: nop | 33 | 1: nop |
@@ -29,13 +36,14 @@ | |||
29 | 4: osp osp osp nop | 36 | 4: osp osp osp nop |
30 | */ | 37 | */ |
31 | #define K8_NOP1 GENERIC_NOP1 | 38 | #define K8_NOP1 GENERIC_NOP1 |
32 | #define K8_NOP2 ".byte 0x66,0x90\n" | 39 | #define K8_NOP2 0x66,K8_NOP1 |
33 | #define K8_NOP3 ".byte 0x66,0x66,0x90\n" | 40 | #define K8_NOP3 0x66,K8_NOP2 |
34 | #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" | 41 | #define K8_NOP4 0x66,K8_NOP3 |
35 | #define K8_NOP5 K8_NOP3 K8_NOP2 | 42 | #define K8_NOP5 K8_NOP3,K8_NOP2 |
36 | #define K8_NOP6 K8_NOP3 K8_NOP3 | 43 | #define K8_NOP6 K8_NOP3,K8_NOP3 |
37 | #define K8_NOP7 K8_NOP4 K8_NOP3 | 44 | #define K8_NOP7 K8_NOP4,K8_NOP3 |
38 | #define K8_NOP8 K8_NOP4 K8_NOP4 | 45 | #define K8_NOP8 K8_NOP4,K8_NOP4 |
46 | #define K8_NOP5_ATOMIC 0x66,K8_NOP4 | ||
39 | 47 | ||
40 | /* K7 nops | 48 | /* K7 nops |
41 | uses eax dependencies (arbitrary choice) | 49 | uses eax dependencies (arbitrary choice) |
@@ -47,13 +55,14 @@ | |||
47 | 7: leal 0x00000000(,%eax,1),%eax | 55 | 7: leal 0x00000000(,%eax,1),%eax |
48 | */ | 56 | */ |
49 | #define K7_NOP1 GENERIC_NOP1 | 57 | #define K7_NOP1 GENERIC_NOP1 |
50 | #define K7_NOP2 ".byte 0x8b,0xc0\n" | 58 | #define K7_NOP2 0x8b,0xc0 |
51 | #define K7_NOP3 ".byte 0x8d,0x04,0x20\n" | 59 | #define K7_NOP3 0x8d,0x04,0x20 |
52 | #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" | 60 | #define K7_NOP4 0x8d,0x44,0x20,0x00 |
53 | #define K7_NOP5 K7_NOP4 ASM_NOP1 | 61 | #define K7_NOP5 K7_NOP4,K7_NOP1 |
54 | #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" | 62 | #define K7_NOP6 0x8d,0x80,0,0,0,0 |
55 | #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" | 63 | #define K7_NOP7 0x8D,0x04,0x05,0,0,0,0 |
56 | #define K7_NOP8 K7_NOP7 ASM_NOP1 | 64 | #define K7_NOP8 K7_NOP7,K7_NOP1 |
65 | #define K7_NOP5_ATOMIC NOP_DS_PREFIX,K7_NOP4 | ||
57 | 66 | ||
58 | /* P6 nops | 67 | /* P6 nops |
59 | uses eax dependencies (Intel-recommended choice) | 68 | uses eax dependencies (Intel-recommended choice) |
@@ -69,52 +78,65 @@ | |||
69 | There is kernel code that depends on this. | 78 | There is kernel code that depends on this. |
70 | */ | 79 | */ |
71 | #define P6_NOP1 GENERIC_NOP1 | 80 | #define P6_NOP1 GENERIC_NOP1 |
72 | #define P6_NOP2 ".byte 0x66,0x90\n" | 81 | #define P6_NOP2 0x66,0x90 |
73 | #define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" | 82 | #define P6_NOP3 0x0f,0x1f,0x00 |
74 | #define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" | 83 | #define P6_NOP4 0x0f,0x1f,0x40,0 |
75 | #define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" | 84 | #define P6_NOP5 0x0f,0x1f,0x44,0x00,0 |
76 | #define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" | 85 | #define P6_NOP6 0x66,0x0f,0x1f,0x44,0x00,0 |
77 | #define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" | 86 | #define P6_NOP7 0x0f,0x1f,0x80,0,0,0,0 |
78 | #define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" | 87 | #define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0 |
88 | #define P6_NOP5_ATOMIC P6_NOP5 | ||
89 | |||
90 | #define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n" | ||
79 | 91 | ||
80 | #if defined(CONFIG_MK7) | 92 | #if defined(CONFIG_MK7) |
81 | #define ASM_NOP1 K7_NOP1 | 93 | #define ASM_NOP1 _ASM_MK_NOP(K7_NOP1) |
82 | #define ASM_NOP2 K7_NOP2 | 94 | #define ASM_NOP2 _ASM_MK_NOP(K7_NOP2) |
83 | #define ASM_NOP3 K7_NOP3 | 95 | #define ASM_NOP3 _ASM_MK_NOP(K7_NOP3) |
84 | #define ASM_NOP4 K7_NOP4 | 96 | #define ASM_NOP4 _ASM_MK_NOP(K7_NOP4) |
85 | #define ASM_NOP5 K7_NOP5 | 97 | #define ASM_NOP5 _ASM_MK_NOP(K7_NOP5) |
86 | #define ASM_NOP6 K7_NOP6 | 98 | #define ASM_NOP6 _ASM_MK_NOP(K7_NOP6) |
87 | #define ASM_NOP7 K7_NOP7 | 99 | #define ASM_NOP7 _ASM_MK_NOP(K7_NOP7) |
88 | #define ASM_NOP8 K7_NOP8 | 100 | #define ASM_NOP8 _ASM_MK_NOP(K7_NOP8) |
101 | #define ASM_NOP5_ATOMIC _ASM_MK_NOP(K7_NOP5_ATOMIC) | ||
89 | #elif defined(CONFIG_X86_P6_NOP) | 102 | #elif defined(CONFIG_X86_P6_NOP) |
90 | #define ASM_NOP1 P6_NOP1 | 103 | #define ASM_NOP1 _ASM_MK_NOP(P6_NOP1) |
91 | #define ASM_NOP2 P6_NOP2 | 104 | #define ASM_NOP2 _ASM_MK_NOP(P6_NOP2) |
92 | #define ASM_NOP3 P6_NOP3 | 105 | #define ASM_NOP3 _ASM_MK_NOP(P6_NOP3) |
93 | #define ASM_NOP4 P6_NOP4 | 106 | #define ASM_NOP4 _ASM_MK_NOP(P6_NOP4) |
94 | #define ASM_NOP5 P6_NOP5 | 107 | #define ASM_NOP5 _ASM_MK_NOP(P6_NOP5) |
95 | #define ASM_NOP6 P6_NOP6 | 108 | #define ASM_NOP6 _ASM_MK_NOP(P6_NOP6) |
96 | #define ASM_NOP7 P6_NOP7 | 109 | #define ASM_NOP7 _ASM_MK_NOP(P6_NOP7) |
97 | #define ASM_NOP8 P6_NOP8 | 110 | #define ASM_NOP8 _ASM_MK_NOP(P6_NOP8) |
111 | #define ASM_NOP5_ATOMIC _ASM_MK_NOP(P6_NOP5_ATOMIC) | ||
98 | #elif defined(CONFIG_X86_64) | 112 | #elif defined(CONFIG_X86_64) |
99 | #define ASM_NOP1 K8_NOP1 | 113 | #define ASM_NOP1 _ASM_MK_NOP(K8_NOP1) |
100 | #define ASM_NOP2 K8_NOP2 | 114 | #define ASM_NOP2 _ASM_MK_NOP(K8_NOP2) |
101 | #define ASM_NOP3 K8_NOP3 | 115 | #define ASM_NOP3 _ASM_MK_NOP(K8_NOP3) |
102 | #define ASM_NOP4 K8_NOP4 | 116 | #define ASM_NOP4 _ASM_MK_NOP(K8_NOP4) |
103 | #define ASM_NOP5 K8_NOP5 | 117 | #define ASM_NOP5 _ASM_MK_NOP(K8_NOP5) |
104 | #define ASM_NOP6 K8_NOP6 | 118 | #define ASM_NOP6 _ASM_MK_NOP(K8_NOP6) |
105 | #define ASM_NOP7 K8_NOP7 | 119 | #define ASM_NOP7 _ASM_MK_NOP(K8_NOP7) |
106 | #define ASM_NOP8 K8_NOP8 | 120 | #define ASM_NOP8 _ASM_MK_NOP(K8_NOP8) |
121 | #define ASM_NOP5_ATOMIC _ASM_MK_NOP(K8_NOP5_ATOMIC) | ||
107 | #else | 122 | #else |
108 | #define ASM_NOP1 GENERIC_NOP1 | 123 | #define ASM_NOP1 _ASM_MK_NOP(GENERIC_NOP1) |
109 | #define ASM_NOP2 GENERIC_NOP2 | 124 | #define ASM_NOP2 _ASM_MK_NOP(GENERIC_NOP2) |
110 | #define ASM_NOP3 GENERIC_NOP3 | 125 | #define ASM_NOP3 _ASM_MK_NOP(GENERIC_NOP3) |
111 | #define ASM_NOP4 GENERIC_NOP4 | 126 | #define ASM_NOP4 _ASM_MK_NOP(GENERIC_NOP4) |
112 | #define ASM_NOP5 GENERIC_NOP5 | 127 | #define ASM_NOP5 _ASM_MK_NOP(GENERIC_NOP5) |
113 | #define ASM_NOP6 GENERIC_NOP6 | 128 | #define ASM_NOP6 _ASM_MK_NOP(GENERIC_NOP6) |
114 | #define ASM_NOP7 GENERIC_NOP7 | 129 | #define ASM_NOP7 _ASM_MK_NOP(GENERIC_NOP7) |
115 | #define ASM_NOP8 GENERIC_NOP8 | 130 | #define ASM_NOP8 _ASM_MK_NOP(GENERIC_NOP8) |
131 | #define ASM_NOP5_ATOMIC _ASM_MK_NOP(GENERIC_NOP5_ATOMIC) | ||
116 | #endif | 132 | #endif |
117 | 133 | ||
118 | #define ASM_NOP_MAX 8 | 134 | #define ASM_NOP_MAX 8 |
135 | #define NOP_ATOMIC5 (ASM_NOP_MAX+1) /* Entry for the 5-byte atomic NOP */ | ||
136 | |||
137 | #ifndef __ASSEMBLY__ | ||
138 | extern const unsigned char * const *ideal_nops; | ||
139 | extern void arch_init_ideal_nops(void); | ||
140 | #endif | ||
119 | 141 | ||
120 | #endif /* _ASM_X86_NOPS_H */ | 142 | #endif /* _ASM_X86_NOPS_H */ |
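Because the nops are now bare byte lists instead of ".byte ..." strings, _ASM_MK_NOP() rebuilds the inline-assembly string by stringifying the list. A sketch of the expansion, assuming CONFIG_X86_P6_NOP is set:

    /*   ASM_NOP2
     *   -> _ASM_MK_NOP(P6_NOP2)
     *   -> ".byte " __stringify(0x66,0x90) "\n"
     *   -> ".byte 0x66,0x90\n"
     */
    asm volatile(ASM_NOP2);        /* emits the two-byte 66 90 nop */

The same byte lists can also initialize C arrays directly, which is what the reworked nop tables in alternative.c below rely on.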
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index a50fc9f493b3..bfacd2ccf651 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h | |||
@@ -1,12 +1,24 @@ | |||
1 | #ifndef _ASM_X86_NUMA_H | 1 | #ifndef _ASM_X86_NUMA_H |
2 | #define _ASM_X86_NUMA_H | 2 | #define _ASM_X86_NUMA_H |
3 | 3 | ||
4 | #include <linux/nodemask.h> | ||
5 | |||
4 | #include <asm/topology.h> | 6 | #include <asm/topology.h> |
5 | #include <asm/apicdef.h> | 7 | #include <asm/apicdef.h> |
6 | 8 | ||
7 | #ifdef CONFIG_NUMA | 9 | #ifdef CONFIG_NUMA |
8 | 10 | ||
9 | #define NR_NODE_MEMBLKS (MAX_NUMNODES*2) | 11 | #define NR_NODE_MEMBLKS (MAX_NUMNODES*2) |
12 | #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) | ||
13 | |||
14 | /* | ||
15 | * Too small node sizes may confuse the VM badly. Usually they | ||
16 | * result from BIOS bugs. So don't recognize nodes as standalone | ||
17 | * NUMA entities that have less than this amount of RAM listed: | ||
18 | */ | ||
19 | #define NODE_MIN_SIZE (4*1024*1024) | ||
20 | |||
21 | extern int numa_off; | ||
10 | 22 | ||
11 | /* | 23 | /* |
12 | * __apicid_to_node[] stores the raw mapping between physical apicid and | 24 | * __apicid_to_node[] stores the raw mapping between physical apicid and |
@@ -17,15 +29,27 @@ | |||
17 | * numa_cpu_node(). | 29 | * numa_cpu_node(). |
18 | */ | 30 | */ |
19 | extern s16 __apicid_to_node[MAX_LOCAL_APIC]; | 31 | extern s16 __apicid_to_node[MAX_LOCAL_APIC]; |
32 | extern nodemask_t numa_nodes_parsed __initdata; | ||
33 | |||
34 | extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); | ||
35 | extern void __init numa_set_distance(int from, int to, int distance); | ||
20 | 36 | ||
21 | static inline void set_apicid_to_node(int apicid, s16 node) | 37 | static inline void set_apicid_to_node(int apicid, s16 node) |
22 | { | 38 | { |
23 | __apicid_to_node[apicid] = node; | 39 | __apicid_to_node[apicid] = node; |
24 | } | 40 | } |
41 | |||
42 | extern int __cpuinit numa_cpu_node(int cpu); | ||
43 | |||
25 | #else /* CONFIG_NUMA */ | 44 | #else /* CONFIG_NUMA */ |
26 | static inline void set_apicid_to_node(int apicid, s16 node) | 45 | static inline void set_apicid_to_node(int apicid, s16 node) |
27 | { | 46 | { |
28 | } | 47 | } |
48 | |||
49 | static inline int numa_cpu_node(int cpu) | ||
50 | { | ||
51 | return NUMA_NO_NODE; | ||
52 | } | ||
29 | #endif /* CONFIG_NUMA */ | 53 | #endif /* CONFIG_NUMA */ |
30 | 54 | ||
31 | #ifdef CONFIG_X86_32 | 55 | #ifdef CONFIG_X86_32 |
@@ -37,14 +61,12 @@ static inline void set_apicid_to_node(int apicid, s16 node) | |||
37 | #ifdef CONFIG_NUMA | 61 | #ifdef CONFIG_NUMA |
38 | extern void __cpuinit numa_set_node(int cpu, int node); | 62 | extern void __cpuinit numa_set_node(int cpu, int node); |
39 | extern void __cpuinit numa_clear_node(int cpu); | 63 | extern void __cpuinit numa_clear_node(int cpu); |
40 | extern void __init numa_init_array(void); | ||
41 | extern void __init init_cpu_to_node(void); | 64 | extern void __init init_cpu_to_node(void); |
42 | extern void __cpuinit numa_add_cpu(int cpu); | 65 | extern void __cpuinit numa_add_cpu(int cpu); |
43 | extern void __cpuinit numa_remove_cpu(int cpu); | 66 | extern void __cpuinit numa_remove_cpu(int cpu); |
44 | #else /* CONFIG_NUMA */ | 67 | #else /* CONFIG_NUMA */ |
45 | static inline void numa_set_node(int cpu, int node) { } | 68 | static inline void numa_set_node(int cpu, int node) { } |
46 | static inline void numa_clear_node(int cpu) { } | 69 | static inline void numa_clear_node(int cpu) { } |
47 | static inline void numa_init_array(void) { } | ||
48 | static inline void init_cpu_to_node(void) { } | 70 | static inline void init_cpu_to_node(void) { } |
49 | static inline void numa_add_cpu(int cpu) { } | 71 | static inline void numa_add_cpu(int cpu) { } |
50 | static inline void numa_remove_cpu(int cpu) { } | 72 | static inline void numa_remove_cpu(int cpu) { } |
@@ -54,4 +76,10 @@ static inline void numa_remove_cpu(int cpu) { } | |||
54 | void debug_cpumask_set_cpu(int cpu, int node, bool enable); | 76 | void debug_cpumask_set_cpu(int cpu, int node, bool enable); |
55 | #endif | 77 | #endif |
56 | 78 | ||
79 | #ifdef CONFIG_NUMA_EMU | ||
80 | #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) | ||
81 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) | ||
82 | void numa_emu_cmdline(char *); | ||
83 | #endif /* CONFIG_NUMA_EMU */ | ||
84 | |||
57 | #endif /* _ASM_X86_NUMA_H */ | 85 | #endif /* _ASM_X86_NUMA_H */ |
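For the NUMA-emulation constants consolidated above: FAKE_NODE_MIN_SIZE is 32 MiB (1 << 25), so FAKE_NODE_MIN_HASH_MASK clears the low 25 bits and emulated node boundaries round down to 32 MiB multiples. A small worked example (the boundary value is arbitrary):

    /* ~(FAKE_NODE_MIN_SIZE - 1) == ~0x1ffffffUL == 0xfffffffffe000000 */
    u64 boundary = 0x3456789ULL & FAKE_NODE_MIN_HASH_MASK;  /* -> 0x2000000 */

    /* numa_emu_cmdline() receives the argument of the "numa=fake=..."
     * boot option, e.g. the string "8" when booting with numa=fake=8. */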
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h index c6beed1ef103..e7d6b8254742 100644 --- a/arch/x86/include/asm/numa_32.h +++ b/arch/x86/include/asm/numa_32.h | |||
@@ -1,16 +1,6 @@ | |||
1 | #ifndef _ASM_X86_NUMA_32_H | 1 | #ifndef _ASM_X86_NUMA_32_H |
2 | #define _ASM_X86_NUMA_32_H | 2 | #define _ASM_X86_NUMA_32_H |
3 | 3 | ||
4 | extern int numa_off; | ||
5 | |||
6 | extern int pxm_to_nid(int pxm); | ||
7 | |||
8 | #ifdef CONFIG_NUMA | ||
9 | extern int __cpuinit numa_cpu_node(int cpu); | ||
10 | #else /* CONFIG_NUMA */ | ||
11 | static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } | ||
12 | #endif /* CONFIG_NUMA */ | ||
13 | |||
14 | #ifdef CONFIG_HIGHMEM | 4 | #ifdef CONFIG_HIGHMEM |
15 | extern void set_highmem_pages_init(void); | 5 | extern void set_highmem_pages_init(void); |
16 | #else | 6 | #else |
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 344eb1790b46..0c05f7ae46e8 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h | |||
@@ -1,42 +1,6 @@ | |||
1 | #ifndef _ASM_X86_NUMA_64_H | 1 | #ifndef _ASM_X86_NUMA_64_H |
2 | #define _ASM_X86_NUMA_64_H | 2 | #define _ASM_X86_NUMA_64_H |
3 | 3 | ||
4 | #include <linux/nodemask.h> | ||
5 | |||
6 | struct bootnode { | ||
7 | u64 start; | ||
8 | u64 end; | ||
9 | }; | ||
10 | |||
11 | #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) | ||
12 | |||
13 | extern int numa_off; | ||
14 | |||
15 | extern unsigned long numa_free_all_bootmem(void); | 4 | extern unsigned long numa_free_all_bootmem(void); |
16 | extern void setup_node_bootmem(int nodeid, unsigned long start, | ||
17 | unsigned long end); | ||
18 | |||
19 | #ifdef CONFIG_NUMA | ||
20 | /* | ||
21 | * Too small node sizes may confuse the VM badly. Usually they | ||
22 | * result from BIOS bugs. So don't recognize nodes as standalone | ||
23 | * NUMA entities that have less than this amount of RAM listed: | ||
24 | */ | ||
25 | #define NODE_MIN_SIZE (4*1024*1024) | ||
26 | |||
27 | extern nodemask_t numa_nodes_parsed __initdata; | ||
28 | |||
29 | extern int __cpuinit numa_cpu_node(int cpu); | ||
30 | extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); | ||
31 | extern void __init numa_set_distance(int from, int to, int distance); | ||
32 | |||
33 | #ifdef CONFIG_NUMA_EMU | ||
34 | #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) | ||
35 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) | ||
36 | void numa_emu_cmdline(char *); | ||
37 | #endif /* CONFIG_NUMA_EMU */ | ||
38 | #else | ||
39 | static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } | ||
40 | #endif | ||
41 | 5 | ||
42 | #endif /* _ASM_X86_NUMA_64_H */ | 6 | #endif /* _ASM_X86_NUMA_64_H */ |
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h index 37c516545ec8..c3b3c322fd87 100644 --- a/arch/x86/include/asm/numaq.h +++ b/arch/x86/include/asm/numaq.h | |||
@@ -29,7 +29,7 @@ | |||
29 | #ifdef CONFIG_X86_NUMAQ | 29 | #ifdef CONFIG_X86_NUMAQ |
30 | 30 | ||
31 | extern int found_numaq; | 31 | extern int found_numaq; |
32 | extern int get_memcfg_numaq(void); | 32 | extern int numaq_numa_init(void); |
33 | extern int pci_numaq_init(void); | 33 | extern int pci_numaq_init(void); |
34 | 34 | ||
35 | extern void *xquad_portio; | 35 | extern void *xquad_portio; |
@@ -166,11 +166,6 @@ struct sys_cfg_data { | |||
166 | 166 | ||
167 | void numaq_tsc_disable(void); | 167 | void numaq_tsc_disable(void); |
168 | 168 | ||
169 | #else | ||
170 | static inline int get_memcfg_numaq(void) | ||
171 | { | ||
172 | return 0; | ||
173 | } | ||
174 | #endif /* CONFIG_X86_NUMAQ */ | 169 | #endif /* CONFIG_X86_NUMAQ */ |
175 | #endif /* _ASM_X86_NUMAQ_H */ | 170 | #endif /* _ASM_X86_NUMAQ_H */ |
176 | 171 | ||
diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h index c5d3a5abbb9f..24487712e0b1 100644 --- a/arch/x86/include/asm/olpc_ofw.h +++ b/arch/x86/include/asm/olpc_ofw.h | |||
@@ -26,15 +26,12 @@ extern void setup_olpc_ofw_pgd(void); | |||
26 | /* check if OFW was detected during boot */ | 26 | /* check if OFW was detected during boot */ |
27 | extern bool olpc_ofw_present(void); | 27 | extern bool olpc_ofw_present(void); |
28 | 28 | ||
29 | extern void olpc_dt_build_devicetree(void); | ||
30 | |||
29 | #else /* !CONFIG_OLPC */ | 31 | #else /* !CONFIG_OLPC */ |
30 | static inline void olpc_ofw_detect(void) { } | 32 | static inline void olpc_ofw_detect(void) { } |
31 | static inline void setup_olpc_ofw_pgd(void) { } | 33 | static inline void setup_olpc_ofw_pgd(void) { } |
32 | #endif /* !CONFIG_OLPC */ | ||
33 | |||
34 | #ifdef CONFIG_OF_PROMTREE | ||
35 | extern void olpc_dt_build_devicetree(void); | ||
36 | #else | ||
37 | static inline void olpc_dt_build_devicetree(void) { } | 34 | static inline void olpc_dt_build_devicetree(void) { } |
38 | #endif | 35 | #endif /* !CONFIG_OLPC */ |
39 | 36 | ||
40 | #endif /* _ASM_X86_OLPC_OFW_H */ | 37 | #endif /* _ASM_X86_OLPC_OFW_H */ |
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index d475b4398d8b..53278b0dfdf6 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h | |||
@@ -517,7 +517,7 @@ do { \ | |||
517 | typeof(o2) __o2 = o2; \ | 517 | typeof(o2) __o2 = o2; \ |
518 | typeof(o2) __n2 = n2; \ | 518 | typeof(o2) __n2 = n2; \ |
519 | typeof(o2) __dummy; \ | 519 | typeof(o2) __dummy; \ |
520 | alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4, \ | 520 | alternative_io("call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP4, \ |
521 | "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t", \ | 521 | "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t", \ |
522 | X86_FEATURE_CX16, \ | 522 | X86_FEATURE_CX16, \ |
523 | ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \ | 523 | ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \ |
@@ -542,6 +542,33 @@ do { \ | |||
542 | old__; \ | 542 | old__; \ |
543 | }) | 543 | }) |
544 | 544 | ||
545 | static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr, | ||
546 | const unsigned long __percpu *addr) | ||
547 | { | ||
548 | unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG; | ||
549 | |||
550 | return ((1UL << (nr % BITS_PER_LONG)) & percpu_read(*a)) != 0; | ||
551 | } | ||
552 | |||
553 | static inline int x86_this_cpu_variable_test_bit(int nr, | ||
554 | const unsigned long __percpu *addr) | ||
555 | { | ||
556 | int oldbit; | ||
557 | |||
558 | asm volatile("bt "__percpu_arg(2)",%1\n\t" | ||
559 | "sbb %0,%0" | ||
560 | : "=r" (oldbit) | ||
561 | : "m" (*(unsigned long *)addr), "Ir" (nr)); | ||
562 | |||
563 | return oldbit; | ||
564 | } | ||
565 | |||
566 | #define x86_this_cpu_test_bit(nr, addr) \ | ||
567 | (__builtin_constant_p((nr)) \ | ||
568 | ? x86_this_cpu_constant_test_bit((nr), (addr)) \ | ||
569 | : x86_this_cpu_variable_test_bit((nr), (addr))) | ||
570 | |||
571 | |||
545 | #include <asm-generic/percpu.h> | 572 | #include <asm-generic/percpu.h> |
546 | 573 | ||
547 | /* We can use this directly for local CPU (faster). */ | 574 | /* We can use this directly for local CPU (faster). */ |
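x86_this_cpu_test_bit() above dispatches on whether nr is a compile-time constant: the constant variant folds the word index and mask and reads the per-cpu word, while the variable variant lets bt index the bitmap and sbb turn the carry flag into 0 or -1. A standalone userspace sketch of the bt/sbb idiom (not the kernel code itself, which also needs the %gs per-cpu prefix):

    static inline int test_bit_btsbb(int nr, const unsigned long *addr)
    {
            int oldbit;

            asm volatile("bt %2,%1\n\t"
                         "sbb %0,%0"        /* oldbit = CF ? -1 : 0 */
                         : "=r" (oldbit)
                         : "m" (*addr), "Ir" (nr));
            return oldbit;                  /* nonzero iff bit nr was set */
    }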
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 7db7723d1f32..d56187c6b838 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -299,6 +299,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, | |||
299 | /* Install a pte for a particular vaddr in kernel space. */ | 299 | /* Install a pte for a particular vaddr in kernel space. */ |
300 | void set_pte_vaddr(unsigned long vaddr, pte_t pte); | 300 | void set_pte_vaddr(unsigned long vaddr, pte_t pte); |
301 | 301 | ||
302 | extern void native_pagetable_reserve(u64 start, u64 end); | ||
302 | #ifdef CONFIG_X86_32 | 303 | #ifdef CONFIG_X86_32 |
303 | extern void native_pagetable_setup_start(pgd_t *base); | 304 | extern void native_pagetable_setup_start(pgd_t *base); |
304 | extern void native_pagetable_setup_done(pgd_t *base); | 305 | extern void native_pagetable_setup_done(pgd_t *base); |
diff --git a/arch/x86/include/asm/probe_roms.h b/arch/x86/include/asm/probe_roms.h new file mode 100644 index 000000000000..4950a0b1d09c --- /dev/null +++ b/arch/x86/include/asm/probe_roms.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _PROBE_ROMS_H_ | ||
2 | #define _PROBE_ROMS_H_ | ||
3 | struct pci_dev; | ||
4 | |||
5 | extern void __iomem *pci_map_biosrom(struct pci_dev *pdev); | ||
6 | extern void pci_unmap_biosrom(void __iomem *rom); | ||
7 | extern size_t pci_biosrom_size(struct pci_dev *pdev); | ||
8 | #endif | ||
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h index a898a2b6e10c..59ab4dffa377 100644 --- a/arch/x86/include/asm/processor-flags.h +++ b/arch/x86/include/asm/processor-flags.h | |||
@@ -60,6 +60,7 @@ | |||
60 | #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ | 60 | #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ |
61 | #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ | 61 | #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ |
62 | #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ | 62 | #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ |
63 | #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */ | ||
63 | 64 | ||
64 | /* | 65 | /* |
65 | * x86-64 Task Priority Register, CR8 | 66 | * x86-64 Task Priority Register, CR8 |
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 647d8a06ce4f..9756551ec760 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h | |||
@@ -104,10 +104,10 @@ void *extend_brk(size_t size, size_t align); | |||
104 | type *name; \ | 104 | type *name; \ |
105 | RESERVE_BRK(name, sizeof(type) * entries) | 105 | RESERVE_BRK(name, sizeof(type) * entries) |
106 | 106 | ||
107 | extern void probe_roms(void); | ||
107 | #ifdef __i386__ | 108 | #ifdef __i386__ |
108 | 109 | ||
109 | void __init i386_start_kernel(void); | 110 | void __init i386_start_kernel(void); |
110 | extern void probe_roms(void); | ||
111 | 111 | ||
112 | #else | 112 | #else |
113 | void __init x86_64_start_kernel(char *real_mode); | 113 | void __init x86_64_start_kernel(char *real_mode); |
diff --git a/arch/x86/include/asm/srat.h b/arch/x86/include/asm/srat.h deleted file mode 100644 index b508d639d1a7..000000000000 --- a/arch/x86/include/asm/srat.h +++ /dev/null | |||
@@ -1,39 +0,0 @@ | |||
1 | /* | ||
2 | * Some of the code in this file has been gleaned from the 64 bit | ||
3 | * discontigmem support code base. | ||
4 | * | ||
5 | * Copyright (C) 2002, IBM Corp. | ||
6 | * | ||
7 | * All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
17 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
18 | * details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
23 | * | ||
24 | * Send feedback to Pat Gaughen <gone@us.ibm.com> | ||
25 | */ | ||
26 | |||
27 | #ifndef _ASM_X86_SRAT_H | ||
28 | #define _ASM_X86_SRAT_H | ||
29 | |||
30 | #ifdef CONFIG_ACPI_NUMA | ||
31 | extern int get_memcfg_from_srat(void); | ||
32 | #else | ||
33 | static inline int get_memcfg_from_srat(void) | ||
34 | { | ||
35 | return 0; | ||
36 | } | ||
37 | #endif | ||
38 | |||
39 | #endif /* _ASM_X86_SRAT_H */ | ||
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 12569e691ce3..c2ff2a1d845e 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h | |||
@@ -303,24 +303,81 @@ static inline void native_wbinvd(void) | |||
303 | #ifdef CONFIG_PARAVIRT | 303 | #ifdef CONFIG_PARAVIRT |
304 | #include <asm/paravirt.h> | 304 | #include <asm/paravirt.h> |
305 | #else | 305 | #else |
306 | #define read_cr0() (native_read_cr0()) | 306 | |
307 | #define write_cr0(x) (native_write_cr0(x)) | 307 | static inline unsigned long read_cr0(void) |
308 | #define read_cr2() (native_read_cr2()) | 308 | { |
309 | #define write_cr2(x) (native_write_cr2(x)) | 309 | return native_read_cr0(); |
310 | #define read_cr3() (native_read_cr3()) | 310 | } |
311 | #define write_cr3(x) (native_write_cr3(x)) | 311 | |
312 | #define read_cr4() (native_read_cr4()) | 312 | static inline void write_cr0(unsigned long x) |
313 | #define read_cr4_safe() (native_read_cr4_safe()) | 313 | { |
314 | #define write_cr4(x) (native_write_cr4(x)) | 314 | native_write_cr0(x); |
315 | #define wbinvd() (native_wbinvd()) | 315 | } |
316 | |||
317 | static inline unsigned long read_cr2(void) | ||
318 | { | ||
319 | return native_read_cr2(); | ||
320 | } | ||
321 | |||
322 | static inline void write_cr2(unsigned long x) | ||
323 | { | ||
324 | native_write_cr2(x); | ||
325 | } | ||
326 | |||
327 | static inline unsigned long read_cr3(void) | ||
328 | { | ||
329 | return native_read_cr3(); | ||
330 | } | ||
331 | |||
332 | static inline void write_cr3(unsigned long x) | ||
333 | { | ||
334 | native_write_cr3(x); | ||
335 | } | ||
336 | |||
337 | static inline unsigned long read_cr4(void) | ||
338 | { | ||
339 | return native_read_cr4(); | ||
340 | } | ||
341 | |||
342 | static inline unsigned long read_cr4_safe(void) | ||
343 | { | ||
344 | return native_read_cr4_safe(); | ||
345 | } | ||
346 | |||
347 | static inline void write_cr4(unsigned long x) | ||
348 | { | ||
349 | native_write_cr4(x); | ||
350 | } | ||
351 | |||
352 | static inline void wbinvd(void) | ||
353 | { | ||
354 | native_wbinvd(); | ||
355 | } | ||
356 | |||
316 | #ifdef CONFIG_X86_64 | 357 | #ifdef CONFIG_X86_64 |
317 | #define read_cr8() (native_read_cr8()) | 358 | |
318 | #define write_cr8(x) (native_write_cr8(x)) | 359 | static inline unsigned long read_cr8(void) |
319 | #define load_gs_index native_load_gs_index | 360 | { |
361 | return native_read_cr8(); | ||
362 | } | ||
363 | |||
364 | static inline void write_cr8(unsigned long x) | ||
365 | { | ||
366 | native_write_cr8(x); | ||
367 | } | ||
368 | |||
369 | static inline void load_gs_index(unsigned selector) | ||
370 | { | ||
371 | native_load_gs_index(selector); | ||
372 | } | ||
373 | |||
320 | #endif | 374 | #endif |
321 | 375 | ||
322 | /* Clear the 'TS' bit */ | 376 | /* Clear the 'TS' bit */ |
323 | #define clts() (native_clts()) | 377 | static inline void clts(void) |
378 | { | ||
379 | native_clts(); | ||
380 | } | ||
324 | 381 | ||
325 | #endif/* CONFIG_PARAVIRT */ | 382 | #endif/* CONFIG_PARAVIRT */ |
326 | 383 | ||
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 910a7084f7f2..c00692476e9f 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h | |||
@@ -93,19 +93,11 @@ extern void setup_node_to_cpumask_map(void); | |||
93 | #define pcibus_to_node(bus) __pcibus_to_node(bus) | 93 | #define pcibus_to_node(bus) __pcibus_to_node(bus) |
94 | 94 | ||
95 | #ifdef CONFIG_X86_32 | 95 | #ifdef CONFIG_X86_32 |
96 | extern unsigned long node_start_pfn[]; | ||
97 | extern unsigned long node_end_pfn[]; | ||
98 | extern unsigned long node_remap_size[]; | ||
99 | #define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid]) | ||
100 | |||
101 | # define SD_CACHE_NICE_TRIES 1 | 96 | # define SD_CACHE_NICE_TRIES 1 |
102 | # define SD_IDLE_IDX 1 | 97 | # define SD_IDLE_IDX 1 |
103 | |||
104 | #else | 98 | #else |
105 | |||
106 | # define SD_CACHE_NICE_TRIES 2 | 99 | # define SD_CACHE_NICE_TRIES 2 |
107 | # define SD_IDLE_IDX 2 | 100 | # define SD_IDLE_IDX 2 |
108 | |||
109 | #endif | 101 | #endif |
110 | 102 | ||
111 | /* sched_domains SD_NODE_INIT for NUMA machines */ | 103 | /* sched_domains SD_NODE_INIT for NUMA machines */ |
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 3e094af443c3..130f1eeee5fe 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
@@ -94,6 +94,8 @@ | |||
94 | /* after this # consecutive successes, bump up the throttle if it was lowered */ | 94 | /* after this # consecutive successes, bump up the throttle if it was lowered */ |
95 | #define COMPLETE_THRESHOLD 5 | 95 | #define COMPLETE_THRESHOLD 5 |
96 | 96 | ||
97 | #define UV_LB_SUBNODEID 0x10 | ||
98 | |||
97 | /* | 99 | /* |
98 | * number of entries in the destination side payload queue | 100 | * number of entries in the destination side payload queue |
99 | */ | 101 | */ |
@@ -124,7 +126,7 @@ | |||
124 | * The distribution specification (32 bytes) is interpreted as a 256-bit | 126 | * The distribution specification (32 bytes) is interpreted as a 256-bit |
125 | * distribution vector. Adjacent bits correspond to consecutive even numbered | 127 | * distribution vector. Adjacent bits correspond to consecutive even numbered |
126 | * nodeIDs. The result of adding the index of a given bit to the 15-bit | 128 | * nodeIDs. The result of adding the index of a given bit to the 15-bit |
127 | * 'base_dest_nodeid' field of the header corresponds to the | 129 | * 'base_dest_nasid' field of the header corresponds to the |
128 | * destination nodeID associated with that specified bit. | 130 | * destination nodeID associated with that specified bit. |
129 | */ | 131 | */ |
130 | struct bau_target_uvhubmask { | 132 | struct bau_target_uvhubmask { |
@@ -176,7 +178,7 @@ struct bau_msg_payload { | |||
176 | struct bau_msg_header { | 178 | struct bau_msg_header { |
177 | unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ | 179 | unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ |
178 | /* bits 5:0 */ | 180 | /* bits 5:0 */ |
179 | unsigned int base_dest_nodeid:15; /* nasid of the */ | 181 | unsigned int base_dest_nasid:15; /* nasid of the */ |
180 | /* bits 20:6 */ /* first bit in uvhub map */ | 182 | /* bits 20:6 */ /* first bit in uvhub map */ |
181 | unsigned int command:8; /* message type */ | 183 | unsigned int command:8; /* message type */ |
182 | /* bits 28:21 */ | 184 | /* bits 28:21 */ |
@@ -378,6 +380,10 @@ struct ptc_stats { | |||
378 | unsigned long d_rcanceled; /* number of messages canceled by resets */ | 380 | unsigned long d_rcanceled; /* number of messages canceled by resets */ |
379 | }; | 381 | }; |
380 | 382 | ||
383 | struct hub_and_pnode { | ||
384 | short uvhub; | ||
385 | short pnode; | ||
386 | }; | ||
381 | /* | 387 | /* |
382 | * one per-cpu; to locate the software tables | 388 | * one per-cpu; to locate the software tables |
383 | */ | 389 | */ |
@@ -399,10 +405,12 @@ struct bau_control { | |||
399 | int baudisabled; | 405 | int baudisabled; |
400 | int set_bau_off; | 406 | int set_bau_off; |
401 | short cpu; | 407 | short cpu; |
408 | short osnode; | ||
402 | short uvhub_cpu; | 409 | short uvhub_cpu; |
403 | short uvhub; | 410 | short uvhub; |
404 | short cpus_in_socket; | 411 | short cpus_in_socket; |
405 | short cpus_in_uvhub; | 412 | short cpus_in_uvhub; |
413 | short partition_base_pnode; | ||
406 | unsigned short message_number; | 414 | unsigned short message_number; |
407 | unsigned short uvhub_quiesce; | 415 | unsigned short uvhub_quiesce; |
408 | short socket_acknowledge_count[DEST_Q_SIZE]; | 416 | short socket_acknowledge_count[DEST_Q_SIZE]; |
@@ -422,15 +430,16 @@ struct bau_control { | |||
422 | int congested_period; | 430 | int congested_period; |
423 | cycles_t period_time; | 431 | cycles_t period_time; |
424 | long period_requests; | 432 | long period_requests; |
433 | struct hub_and_pnode *target_hub_and_pnode; | ||
425 | }; | 434 | }; |
426 | 435 | ||
427 | static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) | 436 | static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) |
428 | { | 437 | { |
429 | return constant_test_bit(uvhub, &dstp->bits[0]); | 438 | return constant_test_bit(uvhub, &dstp->bits[0]); |
430 | } | 439 | } |
431 | static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) | 440 | static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp) |
432 | { | 441 | { |
433 | __set_bit(uvhub, &dstp->bits[0]); | 442 | __set_bit(pnode, &dstp->bits[0]); |
434 | } | 443 | } |
435 | static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, | 444 | static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, |
436 | int nbits) | 445 | int nbits) |
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index a501741c2335..4298002d0c83 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h | |||
@@ -398,6 +398,8 @@ struct uv_blade_info { | |||
398 | unsigned short nr_online_cpus; | 398 | unsigned short nr_online_cpus; |
399 | unsigned short pnode; | 399 | unsigned short pnode; |
400 | short memory_nid; | 400 | short memory_nid; |
401 | spinlock_t nmi_lock; | ||
402 | unsigned long nmi_count; | ||
401 | }; | 403 | }; |
402 | extern struct uv_blade_info *uv_blade_info; | 404 | extern struct uv_blade_info *uv_blade_info; |
403 | extern short *uv_node_to_blade; | 405 | extern short *uv_node_to_blade; |
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index 20cafeac7455..f5bb64a823d7 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * SGI UV MMR definitions | 6 | * SGI UV MMR definitions |
7 | * | 7 | * |
8 | * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _ASM_X86_UV_UV_MMRS_H | 11 | #ifndef _ASM_X86_UV_UV_MMRS_H |
@@ -1099,5 +1099,19 @@ union uvh_rtc1_int_config_u { | |||
1099 | } s; | 1099 | } s; |
1100 | }; | 1100 | }; |
1101 | 1101 | ||
1102 | /* ========================================================================= */ | ||
1103 | /* UVH_SCRATCH5 */ | ||
1104 | /* ========================================================================= */ | ||
1105 | #define UVH_SCRATCH5 0x2d0200UL | ||
1106 | #define UVH_SCRATCH5_32 0x00778 | ||
1107 | |||
1108 | #define UVH_SCRATCH5_SCRATCH5_SHFT 0 | ||
1109 | #define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL | ||
1110 | union uvh_scratch5_u { | ||
1111 | unsigned long v; | ||
1112 | struct uvh_scratch5_s { | ||
1113 | unsigned long scratch5 : 64; /* RW, W1CS */ | ||
1114 | } s; | ||
1115 | }; | ||
1102 | 1116 | ||
1103 | #endif /* __ASM_UV_MMRS_X86_H__ */ | 1117 | #endif /* __ASM_UV_MMRS_X86_H__ */ |
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 643ebf2e2ad8..d3d859035af9 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -68,6 +68,17 @@ struct x86_init_oem { | |||
68 | }; | 68 | }; |
69 | 69 | ||
70 | /** | 70 | /** |
71 | * struct x86_init_mapping - platform specific initial kernel pagetable setup | ||
72 | * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage | ||
73 | * | ||
74 | * For more details on the purpose of this hook, look in | ||
75 | * init_memory_mapping and the commit that added it. | ||
76 | */ | ||
77 | struct x86_init_mapping { | ||
78 | void (*pagetable_reserve)(u64 start, u64 end); | ||
79 | }; | ||
80 | |||
81 | /** | ||
71 | * struct x86_init_paging - platform specific paging functions | 82 | * struct x86_init_paging - platform specific paging functions |
72 | * @pagetable_setup_start: platform specific pre paging_init() call | 83 | * @pagetable_setup_start: platform specific pre paging_init() call |
73 | * @pagetable_setup_done: platform specific post paging_init() call | 84 | * @pagetable_setup_done: platform specific post paging_init() call |
@@ -123,6 +134,7 @@ struct x86_init_ops { | |||
123 | struct x86_init_mpparse mpparse; | 134 | struct x86_init_mpparse mpparse; |
124 | struct x86_init_irqs irqs; | 135 | struct x86_init_irqs irqs; |
125 | struct x86_init_oem oem; | 136 | struct x86_init_oem oem; |
137 | struct x86_init_mapping mapping; | ||
126 | struct x86_init_paging paging; | 138 | struct x86_init_paging paging; |
127 | struct x86_init_timers timers; | 139 | struct x86_init_timers timers; |
128 | struct x86_init_iommu iommu; | 140 | struct x86_init_iommu iommu; |
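The new x86_init.mapping.pagetable_reserve hook (defaulting to native_pagetable_reserve(), declared in pgtable_types.h above) lets a platform decide how the range holding the initial kernel pagetables is reserved. A hedged sketch of an override; the xen_* names and the exact reservation call are assumptions for illustration only:

    static void xen_pagetable_reserve(u64 start, u64 end)
    {
            /* reserve [start, end) for pagetables, then apply whatever
             * extra fixups (e.g. RO protections) the hypervisor needs */
            memblock_x86_reserve_range(start, end, "PGTABLE");
    }

    void __init xen_setup_mapping_hook(void)
    {
            x86_init.mapping.pagetable_reserve = xen_pagetable_reserve;
    }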
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index c61934fbf22a..64a619d47d34 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -47,8 +47,9 @@ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); | |||
47 | extern unsigned long set_phys_range_identity(unsigned long pfn_s, | 47 | extern unsigned long set_phys_range_identity(unsigned long pfn_s, |
48 | unsigned long pfn_e); | 48 | unsigned long pfn_e); |
49 | 49 | ||
50 | extern int m2p_add_override(unsigned long mfn, struct page *page); | 50 | extern int m2p_add_override(unsigned long mfn, struct page *page, |
51 | extern int m2p_remove_override(struct page *page); | 51 | bool clear_pte); |
52 | extern int m2p_remove_override(struct page *page, bool clear_pte); | ||
52 | extern struct page *m2p_find_override(unsigned long mfn); | 53 | extern struct page *m2p_find_override(unsigned long mfn); |
53 | extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); | 54 | extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); |
54 | 55 | ||
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h index aa8620989162..4fbda9a3f339 100644 --- a/arch/x86/include/asm/xen/pci.h +++ b/arch/x86/include/asm/xen/pci.h | |||
@@ -15,10 +15,26 @@ static inline int pci_xen_hvm_init(void) | |||
15 | #endif | 15 | #endif |
16 | #if defined(CONFIG_XEN_DOM0) | 16 | #if defined(CONFIG_XEN_DOM0) |
17 | void __init xen_setup_pirqs(void); | 17 | void __init xen_setup_pirqs(void); |
18 | int xen_find_device_domain_owner(struct pci_dev *dev); | ||
19 | int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain); | ||
20 | int xen_unregister_device_domain_owner(struct pci_dev *dev); | ||
18 | #else | 21 | #else |
19 | static inline void __init xen_setup_pirqs(void) | 22 | static inline void __init xen_setup_pirqs(void) |
20 | { | 23 | { |
21 | } | 24 | } |
25 | static inline int xen_find_device_domain_owner(struct pci_dev *dev) | ||
26 | { | ||
27 | return -1; | ||
28 | } | ||
29 | static inline int xen_register_device_domain_owner(struct pci_dev *dev, | ||
30 | uint16_t domain) | ||
31 | { | ||
32 | return -1; | ||
33 | } | ||
34 | static inline int xen_unregister_device_domain_owner(struct pci_dev *dev) | ||
35 | { | ||
36 | return -1; | ||
37 | } | ||
22 | #endif | 38 | #endif |
23 | 39 | ||
24 | #if defined(CONFIG_PCI_MSI) | 40 | #if defined(CONFIG_PCI_MSI) |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 7338ef2218bc..250806472a7e 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -36,7 +36,7 @@ obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o | |||
36 | obj-y += time.o ioport.o ldt.o dumpstack.o | 36 | obj-y += time.o ioport.o ldt.o dumpstack.o |
37 | obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o | 37 | obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o |
38 | obj-$(CONFIG_IRQ_WORK) += irq_work.o | 38 | obj-$(CONFIG_IRQ_WORK) += irq_work.o |
39 | obj-$(CONFIG_X86_32) += probe_roms_32.o | 39 | obj-y += probe_roms.o |
40 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o | 40 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o |
41 | obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o | 41 | obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o |
42 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o | 42 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o |
@@ -117,7 +117,7 @@ obj-$(CONFIG_OF) += devicetree.o | |||
117 | ifeq ($(CONFIG_X86_64),y) | 117 | ifeq ($(CONFIG_X86_64),y) |
118 | obj-$(CONFIG_AUDIT) += audit_64.o | 118 | obj-$(CONFIG_AUDIT) += audit_64.o |
119 | 119 | ||
120 | obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o | 120 | obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o |
121 | obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o | 121 | obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o |
122 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o | 122 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o |
123 | 123 | ||
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index ff93bc1b09c3..18a857ba7a25 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -112,11 +112,6 @@ static int __init acpi_sleep_setup(char *str) | |||
112 | #ifdef CONFIG_HIBERNATION | 112 | #ifdef CONFIG_HIBERNATION |
113 | if (strncmp(str, "s4_nohwsig", 10) == 0) | 113 | if (strncmp(str, "s4_nohwsig", 10) == 0) |
114 | acpi_no_s4_hw_signature(); | 114 | acpi_no_s4_hw_signature(); |
115 | if (strncmp(str, "s4_nonvs", 8) == 0) { | ||
116 | pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, " | ||
117 | "please use acpi_sleep=nonvs instead"); | ||
118 | acpi_nvs_nosave(); | ||
119 | } | ||
120 | #endif | 115 | #endif |
121 | if (strncmp(str, "nonvs", 5) == 0) | 116 | if (strncmp(str, "nonvs", 5) == 0) |
122 | acpi_nvs_nosave(); | 117 | acpi_nvs_nosave(); |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 1eeeafcb4410..a81f2d52f869 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -67,17 +67,30 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt); | |||
67 | #define DPRINTK(fmt, args...) if (debug_alternative) \ | 67 | #define DPRINTK(fmt, args...) if (debug_alternative) \ |
68 | printk(KERN_DEBUG fmt, args) | 68 | printk(KERN_DEBUG fmt, args) |
69 | 69 | ||
70 | /* | ||
71 | * Each GENERIC_NOPX is X bytes long, defined as an array of the bytes | ||
72 | * that make up that nop. Getting from one nop to the next, we index | ||
73 | * into the array at an offset equal to the sum of the sizes of all | ||
74 | * the preceding nops. | ||
75 | * | ||
76 | * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the | ||
77 | * nice symmetry of sizes of the previous nops. | ||
78 | */ | ||
70 | #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64) | 79 | #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64) |
71 | /* Use inline assembly to define this because the nops are defined | 80 | static const unsigned char intelnops[] = |
72 | as inline assembly strings in the include files and we cannot | 81 | { |
73 | get them easily into strings. */ | 82 | GENERIC_NOP1, |
74 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: " | 83 | GENERIC_NOP2, |
75 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 | 84 | GENERIC_NOP3, |
76 | GENERIC_NOP7 GENERIC_NOP8 | 85 | GENERIC_NOP4, |
77 | "\t.previous"); | 86 | GENERIC_NOP5, |
78 | extern const unsigned char intelnops[]; | 87 | GENERIC_NOP6, |
79 | static const unsigned char *const __initconst_or_module | 88 | GENERIC_NOP7, |
80 | intel_nops[ASM_NOP_MAX+1] = { | 89 | GENERIC_NOP8, |
90 | GENERIC_NOP5_ATOMIC | ||
91 | }; | ||
92 | static const unsigned char * const intel_nops[ASM_NOP_MAX+2] = | ||
93 | { | ||
81 | NULL, | 94 | NULL, |
82 | intelnops, | 95 | intelnops, |
83 | intelnops + 1, | 96 | intelnops + 1, |
@@ -87,17 +100,25 @@ intel_nops[ASM_NOP_MAX+1] = { | |||
87 | intelnops + 1 + 2 + 3 + 4 + 5, | 100 | intelnops + 1 + 2 + 3 + 4 + 5, |
88 | intelnops + 1 + 2 + 3 + 4 + 5 + 6, | 101 | intelnops + 1 + 2 + 3 + 4 + 5 + 6, |
89 | intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | 102 | intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, |
103 | intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, | ||
90 | }; | 104 | }; |
91 | #endif | 105 | #endif |
92 | 106 | ||
93 | #ifdef K8_NOP1 | 107 | #ifdef K8_NOP1 |
94 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: " | 108 | static const unsigned char k8nops[] = |
95 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 | 109 | { |
96 | K8_NOP7 K8_NOP8 | 110 | K8_NOP1, |
97 | "\t.previous"); | 111 | K8_NOP2, |
98 | extern const unsigned char k8nops[]; | 112 | K8_NOP3, |
99 | static const unsigned char *const __initconst_or_module | 113 | K8_NOP4, |
100 | k8_nops[ASM_NOP_MAX+1] = { | 114 | K8_NOP5, |
115 | K8_NOP6, | ||
116 | K8_NOP7, | ||
117 | K8_NOP8, | ||
118 | K8_NOP5_ATOMIC | ||
119 | }; | ||
120 | static const unsigned char * const k8_nops[ASM_NOP_MAX+2] = | ||
121 | { | ||
101 | NULL, | 122 | NULL, |
102 | k8nops, | 123 | k8nops, |
103 | k8nops + 1, | 124 | k8nops + 1, |
@@ -107,17 +128,25 @@ k8_nops[ASM_NOP_MAX+1] = { | |||
107 | k8nops + 1 + 2 + 3 + 4 + 5, | 128 | k8nops + 1 + 2 + 3 + 4 + 5, |
108 | k8nops + 1 + 2 + 3 + 4 + 5 + 6, | 129 | k8nops + 1 + 2 + 3 + 4 + 5 + 6, |
109 | k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | 130 | k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, |
131 | k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, | ||
110 | }; | 132 | }; |
111 | #endif | 133 | #endif |
112 | 134 | ||
113 | #if defined(K7_NOP1) && !defined(CONFIG_X86_64) | 135 | #if defined(K7_NOP1) && !defined(CONFIG_X86_64) |
114 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: " | 136 | static const unsigned char k7nops[] = |
115 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 | 137 | { |
116 | K7_NOP7 K7_NOP8 | 138 | K7_NOP1, |
117 | "\t.previous"); | 139 | K7_NOP2, |
118 | extern const unsigned char k7nops[]; | 140 | K7_NOP3, |
119 | static const unsigned char *const __initconst_or_module | 141 | K7_NOP4, |
120 | k7_nops[ASM_NOP_MAX+1] = { | 142 | K7_NOP5, |
143 | K7_NOP6, | ||
144 | K7_NOP7, | ||
145 | K7_NOP8, | ||
146 | K7_NOP5_ATOMIC | ||
147 | }; | ||
148 | static const unsigned char * const k7_nops[ASM_NOP_MAX+2] = | ||
149 | { | ||
121 | NULL, | 150 | NULL, |
122 | k7nops, | 151 | k7nops, |
123 | k7nops + 1, | 152 | k7nops + 1, |
@@ -127,17 +156,25 @@ k7_nops[ASM_NOP_MAX+1] = { | |||
127 | k7nops + 1 + 2 + 3 + 4 + 5, | 156 | k7nops + 1 + 2 + 3 + 4 + 5, |
128 | k7nops + 1 + 2 + 3 + 4 + 5 + 6, | 157 | k7nops + 1 + 2 + 3 + 4 + 5 + 6, |
129 | k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | 158 | k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, |
159 | k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, | ||
130 | }; | 160 | }; |
131 | #endif | 161 | #endif |
132 | 162 | ||
133 | #ifdef P6_NOP1 | 163 | #ifdef P6_NOP1 |
134 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: " | 164 | static const unsigned char __initconst_or_module p6nops[] = |
135 | P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 | 165 | { |
136 | P6_NOP7 P6_NOP8 | 166 | P6_NOP1, |
137 | "\t.previous"); | 167 | P6_NOP2, |
138 | extern const unsigned char p6nops[]; | 168 | P6_NOP3, |
139 | static const unsigned char *const __initconst_or_module | 169 | P6_NOP4, |
140 | p6_nops[ASM_NOP_MAX+1] = { | 170 | P6_NOP5, |
171 | P6_NOP6, | ||
172 | P6_NOP7, | ||
173 | P6_NOP8, | ||
174 | P6_NOP5_ATOMIC | ||
175 | }; | ||
176 | static const unsigned char * const p6_nops[ASM_NOP_MAX+2] = | ||
177 | { | ||
141 | NULL, | 178 | NULL, |
142 | p6nops, | 179 | p6nops, |
143 | p6nops + 1, | 180 | p6nops + 1, |
@@ -147,47 +184,65 @@ p6_nops[ASM_NOP_MAX+1] = { | |||
147 | p6nops + 1 + 2 + 3 + 4 + 5, | 184 | p6nops + 1 + 2 + 3 + 4 + 5, |
148 | p6nops + 1 + 2 + 3 + 4 + 5 + 6, | 185 | p6nops + 1 + 2 + 3 + 4 + 5 + 6, |
149 | p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | 186 | p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, |
187 | p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, | ||
150 | }; | 188 | }; |
151 | #endif | 189 | #endif |
152 | 190 | ||
191 | /* Initialize these to a safe default */ | ||
153 | #ifdef CONFIG_X86_64 | 192 | #ifdef CONFIG_X86_64 |
193 | const unsigned char * const *ideal_nops = p6_nops; | ||
194 | #else | ||
195 | const unsigned char * const *ideal_nops = intel_nops; | ||
196 | #endif | ||
154 | 197 | ||
155 | extern char __vsyscall_0; | 198 | void __init arch_init_ideal_nops(void) |
156 | static const unsigned char *const *__init_or_module find_nop_table(void) | ||
157 | { | 199 | { |
158 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | 200 | switch (boot_cpu_data.x86_vendor) { |
159 | boot_cpu_has(X86_FEATURE_NOPL)) | 201 | case X86_VENDOR_INTEL: |
160 | return p6_nops; | 202 | /* |
161 | else | 203 | * Due to a decoder implementation quirk, some |
162 | return k8_nops; | 204 | * specific Intel CPUs actually perform better with |
163 | } | 205 | * the "k8_nops" than with the SDM-recommended NOPs. |
164 | 206 | */ | |
165 | #else /* CONFIG_X86_64 */ | 207 | if (boot_cpu_data.x86 == 6 && |
208 | boot_cpu_data.x86_model >= 0x0f && | ||
209 | boot_cpu_data.x86_model != 0x1c && | ||
210 | boot_cpu_data.x86_model != 0x26 && | ||
211 | boot_cpu_data.x86_model != 0x27 && | ||
212 | boot_cpu_data.x86_model < 0x30) { | ||
213 | ideal_nops = k8_nops; | ||
214 | } else if (boot_cpu_has(X86_FEATURE_NOPL)) { | ||
215 | ideal_nops = p6_nops; | ||
216 | } else { | ||
217 | #ifdef CONFIG_X86_64 | ||
218 | ideal_nops = k8_nops; | ||
219 | #else | ||
220 | ideal_nops = intel_nops; | ||
221 | #endif | ||
222 | } | ||
166 | 223 | ||
167 | static const unsigned char *const *__init_or_module find_nop_table(void) | 224 | default: |
168 | { | 225 | #ifdef CONFIG_X86_64 |
169 | if (boot_cpu_has(X86_FEATURE_K8)) | 226 | ideal_nops = k8_nops; |
170 | return k8_nops; | 227 | #else |
171 | else if (boot_cpu_has(X86_FEATURE_K7)) | 228 | if (boot_cpu_has(X86_FEATURE_K8)) |
172 | return k7_nops; | 229 | ideal_nops = k8_nops; |
173 | else if (boot_cpu_has(X86_FEATURE_NOPL)) | 230 | else if (boot_cpu_has(X86_FEATURE_K7)) |
174 | return p6_nops; | 231 | ideal_nops = k7_nops; |
175 | else | 232 | else |
176 | return intel_nops; | 233 | ideal_nops = intel_nops; |
234 | #endif | ||
235 | } | ||
177 | } | 236 | } |
178 | 237 | ||
179 | #endif /* CONFIG_X86_64 */ | ||
180 | |||
181 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ | 238 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ |
182 | static void __init_or_module add_nops(void *insns, unsigned int len) | 239 | static void __init_or_module add_nops(void *insns, unsigned int len) |
183 | { | 240 | { |
184 | const unsigned char *const *noptable = find_nop_table(); | ||
185 | |||
186 | while (len > 0) { | 241 | while (len > 0) { |
187 | unsigned int noplen = len; | 242 | unsigned int noplen = len; |
188 | if (noplen > ASM_NOP_MAX) | 243 | if (noplen > ASM_NOP_MAX) |
189 | noplen = ASM_NOP_MAX; | 244 | noplen = ASM_NOP_MAX; |
190 | memcpy(insns, noptable[noplen], noplen); | 245 | memcpy(insns, ideal_nops[noplen], noplen); |
191 | insns += noplen; | 246 | insns += noplen; |
192 | len -= noplen; | 247 | len -= noplen; |
193 | } | 248 | } |
@@ -195,6 +250,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len) | |||
195 | 250 | ||
196 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | 251 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; |
197 | extern s32 __smp_locks[], __smp_locks_end[]; | 252 | extern s32 __smp_locks[], __smp_locks_end[]; |
253 | extern char __vsyscall_0; | ||
198 | void *text_poke_early(void *addr, const void *opcode, size_t len); | 254 | void *text_poke_early(void *addr, const void *opcode, size_t len); |
199 | 255 | ||
200 | /* Replace instructions with better alternatives for this CPU type. | 256 | /* Replace instructions with better alternatives for this CPU type. |
@@ -687,29 +743,3 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) | |||
687 | wrote_text = 0; | 743 | wrote_text = 0; |
688 | __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); | 744 | __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); |
689 | } | 745 | } |
690 | |||
691 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) | ||
692 | |||
693 | #ifdef CONFIG_X86_64 | ||
694 | unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; | ||
695 | #else | ||
696 | unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 }; | ||
697 | #endif | ||
698 | |||
699 | void __init arch_init_ideal_nop5(void) | ||
700 | { | ||
701 | /* | ||
702 | * There is no good nop for all x86 archs. This selection | ||
703 | * algorithm should be unified with the one in find_nop_table(), | ||
704 | * but this should be good enough for now. | ||
705 | * | ||
706 | * For cases other than the ones below, use the safe (as in | ||
707 | * always functional) defaults above. | ||
708 | */ | ||
709 | #ifdef CONFIG_X86_64 | ||
710 | /* Don't use these on 32 bits due to broken virtualizers */ | ||
711 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) | ||
712 | memcpy(ideal_nop5, p6_nops[5], 5); | ||
713 | #endif | ||
714 | } | ||
715 | #endif | ||
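What the rewritten tables guarantee: entry len of each table points at a single NOP instruction exactly len bytes long, because each pointer is the flat byte array plus the sum of all shorter NOP sizes; the new ASM_NOP_MAX+2 sizing leaves room for the 5-byte atomic NOP appended at the end of each byte array. A stand-alone sketch of the indexing and of the tiling loop in add_nops(), with 0x90 standing in for the real multi-byte encodings and a shortened maximum:

#include <stdio.h>
#include <string.h>

#define ASM_NOP_MAX 4	/* the kernel uses 8; shortened for the sketch */

/* NOPs of length 1..4, concatenated; 0x90 stands in for real encodings. */
static const unsigned char nops[] = {
	0x90,				/* NOP1 */
	0x90, 0x90,			/* NOP2 */
	0x90, 0x90, 0x90,		/* NOP3 */
	0x90, 0x90, 0x90, 0x90,		/* NOP4 */
};

/* nop_table[len] points at a NOP of exactly len bytes. */
static const unsigned char * const nop_table[ASM_NOP_MAX + 1] = {
	NULL,
	nops,
	nops + 1,
	nops + 1 + 2,
	nops + 1 + 2 + 3,
};

/* Tile a buffer with the largest NOPs that fit, as add_nops() does. */
static void fill_nops(unsigned char *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len > ASM_NOP_MAX ? ASM_NOP_MAX : len;

		memcpy(insns, nop_table[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

int main(void)
{
	unsigned char buf[7];

	fill_nops(buf, sizeof(buf));	/* one 4-byte NOP, one 3-byte NOP */
	return 0;
}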
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/amd_gart_64.c index b117efd24f71..b117efd24f71 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c | |||
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 57ca77787220..873e7e1ead7b 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | #include <linux/pci-ats.h> | ||
21 | #include <linux/bitmap.h> | 22 | #include <linux/bitmap.h> |
22 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
23 | #include <linux/debugfs.h> | 24 | #include <linux/debugfs.h> |
@@ -25,6 +26,7 @@ | |||
25 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
26 | #include <linux/iommu-helper.h> | 27 | #include <linux/iommu-helper.h> |
27 | #include <linux/iommu.h> | 28 | #include <linux/iommu.h> |
29 | #include <linux/delay.h> | ||
28 | #include <asm/proto.h> | 30 | #include <asm/proto.h> |
29 | #include <asm/iommu.h> | 31 | #include <asm/iommu.h> |
30 | #include <asm/gart.h> | 32 | #include <asm/gart.h> |
@@ -34,7 +36,7 @@ | |||
34 | 36 | ||
35 | #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) | 37 | #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) |
36 | 38 | ||
37 | #define EXIT_LOOP_COUNT 10000000 | 39 | #define LOOP_TIMEOUT 100000 |
38 | 40 | ||
39 | static DEFINE_RWLOCK(amd_iommu_devtable_lock); | 41 | static DEFINE_RWLOCK(amd_iommu_devtable_lock); |
40 | 42 | ||
@@ -57,7 +59,6 @@ struct iommu_cmd { | |||
57 | u32 data[4]; | 59 | u32 data[4]; |
58 | }; | 60 | }; |
59 | 61 | ||
60 | static void reset_iommu_command_buffer(struct amd_iommu *iommu); | ||
61 | static void update_domain(struct protection_domain *domain); | 62 | static void update_domain(struct protection_domain *domain); |
62 | 63 | ||
63 | /**************************************************************************** | 64 | /**************************************************************************** |
@@ -322,8 +323,6 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) | |||
322 | break; | 323 | break; |
323 | case EVENT_TYPE_ILL_CMD: | 324 | case EVENT_TYPE_ILL_CMD: |
324 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); | 325 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); |
325 | iommu->reset_in_progress = true; | ||
326 | reset_iommu_command_buffer(iommu); | ||
327 | dump_command(address); | 326 | dump_command(address); |
328 | break; | 327 | break; |
329 | case EVENT_TYPE_CMD_HARD_ERR: | 328 | case EVENT_TYPE_CMD_HARD_ERR: |
@@ -367,7 +366,7 @@ static void iommu_poll_events(struct amd_iommu *iommu) | |||
367 | spin_unlock_irqrestore(&iommu->lock, flags); | 366 | spin_unlock_irqrestore(&iommu->lock, flags); |
368 | } | 367 | } |
369 | 368 | ||
370 | irqreturn_t amd_iommu_int_handler(int irq, void *data) | 369 | irqreturn_t amd_iommu_int_thread(int irq, void *data) |
371 | { | 370 | { |
372 | struct amd_iommu *iommu; | 371 | struct amd_iommu *iommu; |
373 | 372 | ||
@@ -377,192 +376,300 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data) | |||
377 | return IRQ_HANDLED; | 376 | return IRQ_HANDLED; |
378 | } | 377 | } |
379 | 378 | ||
379 | irqreturn_t amd_iommu_int_handler(int irq, void *data) | ||
380 | { | ||
381 | return IRQ_WAKE_THREAD; | ||
382 | } | ||
383 | |||
380 | /**************************************************************************** | 384 | /**************************************************************************** |
381 | * | 385 | * |
382 | * IOMMU command queuing functions | 386 | * IOMMU command queuing functions |
383 | * | 387 | * |
384 | ****************************************************************************/ | 388 | ****************************************************************************/ |
385 | 389 | ||
386 | /* | 390 | static int wait_on_sem(volatile u64 *sem) |
387 | * Writes the command to the IOMMUs command buffer and informs the | 391 | { |
388 | * hardware about the new command. Must be called with iommu->lock held. | 392 | int i = 0; |
389 | */ | 393 | |
390 | static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | 394 | while (*sem == 0 && i < LOOP_TIMEOUT) { |
395 | udelay(1); | ||
396 | i += 1; | ||
397 | } | ||
398 | |||
399 | if (i == LOOP_TIMEOUT) { | ||
400 | pr_alert("AMD-Vi: Completion-Wait loop timed out\n"); | ||
401 | return -EIO; | ||
402 | } | ||
403 | |||
404 | return 0; | ||
405 | } | ||
406 | |||
407 | static void copy_cmd_to_buffer(struct amd_iommu *iommu, | ||
408 | struct iommu_cmd *cmd, | ||
409 | u32 tail) | ||
391 | { | 410 | { |
392 | u32 tail, head; | ||
393 | u8 *target; | 411 | u8 *target; |
394 | 412 | ||
395 | WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); | ||
396 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
397 | target = iommu->cmd_buf + tail; | 413 | target = iommu->cmd_buf + tail; |
398 | memcpy_toio(target, cmd, sizeof(*cmd)); | 414 | tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; |
399 | tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; | 415 | |
400 | head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | 416 | /* Copy command to buffer */ |
401 | if (tail == head) | 417 | memcpy(target, cmd, sizeof(*cmd)); |
402 | return -ENOMEM; | 418 | |
419 | /* Tell the IOMMU about it */ | ||
403 | writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | 420 | writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); |
421 | } | ||
404 | 422 | ||
405 | return 0; | 423 | static void build_completion_wait(struct iommu_cmd *cmd, u64 address) |
424 | { | ||
425 | WARN_ON(address & 0x7ULL); | ||
426 | |||
427 | memset(cmd, 0, sizeof(*cmd)); | ||
428 | cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK; | ||
429 | cmd->data[1] = upper_32_bits(__pa(address)); | ||
430 | cmd->data[2] = 1; | ||
431 | CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); | ||
432 | } | ||
433 | |||
434 | static void build_inv_dte(struct iommu_cmd *cmd, u16 devid) | ||
435 | { | ||
436 | memset(cmd, 0, sizeof(*cmd)); | ||
437 | cmd->data[0] = devid; | ||
438 | CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY); | ||
439 | } | ||
440 | |||
441 | static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, | ||
442 | size_t size, u16 domid, int pde) | ||
443 | { | ||
444 | u64 pages; | ||
445 | int s; | ||
446 | |||
447 | pages = iommu_num_pages(address, size, PAGE_SIZE); | ||
448 | s = 0; | ||
449 | |||
450 | if (pages > 1) { | ||
451 | /* | ||
452 | * If we have to flush more than one page, flush all | ||
453 | * TLB entries for this domain | ||
454 | */ | ||
455 | address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | ||
456 | s = 1; | ||
457 | } | ||
458 | |||
459 | address &= PAGE_MASK; | ||
460 | |||
461 | memset(cmd, 0, sizeof(*cmd)); | ||
462 | cmd->data[1] |= domid; | ||
463 | cmd->data[2] = lower_32_bits(address); | ||
464 | cmd->data[3] = upper_32_bits(address); | ||
465 | CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); | ||
466 | if (s) /* size bit - we flush more than one 4kb page */ | ||
467 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; | ||
468 | if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ | ||
469 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; | ||
470 | } | ||
471 | |||
472 | static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, | ||
473 | u64 address, size_t size) | ||
474 | { | ||
475 | u64 pages; | ||
476 | int s; | ||
477 | |||
478 | pages = iommu_num_pages(address, size, PAGE_SIZE); | ||
479 | s = 0; | ||
480 | |||
481 | if (pages > 1) { | ||
482 | /* | ||
483 | * If we have to flush more than one page, flush all | ||
484 | * TLB entries for this domain | ||
485 | */ | ||
486 | address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | ||
487 | s = 1; | ||
488 | } | ||
489 | |||
490 | address &= PAGE_MASK; | ||
491 | |||
492 | memset(cmd, 0, sizeof(*cmd)); | ||
493 | cmd->data[0] = devid; | ||
494 | cmd->data[0] |= (qdep & 0xff) << 24; | ||
495 | cmd->data[1] = devid; | ||
496 | cmd->data[2] = lower_32_bits(address); | ||
497 | cmd->data[3] = upper_32_bits(address); | ||
498 | CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); | ||
499 | if (s) | ||
500 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; | ||
501 | } | ||
502 | |||
503 | static void build_inv_all(struct iommu_cmd *cmd) | ||
504 | { | ||
505 | memset(cmd, 0, sizeof(*cmd)); | ||
506 | CMD_SET_TYPE(cmd, CMD_INV_ALL); | ||
406 | } | 507 | } |
407 | 508 | ||
408 | /* | 509 | /* |
409 | * General queuing function for commands. Takes iommu->lock and calls | 510 | * Writes the command to the IOMMUs command buffer and informs the |
410 | * __iommu_queue_command(). | 511 | * hardware about the new command. |
411 | */ | 512 | */ |
412 | static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | 513 | static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) |
413 | { | 514 | { |
515 | u32 left, tail, head, next_tail; | ||
414 | unsigned long flags; | 516 | unsigned long flags; |
415 | int ret; | ||
416 | 517 | ||
518 | WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); | ||
519 | |||
520 | again: | ||
417 | spin_lock_irqsave(&iommu->lock, flags); | 521 | spin_lock_irqsave(&iommu->lock, flags); |
418 | ret = __iommu_queue_command(iommu, cmd); | ||
419 | if (!ret) | ||
420 | iommu->need_sync = true; | ||
421 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
422 | 522 | ||
423 | return ret; | 523 | head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); |
424 | } | 524 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); |
525 | next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; | ||
526 | left = (head - next_tail) % iommu->cmd_buf_size; | ||
425 | 527 | ||
426 | /* | 528 | if (left <= 2) { |
427 | * This function waits until an IOMMU has completed a completion | 529 | struct iommu_cmd sync_cmd; |
428 | * wait command | 530 | volatile u64 sem = 0; |
429 | */ | 531 | int ret; |
430 | static void __iommu_wait_for_completion(struct amd_iommu *iommu) | ||
431 | { | ||
432 | int ready = 0; | ||
433 | unsigned status = 0; | ||
434 | unsigned long i = 0; | ||
435 | 532 | ||
436 | INC_STATS_COUNTER(compl_wait); | 533 | build_completion_wait(&sync_cmd, (u64)&sem); |
534 | copy_cmd_to_buffer(iommu, &sync_cmd, tail); | ||
437 | 535 | ||
438 | while (!ready && (i < EXIT_LOOP_COUNT)) { | 536 | spin_unlock_irqrestore(&iommu->lock, flags); |
439 | ++i; | 537 | |
440 | /* wait for the bit to become one */ | 538 | if ((ret = wait_on_sem(&sem)) != 0) |
441 | status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); | 539 | return ret; |
442 | ready = status & MMIO_STATUS_COM_WAIT_INT_MASK; | 540 | |
541 | goto again; | ||
443 | } | 542 | } |
444 | 543 | ||
445 | /* set bit back to zero */ | 544 | copy_cmd_to_buffer(iommu, cmd, tail); |
446 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; | 545 | |
447 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); | 546 | /* We need to sync now to make sure all commands are processed */ |
547 | iommu->need_sync = true; | ||
548 | |||
549 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
448 | 550 | ||
449 | if (unlikely(i == EXIT_LOOP_COUNT)) | 551 | return 0; |
450 | iommu->reset_in_progress = true; | ||
451 | } | 552 | } |
452 | 553 | ||
453 | /* | 554 | /* |
454 | * This function queues a completion wait command into the command | 555 | * This function queues a completion wait command into the command |
455 | * buffer of an IOMMU | 556 | * buffer of an IOMMU |
456 | */ | 557 | */ |
457 | static int __iommu_completion_wait(struct amd_iommu *iommu) | 558 | static int iommu_completion_wait(struct amd_iommu *iommu) |
458 | { | 559 | { |
459 | struct iommu_cmd cmd; | 560 | struct iommu_cmd cmd; |
561 | volatile u64 sem = 0; | ||
562 | int ret; | ||
460 | 563 | ||
461 | memset(&cmd, 0, sizeof(cmd)); | 564 | if (!iommu->need_sync) |
462 | cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; | 565 | return 0; |
463 | CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); | ||
464 | 566 | ||
465 | return __iommu_queue_command(iommu, &cmd); | 567 | build_completion_wait(&cmd, (u64)&sem); |
568 | |||
569 | ret = iommu_queue_command(iommu, &cmd); | ||
570 | if (ret) | ||
571 | return ret; | ||
572 | |||
573 | return wait_on_sem(&sem); | ||
466 | } | 574 | } |
467 | 575 | ||
468 | /* | 576 | static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) |
469 | * This function is called whenever we need to ensure that the IOMMU has | ||
470 | * completed execution of all commands we sent. It sends a | ||
471 | * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs | ||
472 | * us about that by writing a value to a physical address we pass with | ||
473 | * the command. | ||
474 | */ | ||
475 | static int iommu_completion_wait(struct amd_iommu *iommu) | ||
476 | { | 577 | { |
477 | int ret = 0; | 578 | struct iommu_cmd cmd; |
478 | unsigned long flags; | ||
479 | 579 | ||
480 | spin_lock_irqsave(&iommu->lock, flags); | 580 | build_inv_dte(&cmd, devid); |
481 | 581 | ||
482 | if (!iommu->need_sync) | 582 | return iommu_queue_command(iommu, &cmd); |
483 | goto out; | 583 | } |
484 | 584 | ||
485 | ret = __iommu_completion_wait(iommu); | 585 | static void iommu_flush_dte_all(struct amd_iommu *iommu) |
586 | { | ||
587 | u32 devid; | ||
486 | 588 | ||
487 | iommu->need_sync = false; | 589 | for (devid = 0; devid <= 0xffff; ++devid) |
590 | iommu_flush_dte(iommu, devid); | ||
488 | 591 | ||
489 | if (ret) | 592 | iommu_completion_wait(iommu); |
490 | goto out; | 593 | } |
491 | |||
492 | __iommu_wait_for_completion(iommu); | ||
493 | 594 | ||
494 | out: | 595 | /* |
495 | spin_unlock_irqrestore(&iommu->lock, flags); | 596 | * This function uses heavy locking and may disable irqs for some time. But |
597 | * this is no issue because it is only called during resume. | ||
598 | */ | ||
599 | static void iommu_flush_tlb_all(struct amd_iommu *iommu) | ||
600 | { | ||
601 | u32 dom_id; | ||
496 | 602 | ||
497 | if (iommu->reset_in_progress) | 603 | for (dom_id = 0; dom_id <= 0xffff; ++dom_id) { |
498 | reset_iommu_command_buffer(iommu); | 604 | struct iommu_cmd cmd; |
605 | build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, | ||
606 | dom_id, 1); | ||
607 | iommu_queue_command(iommu, &cmd); | ||
608 | } | ||
499 | 609 | ||
500 | return 0; | 610 | iommu_completion_wait(iommu); |
501 | } | 611 | } |
502 | 612 | ||
503 | static void iommu_flush_complete(struct protection_domain *domain) | 613 | static void iommu_flush_all(struct amd_iommu *iommu) |
504 | { | 614 | { |
505 | int i; | 615 | struct iommu_cmd cmd; |
506 | 616 | ||
507 | for (i = 0; i < amd_iommus_present; ++i) { | 617 | build_inv_all(&cmd); |
508 | if (!domain->dev_iommu[i]) | ||
509 | continue; | ||
510 | 618 | ||
511 | /* | 619 | iommu_queue_command(iommu, &cmd); |
512 | * Devices of this domain are behind this IOMMU | 620 | iommu_completion_wait(iommu); |
513 | * We need to wait for completion of all commands. | 621 | } |
514 | */ | 622 | |
515 | iommu_completion_wait(amd_iommus[i]); | 623 | void iommu_flush_all_caches(struct amd_iommu *iommu) |
624 | { | ||
625 | if (iommu_feature(iommu, FEATURE_IA)) { | ||
626 | iommu_flush_all(iommu); | ||
627 | } else { | ||
628 | iommu_flush_dte_all(iommu); | ||
629 | iommu_flush_tlb_all(iommu); | ||
516 | } | 630 | } |
517 | } | 631 | } |
518 | 632 | ||
519 | /* | 633 | /* |
520 | * Command send function for invalidating a device table entry | 634 | * Command send function for flushing on-device TLB |
521 | */ | 635 | */ |
522 | static int iommu_flush_device(struct device *dev) | 636 | static int device_flush_iotlb(struct device *dev, u64 address, size_t size) |
523 | { | 637 | { |
638 | struct pci_dev *pdev = to_pci_dev(dev); | ||
524 | struct amd_iommu *iommu; | 639 | struct amd_iommu *iommu; |
525 | struct iommu_cmd cmd; | 640 | struct iommu_cmd cmd; |
526 | u16 devid; | 641 | u16 devid; |
642 | int qdep; | ||
527 | 643 | ||
644 | qdep = pci_ats_queue_depth(pdev); | ||
528 | devid = get_device_id(dev); | 645 | devid = get_device_id(dev); |
529 | iommu = amd_iommu_rlookup_table[devid]; | 646 | iommu = amd_iommu_rlookup_table[devid]; |
530 | 647 | ||
531 | /* Build command */ | 648 | build_inv_iotlb_pages(&cmd, devid, qdep, address, size); |
532 | memset(&cmd, 0, sizeof(cmd)); | ||
533 | CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); | ||
534 | cmd.data[0] = devid; | ||
535 | 649 | ||
536 | return iommu_queue_command(iommu, &cmd); | 650 | return iommu_queue_command(iommu, &cmd); |
537 | } | 651 | } |
538 | 652 | ||
539 | static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, | ||
540 | u16 domid, int pde, int s) | ||
541 | { | ||
542 | memset(cmd, 0, sizeof(*cmd)); | ||
543 | address &= PAGE_MASK; | ||
544 | CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); | ||
545 | cmd->data[1] |= domid; | ||
546 | cmd->data[2] = lower_32_bits(address); | ||
547 | cmd->data[3] = upper_32_bits(address); | ||
548 | if (s) /* size bit - we flush more than one 4kb page */ | ||
549 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; | ||
550 | if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ | ||
551 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; | ||
552 | } | ||
553 | |||
554 | /* | 653 | /* |
555 | * Generic command send function for invalidaing TLB entries | 654 | * Command send function for invalidating a device table entry |
556 | */ | 655 | */ |
557 | static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, | 656 | static int device_flush_dte(struct device *dev) |
558 | u64 address, u16 domid, int pde, int s) | ||
559 | { | 657 | { |
560 | struct iommu_cmd cmd; | 658 | struct amd_iommu *iommu; |
659 | struct pci_dev *pdev; | ||
660 | u16 devid; | ||
561 | int ret; | 661 | int ret; |
562 | 662 | ||
563 | __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s); | 663 | pdev = to_pci_dev(dev); |
664 | devid = get_device_id(dev); | ||
665 | iommu = amd_iommu_rlookup_table[devid]; | ||
564 | 666 | ||
565 | ret = iommu_queue_command(iommu, &cmd); | 667 | ret = iommu_flush_dte(iommu, devid); |
668 | if (ret) | ||
669 | return ret; | ||
670 | |||
671 | if (pci_ats_enabled(pdev)) | ||
672 | ret = device_flush_iotlb(dev, 0, ~0UL); | ||
566 | 673 | ||
567 | return ret; | 674 | return ret; |
568 | } | 675 | } |
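Two mechanisms carry this rewrite. Free space in the circular command buffer is computed as (head - next_tail) % size, which is wrap-safe when the buffer size is a power of two; and completion is detected by polling a semaphore the IOMMU itself writes via DMA, instead of spinning on a status register bit. A hedged user-space model of both (sizes, the <= 2 threshold, and the timeout mirror the hunk but are otherwise illustrative):

#include <stdint.h>
#include <string.h>

#define CMD_BUF_SIZE	4096u	/* must be a power of two for the % trick */
#define CMD_SIZE	16u

struct cmd_ring {
	uint8_t buf[CMD_BUF_SIZE];
	uint32_t head;	/* advanced by "hardware" as commands are consumed */
	uint32_t tail;	/* advanced by software as commands are queued */
};

/* Returns 0 on success, -1 when the ring is (almost) full. */
static int ring_queue(struct cmd_ring *r, const void *cmd)
{
	uint32_t next_tail = (r->tail + CMD_SIZE) % CMD_BUF_SIZE;
	uint32_t left = (r->head - next_tail) % CMD_BUF_SIZE;

	if (left <= 2)		/* same threshold the hunk uses */
		return -1;	/* caller drains via a completion-wait first */

	memcpy(r->buf + r->tail, cmd, CMD_SIZE);
	r->tail = next_tail;	/* the kernel writes this back via MMIO */
	return 0;
}

/* Poll a DMA-written semaphore, as wait_on_sem() does (it adds udelay(1)). */
static int poll_sem(volatile uint64_t *sem, unsigned long timeout)
{
	unsigned long i;

	for (i = 0; i < timeout; ++i) {
		if (*sem != 0)
			return 0;
	}
	return -1;	/* timed out; the kernel logs and returns -EIO */
}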
@@ -572,23 +679,14 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, | |||
572 | * It invalidates a single PTE if the range to flush is within a single | 679 | * It invalidates a single PTE if the range to flush is within a single |
573 | * page. Otherwise it flushes the whole TLB of the IOMMU. | 680 | * page. Otherwise it flushes the whole TLB of the IOMMU. |
574 | */ | 681 | */ |
575 | static void __iommu_flush_pages(struct protection_domain *domain, | 682 | static void __domain_flush_pages(struct protection_domain *domain, |
576 | u64 address, size_t size, int pde) | 683 | u64 address, size_t size, int pde) |
577 | { | 684 | { |
578 | int s = 0, i; | 685 | struct iommu_dev_data *dev_data; |
579 | unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE); | 686 | struct iommu_cmd cmd; |
580 | 687 | int ret = 0, i; | |
581 | address &= PAGE_MASK; | ||
582 | |||
583 | if (pages > 1) { | ||
584 | /* | ||
585 | * If we have to flush more than one page, flush all | ||
586 | * TLB entries for this domain | ||
587 | */ | ||
588 | address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | ||
589 | s = 1; | ||
590 | } | ||
591 | 688 | ||
689 | build_inv_iommu_pages(&cmd, address, size, domain->id, pde); | ||
592 | 690 | ||
593 | for (i = 0; i < amd_iommus_present; ++i) { | 691 | for (i = 0; i < amd_iommus_present; ++i) { |
594 | if (!domain->dev_iommu[i]) | 692 | if (!domain->dev_iommu[i]) |
@@ -598,101 +696,70 @@ static void __iommu_flush_pages(struct protection_domain *domain, | |||
598 | * Devices of this domain are behind this IOMMU | 696 | * Devices of this domain are behind this IOMMU |
599 | * We need a TLB flush | 697 | * We need a TLB flush |
600 | */ | 698 | */ |
601 | iommu_queue_inv_iommu_pages(amd_iommus[i], address, | 699 | ret |= iommu_queue_command(amd_iommus[i], &cmd); |
602 | domain->id, pde, s); | 700 | } |
701 | |||
702 | list_for_each_entry(dev_data, &domain->dev_list, list) { | ||
703 | struct pci_dev *pdev = to_pci_dev(dev_data->dev); | ||
704 | |||
705 | if (!pci_ats_enabled(pdev)) | ||
706 | continue; | ||
707 | |||
708 | ret |= device_flush_iotlb(dev_data->dev, address, size); | ||
603 | } | 709 | } |
604 | 710 | ||
605 | return; | 711 | WARN_ON(ret); |
606 | } | 712 | } |
607 | 713 | ||
608 | static void iommu_flush_pages(struct protection_domain *domain, | 714 | static void domain_flush_pages(struct protection_domain *domain, |
609 | u64 address, size_t size) | 715 | u64 address, size_t size) |
610 | { | 716 | { |
611 | __iommu_flush_pages(domain, address, size, 0); | 717 | __domain_flush_pages(domain, address, size, 0); |
612 | } | 718 | } |
613 | 719 | ||
614 | /* Flush the whole IO/TLB for a given protection domain */ | 720 | /* Flush the whole IO/TLB for a given protection domain */ |
615 | static void iommu_flush_tlb(struct protection_domain *domain) | 721 | static void domain_flush_tlb(struct protection_domain *domain) |
616 | { | 722 | { |
617 | __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); | 723 | __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); |
618 | } | 724 | } |
619 | 725 | ||
620 | /* Flush the whole IO/TLB for a given protection domain - including PDE */ | 726 | /* Flush the whole IO/TLB for a given protection domain - including PDE */ |
621 | static void iommu_flush_tlb_pde(struct protection_domain *domain) | 727 | static void domain_flush_tlb_pde(struct protection_domain *domain) |
622 | { | 728 | { |
623 | __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); | 729 | __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); |
624 | } | ||
625 | |||
626 | |||
627 | /* | ||
628 | * This function flushes the DTEs for all devices in domain | ||
629 | */ | ||
630 | static void iommu_flush_domain_devices(struct protection_domain *domain) | ||
631 | { | ||
632 | struct iommu_dev_data *dev_data; | ||
633 | unsigned long flags; | ||
634 | |||
635 | spin_lock_irqsave(&domain->lock, flags); | ||
636 | |||
637 | list_for_each_entry(dev_data, &domain->dev_list, list) | ||
638 | iommu_flush_device(dev_data->dev); | ||
639 | |||
640 | spin_unlock_irqrestore(&domain->lock, flags); | ||
641 | } | 730 | } |
642 | 731 | ||
643 | static void iommu_flush_all_domain_devices(void) | 732 | static void domain_flush_complete(struct protection_domain *domain) |
644 | { | 733 | { |
645 | struct protection_domain *domain; | 734 | int i; |
646 | unsigned long flags; | ||
647 | 735 | ||
648 | spin_lock_irqsave(&amd_iommu_pd_lock, flags); | 736 | for (i = 0; i < amd_iommus_present; ++i) { |
737 | if (!domain->dev_iommu[i]) | ||
738 | continue; | ||
649 | 739 | ||
650 | list_for_each_entry(domain, &amd_iommu_pd_list, list) { | 740 | /* |
651 | iommu_flush_domain_devices(domain); | 741 | * Devices of this domain are behind this IOMMU |
652 | iommu_flush_complete(domain); | 742 | * We need to wait for completion of all commands. |
743 | */ | ||
744 | iommu_completion_wait(amd_iommus[i]); | ||
653 | } | 745 | } |
654 | |||
655 | spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); | ||
656 | } | 746 | } |
657 | 747 | ||
658 | void amd_iommu_flush_all_devices(void) | ||
659 | { | ||
660 | iommu_flush_all_domain_devices(); | ||
661 | } | ||
662 | 748 | ||
663 | /* | 749 | /* |
664 | * This function uses heavy locking and may disable irqs for some time. But | 750 | * This function flushes the DTEs for all devices in domain |
665 | * this is no issue because it is only called during resume. | ||
666 | */ | 751 | */ |
667 | void amd_iommu_flush_all_domains(void) | 752 | static void domain_flush_devices(struct protection_domain *domain) |
668 | { | 753 | { |
669 | struct protection_domain *domain; | 754 | struct iommu_dev_data *dev_data; |
670 | unsigned long flags; | 755 | unsigned long flags; |
671 | 756 | ||
672 | spin_lock_irqsave(&amd_iommu_pd_lock, flags); | 757 | spin_lock_irqsave(&domain->lock, flags); |
673 | |||
674 | list_for_each_entry(domain, &amd_iommu_pd_list, list) { | ||
675 | spin_lock(&domain->lock); | ||
676 | iommu_flush_tlb_pde(domain); | ||
677 | iommu_flush_complete(domain); | ||
678 | spin_unlock(&domain->lock); | ||
679 | } | ||
680 | |||
681 | spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); | ||
682 | } | ||
683 | |||
684 | static void reset_iommu_command_buffer(struct amd_iommu *iommu) | ||
685 | { | ||
686 | pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); | ||
687 | |||
688 | if (iommu->reset_in_progress) | ||
689 | panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); | ||
690 | 758 | ||
691 | amd_iommu_reset_cmd_buffer(iommu); | 759 | list_for_each_entry(dev_data, &domain->dev_list, list) |
692 | amd_iommu_flush_all_devices(); | 760 | device_flush_dte(dev_data->dev); |
693 | amd_iommu_flush_all_domains(); | ||
694 | 761 | ||
695 | iommu->reset_in_progress = false; | 762 | spin_unlock_irqrestore(&domain->lock, flags); |
696 | } | 763 | } |
697 | 764 | ||
698 | /**************************************************************************** | 765 | /**************************************************************************** |
@@ -1410,17 +1477,22 @@ static bool dma_ops_domain(struct protection_domain *domain) | |||
1410 | return domain->flags & PD_DMA_OPS_MASK; | 1477 | return domain->flags & PD_DMA_OPS_MASK; |
1411 | } | 1478 | } |
1412 | 1479 | ||
1413 | static void set_dte_entry(u16 devid, struct protection_domain *domain) | 1480 | static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) |
1414 | { | 1481 | { |
1415 | u64 pte_root = virt_to_phys(domain->pt_root); | 1482 | u64 pte_root = virt_to_phys(domain->pt_root); |
1483 | u32 flags = 0; | ||
1416 | 1484 | ||
1417 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) | 1485 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) |
1418 | << DEV_ENTRY_MODE_SHIFT; | 1486 | << DEV_ENTRY_MODE_SHIFT; |
1419 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; | 1487 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; |
1420 | 1488 | ||
1421 | amd_iommu_dev_table[devid].data[2] = domain->id; | 1489 | if (ats) |
1422 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | 1490 | flags |= DTE_FLAG_IOTLB; |
1423 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | 1491 | |
1492 | amd_iommu_dev_table[devid].data[3] |= flags; | ||
1493 | amd_iommu_dev_table[devid].data[2] = domain->id; | ||
1494 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | ||
1495 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | ||
1424 | } | 1496 | } |
1425 | 1497 | ||
1426 | static void clear_dte_entry(u16 devid) | 1498 | static void clear_dte_entry(u16 devid) |
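The ATS support surfaces in the device table entry: when the device has ATS enabled, an IOTLB flag is ORed into the entry so the IOMMU will serve translation requests that the device then caches locally. A compact model of the bit assembly in set_dte_entry() (bit positions are simplified; the real layout is defined by the AMD IOMMU specification):

#include <stdbool.h>
#include <stdint.h>

#define IOMMU_PTE_P	(1ULL << 0)	/* illustrative bit positions */
#define IOMMU_PTE_TV	(1ULL << 1)
#define DTE_FLAG_IOTLB	(1u << 0)

struct dev_table_entry {
	uint32_t data[8];
};

static void set_dte(struct dev_table_entry *dte, uint64_t pt_root_phys,
		    uint16_t domain_id, bool ats)
{
	uint64_t pte_root = pt_root_phys | IOMMU_PTE_P | IOMMU_PTE_TV;
	uint32_t flags = 0;

	if (ats)
		flags |= DTE_FLAG_IOTLB;  /* device may cache translations */

	dte->data[3] |= flags;
	dte->data[2] = domain_id;
	dte->data[1] = (uint32_t)(pte_root >> 32);
	dte->data[0] = (uint32_t)pte_root;
}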
@@ -1437,34 +1509,42 @@ static void do_attach(struct device *dev, struct protection_domain *domain) | |||
1437 | { | 1509 | { |
1438 | struct iommu_dev_data *dev_data; | 1510 | struct iommu_dev_data *dev_data; |
1439 | struct amd_iommu *iommu; | 1511 | struct amd_iommu *iommu; |
1512 | struct pci_dev *pdev; | ||
1513 | bool ats = false; | ||
1440 | u16 devid; | 1514 | u16 devid; |
1441 | 1515 | ||
1442 | devid = get_device_id(dev); | 1516 | devid = get_device_id(dev); |
1443 | iommu = amd_iommu_rlookup_table[devid]; | 1517 | iommu = amd_iommu_rlookup_table[devid]; |
1444 | dev_data = get_dev_data(dev); | 1518 | dev_data = get_dev_data(dev); |
1519 | pdev = to_pci_dev(dev); | ||
1520 | |||
1521 | if (amd_iommu_iotlb_sup) | ||
1522 | ats = pci_ats_enabled(pdev); | ||
1445 | 1523 | ||
1446 | /* Update data structures */ | 1524 | /* Update data structures */ |
1447 | dev_data->domain = domain; | 1525 | dev_data->domain = domain; |
1448 | list_add(&dev_data->list, &domain->dev_list); | 1526 | list_add(&dev_data->list, &domain->dev_list); |
1449 | set_dte_entry(devid, domain); | 1527 | set_dte_entry(devid, domain, ats); |
1450 | 1528 | ||
1451 | /* Do reference counting */ | 1529 | /* Do reference counting */ |
1452 | domain->dev_iommu[iommu->index] += 1; | 1530 | domain->dev_iommu[iommu->index] += 1; |
1453 | domain->dev_cnt += 1; | 1531 | domain->dev_cnt += 1; |
1454 | 1532 | ||
1455 | /* Flush the DTE entry */ | 1533 | /* Flush the DTE entry */ |
1456 | iommu_flush_device(dev); | 1534 | device_flush_dte(dev); |
1457 | } | 1535 | } |
1458 | 1536 | ||
1459 | static void do_detach(struct device *dev) | 1537 | static void do_detach(struct device *dev) |
1460 | { | 1538 | { |
1461 | struct iommu_dev_data *dev_data; | 1539 | struct iommu_dev_data *dev_data; |
1462 | struct amd_iommu *iommu; | 1540 | struct amd_iommu *iommu; |
1541 | struct pci_dev *pdev; | ||
1463 | u16 devid; | 1542 | u16 devid; |
1464 | 1543 | ||
1465 | devid = get_device_id(dev); | 1544 | devid = get_device_id(dev); |
1466 | iommu = amd_iommu_rlookup_table[devid]; | 1545 | iommu = amd_iommu_rlookup_table[devid]; |
1467 | dev_data = get_dev_data(dev); | 1546 | dev_data = get_dev_data(dev); |
1547 | pdev = to_pci_dev(dev); | ||
1468 | 1548 | ||
1469 | /* decrease reference counters */ | 1549 | /* decrease reference counters */ |
1470 | dev_data->domain->dev_iommu[iommu->index] -= 1; | 1550 | dev_data->domain->dev_iommu[iommu->index] -= 1; |
@@ -1476,7 +1556,7 @@ static void do_detach(struct device *dev) | |||
1476 | clear_dte_entry(devid); | 1556 | clear_dte_entry(devid); |
1477 | 1557 | ||
1478 | /* Flush the DTE entry */ | 1558 | /* Flush the DTE entry */ |
1479 | iommu_flush_device(dev); | 1559 | device_flush_dte(dev); |
1480 | } | 1560 | } |
1481 | 1561 | ||
1482 | /* | 1562 | /* |
@@ -1539,9 +1619,13 @@ out_unlock: | |||
1539 | static int attach_device(struct device *dev, | 1619 | static int attach_device(struct device *dev, |
1540 | struct protection_domain *domain) | 1620 | struct protection_domain *domain) |
1541 | { | 1621 | { |
1622 | struct pci_dev *pdev = to_pci_dev(dev); | ||
1542 | unsigned long flags; | 1623 | unsigned long flags; |
1543 | int ret; | 1624 | int ret; |
1544 | 1625 | ||
1626 | if (amd_iommu_iotlb_sup) | ||
1627 | pci_enable_ats(pdev, PAGE_SHIFT); | ||
1628 | |||
1545 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 1629 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
1546 | ret = __attach_device(dev, domain); | 1630 | ret = __attach_device(dev, domain); |
1547 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1631 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
@@ -1551,7 +1635,7 @@ static int attach_device(struct device *dev, | |||
1551 | * left the caches in the IOMMU dirty. So we have to flush | 1635 | * left the caches in the IOMMU dirty. So we have to flush |
1552 | * here to evict all dirty stuff. | 1636 | * here to evict all dirty stuff. |
1553 | */ | 1637 | */ |
1554 | iommu_flush_tlb_pde(domain); | 1638 | domain_flush_tlb_pde(domain); |
1555 | 1639 | ||
1556 | return ret; | 1640 | return ret; |
1557 | } | 1641 | } |
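Ordering is the point of these attach/detach hunks: ATS is enabled before the device table entry is written, so set_dte_entry() sees pci_ats_enabled() and sets the IOTLB flag; on detach the entry is torn down first and ATS disabled last. A stand-alone sketch of that bracketing with stub helpers (the real calls are pci_enable_ats()/pci_disable_ats() from <linux/pci-ats.h>):

#include <stdbool.h>

static bool ats_on;	/* stands in for pci_ats_enabled(pdev) */

static void enable_ats(void)  { ats_on = true; }
static void disable_ats(void) { ats_on = false; }
static void write_dte(bool ats) { (void)ats; /* would set DTE_FLAG_IOTLB */ }
static void clear_dte(void)     { }

static void attach_device(bool iotlb_sup)
{
	if (iotlb_sup)
		enable_ats();	/* 1: device may now issue ATS requests */
	write_dte(ats_on);	/* 2: DTE then advertises the IOTLB */
}

static void detach_device(bool iotlb_sup)
{
	clear_dte();		/* 1: stop translations first */
	if (iotlb_sup && ats_on)
		disable_ats();	/* 2: only then turn ATS off */
}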
@@ -1598,12 +1682,16 @@ static void __detach_device(struct device *dev) | |||
1598 | */ | 1682 | */ |
1599 | static void detach_device(struct device *dev) | 1683 | static void detach_device(struct device *dev) |
1600 | { | 1684 | { |
1685 | struct pci_dev *pdev = to_pci_dev(dev); | ||
1601 | unsigned long flags; | 1686 | unsigned long flags; |
1602 | 1687 | ||
1603 | /* lock device table */ | 1688 | /* lock device table */ |
1604 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 1689 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
1605 | __detach_device(dev); | 1690 | __detach_device(dev); |
1606 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1691 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1692 | |||
1693 | if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev)) | ||
1694 | pci_disable_ats(pdev); | ||
1607 | } | 1695 | } |
1608 | 1696 | ||
1609 | /* | 1697 | /* |
@@ -1692,7 +1780,7 @@ static int device_change_notifier(struct notifier_block *nb, | |||
1692 | goto out; | 1780 | goto out; |
1693 | } | 1781 | } |
1694 | 1782 | ||
1695 | iommu_flush_device(dev); | 1783 | device_flush_dte(dev); |
1696 | iommu_completion_wait(iommu); | 1784 | iommu_completion_wait(iommu); |
1697 | 1785 | ||
1698 | out: | 1786 | out: |
@@ -1753,8 +1841,9 @@ static void update_device_table(struct protection_domain *domain) | |||
1753 | struct iommu_dev_data *dev_data; | 1841 | struct iommu_dev_data *dev_data; |
1754 | 1842 | ||
1755 | list_for_each_entry(dev_data, &domain->dev_list, list) { | 1843 | list_for_each_entry(dev_data, &domain->dev_list, list) { |
1844 | struct pci_dev *pdev = to_pci_dev(dev_data->dev); | ||
1756 | u16 devid = get_device_id(dev_data->dev); | 1845 | u16 devid = get_device_id(dev_data->dev); |
1757 | set_dte_entry(devid, domain); | 1846 | set_dte_entry(devid, domain, pci_ats_enabled(pdev)); |
1758 | } | 1847 | } |
1759 | } | 1848 | } |
1760 | 1849 | ||
@@ -1764,8 +1853,9 @@ static void update_domain(struct protection_domain *domain) | |||
1764 | return; | 1853 | return; |
1765 | 1854 | ||
1766 | update_device_table(domain); | 1855 | update_device_table(domain); |
1767 | iommu_flush_domain_devices(domain); | 1856 | |
1768 | iommu_flush_tlb_pde(domain); | 1857 | domain_flush_devices(domain); |
1858 | domain_flush_tlb_pde(domain); | ||
1769 | 1859 | ||
1770 | domain->updated = false; | 1860 | domain->updated = false; |
1771 | } | 1861 | } |
@@ -1924,10 +2014,10 @@ retry: | |||
1924 | ADD_STATS_COUNTER(alloced_io_mem, size); | 2014 | ADD_STATS_COUNTER(alloced_io_mem, size); |
1925 | 2015 | ||
1926 | if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { | 2016 | if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { |
1927 | iommu_flush_tlb(&dma_dom->domain); | 2017 | domain_flush_tlb(&dma_dom->domain); |
1928 | dma_dom->need_flush = false; | 2018 | dma_dom->need_flush = false; |
1929 | } else if (unlikely(amd_iommu_np_cache)) | 2019 | } else if (unlikely(amd_iommu_np_cache)) |
1930 | iommu_flush_pages(&dma_dom->domain, address, size); | 2020 | domain_flush_pages(&dma_dom->domain, address, size); |
1931 | 2021 | ||
1932 | out: | 2022 | out: |
1933 | return address; | 2023 | return address; |
@@ -1976,7 +2066,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, | |||
1976 | dma_ops_free_addresses(dma_dom, dma_addr, pages); | 2066 | dma_ops_free_addresses(dma_dom, dma_addr, pages); |
1977 | 2067 | ||
1978 | if (amd_iommu_unmap_flush || dma_dom->need_flush) { | 2068 | if (amd_iommu_unmap_flush || dma_dom->need_flush) { |
1979 | iommu_flush_pages(&dma_dom->domain, flush_addr, size); | 2069 | domain_flush_pages(&dma_dom->domain, flush_addr, size); |
1980 | dma_dom->need_flush = false; | 2070 | dma_dom->need_flush = false; |
1981 | } | 2071 | } |
1982 | } | 2072 | } |
@@ -2012,7 +2102,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, | |||
2012 | if (addr == DMA_ERROR_CODE) | 2102 | if (addr == DMA_ERROR_CODE) |
2013 | goto out; | 2103 | goto out; |
2014 | 2104 | ||
2015 | iommu_flush_complete(domain); | 2105 | domain_flush_complete(domain); |
2016 | 2106 | ||
2017 | out: | 2107 | out: |
2018 | spin_unlock_irqrestore(&domain->lock, flags); | 2108 | spin_unlock_irqrestore(&domain->lock, flags); |
@@ -2039,7 +2129,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, | |||
2039 | 2129 | ||
2040 | __unmap_single(domain->priv, dma_addr, size, dir); | 2130 | __unmap_single(domain->priv, dma_addr, size, dir); |
2041 | 2131 | ||
2042 | iommu_flush_complete(domain); | 2132 | domain_flush_complete(domain); |
2043 | 2133 | ||
2044 | spin_unlock_irqrestore(&domain->lock, flags); | 2134 | spin_unlock_irqrestore(&domain->lock, flags); |
2045 | } | 2135 | } |
@@ -2104,7 +2194,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, | |||
2104 | goto unmap; | 2194 | goto unmap; |
2105 | } | 2195 | } |
2106 | 2196 | ||
2107 | iommu_flush_complete(domain); | 2197 | domain_flush_complete(domain); |
2108 | 2198 | ||
2109 | out: | 2199 | out: |
2110 | spin_unlock_irqrestore(&domain->lock, flags); | 2200 | spin_unlock_irqrestore(&domain->lock, flags); |
@@ -2150,7 +2240,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
2150 | s->dma_address = s->dma_length = 0; | 2240 | s->dma_address = s->dma_length = 0; |
2151 | } | 2241 | } |
2152 | 2242 | ||
2153 | iommu_flush_complete(domain); | 2243 | domain_flush_complete(domain); |
2154 | 2244 | ||
2155 | spin_unlock_irqrestore(&domain->lock, flags); | 2245 | spin_unlock_irqrestore(&domain->lock, flags); |
2156 | } | 2246 | } |
@@ -2200,7 +2290,7 @@ static void *alloc_coherent(struct device *dev, size_t size, | |||
2200 | goto out_free; | 2290 | goto out_free; |
2201 | } | 2291 | } |
2202 | 2292 | ||
2203 | iommu_flush_complete(domain); | 2293 | domain_flush_complete(domain); |
2204 | 2294 | ||
2205 | spin_unlock_irqrestore(&domain->lock, flags); | 2295 | spin_unlock_irqrestore(&domain->lock, flags); |
2206 | 2296 | ||
@@ -2232,7 +2322,7 @@ static void free_coherent(struct device *dev, size_t size, | |||
2232 | 2322 | ||
2233 | __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); | 2323 | __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); |
2234 | 2324 | ||
2235 | iommu_flush_complete(domain); | 2325 | domain_flush_complete(domain); |
2236 | 2326 | ||
2237 | spin_unlock_irqrestore(&domain->lock, flags); | 2327 | spin_unlock_irqrestore(&domain->lock, flags); |
2238 | 2328 | ||
@@ -2476,7 +2566,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom, | |||
2476 | if (!iommu) | 2566 | if (!iommu) |
2477 | return; | 2567 | return; |
2478 | 2568 | ||
2479 | iommu_flush_device(dev); | 2569 | device_flush_dte(dev); |
2480 | iommu_completion_wait(iommu); | 2570 | iommu_completion_wait(iommu); |
2481 | } | 2571 | } |
2482 | 2572 | ||
@@ -2542,7 +2632,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, | |||
2542 | unmap_size = iommu_unmap_page(domain, iova, page_size); | 2632 | unmap_size = iommu_unmap_page(domain, iova, page_size); |
2543 | mutex_unlock(&domain->api_lock); | 2633 | mutex_unlock(&domain->api_lock); |
2544 | 2634 | ||
2545 | iommu_flush_tlb_pde(domain); | 2635 | domain_flush_tlb_pde(domain); |
2546 | 2636 | ||
2547 | return get_order(unmap_size); | 2637 | return get_order(unmap_size); |
2548 | } | 2638 | } |
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 246d727b65b7..9179c21120a8 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -137,6 +137,7 @@ int amd_iommus_present; | |||
137 | 137 | ||
138 | /* IOMMUs have a non-present cache? */ | 138 | /* IOMMUs have a non-present cache? */ |
139 | bool amd_iommu_np_cache __read_mostly; | 139 | bool amd_iommu_np_cache __read_mostly; |
140 | bool amd_iommu_iotlb_sup __read_mostly = true; | ||
140 | 141 | ||
141 | /* | 142 | /* |
142 | * The ACPI table parsing functions set this variable on an error | 143 | * The ACPI table parsing functions set this variable on an error |
@@ -180,6 +181,12 @@ static u32 dev_table_size; /* size of the device table */ | |||
180 | static u32 alias_table_size; /* size of the alias table */ | 181 | static u32 alias_table_size; /* size of the alias table */ |
181 | static u32 rlookup_table_size; /* size if the rlookup table */ | 182 | static u32 rlookup_table_size; /* size if the rlookup table */ |
182 | 183 | ||
184 | /* | ||
185 | * This function flushes all internal caches of | ||
186 | * the IOMMU used by this driver. | ||
187 | */ | ||
188 | extern void iommu_flush_all_caches(struct amd_iommu *iommu); | ||
189 | |||
183 | static inline void update_last_devid(u16 devid) | 190 | static inline void update_last_devid(u16 devid) |
184 | { | 191 | { |
185 | if (devid > amd_iommu_last_bdf) | 192 | if (devid > amd_iommu_last_bdf) |
@@ -293,9 +300,23 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) | |||
293 | /* Function to enable the hardware */ | 300 | /* Function to enable the hardware */ |
294 | static void iommu_enable(struct amd_iommu *iommu) | 301 | static void iommu_enable(struct amd_iommu *iommu) |
295 | { | 302 | { |
296 | printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n", | 303 | static const char * const feat_str[] = { |
304 | "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", | ||
305 | "IA", "GA", "HE", "PC", NULL | ||
306 | }; | ||
307 | int i; | ||
308 | |||
309 | printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx", | ||
297 | dev_name(&iommu->dev->dev), iommu->cap_ptr); | 310 | dev_name(&iommu->dev->dev), iommu->cap_ptr); |
298 | 311 | ||
312 | if (iommu->cap & (1 << IOMMU_CAP_EFR)) { | ||
313 | printk(KERN_CONT " extended features: "); | ||
314 | for (i = 0; feat_str[i]; ++i) | ||
315 | if (iommu_feature(iommu, (1ULL << i))) | ||
316 | printk(KERN_CONT " %s", feat_str[i]); | ||
317 | } | ||
318 | printk(KERN_CONT "\n"); | ||
319 | |||
299 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); | 320 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); |
300 | } | 321 | } |
301 | 322 | ||
@@ -651,7 +672,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) | |||
651 | static void __init init_iommu_from_pci(struct amd_iommu *iommu) | 672 | static void __init init_iommu_from_pci(struct amd_iommu *iommu) |
652 | { | 673 | { |
653 | int cap_ptr = iommu->cap_ptr; | 674 | int cap_ptr = iommu->cap_ptr; |
654 | u32 range, misc; | 675 | u32 range, misc, low, high; |
655 | int i, j; | 676 | int i, j; |
656 | 677 | ||
657 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, | 678 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, |
@@ -667,6 +688,15 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) | |||
667 | MMIO_GET_LD(range)); | 688 | MMIO_GET_LD(range)); |
668 | iommu->evt_msi_num = MMIO_MSI_NUM(misc); | 689 | iommu->evt_msi_num = MMIO_MSI_NUM(misc); |
669 | 690 | ||
691 | if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) | ||
692 | amd_iommu_iotlb_sup = false; | ||
693 | |||
694 | /* read extended feature bits */ | ||
695 | low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); | ||
696 | high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); | ||
697 | |||
698 | iommu->features = ((u64)high << 32) | low; | ||
699 | |||
670 | if (!is_rd890_iommu(iommu->dev)) | 700 | if (!is_rd890_iommu(iommu->dev)) |
671 | return; | 701 | return; |
672 | 702 | ||
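The extended feature register is 64 bits wide but the MMIO window is read 32 bits at a time, so the two halves are recombined before iommu_feature() can test individual bits; bit 6 ("IA" in the feat_str table above) is what later selects the single INVALIDATE_ALL path in iommu_flush_all_caches(). A small model of the recombination and test (the accessor shape is simplified; the real iommu_feature() takes the iommu as its first argument):

#include <stdbool.h>
#include <stdint.h>

#define FEATURE_IA	(1ULL << 6)	/* "IA" slot in the feat_str table */

/* Recombine the two 32-bit halves of the extended feature register. */
static uint64_t read_ext_features(const volatile uint32_t *mmio_ext)
{
	uint32_t low = mmio_ext[0];
	uint32_t high = mmio_ext[1];

	return ((uint64_t)high << 32) | low;
}

static bool has_feature(uint64_t features, uint64_t mask)
{
	return (features & mask) != 0;
}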
@@ -1004,10 +1034,11 @@ static int iommu_setup_msi(struct amd_iommu *iommu) | |||
1004 | if (pci_enable_msi(iommu->dev)) | 1034 | if (pci_enable_msi(iommu->dev)) |
1005 | return 1; | 1035 | return 1; |
1006 | 1036 | ||
1007 | r = request_irq(iommu->dev->irq, amd_iommu_int_handler, | 1037 | r = request_threaded_irq(iommu->dev->irq, |
1008 | IRQF_SAMPLE_RANDOM, | 1038 | amd_iommu_int_handler, |
1009 | "AMD-Vi", | 1039 | amd_iommu_int_thread, |
1010 | NULL); | 1040 | 0, "AMD-Vi", |
1041 | iommu->dev); | ||
1011 | 1042 | ||
1012 | if (r) { | 1043 | if (r) { |
1013 | pci_disable_msi(iommu->dev); | 1044 | pci_disable_msi(iommu->dev); |
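This is the standard split-handler pattern: the hard handler runs with interrupts off and only returns IRQ_WAKE_THREAD, while the threaded handler (amd_iommu_int_thread(), added earlier in this patch) runs in process context where event-log polling may sleep. A minimal kernel-style sketch of the same registration (handler names are illustrative; request_threaded_irq() is the real API):

#include <linux/interrupt.h>

static irqreturn_t demo_hardirq(int irq, void *data)
{
	/* Hard-irq context: defer all real work to the thread. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread(int irq, void *data)
{
	/* Process context: may sleep, poll hardware, take mutexes. */
	return IRQ_HANDLED;
}

static int demo_setup_irq(unsigned int irq, void *dev_id)
{
	return request_threaded_irq(irq, demo_hardirq, demo_thread,
				    0, "demo-dev", dev_id);
}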
@@ -1244,6 +1275,7 @@ static void enable_iommus(void) | |||
1244 | iommu_set_exclusion_range(iommu); | 1275 | iommu_set_exclusion_range(iommu); |
1245 | iommu_init_msi(iommu); | 1276 | iommu_init_msi(iommu); |
1246 | iommu_enable(iommu); | 1277 | iommu_enable(iommu); |
1278 | iommu_flush_all_caches(iommu); | ||
1247 | } | 1279 | } |
1248 | } | 1280 | } |
1249 | 1281 | ||
@@ -1274,8 +1306,8 @@ static void amd_iommu_resume(void) | |||
1274 | * we have to flush after the IOMMUs are enabled because a | 1306 | * we have to flush after the IOMMUs are enabled because a |
1275 | * disabled IOMMU will never execute the commands we send | 1307 | * disabled IOMMU will never execute the commands we send |
1276 | */ | 1308 | */ |
1277 | amd_iommu_flush_all_devices(); | 1309 | for_each_iommu(iommu) |
1278 | amd_iommu_flush_all_domains(); | 1310 | iommu_flush_all_caches(iommu); |
1279 | } | 1311 | } |
1280 | 1312 | ||
1281 | static int amd_iommu_suspend(void) | 1313 | static int amd_iommu_suspend(void) |
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index cd1ffed4ee22..289e92862fd9 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c | |||
@@ -177,7 +177,6 @@ static struct clocksource clocksource_apbt = { | |||
177 | .rating = APBT_CLOCKSOURCE_RATING, | 177 | .rating = APBT_CLOCKSOURCE_RATING, |
178 | .read = apbt_read_clocksource, | 178 | .read = apbt_read_clocksource, |
179 | .mask = APBT_MASK, | 179 | .mask = APBT_MASK, |
180 | .shift = APBT_SHIFT, | ||
181 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 180 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
182 | .resume = apbt_restart_clocksource, | 181 | .resume = apbt_restart_clocksource, |
183 | }; | 182 | }; |
@@ -543,14 +542,7 @@ static int apbt_clocksource_register(void) | |||
543 | if (t1 == apbt_read_clocksource(&clocksource_apbt)) | 542 | if (t1 == apbt_read_clocksource(&clocksource_apbt)) |
544 | panic("APBT counter not counting. APBT disabled\n"); | 543 | panic("APBT counter not counting. APBT disabled\n"); |
545 | 544 | ||
546 | /* | 545 | clocksource_register_khz(&clocksource_apbt, (u32)apbt_freq*1000); |
547 | * initialize and register APBT clocksource | ||
548 | * convert that to ns/clock cycle | ||
549 | * mult = (ns/c) * 2^APBT_SHIFT | ||
550 | */ | ||
551 | clocksource_apbt.mult = div_sc(MSEC_PER_SEC, | ||
552 | (unsigned long) apbt_freq, APBT_SHIFT); | ||
553 | clocksource_register(&clocksource_apbt); | ||
554 | 546 | ||
555 | return 0; | 547 | return 0; |
556 | } | 548 | } |
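clocksource_register_khz() folds the mult/shift computation into the core: readouts convert as ns = (cycles * mult) >> shift, and the helper derives mult and shift from the clock rate so drivers no longer hand-roll div_sc(). A user-space sketch of the underlying arithmetic (the fixed shift of 20 and the example rate are illustrative; the kernel picks the shift to maximize precision without overflow):

#include <stdint.h>
#include <stdio.h>

/* ns = (cycles * mult) >> shift, with mult = (10^6 << shift) / khz. */
static void calc_mult_shift(uint32_t *mult, uint32_t *shift, uint32_t khz)
{
	uint32_t sh = 20;	/* illustrative fixed shift */

	*mult = (uint32_t)(((uint64_t)1000000 << sh) / khz);
	*shift = sh;
}

int main(void)
{
	uint32_t mult, shift;

	calc_mult_shift(&mult, &shift, 14318);	/* example rate in kHz */

	/* 14318 cycles is ~1 ms at that rate, so this prints ~1000000 ns. */
	printf("%llu ns\n", (unsigned long long)((14318ULL * mult) >> shift));
	return 0;
}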
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 73fb469908c6..3d2661ca6542 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -30,6 +30,22 @@ | |||
30 | #include <asm/amd_nb.h> | 30 | #include <asm/amd_nb.h> |
31 | #include <asm/x86_init.h> | 31 | #include <asm/x86_init.h> |
32 | 32 | ||
33 | /* | ||
34 | * Using 512M as goal, in case kexec will load kernel_big | ||
35 | * that will do the on-position decompress, and could overlap with | ||
36 | * with the gart aperture that is used. | ||
37 | * Sequence: | ||
38 | * kernel_small | ||
39 | * ==> kexec (with kdump trigger path or gart still enabled) | ||
40 | * ==> kernel_small (gart area become e820_reserved) | ||
41 | * ==> kexec (with kdump trigger path or gart still enabled) | ||
42 | * ==> kerne_big (uncompressed size will be big than 64M or 128M) | ||
43 | * So don't use 512M below as gart iommu, leave the space for kernel | ||
44 | * code for safe. | ||
45 | */ | ||
46 | #define GART_MIN_ADDR (512ULL << 20) | ||
47 | #define GART_MAX_ADDR (1ULL << 32) | ||
48 | |||
33 | int gart_iommu_aperture; | 49 | int gart_iommu_aperture; |
34 | int gart_iommu_aperture_disabled __initdata; | 50 | int gart_iommu_aperture_disabled __initdata; |
35 | int gart_iommu_aperture_allowed __initdata; | 51 | int gart_iommu_aperture_allowed __initdata; |
@@ -70,21 +86,9 @@ static u32 __init allocate_aperture(void) | |||
70 | * memory. Unfortunately we cannot move it up because that would | 86 | * memory. Unfortunately we cannot move it up because that would |
71 | * make the IOMMU useless. | 87 | * make the IOMMU useless. |
72 | */ | 88 | */ |
73 | /* | 89 | addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR, |
74 | * using 512M as goal, in case kexec will load kernel_big | 90 | aper_size, aper_size); |
75 | * that will do the on position decompress, and could overlap with | 91 | if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) { |
76 | * that position with gart that is used. | ||
77 | * sequende: | ||
78 | * kernel_small | ||
79 | * ==> kexec (with kdump trigger path or previous doesn't shutdown gart) | ||
80 | * ==> kernel_small(gart area become e820_reserved) | ||
81 | * ==> kexec (with kdump trigger path or previous doesn't shutdown gart) | ||
82 | * ==> kerne_big (uncompressed size will be big than 64M or 128M) | ||
83 | * so don't use 512M below as gart iommu, leave the space for kernel | ||
84 | * code for safe | ||
85 | */ | ||
86 | addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20); | ||
87 | if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) { | ||
88 | printk(KERN_ERR | 92 | printk(KERN_ERR |
89 | "Cannot allocate aperture memory hole (%lx,%uK)\n", | 93 | "Cannot allocate aperture memory hole (%lx,%uK)\n", |
90 | addr, aper_size>>10); | 94 | addr, aper_size>>10); |
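
As a sanity check of the new window, a small standalone restatement of the bounds logic: a candidate aperture must sit entirely inside [GART_MIN_ADDR, GART_MAX_ADDR), keeping the low 512M free for a kexec'd kernel image. This is a sketch mirroring the test above, not kernel code.

    #include <stdio.h>
    #include <stdint.h>

    #define GART_MIN_ADDR (512ULL << 20)   /* 512M */
    #define GART_MAX_ADDR (1ULL << 32)     /* 4G   */

    static int aperture_ok(uint64_t addr, uint64_t aper_size)
    {
        return addr >= GART_MIN_ADDR && addr + aper_size <= GART_MAX_ADDR;
    }

    int main(void)
    {
        printf("%d\n", aperture_ok(512ULL << 20, 64ULL << 20)); /* 1: fits */
        printf("%d\n", aperture_ok(256ULL << 20, 64ULL << 20)); /* 0: below 512M */
        printf("%d\n", aperture_ok((4ULL << 30) - (32ULL << 20),
                                   64ULL << 20));               /* 0: crosses 4G */
        return 0;
    }
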
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index fabf01eff771..f92a8e5d1e21 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -505,7 +505,7 @@ static void __cpuinit setup_APIC_timer(void) | |||
505 | { | 505 | { |
506 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 506 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); |
507 | 507 | ||
508 | if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) { | 508 | if (this_cpu_has(X86_FEATURE_ARAT)) { |
509 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; | 509 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; |
510 | /* Make LAPIC timer preferable over percpu HPET */ | 510 | /* Make LAPIC timer preferable over percpu HPET */ |
511 | lapic_clockevent.rating = 150; | 511 | lapic_clockevent.rating = 150; |
@@ -1237,6 +1237,17 @@ void __cpuinit setup_local_APIC(void) | |||
1237 | /* always use the value from LDR */ | 1237 | /* always use the value from LDR */ |
1238 | early_per_cpu(x86_cpu_to_logical_apicid, cpu) = | 1238 | early_per_cpu(x86_cpu_to_logical_apicid, cpu) = |
1239 | logical_smp_processor_id(); | 1239 | logical_smp_processor_id(); |
1240 | |||
1241 | /* | ||
1242 | * Some NUMA implementations (NUMAQ) don't initialize apicid to | ||
1243 | * node mapping during NUMA init. Now that logical apicid is | ||
1244 | * guaranteed to be known, give it another chance. This is already | ||
1245 | * a bit too late - percpu allocation has already happened without | ||
1246 | * proper NUMA affinity. | ||
1247 | */ | ||
1248 | if (apic->x86_32_numa_cpu_node) | ||
1249 | set_apicid_to_node(early_per_cpu(x86_cpu_to_apicid, cpu), | ||
1250 | apic->x86_32_numa_cpu_node(cpu)); | ||
1240 | #endif | 1251 | #endif |
1241 | 1252 | ||
1242 | /* | 1253 | /* |
@@ -1812,30 +1823,41 @@ void smp_spurious_interrupt(struct pt_regs *regs) | |||
1812 | */ | 1823 | */ |
1813 | void smp_error_interrupt(struct pt_regs *regs) | 1824 | void smp_error_interrupt(struct pt_regs *regs) |
1814 | { | 1825 | { |
1815 | u32 v, v1; | 1826 | u32 v0, v1; |
1827 | u32 i = 0; | ||
1828 | static const char * const error_interrupt_reason[] = { | ||
1829 | "Send CS error", /* APIC Error Bit 0 */ | ||
1830 | "Receive CS error", /* APIC Error Bit 1 */ | ||
1831 | "Send accept error", /* APIC Error Bit 2 */ | ||
1832 | "Receive accept error", /* APIC Error Bit 3 */ | ||
1833 | "Redirectable IPI", /* APIC Error Bit 4 */ | ||
1834 | "Send illegal vector", /* APIC Error Bit 5 */ | ||
1835 | "Received illegal vector", /* APIC Error Bit 6 */ | ||
1836 | "Illegal register address", /* APIC Error Bit 7 */ | ||
1837 | }; | ||
1816 | 1838 | ||
1817 | exit_idle(); | 1839 | exit_idle(); |
1818 | irq_enter(); | 1840 | irq_enter(); |
1819 | /* First tickle the hardware, only then report what went on. -- REW */ | 1841 | /* First tickle the hardware, only then report what went on. -- REW */ |
1820 | v = apic_read(APIC_ESR); | 1842 | v0 = apic_read(APIC_ESR); |
1821 | apic_write(APIC_ESR, 0); | 1843 | apic_write(APIC_ESR, 0); |
1822 | v1 = apic_read(APIC_ESR); | 1844 | v1 = apic_read(APIC_ESR); |
1823 | ack_APIC_irq(); | 1845 | ack_APIC_irq(); |
1824 | atomic_inc(&irq_err_count); | 1846 | atomic_inc(&irq_err_count); |
1825 | 1847 | ||
1826 | /* | 1848 | apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)", |
1827 | * Here is what the APIC error bits mean: | 1849 | smp_processor_id(), v0, v1); |
1828 | * 0: Send CS error | 1850 | |
1829 | * 1: Receive CS error | 1851 | v1 = v1 & 0xff; |
1830 | * 2: Send accept error | 1852 | while (v1) { |
1831 | * 3: Receive accept error | 1853 | if (v1 & 0x1) |
1832 | * 4: Reserved | 1854 | apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]); |
1833 | * 5: Send illegal vector | 1855 | i++; |
1834 | * 6: Received illegal vector | 1856 | v1 >>= 1; |
1835 | * 7: Illegal register address | 1857 | } |
1836 | */ | 1858 | |
1837 | pr_debug("APIC error on CPU%d: %02x(%02x)\n", | 1859 | apic_printk(APIC_DEBUG, KERN_CONT "\n"); |
1838 | smp_processor_id(), v , v1); | 1860 | |
1839 | irq_exit(); | 1861 | irq_exit(); |
1840 | } | 1862 | } |
1841 | 1863 | ||
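
A userspace rendering of the new decode loop, useful for checking the bit-to-string mapping by hand; feeding it a hypothetical ESR value of 0x60 should name the two illegal-vector errors.

    #include <stdio.h>

    static const char * const error_interrupt_reason[] = {
        "Send CS error",            /* APIC Error Bit 0 */
        "Receive CS error",         /* APIC Error Bit 1 */
        "Send accept error",        /* APIC Error Bit 2 */
        "Receive accept error",     /* APIC Error Bit 3 */
        "Redirectable IPI",         /* APIC Error Bit 4 */
        "Send illegal vector",      /* APIC Error Bit 5 */
        "Received illegal vector",  /* APIC Error Bit 6 */
        "Illegal register address", /* APIC Error Bit 7 */
    };

    int main(void)
    {
        unsigned int v1 = 0x60 & 0xff;  /* hypothetical ESR value */
        unsigned int i = 0;

        while (v1) {
            if (v1 & 0x1)
                printf(" : %s", error_interrupt_reason[i]);
            i++;
            v1 >>= 1;
        }
        /* prints " : Send illegal vector : Received illegal vector" */
        printf("\n");
        return 0;
    }
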
@@ -2003,21 +2025,6 @@ void default_init_apic_ldr(void) | |||
2003 | apic_write(APIC_LDR, val); | 2025 | apic_write(APIC_LDR, val); |
2004 | } | 2026 | } |
2005 | 2027 | ||
2006 | #ifdef CONFIG_X86_32 | ||
2007 | int default_x86_32_numa_cpu_node(int cpu) | ||
2008 | { | ||
2009 | #ifdef CONFIG_NUMA | ||
2010 | int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); | ||
2011 | |||
2012 | if (apicid != BAD_APICID) | ||
2013 | return __apicid_to_node[apicid]; | ||
2014 | return NUMA_NO_NODE; | ||
2015 | #else | ||
2016 | return 0; | ||
2017 | #endif | ||
2018 | } | ||
2019 | #endif | ||
2020 | |||
2021 | /* | 2028 | /* |
2022 | * Power management | 2029 | * Power management |
2023 | */ | 2030 | */ |
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index f1baa2dc087a..775b82bc655c 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c | |||
@@ -119,14 +119,6 @@ static void noop_apic_write(u32 reg, u32 v) | |||
119 | WARN_ON_ONCE(cpu_has_apic && !disable_apic); | 119 | WARN_ON_ONCE(cpu_has_apic && !disable_apic); |
120 | } | 120 | } |
121 | 121 | ||
122 | #ifdef CONFIG_X86_32 | ||
123 | static int noop_x86_32_numa_cpu_node(int cpu) | ||
124 | { | ||
125 | /* we're always on node 0 */ | ||
126 | return 0; | ||
127 | } | ||
128 | #endif | ||
129 | |||
130 | struct apic apic_noop = { | 122 | struct apic apic_noop = { |
131 | .name = "noop", | 123 | .name = "noop", |
132 | .probe = noop_probe, | 124 | .probe = noop_probe, |
@@ -195,6 +187,5 @@ struct apic apic_noop = { | |||
195 | 187 | ||
196 | #ifdef CONFIG_X86_32 | 188 | #ifdef CONFIG_X86_32 |
197 | .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, | 189 | .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, |
198 | .x86_32_numa_cpu_node = noop_x86_32_numa_cpu_node, | ||
199 | #endif | 190 | #endif |
200 | }; | 191 | }; |
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index 541a2e431659..d84ac5a584b5 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c | |||
@@ -253,5 +253,4 @@ struct apic apic_bigsmp = { | |||
253 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 253 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
254 | 254 | ||
255 | .x86_32_early_logical_apicid = bigsmp_early_logical_apicid, | 255 | .x86_32_early_logical_apicid = bigsmp_early_logical_apicid, |
256 | .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, | ||
257 | }; | 256 | }; |
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 3e9de4854c5b..70533de5bd29 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c | |||
@@ -510,11 +510,6 @@ static void es7000_setup_apic_routing(void) | |||
510 | nr_ioapics, cpumask_bits(es7000_target_cpus())[0]); | 510 | nr_ioapics, cpumask_bits(es7000_target_cpus())[0]); |
511 | } | 511 | } |
512 | 512 | ||
513 | static int es7000_numa_cpu_node(int cpu) | ||
514 | { | ||
515 | return 0; | ||
516 | } | ||
517 | |||
518 | static int es7000_cpu_present_to_apicid(int mps_cpu) | 513 | static int es7000_cpu_present_to_apicid(int mps_cpu) |
519 | { | 514 | { |
520 | if (!mps_cpu) | 515 | if (!mps_cpu) |
@@ -688,7 +683,6 @@ struct apic __refdata apic_es7000_cluster = { | |||
688 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 683 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
689 | 684 | ||
690 | .x86_32_early_logical_apicid = es7000_early_logical_apicid, | 685 | .x86_32_early_logical_apicid = es7000_early_logical_apicid, |
691 | .x86_32_numa_cpu_node = es7000_numa_cpu_node, | ||
692 | }; | 686 | }; |
693 | 687 | ||
694 | struct apic __refdata apic_es7000 = { | 688 | struct apic __refdata apic_es7000 = { |
@@ -752,5 +746,4 @@ struct apic __refdata apic_es7000 = { | |||
752 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 746 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
753 | 747 | ||
754 | .x86_32_early_logical_apicid = es7000_early_logical_apicid, | 748 | .x86_32_early_logical_apicid = es7000_early_logical_apicid, |
755 | .x86_32_numa_cpu_node = es7000_numa_cpu_node, | ||
756 | }; | 749 | }; |
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index 6273eee5134b..30f13319e24b 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c | |||
@@ -48,8 +48,6 @@ | |||
48 | #include <asm/e820.h> | 48 | #include <asm/e820.h> |
49 | #include <asm/ipi.h> | 49 | #include <asm/ipi.h> |
50 | 50 | ||
51 | #define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT)) | ||
52 | |||
53 | int found_numaq; | 51 | int found_numaq; |
54 | 52 | ||
55 | /* | 53 | /* |
@@ -79,31 +77,20 @@ int quad_local_to_mp_bus_id[NR_CPUS/4][4]; | |||
79 | static inline void numaq_register_node(int node, struct sys_cfg_data *scd) | 77 | static inline void numaq_register_node(int node, struct sys_cfg_data *scd) |
80 | { | 78 | { |
81 | struct eachquadmem *eq = scd->eq + node; | 79 | struct eachquadmem *eq = scd->eq + node; |
80 | u64 start = (u64)(eq->hi_shrd_mem_start - eq->priv_mem_size) << 20; | ||
81 | u64 end = (u64)(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size) << 20; | ||
82 | int ret; | ||
82 | 83 | ||
83 | node_set_online(node); | 84 | node_set(node, numa_nodes_parsed); |
84 | 85 | ret = numa_add_memblk(node, start, end); | |
85 | /* Convert to pages */ | 86 | BUG_ON(ret < 0); |
86 | node_start_pfn[node] = | ||
87 | MB_TO_PAGES(eq->hi_shrd_mem_start - eq->priv_mem_size); | ||
88 | |||
89 | node_end_pfn[node] = | ||
90 | MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size); | ||
91 | |||
92 | memblock_x86_register_active_regions(node, node_start_pfn[node], | ||
93 | node_end_pfn[node]); | ||
94 | |||
95 | memory_present(node, node_start_pfn[node], node_end_pfn[node]); | ||
96 | |||
97 | node_remap_size[node] = node_memmap_size_bytes(node, | ||
98 | node_start_pfn[node], | ||
99 | node_end_pfn[node]); | ||
100 | } | 87 | } |
101 | 88 | ||
102 | /* | 89 | /* |
103 | * Function: smp_dump_qct() | 90 | * Function: smp_dump_qct() |
104 | * | 91 | * |
105 | * Description: gets memory layout from the quad config table. This | 92 | * Description: gets memory layout from the quad config table. This |
106 | * function also updates node_online_map with the nodes (quads) present. | 93 | * function also updates numa_nodes_parsed with the nodes (quads) present. |
107 | */ | 94 | */ |
108 | static void __init smp_dump_qct(void) | 95 | static void __init smp_dump_qct(void) |
109 | { | 96 | { |
@@ -112,7 +99,6 @@ static void __init smp_dump_qct(void) | |||
112 | 99 | ||
113 | scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR); | 100 | scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR); |
114 | 101 | ||
115 | nodes_clear(node_online_map); | ||
116 | for_each_node(node) { | 102 | for_each_node(node) { |
117 | if (scd->quads_present31_0 & (1 << node)) | 103 | if (scd->quads_present31_0 & (1 << node)) |
118 | numaq_register_node(node, scd); | 104 | numaq_register_node(node, scd); |
@@ -282,14 +268,14 @@ static __init void early_check_numaq(void) | |||
282 | } | 268 | } |
283 | } | 269 | } |
284 | 270 | ||
285 | int __init get_memcfg_numaq(void) | 271 | int __init numaq_numa_init(void) |
286 | { | 272 | { |
287 | early_check_numaq(); | 273 | early_check_numaq(); |
288 | if (!found_numaq) | 274 | if (!found_numaq) |
289 | return 0; | 275 | return -ENOENT; |
290 | smp_dump_qct(); | 276 | smp_dump_qct(); |
291 | 277 | ||
292 | return 1; | 278 | return 0; |
293 | } | 279 | } |
294 | 280 | ||
295 | #define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER) | 281 | #define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER) |
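
The new start/end computation in numaq_register_node() is plain MB-to-bytes conversion with 64-bit safety; the u64 cast matters before the << 20. A worked example with hypothetical quad config values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int hi_shrd_mem_start = 1024;  /* MB, hypothetical */
        unsigned int priv_mem_size     = 256;   /* MB */
        unsigned int hi_shrd_mem_size  = 3072;  /* MB */

        uint64_t start = (uint64_t)(hi_shrd_mem_start - priv_mem_size) << 20;
        uint64_t end   = (uint64_t)(hi_shrd_mem_start + hi_shrd_mem_size) << 20;

        /* 768 MB .. 4096 MB: end would overflow 32 bits without the cast */
        printf("memblk: %#llx - %#llx\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }
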
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index fc84c7b61108..6541e471fd91 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c | |||
@@ -172,7 +172,6 @@ struct apic apic_default = { | |||
172 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 172 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
173 | 173 | ||
174 | .x86_32_early_logical_apicid = default_x86_32_early_logical_apicid, | 174 | .x86_32_early_logical_apicid = default_x86_32_early_logical_apicid, |
175 | .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, | ||
176 | }; | 175 | }; |
177 | 176 | ||
178 | extern struct apic apic_numaq; | 177 | extern struct apic apic_numaq; |
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index e4b8059b414a..35bcd7d995a1 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c | |||
@@ -551,5 +551,4 @@ struct apic apic_summit = { | |||
551 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 551 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
552 | 552 | ||
553 | .x86_32_early_logical_apicid = summit_early_logical_apicid, | 553 | .x86_32_early_logical_apicid = summit_early_logical_apicid, |
554 | .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, | ||
555 | }; | 554 | }; |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 33b10a0fc095..7acd2d2ac965 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -37,6 +37,13 @@ | |||
37 | #include <asm/smp.h> | 37 | #include <asm/smp.h> |
38 | #include <asm/x86_init.h> | 38 | #include <asm/x86_init.h> |
39 | #include <asm/emergency-restart.h> | 39 | #include <asm/emergency-restart.h> |
40 | #include <asm/nmi.h> | ||
41 | |||
42 | /* BMC sets a bit in this MMR before sending an NMI */ | ||
43 | #define UVH_NMI_MMR UVH_SCRATCH5 | ||
44 | #define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8) | ||
45 | #define UV_NMI_PENDING_MASK (1UL << 63) | ||
46 | DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count); | ||
40 | 47 | ||
41 | DEFINE_PER_CPU(int, x2apic_extra_bits); | 48 | DEFINE_PER_CPU(int, x2apic_extra_bits); |
42 | 49 | ||
@@ -642,18 +649,46 @@ void __cpuinit uv_cpu_init(void) | |||
642 | */ | 649 | */ |
643 | int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) | 650 | int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) |
644 | { | 651 | { |
652 | unsigned long real_uv_nmi; | ||
653 | int bid; | ||
654 | |||
645 | if (reason != DIE_NMIUNKNOWN) | 655 | if (reason != DIE_NMIUNKNOWN) |
646 | return NOTIFY_OK; | 656 | return NOTIFY_OK; |
647 | 657 | ||
648 | if (in_crash_kexec) | 658 | if (in_crash_kexec) |
649 | /* do nothing if entering the crash kernel */ | 659 | /* do nothing if entering the crash kernel */ |
650 | return NOTIFY_OK; | 660 | return NOTIFY_OK; |
661 | |||
651 | /* | 662 | /* |
652 | * Use a lock so only one cpu prints at a time | 663 | * Each blade has an MMR that indicates when an NMI has been sent |
653 | * to prevent intermixed output. | 664 | * to cpus on the blade. If an NMI is detected, atomically |
665 | * clear the MMR and update a per-blade NMI count used to | ||
666 | * cause each cpu on the blade to notice a new NMI. | ||
667 | */ | ||
668 | bid = uv_numa_blade_id(); | ||
669 | real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); | ||
670 | |||
671 | if (unlikely(real_uv_nmi)) { | ||
672 | spin_lock(&uv_blade_info[bid].nmi_lock); | ||
673 | real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); | ||
674 | if (real_uv_nmi) { | ||
675 | uv_blade_info[bid].nmi_count++; | ||
676 | uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK); | ||
677 | } | ||
678 | spin_unlock(&uv_blade_info[bid].nmi_lock); | ||
679 | } | ||
680 | |||
681 | if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) | ||
682 | return NOTIFY_DONE; | ||
683 | |||
684 | __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; | ||
685 | |||
686 | /* | ||
687 | * Use a lock so only one cpu prints at a time. | ||
688 | * This prevents intermixed output. | ||
654 | */ | 689 | */ |
655 | spin_lock(&uv_nmi_lock); | 690 | spin_lock(&uv_nmi_lock); |
656 | pr_info("NMI stack dump cpu %u:\n", smp_processor_id()); | 691 | pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id()); |
657 | dump_stack(); | 692 | dump_stack(); |
658 | spin_unlock(&uv_nmi_lock); | 693 | spin_unlock(&uv_nmi_lock); |
659 | 694 | ||
@@ -661,7 +696,8 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) | |||
661 | } | 696 | } |
662 | 697 | ||
663 | static struct notifier_block uv_dump_stack_nmi_nb = { | 698 | static struct notifier_block uv_dump_stack_nmi_nb = { |
664 | .notifier_call = uv_handle_nmi | 699 | .notifier_call = uv_handle_nmi, |
700 | .priority = NMI_LOCAL_LOW_PRIOR - 1, | ||
665 | }; | 701 | }; |
666 | 702 | ||
667 | void uv_register_nmi_notifier(void) | 703 | void uv_register_nmi_notifier(void) |
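
The MMR check-lock-recheck above is a classic double-checked latch: only one cpu per blade consumes the pending bit, but every cpu notices via the bumped count. A hedged userspace analogue (a pthread mutex standing in for the spinlock, a plain variable for the MMR; link with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t nmi_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long mmr_pending;   /* stands in for UVH_NMI_MMR bit 63 */
    static unsigned long nmi_count;     /* per-blade count in the real code */

    static void handle_nmi(unsigned long *last_seen)
    {
        if (mmr_pending) {                  /* cheap unlocked peek */
            pthread_mutex_lock(&nmi_lock);
            if (mmr_pending) {              /* recheck under the lock */
                nmi_count++;
                mmr_pending = 0;            /* "write UVH_NMI_MMR_CLEAR" */
            }
            pthread_mutex_unlock(&nmi_lock);
        }
        if (*last_seen == nmi_count)
            return;                         /* no new NMI for this cpu */
        *last_seen = nmi_count;
        printf("dump stack\n");
    }

    int main(void)
    {
        unsigned long cpu0 = 0, cpu1 = 0;
        mmr_pending = 1;
        handle_nmi(&cpu0);  /* clears the latch, dumps */
        handle_nmi(&cpu1);  /* sees the new count, dumps too */
        handle_nmi(&cpu0);  /* nothing new: silent */
        return 0;
    }
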
@@ -720,8 +756,9 @@ void __init uv_system_init(void) | |||
720 | printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); | 756 | printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); |
721 | 757 | ||
722 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); | 758 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); |
723 | uv_blade_info = kmalloc(bytes, GFP_KERNEL); | 759 | uv_blade_info = kzalloc(bytes, GFP_KERNEL); |
724 | BUG_ON(!uv_blade_info); | 760 | BUG_ON(!uv_blade_info); |
761 | |||
725 | for (blade = 0; blade < uv_num_possible_blades(); blade++) | 762 | for (blade = 0; blade < uv_num_possible_blades(); blade++) |
726 | uv_blade_info[blade].memory_nid = -1; | 763 | uv_blade_info[blade].memory_nid = -1; |
727 | 764 | ||
@@ -747,6 +784,7 @@ void __init uv_system_init(void) | |||
747 | uv_blade_info[blade].pnode = pnode; | 784 | uv_blade_info[blade].pnode = pnode; |
748 | uv_blade_info[blade].nr_possible_cpus = 0; | 785 | uv_blade_info[blade].nr_possible_cpus = 0; |
749 | uv_blade_info[blade].nr_online_cpus = 0; | 786 | uv_blade_info[blade].nr_online_cpus = 0; |
787 | spin_lock_init(&uv_blade_info[blade].nmi_lock); | ||
750 | max_pnode = max(pnode, max_pnode); | 788 | max_pnode = max(pnode, max_pnode); |
751 | blade++; | 789 | blade++; |
752 | } | 790 | } |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index adee12e0da1f..3bfa02235965 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -1238,7 +1238,6 @@ static int suspend(int vetoable) | |||
1238 | dpm_suspend_noirq(PMSG_SUSPEND); | 1238 | dpm_suspend_noirq(PMSG_SUSPEND); |
1239 | 1239 | ||
1240 | local_irq_disable(); | 1240 | local_irq_disable(); |
1241 | sysdev_suspend(PMSG_SUSPEND); | ||
1242 | syscore_suspend(); | 1241 | syscore_suspend(); |
1243 | 1242 | ||
1244 | local_irq_enable(); | 1243 | local_irq_enable(); |
@@ -1258,7 +1257,6 @@ static int suspend(int vetoable) | |||
1258 | err = (err == APM_SUCCESS) ? 0 : -EIO; | 1257 | err = (err == APM_SUCCESS) ? 0 : -EIO; |
1259 | 1258 | ||
1260 | syscore_resume(); | 1259 | syscore_resume(); |
1261 | sysdev_resume(); | ||
1262 | local_irq_enable(); | 1260 | local_irq_enable(); |
1263 | 1261 | ||
1264 | dpm_resume_noirq(PMSG_RESUME); | 1262 | dpm_resume_noirq(PMSG_RESUME); |
@@ -1282,7 +1280,6 @@ static void standby(void) | |||
1282 | dpm_suspend_noirq(PMSG_SUSPEND); | 1280 | dpm_suspend_noirq(PMSG_SUSPEND); |
1283 | 1281 | ||
1284 | local_irq_disable(); | 1282 | local_irq_disable(); |
1285 | sysdev_suspend(PMSG_SUSPEND); | ||
1286 | syscore_suspend(); | 1283 | syscore_suspend(); |
1287 | local_irq_enable(); | 1284 | local_irq_enable(); |
1288 | 1285 | ||
@@ -1292,7 +1289,6 @@ static void standby(void) | |||
1292 | 1289 | ||
1293 | local_irq_disable(); | 1290 | local_irq_disable(); |
1294 | syscore_resume(); | 1291 | syscore_resume(); |
1295 | sysdev_resume(); | ||
1296 | local_irq_enable(); | 1292 | local_irq_enable(); |
1297 | 1293 | ||
1298 | dpm_resume_noirq(PMSG_RESUME); | 1294 | dpm_resume_noirq(PMSG_RESUME); |
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 3f0ebe429a01..6042981d0309 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile | |||
@@ -30,7 +30,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o | |||
30 | 30 | ||
31 | obj-$(CONFIG_X86_MCE) += mcheck/ | 31 | obj-$(CONFIG_X86_MCE) += mcheck/ |
32 | obj-$(CONFIG_MTRR) += mtrr/ | 32 | obj-$(CONFIG_MTRR) += mtrr/ |
33 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ | ||
34 | 33 | ||
35 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o | 34 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o |
36 | 35 | ||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index bb9eb29a52dd..6f9d1f6063e9 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -613,7 +613,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
613 | #endif | 613 | #endif |
614 | 614 | ||
615 | /* As a rule processors have APIC timer running in deep C states */ | 615 | /* As a rule processors have APIC timer running in deep C states */ |
616 | if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) | 616 | if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400)) |
617 | set_cpu_cap(c, X86_FEATURE_ARAT); | 617 | set_cpu_cap(c, X86_FEATURE_ARAT); |
618 | 618 | ||
619 | /* | 619 | /* |
@@ -698,7 +698,7 @@ cpu_dev_register(amd_cpu_dev); | |||
698 | */ | 698 | */ |
699 | 699 | ||
700 | const int amd_erratum_400[] = | 700 | const int amd_erratum_400[] = |
701 | AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf), | 701 | AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), |
702 | AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); | 702 | AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); |
703 | EXPORT_SYMBOL_GPL(amd_erratum_400); | 703 | EXPORT_SYMBOL_GPL(amd_erratum_400); |
704 | 704 | ||
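
The erratum-400 fix above only changes the packed range constant. Assuming the usual AMD_MODEL_RANGE field layout (family in bits 31-24, first model/stepping in 23-12, last model/stepping in 11-0; an assumption about this kernel's headers, so treat it as a sketch), the before/after ranges decode like this:

    #include <stdio.h>

    /* assumed layout: f:8 | m_start:8 | s_start:4 | m_end:8 | s_end:4 */
    #define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end)        \
        (((unsigned)(f) << 24) | ((unsigned)(m_start) << 16) |        \
         ((unsigned)(s_start) << 12) | ((unsigned)(m_end) << 4) |     \
         (unsigned)(s_end))

    static void decode(unsigned int r)
    {
        printf("family 0x%02x, model/stepping 0x%02x/0x%x .. 0x%02x/0x%x\n",
               (r >> 24) & 0xff, (r >> 16) & 0xff, (r >> 12) & 0xf,
               (r >> 4) & 0xff, r & 0xf);
    }

    int main(void)
    {
        decode(AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf)); /* old: model 0x04 up */
        decode(AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf)); /* new: model 0x41 up */
        return 0;
    }
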
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 173f3a3fa1a6..cbc70a27430c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -254,6 +254,25 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | |||
254 | } | 254 | } |
255 | #endif | 255 | #endif |
256 | 256 | ||
257 | static int disable_smep __initdata; | ||
258 | static __init int setup_disable_smep(char *arg) | ||
259 | { | ||
260 | disable_smep = 1; | ||
261 | return 1; | ||
262 | } | ||
263 | __setup("nosmep", setup_disable_smep); | ||
264 | |||
265 | static __init void setup_smep(struct cpuinfo_x86 *c) | ||
266 | { | ||
267 | if (cpu_has(c, X86_FEATURE_SMEP)) { | ||
268 | if (unlikely(disable_smep)) { | ||
269 | setup_clear_cpu_cap(X86_FEATURE_SMEP); | ||
270 | clear_in_cr4(X86_CR4_SMEP); | ||
271 | } else | ||
272 | set_in_cr4(X86_CR4_SMEP); | ||
273 | } | ||
274 | } | ||
275 | |||
257 | /* | 276 | /* |
258 | * Some CPU features depend on higher CPUID levels, which may not always | 277 | * Some CPU features depend on higher CPUID levels, which may not always |
259 | * be available due to CPUID level capping or broken virtualization | 278 | * be available due to CPUID level capping or broken virtualization |
@@ -667,6 +686,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
667 | c->cpu_index = 0; | 686 | c->cpu_index = 0; |
668 | #endif | 687 | #endif |
669 | filter_cpuid_features(c, false); | 688 | filter_cpuid_features(c, false); |
689 | |||
690 | setup_smep(c); | ||
670 | } | 691 | } |
671 | 692 | ||
672 | void __init early_cpu_init(void) | 693 | void __init early_cpu_init(void) |
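
The nosmep plumbing reduces to: feature advertised by CPUID, not vetoed on the command line, then set CR4.SMEP; a veto must also clear the feature bit so later code stays consistent. A toy sketch of that gate (the bit position and flag names are illustrative, not the kernel's):

    #include <stdio.h>

    #define X86_CR4_SMEP (1UL << 20)   /* illustrative bit position */

    static unsigned long fake_cr4;
    static int feature_smep = 1;       /* "CPUID says SMEP exists" */
    static int disable_smep;           /* set by a "nosmep" boot option */

    static void setup_smep(void)
    {
        if (!feature_smep)
            return;
        if (disable_smep) {
            feature_smep = 0;            /* setup_clear_cpu_cap() analogue */
            fake_cr4 &= ~X86_CR4_SMEP;   /* clear_in_cr4() analogue */
        } else {
            fake_cr4 |= X86_CR4_SMEP;    /* set_in_cr4() analogue */
        }
    }

    int main(void)
    {
        setup_smep();
        printf("cr4=%#lx smep=%d\n", fake_cr4, feature_smep);
        disable_smep = 1;
        setup_smep();
        printf("cr4=%#lx smep=%d\n", fake_cr4, feature_smep);
        return 0;
    }
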
@@ -752,6 +773,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | |||
752 | #endif | 773 | #endif |
753 | } | 774 | } |
754 | 775 | ||
776 | setup_smep(c); | ||
777 | |||
755 | get_model_name(c); /* Default name */ | 778 | get_model_name(c); /* Default name */ |
756 | 779 | ||
757 | detect_nopl(c); | 780 | detect_nopl(c); |
diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile deleted file mode 100644 index bd54bf67e6fb..000000000000 --- a/arch/x86/kernel/cpu/cpufreq/Makefile +++ /dev/null | |||
@@ -1,21 +0,0 @@ | |||
1 | # Link order matters. K8 is preferred to ACPI because of firmware bugs in early | ||
2 | # K8 systems. ACPI is preferred to all other hardware-specific drivers. | ||
3 | # speedstep-* is preferred over p4-clockmod. | ||
4 | |||
5 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o | ||
6 | obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o | ||
7 | obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o | ||
8 | obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o | ||
9 | obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o | ||
10 | obj-$(CONFIG_X86_LONGHAUL) += longhaul.o | ||
11 | obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o | ||
12 | obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o | ||
13 | obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o | ||
14 | obj-$(CONFIG_X86_LONGRUN) += longrun.o | ||
15 | obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o | ||
16 | obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o | ||
17 | obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o | ||
18 | obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o | ||
19 | obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o | ||
20 | obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o | ||
21 | obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o | ||
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index fc73a34ba8c9..1edf5ba4fb2b 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -411,12 +411,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
411 | 411 | ||
412 | switch (c->x86_model) { | 412 | switch (c->x86_model) { |
413 | case 5: | 413 | case 5: |
414 | if (c->x86_mask == 0) { | 414 | if (l2 == 0) |
415 | if (l2 == 0) | 415 | p = "Celeron (Covington)"; |
416 | p = "Celeron (Covington)"; | 416 | else if (l2 == 256) |
417 | else if (l2 == 256) | 417 | p = "Mobile Pentium II (Dixon)"; |
418 | p = "Mobile Pentium II (Dixon)"; | ||
419 | } | ||
420 | break; | 418 | break; |
421 | 419 | ||
422 | case 6: | 420 | case 6: |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 1ce1af2899df..c105c533ed94 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -327,7 +327,6 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3) | |||
327 | l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9)); | 327 | l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9)); |
328 | l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13)); | 328 | l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13)); |
329 | 329 | ||
330 | l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1; | ||
331 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; | 330 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; |
332 | } | 331 | } |
333 | 332 | ||
@@ -454,27 +453,16 @@ int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot, | |||
454 | { | 453 | { |
455 | int ret = 0; | 454 | int ret = 0; |
456 | 455 | ||
457 | #define SUBCACHE_MASK (3UL << 20) | 456 | /* check if @slot is already used or the index is already disabled */ |
458 | #define SUBCACHE_INDEX 0xfff | ||
459 | |||
460 | /* | ||
461 | * check whether this slot is already used or | ||
462 | * the index is already disabled | ||
463 | */ | ||
464 | ret = amd_get_l3_disable_slot(l3, slot); | 457 | ret = amd_get_l3_disable_slot(l3, slot); |
465 | if (ret >= 0) | 458 | if (ret >= 0) |
466 | return -EINVAL; | 459 | return -EINVAL; |
467 | 460 | ||
468 | /* | 461 | if (index > l3->indices) |
469 | * check whether the other slot has disabled the | ||
470 | * same index already | ||
471 | */ | ||
472 | if (index == amd_get_l3_disable_slot(l3, !slot)) | ||
473 | return -EINVAL; | 462 | return -EINVAL; |
474 | 463 | ||
475 | /* do not allow writes outside of allowed bits */ | 464 | /* check whether the other slot has disabled the same index already */ |
476 | if ((index & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) || | 465 | if (index == amd_get_l3_disable_slot(l3, !slot)) |
477 | ((index & SUBCACHE_INDEX) > l3->indices)) | ||
478 | return -EINVAL; | 466 | return -EINVAL; |
479 | 467 | ||
480 | amd_l3_disable_index(l3, cpu, slot, index); | 468 | amd_l3_disable_index(l3, cpu, slot, index); |
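
The rewritten validation above boils down to three ordered checks: the slot must be free, the index in range (the new bound test), and not already claimed by the other slot. A hedged standalone restatement, with amd_get_l3_disable_slot() mocked as an array lookup (the real function returns the disabled index or a negative errno):

    #include <stdio.h>

    #define L3_INDICES 0x3ff           /* hypothetical l3->indices */

    static long slots[2] = { -1, -1 }; /* -1: slot unused */

    static long get_l3_disable_slot(unsigned slot)
    {
        return slots[slot];            /* >= 0: already disabling that index */
    }

    static int set_l3_disable_slot(unsigned slot, unsigned long index)
    {
        if (get_l3_disable_slot(slot) >= 0)            /* slot already used */
            return -1;
        if (index > L3_INDICES)                        /* the new range check */
            return -1;
        if ((long)index == get_l3_disable_slot(!slot)) /* other slot has it */
            return -1;
        slots[slot] = index;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", set_l3_disable_slot(0, 12));    /*  0: ok             */
        printf("%d\n", set_l3_disable_slot(0, 13));    /* -1: slot busy      */
        printf("%d\n", set_l3_disable_slot(1, 12));    /* -1: duplicate idx  */
        printf("%d\n", set_l3_disable_slot(1, 0x500)); /* -1: index too big  */
        return 0;
    }
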
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 3385ea26f684..ff1ae9b6464d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -105,20 +105,6 @@ static int cpu_missing; | |||
105 | ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); | 105 | ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); |
106 | EXPORT_SYMBOL_GPL(x86_mce_decoder_chain); | 106 | EXPORT_SYMBOL_GPL(x86_mce_decoder_chain); |
107 | 107 | ||
108 | static int default_decode_mce(struct notifier_block *nb, unsigned long val, | ||
109 | void *data) | ||
110 | { | ||
111 | pr_emerg(HW_ERR "No human readable MCE decoding support on this CPU type.\n"); | ||
112 | pr_emerg(HW_ERR "Run the message through 'mcelog --ascii' to decode.\n"); | ||
113 | |||
114 | return NOTIFY_STOP; | ||
115 | } | ||
116 | |||
117 | static struct notifier_block mce_dec_nb = { | ||
118 | .notifier_call = default_decode_mce, | ||
119 | .priority = -1, | ||
120 | }; | ||
121 | |||
122 | /* MCA banks polled by the period polling timer for corrected events */ | 108 | /* MCA banks polled by the period polling timer for corrected events */ |
123 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { | 109 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { |
124 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL | 110 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL |
@@ -212,6 +198,8 @@ void mce_log(struct mce *mce) | |||
212 | 198 | ||
213 | static void print_mce(struct mce *m) | 199 | static void print_mce(struct mce *m) |
214 | { | 200 | { |
201 | int ret = 0; | ||
202 | |||
215 | pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n", | 203 | pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n", |
216 | m->extcpu, m->mcgstatus, m->bank, m->status); | 204 | m->extcpu, m->mcgstatus, m->bank, m->status); |
217 | 205 | ||
@@ -239,7 +227,11 @@ static void print_mce(struct mce *m) | |||
239 | * Print out human-readable details about the MCE error, | 227 | * Print out human-readable details about the MCE error, |
240 | * (if the CPU has an implementation for that) | 228 | * (if the CPU has an implementation for that) |
241 | */ | 229 | */ |
242 | atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); | 230 | ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); |
231 | if (ret == NOTIFY_STOP) | ||
232 | return; | ||
233 | |||
234 | pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); | ||
243 | } | 235 | } |
244 | 236 | ||
245 | #define PANIC_TIMEOUT 5 /* 5 seconds */ | 237 | #define PANIC_TIMEOUT 5 /* 5 seconds */ |
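
A userspace analogue of the new print_mce() flow: walk a decoder chain, and only if no handler claims the event with NOTIFY_STOP print the generic "run it through mcelog" hint. The chain plumbing is mocked; only the control flow mirrors the patch.

    #include <stdio.h>

    #define NOTIFY_DONE 0
    #define NOTIFY_STOP 1

    typedef int (*decoder_fn)(const char *mce);

    static int edac_style_decoder(const char *mce)
    {
        printf("decoded: %s\n", mce);
        return NOTIFY_STOP;            /* a real decoder claims the event */
    }

    static decoder_fn chain[4];
    static int nr_decoders;

    static void print_mce(const char *mce)
    {
        int ret = NOTIFY_DONE, i;

        for (i = 0; i < nr_decoders; i++)
            ret = chain[i](mce);
        if (ret == NOTIFY_STOP)
            return;
        printf("Run the above through 'mcelog --ascii'\n");  /* fallback hint */
    }

    int main(void)
    {
        print_mce("bank 4 status 0xb200...");  /* no decoder: prints the hint */
        chain[nr_decoders++] = edac_style_decoder;
        print_mce("bank 4 status 0xb200...");  /* decoded: hint suppressed */
        return 0;
    }
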
@@ -590,7 +582,6 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | |||
590 | if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) { | 582 | if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) { |
591 | mce_log(&m); | 583 | mce_log(&m); |
592 | atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &m); | 584 | atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &m); |
593 | add_taint(TAINT_MACHINE_CHECK); | ||
594 | } | 585 | } |
595 | 586 | ||
596 | /* | 587 | /* |
@@ -1722,8 +1713,6 @@ __setup("mce", mcheck_enable); | |||
1722 | 1713 | ||
1723 | int __init mcheck_init(void) | 1714 | int __init mcheck_init(void) |
1724 | { | 1715 | { |
1725 | atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb); | ||
1726 | |||
1727 | mcheck_intel_therm_init(); | 1716 | mcheck_intel_therm_init(); |
1728 | 1717 | ||
1729 | return 0; | 1718 | return 0; |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 167f97b5596e..bb0adad35143 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -509,6 +509,7 @@ recurse: | |||
509 | out_free: | 509 | out_free: |
510 | if (b) { | 510 | if (b) { |
511 | kobject_put(&b->kobj); | 511 | kobject_put(&b->kobj); |
512 | list_del(&b->miscj); | ||
512 | kfree(b); | 513 | kfree(b); |
513 | } | 514 | } |
514 | return err; | 515 | return err; |
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 6f8c5e9da97f..27c625178bf1 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -187,8 +187,6 @@ static int therm_throt_process(bool new_event, int event, int level) | |||
187 | this_cpu, | 187 | this_cpu, |
188 | level == CORE_LEVEL ? "Core" : "Package", | 188 | level == CORE_LEVEL ? "Core" : "Package", |
189 | state->count); | 189 | state->count); |
190 | |||
191 | add_taint(TAINT_MACHINE_CHECK); | ||
192 | return 1; | 190 | return 1; |
193 | } | 191 | } |
194 | if (old_event) { | 192 | if (old_event) { |
@@ -355,7 +353,6 @@ static void notify_thresholds(__u64 msr_val) | |||
355 | static void intel_thermal_interrupt(void) | 353 | static void intel_thermal_interrupt(void) |
356 | { | 354 | { |
357 | __u64 msr_val; | 355 | __u64 msr_val; |
358 | struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); | ||
359 | 356 | ||
360 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); | 357 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); |
361 | 358 | ||
@@ -367,19 +364,19 @@ static void intel_thermal_interrupt(void) | |||
367 | CORE_LEVEL) != 0) | 364 | CORE_LEVEL) != 0) |
368 | mce_log_therm_throt_event(CORE_THROTTLED | msr_val); | 365 | mce_log_therm_throt_event(CORE_THROTTLED | msr_val); |
369 | 366 | ||
370 | if (cpu_has(c, X86_FEATURE_PLN)) | 367 | if (this_cpu_has(X86_FEATURE_PLN)) |
371 | if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, | 368 | if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, |
372 | POWER_LIMIT_EVENT, | 369 | POWER_LIMIT_EVENT, |
373 | CORE_LEVEL) != 0) | 370 | CORE_LEVEL) != 0) |
374 | mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val); | 371 | mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val); |
375 | 372 | ||
376 | if (cpu_has(c, X86_FEATURE_PTS)) { | 373 | if (this_cpu_has(X86_FEATURE_PTS)) { |
377 | rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); | 374 | rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); |
378 | if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, | 375 | if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, |
379 | THERMAL_THROTTLING_EVENT, | 376 | THERMAL_THROTTLING_EVENT, |
380 | PACKAGE_LEVEL) != 0) | 377 | PACKAGE_LEVEL) != 0) |
381 | mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val); | 378 | mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val); |
382 | if (cpu_has(c, X86_FEATURE_PLN)) | 379 | if (this_cpu_has(X86_FEATURE_PLN)) |
383 | if (therm_throt_process(msr_val & | 380 | if (therm_throt_process(msr_val & |
384 | PACKAGE_THERM_STATUS_POWER_LIMIT, | 381 | PACKAGE_THERM_STATUS_POWER_LIMIT, |
385 | POWER_LIMIT_EVENT, | 382 | POWER_LIMIT_EVENT, |
@@ -393,7 +390,6 @@ static void unexpected_thermal_interrupt(void) | |||
393 | { | 390 | { |
394 | printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n", | 391 | printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n", |
395 | smp_processor_id()); | 392 | smp_processor_id()); |
396 | add_taint(TAINT_MACHINE_CHECK); | ||
397 | } | 393 | } |
398 | 394 | ||
399 | static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; | 395 | static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; |
@@ -446,18 +442,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c) | |||
446 | */ | 442 | */ |
447 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 443 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
448 | 444 | ||
445 | h = lvtthmr_init; | ||
449 | /* | 446 | /* |
450 | * The initial value of thermal LVT entries on all APs always reads | 447 | * The initial value of thermal LVT entries on all APs always reads |
451 | * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI | 448 | * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI |
452 | * sequence to them and LVT registers are reset to 0s except for | 449 | * sequence to them and LVT registers are reset to 0s except for |
453 | * the mask bits which are set to 1s when APs receive INIT IPI. | 450 | * the mask bits which are set to 1s when APs receive INIT IPI. |
454 | * Always restore the value that BIOS has programmed on AP based on | 451 | * If BIOS takes over the thermal interrupt and sets its interrupt |
455 | * BSP's info we saved since BIOS is always setting the same value | 452 | * delivery mode to SMI (not fixed), it restores the value that the |
456 | * for all threads/cores | 453 | * BIOS has programmed on AP based on BSP's info we saved since BIOS |
454 | * is always setting the same value for all threads/cores. | ||
457 | */ | 455 | */ |
458 | apic_write(APIC_LVTTHMR, lvtthmr_init); | 456 | if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED) |
457 | apic_write(APIC_LVTTHMR, lvtthmr_init); | ||
459 | 458 | ||
460 | h = lvtthmr_init; | ||
461 | 459 | ||
462 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { | 460 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { |
463 | printk(KERN_DEBUG | 461 | printk(KERN_DEBUG |
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index f478ff6877ef..1aae78f775fc 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -263,7 +263,6 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) | |||
263 | printk("DEBUG_PAGEALLOC"); | 263 | printk("DEBUG_PAGEALLOC"); |
264 | #endif | 264 | #endif |
265 | printk("\n"); | 265 | printk("\n"); |
266 | sysfs_printk_last_file(); | ||
267 | if (notify_die(DIE_OOPS, str, regs, err, | 266 | if (notify_die(DIE_OOPS, str, regs, err, |
268 | current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) | 267 | current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) |
269 | return 1; | 268 | return 1; |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index a93742a57468..0ba15a6cc57e 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -260,9 +260,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) | |||
260 | return mod_code_status; | 260 | return mod_code_status; |
261 | } | 261 | } |
262 | 262 | ||
263 | static unsigned char *ftrace_nop_replace(void) | 263 | static const unsigned char *ftrace_nop_replace(void) |
264 | { | 264 | { |
265 | return ideal_nop5; | 265 | return ideal_nops[NOP_ATOMIC5]; |
266 | } | 266 | } |
267 | 267 | ||
268 | static int | 268 | static int |
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index d6d6bb361931..3bb08509a7a1 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
@@ -23,7 +23,6 @@ | |||
23 | static void __init i386_default_early_setup(void) | 23 | static void __init i386_default_early_setup(void) |
24 | { | 24 | { |
25 | /* Initialize 32bit specific setup functions */ | 25 | /* Initialize 32bit specific setup functions */ |
26 | x86_init.resources.probe_roms = probe_roms; | ||
27 | x86_init.resources.reserve_resources = i386_reserve_resources; | 26 | x86_init.resources.reserve_resources = i386_reserve_resources; |
28 | x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; | 27 | x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; |
29 | 28 | ||
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index bfe8f729e086..6781765b3a0d 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -217,7 +217,7 @@ static void hpet_reserve_platform_timers(unsigned int id) { } | |||
217 | /* | 217 | /* |
218 | * Common hpet info | 218 | * Common hpet info |
219 | */ | 219 | */ |
220 | static unsigned long hpet_period; | 220 | static unsigned long hpet_freq; |
221 | 221 | ||
222 | static void hpet_legacy_set_mode(enum clock_event_mode mode, | 222 | static void hpet_legacy_set_mode(enum clock_event_mode mode, |
223 | struct clock_event_device *evt); | 223 | struct clock_event_device *evt); |
@@ -232,7 +232,6 @@ static struct clock_event_device hpet_clockevent = { | |||
232 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | 232 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, |
233 | .set_mode = hpet_legacy_set_mode, | 233 | .set_mode = hpet_legacy_set_mode, |
234 | .set_next_event = hpet_legacy_next_event, | 234 | .set_next_event = hpet_legacy_next_event, |
235 | .shift = 32, | ||
236 | .irq = 0, | 235 | .irq = 0, |
237 | .rating = 50, | 236 | .rating = 50, |
238 | }; | 237 | }; |
@@ -290,28 +289,12 @@ static void hpet_legacy_clockevent_register(void) | |||
290 | hpet_enable_legacy_int(); | 289 | hpet_enable_legacy_int(); |
291 | 290 | ||
292 | /* | 291 | /* |
293 | * The mult factor is defined as (include/linux/clockchips.h) | ||
294 | * mult/2^shift = cyc/ns (in contrast to ns/cyc in clocksource.h) | ||
295 | * hpet_period is in units of femtoseconds (per cycle), so | ||
296 | * mult/2^shift = cyc/ns = 10^6/hpet_period | ||
297 | * mult = (10^6 * 2^shift)/hpet_period | ||
298 | * mult = (FSEC_PER_NSEC << hpet_clockevent.shift)/hpet_period | ||
299 | */ | ||
300 | hpet_clockevent.mult = div_sc((unsigned long) FSEC_PER_NSEC, | ||
301 | hpet_period, hpet_clockevent.shift); | ||
302 | /* Calculate the min / max delta */ | ||
303 | hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, | ||
304 | &hpet_clockevent); | ||
305 | /* Setup minimum reprogramming delta. */ | ||
306 | hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, | ||
307 | &hpet_clockevent); | ||
308 | |||
309 | /* | ||
310 | * Start hpet with the boot cpu mask and make it | 292 | * Start hpet with the boot cpu mask and make it |
311 | * global after the IO_APIC has been initialized. | 293 | * global after the IO_APIC has been initialized. |
312 | */ | 294 | */ |
313 | hpet_clockevent.cpumask = cpumask_of(smp_processor_id()); | 295 | hpet_clockevent.cpumask = cpumask_of(smp_processor_id()); |
314 | clockevents_register_device(&hpet_clockevent); | 296 | clockevents_config_and_register(&hpet_clockevent, hpet_freq, |
297 | HPET_MIN_PROG_DELTA, 0x7FFFFFFF); | ||
315 | global_clock_event = &hpet_clockevent; | 298 | global_clock_event = &hpet_clockevent; |
316 | printk(KERN_DEBUG "hpet clockevent registered\n"); | 299 | printk(KERN_DEBUG "hpet clockevent registered\n"); |
317 | } | 300 | } |
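
The removed comment derived mult from the femtosecond period; clockevents_config_and_register() now computes the same factors from the frequency. A worked check with a common 14.318 MHz HPET (the shift of 32 mirrors the removed .shift field):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t hpet_freq = 14318180;   /* Hz, typical LPC HPET */
        unsigned int shift = 32;         /* the removed hpet_clockevent.shift */
        /* mult/2^shift = cyc/ns  =>  mult = (freq << shift) / 10^9 */
        uint64_t mult = (hpet_freq << shift) / 1000000000ULL;
        /* clockevent_delta2ns() inverse: ns = (cycles << shift) / mult */
        uint64_t max_delta_ns = (0x7FFFFFFFULL << shift) / mult;

        printf("mult=%llu max_delta=%llu ns (~%llu s)\n",
               (unsigned long long)mult,
               (unsigned long long)max_delta_ns,
               (unsigned long long)(max_delta_ns / 1000000000ULL));
        return 0;
    }

2^31 cycles at 14.318 MHz is roughly 150 seconds, which is what the program reports for the maximum programmable delta.
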
@@ -549,7 +532,6 @@ static int hpet_setup_irq(struct hpet_dev *dev) | |||
549 | static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) | 532 | static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) |
550 | { | 533 | { |
551 | struct clock_event_device *evt = &hdev->evt; | 534 | struct clock_event_device *evt = &hdev->evt; |
552 | uint64_t hpet_freq; | ||
553 | 535 | ||
554 | WARN_ON(cpu != smp_processor_id()); | 536 | WARN_ON(cpu != smp_processor_id()); |
555 | if (!(hdev->flags & HPET_DEV_VALID)) | 537 | if (!(hdev->flags & HPET_DEV_VALID)) |
@@ -571,24 +553,10 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) | |||
571 | 553 | ||
572 | evt->set_mode = hpet_msi_set_mode; | 554 | evt->set_mode = hpet_msi_set_mode; |
573 | evt->set_next_event = hpet_msi_next_event; | 555 | evt->set_next_event = hpet_msi_next_event; |
574 | evt->shift = 32; | ||
575 | |||
576 | /* | ||
577 | * The period is a femto seconds value. We need to calculate the | ||
578 | * scaled math multiplication factor for nanosecond to hpet tick | ||
579 | * conversion. | ||
580 | */ | ||
581 | hpet_freq = FSEC_PER_SEC; | ||
582 | do_div(hpet_freq, hpet_period); | ||
583 | evt->mult = div_sc((unsigned long) hpet_freq, | ||
584 | NSEC_PER_SEC, evt->shift); | ||
585 | /* Calculate the max delta */ | ||
586 | evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt); | ||
587 | /* 5 usec minimum reprogramming delta. */ | ||
588 | evt->min_delta_ns = 5000; | ||
589 | |||
590 | evt->cpumask = cpumask_of(hdev->cpu); | 556 | evt->cpumask = cpumask_of(hdev->cpu); |
591 | clockevents_register_device(evt); | 557 | |
558 | clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA, | ||
559 | 0x7FFFFFFF); | ||
592 | } | 560 | } |
593 | 561 | ||
594 | #ifdef CONFIG_HPET | 562 | #ifdef CONFIG_HPET |
@@ -792,7 +760,6 @@ static struct clocksource clocksource_hpet = { | |||
792 | static int hpet_clocksource_register(void) | 760 | static int hpet_clocksource_register(void) |
793 | { | 761 | { |
794 | u64 start, now; | 762 | u64 start, now; |
795 | u64 hpet_freq; | ||
796 | cycle_t t1; | 763 | cycle_t t1; |
797 | 764 | ||
798 | /* Start the counter */ | 765 | /* Start the counter */ |
@@ -819,24 +786,7 @@ static int hpet_clocksource_register(void) | |||
819 | return -ENODEV; | 786 | return -ENODEV; |
820 | } | 787 | } |
821 | 788 | ||
822 | /* | ||
823 | * The definition of mult is (include/linux/clocksource.h) | ||
824 | * mult/2^shift = ns/cyc and hpet_period is in units of fsec/cyc | ||
825 | * so we first need to convert hpet_period to ns/cyc units: | ||
826 | * mult/2^shift = ns/cyc = hpet_period/10^6 | ||
827 | * mult = (hpet_period * 2^shift)/10^6 | ||
828 | * mult = (hpet_period << shift)/FSEC_PER_NSEC | ||
829 | */ | ||
830 | |||
831 | /* Need to convert hpet_period (fsec/cyc) to cyc/sec: | ||
832 | * | ||
833 | * cyc/sec = FSEC_PER_SEC/hpet_period(fsec/cyc) | ||
834 | * cyc/sec = (FSEC_PER_NSEC * NSEC_PER_SEC)/hpet_period | ||
835 | */ | ||
836 | hpet_freq = FSEC_PER_SEC; | ||
837 | do_div(hpet_freq, hpet_period); | ||
838 | clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq); | 789 | clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq); |
839 | |||
840 | return 0; | 790 | return 0; |
841 | } | 791 | } |
842 | 792 | ||
@@ -845,7 +795,9 @@ static int hpet_clocksource_register(void) | |||
845 | */ | 795 | */ |
846 | int __init hpet_enable(void) | 796 | int __init hpet_enable(void) |
847 | { | 797 | { |
798 | unsigned long hpet_period; | ||
848 | unsigned int id; | 799 | unsigned int id; |
800 | u64 freq; | ||
849 | int i; | 801 | int i; |
850 | 802 | ||
851 | if (!is_hpet_capable()) | 803 | if (!is_hpet_capable()) |
@@ -884,6 +836,14 @@ int __init hpet_enable(void) | |||
884 | goto out_nohpet; | 836 | goto out_nohpet; |
885 | 837 | ||
886 | /* | 838 | /* |
839 | * The period is a femtosecond value. Convert it to a | ||
840 | * frequency. | ||
841 | */ | ||
842 | freq = FSEC_PER_SEC; | ||
843 | do_div(freq, hpet_period); | ||
844 | hpet_freq = freq; | ||
845 | |||
846 | /* | ||
887 | * Read the HPET ID register to retrieve the IRQ routing | 847 | * Read the HPET ID register to retrieve the IRQ routing |
888 | * information and the number of channels | 848 | * information and the number of channels |
889 | */ | 849 | */ |
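
A quick numeric check of the femtosecond-period-to-Hz conversion introduced here, using a representative HPET period:

    #include <stdio.h>
    #include <stdint.h>

    #define FSEC_PER_SEC 1000000000000000ULL   /* 10^15 */

    int main(void)
    {
        uint64_t hpet_period = 69841279;   /* fs/cycle, a typical value */
        uint64_t freq = FSEC_PER_SEC / hpet_period;

        printf("hpet_freq = %llu Hz\n",
               (unsigned long long)freq);  /* ~14318180 */
        return 0;
    }
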
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 2dfd31597443..fb66dc9e36cb 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c | |||
@@ -93,7 +93,6 @@ static struct clock_event_device pit_ce = { | |||
93 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | 93 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, |
94 | .set_mode = init_pit_timer, | 94 | .set_mode = init_pit_timer, |
95 | .set_next_event = pit_next_event, | 95 | .set_next_event = pit_next_event, |
96 | .shift = 32, | ||
97 | .irq = 0, | 96 | .irq = 0, |
98 | }; | 97 | }; |
99 | 98 | ||
@@ -108,90 +107,12 @@ void __init setup_pit_timer(void) | |||
108 | * IO_APIC has been initialized. | 107 | * IO_APIC has been initialized. |
109 | */ | 108 | */ |
110 | pit_ce.cpumask = cpumask_of(smp_processor_id()); | 109 | pit_ce.cpumask = cpumask_of(smp_processor_id()); |
111 | pit_ce.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, pit_ce.shift); | ||
112 | pit_ce.max_delta_ns = clockevent_delta2ns(0x7FFF, &pit_ce); | ||
113 | pit_ce.min_delta_ns = clockevent_delta2ns(0xF, &pit_ce); | ||
114 | 110 | ||
115 | clockevents_register_device(&pit_ce); | 111 | clockevents_config_and_register(&pit_ce, CLOCK_TICK_RATE, 0xF, 0x7FFF); |
116 | global_clock_event = &pit_ce; | 112 | global_clock_event = &pit_ce; |
117 | } | 113 | } |
118 | 114 | ||
119 | #ifndef CONFIG_X86_64 | 115 | #ifndef CONFIG_X86_64 |
120 | /* | ||
121 | * Since the PIT overflows every tick, its not very useful | ||
122 | * to just read by itself. So use jiffies to emulate a free | ||
123 | * running counter: | ||
124 | */ | ||
125 | static cycle_t pit_read(struct clocksource *cs) | ||
126 | { | ||
127 | static int old_count; | ||
128 | static u32 old_jifs; | ||
129 | unsigned long flags; | ||
130 | int count; | ||
131 | u32 jifs; | ||
132 | |||
133 | raw_spin_lock_irqsave(&i8253_lock, flags); | ||
134 | /* | ||
135 | * Although our caller may have the read side of xtime_lock, | ||
136 | * this is now a seqlock, and we are cheating in this routine | ||
137 | * by having side effects on state that we cannot undo if | ||
138 | * there is a collision on the seqlock and our caller has to | ||
139 | * retry. (Namely, old_jifs and old_count.) So we must treat | ||
140 | * jiffies as volatile despite the lock. We read jiffies | ||
141 | * before latching the timer count to guarantee that although | ||
142 | * the jiffies value might be older than the count (that is, | ||
143 | * the counter may underflow between the last point where | ||
144 | * jiffies was incremented and the point where we latch the | ||
145 | * count), it cannot be newer. | ||
146 | */ | ||
147 | jifs = jiffies; | ||
148 | outb_pit(0x00, PIT_MODE); /* latch the count ASAP */ | ||
149 | count = inb_pit(PIT_CH0); /* read the latched count */ | ||
150 | count |= inb_pit(PIT_CH0) << 8; | ||
151 | |||
152 | /* VIA686a test code... reset the latch if count > max + 1 */ | ||
153 | if (count > LATCH) { | ||
154 | outb_pit(0x34, PIT_MODE); | ||
155 | outb_pit(LATCH & 0xff, PIT_CH0); | ||
156 | outb_pit(LATCH >> 8, PIT_CH0); | ||
157 | count = LATCH - 1; | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * It's possible for count to appear to go the wrong way for a | ||
162 | * couple of reasons: | ||
163 | * | ||
164 | * 1. The timer counter underflows, but we haven't handled the | ||
165 | * resulting interrupt and incremented jiffies yet. | ||
166 | * 2. Hardware problem with the timer, not giving us continuous time, | ||
167 | * the counter does small "jumps" upwards on some Pentium systems, | ||
168 | * (see c't 95/10 page 335 for Neptun bug.) | ||
169 | * | ||
170 | * Previous attempts to handle these cases intelligently were | ||
171 | * buggy, so we just do the simple thing now. | ||
172 | */ | ||
173 | if (count > old_count && jifs == old_jifs) | ||
174 | count = old_count; | ||
175 | |||
176 | old_count = count; | ||
177 | old_jifs = jifs; | ||
178 | |||
179 | raw_spin_unlock_irqrestore(&i8253_lock, flags); | ||
180 | |||
181 | count = (LATCH - 1) - count; | ||
182 | |||
183 | return (cycle_t)(jifs * LATCH) + count; | ||
184 | } | ||
185 | |||
186 | static struct clocksource pit_cs = { | ||
187 | .name = "pit", | ||
188 | .rating = 110, | ||
189 | .read = pit_read, | ||
190 | .mask = CLOCKSOURCE_MASK(32), | ||
191 | .mult = 0, | ||
192 | .shift = 20, | ||
193 | }; | ||
194 | |||
195 | static int __init init_pit_clocksource(void) | 116 | static int __init init_pit_clocksource(void) |
196 | { | 117 | { |
197 | /* | 118 | /* |
@@ -205,10 +126,7 @@ static int __init init_pit_clocksource(void) | |||
205 | pit_ce.mode != CLOCK_EVT_MODE_PERIODIC) | 126 | pit_ce.mode != CLOCK_EVT_MODE_PERIODIC) |
206 | return 0; | 127 | return 0; |
207 | 128 | ||
208 | pit_cs.mult = clocksource_hz2mult(CLOCK_TICK_RATE, pit_cs.shift); | 129 | return clocksource_i8253_init(); |
209 | |||
210 | return clocksource_register(&pit_cs); | ||
211 | } | 130 | } |
212 | arch_initcall(init_pit_clocksource); | 131 | arch_initcall(init_pit_clocksource); |
213 | |||
214 | #endif /* !CONFIG_X86_64 */ | 132 | #endif /* !CONFIG_X86_64 */ |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 1cb0b9fc78dc..6c0802eb2f7f 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -249,7 +249,7 @@ void fixup_irqs(void) | |||
249 | 249 | ||
250 | data = irq_desc_get_irq_data(desc); | 250 | data = irq_desc_get_irq_data(desc); |
251 | affinity = data->affinity; | 251 | affinity = data->affinity; |
252 | if (!irq_has_action(irq) || | 252 | if (!irq_has_action(irq) || irqd_is_per_cpu(data) || |
253 | cpumask_subset(affinity, cpu_online_mask)) { | 253 | cpumask_subset(affinity, cpu_online_mask)) { |
254 | raw_spin_unlock(&desc->lock); | 254 | raw_spin_unlock(&desc->lock); |
255 | continue; | 255 | continue; |
@@ -276,7 +276,8 @@ void fixup_irqs(void) | |||
276 | else if (!(warned++)) | 276 | else if (!(warned++)) |
277 | set_affinity = 0; | 277 | set_affinity = 0; |
278 | 278 | ||
279 | if (!irqd_can_move_in_process_context(data) && chip->irq_unmask) | 279 | if (!irqd_can_move_in_process_context(data) && |
280 | !irqd_irq_disabled(data) && chip->irq_unmask) | ||
280 | chip->irq_unmask(data); | 281 | chip->irq_unmask(data); |
281 | 282 | ||
282 | raw_spin_unlock(&desc->lock); | 283 | raw_spin_unlock(&desc->lock); |
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index 961b6b30ba90..3fee346ef545 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c | |||
@@ -34,7 +34,7 @@ void arch_jump_label_transform(struct jump_entry *entry, | |||
34 | code.offset = entry->target - | 34 | code.offset = entry->target - |
35 | (entry->code + JUMP_LABEL_NOP_SIZE); | 35 | (entry->code + JUMP_LABEL_NOP_SIZE); |
36 | } else | 36 | } else |
37 | memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE); | 37 | memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE); |
38 | get_online_cpus(); | 38 | get_online_cpus(); |
39 | mutex_lock(&text_mutex); | 39 | mutex_lock(&text_mutex); |
40 | text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); | 40 | text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); |
@@ -44,7 +44,8 @@ void arch_jump_label_transform(struct jump_entry *entry, | |||
44 | 44 | ||
45 | void arch_jump_label_text_poke_early(jump_label_t addr) | 45 | void arch_jump_label_text_poke_early(jump_label_t addr) |
46 | { | 46 | { |
47 | text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE); | 47 | text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5], |
48 | JUMP_LABEL_NOP_SIZE); | ||
48 | } | 49 | } |
49 | 50 | ||
50 | #endif | 51 | #endif |
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index c969fd9d1566..f1a6244d7d93 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, | |||
1183 | struct pt_regs *regs) | 1183 | struct pt_regs *regs) |
1184 | { | 1184 | { |
1185 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 1185 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
1186 | unsigned long flags; | ||
1186 | 1187 | ||
1187 | /* This is possible if op is under delayed unoptimizing */ | 1188 | /* This is possible if op is under delayed unoptimizing */ |
1188 | if (kprobe_disabled(&op->kp)) | 1189 | if (kprobe_disabled(&op->kp)) |
1189 | return; | 1190 | return; |
1190 | 1191 | ||
1191 | preempt_disable(); | 1192 | local_irq_save(flags); |
1192 | if (kprobe_running()) { | 1193 | if (kprobe_running()) { |
1193 | kprobes_inc_nmissed_count(&op->kp); | 1194 | kprobes_inc_nmissed_count(&op->kp); |
1194 | } else { | 1195 | } else { |
@@ -1207,7 +1208,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, | |||
1207 | opt_pre_handler(&op->kp, regs); | 1208 | opt_pre_handler(&op->kp, regs); |
1208 | __this_cpu_write(current_kprobe, NULL); | 1209 | __this_cpu_write(current_kprobe, NULL); |
1209 | } | 1210 | } |
1210 | preempt_enable_no_resched(); | 1211 | local_irq_restore(flags); |
1211 | } | 1212 | } |
1212 | 1213 | ||
1213 | static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) | 1214 | static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) |
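The optimized_callback() change widens the critical section from preempt-off to interrupts-off, so an optimized probe runs in the same context a real INT3 breakpoint handler would see. The resulting pattern, as a minimal sketch:

	unsigned long flags;

	local_irq_save(flags);	/* irqs off also implies no preemption */
	/* ... set up the breakpoint-like environment, run the pre-handler ... */
	local_irq_restore(flags);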
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index f98d3eafe07a..6389a6bca11b 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -26,8 +26,6 @@ | |||
26 | #include <asm/x86_init.h> | 26 | #include <asm/x86_init.h> |
27 | #include <asm/reboot.h> | 27 | #include <asm/reboot.h> |
28 | 28 | ||
29 | #define KVM_SCALE 22 | ||
30 | |||
31 | static int kvmclock = 1; | 29 | static int kvmclock = 1; |
32 | static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME; | 30 | static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME; |
33 | static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK; | 31 | static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK; |
@@ -120,8 +118,6 @@ static struct clocksource kvm_clock = { | |||
120 | .read = kvm_clock_get_cycles, | 118 | .read = kvm_clock_get_cycles, |
121 | .rating = 400, | 119 | .rating = 400, |
122 | .mask = CLOCKSOURCE_MASK(64), | 120 | .mask = CLOCKSOURCE_MASK(64), |
123 | .mult = 1 << KVM_SCALE, | ||
124 | .shift = KVM_SCALE, | ||
125 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 121 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
126 | }; | 122 | }; |
127 | 123 | ||
@@ -203,7 +199,7 @@ void __init kvmclock_init(void) | |||
203 | machine_ops.crash_shutdown = kvm_crash_shutdown; | 199 | machine_ops.crash_shutdown = kvm_crash_shutdown; |
204 | #endif | 200 | #endif |
205 | kvm_get_preset_lpj(); | 201 | kvm_get_preset_lpj(); |
206 | clocksource_register(&kvm_clock); | 202 | clocksource_register_hz(&kvm_clock, NSEC_PER_SEC); |
207 | pv_info.paravirt_enabled = 1; | 203 | pv_info.paravirt_enabled = 1; |
208 | pv_info.name = "KVM"; | 204 | pv_info.name = "KVM"; |
209 | 205 | ||
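kvmclock drops its hand-coded mult/shift pair (1 << 22 / 22) in favor of clocksource_register_hz(), which computes a near-optimal pair from the counter frequency; kvmclock reports time in nanoseconds, hence NSEC_PER_SEC:

	/* the core picks .mult/.shift so that ns = (cycles * mult) >> shift
	 * holds for a 1 GHz (nanosecond-granular) counter */
	clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);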
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 5a532ce646bf..6f9bfffb2720 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -715,17 +715,15 @@ static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) | |||
715 | } | 715 | } |
716 | } | 716 | } |
717 | 717 | ||
718 | static int | 718 | static int __init |
719 | check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count) | 719 | check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count) |
720 | { | 720 | { |
721 | int ret = 0; | ||
722 | |||
723 | if (!mpc_new_phys || count <= mpc_new_length) { | 721 | if (!mpc_new_phys || count <= mpc_new_length) { |
724 | WARN(1, "update_mptable: No spare slots (length: %x)\n", count); | 722 | WARN(1, "update_mptable: No spare slots (length: %x)\n", count); |
725 | return -1; | 723 | return -1; |
726 | } | 724 | } |
727 | 725 | ||
728 | return ret; | 726 | return 0; |
729 | } | 727 | } |
730 | #else /* CONFIG_X86_IO_APIC */ | 728 | #else /* CONFIG_X86_IO_APIC */ |
731 | static | 729 | static |
diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c index 55d745ec1181..35ccf75696eb 100644 --- a/arch/x86/kernel/pci-iommu_table.c +++ b/arch/x86/kernel/pci-iommu_table.c | |||
@@ -50,20 +50,14 @@ void __init check_iommu_entries(struct iommu_table_entry *start, | |||
50 | struct iommu_table_entry *finish) | 50 | struct iommu_table_entry *finish) |
51 | { | 51 | { |
52 | struct iommu_table_entry *p, *q, *x; | 52 | struct iommu_table_entry *p, *q, *x; |
53 | char sym_p[KSYM_SYMBOL_LEN]; | ||
54 | char sym_q[KSYM_SYMBOL_LEN]; | ||
55 | 53 | ||
56 | /* Simple cyclic dependency checker. */ | 54 | /* Simple cyclic dependency checker. */ |
57 | for (p = start; p < finish; p++) { | 55 | for (p = start; p < finish; p++) { |
58 | q = find_dependents_of(start, finish, p); | 56 | q = find_dependents_of(start, finish, p); |
59 | x = find_dependents_of(start, finish, q); | 57 | x = find_dependents_of(start, finish, q); |
60 | if (p == x) { | 58 | if (p == x) { |
61 | sprint_symbol(sym_p, (unsigned long)p->detect); | 59 | printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n", |
62 | sprint_symbol(sym_q, (unsigned long)q->detect); | 60 | p->detect, q->detect); |
63 | |||
64 | printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %s depends" \ | ||
65 | " on %s and vice-versa. BREAKING IT.\n", | ||
66 | sym_p, sym_q); | ||
67 | /* Heavy handed way..*/ | 61 | /* Heavy handed way..*/ |
68 | x->depend = 0; | 62 | x->depend = 0; |
69 | } | 63 | } |
@@ -72,12 +66,8 @@ void __init check_iommu_entries(struct iommu_table_entry *start, | |||
72 | for (p = start; p < finish; p++) { | 66 | for (p = start; p < finish; p++) { |
73 | q = find_dependents_of(p, finish, p); | 67 | q = find_dependents_of(p, finish, p); |
74 | if (q && q > p) { | 68 | if (q && q > p) { |
75 | sprint_symbol(sym_p, (unsigned long)p->detect); | 69 | printk(KERN_ERR "EXECUTION ORDER INVALID! %pS should be called before %pS!\n", |
76 | sprint_symbol(sym_q, (unsigned long)q->detect); | 70 | p->detect, q->detect); |
77 | |||
78 | printk(KERN_ERR "EXECUTION ORDER INVALID! %s "\ | ||
79 | "should be called before %s!\n", | ||
80 | sym_p, sym_q); | ||
81 | } | 71 | } |
82 | } | 72 | } |
83 | } | 73 | } |
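The pci-iommu_table cleanup leans on the %pS vsprintf extension, which symbolizes a code pointer at print time and removes the need for two KSYM_SYMBOL_LEN stack buffers and sprint_symbol() calls. Minimal usage sketch:

	/* fn can be any text-segment pointer, e.g. p->detect above */
	printk(KERN_INFO "detect routine: %pS\n", fn);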
diff --git a/arch/x86/kernel/probe_roms_32.c b/arch/x86/kernel/probe_roms.c index 071e7fea42e5..ba0a4cce53be 100644 --- a/arch/x86/kernel/probe_roms_32.c +++ b/arch/x86/kernel/probe_roms.c | |||
@@ -73,6 +73,107 @@ static struct resource video_rom_resource = { | |||
73 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM | 73 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM |
74 | }; | 74 | }; |
75 | 75 | ||
76 | /* does this oprom support the given pci device, or any of the devices | ||
77 | * that the driver supports? | ||
78 | */ | ||
79 | static bool match_id(struct pci_dev *pdev, unsigned short vendor, unsigned short device) | ||
80 | { | ||
81 | struct pci_driver *drv = pdev->driver; | ||
82 | const struct pci_device_id *id; | ||
83 | |||
84 | if (pdev->vendor == vendor && pdev->device == device) | ||
85 | return true; | ||
86 | |||
87 | for (id = drv ? drv->id_table : NULL; id && id->vendor; id++) | ||
88 | if (id->vendor == vendor && id->device == device) | ||
89 | break; | ||
90 | |||
91 | return id && id->vendor; | ||
92 | } | ||
93 | |||
94 | static bool probe_list(struct pci_dev *pdev, unsigned short vendor, | ||
95 | const unsigned char *rom_list) | ||
96 | { | ||
97 | unsigned short device; | ||
98 | |||
99 | do { | ||
100 | if (probe_kernel_address(rom_list, device) != 0) | ||
101 | device = 0; | ||
102 | |||
103 | if (device && match_id(pdev, vendor, device)) | ||
104 | break; | ||
105 | |||
106 | rom_list += 2; | ||
107 | } while (device); | ||
108 | |||
109 | return !!device; | ||
110 | } | ||
111 | |||
112 | static struct resource *find_oprom(struct pci_dev *pdev) | ||
113 | { | ||
114 | struct resource *oprom = NULL; | ||
115 | int i; | ||
116 | |||
117 | for (i = 0; i < ARRAY_SIZE(adapter_rom_resources); i++) { | ||
118 | struct resource *res = &adapter_rom_resources[i]; | ||
119 | unsigned short offset, vendor, device, list, rev; | ||
120 | const unsigned char *rom; | ||
121 | |||
122 | if (res->end == 0) | ||
123 | break; | ||
124 | |||
125 | rom = isa_bus_to_virt(res->start); | ||
126 | if (probe_kernel_address(rom + 0x18, offset) != 0) | ||
127 | continue; | ||
128 | |||
129 | if (probe_kernel_address(rom + offset + 0x4, vendor) != 0) | ||
130 | continue; | ||
131 | |||
132 | if (probe_kernel_address(rom + offset + 0x6, device) != 0) | ||
133 | continue; | ||
134 | |||
135 | if (match_id(pdev, vendor, device)) { | ||
136 | oprom = res; | ||
137 | break; | ||
138 | } | ||
139 | |||
140 | if (probe_kernel_address(rom + offset + 0x8, list) == 0 && | ||
141 | probe_kernel_address(rom + offset + 0xc, rev) == 0 && | ||
142 | rev >= 3 && list && | ||
143 | probe_list(pdev, vendor, rom + offset + list)) { | ||
144 | oprom = res; | ||
145 | break; | ||
146 | } | ||
147 | } | ||
148 | |||
149 | return oprom; | ||
150 | } | ||
151 | |||
152 | void *pci_map_biosrom(struct pci_dev *pdev) | ||
153 | { | ||
154 | struct resource *oprom = find_oprom(pdev); | ||
155 | |||
156 | if (!oprom) | ||
157 | return NULL; | ||
158 | |||
159 | return ioremap(oprom->start, resource_size(oprom)); | ||
160 | } | ||
161 | EXPORT_SYMBOL(pci_map_biosrom); | ||
162 | |||
163 | void pci_unmap_biosrom(void __iomem *image) | ||
164 | { | ||
165 | iounmap(image); | ||
166 | } | ||
167 | EXPORT_SYMBOL(pci_unmap_biosrom); | ||
168 | |||
169 | size_t pci_biosrom_size(struct pci_dev *pdev) | ||
170 | { | ||
171 | struct resource *oprom = find_oprom(pdev); | ||
172 | |||
173 | return oprom ? resource_size(oprom) : 0; | ||
174 | } | ||
175 | EXPORT_SYMBOL(pci_biosrom_size); | ||
176 | |||
76 | #define ROMSIGNATURE 0xaa55 | 177 | #define ROMSIGNATURE 0xaa55 |
77 | 178 | ||
78 | static int __init romsignature(const unsigned char *rom) | 179 | static int __init romsignature(const unsigned char *rom) |
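probe_roms_32.c becomes the shared probe_roms.c and gains a small API for drivers that want the shadowed option ROM of their PCI device. A hedged usage sketch from a driver's point of view (error handling abbreviated):

	void __iomem *rom = pci_map_biosrom(pdev);	/* NULL if no match */
	size_t size = pci_biosrom_size(pdev);

	if (rom && size) {
		/* ... parse/copy the ROM image ... */
		pci_unmap_biosrom(rom);
	}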
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index d46cbe46b7ab..88a90a977f8e 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -449,7 +449,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); | |||
449 | void mwait_idle_with_hints(unsigned long ax, unsigned long cx) | 449 | void mwait_idle_with_hints(unsigned long ax, unsigned long cx) |
450 | { | 450 | { |
451 | if (!need_resched()) { | 451 | if (!need_resched()) { |
452 | if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR)) | 452 | if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) |
453 | clflush((void *)¤t_thread_info()->flags); | 453 | clflush((void *)¤t_thread_info()->flags); |
454 | 454 | ||
455 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 455 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
@@ -465,7 +465,7 @@ static void mwait_idle(void) | |||
465 | if (!need_resched()) { | 465 | if (!need_resched()) { |
466 | trace_power_start(POWER_CSTATE, 1, smp_processor_id()); | 466 | trace_power_start(POWER_CSTATE, 1, smp_processor_id()); |
467 | trace_cpu_idle(1, smp_processor_id()); | 467 | trace_cpu_idle(1, smp_processor_id()); |
468 | if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR)) | 468 | if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) |
469 | clflush((void *)¤t_thread_info()->flags); | 469 | clflush((void *)¤t_thread_info()->flags); |
470 | 470 | ||
471 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 471 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
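Both mwait paths move to this_cpu_has(), which tests a feature bit of the executing CPU against the per-cpu cpu_info without first materializing a struct pointer:

	/* before */
	if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
		clflush(addr);
	/* after */
	if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
		clflush(addr);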
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 08c44b08bf5b..0c016f727695 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(pm_power_off); | |||
36 | 36 | ||
37 | static const struct desc_ptr no_idt = {}; | 37 | static const struct desc_ptr no_idt = {}; |
38 | static int reboot_mode; | 38 | static int reboot_mode; |
39 | enum reboot_type reboot_type = BOOT_KBD; | 39 | enum reboot_type reboot_type = BOOT_ACPI; |
40 | int reboot_force; | 40 | int reboot_force; |
41 | 41 | ||
42 | #if defined(CONFIG_X86_32) && defined(CONFIG_SMP) | 42 | #if defined(CONFIG_X86_32) && defined(CONFIG_SMP) |
@@ -478,9 +478,24 @@ void __attribute__((weak)) mach_reboot_fixups(void) | |||
478 | { | 478 | { |
479 | } | 479 | } |
480 | 480 | ||
481 | /* | ||
482 | * Windows compatible x86 hardware expects the following on reboot: | ||
483 | * | ||
484 | * 1) If the FADT has the ACPI reboot register flag set, try it | ||
485 | * 2) If still alive, write to the keyboard controller | ||
486 | * 3) If still alive, write to the ACPI reboot register again | ||
487 | * 4) If still alive, write to the keyboard controller again | ||
488 | * | ||
489 | * If the machine is still alive at this stage, it gives up. We default to | ||
490 | * following the same pattern, except that if we're still alive after (4) we'll | ||
491 | * try to force a triple fault and then cycle between hitting the keyboard | ||
492 | * controller and forcing the triple fault | ||
493 | */ | ||
481 | static void native_machine_emergency_restart(void) | 494 | static void native_machine_emergency_restart(void) |
482 | { | 495 | { |
483 | int i; | 496 | int i; |
497 | int attempt = 0; | ||
498 | int orig_reboot_type = reboot_type; | ||
484 | 499 | ||
485 | if (reboot_emergency) | 500 | if (reboot_emergency) |
486 | emergency_vmx_disable_all(); | 501 | emergency_vmx_disable_all(); |
@@ -502,6 +517,13 @@ static void native_machine_emergency_restart(void) | |||
502 | outb(0xfe, 0x64); /* pulse reset low */ | 517 | outb(0xfe, 0x64); /* pulse reset low */ |
503 | udelay(50); | 518 | udelay(50); |
504 | } | 519 | } |
520 | if (attempt == 0 && orig_reboot_type == BOOT_ACPI) { | ||
521 | attempt = 1; | ||
522 | reboot_type = BOOT_ACPI; | ||
523 | } else { | ||
524 | reboot_type = BOOT_TRIPLE; | ||
525 | } | ||
526 | break; | ||
505 | 527 | ||
506 | case BOOT_TRIPLE: | 528 | case BOOT_TRIPLE: |
507 | load_idt(&no_idt); | 529 | load_idt(&no_idt); |
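With BOOT_ACPI as the new default, the emergency-restart switch walks the Windows-compatible sequence from the comment above. The attempted order, as a sketch:

	/*
	 * attempt 0: ACPI reset register
	 * attempt 1: keyboard controller pulse
	 * attempt 2: ACPI reset register again (only if we began with ACPI)
	 * attempt 3: keyboard controller again
	 * then:      triple fault, alternating with the keyboard controller
	 */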
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 4be9b398470e..c3050af9306d 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -691,8 +691,6 @@ early_param("reservelow", parse_reservelow); | |||
691 | 691 | ||
692 | void __init setup_arch(char **cmdline_p) | 692 | void __init setup_arch(char **cmdline_p) |
693 | { | 693 | { |
694 | unsigned long flags; | ||
695 | |||
696 | #ifdef CONFIG_X86_32 | 694 | #ifdef CONFIG_X86_32 |
697 | memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); | 695 | memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); |
698 | visws_early_detect(); | 696 | visws_early_detect(); |
@@ -1041,9 +1039,7 @@ void __init setup_arch(char **cmdline_p) | |||
1041 | 1039 | ||
1042 | mcheck_init(); | 1040 | mcheck_init(); |
1043 | 1041 | ||
1044 | local_irq_save(flags); | 1042 | arch_init_ideal_nops(); |
1045 | arch_init_ideal_nop5(); | ||
1046 | local_irq_restore(flags); | ||
1047 | } | 1043 | } |
1048 | 1044 | ||
1049 | #ifdef CONFIG_X86_32 | 1045 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 513deac7228d..013e7eba83bb 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
@@ -194,14 +194,13 @@ static void native_stop_other_cpus(int wait) | |||
194 | } | 194 | } |
195 | 195 | ||
196 | /* | 196 | /* |
197 | * Reschedule call back. Nothing to do, | 197 | * Reschedule call back. |
198 | * all the work is done automatically when | ||
199 | * we return from the interrupt. | ||
200 | */ | 198 | */ |
201 | void smp_reschedule_interrupt(struct pt_regs *regs) | 199 | void smp_reschedule_interrupt(struct pt_regs *regs) |
202 | { | 200 | { |
203 | ack_APIC_irq(); | 201 | ack_APIC_irq(); |
204 | inc_irq_stat(irq_resched_count); | 202 | inc_irq_stat(irq_resched_count); |
203 | scheduler_ipi(); | ||
205 | /* | 204 | /* |
206 | * KVM uses this interrupt to force a cpu out of guest mode | 205 | * KVM uses this interrupt to force a cpu out of guest mode |
207 | */ | 206 | */ |
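smp_reschedule_interrupt() now calls into the scheduler explicitly; merely returning from the interrupt is no longer sufficient because the scheduler in this series wants a hook for remote wakeups and nohz idle kicks. Shape of the hook, body elided (the concrete contents depend on the paired sched patches):

	/* kernel/sched.c -- sketch only */
	void scheduler_ipi(void)
	{
		/* process queued remote wakeups, kick nohz balancing, ... */
	}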
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c2871d3c71b6..a3c430bdfb60 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1332,9 +1332,9 @@ static inline void mwait_play_dead(void) | |||
1332 | void *mwait_ptr; | 1332 | void *mwait_ptr; |
1333 | struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); | 1333 | struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); |
1334 | 1334 | ||
1335 | if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c))) | 1335 | if (!this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)) |
1336 | return; | 1336 | return; |
1337 | if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH)) | 1337 | if (!this_cpu_has(X86_FEATURE_CLFLSH)) |
1338 | return; | 1338 | return; |
1339 | if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF) | 1339 | if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF) |
1340 | return; | 1340 | return; |
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index c11514e9128b..6f164bd5e14d 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -35,7 +35,7 @@ void iommu_shutdown_noop(void) { } | |||
35 | struct x86_init_ops x86_init __initdata = { | 35 | struct x86_init_ops x86_init __initdata = { |
36 | 36 | ||
37 | .resources = { | 37 | .resources = { |
38 | .probe_roms = x86_init_noop, | 38 | .probe_roms = probe_roms, |
39 | .reserve_resources = reserve_standard_io_resources, | 39 | .reserve_resources = reserve_standard_io_resources, |
40 | .memory_setup = default_machine_specific_memory_setup, | 40 | .memory_setup = default_machine_specific_memory_setup, |
41 | }, | 41 | }, |
@@ -61,6 +61,10 @@ struct x86_init_ops x86_init __initdata = { | |||
61 | .banner = default_banner, | 61 | .banner = default_banner, |
62 | }, | 62 | }, |
63 | 63 | ||
64 | .mapping = { | ||
65 | .pagetable_reserve = native_pagetable_reserve, | ||
66 | }, | ||
67 | |||
64 | .paging = { | 68 | .paging = { |
65 | .pagetable_setup_start = native_pagetable_setup_start, | 69 | .pagetable_setup_start = native_pagetable_setup_start, |
66 | .pagetable_setup_done = native_pagetable_setup_done, | 70 | .pagetable_setup_done = native_pagetable_setup_done, |
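The new x86_init.mapping.pagetable_reserve hook lets a paravirtualized guest override how the early pagetable buffer is reserved. A hypothetical override registration, analogous to what Xen does in this series (xen_pagetable_reserve is an assumed name):

	x86_init.mapping.pagetable_reserve = xen_pagetable_reserve;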
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 1cd608973ce5..e191c096ab90 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * kernel and insert a module (lg.ko) which allows us to run other Linux | 7 | * kernel and insert a module (lg.ko) which allows us to run other Linux |
8 | * kernels the same way we'd run processes. We call the first kernel the Host, | 8 | * kernels the same way we'd run processes. We call the first kernel the Host, |
9 | * and the others the Guests. The program which sets up and configures Guests | 9 | * and the others the Guests. The program which sets up and configures Guests |
10 | * (such as the example in Documentation/lguest/lguest.c) is called the | 10 | * (such as the example in Documentation/virtual/lguest/lguest.c) is called the |
11 | * Launcher. | 11 | * Launcher. |
12 | * | 12 | * |
13 | * Secondly, we only run specially modified Guests, not normal kernels: setting | 13 | * Secondly, we only run specially modified Guests, not normal kernels: setting |
@@ -913,8 +913,6 @@ static struct clocksource lguest_clock = { | |||
913 | .rating = 200, | 913 | .rating = 200, |
914 | .read = lguest_clock_read, | 914 | .read = lguest_clock_read, |
915 | .mask = CLOCKSOURCE_MASK(64), | 915 | .mask = CLOCKSOURCE_MASK(64), |
916 | .mult = 1 << 22, | ||
917 | .shift = 22, | ||
918 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 916 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
919 | }; | 917 | }; |
920 | 918 | ||
@@ -997,7 +995,7 @@ static void lguest_time_init(void) | |||
997 | /* Set up the timer interrupt (0) to go to our simple timer routine */ | 995 | /* Set up the timer interrupt (0) to go to our simple timer routine */ |
998 | irq_set_handler(0, lguest_time_irq); | 996 | irq_set_handler(0, lguest_time_irq); |
999 | 997 | ||
1000 | clocksource_register(&lguest_clock); | 998 | clocksource_register_hz(&lguest_clock, NSEC_PER_SEC); |
1001 | 999 | ||
1002 | /* We can't set cpumask in the initializer: damn C limitations! Set it | 1000 | /* We can't set cpumask in the initializer: damn C limitations! Set it |
1003 | * here and register our timer device. */ | 1001 | * here and register our timer device. */ |
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index daab21dae2d1..efbf2a0ecdea 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S | |||
@@ -67,7 +67,7 @@ ENTRY(memcpy) | |||
67 | jb .Lhandle_tail | 67 | jb .Lhandle_tail |
68 | 68 | ||
69 | /* | 69 | /* |
70 | * We check whether memory false dependece could occur, | 70 | * We check whether memory false dependence could occur, |
71 | * then jump to corresponding copy mode. | 71 | * then jump to corresponding copy mode. |
72 | */ | 72 | */ |
73 | cmp %dil, %sil | 73 | cmp %dil, %sil |
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 3e608edf9958..3d11327c9ab4 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
@@ -23,8 +23,8 @@ mmiotrace-y := kmmio.o pf_in.o mmio-mod.o | |||
23 | obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o | 23 | obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o |
24 | 24 | ||
25 | obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o | 25 | obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o |
26 | obj-$(CONFIG_AMD_NUMA) += amdtopology_64.o | 26 | obj-$(CONFIG_AMD_NUMA) += amdtopology.o |
27 | obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o | 27 | obj-$(CONFIG_ACPI_NUMA) += srat.o |
28 | obj-$(CONFIG_NUMA_EMU) += numa_emulation.o | 28 | obj-$(CONFIG_NUMA_EMU) += numa_emulation.o |
29 | 29 | ||
30 | obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o | 30 | obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o |
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology.c index 0919c26820d4..5247d01329ca 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/nodemask.h> | 13 | #include <linux/nodemask.h> |
14 | #include <linux/memblock.h> | 14 | #include <linux/memblock.h> |
15 | #include <linux/bootmem.h> | ||
15 | 16 | ||
16 | #include <asm/io.h> | 17 | #include <asm/io.h> |
17 | #include <linux/pci_ids.h> | 18 | #include <linux/pci_ids.h> |
@@ -69,10 +70,10 @@ static __init void early_get_boot_cpu_id(void) | |||
69 | 70 | ||
70 | int __init amd_numa_init(void) | 71 | int __init amd_numa_init(void) |
71 | { | 72 | { |
72 | unsigned long start = PFN_PHYS(0); | 73 | u64 start = PFN_PHYS(0); |
73 | unsigned long end = PFN_PHYS(max_pfn); | 74 | u64 end = PFN_PHYS(max_pfn); |
74 | unsigned numnodes; | 75 | unsigned numnodes; |
75 | unsigned long prevbase; | 76 | u64 prevbase; |
76 | int i, j, nb; | 77 | int i, j, nb; |
77 | u32 nodeid, reg; | 78 | u32 nodeid, reg; |
78 | unsigned int bits, cores, apicid_base; | 79 | unsigned int bits, cores, apicid_base; |
@@ -95,7 +96,7 @@ int __init amd_numa_init(void) | |||
95 | 96 | ||
96 | prevbase = 0; | 97 | prevbase = 0; |
97 | for (i = 0; i < 8; i++) { | 98 | for (i = 0; i < 8; i++) { |
98 | unsigned long base, limit; | 99 | u64 base, limit; |
99 | 100 | ||
100 | base = read_pci_config(0, nb, 1, 0x40 + i*8); | 101 | base = read_pci_config(0, nb, 1, 0x40 + i*8); |
101 | limit = read_pci_config(0, nb, 1, 0x44 + i*8); | 102 | limit = read_pci_config(0, nb, 1, 0x44 + i*8); |
@@ -107,18 +108,18 @@ int __init amd_numa_init(void) | |||
107 | continue; | 108 | continue; |
108 | } | 109 | } |
109 | if (nodeid >= numnodes) { | 110 | if (nodeid >= numnodes) { |
110 | pr_info("Ignoring excess node %d (%lx:%lx)\n", nodeid, | 111 | pr_info("Ignoring excess node %d (%Lx:%Lx)\n", nodeid, |
111 | base, limit); | 112 | base, limit); |
112 | continue; | 113 | continue; |
113 | } | 114 | } |
114 | 115 | ||
115 | if (!limit) { | 116 | if (!limit) { |
116 | pr_info("Skipping node entry %d (base %lx)\n", | 117 | pr_info("Skipping node entry %d (base %Lx)\n", |
117 | i, base); | 118 | i, base); |
118 | continue; | 119 | continue; |
119 | } | 120 | } |
120 | if ((base >> 8) & 3 || (limit >> 8) & 3) { | 121 | if ((base >> 8) & 3 || (limit >> 8) & 3) { |
121 | pr_err("Node %d using interleaving mode %lx/%lx\n", | 122 | pr_err("Node %d using interleaving mode %Lx/%Lx\n", |
122 | nodeid, (base >> 8) & 3, (limit >> 8) & 3); | 123 | nodeid, (base >> 8) & 3, (limit >> 8) & 3); |
123 | return -EINVAL; | 124 | return -EINVAL; |
124 | } | 125 | } |
@@ -150,19 +151,19 @@ int __init amd_numa_init(void) | |||
150 | continue; | 151 | continue; |
151 | } | 152 | } |
152 | if (limit < base) { | 153 | if (limit < base) { |
153 | pr_err("Node %d bogus settings %lx-%lx.\n", | 154 | pr_err("Node %d bogus settings %Lx-%Lx.\n", |
154 | nodeid, base, limit); | 155 | nodeid, base, limit); |
155 | continue; | 156 | continue; |
156 | } | 157 | } |
157 | 158 | ||
158 | /* Could sort here, but punt for now. Should not happen anyway. */ | 159 |
159 | if (prevbase > base) { | 160 | if (prevbase > base) { |
160 | pr_err("Node map not sorted %lx,%lx\n", | 161 | pr_err("Node map not sorted %Lx,%Lx\n", |
161 | prevbase, base); | 162 | prevbase, base); |
162 | return -EINVAL; | 163 | return -EINVAL; |
163 | } | 164 | } |
164 | 165 | ||
165 | pr_info("Node %d MemBase %016lx Limit %016lx\n", | 166 | pr_info("Node %d MemBase %016Lx Limit %016Lx\n", |
166 | nodeid, base, limit); | 167 | nodeid, base, limit); |
167 | 168 | ||
168 | prevbase = base; | 169 | prevbase = base; |
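amdtopology_64.c is renamed so the file also builds on 32-bit, where unsigned long is 32 bits wide; physical addresses therefore move to u64, with %Lx as the matching format specifier on either word size. The recurring pattern, simplified:

	u64 base, limit;	/* was unsigned long: would truncate on 32-bit */

	base  = read_pci_config(0, nb, 1, 0x40 + i*8);
	limit = read_pci_config(0, nb, 1, 0x44 + i*8);
	pr_info("Node %d MemBase %016Lx Limit %016Lx\n", nodeid, base, limit);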
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 286d289b039b..37b8b0fe8320 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -81,6 +81,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse, | |||
81 | end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); | 81 | end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); |
82 | } | 82 | } |
83 | 83 | ||
84 | void __init native_pagetable_reserve(u64 start, u64 end) | ||
85 | { | ||
86 | memblock_x86_reserve_range(start, end, "PGTABLE"); | ||
87 | } | ||
88 | |||
84 | struct map_range { | 89 | struct map_range { |
85 | unsigned long start; | 90 | unsigned long start; |
86 | unsigned long end; | 91 | unsigned long end; |
@@ -272,9 +277,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
272 | 277 | ||
273 | __flush_tlb_all(); | 278 | __flush_tlb_all(); |
274 | 279 | ||
280 | /* | ||
281 | * Reserve the kernel pagetable pages we used (pgt_buf_start - | ||
282 | * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top) | ||
283 | * so that they can be reused for other purposes. | ||
284 | * | ||
285 | * On native it just means calling memblock_x86_reserve_range, on Xen it | ||
286 | * also means marking RW the pagetable pages that we allocated before | ||
287 | * but that haven't been used. | ||
288 | * | ||
289 | * In fact on xen we mark RO the whole range pgt_buf_start - | ||
290 | * pgt_buf_top, because we have to make sure that when | ||
291 | * init_memory_mapping reaches the pagetable pages area, it maps | ||
292 | * RO all the pagetable pages, including the ones that are beyond | ||
293 | * pgt_buf_end at that time. | ||
294 | */ | ||
275 | if (!after_bootmem && pgt_buf_end > pgt_buf_start) | 295 | if (!after_bootmem && pgt_buf_end > pgt_buf_start) |
276 | memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, | 296 | x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start), |
277 | pgt_buf_end << PAGE_SHIFT, "PGTABLE"); | 297 | PFN_PHYS(pgt_buf_end)); |
278 | 298 | ||
279 | if (!after_bootmem) | 299 | if (!after_bootmem) |
280 | early_memtest(start, end); | 300 | early_memtest(start, end); |
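init_memory_mapping() now routes the reservation of used pagetable pages through the hook, with PFN_PHYS() making the pfn-to-physical conversion explicit. Per the comment block, a hypervisor implementation would also fix up protections on the unused tail; a hedged sketch of such an override (names are assumptions):

	static void xen_pagetable_reserve(u64 start, u64 end)
	{
		memblock_x86_reserve_range(start, end, "PGTABLE");
		/* additionally re-mark [end, PFN_PHYS(pgt_buf_top)) RW,
		 * since those pages were pre-marked RO but never used */
	}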
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 80088f994193..29f7c6d98179 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -678,8 +678,10 @@ static void __init zone_sizes_init(void) | |||
678 | { | 678 | { |
679 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 679 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
680 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 680 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
681 | #ifdef CONFIG_ZONE_DMA | ||
681 | max_zone_pfns[ZONE_DMA] = | 682 | max_zone_pfns[ZONE_DMA] = |
682 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | 683 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; |
684 | #endif | ||
683 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | 685 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
684 | #ifdef CONFIG_HIGHMEM | 686 | #ifdef CONFIG_HIGHMEM |
685 | max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; | 687 | max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; |
@@ -716,6 +718,7 @@ void __init paging_init(void) | |||
716 | * NOTE: at this point the bootmem allocator is fully available. | 718 | * NOTE: at this point the bootmem allocator is fully available. |
717 | */ | 719 | */ |
718 | olpc_dt_build_devicetree(); | 720 | olpc_dt_build_devicetree(); |
721 | sparse_memory_present_with_active_regions(MAX_NUMNODES); | ||
719 | sparse_init(); | 722 | sparse_init(); |
720 | zone_sizes_init(); | 723 | zone_sizes_init(); |
721 | } | 724 | } |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 794233587287..d865c4aeec55 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -616,7 +616,9 @@ void __init paging_init(void) | |||
616 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 616 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
617 | 617 | ||
618 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 618 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
619 | #ifdef CONFIG_ZONE_DMA | ||
619 | max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; | 620 | max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; |
621 | #endif | ||
620 | max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; | 622 | max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; |
621 | max_zone_pfns[ZONE_NORMAL] = max_pfn; | 623 | max_zone_pfns[ZONE_NORMAL] = max_pfn; |
622 | 624 | ||
@@ -679,14 +681,6 @@ int arch_add_memory(int nid, u64 start, u64 size) | |||
679 | } | 681 | } |
680 | EXPORT_SYMBOL_GPL(arch_add_memory); | 682 | EXPORT_SYMBOL_GPL(arch_add_memory); |
681 | 683 | ||
682 | #if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA) | ||
683 | int memory_add_physaddr_to_nid(u64 start) | ||
684 | { | ||
685 | return 0; | ||
686 | } | ||
687 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | ||
688 | #endif | ||
689 | |||
690 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 684 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
691 | 685 | ||
692 | static struct kcore_list kcore_vsyscall; | 686 | static struct kcore_list kcore_vsyscall; |
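Both paging_init() variants guard the ZONE_DMA index, preparing for CONFIG_ZONE_DMA becoming optional on x86; because max_zone_pfns[] is zeroed first, skipping the assignment is safe:

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;	/* index exists only when ZONE_DMA=y */
#endif
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;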
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 0369843511dc..be1ef574ce9a 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -91,13 +91,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, | |||
91 | return (__force void __iomem *)phys_to_virt(phys_addr); | 91 | return (__force void __iomem *)phys_to_virt(phys_addr); |
92 | 92 | ||
93 | /* | 93 | /* |
94 | * Check if the request spans more than any BAR in the iomem resource | ||
95 | * tree. | ||
96 | */ | ||
97 | WARN_ONCE(iomem_map_sanity_check(phys_addr, size), | ||
98 | KERN_INFO "Info: mapping multiple BARs. Your kernel is fine."); | ||
99 | |||
100 | /* | ||
101 | * Don't allow anybody to remap normal RAM that we're using.. | 94 | * Don't allow anybody to remap normal RAM that we're using.. |
102 | */ | 95 | */ |
103 | last_pfn = last_addr >> PAGE_SHIFT; | 96 | last_pfn = last_addr >> PAGE_SHIFT; |
@@ -170,6 +163,13 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, | |||
170 | ret_addr = (void __iomem *) (vaddr + offset); | 163 | ret_addr = (void __iomem *) (vaddr + offset); |
171 | mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr); | 164 | mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr); |
172 | 165 | ||
166 | /* | ||
167 | * Check if the request spans more than any BAR in the iomem resource | ||
168 | * tree. | ||
169 | */ | ||
170 | WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size), | ||
171 | KERN_INFO "Info: mapping multiple BARs. Your kernel is fine."); | ||
172 | |||
173 | return ret_addr; | 173 | return ret_addr; |
174 | err_free_area: | 174 | err_free_area: |
175 | free_vm_area(area); | 175 | free_vm_area(area); |
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 745258dfc4dc..f5510d889a22 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c | |||
@@ -1,11 +1,39 @@ | |||
1 | /* Common code for 32 and 64-bit NUMA */ | 1 | /* Common code for 32 and 64-bit NUMA */ |
2 | #include <linux/topology.h> | 2 | #include <linux/kernel.h> |
3 | #include <linux/module.h> | 3 | #include <linux/mm.h> |
4 | #include <linux/string.h> | ||
5 | #include <linux/init.h> | ||
4 | #include <linux/bootmem.h> | 6 | #include <linux/bootmem.h> |
5 | #include <asm/numa.h> | 7 | #include <linux/memblock.h> |
8 | #include <linux/mmzone.h> | ||
9 | #include <linux/ctype.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/nodemask.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/topology.h> | ||
14 | |||
15 | #include <asm/e820.h> | ||
16 | #include <asm/proto.h> | ||
17 | #include <asm/dma.h> | ||
6 | #include <asm/acpi.h> | 18 | #include <asm/acpi.h> |
19 | #include <asm/amd_nb.h> | ||
20 | |||
21 | #include "numa_internal.h" | ||
7 | 22 | ||
8 | int __initdata numa_off; | 23 | int __initdata numa_off; |
24 | nodemask_t numa_nodes_parsed __initdata; | ||
25 | |||
26 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | ||
27 | EXPORT_SYMBOL(node_data); | ||
28 | |||
29 | static struct numa_meminfo numa_meminfo | ||
30 | #ifndef CONFIG_MEMORY_HOTPLUG | ||
31 | __initdata | ||
32 | #endif | ||
33 | ; | ||
34 | |||
35 | static int numa_distance_cnt; | ||
36 | static u8 *numa_distance; | ||
9 | 37 | ||
10 | static __init int numa_setup(char *opt) | 38 | static __init int numa_setup(char *opt) |
11 | { | 39 | { |
@@ -32,6 +60,15 @@ s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { | |||
32 | [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE | 60 | [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE |
33 | }; | 61 | }; |
34 | 62 | ||
63 | int __cpuinit numa_cpu_node(int cpu) | ||
64 | { | ||
65 | int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); | ||
66 | |||
67 | if (apicid != BAD_APICID) | ||
68 | return __apicid_to_node[apicid]; | ||
69 | return NUMA_NO_NODE; | ||
70 | } | ||
71 | |||
35 | cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; | 72 | cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; |
36 | EXPORT_SYMBOL(node_to_cpumask_map); | 73 | EXPORT_SYMBOL(node_to_cpumask_map); |
37 | 74 | ||
@@ -95,6 +132,407 @@ void __init setup_node_to_cpumask_map(void) | |||
95 | pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); | 132 | pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); |
96 | } | 133 | } |
97 | 134 | ||
135 | static int __init numa_add_memblk_to(int nid, u64 start, u64 end, | ||
136 | struct numa_meminfo *mi) | ||
137 | { | ||
138 | /* ignore zero length blks */ | ||
139 | if (start == end) | ||
140 | return 0; | ||
141 | |||
142 | /* whine about and ignore invalid blks */ | ||
143 | if (start > end || nid < 0 || nid >= MAX_NUMNODES) { | ||
144 | pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n", | ||
145 | nid, start, end); | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | if (mi->nr_blks >= NR_NODE_MEMBLKS) { | ||
150 | pr_err("NUMA: too many memblk ranges\n"); | ||
151 | return -EINVAL; | ||
152 | } | ||
153 | |||
154 | mi->blk[mi->nr_blks].start = start; | ||
155 | mi->blk[mi->nr_blks].end = end; | ||
156 | mi->blk[mi->nr_blks].nid = nid; | ||
157 | mi->nr_blks++; | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo | ||
163 | * @idx: Index of memblk to remove | ||
164 | * @mi: numa_meminfo to remove memblk from | ||
165 | * | ||
166 | * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and | ||
167 | * decrementing @mi->nr_blks. | ||
168 | */ | ||
169 | void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) | ||
170 | { | ||
171 | mi->nr_blks--; | ||
172 | memmove(&mi->blk[idx], &mi->blk[idx + 1], | ||
173 | (mi->nr_blks - idx) * sizeof(mi->blk[0])); | ||
174 | } | ||
175 | |||
176 | /** | ||
177 | * numa_add_memblk - Add one numa_memblk to numa_meminfo | ||
178 | * @nid: NUMA node ID of the new memblk | ||
179 | * @start: Start address of the new memblk | ||
180 | * @end: End address of the new memblk | ||
181 | * | ||
182 | * Add a new memblk to the default numa_meminfo. | ||
183 | * | ||
184 | * RETURNS: | ||
185 | * 0 on success, -errno on failure. | ||
186 | */ | ||
187 | int __init numa_add_memblk(int nid, u64 start, u64 end) | ||
188 | { | ||
189 | return numa_add_memblk_to(nid, start, end, &numa_meminfo); | ||
190 | } | ||
191 | |||
192 | /* Initialize NODE_DATA for a node on the local memory */ | ||
193 | static void __init setup_node_data(int nid, u64 start, u64 end) | ||
194 | { | ||
195 | const u64 nd_low = PFN_PHYS(MAX_DMA_PFN); | ||
196 | const u64 nd_high = PFN_PHYS(max_pfn_mapped); | ||
197 | const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); | ||
198 | bool remapped = false; | ||
199 | u64 nd_pa; | ||
200 | void *nd; | ||
201 | int tnid; | ||
202 | |||
203 | /* | ||
204 | * Don't confuse VM with a node that doesn't have the | ||
205 | * minimum amount of memory: | ||
206 | */ | ||
207 | if (end && (end - start) < NODE_MIN_SIZE) | ||
208 | return; | ||
209 | |||
210 | /* initialize remap allocator before aligning to ZONE_ALIGN */ | ||
211 | init_alloc_remap(nid, start, end); | ||
212 | |||
213 | start = roundup(start, ZONE_ALIGN); | ||
214 | |||
215 | printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n", | ||
216 | nid, start, end); | ||
217 | |||
218 | /* | ||
219 | * Allocate node data. Try remap allocator first, node-local | ||
220 | * memory and then any node. Never allocate in DMA zone. | ||
221 | */ | ||
222 | nd = alloc_remap(nid, nd_size); | ||
223 | if (nd) { | ||
224 | nd_pa = __pa(nd); | ||
225 | remapped = true; | ||
226 | } else { | ||
227 | nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high, | ||
228 | nd_size, SMP_CACHE_BYTES); | ||
229 | if (nd_pa == MEMBLOCK_ERROR) | ||
230 | nd_pa = memblock_find_in_range(nd_low, nd_high, | ||
231 | nd_size, SMP_CACHE_BYTES); | ||
232 | if (nd_pa == MEMBLOCK_ERROR) { | ||
233 | pr_err("Cannot find %zu bytes in node %d\n", | ||
234 | nd_size, nid); | ||
235 | return; | ||
236 | } | ||
237 | memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA"); | ||
238 | nd = __va(nd_pa); | ||
239 | } | ||
240 | |||
241 | /* report and initialize */ | ||
242 | printk(KERN_INFO " NODE_DATA [%016Lx - %016Lx]%s\n", | ||
243 | nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : ""); | ||
244 | tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); | ||
245 | if (!remapped && tnid != nid) | ||
246 | printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid); | ||
247 | |||
248 | node_data[nid] = nd; | ||
249 | memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); | ||
250 | NODE_DATA(nid)->node_id = nid; | ||
251 | NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT; | ||
252 | NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT; | ||
253 | |||
254 | node_set_online(nid); | ||
255 | } | ||
256 | |||
257 | /** | ||
258 | * numa_cleanup_meminfo - Cleanup a numa_meminfo | ||
259 | * @mi: numa_meminfo to clean up | ||
260 | * | ||
261 | * Sanitize @mi by merging and removing unnecessary memblks. Also check for | ||
262 | * conflicts and clear unused memblks. | ||
263 | * | ||
264 | * RETURNS: | ||
265 | * 0 on success, -errno on failure. | ||
266 | */ | ||
267 | int __init numa_cleanup_meminfo(struct numa_meminfo *mi) | ||
268 | { | ||
269 | const u64 low = 0; | ||
270 | const u64 high = PFN_PHYS(max_pfn); | ||
271 | int i, j, k; | ||
272 | |||
273 | /* first, trim all entries */ | ||
274 | for (i = 0; i < mi->nr_blks; i++) { | ||
275 | struct numa_memblk *bi = &mi->blk[i]; | ||
276 | |||
277 | /* make sure all blocks are inside the limits */ | ||
278 | bi->start = max(bi->start, low); | ||
279 | bi->end = min(bi->end, high); | ||
280 | |||
281 | /* and there's no empty block */ | ||
282 | if (bi->start >= bi->end) | ||
283 | numa_remove_memblk_from(i--, mi); | ||
284 | } | ||
285 | |||
286 | /* merge neighboring / overlapping entries */ | ||
287 | for (i = 0; i < mi->nr_blks; i++) { | ||
288 | struct numa_memblk *bi = &mi->blk[i]; | ||
289 | |||
290 | for (j = i + 1; j < mi->nr_blks; j++) { | ||
291 | struct numa_memblk *bj = &mi->blk[j]; | ||
292 | u64 start, end; | ||
293 | |||
294 | /* | ||
295 | * See whether there are overlapping blocks. Whine | ||
296 | * about but allow overlaps of the same nid. They | ||
297 | * will be merged below. | ||
298 | */ | ||
299 | if (bi->end > bj->start && bi->start < bj->end) { | ||
300 | if (bi->nid != bj->nid) { | ||
301 | pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", | ||
302 | bi->nid, bi->start, bi->end, | ||
303 | bj->nid, bj->start, bj->end); | ||
304 | return -EINVAL; | ||
305 | } | ||
306 | pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", | ||
307 | bi->nid, bi->start, bi->end, | ||
308 | bj->start, bj->end); | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * Join together blocks on the same node, holes | ||
313 | * between which don't overlap with memory on other | ||
314 | * nodes. | ||
315 | */ | ||
316 | if (bi->nid != bj->nid) | ||
317 | continue; | ||
318 | start = min(bi->start, bj->start); | ||
319 | end = max(bi->end, bj->end); | ||
320 | for (k = 0; k < mi->nr_blks; k++) { | ||
321 | struct numa_memblk *bk = &mi->blk[k]; | ||
322 | |||
323 | if (bi->nid == bk->nid) | ||
324 | continue; | ||
325 | if (start < bk->end && end > bk->start) | ||
326 | break; | ||
327 | } | ||
328 | if (k < mi->nr_blks) | ||
329 | continue; | ||
330 | printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n", | ||
331 | bi->nid, bi->start, bi->end, bj->start, bj->end, | ||
332 | start, end); | ||
333 | bi->start = start; | ||
334 | bi->end = end; | ||
335 | numa_remove_memblk_from(j--, mi); | ||
336 | } | ||
337 | } | ||
338 | |||
339 | /* clear unused ones */ | ||
340 | for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) { | ||
341 | mi->blk[i].start = mi->blk[i].end = 0; | ||
342 | mi->blk[i].nid = NUMA_NO_NODE; | ||
343 | } | ||
344 | |||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | /* | ||
349 | * Set nodes, which have memory in @mi, in *@nodemask. | ||
350 | */ | ||
351 | static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, | ||
352 | const struct numa_meminfo *mi) | ||
353 | { | ||
354 | int i; | ||
355 | |||
356 | for (i = 0; i < ARRAY_SIZE(mi->blk); i++) | ||
357 | if (mi->blk[i].start != mi->blk[i].end && | ||
358 | mi->blk[i].nid != NUMA_NO_NODE) | ||
359 | node_set(mi->blk[i].nid, *nodemask); | ||
360 | } | ||
361 | |||
362 | /** | ||
363 | * numa_reset_distance - Reset NUMA distance table | ||
364 | * | ||
365 | * The current table is freed. The next numa_set_distance() call will | ||
366 | * create a new one. | ||
367 | */ | ||
368 | void __init numa_reset_distance(void) | ||
369 | { | ||
370 | size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); | ||
371 | |||
372 | /* numa_distance could be 1LU marking allocation failure, test cnt */ | ||
373 | if (numa_distance_cnt) | ||
374 | memblock_x86_free_range(__pa(numa_distance), | ||
375 | __pa(numa_distance) + size); | ||
376 | numa_distance_cnt = 0; | ||
377 | numa_distance = NULL; /* enable table creation */ | ||
378 | } | ||
379 | |||
380 | static int __init numa_alloc_distance(void) | ||
381 | { | ||
382 | nodemask_t nodes_parsed; | ||
383 | size_t size; | ||
384 | int i, j, cnt = 0; | ||
385 | u64 phys; | ||
386 | |||
387 | /* size the new table and allocate it */ | ||
388 | nodes_parsed = numa_nodes_parsed; | ||
389 | numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); | ||
390 | |||
391 | for_each_node_mask(i, nodes_parsed) | ||
392 | cnt = i; | ||
393 | cnt++; | ||
394 | size = cnt * cnt * sizeof(numa_distance[0]); | ||
395 | |||
396 | phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), | ||
397 | size, PAGE_SIZE); | ||
398 | if (phys == MEMBLOCK_ERROR) { | ||
399 | pr_warning("NUMA: Warning: can't allocate distance table!\n"); | ||
400 | /* don't retry until explicitly reset */ | ||
401 | numa_distance = (void *)1LU; | ||
402 | return -ENOMEM; | ||
403 | } | ||
404 | memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); | ||
405 | |||
406 | numa_distance = __va(phys); | ||
407 | numa_distance_cnt = cnt; | ||
408 | |||
409 | /* fill with the default distances */ | ||
410 | for (i = 0; i < cnt; i++) | ||
411 | for (j = 0; j < cnt; j++) | ||
412 | numa_distance[i * cnt + j] = i == j ? | ||
413 | LOCAL_DISTANCE : REMOTE_DISTANCE; | ||
414 | printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); | ||
415 | |||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | /** | ||
420 | * numa_set_distance - Set NUMA distance from one NUMA to another | ||
421 | * @from: the 'from' node to set distance | ||
422 | * @to: the 'to' node to set distance | ||
423 | * @distance: NUMA distance | ||
424 | * | ||
425 | * Set the distance from node @from to @to to @distance. If distance table | ||
426 | * doesn't exist, one which is large enough to accommodate all the currently | ||
427 | * known nodes will be created. | ||
428 | * | ||
429 | * If such table cannot be allocated, a warning is printed and further | ||
430 | * calls are ignored until the distance table is reset with | ||
431 | * numa_reset_distance(). | ||
432 | * | ||
433 | * If @from or @to is higher than the highest known node at the time of | ||
434 | * table creation or @distance doesn't make sense, the call is ignored. | ||
435 | * This is to allow simplification of specific NUMA config implementations. | ||
436 | */ | ||
437 | void __init numa_set_distance(int from, int to, int distance) | ||
438 | { | ||
439 | if (!numa_distance && numa_alloc_distance() < 0) | ||
440 | return; | ||
441 | |||
442 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) { | ||
443 | printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", | ||
444 | from, to, distance); | ||
445 | return; | ||
446 | } | ||
447 | |||
448 | if ((u8)distance != distance || | ||
449 | (from == to && distance != LOCAL_DISTANCE)) { | ||
450 | pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", | ||
451 | from, to, distance); | ||
452 | return; | ||
453 | } | ||
454 | |||
455 | numa_distance[from * numa_distance_cnt + to] = distance; | ||
456 | } | ||
457 | |||
458 | int __node_distance(int from, int to) | ||
459 | { | ||
460 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) | ||
461 | return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; | ||
462 | return numa_distance[from * numa_distance_cnt + to]; | ||
463 | } | ||
464 | EXPORT_SYMBOL(__node_distance); | ||
465 | |||
466 | /* | ||
467 | * Sanity check to catch more bad NUMA configurations (they are amazingly | ||
468 | * common). Make sure the nodes cover all memory. | ||
469 | */ | ||
470 | static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) | ||
471 | { | ||
472 | u64 numaram, e820ram; | ||
473 | int i; | ||
474 | |||
475 | numaram = 0; | ||
476 | for (i = 0; i < mi->nr_blks; i++) { | ||
477 | u64 s = mi->blk[i].start >> PAGE_SHIFT; | ||
478 | u64 e = mi->blk[i].end >> PAGE_SHIFT; | ||
479 | numaram += e - s; | ||
480 | numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); | ||
481 | if ((s64)numaram < 0) | ||
482 | numaram = 0; | ||
483 | } | ||
484 | |||
485 | e820ram = max_pfn - (memblock_x86_hole_size(0, | ||
486 | PFN_PHYS(max_pfn)) >> PAGE_SHIFT); | ||
487 | /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ | ||
488 | if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { | ||
489 | printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n", | ||
490 | (numaram << PAGE_SHIFT) >> 20, | ||
491 | (e820ram << PAGE_SHIFT) >> 20); | ||
492 | return false; | ||
493 | } | ||
494 | return true; | ||
495 | } | ||
496 | |||
497 | static int __init numa_register_memblks(struct numa_meminfo *mi) | ||
498 | { | ||
499 | int i, nid; | ||
500 | |||
501 | /* Account for nodes with cpus and no memory */ | ||
502 | node_possible_map = numa_nodes_parsed; | ||
503 | numa_nodemask_from_meminfo(&node_possible_map, mi); | ||
504 | if (WARN_ON(nodes_empty(node_possible_map))) | ||
505 | return -EINVAL; | ||
506 | |||
507 | for (i = 0; i < mi->nr_blks; i++) | ||
508 | memblock_x86_register_active_regions(mi->blk[i].nid, | ||
509 | mi->blk[i].start >> PAGE_SHIFT, | ||
510 | mi->blk[i].end >> PAGE_SHIFT); | ||
511 | |||
512 | /* for out of order entries */ | ||
513 | sort_node_map(); | ||
514 | if (!numa_meminfo_cover_memory(mi)) | ||
515 | return -EINVAL; | ||
516 | |||
517 | /* Finally register nodes. */ | ||
518 | for_each_node_mask(nid, node_possible_map) { | ||
519 | u64 start = PFN_PHYS(max_pfn); | ||
520 | u64 end = 0; | ||
521 | |||
522 | for (i = 0; i < mi->nr_blks; i++) { | ||
523 | if (nid != mi->blk[i].nid) | ||
524 | continue; | ||
525 | start = min(mi->blk[i].start, start); | ||
526 | end = max(mi->blk[i].end, end); | ||
527 | } | ||
528 | |||
529 | if (start < end) | ||
530 | setup_node_data(nid, start, end); | ||
531 | } | ||
532 | |||
533 | return 0; | ||
534 | } | ||
535 | |||
98 | /* | 536 | /* |
99 | * There are unfortunately some poorly designed mainboards around that | 537 | * There are unfortunately some poorly designed mainboards around that |
100 | * only connect memory to a single CPU. This breaks the 1:1 cpu->node | 538 | * only connect memory to a single CPU. This breaks the 1:1 cpu->node |
@@ -102,7 +540,7 @@ void __init setup_node_to_cpumask_map(void) | |||
102 | * as the number of CPUs is not known yet. We round robin the existing | 540 | * as the number of CPUs is not known yet. We round robin the existing |
103 | * nodes. | 541 | * nodes. |
104 | */ | 542 | */ |
105 | void __init numa_init_array(void) | 543 | static void __init numa_init_array(void) |
106 | { | 544 | { |
107 | int rr, i; | 545 | int rr, i; |
108 | 546 | ||
@@ -117,6 +555,95 @@ void __init numa_init_array(void) | |||
117 | } | 555 | } |
118 | } | 556 | } |
119 | 557 | ||
558 | static int __init numa_init(int (*init_func)(void)) | ||
559 | { | ||
560 | int i; | ||
561 | int ret; | ||
562 | |||
563 | for (i = 0; i < MAX_LOCAL_APIC; i++) | ||
564 | set_apicid_to_node(i, NUMA_NO_NODE); | ||
565 | |||
566 | nodes_clear(numa_nodes_parsed); | ||
567 | nodes_clear(node_possible_map); | ||
568 | nodes_clear(node_online_map); | ||
569 | memset(&numa_meminfo, 0, sizeof(numa_meminfo)); | ||
570 | remove_all_active_ranges(); | ||
571 | numa_reset_distance(); | ||
572 | |||
573 | ret = init_func(); | ||
574 | if (ret < 0) | ||
575 | return ret; | ||
576 | ret = numa_cleanup_meminfo(&numa_meminfo); | ||
577 | if (ret < 0) | ||
578 | return ret; | ||
579 | |||
580 | numa_emulation(&numa_meminfo, numa_distance_cnt); | ||
581 | |||
582 | ret = numa_register_memblks(&numa_meminfo); | ||
583 | if (ret < 0) | ||
584 | return ret; | ||
585 | |||
586 | for (i = 0; i < nr_cpu_ids; i++) { | ||
587 | int nid = early_cpu_to_node(i); | ||
588 | |||
589 | if (nid == NUMA_NO_NODE) | ||
590 | continue; | ||
591 | if (!node_online(nid)) | ||
592 | numa_clear_node(i); | ||
593 | } | ||
594 | numa_init_array(); | ||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | /** | ||
599 | * dummy_numa_init - Fallback dummy NUMA init | ||
600 | * | ||
601 | * Used if there's no underlying NUMA architecture, NUMA initialization | ||
602 | * fails, or NUMA is disabled on the command line. | ||
603 | * | ||
604 | * Must online at least one node and add memory blocks that cover all | ||
605 | * allowed memory. This function must not fail. | ||
606 | */ | ||
607 | static int __init dummy_numa_init(void) | ||
608 | { | ||
609 | printk(KERN_INFO "%s\n", | ||
610 | numa_off ? "NUMA turned off" : "No NUMA configuration found"); | ||
611 | printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n", | ||
612 | 0LLU, PFN_PHYS(max_pfn)); | ||
613 | |||
614 | node_set(0, numa_nodes_parsed); | ||
615 | numa_add_memblk(0, 0, PFN_PHYS(max_pfn)); | ||
616 | |||
617 | return 0; | ||
618 | } | ||
619 | |||
620 | /** | ||
621 | * x86_numa_init - Initialize NUMA | ||
622 | * | ||
623 | * Try each configured NUMA initialization method until one succeeds. The | ||
624 | * last fallback is a dummy single node config encompassing whole memory and | ||
625 | * never fails. | ||
626 | */ | ||
627 | void __init x86_numa_init(void) | ||
628 | { | ||
629 | if (!numa_off) { | ||
630 | #ifdef CONFIG_X86_NUMAQ | ||
631 | if (!numa_init(numaq_numa_init)) | ||
632 | return; | ||
633 | #endif | ||
634 | #ifdef CONFIG_ACPI_NUMA | ||
635 | if (!numa_init(x86_acpi_numa_init)) | ||
636 | return; | ||
637 | #endif | ||
638 | #ifdef CONFIG_AMD_NUMA | ||
639 | if (!numa_init(amd_numa_init)) | ||
640 | return; | ||
641 | #endif | ||
642 | } | ||
643 | |||
644 | numa_init(dummy_numa_init); | ||
645 | } | ||
646 | |||
120 | static __init int find_near_online_node(int node) | 647 | static __init int find_near_online_node(int node) |
121 | { | 648 | { |
122 | int n, val; | 649 | int n, val; |
@@ -282,3 +809,18 @@ const struct cpumask *cpumask_of_node(int node) | |||
282 | EXPORT_SYMBOL(cpumask_of_node); | 809 | EXPORT_SYMBOL(cpumask_of_node); |
283 | 810 | ||
284 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ | 811 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
812 | |||
813 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
814 | int memory_add_physaddr_to_nid(u64 start) | ||
815 | { | ||
816 | struct numa_meminfo *mi = &numa_meminfo; | ||
817 | int nid = mi->blk[0].nid; | ||
818 | int i; | ||
819 | |||
820 | for (i = 0; i < mi->nr_blks; i++) | ||
821 | if (mi->blk[i].start <= start && mi->blk[i].end > start) | ||
822 | nid = mi->blk[i].nid; | ||
823 | return nid; | ||
824 | } | ||
825 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | ||
826 | #endif | ||
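The consolidated numa.c centralizes meminfo and distance-table bookkeeping that used to be duplicated per word size. As a feel for the new numa_set_distance() API, a hedged two-node SLIT-style setup (the distance values are made up):

	numa_set_distance(0, 0, LOCAL_DISTANCE);	/* 10 */
	numa_set_distance(0, 1, 21);
	numa_set_distance(1, 0, 21);
	numa_set_distance(1, 1, LOCAL_DISTANCE);

	/* out-of-range queries fall back to LOCAL/REMOTE defaults */
	int d = __node_distance(0, 1);			/* 21 */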
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index bde3906420df..849a975d3fa0 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c | |||
@@ -22,39 +22,11 @@ | |||
22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/bootmem.h> | 25 | #include <linux/bootmem.h> |
27 | #include <linux/memblock.h> | 26 | #include <linux/memblock.h> |
28 | #include <linux/mmzone.h> | ||
29 | #include <linux/highmem.h> | ||
30 | #include <linux/initrd.h> | ||
31 | #include <linux/nodemask.h> | ||
32 | #include <linux/module.h> | 27 | #include <linux/module.h> |
33 | #include <linux/kexec.h> | ||
34 | #include <linux/pfn.h> | ||
35 | #include <linux/swap.h> | ||
36 | #include <linux/acpi.h> | ||
37 | |||
38 | #include <asm/e820.h> | ||
39 | #include <asm/setup.h> | ||
40 | #include <asm/mmzone.h> | ||
41 | #include <asm/bios_ebda.h> | ||
42 | #include <asm/proto.h> | ||
43 | |||
44 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | ||
45 | EXPORT_SYMBOL(node_data); | ||
46 | |||
47 | /* | ||
48 | * numa interface - we expect the numa architecture specific code to have | ||
49 | * populated the following initialisation. | ||
50 | * | ||
51 | * 1) node_online_map - the map of all nodes configured (online) in the system | ||
52 | * 2) node_start_pfn - the starting page frame number for a node | ||
53 | * 3) node_end_pfn - the ending page frame number for a node | ||
54 | */ | ||
55 | unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly; | ||
56 | unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly; | ||
57 | 28 | ||
29 | #include "numa_internal.h" | ||
58 | 30 | ||
59 | #ifdef CONFIG_DISCONTIGMEM | 31 | #ifdef CONFIG_DISCONTIGMEM |
60 | /* | 32 | /* |
@@ -99,108 +71,46 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn, | |||
99 | } | 71 | } |
100 | #endif | 72 | #endif |
101 | 73 | ||
102 | extern unsigned long find_max_low_pfn(void); | ||
103 | extern unsigned long highend_pfn, highstart_pfn; | 74 | extern unsigned long highend_pfn, highstart_pfn; |
104 | 75 | ||
105 | #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) | 76 | #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) |
106 | 77 | ||
107 | unsigned long node_remap_size[MAX_NUMNODES]; | ||
108 | static void *node_remap_start_vaddr[MAX_NUMNODES]; | 78 | static void *node_remap_start_vaddr[MAX_NUMNODES]; |
109 | void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); | 79 | void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); |
110 | 80 | ||
111 | static unsigned long kva_start_pfn; | ||
112 | static unsigned long kva_pages; | ||
113 | |||
114 | int __cpuinit numa_cpu_node(int cpu) | ||
115 | { | ||
116 | return apic->x86_32_numa_cpu_node(cpu); | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * FLAT - support for basic PC memory model with discontig enabled, essentially | ||
121 | * a single node with all available processors in it with a flat | ||
122 | * memory map. | ||
123 | */ | ||
124 | int __init get_memcfg_numa_flat(void) | ||
125 | { | ||
126 | printk(KERN_DEBUG "NUMA - single node, flat memory mode\n"); | ||
127 | |||
128 | node_start_pfn[0] = 0; | ||
129 | node_end_pfn[0] = max_pfn; | ||
130 | memblock_x86_register_active_regions(0, 0, max_pfn); | ||
131 | memory_present(0, 0, max_pfn); | ||
132 | node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn); | ||
133 | |||
134 | /* Indicate there is one node available. */ | ||
135 | nodes_clear(node_online_map); | ||
136 | node_set_online(0); | ||
137 | return 1; | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Find the highest page frame number we have available for the node | ||
142 | */ | ||
143 | static void __init propagate_e820_map_node(int nid) | ||
144 | { | ||
145 | if (node_end_pfn[nid] > max_pfn) | ||
146 | node_end_pfn[nid] = max_pfn; | ||
147 | /* | ||
148 | * if a user has given mem=XXXX, then we need to make sure | ||
149 | * that the node _starts_ before that, too, not just ends | ||
150 | */ | ||
151 | if (node_start_pfn[nid] > max_pfn) | ||
152 | node_start_pfn[nid] = max_pfn; | ||
153 | BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]); | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Allocate memory for the pg_data_t for this node via a crude pre-bootmem | ||
158 | * method. For node zero take this from the bottom of memory, for | ||
159 | * subsequent nodes place them at node_remap_start_vaddr which contains | ||
160 | * node local data in physically node local memory. See setup_memory() | ||
161 | * for details. | ||
162 | */ | ||
163 | static void __init allocate_pgdat(int nid) | ||
164 | { | ||
165 | char buf[16]; | ||
166 | |||
167 | if (node_has_online_mem(nid) && node_remap_start_vaddr[nid]) | ||
168 | NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid]; | ||
169 | else { | ||
170 | unsigned long pgdat_phys; | ||
171 | pgdat_phys = memblock_find_in_range(min_low_pfn<<PAGE_SHIFT, | ||
172 | max_pfn_mapped<<PAGE_SHIFT, | ||
173 | sizeof(pg_data_t), | ||
174 | PAGE_SIZE); | ||
175 | NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT)); | ||
176 | memset(buf, 0, sizeof(buf)); | ||
177 | sprintf(buf, "NODE_DATA %d", nid); | ||
178 | memblock_x86_reserve_range(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf); | ||
179 | } | ||
180 | printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n", | ||
181 | nid, (unsigned long)NODE_DATA(nid)); | ||
182 | } | ||
183 | |||
184 | /* | 81 | /* |
185 | * In the DISCONTIGMEM and SPARSEMEM memory model, a portion of the kernel | 82 | * Remap memory allocator |
186 | * virtual address space (KVA) is reserved and portions of nodes are mapped | ||
187 | * using it. This is to allow node-local memory to be allocated for | ||
188 | * structures that would normally require ZONE_NORMAL. The memory is | ||
189 | * allocated with alloc_remap() and callers should be prepared to allocate | ||
190 | * from the bootmem allocator instead. | ||
191 | */ | 83 | */ |
192 | static unsigned long node_remap_start_pfn[MAX_NUMNODES]; | 84 | static unsigned long node_remap_start_pfn[MAX_NUMNODES]; |
193 | static void *node_remap_end_vaddr[MAX_NUMNODES]; | 85 | static void *node_remap_end_vaddr[MAX_NUMNODES]; |
194 | static void *node_remap_alloc_vaddr[MAX_NUMNODES]; | 86 | static void *node_remap_alloc_vaddr[MAX_NUMNODES]; |
195 | static unsigned long node_remap_offset[MAX_NUMNODES]; | ||
196 | 87 | ||
88 | /** | ||
89 | * alloc_remap - Allocate remapped memory | ||
90 | * @nid: NUMA node to allocate memory from | ||
91 | * @size: The size of allocation | ||
92 | * | ||
93 | * Allocate @size bytes from the remap area of NUMA node @nid. The | ||
94 | * size of the remap area is predetermined by init_alloc_remap() and | ||
95 | * only the callers accounted for there should call this function. For | ||
96 | * more info, please read the comment on top of init_alloc_remap(). | ||
97 | * | ||
98 | * The caller must be ready to handle allocation failure from this | ||
99 | * function and fall back to regular memory allocator in such cases. | ||
100 | * | ||
101 | * CONTEXT: | ||
102 | * Single CPU early boot context. | ||
103 | * | ||
104 | * RETURNS: | ||
105 | * Pointer to the allocated memory on success, %NULL on failure. | ||
106 | */ | ||
197 | void *alloc_remap(int nid, unsigned long size) | 107 | void *alloc_remap(int nid, unsigned long size) |
198 | { | 108 | { |
199 | void *allocation = node_remap_alloc_vaddr[nid]; | 109 | void *allocation = node_remap_alloc_vaddr[nid]; |
200 | 110 | ||
201 | size = ALIGN(size, L1_CACHE_BYTES); | 111 | size = ALIGN(size, L1_CACHE_BYTES); |
202 | 112 | ||
203 | if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid]) | 113 | if (!allocation || (allocation + size) > node_remap_end_vaddr[nid]) |
204 | return NULL; | 114 | return NULL; |
205 | 115 | ||
206 | node_remap_alloc_vaddr[nid] += size; | 116 | node_remap_alloc_vaddr[nid] += size; |
@@ -209,26 +119,6 @@ void *alloc_remap(int nid, unsigned long size) | |||
209 | return allocation; | 119 | return allocation; |
210 | } | 120 | } |
211 | 121 | ||
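The kernel-doc above promises that callers fall back to a regular allocator; a minimal sketch of that contract, with alloc_node_data() and the bootmem fallback chosen for illustration rather than taken from this patch:

	/* Try the node-local remap area first; on failure (window
	 * exhausted, or never initialized) fall back to early bootmem. */
	static void * __init alloc_node_data(int nid, unsigned long size)
	{
		void *ptr = alloc_remap(nid, size);	/* may be NULL */

		if (!ptr)
			ptr = alloc_bootmem_node(NODE_DATA(nid), size);
		return ptr;
	}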
212 | static void __init remap_numa_kva(void) | ||
213 | { | ||
214 | void *vaddr; | ||
215 | unsigned long pfn; | ||
216 | int node; | ||
217 | |||
218 | for_each_online_node(node) { | ||
219 | printk(KERN_DEBUG "remap_numa_kva: node %d\n", node); | ||
220 | for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) { | ||
221 | vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT); | ||
222 | printk(KERN_DEBUG "remap_numa_kva: %08lx to pfn %08lx\n", | ||
223 | (unsigned long)vaddr, | ||
224 | node_remap_start_pfn[node] + pfn); | ||
225 | set_pmd_pfn((ulong) vaddr, | ||
226 | node_remap_start_pfn[node] + pfn, | ||
227 | PAGE_KERNEL_LARGE); | ||
228 | } | ||
229 | } | ||
230 | } | ||
231 | |||
232 | #ifdef CONFIG_HIBERNATION | 122 | #ifdef CONFIG_HIBERNATION |
233 | /** | 123 | /** |
234 | * resume_map_numa_kva - add KVA mapping to the temporary page tables created | 124 | * resume_map_numa_kva - add KVA mapping to the temporary page tables created |
@@ -240,15 +130,16 @@ void resume_map_numa_kva(pgd_t *pgd_base) | |||
240 | int node; | 130 | int node; |
241 | 131 | ||
242 | for_each_online_node(node) { | 132 | for_each_online_node(node) { |
243 | unsigned long start_va, start_pfn, size, pfn; | 133 | unsigned long start_va, start_pfn, nr_pages, pfn; |
244 | 134 | ||
245 | start_va = (unsigned long)node_remap_start_vaddr[node]; | 135 | start_va = (unsigned long)node_remap_start_vaddr[node]; |
246 | start_pfn = node_remap_start_pfn[node]; | 136 | start_pfn = node_remap_start_pfn[node]; |
247 | size = node_remap_size[node]; | 137 | nr_pages = (node_remap_end_vaddr[node] - |
138 | node_remap_start_vaddr[node]) >> PAGE_SHIFT; | ||
248 | 139 | ||
249 | printk(KERN_DEBUG "%s: node %d\n", __func__, node); | 140 | printk(KERN_DEBUG "%s: node %d\n", __func__, node); |
250 | 141 | ||
251 | for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) { | 142 | for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) { |
252 | unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); | 143 | unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); |
253 | pgd_t *pgd = pgd_base + pgd_index(vaddr); | 144 | pgd_t *pgd = pgd_base + pgd_index(vaddr); |
254 | pud_t *pud = pud_offset(pgd, vaddr); | 145 | pud_t *pud = pud_offset(pgd, vaddr); |
@@ -264,132 +155,89 @@ void resume_map_numa_kva(pgd_t *pgd_base) | |||
264 | } | 155 | } |
265 | #endif | 156 | #endif |
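With node_remap_size[] gone, the hibernation loop above re-derives the page count from the remap window itself; spelled out with illustrative numbers (assuming 4 KiB pages and non-PAE PTRS_PER_PTE == 1024):

	/* An 8 MiB remap window is 2048 pages, so the pfn loop takes two
	 * PTRS_PER_PTE-sized steps and restores two 4 MiB mappings. */
	nr_pages = (node_remap_end_vaddr[node] -
		    node_remap_start_vaddr[node]) >> PAGE_SHIFT;	/* 2048 */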
266 | 157 | ||
267 | static __init unsigned long calculate_numa_remap_pages(void) | 158 | /** |
159 | * init_alloc_remap - Initialize remap allocator for a NUMA node | ||
160 | * @nid: NUMA node to initialize remap allocator for | ||
161 | * | ||
162 | * NUMA nodes may end up without any lowmem. As allocating pgdat and | ||
163 | * memmap on a different node with lowmem is inefficient, a special | ||
164 | * remap allocator is implemented which can be used by alloc_remap(). | ||
165 | * | ||
166 | * For each node, the amount of memory which will be necessary for | ||
167 | * pgdat and memmap is calculated and two memory areas of that size are | ||
168 | * allocated - one in the node and the other in lowmem; then, the area | ||
169 | * in the node is remapped to the lowmem area. | ||
170 | * | ||
171 | * As pgdat and memmap must be allocated in lowmem anyway, this | ||
172 | * doesn't waste lowmem address space; however, the actual lowmem | ||
173 | * which gets remapped over is wasted. The amount shouldn't be | ||
174 | * problematic on machines where this feature will be used. | ||
175 | * | ||
176 | * Initialization failure isn't fatal. alloc_remap() is used | ||
177 | * opportunistically and the callers will fall back to other memory | ||
178 | * allocation mechanisms on failure. | ||
179 | */ | ||
180 | void __init init_alloc_remap(int nid, u64 start, u64 end) | ||
268 | { | 181 | { |
269 | int nid; | 182 | unsigned long start_pfn = start >> PAGE_SHIFT; |
270 | unsigned long size, reserve_pages = 0; | 183 | unsigned long end_pfn = end >> PAGE_SHIFT; |
271 | 184 | unsigned long size, pfn; | |
272 | for_each_online_node(nid) { | 185 | u64 node_pa, remap_pa; |
273 | u64 node_kva_target; | 186 | void *remap_va; |
274 | u64 node_kva_final; | ||
275 | |||
276 | /* | ||
277 | * The acpi/srat node info can show hot-add memory zones | ||
278 | * where memory could be added but not currently present. | ||
279 | */ | ||
280 | printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n", | ||
281 | nid, node_start_pfn[nid], node_end_pfn[nid]); | ||
282 | if (node_start_pfn[nid] > max_pfn) | ||
283 | continue; | ||
284 | if (!node_end_pfn[nid]) | ||
285 | continue; | ||
286 | if (node_end_pfn[nid] > max_pfn) | ||
287 | node_end_pfn[nid] = max_pfn; | ||
288 | |||
289 | /* ensure the remap includes space for the pgdat. */ | ||
290 | size = node_remap_size[nid] + sizeof(pg_data_t); | ||
291 | |||
292 | /* convert size to large (pmd size) pages, rounding up */ | ||
293 | size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES; | ||
294 | /* now the roundup is correct, convert to PAGE_SIZE pages */ | ||
295 | size = size * PTRS_PER_PTE; | ||
296 | |||
297 | node_kva_target = round_down(node_end_pfn[nid] - size, | ||
298 | PTRS_PER_PTE); | ||
299 | node_kva_target <<= PAGE_SHIFT; | ||
300 | do { | ||
301 | node_kva_final = memblock_find_in_range(node_kva_target, | ||
302 | ((u64)node_end_pfn[nid])<<PAGE_SHIFT, | ||
303 | ((u64)size)<<PAGE_SHIFT, | ||
304 | LARGE_PAGE_BYTES); | ||
305 | node_kva_target -= LARGE_PAGE_BYTES; | ||
306 | } while (node_kva_final == MEMBLOCK_ERROR && | ||
307 | (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid])); | ||
308 | |||
309 | if (node_kva_final == MEMBLOCK_ERROR) | ||
310 | panic("Can not get kva ram\n"); | ||
311 | |||
312 | node_remap_size[nid] = size; | ||
313 | node_remap_offset[nid] = reserve_pages; | ||
314 | reserve_pages += size; | ||
315 | printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of" | ||
316 | " node %d at %llx\n", | ||
317 | size, nid, node_kva_final>>PAGE_SHIFT); | ||
318 | |||
319 | /* | ||
320 | * prevent kva address below max_low_pfn want it on system | ||
321 | * with less memory later. | ||
322 | * layout will be: KVA address , KVA RAM | ||
323 | * | ||
324 | * we are supposed to only record the one less than max_low_pfn | ||
325 | * but we could have some hole in high memory, and it will only | ||
326 | * check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide | ||
327 | * to use it as free. | ||
328 | * So memblock_x86_reserve_range here, hope we don't run out of that array | ||
329 | */ | ||
330 | memblock_x86_reserve_range(node_kva_final, | ||
331 | node_kva_final+(((u64)size)<<PAGE_SHIFT), | ||
332 | "KVA RAM"); | ||
333 | |||
334 | node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT; | ||
335 | } | ||
336 | printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n", | ||
337 | reserve_pages); | ||
338 | return reserve_pages; | ||
339 | } | ||
340 | 187 | ||
341 | static void init_remap_allocator(int nid) | 188 | /* |
342 | { | 189 | * The acpi/srat node info can show hot-add memory zones where |
343 | node_remap_start_vaddr[nid] = pfn_to_kaddr( | 190 | * memory could be added but not currently present. |
344 | kva_start_pfn + node_remap_offset[nid]); | 191 | */ |
345 | node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] + | 192 | printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n", |
346 | (node_remap_size[nid] * PAGE_SIZE); | 193 | nid, start_pfn, end_pfn); |
347 | node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] + | 194 | |
348 | ALIGN(sizeof(pg_data_t), PAGE_SIZE); | 195 | /* calculate the necessary space aligned to large page size */ |
349 | 196 | size = node_memmap_size_bytes(nid, start_pfn, end_pfn); | |
350 | printk(KERN_DEBUG "node %d will remap to vaddr %08lx - %08lx\n", nid, | 197 | size += ALIGN(sizeof(pg_data_t), PAGE_SIZE); |
351 | (ulong) node_remap_start_vaddr[nid], | 198 | size = ALIGN(size, LARGE_PAGE_BYTES); |
352 | (ulong) node_remap_end_vaddr[nid]); | 199 | |
200 | /* allocate node memory and the lowmem remap area */ | ||
201 | node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES); | ||
202 | if (node_pa == MEMBLOCK_ERROR) { | ||
203 | pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n", | ||
204 | size, nid); | ||
205 | return; | ||
206 | } | ||
207 | memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM"); | ||
208 | |||
209 | remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT, | ||
210 | max_low_pfn << PAGE_SHIFT, | ||
211 | size, LARGE_PAGE_BYTES); | ||
212 | if (remap_pa == MEMBLOCK_ERROR) { | ||
213 | pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n", | ||
214 | size, nid); | ||
215 | memblock_x86_free_range(node_pa, node_pa + size); | ||
216 | return; | ||
217 | } | ||
218 | memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG"); | ||
219 | remap_va = phys_to_virt(remap_pa); | ||
220 | |||
221 | /* perform actual remap */ | ||
222 | for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE) | ||
223 | set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT), | ||
224 | (node_pa >> PAGE_SHIFT) + pfn, | ||
225 | PAGE_KERNEL_LARGE); | ||
226 | |||
227 | /* initialize remap allocator parameters */ | ||
228 | node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT; | ||
229 | node_remap_start_vaddr[nid] = remap_va; | ||
230 | node_remap_end_vaddr[nid] = remap_va + size; | ||
231 | node_remap_alloc_vaddr[nid] = remap_va; | ||
232 | |||
233 | printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n", | ||
234 | nid, node_pa, node_pa + size, remap_va, remap_va + size); | ||
353 | } | 235 | } |
354 | 236 | ||
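To make the three-step size calculation in init_alloc_remap() concrete, a worked example under stated assumptions (4 KiB pages, 4 MiB LARGE_PAGE_BYTES, a 32-byte struct page, a few-KiB pg_data_t; all numbers illustrative):

	size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
				/* 512 MiB node = 131072 pages -> 4 MiB memmap */
	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
				/* plus a handful of pages for the pgdat      */
	size = ALIGN(size, LARGE_PAGE_BYTES);
				/* rounds up to 8 MiB, so both the node area
				 * and the lowmem area can be PMD-mapped      */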
355 | void __init initmem_init(void) | 237 | void __init initmem_init(void) |
356 | { | 238 | { |
357 | int nid; | 239 | x86_numa_init(); |
358 | long kva_target_pfn; | ||
359 | |||
360 | /* | ||
361 | * When mapping a NUMA machine we allocate the node_mem_map arrays | ||
362 | * from node local memory. They are then mapped directly into KVA | ||
363 | * between zone normal and vmalloc space. Calculate the size of | ||
364 | * this space and use it to adjust the boundary between ZONE_NORMAL | ||
365 | * and ZONE_HIGHMEM. | ||
366 | */ | ||
367 | |||
368 | get_memcfg_numa(); | ||
369 | numa_init_array(); | ||
370 | |||
371 | kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE); | ||
372 | 240 | ||
373 | kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); | ||
374 | do { | ||
375 | kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT, | ||
376 | max_low_pfn<<PAGE_SHIFT, | ||
377 | kva_pages<<PAGE_SHIFT, | ||
378 | PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT; | ||
379 | kva_target_pfn -= PTRS_PER_PTE; | ||
380 | } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn); | ||
381 | |||
382 | if (kva_start_pfn == MEMBLOCK_ERROR) | ||
383 | panic("Can not get kva space\n"); | ||
384 | |||
385 | printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n", | ||
386 | kva_start_pfn, max_low_pfn); | ||
387 | printk(KERN_INFO "max_pfn = %lx\n", max_pfn); | ||
388 | |||
389 | /* avoid clash with initrd */ | ||
390 | memblock_x86_reserve_range(kva_start_pfn<<PAGE_SHIFT, | ||
391 | (kva_start_pfn + kva_pages)<<PAGE_SHIFT, | ||
392 | "KVA PG"); | ||
393 | #ifdef CONFIG_HIGHMEM | 241 | #ifdef CONFIG_HIGHMEM |
394 | highstart_pfn = highend_pfn = max_pfn; | 242 | highstart_pfn = highend_pfn = max_pfn; |
395 | if (max_pfn > max_low_pfn) | 243 | if (max_pfn > max_low_pfn) |
@@ -409,51 +257,9 @@ void __init initmem_init(void) | |||
409 | 257 | ||
410 | printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n", | 258 | printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n", |
411 | (ulong) pfn_to_kaddr(max_low_pfn)); | 259 | (ulong) pfn_to_kaddr(max_low_pfn)); |
412 | for_each_online_node(nid) { | ||
413 | init_remap_allocator(nid); | ||
414 | |||
415 | allocate_pgdat(nid); | ||
416 | } | ||
417 | remap_numa_kva(); | ||
418 | 260 | ||
419 | printk(KERN_DEBUG "High memory starts at vaddr %08lx\n", | 261 | printk(KERN_DEBUG "High memory starts at vaddr %08lx\n", |
420 | (ulong) pfn_to_kaddr(highstart_pfn)); | 262 | (ulong) pfn_to_kaddr(highstart_pfn)); |
421 | for_each_online_node(nid) | ||
422 | propagate_e820_map_node(nid); | ||
423 | |||
424 | for_each_online_node(nid) { | ||
425 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); | ||
426 | NODE_DATA(nid)->node_id = nid; | ||
427 | } | ||
428 | 263 | ||
429 | setup_bootmem_allocator(); | 264 | setup_bootmem_allocator(); |
430 | } | 265 | } |
431 | |||
432 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
433 | static int paddr_to_nid(u64 addr) | ||
434 | { | ||
435 | int nid; | ||
436 | unsigned long pfn = PFN_DOWN(addr); | ||
437 | |||
438 | for_each_node(nid) | ||
439 | if (node_start_pfn[nid] <= pfn && | ||
440 | pfn < node_end_pfn[nid]) | ||
441 | return nid; | ||
442 | |||
443 | return -1; | ||
444 | } | ||
445 | |||
446 | /* | ||
447 | * This function is used to ask node id BEFORE memmap and mem_section's | ||
448 | * initialization (pfn_to_nid() can't be used yet). | ||
449 | * If _PXM is not defined on ACPI's DSDT, node id must be found by this. | ||
450 | */ | ||
451 | int memory_add_physaddr_to_nid(u64 addr) | ||
452 | { | ||
453 | int nid = paddr_to_nid(addr); | ||
454 | return (nid >= 0) ? nid : 0; | ||
455 | } | ||
456 | |||
457 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | ||
458 | #endif | ||
459 | |||
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 85b52fc03084..dd27f401f0a0 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -2,646 +2,13 @@ | |||
2 | * Generic VM initialization for x86-64 NUMA setups. | 2 | * Generic VM initialization for x86-64 NUMA setups. |
3 | * Copyright 2002,2003 Andi Kleen, SuSE Labs. | 3 | * Copyright 2002,2003 Andi Kleen, SuSE Labs. |
4 | */ | 4 | */ |
5 | #include <linux/kernel.h> | ||
6 | #include <linux/mm.h> | ||
7 | #include <linux/string.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/bootmem.h> | 5 | #include <linux/bootmem.h> |
10 | #include <linux/memblock.h> | ||
11 | #include <linux/mmzone.h> | ||
12 | #include <linux/ctype.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/nodemask.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/acpi.h> | ||
17 | |||
18 | #include <asm/e820.h> | ||
19 | #include <asm/proto.h> | ||
20 | #include <asm/dma.h> | ||
21 | #include <asm/acpi.h> | ||
22 | #include <asm/amd_nb.h> | ||
23 | 6 | ||
24 | #include "numa_internal.h" | 7 | #include "numa_internal.h" |
25 | 8 | ||
26 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | ||
27 | EXPORT_SYMBOL(node_data); | ||
28 | |||
29 | nodemask_t numa_nodes_parsed __initdata; | ||
30 | |||
31 | struct memnode memnode; | ||
32 | |||
33 | static unsigned long __initdata nodemap_addr; | ||
34 | static unsigned long __initdata nodemap_size; | ||
35 | |||
36 | static struct numa_meminfo numa_meminfo __initdata; | ||
37 | |||
38 | static int numa_distance_cnt; | ||
39 | static u8 *numa_distance; | ||
40 | |||
41 | /* | ||
42 | * Given a shift value, try to populate memnodemap[] | ||
43 | * Returns : | ||
44 | * 1 if OK | ||
45 | * 0 if memnodemap[] too small (or shift too small) | ||
46 | * -1 if node overlap or lost ram (shift too big) | ||
47 | */ | ||
48 | static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift) | ||
49 | { | ||
50 | unsigned long addr, end; | ||
51 | int i, res = -1; | ||
52 | |||
53 | memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize); | ||
54 | for (i = 0; i < mi->nr_blks; i++) { | ||
55 | addr = mi->blk[i].start; | ||
56 | end = mi->blk[i].end; | ||
57 | if (addr >= end) | ||
58 | continue; | ||
59 | if ((end >> shift) >= memnodemapsize) | ||
60 | return 0; | ||
61 | do { | ||
62 | if (memnodemap[addr >> shift] != NUMA_NO_NODE) | ||
63 | return -1; | ||
64 | memnodemap[addr >> shift] = mi->blk[i].nid; | ||
65 | addr += (1UL << shift); | ||
66 | } while (addr < end); | ||
67 | res = 1; | ||
68 | } | ||
69 | return res; | ||
70 | } | ||
71 | |||
72 | static int __init allocate_cachealigned_memnodemap(void) | ||
73 | { | ||
74 | unsigned long addr; | ||
75 | |||
76 | memnodemap = memnode.embedded_map; | ||
77 | if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map)) | ||
78 | return 0; | ||
79 | |||
80 | addr = 0x8000; | ||
81 | nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); | ||
82 | nodemap_addr = memblock_find_in_range(addr, get_max_mapped(), | ||
83 | nodemap_size, L1_CACHE_BYTES); | ||
84 | if (nodemap_addr == MEMBLOCK_ERROR) { | ||
85 | printk(KERN_ERR | ||
86 | "NUMA: Unable to allocate Memory to Node hash map\n"); | ||
87 | nodemap_addr = nodemap_size = 0; | ||
88 | return -1; | ||
89 | } | ||
90 | memnodemap = phys_to_virt(nodemap_addr); | ||
91 | memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP"); | ||
92 | |||
93 | printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n", | ||
94 | nodemap_addr, nodemap_addr + nodemap_size); | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * The LSB of all start and end addresses in the node map is the value of the | ||
100 | * maximum possible shift. | ||
101 | */ | ||
102 | static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi) | ||
103 | { | ||
104 | int i, nodes_used = 0; | ||
105 | unsigned long start, end; | ||
106 | unsigned long bitfield = 0, memtop = 0; | ||
107 | |||
108 | for (i = 0; i < mi->nr_blks; i++) { | ||
109 | start = mi->blk[i].start; | ||
110 | end = mi->blk[i].end; | ||
111 | if (start >= end) | ||
112 | continue; | ||
113 | bitfield |= start; | ||
114 | nodes_used++; | ||
115 | if (end > memtop) | ||
116 | memtop = end; | ||
117 | } | ||
118 | if (nodes_used <= 1) | ||
119 | i = 63; | ||
120 | else | ||
121 | i = find_first_bit(&bitfield, sizeof(unsigned long)*8); | ||
122 | memnodemapsize = (memtop >> i)+1; | ||
123 | return i; | ||
124 | } | ||
125 | |||
126 | static int __init compute_hash_shift(const struct numa_meminfo *mi) | ||
127 | { | ||
128 | int shift; | ||
129 | |||
130 | shift = extract_lsb_from_nodes(mi); | ||
131 | if (allocate_cachealigned_memnodemap()) | ||
132 | return -1; | ||
133 | printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", | ||
134 | shift); | ||
135 | |||
136 | if (populate_memnodemap(mi, shift) != 1) { | ||
137 | printk(KERN_INFO "Your memory is not aligned you need to " | ||
138 | "rebuild your kernel with a bigger NODEMAPSIZE " | ||
139 | "shift=%d\n", shift); | ||
140 | return -1; | ||
141 | } | ||
142 | return shift; | ||
143 | } | ||
144 | |||
145 | int __meminit __early_pfn_to_nid(unsigned long pfn) | ||
146 | { | ||
147 | return phys_to_nid(pfn << PAGE_SHIFT); | ||
148 | } | ||
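For reference while reading these deletions: all this hash machinery existed to make the following one-shift lookup possible. A sketch of the old phys_to_nid(), reconstructed from the x86_64 mmzone header of this era (treat the exact definition as an assumption):

	/* Every (1 << memnode_shift) bytes of physical address space maps
	 * to one memnodemap[] slot holding a node id. */
	static inline int phys_to_nid(unsigned long addr)
	{
		return memnodemap[addr >> memnode_shift];
	}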
149 | |||
150 | static void * __init early_node_mem(int nodeid, unsigned long start, | ||
151 | unsigned long end, unsigned long size, | ||
152 | unsigned long align) | ||
153 | { | ||
154 | unsigned long mem; | ||
155 | |||
156 | /* | ||
157 | * put it as high as possible | ||
158 | * as other allocations will be placed together with NODE_DATA | ||
159 | */ | ||
160 | if (start < (MAX_DMA_PFN<<PAGE_SHIFT)) | ||
161 | start = MAX_DMA_PFN<<PAGE_SHIFT; | ||
162 | if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) && | ||
163 | end > (MAX_DMA32_PFN<<PAGE_SHIFT)) | ||
164 | start = MAX_DMA32_PFN<<PAGE_SHIFT; | ||
165 | mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align); | ||
166 | if (mem != MEMBLOCK_ERROR) | ||
167 | return __va(mem); | ||
168 | |||
169 | /* extend the search scope */ | ||
170 | end = max_pfn_mapped << PAGE_SHIFT; | ||
171 | start = MAX_DMA_PFN << PAGE_SHIFT; | ||
172 | mem = memblock_find_in_range(start, end, size, align); | ||
173 | if (mem != MEMBLOCK_ERROR) | ||
174 | return __va(mem); | ||
175 | |||
176 | printk(KERN_ERR "Cannot find %lu bytes in node %d\n", | ||
177 | size, nodeid); | ||
178 | |||
179 | return NULL; | ||
180 | } | ||
181 | |||
182 | static int __init numa_add_memblk_to(int nid, u64 start, u64 end, | ||
183 | struct numa_meminfo *mi) | ||
184 | { | ||
185 | /* ignore zero length blks */ | ||
186 | if (start == end) | ||
187 | return 0; | ||
188 | |||
189 | /* whine about and ignore invalid blks */ | ||
190 | if (start > end || nid < 0 || nid >= MAX_NUMNODES) { | ||
191 | pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n", | ||
192 | nid, start, end); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | if (mi->nr_blks >= NR_NODE_MEMBLKS) { | ||
197 | pr_err("NUMA: too many memblk ranges\n"); | ||
198 | return -EINVAL; | ||
199 | } | ||
200 | |||
201 | mi->blk[mi->nr_blks].start = start; | ||
202 | mi->blk[mi->nr_blks].end = end; | ||
203 | mi->blk[mi->nr_blks].nid = nid; | ||
204 | mi->nr_blks++; | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo | ||
210 | * @idx: Index of memblk to remove | ||
211 | * @mi: numa_meminfo to remove memblk from | ||
212 | * | ||
213 | * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and | ||
214 | * decrementing @mi->nr_blks. | ||
215 | */ | ||
216 | void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) | ||
217 | { | ||
218 | mi->nr_blks--; | ||
219 | memmove(&mi->blk[idx], &mi->blk[idx + 1], | ||
220 | (mi->nr_blks - idx) * sizeof(mi->blk[0])); | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * numa_add_memblk - Add one numa_memblk to numa_meminfo | ||
225 | * @nid: NUMA node ID of the new memblk | ||
226 | * @start: Start address of the new memblk | ||
227 | * @end: End address of the new memblk | ||
228 | * | ||
229 | * Add a new memblk to the default numa_meminfo. | ||
230 | * | ||
231 | * RETURNS: | ||
232 | * 0 on success, -errno on failure. | ||
233 | */ | ||
234 | int __init numa_add_memblk(int nid, u64 start, u64 end) | ||
235 | { | ||
236 | return numa_add_memblk_to(nid, start, end, &numa_meminfo); | ||
237 | } | ||
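A hedged sketch of the producer side of this interface, modeled on the SRAT parsing later in this patch; acpi_map_pxm_to_node() is the real ACPI helper, the surrounding glue is illustrative:

	/* Register one SRAT memory-affinity entry with the NUMA core. */
	int nid = acpi_map_pxm_to_node(pxm);

	if (numa_add_memblk(nid, start, end) < 0) {
		bad_srat();		/* e.g. memblk table overflow */
		return;
	}
	node_set(nid, numa_nodes_parsed);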
238 | |||
239 | /* Initialize bootmem allocator for a node */ | ||
240 | void __init | ||
241 | setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) | ||
242 | { | ||
243 | unsigned long start_pfn, last_pfn, nodedata_phys; | ||
244 | const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE); | ||
245 | int nid; | ||
246 | |||
247 | if (!end) | ||
248 | return; | ||
249 | |||
250 | /* | ||
251 | * Don't confuse VM with a node that doesn't have the | ||
252 | * minimum amount of memory: | ||
253 | */ | ||
254 | if (end && (end - start) < NODE_MIN_SIZE) | ||
255 | return; | ||
256 | |||
257 | start = roundup(start, ZONE_ALIGN); | ||
258 | |||
259 | printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid, | ||
260 | start, end); | ||
261 | |||
262 | start_pfn = start >> PAGE_SHIFT; | ||
263 | last_pfn = end >> PAGE_SHIFT; | ||
264 | |||
265 | node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size, | ||
266 | SMP_CACHE_BYTES); | ||
267 | if (node_data[nodeid] == NULL) | ||
268 | return; | ||
269 | nodedata_phys = __pa(node_data[nodeid]); | ||
270 | memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA"); | ||
271 | printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys, | ||
272 | nodedata_phys + pgdat_size - 1); | ||
273 | nid = phys_to_nid(nodedata_phys); | ||
274 | if (nid != nodeid) | ||
275 | printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid); | ||
276 | |||
277 | memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t)); | ||
278 | NODE_DATA(nodeid)->node_id = nodeid; | ||
279 | NODE_DATA(nodeid)->node_start_pfn = start_pfn; | ||
280 | NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn; | ||
281 | |||
282 | node_set_online(nodeid); | ||
283 | } | ||
284 | |||
285 | /** | ||
286 | * numa_cleanup_meminfo - Cleanup a numa_meminfo | ||
287 | * @mi: numa_meminfo to clean up | ||
288 | * | ||
289 | * Sanitize @mi by merging and removing unnecessary memblks. Also check for | ||
290 | * conflicts and clear unused memblks. | ||
291 | * | ||
292 | * RETURNS: | ||
293 | * 0 on success, -errno on failure. | ||
294 | */ | ||
295 | int __init numa_cleanup_meminfo(struct numa_meminfo *mi) | ||
296 | { | ||
297 | const u64 low = 0; | ||
298 | const u64 high = (u64)max_pfn << PAGE_SHIFT; | ||
299 | int i, j, k; | ||
300 | |||
301 | for (i = 0; i < mi->nr_blks; i++) { | ||
302 | struct numa_memblk *bi = &mi->blk[i]; | ||
303 | |||
304 | /* make sure all blocks are inside the limits */ | ||
305 | bi->start = max(bi->start, low); | ||
306 | bi->end = min(bi->end, high); | ||
307 | |||
308 | /* and there's no empty block */ | ||
309 | if (bi->start >= bi->end) { | ||
310 | numa_remove_memblk_from(i--, mi); | ||
311 | continue; | ||
312 | } | ||
313 | |||
314 | for (j = i + 1; j < mi->nr_blks; j++) { | ||
315 | struct numa_memblk *bj = &mi->blk[j]; | ||
316 | unsigned long start, end; | ||
317 | |||
318 | /* | ||
319 | * See whether there are overlapping blocks. Whine | ||
320 | * about but allow overlaps of the same nid. They | ||
321 | * will be merged below. | ||
322 | */ | ||
323 | if (bi->end > bj->start && bi->start < bj->end) { | ||
324 | if (bi->nid != bj->nid) { | ||
325 | pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", | ||
326 | bi->nid, bi->start, bi->end, | ||
327 | bj->nid, bj->start, bj->end); | ||
328 | return -EINVAL; | ||
329 | } | ||
330 | pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", | ||
331 | bi->nid, bi->start, bi->end, | ||
332 | bj->start, bj->end); | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * Join together blocks on the same node, holes | ||
337 | * between which don't overlap with memory on other | ||
338 | * nodes. | ||
339 | */ | ||
340 | if (bi->nid != bj->nid) | ||
341 | continue; | ||
342 | start = max(min(bi->start, bj->start), low); | ||
343 | end = min(max(bi->end, bj->end), high); | ||
344 | for (k = 0; k < mi->nr_blks; k++) { | ||
345 | struct numa_memblk *bk = &mi->blk[k]; | ||
346 | |||
347 | if (bi->nid == bk->nid) | ||
348 | continue; | ||
349 | if (start < bk->end && end > bk->start) | ||
350 | break; | ||
351 | } | ||
352 | if (k < mi->nr_blks) | ||
353 | continue; | ||
354 | printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", | ||
355 | bi->nid, bi->start, bi->end, bj->start, bj->end, | ||
356 | start, end); | ||
357 | bi->start = start; | ||
358 | bi->end = end; | ||
359 | numa_remove_memblk_from(j--, mi); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) { | ||
364 | mi->blk[i].start = mi->blk[i].end = 0; | ||
365 | mi->blk[i].nid = NUMA_NO_NODE; | ||
366 | } | ||
367 | |||
368 | return 0; | ||
369 | } | ||
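A worked pass of the merge rule above, with illustrative addresses:

	/*
	 * Node 0 owns [0x00000000, 0x80000000) and [0xc0000000, 0x100000000);
	 * the candidate merge is [0x00000000, 0x100000000).  The k-loop then
	 * scans every block of every *other* node: if node 1 owns, say,
	 * [0x80000000, 0xc0000000), the candidate overlaps it and the merge
	 * is skipped; if the hole is genuinely empty, the two blocks
	 * collapse into one and blk[j] is removed.
	 */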
370 | |||
371 | /* | ||
372 | * Set nodes which have memory in @mi in *@nodemask. | ||
373 | */ | ||
374 | static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, | ||
375 | const struct numa_meminfo *mi) | ||
376 | { | ||
377 | int i; | ||
378 | |||
379 | for (i = 0; i < ARRAY_SIZE(mi->blk); i++) | ||
380 | if (mi->blk[i].start != mi->blk[i].end && | ||
381 | mi->blk[i].nid != NUMA_NO_NODE) | ||
382 | node_set(mi->blk[i].nid, *nodemask); | ||
383 | } | ||
384 | |||
385 | /** | ||
386 | * numa_reset_distance - Reset NUMA distance table | ||
387 | * | ||
388 | * The current table is freed. The next numa_set_distance() call will | ||
389 | * create a new one. | ||
390 | */ | ||
391 | void __init numa_reset_distance(void) | ||
392 | { | ||
393 | size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); | ||
394 | |||
395 | /* numa_distance could be 1LU marking allocation failure, test cnt */ | ||
396 | if (numa_distance_cnt) | ||
397 | memblock_x86_free_range(__pa(numa_distance), | ||
398 | __pa(numa_distance) + size); | ||
399 | numa_distance_cnt = 0; | ||
400 | numa_distance = NULL; /* enable table creation */ | ||
401 | } | ||
402 | |||
403 | static int __init numa_alloc_distance(void) | ||
404 | { | ||
405 | nodemask_t nodes_parsed; | ||
406 | size_t size; | ||
407 | int i, j, cnt = 0; | ||
408 | u64 phys; | ||
409 | |||
410 | /* size the new table and allocate it */ | ||
411 | nodes_parsed = numa_nodes_parsed; | ||
412 | numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); | ||
413 | |||
414 | for_each_node_mask(i, nodes_parsed) | ||
415 | cnt = i; | ||
416 | cnt++; | ||
417 | size = cnt * cnt * sizeof(numa_distance[0]); | ||
418 | |||
419 | phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT, | ||
420 | size, PAGE_SIZE); | ||
421 | if (phys == MEMBLOCK_ERROR) { | ||
422 | pr_warning("NUMA: Warning: can't allocate distance table!\n"); | ||
423 | /* don't retry until explicitly reset */ | ||
424 | numa_distance = (void *)1LU; | ||
425 | return -ENOMEM; | ||
426 | } | ||
427 | memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); | ||
428 | |||
429 | numa_distance = __va(phys); | ||
430 | numa_distance_cnt = cnt; | ||
431 | |||
432 | /* fill with the default distances */ | ||
433 | for (i = 0; i < cnt; i++) | ||
434 | for (j = 0; j < cnt; j++) | ||
435 | numa_distance[i * cnt + j] = i == j ? | ||
436 | LOCAL_DISTANCE : REMOTE_DISTANCE; | ||
437 | printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); | ||
438 | |||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | /** | ||
443 | * numa_set_distance - Set NUMA distance from one NUMA to another | ||
444 | * @from: the 'from' node to set distance | ||
445 | * @to: the 'to' node to set distance | ||
446 | * @distance: NUMA distance | ||
447 | * | ||
448 | * Set the distance from node @from to @to to @distance. If distance table | ||
449 | * doesn't exist, one which is large enough to accommodate all the currently | ||
450 | * known nodes will be created. | ||
451 | * | ||
452 | * If such a table cannot be allocated, a warning is printed and further | ||
453 | * calls are ignored until the distance table is reset with | ||
454 | * numa_reset_distance(). | ||
455 | * | ||
456 | * If @from or @to is higher than the highest known node at the time of | ||
457 | * table creation or @distance doesn't make sense, the call is ignored. | ||
458 | * This is to allow simplification of specific NUMA config implementations. | ||
459 | */ | ||
460 | void __init numa_set_distance(int from, int to, int distance) | ||
461 | { | ||
462 | if (!numa_distance && numa_alloc_distance() < 0) | ||
463 | return; | ||
464 | |||
465 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) { | ||
466 | printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", | ||
467 | from, to, distance); | ||
468 | return; | ||
469 | } | ||
470 | |||
471 | if ((u8)distance != distance || | ||
472 | (from == to && distance != LOCAL_DISTANCE)) { | ||
473 | pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", | ||
474 | from, to, distance); | ||
475 | return; | ||
476 | } | ||
477 | |||
478 | numa_distance[from * numa_distance_cnt + to] = distance; | ||
479 | } | ||
480 | |||
481 | int __node_distance(int from, int to) | ||
482 | { | ||
483 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) | ||
484 | return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; | ||
485 | return numa_distance[from * numa_distance_cnt + to]; | ||
486 | } | ||
487 | EXPORT_SYMBOL(__node_distance); | ||
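A sketch of the intended producer for this table, modeled on the ACPI SLIT parser; pxm_to_node() and the slit structure layout come from drivers/acpi and are assumptions here, not part of this hunk:

	/* Fill the kernel's distance matrix from a firmware SLIT. */
	for (i = 0; i < slit->locality_count; i++)
		for (j = 0; j < slit->locality_count; j++)
			numa_set_distance(pxm_to_node(i), pxm_to_node(j),
				slit->entry[slit->locality_count * i + j]);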
488 | |||
489 | /* | ||
490 | * Sanity check to catch more bad NUMA configurations (they are amazingly | ||
491 | * common). Make sure the nodes cover all memory. | ||
492 | */ | ||
493 | static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) | ||
494 | { | ||
495 | unsigned long numaram, e820ram; | ||
496 | int i; | ||
497 | |||
498 | numaram = 0; | ||
499 | for (i = 0; i < mi->nr_blks; i++) { | ||
500 | unsigned long s = mi->blk[i].start >> PAGE_SHIFT; | ||
501 | unsigned long e = mi->blk[i].end >> PAGE_SHIFT; | ||
502 | numaram += e - s; | ||
503 | numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); | ||
504 | if ((long)numaram < 0) | ||
505 | numaram = 0; | ||
506 | } | ||
507 | |||
508 | e820ram = max_pfn - (memblock_x86_hole_size(0, | ||
509 | max_pfn << PAGE_SHIFT) >> PAGE_SHIFT); | ||
510 | /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ | ||
511 | if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { | ||
512 | printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n", | ||
513 | (numaram << PAGE_SHIFT) >> 20, | ||
514 | (e820ram << PAGE_SHIFT) >> 20); | ||
515 | return false; | ||
516 | } | ||
517 | return true; | ||
518 | } | ||
519 | |||
520 | static int __init numa_register_memblks(struct numa_meminfo *mi) | ||
521 | { | ||
522 | int i, nid; | ||
523 | |||
524 | /* Account for nodes with cpus and no memory */ | ||
525 | node_possible_map = numa_nodes_parsed; | ||
526 | numa_nodemask_from_meminfo(&node_possible_map, mi); | ||
527 | if (WARN_ON(nodes_empty(node_possible_map))) | ||
528 | return -EINVAL; | ||
529 | |||
530 | memnode_shift = compute_hash_shift(mi); | ||
531 | if (memnode_shift < 0) { | ||
532 | printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n"); | ||
533 | return -EINVAL; | ||
534 | } | ||
535 | |||
536 | for (i = 0; i < mi->nr_blks; i++) | ||
537 | memblock_x86_register_active_regions(mi->blk[i].nid, | ||
538 | mi->blk[i].start >> PAGE_SHIFT, | ||
539 | mi->blk[i].end >> PAGE_SHIFT); | ||
540 | |||
541 | /* for out of order entries */ | ||
542 | sort_node_map(); | ||
543 | if (!numa_meminfo_cover_memory(mi)) | ||
544 | return -EINVAL; | ||
545 | |||
546 | /* Finally register nodes. */ | ||
547 | for_each_node_mask(nid, node_possible_map) { | ||
548 | u64 start = (u64)max_pfn << PAGE_SHIFT; | ||
549 | u64 end = 0; | ||
550 | |||
551 | for (i = 0; i < mi->nr_blks; i++) { | ||
552 | if (nid != mi->blk[i].nid) | ||
553 | continue; | ||
554 | start = min(mi->blk[i].start, start); | ||
555 | end = max(mi->blk[i].end, end); | ||
556 | } | ||
557 | |||
558 | if (start < end) | ||
559 | setup_node_bootmem(nid, start, end); | ||
560 | } | ||
561 | |||
562 | return 0; | ||
563 | } | ||
564 | |||
565 | /** | ||
566 | * dummy_numa_init - Fallback dummy NUMA init | ||
567 | * | ||
568 | * Used if there's no underlying NUMA architecture, NUMA initialization | ||
569 | * fails, or NUMA is disabled on the command line. | ||
570 | * | ||
571 | * Must online at least one node and add memory blocks that cover all | ||
572 | * allowed memory. This function must not fail. | ||
573 | */ | ||
574 | static int __init dummy_numa_init(void) | ||
575 | { | ||
576 | printk(KERN_INFO "%s\n", | ||
577 | numa_off ? "NUMA turned off" : "No NUMA configuration found"); | ||
578 | printk(KERN_INFO "Faking a node at %016lx-%016lx\n", | ||
579 | 0LU, max_pfn << PAGE_SHIFT); | ||
580 | |||
581 | node_set(0, numa_nodes_parsed); | ||
582 | numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); | ||
583 | |||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | static int __init numa_init(int (*init_func)(void)) | ||
588 | { | ||
589 | int i; | ||
590 | int ret; | ||
591 | |||
592 | for (i = 0; i < MAX_LOCAL_APIC; i++) | ||
593 | set_apicid_to_node(i, NUMA_NO_NODE); | ||
594 | |||
595 | nodes_clear(numa_nodes_parsed); | ||
596 | nodes_clear(node_possible_map); | ||
597 | nodes_clear(node_online_map); | ||
598 | memset(&numa_meminfo, 0, sizeof(numa_meminfo)); | ||
599 | remove_all_active_ranges(); | ||
600 | numa_reset_distance(); | ||
601 | |||
602 | ret = init_func(); | ||
603 | if (ret < 0) | ||
604 | return ret; | ||
605 | ret = numa_cleanup_meminfo(&numa_meminfo); | ||
606 | if (ret < 0) | ||
607 | return ret; | ||
608 | |||
609 | numa_emulation(&numa_meminfo, numa_distance_cnt); | ||
610 | |||
611 | ret = numa_register_memblks(&numa_meminfo); | ||
612 | if (ret < 0) | ||
613 | return ret; | ||
614 | |||
615 | for (i = 0; i < nr_cpu_ids; i++) { | ||
616 | int nid = early_cpu_to_node(i); | ||
617 | |||
618 | if (nid == NUMA_NO_NODE) | ||
619 | continue; | ||
620 | if (!node_online(nid)) | ||
621 | numa_clear_node(i); | ||
622 | } | ||
623 | numa_init_array(); | ||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | void __init initmem_init(void) | 9 | void __init initmem_init(void) |
628 | { | 10 | { |
629 | int ret; | 11 | x86_numa_init(); |
630 | |||
631 | if (!numa_off) { | ||
632 | #ifdef CONFIG_ACPI_NUMA | ||
633 | ret = numa_init(x86_acpi_numa_init); | ||
634 | if (!ret) | ||
635 | return; | ||
636 | #endif | ||
637 | #ifdef CONFIG_AMD_NUMA | ||
638 | ret = numa_init(amd_numa_init); | ||
639 | if (!ret) | ||
640 | return; | ||
641 | #endif | ||
642 | } | ||
643 | |||
644 | numa_init(dummy_numa_init); | ||
645 | } | 12 | } |
646 | 13 | ||
647 | unsigned long __init numa_free_all_bootmem(void) | 14 | unsigned long __init numa_free_all_bootmem(void) |
@@ -656,12 +23,3 @@ unsigned long __init numa_free_all_bootmem(void) | |||
656 | 23 | ||
657 | return pages; | 24 | return pages; |
658 | } | 25 | } |
659 | |||
660 | int __cpuinit numa_cpu_node(int cpu) | ||
661 | { | ||
662 | int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); | ||
663 | |||
664 | if (apicid != BAD_APICID) | ||
665 | return __apicid_to_node[apicid]; | ||
666 | return NUMA_NO_NODE; | ||
667 | } | ||
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index de84cc140379..d0ed086b6247 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/errno.h> | 5 | #include <linux/errno.h> |
6 | #include <linux/topology.h> | 6 | #include <linux/topology.h> |
7 | #include <linux/memblock.h> | 7 | #include <linux/memblock.h> |
8 | #include <linux/bootmem.h> | ||
8 | #include <asm/dma.h> | 9 | #include <asm/dma.h> |
9 | 10 | ||
10 | #include "numa_internal.h" | 11 | #include "numa_internal.h" |
@@ -84,7 +85,13 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, | |||
84 | nr_nodes = MAX_NUMNODES; | 85 | nr_nodes = MAX_NUMNODES; |
85 | } | 86 | } |
86 | 87 | ||
87 | size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes; | 88 | /* |
89 | * Calculate target node size. x86_32 freaks on __udivdi3() so do | ||
90 | * the division in ulong number of pages and convert back. | ||
91 | */ | ||
92 | size = max_addr - addr - memblock_x86_hole_size(addr, max_addr); | ||
93 | size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes); | ||
94 | |||
88 | /* | 95 | /* |
89 | * Calculate the number of big nodes that can be allocated as a result | 96 | * Calculate the number of big nodes that can be allocated as a result |
90 | * of consolidating the remainder. | 97 | * of consolidating the remainder. |
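The __udivdi3() remark deserves one concrete line; an illustration of the compiler behavior (not patch code):

	u64 bytes = 0x200000000ULL;	/* 8 GiB of emulated RAM, say   */
	int nr_nodes = 3;
	u64 bad  = bytes / nr_nodes;	/* 64-by-32 divide: gcc emits a
					 * call to libgcc's __udivdi3(),
					 * which the kernel doesn't link */
	u64 good = PFN_PHYS((unsigned long)(bytes >> PAGE_SHIFT) / nr_nodes);
					/* 32-bit divide, widened back   */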
@@ -226,7 +233,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, | |||
226 | */ | 233 | */ |
227 | while (nodes_weight(physnode_mask)) { | 234 | while (nodes_weight(physnode_mask)) { |
228 | for_each_node_mask(i, physnode_mask) { | 235 | for_each_node_mask(i, physnode_mask) { |
229 | u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT; | 236 | u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); |
230 | u64 start, limit, end; | 237 | u64 start, limit, end; |
231 | int phys_blk; | 238 | int phys_blk; |
232 | 239 | ||
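PFN_PHYS() here is a correctness fix on 32-bit, not just style, assuming the usual <linux/pfn.h> definition #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT):

	/* MAX_DMA32_PFN is an unsigned long (0x100000), so on x86_32 the
	 * plain shift is done in 32 bits and overflows to 0 *before* the
	 * widening assignment; PFN_PHYS() casts to phys_addr_t first. */
	u64 end_bad  = MAX_DMA32_PFN << PAGE_SHIFT;	/* == 0 on 32-bit    */
	u64 end_good = PFN_PHYS(MAX_DMA32_PFN);		/* == 0x100000000ULL */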
@@ -298,7 +305,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) | |||
298 | { | 305 | { |
299 | static struct numa_meminfo ei __initdata; | 306 | static struct numa_meminfo ei __initdata; |
300 | static struct numa_meminfo pi __initdata; | 307 | static struct numa_meminfo pi __initdata; |
301 | const u64 max_addr = max_pfn << PAGE_SHIFT; | 308 | const u64 max_addr = PFN_PHYS(max_pfn); |
302 | u8 *phys_dist = NULL; | 309 | u8 *phys_dist = NULL; |
303 | size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]); | 310 | size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]); |
304 | int max_emu_nid, dfl_phys_nid; | 311 | int max_emu_nid, dfl_phys_nid; |
@@ -342,8 +349,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) | |||
342 | if (numa_dist_cnt) { | 349 | if (numa_dist_cnt) { |
343 | u64 phys; | 350 | u64 phys; |
344 | 351 | ||
345 | phys = memblock_find_in_range(0, | 352 | phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), |
346 | (u64)max_pfn_mapped << PAGE_SHIFT, | ||
347 | phys_size, PAGE_SIZE); | 353 | phys_size, PAGE_SIZE); |
348 | if (phys == MEMBLOCK_ERROR) { | 354 | if (phys == MEMBLOCK_ERROR) { |
349 | pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); | 355 | pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); |
diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h index ef2d97377d7c..7178c3afe05e 100644 --- a/arch/x86/mm/numa_internal.h +++ b/arch/x86/mm/numa_internal.h | |||
@@ -19,6 +19,14 @@ void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi); | |||
19 | int __init numa_cleanup_meminfo(struct numa_meminfo *mi); | 19 | int __init numa_cleanup_meminfo(struct numa_meminfo *mi); |
20 | void __init numa_reset_distance(void); | 20 | void __init numa_reset_distance(void); |
21 | 21 | ||
22 | void __init x86_numa_init(void); | ||
23 | |||
24 | #ifdef CONFIG_X86_64 | ||
25 | static inline void init_alloc_remap(int nid, u64 start, u64 end) { } | ||
26 | #else | ||
27 | void __init init_alloc_remap(int nid, u64 start, u64 end); | ||
28 | #endif | ||
29 | |||
22 | #ifdef CONFIG_NUMA_EMU | 30 | #ifdef CONFIG_NUMA_EMU |
23 | void __init numa_emulation(struct numa_meminfo *numa_meminfo, | 31 | void __init numa_emulation(struct numa_meminfo *numa_meminfo, |
24 | int numa_dist_cnt); | 32 | int numa_dist_cnt); |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat.c index 8e9d3394f6d4..81dbfdeb080d 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat.c | |||
@@ -26,8 +26,6 @@ | |||
26 | 26 | ||
27 | int acpi_numa __initdata; | 27 | int acpi_numa __initdata; |
28 | 28 | ||
29 | static struct bootnode nodes_add[MAX_NUMNODES]; | ||
30 | |||
31 | static __init int setup_node(int pxm) | 29 | static __init int setup_node(int pxm) |
32 | { | 30 | { |
33 | return acpi_map_pxm_to_node(pxm); | 31 | return acpi_map_pxm_to_node(pxm); |
@@ -37,7 +35,6 @@ static __init void bad_srat(void) | |||
37 | { | 35 | { |
38 | printk(KERN_ERR "SRAT: SRAT not used.\n"); | 36 | printk(KERN_ERR "SRAT: SRAT not used.\n"); |
39 | acpi_numa = -1; | 37 | acpi_numa = -1; |
40 | memset(nodes_add, 0, sizeof(nodes_add)); | ||
41 | } | 38 | } |
42 | 39 | ||
43 | static __init inline int srat_disabled(void) | 40 | static __init inline int srat_disabled(void) |
@@ -131,73 +128,17 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) | |||
131 | pxm, apic_id, node); | 128 | pxm, apic_id, node); |
132 | } | 129 | } |
133 | 130 | ||
134 | #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE | 131 | #ifdef CONFIG_MEMORY_HOTPLUG |
135 | static inline int save_add_info(void) {return 1;} | 132 | static inline int save_add_info(void) {return 1;} |
136 | #else | 133 | #else |
137 | static inline int save_add_info(void) {return 0;} | 134 | static inline int save_add_info(void) {return 0;} |
138 | #endif | 135 | #endif |
139 | /* | ||
140 | * Update nodes_add[] | ||
141 | * This code supports one contiguous hot add area per node | ||
142 | */ | ||
143 | static void __init | ||
144 | update_nodes_add(int node, unsigned long start, unsigned long end) | ||
145 | { | ||
146 | unsigned long s_pfn = start >> PAGE_SHIFT; | ||
147 | unsigned long e_pfn = end >> PAGE_SHIFT; | ||
148 | int changed = 0; | ||
149 | struct bootnode *nd = &nodes_add[node]; | ||
150 | |||
151 | /* I had some trouble with strange memory hotadd regions breaking | ||
152 | the boot. Be very strict here and reject anything unexpected. | ||
153 | If you want working memory hotadd, write correct SRATs. | ||
154 | |||
155 | The node size check is a basic sanity check to guard against | ||
156 | mistakes */ | ||
157 | if ((signed long)(end - start) < NODE_MIN_SIZE) { | ||
158 | printk(KERN_ERR "SRAT: Hotplug area too small\n"); | ||
159 | return; | ||
160 | } | ||
161 | |||
162 | /* This check might be a bit too strict, but I'm keeping it for now. */ | ||
163 | if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) { | ||
164 | printk(KERN_ERR | ||
165 | "SRAT: Hotplug area %lu -> %lu has existing memory\n", | ||
166 | s_pfn, e_pfn); | ||
167 | return; | ||
168 | } | ||
169 | |||
170 | /* Looks good */ | ||
171 | |||
172 | if (nd->start == nd->end) { | ||
173 | nd->start = start; | ||
174 | nd->end = end; | ||
175 | changed = 1; | ||
176 | } else { | ||
177 | if (nd->start == end) { | ||
178 | nd->start = start; | ||
179 | changed = 1; | ||
180 | } | ||
181 | if (nd->end == start) { | ||
182 | nd->end = end; | ||
183 | changed = 1; | ||
184 | } | ||
185 | if (!changed) | ||
186 | printk(KERN_ERR "SRAT: Hotplug zone not contiguous. Partly ignored\n"); | ||
187 | } | ||
188 | |||
189 | if (changed) { | ||
190 | node_set(node, numa_nodes_parsed); | ||
191 | printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", | ||
192 | nd->start, nd->end); | ||
193 | } | ||
194 | } | ||
195 | 136 | ||
196 | /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ | 137 | /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ |
197 | void __init | 138 | void __init |
198 | acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | 139 | acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) |
199 | { | 140 | { |
200 | unsigned long start, end; | 141 | u64 start, end; |
201 | int node, pxm; | 142 | int node, pxm; |
202 | 143 | ||
203 | if (srat_disabled()) | 144 | if (srat_disabled()) |
@@ -226,11 +167,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | |||
226 | return; | 167 | return; |
227 | } | 168 | } |
228 | 169 | ||
229 | printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm, | 170 | printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm, |
230 | start, end); | 171 | start, end); |
231 | |||
232 | if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) | ||
233 | update_nodes_add(node, start, end); | ||
234 | } | 172 | } |
235 | 173 | ||
236 | void __init acpi_numa_arch_fixup(void) {} | 174 | void __init acpi_numa_arch_fixup(void) {} |
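The switch from %lx to %Lx above matters once srat.c is shared with 32-bit; an illustration (not patch code):

	/* On x86_32, unsigned long is 32 bits, so %lx on a u64 argument
	 * truncates it and misaligns any following varargs; %Lx consumes
	 * the full 64-bit value. */
	u64 start = 0x100000000ULL;			/* 4 GiB, illustrative */
	printk("SRAT: %lx\n", (unsigned long)start);	/* prints "SRAT: 0"    */
	printk("SRAT: %Lx\n", start);			/* "SRAT: 100000000"   */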
@@ -244,17 +182,3 @@ int __init x86_acpi_numa_init(void) | |||
244 | return ret; | 182 | return ret; |
245 | return srat_disabled() ? -EINVAL : 0; | 183 | return srat_disabled() ? -EINVAL : 0; |
246 | } | 184 | } |
247 | |||
248 | #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) | ||
249 | int memory_add_physaddr_to_nid(u64 start) | ||
250 | { | ||
251 | int i, ret = 0; | ||
252 | |||
253 | for_each_node(i) | ||
254 | if (nodes_add[i].start <= start && nodes_add[i].end > start) | ||
255 | ret = i; | ||
256 | |||
257 | return ret; | ||
258 | } | ||
259 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | ||
260 | #endif | ||
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c deleted file mode 100644 index 364f36bdfad8..000000000000 --- a/arch/x86/mm/srat_32.c +++ /dev/null | |||
@@ -1,288 +0,0 @@ | |||
1 | /* | ||
2 | * Some of the code in this file has been gleaned from the 64 bit | ||
3 | * discontigmem support code base. | ||
4 | * | ||
5 | * Copyright (C) 2002, IBM Corp. | ||
6 | * | ||
7 | * All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
17 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
18 | * details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
23 | * | ||
24 | * Send feedback to Pat Gaughen <gone@us.ibm.com> | ||
25 | */ | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/bootmem.h> | ||
28 | #include <linux/memblock.h> | ||
29 | #include <linux/mmzone.h> | ||
30 | #include <linux/acpi.h> | ||
31 | #include <linux/nodemask.h> | ||
32 | #include <asm/srat.h> | ||
33 | #include <asm/topology.h> | ||
34 | #include <asm/smp.h> | ||
35 | #include <asm/e820.h> | ||
36 | |||
37 | /* | ||
38 | * proximity macros and definitions | ||
39 | */ | ||
40 | #define NODE_ARRAY_INDEX(x) ((x) / 8) /* 8 bits/char */ | ||
41 | #define NODE_ARRAY_OFFSET(x) ((x) % 8) /* 8 bits/char */ | ||
42 | #define BMAP_SET(bmap, bit) ((bmap)[NODE_ARRAY_INDEX(bit)] |= 1 << NODE_ARRAY_OFFSET(bit)) | ||
43 | #define BMAP_TEST(bmap, bit) ((bmap)[NODE_ARRAY_INDEX(bit)] & (1 << NODE_ARRAY_OFFSET(bit))) | ||
44 | /* bitmap length; _PXM is at most 255 */ | ||
45 | #define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8) | ||
46 | static u8 __initdata pxm_bitmap[PXM_BITMAP_LEN]; /* bitmap of proximity domains */ | ||
47 | |||
48 | #define MAX_CHUNKS_PER_NODE 3 | ||
49 | #define MAXCHUNKS (MAX_CHUNKS_PER_NODE * MAX_NUMNODES) | ||
50 | struct node_memory_chunk_s { | ||
51 | unsigned long start_pfn; | ||
52 | unsigned long end_pfn; | ||
53 | u8 pxm; // proximity domain of node | ||
54 | u8 nid; // which cnode contains this chunk? | ||
55 | u8 bank; // which mem bank on this node | ||
56 | }; | ||
57 | static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS]; | ||
58 | |||
59 | static int __initdata num_memory_chunks; /* total number of memory chunks */ | ||
60 | static u8 __initdata apicid_to_pxm[MAX_LOCAL_APIC]; | ||
61 | |||
62 | int acpi_numa __initdata; | ||
63 | |||
64 | static __init void bad_srat(void) | ||
65 | { | ||
66 | printk(KERN_ERR "SRAT: SRAT not used.\n"); | ||
67 | acpi_numa = -1; | ||
68 | num_memory_chunks = 0; | ||
69 | } | ||
70 | |||
71 | static __init inline int srat_disabled(void) | ||
72 | { | ||
73 | return numa_off || acpi_numa < 0; | ||
74 | } | ||
75 | |||
76 | /* Identify CPU proximity domains */ | ||
77 | void __init | ||
78 | acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *cpu_affinity) | ||
79 | { | ||
80 | if (srat_disabled()) | ||
81 | return; | ||
82 | if (cpu_affinity->header.length != | ||
83 | sizeof(struct acpi_srat_cpu_affinity)) { | ||
84 | bad_srat(); | ||
85 | return; | ||
86 | } | ||
87 | |||
88 | if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0) | ||
89 | return; /* empty entry */ | ||
90 | |||
91 | /* mark this node as "seen" in node bitmap */ | ||
92 | BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo); | ||
93 | |||
94 | /* don't need to check apic_id here, because it is always 8 bits */ | ||
95 | apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo; | ||
96 | |||
97 | printk(KERN_DEBUG "CPU %02x in proximity domain %02x\n", | ||
98 | cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo); | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * Identify memory proximity domains and hot-remove capabilities. | ||
103 | * Fill node memory chunk list structure. | ||
104 | */ | ||
105 | void __init | ||
106 | acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *memory_affinity) | ||
107 | { | ||
108 | unsigned long long paddr, size; | ||
109 | unsigned long start_pfn, end_pfn; | ||
110 | u8 pxm; | ||
111 | struct node_memory_chunk_s *p, *q, *pend; | ||
112 | |||
113 | if (srat_disabled()) | ||
114 | return; | ||
115 | if (memory_affinity->header.length != | ||
116 | sizeof(struct acpi_srat_mem_affinity)) { | ||
117 | bad_srat(); | ||
118 | return; | ||
119 | } | ||
120 | |||
121 | if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0) | ||
122 | return; /* empty entry */ | ||
123 | |||
124 | pxm = memory_affinity->proximity_domain & 0xff; | ||
125 | |||
126 | /* mark this node as "seen" in node bitmap */ | ||
127 | BMAP_SET(pxm_bitmap, pxm); | ||
128 | |||
129 | /* calculate info for memory chunk structure */ | ||
130 | paddr = memory_affinity->base_address; | ||
131 | size = memory_affinity->length; | ||
132 | |||
133 | start_pfn = paddr >> PAGE_SHIFT; | ||
134 | end_pfn = (paddr + size) >> PAGE_SHIFT; | ||
135 | |||
136 | |||
137 | if (num_memory_chunks >= MAXCHUNKS) { | ||
138 | printk(KERN_WARNING "Too many mem chunks in SRAT." | ||
139 | " Ignoring %lld MBytes at %llx\n", | ||
140 | size/(1024*1024), paddr); | ||
141 | return; | ||
142 | } | ||
143 | |||
144 | /* Insertion sort based on base address */ | ||
145 | pend = &node_memory_chunk[num_memory_chunks]; | ||
146 | for (p = &node_memory_chunk[0]; p < pend; p++) { | ||
147 | if (start_pfn < p->start_pfn) | ||
148 | break; | ||
149 | } | ||
150 | if (p < pend) { | ||
151 | for (q = pend; q >= p; q--) | ||
152 | *(q + 1) = *q; | ||
153 | } | ||
154 | p->start_pfn = start_pfn; | ||
155 | p->end_pfn = end_pfn; | ||
156 | p->pxm = pxm; | ||
157 | |||
158 | num_memory_chunks++; | ||
159 | |||
160 | printk(KERN_DEBUG "Memory range %08lx to %08lx" | ||
161 | " in proximity domain %02x %s\n", | ||
162 | start_pfn, end_pfn, | ||
163 | pxm, | ||
164 | ((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ? | ||
165 | "enabled and removable" : "enabled" ) ); | ||
166 | } | ||
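[Editor's note] acpi_numa_memory_affinity_init() keeps node_memory_chunk[] ordered by start_pfn with a shift-and-insert step as each SRAT entry arrives. A minimal standalone sketch of that pattern (illustrative names, not kernel code):

    /* Hedged sketch of the shift-and-insert above: keep an array sorted as
     * elements arrive; 'n' plays the role of num_memory_chunks. */
    #include <stdio.h>

    static unsigned long chunk_start[8];
    static int n;

    static void insert_sorted(unsigned long start)
    {
            int i, j;

            for (i = 0; i < n; i++)                 /* find insertion point */
                    if (start < chunk_start[i])
                            break;
            for (j = n; j > i; j--)                 /* shift the tail right */
                    chunk_start[j] = chunk_start[j - 1];
            chunk_start[i] = start;
            n++;
    }

    int main(void)
    {
            unsigned long v[] = { 30, 10, 20 };
            int i;

            for (i = 0; i < 3; i++)
                    insert_sorted(v[i]);
            for (i = 0; i < n; i++)
                    printf("%lu ", chunk_start[i]);  /* 10 20 30 */
            printf("\n");
            return 0;
    }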
167 | |||
168 | /* Callback for SLIT parsing */ | ||
169 | void __init acpi_numa_slit_init(struct acpi_table_slit *slit) | ||
170 | { | ||
171 | } | ||
172 | |||
173 | void acpi_numa_arch_fixup(void) | ||
174 | { | ||
175 | } | ||
176 | /* | ||
177 | * The SRAT table always lists ascending addresses, so we can always | ||
178 | * assume that the first "start" address we see is the real | ||
179 | * start of the node, and that the current "end" address is after | ||
180 | * the previous one. | ||
181 | */ | ||
182 | static __init int node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk) | ||
183 | { | ||
184 | /* | ||
185 | * Only add present memory as told by the e820. | ||
186 | * There is no guarantee from the SRAT that the memory it | ||
187 | * enumerates is present at boot time because it represents | ||
188 | * *possible* memory hotplug areas the same as normal RAM. | ||
189 | */ | ||
190 | if (memory_chunk->start_pfn >= max_pfn) { | ||
191 | printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n", | ||
192 | memory_chunk->start_pfn, memory_chunk->end_pfn); | ||
193 | return -1; | ||
194 | } | ||
195 | if (memory_chunk->nid != nid) | ||
196 | return -1; | ||
197 | |||
198 | if (!node_has_online_mem(nid)) | ||
199 | node_start_pfn[nid] = memory_chunk->start_pfn; | ||
200 | |||
201 | if (node_start_pfn[nid] > memory_chunk->start_pfn) | ||
202 | node_start_pfn[nid] = memory_chunk->start_pfn; | ||
203 | |||
204 | if (node_end_pfn[nid] < memory_chunk->end_pfn) | ||
205 | node_end_pfn[nid] = memory_chunk->end_pfn; | ||
206 | |||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | int __init get_memcfg_from_srat(void) | ||
211 | { | ||
212 | int i, j, nid; | ||
213 | |||
214 | if (srat_disabled()) | ||
215 | goto out_fail; | ||
216 | |||
217 | if (acpi_numa_init() < 0) | ||
218 | goto out_fail; | ||
219 | |||
220 | if (num_memory_chunks == 0) { | ||
221 | printk(KERN_DEBUG | ||
222 | "could not find any ACPI SRAT memory areas.\n"); | ||
223 | goto out_fail; | ||
224 | } | ||
225 | |||
226 | /* Calculate total number of nodes in system from PXM bitmap and create | ||
227 | * a set of sequential node IDs starting at zero. (ACPI doesn't seem | ||
228 | * to specify the range of _PXM values.) | ||
229 | */ | ||
230 | /* | ||
231 | * MCD - we no longer HAVE to number nodes sequentially. PXM domain | ||
232 | * numbers could go as high as 256, and MAX_NUMNODES for i386 is typically | ||
233 | * 32, so we will continue numbering them in this manner until MAX_NUMNODES | ||
234 | * approaches MAX_PXM_DOMAINS for i386. | ||
235 | */ | ||
236 | nodes_clear(node_online_map); | ||
237 | for (i = 0; i < MAX_PXM_DOMAINS; i++) { | ||
238 | if (BMAP_TEST(pxm_bitmap, i)) { | ||
239 | int nid = acpi_map_pxm_to_node(i); | ||
240 | node_set_online(nid); | ||
241 | } | ||
242 | } | ||
243 | BUG_ON(num_online_nodes() == 0); | ||
244 | |||
245 | /* set cnode id in memory chunk structure */ | ||
246 | for (i = 0; i < num_memory_chunks; i++) | ||
247 | node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm); | ||
248 | |||
249 | printk(KERN_DEBUG "pxm bitmap: "); | ||
250 | for (i = 0; i < sizeof(pxm_bitmap); i++) { | ||
251 | printk(KERN_CONT "%02x ", pxm_bitmap[i]); | ||
252 | } | ||
253 | printk(KERN_CONT "\n"); | ||
254 | printk(KERN_DEBUG "Number of logical nodes in system = %d\n", | ||
255 | num_online_nodes()); | ||
256 | printk(KERN_DEBUG "Number of memory chunks in system = %d\n", | ||
257 | num_memory_chunks); | ||
258 | |||
259 | for (i = 0; i < MAX_LOCAL_APIC; i++) | ||
260 | set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i])); | ||
261 | |||
262 | for (j = 0; j < num_memory_chunks; j++){ | ||
263 | struct node_memory_chunk_s * chunk = &node_memory_chunk[j]; | ||
264 | printk(KERN_DEBUG | ||
265 | "chunk %d nid %d start_pfn %08lx end_pfn %08lx\n", | ||
266 | j, chunk->nid, chunk->start_pfn, chunk->end_pfn); | ||
267 | if (node_read_chunk(chunk->nid, chunk)) | ||
268 | continue; | ||
269 | |||
270 | memblock_x86_register_active_regions(chunk->nid, chunk->start_pfn, | ||
271 | min(chunk->end_pfn, max_pfn)); | ||
272 | } | ||
273 | /* for out of order entries in SRAT */ | ||
274 | sort_node_map(); | ||
275 | |||
276 | for_each_online_node(nid) { | ||
277 | unsigned long start = node_start_pfn[nid]; | ||
278 | unsigned long end = min(node_end_pfn[nid], max_pfn); | ||
279 | |||
280 | memory_present(nid, start, end); | ||
281 | node_remap_size[nid] = node_memmap_size_bytes(nid, start, end); | ||
282 | } | ||
283 | return 1; | ||
284 | out_fail: | ||
285 | printk(KERN_DEBUG "failed to get NUMA memory information from SRAT" | ||
286 | " table\n"); | ||
287 | return 0; | ||
288 | } | ||
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index e37b407a0ee8..8214724ce54d 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c | |||
@@ -108,7 +108,8 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
108 | } | 108 | } |
109 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, | 109 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, |
110 | (type == PCI_CAP_ID_MSIX) ? | 110 | (type == PCI_CAP_ID_MSIX) ? |
111 | "msi-x" : "msi"); | 111 | "msi-x" : "msi", |
112 | DOMID_SELF); | ||
112 | if (irq < 0) | 113 | if (irq < 0) |
113 | goto error; | 114 | goto error; |
114 | dev_dbg(&dev->dev, | 115 | dev_dbg(&dev->dev, |
@@ -148,7 +149,8 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
148 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, | 149 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, |
149 | (type == PCI_CAP_ID_MSIX) ? | 150 | (type == PCI_CAP_ID_MSIX) ? |
150 | "pcifront-msi-x" : | 151 | "pcifront-msi-x" : |
151 | "pcifront-msi"); | 152 | "pcifront-msi", |
153 | DOMID_SELF); | ||
152 | if (irq < 0) | 154 | if (irq < 0) |
153 | goto free; | 155 | goto free; |
154 | i++; | 156 | i++; |
@@ -190,9 +192,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
190 | 192 | ||
191 | list_for_each_entry(msidesc, &dev->msi_list, list) { | 193 | list_for_each_entry(msidesc, &dev->msi_list, list) { |
192 | struct physdev_map_pirq map_irq; | 194 | struct physdev_map_pirq map_irq; |
195 | domid_t domid; | ||
196 | |||
197 | domid = ret = xen_find_device_domain_owner(dev); | ||
198 | /* N.B. Casting the int -ENODEV to uint16_t results in 0xFFED, | ||
199 | * hence check the ret value for < 0. */ | ||
200 | if (ret < 0) | ||
201 | domid = DOMID_SELF; | ||
193 | 202 | ||
194 | memset(&map_irq, 0, sizeof(map_irq)); | 203 | memset(&map_irq, 0, sizeof(map_irq)); |
195 | map_irq.domid = DOMID_SELF; | 204 | map_irq.domid = domid; |
196 | map_irq.type = MAP_PIRQ_TYPE_MSI; | 205 | map_irq.type = MAP_PIRQ_TYPE_MSI; |
197 | map_irq.index = -1; | 206 | map_irq.index = -1; |
198 | map_irq.pirq = -1; | 207 | map_irq.pirq = -1; |
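[Editor's note] The N.B. in the hunk above is easy to verify: -ENODEV is -19, and truncating it to a 16-bit unsigned domid yields 0xFFED, which would otherwise pass for a valid domain ID. A tiny standalone check (assuming -ENODEV == -19, its usual value):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int ret = -19;                  /* -ENODEV */
            uint16_t domid = ret;           /* wraps modulo 2^16 */

            printf("0x%04X\n", domid);      /* prints 0xFFED */
            return 0;
    }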
@@ -215,14 +224,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
215 | 224 | ||
216 | ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); | 225 | ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); |
217 | if (ret) { | 226 | if (ret) { |
218 | dev_warn(&dev->dev, "xen map irq failed %d\n", ret); | 227 | dev_warn(&dev->dev, "xen map irq failed %d for domain %d\n",
228 | ret, domid); | ||
219 | goto out; | 229 | goto out; |
220 | } | 230 | } |
221 | 231 | ||
222 | ret = xen_bind_pirq_msi_to_irq(dev, msidesc, | 232 | ret = xen_bind_pirq_msi_to_irq(dev, msidesc, |
223 | map_irq.pirq, map_irq.index, | 233 | map_irq.pirq, map_irq.index, |
224 | (type == PCI_CAP_ID_MSIX) ? | 234 | (type == PCI_CAP_ID_MSIX) ? |
225 | "msi-x" : "msi"); | 235 | "msi-x" : "msi", |
236 | domid); | ||
226 | if (ret < 0) | 237 | if (ret < 0) |
227 | goto out; | 238 | goto out; |
228 | } | 239 | } |
@@ -461,3 +472,78 @@ void __init xen_setup_pirqs(void) | |||
461 | } | 472 | } |
462 | } | 473 | } |
463 | #endif | 474 | #endif |
475 | |||
476 | #ifdef CONFIG_XEN_DOM0 | ||
477 | struct xen_device_domain_owner { | ||
478 | domid_t domain; | ||
479 | struct pci_dev *dev; | ||
480 | struct list_head list; | ||
481 | }; | ||
482 | |||
483 | static DEFINE_SPINLOCK(dev_domain_list_spinlock); | ||
484 | static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list); | ||
485 | |||
486 | static struct xen_device_domain_owner *find_device(struct pci_dev *dev) | ||
487 | { | ||
488 | struct xen_device_domain_owner *owner; | ||
489 | |||
490 | list_for_each_entry(owner, &dev_domain_list, list) { | ||
491 | if (owner->dev == dev) | ||
492 | return owner; | ||
493 | } | ||
494 | return NULL; | ||
495 | } | ||
496 | |||
497 | int xen_find_device_domain_owner(struct pci_dev *dev) | ||
498 | { | ||
499 | struct xen_device_domain_owner *owner; | ||
500 | int domain = -ENODEV; | ||
501 | |||
502 | spin_lock(&dev_domain_list_spinlock); | ||
503 | owner = find_device(dev); | ||
504 | if (owner) | ||
505 | domain = owner->domain; | ||
506 | spin_unlock(&dev_domain_list_spinlock); | ||
507 | return domain; | ||
508 | } | ||
509 | EXPORT_SYMBOL_GPL(xen_find_device_domain_owner); | ||
510 | |||
511 | int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain) | ||
512 | { | ||
513 | struct xen_device_domain_owner *owner; | ||
514 | |||
515 | owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL); | ||
516 | if (!owner) | ||
517 | return -ENODEV; | ||
518 | |||
519 | spin_lock(&dev_domain_list_spinlock); | ||
520 | if (find_device(dev)) { | ||
521 | spin_unlock(&dev_domain_list_spinlock); | ||
522 | kfree(owner); | ||
523 | return -EEXIST; | ||
524 | } | ||
525 | owner->domain = domain; | ||
526 | owner->dev = dev; | ||
527 | list_add_tail(&owner->list, &dev_domain_list); | ||
528 | spin_unlock(&dev_domain_list_spinlock); | ||
529 | return 0; | ||
530 | } | ||
531 | EXPORT_SYMBOL_GPL(xen_register_device_domain_owner); | ||
532 | |||
533 | int xen_unregister_device_domain_owner(struct pci_dev *dev) | ||
534 | { | ||
535 | struct xen_device_domain_owner *owner; | ||
536 | |||
537 | spin_lock(&dev_domain_list_spinlock); | ||
538 | owner = find_device(dev); | ||
539 | if (!owner) { | ||
540 | spin_unlock(&dev_domain_list_spinlock); | ||
541 | return -ENODEV; | ||
542 | } | ||
543 | list_del(&owner->list); | ||
544 | spin_unlock(&dev_domain_list_spinlock); | ||
545 | kfree(owner); | ||
546 | return 0; | ||
547 | } | ||
548 | EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner); | ||
549 | #endif | ||
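[Editor's note] A sketch of how a caller (for example a backend handing a device to a guest) might use the owner-tracking API added above. The flow is illustrative only, and <asm/xen/pci.h> is assumed to be where the prototypes live:

    /* Illustrative only: record that 'domid' owns 'dev' before mapping MSIs
     * on its behalf, and release the claim on teardown. */
    #include <linux/pci.h>
    #include <asm/xen/pci.h>

    static int demo_assign(struct pci_dev *dev, uint16_t domid)
    {
            int err = xen_register_device_domain_owner(dev, domid);

            if (err)        /* -EEXIST if the device is already claimed */
                    return err;

            /* later, xen_initdom_setup_msi_irqs() looks the owner up: */
            WARN_ON(xen_find_device_domain_owner(dev) != domid);
            return 0;
    }

    static void demo_release(struct pci_dev *dev)
    {
            xen_unregister_device_domain_owner(dev);    /* -ENODEV if absent */
    }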
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 0fe27d7c6258..b30aa26a8df2 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -145,17 +145,6 @@ static void virt_efi_reset_system(int reset_type, | |||
145 | data_size, data); | 145 | data_size, data); |
146 | } | 146 | } |
147 | 147 | ||
148 | static efi_status_t virt_efi_set_virtual_address_map( | ||
149 | unsigned long memory_map_size, | ||
150 | unsigned long descriptor_size, | ||
151 | u32 descriptor_version, | ||
152 | efi_memory_desc_t *virtual_map) | ||
153 | { | ||
154 | return efi_call_virt4(set_virtual_address_map, | ||
155 | memory_map_size, descriptor_size, | ||
156 | descriptor_version, virtual_map); | ||
157 | } | ||
158 | |||
159 | static efi_status_t __init phys_efi_set_virtual_address_map( | 148 | static efi_status_t __init phys_efi_set_virtual_address_map( |
160 | unsigned long memory_map_size, | 149 | unsigned long memory_map_size, |
161 | unsigned long descriptor_size, | 150 | unsigned long descriptor_size, |
@@ -468,11 +457,25 @@ void __init efi_init(void) | |||
468 | #endif | 457 | #endif |
469 | } | 458 | } |
470 | 459 | ||
460 | void __init efi_set_executable(efi_memory_desc_t *md, bool executable) | ||
461 | { | ||
462 | u64 addr, npages; | ||
463 | |||
464 | addr = md->virt_addr; | ||
465 | npages = md->num_pages; | ||
466 | |||
467 | memrange_efi_to_native(&addr, &npages); | ||
468 | |||
469 | if (executable) | ||
470 | set_memory_x(addr, npages); | ||
471 | else | ||
472 | set_memory_nx(addr, npages); | ||
473 | } | ||
474 | |||
471 | static void __init runtime_code_page_mkexec(void) | 475 | static void __init runtime_code_page_mkexec(void) |
472 | { | 476 | { |
473 | efi_memory_desc_t *md; | 477 | efi_memory_desc_t *md; |
474 | void *p; | 478 | void *p; |
475 | u64 addr, npages; | ||
476 | 479 | ||
477 | /* Make EFI runtime service code area executable */ | 480 | /* Make EFI runtime service code area executable */ |
478 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 481 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { |
@@ -481,10 +484,7 @@ static void __init runtime_code_page_mkexec(void) | |||
481 | if (md->type != EFI_RUNTIME_SERVICES_CODE) | 484 | if (md->type != EFI_RUNTIME_SERVICES_CODE) |
482 | continue; | 485 | continue; |
483 | 486 | ||
484 | addr = md->virt_addr; | 487 | efi_set_executable(md, true); |
485 | npages = md->num_pages; | ||
486 | memrange_efi_to_native(&addr, &npages); | ||
487 | set_memory_x(addr, npages); | ||
488 | } | 488 | } |
489 | } | 489 | } |
490 | 490 | ||
@@ -498,13 +498,42 @@ static void __init runtime_code_page_mkexec(void) | |||
498 | */ | 498 | */ |
499 | void __init efi_enter_virtual_mode(void) | 499 | void __init efi_enter_virtual_mode(void) |
500 | { | 500 | { |
501 | efi_memory_desc_t *md; | 501 | efi_memory_desc_t *md, *prev_md = NULL; |
502 | efi_status_t status; | 502 | efi_status_t status; |
503 | unsigned long size; | 503 | unsigned long size; |
504 | u64 end, systab, addr, npages, end_pfn; | 504 | u64 end, systab, addr, npages, end_pfn; |
505 | void *p, *va; | 505 | void *p, *va, *new_memmap = NULL; |
506 | int count = 0; | ||
506 | 507 | ||
507 | efi.systab = NULL; | 508 | efi.systab = NULL; |
509 | |||
510 | /* Merge contiguous regions of the same type and attribute */ | ||
511 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
512 | u64 prev_size; | ||
513 | md = p; | ||
514 | |||
515 | if (!prev_md) { | ||
516 | prev_md = md; | ||
517 | continue; | ||
518 | } | ||
519 | |||
520 | if (prev_md->type != md->type || | ||
521 | prev_md->attribute != md->attribute) { | ||
522 | prev_md = md; | ||
523 | continue; | ||
524 | } | ||
525 | |||
526 | prev_size = prev_md->num_pages << EFI_PAGE_SHIFT; | ||
527 | |||
528 | if (md->phys_addr == (prev_md->phys_addr + prev_size)) { | ||
529 | prev_md->num_pages += md->num_pages; | ||
530 | md->type = EFI_RESERVED_TYPE; | ||
531 | md->attribute = 0; | ||
532 | continue; | ||
533 | } | ||
534 | prev_md = md; | ||
535 | } | ||
536 | |||
508 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 537 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { |
509 | md = p; | 538 | md = p; |
510 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) | 539 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) |
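[Editor's note] The merge loop above coalesces physically contiguous descriptors of identical type and attribute, retiring each absorbed entry by rewriting it as EFI_RESERVED_TYPE. A compact userspace sketch of the same coalescing pass (type check only; the attribute comparison is dropped for brevity):

    /* Hedged sketch: fold an entry into its predecessor when the types match
     * and the physical ranges abut (4 KiB EFI pages). */
    #include <stdio.h>
    #include <stdint.h>

    struct desc { uint64_t phys, pages; int type; };

    static void merge(struct desc *d, int n)
    {
            struct desc *prev = NULL;
            int i;

            for (i = 0; i < n; i++) {
                    if (prev && prev->type == d[i].type &&
                        d[i].phys == prev->phys + prev->pages * 4096) {
                            prev->pages += d[i].pages;
                            d[i].type = -1;     /* retired, like RESERVED */
                            continue;
                    }
                    prev = &d[i];
            }
    }

    int main(void)
    {
            struct desc d[] = { { 0, 1, 0 }, { 4096, 2, 0 }, { 12288, 1, 1 } };
            int i;

            merge(d, 3);
            for (i = 0; i < 3; i++)
                    printf("phys=%llu pages=%llu type=%d\n",
                           (unsigned long long)d[i].phys,
                           (unsigned long long)d[i].pages, d[i].type);
            return 0;
    }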
@@ -541,15 +570,21 @@ void __init efi_enter_virtual_mode(void) | |||
541 | systab += md->virt_addr - md->phys_addr; | 570 | systab += md->virt_addr - md->phys_addr; |
542 | efi.systab = (efi_system_table_t *) (unsigned long) systab; | 571 | efi.systab = (efi_system_table_t *) (unsigned long) systab; |
543 | } | 572 | } |
573 | new_memmap = krealloc(new_memmap, | ||
574 | (count + 1) * memmap.desc_size, | ||
575 | GFP_KERNEL); | ||
576 | memcpy(new_memmap + (count * memmap.desc_size), md, | ||
577 | memmap.desc_size); | ||
578 | count++; | ||
544 | } | 579 | } |
545 | 580 | ||
546 | BUG_ON(!efi.systab); | 581 | BUG_ON(!efi.systab); |
547 | 582 | ||
548 | status = phys_efi_set_virtual_address_map( | 583 | status = phys_efi_set_virtual_address_map( |
549 | memmap.desc_size * memmap.nr_map, | 584 | memmap.desc_size * count, |
550 | memmap.desc_size, | 585 | memmap.desc_size, |
551 | memmap.desc_version, | 586 | memmap.desc_version, |
552 | memmap.phys_map); | 587 | (efi_memory_desc_t *)__pa(new_memmap)); |
553 | 588 | ||
554 | if (status != EFI_SUCCESS) { | 589 | if (status != EFI_SUCCESS) { |
555 | printk(KERN_ALERT "Unable to switch EFI into virtual mode " | 590 | printk(KERN_ALERT "Unable to switch EFI into virtual mode " |
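[Editor's note] The second loop packs only EFI_MEMORY_RUNTIME descriptors into new_memmap, growing the buffer one descriptor at a time with krealloc() so that SetVirtualAddressMap() sees just the runtime regions. A sketch of that grow-by-one idiom in plain C, with realloc() standing in for krealloc():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hedged sketch: append fixed-size records to a heap buffer one at a
     * time; on allocation failure the original buffer stays valid. */
    static void *append(void *buf, int *count, const void *rec, size_t size)
    {
            void *tmp = realloc(buf, ((size_t)*count + 1) * size);

            if (!tmp)
                    return NULL;
            memcpy((char *)tmp + (size_t)*count * size, rec, size);
            (*count)++;
            return tmp;
    }

    int main(void)
    {
            int vals[] = { 1, 2, 3 }, n = 0, i;
            void *buf = NULL, *tmp;

            for (i = 0; i < 3; i++) {
                    tmp = append(buf, &n, &vals[i], sizeof(int));
                    if (tmp)
                            buf = tmp;
            }
            for (i = 0; i < n; i++)
                    printf("%d ", ((int *)buf)[i]);     /* 1 2 3 */
            printf("\n");
            free(buf);
            return 0;
    }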
@@ -572,11 +607,12 @@ void __init efi_enter_virtual_mode(void) | |||
572 | efi.set_variable = virt_efi_set_variable; | 607 | efi.set_variable = virt_efi_set_variable; |
573 | efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; | 608 | efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; |
574 | efi.reset_system = virt_efi_reset_system; | 609 | efi.reset_system = virt_efi_reset_system; |
575 | efi.set_virtual_address_map = virt_efi_set_virtual_address_map; | 610 | efi.set_virtual_address_map = NULL; |
576 | if (__supported_pte_mask & _PAGE_NX) | 611 | if (__supported_pte_mask & _PAGE_NX) |
577 | runtime_code_page_mkexec(); | 612 | runtime_code_page_mkexec(); |
578 | early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); | 613 | early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); |
579 | memmap.map = NULL; | 614 | memmap.map = NULL; |
615 | kfree(new_memmap); | ||
580 | } | 616 | } |
581 | 617 | ||
582 | /* | 618 | /* |
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index ac0621a7ac3d..2649426a7905 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -41,22 +41,7 @@ | |||
41 | static pgd_t save_pgd __initdata; | 41 | static pgd_t save_pgd __initdata; |
42 | static unsigned long efi_flags __initdata; | 42 | static unsigned long efi_flags __initdata; |
43 | 43 | ||
44 | static void __init early_mapping_set_exec(unsigned long start, | 44 | static void __init early_code_mapping_set_exec(int executable) |
45 | unsigned long end, | ||
46 | int executable) | ||
47 | { | ||
48 | unsigned long num_pages; | ||
49 | |||
50 | start &= PMD_MASK; | ||
51 | end = (end + PMD_SIZE - 1) & PMD_MASK; | ||
52 | num_pages = (end - start) >> PAGE_SHIFT; | ||
53 | if (executable) | ||
54 | set_memory_x((unsigned long)__va(start), num_pages); | ||
55 | else | ||
56 | set_memory_nx((unsigned long)__va(start), num_pages); | ||
57 | } | ||
58 | |||
59 | static void __init early_runtime_code_mapping_set_exec(int executable) | ||
60 | { | 45 | { |
61 | efi_memory_desc_t *md; | 46 | efi_memory_desc_t *md; |
62 | void *p; | 47 | void *p; |
@@ -67,11 +52,8 @@ static void __init early_runtime_code_mapping_set_exec(int executable) | |||
67 | /* Make EFI runtime service code area executable */ | 52 | /* Make EFI runtime service code area executable */ |
68 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 53 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { |
69 | md = p; | 54 | md = p; |
70 | if (md->type == EFI_RUNTIME_SERVICES_CODE) { | 55 | if (md->type == EFI_RUNTIME_SERVICES_CODE) |
71 | unsigned long end; | 56 | efi_set_executable(md, executable); |
72 | end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); | ||
73 | early_mapping_set_exec(md->phys_addr, end, executable); | ||
74 | } | ||
75 | } | 57 | } |
76 | } | 58 | } |
77 | 59 | ||
@@ -79,7 +61,7 @@ void __init efi_call_phys_prelog(void) | |||
79 | { | 61 | { |
80 | unsigned long vaddress; | 62 | unsigned long vaddress; |
81 | 63 | ||
82 | early_runtime_code_mapping_set_exec(1); | 64 | early_code_mapping_set_exec(1); |
83 | local_irq_save(efi_flags); | 65 | local_irq_save(efi_flags); |
84 | vaddress = (unsigned long)__va(0x0UL); | 66 | vaddress = (unsigned long)__va(0x0UL); |
85 | save_pgd = *pgd_offset_k(0x0UL); | 67 | save_pgd = *pgd_offset_k(0x0UL); |
@@ -95,7 +77,7 @@ void __init efi_call_phys_epilog(void) | |||
95 | set_pgd(pgd_offset_k(0x0UL), save_pgd); | 77 | set_pgd(pgd_offset_k(0x0UL), save_pgd); |
96 | __flush_tlb_all(); | 78 | __flush_tlb_all(); |
97 | local_irq_restore(efi_flags); | 79 | local_irq_restore(efi_flags); |
98 | early_runtime_code_mapping_set_exec(0); | 80 | early_code_mapping_set_exec(0); |
99 | } | 81 | } |
100 | 82 | ||
101 | void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, | 83 | void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, |
@@ -107,8 +89,10 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, | |||
107 | return ioremap(phys_addr, size); | 89 | return ioremap(phys_addr, size); |
108 | 90 | ||
109 | last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); | 91 | last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); |
110 | if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) | 92 | if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) { |
111 | return NULL; | 93 | unsigned long top = last_map_pfn << PAGE_SHIFT; |
94 | efi_ioremap(top, size - (top - phys_addr), type); | ||
95 | } | ||
112 | 96 | ||
113 | return (void __iomem *)__va(phys_addr); | 97 | return (void __iomem *)__va(phys_addr); |
114 | } | 98 | } |
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index 275dbc19e2cf..7000e74b3087 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
@@ -194,7 +194,7 @@ static unsigned long __init mrst_calibrate_tsc(void) | |||
194 | return 0; | 194 | return 0; |
195 | } | 195 | } |
196 | 196 | ||
197 | void __init mrst_time_init(void) | 197 | static void __init mrst_time_init(void) |
198 | { | 198 | { |
199 | sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr); | 199 | sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr); |
200 | switch (mrst_timer_options) { | 200 | switch (mrst_timer_options) { |
@@ -216,7 +216,7 @@ void __init mrst_time_init(void) | |||
216 | apbt_time_init(); | 216 | apbt_time_init(); |
217 | } | 217 | } |
218 | 218 | ||
219 | void __cpuinit mrst_arch_setup(void) | 219 | static void __cpuinit mrst_arch_setup(void) |
220 | { | 220 | { |
221 | if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) | 221 | if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) |
222 | __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL; | 222 | __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL; |
diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile index c2a8cab65e5d..81c5e2165c24 100644 --- a/arch/x86/platform/olpc/Makefile +++ b/arch/x86/platform/olpc/Makefile | |||
@@ -1,4 +1,2 @@ | |||
1 | obj-$(CONFIG_OLPC) += olpc.o | 1 | obj-$(CONFIG_OLPC) += olpc.o olpc_ofw.o olpc_dt.o |
2 | obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o | 2 | obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o |
3 | obj-$(CONFIG_OLPC) += olpc_ofw.o | ||
4 | obj-$(CONFIG_OF_PROMTREE) += olpc_dt.o | ||
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c index edaf3fe8dc5e..0060fd59ea00 100644 --- a/arch/x86/platform/olpc/olpc.c +++ b/arch/x86/platform/olpc/olpc.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/of.h> | ||
21 | 22 | ||
22 | #include <asm/geode.h> | 23 | #include <asm/geode.h> |
23 | #include <asm/setup.h> | 24 | #include <asm/setup.h> |
@@ -187,41 +188,43 @@ err: | |||
187 | } | 188 | } |
188 | EXPORT_SYMBOL_GPL(olpc_ec_cmd); | 189 | EXPORT_SYMBOL_GPL(olpc_ec_cmd); |
189 | 190 | ||
190 | static bool __init check_ofw_architecture(void) | 191 | static bool __init check_ofw_architecture(struct device_node *root) |
191 | { | 192 | { |
192 | size_t propsize; | 193 | const char *olpc_arch; |
193 | char olpc_arch[5]; | 194 | int propsize; |
194 | const void *args[] = { NULL, "architecture", olpc_arch, (void *)5 }; | ||
195 | void *res[] = { &propsize }; | ||
196 | 195 | ||
197 | if (olpc_ofw("getprop", args, res)) { | 196 | olpc_arch = of_get_property(root, "architecture", &propsize); |
198 | printk(KERN_ERR "ofw: getprop call failed!\n"); | ||
199 | return false; | ||
200 | } | ||
201 | return propsize == 5 && strncmp("OLPC", olpc_arch, 5) == 0; | 197 | return propsize == 5 && strncmp("OLPC", olpc_arch, 5) == 0; |
202 | } | 198 | } |
203 | 199 | ||
204 | static u32 __init get_board_revision(void) | 200 | static u32 __init get_board_revision(struct device_node *root) |
205 | { | 201 | { |
206 | size_t propsize; | 202 | int propsize; |
207 | __be32 rev; | 203 | const __be32 *rev; |
208 | const void *args[] = { NULL, "board-revision-int", &rev, (void *)4 }; | 204 | |
209 | void *res[] = { &propsize }; | 205 | rev = of_get_property(root, "board-revision-int", &propsize); |
210 | 206 | if (propsize != 4) | |
211 | if (olpc_ofw("getprop", args, res) || propsize != 4) { | 207 | return 0; |
212 | printk(KERN_ERR "ofw: getprop call failed!\n"); | 208 | |
213 | return cpu_to_be32(0); | 209 | return be32_to_cpu(*rev); |
214 | } | ||
215 | return be32_to_cpu(rev); | ||
216 | } | 210 | } |
217 | 211 | ||
218 | static bool __init platform_detect(void) | 212 | static bool __init platform_detect(void) |
219 | { | 213 | { |
220 | if (!check_ofw_architecture()) | 214 | struct device_node *root = of_find_node_by_path("/"); |
215 | bool success; | ||
216 | |||
217 | if (!root) | ||
221 | return false; | 218 | return false; |
222 | olpc_platform_info.flags |= OLPC_F_PRESENT; | 219 | |
223 | olpc_platform_info.boardrev = get_board_revision(); | 220 | success = check_ofw_architecture(root); |
224 | return true; | 221 | if (success) { |
222 | olpc_platform_info.boardrev = get_board_revision(root); | ||
223 | olpc_platform_info.flags |= OLPC_F_PRESENT; | ||
224 | } | ||
225 | |||
226 | of_node_put(root); | ||
227 | return success; | ||
225 | } | 228 | } |
226 | 229 | ||
227 | static int __init add_xo1_platform_devices(void) | 230 | static int __init add_xo1_platform_devices(void) |
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index 044bda5b3174..d39f63d017d2 100644 --- a/arch/x86/platform/olpc/olpc_dt.c +++ b/arch/x86/platform/olpc/olpc_dt.c | |||
@@ -19,7 +19,9 @@ | |||
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/bootmem.h> | 20 | #include <linux/bootmem.h> |
21 | #include <linux/of.h> | 21 | #include <linux/of.h> |
22 | #include <linux/of_platform.h> | ||
22 | #include <linux/of_pdt.h> | 23 | #include <linux/of_pdt.h> |
24 | #include <asm/olpc.h> | ||
23 | #include <asm/olpc_ofw.h> | 25 | #include <asm/olpc_ofw.h> |
24 | 26 | ||
25 | static phandle __init olpc_dt_getsibling(phandle node) | 27 | static phandle __init olpc_dt_getsibling(phandle node) |
@@ -180,3 +182,20 @@ void __init olpc_dt_build_devicetree(void) | |||
180 | pr_info("PROM DT: Built device tree with %u bytes of memory.\n", | 182 | pr_info("PROM DT: Built device tree with %u bytes of memory.\n", |
181 | prom_early_allocated); | 183 | prom_early_allocated); |
182 | } | 184 | } |
185 | |||
186 | /* A list of DT node/bus matches that we want to expose as platform devices */ | ||
187 | static struct of_device_id __initdata of_ids[] = { | ||
188 | { .compatible = "olpc,xo1-battery" }, | ||
189 | { .compatible = "olpc,xo1-dcon" }, | ||
190 | { .compatible = "olpc,xo1-rtc" }, | ||
191 | {}, | ||
192 | }; | ||
193 | |||
194 | static int __init olpc_create_platform_devices(void) | ||
195 | { | ||
196 | if (machine_is_olpc()) | ||
197 | return of_platform_bus_probe(NULL, of_ids, NULL); | ||
198 | else | ||
199 | return 0; | ||
200 | } | ||
201 | device_initcall(olpc_create_platform_devices); | ||
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 7cb6424317f6..c58e0ea39ef5 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -699,16 +699,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
699 | struct mm_struct *mm, | 699 | struct mm_struct *mm, |
700 | unsigned long va, unsigned int cpu) | 700 | unsigned long va, unsigned int cpu) |
701 | { | 701 | { |
702 | int tcpu; | ||
703 | int uvhub; | ||
704 | int locals = 0; | 702 | int locals = 0; |
705 | int remotes = 0; | 703 | int remotes = 0; |
706 | int hubs = 0; | 704 | int hubs = 0; |
705 | int tcpu; | ||
706 | int tpnode; | ||
707 | struct bau_desc *bau_desc; | 707 | struct bau_desc *bau_desc; |
708 | struct cpumask *flush_mask; | 708 | struct cpumask *flush_mask; |
709 | struct ptc_stats *stat; | 709 | struct ptc_stats *stat; |
710 | struct bau_control *bcp; | 710 | struct bau_control *bcp; |
711 | struct bau_control *tbcp; | 711 | struct bau_control *tbcp; |
712 | struct hub_and_pnode *hpp; | ||
712 | 713 | ||
713 | /* kernel was booted 'nobau' */ | 714 | /* kernel was booted 'nobau' */ |
714 | if (nobau) | 715 | if (nobau) |
@@ -750,11 +751,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
750 | bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; | 751 | bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; |
751 | bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); | 752 | bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); |
752 | 753 | ||
753 | /* cpu statistics */ | ||
754 | for_each_cpu(tcpu, flush_mask) { | 754 | for_each_cpu(tcpu, flush_mask) { |
755 | uvhub = uv_cpu_to_blade_id(tcpu); | 755 | /* |
756 | bau_uvhub_set(uvhub, &bau_desc->distribution); | 756 | * The distribution vector is a bit map of pnodes, relative |
757 | if (uvhub == bcp->uvhub) | 757 | * to the partition base pnode (and the partition base nasid |
758 | * in the header). | ||
759 | * Translate cpu to pnode and hub using an array stored | ||
760 | * in local memory. | ||
761 | */ | ||
762 | hpp = &bcp->socket_master->target_hub_and_pnode[tcpu]; | ||
763 | tpnode = hpp->pnode - bcp->partition_base_pnode; | ||
764 | bau_uvhub_set(tpnode, &bau_desc->distribution); | ||
765 | if (hpp->uvhub == bcp->uvhub) | ||
758 | locals++; | 766 | locals++; |
759 | else | 767 | else |
760 | remotes++; | 768 | remotes++; |
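[Editor's note] The rewritten loop expresses each destination as a pnode offset from the partition base, so the distribution bitmap stays meaningful even when pnodes are not consecutive. A toy illustration of base-relative bit indexing (the values are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned long distribution = 0;     /* stands in for the vector */
            int base_pnode = 8;
            int pnodes[] = { 8, 10, 13 };       /* not consecutive */
            int i;

            for (i = 0; i < 3; i++)
                    distribution |= 1UL << (pnodes[i] - base_pnode);

            printf("%#lx\n", distribution);     /* 0x25: bits 0, 2, 5 */
            return 0;
    }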
@@ -855,7 +863,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs) | |||
855 | * an interrupt, but causes an error message to be returned to | 863 | * an interrupt, but causes an error message to be returned to |
856 | * the sender. | 864 | * the sender. |
857 | */ | 865 | */ |
858 | static void uv_enable_timeouts(void) | 866 | static void __init uv_enable_timeouts(void) |
859 | { | 867 | { |
860 | int uvhub; | 868 | int uvhub; |
861 | int nuvhubs; | 869 | int nuvhubs; |
@@ -1326,10 +1334,10 @@ static int __init uv_ptc_init(void) | |||
1326 | } | 1334 | } |
1327 | 1335 | ||
1328 | /* | 1336 | /* |
1329 | * initialize the sending side's sending buffers | 1337 | * Initialize the sending side's sending buffers. |
1330 | */ | 1338 | */ |
1331 | static void | 1339 | static void |
1332 | uv_activation_descriptor_init(int node, int pnode) | 1340 | uv_activation_descriptor_init(int node, int pnode, int base_pnode) |
1333 | { | 1341 | { |
1334 | int i; | 1342 | int i; |
1335 | int cpu; | 1343 | int cpu; |
@@ -1352,11 +1360,11 @@ uv_activation_descriptor_init(int node, int pnode) | |||
1352 | n = pa >> uv_nshift; | 1360 | n = pa >> uv_nshift; |
1353 | m = pa & uv_mmask; | 1361 | m = pa & uv_mmask; |
1354 | 1362 | ||
1363 | /* the 14-bit pnode */ | ||
1355 | uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, | 1364 | uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, |
1356 | (n << UV_DESC_BASE_PNODE_SHIFT | m)); | 1365 | (n << UV_DESC_BASE_PNODE_SHIFT | m)); |
1357 | |||
1358 | /* | 1366 | /* |
1359 | * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each | 1367 | * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each |
1360 | * cpu even though we only use the first one; one descriptor can | 1368 | * cpu even though we only use the first one; one descriptor can |
1361 | * describe a broadcast to 256 uv hubs. | 1369 | * describe a broadcast to 256 uv hubs. |
1362 | */ | 1370 | */ |
@@ -1365,12 +1373,13 @@ uv_activation_descriptor_init(int node, int pnode) | |||
1365 | memset(bd2, 0, sizeof(struct bau_desc)); | 1373 | memset(bd2, 0, sizeof(struct bau_desc)); |
1366 | bd2->header.sw_ack_flag = 1; | 1374 | bd2->header.sw_ack_flag = 1; |
1367 | /* | 1375 | /* |
1368 | * base_dest_nodeid is the nasid of the first uvhub | 1376 | * The base_dest_nasid set in the message header is the nasid |
1369 | * in the partition. The bit map will indicate uvhub numbers, | 1377 | * of the first uvhub in the partition. The bit map will |
1370 | * which are 0-N in a partition. Pnodes are unique system-wide. | 1378 | * indicate destination pnode numbers relative to that base. |
1379 | * They may not be consecutive if nasid striding is being used. | ||
1371 | */ | 1380 | */ |
1372 | bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); | 1381 | bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode); |
1373 | bd2->header.dest_subnodeid = 0x10; /* the LB */ | 1382 | bd2->header.dest_subnodeid = UV_LB_SUBNODEID; |
1374 | bd2->header.command = UV_NET_ENDPOINT_INTD; | 1383 | bd2->header.command = UV_NET_ENDPOINT_INTD; |
1375 | bd2->header.int_both = 1; | 1384 | bd2->header.int_both = 1; |
1376 | /* | 1385 | /* |
@@ -1442,7 +1451,7 @@ uv_payload_queue_init(int node, int pnode) | |||
1442 | /* | 1451 | /* |
1443 | * Initialization of each UV hub's structures | 1452 | * Initialization of each UV hub's structures |
1444 | */ | 1453 | */ |
1445 | static void __init uv_init_uvhub(int uvhub, int vector) | 1454 | static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode) |
1446 | { | 1455 | { |
1447 | int node; | 1456 | int node; |
1448 | int pnode; | 1457 | int pnode; |
@@ -1450,11 +1459,11 @@ static void __init uv_init_uvhub(int uvhub, int vector) | |||
1450 | 1459 | ||
1451 | node = uvhub_to_first_node(uvhub); | 1460 | node = uvhub_to_first_node(uvhub); |
1452 | pnode = uv_blade_to_pnode(uvhub); | 1461 | pnode = uv_blade_to_pnode(uvhub); |
1453 | uv_activation_descriptor_init(node, pnode); | 1462 | uv_activation_descriptor_init(node, pnode, base_pnode); |
1454 | uv_payload_queue_init(node, pnode); | 1463 | uv_payload_queue_init(node, pnode); |
1455 | /* | 1464 | /* |
1456 | * the below initialization can't be in firmware because the | 1465 | * The below initialization can't be in firmware because the |
1457 | * messaging IRQ will be determined by the OS | 1466 | * messaging IRQ will be determined by the OS. |
1458 | */ | 1467 | */ |
1459 | apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; | 1468 | apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; |
1460 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, | 1469 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, |
@@ -1491,10 +1500,11 @@ calculate_destination_timeout(void) | |||
1491 | /* | 1500 | /* |
1492 | * initialize the bau_control structure for each cpu | 1501 | * initialize the bau_control structure for each cpu |
1493 | */ | 1502 | */ |
1494 | static int __init uv_init_per_cpu(int nuvhubs) | 1503 | static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode) |
1495 | { | 1504 | { |
1496 | int i; | 1505 | int i; |
1497 | int cpu; | 1506 | int cpu; |
1507 | int tcpu; | ||
1498 | int pnode; | 1508 | int pnode; |
1499 | int uvhub; | 1509 | int uvhub; |
1500 | int have_hmaster; | 1510 | int have_hmaster; |
@@ -1528,6 +1538,15 @@ static int __init uv_init_per_cpu(int nuvhubs) | |||
1528 | bcp = &per_cpu(bau_control, cpu); | 1538 | bcp = &per_cpu(bau_control, cpu); |
1529 | memset(bcp, 0, sizeof(struct bau_control)); | 1539 | memset(bcp, 0, sizeof(struct bau_control)); |
1530 | pnode = uv_cpu_hub_info(cpu)->pnode; | 1540 | pnode = uv_cpu_hub_info(cpu)->pnode; |
1541 | if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) { | ||
1542 | printk(KERN_EMERG | ||
1543 | "cpu %d pnode %d-%d beyond %d; BAU disabled\n", | ||
1544 | cpu, pnode, base_part_pnode, | ||
1545 | UV_DISTRIBUTION_SIZE); | ||
1546 | return 1; | ||
1547 | } | ||
1548 | bcp->osnode = cpu_to_node(cpu); | ||
1549 | bcp->partition_base_pnode = uv_partition_base_pnode; | ||
1531 | uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; | 1550 | uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; |
1532 | *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); | 1551 | *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); |
1533 | bdp = &uvhub_descs[uvhub]; | 1552 | bdp = &uvhub_descs[uvhub]; |
@@ -1536,7 +1555,7 @@ static int __init uv_init_per_cpu(int nuvhubs) | |||
1536 | bdp->pnode = pnode; | 1555 | bdp->pnode = pnode; |
1537 | /* kludge: 'assuming' one node per socket, and assuming that | 1556 | /* kludge: 'assuming' one node per socket, and assuming that |
1538 | disabling a socket just leaves a gap in node numbers */ | 1557 | disabling a socket just leaves a gap in node numbers */ |
1539 | socket = (cpu_to_node(cpu) & 1); | 1558 | socket = bcp->osnode & 1; |
1540 | bdp->socket_mask |= (1 << socket); | 1559 | bdp->socket_mask |= (1 << socket); |
1541 | sdp = &bdp->socket[socket]; | 1560 | sdp = &bdp->socket[socket]; |
1542 | sdp->cpu_number[sdp->num_cpus] = cpu; | 1561 | sdp->cpu_number[sdp->num_cpus] = cpu; |
@@ -1585,6 +1604,20 @@ static int __init uv_init_per_cpu(int nuvhubs) | |||
1585 | nextsocket: | 1604 | nextsocket: |
1586 | socket++; | 1605 | socket++; |
1587 | socket_mask = (socket_mask >> 1); | 1606 | socket_mask = (socket_mask >> 1); |
1607 | /* each socket gets a local array of pnodes/hubs */ | ||
1608 | bcp = smaster; | ||
1609 | bcp->target_hub_and_pnode = kmalloc_node( | ||
1610 | sizeof(struct hub_and_pnode) * | ||
1611 | num_possible_cpus(), GFP_KERNEL, bcp->osnode); | ||
1612 | memset(bcp->target_hub_and_pnode, 0, | ||
1613 | sizeof(struct hub_and_pnode) * | ||
1614 | num_possible_cpus()); | ||
1615 | for_each_present_cpu(tcpu) { | ||
1616 | bcp->target_hub_and_pnode[tcpu].pnode = | ||
1617 | uv_cpu_hub_info(tcpu)->pnode; | ||
1618 | bcp->target_hub_and_pnode[tcpu].uvhub = | ||
1619 | uv_cpu_hub_info(tcpu)->numa_blade_id; | ||
1620 | } | ||
1588 | } | 1621 | } |
1589 | } | 1622 | } |
1590 | kfree(uvhub_descs); | 1623 | kfree(uvhub_descs); |
@@ -1637,21 +1670,22 @@ static int __init uv_bau_init(void) | |||
1637 | spin_lock_init(&disable_lock); | 1670 | spin_lock_init(&disable_lock); |
1638 | congested_cycles = microsec_2_cycles(congested_response_us); | 1671 | congested_cycles = microsec_2_cycles(congested_response_us); |
1639 | 1672 | ||
1640 | if (uv_init_per_cpu(nuvhubs)) { | ||
1641 | nobau = 1; | ||
1642 | return 0; | ||
1643 | } | ||
1644 | |||
1645 | uv_partition_base_pnode = 0x7fffffff; | 1673 | uv_partition_base_pnode = 0x7fffffff; |
1646 | for (uvhub = 0; uvhub < nuvhubs; uvhub++) | 1674 | for (uvhub = 0; uvhub < nuvhubs; uvhub++) { |
1647 | if (uv_blade_nr_possible_cpus(uvhub) && | 1675 | if (uv_blade_nr_possible_cpus(uvhub) && |
1648 | (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) | 1676 | (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) |
1649 | uv_partition_base_pnode = uv_blade_to_pnode(uvhub); | 1677 | uv_partition_base_pnode = uv_blade_to_pnode(uvhub); |
1678 | } | ||
1679 | |||
1680 | if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) { | ||
1681 | nobau = 1; | ||
1682 | return 0; | ||
1683 | } | ||
1650 | 1684 | ||
1651 | vector = UV_BAU_MESSAGE; | 1685 | vector = UV_BAU_MESSAGE; |
1652 | for_each_possible_blade(uvhub) | 1686 | for_each_possible_blade(uvhub) |
1653 | if (uv_blade_nr_possible_cpus(uvhub)) | 1687 | if (uv_blade_nr_possible_cpus(uvhub)) |
1654 | uv_init_uvhub(uvhub, vector); | 1688 | uv_init_uvhub(uvhub, vector, uv_partition_base_pnode); |
1655 | 1689 | ||
1656 | uv_enable_timeouts(); | 1690 | uv_enable_timeouts(); |
1657 | alloc_intr_gate(vector, uv_bau_message_intr1); | 1691 | alloc_intr_gate(vector, uv_bau_message_intr1); |
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index 9daf5d1af9f1..0eb90184515f 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c | |||
@@ -40,7 +40,6 @@ static struct clocksource clocksource_uv = { | |||
40 | .rating = 400, | 40 | .rating = 400, |
41 | .read = uv_read_rtc, | 41 | .read = uv_read_rtc, |
42 | .mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK, | 42 | .mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK, |
43 | .shift = 10, | ||
44 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 43 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
45 | }; | 44 | }; |
46 | 45 | ||
@@ -372,14 +371,11 @@ static __init int uv_rtc_setup_clock(void) | |||
372 | if (!is_uv_system()) | 371 | if (!is_uv_system()) |
373 | return -ENODEV; | 372 | return -ENODEV; |
374 | 373 | ||
375 | clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second, | ||
376 | clocksource_uv.shift); | ||
377 | |||
378 | /* If single blade, prefer tsc */ | 374 | /* If single blade, prefer tsc */ |
379 | if (uv_num_possible_blades() == 1) | 375 | if (uv_num_possible_blades() == 1) |
380 | clocksource_uv.rating = 250; | 376 | clocksource_uv.rating = 250; |
381 | 377 | ||
382 | rc = clocksource_register(&clocksource_uv); | 378 | rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second); |
383 | if (rc) | 379 | if (rc) |
384 | printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc); | 380 | printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc); |
385 | else | 381 | else |
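[Editor's note] clocksource_register_hz() derives mult and shift from the supplied frequency internally, replacing the open-coded clocksource_hz2mult() call removed above. The conversion itself is still a fixed-point multiply; roughly, as a sketch rather than the exact kernel computation:

    /* Hedged sketch: ns = (cycles * mult) >> shift, with mult chosen so
     * that one second of cycles maps to 1e9 ns. Frequency is made up. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t hz = 25000000;         /* illustrative RTC frequency */
            uint32_t shift = 10;
            uint32_t mult = (uint32_t)((1000000000ULL << shift) / hz);
            uint64_t cycles = hz;           /* one second's worth */

            printf("ns = %llu\n",           /* prints ns = 1000000000 */
                   (unsigned long long)((cycles * mult) >> shift));
            return 0;
    }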
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index e3c6a06cf725..dd7b88f2ec7a 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -235,7 +235,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, | |||
235 | *dx &= maskedx; | 235 | *dx &= maskedx; |
236 | } | 236 | } |
237 | 237 | ||
238 | static __init void xen_init_cpuid_mask(void) | 238 | static void __init xen_init_cpuid_mask(void) |
239 | { | 239 | { |
240 | unsigned int ax, bx, cx, dx; | 240 | unsigned int ax, bx, cx, dx; |
241 | unsigned int xsave_mask; | 241 | unsigned int xsave_mask; |
@@ -400,7 +400,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr) | |||
400 | /* | 400 | /* |
401 | * load_gdt for early boot, when the gdt is only mapped once | 401 | * load_gdt for early boot, when the gdt is only mapped once |
402 | */ | 402 | */ |
403 | static __init void xen_load_gdt_boot(const struct desc_ptr *dtr) | 403 | static void __init xen_load_gdt_boot(const struct desc_ptr *dtr) |
404 | { | 404 | { |
405 | unsigned long va = dtr->address; | 405 | unsigned long va = dtr->address; |
406 | unsigned int size = dtr->size + 1; | 406 | unsigned int size = dtr->size + 1; |
@@ -662,7 +662,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry, | |||
662 | * Version of write_gdt_entry for use at early boot-time needed to | 662 | * Version of write_gdt_entry for use at early boot-time needed to |
663 | * update an entry as simply as possible. | 663 | * update an entry as simply as possible. |
664 | */ | 664 | */ |
665 | static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, | 665 | static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, |
666 | const void *desc, int type) | 666 | const void *desc, int type) |
667 | { | 667 | { |
668 | switch (type) { | 668 | switch (type) { |
@@ -933,18 +933,18 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf, | |||
933 | return ret; | 933 | return ret; |
934 | } | 934 | } |
935 | 935 | ||
936 | static const struct pv_info xen_info __initdata = { | 936 | static const struct pv_info xen_info __initconst = { |
937 | .paravirt_enabled = 1, | 937 | .paravirt_enabled = 1, |
938 | .shared_kernel_pmd = 0, | 938 | .shared_kernel_pmd = 0, |
939 | 939 | ||
940 | .name = "Xen", | 940 | .name = "Xen", |
941 | }; | 941 | }; |
942 | 942 | ||
943 | static const struct pv_init_ops xen_init_ops __initdata = { | 943 | static const struct pv_init_ops xen_init_ops __initconst = { |
944 | .patch = xen_patch, | 944 | .patch = xen_patch, |
945 | }; | 945 | }; |
946 | 946 | ||
947 | static const struct pv_cpu_ops xen_cpu_ops __initdata = { | 947 | static const struct pv_cpu_ops xen_cpu_ops __initconst = { |
948 | .cpuid = xen_cpuid, | 948 | .cpuid = xen_cpuid, |
949 | 949 | ||
950 | .set_debugreg = xen_set_debugreg, | 950 | .set_debugreg = xen_set_debugreg, |
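[Editor's note] These ops structures move from __initdata to __initconst because they are const-qualified: const and non-const objects cannot share one section, so placing const data in .init.data can provoke a "section type conflict" from the compiler. A sketch of the pattern, with section attributes mirroring include/linux/init.h (demo_* names are illustrative):

    /* Hedged sketch: const init-time data belongs in .init.rodata. */
    #define demo_initdata  __attribute__((__section__(".init.data")))
    #define demo_initconst __attribute__((__section__(".init.rodata")))

    struct demo_ops {
            void (*halt)(void);
    };

    static void demo_halt(void) { }

    /* const object + .init.rodata: qualifier and section agree */
    static const struct demo_ops demo demo_initconst = {
            .halt = demo_halt,
    };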
@@ -1004,7 +1004,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = { | |||
1004 | .end_context_switch = xen_end_context_switch, | 1004 | .end_context_switch = xen_end_context_switch, |
1005 | }; | 1005 | }; |
1006 | 1006 | ||
1007 | static const struct pv_apic_ops xen_apic_ops __initdata = { | 1007 | static const struct pv_apic_ops xen_apic_ops __initconst = { |
1008 | #ifdef CONFIG_X86_LOCAL_APIC | 1008 | #ifdef CONFIG_X86_LOCAL_APIC |
1009 | .startup_ipi_hook = paravirt_nop, | 1009 | .startup_ipi_hook = paravirt_nop, |
1010 | #endif | 1010 | #endif |
@@ -1055,7 +1055,7 @@ int xen_panic_handler_init(void) | |||
1055 | return 0; | 1055 | return 0; |
1056 | } | 1056 | } |
1057 | 1057 | ||
1058 | static const struct machine_ops __initdata xen_machine_ops = { | 1058 | static const struct machine_ops xen_machine_ops __initconst = { |
1059 | .restart = xen_restart, | 1059 | .restart = xen_restart, |
1060 | .halt = xen_machine_halt, | 1060 | .halt = xen_machine_halt, |
1061 | .power_off = xen_machine_halt, | 1061 | .power_off = xen_machine_halt, |
@@ -1332,7 +1332,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, | |||
1332 | return NOTIFY_OK; | 1332 | return NOTIFY_OK; |
1333 | } | 1333 | } |
1334 | 1334 | ||
1335 | static struct notifier_block __cpuinitdata xen_hvm_cpu_notifier = { | 1335 | static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = { |
1336 | .notifier_call = xen_hvm_cpu_notify, | 1336 | .notifier_call = xen_hvm_cpu_notify, |
1337 | }; | 1337 | }; |
1338 | 1338 | ||
@@ -1381,7 +1381,7 @@ bool xen_hvm_need_lapic(void) | |||
1381 | } | 1381 | } |
1382 | EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); | 1382 | EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); |
1383 | 1383 | ||
1384 | const __refconst struct hypervisor_x86 x86_hyper_xen_hvm = { | 1384 | const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = { |
1385 | .name = "Xen HVM", | 1385 | .name = "Xen HVM", |
1386 | .detect = xen_hvm_platform, | 1386 | .detect = xen_hvm_platform, |
1387 | .init_platform = xen_hvm_guest_init, | 1387 | .init_platform = xen_hvm_guest_init, |
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 6a6fe8939645..8bbb465b6f0a 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c | |||
@@ -113,7 +113,7 @@ static void xen_halt(void) | |||
113 | xen_safe_halt(); | 113 | xen_safe_halt(); |
114 | } | 114 | } |
115 | 115 | ||
116 | static const struct pv_irq_ops xen_irq_ops __initdata = { | 116 | static const struct pv_irq_ops xen_irq_ops __initconst = { |
117 | .save_fl = PV_CALLEE_SAVE(xen_save_fl), | 117 | .save_fl = PV_CALLEE_SAVE(xen_save_fl), |
118 | .restore_fl = PV_CALLEE_SAVE(xen_restore_fl), | 118 | .restore_fl = PV_CALLEE_SAVE(xen_restore_fl), |
119 | .irq_disable = PV_CALLEE_SAVE(xen_irq_disable), | 119 | .irq_disable = PV_CALLEE_SAVE(xen_irq_disable), |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 55c965b38c27..02d752460371 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1054,7 +1054,7 @@ void xen_mm_pin_all(void) | |||
1054 | * that's before we have page structures to store the bits. So do all | 1054 | * that's before we have page structures to store the bits. So do all |
1055 | * the book-keeping now. | 1055 | * the book-keeping now. |
1056 | */ | 1056 | */ |
1057 | static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page, | 1057 | static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page, |
1058 | enum pt_level level) | 1058 | enum pt_level level) |
1059 | { | 1059 | { |
1060 | SetPagePinned(page); | 1060 | SetPagePinned(page); |
@@ -1187,7 +1187,7 @@ static void drop_other_mm_ref(void *info) | |||
1187 | 1187 | ||
1188 | active_mm = percpu_read(cpu_tlbstate.active_mm); | 1188 | active_mm = percpu_read(cpu_tlbstate.active_mm); |
1189 | 1189 | ||
1190 | if (active_mm == mm) | 1190 | if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK) |
1191 | leave_mm(smp_processor_id()); | 1191 | leave_mm(smp_processor_id()); |
1192 | 1192 | ||
1193 | /* If this cpu still has a stale cr3 reference, then make sure | 1193 | /* If this cpu still has a stale cr3 reference, then make sure |
@@ -1271,13 +1271,27 @@ void xen_exit_mmap(struct mm_struct *mm) | |||
1271 | spin_unlock(&mm->page_table_lock); | 1271 | spin_unlock(&mm->page_table_lock); |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | static __init void xen_pagetable_setup_start(pgd_t *base) | 1274 | static void __init xen_pagetable_setup_start(pgd_t *base) |
1275 | { | 1275 | { |
1276 | } | 1276 | } |
1277 | 1277 | ||
1278 | static __init void xen_mapping_pagetable_reserve(u64 start, u64 end) | ||
1279 | { | ||
1280 | /* reserve the range used */ | ||
1281 | native_pagetable_reserve(start, end); | ||
1282 | |||
1283 | /* set as RW the rest */ | ||
1284 | printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end, | ||
1285 | PFN_PHYS(pgt_buf_top)); | ||
1286 | while (end < PFN_PHYS(pgt_buf_top)) { | ||
1287 | make_lowmem_page_readwrite(__va(end)); | ||
1288 | end += PAGE_SIZE; | ||
1289 | } | ||
1290 | } | ||
1291 | |||
1278 | static void xen_post_allocator_init(void); | 1292 | static void xen_post_allocator_init(void); |
1279 | 1293 | ||
1280 | static __init void xen_pagetable_setup_done(pgd_t *base) | 1294 | static void __init xen_pagetable_setup_done(pgd_t *base) |
1281 | { | 1295 | { |
1282 | xen_setup_shared_info(); | 1296 | xen_setup_shared_info(); |
1283 | xen_post_allocator_init(); | 1297 | xen_post_allocator_init(); |
@@ -1463,119 +1477,6 @@ static int xen_pgd_alloc(struct mm_struct *mm) | |||
1463 | return ret; | 1477 | return ret; |
1464 | } | 1478 | } |
1465 | 1479 | ||
1466 | #ifdef CONFIG_X86_64 | ||
1467 | static __initdata u64 __last_pgt_set_rw = 0; | ||
1468 | static __initdata u64 __pgt_buf_start = 0; | ||
1469 | static __initdata u64 __pgt_buf_end = 0; | ||
1470 | static __initdata u64 __pgt_buf_top = 0; | ||
1471 | /* | ||
1472 | * As a consequence of the commit: | ||
1473 | * | ||
1474 | * commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e | ||
1475 | * Author: Yinghai Lu <yinghai@kernel.org> | ||
1476 | * Date: Fri Dec 17 16:58:28 2010 -0800 | ||
1477 | * | ||
1478 | * x86-64, mm: Put early page table high | ||
1479 | * | ||
1480 | * at some point init_memory_mapping is going to reach the pagetable pages | ||
1481 | * area and map those pages too (mapping them as normal memory that falls | ||
1482 | * in the range of addresses passed to init_memory_mapping as argument). | ||
1483 | * Some of those pages are already pagetable pages (they are in the range | ||
1484 | * pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and | ||
1485 | * everything is fine. | ||
1486 | * Some of these pages are not pagetable pages yet (they fall in the range | ||
1487 | * pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they | ||
1488 | * are going to be mapped RW. When these pages become pagetable pages and | ||
1489 | * are hooked into the pagetable, xen will find that the guest has already | ||
1490 | * a RW mapping of them somewhere and fail the operation. | ||
1491 | * The reason Xen requires pagetables to be RO is that the hypervisor needs | ||
1492 | * to verify that the pagetables are valid before using them. The validation | ||
1493 | * operations are called "pinning". | ||
1494 | * | ||
1495 | * In order to fix the issue we mark all the pages in the entire range | ||
1496 | * pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation | ||
1497 | * is completed only the range pgt_buf_start-pgt_buf_end is reserved by | ||
1498 | * init_memory_mapping. Hence the kernel is going to crash as soon as one | ||
1499 | * of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those | ||
1500 | * ranges are RO). | ||
1501 | * | ||
1502 | * For this reason, 'mark_rw_past_pgt' is introduced which is called _after_ | ||
1503 | * the init_memory_mapping has completed (in a perfect world we would | ||
1504 | * call this function from init_memory_mapping, but lets ignore that). | ||
1505 | * | ||
1506 | * Because we are called _after_ init_memory_mapping the pgt_buf_[start, | ||
1507 | * end,top] have all changed to new values (b/c init_memory_mapping | ||
1508 | * is called and setting up another new page-table). Hence, the first time | ||
1509 | * we enter this function, we save away the pgt_buf_start value and update | ||
1510 | * the pgt_buf_[end,top]. | ||
1511 | * | ||
1512 | * When we detect that the "old" pgt_buf_start through pgt_buf_end | ||
1513 | * PFNs have been reserved (so memblock_x86_reserve_range has been called), | ||
1514 | * we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top. | ||
1515 | * | ||
1516 | * And then we update those "old" pgt_buf_[end|top] with the new ones | ||
1517 | * so that we can redo this on the next pagetable. | ||
1518 | */ | ||
1519 | static __init void mark_rw_past_pgt(void) { | ||
1520 | |||
1521 | if (pgt_buf_end > pgt_buf_start) { | ||
1522 | u64 addr, size; | ||
1523 | |||
1524 | /* Save it away. */ | ||
1525 | if (!__pgt_buf_start) { | ||
1526 | __pgt_buf_start = pgt_buf_start; | ||
1527 | __pgt_buf_end = pgt_buf_end; | ||
1528 | __pgt_buf_top = pgt_buf_top; | ||
1529 | return; | ||
1530 | } | ||
1531 | /* If we get the range that starts at __pgt_buf_end that means | ||
1532 | * the range is reserved, and that in 'init_memory_mapping' | ||
1533 | * the 'memblock_x86_reserve_range' has been called with the | ||
1534 | * outdated __pgt_buf_start, __pgt_buf_end (the "new" | ||
1535 | * pgt_buf_[start|end|top] refer now to a new pagetable. | ||
1536 | * Note: we are called _after_ the pgt_buf_[..] have been | ||
1537 | * updated.*/ | ||
1538 | |||
1539 | addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start), | ||
1540 | &size, PAGE_SIZE); | ||
1541 | |||
1542 | /* Still not reserved, meaning 'memblock_x86_reserve_range' | ||
1543 | * hasn't been called yet. Update the _end and _top.*/ | ||
1544 | if (addr == PFN_PHYS(__pgt_buf_start)) { | ||
1545 | __pgt_buf_end = pgt_buf_end; | ||
1546 | __pgt_buf_top = pgt_buf_top; | ||
1547 | return; | ||
1548 | } | ||
1549 | |||
1550 | /* OK, the area is reserved, meaning it is time for us to | ||
1551 | * set RW for the old end->top PFNs. */ | ||
1552 | |||
1553 | /* ..unless we had already done this. */ | ||
1554 | if (__pgt_buf_end == __last_pgt_set_rw) | ||
1555 | return; | ||
1556 | |||
1557 | addr = PFN_PHYS(__pgt_buf_end); | ||
1558 | |||
1559 | /* set as RW the rest */ | ||
1560 | printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", | ||
1561 | PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top)); | ||
1562 | |||
1563 | while (addr < PFN_PHYS(__pgt_buf_top)) { | ||
1564 | make_lowmem_page_readwrite(__va(addr)); | ||
1565 | addr += PAGE_SIZE; | ||
1566 | } | ||
1567 | /* And update everything so that we are ready for the next | ||
1568 | * pagetable (the one created for regions past 4GB) */ | ||
1569 | __last_pgt_set_rw = __pgt_buf_end; | ||
1570 | __pgt_buf_start = pgt_buf_start; | ||
1571 | __pgt_buf_end = pgt_buf_end; | ||
1572 | __pgt_buf_top = pgt_buf_top; | ||
1573 | } | ||
1574 | return; | ||
1575 | } | ||
1576 | #else | ||
1577 | static __init void mark_rw_past_pgt(void) { } | ||
1578 | #endif | ||
1579 | static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) | 1480 | static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) |
1580 | { | 1481 | { |
1581 | #ifdef CONFIG_X86_64 | 1482 | #ifdef CONFIG_X86_64 |
@@ -1587,7 +1488,7 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
1587 | } | 1488 | } |
1588 | 1489 | ||
1589 | #ifdef CONFIG_X86_32 | 1490 | #ifdef CONFIG_X86_32 |
1590 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | 1491 | static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) |
1591 | { | 1492 | { |
1592 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ | 1493 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ |
1593 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) | 1494 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) |
@@ -1597,19 +1498,11 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | |||
1597 | return pte; | 1498 | return pte; |
1598 | } | 1499 | } |
1599 | #else /* CONFIG_X86_64 */ | 1500 | #else /* CONFIG_X86_64 */ |
1600 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | 1501 | static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) |
1601 | { | 1502 | { |
1602 | unsigned long pfn = pte_pfn(pte); | 1503 | unsigned long pfn = pte_pfn(pte); |
1603 | 1504 | ||
1604 | /* | 1505 | /* |
1605 | * A bit of optimization. We do not need to call the workaround | ||
1606 | * when xen_set_pte_init is called with a PTE with 0 as PFN. | ||
1607 | * That is b/c the pagetable at that point are just being populated | ||
1608 | * with empty values and we can save some cycles by not calling | ||
1609 | * the 'memblock' code.*/ | ||
1610 | if (pfn) | ||
1611 | mark_rw_past_pgt(); | ||
1612 | /* | ||
1613 | * If the new pfn is within the range of the newly allocated | 1506 | * If the new pfn is within the range of the newly allocated |
1614 | * kernel pagetable, and it isn't being mapped into an | 1507 | * kernel pagetable, and it isn't being mapped into an |
1615 | * early_ioremap fixmap slot as a freshly allocated page, make sure | 1508 | * early_ioremap fixmap slot as a freshly allocated page, make sure |
@@ -1626,7 +1519,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | |||
1626 | 1519 | ||
1627 | /* Init-time set_pte while constructing initial pagetables, which | 1520 | /* Init-time set_pte while constructing initial pagetables, which |
1628 | doesn't allow RO pagetable pages to be remapped RW */ | 1521 | doesn't allow RO pagetable pages to be remapped RW */ |
1629 | static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) | 1522 | static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) |
1630 | { | 1523 | { |
1631 | pte = mask_rw_pte(ptep, pte); | 1524 | pte = mask_rw_pte(ptep, pte); |
1632 | 1525 | ||
@@ -1644,7 +1537,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) | |||
1644 | 1537 | ||
1645 | /* Early in boot, while setting up the initial pagetable, assume | 1538 | /* Early in boot, while setting up the initial pagetable, assume |
1646 | everything is pinned. */ | 1539 | everything is pinned. */ |
1647 | static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) | 1540 | static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) |
1648 | { | 1541 | { |
1649 | #ifdef CONFIG_FLATMEM | 1542 | #ifdef CONFIG_FLATMEM |
1650 | BUG_ON(mem_map); /* should only be used early */ | 1543 | BUG_ON(mem_map); /* should only be used early */ |
@@ -1654,7 +1547,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) | |||
1654 | } | 1547 | } |
1655 | 1548 | ||
1656 | /* Used for pmd and pud */ | 1549 | /* Used for pmd and pud */ |
1657 | static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) | 1550 | static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) |
1658 | { | 1551 | { |
1659 | #ifdef CONFIG_FLATMEM | 1552 | #ifdef CONFIG_FLATMEM |
1660 | BUG_ON(mem_map); /* should only be used early */ | 1553 | BUG_ON(mem_map); /* should only be used early */ |
@@ -1664,13 +1557,13 @@ static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) | |||
1664 | 1557 | ||
1665 | /* Early release_pte assumes that all pts are pinned, since there's | 1558 | /* Early release_pte assumes that all pts are pinned, since there's |
1666 | only init_mm and anything attached to that is pinned. */ | 1559 | only init_mm and anything attached to that is pinned. */ |
1667 | static __init void xen_release_pte_init(unsigned long pfn) | 1560 | static void __init xen_release_pte_init(unsigned long pfn) |
1668 | { | 1561 | { |
1669 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); | 1562 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); |
1670 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); | 1563 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
1671 | } | 1564 | } |
1672 | 1565 | ||
1673 | static __init void xen_release_pmd_init(unsigned long pfn) | 1566 | static void __init xen_release_pmd_init(unsigned long pfn) |
1674 | { | 1567 | { |
1675 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); | 1568 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
1676 | } | 1569 | } |
@@ -1796,7 +1689,7 @@ static void set_page_prot(void *addr, pgprot_t prot) | |||
1796 | BUG(); | 1689 | BUG(); |
1797 | } | 1690 | } |
1798 | 1691 | ||
1799 | static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) | 1692 | static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) |
1800 | { | 1693 | { |
1801 | unsigned pmdidx, pteidx; | 1694 | unsigned pmdidx, pteidx; |
1802 | unsigned ident_pte; | 1695 | unsigned ident_pte; |
@@ -1879,7 +1772,7 @@ static void convert_pfn_mfn(void *v) | |||
1879 | * of the physical mapping once some sort of allocator has been set | 1772 | * of the physical mapping once some sort of allocator has been set |
1880 | * up. | 1773 | * up. |
1881 | */ | 1774 | */ |
1882 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | 1775 | pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, |
1883 | unsigned long max_pfn) | 1776 | unsigned long max_pfn) |
1884 | { | 1777 | { |
1885 | pud_t *l3; | 1778 | pud_t *l3; |
@@ -1950,7 +1843,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | |||
1950 | static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); | 1843 | static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); |
1951 | static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); | 1844 | static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); |
1952 | 1845 | ||
1953 | static __init void xen_write_cr3_init(unsigned long cr3) | 1846 | static void __init xen_write_cr3_init(unsigned long cr3) |
1954 | { | 1847 | { |
1955 | unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); | 1848 | unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); |
1956 | 1849 | ||
@@ -1987,7 +1880,7 @@ static __init void xen_write_cr3_init(unsigned long cr3) | |||
1987 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | 1880 | pv_mmu_ops.write_cr3 = &xen_write_cr3; |
1988 | } | 1881 | } |
1989 | 1882 | ||
1990 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | 1883 | pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, |
1991 | unsigned long max_pfn) | 1884 | unsigned long max_pfn) |
1992 | { | 1885 | { |
1993 | pmd_t *kernel_pmd; | 1886 | pmd_t *kernel_pmd; |
@@ -2093,7 +1986,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |||
2093 | #endif | 1986 | #endif |
2094 | } | 1987 | } |
2095 | 1988 | ||
2096 | __init void xen_ident_map_ISA(void) | 1989 | void __init xen_ident_map_ISA(void) |
2097 | { | 1990 | { |
2098 | unsigned long pa; | 1991 | unsigned long pa; |
2099 | 1992 | ||
@@ -2116,10 +2009,8 @@ __init void xen_ident_map_ISA(void) | |||
2116 | xen_flush_tlb(); | 2009 | xen_flush_tlb(); |
2117 | } | 2010 | } |
2118 | 2011 | ||
2119 | static __init void xen_post_allocator_init(void) | 2012 | static void __init xen_post_allocator_init(void) |
2120 | { | 2013 | { |
2121 | mark_rw_past_pgt(); | ||
2122 | |||
2123 | #ifdef CONFIG_XEN_DEBUG | 2014 | #ifdef CONFIG_XEN_DEBUG |
2124 | pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); | 2015 | pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); |
2125 | #endif | 2016 | #endif |
@@ -2155,7 +2046,7 @@ static void xen_leave_lazy_mmu(void) | |||
2155 | preempt_enable(); | 2046 | preempt_enable(); |
2156 | } | 2047 | } |
2157 | 2048 | ||
2158 | static const struct pv_mmu_ops xen_mmu_ops __initdata = { | 2049 | static const struct pv_mmu_ops xen_mmu_ops __initconst = { |
2159 | .read_cr2 = xen_read_cr2, | 2050 | .read_cr2 = xen_read_cr2, |
2160 | .write_cr2 = xen_write_cr2, | 2051 | .write_cr2 = xen_write_cr2, |
2161 | 2052 | ||
@@ -2228,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { | |||
2228 | 2119 | ||
2229 | void __init xen_init_mmu_ops(void) | 2120 | void __init xen_init_mmu_ops(void) |
2230 | { | 2121 | { |
2122 | x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve; | ||
2231 | x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; | 2123 | x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; |
2232 | x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; | 2124 | x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; |
2233 | pv_mmu_ops = xen_mmu_ops; | 2125 | pv_mmu_ops = xen_mmu_ops; |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 141eb0de8b06..58efeb9d5440 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -522,11 +522,20 @@ static bool __init __early_alloc_p2m(unsigned long pfn) | |||
522 | /* Boundary cross-over for the edges: */ | 522 | /* Boundary cross-over for the edges: */ |
523 | if (idx) { | 523 | if (idx) { |
524 | unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); | 524 | unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); |
525 | unsigned long *mid_mfn_p; | ||
525 | 526 | ||
526 | p2m_init(p2m); | 527 | p2m_init(p2m); |
527 | 528 | ||
528 | p2m_top[topidx][mididx] = p2m; | 529 | p2m_top[topidx][mididx] = p2m; |
529 | 530 | ||
531 | /* For save/restore we need the MFN of the saved P2M */ | ||
532 | |||
533 | mid_mfn_p = p2m_top_mfn_p[topidx]; | ||
534 | WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing), | ||
535 | "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n", | ||
536 | topidx, mididx); | ||
537 | mid_mfn_p[mididx] = virt_to_mfn(p2m); | ||
538 | |||
530 | } | 539 | } |
531 | return idx != 0; | 540 | return idx != 0; |
532 | } | 541 | } |
@@ -549,12 +558,29 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s, | |||
549 | pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) | 558 | pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) |
550 | { | 559 | { |
551 | unsigned topidx = p2m_top_index(pfn); | 560 | unsigned topidx = p2m_top_index(pfn); |
552 | if (p2m_top[topidx] == p2m_mid_missing) { | 561 | unsigned long *mid_mfn_p; |
553 | unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); | 562 | unsigned long **mid; |
563 | |||
564 | mid = p2m_top[topidx]; | ||
565 | mid_mfn_p = p2m_top_mfn_p[topidx]; | ||
566 | if (mid == p2m_mid_missing) { | ||
567 | mid = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
554 | 568 | ||
555 | p2m_mid_init(mid); | 569 | p2m_mid_init(mid); |
556 | 570 | ||
557 | p2m_top[topidx] = mid; | 571 | p2m_top[topidx] = mid; |
572 | |||
573 | BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); | ||
574 | } | ||
575 | /* And the save/restore P2M tables.. */ | ||
576 | if (mid_mfn_p == p2m_mid_missing_mfn) { | ||
577 | mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
578 | p2m_mid_mfn_init(mid_mfn_p); | ||
579 | |||
580 | p2m_top_mfn_p[topidx] = mid_mfn_p; | ||
581 | p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); | ||
582 | /* Note: we don't set mid_mfn_p[mididx] here, | ||
583 | * look in __early_alloc_p2m */ | ||
558 | } | 584 | } |
559 | } | 585 | } |
560 | 586 | ||
@@ -650,7 +676,7 @@ static unsigned long mfn_hash(unsigned long mfn) | |||
650 | } | 676 | } |
651 | 677 | ||
652 | /* Add an MFN override for a particular page */ | 678 | /* Add an MFN override for a particular page */ |
653 | int m2p_add_override(unsigned long mfn, struct page *page) | 679 | int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte) |
654 | { | 680 | { |
655 | unsigned long flags; | 681 | unsigned long flags; |
656 | unsigned long pfn; | 682 | unsigned long pfn; |
@@ -662,7 +688,6 @@ int m2p_add_override(unsigned long mfn, struct page *page) | |||
662 | if (!PageHighMem(page)) { | 688 | if (!PageHighMem(page)) { |
663 | address = (unsigned long)__va(pfn << PAGE_SHIFT); | 689 | address = (unsigned long)__va(pfn << PAGE_SHIFT); |
664 | ptep = lookup_address(address, &level); | 690 | ptep = lookup_address(address, &level); |
665 | |||
666 | if (WARN(ptep == NULL || level != PG_LEVEL_4K, | 691 | if (WARN(ptep == NULL || level != PG_LEVEL_4K, |
667 | "m2p_add_override: pfn %lx not mapped", pfn)) | 692 | "m2p_add_override: pfn %lx not mapped", pfn)) |
668 | return -EINVAL; | 693 | return -EINVAL; |
@@ -674,18 +699,17 @@ int m2p_add_override(unsigned long mfn, struct page *page) | |||
674 | if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) | 699 | if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) |
675 | return -ENOMEM; | 700 | return -ENOMEM; |
676 | 701 | ||
677 | if (!PageHighMem(page)) | 702 | if (clear_pte && !PageHighMem(page)) |
678 | /* Just zap old mapping for now */ | 703 | /* Just zap old mapping for now */ |
679 | pte_clear(&init_mm, address, ptep); | 704 | pte_clear(&init_mm, address, ptep); |
680 | |||
681 | spin_lock_irqsave(&m2p_override_lock, flags); | 705 | spin_lock_irqsave(&m2p_override_lock, flags); |
682 | list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); | 706 | list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); |
683 | spin_unlock_irqrestore(&m2p_override_lock, flags); | 707 | spin_unlock_irqrestore(&m2p_override_lock, flags); |
684 | 708 | ||
685 | return 0; | 709 | return 0; |
686 | } | 710 | } |
687 | 711 | EXPORT_SYMBOL_GPL(m2p_add_override); | |
688 | int m2p_remove_override(struct page *page) | 712 | int m2p_remove_override(struct page *page, bool clear_pte) |
689 | { | 713 | { |
690 | unsigned long flags; | 714 | unsigned long flags; |
691 | unsigned long mfn; | 715 | unsigned long mfn; |
@@ -713,7 +737,7 @@ int m2p_remove_override(struct page *page) | |||
713 | spin_unlock_irqrestore(&m2p_override_lock, flags); | 737 | spin_unlock_irqrestore(&m2p_override_lock, flags); |
714 | set_phys_to_machine(pfn, page->index); | 738 | set_phys_to_machine(pfn, page->index); |
715 | 739 | ||
716 | if (!PageHighMem(page)) | 740 | if (clear_pte && !PageHighMem(page)) |
717 | set_pte_at(&init_mm, address, ptep, | 741 | set_pte_at(&init_mm, address, ptep, |
718 | pfn_pte(pfn, PAGE_KERNEL)); | 742 | pfn_pte(pfn, PAGE_KERNEL)); |
719 | /* No tlb flush necessary because the caller already | 743 | /* No tlb flush necessary because the caller already |
@@ -721,6 +745,7 @@ int m2p_remove_override(struct page *page) | |||
721 | 745 | ||
722 | return 0; | 746 | return 0; |
723 | } | 747 | } |
748 | EXPORT_SYMBOL_GPL(m2p_remove_override); | ||
724 | 749 | ||
725 | struct page *m2p_find_override(unsigned long mfn) | 750 | struct page *m2p_find_override(unsigned long mfn) |
726 | { | 751 | { |
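With the clear_pte argument and the new EXPORT_SYMBOL_GPL markers, modular code can drive the M2P override table itself and decide whether the kernel PTE should be zapped. A hypothetical modular caller (foo_map_foreign() and its locals are illustrative, not from this patch):

    int foo_map_foreign(struct page *page, unsigned long mfn)
    {
            int rc;

            /* Keep the existing kernel PTE; the caller manages the mapping. */
            rc = m2p_add_override(mfn, page, false);
            if (rc)
                    return rc;

            /* ... access the foreign frame through @page ... */

            return m2p_remove_override(page, false);
    }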
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 90bac0aac3a5..be1a464f6d66 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -50,7 +50,7 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size; | |||
50 | */ | 50 | */ |
51 | #define EXTRA_MEM_RATIO (10) | 51 | #define EXTRA_MEM_RATIO (10) |
52 | 52 | ||
53 | static __init void xen_add_extra_mem(unsigned long pages) | 53 | static void __init xen_add_extra_mem(unsigned long pages) |
54 | { | 54 | { |
55 | unsigned long pfn; | 55 | unsigned long pfn; |
56 | 56 | ||
@@ -166,7 +166,7 @@ static unsigned long __init xen_set_identity(const struct e820entry *list, | |||
166 | if (last > end) | 166 | if (last > end) |
167 | continue; | 167 | continue; |
168 | 168 | ||
169 | if (entry->type == E820_RAM) { | 169 | if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) { |
170 | if (start > start_pci) | 170 | if (start > start_pci) |
171 | identity += set_phys_range_identity( | 171 | identity += set_phys_range_identity( |
172 | PFN_UP(start_pci), PFN_DOWN(start)); | 172 | PFN_UP(start_pci), PFN_DOWN(start)); |
@@ -227,7 +227,11 @@ char * __init xen_memory_setup(void) | |||
227 | 227 | ||
228 | memcpy(map_raw, map, sizeof(map)); | 228 | memcpy(map_raw, map, sizeof(map)); |
229 | e820.nr_map = 0; | 229 | e820.nr_map = 0; |
230 | #ifdef CONFIG_X86_32 | ||
231 | xen_extra_mem_start = mem_end; | ||
232 | #else | ||
230 | xen_extra_mem_start = max((1ULL << 32), mem_end); | 233 | xen_extra_mem_start = max((1ULL << 32), mem_end); |
234 | #endif | ||
231 | for (i = 0; i < memmap.nr_entries; i++) { | 235 | for (i = 0; i < memmap.nr_entries; i++) { |
232 | unsigned long long end; | 236 | unsigned long long end; |
233 | 237 | ||
@@ -336,7 +340,7 @@ static void __init fiddle_vdso(void) | |||
336 | #endif | 340 | #endif |
337 | } | 341 | } |
338 | 342 | ||
339 | static __cpuinit int register_callback(unsigned type, const void *func) | 343 | static int __cpuinit register_callback(unsigned type, const void *func) |
340 | { | 344 | { |
341 | struct callback_register callback = { | 345 | struct callback_register callback = { |
342 | .type = type, | 346 | .type = type, |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 30612441ed99..41038c01de40 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -46,18 +46,17 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); | |||
46 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); | 46 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * Reschedule callback. Nothing to do, | 49 | * Reschedule callback. |
50 | * all the work is done automatically when | ||
51 | * we return from the interrupt. | ||
52 | */ | 50 | */ |
53 | static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) | 51 | static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) |
54 | { | 52 | { |
55 | inc_irq_stat(irq_resched_count); | 53 | inc_irq_stat(irq_resched_count); |
54 | scheduler_ipi(); | ||
56 | 55 | ||
57 | return IRQ_HANDLED; | 56 | return IRQ_HANDLED; |
58 | } | 57 | } |
59 | 58 | ||
60 | static __cpuinit void cpu_bringup(void) | 59 | static void __cpuinit cpu_bringup(void) |
61 | { | 60 | { |
62 | int cpu = smp_processor_id(); | 61 | int cpu = smp_processor_id(); |
63 | 62 | ||
@@ -85,7 +84,7 @@ static __cpuinit void cpu_bringup(void) | |||
85 | wmb(); /* make sure everything is out */ | 84 | wmb(); /* make sure everything is out */ |
86 | } | 85 | } |
87 | 86 | ||
88 | static __cpuinit void cpu_bringup_and_idle(void) | 87 | static void __cpuinit cpu_bringup_and_idle(void) |
89 | { | 88 | { |
90 | cpu_bringup(); | 89 | cpu_bringup(); |
91 | cpu_idle(); | 90 | cpu_idle(); |
@@ -242,7 +241,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) | |||
242 | } | 241 | } |
243 | } | 242 | } |
244 | 243 | ||
245 | static __cpuinit int | 244 | static int __cpuinit |
246 | cpu_initialize_context(unsigned int cpu, struct task_struct *idle) | 245 | cpu_initialize_context(unsigned int cpu, struct task_struct *idle) |
247 | { | 246 | { |
248 | struct vcpu_guest_context *ctxt; | 247 | struct vcpu_guest_context *ctxt; |
@@ -486,7 +485,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) | |||
486 | return IRQ_HANDLED; | 485 | return IRQ_HANDLED; |
487 | } | 486 | } |
488 | 487 | ||
489 | static const struct smp_ops xen_smp_ops __initdata = { | 488 | static const struct smp_ops xen_smp_ops __initconst = { |
490 | .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu, | 489 | .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu, |
491 | .smp_prepare_cpus = xen_smp_prepare_cpus, | 490 | .smp_prepare_cpus = xen_smp_prepare_cpus, |
492 | .smp_cpus_done = xen_smp_cpus_done, | 491 | .smp_cpus_done = xen_smp_cpus_done, |
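Two things change in this file beyond function annotations: the reschedule IPI must now call scheduler_ipi() explicitly (returning from the interrupt is no longer enough for the scheduler's IPI-time work), and const ops tables move from __initdata to __initconst. The annotation rule in one sketch, assuming only <linux/init.h>:

    static const int init_table[] __initconst = { 1, 2, 3 }; /* const init data */
    static int init_counter __initdata;                      /* writable init data */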
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 2e2d370a47b1..5158c505bef9 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -26,8 +26,6 @@ | |||
26 | 26 | ||
27 | #include "xen-ops.h" | 27 | #include "xen-ops.h" |
28 | 28 | ||
29 | #define XEN_SHIFT 22 | ||
30 | |||
31 | /* Xen may fire a timer up to this many ns early */ | 29 | /* Xen may fire a timer up to this many ns early */ |
32 | #define TIMER_SLOP 100000 | 30 | #define TIMER_SLOP 100000 |
33 | #define NS_PER_TICK (1000000000LL / HZ) | 31 | #define NS_PER_TICK (1000000000LL / HZ) |
@@ -211,8 +209,6 @@ static struct clocksource xen_clocksource __read_mostly = { | |||
211 | .rating = 400, | 209 | .rating = 400, |
212 | .read = xen_clocksource_get_cycles, | 210 | .read = xen_clocksource_get_cycles, |
213 | .mask = ~0, | 211 | .mask = ~0, |
214 | .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */ | ||
215 | .shift = XEN_SHIFT, | ||
216 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 212 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
217 | }; | 213 | }; |
218 | 214 | ||
@@ -439,16 +435,16 @@ void xen_timer_resume(void) | |||
439 | } | 435 | } |
440 | } | 436 | } |
441 | 437 | ||
442 | static const struct pv_time_ops xen_time_ops __initdata = { | 438 | static const struct pv_time_ops xen_time_ops __initconst = { |
443 | .sched_clock = xen_clocksource_read, | 439 | .sched_clock = xen_clocksource_read, |
444 | }; | 440 | }; |
445 | 441 | ||
446 | static __init void xen_time_init(void) | 442 | static void __init xen_time_init(void) |
447 | { | 443 | { |
448 | int cpu = smp_processor_id(); | 444 | int cpu = smp_processor_id(); |
449 | struct timespec tp; | 445 | struct timespec tp; |
450 | 446 | ||
451 | clocksource_register(&xen_clocksource); | 447 | clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC); |
452 | 448 | ||
453 | if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) { | 449 | if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) { |
454 | /* Successfully turned off 100Hz tick, so we have the | 450 | /* Successfully turned off 100Hz tick, so we have the |
@@ -468,7 +464,7 @@ static __init void xen_time_init(void) | |||
468 | xen_setup_cpu_clockevents(); | 464 | xen_setup_cpu_clockevents(); |
469 | } | 465 | } |
470 | 466 | ||
471 | __init void xen_init_time_ops(void) | 467 | void __init xen_init_time_ops(void) |
472 | { | 468 | { |
473 | pv_time_ops = xen_time_ops; | 469 | pv_time_ops = xen_time_ops; |
474 | 470 | ||
@@ -490,7 +486,7 @@ static void xen_hvm_setup_cpu_clockevents(void) | |||
490 | xen_setup_cpu_clockevents(); | 486 | xen_setup_cpu_clockevents(); |
491 | } | 487 | } |
492 | 488 | ||
493 | __init void xen_hvm_init_time_ops(void) | 489 | void __init xen_hvm_init_time_ops(void) |
494 | { | 490 | { |
495 | /* vector callback is needed otherwise we cannot receive interrupts | 491 | /* vector callback is needed otherwise we cannot receive interrupts |
496 | * on cpu > 0 and at this point we don't know how many cpus are | 492 | * on cpu > 0 and at this point we don't know how many cpus are |
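Removing XEN_SHIFT and the hand-rolled mult/shift pair defers the conversion math to the clocksource core: clocksource_register_hz() computes .mult and .shift from the counter frequency, which for Xen's nanosecond-granular system time is simply NSEC_PER_SEC. A minimal sketch of the resulting registration, assuming the common clocksource API:

    static struct clocksource xen_clocksource __read_mostly = {
            .name   = "xen",
            .rating = 400,
            .read   = xen_clocksource_get_cycles,
            .mask   = ~0,
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
            /* no .mult/.shift: derived by the core at registration time */
    };

    clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);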
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 3112f55638c4..97dfdc8757b3 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -74,7 +74,7 @@ static inline void xen_hvm_smp_init(void) {} | |||
74 | 74 | ||
75 | #ifdef CONFIG_PARAVIRT_SPINLOCKS | 75 | #ifdef CONFIG_PARAVIRT_SPINLOCKS |
76 | void __init xen_init_spinlocks(void); | 76 | void __init xen_init_spinlocks(void); |
77 | __cpuinit void xen_init_lock_cpu(int cpu); | 77 | void __cpuinit xen_init_lock_cpu(int cpu); |
78 | void xen_uninit_lock_cpu(int cpu); | 78 | void xen_uninit_lock_cpu(int cpu); |
79 | #else | 79 | #else |
80 | static inline void xen_init_spinlocks(void) | 80 | static inline void xen_init_spinlocks(void) |
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index f0605ab2a761..471fdcc5df85 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -114,6 +114,13 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup) | |||
114 | } | 114 | } |
115 | EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); | 115 | EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); |
116 | 116 | ||
117 | struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk) | ||
118 | { | ||
119 | return container_of(task_subsys_state(tsk, blkio_subsys_id), | ||
120 | struct blkio_cgroup, css); | ||
121 | } | ||
122 | EXPORT_SYMBOL_GPL(task_blkio_cgroup); | ||
123 | |||
117 | static inline void | 124 | static inline void |
118 | blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight) | 125 | blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight) |
119 | { | 126 | { |
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 10919fae2d3a..c774930cc206 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h | |||
@@ -291,6 +291,7 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {} | |||
291 | #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) | 291 | #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) |
292 | extern struct blkio_cgroup blkio_root_cgroup; | 292 | extern struct blkio_cgroup blkio_root_cgroup; |
293 | extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); | 293 | extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); |
294 | extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk); | ||
294 | extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, | 295 | extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, |
295 | struct blkio_group *blkg, void *key, dev_t dev, | 296 | struct blkio_group *blkg, void *key, dev_t dev, |
296 | enum blkio_policy_id plid); | 297 | enum blkio_policy_id plid); |
@@ -314,6 +315,8 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg, | |||
314 | struct cgroup; | 315 | struct cgroup; |
315 | static inline struct blkio_cgroup * | 316 | static inline struct blkio_cgroup * |
316 | cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } | 317 | cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } |
318 | static inline struct blkio_cgroup * | ||
319 | task_blkio_cgroup(struct task_struct *tsk) { return NULL; } | ||
317 | 320 | ||
318 | static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, | 321 | static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, |
319 | struct blkio_group *blkg, void *key, dev_t dev, | 322 | struct blkio_group *blkg, void *key, dev_t dev, |
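task_blkio_cgroup() collapses the old two-step task_cgroup() plus cgroup_to_blkio_cgroup() lookup into one call, and the !CONFIG_BLK_CGROUP stub keeps callers unconditional. As the converted blk-throttle and CFQ hunks below show, callers are expected to hold the RCU read lock around the lookup and any use of the result:

    struct blkio_cgroup *blkcg;

    rcu_read_lock();
    blkcg = task_blkio_cgroup(current);
    /* ... find or allocate the per-device group for blkcg ... */
    rcu_read_unlock();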
diff --git a/block/blk-core.c b/block/blk-core.c index a2e58eeb3549..3fe00a14822a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -316,8 +316,10 @@ EXPORT_SYMBOL(__blk_run_queue); | |||
316 | */ | 316 | */ |
317 | void blk_run_queue_async(struct request_queue *q) | 317 | void blk_run_queue_async(struct request_queue *q) |
318 | { | 318 | { |
319 | if (likely(!blk_queue_stopped(q))) | 319 | if (likely(!blk_queue_stopped(q))) { |
320 | __cancel_delayed_work(&q->delay_work); | ||
320 | queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); | 321 | queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); |
322 | } | ||
321 | } | 323 | } |
322 | EXPORT_SYMBOL(blk_run_queue_async); | 324 | EXPORT_SYMBOL(blk_run_queue_async); |
323 | 325 | ||
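queue_delayed_work() is a no-op when the work item is already pending, so a previously armed delayed run could silently swallow a request for an immediate one. Cancelling first makes the zero-delay submission take effect; __cancel_delayed_work() (the non-syncing variant) is presumably safe here because callers run under the queue lock. The pattern in isolation:

    /* Re-arm pending delayed work so it runs now instead of later. */
    __cancel_delayed_work(&q->delay_work);
    queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);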
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 0475a22a420d..252a81a306f7 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -160,9 +160,8 @@ static void throtl_put_tg(struct throtl_grp *tg) | |||
160 | } | 160 | } |
161 | 161 | ||
162 | static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, | 162 | static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, |
163 | struct cgroup *cgroup) | 163 | struct blkio_cgroup *blkcg) |
164 | { | 164 | { |
165 | struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); | ||
166 | struct throtl_grp *tg = NULL; | 165 | struct throtl_grp *tg = NULL; |
167 | void *key = td; | 166 | void *key = td; |
168 | struct backing_dev_info *bdi = &td->queue->backing_dev_info; | 167 | struct backing_dev_info *bdi = &td->queue->backing_dev_info; |
@@ -229,12 +228,12 @@ done: | |||
229 | 228 | ||
230 | static struct throtl_grp * throtl_get_tg(struct throtl_data *td) | 229 | static struct throtl_grp * throtl_get_tg(struct throtl_data *td) |
231 | { | 230 | { |
232 | struct cgroup *cgroup; | ||
233 | struct throtl_grp *tg = NULL; | 231 | struct throtl_grp *tg = NULL; |
232 | struct blkio_cgroup *blkcg; | ||
234 | 233 | ||
235 | rcu_read_lock(); | 234 | rcu_read_lock(); |
236 | cgroup = task_cgroup(current, blkio_subsys_id); | 235 | blkcg = task_blkio_cgroup(current); |
237 | tg = throtl_find_alloc_tg(td, cgroup); | 236 | tg = throtl_find_alloc_tg(td, blkcg); |
238 | if (!tg) | 237 | if (!tg) |
239 | tg = &td->root_tg; | 238 | tg = &td->root_tg; |
240 | rcu_read_unlock(); | 239 | rcu_read_unlock(); |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 5b52011e3a40..ab7a9e6a9b1c 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -1014,10 +1014,9 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg, | |||
1014 | cfqg->needs_update = true; | 1014 | cfqg->needs_update = true; |
1015 | } | 1015 | } |
1016 | 1016 | ||
1017 | static struct cfq_group * | 1017 | static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd, |
1018 | cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) | 1018 | struct blkio_cgroup *blkcg, int create) |
1019 | { | 1019 | { |
1020 | struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); | ||
1021 | struct cfq_group *cfqg = NULL; | 1020 | struct cfq_group *cfqg = NULL; |
1022 | void *key = cfqd; | 1021 | void *key = cfqd; |
1023 | int i, j; | 1022 | int i, j; |
@@ -1079,12 +1078,12 @@ done: | |||
1079 | */ | 1078 | */ |
1080 | static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) | 1079 | static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) |
1081 | { | 1080 | { |
1082 | struct cgroup *cgroup; | 1081 | struct blkio_cgroup *blkcg; |
1083 | struct cfq_group *cfqg = NULL; | 1082 | struct cfq_group *cfqg = NULL; |
1084 | 1083 | ||
1085 | rcu_read_lock(); | 1084 | rcu_read_lock(); |
1086 | cgroup = task_cgroup(current, blkio_subsys_id); | 1085 | blkcg = task_blkio_cgroup(current); |
1087 | cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create); | 1086 | cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create); |
1088 | if (!cfqg && create) | 1087 | if (!cfqg && create) |
1089 | cfqg = &cfqd->root_group; | 1088 | cfqg = &cfqd->root_group; |
1090 | rcu_read_unlock(); | 1089 | rcu_read_unlock(); |
diff --git a/drivers/Kconfig b/drivers/Kconfig index 177c7d156933..557a469c7aa6 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig | |||
@@ -119,4 +119,7 @@ source "drivers/platform/Kconfig" | |||
119 | source "drivers/clk/Kconfig" | 119 | source "drivers/clk/Kconfig" |
120 | 120 | ||
121 | source "drivers/hwspinlock/Kconfig" | 121 | source "drivers/hwspinlock/Kconfig" |
122 | |||
123 | source "drivers/clocksource/Kconfig" | ||
124 | |||
122 | endmenu | 125 | endmenu |
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 3a73a93596e8..85b32376dad7 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
@@ -49,10 +49,6 @@ ACPI_MODULE_NAME("processor_perflib"); | |||
49 | 49 | ||
50 | static DEFINE_MUTEX(performance_mutex); | 50 | static DEFINE_MUTEX(performance_mutex); |
51 | 51 | ||
52 | /* Use cpufreq debug layer for _PPC changes. */ | ||
53 | #define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ | ||
54 | "cpufreq-core", msg) | ||
55 | |||
56 | /* | 52 | /* |
57 | * _PPC support is implemented as a CPUfreq policy notifier: | 53 | * _PPC support is implemented as a CPUfreq policy notifier: |
58 | * This means each time a CPUfreq driver registered also with | 54 | * This means each time a CPUfreq driver registered also with |
@@ -145,7 +141,7 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) | |||
145 | return -ENODEV; | 141 | return -ENODEV; |
146 | } | 142 | } |
147 | 143 | ||
148 | cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, | 144 | pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, |
149 | (int)ppc, ppc ? "" : "not"); | 145 | (int)ppc, ppc ? "" : "not"); |
150 | 146 | ||
151 | pr->performance_platform_limit = (int)ppc; | 147 | pr->performance_platform_limit = (int)ppc; |
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index ad3501739563..605a2954ef17 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c | |||
@@ -710,20 +710,14 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) | |||
710 | } | 710 | } |
711 | 711 | ||
712 | #ifdef CONFIG_X86 | 712 | #ifdef CONFIG_X86 |
713 | static int acpi_throttling_rdmsr(struct acpi_processor *pr, | 713 | static int acpi_throttling_rdmsr(u64 *value) |
714 | u64 *value) | ||
715 | { | 714 | { |
716 | struct cpuinfo_x86 *c; | ||
717 | u64 msr_high, msr_low; | 715 | u64 msr_high, msr_low; |
718 | unsigned int cpu; | ||
719 | u64 msr = 0; | 716 | u64 msr = 0; |
720 | int ret = -1; | 717 | int ret = -1; |
721 | 718 | ||
722 | cpu = pr->id; | 719 | if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) || |
723 | c = &cpu_data(cpu); | 720 | !this_cpu_has(X86_FEATURE_ACPI)) { |
724 | |||
725 | if ((c->x86_vendor != X86_VENDOR_INTEL) || | ||
726 | !cpu_has(c, X86_FEATURE_ACPI)) { | ||
727 | printk(KERN_ERR PREFIX | 721 | printk(KERN_ERR PREFIX |
728 | "HARDWARE addr space,NOT supported yet\n"); | 722 | "HARDWARE addr space,NOT supported yet\n"); |
729 | } else { | 723 | } else { |
@@ -738,18 +732,13 @@ static int acpi_throttling_rdmsr(struct acpi_processor *pr, | |||
738 | return ret; | 732 | return ret; |
739 | } | 733 | } |
740 | 734 | ||
741 | static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value) | 735 | static int acpi_throttling_wrmsr(u64 value) |
742 | { | 736 | { |
743 | struct cpuinfo_x86 *c; | ||
744 | unsigned int cpu; | ||
745 | int ret = -1; | 737 | int ret = -1; |
746 | u64 msr; | 738 | u64 msr; |
747 | 739 | ||
748 | cpu = pr->id; | 740 | if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) || |
749 | c = &cpu_data(cpu); | 741 | !this_cpu_has(X86_FEATURE_ACPI)) { |
750 | |||
751 | if ((c->x86_vendor != X86_VENDOR_INTEL) || | ||
752 | !cpu_has(c, X86_FEATURE_ACPI)) { | ||
753 | printk(KERN_ERR PREFIX | 742 | printk(KERN_ERR PREFIX |
754 | "HARDWARE addr space,NOT supported yet\n"); | 743 | "HARDWARE addr space,NOT supported yet\n"); |
755 | } else { | 744 | } else { |
@@ -761,15 +750,14 @@ static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value) | |||
761 | return ret; | 750 | return ret; |
762 | } | 751 | } |
763 | #else | 752 | #else |
764 | static int acpi_throttling_rdmsr(struct acpi_processor *pr, | 753 | static int acpi_throttling_rdmsr(u64 *value) |
765 | u64 *value) | ||
766 | { | 754 | { |
767 | printk(KERN_ERR PREFIX | 755 | printk(KERN_ERR PREFIX |
768 | "HARDWARE addr space,NOT supported yet\n"); | 756 | "HARDWARE addr space,NOT supported yet\n"); |
769 | return -1; | 757 | return -1; |
770 | } | 758 | } |
771 | 759 | ||
772 | static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value) | 760 | static int acpi_throttling_wrmsr(u64 value) |
773 | { | 761 | { |
774 | printk(KERN_ERR PREFIX | 762 | printk(KERN_ERR PREFIX |
775 | "HARDWARE addr space,NOT supported yet\n"); | 763 | "HARDWARE addr space,NOT supported yet\n"); |
@@ -801,7 +789,7 @@ static int acpi_read_throttling_status(struct acpi_processor *pr, | |||
801 | ret = 0; | 789 | ret = 0; |
802 | break; | 790 | break; |
803 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | 791 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
804 | ret = acpi_throttling_rdmsr(pr, value); | 792 | ret = acpi_throttling_rdmsr(value); |
805 | break; | 793 | break; |
806 | default: | 794 | default: |
807 | printk(KERN_ERR PREFIX "Unknown addr space %d\n", | 795 | printk(KERN_ERR PREFIX "Unknown addr space %d\n", |
@@ -834,7 +822,7 @@ static int acpi_write_throttling_state(struct acpi_processor *pr, | |||
834 | ret = 0; | 822 | ret = 0; |
835 | break; | 823 | break; |
836 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | 824 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
837 | ret = acpi_throttling_wrmsr(pr, value); | 825 | ret = acpi_throttling_wrmsr(value); |
838 | break; | 826 | break; |
839 | default: | 827 | default: |
840 | printk(KERN_ERR PREFIX "Unknown addr space %d\n", | 828 | printk(KERN_ERR PREFIX "Unknown addr space %d\n", |
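Dropping the pr argument is possible because the MSR accessors only ever run on the CPU whose throttling state is being touched; the vendor/feature test can therefore use the this_cpu helpers instead of chasing cpu_data(pr->id). The guard reduces to (x86-only sketch):

    if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
        !this_cpu_has(X86_FEATURE_ACPI))
            return -1; /* FIXED_HARDWARE throttling needs Intel + the ACPI feature flag */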
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index ff9d832a163d..d38c40fe4ddb 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
@@ -561,27 +561,6 @@ void ahci_start_engine(struct ata_port *ap) | |||
561 | { | 561 | { |
562 | void __iomem *port_mmio = ahci_port_base(ap); | 562 | void __iomem *port_mmio = ahci_port_base(ap); |
563 | u32 tmp; | 563 | u32 tmp; |
564 | u8 status; | ||
565 | |||
566 | status = readl(port_mmio + PORT_TFDATA) & 0xFF; | ||
567 | |||
568 | /* | ||
569 | * At the end of section 10.1 of the AHCI spec (rev 1.3), it states | ||
570 | * Software shall not set PxCMD.ST to 1 until it is determined | ||
571 | * that a functional device is present on the port as determined by | ||
572 | * PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h | ||
573 | * | ||
574 | * Even though most AHCI host controllers work without this check, | ||
575 | * specific controller will fail under this condition | ||
576 | */ | ||
577 | if (status & (ATA_BUSY | ATA_DRQ)) | ||
578 | return; | ||
579 | else { | ||
580 | ahci_scr_read(&ap->link, SCR_STATUS, &tmp); | ||
581 | |||
582 | if ((tmp & 0xf) != 0x3) | ||
583 | return; | ||
584 | } | ||
585 | 564 | ||
586 | /* start DMA */ | 565 | /* start DMA */ |
587 | tmp = readl(port_mmio + PORT_CMD); | 566 | tmp = readl(port_mmio + PORT_CMD); |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index f26f2fe3480a..dad9fd660f37 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -3316,7 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, | |||
3316 | struct ata_eh_context *ehc = &link->eh_context; | 3316 | struct ata_eh_context *ehc = &link->eh_context; |
3317 | struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; | 3317 | struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; |
3318 | enum ata_lpm_policy old_policy = link->lpm_policy; | 3318 | enum ata_lpm_policy old_policy = link->lpm_policy; |
3319 | bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM; | 3319 | bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; |
3320 | unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; | 3320 | unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; |
3321 | unsigned int err_mask; | 3321 | unsigned int err_mask; |
3322 | int rc; | 3322 | int rc; |
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index bdd2719f3f68..bc9e702186dd 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c | |||
@@ -2643,16 +2643,19 @@ fore200e_init(struct fore200e* fore200e, struct device *parent) | |||
2643 | } | 2643 | } |
2644 | 2644 | ||
2645 | #ifdef CONFIG_SBUS | 2645 | #ifdef CONFIG_SBUS |
2646 | static const struct of_device_id fore200e_sba_match[]; | ||
2646 | static int __devinit fore200e_sba_probe(struct platform_device *op) | 2647 | static int __devinit fore200e_sba_probe(struct platform_device *op) |
2647 | { | 2648 | { |
2649 | const struct of_device_id *match; | ||
2648 | const struct fore200e_bus *bus; | 2650 | const struct fore200e_bus *bus; |
2649 | struct fore200e *fore200e; | 2651 | struct fore200e *fore200e; |
2650 | static int index = 0; | 2652 | static int index = 0; |
2651 | int err; | 2653 | int err; |
2652 | 2654 | ||
2653 | if (!op->dev.of_match) | 2655 | match = of_match_device(fore200e_sba_match, &op->dev); |
2656 | if (!match) | ||
2654 | return -EINVAL; | 2657 | return -EINVAL; |
2655 | bus = op->dev.of_match->data; | 2658 | bus = match->data; |
2656 | 2659 | ||
2657 | fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); | 2660 | fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); |
2658 | if (!fore200e) | 2661 | if (!fore200e) |
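This is part of the removal of dev->of_match: probe routines now ask of_match_device() for the matching table entry instead of trusting a cached pointer. The generic shape for a driver keeping per-variant data in its match table (the foo_* names are illustrative):

    static int __devinit foo_probe(struct platform_device *op)
    {
            const struct of_device_id *match;

            match = of_match_device(foo_of_match, &op->dev);
            if (!match)
                    return -EINVAL;

            /* per-variant data registered in the match table */
            return foo_setup(op, match->data);
    }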
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index e9e5238f3106..d57e8d0fb823 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig | |||
@@ -168,11 +168,4 @@ config SYS_HYPERVISOR | |||
168 | bool | 168 | bool |
169 | default n | 169 | default n |
170 | 170 | ||
171 | config ARCH_NO_SYSDEV_OPS | ||
172 | bool | ||
173 | ---help--- | ||
174 | To be selected by architectures that don't use sysdev class or | ||
175 | sysdev driver power management (suspend/resume) and shutdown | ||
176 | operations. | ||
177 | |||
178 | endmenu | 171 | endmenu |
diff --git a/drivers/base/base.h b/drivers/base/base.h index 19f49e41ce5d..a34dca0ad041 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h | |||
@@ -111,8 +111,6 @@ static inline int driver_match_device(struct device_driver *drv, | |||
111 | return drv->bus->match ? drv->bus->match(dev, drv) : 1; | 111 | return drv->bus->match ? drv->bus->match(dev, drv) : 1; |
112 | } | 112 | } |
113 | 113 | ||
114 | extern void sysdev_shutdown(void); | ||
115 | |||
116 | extern char *make_class_name(const char *name, struct kobject *kobj); | 114 | extern char *make_class_name(const char *name, struct kobject *kobj); |
117 | 115 | ||
118 | extern int devres_release_all(struct device *dev); | 116 | extern int devres_release_all(struct device *dev); |
diff --git a/drivers/base/core.c b/drivers/base/core.c index 81b78ede37c4..bc8729d603a7 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -400,7 +400,7 @@ static void device_remove_groups(struct device *dev, | |||
400 | static int device_add_attrs(struct device *dev) | 400 | static int device_add_attrs(struct device *dev) |
401 | { | 401 | { |
402 | struct class *class = dev->class; | 402 | struct class *class = dev->class; |
403 | struct device_type *type = dev->type; | 403 | const struct device_type *type = dev->type; |
404 | int error; | 404 | int error; |
405 | 405 | ||
406 | if (class) { | 406 | if (class) { |
@@ -440,7 +440,7 @@ static int device_add_attrs(struct device *dev) | |||
440 | static void device_remove_attrs(struct device *dev) | 440 | static void device_remove_attrs(struct device *dev) |
441 | { | 441 | { |
442 | struct class *class = dev->class; | 442 | struct class *class = dev->class; |
443 | struct device_type *type = dev->type; | 443 | const struct device_type *type = dev->type; |
444 | 444 | ||
445 | device_remove_groups(dev, dev->groups); | 445 | device_remove_groups(dev, dev->groups); |
446 | 446 | ||
@@ -1314,8 +1314,7 @@ EXPORT_SYMBOL_GPL(put_device); | |||
1314 | EXPORT_SYMBOL_GPL(device_create_file); | 1314 | EXPORT_SYMBOL_GPL(device_create_file); |
1315 | EXPORT_SYMBOL_GPL(device_remove_file); | 1315 | EXPORT_SYMBOL_GPL(device_remove_file); |
1316 | 1316 | ||
1317 | struct root_device | 1317 | struct root_device { |
1318 | { | ||
1319 | struct device dev; | 1318 | struct device dev; |
1320 | struct module *owner; | 1319 | struct module *owner; |
1321 | }; | 1320 | }; |
diff --git a/drivers/base/dd.c b/drivers/base/dd.c index da57ee9d63fe..6658da743c3a 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c | |||
@@ -245,6 +245,10 @@ int device_attach(struct device *dev) | |||
245 | 245 | ||
246 | device_lock(dev); | 246 | device_lock(dev); |
247 | if (dev->driver) { | 247 | if (dev->driver) { |
248 | if (klist_node_attached(&dev->p->knode_driver)) { | ||
249 | ret = 1; | ||
250 | goto out_unlock; | ||
251 | } | ||
248 | ret = device_bind_driver(dev); | 252 | ret = device_bind_driver(dev); |
249 | if (ret == 0) | 253 | if (ret == 0) |
250 | ret = 1; | 254 | ret = 1; |
@@ -257,6 +261,7 @@ int device_attach(struct device *dev) | |||
257 | ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); | 261 | ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); |
258 | pm_runtime_put_sync(dev); | 262 | pm_runtime_put_sync(dev); |
259 | } | 263 | } |
264 | out_unlock: | ||
260 | device_unlock(dev); | 265 | device_unlock(dev); |
261 | return ret; | 266 | return ret; |
262 | } | 267 | } |
@@ -316,8 +321,7 @@ static void __device_release_driver(struct device *dev) | |||
316 | 321 | ||
317 | drv = dev->driver; | 322 | drv = dev->driver; |
318 | if (drv) { | 323 | if (drv) { |
319 | pm_runtime_get_noresume(dev); | 324 | pm_runtime_get_sync(dev); |
320 | pm_runtime_barrier(dev); | ||
321 | 325 | ||
322 | driver_sysfs_remove(dev); | 326 | driver_sysfs_remove(dev); |
323 | 327 | ||
@@ -326,6 +330,8 @@ static void __device_release_driver(struct device *dev) | |||
326 | BUS_NOTIFY_UNBIND_DRIVER, | 330 | BUS_NOTIFY_UNBIND_DRIVER, |
327 | dev); | 331 | dev); |
328 | 332 | ||
333 | pm_runtime_put_sync(dev); | ||
334 | |||
329 | if (dev->bus && dev->bus->remove) | 335 | if (dev->bus && dev->bus->remove) |
330 | dev->bus->remove(dev); | 336 | dev->bus->remove(dev); |
331 | else if (drv->remove) | 337 | else if (drv->remove) |
@@ -338,7 +344,6 @@ static void __device_release_driver(struct device *dev) | |||
338 | BUS_NOTIFY_UNBOUND_DRIVER, | 344 | BUS_NOTIFY_UNBOUND_DRIVER, |
339 | dev); | 345 | dev); |
340 | 346 | ||
341 | pm_runtime_put_sync(dev); | ||
342 | } | 347 | } |
343 | } | 348 | } |
344 | 349 | ||
@@ -408,17 +413,16 @@ void *dev_get_drvdata(const struct device *dev) | |||
408 | } | 413 | } |
409 | EXPORT_SYMBOL(dev_get_drvdata); | 414 | EXPORT_SYMBOL(dev_get_drvdata); |
410 | 415 | ||
411 | void dev_set_drvdata(struct device *dev, void *data) | 416 | int dev_set_drvdata(struct device *dev, void *data) |
412 | { | 417 | { |
413 | int error; | 418 | int error; |
414 | 419 | ||
415 | if (!dev) | ||
416 | return; | ||
417 | if (!dev->p) { | 420 | if (!dev->p) { |
418 | error = device_private_init(dev); | 421 | error = device_private_init(dev); |
419 | if (error) | 422 | if (error) |
420 | return; | 423 | return error; |
421 | } | 424 | } |
422 | dev->p->driver_data = data; | 425 | dev->p->driver_data = data; |
426 | return 0; | ||
423 | } | 427 | } |
424 | EXPORT_SYMBOL(dev_set_drvdata); | 428 | EXPORT_SYMBOL(dev_set_drvdata); |
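Since dev_set_drvdata() may need to allocate dev->p via device_private_init(), it can fail; the old void return hid an -ENOMEM. Callers should start checking the result (sketch):

    err = dev_set_drvdata(&pdev->dev, priv);
    if (err)
            return err; /* device_private_init() failed, typically -ENOMEM */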
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 8c798ef7f13f..bbb03e6f7255 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
@@ -521,6 +521,11 @@ static int _request_firmware(const struct firmware **firmware_p, | |||
521 | if (!firmware_p) | 521 | if (!firmware_p) |
522 | return -EINVAL; | 522 | return -EINVAL; |
523 | 523 | ||
524 | if (WARN_ON(usermodehelper_is_disabled())) { | ||
525 | dev_err(device, "firmware: %s will not be loaded\n", name); | ||
526 | return -EBUSY; | ||
527 | } | ||
528 | |||
524 | *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); | 529 | *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); |
525 | if (!firmware) { | 530 | if (!firmware) { |
526 | dev_err(device, "%s: kmalloc(struct firmware) failed\n", | 531 | dev_err(device, "%s: kmalloc(struct firmware) failed\n", |
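Refusing to load firmware while usermode helpers are disabled (i.e. in the middle of suspend/resume) turns a silent, deadlock-prone wait into an immediate, noisy -EBUSY. Drivers that want firmware after resume should defer the load to ordinary process context; a hypothetical deferral (struct foo and FOO_FW_NAME are illustrative, not from this patch):

    static void foo_fw_work(struct work_struct *work)
    {
            struct foo *priv = container_of(work, struct foo, fw_work);

            /* Usermode helpers should be available again by the time this runs. */
            if (request_firmware(&priv->fw, FOO_FW_NAME, priv->dev))
                    dev_err(priv->dev, "deferred firmware load failed\n");
    }

    /* from the resume callback: */
    schedule_work(&priv->fw_work);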
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 3da6a43b7756..0a134a424a37 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
@@ -48,7 +48,8 @@ static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj) | |||
48 | return MEMORY_CLASS_NAME; | 48 | return MEMORY_CLASS_NAME; |
49 | } | 49 | } |
50 | 50 | ||
51 | static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uevent_env *env) | 51 | static int memory_uevent(struct kset *kset, struct kobject *obj, |
52 | struct kobj_uevent_env *env) | ||
52 | { | 53 | { |
53 | int retval = 0; | 54 | int retval = 0; |
54 | 55 | ||
@@ -228,10 +229,11 @@ int memory_isolate_notify(unsigned long val, void *v) | |||
228 | * OK to have direct references to sparsemem variables in here. | 229 | * OK to have direct references to sparsemem variables in here. |
229 | */ | 230 | */ |
230 | static int | 231 | static int |
231 | memory_section_action(unsigned long phys_index, unsigned long action) | 232 | memory_block_action(unsigned long phys_index, unsigned long action) |
232 | { | 233 | { |
233 | int i; | 234 | int i; |
234 | unsigned long start_pfn, start_paddr; | 235 | unsigned long start_pfn, start_paddr; |
236 | unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; | ||
235 | struct page *first_page; | 237 | struct page *first_page; |
236 | int ret; | 238 | int ret; |
237 | 239 | ||
@@ -243,7 +245,7 @@ memory_section_action(unsigned long phys_index, unsigned long action) | |||
243 | * that way. | 245 | * that way. |
244 | */ | 246 | */ |
245 | if (action == MEM_ONLINE) { | 247 | if (action == MEM_ONLINE) { |
246 | for (i = 0; i < PAGES_PER_SECTION; i++) { | 248 | for (i = 0; i < nr_pages; i++) { |
247 | if (PageReserved(first_page+i)) | 249 | if (PageReserved(first_page+i)) |
248 | continue; | 250 | continue; |
249 | 251 | ||
@@ -257,12 +259,12 @@ memory_section_action(unsigned long phys_index, unsigned long action) | |||
257 | switch (action) { | 259 | switch (action) { |
258 | case MEM_ONLINE: | 260 | case MEM_ONLINE: |
259 | start_pfn = page_to_pfn(first_page); | 261 | start_pfn = page_to_pfn(first_page); |
260 | ret = online_pages(start_pfn, PAGES_PER_SECTION); | 262 | ret = online_pages(start_pfn, nr_pages); |
261 | break; | 263 | break; |
262 | case MEM_OFFLINE: | 264 | case MEM_OFFLINE: |
263 | start_paddr = page_to_pfn(first_page) << PAGE_SHIFT; | 265 | start_paddr = page_to_pfn(first_page) << PAGE_SHIFT; |
264 | ret = remove_memory(start_paddr, | 266 | ret = remove_memory(start_paddr, |
265 | PAGES_PER_SECTION << PAGE_SHIFT); | 267 | nr_pages << PAGE_SHIFT); |
266 | break; | 268 | break; |
267 | default: | 269 | default: |
268 | WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: " | 270 | WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: " |
@@ -276,7 +278,7 @@ memory_section_action(unsigned long phys_index, unsigned long action) | |||
276 | static int memory_block_change_state(struct memory_block *mem, | 278 | static int memory_block_change_state(struct memory_block *mem, |
277 | unsigned long to_state, unsigned long from_state_req) | 279 | unsigned long to_state, unsigned long from_state_req) |
278 | { | 280 | { |
279 | int i, ret = 0; | 281 | int ret = 0; |
280 | 282 | ||
281 | mutex_lock(&mem->state_mutex); | 283 | mutex_lock(&mem->state_mutex); |
282 | 284 | ||
@@ -288,20 +290,11 @@ static int memory_block_change_state(struct memory_block *mem, | |||
288 | if (to_state == MEM_OFFLINE) | 290 | if (to_state == MEM_OFFLINE) |
289 | mem->state = MEM_GOING_OFFLINE; | 291 | mem->state = MEM_GOING_OFFLINE; |
290 | 292 | ||
291 | for (i = 0; i < sections_per_block; i++) { | 293 | ret = memory_block_action(mem->start_section_nr, to_state); |
292 | ret = memory_section_action(mem->start_section_nr + i, | ||
293 | to_state); | ||
294 | if (ret) | ||
295 | break; | ||
296 | } | ||
297 | |||
298 | if (ret) { | ||
299 | for (i = 0; i < sections_per_block; i++) | ||
300 | memory_section_action(mem->start_section_nr + i, | ||
301 | from_state_req); | ||
302 | 294 | ||
295 | if (ret) | ||
303 | mem->state = from_state_req; | 296 | mem->state = from_state_req; |
304 | } else | 297 | else |
305 | mem->state = to_state; | 298 | mem->state = to_state; |
306 | 299 | ||
307 | out: | 300 | out: |
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9e0e4fc24c46..1c291af637b3 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -192,18 +192,18 @@ EXPORT_SYMBOL_GPL(platform_device_alloc); | |||
192 | int platform_device_add_resources(struct platform_device *pdev, | 192 | int platform_device_add_resources(struct platform_device *pdev, |
193 | const struct resource *res, unsigned int num) | 193 | const struct resource *res, unsigned int num) |
194 | { | 194 | { |
195 | struct resource *r; | 195 | struct resource *r = NULL; |
196 | 196 | ||
197 | if (!res) | 197 | if (res) { |
198 | return 0; | 198 | r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); |
199 | 199 | if (!r) | |
200 | r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); | 200 | return -ENOMEM; |
201 | if (r) { | ||
202 | pdev->resource = r; | ||
203 | pdev->num_resources = num; | ||
204 | return 0; | ||
205 | } | 201 | } |
206 | return -ENOMEM; | 202 | |
203 | kfree(pdev->resource); | ||
204 | pdev->resource = r; | ||
205 | pdev->num_resources = num; | ||
206 | return 0; | ||
207 | } | 207 | } |
208 | EXPORT_SYMBOL_GPL(platform_device_add_resources); | 208 | EXPORT_SYMBOL_GPL(platform_device_add_resources); |
209 | 209 | ||
@@ -220,17 +220,17 @@ EXPORT_SYMBOL_GPL(platform_device_add_resources); | |||
220 | int platform_device_add_data(struct platform_device *pdev, const void *data, | 220 | int platform_device_add_data(struct platform_device *pdev, const void *data, |
221 | size_t size) | 221 | size_t size) |
222 | { | 222 | { |
223 | void *d; | 223 | void *d = NULL; |
224 | 224 | ||
225 | if (!data) | 225 | if (data) { |
226 | return 0; | 226 | d = kmemdup(data, size, GFP_KERNEL); |
227 | 227 | if (!d) | |
228 | d = kmemdup(data, size, GFP_KERNEL); | 228 | return -ENOMEM; |
229 | if (d) { | ||
230 | pdev->dev.platform_data = d; | ||
231 | return 0; | ||
232 | } | 229 | } |
233 | return -ENOMEM; | 230 | |
231 | kfree(pdev->dev.platform_data); | ||
232 | pdev->dev.platform_data = d; | ||
233 | return 0; | ||
234 | } | 234 | } |
235 | EXPORT_SYMBOL_GPL(platform_device_add_data); | 235 | EXPORT_SYMBOL_GPL(platform_device_add_data); |
236 | 236 | ||
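Both helpers now replace rather than append: any previously attached copy is freed, and a NULL source explicitly clears the field instead of being ignored. That makes the following sequence leak-free and well-defined:

    /* attach a copy of the static template ... */
    ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));

    /* ... replace it later (the old copy is freed) ... */
    ret = platform_device_add_resources(pdev, other_res, ARRAY_SIZE(other_res));

    /* ... or drop it entirely */
    platform_device_add_resources(pdev, NULL, 0);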
@@ -667,7 +667,7 @@ static int platform_legacy_resume(struct device *dev) | |||
667 | return ret; | 667 | return ret; |
668 | } | 668 | } |
669 | 669 | ||
670 | static int platform_pm_prepare(struct device *dev) | 670 | int platform_pm_prepare(struct device *dev) |
671 | { | 671 | { |
672 | struct device_driver *drv = dev->driver; | 672 | struct device_driver *drv = dev->driver; |
673 | int ret = 0; | 673 | int ret = 0; |
@@ -678,7 +678,7 @@ static int platform_pm_prepare(struct device *dev) | |||
678 | return ret; | 678 | return ret; |
679 | } | 679 | } |
680 | 680 | ||
681 | static void platform_pm_complete(struct device *dev) | 681 | void platform_pm_complete(struct device *dev) |
682 | { | 682 | { |
683 | struct device_driver *drv = dev->driver; | 683 | struct device_driver *drv = dev->driver; |
684 | 684 | ||
@@ -686,16 +686,11 @@ static void platform_pm_complete(struct device *dev) | |||
686 | drv->pm->complete(dev); | 686 | drv->pm->complete(dev); |
687 | } | 687 | } |
688 | 688 | ||
689 | #else /* !CONFIG_PM_SLEEP */ | 689 | #endif /* CONFIG_PM_SLEEP */ |
690 | |||
691 | #define platform_pm_prepare NULL | ||
692 | #define platform_pm_complete NULL | ||
693 | |||
694 | #endif /* !CONFIG_PM_SLEEP */ | ||
695 | 690 | ||
696 | #ifdef CONFIG_SUSPEND | 691 | #ifdef CONFIG_SUSPEND |
697 | 692 | ||
698 | int __weak platform_pm_suspend(struct device *dev) | 693 | int platform_pm_suspend(struct device *dev) |
699 | { | 694 | { |
700 | struct device_driver *drv = dev->driver; | 695 | struct device_driver *drv = dev->driver; |
701 | int ret = 0; | 696 | int ret = 0; |
@@ -713,7 +708,7 @@ int __weak platform_pm_suspend(struct device *dev) | |||
713 | return ret; | 708 | return ret; |
714 | } | 709 | } |
715 | 710 | ||
716 | int __weak platform_pm_suspend_noirq(struct device *dev) | 711 | int platform_pm_suspend_noirq(struct device *dev) |
717 | { | 712 | { |
718 | struct device_driver *drv = dev->driver; | 713 | struct device_driver *drv = dev->driver; |
719 | int ret = 0; | 714 | int ret = 0; |
@@ -729,7 +724,7 @@ int __weak platform_pm_suspend_noirq(struct device *dev) | |||
729 | return ret; | 724 | return ret; |
730 | } | 725 | } |
731 | 726 | ||
732 | int __weak platform_pm_resume(struct device *dev) | 727 | int platform_pm_resume(struct device *dev) |
733 | { | 728 | { |
734 | struct device_driver *drv = dev->driver; | 729 | struct device_driver *drv = dev->driver; |
735 | int ret = 0; | 730 | int ret = 0; |
@@ -747,7 +742,7 @@ int __weak platform_pm_resume(struct device *dev) | |||
747 | return ret; | 742 | return ret; |
748 | } | 743 | } |
749 | 744 | ||
750 | int __weak platform_pm_resume_noirq(struct device *dev) | 745 | int platform_pm_resume_noirq(struct device *dev) |
751 | { | 746 | { |
752 | struct device_driver *drv = dev->driver; | 747 | struct device_driver *drv = dev->driver; |
753 | int ret = 0; | 748 | int ret = 0; |
@@ -763,18 +758,11 @@ int __weak platform_pm_resume_noirq(struct device *dev) | |||
763 | return ret; | 758 | return ret; |
764 | } | 759 | } |
765 | 760 | ||
766 | #else /* !CONFIG_SUSPEND */ | 761 | #endif /* CONFIG_SUSPEND */ |
767 | |||
768 | #define platform_pm_suspend NULL | ||
769 | #define platform_pm_resume NULL | ||
770 | #define platform_pm_suspend_noirq NULL | ||
771 | #define platform_pm_resume_noirq NULL | ||
772 | |||
773 | #endif /* !CONFIG_SUSPEND */ | ||
774 | 762 | ||
775 | #ifdef CONFIG_HIBERNATE_CALLBACKS | 763 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
776 | 764 | ||
777 | static int platform_pm_freeze(struct device *dev) | 765 | int platform_pm_freeze(struct device *dev) |
778 | { | 766 | { |
779 | struct device_driver *drv = dev->driver; | 767 | struct device_driver *drv = dev->driver; |
780 | int ret = 0; | 768 | int ret = 0; |
@@ -792,7 +780,7 @@ static int platform_pm_freeze(struct device *dev) | |||
792 | return ret; | 780 | return ret; |
793 | } | 781 | } |
794 | 782 | ||
795 | static int platform_pm_freeze_noirq(struct device *dev) | 783 | int platform_pm_freeze_noirq(struct device *dev) |
796 | { | 784 | { |
797 | struct device_driver *drv = dev->driver; | 785 | struct device_driver *drv = dev->driver; |
798 | int ret = 0; | 786 | int ret = 0; |
@@ -808,7 +796,7 @@ static int platform_pm_freeze_noirq(struct device *dev) | |||
808 | return ret; | 796 | return ret; |
809 | } | 797 | } |
810 | 798 | ||
811 | static int platform_pm_thaw(struct device *dev) | 799 | int platform_pm_thaw(struct device *dev) |
812 | { | 800 | { |
813 | struct device_driver *drv = dev->driver; | 801 | struct device_driver *drv = dev->driver; |
814 | int ret = 0; | 802 | int ret = 0; |
@@ -826,7 +814,7 @@ static int platform_pm_thaw(struct device *dev) | |||
826 | return ret; | 814 | return ret; |
827 | } | 815 | } |
828 | 816 | ||
829 | static int platform_pm_thaw_noirq(struct device *dev) | 817 | int platform_pm_thaw_noirq(struct device *dev) |
830 | { | 818 | { |
831 | struct device_driver *drv = dev->driver; | 819 | struct device_driver *drv = dev->driver; |
832 | int ret = 0; | 820 | int ret = 0; |
@@ -842,7 +830,7 @@ static int platform_pm_thaw_noirq(struct device *dev) | |||
842 | return ret; | 830 | return ret; |
843 | } | 831 | } |
844 | 832 | ||
845 | static int platform_pm_poweroff(struct device *dev) | 833 | int platform_pm_poweroff(struct device *dev) |
846 | { | 834 | { |
847 | struct device_driver *drv = dev->driver; | 835 | struct device_driver *drv = dev->driver; |
848 | int ret = 0; | 836 | int ret = 0; |
@@ -860,7 +848,7 @@ static int platform_pm_poweroff(struct device *dev) | |||
860 | return ret; | 848 | return ret; |
861 | } | 849 | } |
862 | 850 | ||
863 | static int platform_pm_poweroff_noirq(struct device *dev) | 851 | int platform_pm_poweroff_noirq(struct device *dev) |
864 | { | 852 | { |
865 | struct device_driver *drv = dev->driver; | 853 | struct device_driver *drv = dev->driver; |
866 | int ret = 0; | 854 | int ret = 0; |
@@ -876,7 +864,7 @@ static int platform_pm_poweroff_noirq(struct device *dev) | |||
876 | return ret; | 864 | return ret; |
877 | } | 865 | } |
878 | 866 | ||
879 | static int platform_pm_restore(struct device *dev) | 867 | int platform_pm_restore(struct device *dev) |
880 | { | 868 | { |
881 | struct device_driver *drv = dev->driver; | 869 | struct device_driver *drv = dev->driver; |
882 | int ret = 0; | 870 | int ret = 0; |
@@ -894,7 +882,7 @@ static int platform_pm_restore(struct device *dev) | |||
894 | return ret; | 882 | return ret; |
895 | } | 883 | } |
896 | 884 | ||
897 | static int platform_pm_restore_noirq(struct device *dev) | 885 | int platform_pm_restore_noirq(struct device *dev) |
898 | { | 886 | { |
899 | struct device_driver *drv = dev->driver; | 887 | struct device_driver *drv = dev->driver; |
900 | int ret = 0; | 888 | int ret = 0; |
@@ -910,62 +898,13 @@ static int platform_pm_restore_noirq(struct device *dev) | |||
910 | return ret; | 898 | return ret; |
911 | } | 899 | } |
912 | 900 | ||
913 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ | 901 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
914 | |||
915 | #define platform_pm_freeze NULL | ||
916 | #define platform_pm_thaw NULL | ||
917 | #define platform_pm_poweroff NULL | ||
918 | #define platform_pm_restore NULL | ||
919 | #define platform_pm_freeze_noirq NULL | ||
920 | #define platform_pm_thaw_noirq NULL | ||
921 | #define platform_pm_poweroff_noirq NULL | ||
922 | #define platform_pm_restore_noirq NULL | ||
923 | |||
924 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ | ||
925 | |||
926 | #ifdef CONFIG_PM_RUNTIME | ||
927 | |||
928 | int __weak platform_pm_runtime_suspend(struct device *dev) | ||
929 | { | ||
930 | return pm_generic_runtime_suspend(dev); | ||
931 | }; | ||
932 | |||
933 | int __weak platform_pm_runtime_resume(struct device *dev) | ||
934 | { | ||
935 | return pm_generic_runtime_resume(dev); | ||
936 | }; | ||
937 | |||
938 | int __weak platform_pm_runtime_idle(struct device *dev) | ||
939 | { | ||
940 | return pm_generic_runtime_idle(dev); | ||
941 | }; | ||
942 | |||
943 | #else /* !CONFIG_PM_RUNTIME */ | ||
944 | |||
945 | #define platform_pm_runtime_suspend NULL | ||
946 | #define platform_pm_runtime_resume NULL | ||
947 | #define platform_pm_runtime_idle NULL | ||
948 | |||
949 | #endif /* !CONFIG_PM_RUNTIME */ | ||
950 | 902 | ||
951 | static const struct dev_pm_ops platform_dev_pm_ops = { | 903 | static const struct dev_pm_ops platform_dev_pm_ops = { |
952 | .prepare = platform_pm_prepare, | 904 | .runtime_suspend = pm_generic_runtime_suspend, |
953 | .complete = platform_pm_complete, | 905 | .runtime_resume = pm_generic_runtime_resume, |
954 | .suspend = platform_pm_suspend, | 906 | .runtime_idle = pm_generic_runtime_idle, |
955 | .resume = platform_pm_resume, | 907 | USE_PLATFORM_PM_SLEEP_OPS |
956 | .freeze = platform_pm_freeze, | ||
957 | .thaw = platform_pm_thaw, | ||
958 | .poweroff = platform_pm_poweroff, | ||
959 | .restore = platform_pm_restore, | ||
960 | .suspend_noirq = platform_pm_suspend_noirq, | ||
961 | .resume_noirq = platform_pm_resume_noirq, | ||
962 | .freeze_noirq = platform_pm_freeze_noirq, | ||
963 | .thaw_noirq = platform_pm_thaw_noirq, | ||
964 | .poweroff_noirq = platform_pm_poweroff_noirq, | ||
965 | .restore_noirq = platform_pm_restore_noirq, | ||
966 | .runtime_suspend = platform_pm_runtime_suspend, | ||
967 | .runtime_resume = platform_pm_runtime_resume, | ||
968 | .runtime_idle = platform_pm_runtime_idle, | ||
969 | }; | 908 | }; |
970 | 909 | ||
971 | struct bus_type platform_bus_type = { | 910 | struct bus_type platform_bus_type = { |
@@ -977,41 +916,6 @@ struct bus_type platform_bus_type = { | |||
977 | }; | 916 | }; |
978 | EXPORT_SYMBOL_GPL(platform_bus_type); | 917 | EXPORT_SYMBOL_GPL(platform_bus_type); |
979 | 918 | ||
980 | /** | ||
981 | * platform_bus_get_pm_ops() - return pointer to busses dev_pm_ops | ||
982 | * | ||
983 | * This function can be used by platform code to get the current | ||
984 | * set of dev_pm_ops functions used by the platform_bus_type. | ||
985 | */ | ||
986 | const struct dev_pm_ops * __init platform_bus_get_pm_ops(void) | ||
987 | { | ||
988 | return platform_bus_type.pm; | ||
989 | } | ||
990 | |||
991 | /** | ||
992 | * platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type | ||
993 | * | ||
994 | * @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type | ||
995 | * | ||
996 | * Platform code can override the dev_pm_ops methods of | ||
997 | * platform_bus_type by using this function. It is expected that | ||
998 | * platform code will first do a platform_bus_get_pm_ops(), then | ||
999 | * kmemdup it, then customize selected methods and pass a pointer to | ||
1000 | * the new struct dev_pm_ops to this function. | ||
1001 | * | ||
1002 | * Since platform-specific code is customizing methods for *all* | ||
1003 | * devices (not just platform-specific devices) it is expected that | ||
1004 | * any custom overrides of these functions will keep existing behavior | ||
1005 | * and simply extend it. For example, any customization of the | ||
1006 | * runtime PM methods should continue to call the pm_generic_* | ||
1007 | * functions as the default ones do in addition to the | ||
1008 | * platform-specific behavior. | ||
1009 | */ | ||
1010 | void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm) | ||
1011 | { | ||
1012 | platform_bus_type.pm = pm; | ||
1013 | } | ||
1014 | |||
1015 | int __init platform_bus_init(void) | 919 | int __init platform_bus_init(void) |
1016 | { | 920 | { |
1017 | int error; | 921 | int error; |
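The platform_pm_* sleep callbacks above lose their static and __weak markers: they are now declared in a header and pulled into platform_dev_pm_ops through the single USE_PLATFORM_PM_SLEEP_OPS initializer, which replaces both the long per-field assignment list and the "#define platform_pm_* NULL" stubs deleted here. A plausible shape for that macro, assuming it sits in <linux/platform_device.h> beside the new declarations:

	#ifdef CONFIG_PM_SLEEP
	#define USE_PLATFORM_PM_SLEEP_OPS \
		.prepare = platform_pm_prepare, \
		.complete = platform_pm_complete, \
		.suspend = platform_pm_suspend, \
		.resume = platform_pm_resume, \
		.freeze = platform_pm_freeze, \
		.thaw = platform_pm_thaw, \
		.poweroff = platform_pm_poweroff, \
		.restore = platform_pm_restore, \
		.suspend_noirq = platform_pm_suspend_noirq, \
		.resume_noirq = platform_pm_resume_noirq, \
		.freeze_noirq = platform_pm_freeze_noirq, \
		.thaw_noirq = platform_pm_thaw_noirq, \
		.poweroff_noirq = platform_pm_poweroff_noirq, \
		.restore_noirq = platform_pm_restore_noirq,
	#else
	#define USE_PLATFORM_PM_SLEEP_OPS
	#endif

With CONFIG_PM_SLEEP unset the macro expands to nothing, leaving the dev_pm_ops fields NULL, which is exactly what the removed stubs arranged.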
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 118c1b92a511..3647e114d0e7 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile | |||
@@ -3,6 +3,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o | |||
3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o | 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o |
4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o | 4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o |
5 | obj-$(CONFIG_PM_OPP) += opp.o | 5 | obj-$(CONFIG_PM_OPP) += opp.o |
6 | obj-$(CONFIG_HAVE_CLK) += clock_ops.o | ||
6 | 7 | ||
7 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG | 8 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file |
8 | ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG | ||
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c new file mode 100644 index 000000000000..c0dd09df7be8 --- /dev/null +++ b/drivers/base/power/clock_ops.c | |||
@@ -0,0 +1,431 @@ | |||
1 | /* | ||
2 | * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks | ||
3 | * | ||
4 | * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/io.h> | ||
12 | #include <linux/pm.h> | ||
13 | #include <linux/pm_runtime.h> | ||
14 | #include <linux/clk.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/err.h> | ||
17 | |||
18 | #ifdef CONFIG_PM_RUNTIME | ||
19 | |||
20 | struct pm_runtime_clk_data { | ||
21 | struct list_head clock_list; | ||
22 | struct mutex lock; | ||
23 | }; | ||
24 | |||
25 | enum pce_status { | ||
26 | PCE_STATUS_NONE = 0, | ||
27 | PCE_STATUS_ACQUIRED, | ||
28 | PCE_STATUS_ENABLED, | ||
29 | PCE_STATUS_ERROR, | ||
30 | }; | ||
31 | |||
32 | struct pm_clock_entry { | ||
33 | struct list_head node; | ||
34 | char *con_id; | ||
35 | struct clk *clk; | ||
36 | enum pce_status status; | ||
37 | }; | ||
38 | |||
39 | static struct pm_runtime_clk_data *__to_prd(struct device *dev) | ||
40 | { | ||
41 | return dev ? dev->power.subsys_data : NULL; | ||
42 | } | ||
43 | |||
44 | /** | ||
45 | * pm_runtime_clk_add - Start using a device clock for runtime PM. | ||
46 | * @dev: Device whose clock is going to be used for runtime PM. | ||
47 | * @con_id: Connection ID of the clock. | ||
48 | * | ||
49 | * Add the clock represented by @con_id to the list of clocks used for | ||
50 | * the runtime PM of @dev. | ||
51 | */ | ||
52 | int pm_runtime_clk_add(struct device *dev, const char *con_id) | ||
53 | { | ||
54 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
55 | struct pm_clock_entry *ce; | ||
56 | |||
57 | if (!prd) | ||
58 | return -EINVAL; | ||
59 | |||
60 | ce = kzalloc(sizeof(*ce), GFP_KERNEL); | ||
61 | if (!ce) { | ||
62 | dev_err(dev, "Not enough memory for clock entry.\n"); | ||
63 | return -ENOMEM; | ||
64 | } | ||
65 | |||
66 | if (con_id) { | ||
67 | ce->con_id = kstrdup(con_id, GFP_KERNEL); | ||
68 | if (!ce->con_id) { | ||
69 | dev_err(dev, | ||
70 | "Not enough memory for clock connection ID.\n"); | ||
71 | kfree(ce); | ||
72 | return -ENOMEM; | ||
73 | } | ||
74 | } | ||
75 | |||
76 | mutex_lock(&prd->lock); | ||
77 | list_add_tail(&ce->node, &prd->clock_list); | ||
78 | mutex_unlock(&prd->lock); | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | /** | ||
83 | * __pm_runtime_clk_remove - Destroy runtime PM clock entry. | ||
84 | * @ce: Runtime PM clock entry to destroy. | ||
85 | * | ||
86 | * This routine must be called under the mutex protecting the runtime PM list | ||
87 | * of clocks corresponding to the @ce's device. | ||
88 | */ | ||
89 | static void __pm_runtime_clk_remove(struct pm_clock_entry *ce) | ||
90 | { | ||
91 | if (!ce) | ||
92 | return; | ||
93 | |||
94 | list_del(&ce->node); | ||
95 | |||
96 | if (ce->status < PCE_STATUS_ERROR) { | ||
97 | if (ce->status == PCE_STATUS_ENABLED) | ||
98 | clk_disable(ce->clk); | ||
99 | |||
100 | if (ce->status >= PCE_STATUS_ACQUIRED) | ||
101 | clk_put(ce->clk); | ||
102 | } | ||
103 | |||
104 | if (ce->con_id) | ||
105 | kfree(ce->con_id); | ||
106 | |||
107 | kfree(ce); | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * pm_runtime_clk_remove - Stop using a device clock for runtime PM. | ||
112 | * @dev: Device whose clock should not be used for runtime PM any more. | ||
113 | * @con_id: Connection ID of the clock. | ||
114 | * | ||
115 | * Remove the clock represented by @con_id from the list of clocks used for | ||
116 | * the runtime PM of @dev. | ||
117 | */ | ||
118 | void pm_runtime_clk_remove(struct device *dev, const char *con_id) | ||
119 | { | ||
120 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
121 | struct pm_clock_entry *ce; | ||
122 | |||
123 | if (!prd) | ||
124 | return; | ||
125 | |||
126 | mutex_lock(&prd->lock); | ||
127 | |||
128 | list_for_each_entry(ce, &prd->clock_list, node) { | ||
129 | if (!con_id && !ce->con_id) { | ||
130 | __pm_runtime_clk_remove(ce); | ||
131 | break; | ||
132 | } else if (!con_id || !ce->con_id) { | ||
133 | continue; | ||
134 | } else if (!strcmp(con_id, ce->con_id)) { | ||
135 | __pm_runtime_clk_remove(ce); | ||
136 | break; | ||
137 | } | ||
138 | } | ||
139 | |||
140 | mutex_unlock(&prd->lock); | ||
141 | } | ||
142 | |||
143 | /** | ||
144 | * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks. | ||
145 | * @dev: Device to initialize the list of runtime PM clocks for. | ||
146 | * | ||
147 | * Allocate a struct pm_runtime_clk_data object, initialize its lock member and | ||
148 | * make the @dev's power.subsys_data field point to it. | ||
149 | */ | ||
150 | int pm_runtime_clk_init(struct device *dev) | ||
151 | { | ||
152 | struct pm_runtime_clk_data *prd; | ||
153 | |||
154 | prd = kzalloc(sizeof(*prd), GFP_KERNEL); | ||
155 | if (!prd) { | ||
156 | dev_err(dev, "Not enough memory for runtime PM data.\n"); | ||
157 | return -ENOMEM; | ||
158 | } | ||
159 | |||
160 | INIT_LIST_HEAD(&prd->clock_list); | ||
161 | mutex_init(&prd->lock); | ||
162 | dev->power.subsys_data = prd; | ||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks. | ||
168 | * @dev: Device to destroy the list of runtime PM clocks for. | ||
169 | * | ||
170 | * Clear the @dev's power.subsys_data field, remove the list of clock entries | ||
171 | * from the struct pm_runtime_clk_data object pointed to by it before and free | ||
172 | * that object. | ||
173 | */ | ||
174 | void pm_runtime_clk_destroy(struct device *dev) | ||
175 | { | ||
176 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
177 | struct pm_clock_entry *ce, *c; | ||
178 | |||
179 | if (!prd) | ||
180 | return; | ||
181 | |||
182 | dev->power.subsys_data = NULL; | ||
183 | |||
184 | mutex_lock(&prd->lock); | ||
185 | |||
186 | list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node) | ||
187 | __pm_runtime_clk_remove(ce); | ||
188 | |||
189 | mutex_unlock(&prd->lock); | ||
190 | |||
191 | kfree(prd); | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * pm_runtime_clk_acquire - Acquire a device clock. | ||
196 | * @dev: Device whose clock is to be acquired. | ||
197 | * @con_id: Connection ID of the clock. | ||
198 | */ | ||
199 | static void pm_runtime_clk_acquire(struct device *dev, | ||
200 | struct pm_clock_entry *ce) | ||
201 | { | ||
202 | ce->clk = clk_get(dev, ce->con_id); | ||
203 | if (IS_ERR(ce->clk)) { | ||
204 | ce->status = PCE_STATUS_ERROR; | ||
205 | } else { | ||
206 | ce->status = PCE_STATUS_ACQUIRED; | ||
207 | dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); | ||
208 | } | ||
209 | } | ||
210 | |||
211 | /** | ||
212 | * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list. | ||
213 | * @dev: Device to disable the clocks for. | ||
214 | */ | ||
215 | int pm_runtime_clk_suspend(struct device *dev) | ||
216 | { | ||
217 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
218 | struct pm_clock_entry *ce; | ||
219 | |||
220 | dev_dbg(dev, "%s()\n", __func__); | ||
221 | |||
222 | if (!prd) | ||
223 | return 0; | ||
224 | |||
225 | mutex_lock(&prd->lock); | ||
226 | |||
227 | list_for_each_entry_reverse(ce, &prd->clock_list, node) { | ||
228 | if (ce->status == PCE_STATUS_NONE) | ||
229 | pm_runtime_clk_acquire(dev, ce); | ||
230 | |||
231 | if (ce->status < PCE_STATUS_ERROR) { | ||
232 | clk_disable(ce->clk); | ||
233 | ce->status = PCE_STATUS_ACQUIRED; | ||
234 | } | ||
235 | } | ||
236 | |||
237 | mutex_unlock(&prd->lock); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list. | ||
244 | * @dev: Device to enable the clocks for. | ||
245 | */ | ||
246 | int pm_runtime_clk_resume(struct device *dev) | ||
247 | { | ||
248 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
249 | struct pm_clock_entry *ce; | ||
250 | |||
251 | dev_dbg(dev, "%s()\n", __func__); | ||
252 | |||
253 | if (!prd) | ||
254 | return 0; | ||
255 | |||
256 | mutex_lock(&prd->lock); | ||
257 | |||
258 | list_for_each_entry(ce, &prd->clock_list, node) { | ||
259 | if (ce->status == PCE_STATUS_NONE) | ||
260 | pm_runtime_clk_acquire(dev, ce); | ||
261 | |||
262 | if (ce->status < PCE_STATUS_ERROR) { | ||
263 | clk_enable(ce->clk); | ||
264 | ce->status = PCE_STATUS_ENABLED; | ||
265 | } | ||
266 | } | ||
267 | |||
268 | mutex_unlock(&prd->lock); | ||
269 | |||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | /** | ||
274 | * pm_runtime_clk_notify - Notify routine for device addition and removal. | ||
275 | * @nb: Notifier block object this function is a member of. | ||
276 | * @action: Operation being carried out by the caller. | ||
277 | * @data: Device the routine is being run for. | ||
278 | * | ||
279 | * For this function to work, @nb must be a member of an object of type | ||
280 | * struct pm_clk_notifier_block containing all of the requisite data. | ||
281 | * Specifically, the pwr_domain member of that object is copied to the device's | ||
282 | * pwr_domain field and its con_ids member is used to populate the device's list | ||
283 | * of runtime PM clocks, depending on @action. | ||
284 | * | ||
285 | * If the device's pwr_domain field is already populated with a value different | ||
286 | * from the one stored in the struct pm_clk_notifier_block object, the function | ||
287 | * does nothing. | ||
288 | */ | ||
289 | static int pm_runtime_clk_notify(struct notifier_block *nb, | ||
290 | unsigned long action, void *data) | ||
291 | { | ||
292 | struct pm_clk_notifier_block *clknb; | ||
293 | struct device *dev = data; | ||
294 | char *con_id; | ||
295 | int error; | ||
296 | |||
297 | dev_dbg(dev, "%s() %ld\n", __func__, action); | ||
298 | |||
299 | clknb = container_of(nb, struct pm_clk_notifier_block, nb); | ||
300 | |||
301 | switch (action) { | ||
302 | case BUS_NOTIFY_ADD_DEVICE: | ||
303 | if (dev->pwr_domain) | ||
304 | break; | ||
305 | |||
306 | error = pm_runtime_clk_init(dev); | ||
307 | if (error) | ||
308 | break; | ||
309 | |||
310 | dev->pwr_domain = clknb->pwr_domain; | ||
311 | if (clknb->con_ids[0]) { | ||
312 | for (con_id = clknb->con_ids[0]; *con_id; con_id++) | ||
313 | pm_runtime_clk_add(dev, con_id); | ||
314 | } else { | ||
315 | pm_runtime_clk_add(dev, NULL); | ||
316 | } | ||
317 | |||
318 | break; | ||
319 | case BUS_NOTIFY_DEL_DEVICE: | ||
320 | if (dev->pwr_domain != clknb->pwr_domain) | ||
321 | break; | ||
322 | |||
323 | dev->pwr_domain = NULL; | ||
324 | pm_runtime_clk_destroy(dev); | ||
325 | break; | ||
326 | } | ||
327 | |||
328 | return 0; | ||
329 | } | ||
330 | |||
331 | #else /* !CONFIG_PM_RUNTIME */ | ||
332 | |||
333 | /** | ||
334 | * enable_clock - Enable a device clock. | ||
335 | * @dev: Device whose clock is to be enabled. | ||
336 | * @con_id: Connection ID of the clock. | ||
337 | */ | ||
338 | static void enable_clock(struct device *dev, const char *con_id) | ||
339 | { | ||
340 | struct clk *clk; | ||
341 | |||
342 | clk = clk_get(dev, con_id); | ||
343 | if (!IS_ERR(clk)) { | ||
344 | clk_enable(clk); | ||
345 | clk_put(clk); | ||
346 | dev_info(dev, "Runtime PM disabled, clock forced on.\n"); | ||
347 | } | ||
348 | } | ||
349 | |||
350 | /** | ||
351 | * disable_clock - Disable a device clock. | ||
352 | * @dev: Device whose clock is to be disabled. | ||
353 | * @con_id: Connection ID of the clock. | ||
354 | */ | ||
355 | static void disable_clock(struct device *dev, const char *con_id) | ||
356 | { | ||
357 | struct clk *clk; | ||
358 | |||
359 | clk = clk_get(dev, con_id); | ||
360 | if (!IS_ERR(clk)) { | ||
361 | clk_disable(clk); | ||
362 | clk_put(clk); | ||
363 | dev_info(dev, "Runtime PM disabled, clock forced off.\n"); | ||
364 | } | ||
365 | } | ||
366 | |||
367 | /** | ||
368 | * pm_runtime_clk_notify - Notify routine for device addition and removal. | ||
369 | * @nb: Notifier block object this function is a member of. | ||
370 | * @action: Operation being carried out by the caller. | ||
371 | * @data: Device the routine is being run for. | ||
372 | * | ||
373 | * For this function to work, @nb must be a member of an object of type | ||
374 | * struct pm_clk_notifier_block containing all of the requisite data. | ||
375 | * Specifically, the con_ids member of that object is used to enable or disable | ||
376 | * the device's clocks, depending on @action. | ||
377 | */ | ||
378 | static int pm_runtime_clk_notify(struct notifier_block *nb, | ||
379 | unsigned long action, void *data) | ||
380 | { | ||
381 | struct pm_clk_notifier_block *clknb; | ||
382 | struct device *dev = data; | ||
383 | char *con_id; | ||
384 | |||
385 | dev_dbg(dev, "%s() %ld\n", __func__, action); | ||
386 | |||
387 | clknb = container_of(nb, struct pm_clk_notifier_block, nb); | ||
388 | |||
389 | switch (action) { | ||
390 | case BUS_NOTIFY_ADD_DEVICE: | ||
391 | if (clknb->con_ids[0]) { | ||
392 | for (con_id = clknb->con_ids[0]; *con_id; con_id++) | ||
393 | enable_clock(dev, con_id); | ||
394 | } else { | ||
395 | enable_clock(dev, NULL); | ||
396 | } | ||
397 | break; | ||
398 | case BUS_NOTIFY_DEL_DEVICE: | ||
399 | if (clknb->con_ids[0]) { | ||
400 | for (con_id = clknb->con_ids[0]; *con_id; con_id++) | ||
401 | disable_clock(dev, con_id); | ||
402 | } else { | ||
403 | disable_clock(dev, NULL); | ||
404 | } | ||
405 | break; | ||
406 | } | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | #endif /* !CONFIG_PM_RUNTIME */ | ||
412 | |||
413 | /** | ||
414 | * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks. | ||
415 | * @bus: Bus type to add the notifier to. | ||
416 | * @clknb: Notifier to be added to the given bus type. | ||
417 | * | ||
418 | * The nb member of @clknb is not expected to be initialized and its | ||
419 | * notifier_call member will be replaced with pm_runtime_clk_notify(). However, | ||
420 | * the remaining members of @clknb should be populated prior to calling this | ||
421 | * routine. | ||
422 | */ | ||
423 | void pm_runtime_clk_add_notifier(struct bus_type *bus, | ||
424 | struct pm_clk_notifier_block *clknb) | ||
425 | { | ||
426 | if (!bus || !clknb) | ||
427 | return; | ||
428 | |||
429 | clknb->nb.notifier_call = pm_runtime_clk_notify; | ||
430 | bus_register_notifier(bus, &clknb->nb); | ||
431 | } | ||
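The new clock_ops.c exposes two layers: a direct API (pm_runtime_clk_init/add/remove/destroy plus the suspend and resume helpers) and a bus notifier that wires devices up automatically as they appear. A minimal caller could look like the sketch below; default_power_domain and board_pm_init are illustrative names, not part of this patch, modeled on how a platform with software-gateable clocks might hook the platform bus:

	static struct dev_power_domain default_power_domain = {
		.ops = {
			.runtime_suspend = pm_runtime_clk_suspend,
			.runtime_resume = pm_runtime_clk_resume,
			.runtime_idle = pm_generic_runtime_idle,
		},
	};

	static struct pm_clk_notifier_block platform_bus_notify = {
		.pwr_domain = &default_power_domain,
		.con_ids = { NULL, },	/* NULL: manage the device's default clock */
	};

	static int __init board_pm_init(void)
	{
		pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notify);
		return 0;
	}
	core_initcall(board_pm_init);

On BUS_NOTIFY_ADD_DEVICE the notifier allocates the clock list, points dev->pwr_domain at the supplied domain and adds one entry per connection ID, so the first runtime suspend gates the device's clock with no per-driver code; with CONFIG_PM_RUNTIME unset the same notifier degrades to forcing the clocks on at device addition.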
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 42f97f925629..cb3bb368681c 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c | |||
@@ -74,6 +74,23 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); | |||
74 | 74 | ||
75 | #ifdef CONFIG_PM_SLEEP | 75 | #ifdef CONFIG_PM_SLEEP |
76 | /** | 76 | /** |
77 | * pm_generic_prepare - Generic routine preparing a device for a power transition. | ||
78 | * @dev: Device to prepare. | ||
79 | * | ||
80 | * Prepare a device for a system-wide power transition. | ||
81 | */ | ||
82 | int pm_generic_prepare(struct device *dev) | ||
83 | { | ||
84 | struct device_driver *drv = dev->driver; | ||
85 | int ret = 0; | ||
86 | |||
87 | if (drv && drv->pm && drv->pm->prepare) | ||
88 | ret = drv->pm->prepare(dev); | ||
89 | |||
90 | return ret; | ||
91 | } | ||
92 | |||
93 | /** | ||
77 | * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. | 94 | * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. |
78 | * @dev: Device to handle. | 95 | * @dev: Device to handle. |
79 | * @event: PM transition of the system under way. | 96 | * @event: PM transition of the system under way. |
@@ -213,16 +230,38 @@ int pm_generic_restore(struct device *dev) | |||
213 | return __pm_generic_resume(dev, PM_EVENT_RESTORE); | 230 | return __pm_generic_resume(dev, PM_EVENT_RESTORE); |
214 | } | 231 | } |
215 | EXPORT_SYMBOL_GPL(pm_generic_restore); | 232 | EXPORT_SYMBOL_GPL(pm_generic_restore); |
233 | |||
234 | /** | ||
235 | * pm_generic_complete - Generic routine completing a device power transition. | ||
236 | * @dev: Device to handle. | ||
237 | * | ||
238 | * Complete a device power transition during a system-wide power transition. | ||
239 | */ | ||
240 | void pm_generic_complete(struct device *dev) | ||
241 | { | ||
242 | struct device_driver *drv = dev->driver; | ||
243 | |||
244 | if (drv && drv->pm && drv->pm->complete) | ||
245 | drv->pm->complete(dev); | ||
246 | |||
247 | /* | ||
248 | * Let runtime PM try to suspend devices that haven't been in use before | ||
249 | * going into the system-wide sleep state we're resuming from. | ||
250 | */ | ||
251 | pm_runtime_idle(dev); | ||
252 | } | ||
216 | #endif /* CONFIG_PM_SLEEP */ | 253 | #endif /* CONFIG_PM_SLEEP */ |
217 | 254 | ||
218 | struct dev_pm_ops generic_subsys_pm_ops = { | 255 | struct dev_pm_ops generic_subsys_pm_ops = { |
219 | #ifdef CONFIG_PM_SLEEP | 256 | #ifdef CONFIG_PM_SLEEP |
257 | .prepare = pm_generic_prepare, | ||
220 | .suspend = pm_generic_suspend, | 258 | .suspend = pm_generic_suspend, |
221 | .resume = pm_generic_resume, | 259 | .resume = pm_generic_resume, |
222 | .freeze = pm_generic_freeze, | 260 | .freeze = pm_generic_freeze, |
223 | .thaw = pm_generic_thaw, | 261 | .thaw = pm_generic_thaw, |
224 | .poweroff = pm_generic_poweroff, | 262 | .poweroff = pm_generic_poweroff, |
225 | .restore = pm_generic_restore, | 263 | .restore = pm_generic_restore, |
264 | .complete = pm_generic_complete, | ||
226 | #endif | 265 | #endif |
227 | #ifdef CONFIG_PM_RUNTIME | 266 | #ifdef CONFIG_PM_RUNTIME |
228 | .runtime_suspend = pm_generic_runtime_suspend, | 267 | .runtime_suspend = pm_generic_runtime_suspend, |
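pm_generic_prepare() and pm_generic_complete() fill the last gaps in the pm_generic_* family, and the pm_runtime_idle() call at the end of pm_generic_complete() lets runtime PM immediately re-suspend devices that were only woken for the system transition. A subsystem that wants the stock behavior can borrow the callbacks wholesale; "mybus" in this sketch is hypothetical:

	static const struct dev_pm_ops mybus_pm_ops = {
		.prepare  = pm_generic_prepare,	  /* forwards to the driver's ->prepare() */
		.suspend  = pm_generic_suspend,
		.resume   = pm_generic_resume,
		.complete = pm_generic_complete,  /* ->complete(), then pm_runtime_idle() */
	};

	struct bus_type mybus_type = {
		.name	= "mybus",
		.pm	= &mybus_pm_ops,
	};

Alternatively it can point .pm at generic_subsys_pm_ops, which this hunk extends with the new .prepare and .complete entries.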
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index abe3ab709e87..aa6320207745 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -426,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
426 | 426 | ||
427 | if (dev->pwr_domain) { | 427 | if (dev->pwr_domain) { |
428 | pm_dev_dbg(dev, state, "EARLY power domain "); | 428 | pm_dev_dbg(dev, state, "EARLY power domain "); |
429 | pm_noirq_op(dev, &dev->pwr_domain->ops, state); | 429 | error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); |
430 | } | 430 | } else if (dev->type && dev->type->pm) { |
431 | |||
432 | if (dev->type && dev->type->pm) { | ||
433 | pm_dev_dbg(dev, state, "EARLY type "); | 431 | pm_dev_dbg(dev, state, "EARLY type "); |
434 | error = pm_noirq_op(dev, dev->type->pm, state); | 432 | error = pm_noirq_op(dev, dev->type->pm, state); |
435 | } else if (dev->class && dev->class->pm) { | 433 | } else if (dev->class && dev->class->pm) { |
@@ -517,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
517 | 515 | ||
518 | if (dev->pwr_domain) { | 516 | if (dev->pwr_domain) { |
519 | pm_dev_dbg(dev, state, "power domain "); | 517 | pm_dev_dbg(dev, state, "power domain "); |
520 | pm_op(dev, &dev->pwr_domain->ops, state); | 518 | error = pm_op(dev, &dev->pwr_domain->ops, state); |
519 | goto End; | ||
521 | } | 520 | } |
522 | 521 | ||
523 | if (dev->type && dev->type->pm) { | 522 | if (dev->type && dev->type->pm) { |
@@ -580,11 +579,13 @@ static bool is_async(struct device *dev) | |||
580 | * Execute the appropriate "resume" callback for all devices whose status | 579 | * Execute the appropriate "resume" callback for all devices whose status |
581 | * indicates that they are suspended. | 580 | * indicates that they are suspended. |
582 | */ | 581 | */ |
583 | static void dpm_resume(pm_message_t state) | 582 | void dpm_resume(pm_message_t state) |
584 | { | 583 | { |
585 | struct device *dev; | 584 | struct device *dev; |
586 | ktime_t starttime = ktime_get(); | 585 | ktime_t starttime = ktime_get(); |
587 | 586 | ||
587 | might_sleep(); | ||
588 | |||
588 | mutex_lock(&dpm_list_mtx); | 589 | mutex_lock(&dpm_list_mtx); |
589 | pm_transition = state; | 590 | pm_transition = state; |
590 | async_error = 0; | 591 | async_error = 0; |
@@ -629,12 +630,11 @@ static void device_complete(struct device *dev, pm_message_t state) | |||
629 | { | 630 | { |
630 | device_lock(dev); | 631 | device_lock(dev); |
631 | 632 | ||
632 | if (dev->pwr_domain && dev->pwr_domain->ops.complete) { | 633 | if (dev->pwr_domain) { |
633 | pm_dev_dbg(dev, state, "completing power domain "); | 634 | pm_dev_dbg(dev, state, "completing power domain "); |
634 | dev->pwr_domain->ops.complete(dev); | 635 | if (dev->pwr_domain->ops.complete) |
635 | } | 636 | dev->pwr_domain->ops.complete(dev); |
636 | 637 | } else if (dev->type && dev->type->pm) { | |
637 | if (dev->type && dev->type->pm) { | ||
638 | pm_dev_dbg(dev, state, "completing type "); | 638 | pm_dev_dbg(dev, state, "completing type "); |
639 | if (dev->type->pm->complete) | 639 | if (dev->type->pm->complete) |
640 | dev->type->pm->complete(dev); | 640 | dev->type->pm->complete(dev); |
@@ -658,10 +658,12 @@ static void device_complete(struct device *dev, pm_message_t state) | |||
658 | * Execute the ->complete() callbacks for all devices whose PM status is not | 658 | * Execute the ->complete() callbacks for all devices whose PM status is not |
659 | * DPM_ON (this allows new devices to be registered). | 659 | * DPM_ON (this allows new devices to be registered). |
660 | */ | 660 | */ |
661 | static void dpm_complete(pm_message_t state) | 661 | void dpm_complete(pm_message_t state) |
662 | { | 662 | { |
663 | struct list_head list; | 663 | struct list_head list; |
664 | 664 | ||
665 | might_sleep(); | ||
666 | |||
665 | INIT_LIST_HEAD(&list); | 667 | INIT_LIST_HEAD(&list); |
666 | mutex_lock(&dpm_list_mtx); | 668 | mutex_lock(&dpm_list_mtx); |
667 | while (!list_empty(&dpm_prepared_list)) { | 669 | while (!list_empty(&dpm_prepared_list)) { |
@@ -690,7 +692,6 @@ static void dpm_complete(pm_message_t state) | |||
690 | */ | 692 | */ |
691 | void dpm_resume_end(pm_message_t state) | 693 | void dpm_resume_end(pm_message_t state) |
692 | { | 694 | { |
693 | might_sleep(); | ||
694 | dpm_resume(state); | 695 | dpm_resume(state); |
695 | dpm_complete(state); | 696 | dpm_complete(state); |
696 | } | 697 | } |
@@ -732,7 +733,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
732 | { | 733 | { |
733 | int error; | 734 | int error; |
734 | 735 | ||
735 | if (dev->type && dev->type->pm) { | 736 | if (dev->pwr_domain) { |
737 | pm_dev_dbg(dev, state, "LATE power domain "); | ||
738 | error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); | ||
739 | if (error) | ||
740 | return error; | ||
741 | } else if (dev->type && dev->type->pm) { | ||
736 | pm_dev_dbg(dev, state, "LATE type "); | 742 | pm_dev_dbg(dev, state, "LATE type "); |
737 | error = pm_noirq_op(dev, dev->type->pm, state); | 743 | error = pm_noirq_op(dev, dev->type->pm, state); |
738 | if (error) | 744 | if (error) |
@@ -749,11 +755,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
749 | return error; | 755 | return error; |
750 | } | 756 | } |
751 | 757 | ||
752 | if (dev->pwr_domain) { | ||
753 | pm_dev_dbg(dev, state, "LATE power domain "); | ||
754 | pm_noirq_op(dev, &dev->pwr_domain->ops, state); | ||
755 | } | ||
756 | |||
757 | return 0; | 758 | return 0; |
758 | } | 759 | } |
759 | 760 | ||
@@ -841,21 +842,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
841 | goto End; | 842 | goto End; |
842 | } | 843 | } |
843 | 844 | ||
845 | if (dev->pwr_domain) { | ||
846 | pm_dev_dbg(dev, state, "power domain "); | ||
847 | error = pm_op(dev, &dev->pwr_domain->ops, state); | ||
848 | goto End; | ||
849 | } | ||
850 | |||
844 | if (dev->type && dev->type->pm) { | 851 | if (dev->type && dev->type->pm) { |
845 | pm_dev_dbg(dev, state, "type "); | 852 | pm_dev_dbg(dev, state, "type "); |
846 | error = pm_op(dev, dev->type->pm, state); | 853 | error = pm_op(dev, dev->type->pm, state); |
847 | goto Domain; | 854 | goto End; |
848 | } | 855 | } |
849 | 856 | ||
850 | if (dev->class) { | 857 | if (dev->class) { |
851 | if (dev->class->pm) { | 858 | if (dev->class->pm) { |
852 | pm_dev_dbg(dev, state, "class "); | 859 | pm_dev_dbg(dev, state, "class "); |
853 | error = pm_op(dev, dev->class->pm, state); | 860 | error = pm_op(dev, dev->class->pm, state); |
854 | goto Domain; | 861 | goto End; |
855 | } else if (dev->class->suspend) { | 862 | } else if (dev->class->suspend) { |
856 | pm_dev_dbg(dev, state, "legacy class "); | 863 | pm_dev_dbg(dev, state, "legacy class "); |
857 | error = legacy_suspend(dev, state, dev->class->suspend); | 864 | error = legacy_suspend(dev, state, dev->class->suspend); |
858 | goto Domain; | 865 | goto End; |
859 | } | 866 | } |
860 | } | 867 | } |
861 | 868 | ||
@@ -869,12 +876,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
869 | } | 876 | } |
870 | } | 877 | } |
871 | 878 | ||
872 | Domain: | ||
873 | if (!error && dev->pwr_domain) { | ||
874 | pm_dev_dbg(dev, state, "power domain "); | ||
875 | pm_op(dev, &dev->pwr_domain->ops, state); | ||
876 | } | ||
877 | |||
878 | End: | 879 | End: |
879 | device_unlock(dev); | 880 | device_unlock(dev); |
880 | complete_all(&dev->power.completion); | 881 | complete_all(&dev->power.completion); |
@@ -914,11 +915,13 @@ static int device_suspend(struct device *dev) | |||
914 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. | 915 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. |
915 | * @state: PM transition of the system being carried out. | 916 | * @state: PM transition of the system being carried out. |
916 | */ | 917 | */ |
917 | static int dpm_suspend(pm_message_t state) | 918 | int dpm_suspend(pm_message_t state) |
918 | { | 919 | { |
919 | ktime_t starttime = ktime_get(); | 920 | ktime_t starttime = ktime_get(); |
920 | int error = 0; | 921 | int error = 0; |
921 | 922 | ||
923 | might_sleep(); | ||
924 | |||
922 | mutex_lock(&dpm_list_mtx); | 925 | mutex_lock(&dpm_list_mtx); |
923 | pm_transition = state; | 926 | pm_transition = state; |
924 | async_error = 0; | 927 | async_error = 0; |
@@ -965,7 +968,14 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
965 | 968 | ||
966 | device_lock(dev); | 969 | device_lock(dev); |
967 | 970 | ||
968 | if (dev->type && dev->type->pm) { | 971 | if (dev->pwr_domain) { |
972 | pm_dev_dbg(dev, state, "preparing power domain "); | ||
973 | if (dev->pwr_domain->ops.prepare) | ||
974 | error = dev->pwr_domain->ops.prepare(dev); | ||
975 | suspend_report_result(dev->pwr_domain->ops.prepare, error); | ||
976 | if (error) | ||
977 | goto End; | ||
978 | } else if (dev->type && dev->type->pm) { | ||
969 | pm_dev_dbg(dev, state, "preparing type "); | 979 | pm_dev_dbg(dev, state, "preparing type "); |
970 | if (dev->type->pm->prepare) | 980 | if (dev->type->pm->prepare) |
971 | error = dev->type->pm->prepare(dev); | 981 | error = dev->type->pm->prepare(dev); |
@@ -984,13 +994,6 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
984 | if (dev->bus->pm->prepare) | 994 | if (dev->bus->pm->prepare) |
985 | error = dev->bus->pm->prepare(dev); | 995 | error = dev->bus->pm->prepare(dev); |
986 | suspend_report_result(dev->bus->pm->prepare, error); | 996 | suspend_report_result(dev->bus->pm->prepare, error); |
987 | if (error) | ||
988 | goto End; | ||
989 | } | ||
990 | |||
991 | if (dev->pwr_domain && dev->pwr_domain->ops.prepare) { | ||
992 | pm_dev_dbg(dev, state, "preparing power domain "); | ||
993 | dev->pwr_domain->ops.prepare(dev); | ||
994 | } | 997 | } |
995 | 998 | ||
996 | End: | 999 | End: |
@@ -1005,10 +1008,12 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
1005 | * | 1008 | * |
1006 | * Execute the ->prepare() callback(s) for all devices. | 1009 | * Execute the ->prepare() callback(s) for all devices. |
1007 | */ | 1010 | */ |
1008 | static int dpm_prepare(pm_message_t state) | 1011 | int dpm_prepare(pm_message_t state) |
1009 | { | 1012 | { |
1010 | int error = 0; | 1013 | int error = 0; |
1011 | 1014 | ||
1015 | might_sleep(); | ||
1016 | |||
1012 | mutex_lock(&dpm_list_mtx); | 1017 | mutex_lock(&dpm_list_mtx); |
1013 | while (!list_empty(&dpm_list)) { | 1018 | while (!list_empty(&dpm_list)) { |
1014 | struct device *dev = to_device(dpm_list.next); | 1019 | struct device *dev = to_device(dpm_list.next); |
@@ -1057,7 +1062,6 @@ int dpm_suspend_start(pm_message_t state) | |||
1057 | { | 1062 | { |
1058 | int error; | 1063 | int error; |
1059 | 1064 | ||
1060 | might_sleep(); | ||
1061 | error = dpm_prepare(state); | 1065 | error = dpm_prepare(state); |
1062 | if (!error) | 1066 | if (!error) |
1063 | error = dpm_suspend(state); | 1067 | error = dpm_suspend(state); |
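The main.c changes share one theme: a device's power domain, when present, now takes precedence over type, class and bus callbacks and ends the lookup, instead of being invoked alongside them with its return value dropped (note the deleted Domain: blocks, whose pm_op() result was ignored). dpm_prepare/dpm_suspend/dpm_resume/dpm_complete also lose static and each gains its own might_sleep(), since they become individually callable entry points. Every phase now selects its callback the same way, sketched here for the suspend path:

	/* One callback per device per phase; first match wins. */
	if (dev->pwr_domain)
		error = pm_op(dev, &dev->pwr_domain->ops, state);
	else if (dev->type && dev->type->pm)
		error = pm_op(dev, dev->type->pm, state);
	else if (dev->class && dev->class->pm)
		error = pm_op(dev, dev->class->pm, state);
	else if (dev->bus && dev->bus->pm)
		error = pm_op(dev, dev->bus->pm, state);

The runtime.c hunks below apply the same precedence to rpm_idle(), rpm_suspend() and rpm_resume(), replacing the old arrangement in which the domain callback and the subsystem callback could both run for a single transition.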
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 3172c60d23a9..0d4587b15c55 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -168,7 +168,6 @@ static int rpm_check_suspend_allowed(struct device *dev) | |||
168 | static int rpm_idle(struct device *dev, int rpmflags) | 168 | static int rpm_idle(struct device *dev, int rpmflags) |
169 | { | 169 | { |
170 | int (*callback)(struct device *); | 170 | int (*callback)(struct device *); |
171 | int (*domain_callback)(struct device *); | ||
172 | int retval; | 171 | int retval; |
173 | 172 | ||
174 | retval = rpm_check_suspend_allowed(dev); | 173 | retval = rpm_check_suspend_allowed(dev); |
@@ -214,7 +213,9 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
214 | 213 | ||
215 | dev->power.idle_notification = true; | 214 | dev->power.idle_notification = true; |
216 | 215 | ||
217 | if (dev->type && dev->type->pm) | 216 | if (dev->pwr_domain) |
217 | callback = dev->pwr_domain->ops.runtime_idle; | ||
218 | else if (dev->type && dev->type->pm) | ||
218 | callback = dev->type->pm->runtime_idle; | 219 | callback = dev->type->pm->runtime_idle; |
219 | else if (dev->class && dev->class->pm) | 220 | else if (dev->class && dev->class->pm) |
220 | callback = dev->class->pm->runtime_idle; | 221 | callback = dev->class->pm->runtime_idle; |
@@ -223,19 +224,10 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
223 | else | 224 | else |
224 | callback = NULL; | 225 | callback = NULL; |
225 | 226 | ||
226 | if (dev->pwr_domain) | 227 | if (callback) { |
227 | domain_callback = dev->pwr_domain->ops.runtime_idle; | ||
228 | else | ||
229 | domain_callback = NULL; | ||
230 | |||
231 | if (callback || domain_callback) { | ||
232 | spin_unlock_irq(&dev->power.lock); | 228 | spin_unlock_irq(&dev->power.lock); |
233 | 229 | ||
234 | if (domain_callback) | 230 | callback(dev); |
235 | retval = domain_callback(dev); | ||
236 | |||
237 | if (!retval && callback) | ||
238 | callback(dev); | ||
239 | 231 | ||
240 | spin_lock_irq(&dev->power.lock); | 232 | spin_lock_irq(&dev->power.lock); |
241 | } | 233 | } |
@@ -382,7 +374,9 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
382 | 374 | ||
383 | __update_runtime_status(dev, RPM_SUSPENDING); | 375 | __update_runtime_status(dev, RPM_SUSPENDING); |
384 | 376 | ||
385 | if (dev->type && dev->type->pm) | 377 | if (dev->pwr_domain) |
378 | callback = dev->pwr_domain->ops.runtime_suspend; | ||
379 | else if (dev->type && dev->type->pm) | ||
386 | callback = dev->type->pm->runtime_suspend; | 380 | callback = dev->type->pm->runtime_suspend; |
387 | else if (dev->class && dev->class->pm) | 381 | else if (dev->class && dev->class->pm) |
388 | callback = dev->class->pm->runtime_suspend; | 382 | callback = dev->class->pm->runtime_suspend; |
@@ -400,8 +394,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
400 | else | 394 | else |
401 | pm_runtime_cancel_pending(dev); | 395 | pm_runtime_cancel_pending(dev); |
402 | } else { | 396 | } else { |
403 | if (dev->pwr_domain) | ||
404 | rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev); | ||
405 | no_callback: | 397 | no_callback: |
406 | __update_runtime_status(dev, RPM_SUSPENDED); | 398 | __update_runtime_status(dev, RPM_SUSPENDED); |
407 | pm_runtime_deactivate_timer(dev); | 399 | pm_runtime_deactivate_timer(dev); |
@@ -582,9 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
582 | __update_runtime_status(dev, RPM_RESUMING); | 574 | __update_runtime_status(dev, RPM_RESUMING); |
583 | 575 | ||
584 | if (dev->pwr_domain) | 576 | if (dev->pwr_domain) |
585 | rpm_callback(dev->pwr_domain->ops.runtime_resume, dev); | 577 | callback = dev->pwr_domain->ops.runtime_resume; |
586 | 578 | else if (dev->type && dev->type->pm) | |
587 | if (dev->type && dev->type->pm) | ||
588 | callback = dev->type->pm->runtime_resume; | 579 | callback = dev->type->pm->runtime_resume; |
589 | else if (dev->class && dev->class->pm) | 580 | else if (dev->class && dev->class->pm) |
590 | callback = dev->class->pm->runtime_resume; | 581 | callback = dev->class->pm->runtime_resume; |
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index fff49bee781d..a9f5b8979611 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
@@ -212,8 +212,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, | |||
212 | static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, | 212 | static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, |
213 | autosuspend_delay_ms_store); | 213 | autosuspend_delay_ms_store); |
214 | 214 | ||
215 | #endif | 215 | #endif /* CONFIG_PM_RUNTIME */ |
216 | 216 | ||
217 | #ifdef CONFIG_PM_SLEEP | ||
217 | static ssize_t | 218 | static ssize_t |
218 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) | 219 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) |
219 | { | 220 | { |
@@ -248,7 +249,6 @@ wake_store(struct device * dev, struct device_attribute *attr, | |||
248 | 249 | ||
249 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); | 250 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); |
250 | 251 | ||
251 | #ifdef CONFIG_PM_SLEEP | ||
252 | static ssize_t wakeup_count_show(struct device *dev, | 252 | static ssize_t wakeup_count_show(struct device *dev, |
253 | struct device_attribute *attr, char *buf) | 253 | struct device_attribute *attr, char *buf) |
254 | { | 254 | { |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index abbbd33e8d8a..84f7c7d5a098 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -110,7 +110,6 @@ void wakeup_source_add(struct wakeup_source *ws) | |||
110 | spin_lock_irq(&events_lock); | 110 | spin_lock_irq(&events_lock); |
111 | list_add_rcu(&ws->entry, &wakeup_sources); | 111 | list_add_rcu(&ws->entry, &wakeup_sources); |
112 | spin_unlock_irq(&events_lock); | 112 | spin_unlock_irq(&events_lock); |
113 | synchronize_rcu(); | ||
114 | } | 113 | } |
115 | EXPORT_SYMBOL_GPL(wakeup_source_add); | 114 | EXPORT_SYMBOL_GPL(wakeup_source_add); |
116 | 115 | ||
diff --git a/drivers/base/sys.c b/drivers/base/sys.c index acde9b5ee131..9dff77bfe1e3 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c | |||
@@ -328,203 +328,8 @@ void sysdev_unregister(struct sys_device *sysdev) | |||
328 | kobject_put(&sysdev->kobj); | 328 | kobject_put(&sysdev->kobj); |
329 | } | 329 | } |
330 | 330 | ||
331 | 331 | EXPORT_SYMBOL_GPL(sysdev_register); | |
332 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | 332 | EXPORT_SYMBOL_GPL(sysdev_unregister); |
333 | /** | ||
334 | * sysdev_shutdown - Shut down all system devices. | ||
335 | * | ||
336 | * Loop over each class of system devices, and the devices in each | ||
337 | * of those classes. For each device, we call the shutdown method for | ||
338 | * each driver registered for the device - the auxiliaries, | ||
339 | * and the class driver. | ||
340 | * | ||
341 | * Note: The list is iterated in reverse order, so that we shut down | ||
342 | * child devices before we shut down their parents. The list ordering | ||
343 | * is guaranteed by virtue of the fact that child devices are registered | ||
344 | * after their parents. | ||
345 | */ | ||
346 | void sysdev_shutdown(void) | ||
347 | { | ||
348 | struct sysdev_class *cls; | ||
349 | |||
350 | pr_debug("Shutting Down System Devices\n"); | ||
351 | |||
352 | mutex_lock(&sysdev_drivers_lock); | ||
353 | list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { | ||
354 | struct sys_device *sysdev; | ||
355 | |||
356 | pr_debug("Shutting down type '%s':\n", | ||
357 | kobject_name(&cls->kset.kobj)); | ||
358 | |||
359 | list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { | ||
360 | struct sysdev_driver *drv; | ||
361 | pr_debug(" %s\n", kobject_name(&sysdev->kobj)); | ||
362 | |||
363 | /* Call auxiliary drivers first */ | ||
364 | list_for_each_entry(drv, &cls->drivers, entry) { | ||
365 | if (drv->shutdown) | ||
366 | drv->shutdown(sysdev); | ||
367 | } | ||
368 | |||
369 | /* Now call the generic one */ | ||
370 | if (cls->shutdown) | ||
371 | cls->shutdown(sysdev); | ||
372 | } | ||
373 | } | ||
374 | mutex_unlock(&sysdev_drivers_lock); | ||
375 | } | ||
376 | |||
377 | static void __sysdev_resume(struct sys_device *dev) | ||
378 | { | ||
379 | struct sysdev_class *cls = dev->cls; | ||
380 | struct sysdev_driver *drv; | ||
381 | |||
382 | /* First, call the class-specific one */ | ||
383 | if (cls->resume) | ||
384 | cls->resume(dev); | ||
385 | WARN_ONCE(!irqs_disabled(), | ||
386 | "Interrupts enabled after %pF\n", cls->resume); | ||
387 | |||
388 | /* Call auxiliary drivers next. */ | ||
389 | list_for_each_entry(drv, &cls->drivers, entry) { | ||
390 | if (drv->resume) | ||
391 | drv->resume(dev); | ||
392 | WARN_ONCE(!irqs_disabled(), | ||
393 | "Interrupts enabled after %pF\n", drv->resume); | ||
394 | } | ||
395 | } | ||
396 | |||
397 | /** | ||
398 | * sysdev_suspend - Suspend all system devices. | ||
399 | * @state: Power state to enter. | ||
400 | * | ||
401 | * We perform an almost identical operation as sysdev_shutdown() | ||
402 | * above, though calling ->suspend() instead. Interrupts are disabled | ||
403 | * when this called. Devices are responsible for both saving state and | ||
404 | * quiescing or powering down the device. | ||
405 | * | ||
406 | * This is only called by the device PM core, so we let them handle | ||
407 | * all synchronization. | ||
408 | */ | ||
409 | int sysdev_suspend(pm_message_t state) | ||
410 | { | ||
411 | struct sysdev_class *cls; | ||
412 | struct sys_device *sysdev, *err_dev; | ||
413 | struct sysdev_driver *drv, *err_drv; | ||
414 | int ret; | ||
415 | |||
416 | pr_debug("Checking wake-up interrupts\n"); | ||
417 | |||
418 | /* Return error code if there are any wake-up interrupts pending */ | ||
419 | ret = check_wakeup_irqs(); | ||
420 | if (ret) | ||
421 | return ret; | ||
422 | |||
423 | WARN_ONCE(!irqs_disabled(), | ||
424 | "Interrupts enabled while suspending system devices\n"); | ||
425 | |||
426 | pr_debug("Suspending System Devices\n"); | ||
427 | |||
428 | list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { | ||
429 | pr_debug("Suspending type '%s':\n", | ||
430 | kobject_name(&cls->kset.kobj)); | ||
431 | |||
432 | list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { | ||
433 | pr_debug(" %s\n", kobject_name(&sysdev->kobj)); | ||
434 | |||
435 | /* Call auxiliary drivers first */ | ||
436 | list_for_each_entry(drv, &cls->drivers, entry) { | ||
437 | if (drv->suspend) { | ||
438 | ret = drv->suspend(sysdev, state); | ||
439 | if (ret) | ||
440 | goto aux_driver; | ||
441 | } | ||
442 | WARN_ONCE(!irqs_disabled(), | ||
443 | "Interrupts enabled after %pF\n", | ||
444 | drv->suspend); | ||
445 | } | ||
446 | |||
447 | /* Now call the generic one */ | ||
448 | if (cls->suspend) { | ||
449 | ret = cls->suspend(sysdev, state); | ||
450 | if (ret) | ||
451 | goto cls_driver; | ||
452 | WARN_ONCE(!irqs_disabled(), | ||
453 | "Interrupts enabled after %pF\n", | ||
454 | cls->suspend); | ||
455 | } | ||
456 | } | ||
457 | } | ||
458 | return 0; | ||
459 | /* resume current sysdev */ | ||
460 | cls_driver: | ||
461 | drv = NULL; | ||
462 | printk(KERN_ERR "Class suspend failed for %s: %d\n", | ||
463 | kobject_name(&sysdev->kobj), ret); | ||
464 | |||
465 | aux_driver: | ||
466 | if (drv) | ||
467 | printk(KERN_ERR "Class driver suspend failed for %s: %d\n", | ||
468 | kobject_name(&sysdev->kobj), ret); | ||
469 | list_for_each_entry(err_drv, &cls->drivers, entry) { | ||
470 | if (err_drv == drv) | ||
471 | break; | ||
472 | if (err_drv->resume) | ||
473 | err_drv->resume(sysdev); | ||
474 | } | ||
475 | |||
476 | /* resume other sysdevs in current class */ | ||
477 | list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { | ||
478 | if (err_dev == sysdev) | ||
479 | break; | ||
480 | pr_debug(" %s\n", kobject_name(&err_dev->kobj)); | ||
481 | __sysdev_resume(err_dev); | ||
482 | } | ||
483 | |||
484 | /* resume other classes */ | ||
485 | list_for_each_entry_continue(cls, &system_kset->list, kset.kobj.entry) { | ||
486 | list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { | ||
487 | pr_debug(" %s\n", kobject_name(&err_dev->kobj)); | ||
488 | __sysdev_resume(err_dev); | ||
489 | } | ||
490 | } | ||
491 | return ret; | ||
492 | } | ||
493 | EXPORT_SYMBOL_GPL(sysdev_suspend); | ||
494 | |||
495 | /** | ||
496 | * sysdev_resume - Bring system devices back to life. | ||
497 | * | ||
498 | * Similar to sysdev_suspend(), but we iterate the list forwards | ||
499 | * to guarantee that parent devices are resumed before their children. | ||
500 | * | ||
501 | * Note: Interrupts are disabled when called. | ||
502 | */ | ||
503 | int sysdev_resume(void) | ||
504 | { | ||
505 | struct sysdev_class *cls; | ||
506 | |||
507 | WARN_ONCE(!irqs_disabled(), | ||
508 | "Interrupts enabled while resuming system devices\n"); | ||
509 | |||
510 | pr_debug("Resuming System Devices\n"); | ||
511 | |||
512 | list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) { | ||
513 | struct sys_device *sysdev; | ||
514 | |||
515 | pr_debug("Resuming type '%s':\n", | ||
516 | kobject_name(&cls->kset.kobj)); | ||
517 | |||
518 | list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { | ||
519 | pr_debug(" %s\n", kobject_name(&sysdev->kobj)); | ||
520 | |||
521 | __sysdev_resume(sysdev); | ||
522 | } | ||
523 | } | ||
524 | return 0; | ||
525 | } | ||
526 | EXPORT_SYMBOL_GPL(sysdev_resume); | ||
527 | #endif /* CONFIG_ARCH_NO_SYSDEV_OPS */ | ||
528 | 333 | ||
529 | int __init system_bus_init(void) | 334 | int __init system_bus_init(void) |
530 | { | 335 | { |
@@ -534,9 +339,6 @@ int __init system_bus_init(void) | |||
534 | return 0; | 339 | return 0; |
535 | } | 340 | } |
536 | 341 | ||
537 | EXPORT_SYMBOL_GPL(sysdev_register); | ||
538 | EXPORT_SYMBOL_GPL(sysdev_unregister); | ||
539 | |||
540 | #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) | 342 | #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) |
541 | 343 | ||
542 | ssize_t sysdev_store_ulong(struct sys_device *sysdev, | 344 | ssize_t sysdev_store_ulong(struct sys_device *sysdev, |
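With these lines gone, sysdev classes no longer participate in suspend, resume or shutdown at all; the roughly 200 removed lines were only compiled when the architecture did not set CONFIG_ARCH_NO_SYSDEV_OPS, and their role is taken over by syscore_ops, which runs with interrupts disabled just as the old sysdev callbacks did. A conversion for a former sysdev user might look like this sketch (the mysub_* names are hypothetical):

	#include <linux/syscore_ops.h>

	static int mysub_suspend(void)
	{
		/* save hardware state; called with interrupts disabled */
		return 0;
	}

	static void mysub_resume(void)
	{
		/* restore hardware state; interrupts still disabled */
	}

	static struct syscore_ops mysub_syscore_ops = {
		.suspend = mysub_suspend,
		.resume	 = mysub_resume,
	};

	static int __init mysub_init(void)
	{
		register_syscore_ops(&mysub_syscore_ops);
		return 0;
	}
	subsys_initcall(mysub_init);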
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 8066d086578a..e086fbbbe853 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c | |||
@@ -2547,7 +2547,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) | |||
2547 | disk->major = MajorNumber; | 2547 | disk->major = MajorNumber; |
2548 | disk->first_minor = n << DAC960_MaxPartitionsBits; | 2548 | disk->first_minor = n << DAC960_MaxPartitionsBits; |
2549 | disk->fops = &DAC960_BlockDeviceOperations; | 2549 | disk->fops = &DAC960_BlockDeviceOperations; |
2550 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
2551 | } | 2550 | } |
2552 | /* | 2551 | /* |
2553 | Indicate the Block Device Registration completed successfully, | 2552 | Indicate the Block Device Registration completed successfully, |
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 456c0cc90dcf..8eba86bba599 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c | |||
@@ -1736,7 +1736,6 @@ static int __init fd_probe_drives(void) | |||
1736 | disk->major = FLOPPY_MAJOR; | 1736 | disk->major = FLOPPY_MAJOR; |
1737 | disk->first_minor = drive; | 1737 | disk->first_minor = drive; |
1738 | disk->fops = &floppy_fops; | 1738 | disk->fops = &floppy_fops; |
1739 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
1740 | sprintf(disk->disk_name, "fd%d", drive); | 1739 | sprintf(disk->disk_name, "fd%d", drive); |
1741 | disk->private_data = &unit[drive]; | 1740 | disk->private_data = &unit[drive]; |
1742 | set_capacity(disk, 880*2); | 1741 | set_capacity(disk, 880*2); |
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index c871eae14120..ede16c64ff07 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c | |||
@@ -1964,7 +1964,6 @@ static int __init atari_floppy_init (void) | |||
1964 | unit[i].disk->first_minor = i; | 1964 | unit[i].disk->first_minor = i; |
1965 | sprintf(unit[i].disk->disk_name, "fd%d", i); | 1965 | sprintf(unit[i].disk->disk_name, "fd%d", i); |
1966 | unit[i].disk->fops = &floppy_fops; | 1966 | unit[i].disk->fops = &floppy_fops; |
1967 | unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
1968 | unit[i].disk->private_data = &unit[i]; | 1967 | unit[i].disk->private_data = &unit[i]; |
1969 | unit[i].disk->queue = blk_init_queue(do_fd_request, | 1968 | unit[i].disk->queue = blk_init_queue(do_fd_request, |
1970 | &ataflop_lock); | 1969 | &ataflop_lock); |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 301d7a9a41a6..db8f88586c8d 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -4205,7 +4205,6 @@ static int __init floppy_init(void) | |||
4205 | disks[dr]->major = FLOPPY_MAJOR; | 4205 | disks[dr]->major = FLOPPY_MAJOR; |
4206 | disks[dr]->first_minor = TOMINOR(dr); | 4206 | disks[dr]->first_minor = TOMINOR(dr); |
4207 | disks[dr]->fops = &floppy_fops; | 4207 | disks[dr]->fops = &floppy_fops; |
4208 | disks[dr]->events = DISK_EVENT_MEDIA_CHANGE; | ||
4209 | sprintf(disks[dr]->disk_name, "fd%d", dr); | 4208 | sprintf(disks[dr]->disk_name, "fd%d", dr); |
4210 | 4209 | ||
4211 | init_timer(&motor_off_timer[dr]); | 4210 | init_timer(&motor_off_timer[dr]); |
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 2f2ccf686251..8690e31d9932 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c | |||
@@ -320,7 +320,6 @@ static void pcd_init_units(void) | |||
320 | disk->first_minor = unit; | 320 | disk->first_minor = unit; |
321 | strcpy(disk->disk_name, cd->name); /* umm... */ | 321 | strcpy(disk->disk_name, cd->name); /* umm... */ |
322 | disk->fops = &pcd_bdops; | 322 | disk->fops = &pcd_bdops; |
323 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
324 | } | 323 | } |
325 | } | 324 | } |
326 | 325 | ||
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 21dfdb776869..869e7676d46f 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c | |||
@@ -837,7 +837,6 @@ static void pd_probe_drive(struct pd_unit *disk) | |||
837 | p->fops = &pd_fops; | 837 | p->fops = &pd_fops; |
838 | p->major = major; | 838 | p->major = major; |
839 | p->first_minor = (disk - pd) << PD_BITS; | 839 | p->first_minor = (disk - pd) << PD_BITS; |
840 | p->events = DISK_EVENT_MEDIA_CHANGE; | ||
841 | disk->gd = p; | 840 | disk->gd = p; |
842 | p->private_data = disk; | 841 | p->private_data = disk; |
843 | p->queue = pd_queue; | 842 | p->queue = pd_queue; |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 7adeb1edbf43..f21b520ef419 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
@@ -294,7 +294,6 @@ static void __init pf_init_units(void) | |||
294 | disk->first_minor = unit; | 294 | disk->first_minor = unit; |
295 | strcpy(disk->disk_name, pf->name); | 295 | strcpy(disk->disk_name, pf->name); |
296 | disk->fops = &pf_fops; | 296 | disk->fops = &pf_fops; |
297 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
298 | if (!(*drives[unit])[D_PRT]) | 297 | if (!(*drives[unit])[D_PRT]) |
299 | pf_drive_count++; | 298 | pf_drive_count++; |
300 | } | 299 | } |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3e904717c1c0..9712fad82bc6 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -92,6 +92,8 @@ struct rbd_client { | |||
92 | struct list_head node; | 92 | struct list_head node; |
93 | }; | 93 | }; |
94 | 94 | ||
95 | struct rbd_req_coll; | ||
96 | |||
95 | /* | 97 | /* |
96 | * a single io request | 98 | * a single io request |
97 | */ | 99 | */ |
@@ -100,6 +102,24 @@ struct rbd_request { | |||
100 | struct bio *bio; /* cloned bio */ | 102 | struct bio *bio; /* cloned bio */ |
101 | struct page **pages; /* list of used pages */ | 103 | struct page **pages; /* list of used pages */ |
102 | u64 len; | 104 | u64 len; |
105 | int coll_index; | ||
106 | struct rbd_req_coll *coll; | ||
107 | }; | ||
108 | |||
109 | struct rbd_req_status { | ||
110 | int done; | ||
111 | int rc; | ||
112 | u64 bytes; | ||
113 | }; | ||
114 | |||
115 | /* | ||
116 | * a collection of requests | ||
117 | */ | ||
118 | struct rbd_req_coll { | ||
119 | int total; | ||
120 | int num_done; | ||
121 | struct kref kref; | ||
122 | struct rbd_req_status status[0]; | ||
103 | }; | 123 | }; |
104 | 124 | ||
105 | struct rbd_snap { | 125 | struct rbd_snap { |
@@ -416,6 +436,17 @@ static void rbd_put_client(struct rbd_device *rbd_dev) | |||
416 | rbd_dev->client = NULL; | 436 | rbd_dev->client = NULL; |
417 | } | 437 | } |
418 | 438 | ||
439 | /* | ||
440 | * Destroy requests collection | ||
441 | */ | ||
442 | static void rbd_coll_release(struct kref *kref) | ||
443 | { | ||
444 | struct rbd_req_coll *coll = | ||
445 | container_of(kref, struct rbd_req_coll, kref); | ||
446 | |||
447 | dout("rbd_coll_release %p\n", coll); | ||
448 | kfree(coll); | ||
449 | } | ||
419 | 450 | ||
420 | /* | 451 | /* |
421 | * Create a new header structure, translate header format from the on-disk | 452 | * Create a new header structure, translate header format from the on-disk |
@@ -590,6 +621,14 @@ static u64 rbd_get_segment(struct rbd_image_header *header, | |||
590 | return len; | 621 | return len; |
591 | } | 622 | } |
592 | 623 | ||
624 | static int rbd_get_num_segments(struct rbd_image_header *header, | ||
625 | u64 ofs, u64 len) | ||
626 | { | ||
627 | u64 start_seg = ofs >> header->obj_order; | ||
628 | u64 end_seg = (ofs + len - 1) >> header->obj_order; | ||
629 | return end_seg - start_seg + 1; | ||
630 | } | ||
631 | |||
593 | /* | 632 | /* |
594 | * bio helpers | 633 | * bio helpers |
595 | */ | 634 | */ |
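
rbd_get_num_segments() counts how many fixed-size objects an I/O spans: shift the first and last byte offsets down by obj_order and take the inclusive difference. A worked example, assuming obj_order = 22 (4 MiB objects; the value is per-image, this one is only illustrative):

    #include <stdio.h>
    #include <stdint.h>

    static int num_segments(uint64_t ofs, uint64_t len, int obj_order)
    {
        uint64_t start_seg = ofs >> obj_order;
        uint64_t end_seg = (ofs + len - 1) >> obj_order;

        return (int)(end_seg - start_seg + 1);
    }

    int main(void)
    {
        /* 6 MiB starting at offset 3 MiB crosses objects 0..2 */
        printf("%d\n", num_segments(3ULL << 20, 6ULL << 20, 22)); /* 3 */
        return 0;
    }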
@@ -735,6 +774,50 @@ static void rbd_destroy_ops(struct ceph_osd_req_op *ops) | |||
735 | kfree(ops); | 774 | kfree(ops); |
736 | } | 775 | } |
737 | 776 | ||
777 | static void rbd_coll_end_req_index(struct request *rq, | ||
778 | struct rbd_req_coll *coll, | ||
779 | int index, | ||
780 | int ret, u64 len) | ||
781 | { | ||
782 | struct request_queue *q; | ||
783 | int min, max, i; | ||
784 | |||
785 | dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n", | ||
786 | coll, index, ret, len); | ||
787 | |||
788 | if (!rq) | ||
789 | return; | ||
790 | |||
791 | if (!coll) { | ||
792 | blk_end_request(rq, ret, len); | ||
793 | return; | ||
794 | } | ||
795 | |||
796 | q = rq->q; | ||
797 | |||
798 | spin_lock_irq(q->queue_lock); | ||
799 | coll->status[index].done = 1; | ||
800 | coll->status[index].rc = ret; | ||
801 | coll->status[index].bytes = len; | ||
802 | max = min = coll->num_done; | ||
803 | while (max < coll->total && coll->status[max].done) | ||
804 | max++; | ||
805 | |||
806 | for (i = min; i < max; i++) { | ||
807 | __blk_end_request(rq, coll->status[i].rc, | ||
808 | coll->status[i].bytes); | ||
809 | coll->num_done++; | ||
810 | kref_put(&coll->kref, rbd_coll_release); | ||
811 | } | ||
812 | spin_unlock_irq(q->queue_lock); | ||
813 | } | ||
814 | |||
815 | static void rbd_coll_end_req(struct rbd_request *req, | ||
816 | int ret, u64 len) | ||
817 | { | ||
818 | rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len); | ||
819 | } | ||
820 | |||
738 | /* | 821 | /* |
739 | * Send ceph osd request | 822 | * Send ceph osd request |
740 | */ | 823 | */ |
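
rbd_coll_end_req_index() is the heart of the change: per-segment OSD replies can arrive in any order, but blk_end_request()-style completion consumes a request strictly front to back, so each completion marks its slot done and then retires only the contiguous prefix of done slots starting at num_done, all under queue_lock. A runnable model of that scan:

    #include <stdio.h>

    struct slot { int done, rc; };

    static void complete_index(struct slot *s, int total, int *num_done,
                               int idx, int rc)
    {
        int i, max;

        s[idx].done = 1;
        s[idx].rc = rc;
        max = *num_done;
        while (max < total && s[max].done)
            max++;
        for (i = *num_done; i < max; i++)
            printf("retire slot %d (rc=%d)\n", i, s[i].rc);
        *num_done = max;
    }

    int main(void)
    {
        struct slot s[3] = { { 0 } };
        int num_done = 0;

        complete_index(s, 3, &num_done, 1, 0); /* held: slot 0 pending */
        complete_index(s, 3, &num_done, 0, 0); /* retires slots 0 and 1 */
        complete_index(s, 3, &num_done, 2, 0); /* retires slot 2 */
        return 0;
    }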
@@ -749,6 +832,8 @@ static int rbd_do_request(struct request *rq, | |||
749 | int flags, | 832 | int flags, |
750 | struct ceph_osd_req_op *ops, | 833 | struct ceph_osd_req_op *ops, |
751 | int num_reply, | 834 | int num_reply, |
835 | struct rbd_req_coll *coll, | ||
836 | int coll_index, | ||
752 | void (*rbd_cb)(struct ceph_osd_request *req, | 837 | void (*rbd_cb)(struct ceph_osd_request *req, |
753 | struct ceph_msg *msg), | 838 | struct ceph_msg *msg), |
754 | struct ceph_osd_request **linger_req, | 839 | struct ceph_osd_request **linger_req, |
@@ -763,12 +848,20 @@ static int rbd_do_request(struct request *rq, | |||
763 | struct ceph_osd_request_head *reqhead; | 848 | struct ceph_osd_request_head *reqhead; |
764 | struct rbd_image_header *header = &dev->header; | 849 | struct rbd_image_header *header = &dev->header; |
765 | 850 | ||
766 | ret = -ENOMEM; | ||
767 | req_data = kzalloc(sizeof(*req_data), GFP_NOIO); | 851 | req_data = kzalloc(sizeof(*req_data), GFP_NOIO); |
768 | if (!req_data) | 852 | if (!req_data) { |
769 | goto done; | 853 | if (coll) |
854 | rbd_coll_end_req_index(rq, coll, coll_index, | ||
855 | -ENOMEM, len); | ||
856 | return -ENOMEM; | ||
857 | } | ||
770 | 858 | ||
771 | dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs); | 859 | if (coll) { |
860 | req_data->coll = coll; | ||
861 | req_data->coll_index = coll_index; | ||
862 | } | ||
863 | |||
864 | dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs); | ||
772 | 865 | ||
773 | down_read(&header->snap_rwsem); | 866 | down_read(&header->snap_rwsem); |
774 | 867 | ||
@@ -828,7 +921,8 @@ static int rbd_do_request(struct request *rq, | |||
828 | ret = ceph_osdc_wait_request(&dev->client->osdc, req); | 921 | ret = ceph_osdc_wait_request(&dev->client->osdc, req); |
829 | if (ver) | 922 | if (ver) |
830 | *ver = le64_to_cpu(req->r_reassert_version.version); | 923 | *ver = le64_to_cpu(req->r_reassert_version.version); |
831 | dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version)); | 924 | dout("reassert_ver=%lld\n", |
925 | le64_to_cpu(req->r_reassert_version.version)); | ||
832 | ceph_osdc_put_request(req); | 926 | ceph_osdc_put_request(req); |
833 | } | 927 | } |
834 | return ret; | 928 | return ret; |
@@ -837,10 +931,8 @@ done_err: | |||
837 | bio_chain_put(req_data->bio); | 931 | bio_chain_put(req_data->bio); |
838 | ceph_osdc_put_request(req); | 932 | ceph_osdc_put_request(req); |
839 | done_pages: | 933 | done_pages: |
934 | rbd_coll_end_req(req_data, ret, len); | ||
840 | kfree(req_data); | 935 | kfree(req_data); |
841 | done: | ||
842 | if (rq) | ||
843 | blk_end_request(rq, ret, len); | ||
844 | return ret; | 936 | return ret; |
845 | } | 937 | } |
846 | 938 | ||
@@ -874,7 +966,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg) | |||
874 | bytes = req_data->len; | 966 | bytes = req_data->len; |
875 | } | 967 | } |
876 | 968 | ||
877 | blk_end_request(req_data->rq, rc, bytes); | 969 | rbd_coll_end_req(req_data, rc, bytes); |
878 | 970 | ||
879 | if (req_data->bio) | 971 | if (req_data->bio) |
880 | bio_chain_put(req_data->bio); | 972 | bio_chain_put(req_data->bio); |
@@ -934,6 +1026,7 @@ static int rbd_req_sync_op(struct rbd_device *dev, | |||
934 | flags, | 1026 | flags, |
935 | ops, | 1027 | ops, |
936 | 2, | 1028 | 2, |
1029 | NULL, 0, | ||
937 | NULL, | 1030 | NULL, |
938 | linger_req, ver); | 1031 | linger_req, ver); |
939 | if (ret < 0) | 1032 | if (ret < 0) |
@@ -959,7 +1052,9 @@ static int rbd_do_op(struct request *rq, | |||
959 | u64 snapid, | 1052 | u64 snapid, |
960 | int opcode, int flags, int num_reply, | 1053 | int opcode, int flags, int num_reply, |
961 | u64 ofs, u64 len, | 1054 | u64 ofs, u64 len, |
962 | struct bio *bio) | 1055 | struct bio *bio, |
1056 | struct rbd_req_coll *coll, | ||
1057 | int coll_index) | ||
963 | { | 1058 | { |
964 | char *seg_name; | 1059 | char *seg_name; |
965 | u64 seg_ofs; | 1060 | u64 seg_ofs; |
@@ -995,7 +1090,10 @@ static int rbd_do_op(struct request *rq, | |||
995 | flags, | 1090 | flags, |
996 | ops, | 1091 | ops, |
997 | num_reply, | 1092 | num_reply, |
1093 | coll, coll_index, | ||
998 | rbd_req_cb, 0, NULL); | 1094 | rbd_req_cb, 0, NULL); |
1095 | |||
1096 | rbd_destroy_ops(ops); | ||
999 | done: | 1097 | done: |
1000 | kfree(seg_name); | 1098 | kfree(seg_name); |
1001 | return ret; | 1099 | return ret; |
@@ -1008,13 +1106,15 @@ static int rbd_req_write(struct request *rq, | |||
1008 | struct rbd_device *rbd_dev, | 1106 | struct rbd_device *rbd_dev, |
1009 | struct ceph_snap_context *snapc, | 1107 | struct ceph_snap_context *snapc, |
1010 | u64 ofs, u64 len, | 1108 | u64 ofs, u64 len, |
1011 | struct bio *bio) | 1109 | struct bio *bio, |
1110 | struct rbd_req_coll *coll, | ||
1111 | int coll_index) | ||
1012 | { | 1112 | { |
1013 | return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, | 1113 | return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, |
1014 | CEPH_OSD_OP_WRITE, | 1114 | CEPH_OSD_OP_WRITE, |
1015 | CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, | 1115 | CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, |
1016 | 2, | 1116 | 2, |
1017 | ofs, len, bio); | 1117 | ofs, len, bio, coll, coll_index); |
1018 | } | 1118 | } |
1019 | 1119 | ||
1020 | /* | 1120 | /* |
@@ -1024,14 +1124,16 @@ static int rbd_req_read(struct request *rq, | |||
1024 | struct rbd_device *rbd_dev, | 1124 | struct rbd_device *rbd_dev, |
1025 | u64 snapid, | 1125 | u64 snapid, |
1026 | u64 ofs, u64 len, | 1126 | u64 ofs, u64 len, |
1027 | struct bio *bio) | 1127 | struct bio *bio, |
1128 | struct rbd_req_coll *coll, | ||
1129 | int coll_index) | ||
1028 | { | 1130 | { |
1029 | return rbd_do_op(rq, rbd_dev, NULL, | 1131 | return rbd_do_op(rq, rbd_dev, NULL, |
1030 | (snapid ? snapid : CEPH_NOSNAP), | 1132 | (snapid ? snapid : CEPH_NOSNAP), |
1031 | CEPH_OSD_OP_READ, | 1133 | CEPH_OSD_OP_READ, |
1032 | CEPH_OSD_FLAG_READ, | 1134 | CEPH_OSD_FLAG_READ, |
1033 | 2, | 1135 | 2, |
1034 | ofs, len, bio); | 1136 | ofs, len, bio, coll, coll_index); |
1035 | } | 1137 | } |
1036 | 1138 | ||
1037 | /* | 1139 | /* |
@@ -1063,7 +1165,9 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, | |||
1063 | { | 1165 | { |
1064 | struct ceph_osd_req_op *ops; | 1166 | struct ceph_osd_req_op *ops; |
1065 | struct page **pages = NULL; | 1167 | struct page **pages = NULL; |
1066 | int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); | 1168 | int ret; |
1169 | |||
1170 | ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); | ||
1067 | if (ret < 0) | 1171 | if (ret < 0) |
1068 | return ret; | 1172 | return ret; |
1069 | 1173 | ||
@@ -1077,6 +1181,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, | |||
1077 | CEPH_OSD_FLAG_READ, | 1181 | CEPH_OSD_FLAG_READ, |
1078 | ops, | 1182 | ops, |
1079 | 1, | 1183 | 1, |
1184 | NULL, 0, | ||
1080 | rbd_simple_req_cb, 0, NULL); | 1185 | rbd_simple_req_cb, 0, NULL); |
1081 | 1186 | ||
1082 | rbd_destroy_ops(ops); | 1187 | rbd_destroy_ops(ops); |
@@ -1274,6 +1379,20 @@ static int rbd_req_sync_exec(struct rbd_device *dev, | |||
1274 | return ret; | 1379 | return ret; |
1275 | } | 1380 | } |
1276 | 1381 | ||
1382 | static struct rbd_req_coll *rbd_alloc_coll(int num_reqs) | ||
1383 | { | ||
1384 | struct rbd_req_coll *coll = | ||
1385 | kzalloc(sizeof(struct rbd_req_coll) + | ||
1386 | sizeof(struct rbd_req_status) * num_reqs, | ||
1387 | GFP_ATOMIC); | ||
1388 | |||
1389 | if (!coll) | ||
1390 | return NULL; | ||
1391 | coll->total = num_reqs; | ||
1392 | kref_init(&coll->kref); | ||
1393 | return coll; | ||
1394 | } | ||
1395 | |||
1277 | /* | 1396 | /* |
1278 | * block device queue callback | 1397 | * block device queue callback |
1279 | */ | 1398 | */ |
@@ -1291,6 +1410,8 @@ static void rbd_rq_fn(struct request_queue *q) | |||
1291 | bool do_write; | 1410 | bool do_write; |
1292 | int size, op_size = 0; | 1411 | int size, op_size = 0; |
1293 | u64 ofs; | 1412 | u64 ofs; |
1413 | int num_segs, cur_seg = 0; | ||
1414 | struct rbd_req_coll *coll; | ||
1294 | 1415 | ||
1295 | /* peek at request from block layer */ | 1416 | /* peek at request from block layer */ |
1296 | if (!rq) | 1417 | if (!rq) |
@@ -1321,6 +1442,14 @@ static void rbd_rq_fn(struct request_queue *q) | |||
1321 | do_write ? "write" : "read", | 1442 | do_write ? "write" : "read", |
1322 | size, blk_rq_pos(rq) * 512ULL); | 1443 | size, blk_rq_pos(rq) * 512ULL); |
1323 | 1444 | ||
1445 | num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); | ||
1446 | coll = rbd_alloc_coll(num_segs); | ||
1447 | if (!coll) { | ||
1448 | spin_lock_irq(q->queue_lock); | ||
1449 | __blk_end_request_all(rq, -ENOMEM); | ||
1450 | goto next; | ||
1451 | } | ||
1452 | |||
1324 | do { | 1453 | do { |
1325 | /* a bio clone to be passed down to OSD req */ | 1454 | /* a bio clone to be passed down to OSD req */ |
1326 | dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); | 1455 | dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); |
@@ -1328,35 +1457,41 @@ static void rbd_rq_fn(struct request_queue *q) | |||
1328 | rbd_dev->header.block_name, | 1457 | rbd_dev->header.block_name, |
1329 | ofs, size, | 1458 | ofs, size, |
1330 | NULL, NULL); | 1459 | NULL, NULL); |
1460 | kref_get(&coll->kref); | ||
1331 | bio = bio_chain_clone(&rq_bio, &next_bio, &bp, | 1461 | bio = bio_chain_clone(&rq_bio, &next_bio, &bp, |
1332 | op_size, GFP_ATOMIC); | 1462 | op_size, GFP_ATOMIC); |
1333 | if (!bio) { | 1463 | if (!bio) { |
1334 | spin_lock_irq(q->queue_lock); | 1464 | rbd_coll_end_req_index(rq, coll, cur_seg, |
1335 | __blk_end_request_all(rq, -ENOMEM); | 1465 | -ENOMEM, op_size); |
1336 | goto next; | 1466 | goto next_seg; |
1337 | } | 1467 | } |
1338 | 1468 | ||
1469 | |||
1339 | /* init OSD command: write or read */ | 1470 | /* init OSD command: write or read */ |
1340 | if (do_write) | 1471 | if (do_write) |
1341 | rbd_req_write(rq, rbd_dev, | 1472 | rbd_req_write(rq, rbd_dev, |
1342 | rbd_dev->header.snapc, | 1473 | rbd_dev->header.snapc, |
1343 | ofs, | 1474 | ofs, |
1344 | op_size, bio); | 1475 | op_size, bio, |
1476 | coll, cur_seg); | ||
1345 | else | 1477 | else |
1346 | rbd_req_read(rq, rbd_dev, | 1478 | rbd_req_read(rq, rbd_dev, |
1347 | cur_snap_id(rbd_dev), | 1479 | cur_snap_id(rbd_dev), |
1348 | ofs, | 1480 | ofs, |
1349 | op_size, bio); | 1481 | op_size, bio, |
1482 | coll, cur_seg); | ||
1350 | 1483 | ||
1484 | next_seg: | ||
1351 | size -= op_size; | 1485 | size -= op_size; |
1352 | ofs += op_size; | 1486 | ofs += op_size; |
1353 | 1487 | ||
1488 | cur_seg++; | ||
1354 | rq_bio = next_bio; | 1489 | rq_bio = next_bio; |
1355 | } while (size > 0); | 1490 | } while (size > 0); |
1491 | kref_put(&coll->kref, rbd_coll_release); | ||
1356 | 1492 | ||
1357 | if (bp) | 1493 | if (bp) |
1358 | bio_pair_release(bp); | 1494 | bio_pair_release(bp); |
1359 | |||
1360 | spin_lock_irq(q->queue_lock); | 1495 | spin_lock_irq(q->queue_lock); |
1361 | next: | 1496 | next: |
1362 | rq = blk_fetch_request(q); | 1497 | rq = blk_fetch_request(q); |
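
The submit loop's reference counting determines the collection's lifetime: kref_init() in rbd_alloc_coll() gives the submitter one reference, kref_get() before each segment submission adds one that rbd_coll_end_req_index() drops when that slot retires, and the submitter drops its own reference after the loop. The collection is therefore freed exactly when the last party is done with it, whichever side finishes last. A counting model of that discipline:

    #include <stdio.h>

    static int refs;

    static void get(void) { refs++; }
    static void put(void) { if (--refs == 0) printf("coll freed\n"); }

    int main(void)
    {
        int i, segs = 3;

        refs = 1;                /* kref_init() in rbd_alloc_coll() */
        for (i = 0; i < segs; i++)
            get();               /* kref_get() before each submit */
        put();                   /* submitter's put after the loop */
        for (i = 0; i < segs; i++)
            put();               /* one put per retired slot; the
                                    last one frees the collection */
        return 0;
    }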
diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 24a482f2fbd6..fd5adcd55944 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c | |||
@@ -858,7 +858,6 @@ static int __devinit swim_floppy_init(struct swim_priv *swd) | |||
858 | swd->unit[drive].disk->first_minor = drive; | 858 | swd->unit[drive].disk->first_minor = drive; |
859 | sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive); | 859 | sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive); |
860 | swd->unit[drive].disk->fops = &floppy_fops; | 860 | swd->unit[drive].disk->fops = &floppy_fops; |
861 | swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
862 | swd->unit[drive].disk->private_data = &swd->unit[drive]; | 861 | swd->unit[drive].disk->private_data = &swd->unit[drive]; |
863 | swd->unit[drive].disk->queue = swd->queue; | 862 | swd->unit[drive].disk->queue = swd->queue; |
864 | set_capacity(swd->unit[drive].disk, 2880); | 863 | set_capacity(swd->unit[drive].disk, 2880); |
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 4c10f56facbf..773bfa792777 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c | |||
@@ -1163,7 +1163,6 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device | |||
1163 | disk->major = FLOPPY_MAJOR; | 1163 | disk->major = FLOPPY_MAJOR; |
1164 | disk->first_minor = i; | 1164 | disk->first_minor = i; |
1165 | disk->fops = &floppy_fops; | 1165 | disk->fops = &floppy_fops; |
1166 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
1167 | disk->private_data = &floppy_states[i]; | 1166 | disk->private_data = &floppy_states[i]; |
1168 | disk->queue = swim3_queue; | 1167 | disk->queue = swim3_queue; |
1169 | disk->flags |= GENHD_FL_REMOVABLE; | 1168 | disk->flags |= GENHD_FL_REMOVABLE; |
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 68b9430c7cfe..0e376d46bdd1 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
@@ -2334,7 +2334,6 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) | |||
2334 | disk->major = UB_MAJOR; | 2334 | disk->major = UB_MAJOR; |
2335 | disk->first_minor = lun->id * UB_PARTS_PER_LUN; | 2335 | disk->first_minor = lun->id * UB_PARTS_PER_LUN; |
2336 | disk->fops = &ub_bd_fops; | 2336 | disk->fops = &ub_bd_fops; |
2337 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
2338 | disk->private_data = lun; | 2337 | disk->private_data = lun; |
2339 | disk->driverfs_dev = &sc->intf->dev; | 2338 | disk->driverfs_dev = &sc->intf->dev; |
2340 | 2339 | ||
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 645ff765cd12..6c7fd7db6dff 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c | |||
@@ -1005,7 +1005,6 @@ static int __devinit ace_setup(struct ace_device *ace) | |||
1005 | ace->gd->major = ace_major; | 1005 | ace->gd->major = ace_major; |
1006 | ace->gd->first_minor = ace->id * ACE_NUM_MINORS; | 1006 | ace->gd->first_minor = ace->id * ACE_NUM_MINORS; |
1007 | ace->gd->fops = &ace_fops; | 1007 | ace->gd->fops = &ace_fops; |
1008 | ace->gd->events = DISK_EVENT_MEDIA_CHANGE; | ||
1009 | ace->gd->queue = ace->queue; | 1008 | ace->gd->queue = ace->queue; |
1010 | ace->gd->private_data = ace; | 1009 | ace->gd->private_data = ace; |
1011 | snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); | 1010 | snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 514dd8efaf73..75fb965b8f72 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -986,6 +986,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t | |||
986 | 986 | ||
987 | cdinfo(CD_OPEN, "entering cdrom_open\n"); | 987 | cdinfo(CD_OPEN, "entering cdrom_open\n"); |
988 | 988 | ||
989 | /* open is event synchronization point, check events first */ | ||
990 | check_disk_change(bdev); | ||
991 | |||
989 | /* if this was a O_NONBLOCK open and we should honor the flags, | 992 | /* if this was a O_NONBLOCK open and we should honor the flags, |
990 | * do a quick open without drive/disc integrity checks. */ | 993 | * do a quick open without drive/disc integrity checks. */ |
991 | cdi->use_count++; | 994 | cdi->use_count++; |
@@ -1012,9 +1015,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t | |||
1012 | 1015 | ||
1013 | cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", | 1016 | cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", |
1014 | cdi->name, cdi->use_count); | 1017 | cdi->name, cdi->use_count); |
1015 | /* Do this on open. Don't wait for mount, because they might | ||
1016 | not be mounting, but opening with O_NONBLOCK */ | ||
1017 | check_disk_change(bdev); | ||
1018 | return 0; | 1018 | return 0; |
1019 | err_release: | 1019 | err_release: |
1020 | if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { | 1020 | if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { |
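
Moving check_disk_change() from the end of cdrom_open() to the top makes open() the synchronization point for the disk-events code: even the O_NONBLOCK quick-open path now revalidates media state before anything else. A sketch of the resulting shape, using a hypothetical block driver rather than the cdrom code itself:

    /* hypothetical driver open(); illustrates the ordering only */
    static int mydrv_open(struct block_device *bdev, fmode_t mode)
    {
            check_disk_change(bdev);    /* sync events before any checks */

            if (mode & FMODE_NDELAY)
                    return 0;           /* quick open, events still synced */

            /* ... full drive/disc integrity checks ... */
            return 0;
    }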
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index b2b034fea34e..3ceaf006e7f0 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c | |||
@@ -803,7 +803,6 @@ static int __devinit probe_gdrom(struct platform_device *devptr) | |||
803 | goto probe_fail_cdrom_register; | 803 | goto probe_fail_cdrom_register; |
804 | } | 804 | } |
805 | gd.disk->fops = &gdrom_bdops; | 805 | gd.disk->fops = &gdrom_bdops; |
806 | gd.disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
807 | /* latch on to the interrupt */ | 806 | /* latch on to the interrupt */ |
808 | err = gdrom_set_interrupt_handlers(); | 807 | err = gdrom_set_interrupt_handlers(); |
809 | if (err) | 808 | if (err) |
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 4e874c5fa605..e427fbe45999 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c | |||
@@ -626,7 +626,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
626 | gendisk->queue = q; | 626 | gendisk->queue = q; |
627 | gendisk->fops = &viocd_fops; | 627 | gendisk->fops = &viocd_fops; |
628 | gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; | 628 | gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; |
629 | gendisk->events = DISK_EVENT_MEDIA_CHANGE; | ||
630 | set_capacity(gendisk, 0); | 629 | set_capacity(gendisk, 0); |
631 | gendisk->private_data = d; | 630 | gendisk->private_data = d; |
632 | d->viocd_disk = gendisk; | 631 | d->viocd_disk = gendisk; |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index ad59b4e0a9b5..49502bc5360a 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -523,7 +523,7 @@ config RAW_DRIVER | |||
523 | with the O_DIRECT flag. | 523 | with the O_DIRECT flag. |
524 | 524 | ||
525 | config MAX_RAW_DEVS | 525 | config MAX_RAW_DEVS |
526 | int "Maximum number of RAW devices to support (1-8192)" | 526 | int "Maximum number of RAW devices to support (1-65536)" |
527 | depends on RAW_DRIVER | 527 | depends on RAW_DRIVER |
528 | default "256" | 528 | default "256" |
529 | help | 529 | help |
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 7066e801b9d3..051474c65b78 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c | |||
@@ -84,8 +84,6 @@ static struct clocksource clocksource_hpet = { | |||
84 | .rating = 250, | 84 | .rating = 250, |
85 | .read = read_hpet, | 85 | .read = read_hpet, |
86 | .mask = CLOCKSOURCE_MASK(64), | 86 | .mask = CLOCKSOURCE_MASK(64), |
87 | .mult = 0, /* to be calculated */ | ||
88 | .shift = 10, | ||
89 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 87 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
90 | }; | 88 | }; |
91 | static struct clocksource *hpet_clocksource; | 89 | static struct clocksource *hpet_clocksource; |
@@ -934,9 +932,7 @@ int hpet_alloc(struct hpet_data *hdp) | |||
934 | if (!hpet_clocksource) { | 932 | if (!hpet_clocksource) { |
935 | hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc; | 933 | hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc; |
936 | CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr); | 934 | CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr); |
937 | clocksource_hpet.mult = clocksource_hz2mult(hpetp->hp_tick_freq, | 935 | clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq); |
938 | clocksource_hpet.shift); | ||
939 | clocksource_register(&clocksource_hpet); | ||
940 | hpetp->hp_clocksource = &clocksource_hpet; | 936 | hpetp->hp_clocksource = &clocksource_hpet; |
941 | hpet_clocksource = &clocksource_hpet; | 937 | hpet_clocksource = &clocksource_hpet; |
942 | } | 938 | } |
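
Open-coded mult/shift setup plus clocksource_register() collapses into clocksource_register_hz(), which derives the conversion factors from the tick frequency itself (and can choose a better shift than the hardcoded 10). The underlying arithmetic, as a runnable sketch with an illustrative HPET-class frequency:

    #include <stdio.h>
    #include <stdint.h>

    /* cycles convert to ns as (cycles * mult) >> shift, so
     * mult = (NSEC_PER_SEC << shift) / hz, rounded */
    int main(void)
    {
        uint64_t hz = 14318180;    /* illustrative 14.318-MHz HPET tick */
        int shift = 10;
        uint64_t mult = ((1000000000ULL << shift) + hz / 2) / hz;

        printf("mult=%llu shift=%d\n", (unsigned long long)mult, shift);
        printf("1000 cycles ~= %llu ns\n",    /* ~69840 ns at 14.318 MHz */
               (unsigned long long)((1000 * mult) >> shift));
        return 0;
    }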
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 43ac61978d8b..ac6739e085e3 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c | |||
@@ -619,15 +619,18 @@ static void __devinit n2rng_driver_version(void) | |||
619 | pr_info("%s", version); | 619 | pr_info("%s", version); |
620 | } | 620 | } |
621 | 621 | ||
622 | static const struct of_device_id n2rng_match[]; | ||
622 | static int __devinit n2rng_probe(struct platform_device *op) | 623 | static int __devinit n2rng_probe(struct platform_device *op) |
623 | { | 624 | { |
625 | const struct of_device_id *match; | ||
624 | int victoria_falls; | 626 | int victoria_falls; |
625 | int err = -ENOMEM; | 627 | int err = -ENOMEM; |
626 | struct n2rng *np; | 628 | struct n2rng *np; |
627 | 629 | ||
628 | if (!op->dev.of_match) | 630 | match = of_match_device(n2rng_match, &op->dev); |
631 | if (!match) | ||
629 | return -EINVAL; | 632 | return -EINVAL; |
630 | victoria_falls = (op->dev.of_match->data != NULL); | 633 | victoria_falls = (match->data != NULL); |
631 | 634 | ||
632 | n2rng_driver_version(); | 635 | n2rng_driver_version(); |
633 | np = kzalloc(sizeof(*np), GFP_KERNEL); | 636 | np = kzalloc(sizeof(*np), GFP_KERNEL); |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index cc6c9b2546a3..64c6b8530615 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -2554,9 +2554,11 @@ static struct pci_driver ipmi_pci_driver = { | |||
2554 | }; | 2554 | }; |
2555 | #endif /* CONFIG_PCI */ | 2555 | #endif /* CONFIG_PCI */ |
2556 | 2556 | ||
2557 | static struct of_device_id ipmi_match[]; | ||
2557 | static int __devinit ipmi_probe(struct platform_device *dev) | 2558 | static int __devinit ipmi_probe(struct platform_device *dev) |
2558 | { | 2559 | { |
2559 | #ifdef CONFIG_OF | 2560 | #ifdef CONFIG_OF |
2561 | const struct of_device_id *match; | ||
2560 | struct smi_info *info; | 2562 | struct smi_info *info; |
2561 | struct resource resource; | 2563 | struct resource resource; |
2562 | const __be32 *regsize, *regspacing, *regshift; | 2564 | const __be32 *regsize, *regspacing, *regshift; |
@@ -2566,7 +2568,8 @@ static int __devinit ipmi_probe(struct platform_device *dev) | |||
2566 | 2568 | ||
2567 | dev_info(&dev->dev, "probing via device tree\n"); | 2569 | dev_info(&dev->dev, "probing via device tree\n"); |
2568 | 2570 | ||
2569 | if (!dev->dev.of_match) | 2571 | match = of_match_device(ipmi_match, &dev->dev); |
2572 | if (!match) | ||
2570 | return -EINVAL; | 2573 | return -EINVAL; |
2571 | 2574 | ||
2572 | ret = of_address_to_resource(np, 0, &resource); | 2575 | ret = of_address_to_resource(np, 0, &resource); |
@@ -2601,7 +2604,7 @@ static int __devinit ipmi_probe(struct platform_device *dev) | |||
2601 | return -ENOMEM; | 2604 | return -ENOMEM; |
2602 | } | 2605 | } |
2603 | 2606 | ||
2604 | info->si_type = (enum si_type) dev->dev.of_match->data; | 2607 | info->si_type = (enum si_type) match->data; |
2605 | info->addr_source = SI_DEVICETREE; | 2608 | info->addr_source = SI_DEVICETREE; |
2606 | info->irq_setup = std_irq_setup; | 2609 | info->irq_setup = std_irq_setup; |
2607 | 2610 | ||
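
This hunk and the n2-drv one above apply the same conversion: the of_match pointer cached in struct device is going away, so probe() calls of_match_device() against the driver's own match table (forward-declared, since the table is defined below probe) and reads the per-compatible data from the returned entry. The pattern in isolation, with hypothetical names:

    static const struct of_device_id mydrv_match[];  /* defined below */

    static int __devinit mydrv_probe(struct platform_device *op)
    {
            const struct of_device_id *match;

            match = of_match_device(mydrv_match, &op->dev);
            if (!match)
                    return -EINVAL;

            /* match->data carries the per-compatible driver config */
            return 0;
    }

    static const struct of_device_id mydrv_match[] = {
            { .compatible = "vendor,mydev", .data = (void *)1 },
            {},
    };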
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 436a99017998..8fc04b4f311f 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
@@ -806,29 +806,41 @@ static const struct file_operations oldmem_fops = { | |||
806 | }; | 806 | }; |
807 | #endif | 807 | #endif |
808 | 808 | ||
809 | static ssize_t kmsg_write(struct file *file, const char __user *buf, | 809 | static ssize_t kmsg_writev(struct kiocb *iocb, const struct iovec *iv, |
810 | size_t count, loff_t *ppos) | 810 | unsigned long count, loff_t pos) |
811 | { | 811 | { |
812 | char *tmp; | 812 | char *line, *p; |
813 | ssize_t ret; | 813 | int i; |
814 | ssize_t ret = -EFAULT; | ||
815 | size_t len = iov_length(iv, count); | ||
814 | 816 | ||
815 | tmp = kmalloc(count + 1, GFP_KERNEL); | 817 | line = kmalloc(len + 1, GFP_KERNEL); |
816 | if (tmp == NULL) | 818 | if (line == NULL) |
817 | return -ENOMEM; | 819 | return -ENOMEM; |
818 | ret = -EFAULT; | 820 | |
819 | if (!copy_from_user(tmp, buf, count)) { | 821 | /* |
820 | tmp[count] = 0; | 822 | * copy all vectors into a single string, to ensure we do |
821 | ret = printk("%s", tmp); | 823 | * not interleave our log line with other printk calls |
822 | if (ret > count) | 824 | */ |
823 | /* printk can add a prefix */ | 825 | p = line; |
824 | ret = count; | 826 | for (i = 0; i < count; i++) { |
827 | if (copy_from_user(p, iv[i].iov_base, iv[i].iov_len)) | ||
828 | goto out; | ||
829 | p += iv[i].iov_len; | ||
825 | } | 830 | } |
826 | kfree(tmp); | 831 | p[0] = '\0'; |
832 | |||
833 | ret = printk("%s", line); | ||
834 | /* printk can add a prefix */ | ||
835 | if (ret > len) | ||
836 | ret = len; | ||
837 | out: | ||
838 | kfree(line); | ||
827 | return ret; | 839 | return ret; |
828 | } | 840 | } |
829 | 841 | ||
830 | static const struct file_operations kmsg_fops = { | 842 | static const struct file_operations kmsg_fops = { |
831 | .write = kmsg_write, | 843 | .aio_write = kmsg_writev, |
832 | .llseek = noop_llseek, | 844 | .llseek = noop_llseek, |
833 | }; | 845 | }; |
834 | 846 | ||
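
Switching /dev/kmsg from ->write to ->aio_write means a writev() is gathered into one buffer and hits printk() once, so a multi-vector log line can no longer interleave with concurrent printk traffic. A userspace illustration (error handling trimmed; the strings are arbitrary and opening /dev/kmsg needs the appropriate permission):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/kmsg", O_WRONLY);
        struct iovec iov[2] = {
            { .iov_base = (void *)"hello ",       .iov_len = 6 },
            { .iov_base = (void *)"kernel log\n", .iov_len = 11 },
        };

        if (fd < 0)
            return 1;
        writev(fd, iov, 2);    /* lands in the log as one line */
        close(fd);
        return 0;
    }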
diff --git a/drivers/char/raw.c b/drivers/char/raw.c index b4b9d5a47885..b33e8ea314ed 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
22 | #include <linux/gfp.h> | 22 | #include <linux/gfp.h> |
23 | #include <linux/compat.h> | 23 | #include <linux/compat.h> |
24 | #include <linux/vmalloc.h> | ||
24 | 25 | ||
25 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
26 | 27 | ||
@@ -30,10 +31,15 @@ struct raw_device_data { | |||
30 | }; | 31 | }; |
31 | 32 | ||
32 | static struct class *raw_class; | 33 | static struct class *raw_class; |
33 | static struct raw_device_data raw_devices[MAX_RAW_MINORS]; | 34 | static struct raw_device_data *raw_devices; |
34 | static DEFINE_MUTEX(raw_mutex); | 35 | static DEFINE_MUTEX(raw_mutex); |
35 | static const struct file_operations raw_ctl_fops; /* forward declaration */ | 36 | static const struct file_operations raw_ctl_fops; /* forward declaration */ |
36 | 37 | ||
38 | static int max_raw_minors = MAX_RAW_MINORS; | ||
39 | |||
40 | module_param(max_raw_minors, int, 0); | ||
41 | MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)"); | ||
42 | |||
37 | /* | 43 | /* |
38 | * Open/close code for raw IO. | 44 | * Open/close code for raw IO. |
39 | * | 45 | * |
@@ -125,7 +131,7 @@ static int bind_set(int number, u64 major, u64 minor) | |||
125 | struct raw_device_data *rawdev; | 131 | struct raw_device_data *rawdev; |
126 | int err = 0; | 132 | int err = 0; |
127 | 133 | ||
128 | if (number <= 0 || number >= MAX_RAW_MINORS) | 134 | if (number <= 0 || number >= max_raw_minors) |
129 | return -EINVAL; | 135 | return -EINVAL; |
130 | 136 | ||
131 | if (MAJOR(dev) != major || MINOR(dev) != minor) | 137 | if (MAJOR(dev) != major || MINOR(dev) != minor) |
@@ -312,14 +318,27 @@ static int __init raw_init(void) | |||
312 | dev_t dev = MKDEV(RAW_MAJOR, 0); | 318 | dev_t dev = MKDEV(RAW_MAJOR, 0); |
313 | int ret; | 319 | int ret; |
314 | 320 | ||
315 | ret = register_chrdev_region(dev, MAX_RAW_MINORS, "raw"); | 321 | if (max_raw_minors < 1 || max_raw_minors > 65536) { |
322 | printk(KERN_WARNING "raw: invalid max_raw_minors (must be" | ||
323 | " between 1 and 65536), using %d\n", MAX_RAW_MINORS); | ||
324 | max_raw_minors = MAX_RAW_MINORS; | ||
325 | } | ||
326 | |||
327 | raw_devices = vmalloc(sizeof(struct raw_device_data) * max_raw_minors); | ||
328 | if (!raw_devices) { | ||
329 | printk(KERN_ERR "Not enough memory for raw device structures\n"); | ||
330 | ret = -ENOMEM; | ||
331 | goto error; | ||
332 | } | ||
333 | memset(raw_devices, 0, sizeof(struct raw_device_data) * max_raw_minors); | ||
334 | |||
335 | ret = register_chrdev_region(dev, max_raw_minors, "raw"); | ||
316 | if (ret) | 336 | if (ret) |
317 | goto error; | 337 | goto error; |
318 | 338 | ||
319 | cdev_init(&raw_cdev, &raw_fops); | 339 | cdev_init(&raw_cdev, &raw_fops); |
320 | ret = cdev_add(&raw_cdev, dev, MAX_RAW_MINORS); | 340 | ret = cdev_add(&raw_cdev, dev, max_raw_minors); |
321 | if (ret) { | 341 | if (ret) { |
322 | kobject_put(&raw_cdev.kobj); | ||
323 | goto error_region; | 342 | goto error_region; |
324 | } | 343 | } |
325 | 344 | ||
@@ -336,8 +355,9 @@ static int __init raw_init(void) | |||
336 | return 0; | 355 | return 0; |
337 | 356 | ||
338 | error_region: | 357 | error_region: |
339 | unregister_chrdev_region(dev, MAX_RAW_MINORS); | 358 | unregister_chrdev_region(dev, max_raw_minors); |
340 | error: | 359 | error: |
360 | vfree(raw_devices); | ||
341 | return ret; | 361 | return ret; |
342 | } | 362 | } |
343 | 363 | ||
@@ -346,7 +366,7 @@ static void __exit raw_exit(void) | |||
346 | device_destroy(raw_class, MKDEV(RAW_MAJOR, 0)); | 366 | device_destroy(raw_class, MKDEV(RAW_MAJOR, 0)); |
347 | class_destroy(raw_class); | 367 | class_destroy(raw_class); |
348 | cdev_del(&raw_cdev); | 368 | cdev_del(&raw_cdev); |
349 | unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS); | 369 | unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors); |
350 | } | 370 | } |
351 | 371 | ||
352 | module_init(raw_init); | 372 | module_init(raw_init); |
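
Raising the ceiling from 8192 to 65536 minors is what pushes the bind table out of the module image and into vmalloc(): the array is now sized at runtime from max_raw_minors and can approach a megabyte. A sizing illustration, assuming raw_device_data is a pointer plus an int as in the driver:

    #include <stdio.h>

    struct raw_device_data {
        void *binding;    /* struct block_device * in the driver */
        int inuse;
    };

    int main(void)
    {
        printf("256 minors: %zu KiB\n",
               256 * sizeof(struct raw_device_data) / 1024);
        printf("65536 minors: %zu KiB\n",       /* ~1 MiB on 64-bit */
               65536 * sizeof(struct raw_device_data) / 1024);
        return 0;
    }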
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index d6412c16385f..39ccdeada791 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c | |||
@@ -715,13 +715,13 @@ static int __devexit hwicap_remove(struct device *dev) | |||
715 | } | 715 | } |
716 | 716 | ||
717 | #ifdef CONFIG_OF | 717 | #ifdef CONFIG_OF |
718 | static int __devinit hwicap_of_probe(struct platform_device *op) | 718 | static int __devinit hwicap_of_probe(struct platform_device *op, |
719 | const struct hwicap_driver_config *config) | ||
719 | { | 720 | { |
720 | struct resource res; | 721 | struct resource res; |
721 | const unsigned int *id; | 722 | const unsigned int *id; |
722 | const char *family; | 723 | const char *family; |
723 | int rc; | 724 | int rc; |
724 | const struct hwicap_driver_config *config = op->dev.of_match->data; | ||
725 | const struct config_registers *regs; | 725 | const struct config_registers *regs; |
726 | 726 | ||
727 | 727 | ||
@@ -751,20 +751,24 @@ static int __devinit hwicap_of_probe(struct platform_device *op) | |||
751 | regs); | 751 | regs); |
752 | } | 752 | } |
753 | #else | 753 | #else |
754 | static inline int hwicap_of_probe(struct platform_device *op) | 754 | static inline int hwicap_of_probe(struct platform_device *op, |
755 | const struct hwicap_driver_config *config) | ||
755 | { | 756 | { |
756 | return -EINVAL; | 757 | return -EINVAL; |
757 | } | 758 | } |
758 | #endif /* CONFIG_OF */ | 759 | #endif /* CONFIG_OF */ |
759 | 760 | ||
761 | static const struct of_device_id __devinitconst hwicap_of_match[]; | ||
760 | static int __devinit hwicap_drv_probe(struct platform_device *pdev) | 762 | static int __devinit hwicap_drv_probe(struct platform_device *pdev) |
761 | { | 763 | { |
764 | const struct of_device_id *match; | ||
762 | struct resource *res; | 765 | struct resource *res; |
763 | const struct config_registers *regs; | 766 | const struct config_registers *regs; |
764 | const char *family; | 767 | const char *family; |
765 | 768 | ||
766 | if (pdev->dev.of_match) | 769 | match = of_match_device(hwicap_of_match, &pdev->dev); |
767 | return hwicap_of_probe(pdev); | 770 | if (match) |
771 | return hwicap_of_probe(pdev, match->data); | ||
768 | 772 | ||
769 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 773 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
770 | if (!res) | 774 | if (!res) |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig new file mode 100644 index 000000000000..110aeeb52f9a --- /dev/null +++ b/drivers/clocksource/Kconfig | |||
@@ -0,0 +1,2 @@ | |||
1 | config CLKSRC_I8253 | ||
2 | bool | ||
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index be61ece6330b..cfb6383b543a 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile | |||
@@ -6,3 +6,4 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o | |||
6 | obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o | 6 | obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o |
7 | obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o | 7 | obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o |
8 | obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o | 8 | obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o |
9 | obj-$(CONFIG_CLKSRC_I8253) += i8253.o | ||
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c index 64e528e8bfa6..72f811f73e9c 100644 --- a/drivers/clocksource/cyclone.c +++ b/drivers/clocksource/cyclone.c | |||
@@ -29,8 +29,6 @@ static struct clocksource clocksource_cyclone = { | |||
29 | .rating = 250, | 29 | .rating = 250, |
30 | .read = read_cyclone, | 30 | .read = read_cyclone, |
31 | .mask = CYCLONE_TIMER_MASK, | 31 | .mask = CYCLONE_TIMER_MASK, |
32 | .mult = 10, | ||
33 | .shift = 0, | ||
34 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 32 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
35 | }; | 33 | }; |
36 | 34 | ||
@@ -108,12 +106,8 @@ static int __init init_cyclone_clocksource(void) | |||
108 | } | 106 | } |
109 | cyclone_ptr = cyclone_timer; | 107 | cyclone_ptr = cyclone_timer; |
110 | 108 | ||
111 | /* sort out mult/shift values: */ | 109 | return clocksource_register_hz(&clocksource_cyclone, |
112 | clocksource_cyclone.shift = 22; | 110 | CYCLONE_TIMER_FREQ); |
113 | clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, | ||
114 | clocksource_cyclone.shift); | ||
115 | |||
116 | return clocksource_register(&clocksource_cyclone); | ||
117 | } | 111 | } |
118 | 112 | ||
119 | arch_initcall(init_cyclone_clocksource); | 113 | arch_initcall(init_cyclone_clocksource); |
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c new file mode 100644 index 000000000000..225c1761b372 --- /dev/null +++ b/drivers/clocksource/i8253.c | |||
@@ -0,0 +1,88 @@ | |||
1 | /* | ||
2 | * i8253 PIT clocksource | ||
3 | */ | ||
4 | #include <linux/clocksource.h> | ||
5 | #include <linux/init.h> | ||
6 | #include <linux/io.h> | ||
7 | #include <linux/spinlock.h> | ||
8 | #include <linux/timex.h> | ||
9 | |||
10 | #include <asm/i8253.h> | ||
11 | |||
12 | /* | ||
13 | * Since the PIT overflows every tick, its not very useful | ||
14 | * to just read by itself. So use jiffies to emulate a free | ||
15 | * running counter: | ||
16 | */ | ||
17 | static cycle_t i8253_read(struct clocksource *cs) | ||
18 | { | ||
19 | static int old_count; | ||
20 | static u32 old_jifs; | ||
21 | unsigned long flags; | ||
22 | int count; | ||
23 | u32 jifs; | ||
24 | |||
25 | raw_spin_lock_irqsave(&i8253_lock, flags); | ||
26 | /* | ||
27 | * Although our caller may have the read side of xtime_lock, | ||
28 | * this is now a seqlock, and we are cheating in this routine | ||
29 | * by having side effects on state that we cannot undo if | ||
30 | * there is a collision on the seqlock and our caller has to | ||
31 | * retry. (Namely, old_jifs and old_count.) So we must treat | ||
32 | * jiffies as volatile despite the lock. We read jiffies | ||
33 | * before latching the timer count to guarantee that although | ||
34 | * the jiffies value might be older than the count (that is, | ||
35 | * the counter may underflow between the last point where | ||
36 | * jiffies was incremented and the point where we latch the | ||
37 | * count), it cannot be newer. | ||
38 | */ | ||
39 | jifs = jiffies; | ||
40 | outb_pit(0x00, PIT_MODE); /* latch the count ASAP */ | ||
41 | count = inb_pit(PIT_CH0); /* read the latched count */ | ||
42 | count |= inb_pit(PIT_CH0) << 8; | ||
43 | |||
44 | /* VIA686a test code... reset the latch if count > max + 1 */ | ||
45 | if (count > LATCH) { | ||
46 | outb_pit(0x34, PIT_MODE); | ||
47 | outb_pit(PIT_LATCH & 0xff, PIT_CH0); | ||
48 | outb_pit(PIT_LATCH >> 8, PIT_CH0); | ||
49 | count = PIT_LATCH - 1; | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * It's possible for count to appear to go the wrong way for a | ||
54 | * couple of reasons: | ||
55 | * | ||
56 | * 1. The timer counter underflows, but we haven't handled the | ||
57 | * resulting interrupt and incremented jiffies yet. | ||
58 | * 2. Hardware problem with the timer, not giving us continuous time, | ||
59 | * the counter does small "jumps" upwards on some Pentium systems, | ||
60 | * (see c't 95/10 page 335 for Neptun bug.) | ||
61 | * | ||
62 | * Previous attempts to handle these cases intelligently were | ||
63 | * buggy, so we just do the simple thing now. | ||
64 | */ | ||
65 | if (count > old_count && jifs == old_jifs) | ||
66 | count = old_count; | ||
67 | |||
68 | old_count = count; | ||
69 | old_jifs = jifs; | ||
70 | |||
71 | raw_spin_unlock_irqrestore(&i8253_lock, flags); | ||
72 | |||
73 | count = (PIT_LATCH - 1) - count; | ||
74 | |||
75 | return (cycle_t)(jifs * PIT_LATCH) + count; | ||
76 | } | ||
77 | |||
78 | static struct clocksource i8253_cs = { | ||
79 | .name = "pit", | ||
80 | .rating = 110, | ||
81 | .read = i8253_read, | ||
82 | .mask = CLOCKSOURCE_MASK(32), | ||
83 | }; | ||
84 | |||
85 | int __init clocksource_i8253_init(void) | ||
86 | { | ||
87 | return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE); | ||
88 | } | ||
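
The new clocksource returns jiffies * LATCH plus how far the PIT has counted down within the current tick, which yields a monotonically increasing cycle value even though the hardware counter itself only runs downward over one tick. A model of that combination (LATCH computed as PIT_TICK_RATE/HZ with an assumed HZ=100):

    #include <stdio.h>
    #include <stdint.h>

    #define PIT_TICK_RATE 1193182
    #define HZ 100
    #define LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)

    static uint64_t position(uint32_t jifs, int count)
    {
        /* count runs LATCH-1 .. 0 inside each tick */
        return (uint64_t)jifs * LATCH + (LATCH - 1 - count);
    }

    int main(void)
    {
        /* late in tick 5 vs. immediately after the tick-6 reload */
        printf("%llu < %llu\n",
               (unsigned long long)position(5, 2),
               (unsigned long long)position(6, LATCH - 1));
        return 0;
    }

The old_count/old_jifs clamp in i8253_read() exists precisely to keep this sum from stepping backward when the counter has wrapped but the jiffies interrupt has not yet run.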
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index ca8ee8093d6c..9fb84853d8e3 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig | |||
@@ -1,3 +1,5 @@ | |||
1 | menu "CPU Frequency scaling" | ||
2 | |||
1 | config CPU_FREQ | 3 | config CPU_FREQ |
2 | bool "CPU Frequency scaling" | 4 | bool "CPU Frequency scaling" |
3 | help | 5 | help |
@@ -18,19 +20,6 @@ if CPU_FREQ | |||
18 | config CPU_FREQ_TABLE | 20 | config CPU_FREQ_TABLE |
19 | tristate | 21 | tristate |
20 | 22 | ||
21 | config CPU_FREQ_DEBUG | ||
22 | bool "Enable CPUfreq debugging" | ||
23 | help | ||
24 | Say Y here to enable CPUfreq subsystem (including drivers) | ||
25 | debugging. You will need to activate it via the kernel | ||
26 | command line by passing | ||
27 | cpufreq.debug=<value> | ||
28 | |||
29 | To get <value>, add | ||
30 | 1 to activate CPUfreq core debugging, | ||
31 | 2 to activate CPUfreq drivers debugging, and | ||
32 | 4 to activate CPUfreq governor debugging | ||
33 | |||
34 | config CPU_FREQ_STAT | 23 | config CPU_FREQ_STAT |
35 | tristate "CPU frequency translation statistics" | 24 | tristate "CPU frequency translation statistics" |
36 | select CPU_FREQ_TABLE | 25 | select CPU_FREQ_TABLE |
@@ -190,4 +179,10 @@ config CPU_FREQ_GOV_CONSERVATIVE | |||
190 | 179 | ||
191 | If in doubt, say N. | 180 | If in doubt, say N. |
192 | 181 | ||
193 | endif # CPU_FREQ | 182 | menu "x86 CPU frequency scaling drivers" |
183 | depends on X86 | ||
184 | source "drivers/cpufreq/Kconfig.x86" | ||
185 | endmenu | ||
186 | |||
187 | endif | ||
188 | endmenu | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/drivers/cpufreq/Kconfig.x86 index 870e6cc6ad28..78ff7ee48951 100644 --- a/arch/x86/kernel/cpu/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig.x86 | |||
@@ -1,15 +1,7 @@ | |||
1 | # | 1 | # |
2 | # CPU Frequency scaling | 2 | # x86 CPU Frequency scaling drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | menu "CPU Frequency scaling" | ||
6 | |||
7 | source "drivers/cpufreq/Kconfig" | ||
8 | |||
9 | if CPU_FREQ | ||
10 | |||
11 | comment "CPUFreq processor drivers" | ||
12 | |||
13 | config X86_PCC_CPUFREQ | 5 | config X86_PCC_CPUFREQ |
14 | tristate "Processor Clocking Control interface driver" | 6 | tristate "Processor Clocking Control interface driver" |
15 | depends on ACPI && ACPI_PROCESSOR | 7 | depends on ACPI && ACPI_PROCESSOR |
@@ -43,7 +35,7 @@ config X86_ACPI_CPUFREQ | |||
43 | config ELAN_CPUFREQ | 35 | config ELAN_CPUFREQ |
44 | tristate "AMD Elan SC400 and SC410" | 36 | tristate "AMD Elan SC400 and SC410" |
45 | select CPU_FREQ_TABLE | 37 | select CPU_FREQ_TABLE |
46 | depends on X86_ELAN | 38 | depends on MELAN |
47 | ---help--- | 39 | ---help--- |
48 | This adds the CPUFreq driver for AMD Elan SC400 and SC410 | 40 | This adds the CPUFreq driver for AMD Elan SC400 and SC410 |
49 | processors. | 41 | processors. |
@@ -59,7 +51,7 @@ config ELAN_CPUFREQ | |||
59 | config SC520_CPUFREQ | 51 | config SC520_CPUFREQ |
60 | tristate "AMD Elan SC520" | 52 | tristate "AMD Elan SC520" |
61 | select CPU_FREQ_TABLE | 53 | select CPU_FREQ_TABLE |
62 | depends on X86_ELAN | 54 | depends on MELAN |
63 | ---help--- | 55 | ---help--- |
64 | This adds the CPUFreq driver for AMD Elan SC520 processor. | 56 | This adds the CPUFreq driver for AMD Elan SC520 processor. |
65 | 57 | ||
@@ -261,6 +253,3 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK | |||
261 | option lets the probing code bypass some of those checks if the | 253 | option lets the probing code bypass some of those checks if the |
262 | parameter "relaxed_check=1" is passed to the module. | 254 | parameter "relaxed_check=1" is passed to the module. |
263 | 255 | ||
264 | endif # CPU_FREQ | ||
265 | |||
266 | endmenu | ||
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 71fc3b4173f1..c7f1a6f16b6e 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile | |||
@@ -13,3 +13,29 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o | |||
13 | # CPUfreq cross-arch helpers | 13 | # CPUfreq cross-arch helpers |
14 | obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o | 14 | obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o |
15 | 15 | ||
16 | ################################################################################## | ||
17 | # x86 drivers. | ||
18 | # Link order matters. K8 is preferred to ACPI because of firmware bugs in early | ||
19 | # K8 systems. ACPI is preferred to all other hardware-specific drivers. | ||
20 | # speedstep-* is preferred over p4-clockmod. | ||
21 | |||
22 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o | ||
23 | obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o | ||
24 | obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o | ||
25 | obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o | ||
26 | obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o | ||
27 | obj-$(CONFIG_X86_LONGHAUL) += longhaul.o | ||
28 | obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o | ||
29 | obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o | ||
30 | obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o | ||
31 | obj-$(CONFIG_X86_LONGRUN) += longrun.o | ||
32 | obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o | ||
33 | obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o | ||
34 | obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o | ||
35 | obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o | ||
36 | obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o | ||
37 | obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o | ||
38 | obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o | ||
39 | |||
40 | ################################################################################## | ||
41 | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index a2baafb2fe6d..4e04e1274388 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c | |||
@@ -47,9 +47,6 @@ | |||
47 | #include <asm/cpufeature.h> | 47 | #include <asm/cpufeature.h> |
48 | #include "mperf.h" | 48 | #include "mperf.h" |
49 | 49 | ||
50 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
51 | "acpi-cpufreq", msg) | ||
52 | |||
53 | MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); | 50 | MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); |
54 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); | 51 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); |
55 | MODULE_LICENSE("GPL"); | 52 | MODULE_LICENSE("GPL"); |
@@ -233,7 +230,7 @@ static u32 get_cur_val(const struct cpumask *mask) | |||
233 | cmd.mask = mask; | 230 | cmd.mask = mask; |
234 | drv_read(&cmd); | 231 | drv_read(&cmd); |
235 | 232 | ||
236 | dprintk("get_cur_val = %u\n", cmd.val); | 233 | pr_debug("get_cur_val = %u\n", cmd.val); |
237 | 234 | ||
238 | return cmd.val; | 235 | return cmd.val; |
239 | } | 236 | } |
@@ -244,7 +241,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | |||
244 | unsigned int freq; | 241 | unsigned int freq; |
245 | unsigned int cached_freq; | 242 | unsigned int cached_freq; |
246 | 243 | ||
247 | dprintk("get_cur_freq_on_cpu (%d)\n", cpu); | 244 | pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); |
248 | 245 | ||
249 | if (unlikely(data == NULL || | 246 | if (unlikely(data == NULL || |
250 | data->acpi_data == NULL || data->freq_table == NULL)) { | 247 | data->acpi_data == NULL || data->freq_table == NULL)) { |
@@ -261,7 +258,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | |||
261 | data->resume = 1; | 258 | data->resume = 1; |
262 | } | 259 | } |
263 | 260 | ||
264 | dprintk("cur freq = %u\n", freq); | 261 | pr_debug("cur freq = %u\n", freq); |
265 | 262 | ||
266 | return freq; | 263 | return freq; |
267 | } | 264 | } |
@@ -293,7 +290,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
293 | unsigned int i; | 290 | unsigned int i; |
294 | int result = 0; | 291 | int result = 0; |
295 | 292 | ||
296 | dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); | 293 | pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); |
297 | 294 | ||
298 | if (unlikely(data == NULL || | 295 | if (unlikely(data == NULL || |
299 | data->acpi_data == NULL || data->freq_table == NULL)) { | 296 | data->acpi_data == NULL || data->freq_table == NULL)) { |
@@ -313,11 +310,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
313 | next_perf_state = data->freq_table[next_state].index; | 310 | next_perf_state = data->freq_table[next_state].index; |
314 | if (perf->state == next_perf_state) { | 311 | if (perf->state == next_perf_state) { |
315 | if (unlikely(data->resume)) { | 312 | if (unlikely(data->resume)) { |
316 | dprintk("Called after resume, resetting to P%d\n", | 313 | pr_debug("Called after resume, resetting to P%d\n", |
317 | next_perf_state); | 314 | next_perf_state); |
318 | data->resume = 0; | 315 | data->resume = 0; |
319 | } else { | 316 | } else { |
320 | dprintk("Already at target state (P%d)\n", | 317 | pr_debug("Already at target state (P%d)\n", |
321 | next_perf_state); | 318 | next_perf_state); |
322 | goto out; | 319 | goto out; |
323 | } | 320 | } |
@@ -357,7 +354,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
357 | 354 | ||
358 | if (acpi_pstate_strict) { | 355 | if (acpi_pstate_strict) { |
359 | if (!check_freqs(cmd.mask, freqs.new, data)) { | 356 | if (!check_freqs(cmd.mask, freqs.new, data)) { |
360 | dprintk("acpi_cpufreq_target failed (%d)\n", | 357 | pr_debug("acpi_cpufreq_target failed (%d)\n", |
361 | policy->cpu); | 358 | policy->cpu); |
362 | result = -EAGAIN; | 359 | result = -EAGAIN; |
363 | goto out; | 360 | goto out; |
@@ -378,7 +375,7 @@ static int acpi_cpufreq_verify(struct cpufreq_policy *policy) | |||
378 | { | 375 | { |
379 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 376 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
380 | 377 | ||
381 | dprintk("acpi_cpufreq_verify\n"); | 378 | pr_debug("acpi_cpufreq_verify\n"); |
382 | 379 | ||
383 | return cpufreq_frequency_table_verify(policy, data->freq_table); | 380 | return cpufreq_frequency_table_verify(policy, data->freq_table); |
384 | } | 381 | } |
@@ -433,11 +430,11 @@ static void free_acpi_perf_data(void) | |||
433 | static int __init acpi_cpufreq_early_init(void) | 430 | static int __init acpi_cpufreq_early_init(void) |
434 | { | 431 | { |
435 | unsigned int i; | 432 | unsigned int i; |
436 | dprintk("acpi_cpufreq_early_init\n"); | 433 | pr_debug("acpi_cpufreq_early_init\n"); |
437 | 434 | ||
438 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); | 435 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); |
439 | if (!acpi_perf_data) { | 436 | if (!acpi_perf_data) { |
440 | dprintk("Memory allocation error for acpi_perf_data.\n"); | 437 | pr_debug("Memory allocation error for acpi_perf_data.\n"); |
441 | return -ENOMEM; | 438 | return -ENOMEM; |
442 | } | 439 | } |
443 | for_each_possible_cpu(i) { | 440 | for_each_possible_cpu(i) { |
@@ -519,7 +516,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
519 | static int blacklisted; | 516 | static int blacklisted; |
520 | #endif | 517 | #endif |
521 | 518 | ||
522 | dprintk("acpi_cpufreq_cpu_init\n"); | 519 | pr_debug("acpi_cpufreq_cpu_init\n"); |
523 | 520 | ||
524 | #ifdef CONFIG_SMP | 521 | #ifdef CONFIG_SMP |
525 | if (blacklisted) | 522 | if (blacklisted) |
@@ -566,7 +563,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
566 | 563 | ||
567 | /* capability check */ | 564 | /* capability check */ |
568 | if (perf->state_count <= 1) { | 565 | if (perf->state_count <= 1) { |
569 | dprintk("No P-States\n"); | 566 | pr_debug("No P-States\n"); |
570 | result = -ENODEV; | 567 | result = -ENODEV; |
571 | goto err_unreg; | 568 | goto err_unreg; |
572 | } | 569 | } |
@@ -578,11 +575,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
578 | 575 | ||
579 | switch (perf->control_register.space_id) { | 576 | switch (perf->control_register.space_id) { |
580 | case ACPI_ADR_SPACE_SYSTEM_IO: | 577 | case ACPI_ADR_SPACE_SYSTEM_IO: |
581 | dprintk("SYSTEM IO addr space\n"); | 578 | pr_debug("SYSTEM IO addr space\n"); |
582 | data->cpu_feature = SYSTEM_IO_CAPABLE; | 579 | data->cpu_feature = SYSTEM_IO_CAPABLE; |
583 | break; | 580 | break; |
584 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | 581 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
585 | dprintk("HARDWARE addr space\n"); | 582 | pr_debug("HARDWARE addr space\n"); |
586 | if (!check_est_cpu(cpu)) { | 583 | if (!check_est_cpu(cpu)) { |
587 | result = -ENODEV; | 584 | result = -ENODEV; |
588 | goto err_unreg; | 585 | goto err_unreg; |
@@ -590,7 +587,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
590 | data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; | 587 | data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; |
591 | break; | 588 | break; |
592 | default: | 589 | default: |
593 | dprintk("Unknown addr space %d\n", | 590 | pr_debug("Unknown addr space %d\n", |
594 | (u32) (perf->control_register.space_id)); | 591 | (u32) (perf->control_register.space_id)); |
595 | result = -ENODEV; | 592 | result = -ENODEV; |
596 | goto err_unreg; | 593 | goto err_unreg; |
@@ -661,9 +658,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
661 | if (cpu_has(c, X86_FEATURE_APERFMPERF)) | 658 | if (cpu_has(c, X86_FEATURE_APERFMPERF)) |
662 | acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; | 659 | acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; |
663 | 660 | ||
664 | dprintk("CPU%u - ACPI performance management activated.\n", cpu); | 661 | pr_debug("CPU%u - ACPI performance management activated.\n", cpu); |
665 | for (i = 0; i < perf->state_count; i++) | 662 | for (i = 0; i < perf->state_count; i++) |
666 | dprintk(" %cP%d: %d MHz, %d mW, %d uS\n", | 663 | pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", |
667 | (i == perf->state ? '*' : ' '), i, | 664 | (i == perf->state ? '*' : ' '), i, |
668 | (u32) perf->states[i].core_frequency, | 665 | (u32) perf->states[i].core_frequency, |
669 | (u32) perf->states[i].power, | 666 | (u32) perf->states[i].power, |
@@ -694,7 +691,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
694 | { | 691 | { |
695 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 692 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
696 | 693 | ||
697 | dprintk("acpi_cpufreq_cpu_exit\n"); | 694 | pr_debug("acpi_cpufreq_cpu_exit\n"); |
698 | 695 | ||
699 | if (data) { | 696 | if (data) { |
700 | cpufreq_frequency_table_put_attr(policy->cpu); | 697 | cpufreq_frequency_table_put_attr(policy->cpu); |
@@ -712,7 +709,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy) | |||
712 | { | 709 | { |
713 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 710 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
714 | 711 | ||
715 | dprintk("acpi_cpufreq_resume\n"); | 712 | pr_debug("acpi_cpufreq_resume\n"); |
716 | 713 | ||
717 | data->resume = 1; | 714 | data->resume = 1; |
718 | 715 | ||
@@ -743,7 +740,7 @@ static int __init acpi_cpufreq_init(void) | |||
743 | if (acpi_disabled) | 740 | if (acpi_disabled) |
744 | return 0; | 741 | return 0; |
745 | 742 | ||
746 | dprintk("acpi_cpufreq_init\n"); | 743 | pr_debug("acpi_cpufreq_init\n"); |
747 | 744 | ||
748 | ret = acpi_cpufreq_early_init(); | 745 | ret = acpi_cpufreq_early_init(); |
749 | if (ret) | 746 | if (ret) |
@@ -758,7 +755,7 @@ static int __init acpi_cpufreq_init(void) | |||
758 | 755 | ||
759 | static void __exit acpi_cpufreq_exit(void) | 756 | static void __exit acpi_cpufreq_exit(void) |
760 | { | 757 | { |
761 | dprintk("acpi_cpufreq_exit\n"); | 758 | pr_debug("acpi_cpufreq_exit\n"); |
762 | 759 | ||
763 | cpufreq_unregister_driver(&acpi_cpufreq_driver); | 760 | cpufreq_unregister_driver(&acpi_cpufreq_driver); |
764 | 761 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index 141abebc4516..7bac808804f3 100644 --- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c | |||
@@ -57,8 +57,6 @@ MODULE_PARM_DESC(min_fsb, | |||
57 | "Minimum FSB to use, if not defined: current FSB - 50"); | 57 | "Minimum FSB to use, if not defined: current FSB - 50"); |
58 | 58 | ||
59 | #define PFX "cpufreq-nforce2: " | 59 | #define PFX "cpufreq-nforce2: " |
60 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
61 | "cpufreq-nforce2", msg) | ||
62 | 60 | ||
63 | /** | 61 | /** |
64 | * nforce2_calc_fsb - calculate FSB | 62 | * nforce2_calc_fsb - calculate FSB |
@@ -270,7 +268,7 @@ static int nforce2_target(struct cpufreq_policy *policy, | |||
270 | if (freqs.old == freqs.new) | 268 | if (freqs.old == freqs.new) |
271 | return 0; | 269 | return 0; |
272 | 270 | ||
273 | dprintk("Old CPU frequency %d kHz, new %d kHz\n", | 271 | pr_debug("Old CPU frequency %d kHz, new %d kHz\n", |
274 | freqs.old, freqs.new); | 272 | freqs.old, freqs.new); |
275 | 273 | ||
276 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 274 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
@@ -282,7 +280,7 @@ static int nforce2_target(struct cpufreq_policy *policy, | |||
282 | printk(KERN_ERR PFX "Changing FSB to %d failed\n", | 280 | printk(KERN_ERR PFX "Changing FSB to %d failed\n", |
283 | target_fsb); | 281 | target_fsb); |
284 | else | 282 | else |
285 | dprintk("Changed FSB successfully to %d\n", | 283 | pr_debug("Changed FSB successfully to %d\n", |
286 | target_fsb); | 284 | target_fsb); |
287 | 285 | ||
288 | /* Enable IRQs */ | 286 | /* Enable IRQs */ |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 2dafc5c38ae7..0a5bea9e3585 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -32,9 +32,6 @@ | |||
32 | 32 | ||
33 | #include <trace/events/power.h> | 33 | #include <trace/events/power.h> |
34 | 34 | ||
35 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ | ||
36 | "cpufreq-core", msg) | ||
37 | |||
38 | /** | 35 | /** |
39 | * The "cpufreq driver" - the arch- or hardware-dependent low | 36 | * The "cpufreq driver" - the arch- or hardware-dependent low |
40 | * level driver of CPUFreq support, and its spinlock. This lock | 37 | * level driver of CPUFreq support, and its spinlock. This lock |
@@ -181,93 +178,6 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_put); | |||
181 | 178 | ||
182 | 179 | ||
183 | /********************************************************************* | 180 | /********************************************************************* |
184 | * UNIFIED DEBUG HELPERS * | ||
185 | *********************************************************************/ | ||
186 | #ifdef CONFIG_CPU_FREQ_DEBUG | ||
187 | |||
188 | /* what part(s) of the CPUfreq subsystem are debugged? */ | ||
189 | static unsigned int debug; | ||
190 | |||
191 | /* is the debug output ratelimit'ed using printk_ratelimit? User can | ||
192 | * set or modify this value. | ||
193 | */ | ||
194 | static unsigned int debug_ratelimit = 1; | ||
195 | |||
196 | /* is the printk_ratelimit'ing enabled? It's enabled after a successful | ||
197 | * loading of a cpufreq driver, temporarily disabled when a new policy | ||
198 | * is set, and disabled upon cpufreq driver removal | ||
199 | */ | ||
200 | static unsigned int disable_ratelimit = 1; | ||
201 | static DEFINE_SPINLOCK(disable_ratelimit_lock); | ||
202 | |||
203 | static void cpufreq_debug_enable_ratelimit(void) | ||
204 | { | ||
205 | unsigned long flags; | ||
206 | |||
207 | spin_lock_irqsave(&disable_ratelimit_lock, flags); | ||
208 | if (disable_ratelimit) | ||
209 | disable_ratelimit--; | ||
210 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
211 | } | ||
212 | |||
213 | static void cpufreq_debug_disable_ratelimit(void) | ||
214 | { | ||
215 | unsigned long flags; | ||
216 | |||
217 | spin_lock_irqsave(&disable_ratelimit_lock, flags); | ||
218 | disable_ratelimit++; | ||
219 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
220 | } | ||
221 | |||
222 | void cpufreq_debug_printk(unsigned int type, const char *prefix, | ||
223 | const char *fmt, ...) | ||
224 | { | ||
225 | char s[256]; | ||
226 | va_list args; | ||
227 | unsigned int len; | ||
228 | unsigned long flags; | ||
229 | |||
230 | WARN_ON(!prefix); | ||
231 | if (type & debug) { | ||
232 | spin_lock_irqsave(&disable_ratelimit_lock, flags); | ||
233 | if (!disable_ratelimit && debug_ratelimit | ||
234 | && !printk_ratelimit()) { | ||
235 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
236 | return; | ||
237 | } | ||
238 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
239 | |||
240 | len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix); | ||
241 | |||
242 | va_start(args, fmt); | ||
243 | len += vsnprintf(&s[len], (256 - len), fmt, args); | ||
244 | va_end(args); | ||
245 | |||
246 | printk(s); | ||
247 | |||
248 | WARN_ON(len < 5); | ||
249 | } | ||
250 | } | ||
251 | EXPORT_SYMBOL(cpufreq_debug_printk); | ||
252 | |||
253 | |||
254 | module_param(debug, uint, 0644); | ||
255 | MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core," | ||
256 | " 2 to debug drivers, and 4 to debug governors."); | ||
257 | |||
258 | module_param(debug_ratelimit, uint, 0644); | ||
259 | MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:" | ||
260 | " set to 0 to disable ratelimiting."); | ||
261 | |||
262 | #else /* !CONFIG_CPU_FREQ_DEBUG */ | ||
263 | |||
264 | static inline void cpufreq_debug_enable_ratelimit(void) { return; } | ||
265 | static inline void cpufreq_debug_disable_ratelimit(void) { return; } | ||
266 | |||
267 | #endif /* CONFIG_CPU_FREQ_DEBUG */ | ||
268 | |||
269 | |||
270 | /********************************************************************* | ||
271 | * EXTERNALLY AFFECTING FREQUENCY CHANGES * | 181 | * EXTERNALLY AFFECTING FREQUENCY CHANGES * |
272 | *********************************************************************/ | 182 | *********************************************************************/ |
273 | 183 | ||
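The block deleted above is the cpufreq core's hand-rolled debug machinery (levels, ratelimiting, module parameters); with every dprintk() in this series converted to a bare pr_debug(), nothing uses it any more. Below is a user-space approximation of the mechanism the replacement relies on — pr_debug() compiles away unless DEBUG is defined (in the kernel, CONFIG_DYNAMIC_DEBUG can also enable individual sites at run time), and pr_fmt() supplies the per-file prefix the old macro took as an explicit argument. This is a sketch of the idea, not the kernel's actual printk plumbing:

```c
/* Rough user-space model: with DEBUG undefined, pr_debug() costs
 * nothing, so no per-subsystem enable/ratelimit code is needed. */
#include <stdio.h>

#define pr_fmt(fmt) "cpufreq-core: " fmt

#ifdef DEBUG
#define pr_debug(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) do { } while (0)
#endif

int main(void)
{
        /* silent unless compiled with -DDEBUG */
        pr_debug("notification of frequency transition to %u kHz\n", 1600u);
        return 0;
}
```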
@@ -291,7 +201,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) | |||
291 | if (!l_p_j_ref_freq) { | 201 | if (!l_p_j_ref_freq) { |
292 | l_p_j_ref = loops_per_jiffy; | 202 | l_p_j_ref = loops_per_jiffy; |
293 | l_p_j_ref_freq = ci->old; | 203 | l_p_j_ref_freq = ci->old; |
294 | dprintk("saving %lu as reference value for loops_per_jiffy; " | 204 | pr_debug("saving %lu as reference value for loops_per_jiffy; " |
295 | "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); | 205 | "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); |
296 | } | 206 | } |
297 | if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || | 207 | if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || |
@@ -299,7 +209,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) | |||
299 | (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { | 209 | (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { |
300 | loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, | 210 | loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, |
301 | ci->new); | 211 | ci->new); |
302 | dprintk("scaling loops_per_jiffy to %lu " | 212 | pr_debug("scaling loops_per_jiffy to %lu " |
303 | "for frequency %u kHz\n", loops_per_jiffy, ci->new); | 213 | "for frequency %u kHz\n", loops_per_jiffy, ci->new); |
304 | } | 214 | } |
305 | } | 215 | } |
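The adjust_jiffies() hunks above rescale loops_per_jiffy because the calibrated delay loop runs proportionally faster at a higher clock. A stand-alone check of that proportionality follows; the helper name matches the kernel's cpufreq_scale(), but the body is a simplified re-implementation for illustration (the split multiply returns floor(old * mult / div) exactly while keeping intermediates small), and the sample numbers are invented:

```c
#include <stdio.h>

/* simplified stand-in for the kernel's cpufreq_scale() */
static unsigned long cpufreq_scale(unsigned long old, unsigned int div,
                                   unsigned int mult)
{
        unsigned long high = old / div;
        unsigned long low = old % div;

        return high * mult + (low * mult) / div;
}

int main(void)
{
        unsigned long l_p_j_ref = 4980736;      /* saved at 1000000 kHz */

        /* doubling the clock doubles loops_per_jiffy */
        printf("lpj at 2000000 kHz: %lu\n",
               cpufreq_scale(l_p_j_ref, 1000000, 2000000));
        return 0;
}
```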
@@ -326,7 +236,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) | |||
326 | BUG_ON(irqs_disabled()); | 236 | BUG_ON(irqs_disabled()); |
327 | 237 | ||
328 | freqs->flags = cpufreq_driver->flags; | 238 | freqs->flags = cpufreq_driver->flags; |
329 | dprintk("notification %u of frequency transition to %u kHz\n", | 239 | pr_debug("notification %u of frequency transition to %u kHz\n", |
330 | state, freqs->new); | 240 | state, freqs->new); |
331 | 241 | ||
332 | policy = per_cpu(cpufreq_cpu_data, freqs->cpu); | 242 | policy = per_cpu(cpufreq_cpu_data, freqs->cpu); |
@@ -340,7 +250,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) | |||
340 | if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { | 250 | if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { |
341 | if ((policy) && (policy->cpu == freqs->cpu) && | 251 | if ((policy) && (policy->cpu == freqs->cpu) && |
342 | (policy->cur) && (policy->cur != freqs->old)) { | 252 | (policy->cur) && (policy->cur != freqs->old)) { |
343 | dprintk("Warning: CPU frequency is" | 253 | pr_debug("Warning: CPU frequency is" |
344 | " %u, cpufreq assumed %u kHz.\n", | 254 | " %u, cpufreq assumed %u kHz.\n", |
345 | freqs->old, policy->cur); | 255 | freqs->old, policy->cur); |
346 | freqs->old = policy->cur; | 256 | freqs->old = policy->cur; |
@@ -353,7 +263,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) | |||
353 | 263 | ||
354 | case CPUFREQ_POSTCHANGE: | 264 | case CPUFREQ_POSTCHANGE: |
355 | adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); | 265 | adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); |
356 | dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, | 266 | pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, |
357 | (unsigned long)freqs->cpu); | 267 | (unsigned long)freqs->cpu); |
358 | trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu); | 268 | trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu); |
359 | trace_cpu_frequency(freqs->new, freqs->cpu); | 269 | trace_cpu_frequency(freqs->new, freqs->cpu); |
@@ -411,21 +321,14 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, | |||
411 | t = __find_governor(str_governor); | 321 | t = __find_governor(str_governor); |
412 | 322 | ||
413 | if (t == NULL) { | 323 | if (t == NULL) { |
414 | char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", | 324 | int ret; |
415 | str_governor); | ||
416 | |||
417 | if (name) { | ||
418 | int ret; | ||
419 | 325 | ||
420 | mutex_unlock(&cpufreq_governor_mutex); | 326 | mutex_unlock(&cpufreq_governor_mutex); |
421 | ret = request_module("%s", name); | 327 | ret = request_module("cpufreq_%s", str_governor); |
422 | mutex_lock(&cpufreq_governor_mutex); | 328 | mutex_lock(&cpufreq_governor_mutex); |
423 | 329 | ||
424 | if (ret == 0) | 330 | if (ret == 0) |
425 | t = __find_governor(str_governor); | 331 | t = __find_governor(str_governor); |
426 | } | ||
427 | |||
428 | kfree(name); | ||
429 | } | 332 | } |
430 | 333 | ||
431 | if (t != NULL) { | 334 | if (t != NULL) { |
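The simplification in the hunk above works because request_module() is itself printf-like, so building the "cpufreq_<governor>" string with kasprintf() and freeing it afterwards was pure overhead (including a failure path for the allocation). A sketch with a user-space stand-in for request_module() — the real kernel call upcalls modprobe and may sleep, which is why the governor mutex is dropped around it in the hunk:

```c
#include <stdarg.h>
#include <stdio.h>

/* stand-in for the kernel's request_module(), which is printf-like */
static int request_module(const char *fmt, ...)
{
        char name[64];
        va_list args;

        va_start(args, fmt);
        vsnprintf(name, sizeof(name), fmt, args);
        va_end(args);

        printf("modprobe %s\n", name);  /* the kernel upcalls modprobe */
        return 0;
}

int main(void)
{
        /* no kasprintf()/kfree() round trip needed */
        return request_module("cpufreq_%s", "ondemand");
}
```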
@@ -753,7 +656,7 @@ no_policy: | |||
753 | static void cpufreq_sysfs_release(struct kobject *kobj) | 656 | static void cpufreq_sysfs_release(struct kobject *kobj) |
754 | { | 657 | { |
755 | struct cpufreq_policy *policy = to_policy(kobj); | 658 | struct cpufreq_policy *policy = to_policy(kobj); |
756 | dprintk("last reference is dropped\n"); | 659 | pr_debug("last reference is dropped\n"); |
757 | complete(&policy->kobj_unregister); | 660 | complete(&policy->kobj_unregister); |
758 | } | 661 | } |
759 | 662 | ||
@@ -788,7 +691,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, | |||
788 | gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); | 691 | gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); |
789 | if (gov) { | 692 | if (gov) { |
790 | policy->governor = gov; | 693 | policy->governor = gov; |
791 | dprintk("Restoring governor %s for cpu %d\n", | 694 | pr_debug("Restoring governor %s for cpu %d\n", |
792 | policy->governor->name, cpu); | 695 | policy->governor->name, cpu); |
793 | } | 696 | } |
794 | #endif | 697 | #endif |
@@ -824,7 +727,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, | |||
824 | per_cpu(cpufreq_cpu_data, cpu) = managed_policy; | 727 | per_cpu(cpufreq_cpu_data, cpu) = managed_policy; |
825 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 728 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
826 | 729 | ||
827 | dprintk("CPU already managed, adding link\n"); | 730 | pr_debug("CPU already managed, adding link\n"); |
828 | ret = sysfs_create_link(&sys_dev->kobj, | 731 | ret = sysfs_create_link(&sys_dev->kobj, |
829 | &managed_policy->kobj, | 732 | &managed_policy->kobj, |
830 | "cpufreq"); | 733 | "cpufreq"); |
@@ -865,7 +768,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu, | |||
865 | if (!cpu_online(j)) | 768 | if (!cpu_online(j)) |
866 | continue; | 769 | continue; |
867 | 770 | ||
868 | dprintk("CPU %u already managed, adding link\n", j); | 771 | pr_debug("CPU %u already managed, adding link\n", j); |
869 | managed_policy = cpufreq_cpu_get(cpu); | 772 | managed_policy = cpufreq_cpu_get(cpu); |
870 | cpu_sys_dev = get_cpu_sysdev(j); | 773 | cpu_sys_dev = get_cpu_sysdev(j); |
871 | ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, | 774 | ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, |
@@ -941,7 +844,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu, | |||
941 | policy->user_policy.governor = policy->governor; | 844 | policy->user_policy.governor = policy->governor; |
942 | 845 | ||
943 | if (ret) { | 846 | if (ret) { |
944 | dprintk("setting policy failed\n"); | 847 | pr_debug("setting policy failed\n"); |
945 | if (cpufreq_driver->exit) | 848 | if (cpufreq_driver->exit) |
946 | cpufreq_driver->exit(policy); | 849 | cpufreq_driver->exit(policy); |
947 | } | 850 | } |
@@ -977,8 +880,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
977 | if (cpu_is_offline(cpu)) | 880 | if (cpu_is_offline(cpu)) |
978 | return 0; | 881 | return 0; |
979 | 882 | ||
980 | cpufreq_debug_disable_ratelimit(); | 883 | pr_debug("adding CPU %u\n", cpu); |
981 | dprintk("adding CPU %u\n", cpu); | ||
982 | 884 | ||
983 | #ifdef CONFIG_SMP | 885 | #ifdef CONFIG_SMP |
984 | /* check whether a different CPU already registered this | 886 | /* check whether a different CPU already registered this |
@@ -986,7 +888,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
986 | policy = cpufreq_cpu_get(cpu); | 888 | policy = cpufreq_cpu_get(cpu); |
987 | if (unlikely(policy)) { | 889 | if (unlikely(policy)) { |
988 | cpufreq_cpu_put(policy); | 890 | cpufreq_cpu_put(policy); |
989 | cpufreq_debug_enable_ratelimit(); | ||
990 | return 0; | 891 | return 0; |
991 | } | 892 | } |
992 | #endif | 893 | #endif |
@@ -1037,7 +938,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
1037 | */ | 938 | */ |
1038 | ret = cpufreq_driver->init(policy); | 939 | ret = cpufreq_driver->init(policy); |
1039 | if (ret) { | 940 | if (ret) { |
1040 | dprintk("initialization failed\n"); | 941 | pr_debug("initialization failed\n"); |
1041 | goto err_unlock_policy; | 942 | goto err_unlock_policy; |
1042 | } | 943 | } |
1043 | policy->user_policy.min = policy->min; | 944 | policy->user_policy.min = policy->min; |
@@ -1063,8 +964,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
1063 | 964 | ||
1064 | kobject_uevent(&policy->kobj, KOBJ_ADD); | 965 | kobject_uevent(&policy->kobj, KOBJ_ADD); |
1065 | module_put(cpufreq_driver->owner); | 966 | module_put(cpufreq_driver->owner); |
1066 | dprintk("initialization complete\n"); | 967 | pr_debug("initialization complete\n"); |
1067 | cpufreq_debug_enable_ratelimit(); | ||
1068 | 968 | ||
1069 | return 0; | 969 | return 0; |
1070 | 970 | ||
@@ -1088,7 +988,6 @@ err_free_policy: | |||
1088 | nomem_out: | 988 | nomem_out: |
1089 | module_put(cpufreq_driver->owner); | 989 | module_put(cpufreq_driver->owner); |
1090 | module_out: | 990 | module_out: |
1091 | cpufreq_debug_enable_ratelimit(); | ||
1092 | return ret; | 991 | return ret; |
1093 | } | 992 | } |
1094 | 993 | ||
@@ -1112,15 +1011,13 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1112 | unsigned int j; | 1011 | unsigned int j; |
1113 | #endif | 1012 | #endif |
1114 | 1013 | ||
1115 | cpufreq_debug_disable_ratelimit(); | 1014 | pr_debug("unregistering CPU %u\n", cpu); |
1116 | dprintk("unregistering CPU %u\n", cpu); | ||
1117 | 1015 | ||
1118 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 1016 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
1119 | data = per_cpu(cpufreq_cpu_data, cpu); | 1017 | data = per_cpu(cpufreq_cpu_data, cpu); |
1120 | 1018 | ||
1121 | if (!data) { | 1019 | if (!data) { |
1122 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1020 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1123 | cpufreq_debug_enable_ratelimit(); | ||
1124 | unlock_policy_rwsem_write(cpu); | 1021 | unlock_policy_rwsem_write(cpu); |
1125 | return -EINVAL; | 1022 | return -EINVAL; |
1126 | } | 1023 | } |
@@ -1132,12 +1029,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1132 | * only need to unlink, put and exit | 1029 | * only need to unlink, put and exit |
1133 | */ | 1030 | */ |
1134 | if (unlikely(cpu != data->cpu)) { | 1031 | if (unlikely(cpu != data->cpu)) { |
1135 | dprintk("removing link\n"); | 1032 | pr_debug("removing link\n"); |
1136 | cpumask_clear_cpu(cpu, data->cpus); | 1033 | cpumask_clear_cpu(cpu, data->cpus); |
1137 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1034 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1138 | kobj = &sys_dev->kobj; | 1035 | kobj = &sys_dev->kobj; |
1139 | cpufreq_cpu_put(data); | 1036 | cpufreq_cpu_put(data); |
1140 | cpufreq_debug_enable_ratelimit(); | ||
1141 | unlock_policy_rwsem_write(cpu); | 1037 | unlock_policy_rwsem_write(cpu); |
1142 | sysfs_remove_link(kobj, "cpufreq"); | 1038 | sysfs_remove_link(kobj, "cpufreq"); |
1143 | return 0; | 1039 | return 0; |
@@ -1170,7 +1066,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1170 | for_each_cpu(j, data->cpus) { | 1066 | for_each_cpu(j, data->cpus) { |
1171 | if (j == cpu) | 1067 | if (j == cpu) |
1172 | continue; | 1068 | continue; |
1173 | dprintk("removing link for cpu %u\n", j); | 1069 | pr_debug("removing link for cpu %u\n", j); |
1174 | #ifdef CONFIG_HOTPLUG_CPU | 1070 | #ifdef CONFIG_HOTPLUG_CPU |
1175 | strncpy(per_cpu(cpufreq_cpu_governor, j), | 1071 | strncpy(per_cpu(cpufreq_cpu_governor, j), |
1176 | data->governor->name, CPUFREQ_NAME_LEN); | 1072 | data->governor->name, CPUFREQ_NAME_LEN); |
@@ -1199,21 +1095,35 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1199 | * not referenced anymore by anybody before we proceed with | 1095 | * not referenced anymore by anybody before we proceed with |
1200 | * unloading. | 1096 | * unloading. |
1201 | */ | 1097 | */ |
1202 | dprintk("waiting for dropping of refcount\n"); | 1098 | pr_debug("waiting for dropping of refcount\n"); |
1203 | wait_for_completion(cmp); | 1099 | wait_for_completion(cmp); |
1204 | dprintk("wait complete\n"); | 1100 | pr_debug("wait complete\n"); |
1205 | 1101 | ||
1206 | lock_policy_rwsem_write(cpu); | 1102 | lock_policy_rwsem_write(cpu); |
1207 | if (cpufreq_driver->exit) | 1103 | if (cpufreq_driver->exit) |
1208 | cpufreq_driver->exit(data); | 1104 | cpufreq_driver->exit(data); |
1209 | unlock_policy_rwsem_write(cpu); | 1105 | unlock_policy_rwsem_write(cpu); |
1210 | 1106 | ||
1107 | #ifdef CONFIG_HOTPLUG_CPU | ||
1108 | /* when the CPU which is the parent of the kobj is hotplugged | ||
1109 | * offline, check for siblings, and create cpufreq sysfs interface | ||
1110 | * and symlinks | ||
1111 | */ | ||
1112 | if (unlikely(cpumask_weight(data->cpus) > 1)) { | ||
1113 | /* first sibling now owns the new sysfs dir */ | ||
1114 | cpumask_clear_cpu(cpu, data->cpus); | ||
1115 | cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus))); | ||
1116 | |||
1117 | /* finally remove our own symlink */ | ||
1118 | lock_policy_rwsem_write(cpu); | ||
1119 | __cpufreq_remove_dev(sys_dev); | ||
1120 | } | ||
1121 | #endif | ||
1122 | |||
1211 | free_cpumask_var(data->related_cpus); | 1123 | free_cpumask_var(data->related_cpus); |
1212 | free_cpumask_var(data->cpus); | 1124 | free_cpumask_var(data->cpus); |
1213 | kfree(data); | 1125 | kfree(data); |
1214 | per_cpu(cpufreq_cpu_data, cpu) = NULL; | ||
1215 | 1126 | ||
1216 | cpufreq_debug_enable_ratelimit(); | ||
1217 | return 0; | 1127 | return 0; |
1218 | } | 1128 | } |
1219 | 1129 | ||
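The CONFIG_HOTPLUG_CPU block added in this hunk hands the policy's sysfs directory to a surviving sibling when the CPU that owned the kobject goes offline: the dying CPU is cleared from policy->cpus, the first remaining CPU re-adds the device, and the old symlink is removed. A toy model of the mask arithmetic, with a plain bitmask standing in for struct cpumask and GCC's __builtin_ctz() playing the role of cpumask_first():

```c
#include <stdio.h>

int main(void)
{
        unsigned int cpus = 0x0f;       /* policy spans CPUs 0-3 */
        unsigned int dying = 0;         /* CPU 0 owned the kobject */

        cpus &= ~(1u << dying);         /* cpumask_clear_cpu() */
        if (cpus) {                     /* weight was > 1 before the clear */
                unsigned int owner = __builtin_ctz(cpus); /* cpumask_first() */
                printf("CPU %u inherits the cpufreq sysfs dir\n", owner);
        }
        return 0;
}
```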
@@ -1239,7 +1149,7 @@ static void handle_update(struct work_struct *work) | |||
1239 | struct cpufreq_policy *policy = | 1149 | struct cpufreq_policy *policy = |
1240 | container_of(work, struct cpufreq_policy, update); | 1150 | container_of(work, struct cpufreq_policy, update); |
1241 | unsigned int cpu = policy->cpu; | 1151 | unsigned int cpu = policy->cpu; |
1242 | dprintk("handle_update for cpu %u called\n", cpu); | 1152 | pr_debug("handle_update for cpu %u called\n", cpu); |
1243 | cpufreq_update_policy(cpu); | 1153 | cpufreq_update_policy(cpu); |
1244 | } | 1154 | } |
1245 | 1155 | ||
@@ -1257,7 +1167,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, | |||
1257 | { | 1167 | { |
1258 | struct cpufreq_freqs freqs; | 1168 | struct cpufreq_freqs freqs; |
1259 | 1169 | ||
1260 | dprintk("Warning: CPU frequency out of sync: cpufreq and timing " | 1170 | pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " |
1261 | "core thinks of %u, is %u kHz.\n", old_freq, new_freq); | 1171 | "core thinks of %u, is %u kHz.\n", old_freq, new_freq); |
1262 | 1172 | ||
1263 | freqs.cpu = cpu; | 1173 | freqs.cpu = cpu; |
@@ -1360,7 +1270,7 @@ static int cpufreq_bp_suspend(void) | |||
1360 | int cpu = smp_processor_id(); | 1270 | int cpu = smp_processor_id(); |
1361 | struct cpufreq_policy *cpu_policy; | 1271 | struct cpufreq_policy *cpu_policy; |
1362 | 1272 | ||
1363 | dprintk("suspending cpu %u\n", cpu); | 1273 | pr_debug("suspending cpu %u\n", cpu); |
1364 | 1274 | ||
1365 | /* If there's no policy for the boot CPU, we have nothing to do. */ | 1275 | /* If there's no policy for the boot CPU, we have nothing to do. */ |
1366 | cpu_policy = cpufreq_cpu_get(cpu); | 1276 | cpu_policy = cpufreq_cpu_get(cpu); |
@@ -1398,7 +1308,7 @@ static void cpufreq_bp_resume(void) | |||
1398 | int cpu = smp_processor_id(); | 1308 | int cpu = smp_processor_id(); |
1399 | struct cpufreq_policy *cpu_policy; | 1309 | struct cpufreq_policy *cpu_policy; |
1400 | 1310 | ||
1401 | dprintk("resuming cpu %u\n", cpu); | 1311 | pr_debug("resuming cpu %u\n", cpu); |
1402 | 1312 | ||
1403 | /* If there's no policy for the boot CPU, we have nothing to do. */ | 1313 | /* If there's no policy for the boot CPU, we have nothing to do. */ |
1404 | cpu_policy = cpufreq_cpu_get(cpu); | 1314 | cpu_policy = cpufreq_cpu_get(cpu); |
@@ -1510,7 +1420,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1510 | { | 1420 | { |
1511 | int retval = -EINVAL; | 1421 | int retval = -EINVAL; |
1512 | 1422 | ||
1513 | dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, | 1423 | pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu, |
1514 | target_freq, relation); | 1424 | target_freq, relation); |
1515 | if (cpu_online(policy->cpu) && cpufreq_driver->target) | 1425 | if (cpu_online(policy->cpu) && cpufreq_driver->target) |
1516 | retval = cpufreq_driver->target(policy, target_freq, relation); | 1426 | retval = cpufreq_driver->target(policy, target_freq, relation); |
@@ -1596,7 +1506,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, | |||
1596 | if (!try_module_get(policy->governor->owner)) | 1506 | if (!try_module_get(policy->governor->owner)) |
1597 | return -EINVAL; | 1507 | return -EINVAL; |
1598 | 1508 | ||
1599 | dprintk("__cpufreq_governor for CPU %u, event %u\n", | 1509 | pr_debug("__cpufreq_governor for CPU %u, event %u\n", |
1600 | policy->cpu, event); | 1510 | policy->cpu, event); |
1601 | ret = policy->governor->governor(policy, event); | 1511 | ret = policy->governor->governor(policy, event); |
1602 | 1512 | ||
@@ -1697,8 +1607,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, | |||
1697 | { | 1607 | { |
1698 | int ret = 0; | 1608 | int ret = 0; |
1699 | 1609 | ||
1700 | cpufreq_debug_disable_ratelimit(); | 1610 | pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, |
1701 | dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, | ||
1702 | policy->min, policy->max); | 1611 | policy->min, policy->max); |
1703 | 1612 | ||
1704 | memcpy(&policy->cpuinfo, &data->cpuinfo, | 1613 | memcpy(&policy->cpuinfo, &data->cpuinfo, |
@@ -1735,19 +1644,19 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, | |||
1735 | data->min = policy->min; | 1644 | data->min = policy->min; |
1736 | data->max = policy->max; | 1645 | data->max = policy->max; |
1737 | 1646 | ||
1738 | dprintk("new min and max freqs are %u - %u kHz\n", | 1647 | pr_debug("new min and max freqs are %u - %u kHz\n", |
1739 | data->min, data->max); | 1648 | data->min, data->max); |
1740 | 1649 | ||
1741 | if (cpufreq_driver->setpolicy) { | 1650 | if (cpufreq_driver->setpolicy) { |
1742 | data->policy = policy->policy; | 1651 | data->policy = policy->policy; |
1743 | dprintk("setting range\n"); | 1652 | pr_debug("setting range\n"); |
1744 | ret = cpufreq_driver->setpolicy(policy); | 1653 | ret = cpufreq_driver->setpolicy(policy); |
1745 | } else { | 1654 | } else { |
1746 | if (policy->governor != data->governor) { | 1655 | if (policy->governor != data->governor) { |
1747 | /* save old, working values */ | 1656 | /* save old, working values */ |
1748 | struct cpufreq_governor *old_gov = data->governor; | 1657 | struct cpufreq_governor *old_gov = data->governor; |
1749 | 1658 | ||
1750 | dprintk("governor switch\n"); | 1659 | pr_debug("governor switch\n"); |
1751 | 1660 | ||
1752 | /* end old governor */ | 1661 | /* end old governor */ |
1753 | if (data->governor) | 1662 | if (data->governor) |
@@ -1757,7 +1666,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, | |||
1757 | data->governor = policy->governor; | 1666 | data->governor = policy->governor; |
1758 | if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { | 1667 | if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { |
1759 | /* new governor failed, so re-start old one */ | 1668 | /* new governor failed, so re-start old one */ |
1760 | dprintk("starting governor %s failed\n", | 1669 | pr_debug("starting governor %s failed\n", |
1761 | data->governor->name); | 1670 | data->governor->name); |
1762 | if (old_gov) { | 1671 | if (old_gov) { |
1763 | data->governor = old_gov; | 1672 | data->governor = old_gov; |
@@ -1769,12 +1678,11 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, | |||
1769 | } | 1678 | } |
1770 | /* might be a policy change, too, so fall through */ | 1679 | /* might be a policy change, too, so fall through */ |
1771 | } | 1680 | } |
1772 | dprintk("governor: change or update limits\n"); | 1681 | pr_debug("governor: change or update limits\n"); |
1773 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); | 1682 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); |
1774 | } | 1683 | } |
1775 | 1684 | ||
1776 | error_out: | 1685 | error_out: |
1777 | cpufreq_debug_enable_ratelimit(); | ||
1778 | return ret; | 1686 | return ret; |
1779 | } | 1687 | } |
1780 | 1688 | ||
@@ -1801,7 +1709,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1801 | goto fail; | 1709 | goto fail; |
1802 | } | 1710 | } |
1803 | 1711 | ||
1804 | dprintk("updating policy for CPU %u\n", cpu); | 1712 | pr_debug("updating policy for CPU %u\n", cpu); |
1805 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); | 1713 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); |
1806 | policy.min = data->user_policy.min; | 1714 | policy.min = data->user_policy.min; |
1807 | policy.max = data->user_policy.max; | 1715 | policy.max = data->user_policy.max; |
@@ -1813,7 +1721,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1813 | if (cpufreq_driver->get) { | 1721 | if (cpufreq_driver->get) { |
1814 | policy.cur = cpufreq_driver->get(cpu); | 1722 | policy.cur = cpufreq_driver->get(cpu); |
1815 | if (!data->cur) { | 1723 | if (!data->cur) { |
1816 | dprintk("Driver did not initialize current freq"); | 1724 | pr_debug("Driver did not initialize current freq"); |
1817 | data->cur = policy.cur; | 1725 | data->cur = policy.cur; |
1818 | } else { | 1726 | } else { |
1819 | if (data->cur != policy.cur) | 1727 | if (data->cur != policy.cur) |
@@ -1889,7 +1797,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
1889 | ((!driver_data->setpolicy) && (!driver_data->target))) | 1797 | ((!driver_data->setpolicy) && (!driver_data->target))) |
1890 | return -EINVAL; | 1798 | return -EINVAL; |
1891 | 1799 | ||
1892 | dprintk("trying to register driver %s\n", driver_data->name); | 1800 | pr_debug("trying to register driver %s\n", driver_data->name); |
1893 | 1801 | ||
1894 | if (driver_data->setpolicy) | 1802 | if (driver_data->setpolicy) |
1895 | driver_data->flags |= CPUFREQ_CONST_LOOPS; | 1803 | driver_data->flags |= CPUFREQ_CONST_LOOPS; |
@@ -1920,15 +1828,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
1920 | 1828 | ||
1921 | /* if all ->init() calls failed, unregister */ | 1829 | /* if all ->init() calls failed, unregister */ |
1922 | if (ret) { | 1830 | if (ret) { |
1923 | dprintk("no CPU initialized for driver %s\n", | 1831 | pr_debug("no CPU initialized for driver %s\n", |
1924 | driver_data->name); | 1832 | driver_data->name); |
1925 | goto err_sysdev_unreg; | 1833 | goto err_sysdev_unreg; |
1926 | } | 1834 | } |
1927 | } | 1835 | } |
1928 | 1836 | ||
1929 | register_hotcpu_notifier(&cpufreq_cpu_notifier); | 1837 | register_hotcpu_notifier(&cpufreq_cpu_notifier); |
1930 | dprintk("driver %s up and running\n", driver_data->name); | 1838 | pr_debug("driver %s up and running\n", driver_data->name); |
1931 | cpufreq_debug_enable_ratelimit(); | ||
1932 | 1839 | ||
1933 | return 0; | 1840 | return 0; |
1934 | err_sysdev_unreg: | 1841 | err_sysdev_unreg: |
@@ -1955,14 +1862,10 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) | |||
1955 | { | 1862 | { |
1956 | unsigned long flags; | 1863 | unsigned long flags; |
1957 | 1864 | ||
1958 | cpufreq_debug_disable_ratelimit(); | 1865 | if (!cpufreq_driver || (driver != cpufreq_driver)) |
1959 | |||
1960 | if (!cpufreq_driver || (driver != cpufreq_driver)) { | ||
1961 | cpufreq_debug_enable_ratelimit(); | ||
1962 | return -EINVAL; | 1866 | return -EINVAL; |
1963 | } | ||
1964 | 1867 | ||
1965 | dprintk("unregistering driver %s\n", driver->name); | 1868 | pr_debug("unregistering driver %s\n", driver->name); |
1966 | 1869 | ||
1967 | sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); | 1870 | sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); |
1968 | unregister_hotcpu_notifier(&cpufreq_cpu_notifier); | 1871 | unregister_hotcpu_notifier(&cpufreq_cpu_notifier); |
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c index 7e2e515087f8..f13a8a9af6a1 100644 --- a/drivers/cpufreq/cpufreq_performance.c +++ b/drivers/cpufreq/cpufreq_performance.c | |||
@@ -15,9 +15,6 @@ | |||
15 | #include <linux/cpufreq.h> | 15 | #include <linux/cpufreq.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | 17 | ||
18 | #define dprintk(msg...) \ | ||
19 | cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg) | ||
20 | |||
21 | 18 | ||
22 | static int cpufreq_governor_performance(struct cpufreq_policy *policy, | 19 | static int cpufreq_governor_performance(struct cpufreq_policy *policy, |
23 | unsigned int event) | 20 | unsigned int event) |
@@ -25,7 +22,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy, | |||
25 | switch (event) { | 22 | switch (event) { |
26 | case CPUFREQ_GOV_START: | 23 | case CPUFREQ_GOV_START: |
27 | case CPUFREQ_GOV_LIMITS: | 24 | case CPUFREQ_GOV_LIMITS: |
28 | dprintk("setting to %u kHz because of event %u\n", | 25 | pr_debug("setting to %u kHz because of event %u\n", |
29 | policy->max, event); | 26 | policy->max, event); |
30 | __cpufreq_driver_target(policy, policy->max, | 27 | __cpufreq_driver_target(policy, policy->max, |
31 | CPUFREQ_RELATION_H); | 28 | CPUFREQ_RELATION_H); |
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c index e6db5faf3eb1..4c2eb512f2bc 100644 --- a/drivers/cpufreq/cpufreq_powersave.c +++ b/drivers/cpufreq/cpufreq_powersave.c | |||
@@ -15,16 +15,13 @@ | |||
15 | #include <linux/cpufreq.h> | 15 | #include <linux/cpufreq.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | 17 | ||
18 | #define dprintk(msg...) \ | ||
19 | cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg) | ||
20 | |||
21 | static int cpufreq_governor_powersave(struct cpufreq_policy *policy, | 18 | static int cpufreq_governor_powersave(struct cpufreq_policy *policy, |
22 | unsigned int event) | 19 | unsigned int event) |
23 | { | 20 | { |
24 | switch (event) { | 21 | switch (event) { |
25 | case CPUFREQ_GOV_START: | 22 | case CPUFREQ_GOV_START: |
26 | case CPUFREQ_GOV_LIMITS: | 23 | case CPUFREQ_GOV_LIMITS: |
27 | dprintk("setting to %u kHz because of event %u\n", | 24 | pr_debug("setting to %u kHz because of event %u\n", |
28 | policy->min, event); | 25 | policy->min, event); |
29 | __cpufreq_driver_target(policy, policy->min, | 26 | __cpufreq_driver_target(policy, policy->min, |
30 | CPUFREQ_RELATION_L); | 27 | CPUFREQ_RELATION_L); |
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 00d73fc8e4e2..b60a4c263686 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -165,17 +165,27 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) | |||
165 | return -1; | 165 | return -1; |
166 | } | 166 | } |
167 | 167 | ||
168 | /* should be called late in the CPU removal sequence so that the stats | ||
169 | * memory is still available in case someone tries to use it. | ||
170 | */ | ||
168 | static void cpufreq_stats_free_table(unsigned int cpu) | 171 | static void cpufreq_stats_free_table(unsigned int cpu) |
169 | { | 172 | { |
170 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); | 173 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); |
171 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | ||
172 | if (policy && policy->cpu == cpu) | ||
173 | sysfs_remove_group(&policy->kobj, &stats_attr_group); | ||
174 | if (stat) { | 174 | if (stat) { |
175 | kfree(stat->time_in_state); | 175 | kfree(stat->time_in_state); |
176 | kfree(stat); | 176 | kfree(stat); |
177 | } | 177 | } |
178 | per_cpu(cpufreq_stats_table, cpu) = NULL; | 178 | per_cpu(cpufreq_stats_table, cpu) = NULL; |
179 | } | ||
180 | |||
181 | /* must be called early in the CPU removal sequence (before | ||
182 | * cpufreq_remove_dev) so that policy is still valid. | ||
183 | */ | ||
184 | static void cpufreq_stats_free_sysfs(unsigned int cpu) | ||
185 | { | ||
186 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | ||
187 | if (policy && policy->cpu == cpu) | ||
188 | sysfs_remove_group(&policy->kobj, &stats_attr_group); | ||
179 | if (policy) | 189 | if (policy) |
180 | cpufreq_cpu_put(policy); | 190 | cpufreq_cpu_put(policy); |
181 | } | 191 | } |
@@ -316,6 +326,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, | |||
316 | case CPU_ONLINE_FROZEN: | 326 | case CPU_ONLINE_FROZEN: |
317 | cpufreq_update_policy(cpu); | 327 | cpufreq_update_policy(cpu); |
318 | break; | 328 | break; |
329 | case CPU_DOWN_PREPARE: | ||
330 | cpufreq_stats_free_sysfs(cpu); | ||
331 | break; | ||
319 | case CPU_DEAD: | 332 | case CPU_DEAD: |
320 | case CPU_DEAD_FROZEN: | 333 | case CPU_DEAD_FROZEN: |
321 | cpufreq_stats_free_table(cpu); | 334 | cpufreq_stats_free_table(cpu); |
@@ -324,9 +337,10 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, | |||
324 | return NOTIFY_OK; | 337 | return NOTIFY_OK; |
325 | } | 338 | } |
326 | 339 | ||
327 | static struct notifier_block cpufreq_stat_cpu_notifier __refdata = | 340 | /* priority=1 so this will get called before cpufreq_remove_dev */ |
328 | { | 341 | static struct notifier_block cpufreq_stat_cpu_notifier __refdata = { |
329 | .notifier_call = cpufreq_stat_cpu_callback, | 342 | .notifier_call = cpufreq_stat_cpu_callback, |
343 | .priority = 1, | ||
330 | }; | 344 | }; |
331 | 345 | ||
332 | static struct notifier_block notifier_policy_block = { | 346 | static struct notifier_block notifier_policy_block = { |
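The two-stage teardown above only works if the stats callback runs ahead of the cpufreq core's handling of the same hotplug event, which is what .priority = 1 buys: notifier chains are kept sorted so higher priorities are called first. A toy chain demonstrating that ordering — the names and event label are illustrative, not the kernel's structures:

```c
#include <stdio.h>

struct notifier {
        const char *name;
        int priority;                   /* higher runs first */
        struct notifier *next;
};

/* insert keeping the chain sorted by descending priority, mirroring
 * what the kernel's notifier_chain_register() does */
static void chain_register(struct notifier **head, struct notifier *n)
{
        while (*head && (*head)->priority >= n->priority)
                head = &(*head)->next;
        n->next = *head;
        *head = n;
}

int main(void)
{
        struct notifier core  = { "cpufreq core teardown", 0, NULL };
        struct notifier stats = { "cpufreq_stat sysfs removal", 1, NULL };
        struct notifier *chain = NULL, *n;

        chain_register(&chain, &core);
        chain_register(&chain, &stats); /* sorts ahead of the core */

        for (n = chain; n; n = n->next)
                printf("CPU_DOWN_PREPARE -> %s\n", n->name);
        return 0;
}
```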
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 66d2d1d6c80f..f231015904c0 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c | |||
@@ -37,9 +37,6 @@ static DEFINE_PER_CPU(unsigned int, cpu_is_managed); | |||
37 | static DEFINE_MUTEX(userspace_mutex); | 37 | static DEFINE_MUTEX(userspace_mutex); |
38 | static int cpus_using_userspace_governor; | 38 | static int cpus_using_userspace_governor; |
39 | 39 | ||
40 | #define dprintk(msg...) \ | ||
41 | cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) | ||
42 | |||
43 | /* keep track of frequency transitions */ | 40 | /* keep track of frequency transitions */ |
44 | static int | 41 | static int |
45 | userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | 42 | userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, |
@@ -50,7 +47,7 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | |||
50 | if (!per_cpu(cpu_is_managed, freq->cpu)) | 47 | if (!per_cpu(cpu_is_managed, freq->cpu)) |
51 | return 0; | 48 | return 0; |
52 | 49 | ||
53 | dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", | 50 | pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n", |
54 | freq->cpu, freq->new); | 51 | freq->cpu, freq->new); |
55 | per_cpu(cpu_cur_freq, freq->cpu) = freq->new; | 52 | per_cpu(cpu_cur_freq, freq->cpu) = freq->new; |
56 | 53 | ||
@@ -73,7 +70,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) | |||
73 | { | 70 | { |
74 | int ret = -EINVAL; | 71 | int ret = -EINVAL; |
75 | 72 | ||
76 | dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); | 73 | pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); |
77 | 74 | ||
78 | mutex_lock(&userspace_mutex); | 75 | mutex_lock(&userspace_mutex); |
79 | if (!per_cpu(cpu_is_managed, policy->cpu)) | 76 | if (!per_cpu(cpu_is_managed, policy->cpu)) |
@@ -134,7 +131,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, | |||
134 | per_cpu(cpu_max_freq, cpu) = policy->max; | 131 | per_cpu(cpu_max_freq, cpu) = policy->max; |
135 | per_cpu(cpu_cur_freq, cpu) = policy->cur; | 132 | per_cpu(cpu_cur_freq, cpu) = policy->cur; |
136 | per_cpu(cpu_set_freq, cpu) = policy->cur; | 133 | per_cpu(cpu_set_freq, cpu) = policy->cur; |
137 | dprintk("managing cpu %u started " | 134 | pr_debug("managing cpu %u started " |
138 | "(%u - %u kHz, currently %u kHz)\n", | 135 | "(%u - %u kHz, currently %u kHz)\n", |
139 | cpu, | 136 | cpu, |
140 | per_cpu(cpu_min_freq, cpu), | 137 | per_cpu(cpu_min_freq, cpu), |
@@ -156,12 +153,12 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, | |||
156 | per_cpu(cpu_min_freq, cpu) = 0; | 153 | per_cpu(cpu_min_freq, cpu) = 0; |
157 | per_cpu(cpu_max_freq, cpu) = 0; | 154 | per_cpu(cpu_max_freq, cpu) = 0; |
158 | per_cpu(cpu_set_freq, cpu) = 0; | 155 | per_cpu(cpu_set_freq, cpu) = 0; |
159 | dprintk("managing cpu %u stopped\n", cpu); | 156 | pr_debug("managing cpu %u stopped\n", cpu); |
160 | mutex_unlock(&userspace_mutex); | 157 | mutex_unlock(&userspace_mutex); |
161 | break; | 158 | break; |
162 | case CPUFREQ_GOV_LIMITS: | 159 | case CPUFREQ_GOV_LIMITS: |
163 | mutex_lock(&userspace_mutex); | 160 | mutex_lock(&userspace_mutex); |
164 | dprintk("limit event for cpu %u: %u - %u kHz, " | 161 | pr_debug("limit event for cpu %u: %u - %u kHz, " |
165 | "currently %u kHz, last set to %u kHz\n", | 162 | "currently %u kHz, last set to %u kHz\n", |
166 | cpu, policy->min, policy->max, | 163 | cpu, policy->min, policy->max, |
167 | per_cpu(cpu_cur_freq, cpu), | 164 | per_cpu(cpu_cur_freq, cpu), |
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index 35a257dd4bb7..35a257dd4bb7 100644 --- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index c587db472a75..c587db472a75 100644 --- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c | |||
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index 05432216e224..90431cb92804 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c | |||
@@ -14,9 +14,6 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/cpufreq.h> | 15 | #include <linux/cpufreq.h> |
16 | 16 | ||
17 | #define dprintk(msg...) \ | ||
18 | cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg) | ||
19 | |||
20 | /********************************************************************* | 17 | /********************************************************************* |
21 | * FREQUENCY TABLE HELPERS * | 18 | * FREQUENCY TABLE HELPERS * |
22 | *********************************************************************/ | 19 | *********************************************************************/ |
@@ -31,11 +28,11 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, | |||
31 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { | 28 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { |
32 | unsigned int freq = table[i].frequency; | 29 | unsigned int freq = table[i].frequency; |
33 | if (freq == CPUFREQ_ENTRY_INVALID) { | 30 | if (freq == CPUFREQ_ENTRY_INVALID) { |
34 | dprintk("table entry %u is invalid, skipping\n", i); | 31 | pr_debug("table entry %u is invalid, skipping\n", i); |
35 | 32 | ||
36 | continue; | 33 | continue; |
37 | } | 34 | } |
38 | dprintk("table entry %u: %u kHz, %u index\n", | 35 | pr_debug("table entry %u: %u kHz, %u index\n", |
39 | i, freq, table[i].index); | 36 | i, freq, table[i].index); |
40 | if (freq < min_freq) | 37 | if (freq < min_freq) |
41 | min_freq = freq; | 38 | min_freq = freq; |
@@ -61,7 +58,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, | |||
61 | unsigned int i; | 58 | unsigned int i; |
62 | unsigned int count = 0; | 59 | unsigned int count = 0; |
63 | 60 | ||
64 | dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n", | 61 | pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", |
65 | policy->min, policy->max, policy->cpu); | 62 | policy->min, policy->max, policy->cpu); |
66 | 63 | ||
67 | if (!cpu_online(policy->cpu)) | 64 | if (!cpu_online(policy->cpu)) |
@@ -86,7 +83,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, | |||
86 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 83 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, |
87 | policy->cpuinfo.max_freq); | 84 | policy->cpuinfo.max_freq); |
88 | 85 | ||
89 | dprintk("verification led to (%u - %u kHz) for cpu %u\n", | 86 | pr_debug("verification led to (%u - %u kHz) for cpu %u\n", |
90 | policy->min, policy->max, policy->cpu); | 87 | policy->min, policy->max, policy->cpu); |
91 | 88 | ||
92 | return 0; | 89 | return 0; |
@@ -110,7 +107,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | |||
110 | }; | 107 | }; |
111 | unsigned int i; | 108 | unsigned int i; |
112 | 109 | ||
113 | dprintk("request for target %u kHz (relation: %u) for cpu %u\n", | 110 | pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", |
114 | target_freq, relation, policy->cpu); | 111 | target_freq, relation, policy->cpu); |
115 | 112 | ||
116 | switch (relation) { | 113 | switch (relation) { |
@@ -167,7 +164,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | |||
167 | } else | 164 | } else |
168 | *index = optimal.index; | 165 | *index = optimal.index; |
169 | 166 | ||
170 | dprintk("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, | 167 | pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, |
171 | table[*index].index); | 168 | table[*index].index); |
172 | 169 | ||
173 | return 0; | 170 | return 0; |
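For reference, the two lookup relations named in the debug output above: CPUFREQ_RELATION_H picks the highest table frequency at or below the target, CPUFREQ_RELATION_L the lowest at or above it. A toy restatement of that semantics — the real cpufreq_frequency_table_target() additionally honors policy min/max and skips CPUFREQ_ENTRY_INVALID slots:

```c
#include <stdio.h>

#define CPUFREQ_TABLE_END (~0u)

/* CPUFREQ_RELATION_H: highest frequency at or below the target */
static unsigned int pick_h(const unsigned int *t, unsigned int target)
{
        unsigned int i, best = 0;

        for (i = 0; t[i] != CPUFREQ_TABLE_END; i++)
                if (t[i] <= target && t[i] > best)
                        best = t[i];
        return best;
}

/* CPUFREQ_RELATION_L: lowest frequency at or above the target */
static unsigned int pick_l(const unsigned int *t, unsigned int target)
{
        unsigned int i, best = CPUFREQ_TABLE_END;

        for (i = 0; t[i] != CPUFREQ_TABLE_END; i++)
                if (t[i] >= target && t[i] < best)
                        best = t[i];
        return best;
}

int main(void)
{
        unsigned int freqs[] = { 800000, 1600000, 2400000, CPUFREQ_TABLE_END };

        /* a 2 GHz request resolves differently under each relation */
        printf("H: %u kHz, L: %u kHz\n",
               pick_h(freqs, 2000000), pick_l(freqs, 2000000));
        return 0;
}
```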
@@ -216,14 +213,14 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); | |||
216 | void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, | 213 | void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, |
217 | unsigned int cpu) | 214 | unsigned int cpu) |
218 | { | 215 | { |
219 | dprintk("setting show_table for cpu %u to %p\n", cpu, table); | 216 | pr_debug("setting show_table for cpu %u to %p\n", cpu, table); |
220 | per_cpu(cpufreq_show_table, cpu) = table; | 217 | per_cpu(cpufreq_show_table, cpu) = table; |
221 | } | 218 | } |
222 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); | 219 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); |
223 | 220 | ||
224 | void cpufreq_frequency_table_put_attr(unsigned int cpu) | 221 | void cpufreq_frequency_table_put_attr(unsigned int cpu) |
225 | { | 222 | { |
226 | dprintk("clearing show_table for cpu %u\n", cpu); | 223 | pr_debug("clearing show_table for cpu %u\n", cpu); |
227 | per_cpu(cpufreq_show_table, cpu) = NULL; | 224 | per_cpu(cpufreq_show_table, cpu) = NULL; |
228 | } | 225 | } |
229 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); | 226 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); |
diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index 32974cf84232..ffe1f2c92ed3 100644 --- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c +++ b/drivers/cpufreq/gx-suspmod.c | |||
@@ -142,9 +142,6 @@ module_param(max_duration, int, 0444); | |||
142 | #define POLICY_MIN_DIV 20 | 142 | #define POLICY_MIN_DIV 20 |
143 | 143 | ||
144 | 144 | ||
145 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
146 | "gx-suspmod", msg) | ||
147 | |||
148 | /** | 145 | /** |
149 | * we can detect a core multiplier from dir0_lsb | 146 | * we can detect a core multiplier from dir0_lsb |
150 | * from GX1 datasheet p.56, | 147 | * from GX1 datasheet p.56, |
@@ -191,7 +188,7 @@ static __init struct pci_dev *gx_detect_chipset(void) | |||
191 | /* check if CPU is a MediaGX or a Geode. */ | 188 | /* check if CPU is a MediaGX or a Geode. */ |
192 | if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) && | 189 | if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) && |
193 | (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { | 190 | (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { |
194 | dprintk("error: no MediaGX/Geode processor found!\n"); | 191 | pr_debug("error: no MediaGX/Geode processor found!\n"); |
195 | return NULL; | 192 | return NULL; |
196 | } | 193 | } |
197 | 194 | ||
@@ -201,7 +198,7 @@ static __init struct pci_dev *gx_detect_chipset(void) | |||
201 | return gx_pci; | 198 | return gx_pci; |
202 | } | 199 | } |
203 | 200 | ||
204 | dprintk("error: no supported chipset found!\n"); | 201 | pr_debug("error: no supported chipset found!\n"); |
205 | return NULL; | 202 | return NULL; |
206 | } | 203 | } |
207 | 204 | ||
@@ -305,14 +302,14 @@ static void gx_set_cpuspeed(unsigned int khz) | |||
305 | break; | 302 | break; |
306 | default: | 303 | default: |
307 | local_irq_restore(flags); | 304 | local_irq_restore(flags); |
308 | dprintk("fatal: try to set unknown chipset.\n"); | 305 | pr_debug("fatal: try to set unknown chipset.\n"); |
309 | return; | 306 | return; |
310 | } | 307 | } |
311 | } else { | 308 | } else { |
312 | suscfg = gx_params->pci_suscfg & ~(SUSMOD); | 309 | suscfg = gx_params->pci_suscfg & ~(SUSMOD); |
313 | gx_params->off_duration = 0; | 310 | gx_params->off_duration = 0; |
314 | gx_params->on_duration = 0; | 311 | gx_params->on_duration = 0; |
315 | dprintk("suspend modulation disabled: cpu runs 100%% speed.\n"); | 312 | pr_debug("suspend modulation disabled: cpu runs 100%% speed.\n"); |
316 | } | 313 | } |
317 | 314 | ||
318 | gx_write_byte(PCI_MODOFF, gx_params->off_duration); | 315 | gx_write_byte(PCI_MODOFF, gx_params->off_duration); |
@@ -327,9 +324,9 @@ static void gx_set_cpuspeed(unsigned int khz) | |||
327 | 324 | ||
328 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 325 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
329 | 326 | ||
330 | dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", | 327 | pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", |
331 | gx_params->on_duration * 32, gx_params->off_duration * 32); | 328 | gx_params->on_duration * 32, gx_params->off_duration * 32); |
332 | dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); | 329 | pr_debug("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); |
333 | } | 330 | } |
334 | 331 | ||
335 | /**************************************************************** | 332 | /**************************************************************** |
@@ -428,8 +425,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | |||
428 | stock_freq = maxfreq; | 425 | stock_freq = maxfreq; |
429 | curfreq = gx_get_cpuspeed(0); | 426 | curfreq = gx_get_cpuspeed(0); |
430 | 427 | ||
431 | dprintk("cpu max frequency is %d.\n", maxfreq); | 428 | pr_debug("cpu max frequency is %d.\n", maxfreq); |
432 | dprintk("cpu current frequency is %dkHz.\n", curfreq); | 429 | pr_debug("cpu current frequency is %dkHz.\n", curfreq); |
433 | 430 | ||
434 | /* setup basic struct for cpufreq API */ | 431 | /* setup basic struct for cpufreq API */ |
435 | policy->cpu = 0; | 432 | policy->cpu = 0; |
@@ -475,7 +472,7 @@ static int __init cpufreq_gx_init(void) | |||
475 | if (max_duration > 0xff) | 472 | if (max_duration > 0xff) |
476 | max_duration = 0xff; | 473 | max_duration = 0xff; |
477 | 474 | ||
478 | dprintk("geode suspend modulation available.\n"); | 475 | pr_debug("geode suspend modulation available.\n"); |
479 | 476 | ||
480 | params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); | 477 | params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); |
481 | if (params == NULL) | 478 | if (params == NULL) |
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index cf48cdd6907d..f47d26e2a135 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c | |||
@@ -77,9 +77,6 @@ static int scale_voltage; | |||
77 | static int disable_acpi_c3; | 77 | static int disable_acpi_c3; |
78 | static int revid_errata; | 78 | static int revid_errata; |
79 | 79 | ||
80 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
81 | "longhaul", msg) | ||
82 | |||
83 | 80 | ||
84 | /* Clock ratios multiplied by 10 */ | 81 | /* Clock ratios multiplied by 10 */ |
85 | static int mults[32]; | 82 | static int mults[32]; |
@@ -87,7 +84,6 @@ static int eblcr[32]; | |||
87 | static int longhaul_version; | 84 | static int longhaul_version; |
88 | static struct cpufreq_frequency_table *longhaul_table; | 85 | static struct cpufreq_frequency_table *longhaul_table; |
89 | 86 | ||
90 | #ifdef CONFIG_CPU_FREQ_DEBUG | ||
91 | static char speedbuffer[8]; | 87 | static char speedbuffer[8]; |
92 | 88 | ||
93 | static char *print_speed(int speed) | 89 | static char *print_speed(int speed) |
@@ -106,7 +102,6 @@ static char *print_speed(int speed) | |||
106 | 102 | ||
107 | return speedbuffer; | 103 | return speedbuffer; |
108 | } | 104 | } |
109 | #endif | ||
110 | 105 | ||
111 | 106 | ||
112 | static unsigned int calc_speed(int mult) | 107 | static unsigned int calc_speed(int mult) |
@@ -275,7 +270,7 @@ static void longhaul_setstate(unsigned int table_index) | |||
275 | 270 | ||
276 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 271 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
277 | 272 | ||
278 | dprintk("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", | 273 | pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", |
279 | fsb, mult/10, mult%10, print_speed(speed/1000)); | 274 | fsb, mult/10, mult%10, print_speed(speed/1000)); |
280 | retry_loop: | 275 | retry_loop: |
281 | preempt_disable(); | 276 | preempt_disable(); |
@@ -460,12 +455,12 @@ static int __cpuinit longhaul_get_ranges(void) | |||
460 | break; | 455 | break; |
461 | } | 456 | } |
462 | 457 | ||
463 | dprintk("MinMult:%d.%dx MaxMult:%d.%dx\n", | 458 | pr_debug("MinMult:%d.%dx MaxMult:%d.%dx\n", |
464 | minmult/10, minmult%10, maxmult/10, maxmult%10); | 459 | minmult/10, minmult%10, maxmult/10, maxmult%10); |
465 | 460 | ||
466 | highest_speed = calc_speed(maxmult); | 461 | highest_speed = calc_speed(maxmult); |
467 | lowest_speed = calc_speed(minmult); | 462 | lowest_speed = calc_speed(minmult); |
468 | dprintk("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, | 463 | pr_debug("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, |
469 | print_speed(lowest_speed/1000), | 464 | print_speed(lowest_speed/1000), |
470 | print_speed(highest_speed/1000)); | 465 | print_speed(highest_speed/1000)); |
471 | 466 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.h b/drivers/cpufreq/longhaul.h index cbf48fbca881..cbf48fbca881 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.h +++ b/drivers/cpufreq/longhaul.h | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index d9f51367666b..34ea359b370e 100644 --- a/arch/x86/kernel/cpu/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c | |||
@@ -15,9 +15,6 @@ | |||
15 | #include <asm/msr.h> | 15 | #include <asm/msr.h> |
16 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
17 | 17 | ||
18 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
19 | "longrun", msg) | ||
20 | |||
21 | static struct cpufreq_driver longrun_driver; | 18 | static struct cpufreq_driver longrun_driver; |
22 | 19 | ||
23 | /** | 20 | /** |
@@ -40,14 +37,14 @@ static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy) | |||
40 | u32 msr_lo, msr_hi; | 37 | u32 msr_lo, msr_hi; |
41 | 38 | ||
42 | rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); | 39 | rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); |
43 | dprintk("longrun flags are %x - %x\n", msr_lo, msr_hi); | 40 | pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi); |
44 | if (msr_lo & 0x01) | 41 | if (msr_lo & 0x01) |
45 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; | 42 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; |
46 | else | 43 | else |
47 | policy->policy = CPUFREQ_POLICY_POWERSAVE; | 44 | policy->policy = CPUFREQ_POLICY_POWERSAVE; |
48 | 45 | ||
49 | rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); | 46 | rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); |
50 | dprintk("longrun ctrl is %x - %x\n", msr_lo, msr_hi); | 47 | pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi); |
51 | msr_lo &= 0x0000007F; | 48 | msr_lo &= 0x0000007F; |
52 | msr_hi &= 0x0000007F; | 49 | msr_hi &= 0x0000007F; |
53 | 50 | ||
@@ -150,7 +147,7 @@ static unsigned int longrun_get(unsigned int cpu) | |||
150 | return 0; | 147 | return 0; |
151 | 148 | ||
152 | cpuid(0x80860007, &eax, &ebx, &ecx, &edx); | 149 | cpuid(0x80860007, &eax, &ebx, &ecx, &edx); |
153 | dprintk("cpuid eax is %u\n", eax); | 150 | pr_debug("cpuid eax is %u\n", eax); |
154 | 151 | ||
155 | return eax * 1000; | 152 | return eax * 1000; |
156 | } | 153 | } |
@@ -196,7 +193,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, | |||
196 | rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); | 193 | rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); |
197 | *high_freq = msr_lo * 1000; /* to kHz */ | 194 | *high_freq = msr_lo * 1000; /* to kHz */ |
198 | 195 | ||
199 | dprintk("longrun table interface told %u - %u kHz\n", | 196 | pr_debug("longrun table interface told %u - %u kHz\n", |
200 | *low_freq, *high_freq); | 197 | *low_freq, *high_freq); |
201 | 198 | ||
202 | if (*low_freq > *high_freq) | 199 | if (*low_freq > *high_freq) |
@@ -207,7 +204,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, | |||
207 | /* set the upper border to the value determined during TSC init */ | 204 | /* set the upper border to the value determined during TSC init */ |
208 | *high_freq = (cpu_khz / 1000); | 205 | *high_freq = (cpu_khz / 1000); |
209 | *high_freq = *high_freq * 1000; | 206 | *high_freq = *high_freq * 1000; |
210 | dprintk("high frequency is %u kHz\n", *high_freq); | 207 | pr_debug("high frequency is %u kHz\n", *high_freq); |
211 | 208 | ||
212 | /* get current borders */ | 209 | /* get current borders */ |
213 | rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); | 210 | rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); |
@@ -233,7 +230,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, | |||
233 | /* restore values */ | 230 | /* restore values */ |
234 | wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi); | 231 | wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi); |
235 | } | 232 | } |
236 | dprintk("percentage is %u %%, freq is %u MHz\n", ecx, eax); | 233 | pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax); |
237 | 234 | ||
238 | /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) | 235 | /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) |
239 | * equals | 236 | * equals |
@@ -249,7 +246,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, | |||
249 | edx = ((eax - ebx) * 100) / (100 - ecx); | 246 | edx = ((eax - ebx) * 100) / (100 - ecx); |
250 | *low_freq = edx * 1000; /* back to kHz */ | 247 | *low_freq = edx * 1000; /* back to kHz */ |
251 | 248 | ||
252 | dprintk("low frequency is %u kHz\n", *low_freq); | 249 | pr_debug("low frequency is %u kHz\n", *low_freq); |
253 | 250 | ||
254 | if (*low_freq > *high_freq) | 251 | if (*low_freq > *high_freq) |
255 | *low_freq = *high_freq; | 252 | *low_freq = *high_freq; |
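Taken together, the longrun.c hunks show the pattern repeated across this whole series: each cpufreq driver used to define a private dprintk macro routed through cpufreq_debug_printk(), and every call site now uses the generic pr_debug(). A minimal sketch of the before and after; the "longrun: " pr_fmt prefix is an illustrative assumption, not something this patch adds:

    #include <linux/types.h>

    /* Old per-driver pattern, removed by this series: */
    #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
            "longrun", msg)

    /* New pattern: define pr_fmt() before the printk include so
     * pr_debug() tags its output, then call the generic helper.
     * Without DEBUG or CONFIG_DYNAMIC_DEBUG the calls emit no code;
     * with dynamic debug they can be enabled per call site at runtime.
     */
    #define pr_fmt(fmt) "longrun: " fmt
    #include <linux/printk.h>

    static void sketch(u32 msr_lo, u32 msr_hi)
    {
            pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi);
    }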
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.c b/drivers/cpufreq/mperf.c index 911e193018ae..911e193018ae 100644 --- a/arch/x86/kernel/cpu/cpufreq/mperf.c +++ b/drivers/cpufreq/mperf.c | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.h b/drivers/cpufreq/mperf.h index 5dbf2950dc22..5dbf2950dc22 100644 --- a/arch/x86/kernel/cpu/cpufreq/mperf.h +++ b/drivers/cpufreq/mperf.h | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index 52c93648e492..6be3e0760c26 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c | |||
@@ -35,8 +35,6 @@ | |||
35 | #include "speedstep-lib.h" | 35 | #include "speedstep-lib.h" |
36 | 36 | ||
37 | #define PFX "p4-clockmod: " | 37 | #define PFX "p4-clockmod: " |
38 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
39 | "p4-clockmod", msg) | ||
40 | 38 | ||
41 | /* | 39 | /* |
42 | * Duty Cycle (3bits), note DC_DISABLE is not specified in | 40 | * Duty Cycle (3bits), note DC_DISABLE is not specified in |
@@ -66,7 +64,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) | |||
66 | rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); | 64 | rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); |
67 | 65 | ||
68 | if (l & 0x01) | 66 | if (l & 0x01) |
69 | dprintk("CPU#%d currently thermal throttled\n", cpu); | 67 | pr_debug("CPU#%d currently thermal throttled\n", cpu); |
70 | 68 | ||
71 | if (has_N44_O17_errata[cpu] && | 69 | if (has_N44_O17_errata[cpu] && |
72 | (newstate == DC_25PT || newstate == DC_DFLT)) | 70 | (newstate == DC_25PT || newstate == DC_DFLT)) |
@@ -74,10 +72,10 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) | |||
74 | 72 | ||
75 | rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); | 73 | rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); |
76 | if (newstate == DC_DISABLE) { | 74 | if (newstate == DC_DISABLE) { |
77 | dprintk("CPU#%d disabling modulation\n", cpu); | 75 | pr_debug("CPU#%d disabling modulation\n", cpu); |
78 | wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); | 76 | wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); |
79 | } else { | 77 | } else { |
80 | dprintk("CPU#%d setting duty cycle to %d%%\n", | 78 | pr_debug("CPU#%d setting duty cycle to %d%%\n", |
81 | cpu, ((125 * newstate) / 10)); | 79 | cpu, ((125 * newstate) / 10)); |
82 | /* bits 63 - 5 : reserved | 80 | /* bits 63 - 5 : reserved |
83 | * bit 4 : enable/disable | 81 | * bit 4 : enable/disable |
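The duty-cycle message above converts the 3-bit clock-modulation state to a percentage with integer math: each state step is 12.5 %, so (125 * newstate) / 10 prints the whole-number value. A standalone check of that arithmetic (plain userspace C, not driver code):

    #include <stdio.h>

    int main(void)
    {
            /* 3-bit duty-cycle states, 12.5 % per step */
            for (unsigned int state = 1; state < 8; state++)
                    printf("state %u -> %u%%\n", state, (125 * state) / 10);
            return 0;
    }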
@@ -217,7 +215,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) | |||
217 | case 0x0f11: | 215 | case 0x0f11: |
218 | case 0x0f12: | 216 | case 0x0f12: |
219 | has_N44_O17_errata[policy->cpu] = 1; | 217 | has_N44_O17_errata[policy->cpu] = 1; |
220 | dprintk("has errata -- disabling low frequencies\n"); | 218 | pr_debug("has errata -- disabling low frequencies\n"); |
221 | } | 219 | } |
222 | 220 | ||
223 | if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D && | 221 | if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D && |
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 755a31e0f5b0..7b0603eb0129 100644 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | #include <acpi/processor.h> | 40 | #include <acpi/processor.h> |
41 | 41 | ||
42 | #define PCC_VERSION "1.00.00" | 42 | #define PCC_VERSION "1.10.00" |
43 | #define POLL_LOOPS 300 | 43 | #define POLL_LOOPS 300 |
44 | 44 | ||
45 | #define CMD_COMPLETE 0x1 | 45 | #define CMD_COMPLETE 0x1 |
@@ -48,9 +48,6 @@ | |||
48 | 48 | ||
49 | #define BUF_SZ 4 | 49 | #define BUF_SZ 4 |
50 | 50 | ||
51 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
52 | "pcc-cpufreq", msg) | ||
53 | |||
54 | struct pcc_register_resource { | 51 | struct pcc_register_resource { |
55 | u8 descriptor; | 52 | u8 descriptor; |
56 | u16 length; | 53 | u16 length; |
@@ -102,7 +99,7 @@ static struct acpi_generic_address doorbell; | |||
102 | static u64 doorbell_preserve; | 99 | static u64 doorbell_preserve; |
103 | static u64 doorbell_write; | 100 | static u64 doorbell_write; |
104 | 101 | ||
105 | static u8 OSC_UUID[16] = {0x63, 0x9B, 0x2C, 0x9F, 0x70, 0x91, 0x49, 0x1f, | 102 | static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49, |
106 | 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46}; | 103 | 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46}; |
107 | 104 | ||
108 | struct pcc_cpu { | 105 | struct pcc_cpu { |
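The OSC_UUID change is a real fix riding along with the move: ACPI expects the _OSC UUID as a 16-byte buffer in the mixed-endian GUID memory layout, where the leading 32-bit field and the two 16-bit fields are stored little-endian and the trailing eight bytes keep their textual order. The old array used plain textual byte order throughout. A hypothetical helper showing the required swizzle (not part of the driver):

    #include <stdint.h>
    #include <string.h>

    /* Textual GUID byte order -> mixed-endian ACPI layout: byte-swap
     * the u32 field and both u16 fields, copy the last 8 bytes as-is.
     */
    static void guid_to_acpi_layout(const uint8_t text[16], uint8_t out[16])
    {
            out[0] = text[3]; out[1] = text[2];
            out[2] = text[1]; out[3] = text[0];
            out[4] = text[5]; out[5] = text[4];
            out[6] = text[7]; out[7] = text[6];
            memcpy(&out[8], &text[8], 8);
    }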
@@ -152,7 +149,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) | |||
152 | 149 | ||
153 | spin_lock(&pcc_lock); | 150 | spin_lock(&pcc_lock); |
154 | 151 | ||
155 | dprintk("get: get_freq for CPU %d\n", cpu); | 152 | pr_debug("get: get_freq for CPU %d\n", cpu); |
156 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); | 153 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); |
157 | 154 | ||
158 | input_buffer = 0x1; | 155 | input_buffer = 0x1; |
@@ -170,7 +167,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) | |||
170 | 167 | ||
171 | status = ioread16(&pcch_hdr->status); | 168 | status = ioread16(&pcch_hdr->status); |
172 | if (status != CMD_COMPLETE) { | 169 | if (status != CMD_COMPLETE) { |
173 | dprintk("get: FAILED: for CPU %d, status is %d\n", | 170 | pr_debug("get: FAILED: for CPU %d, status is %d\n", |
174 | cpu, status); | 171 | cpu, status); |
175 | goto cmd_incomplete; | 172 | goto cmd_incomplete; |
176 | } | 173 | } |
@@ -178,14 +175,14 @@ static unsigned int pcc_get_freq(unsigned int cpu) | |||
178 | curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff)) | 175 | curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff)) |
179 | / 100) * 1000); | 176 | / 100) * 1000); |
180 | 177 | ||
181 | dprintk("get: SUCCESS: (virtual) output_offset for cpu %d is " | 178 | pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is " |
182 | "0x%x, contains a value of: 0x%x. Speed is: %d MHz\n", | 179 | "0x%p, contains a value of: 0x%x. Speed is: %d MHz\n", |
183 | cpu, (pcch_virt_addr + pcc_cpu_data->output_offset), | 180 | cpu, (pcch_virt_addr + pcc_cpu_data->output_offset), |
184 | output_buffer, curr_freq); | 181 | output_buffer, curr_freq); |
185 | 182 | ||
186 | freq_limit = (output_buffer >> 8) & 0xff; | 183 | freq_limit = (output_buffer >> 8) & 0xff; |
187 | if (freq_limit != 0xff) { | 184 | if (freq_limit != 0xff) { |
188 | dprintk("get: frequency for cpu %d is being temporarily" | 185 | pr_debug("get: frequency for cpu %d is being temporarily" |
189 | " capped at %d\n", cpu, curr_freq); | 186 | " capped at %d\n", cpu, curr_freq); |
190 | } | 187 | } |
191 | 188 | ||
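Besides the rename, two messages in this hunk fix their format specifiers: the value being printed is a pointer (pcch_virt_addr plus a per-CPU offset), so %x would truncate it on 64-bit kernels and trip gcc's format checking, and it becomes %p. A minimal sketch, with pcch_virt_addr standing in for the driver's ioremap()ed mapping:

    #include <linux/types.h>
    #include <linux/printk.h>
    #include <linux/io.h>

    static void __iomem *pcch_virt_addr;    /* ioremap()ed elsewhere */

    static void show_offset(unsigned int cpu, u32 output_offset)
    {
            /* %p prints the full pointer value; %x expects an unsigned
             * int and would drop the upper 32 bits on 64-bit kernels.
             */
            pr_debug("cpu %u output_offset is 0x%p\n",
                     cpu, pcch_virt_addr + output_offset);
    }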
@@ -212,8 +209,8 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, | |||
212 | cpu = policy->cpu; | 209 | cpu = policy->cpu; |
213 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); | 210 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); |
214 | 211 | ||
215 | dprintk("target: CPU %d should go to target freq: %d " | 212 | pr_debug("target: CPU %d should go to target freq: %d " |
216 | "(virtual) input_offset is 0x%x\n", | 213 | "(virtual) input_offset is 0x%p\n", |
217 | cpu, target_freq, | 214 | cpu, target_freq, |
218 | (pcch_virt_addr + pcc_cpu_data->input_offset)); | 215 | (pcch_virt_addr + pcc_cpu_data->input_offset)); |
219 | 216 | ||
@@ -234,14 +231,14 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, | |||
234 | 231 | ||
235 | status = ioread16(&pcch_hdr->status); | 232 | status = ioread16(&pcch_hdr->status); |
236 | if (status != CMD_COMPLETE) { | 233 | if (status != CMD_COMPLETE) { |
237 | dprintk("target: FAILED for cpu %d, with status: 0x%x\n", | 234 | pr_debug("target: FAILED for cpu %d, with status: 0x%x\n", |
238 | cpu, status); | 235 | cpu, status); |
239 | goto cmd_incomplete; | 236 | goto cmd_incomplete; |
240 | } | 237 | } |
241 | iowrite16(0, &pcch_hdr->status); | 238 | iowrite16(0, &pcch_hdr->status); |
242 | 239 | ||
243 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 240 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
244 | dprintk("target: was SUCCESSFUL for cpu %d\n", cpu); | 241 | pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); |
245 | spin_unlock(&pcc_lock); | 242 | spin_unlock(&pcc_lock); |
246 | 243 | ||
247 | return 0; | 244 | return 0; |
@@ -293,7 +290,7 @@ static int pcc_get_offset(int cpu) | |||
293 | memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); | 290 | memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); |
294 | memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ); | 291 | memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ); |
295 | 292 | ||
296 | dprintk("pcc_get_offset: for CPU %d: pcc_cpu_data " | 293 | pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data " |
297 | "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n", | 294 | "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n", |
298 | cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset); | 295 | cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset); |
299 | out_free: | 296 | out_free: |
@@ -410,7 +407,7 @@ static int __init pcc_cpufreq_probe(void) | |||
410 | if (ACPI_SUCCESS(status)) { | 407 | if (ACPI_SUCCESS(status)) { |
411 | ret = pcc_cpufreq_do_osc(&osc_handle); | 408 | ret = pcc_cpufreq_do_osc(&osc_handle); |
412 | if (ret) | 409 | if (ret) |
413 | dprintk("probe: _OSC evaluation did not succeed\n"); | 410 | pr_debug("probe: _OSC evaluation did not succeed\n"); |
414 | /* Firmware's use of _OSC is optional */ | 411 | /* Firmware's use of _OSC is optional */ |
415 | ret = 0; | 412 | ret = 0; |
416 | } | 413 | } |
@@ -433,7 +430,7 @@ static int __init pcc_cpufreq_probe(void) | |||
433 | 430 | ||
434 | mem_resource = (struct pcc_memory_resource *)member->buffer.pointer; | 431 | mem_resource = (struct pcc_memory_resource *)member->buffer.pointer; |
435 | 432 | ||
436 | dprintk("probe: mem_resource descriptor: 0x%x," | 433 | pr_debug("probe: mem_resource descriptor: 0x%x," |
437 | " length: %d, space_id: %d, resource_usage: %d," | 434 | " length: %d, space_id: %d, resource_usage: %d," |
438 | " type_specific: %d, granularity: 0x%llx," | 435 | " type_specific: %d, granularity: 0x%llx," |
439 | " minimum: 0x%llx, maximum: 0x%llx," | 436 | " minimum: 0x%llx, maximum: 0x%llx," |
@@ -453,13 +450,13 @@ static int __init pcc_cpufreq_probe(void) | |||
453 | pcch_virt_addr = ioremap_nocache(mem_resource->minimum, | 450 | pcch_virt_addr = ioremap_nocache(mem_resource->minimum, |
454 | mem_resource->address_length); | 451 | mem_resource->address_length); |
455 | if (pcch_virt_addr == NULL) { | 452 | if (pcch_virt_addr == NULL) { |
456 | dprintk("probe: could not map shared mem region\n"); | 453 | pr_debug("probe: could not map shared mem region\n"); |
457 | goto out_free; | 454 | goto out_free; |
458 | } | 455 | } |
459 | pcch_hdr = pcch_virt_addr; | 456 | pcch_hdr = pcch_virt_addr; |
460 | 457 | ||
461 | dprintk("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); | 458 | pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); |
462 | dprintk("probe: PCCH header is at physical address: 0x%llx," | 459 | pr_debug("probe: PCCH header is at physical address: 0x%llx," |
463 | " signature: 0x%x, length: %d bytes, major: %d, minor: %d," | 460 | " signature: 0x%x, length: %d bytes, major: %d, minor: %d," |
464 | " supported features: 0x%x, command field: 0x%x," | 461 | " supported features: 0x%x, command field: 0x%x," |
465 | " status field: 0x%x, nominal latency: %d us\n", | 462 | " status field: 0x%x, nominal latency: %d us\n", |
@@ -469,7 +466,7 @@ static int __init pcc_cpufreq_probe(void) | |||
469 | ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status), | 466 | ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status), |
470 | ioread32(&pcch_hdr->latency)); | 467 | ioread32(&pcch_hdr->latency)); |
471 | 468 | ||
472 | dprintk("probe: min time between commands: %d us," | 469 | pr_debug("probe: min time between commands: %d us," |
473 | " max time between commands: %d us," | 470 | " max time between commands: %d us," |
474 | " nominal CPU frequency: %d MHz," | 471 | " nominal CPU frequency: %d MHz," |
475 | " minimum CPU frequency: %d MHz," | 472 | " minimum CPU frequency: %d MHz," |
@@ -494,7 +491,7 @@ static int __init pcc_cpufreq_probe(void) | |||
494 | doorbell.access_width = 64; | 491 | doorbell.access_width = 64; |
495 | doorbell.address = reg_resource->address; | 492 | doorbell.address = reg_resource->address; |
496 | 493 | ||
497 | dprintk("probe: doorbell: space_id is %d, bit_width is %d, " | 494 | pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " |
498 | "bit_offset is %d, access_width is %d, address is 0x%llx\n", | 495 | "bit_offset is %d, access_width is %d, address is 0x%llx\n", |
499 | doorbell.space_id, doorbell.bit_width, doorbell.bit_offset, | 496 | doorbell.space_id, doorbell.bit_width, doorbell.bit_offset, |
500 | doorbell.access_width, reg_resource->address); | 497 | doorbell.access_width, reg_resource->address); |
@@ -515,7 +512,7 @@ static int __init pcc_cpufreq_probe(void) | |||
515 | 512 | ||
516 | doorbell_write = member->integer.value; | 513 | doorbell_write = member->integer.value; |
517 | 514 | ||
518 | dprintk("probe: doorbell_preserve: 0x%llx," | 515 | pr_debug("probe: doorbell_preserve: 0x%llx," |
519 | " doorbell_write: 0x%llx\n", | 516 | " doorbell_write: 0x%llx\n", |
520 | doorbell_preserve, doorbell_write); | 517 | doorbell_preserve, doorbell_write); |
521 | 518 | ||
@@ -550,7 +547,7 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
550 | 547 | ||
551 | result = pcc_get_offset(cpu); | 548 | result = pcc_get_offset(cpu); |
552 | if (result) { | 549 | if (result) { |
553 | dprintk("init: PCCP evaluation failed\n"); | 550 | pr_debug("init: PCCP evaluation failed\n"); |
554 | goto out; | 551 | goto out; |
555 | } | 552 | } |
556 | 553 | ||
@@ -561,12 +558,12 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
561 | policy->cur = pcc_get_freq(cpu); | 558 | policy->cur = pcc_get_freq(cpu); |
562 | 559 | ||
563 | if (!policy->cur) { | 560 | if (!policy->cur) { |
564 | dprintk("init: Unable to get current CPU frequency\n"); | 561 | pr_debug("init: Unable to get current CPU frequency\n"); |
565 | result = -EINVAL; | 562 | result = -EINVAL; |
566 | goto out; | 563 | goto out; |
567 | } | 564 | } |
568 | 565 | ||
569 | dprintk("init: policy->max is %d, policy->min is %d\n", | 566 | pr_debug("init: policy->max is %d, policy->min is %d\n", |
570 | policy->max, policy->min); | 567 | policy->max, policy->min); |
571 | out: | 568 | out: |
572 | return result; | 569 | return result; |
@@ -597,7 +594,7 @@ static int __init pcc_cpufreq_init(void) | |||
597 | 594 | ||
598 | ret = pcc_cpufreq_probe(); | 595 | ret = pcc_cpufreq_probe(); |
599 | if (ret) { | 596 | if (ret) { |
600 | dprintk("pcc_cpufreq_init: PCCH evaluation failed\n"); | 597 | pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n"); |
601 | return ret; | 598 | return ret; |
602 | } | 599 | } |
603 | 600 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index b3379d6a5c57..b3379d6a5c57 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 4a45fd6e41ba..d71d9f372359 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c | |||
@@ -68,7 +68,6 @@ union powernow_acpi_control_t { | |||
68 | }; | 68 | }; |
69 | #endif | 69 | #endif |
70 | 70 | ||
71 | #ifdef CONFIG_CPU_FREQ_DEBUG | ||
72 | /* divide by 1000 to get VCore voltage in V. */ | 71 | /* divide by 1000 to get VCore voltage in V. */ |
73 | static const int mobile_vid_table[32] = { | 72 | static const int mobile_vid_table[32] = { |
74 | 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, | 73 | 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, |
@@ -76,7 +75,6 @@ static const int mobile_vid_table[32] = { | |||
76 | 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, | 75 | 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, |
77 | 1075, 1050, 1025, 1000, 975, 950, 925, 0, | 76 | 1075, 1050, 1025, 1000, 975, 950, 925, 0, |
78 | }; | 77 | }; |
79 | #endif | ||
80 | 78 | ||
81 | /* divide by 10 to get FID. */ | 79 | /* divide by 10 to get FID. */ |
82 | static const int fid_codes[32] = { | 80 | static const int fid_codes[32] = { |
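The CONFIG_CPU_FREQ_DEBUG guard around mobile_vid_table has to go because pr_debug() still evaluates and type-checks its arguments even when debug output is compiled out, so any table referenced from a debug print must stay built in. Roughly how <linux/printk.h> arranges that (simplified; the real definition varies by version and adds a dynamic-debug branch under CONFIG_DYNAMIC_DEBUG):

    #ifdef DEBUG
    #define pr_debug(fmt, ...) \
            printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
    #else
    /* emits no code, but the arguments are still type-checked, so any
     * symbols they mention must exist in the build */
    #define pr_debug(fmt, ...) \
            no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
    #endif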
@@ -103,9 +101,6 @@ static unsigned int fsb; | |||
103 | static unsigned int latency; | 101 | static unsigned int latency; |
104 | static char have_a0; | 102 | static char have_a0; |
105 | 103 | ||
106 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
107 | "powernow-k7", msg) | ||
108 | |||
109 | static int check_fsb(unsigned int fsbspeed) | 104 | static int check_fsb(unsigned int fsbspeed) |
110 | { | 105 | { |
111 | int delta; | 106 | int delta; |
@@ -209,7 +204,7 @@ static int get_ranges(unsigned char *pst) | |||
209 | vid = *pst++; | 204 | vid = *pst++; |
210 | powernow_table[j].index |= (vid << 8); /* upper 8 bits */ | 205 | powernow_table[j].index |= (vid << 8); /* upper 8 bits */ |
211 | 206 | ||
212 | dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " | 207 | pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " |
213 | "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, | 208 | "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, |
214 | fid_codes[fid] % 10, speed/1000, vid, | 209 | fid_codes[fid] % 10, speed/1000, vid, |
215 | mobile_vid_table[vid]/1000, | 210 | mobile_vid_table[vid]/1000, |
@@ -367,7 +362,7 @@ static int powernow_acpi_init(void) | |||
367 | unsigned int speed, speed_mhz; | 362 | unsigned int speed, speed_mhz; |
368 | 363 | ||
369 | pc.val = (unsigned long) state->control; | 364 | pc.val = (unsigned long) state->control; |
370 | dprintk("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", | 365 | pr_debug("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", |
371 | i, | 366 | i, |
372 | (u32) state->core_frequency, | 367 | (u32) state->core_frequency, |
373 | (u32) state->power, | 368 | (u32) state->power, |
@@ -401,7 +396,7 @@ static int powernow_acpi_init(void) | |||
401 | invalidate_entry(i); | 396 | invalidate_entry(i); |
402 | } | 397 | } |
403 | 398 | ||
404 | dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " | 399 | pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " |
405 | "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, | 400 | "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, |
406 | fid_codes[fid] % 10, speed_mhz, vid, | 401 | fid_codes[fid] % 10, speed_mhz, vid, |
407 | mobile_vid_table[vid]/1000, | 402 | mobile_vid_table[vid]/1000, |
@@ -409,7 +404,7 @@ static int powernow_acpi_init(void) | |||
409 | 404 | ||
410 | if (state->core_frequency != speed_mhz) { | 405 | if (state->core_frequency != speed_mhz) { |
411 | state->core_frequency = speed_mhz; | 406 | state->core_frequency = speed_mhz; |
412 | dprintk(" Corrected ACPI frequency to %d\n", | 407 | pr_debug(" Corrected ACPI frequency to %d\n", |
413 | speed_mhz); | 408 | speed_mhz); |
414 | } | 409 | } |
415 | 410 | ||
@@ -453,8 +448,8 @@ static int powernow_acpi_init(void) | |||
453 | 448 | ||
454 | static void print_pst_entry(struct pst_s *pst, unsigned int j) | 449 | static void print_pst_entry(struct pst_s *pst, unsigned int j) |
455 | { | 450 | { |
456 | dprintk("PST:%d (@%p)\n", j, pst); | 451 | pr_debug("PST:%d (@%p)\n", j, pst); |
457 | dprintk(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", | 452 | pr_debug(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", |
458 | pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); | 453 | pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); |
459 | } | 454 | } |
460 | 455 | ||
@@ -474,20 +469,20 @@ static int powernow_decode_bios(int maxfid, int startvid) | |||
474 | p = phys_to_virt(i); | 469 | p = phys_to_virt(i); |
475 | 470 | ||
476 | if (memcmp(p, "AMDK7PNOW!", 10) == 0) { | 471 | if (memcmp(p, "AMDK7PNOW!", 10) == 0) { |
477 | dprintk("Found PSB header at %p\n", p); | 472 | pr_debug("Found PSB header at %p\n", p); |
478 | psb = (struct psb_s *) p; | 473 | psb = (struct psb_s *) p; |
479 | dprintk("Table version: 0x%x\n", psb->tableversion); | 474 | pr_debug("Table version: 0x%x\n", psb->tableversion); |
480 | if (psb->tableversion != 0x12) { | 475 | if (psb->tableversion != 0x12) { |
481 | printk(KERN_INFO PFX "Sorry, only v1.2 tables" | 476 | printk(KERN_INFO PFX "Sorry, only v1.2 tables" |
482 | " supported right now\n"); | 477 | " supported right now\n"); |
483 | return -ENODEV; | 478 | return -ENODEV; |
484 | } | 479 | } |
485 | 480 | ||
486 | dprintk("Flags: 0x%x\n", psb->flags); | 481 | pr_debug("Flags: 0x%x\n", psb->flags); |
487 | if ((psb->flags & 1) == 0) | 482 | if ((psb->flags & 1) == 0) |
488 | dprintk("Mobile voltage regulator\n"); | 483 | pr_debug("Mobile voltage regulator\n"); |
489 | else | 484 | else |
490 | dprintk("Desktop voltage regulator\n"); | 485 | pr_debug("Desktop voltage regulator\n"); |
491 | 486 | ||
492 | latency = psb->settlingtime; | 487 | latency = psb->settlingtime; |
493 | if (latency < 100) { | 488 | if (latency < 100) { |
@@ -497,9 +492,9 @@ static int powernow_decode_bios(int maxfid, int startvid) | |||
497 | "Correcting.\n", latency); | 492 | "Correcting.\n", latency); |
498 | latency = 100; | 493 | latency = 100; |
499 | } | 494 | } |
500 | dprintk("Settling Time: %d microseconds.\n", | 495 | pr_debug("Settling Time: %d microseconds.\n", |
501 | psb->settlingtime); | 496 | psb->settlingtime); |
502 | dprintk("Has %d PST tables. (Only dumping ones " | 497 | pr_debug("Has %d PST tables. (Only dumping ones " |
503 | "relevant to this CPU).\n", | 498 | "relevant to this CPU).\n", |
504 | psb->numpst); | 499 | psb->numpst); |
505 | 500 | ||
@@ -650,7 +645,7 @@ static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy) | |||
650 | printk(KERN_WARNING PFX "can not determine bus frequency\n"); | 645 | printk(KERN_WARNING PFX "can not determine bus frequency\n"); |
651 | return -EINVAL; | 646 | return -EINVAL; |
652 | } | 647 | } |
653 | dprintk("FSB: %3dMHz\n", fsb/1000); | 648 | pr_debug("FSB: %3dMHz\n", fsb/1000); |
654 | 649 | ||
655 | if (dmi_check_system(powernow_dmi_table) || acpi_force) { | 650 | if (dmi_check_system(powernow_dmi_table) || acpi_force) { |
656 | printk(KERN_INFO PFX "PSB/PST known to be broken. " | 651 | printk(KERN_INFO PFX "PSB/PST known to be broken. " |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.h b/drivers/cpufreq/powernow-k7.h index 35fb4eaf6e1c..35fb4eaf6e1c 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.h +++ b/drivers/cpufreq/powernow-k7.h | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 2368e38327b3..83479b6fb9a1 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c | |||
@@ -139,7 +139,7 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data) | |||
139 | } | 139 | } |
140 | do { | 140 | do { |
141 | if (i++ > 10000) { | 141 | if (i++ > 10000) { |
142 | dprintk("detected change pending stuck\n"); | 142 | pr_debug("detected change pending stuck\n"); |
143 | return 1; | 143 | return 1; |
144 | } | 144 | } |
145 | rdmsr(MSR_FIDVID_STATUS, lo, hi); | 145 | rdmsr(MSR_FIDVID_STATUS, lo, hi); |
@@ -176,7 +176,7 @@ static void fidvid_msr_init(void) | |||
176 | fid = lo & MSR_S_LO_CURRENT_FID; | 176 | fid = lo & MSR_S_LO_CURRENT_FID; |
177 | lo = fid | (vid << MSR_C_LO_VID_SHIFT); | 177 | lo = fid | (vid << MSR_C_LO_VID_SHIFT); |
178 | hi = MSR_C_HI_STP_GNT_BENIGN; | 178 | hi = MSR_C_HI_STP_GNT_BENIGN; |
179 | dprintk("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); | 179 | pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); |
180 | wrmsr(MSR_FIDVID_CTL, lo, hi); | 180 | wrmsr(MSR_FIDVID_CTL, lo, hi); |
181 | } | 181 | } |
182 | 182 | ||
@@ -196,7 +196,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) | |||
196 | lo |= (data->currvid << MSR_C_LO_VID_SHIFT); | 196 | lo |= (data->currvid << MSR_C_LO_VID_SHIFT); |
197 | lo |= MSR_C_LO_INIT_FID_VID; | 197 | lo |= MSR_C_LO_INIT_FID_VID; |
198 | 198 | ||
199 | dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n", | 199 | pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n", |
200 | fid, lo, data->plllock * PLL_LOCK_CONVERSION); | 200 | fid, lo, data->plllock * PLL_LOCK_CONVERSION); |
201 | 201 | ||
202 | do { | 202 | do { |
@@ -244,7 +244,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) | |||
244 | lo |= (vid << MSR_C_LO_VID_SHIFT); | 244 | lo |= (vid << MSR_C_LO_VID_SHIFT); |
245 | lo |= MSR_C_LO_INIT_FID_VID; | 245 | lo |= MSR_C_LO_INIT_FID_VID; |
246 | 246 | ||
247 | dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n", | 247 | pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n", |
248 | vid, lo, STOP_GRANT_5NS); | 248 | vid, lo, STOP_GRANT_5NS); |
249 | 249 | ||
250 | do { | 250 | do { |
@@ -325,7 +325,7 @@ static int transition_fid_vid(struct powernow_k8_data *data, | |||
325 | return 1; | 325 | return 1; |
326 | } | 326 | } |
327 | 327 | ||
328 | dprintk("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", | 328 | pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", |
329 | smp_processor_id(), data->currfid, data->currvid); | 329 | smp_processor_id(), data->currfid, data->currvid); |
330 | 330 | ||
331 | return 0; | 331 | return 0; |
@@ -339,7 +339,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, | |||
339 | u32 savefid = data->currfid; | 339 | u32 savefid = data->currfid; |
340 | u32 maxvid, lo, rvomult = 1; | 340 | u32 maxvid, lo, rvomult = 1; |
341 | 341 | ||
342 | dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " | 342 | pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " |
343 | "reqvid 0x%x, rvo 0x%x\n", | 343 | "reqvid 0x%x, rvo 0x%x\n", |
344 | smp_processor_id(), | 344 | smp_processor_id(), |
345 | data->currfid, data->currvid, reqvid, data->rvo); | 345 | data->currfid, data->currvid, reqvid, data->rvo); |
@@ -349,12 +349,12 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, | |||
349 | rvosteps *= rvomult; | 349 | rvosteps *= rvomult; |
350 | rdmsr(MSR_FIDVID_STATUS, lo, maxvid); | 350 | rdmsr(MSR_FIDVID_STATUS, lo, maxvid); |
351 | maxvid = 0x1f & (maxvid >> 16); | 351 | maxvid = 0x1f & (maxvid >> 16); |
352 | dprintk("ph1 maxvid=0x%x\n", maxvid); | 352 | pr_debug("ph1 maxvid=0x%x\n", maxvid); |
353 | if (reqvid < maxvid) /* lower numbers are higher voltages */ | 353 | if (reqvid < maxvid) /* lower numbers are higher voltages */ |
354 | reqvid = maxvid; | 354 | reqvid = maxvid; |
355 | 355 | ||
356 | while (data->currvid > reqvid) { | 356 | while (data->currvid > reqvid) { |
357 | dprintk("ph1: curr 0x%x, req vid 0x%x\n", | 357 | pr_debug("ph1: curr 0x%x, req vid 0x%x\n", |
358 | data->currvid, reqvid); | 358 | data->currvid, reqvid); |
359 | if (decrease_vid_code_by_step(data, reqvid, data->vidmvs)) | 359 | if (decrease_vid_code_by_step(data, reqvid, data->vidmvs)) |
360 | return 1; | 360 | return 1; |
@@ -365,7 +365,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, | |||
365 | if (data->currvid == maxvid) { | 365 | if (data->currvid == maxvid) { |
366 | rvosteps = 0; | 366 | rvosteps = 0; |
367 | } else { | 367 | } else { |
368 | dprintk("ph1: changing vid for rvo, req 0x%x\n", | 368 | pr_debug("ph1: changing vid for rvo, req 0x%x\n", |
369 | data->currvid - 1); | 369 | data->currvid - 1); |
370 | if (decrease_vid_code_by_step(data, data->currvid-1, 1)) | 370 | if (decrease_vid_code_by_step(data, data->currvid-1, 1)) |
371 | return 1; | 371 | return 1; |
@@ -382,7 +382,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, | |||
382 | return 1; | 382 | return 1; |
383 | } | 383 | } |
384 | 384 | ||
385 | dprintk("ph1 complete, currfid 0x%x, currvid 0x%x\n", | 385 | pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n", |
386 | data->currfid, data->currvid); | 386 | data->currfid, data->currvid); |
387 | 387 | ||
388 | return 0; | 388 | return 0; |
@@ -400,7 +400,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) | |||
400 | return 0; | 400 | return 0; |
401 | } | 401 | } |
402 | 402 | ||
403 | dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " | 403 | pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " |
404 | "reqfid 0x%x\n", | 404 | "reqfid 0x%x\n", |
405 | smp_processor_id(), | 405 | smp_processor_id(), |
406 | data->currfid, data->currvid, reqfid); | 406 | data->currfid, data->currvid, reqfid); |
@@ -457,7 +457,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) | |||
457 | return 1; | 457 | return 1; |
458 | } | 458 | } |
459 | 459 | ||
460 | dprintk("ph2 complete, currfid 0x%x, currvid 0x%x\n", | 460 | pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n", |
461 | data->currfid, data->currvid); | 461 | data->currfid, data->currvid); |
462 | 462 | ||
463 | return 0; | 463 | return 0; |
@@ -470,7 +470,7 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, | |||
470 | u32 savefid = data->currfid; | 470 | u32 savefid = data->currfid; |
471 | u32 savereqvid = reqvid; | 471 | u32 savereqvid = reqvid; |
472 | 472 | ||
473 | dprintk("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", | 473 | pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", |
474 | smp_processor_id(), | 474 | smp_processor_id(), |
475 | data->currfid, data->currvid); | 475 | data->currfid, data->currvid); |
476 | 476 | ||
@@ -498,17 +498,17 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, | |||
498 | return 1; | 498 | return 1; |
499 | 499 | ||
500 | if (savereqvid != data->currvid) { | 500 | if (savereqvid != data->currvid) { |
501 | dprintk("ph3 failed, currvid 0x%x\n", data->currvid); | 501 | pr_debug("ph3 failed, currvid 0x%x\n", data->currvid); |
502 | return 1; | 502 | return 1; |
503 | } | 503 | } |
504 | 504 | ||
505 | if (savefid != data->currfid) { | 505 | if (savefid != data->currfid) { |
506 | dprintk("ph3 failed, currfid changed 0x%x\n", | 506 | pr_debug("ph3 failed, currfid changed 0x%x\n", |
507 | data->currfid); | 507 | data->currfid); |
508 | return 1; | 508 | return 1; |
509 | } | 509 | } |
510 | 510 | ||
511 | dprintk("ph3 complete, currfid 0x%x, currvid 0x%x\n", | 511 | pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n", |
512 | data->currfid, data->currvid); | 512 | data->currfid, data->currvid); |
513 | 513 | ||
514 | return 0; | 514 | return 0; |
@@ -707,7 +707,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, | |||
707 | return -EIO; | 707 | return -EIO; |
708 | } | 708 | } |
709 | 709 | ||
710 | dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); | 710 | pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); |
711 | data->powernow_table = powernow_table; | 711 | data->powernow_table = powernow_table; |
712 | if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) | 712 | if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) |
713 | print_basics(data); | 713 | print_basics(data); |
@@ -717,7 +717,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, | |||
717 | (pst[j].vid == data->currvid)) | 717 | (pst[j].vid == data->currvid)) |
718 | return 0; | 718 | return 0; |
719 | 719 | ||
720 | dprintk("currfid/vid do not match PST, ignoring\n"); | 720 | pr_debug("currfid/vid do not match PST, ignoring\n"); |
721 | return 0; | 721 | return 0; |
722 | } | 722 | } |
723 | 723 | ||
@@ -739,36 +739,36 @@ static int find_psb_table(struct powernow_k8_data *data) | |||
739 | if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) | 739 | if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) |
740 | continue; | 740 | continue; |
741 | 741 | ||
742 | dprintk("found PSB header at 0x%p\n", psb); | 742 | pr_debug("found PSB header at 0x%p\n", psb); |
743 | 743 | ||
744 | dprintk("table vers: 0x%x\n", psb->tableversion); | 744 | pr_debug("table vers: 0x%x\n", psb->tableversion); |
745 | if (psb->tableversion != PSB_VERSION_1_4) { | 745 | if (psb->tableversion != PSB_VERSION_1_4) { |
746 | printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); | 746 | printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); |
747 | return -ENODEV; | 747 | return -ENODEV; |
748 | } | 748 | } |
749 | 749 | ||
750 | dprintk("flags: 0x%x\n", psb->flags1); | 750 | pr_debug("flags: 0x%x\n", psb->flags1); |
751 | if (psb->flags1) { | 751 | if (psb->flags1) { |
752 | printk(KERN_ERR FW_BUG PFX "unknown flags\n"); | 752 | printk(KERN_ERR FW_BUG PFX "unknown flags\n"); |
753 | return -ENODEV; | 753 | return -ENODEV; |
754 | } | 754 | } |
755 | 755 | ||
756 | data->vstable = psb->vstable; | 756 | data->vstable = psb->vstable; |
757 | dprintk("voltage stabilization time: %d(*20us)\n", | 757 | pr_debug("voltage stabilization time: %d(*20us)\n", |
758 | data->vstable); | 758 | data->vstable); |
759 | 759 | ||
760 | dprintk("flags2: 0x%x\n", psb->flags2); | 760 | pr_debug("flags2: 0x%x\n", psb->flags2); |
761 | data->rvo = psb->flags2 & 3; | 761 | data->rvo = psb->flags2 & 3; |
762 | data->irt = ((psb->flags2) >> 2) & 3; | 762 | data->irt = ((psb->flags2) >> 2) & 3; |
763 | mvs = ((psb->flags2) >> 4) & 3; | 763 | mvs = ((psb->flags2) >> 4) & 3; |
764 | data->vidmvs = 1 << mvs; | 764 | data->vidmvs = 1 << mvs; |
765 | data->batps = ((psb->flags2) >> 6) & 3; | 765 | data->batps = ((psb->flags2) >> 6) & 3; |
766 | 766 | ||
767 | dprintk("ramp voltage offset: %d\n", data->rvo); | 767 | pr_debug("ramp voltage offset: %d\n", data->rvo); |
768 | dprintk("isochronous relief time: %d\n", data->irt); | 768 | pr_debug("isochronous relief time: %d\n", data->irt); |
769 | dprintk("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); | 769 | pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); |
770 | 770 | ||
771 | dprintk("numpst: 0x%x\n", psb->num_tables); | 771 | pr_debug("numpst: 0x%x\n", psb->num_tables); |
772 | cpst = psb->num_tables; | 772 | cpst = psb->num_tables; |
773 | if ((psb->cpuid == 0x00000fc0) || | 773 | if ((psb->cpuid == 0x00000fc0) || |
774 | (psb->cpuid == 0x00000fe0)) { | 774 | (psb->cpuid == 0x00000fe0)) { |
@@ -783,13 +783,13 @@ static int find_psb_table(struct powernow_k8_data *data) | |||
783 | } | 783 | } |
784 | 784 | ||
785 | data->plllock = psb->plllocktime; | 785 | data->plllock = psb->plllocktime; |
786 | dprintk("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); | 786 | pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); |
787 | dprintk("maxfid: 0x%x\n", psb->maxfid); | 787 | pr_debug("maxfid: 0x%x\n", psb->maxfid); |
788 | dprintk("maxvid: 0x%x\n", psb->maxvid); | 788 | pr_debug("maxvid: 0x%x\n", psb->maxvid); |
789 | maxvid = psb->maxvid; | 789 | maxvid = psb->maxvid; |
790 | 790 | ||
791 | data->numps = psb->numps; | 791 | data->numps = psb->numps; |
792 | dprintk("numpstates: 0x%x\n", data->numps); | 792 | pr_debug("numpstates: 0x%x\n", data->numps); |
793 | return fill_powernow_table(data, | 793 | return fill_powernow_table(data, |
794 | (struct pst_s *)(psb+1), maxvid); | 794 | (struct pst_s *)(psb+1), maxvid); |
795 | } | 795 | } |
@@ -834,13 +834,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
834 | u64 control, status; | 834 | u64 control, status; |
835 | 835 | ||
836 | if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { | 836 | if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { |
837 | dprintk("register performance failed: bad ACPI data\n"); | 837 | pr_debug("register performance failed: bad ACPI data\n"); |
838 | return -EIO; | 838 | return -EIO; |
839 | } | 839 | } |
840 | 840 | ||
841 | /* verify the data contained in the ACPI structures */ | 841 | /* verify the data contained in the ACPI structures */ |
842 | if (data->acpi_data.state_count <= 1) { | 842 | if (data->acpi_data.state_count <= 1) { |
843 | dprintk("No ACPI P-States\n"); | 843 | pr_debug("No ACPI P-States\n"); |
844 | goto err_out; | 844 | goto err_out; |
845 | } | 845 | } |
846 | 846 | ||
@@ -849,7 +849,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
849 | 849 | ||
850 | if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) || | 850 | if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) || |
851 | (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) { | 851 | (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) { |
852 | dprintk("Invalid control/status registers (%x - %x)\n", | 852 | pr_debug("Invalid control/status registers (%llx - %llx)\n", |
853 | control, status); | 853 | control, status); |
854 | goto err_out; | 854 | goto err_out; |
855 | } | 855 | } |
@@ -858,7 +858,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
858 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) | 858 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) |
859 | * (data->acpi_data.state_count + 1)), GFP_KERNEL); | 859 | * (data->acpi_data.state_count + 1)), GFP_KERNEL); |
860 | if (!powernow_table) { | 860 | if (!powernow_table) { |
861 | dprintk("powernow_table memory alloc failure\n"); | 861 | pr_debug("powernow_table memory alloc failure\n"); |
862 | goto err_out; | 862 | goto err_out; |
863 | } | 863 | } |
864 | 864 | ||
@@ -928,7 +928,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, | |||
928 | } | 928 | } |
929 | rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); | 929 | rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); |
930 | if (!(hi & HW_PSTATE_VALID_MASK)) { | 930 | if (!(hi & HW_PSTATE_VALID_MASK)) { |
931 | dprintk("invalid pstate %d, ignoring\n", index); | 931 | pr_debug("invalid pstate %d, ignoring\n", index); |
932 | invalidate_entry(powernow_table, i); | 932 | invalidate_entry(powernow_table, i); |
933 | continue; | 933 | continue; |
934 | } | 934 | } |
@@ -968,7 +968,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, | |||
968 | vid = (control >> VID_SHIFT) & VID_MASK; | 968 | vid = (control >> VID_SHIFT) & VID_MASK; |
969 | } | 969 | } |
970 | 970 | ||
971 | dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); | 971 | pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); |
972 | 972 | ||
973 | index = fid | (vid<<8); | 973 | index = fid | (vid<<8); |
974 | powernow_table[i].index = index; | 974 | powernow_table[i].index = index; |
@@ -978,7 +978,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, | |||
978 | 978 | ||
979 | /* verify frequency is OK */ | 979 | /* verify frequency is OK */ |
980 | if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) { | 980 | if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) { |
981 | dprintk("invalid freq %u kHz, ignoring\n", freq); | 981 | pr_debug("invalid freq %u kHz, ignoring\n", freq); |
982 | invalidate_entry(powernow_table, i); | 982 | invalidate_entry(powernow_table, i); |
983 | continue; | 983 | continue; |
984 | } | 984 | } |
@@ -986,7 +986,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, | |||
986 | /* verify voltage is OK - | 986 | /* verify voltage is OK - |
987 | * BIOSs are using "off" to indicate invalid */ | 987 | * BIOSs are using "off" to indicate invalid */ |
988 | if (vid == VID_OFF) { | 988 | if (vid == VID_OFF) { |
989 | dprintk("invalid vid %u, ignoring\n", vid); | 989 | pr_debug("invalid vid %u, ignoring\n", vid); |
990 | invalidate_entry(powernow_table, i); | 990 | invalidate_entry(powernow_table, i); |
991 | continue; | 991 | continue; |
992 | } | 992 | } |
@@ -1047,7 +1047,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, | |||
1047 | int res, i; | 1047 | int res, i; |
1048 | struct cpufreq_freqs freqs; | 1048 | struct cpufreq_freqs freqs; |
1049 | 1049 | ||
1050 | dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); | 1050 | pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); |
1051 | 1051 | ||
1052 | /* fid/vid correctness check for k8 */ | 1052 | /* fid/vid correctness check for k8 */ |
1053 | /* fid are the lower 8 bits of the index we stored into | 1053 | /* fid are the lower 8 bits of the index we stored into |
@@ -1057,18 +1057,18 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, | |||
1057 | fid = data->powernow_table[index].index & 0xFF; | 1057 | fid = data->powernow_table[index].index & 0xFF; |
1058 | vid = (data->powernow_table[index].index & 0xFF00) >> 8; | 1058 | vid = (data->powernow_table[index].index & 0xFF00) >> 8; |
1059 | 1059 | ||
1060 | dprintk("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); | 1060 | pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); |
1061 | 1061 | ||
1062 | if (query_current_values_with_pending_wait(data)) | 1062 | if (query_current_values_with_pending_wait(data)) |
1063 | return 1; | 1063 | return 1; |
1064 | 1064 | ||
1065 | if ((data->currvid == vid) && (data->currfid == fid)) { | 1065 | if ((data->currvid == vid) && (data->currfid == fid)) { |
1066 | dprintk("target matches current values (fid 0x%x, vid 0x%x)\n", | 1066 | pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n", |
1067 | fid, vid); | 1067 | fid, vid); |
1068 | return 0; | 1068 | return 0; |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n", | 1071 | pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n", |
1072 | smp_processor_id(), fid, vid); | 1072 | smp_processor_id(), fid, vid); |
1073 | freqs.old = find_khz_freq_from_fid(data->currfid); | 1073 | freqs.old = find_khz_freq_from_fid(data->currfid); |
1074 | freqs.new = find_khz_freq_from_fid(fid); | 1074 | freqs.new = find_khz_freq_from_fid(fid); |
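Worth noting while reading these hunks: the frequency-table index packs fid into the low byte and vid into the next byte when the table is filled (index = fid | (vid << 8)), and the transition path above unpacks it with index & 0xFF and (index & 0xFF00) >> 8. The round trip as inline helpers (illustrative; the driver open-codes the masks):

    #include <linux/types.h>

    static inline u32 pack_fidvid(u32 fid, u32 vid)
    {
            return fid | (vid << 8);
    }

    static inline void unpack_fidvid(u32 index, u32 *fid, u32 *vid)
    {
            *fid = index & 0xFF;
            *vid = (index & 0xFF00) >> 8;
    }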
@@ -1096,7 +1096,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, | |||
1096 | int res, i; | 1096 | int res, i; |
1097 | struct cpufreq_freqs freqs; | 1097 | struct cpufreq_freqs freqs; |
1098 | 1098 | ||
1099 | dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); | 1099 | pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); |
1100 | 1100 | ||
1101 | /* get MSR index for hardware pstate transition */ | 1101 | /* get MSR index for hardware pstate transition */ |
1102 | pstate = index & HW_PSTATE_MASK; | 1102 | pstate = index & HW_PSTATE_MASK; |
@@ -1156,14 +1156,14 @@ static int powernowk8_target(struct cpufreq_policy *pol, | |||
1156 | goto err_out; | 1156 | goto err_out; |
1157 | } | 1157 | } |
1158 | 1158 | ||
1159 | dprintk("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", | 1159 | pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", |
1160 | pol->cpu, targfreq, pol->min, pol->max, relation); | 1160 | pol->cpu, targfreq, pol->min, pol->max, relation); |
1161 | 1161 | ||
1162 | if (query_current_values_with_pending_wait(data)) | 1162 | if (query_current_values_with_pending_wait(data)) |
1163 | goto err_out; | 1163 | goto err_out; |
1164 | 1164 | ||
1165 | if (cpu_family != CPU_HW_PSTATE) { | 1165 | if (cpu_family != CPU_HW_PSTATE) { |
1166 | dprintk("targ: curr fid 0x%x, vid 0x%x\n", | 1166 | pr_debug("targ: curr fid 0x%x, vid 0x%x\n", |
1167 | data->currfid, data->currvid); | 1167 | data->currfid, data->currvid); |
1168 | 1168 | ||
1169 | if ((checkvid != data->currvid) || | 1169 | if ((checkvid != data->currvid) || |
@@ -1319,7 +1319,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1319 | data->currpstate); | 1319 | data->currpstate); |
1320 | else | 1320 | else |
1321 | pol->cur = find_khz_freq_from_fid(data->currfid); | 1321 | pol->cur = find_khz_freq_from_fid(data->currfid); |
1322 | dprintk("policy current frequency %d kHz\n", pol->cur); | 1322 | pr_debug("policy current frequency %d kHz\n", pol->cur); |
1323 | 1323 | ||
1324 | /* min/max the cpu is capable of */ | 1324 | /* min/max the cpu is capable of */ |
1325 | if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { | 1325 | if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { |
@@ -1337,10 +1337,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1337 | cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); | 1337 | cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); |
1338 | 1338 | ||
1339 | if (cpu_family == CPU_HW_PSTATE) | 1339 | if (cpu_family == CPU_HW_PSTATE) |
1340 | dprintk("cpu_init done, current pstate 0x%x\n", | 1340 | pr_debug("cpu_init done, current pstate 0x%x\n", |
1341 | data->currpstate); | 1341 | data->currpstate); |
1342 | else | 1342 | else |
1343 | dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n", | 1343 | pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", |
1344 | data->currfid, data->currvid); | 1344 | data->currfid, data->currvid); |
1345 | 1345 | ||
1346 | per_cpu(powernow_data, pol->cpu) = data; | 1346 | per_cpu(powernow_data, pol->cpu) = data; |
@@ -1586,7 +1586,7 @@ static int __cpuinit powernowk8_init(void) | |||
1586 | /* driver entry point for term */ | 1586 | /* driver entry point for term */ |
1587 | static void __exit powernowk8_exit(void) | 1587 | static void __exit powernowk8_exit(void) |
1588 | { | 1588 | { |
1589 | dprintk("exit\n"); | 1589 | pr_debug("exit\n"); |
1590 | 1590 | ||
1591 | if (boot_cpu_has(X86_FEATURE_CPB)) { | 1591 | if (boot_cpu_has(X86_FEATURE_CPB)) { |
1592 | msrs_free(msrs); | 1592 | msrs_free(msrs); |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h index df3529b1c02d..3744d26cdc2b 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/drivers/cpufreq/powernow-k8.h | |||
@@ -211,8 +211,6 @@ struct pst_s { | |||
211 | u8 vid; | 211 | u8 vid; |
212 | }; | 212 | }; |
213 | 213 | ||
214 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k8", msg) | ||
215 | |||
216 | static int core_voltage_pre_transition(struct powernow_k8_data *data, | 214 | static int core_voltage_pre_transition(struct powernow_k8_data *data, |
217 | u32 reqvid, u32 regfid); | 215 | u32 reqvid, u32 regfid); |
218 | static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid); | 216 | static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid); |
diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c index 435a996a613a..1e205e6b1727 100644 --- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c +++ b/drivers/cpufreq/sc520_freq.c | |||
@@ -29,8 +29,6 @@ | |||
29 | 29 | ||
30 | static __u8 __iomem *cpuctl; | 30 | static __u8 __iomem *cpuctl; |
31 | 31 | ||
32 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
33 | "sc520_freq", msg) | ||
34 | #define PFX "sc520_freq: " | 32 | #define PFX "sc520_freq: " |
35 | 33 | ||
36 | static struct cpufreq_frequency_table sc520_freq_table[] = { | 34 | static struct cpufreq_frequency_table sc520_freq_table[] = { |
@@ -66,7 +64,7 @@ static void sc520_freq_set_cpu_state(unsigned int state) | |||
66 | 64 | ||
67 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 65 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
68 | 66 | ||
69 | dprintk("attempting to set frequency to %i kHz\n", | 67 | pr_debug("attempting to set frequency to %i kHz\n", |
70 | sc520_freq_table[state].frequency); | 68 | sc520_freq_table[state].frequency); |
71 | 69 | ||
72 | local_irq_disable(); | 70 | local_irq_disable(); |
@@ -161,7 +159,7 @@ static int __init sc520_freq_init(void) | |||
161 | /* Test if we have the right hardware */ | 159 | /* Test if we have the right hardware */ |
162 | if (c->x86_vendor != X86_VENDOR_AMD || | 160 | if (c->x86_vendor != X86_VENDOR_AMD || |
163 | c->x86 != 4 || c->x86_model != 9) { | 161 | c->x86 != 4 || c->x86_model != 9) { |
164 | dprintk("no Elan SC520 processor found!\n"); | 162 | pr_debug("no Elan SC520 processor found!\n"); |
165 | return -ENODEV; | 163 | return -ENODEV; |
166 | } | 164 | } |
167 | cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); | 165 | cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 9b1ff37de46a..6ea3455def21 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c | |||
@@ -29,9 +29,6 @@ | |||
29 | #define PFX "speedstep-centrino: " | 29 | #define PFX "speedstep-centrino: " |
30 | #define MAINTAINER "cpufreq@vger.kernel.org" | 30 | #define MAINTAINER "cpufreq@vger.kernel.org" |
31 | 31 | ||
32 | #define dprintk(msg...) \ | ||
33 | cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) | ||
34 | |||
35 | #define INTEL_MSR_RANGE (0xffff) | 32 | #define INTEL_MSR_RANGE (0xffff) |
36 | 33 | ||
37 | struct cpu_id | 34 | struct cpu_id |
@@ -244,7 +241,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) | |||
244 | 241 | ||
245 | if (model->cpu_id == NULL) { | 242 | if (model->cpu_id == NULL) { |
246 | /* No match at all */ | 243 | /* No match at all */ |
247 | dprintk("no support for CPU model \"%s\": " | 244 | pr_debug("no support for CPU model \"%s\": " |
248 | "send /proc/cpuinfo to " MAINTAINER "\n", | 245 | "send /proc/cpuinfo to " MAINTAINER "\n", |
249 | cpu->x86_model_id); | 246 | cpu->x86_model_id); |
250 | return -ENOENT; | 247 | return -ENOENT; |
@@ -252,15 +249,15 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) | |||
252 | 249 | ||
253 | if (model->op_points == NULL) { | 250 | if (model->op_points == NULL) { |
254 | /* Matched a non-match */ | 251 | /* Matched a non-match */ |
255 | dprintk("no table support for CPU model \"%s\"\n", | 252 | pr_debug("no table support for CPU model \"%s\"\n", |
256 | cpu->x86_model_id); | 253 | cpu->x86_model_id); |
257 | dprintk("try using the acpi-cpufreq driver\n"); | 254 | pr_debug("try using the acpi-cpufreq driver\n"); |
258 | return -ENOENT; | 255 | return -ENOENT; |
259 | } | 256 | } |
260 | 257 | ||
261 | per_cpu(centrino_model, policy->cpu) = model; | 258 | per_cpu(centrino_model, policy->cpu) = model; |
262 | 259 | ||
263 | dprintk("found \"%s\": max frequency: %dkHz\n", | 260 | pr_debug("found \"%s\": max frequency: %dkHz\n", |
264 | model->model_name, model->max_freq); | 261 | model->model_name, model->max_freq); |
265 | 262 | ||
266 | return 0; | 263 | return 0; |
@@ -369,7 +366,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
369 | per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; | 366 | per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; |
370 | 367 | ||
371 | if (!per_cpu(centrino_cpu, policy->cpu)) { | 368 | if (!per_cpu(centrino_cpu, policy->cpu)) { |
372 | dprintk("found unsupported CPU with " | 369 | pr_debug("found unsupported CPU with " |
373 | "Enhanced SpeedStep: send /proc/cpuinfo to " | 370 | "Enhanced SpeedStep: send /proc/cpuinfo to " |
374 | MAINTAINER "\n"); | 371 | MAINTAINER "\n"); |
375 | return -ENODEV; | 372 | return -ENODEV; |
@@ -385,7 +382,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
385 | 382 | ||
386 | if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { | 383 | if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { |
387 | l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; | 384 | l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; |
388 | dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); | 385 | pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l); |
389 | wrmsr(MSR_IA32_MISC_ENABLE, l, h); | 386 | wrmsr(MSR_IA32_MISC_ENABLE, l, h); |
390 | 387 | ||
391 | /* check to see if it stuck */ | 388 | /* check to see if it stuck */ |
@@ -402,7 +399,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
402 | /* 10uS transition latency */ | 399 | /* 10uS transition latency */ |
403 | policy->cur = freq; | 400 | policy->cur = freq; |
404 | 401 | ||
405 | dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur); | 402 | pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur); |
406 | 403 | ||
407 | ret = cpufreq_frequency_table_cpuinfo(policy, | 404 | ret = cpufreq_frequency_table_cpuinfo(policy, |
408 | per_cpu(centrino_model, policy->cpu)->op_points); | 405 | per_cpu(centrino_model, policy->cpu)->op_points); |
@@ -498,7 +495,7 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
498 | good_cpu = j; | 495 | good_cpu = j; |
499 | 496 | ||
500 | if (good_cpu >= nr_cpu_ids) { | 497 | if (good_cpu >= nr_cpu_ids) { |
501 | dprintk("couldn't limit to CPUs in this domain\n"); | 498 | pr_debug("couldn't limit to CPUs in this domain\n"); |
502 | retval = -EAGAIN; | 499 | retval = -EAGAIN; |
503 | if (first_cpu) { | 500 | if (first_cpu) { |
504 | /* We haven't started the transition yet. */ | 501 | /* We haven't started the transition yet. */ |
@@ -512,7 +509,7 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
512 | if (first_cpu) { | 509 | if (first_cpu) { |
513 | rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); | 510 | rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); |
514 | if (msr == (oldmsr & 0xffff)) { | 511 | if (msr == (oldmsr & 0xffff)) { |
515 | dprintk("no change needed - msr was and needs " | 512 | pr_debug("no change needed - msr was and needs " |
516 | "to be %x\n", oldmsr); | 513 | "to be %x\n", oldmsr); |
517 | retval = 0; | 514 | retval = 0; |
518 | goto out; | 515 | goto out; |
@@ -521,7 +518,7 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
521 | freqs.old = extract_clock(oldmsr, cpu, 0); | 518 | freqs.old = extract_clock(oldmsr, cpu, 0); |
522 | freqs.new = extract_clock(msr, cpu, 0); | 519 | freqs.new = extract_clock(msr, cpu, 0); |
523 | 520 | ||
524 | dprintk("target=%dkHz old=%d new=%d msr=%04x\n", | 521 | pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", |
525 | target_freq, freqs.old, freqs.new, msr); | 522 | target_freq, freqs.old, freqs.new, msr); |
526 | 523 | ||
527 | for_each_cpu(k, policy->cpus) { | 524 | for_each_cpu(k, policy->cpus) { |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index 561758e95180..a748ce782fee 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c | |||
@@ -53,10 +53,6 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | 55 | ||
56 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
57 | "speedstep-ich", msg) | ||
58 | |||
59 | |||
60 | /** | 56 | /** |
61 | * speedstep_find_register - read the PMBASE address | 57 | * speedstep_find_register - read the PMBASE address |
62 | * | 58 | * |
@@ -80,7 +76,7 @@ static int speedstep_find_register(void) | |||
80 | return -ENODEV; | 76 | return -ENODEV; |
81 | } | 77 | } |
82 | 78 | ||
83 | dprintk("pmbase is 0x%x\n", pmbase); | 79 | pr_debug("pmbase is 0x%x\n", pmbase); |
84 | return 0; | 80 | return 0; |
85 | } | 81 | } |
86 | 82 | ||
@@ -106,13 +102,13 @@ static void speedstep_set_state(unsigned int state) | |||
106 | /* read state */ | 102 | /* read state */ |
107 | value = inb(pmbase + 0x50); | 103 | value = inb(pmbase + 0x50); |
108 | 104 | ||
109 | dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); | 105 | pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); |
110 | 106 | ||
111 | /* write new state */ | 107 | /* write new state */ |
112 | value &= 0xFE; | 108 | value &= 0xFE; |
113 | value |= state; | 109 | value |= state; |
114 | 110 | ||
115 | dprintk("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); | 111 | pr_debug("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); |
116 | 112 | ||
117 | /* Disable bus master arbitration */ | 113 | /* Disable bus master arbitration */ |
118 | pm2_blk = inb(pmbase + 0x20); | 114 | pm2_blk = inb(pmbase + 0x20); |
@@ -132,10 +128,10 @@ static void speedstep_set_state(unsigned int state) | |||
132 | /* Enable IRQs */ | 128 | /* Enable IRQs */ |
133 | local_irq_restore(flags); | 129 | local_irq_restore(flags); |
134 | 130 | ||
135 | dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); | 131 | pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); |
136 | 132 | ||
137 | if (state == (value & 0x1)) | 133 | if (state == (value & 0x1)) |
138 | dprintk("change to %u MHz succeeded\n", | 134 | pr_debug("change to %u MHz succeeded\n", |
139 | speedstep_get_frequency(speedstep_processor) / 1000); | 135 | speedstep_get_frequency(speedstep_processor) / 1000); |
140 | else | 136 | else |
141 | printk(KERN_ERR "cpufreq: change failed - I/O error\n"); | 137 | printk(KERN_ERR "cpufreq: change failed - I/O error\n"); |
@@ -165,7 +161,7 @@ static int speedstep_activate(void) | |||
165 | pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value); | 161 | pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value); |
166 | if (!(value & 0x08)) { | 162 | if (!(value & 0x08)) { |
167 | value |= 0x08; | 163 | value |= 0x08; |
168 | dprintk("activating SpeedStep (TM) registers\n"); | 164 | pr_debug("activating SpeedStep (TM) registers\n"); |
169 | pci_write_config_word(speedstep_chipset_dev, 0x00A0, value); | 165 | pci_write_config_word(speedstep_chipset_dev, 0x00A0, value); |
170 | } | 166 | } |
171 | 167 | ||
@@ -218,7 +214,7 @@ static unsigned int speedstep_detect_chipset(void) | |||
218 | return 2; /* 2-M */ | 214 | return 2; /* 2-M */ |
219 | 215 | ||
220 | if (hostbridge->revision < 5) { | 216 | if (hostbridge->revision < 5) { |
221 | dprintk("hostbridge does not support speedstep\n"); | 217 | pr_debug("hostbridge does not support speedstep\n"); |
222 | speedstep_chipset_dev = NULL; | 218 | speedstep_chipset_dev = NULL; |
223 | pci_dev_put(hostbridge); | 219 | pci_dev_put(hostbridge); |
224 | return 0; | 220 | return 0; |
@@ -246,7 +242,7 @@ static unsigned int speedstep_get(unsigned int cpu) | |||
246 | if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) | 242 | if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) |
247 | BUG(); | 243 | BUG(); |
248 | 244 | ||
249 | dprintk("detected %u kHz as current frequency\n", speed); | 245 | pr_debug("detected %u kHz as current frequency\n", speed); |
250 | return speed; | 246 | return speed; |
251 | } | 247 | } |
252 | 248 | ||
@@ -276,7 +272,7 @@ static int speedstep_target(struct cpufreq_policy *policy, | |||
276 | freqs.new = speedstep_freqs[newstate].frequency; | 272 | freqs.new = speedstep_freqs[newstate].frequency; |
277 | freqs.cpu = policy->cpu; | 273 | freqs.cpu = policy->cpu; |
278 | 274 | ||
279 | dprintk("transiting from %u to %u kHz\n", freqs.old, freqs.new); | 275 | pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); |
280 | 276 | ||
281 | /* no transition necessary */ | 277 | /* no transition necessary */ |
282 | if (freqs.old == freqs.new) | 278 | if (freqs.old == freqs.new) |
@@ -351,7 +347,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
351 | if (!speed) | 347 | if (!speed) |
352 | return -EIO; | 348 | return -EIO; |
353 | 349 | ||
354 | dprintk("currently at %s speed setting - %i MHz\n", | 350 | pr_debug("currently at %s speed setting - %i MHz\n", |
355 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) | 351 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) |
356 | ? "low" : "high", | 352 | ? "low" : "high", |
357 | (speed / 1000)); | 353 | (speed / 1000)); |
@@ -405,14 +401,14 @@ static int __init speedstep_init(void) | |||
405 | /* detect processor */ | 401 | /* detect processor */ |
406 | speedstep_processor = speedstep_detect_processor(); | 402 | speedstep_processor = speedstep_detect_processor(); |
407 | if (!speedstep_processor) { | 403 | if (!speedstep_processor) { |
408 | dprintk("Intel(R) SpeedStep(TM) capable processor " | 404 | pr_debug("Intel(R) SpeedStep(TM) capable processor " |
409 | "not found\n"); | 405 | "not found\n"); |
410 | return -ENODEV; | 406 | return -ENODEV; |
411 | } | 407 | } |
412 | 408 | ||
413 | /* detect chipset */ | 409 | /* detect chipset */ |
414 | if (!speedstep_detect_chipset()) { | 410 | if (!speedstep_detect_chipset()) { |
415 | dprintk("Intel(R) SpeedStep(TM) for this chipset not " | 411 | pr_debug("Intel(R) SpeedStep(TM) for this chipset not " |
416 | "(yet) available.\n"); | 412 | "(yet) available.\n"); |
417 | return -ENODEV; | 413 | return -ENODEV; |
418 | } | 414 | } |
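The hunks above all repeat one conversion: each driver's private dprintk macro, routed through cpufreq_debug_printk(), is deleted and every call site becomes a plain pr_debug(). A minimal sketch of the replacement mechanism, assuming the usual pr_fmt prefix convention (the prefix string here is illustrative, not taken from the patch):

    /* pr_fmt must be defined before printk.h is included; pr_debug()
     * compiles away entirely unless DEBUG or CONFIG_DYNAMIC_DEBUG is
     * set, so no per-driver debug switch is needed any more. */
    #define pr_fmt(fmt) "speedstep-ich: " fmt

    #include <linux/printk.h>

    static void show_pmbase(unsigned int pmbase)
    {
            /* prints "speedstep-ich: pmbase is 0x..." when enabled */
            pr_debug("pmbase is 0x%x\n", pmbase);
    }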
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index a94ec6be69fa..8af2d2fd9d51 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c | |||
@@ -18,9 +18,6 @@ | |||
18 | #include <asm/tsc.h> | 18 | #include <asm/tsc.h> |
19 | #include "speedstep-lib.h" | 19 | #include "speedstep-lib.h" |
20 | 20 | ||
21 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
22 | "speedstep-lib", msg) | ||
23 | |||
24 | #define PFX "speedstep-lib: " | 21 | #define PFX "speedstep-lib: " |
25 | 22 | ||
26 | #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK | 23 | #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK |
@@ -75,7 +72,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) | |||
75 | 72 | ||
76 | /* read MSR 0x2a - we only need the low 32 bits */ | 73 | /* read MSR 0x2a - we only need the low 32 bits */ |
77 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); | 74 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); |
78 | dprintk("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); | 75 | pr_debug("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); |
79 | msr_tmp = msr_lo; | 76 | msr_tmp = msr_lo; |
80 | 77 | ||
81 | /* decode the FSB */ | 78 | /* decode the FSB */ |
@@ -89,7 +86,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) | |||
89 | 86 | ||
90 | /* decode the multiplier */ | 87 | /* decode the multiplier */ |
91 | if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) { | 88 | if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) { |
92 | dprintk("workaround for early PIIIs\n"); | 89 | pr_debug("workaround for early PIIIs\n"); |
93 | msr_lo &= 0x03c00000; | 90 | msr_lo &= 0x03c00000; |
94 | } else | 91 | } else |
95 | msr_lo &= 0x0bc00000; | 92 | msr_lo &= 0x0bc00000; |
@@ -100,7 +97,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) | |||
100 | j++; | 97 | j++; |
101 | } | 98 | } |
102 | 99 | ||
103 | dprintk("speed is %u\n", | 100 | pr_debug("speed is %u\n", |
104 | (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); | 101 | (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); |
105 | 102 | ||
106 | return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100; | 103 | return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100; |
@@ -112,7 +109,7 @@ static unsigned int pentiumM_get_frequency(void) | |||
112 | u32 msr_lo, msr_tmp; | 109 | u32 msr_lo, msr_tmp; |
113 | 110 | ||
114 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); | 111 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); |
115 | dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); | 112 | pr_debug("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); |
116 | 113 | ||
117 | /* see table B-2 of 24547212.pdf */ | 114 | /* see table B-2 of 24547212.pdf */ |
118 | if (msr_lo & 0x00040000) { | 115 | if (msr_lo & 0x00040000) { |
@@ -122,7 +119,7 @@ static unsigned int pentiumM_get_frequency(void) | |||
122 | } | 119 | } |
123 | 120 | ||
124 | msr_tmp = (msr_lo >> 22) & 0x1f; | 121 | msr_tmp = (msr_lo >> 22) & 0x1f; |
125 | dprintk("bits 22-26 are 0x%x, speed is %u\n", | 122 | pr_debug("bits 22-26 are 0x%x, speed is %u\n", |
126 | msr_tmp, (msr_tmp * 100 * 1000)); | 123 | msr_tmp, (msr_tmp * 100 * 1000)); |
127 | 124 | ||
128 | return msr_tmp * 100 * 1000; | 125 | return msr_tmp * 100 * 1000; |
@@ -160,11 +157,11 @@ static unsigned int pentium_core_get_frequency(void) | |||
160 | } | 157 | } |
161 | 158 | ||
162 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); | 159 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); |
163 | dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", | 160 | pr_debug("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", |
164 | msr_lo, msr_tmp); | 161 | msr_lo, msr_tmp); |
165 | 162 | ||
166 | msr_tmp = (msr_lo >> 22) & 0x1f; | 163 | msr_tmp = (msr_lo >> 22) & 0x1f; |
167 | dprintk("bits 22-26 are 0x%x, speed is %u\n", | 164 | pr_debug("bits 22-26 are 0x%x, speed is %u\n", |
168 | msr_tmp, (msr_tmp * fsb)); | 165 | msr_tmp, (msr_tmp * fsb)); |
169 | 166 | ||
170 | ret = (msr_tmp * fsb); | 167 | ret = (msr_tmp * fsb); |
@@ -190,7 +187,7 @@ static unsigned int pentium4_get_frequency(void) | |||
190 | 187 | ||
191 | rdmsr(0x2c, msr_lo, msr_hi); | 188 | rdmsr(0x2c, msr_lo, msr_hi); |
192 | 189 | ||
193 | dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); | 190 | pr_debug("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); |
194 | 191 | ||
195 | /* decode the FSB: see IA-32 Intel (C) Architecture Software | 192 | /* decode the FSB: see IA-32 Intel (C) Architecture Software |
196 | * Developer's Manual, Volume 3: System Prgramming Guide, | 193 | * Developer's Manual, Volume 3: System Prgramming Guide, |
@@ -217,7 +214,7 @@ static unsigned int pentium4_get_frequency(void) | |||
217 | /* Multiplier. */ | 214 | /* Multiplier. */ |
218 | mult = msr_lo >> 24; | 215 | mult = msr_lo >> 24; |
219 | 216 | ||
220 | dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", | 217 | pr_debug("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", |
221 | fsb, mult, (fsb * mult)); | 218 | fsb, mult, (fsb * mult)); |
222 | 219 | ||
223 | ret = (fsb * mult); | 220 | ret = (fsb * mult); |
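Once the FSB has been decoded from the MSR, the Pentium 4 path reduces to a single product. A trivial sketch with illustrative numbers:

    /* Sketch: core clock in kHz is FSB times multiplier, e.g.
     * 133333 kHz * 15 = 1999995 kHz, i.e. roughly 2 GHz. */
    static unsigned int p4_speed_khz(unsigned int fsb_khz, unsigned int mult)
    {
            return fsb_khz * mult;
    }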
@@ -257,7 +254,7 @@ unsigned int speedstep_detect_processor(void) | |||
257 | struct cpuinfo_x86 *c = &cpu_data(0); | 254 | struct cpuinfo_x86 *c = &cpu_data(0); |
258 | u32 ebx, msr_lo, msr_hi; | 255 | u32 ebx, msr_lo, msr_hi; |
259 | 256 | ||
260 | dprintk("x86: %x, model: %x\n", c->x86, c->x86_model); | 257 | pr_debug("x86: %x, model: %x\n", c->x86, c->x86_model); |
261 | 258 | ||
262 | if ((c->x86_vendor != X86_VENDOR_INTEL) || | 259 | if ((c->x86_vendor != X86_VENDOR_INTEL) || |
263 | ((c->x86 != 6) && (c->x86 != 0xF))) | 260 | ((c->x86 != 6) && (c->x86 != 0xF))) |
@@ -272,7 +269,7 @@ unsigned int speedstep_detect_processor(void) | |||
272 | ebx = cpuid_ebx(0x00000001); | 269 | ebx = cpuid_ebx(0x00000001); |
273 | ebx &= 0x000000FF; | 270 | ebx &= 0x000000FF; |
274 | 271 | ||
275 | dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); | 272 | pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); |
276 | 273 | ||
277 | switch (c->x86_mask) { | 274 | switch (c->x86_mask) { |
278 | case 4: | 275 | case 4: |
@@ -327,7 +324,7 @@ unsigned int speedstep_detect_processor(void) | |||
327 | /* cpuid_ebx(1) is 0x04 for desktop PIII, | 324 | /* cpuid_ebx(1) is 0x04 for desktop PIII, |
328 | * 0x06 for mobile PIII-M */ | 325 | * 0x06 for mobile PIII-M */ |
329 | ebx = cpuid_ebx(0x00000001); | 326 | ebx = cpuid_ebx(0x00000001); |
330 | dprintk("ebx is %x\n", ebx); | 327 | pr_debug("ebx is %x\n", ebx); |
331 | 328 | ||
332 | ebx &= 0x000000FF; | 329 | ebx &= 0x000000FF; |
333 | 330 | ||
@@ -344,7 +341,7 @@ unsigned int speedstep_detect_processor(void) | |||
344 | /* all mobile PIII Coppermines have FSB 100 MHz | 341 | /* all mobile PIII Coppermines have FSB 100 MHz |
345 | * ==> sort out a few desktop PIIIs. */ | 342 | * ==> sort out a few desktop PIIIs. */ |
346 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); | 343 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); |
347 | dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", | 344 | pr_debug("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", |
348 | msr_lo, msr_hi); | 345 | msr_lo, msr_hi); |
349 | msr_lo &= 0x00c0000; | 346 | msr_lo &= 0x00c0000; |
350 | if (msr_lo != 0x0080000) | 347 | if (msr_lo != 0x0080000) |
@@ -357,12 +354,12 @@ unsigned int speedstep_detect_processor(void) | |||
357 | * bit 56 or 57 is set | 354 | * bit 56 or 57 is set |
358 | */ | 355 | */ |
359 | rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); | 356 | rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); |
360 | dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", | 357 | pr_debug("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", |
361 | msr_lo, msr_hi); | 358 | msr_lo, msr_hi); |
362 | if ((msr_hi & (1<<18)) && | 359 | if ((msr_hi & (1<<18)) && |
363 | (relaxed_check ? 1 : (msr_hi & (3<<24)))) { | 360 | (relaxed_check ? 1 : (msr_hi & (3<<24)))) { |
364 | if (c->x86_mask == 0x01) { | 361 | if (c->x86_mask == 0x01) { |
365 | dprintk("early PIII version\n"); | 362 | pr_debug("early PIII version\n"); |
366 | return SPEEDSTEP_CPU_PIII_C_EARLY; | 363 | return SPEEDSTEP_CPU_PIII_C_EARLY; |
367 | } else | 364 | } else |
368 | return SPEEDSTEP_CPU_PIII_C; | 365 | return SPEEDSTEP_CPU_PIII_C; |
@@ -393,14 +390,14 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, | |||
393 | if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) | 390 | if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) |
394 | return -EINVAL; | 391 | return -EINVAL; |
395 | 392 | ||
396 | dprintk("trying to determine both speeds\n"); | 393 | pr_debug("trying to determine both speeds\n"); |
397 | 394 | ||
398 | /* get current speed */ | 395 | /* get current speed */ |
399 | prev_speed = speedstep_get_frequency(processor); | 396 | prev_speed = speedstep_get_frequency(processor); |
400 | if (!prev_speed) | 397 | if (!prev_speed) |
401 | return -EIO; | 398 | return -EIO; |
402 | 399 | ||
403 | dprintk("previous speed is %u\n", prev_speed); | 400 | pr_debug("previous speed is %u\n", prev_speed); |
404 | 401 | ||
405 | local_irq_save(flags); | 402 | local_irq_save(flags); |
406 | 403 | ||
@@ -412,7 +409,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, | |||
412 | goto out; | 409 | goto out; |
413 | } | 410 | } |
414 | 411 | ||
415 | dprintk("low speed is %u\n", *low_speed); | 412 | pr_debug("low speed is %u\n", *low_speed); |
416 | 413 | ||
417 | /* start latency measurement */ | 414 | /* start latency measurement */ |
418 | if (transition_latency) | 415 | if (transition_latency) |
@@ -431,7 +428,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, | |||
431 | goto out; | 428 | goto out; |
432 | } | 429 | } |
433 | 430 | ||
434 | dprintk("high speed is %u\n", *high_speed); | 431 | pr_debug("high speed is %u\n", *high_speed); |
435 | 432 | ||
436 | if (*low_speed == *high_speed) { | 433 | if (*low_speed == *high_speed) { |
437 | ret = -ENODEV; | 434 | ret = -ENODEV; |
@@ -445,7 +442,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, | |||
445 | if (transition_latency) { | 442 | if (transition_latency) { |
446 | *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + | 443 | *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + |
447 | tv2.tv_usec - tv1.tv_usec; | 444 | tv2.tv_usec - tv1.tv_usec; |
448 | dprintk("transition latency is %u uSec\n", *transition_latency); | 445 | pr_debug("transition latency is %u uSec\n", *transition_latency); |
449 | 446 | ||
450 | /* convert uSec to nSec and add 20% for safety reasons */ | 447 | /* convert uSec to nSec and add 20% for safety reasons */ |
451 | *transition_latency *= 1200; | 448 | *transition_latency *= 1200; |
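speedstep_get_freqs() brackets a forced low-to-high transition with two timestamps; the closing arithmetic folds the usec-to-nsec conversion and a 20% safety margin into one multiply by 1200. The same calculation as a standalone sketch:

    #include <linux/time.h>

    /* Sketch of the latency math above: microsecond delta, then
     * *1200 = *1000 (usec to nsec) with 20% headroom on top. */
    static unsigned int transition_latency_ns(struct timeval tv1,
                                              struct timeval tv2)
    {
            unsigned int us = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC +
                              tv2.tv_usec - tv1.tv_usec;

            return us * 1200;
    }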
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h index 70d9cea1219d..70d9cea1219d 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h +++ b/drivers/cpufreq/speedstep-lib.h | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index 91bc25b67bc1..c76ead3490bf 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c | |||
@@ -55,9 +55,6 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { | |||
55 | * of DMA activity going on? */ | 55 | * of DMA activity going on? */ |
56 | #define SMI_TRIES 5 | 56 | #define SMI_TRIES 5 |
57 | 57 | ||
58 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
59 | "speedstep-smi", msg) | ||
60 | |||
61 | /** | 58 | /** |
62 | * speedstep_smi_ownership | 59 | * speedstep_smi_ownership |
63 | */ | 60 | */ |
@@ -70,7 +67,7 @@ static int speedstep_smi_ownership(void) | |||
70 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | 67 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); |
71 | magic = virt_to_phys(magic_data); | 68 | magic = virt_to_phys(magic_data); |
72 | 69 | ||
73 | dprintk("trying to obtain ownership with command %x at port %x\n", | 70 | pr_debug("trying to obtain ownership with command %x at port %x\n", |
74 | command, smi_port); | 71 | command, smi_port); |
75 | 72 | ||
76 | __asm__ __volatile__( | 73 | __asm__ __volatile__( |
@@ -85,7 +82,7 @@ static int speedstep_smi_ownership(void) | |||
85 | : "memory" | 82 | : "memory" |
86 | ); | 83 | ); |
87 | 84 | ||
88 | dprintk("result is %x\n", result); | 85 | pr_debug("result is %x\n", result); |
89 | 86 | ||
90 | return result; | 87 | return result; |
91 | } | 88 | } |
@@ -106,13 +103,13 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) | |||
106 | u32 function = GET_SPEEDSTEP_FREQS; | 103 | u32 function = GET_SPEEDSTEP_FREQS; |
107 | 104 | ||
108 | if (!(ist_info.event & 0xFFFF)) { | 105 | if (!(ist_info.event & 0xFFFF)) { |
109 | dprintk("bug #1422 -- can't read freqs from BIOS\n"); | 106 | pr_debug("bug #1422 -- can't read freqs from BIOS\n"); |
110 | return -ENODEV; | 107 | return -ENODEV; |
111 | } | 108 | } |
112 | 109 | ||
113 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | 110 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); |
114 | 111 | ||
115 | dprintk("trying to determine frequencies with command %x at port %x\n", | 112 | pr_debug("trying to determine frequencies with command %x at port %x\n", |
116 | command, smi_port); | 113 | command, smi_port); |
117 | 114 | ||
118 | __asm__ __volatile__( | 115 | __asm__ __volatile__( |
@@ -129,7 +126,7 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) | |||
129 | "d" (smi_port), "S" (0), "D" (0) | 126 | "d" (smi_port), "S" (0), "D" (0) |
130 | ); | 127 | ); |
131 | 128 | ||
132 | dprintk("result %x, low_freq %u, high_freq %u\n", | 129 | pr_debug("result %x, low_freq %u, high_freq %u\n", |
133 | result, low_mhz, high_mhz); | 130 | result, low_mhz, high_mhz); |
134 | 131 | ||
135 | /* abort if results are obviously incorrect... */ | 132 | /* abort if results are obviously incorrect... */ |
@@ -154,7 +151,7 @@ static int speedstep_get_state(void) | |||
154 | 151 | ||
155 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | 152 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); |
156 | 153 | ||
157 | dprintk("trying to determine current setting with command %x " | 154 | pr_debug("trying to determine current setting with command %x " |
158 | "at port %x\n", command, smi_port); | 155 | "at port %x\n", command, smi_port); |
159 | 156 | ||
160 | __asm__ __volatile__( | 157 | __asm__ __volatile__( |
@@ -168,7 +165,7 @@ static int speedstep_get_state(void) | |||
168 | "d" (smi_port), "S" (0), "D" (0) | 165 | "d" (smi_port), "S" (0), "D" (0) |
169 | ); | 166 | ); |
170 | 167 | ||
171 | dprintk("state is %x, result is %x\n", state, result); | 168 | pr_debug("state is %x, result is %x\n", state, result); |
172 | 169 | ||
173 | return state & 1; | 170 | return state & 1; |
174 | } | 171 | } |
@@ -194,13 +191,13 @@ static void speedstep_set_state(unsigned int state) | |||
194 | 191 | ||
195 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | 192 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); |
196 | 193 | ||
197 | dprintk("trying to set frequency to state %u " | 194 | pr_debug("trying to set frequency to state %u " |
198 | "with command %x at port %x\n", | 195 | "with command %x at port %x\n", |
199 | state, command, smi_port); | 196 | state, command, smi_port); |
200 | 197 | ||
201 | do { | 198 | do { |
202 | if (retry) { | 199 | if (retry) { |
203 | dprintk("retry %u, previous result %u, waiting...\n", | 200 | pr_debug("retry %u, previous result %u, waiting...\n", |
204 | retry, result); | 201 | retry, result); |
205 | mdelay(retry * 50); | 202 | mdelay(retry * 50); |
206 | } | 203 | } |
@@ -221,7 +218,7 @@ static void speedstep_set_state(unsigned int state) | |||
221 | local_irq_restore(flags); | 218 | local_irq_restore(flags); |
222 | 219 | ||
223 | if (new_state == state) | 220 | if (new_state == state) |
224 | dprintk("change to %u MHz succeeded after %u tries " | 221 | pr_debug("change to %u MHz succeeded after %u tries " |
225 | "with result %u\n", | 222 | "with result %u\n", |
226 | (speedstep_freqs[new_state].frequency / 1000), | 223 | (speedstep_freqs[new_state].frequency / 1000), |
227 | retry, result); | 224 | retry, result); |
@@ -292,7 +289,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
292 | 289 | ||
293 | result = speedstep_smi_ownership(); | 290 | result = speedstep_smi_ownership(); |
294 | if (result) { | 291 | if (result) { |
295 | dprintk("fails in acquiring ownership of a SMI interface.\n"); | 292 | pr_debug("fails in acquiring ownership of a SMI interface.\n"); |
296 | return -EINVAL; | 293 | return -EINVAL; |
297 | } | 294 | } |
298 | 295 | ||
@@ -304,7 +301,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
304 | if (result) { | 301 | if (result) { |
305 | /* fall back to speedstep_lib.c dection mechanism: | 302 | /* fall back to speedstep_lib.c dection mechanism: |
306 | * try both states out */ | 303 | * try both states out */ |
307 | dprintk("could not detect low and high frequencies " | 304 | pr_debug("could not detect low and high frequencies " |
308 | "by SMI call.\n"); | 305 | "by SMI call.\n"); |
309 | result = speedstep_get_freqs(speedstep_processor, | 306 | result = speedstep_get_freqs(speedstep_processor, |
310 | low, high, | 307 | low, high, |
@@ -312,18 +309,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
312 | &speedstep_set_state); | 309 | &speedstep_set_state); |
313 | 310 | ||
314 | if (result) { | 311 | if (result) { |
315 | dprintk("could not detect two different speeds" | 312 | pr_debug("could not detect two different speeds" |
316 | " -- aborting.\n"); | 313 | " -- aborting.\n"); |
317 | return result; | 314 | return result; |
318 | } else | 315 | } else |
319 | dprintk("workaround worked.\n"); | 316 | pr_debug("workaround worked.\n"); |
320 | } | 317 | } |
321 | 318 | ||
322 | /* get current speed setting */ | 319 | /* get current speed setting */ |
323 | state = speedstep_get_state(); | 320 | state = speedstep_get_state(); |
324 | speed = speedstep_freqs[state].frequency; | 321 | speed = speedstep_freqs[state].frequency; |
325 | 322 | ||
326 | dprintk("currently at %s speed setting - %i MHz\n", | 323 | pr_debug("currently at %s speed setting - %i MHz\n", |
327 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) | 324 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) |
328 | ? "low" : "high", | 325 | ? "low" : "high", |
329 | (speed / 1000)); | 326 | (speed / 1000)); |
@@ -360,7 +357,7 @@ static int speedstep_resume(struct cpufreq_policy *policy) | |||
360 | int result = speedstep_smi_ownership(); | 357 | int result = speedstep_smi_ownership(); |
361 | 358 | ||
362 | if (result) | 359 | if (result) |
363 | dprintk("fails in re-acquiring ownership of a SMI interface.\n"); | 360 | pr_debug("fails in re-acquiring ownership of a SMI interface.\n"); |
364 | 361 | ||
365 | return result; | 362 | return result; |
366 | } | 363 | } |
@@ -403,12 +400,12 @@ static int __init speedstep_init(void) | |||
403 | } | 400 | } |
404 | 401 | ||
405 | if (!speedstep_processor) { | 402 | if (!speedstep_processor) { |
406 | dprintk("No supported Intel CPU detected.\n"); | 403 | pr_debug("No supported Intel CPU detected.\n"); |
407 | return -ENODEV; | 404 | return -ENODEV; |
408 | } | 405 | } |
409 | 406 | ||
410 | dprintk("signature:0x%.8lx, command:0x%.8lx, " | 407 | pr_debug("signature:0x%.8ulx, command:0x%.8ulx, " |
411 | "event:0x%.8lx, perf_level:0x%.8lx.\n", | 408 | "event:0x%.8ulx, perf_level:0x%.8ulx.\n", |
412 | ist_info.signature, ist_info.command, | 409 | ist_info.signature, ist_info.command, |
413 | ist_info.event, ist_info.perf_level); | 410 | ist_info.event, ist_info.perf_level); |
414 | 411 | ||
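Every BIOS call in speedstep-smi.c builds its command word the same way before the out instruction: the top 24 bits of the firmware-supplied signature are kept and the low byte is replaced by the sub-command. As a small sketch:

    #include <linux/types.h>

    /* Sketch: command word = signature[31:8] | sub-command[7:0]. */
    static u32 smi_command(u32 smi_sig, u32 smi_cmd)
    {
            return (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
    }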
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index c1f0045ceb8e..af8e7b1aa290 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c | |||
@@ -1019,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci, | |||
1019 | struct ppc4xx_edac_pdata *pdata = NULL; | 1019 | struct ppc4xx_edac_pdata *pdata = NULL; |
1020 | const struct device_node *np = op->dev.of_node; | 1020 | const struct device_node *np = op->dev.of_node; |
1021 | 1021 | ||
1022 | if (op->dev.of_match == NULL) | 1022 | if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL) |
1023 | return -EINVAL; | 1023 | return -EINVAL; |
1024 | 1024 | ||
1025 | /* Initial driver pointers and private data */ | 1025 | /* Initial driver pointers and private data */ |
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index b3a25a55ba23..efba163595db 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig | |||
@@ -157,4 +157,6 @@ config SIGMA | |||
157 | If unsure, say N here. Drivers that need these helpers will select | 157 | If unsure, say N here. Drivers that need these helpers will select |
158 | this option automatically. | 158 | this option automatically. |
159 | 159 | ||
160 | source "drivers/firmware/google/Kconfig" | ||
161 | |||
160 | endmenu | 162 | endmenu |
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 00bb0b80a79f..47338c979126 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile | |||
@@ -13,3 +13,5 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o | |||
13 | obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o | 13 | obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o |
14 | obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o | 14 | obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o |
15 | obj-$(CONFIG_SIGMA) += sigma.o | 15 | obj-$(CONFIG_SIGMA) += sigma.o |
16 | |||
17 | obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ | ||
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c index 96c25d93eed1..f1b7f659d3c9 100644 --- a/drivers/firmware/edd.c +++ b/drivers/firmware/edd.c | |||
@@ -531,8 +531,8 @@ static int | |||
531 | edd_has_edd30(struct edd_device *edev) | 531 | edd_has_edd30(struct edd_device *edev) |
532 | { | 532 | { |
533 | struct edd_info *info; | 533 | struct edd_info *info; |
534 | int i, nonzero_path = 0; | 534 | int i; |
535 | char c; | 535 | u8 csum = 0; |
536 | 536 | ||
537 | if (!edev) | 537 | if (!edev) |
538 | return 0; | 538 | return 0; |
@@ -544,16 +544,16 @@ edd_has_edd30(struct edd_device *edev) | |||
544 | return 0; | 544 | return 0; |
545 | } | 545 | } |
546 | 546 | ||
547 | for (i = 30; i <= 73; i++) { | 547 | |
548 | c = *(((uint8_t *) info) + i + 4); | 548 | /* We support only T13 spec */ |
549 | if (c) { | 549 | if (info->params.device_path_info_length != 44) |
550 | nonzero_path++; | 550 | return 0; |
551 | break; | 551 | |
552 | } | 552 | for (i = 30; i < info->params.device_path_info_length + 30; i++) |
553 | } | 553 | csum += *(((u8 *)&info->params) + i); |
554 | if (!nonzero_path) { | 554 | |
555 | if (csum) | ||
555 | return 0; | 556 | return 0; |
556 | } | ||
557 | 557 | ||
558 | return 1; | 558 | return 1; |
559 | } | 559 | } |
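The rewritten edd_has_edd30() replaces the old "any nonzero byte in offsets 30..73" heuristic with a genuine integrity check: the T13 layout fixes the device path information at 44 bytes, and those bytes, checksum byte included, must sum to zero modulo 256. A hedged standalone sketch of the same test:

    #include <linux/types.h>

    /* Sketch, assuming params points at the raw EDD parameter block:
     * offsets 30..73 hold the device path info plus its checksum byte,
     * and a valid T13 structure makes the byte sum come out to zero. */
    static int edd30_path_valid(const u8 *params, u8 path_info_len)
    {
            u8 csum = 0;
            unsigned int i;

            if (path_info_len != 44)        /* only the T13 layout */
                    return 0;
            for (i = 30; i < 30u + path_info_len; i++)
                    csum += params[i];
            return csum == 0;
    }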
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index ff0c373e3bbf..a2d2f1f0d4f3 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c | |||
@@ -677,8 +677,8 @@ create_efivars_bin_attributes(struct efivars *efivars) | |||
677 | 677 | ||
678 | return 0; | 678 | return 0; |
679 | out_free: | 679 | out_free: |
680 | kfree(efivars->new_var); | 680 | kfree(efivars->del_var); |
681 | efivars->new_var = NULL; | 681 | efivars->del_var = NULL; |
682 | kfree(efivars->new_var); | 682 | kfree(efivars->new_var); |
683 | efivars->new_var = NULL; | 683 | efivars->new_var = NULL; |
684 | return error; | 684 | return error; |
@@ -803,6 +803,8 @@ efivars_init(void) | |||
803 | ops.set_variable = efi.set_variable; | 803 | ops.set_variable = efi.set_variable; |
804 | ops.get_next_variable = efi.get_next_variable; | 804 | ops.get_next_variable = efi.get_next_variable; |
805 | error = register_efivars(&__efivars, &ops, efi_kobj); | 805 | error = register_efivars(&__efivars, &ops, efi_kobj); |
806 | if (error) | ||
807 | goto err_put; | ||
806 | 808 | ||
807 | /* Don't forget the systab entry */ | 809 | /* Don't forget the systab entry */ |
808 | error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group); | 810 | error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group); |
@@ -810,18 +812,25 @@ efivars_init(void) | |||
810 | printk(KERN_ERR | 812 | printk(KERN_ERR |
811 | "efivars: Sysfs attribute export failed with error %d.\n", | 813 | "efivars: Sysfs attribute export failed with error %d.\n", |
812 | error); | 814 | error); |
813 | unregister_efivars(&__efivars); | 815 | goto err_unregister; |
814 | kobject_put(efi_kobj); | ||
815 | } | 816 | } |
816 | 817 | ||
818 | return 0; | ||
819 | |||
820 | err_unregister: | ||
821 | unregister_efivars(&__efivars); | ||
822 | err_put: | ||
823 | kobject_put(efi_kobj); | ||
817 | return error; | 824 | return error; |
818 | } | 825 | } |
819 | 826 | ||
820 | static void __exit | 827 | static void __exit |
821 | efivars_exit(void) | 828 | efivars_exit(void) |
822 | { | 829 | { |
823 | unregister_efivars(&__efivars); | 830 | if (efi_enabled) { |
824 | kobject_put(efi_kobj); | 831 | unregister_efivars(&__efivars); |
832 | kobject_put(efi_kobj); | ||
833 | } | ||
825 | } | 834 | } |
826 | 835 | ||
827 | module_init(efivars_init); | 836 | module_init(efivars_init); |
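The efivars_init() change is a textbook goto-unwind fix: the result of register_efivars() is no longer ignored, and each failure label now undoes exactly the steps that had already succeeded, in reverse order. The pattern in miniature, with hypothetical step names standing in for register_efivars()/sysfs_create_group():

    extern int step_a(void), step_b(void);     /* hypothetical helpers */
    extern void undo_a(void), drop_ref(void);

    static int init_sketch(void)
    {
            int err;

            err = step_a();
            if (err)
                    goto err_put;
            err = step_b();
            if (err)
                    goto err_undo_a;
            return 0;

    err_undo_a:
            undo_a();
    err_put:
            drop_ref();                        /* kobject_put() analogue */
            return err;
    }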
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig new file mode 100644 index 000000000000..87096b6ca5c9 --- /dev/null +++ b/drivers/firmware/google/Kconfig | |||
@@ -0,0 +1,31 @@ | |||
1 | config GOOGLE_FIRMWARE | ||
2 | bool "Google Firmware Drivers" | ||
3 | depends on X86 | ||
4 | default n | ||
5 | help | ||
6 | These firmware drivers are used by Google's servers. They are | ||
7 | only useful if you are working directly on one of their | ||
8 | proprietary servers. If in doubt, say "N". | ||
9 | |||
10 | menu "Google Firmware Drivers" | ||
11 | depends on GOOGLE_FIRMWARE | ||
12 | |||
13 | config GOOGLE_SMI | ||
14 | tristate "SMI interface for Google platforms" | ||
15 | depends on ACPI && DMI | ||
16 | select EFI_VARS | ||
17 | help | ||
18 | Say Y here if you want to enable SMI callbacks for Google | ||
19 | platforms. This provides an interface for writing to and | ||
20 | clearing the EFI event log and reading and writing NVRAM | ||
21 | variables. | ||
22 | |||
23 | config GOOGLE_MEMCONSOLE | ||
24 | tristate "Firmware Memory Console" | ||
25 | depends on DMI | ||
26 | help | ||
27 | This option enables the kernel to search for a firmware log in | ||
28 | the EBDA on Google servers. If found, this log is exported to | ||
29 | userland in the file /sys/firmware/log. | ||
30 | |||
31 | endmenu | ||
diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile new file mode 100644 index 000000000000..54a294e3cb61 --- /dev/null +++ b/drivers/firmware/google/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | |||
2 | obj-$(CONFIG_GOOGLE_SMI) += gsmi.o | ||
3 | obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o | ||
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c new file mode 100644 index 000000000000..fa7f0b3e81dd --- /dev/null +++ b/drivers/firmware/google/gsmi.c | |||
@@ -0,0 +1,940 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Google Inc. All Rights Reserved. | ||
3 | * Author: dlaurie@google.com (Duncan Laurie) | ||
4 | * | ||
5 | * Re-worked to expose sysfs APIs by mikew@google.com (Mike Waychison) | ||
6 | * | ||
7 | * EFI SMI interface for Google platforms | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/dma-mapping.h> | ||
19 | #include <linux/dmapool.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/ioctl.h> | ||
23 | #include <linux/acpi.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <linux/dmi.h> | ||
27 | #include <linux/kdebug.h> | ||
28 | #include <linux/reboot.h> | ||
29 | #include <linux/efi.h> | ||
30 | |||
31 | #define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */ | ||
32 | /* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */ | ||
33 | #define GSMI_SHUTDOWN_NMIWDT 1 /* NMI Watchdog */ | ||
34 | #define GSMI_SHUTDOWN_PANIC 2 /* Panic */ | ||
35 | #define GSMI_SHUTDOWN_OOPS 3 /* Oops */ | ||
36 | #define GSMI_SHUTDOWN_DIE 4 /* Die -- No longer meaningful */ | ||
37 | #define GSMI_SHUTDOWN_MCE 5 /* Machine Check */ | ||
38 | #define GSMI_SHUTDOWN_SOFTWDT 6 /* Software Watchdog */ | ||
39 | #define GSMI_SHUTDOWN_MBE 7 /* Uncorrected ECC */ | ||
40 | #define GSMI_SHUTDOWN_TRIPLE 8 /* Triple Fault */ | ||
41 | |||
42 | #define DRIVER_VERSION "1.0" | ||
43 | #define GSMI_GUID_SIZE 16 | ||
44 | #define GSMI_BUF_SIZE 1024 | ||
45 | #define GSMI_BUF_ALIGN sizeof(u64) | ||
46 | #define GSMI_CALLBACK 0xef | ||
47 | |||
48 | /* SMI return codes */ | ||
49 | #define GSMI_SUCCESS 0x00 | ||
50 | #define GSMI_UNSUPPORTED2 0x03 | ||
51 | #define GSMI_LOG_FULL 0x0b | ||
52 | #define GSMI_VAR_NOT_FOUND 0x0e | ||
53 | #define GSMI_HANDSHAKE_SPIN 0x7d | ||
54 | #define GSMI_HANDSHAKE_CF 0x7e | ||
55 | #define GSMI_HANDSHAKE_NONE 0x7f | ||
56 | #define GSMI_INVALID_PARAMETER 0x82 | ||
57 | #define GSMI_UNSUPPORTED 0x83 | ||
58 | #define GSMI_BUFFER_TOO_SMALL 0x85 | ||
59 | #define GSMI_NOT_READY 0x86 | ||
60 | #define GSMI_DEVICE_ERROR 0x87 | ||
61 | #define GSMI_NOT_FOUND 0x8e | ||
62 | |||
63 | #define QUIRKY_BOARD_HASH 0x78a30a50 | ||
64 | |||
65 | /* Internally used commands passed to the firmware */ | ||
66 | #define GSMI_CMD_GET_NVRAM_VAR 0x01 | ||
67 | #define GSMI_CMD_GET_NEXT_VAR 0x02 | ||
68 | #define GSMI_CMD_SET_NVRAM_VAR 0x03 | ||
69 | #define GSMI_CMD_SET_EVENT_LOG 0x08 | ||
70 | #define GSMI_CMD_CLEAR_EVENT_LOG 0x09 | ||
71 | #define GSMI_CMD_CLEAR_CONFIG 0x20 | ||
72 | #define GSMI_CMD_HANDSHAKE_TYPE 0xC1 | ||
73 | |||
74 | /* Magic entry type for kernel events */ | ||
75 | #define GSMI_LOG_ENTRY_TYPE_KERNEL 0xDEAD | ||
76 | |||
77 | /* SMI buffers must be in 32bit physical address space */ | ||
78 | struct gsmi_buf { | ||
79 | u8 *start; /* start of buffer */ | ||
80 | size_t length; /* length of buffer */ | ||
81 | dma_addr_t handle; /* dma allocation handle */ | ||
82 | u32 address; /* physical address of buffer */ | ||
83 | }; | ||
84 | |||
85 | struct gsmi_device { | ||
86 | struct platform_device *pdev; /* platform device */ | ||
87 | struct gsmi_buf *name_buf; /* variable name buffer */ | ||
88 | struct gsmi_buf *data_buf; /* generic data buffer */ | ||
89 | struct gsmi_buf *param_buf; /* parameter buffer */ | ||
90 | spinlock_t lock; /* serialize access to SMIs */ | ||
91 | u16 smi_cmd; /* SMI command port */ | ||
92 | int handshake_type; /* firmware handler interlock type */ | ||
93 | struct dma_pool *dma_pool; /* DMA buffer pool */ | ||
94 | } gsmi_dev; | ||
95 | |||
96 | /* Packed structures for communicating with the firmware */ | ||
97 | struct gsmi_nvram_var_param { | ||
98 | efi_guid_t guid; | ||
99 | u32 name_ptr; | ||
100 | u32 attributes; | ||
101 | u32 data_len; | ||
102 | u32 data_ptr; | ||
103 | } __packed; | ||
104 | |||
105 | struct gsmi_get_next_var_param { | ||
106 | u8 guid[GSMI_GUID_SIZE]; | ||
107 | u32 name_ptr; | ||
108 | u32 name_len; | ||
109 | } __packed; | ||
110 | |||
111 | struct gsmi_set_eventlog_param { | ||
112 | u32 data_ptr; | ||
113 | u32 data_len; | ||
114 | u32 type; | ||
115 | } __packed; | ||
116 | |||
117 | /* Event log formats */ | ||
118 | struct gsmi_log_entry_type_1 { | ||
119 | u16 type; | ||
120 | u32 instance; | ||
121 | } __packed; | ||
122 | |||
123 | |||
124 | /* | ||
125 | * Some platforms don't have explicit SMI handshake | ||
126 | * and need to wait for SMI to complete. | ||
127 | */ | ||
128 | #define GSMI_DEFAULT_SPINCOUNT 0x10000 | ||
129 | static unsigned int spincount = GSMI_DEFAULT_SPINCOUNT; | ||
130 | module_param(spincount, uint, 0600); | ||
131 | MODULE_PARM_DESC(spincount, | ||
132 | "The number of loop iterations to use when using the spin handshake."); | ||
133 | |||
134 | static struct gsmi_buf *gsmi_buf_alloc(void) | ||
135 | { | ||
136 | struct gsmi_buf *smibuf; | ||
137 | |||
138 | smibuf = kzalloc(sizeof(*smibuf), GFP_KERNEL); | ||
139 | if (!smibuf) { | ||
140 | printk(KERN_ERR "gsmi: out of memory\n"); | ||
141 | return NULL; | ||
142 | } | ||
143 | |||
144 | /* allocate buffer in 32bit address space */ | ||
145 | smibuf->start = dma_pool_alloc(gsmi_dev.dma_pool, GFP_KERNEL, | ||
146 | &smibuf->handle); | ||
147 | if (!smibuf->start) { | ||
148 | printk(KERN_ERR "gsmi: failed to allocate name buffer\n"); | ||
149 | kfree(smibuf); | ||
150 | return NULL; | ||
151 | } | ||
152 | |||
153 | /* fill in the buffer handle */ | ||
154 | smibuf->length = GSMI_BUF_SIZE; | ||
155 | smibuf->address = (u32)virt_to_phys(smibuf->start); | ||
156 | |||
157 | return smibuf; | ||
158 | } | ||
159 | |||
160 | static void gsmi_buf_free(struct gsmi_buf *smibuf) | ||
161 | { | ||
162 | if (smibuf) { | ||
163 | if (smibuf->start) | ||
164 | dma_pool_free(gsmi_dev.dma_pool, smibuf->start, | ||
165 | smibuf->handle); | ||
166 | kfree(smibuf); | ||
167 | } | ||
168 | } | ||
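gsmi_buf_alloc() draws from a DMA pool rather than kmalloc() so that every buffer is guaranteed a physical address that fits the 32-bit address field the SMI handler consumes. A hedged sketch of how that pool would be created during device setup (the actual call happens later in gsmi_init(), past the end of this excerpt):

    /* Assumed setup: GSMI_BUF_SIZE blocks, 8-byte aligned, tied to the
     * platform device; dma_pool_alloc() then hands out SMI-safe buffers. */
    gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
                                        GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
    if (!gsmi_dev.dma_pool)
            return -ENOMEM;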
169 | |||
170 | /* | ||
171 | * Make a call to gsmi func(sub). GSMI error codes are translated to | ||
172 | * in-kernel errnos (0 on success, -ERRNO on error). | ||
173 | */ | ||
174 | static int gsmi_exec(u8 func, u8 sub) | ||
175 | { | ||
176 | u16 cmd = (sub << 8) | func; | ||
177 | u16 result = 0; | ||
178 | int rc = 0; | ||
179 | |||
180 | /* | ||
181 | * AH : Subfunction number | ||
182 | * AL : Function number | ||
183 | * EBX : Parameter block address | ||
184 | * DX : SMI command port | ||
185 | * | ||
186 | * Three protocols here. See also the comment in gsmi_init(). | ||
187 | */ | ||
188 | if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_CF) { | ||
189 | /* | ||
190 | * If handshake_type == HANDSHAKE_CF then set CF on the | ||
191 | * way in and wait for the handler to clear it; this avoids | ||
192 | * corrupting register state on those chipsets which have | ||
193 | * a delay between writing the SMI trigger register and | ||
194 | * entering SMM. | ||
195 | */ | ||
196 | asm volatile ( | ||
197 | "stc\n" | ||
198 | "outb %%al, %%dx\n" | ||
199 | "1: jc 1b\n" | ||
200 | : "=a" (result) | ||
201 | : "0" (cmd), | ||
202 | "d" (gsmi_dev.smi_cmd), | ||
203 | "b" (gsmi_dev.param_buf->address) | ||
204 | : "memory", "cc" | ||
205 | ); | ||
206 | } else if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_SPIN) { | ||
207 | /* | ||
208 | * If handshake_type == HANDSHAKE_SPIN we spin a | ||
209 | * hundred-ish usecs to ensure the SMI has triggered. | ||
210 | */ | ||
211 | asm volatile ( | ||
212 | "outb %%al, %%dx\n" | ||
213 | "1: loop 1b\n" | ||
214 | : "=a" (result) | ||
215 | : "0" (cmd), | ||
216 | "d" (gsmi_dev.smi_cmd), | ||
217 | "b" (gsmi_dev.param_buf->address), | ||
218 | "c" (spincount) | ||
219 | : "memory", "cc" | ||
220 | ); | ||
221 | } else { | ||
222 | /* | ||
223 | * If handshake_type == HANDSHAKE_NONE we do nothing; | ||
224 | * either we don't need to or it's legacy firmware that | ||
225 | * doesn't understand the CF protocol. | ||
226 | */ | ||
227 | asm volatile ( | ||
228 | "outb %%al, %%dx\n\t" | ||
229 | : "=a" (result) | ||
230 | : "0" (cmd), | ||
231 | "d" (gsmi_dev.smi_cmd), | ||
232 | "b" (gsmi_dev.param_buf->address) | ||
233 | : "memory", "cc" | ||
234 | ); | ||
235 | } | ||
236 | |||
237 | /* check return code from SMI handler */ | ||
238 | switch (result) { | ||
239 | case GSMI_SUCCESS: | ||
240 | break; | ||
241 | case GSMI_VAR_NOT_FOUND: | ||
242 | /* not really an error, but let the caller know */ | ||
243 | rc = 1; | ||
244 | break; | ||
245 | case GSMI_INVALID_PARAMETER: | ||
246 | printk(KERN_ERR "gsmi: exec 0x%04x: Invalid parameter\n", cmd); | ||
247 | rc = -EINVAL; | ||
248 | break; | ||
249 | case GSMI_BUFFER_TOO_SMALL: | ||
250 | printk(KERN_ERR "gsmi: exec 0x%04x: Buffer too small\n", cmd); | ||
251 | rc = -ENOMEM; | ||
252 | break; | ||
253 | case GSMI_UNSUPPORTED: | ||
254 | case GSMI_UNSUPPORTED2: | ||
255 | if (sub != GSMI_CMD_HANDSHAKE_TYPE) | ||
256 | printk(KERN_ERR "gsmi: exec 0x%04x: Not supported\n", | ||
257 | cmd); | ||
258 | rc = -ENOSYS; | ||
259 | break; | ||
260 | case GSMI_NOT_READY: | ||
261 | printk(KERN_ERR "gsmi: exec 0x%04x: Not ready\n", cmd); | ||
262 | rc = -EBUSY; | ||
263 | break; | ||
264 | case GSMI_DEVICE_ERROR: | ||
265 | printk(KERN_ERR "gsmi: exec 0x%04x: Device error\n", cmd); | ||
266 | rc = -EFAULT; | ||
267 | break; | ||
268 | case GSMI_NOT_FOUND: | ||
269 | printk(KERN_ERR "gsmi: exec 0x%04x: Data not found\n", cmd); | ||
270 | rc = -ENOENT; | ||
271 | break; | ||
272 | case GSMI_LOG_FULL: | ||
273 | printk(KERN_ERR "gsmi: exec 0x%04x: Log full\n", cmd); | ||
274 | rc = -ENOSPC; | ||
275 | break; | ||
276 | case GSMI_HANDSHAKE_CF: | ||
277 | case GSMI_HANDSHAKE_SPIN: | ||
278 | case GSMI_HANDSHAKE_NONE: | ||
279 | rc = result; | ||
280 | break; | ||
281 | default: | ||
282 | printk(KERN_ERR "gsmi: exec 0x%04x: Unknown error 0x%04x\n", | ||
283 | cmd, result); | ||
284 | rc = -ENXIO; | ||
285 | } | ||
286 | |||
287 | return rc; | ||
288 | } | ||
289 | |||
290 | /* Return the number of unicode characters in data */ | ||
291 | static size_t | ||
292 | utf16_strlen(efi_char16_t *data, unsigned long maxlength) | ||
293 | { | ||
294 | unsigned long length = 0; | ||
295 | |||
296 | while (*data++ != 0 && length < maxlength) | ||
297 | length++; | ||
298 | return length; | ||
299 | } | ||
300 | |||
301 | static efi_status_t gsmi_get_variable(efi_char16_t *name, | ||
302 | efi_guid_t *vendor, u32 *attr, | ||
303 | unsigned long *data_size, | ||
304 | void *data) | ||
305 | { | ||
306 | struct gsmi_nvram_var_param param = { | ||
307 | .name_ptr = gsmi_dev.name_buf->address, | ||
308 | .data_ptr = gsmi_dev.data_buf->address, | ||
309 | .data_len = (u32)*data_size, | ||
310 | }; | ||
311 | efi_status_t ret = EFI_SUCCESS; | ||
312 | unsigned long flags; | ||
313 | size_t name_len = utf16_strlen(name, GSMI_BUF_SIZE / 2); | ||
314 | int rc; | ||
315 | |||
316 | if (name_len >= GSMI_BUF_SIZE / 2) | ||
317 | return EFI_BAD_BUFFER_SIZE; | ||
318 | |||
319 | spin_lock_irqsave(&gsmi_dev.lock, flags); | ||
320 | |||
321 | /* Vendor guid */ | ||
322 | memcpy(¶m.guid, vendor, sizeof(param.guid)); | ||
323 | |||
324 | /* variable name, already in UTF-16 */ | ||
325 | memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length); | ||
326 | memcpy(gsmi_dev.name_buf->start, name, name_len * 2); | ||
327 | |||
328 | /* data pointer */ | ||
329 | memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); | ||
330 | |||
331 | /* parameter buffer */ | ||
332 | memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); | ||
333 | memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); | ||
334 | |||
335 | rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NVRAM_VAR); | ||
336 | if (rc < 0) { | ||
337 | printk(KERN_ERR "gsmi: Get Variable failed\n"); | ||
338 | ret = EFI_LOAD_ERROR; | ||
339 | } else if (rc == 1) { | ||
340 | /* variable was not found */ | ||
341 | ret = EFI_NOT_FOUND; | ||
342 | } else { | ||
343 | /* Get the arguments back */ | ||
344 | memcpy(¶m, gsmi_dev.param_buf->start, sizeof(param)); | ||
345 | |||
346 | /* The size reported is the min of all of our buffers */ | ||
347 | *data_size = min(*data_size, gsmi_dev.data_buf->length); | ||
348 | *data_size = min_t(unsigned long, *data_size, param.data_len); | ||
349 | |||
350 | /* Copy data back to return buffer. */ | ||
351 | memcpy(data, gsmi_dev.data_buf->start, *data_size); | ||
352 | |||
353 | /* All variables have the following attributes */ | ||
354 | *attr = EFI_VARIABLE_NON_VOLATILE | | ||
355 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
356 | EFI_VARIABLE_RUNTIME_ACCESS; | ||
357 | } | ||
358 | |||
359 | spin_unlock_irqrestore(&gsmi_dev.lock, flags); | ||
360 | |||
361 | return ret; | ||
362 | } | ||
363 | |||
364 | static efi_status_t gsmi_get_next_variable(unsigned long *name_size, | ||
365 | efi_char16_t *name, | ||
366 | efi_guid_t *vendor) | ||
367 | { | ||
368 | struct gsmi_get_next_var_param param = { | ||
369 | .name_ptr = gsmi_dev.name_buf->address, | ||
370 | .name_len = gsmi_dev.name_buf->length, | ||
371 | }; | ||
372 | efi_status_t ret = EFI_SUCCESS; | ||
373 | int rc; | ||
374 | unsigned long flags; | ||
375 | |||
376 | /* For the moment, only support buffers that exactly match in size */ | ||
377 | if (*name_size != GSMI_BUF_SIZE) | ||
378 | return EFI_BAD_BUFFER_SIZE; | ||
379 | |||
380 | /* Let's make sure the thing is at least null-terminated */ | ||
381 | if (utf16_strlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2) | ||
382 | return EFI_INVALID_PARAMETER; | ||
383 | |||
384 | spin_lock_irqsave(&gsmi_dev.lock, flags); | ||
385 | |||
386 | /* guid */ | ||
387 | memcpy(¶m.guid, vendor, sizeof(param.guid)); | ||
388 | |||
389 | /* variable name, already in UTF-16 */ | ||
390 | memcpy(gsmi_dev.name_buf->start, name, *name_size); | ||
391 | |||
392 | /* parameter buffer */ | ||
393 | memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); | ||
394 | memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); | ||
395 | |||
396 | rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NEXT_VAR); | ||
397 | if (rc < 0) { | ||
398 | printk(KERN_ERR "gsmi: Get Next Variable Name failed\n"); | ||
399 | ret = EFI_LOAD_ERROR; | ||
400 | } else if (rc == 1) { | ||
401 | /* variable not found -- end of list */ | ||
402 | ret = EFI_NOT_FOUND; | ||
403 | } else { | ||
404 | /* copy variable data back to return buffer */ | ||
405 | memcpy(¶m, gsmi_dev.param_buf->start, sizeof(param)); | ||
406 | |||
407 | /* Copy the name back */ | ||
408 | memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE); | ||
409 | *name_size = utf16_strlen(name, GSMI_BUF_SIZE / 2) * 2; | ||
410 | |||
411 | /* copy guid to return buffer */ | ||
412 | memcpy(vendor, ¶m.guid, sizeof(param.guid)); | ||
413 | ret = EFI_SUCCESS; | ||
414 | } | ||
415 | |||
416 | spin_unlock_irqrestore(&gsmi_dev.lock, flags); | ||
417 | |||
418 | return ret; | ||
419 | } | ||
420 | |||
421 | static efi_status_t gsmi_set_variable(efi_char16_t *name, | ||
422 | efi_guid_t *vendor, | ||
423 | unsigned long attr, | ||
424 | unsigned long data_size, | ||
425 | void *data) | ||
426 | { | ||
427 | struct gsmi_nvram_var_param param = { | ||
428 | .name_ptr = gsmi_dev.name_buf->address, | ||
429 | .data_ptr = gsmi_dev.data_buf->address, | ||
430 | .data_len = (u32)data_size, | ||
431 | .attributes = EFI_VARIABLE_NON_VOLATILE | | ||
432 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
433 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
434 | }; | ||
435 | size_t name_len = utf16_strlen(name, GSMI_BUF_SIZE / 2); | ||
436 | efi_status_t ret = EFI_SUCCESS; | ||
437 | int rc; | ||
438 | unsigned long flags; | ||
439 | |||
440 | if (name_len >= GSMI_BUF_SIZE / 2) | ||
441 | return EFI_BAD_BUFFER_SIZE; | ||
442 | |||
443 | spin_lock_irqsave(&gsmi_dev.lock, flags); | ||
444 | |||
445 | /* guid */ | ||
446 | memcpy(¶m.guid, vendor, sizeof(param.guid)); | ||
447 | |||
448 | /* variable name, already in UTF-16 */ | ||
449 | memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length); | ||
450 | memcpy(gsmi_dev.name_buf->start, name, name_len * 2); | ||
451 | |||
452 | /* data pointer */ | ||
453 | memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); | ||
454 | memcpy(gsmi_dev.data_buf->start, data, data_size); | ||
455 | |||
456 | /* parameter buffer */ | ||
457 | memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); | ||
458 | memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); | ||
459 | |||
460 | rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_NVRAM_VAR); | ||
461 | if (rc < 0) { | ||
462 | printk(KERN_ERR "gsmi: Set Variable failed\n"); | ||
463 | ret = EFI_INVALID_PARAMETER; | ||
464 | } | ||
465 | |||
466 | spin_unlock_irqrestore(&gsmi_dev.lock, flags); | ||
467 | |||
468 | return ret; | ||
469 | } | ||
470 | |||
471 | static const struct efivar_operations efivar_ops = { | ||
472 | .get_variable = gsmi_get_variable, | ||
473 | .set_variable = gsmi_set_variable, | ||
474 | .get_next_variable = gsmi_get_next_variable, | ||
475 | }; | ||
476 | |||
477 | static ssize_t eventlog_write(struct file *filp, struct kobject *kobj, | ||
478 | struct bin_attribute *bin_attr, | ||
479 | char *buf, loff_t pos, size_t count) | ||
480 | { | ||
481 | struct gsmi_set_eventlog_param param = { | ||
482 | .data_ptr = gsmi_dev.data_buf->address, | ||
483 | }; | ||
484 | int rc = 0; | ||
485 | unsigned long flags; | ||
486 | |||
487 | /* Pull the type out */ | ||
488 | if (count < sizeof(u32)) | ||
489 | return -EINVAL; | ||
490 | param.type = *(u32 *)buf; | ||
491 | count -= sizeof(u32); | ||
492 | buf += sizeof(u32); | ||
493 | |||
494 | /* The remaining buffer is the data payload */ | ||
495 | if (count > gsmi_dev.data_buf->length) | ||
496 | return -EINVAL; | ||
497 | param.data_len = count; | ||
498 | |||
499 | spin_lock_irqsave(&gsmi_dev.lock, flags); | ||
500 | |||
501 | /* data pointer */ | ||
502 | memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); | ||
503 | memcpy(gsmi_dev.data_buf->start, buf, param.data_len); | ||
504 | |||
505 | /* parameter buffer */ | ||
506 | memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); | ||
507 | memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); | ||
508 | |||
509 | rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG); | ||
510 | if (rc < 0) | ||
511 | printk(KERN_ERR "gsmi: Set Event Log failed\n"); | ||
512 | |||
513 | spin_unlock_irqrestore(&gsmi_dev.lock, flags); | ||
514 | |||
515 | return rc; | ||
516 | |||
517 | } | ||
518 | |||
519 | static struct bin_attribute eventlog_bin_attr = { | ||
520 | .attr = {.name = "append_to_eventlog", .mode = 0200}, | ||
521 | .write = eventlog_write, | ||
522 | }; | ||
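eventlog_write() defines a small binary protocol for the append_to_eventlog node: the first u32 of the write selects the log entry type and the rest is the payload, sent in a single write(). A hypothetical userspace sketch (the sysfs path is assumed; it is not spelled out in this excerpt):

    #include <stdint.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Hypothetical example: prepend the u32 type, then push type and
     * payload together, matching what eventlog_write() parses. */
    static int append_event(uint32_t type, const void *data, size_t len)
    {
            char buf[1024];
            ssize_t n;
            int fd;

            if (len > sizeof(buf) - sizeof(type))
                    return -1;
            fd = open("/sys/firmware/gsmi/append_to_eventlog", O_WRONLY);
            if (fd < 0)
                    return -1;
            memcpy(buf, &type, sizeof(type));
            memcpy(buf + sizeof(type), data, len);
            n = write(fd, buf, sizeof(type) + len);
            close(fd);
            return n < 0 ? -1 : 0;
    }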
523 | |||
524 | static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj, | ||
525 | struct kobj_attribute *attr, | ||
526 | const char *buf, size_t count) | ||
527 | { | ||
528 | int rc; | ||
529 | unsigned long flags; | ||
530 | unsigned long val; | ||
531 | struct { | ||
532 | u32 percentage; | ||
533 | u32 data_type; | ||
534 | } param; | ||
535 | |||
536 | rc = strict_strtoul(buf, 0, &val); | ||
537 | if (rc) | ||
538 | return rc; | ||
539 | |||
540 | /* | ||
541 | * Value entered is a percentage, 0 through 100, anything else | ||
542 | * is invalid. | ||
543 | */ | ||
544 | if (val > 100) | ||
545 | return -EINVAL; | ||
546 | |||
547 | /* data_type here selects the smbios event log. */ | ||
548 | param.percentage = val; | ||
549 | param.data_type = 0; | ||
550 | |||
551 | spin_lock_irqsave(&gsmi_dev.lock, flags); | ||
552 | |||
553 | /* parameter buffer */ | ||
554 | memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); | ||
555 | memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); | ||
556 | |||
557 | rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_EVENT_LOG); | ||
558 | |||
559 | spin_unlock_irqrestore(&gsmi_dev.lock, flags); | ||
560 | |||
561 | if (rc) | ||
562 | return rc; | ||
563 | return count; | ||
564 | } | ||
565 | |||
566 | static struct kobj_attribute gsmi_clear_eventlog_attr = { | ||
567 | .attr = {.name = "clear_eventlog", .mode = 0200}, | ||
568 | .store = gsmi_clear_eventlog_store, | ||
569 | }; | ||
570 | |||
571 | static ssize_t gsmi_clear_config_store(struct kobject *kobj, | ||
572 | struct kobj_attribute *attr, | ||
573 | const char *buf, size_t count) | ||
574 | { | ||
575 | int rc; | ||
576 | unsigned long flags; | ||
577 | |||
578 | spin_lock_irqsave(&gsmi_dev.lock, flags); | ||
579 | |||
580 | /* clear parameter buffer */ | ||
581 | memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); | ||
582 | |||
583 | rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_CONFIG); | ||
584 | |||
585 | spin_unlock_irqrestore(&gsmi_dev.lock, flags); | ||
586 | |||
587 | if (rc) | ||
588 | return rc; | ||
589 | return count; | ||
590 | } | ||
591 | |||
592 | static struct kobj_attribute gsmi_clear_config_attr = { | ||
593 | .attr = {.name = "clear_config", .mode = 0200}, | ||
594 | .store = gsmi_clear_config_store, | ||
595 | }; | ||
596 | |||
597 | static const struct attribute *gsmi_attrs[] = { | ||
598 | &gsmi_clear_config_attr.attr, | ||
599 | &gsmi_clear_eventlog_attr.attr, | ||
600 | NULL, | ||
601 | }; | ||
602 | |||
603 | static int gsmi_shutdown_reason(int reason) | ||
604 | { | ||
605 | struct gsmi_log_entry_type_1 entry = { | ||
606 | .type = GSMI_LOG_ENTRY_TYPE_KERNEL, | ||
607 | .instance = reason, | ||
608 | }; | ||
609 | struct gsmi_set_eventlog_param param = { | ||
610 | .data_len = sizeof(entry), | ||
611 | .type = 1, | ||
612 | }; | ||
613 | static int saved_reason; | ||
614 | int rc = 0; | ||
615 | unsigned long flags; | ||
616 | |||
617 | /* avoid duplicate entries in the log */ | ||
618 | if (saved_reason & (1 << reason)) | ||
619 | return 0; | ||
620 | |||
621 | spin_lock_irqsave(&gsmi_dev.lock, flags); | ||
622 | |||
623 | saved_reason |= (1 << reason); | ||
624 | |||
625 | /* data pointer */ | ||
626 | memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); | ||
627 | memcpy(gsmi_dev.data_buf->start, &entry, sizeof(entry)); | ||
628 | |||
629 | /* parameter buffer */ | ||
630 | param.data_ptr = gsmi_dev.data_buf->address; | ||
631 | memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); | ||
632 | memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); | ||
633 | |||
634 | rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG); | ||
635 | |||
636 | spin_unlock_irqrestore(&gsmi_dev.lock, flags); | ||
637 | |||
638 | if (rc < 0) | ||
639 | printk(KERN_ERR "gsmi: Log Shutdown Reason failed\n"); | ||
640 | else | ||
641 | printk(KERN_EMERG "gsmi: Log Shutdown Reason 0x%02x\n", | ||
642 | reason); | ||
643 | |||
644 | return rc; | ||
645 | } | ||
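gsmi_shutdown_reason() keeps a static bitmask with one bit per reason code, so each distinct cause is pushed to the firmware log at most once per boot even if, say, the panic notifier fires after the oops notifier. The guard distilled:

    /* Sketch: reasons 0..8 map to bits; a set bit short-circuits the
     * SMI so repeated oopses/panics cannot flood the event log. */
    static int should_log(unsigned int *saved, int reason)
    {
            if (*saved & (1u << reason))
                    return 0;       /* already recorded this boot */
            *saved |= 1u << reason;
            return 1;
    }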
646 | |||
647 | static int gsmi_reboot_callback(struct notifier_block *nb, | ||
648 | unsigned long reason, void *arg) | ||
649 | { | ||
650 | gsmi_shutdown_reason(GSMI_SHUTDOWN_CLEAN); | ||
651 | return NOTIFY_DONE; | ||
652 | } | ||
653 | |||
654 | static struct notifier_block gsmi_reboot_notifier = { | ||
655 | .notifier_call = gsmi_reboot_callback | ||
656 | }; | ||
657 | |||
658 | static int gsmi_die_callback(struct notifier_block *nb, | ||
659 | unsigned long reason, void *arg) | ||
660 | { | ||
661 | if (reason == DIE_OOPS) | ||
662 | gsmi_shutdown_reason(GSMI_SHUTDOWN_OOPS); | ||
663 | return NOTIFY_DONE; | ||
664 | } | ||
665 | |||
666 | static struct notifier_block gsmi_die_notifier = { | ||
667 | .notifier_call = gsmi_die_callback | ||
668 | }; | ||
669 | |||
670 | static int gsmi_panic_callback(struct notifier_block *nb, | ||
671 | unsigned long reason, void *arg) | ||
672 | { | ||
673 | gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC); | ||
674 | return NOTIFY_DONE; | ||
675 | } | ||
676 | |||
677 | static struct notifier_block gsmi_panic_notifier = { | ||
678 | .notifier_call = gsmi_panic_callback, | ||
679 | }; | ||
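These three notifier blocks only take effect once hooked onto the kernel's reboot, die, and panic chains, which presumably happens further down in gsmi_init(), past the end of this excerpt. Stated as an assumption, the standard registration calls would be:

    /* Assumed wiring: route clean reboots, oopses and panics into
     * gsmi_shutdown_reason() via the stock notification chains. */
    register_reboot_notifier(&gsmi_reboot_notifier);
    register_die_notifier(&gsmi_die_notifier);
    atomic_notifier_chain_register(&panic_notifier_list,
                                   &gsmi_panic_notifier);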
680 | |||
681 | /* | ||
682 | * This hash function was blatantly copied from include/linux/hash.h. | ||
683 | * It is used by this driver to obfuscate a board name that requires a | ||
684 | * quirk within this driver. | ||
685 | * | ||
686 | * Please do not remove this copy of the function as any changes to the | ||
687 | * global utility hash_64() function would break this driver's ability | ||
688 | * to identify a board and provide the appropriate quirk -- mikew@google.com | ||
689 | */ | ||
690 | static u64 __init local_hash_64(u64 val, unsigned bits) | ||
691 | { | ||
692 | u64 hash = val; | ||
693 | |||
694 | /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ | ||
695 | u64 n = hash; | ||
696 | n <<= 18; | ||
697 | hash -= n; | ||
698 | n <<= 33; | ||
699 | hash -= n; | ||
700 | n <<= 3; | ||
701 | hash += n; | ||
702 | n <<= 3; | ||
703 | hash -= n; | ||
704 | n <<= 4; | ||
705 | hash += n; | ||
706 | n <<= 2; | ||
707 | hash += n; | ||
708 | |||
709 | /* High bits are more random, so use them. */ | ||
710 | return hash >> (64 - bits); | ||
711 | } | ||
712 | |||
713 | static u32 __init hash_oem_table_id(char s[8]) | ||
714 | { | ||
715 | u64 input; | ||
716 | memcpy(&input, s, 8); | ||
717 | return local_hash_64(input, 32); | ||
718 | } | ||
719 | |||
720 | static struct dmi_system_id gsmi_dmi_table[] __initdata = { | ||
721 | { | ||
722 | .ident = "Google Board", | ||
723 | .matches = { | ||
724 | DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."), | ||
725 | }, | ||
726 | }, | ||
727 | {} | ||
728 | }; | ||
729 | MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table); | ||
730 | |||
731 | static __init int gsmi_system_valid(void) | ||
732 | { | ||
733 | u32 hash; | ||
734 | |||
735 | if (!dmi_check_system(gsmi_dmi_table)) | ||
736 | return -ENODEV; | ||
737 | |||
738 | /* | ||
739 | * Only newer firmware supports the gsmi interface. All older | ||
740 | * firmware that didn't support this interface used to plug the | ||
741 | * table name in the first four bytes of the oem_table_id field. | ||
742 | * Newer firmware doesn't do that though, so use that as the | ||
743 | * discriminant factor. We have to do this in order to | ||
744 | * whitewash our board names out of the public driver. | ||
745 | */ | ||
746 | if (!strncmp(acpi_gbl_FADT.header.oem_table_id, "FACP", 4)) { | ||
747 | printk(KERN_INFO "gsmi: Board is too old\n"); | ||
748 | return -ENODEV; | ||
749 | } | ||
750 | |||
751 | /* Disable on board with 1.0 BIOS due to Google bug 2602657 */ | ||
752 | hash = hash_oem_table_id(acpi_gbl_FADT.header.oem_table_id); | ||
753 | if (hash == QUIRKY_BOARD_HASH) { | ||
754 | const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION); | ||
755 | if (strncmp(bios_ver, "1.0", 3) == 0) { | ||
756 | pr_info("gsmi: disabled on this board's BIOS %s\n", | ||
757 | bios_ver); | ||
758 | return -ENODEV; | ||
759 | } | ||
760 | } | ||
761 | |||
762 | /* check for valid SMI command port in ACPI FADT */ | ||
763 | if (acpi_gbl_FADT.smi_command == 0) { | ||
764 | pr_info("gsmi: missing smi_command\n"); | ||
765 | return -ENODEV; | ||
766 | } | ||
767 | |||
768 | /* Found */ | ||
769 | return 0; | ||
770 | } | ||
771 | |||
772 | static struct kobject *gsmi_kobj; | ||
773 | static struct efivars efivars; | ||
774 | |||
775 | static __init int gsmi_init(void) | ||
776 | { | ||
777 | unsigned long flags; | ||
778 | int ret; | ||
779 | |||
780 | ret = gsmi_system_valid(); | ||
781 | if (ret) | ||
782 | return ret; | ||
783 | |||
784 | gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command; | ||
785 | |||
786 | /* register device */ | ||
787 | gsmi_dev.pdev = platform_device_register_simple("gsmi", -1, NULL, 0); | ||
788 | if (IS_ERR(gsmi_dev.pdev)) { | ||
789 | printk(KERN_ERR "gsmi: unable to register platform device\n"); | ||
790 | return PTR_ERR(gsmi_dev.pdev); | ||
791 | } | ||
792 | |||
793 | /* SMI access needs to be serialized */ | ||
794 | spin_lock_init(&gsmi_dev.lock); | ||
795 | |||
796 | /* SMI callbacks require 32bit addresses */ | ||
797 | gsmi_dev.pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
798 | gsmi_dev.pdev->dev.dma_mask = | ||
799 | &gsmi_dev.pdev->dev.coherent_dma_mask; | ||
800 | ret = -ENOMEM; | ||
801 | gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev, | ||
802 | GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0); | ||
803 | if (!gsmi_dev.dma_pool) | ||
804 | goto out_err; | ||
805 | |||
806 | /* | ||
807 | * pre-allocate buffers because sometimes we are called when | ||
808 | * this is not feasible: oops, panic, die, mce, etc. | ||
809 | */ | ||
810 | gsmi_dev.name_buf = gsmi_buf_alloc(); | ||
811 | if (!gsmi_dev.name_buf) { | ||
812 | printk(KERN_ERR "gsmi: failed to allocate name buffer\n"); | ||
813 | goto out_err; | ||
814 | } | ||
815 | |||
816 | gsmi_dev.data_buf = gsmi_buf_alloc(); | ||
817 | if (!gsmi_dev.data_buf) { | ||
818 | printk(KERN_ERR "gsmi: failed to allocate data buffer\n"); | ||
819 | goto out_err; | ||
820 | } | ||
821 | |||
822 | gsmi_dev.param_buf = gsmi_buf_alloc(); | ||
823 | if (!gsmi_dev.param_buf) { | ||
824 | printk(KERN_ERR "gsmi: failed to allocate param buffer\n"); | ||
825 | goto out_err; | ||
826 | } | ||
827 | |||
828 | /* | ||
829 | * Determine type of handshake used to serialize the SMI | ||
830 | * entry. See also gsmi_exec(). | ||
831 | * | ||
832 | * There's a "behavior" present on some chipsets where writing the | ||
833 | * SMI trigger register in the southbridge doesn't result in an | ||
834 | * immediate SMI. Rather, the processor can execute "a few" more | ||
835 | * instructions before the SMI takes effect. To ensure synchronous | ||
836 | * behavior, implement a handshake between the kernel driver and the | ||
837 | * firmware handler to spin until released. This command determines | ||
838 | * the type of handshake. | ||
839 | * | ||
840 | * NONE: The firmware handler does not implement any | ||
841 | * handshake. Either it doesn't need to, or it's legacy firmware | ||
842 | * that doesn't know it needs to and never will. | ||
843 | * | ||
844 | * CF: The firmware handler will clear the CF in the saved | ||
845 | * state before returning. The driver may set the CF and test for | ||
846 | * it to clear before proceeding. | ||
847 | * | ||
848 | * SPIN: The firmware handler does not implement any handshake | ||
849 | * but the driver should spin for a hundred or so microseconds | ||
850 | * to ensure the SMI has triggered. | ||
851 | * | ||
852 | * Finally, the handler will return -ENOSYS if | ||
853 | * GSMI_CMD_HANDSHAKE_TYPE is unimplemented, which implies | ||
854 | * HANDSHAKE_NONE. | ||
855 | */ | ||
856 | spin_lock_irqsave(&gsmi_dev.lock, flags); | ||
857 | gsmi_dev.handshake_type = GSMI_HANDSHAKE_SPIN; | ||
858 | gsmi_dev.handshake_type = | ||
859 | gsmi_exec(GSMI_CALLBACK, GSMI_CMD_HANDSHAKE_TYPE); | ||
860 | if (gsmi_dev.handshake_type == -ENOSYS) | ||
861 | gsmi_dev.handshake_type = GSMI_HANDSHAKE_NONE; | ||
862 | spin_unlock_irqrestore(&gsmi_dev.lock, flags); | ||
863 | |||
864 | /* Remove and clean up gsmi if the handshake could not complete. */ | ||
865 | if (gsmi_dev.handshake_type == -ENXIO) { | ||
866 | printk(KERN_INFO "gsmi version " DRIVER_VERSION | ||
867 | " failed to load\n"); | ||
868 | ret = -ENODEV; | ||
869 | goto out_err; | ||
870 | } | ||
871 | |||
872 | printk(KERN_INFO "gsmi version " DRIVER_VERSION " loaded\n"); | ||
873 | |||
874 | /* Register in the firmware directory */ | ||
875 | ret = -ENOMEM; | ||
876 | gsmi_kobj = kobject_create_and_add("gsmi", firmware_kobj); | ||
877 | if (!gsmi_kobj) { | ||
878 | printk(KERN_INFO "gsmi: Failed to create firmware kobj\n"); | ||
879 | goto out_err; | ||
880 | } | ||
881 | |||
882 | /* Setup eventlog access */ | ||
883 | ret = sysfs_create_bin_file(gsmi_kobj, &eventlog_bin_attr); | ||
884 | if (ret) { | ||
885 | printk(KERN_INFO "gsmi: Failed to setup eventlog"); | ||
886 | goto out_err; | ||
887 | } | ||
888 | |||
889 | /* Other attributes */ | ||
890 | ret = sysfs_create_files(gsmi_kobj, gsmi_attrs); | ||
891 | if (ret) { | ||
892 | printk(KERN_INFO "gsmi: Failed to add attrs"); | ||
893 | goto out_err; | ||
894 | } | ||
895 | |||
896 | if (register_efivars(&efivars, &efivar_ops, gsmi_kobj)) { | ||
897 | printk(KERN_INFO "gsmi: Failed to register efivars\n"); | ||
898 | goto out_err; | ||
899 | } | ||
900 | |||
901 | register_reboot_notifier(&gsmi_reboot_notifier); | ||
902 | register_die_notifier(&gsmi_die_notifier); | ||
903 | atomic_notifier_chain_register(&panic_notifier_list, | ||
904 | &gsmi_panic_notifier); | ||
905 | |||
906 | return 0; | ||
907 | |||
908 | out_err: | ||
909 | kobject_put(gsmi_kobj); | ||
910 | gsmi_buf_free(gsmi_dev.param_buf); | ||
911 | gsmi_buf_free(gsmi_dev.data_buf); | ||
912 | gsmi_buf_free(gsmi_dev.name_buf); | ||
913 | if (gsmi_dev.dma_pool) | ||
914 | dma_pool_destroy(gsmi_dev.dma_pool); | ||
915 | platform_device_unregister(gsmi_dev.pdev); | ||
916 | pr_info("gsmi: failed to load: %d\n", ret); | ||
917 | return ret; | ||
918 | } | ||
919 | |||
920 | static void __exit gsmi_exit(void) | ||
921 | { | ||
922 | unregister_reboot_notifier(&gsmi_reboot_notifier); | ||
923 | unregister_die_notifier(&gsmi_die_notifier); | ||
924 | atomic_notifier_chain_unregister(&panic_notifier_list, | ||
925 | &gsmi_panic_notifier); | ||
926 | unregister_efivars(&efivars); | ||
927 | |||
928 | kobject_put(gsmi_kobj); | ||
929 | gsmi_buf_free(gsmi_dev.param_buf); | ||
930 | gsmi_buf_free(gsmi_dev.data_buf); | ||
931 | gsmi_buf_free(gsmi_dev.name_buf); | ||
932 | dma_pool_destroy(gsmi_dev.dma_pool); | ||
933 | platform_device_unregister(gsmi_dev.pdev); | ||
934 | } | ||
935 | |||
936 | module_init(gsmi_init); | ||
937 | module_exit(gsmi_exit); | ||
938 | |||
939 | MODULE_AUTHOR("Google, Inc."); | ||
940 | MODULE_LICENSE("GPL"); | ||
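
For reference, local_hash_64() above is a frozen copy of hash_64() from include/linux/hash.h, so the QUIRKY_BOARD_HASH value stays reproducible even if the kernel-wide hash ever changes. A minimal user-space sketch of how such a hash could be derived from an 8-byte OEM table id (the id string below is purely hypothetical, not a real board name):

    /* Sketch: derive the 32-bit quirk hash from an 8-byte OEM table id.
     * Mirrors local_hash_64() above; "EXAMPLE0" is a made-up id. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint64_t local_hash_64(uint64_t val, unsigned bits)
    {
        uint64_t hash = val, n = val;

        n <<= 18; hash -= n;
        n <<= 33; hash -= n;
        n <<= 3;  hash += n;
        n <<= 3;  hash -= n;
        n <<= 4;  hash += n;
        n <<= 2;  hash += n;

        /* High bits are more random, so use them. */
        return hash >> (64 - bits);
    }

    int main(void)
    {
        uint64_t input;

        /* Raw memcpy, same as hash_oem_table_id(); result is
         * endianness-dependent in exactly the same way. */
        memcpy(&input, "EXAMPLE0", 8);
        printf("0x%08x\n", (uint32_t)local_hash_64(input, 32));
        return 0;
    }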
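
The three handshake styles spelled out in the comment above are consumed by gsmi_exec(), earlier in this file and not visible in this hunk. A sketch of what the dispatch amounts to — the shape is assumed, and in particular the CF variant really lives in gsmi_exec()'s inline assembly around the SMI trigger:

    /* Sketch, not the driver's actual code: gate completion after the
     * write to the SMI command port, based on the negotiated type.
     * Needs <linux/delay.h> for udelay(). */
    static void gsmi_handshake_wait(void)    /* hypothetical helper */
    {
        switch (gsmi_dev.handshake_type) {
        case GSMI_HANDSHAKE_CF:
            /* The real code sets CF before triggering the SMI and
             * loops in inline asm until the firmware clears it in
             * the saved flags; that asm is omitted here. */
            break;
        case GSMI_HANDSHAKE_SPIN:
            udelay(100);    /* "a hundred or so microseconds" */
            break;
        case GSMI_HANDSHAKE_NONE:
        default:
            break;          /* firmware returns synchronously, or legacy */
        }
    }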
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c new file mode 100644 index 000000000000..2a90ba613613 --- /dev/null +++ b/drivers/firmware/google/memconsole.c | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * memconsole.c | ||
3 | * | ||
4 | * Infrastructure for importing the BIOS memory based console | ||
5 | * into the kernel log ringbuffer. | ||
6 | * | ||
7 | * Copyright 2010 Google Inc. All rights reserved. | ||
8 | */ | ||
9 | |||
10 | #include <linux/ctype.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/sysfs.h> | ||
15 | #include <linux/kobject.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/dmi.h> | ||
18 | #include <asm/bios_ebda.h> | ||
19 | |||
20 | #define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE | ||
21 | #define BIOS_MEMCONSOLE_V2_MAGIC (('M')|('C'<<8)|('O'<<16)|('N'<<24)) | ||
22 | |||
23 | struct biosmemcon_ebda { | ||
24 | u32 signature; | ||
25 | union { | ||
26 | struct { | ||
27 | u8 enabled; | ||
28 | u32 buffer_addr; | ||
29 | u16 start; | ||
30 | u16 end; | ||
31 | u16 num_chars; | ||
32 | u8 wrapped; | ||
33 | } __packed v1; | ||
34 | struct { | ||
35 | u32 buffer_addr; | ||
36 | /* Misdocumented as number of pages! */ | ||
37 | u16 num_bytes; | ||
38 | u16 start; | ||
39 | u16 end; | ||
40 | } __packed v2; | ||
41 | }; | ||
42 | } __packed; | ||
43 | |||
44 | static char *memconsole_baseaddr; | ||
45 | static size_t memconsole_length; | ||
46 | |||
47 | static ssize_t memconsole_read(struct file *filp, struct kobject *kobp, | ||
48 | struct bin_attribute *bin_attr, char *buf, | ||
49 | loff_t pos, size_t count) | ||
50 | { | ||
51 | return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr, | ||
52 | memconsole_length); | ||
53 | } | ||
54 | |||
55 | static struct bin_attribute memconsole_bin_attr = { | ||
56 | .attr = {.name = "log", .mode = 0444}, | ||
57 | .read = memconsole_read, | ||
58 | }; | ||
59 | |||
60 | |||
61 | static void found_v1_header(struct biosmemcon_ebda *hdr) | ||
62 | { | ||
63 | printk(KERN_INFO "BIOS console v1 EBDA structure found at %p\n", hdr); | ||
64 | printk(KERN_INFO "BIOS console buffer at 0x%.8x, " | ||
65 | "start = %d, end = %d, num = %d\n", | ||
66 | hdr->v1.buffer_addr, hdr->v1.start, | ||
67 | hdr->v1.end, hdr->v1.num_chars); | ||
68 | |||
69 | memconsole_length = hdr->v1.num_chars; | ||
70 | memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr); | ||
71 | } | ||
72 | |||
73 | static void found_v2_header(struct biosmemcon_ebda *hdr) | ||
74 | { | ||
75 | printk(KERN_INFO "BIOS console v2 EBDA structure found at %p\n", hdr); | ||
76 | printk(KERN_INFO "BIOS console buffer at 0x%.8x, " | ||
77 | "start = %d, end = %d, num_bytes = %d\n", | ||
78 | hdr->v2.buffer_addr, hdr->v2.start, | ||
79 | hdr->v2.end, hdr->v2.num_bytes); | ||
80 | |||
81 | memconsole_length = hdr->v2.end - hdr->v2.start; | ||
82 | memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr | ||
83 | + hdr->v2.start); | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * Search through the EBDA for the BIOS Memory Console, and | ||
88 | * set the global variables to point to it. Return true if found. | ||
89 | */ | ||
90 | static bool found_memconsole(void) | ||
91 | { | ||
92 | unsigned int address; | ||
93 | size_t length, cur; | ||
94 | |||
95 | address = get_bios_ebda(); | ||
96 | if (!address) { | ||
97 | printk(KERN_INFO "BIOS EBDA non-existent.\n"); | ||
98 | return false; | ||
99 | } | ||
100 | |||
101 | /* EBDA length is byte 0 of EBDA (in KB) */ | ||
102 | length = *(u8 *)phys_to_virt(address); | ||
103 | length <<= 10; /* convert to bytes */ | ||
104 | |||
105 | /* | ||
106 | * Search through EBDA for BIOS memory console structure | ||
107 | * note: signature is not necessarily dword-aligned | ||
108 | */ | ||
109 | for (cur = 0; cur < length; cur++) { | ||
110 | struct biosmemcon_ebda *hdr = phys_to_virt(address + cur); | ||
111 | |||
112 | /* memconsole v1 */ | ||
113 | if (hdr->signature == BIOS_MEMCONSOLE_V1_MAGIC) { | ||
114 | found_v1_header(hdr); | ||
115 | return true; | ||
116 | } | ||
117 | |||
118 | /* memconsole v2 */ | ||
119 | if (hdr->signature == BIOS_MEMCONSOLE_V2_MAGIC) { | ||
120 | found_v2_header(hdr); | ||
121 | return true; | ||
122 | } | ||
123 | } | ||
124 | |||
125 | printk(KERN_INFO "BIOS console EBDA structure not found!\n"); | ||
126 | return false; | ||
127 | } | ||
128 | |||
129 | static struct dmi_system_id memconsole_dmi_table[] __initdata = { | ||
130 | { | ||
131 | .ident = "Google Board", | ||
132 | .matches = { | ||
133 | DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."), | ||
134 | }, | ||
135 | }, | ||
136 | {} | ||
137 | }; | ||
138 | MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table); | ||
139 | |||
140 | static int __init memconsole_init(void) | ||
141 | { | ||
142 | int ret; | ||
143 | |||
144 | if (!dmi_check_system(memconsole_dmi_table)) | ||
145 | return -ENODEV; | ||
146 | |||
147 | if (!found_memconsole()) | ||
148 | return -ENODEV; | ||
149 | |||
150 | memconsole_bin_attr.size = memconsole_length; | ||
151 | |||
152 | ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr); | ||
153 | |||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | static void __exit memconsole_exit(void) | ||
158 | { | ||
159 | sysfs_remove_bin_file(firmware_kobj, &memconsole_bin_attr); | ||
160 | } | ||
161 | |||
162 | module_init(memconsole_init); | ||
163 | module_exit(memconsole_exit); | ||
164 | |||
165 | MODULE_AUTHOR("Google, Inc."); | ||
166 | MODULE_LICENSE("GPL"); | ||
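
Since the binary attribute is created on firmware_kobj with the name "log", the console surfaces as /sys/firmware/log. A small user-space reader, for illustration:

    /* Sketch: dump the BIOS memory console exported by the module above. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char buf[4096];
        size_t n;
        FILE *f = fopen("/sys/firmware/log", "r");

        if (!f) {
            perror("fopen /sys/firmware/log");
            return EXIT_FAILURE;
        }
        while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
            fwrite(buf, 1, n, stdout);
        fclose(f);
        return 0;
    }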
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index 2192456dfd68..f032e446fc11 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c | |||
@@ -42,7 +42,20 @@ | |||
42 | struct acpi_table_ibft *ibft_addr; | 42 | struct acpi_table_ibft *ibft_addr; |
43 | EXPORT_SYMBOL_GPL(ibft_addr); | 43 | EXPORT_SYMBOL_GPL(ibft_addr); |
44 | 44 | ||
45 | #define IBFT_SIGN "iBFT" | 45 | static const struct { |
46 | char *sign; | ||
47 | } ibft_signs[] = { | ||
48 | #ifdef CONFIG_ACPI | ||
49 | /* | ||
50 | * One spec says "IBFT", the other says "iBFT". We have to check | ||
51 | * for both. | ||
52 | */ | ||
53 | { ACPI_SIG_IBFT }, | ||
54 | #endif | ||
55 | { "iBFT" }, | ||
56 | { "BIFT" }, /* Broadcom iSCSI Offload */ | ||
57 | }; | ||
58 | |||
46 | #define IBFT_SIGN_LEN 4 | 59 | #define IBFT_SIGN_LEN 4 |
47 | #define IBFT_START 0x80000 /* 512kB */ | 60 | #define IBFT_START 0x80000 /* 512kB */ |
48 | #define IBFT_END 0x100000 /* 1MB */ | 61 | #define IBFT_END 0x100000 /* 1MB */ |
@@ -62,6 +75,7 @@ static int __init find_ibft_in_mem(void) | |||
62 | unsigned long pos; | 75 | unsigned long pos; |
63 | unsigned int len = 0; | 76 | unsigned int len = 0; |
64 | void *virt; | 77 | void *virt; |
78 | int i; | ||
65 | 79 | ||
66 | for (pos = IBFT_START; pos < IBFT_END; pos += 16) { | 80 | for (pos = IBFT_START; pos < IBFT_END; pos += 16) { |
67 | /* The table can't be inside the VGA BIOS reserved space, | 81 | /* The table can't be inside the VGA BIOS reserved space, |
@@ -69,18 +83,23 @@ static int __init find_ibft_in_mem(void) | |||
69 | if (pos == VGA_MEM) | 83 | if (pos == VGA_MEM) |
70 | pos += VGA_SIZE; | 84 | pos += VGA_SIZE; |
71 | virt = isa_bus_to_virt(pos); | 85 | virt = isa_bus_to_virt(pos); |
72 | if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { | 86 | |
73 | unsigned long *addr = | 87 | for (i = 0; i < ARRAY_SIZE(ibft_signs); i++) { |
74 | (unsigned long *)isa_bus_to_virt(pos + 4); | 88 | if (memcmp(virt, ibft_signs[i].sign, IBFT_SIGN_LEN) == |
75 | len = *addr; | 89 | 0) { |
76 | /* if the length of the table extends past 1M, | 90 | unsigned long *addr = |
77 | * the table cannot be valid. */ | 91 | (unsigned long *)isa_bus_to_virt(pos + 4); |
78 | if (pos + len <= (IBFT_END-1)) { | 92 | len = *addr; |
79 | ibft_addr = (struct acpi_table_ibft *)virt; | 93 | /* if the length of the table extends past 1M, |
80 | break; | 94 | * the table cannot be valid. */ |
95 | if (pos + len <= (IBFT_END-1)) { | ||
96 | ibft_addr = (struct acpi_table_ibft *)virt; | ||
97 | goto done; | ||
98 | } | ||
81 | } | 99 | } |
82 | } | 100 | } |
83 | } | 101 | } |
102 | done: | ||
84 | return len; | 103 | return len; |
85 | } | 104 | } |
86 | /* | 105 | /* |
@@ -89,18 +108,12 @@ static int __init find_ibft_in_mem(void) | |||
89 | */ | 108 | */ |
90 | unsigned long __init find_ibft_region(unsigned long *sizep) | 109 | unsigned long __init find_ibft_region(unsigned long *sizep) |
91 | { | 110 | { |
92 | 111 | int i; | |
93 | ibft_addr = NULL; | 112 | ibft_addr = NULL; |
94 | 113 | ||
95 | #ifdef CONFIG_ACPI | 114 | #ifdef CONFIG_ACPI |
96 | /* | 115 | for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) |
97 | * One spec says "IBFT", the other says "iBFT". We have to check | 116 | acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft); |
98 | * for both. | ||
99 | */ | ||
100 | if (!ibft_addr) | ||
101 | acpi_table_parse(ACPI_SIG_IBFT, acpi_find_ibft); | ||
102 | if (!ibft_addr) | ||
103 | acpi_table_parse(IBFT_SIGN, acpi_find_ibft); | ||
104 | #endif /* CONFIG_ACPI */ | 117 | #endif /* CONFIG_ACPI */ |
105 | 118 | ||
106 | /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will | 119 | /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will |
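
The reworked scan walks 0x80000-0x100000 in 16-byte strides and now accepts any of the known signatures instead of just "iBFT". A stripped-down user-space illustration of that matching loop, over an arbitrary memory image (names are illustrative, and the kernel's follow-up length sanity check is omitted):

    /* Sketch: find the first iBFT-style signature in a memory image. */
    #include <stddef.h>
    #include <string.h>

    static const char *const ibft_signs[] = { "IBFT", "iBFT", "BIFT" };

    /* Returns the offset of the first match, or (size_t)-1 if none. */
    static size_t find_ibft(const unsigned char *img, size_t len)
    {
        size_t pos, i;

        for (pos = 0; pos + 4 <= len; pos += 16)       /* 16-byte strides */
            for (i = 0; i < sizeof(ibft_signs) / sizeof(ibft_signs[0]); i++)
                if (!memcmp(img + pos, ibft_signs[i], 4))
                    return pos;
        return (size_t)-1;
    }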
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 11d7a72c22d9..140b9525b48a 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -1516,17 +1516,33 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) | |||
1516 | } | 1516 | } |
1517 | EXPORT_SYMBOL(drm_fb_helper_initial_config); | 1517 | EXPORT_SYMBOL(drm_fb_helper_initial_config); |
1518 | 1518 | ||
1519 | bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | 1519 | /** |
1520 | * drm_fb_helper_hotplug_event - respond to a hotplug notification by | ||
1521 | * probing all the outputs attached to the fb. | ||
1522 | * @fb_helper: the drm_fb_helper | ||
1523 | * | ||
1524 | * LOCKING: | ||
1525 | * Called at runtime, must take mode config lock. | ||
1526 | * | ||
1527 | * Scan the connectors attached to the fb_helper and try to put together a | ||
1528 | * setup after *notification of a change in output configuration. | ||
1529 | * | ||
1530 | * RETURNS: | ||
1531 | * 0 on success and a non-zero error code otherwise. | ||
1532 | */ | ||
1533 | int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | ||
1520 | { | 1534 | { |
1535 | struct drm_device *dev = fb_helper->dev; | ||
1521 | int count = 0; | 1536 | int count = 0; |
1522 | u32 max_width, max_height, bpp_sel; | 1537 | u32 max_width, max_height, bpp_sel; |
1523 | bool bound = false, crtcs_bound = false; | 1538 | bool bound = false, crtcs_bound = false; |
1524 | struct drm_crtc *crtc; | 1539 | struct drm_crtc *crtc; |
1525 | 1540 | ||
1526 | if (!fb_helper->fb) | 1541 | if (!fb_helper->fb) |
1527 | return false; | 1542 | return 0; |
1528 | 1543 | ||
1529 | list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { | 1544 | mutex_lock(&dev->mode_config.mutex); |
1545 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
1530 | if (crtc->fb) | 1546 | if (crtc->fb) |
1531 | crtcs_bound = true; | 1547 | crtcs_bound = true; |
1532 | if (crtc->fb == fb_helper->fb) | 1548 | if (crtc->fb == fb_helper->fb) |
@@ -1535,7 +1551,8 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | |||
1535 | 1551 | ||
1536 | if (!bound && crtcs_bound) { | 1552 | if (!bound && crtcs_bound) { |
1537 | fb_helper->delayed_hotplug = true; | 1553 | fb_helper->delayed_hotplug = true; |
1538 | return false; | 1554 | mutex_unlock(&dev->mode_config.mutex); |
1555 | return 0; | ||
1539 | } | 1556 | } |
1540 | DRM_DEBUG_KMS("\n"); | 1557 | DRM_DEBUG_KMS("\n"); |
1541 | 1558 | ||
@@ -1546,6 +1563,7 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | |||
1546 | count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, | 1563 | count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, |
1547 | max_height); | 1564 | max_height); |
1548 | drm_setup_crtcs(fb_helper); | 1565 | drm_setup_crtcs(fb_helper); |
1566 | mutex_unlock(&dev->mode_config.mutex); | ||
1549 | 1567 | ||
1550 | return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); | 1568 | return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); |
1551 | } | 1569 | } |
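
With the documented contract above — the helper now takes the mode-config mutex itself and returns 0 or an error — a KMS driver can forward poll events directly. A sketch of the typical wiring; example_private and its fb_helper member are illustrative, not from this patch:

    /* Sketch: a driver's .output_poll_changed hook forwarding hotplug
     * events to the helper (return value ignored at this call site). */
    static void example_output_poll_changed(struct drm_device *dev)
    {
        struct example_private *priv = dev->dev_private;  /* hypothetical */

        drm_fb_helper_hotplug_event(&priv->fb_helper);
    }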
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c34a8dd31d02..32d1b3e829c8 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -49,7 +49,7 @@ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); | |||
49 | unsigned int i915_powersave = 1; | 49 | unsigned int i915_powersave = 1; |
50 | module_param_named(powersave, i915_powersave, int, 0600); | 50 | module_param_named(powersave, i915_powersave, int, 0600); |
51 | 51 | ||
52 | unsigned int i915_semaphores = 1; | 52 | unsigned int i915_semaphores = 0; |
53 | module_param_named(semaphores, i915_semaphores, int, 0600); | 53 | module_param_named(semaphores, i915_semaphores, int, 0600); |
54 | 54 | ||
55 | unsigned int i915_enable_rc6 = 0; | 55 | unsigned int i915_enable_rc6 = 0; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 373c2a005ec1..2166ee071ddb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -5154,6 +5154,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
5154 | 5154 | ||
5155 | I915_WRITE(DSPCNTR(plane), dspcntr); | 5155 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5156 | POSTING_READ(DSPCNTR(plane)); | 5156 | POSTING_READ(DSPCNTR(plane)); |
5157 | if (!HAS_PCH_SPLIT(dev)) | ||
5158 | intel_enable_plane(dev_priv, plane, pipe); | ||
5157 | 5159 | ||
5158 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | 5160 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
5159 | 5161 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 4bce801bc588..c77111eca6ac 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -42,7 +42,8 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, | |||
42 | 42 | ||
43 | nvbe->nr_pages = 0; | 43 | nvbe->nr_pages = 0; |
44 | while (num_pages--) { | 44 | while (num_pages--) { |
45 | if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) { | 45 | /* this code path isn't called and is incorrect anyways */ |
46 | if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/ | ||
46 | nvbe->pages[nvbe->nr_pages] = | 47 | nvbe->pages[nvbe->nr_pages] = |
47 | dma_addrs[nvbe->nr_pages]; | 48 | dma_addrs[nvbe->nr_pages]; |
48 | nvbe->ttm_alloced[nvbe->nr_pages] = true; | 49 | nvbe->ttm_alloced[nvbe->nr_pages] = true; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index c20eac3379e6..9073e3bfb08c 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -1780,7 +1780,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1780 | 1780 | ||
1781 | 1781 | ||
1782 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | 1782 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); |
1783 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | 1783 | if (rdev->flags & RADEON_IS_IGP) |
1784 | mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG); | ||
1785 | else | ||
1786 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | ||
1784 | 1787 | ||
1785 | switch (rdev->config.evergreen.max_tile_pipes) { | 1788 | switch (rdev->config.evergreen.max_tile_pipes) { |
1786 | case 1: | 1789 | case 1: |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 94533849927e..fc40e0cc3451 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -200,6 +200,7 @@ | |||
200 | #define BURSTLENGTH_SHIFT 9 | 200 | #define BURSTLENGTH_SHIFT 9 |
201 | #define BURSTLENGTH_MASK 0x00000200 | 201 | #define BURSTLENGTH_MASK 0x00000200 |
202 | #define CHANSIZE_OVERRIDE (1 << 11) | 202 | #define CHANSIZE_OVERRIDE (1 << 11) |
203 | #define FUS_MC_ARB_RAMCFG 0x2768 | ||
203 | #define MC_VM_AGP_TOP 0x2028 | 204 | #define MC_VM_AGP_TOP 0x2028 |
204 | #define MC_VM_AGP_BOT 0x202C | 205 | #define MC_VM_AGP_BOT 0x202C |
205 | #define MC_VM_AGP_BASE 0x2030 | 206 | #define MC_VM_AGP_BASE 0x2030 |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 7aade20f63a8..3d8a7634bbe9 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -674,7 +674,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
674 | 674 | ||
675 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); | 675 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); |
676 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); | 676 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); |
677 | cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE); | 677 | cgts_tcc_disable = 0xff000000; |
678 | gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); | 678 | gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); |
679 | gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); | 679 | gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); |
680 | cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); | 680 | cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); |
@@ -871,7 +871,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
871 | 871 | ||
872 | smx_dc_ctl0 = RREG32(SMX_DC_CTL0); | 872 | smx_dc_ctl0 = RREG32(SMX_DC_CTL0); |
873 | smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); | 873 | smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); |
874 | smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); | 874 | smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets); |
875 | WREG32(SMX_DC_CTL0, smx_dc_ctl0); | 875 | WREG32(SMX_DC_CTL0, smx_dc_ctl0); |
876 | 876 | ||
877 | WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); | 877 | WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); |
@@ -887,20 +887,20 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
887 | 887 | ||
888 | WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); | 888 | WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); |
889 | 889 | ||
890 | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | | 890 | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) | |
891 | POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | | 891 | POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) | |
892 | SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); | 892 | SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1))); |
893 | 893 | ||
894 | WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | | 894 | WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) | |
895 | SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | | 895 | SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) | |
896 | SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); | 896 | SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size))); |
897 | 897 | ||
898 | 898 | ||
899 | WREG32(VGT_NUM_INSTANCES, 1); | 899 | WREG32(VGT_NUM_INSTANCES, 1); |
900 | 900 | ||
901 | WREG32(CP_PERFMON_CNTL, 0); | 901 | WREG32(CP_PERFMON_CNTL, 0); |
902 | 902 | ||
903 | WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | | 903 | WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) | |
904 | FETCH_FIFO_HIWATER(0x4) | | 904 | FETCH_FIFO_HIWATER(0x4) | |
905 | DONE_FIFO_HIWATER(0xe0) | | 905 | DONE_FIFO_HIWATER(0xe0) | |
906 | ALU_UPDATE_FIFO_HIWATER(0x8))); | 906 | ALU_UPDATE_FIFO_HIWATER(0x8))); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index dd881d035f09..90dfb2b8cf03 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -1574,9 +1574,17 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1574 | ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; | 1574 | ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; |
1575 | ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; | 1575 | ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; |
1576 | bool bad_record = false; | 1576 | bool bad_record = false; |
1577 | u8 *record = (u8 *)(mode_info->atom_context->bios + | 1577 | u8 *record; |
1578 | data_offset + | 1578 | |
1579 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); | 1579 | if ((frev == 1) && (crev < 2)) |
1580 | /* absolute */ | ||
1581 | record = (u8 *)(mode_info->atom_context->bios + | ||
1582 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); | ||
1583 | else | ||
1584 | /* relative */ | ||
1585 | record = (u8 *)(mode_info->atom_context->bios + | ||
1586 | data_offset + | ||
1587 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); | ||
1580 | while (*record != ATOM_RECORD_END_TYPE) { | 1588 | while (*record != ATOM_RECORD_END_TYPE) { |
1581 | switch (*record) { | 1589 | switch (*record) { |
1582 | case LCD_MODE_PATCH_RECORD_MODE_TYPE: | 1590 | case LCD_MODE_PATCH_RECORD_MODE_TYPE: |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 8a955bbdb608..a533f52fd163 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -181,9 +181,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
181 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); | 181 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
182 | 182 | ||
183 | for (i = 0; i < pages; i++, p++) { | 183 | for (i = 0; i < pages; i++, p++) { |
184 | /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32 | 184 | /* we reverted the patch using dma_addr in TTM for now; this |
185 | * is requested. */ | 185 | * code doesn't build on alpha, so just comment it out */
186 | if (dma_addr[i] != DMA_ERROR_CODE) { | 186 | if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */ |
187 | rdev->gart.ttm_alloced[p] = true; | 187 | rdev->gart.ttm_alloced[p] = true; |
188 | rdev->gart.pages_addr[p] = dma_addr[i]; | 188 | rdev->gart.pages_addr[p] = dma_addr[i]; |
189 | } else { | 189 | } else { |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index 6334f8ac1209..0aa8e85a9457 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman | |||
@@ -33,6 +33,7 @@ cayman 0x9400 | |||
33 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS | 33 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS |
34 | 0x00009100 SPI_CONFIG_CNTL | 34 | 0x00009100 SPI_CONFIG_CNTL |
35 | 0x0000913C SPI_CONFIG_CNTL_1 | 35 | 0x0000913C SPI_CONFIG_CNTL_1 |
36 | 0x00009508 TA_CNTL_AUX | ||
36 | 0x00009830 DB_DEBUG | 37 | 0x00009830 DB_DEBUG |
37 | 0x00009834 DB_DEBUG2 | 38 | 0x00009834 DB_DEBUG2 |
38 | 0x00009838 DB_DEBUG3 | 39 | 0x00009838 DB_DEBUG3 |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index 7e1637176e08..0e28cae7ea43 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
@@ -46,6 +46,7 @@ evergreen 0x9400 | |||
46 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS | 46 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS |
47 | 0x00009100 SPI_CONFIG_CNTL | 47 | 0x00009100 SPI_CONFIG_CNTL |
48 | 0x0000913C SPI_CONFIG_CNTL_1 | 48 | 0x0000913C SPI_CONFIG_CNTL_1 |
49 | 0x00009508 TA_CNTL_AUX | ||
49 | 0x00009700 VC_CNTL | 50 | 0x00009700 VC_CNTL |
50 | 0x00009714 VC_ENHANCE | 51 | 0x00009714 VC_ENHANCE |
51 | 0x00009830 DB_DEBUG | 52 | 0x00009830 DB_DEBUG |
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index e01cacba685f..498b284e5ef9 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c | |||
@@ -219,9 +219,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) | |||
219 | int i; | 219 | int i; |
220 | struct vga_switcheroo_client *active = NULL; | 220 | struct vga_switcheroo_client *active = NULL; |
221 | 221 | ||
222 | if (new_client->active == true) | ||
223 | return 0; | ||
224 | |||
225 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { | 222 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { |
226 | if (vgasr_priv.clients[i].active == true) { | 223 | if (vgasr_priv.clients[i].active == true) { |
227 | active = &vgasr_priv.clients[i]; | 224 | active = &vgasr_priv.clients[i]; |
@@ -372,6 +369,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
372 | goto out; | 369 | goto out; |
373 | } | 370 | } |
374 | 371 | ||
372 | if (client->active == true) | ||
373 | goto out; | ||
374 | |||
375 | /* okay we want a switch - test if devices are willing to switch */ | 375 | /* okay we want a switch - test if devices are willing to switch */ |
376 | can_switch = true; | 376 | can_switch = true; |
377 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { | 377 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { |
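
The moved check means an already-active client is now rejected up front in the debugfs write path instead of deep inside stage1. For context, a request enters that path by writing a client token to the switcheroo debugfs node; the /sys/kernel/debug/vgaswitcheroo/switch path and the "IGD"/"DIS" tokens are the interface's existing convention, not something introduced by this hunk:

    /* Sketch: ask vga_switcheroo to switch to the discrete GPU. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/kernel/debug/vgaswitcheroo/switch", "w");

        if (!f)
            return 1;
        fputs("DIS\n", f);          /* "IGD" selects the integrated GPU */
        return fclose(f) ? 1 : 0;
    }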
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 75b984c519ac..107397a606b4 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c | |||
@@ -560,15 +560,18 @@ static struct i2c_adapter mpc_ops = { | |||
560 | .timeout = HZ, | 560 | .timeout = HZ, |
561 | }; | 561 | }; |
562 | 562 | ||
563 | static const struct of_device_id mpc_i2c_of_match[]; | ||
563 | static int __devinit fsl_i2c_probe(struct platform_device *op) | 564 | static int __devinit fsl_i2c_probe(struct platform_device *op) |
564 | { | 565 | { |
566 | const struct of_device_id *match; | ||
565 | struct mpc_i2c *i2c; | 567 | struct mpc_i2c *i2c; |
566 | const u32 *prop; | 568 | const u32 *prop; |
567 | u32 clock = MPC_I2C_CLOCK_LEGACY; | 569 | u32 clock = MPC_I2C_CLOCK_LEGACY; |
568 | int result = 0; | 570 | int result = 0; |
569 | int plen; | 571 | int plen; |
570 | 572 | ||
571 | if (!op->dev.of_match) | 573 | match = of_match_device(mpc_i2c_of_match, &op->dev); |
574 | if (!match) | ||
572 | return -EINVAL; | 575 | return -EINVAL; |
573 | 576 | ||
574 | i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); | 577 | i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); |
@@ -605,8 +608,8 @@ static int __devinit fsl_i2c_probe(struct platform_device *op) | |||
605 | clock = *prop; | 608 | clock = *prop; |
606 | } | 609 | } |
607 | 610 | ||
608 | if (op->dev.of_match->data) { | 611 | if (match->data) { |
609 | struct mpc_i2c_data *data = op->dev.of_match->data; | 612 | struct mpc_i2c_data *data = match->data; |
610 | data->setup(op->dev.of_node, i2c, clock, data->prescaler); | 613 | data->setup(op->dev.of_node, i2c, clock, data->prescaler); |
611 | } else { | 614 | } else { |
612 | /* Backwards compatibility */ | 615 | /* Backwards compatibility */ |
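
The forward declaration of mpc_i2c_of_match exists so probe() can resolve the match explicitly now that op->dev.of_match is gone. The general pattern, sketched with illustrative names (example_of_match is assumed to be declared above the probe routine, mirroring the forward declaration in this hunk):

    /* Sketch of the explicit OF-match lookup pattern used above. */
    static int example_probe(struct platform_device *op)
    {
        const struct of_device_id *match =
            of_match_device(example_of_match, &op->dev);

        if (!match)
            return -EINVAL;
        if (match->data) {
            /* per-device data, e.g. a setup() hook as in fsl_i2c_probe() */
        }
        return 0;
    }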
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index a97e3fec8148..04be9f82e14b 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c | |||
@@ -65,7 +65,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) | |||
65 | jiffies, expires); | 65 | jiffies, expires); |
66 | 66 | ||
67 | timer->expires = jiffies + expires; | 67 | timer->expires = jiffies + expires; |
68 | timer->data = (unsigned long)&alg_data; | 68 | timer->data = (unsigned long)alg_data; |
69 | 69 | ||
70 | add_timer(timer); | 70 | add_timer(timer); |
71 | } | 71 | } |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 5ed9d25d021a..99dde874fbbd 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -148,6 +148,7 @@ struct rdma_id_private { | |||
148 | u32 qp_num; | 148 | u32 qp_num; |
149 | u8 srq; | 149 | u8 srq; |
150 | u8 tos; | 150 | u8 tos; |
151 | u8 reuseaddr; | ||
151 | }; | 152 | }; |
152 | 153 | ||
153 | struct cma_multicast { | 154 | struct cma_multicast { |
@@ -712,6 +713,21 @@ static inline int cma_any_addr(struct sockaddr *addr) | |||
712 | return cma_zero_addr(addr) || cma_loopback_addr(addr); | 713 | return cma_zero_addr(addr) || cma_loopback_addr(addr); |
713 | } | 714 | } |
714 | 715 | ||
716 | static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst) | ||
717 | { | ||
718 | if (src->sa_family != dst->sa_family) | ||
719 | return -1; | ||
720 | |||
721 | switch (src->sa_family) { | ||
722 | case AF_INET: | ||
723 | return ((struct sockaddr_in *) src)->sin_addr.s_addr != | ||
724 | ((struct sockaddr_in *) dst)->sin_addr.s_addr; | ||
725 | default: | ||
726 | return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr, | ||
727 | &((struct sockaddr_in6 *) dst)->sin6_addr); | ||
728 | } | ||
729 | } | ||
730 | |||
715 | static inline __be16 cma_port(struct sockaddr *addr) | 731 | static inline __be16 cma_port(struct sockaddr *addr) |
716 | { | 732 | { |
717 | if (addr->sa_family == AF_INET) | 733 | if (addr->sa_family == AF_INET) |
@@ -1564,50 +1580,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv) | |||
1564 | mutex_unlock(&lock); | 1580 | mutex_unlock(&lock); |
1565 | } | 1581 | } |
1566 | 1582 | ||
1567 | int rdma_listen(struct rdma_cm_id *id, int backlog) | ||
1568 | { | ||
1569 | struct rdma_id_private *id_priv; | ||
1570 | int ret; | ||
1571 | |||
1572 | id_priv = container_of(id, struct rdma_id_private, id); | ||
1573 | if (id_priv->state == CMA_IDLE) { | ||
1574 | ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; | ||
1575 | ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); | ||
1576 | if (ret) | ||
1577 | return ret; | ||
1578 | } | ||
1579 | |||
1580 | if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) | ||
1581 | return -EINVAL; | ||
1582 | |||
1583 | id_priv->backlog = backlog; | ||
1584 | if (id->device) { | ||
1585 | switch (rdma_node_get_transport(id->device->node_type)) { | ||
1586 | case RDMA_TRANSPORT_IB: | ||
1587 | ret = cma_ib_listen(id_priv); | ||
1588 | if (ret) | ||
1589 | goto err; | ||
1590 | break; | ||
1591 | case RDMA_TRANSPORT_IWARP: | ||
1592 | ret = cma_iw_listen(id_priv, backlog); | ||
1593 | if (ret) | ||
1594 | goto err; | ||
1595 | break; | ||
1596 | default: | ||
1597 | ret = -ENOSYS; | ||
1598 | goto err; | ||
1599 | } | ||
1600 | } else | ||
1601 | cma_listen_on_all(id_priv); | ||
1602 | |||
1603 | return 0; | ||
1604 | err: | ||
1605 | id_priv->backlog = 0; | ||
1606 | cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); | ||
1607 | return ret; | ||
1608 | } | ||
1609 | EXPORT_SYMBOL(rdma_listen); | ||
1610 | |||
1611 | void rdma_set_service_type(struct rdma_cm_id *id, int tos) | 1583 | void rdma_set_service_type(struct rdma_cm_id *id, int tos) |
1612 | { | 1584 | { |
1613 | struct rdma_id_private *id_priv; | 1585 | struct rdma_id_private *id_priv; |
@@ -2090,6 +2062,25 @@ err: | |||
2090 | } | 2062 | } |
2091 | EXPORT_SYMBOL(rdma_resolve_addr); | 2063 | EXPORT_SYMBOL(rdma_resolve_addr); |
2092 | 2064 | ||
2065 | int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) | ||
2066 | { | ||
2067 | struct rdma_id_private *id_priv; | ||
2068 | unsigned long flags; | ||
2069 | int ret; | ||
2070 | |||
2071 | id_priv = container_of(id, struct rdma_id_private, id); | ||
2072 | spin_lock_irqsave(&id_priv->lock, flags); | ||
2073 | if (id_priv->state == CMA_IDLE) { | ||
2074 | id_priv->reuseaddr = reuse; | ||
2075 | ret = 0; | ||
2076 | } else { | ||
2077 | ret = -EINVAL; | ||
2078 | } | ||
2079 | spin_unlock_irqrestore(&id_priv->lock, flags); | ||
2080 | return ret; | ||
2081 | } | ||
2082 | EXPORT_SYMBOL(rdma_set_reuseaddr); | ||
2083 | |||
2093 | static void cma_bind_port(struct rdma_bind_list *bind_list, | 2084 | static void cma_bind_port(struct rdma_bind_list *bind_list, |
2094 | struct rdma_id_private *id_priv) | 2085 | struct rdma_id_private *id_priv) |
2095 | { | 2086 | { |
@@ -2165,41 +2156,71 @@ retry: | |||
2165 | return -EADDRNOTAVAIL; | 2156 | return -EADDRNOTAVAIL; |
2166 | } | 2157 | } |
2167 | 2158 | ||
2168 | static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) | 2159 | /* |
2160 | * Check that the requested port is available. This is called when trying to | ||
2161 | * bind to a specific port, or when trying to listen on a bound port. In | ||
2162 | * the latter case, the provided id_priv may already be on the bind_list, but | ||
2163 | * we still need to check that it's okay to start listening. | ||
2164 | */ | ||
2165 | static int cma_check_port(struct rdma_bind_list *bind_list, | ||
2166 | struct rdma_id_private *id_priv, uint8_t reuseaddr) | ||
2169 | { | 2167 | { |
2170 | struct rdma_id_private *cur_id; | 2168 | struct rdma_id_private *cur_id; |
2171 | struct sockaddr_in *sin, *cur_sin; | 2169 | struct sockaddr *addr, *cur_addr; |
2172 | struct rdma_bind_list *bind_list; | ||
2173 | struct hlist_node *node; | 2170 | struct hlist_node *node; |
2171 | |||
2172 | addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; | ||
2173 | if (cma_any_addr(addr) && !reuseaddr) | ||
2174 | return -EADDRNOTAVAIL; | ||
2175 | |||
2176 | hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { | ||
2177 | if (id_priv == cur_id) | ||
2178 | continue; | ||
2179 | |||
2180 | if ((cur_id->state == CMA_LISTEN) || | ||
2181 | !reuseaddr || !cur_id->reuseaddr) { | ||
2182 | cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; | ||
2183 | if (cma_any_addr(cur_addr)) | ||
2184 | return -EADDRNOTAVAIL; | ||
2185 | |||
2186 | if (!cma_addr_cmp(addr, cur_addr)) | ||
2187 | return -EADDRINUSE; | ||
2188 | } | ||
2189 | } | ||
2190 | return 0; | ||
2191 | } | ||
2192 | |||
2193 | static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) | ||
2194 | { | ||
2195 | struct rdma_bind_list *bind_list; | ||
2174 | unsigned short snum; | 2196 | unsigned short snum; |
2197 | int ret; | ||
2175 | 2198 | ||
2176 | sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; | 2199 | snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr)); |
2177 | snum = ntohs(sin->sin_port); | ||
2178 | if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) | 2200 | if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) |
2179 | return -EACCES; | 2201 | return -EACCES; |
2180 | 2202 | ||
2181 | bind_list = idr_find(ps, snum); | 2203 | bind_list = idr_find(ps, snum); |
2182 | if (!bind_list) | 2204 | if (!bind_list) { |
2183 | return cma_alloc_port(ps, id_priv, snum); | 2205 | ret = cma_alloc_port(ps, id_priv, snum); |
2184 | 2206 | } else { | |
2185 | /* | 2207 | ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); |
2186 | * We don't support binding to any address if anyone is bound to | 2208 | if (!ret) |
2187 | * a specific address on the same port. | 2209 | cma_bind_port(bind_list, id_priv); |
2188 | */ | ||
2189 | if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) | ||
2190 | return -EADDRNOTAVAIL; | ||
2191 | |||
2192 | hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { | ||
2193 | if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr)) | ||
2194 | return -EADDRNOTAVAIL; | ||
2195 | |||
2196 | cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; | ||
2197 | if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) | ||
2198 | return -EADDRINUSE; | ||
2199 | } | 2210 | } |
2211 | return ret; | ||
2212 | } | ||
2200 | 2213 | ||
2201 | cma_bind_port(bind_list, id_priv); | 2214 | static int cma_bind_listen(struct rdma_id_private *id_priv) |
2202 | return 0; | 2215 | { |
2216 | struct rdma_bind_list *bind_list = id_priv->bind_list; | ||
2217 | int ret = 0; | ||
2218 | |||
2219 | mutex_lock(&lock); | ||
2220 | if (bind_list->owners.first->next) | ||
2221 | ret = cma_check_port(bind_list, id_priv, 0); | ||
2222 | mutex_unlock(&lock); | ||
2223 | return ret; | ||
2203 | } | 2224 | } |
2204 | 2225 | ||
2205 | static int cma_get_port(struct rdma_id_private *id_priv) | 2226 | static int cma_get_port(struct rdma_id_private *id_priv) |
@@ -2253,6 +2274,56 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, | |||
2253 | return 0; | 2274 | return 0; |
2254 | } | 2275 | } |
2255 | 2276 | ||
2277 | int rdma_listen(struct rdma_cm_id *id, int backlog) | ||
2278 | { | ||
2279 | struct rdma_id_private *id_priv; | ||
2280 | int ret; | ||
2281 | |||
2282 | id_priv = container_of(id, struct rdma_id_private, id); | ||
2283 | if (id_priv->state == CMA_IDLE) { | ||
2284 | ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; | ||
2285 | ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); | ||
2286 | if (ret) | ||
2287 | return ret; | ||
2288 | } | ||
2289 | |||
2290 | if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) | ||
2291 | return -EINVAL; | ||
2292 | |||
2293 | if (id_priv->reuseaddr) { | ||
2294 | ret = cma_bind_listen(id_priv); | ||
2295 | if (ret) | ||
2296 | goto err; | ||
2297 | } | ||
2298 | |||
2299 | id_priv->backlog = backlog; | ||
2300 | if (id->device) { | ||
2301 | switch (rdma_node_get_transport(id->device->node_type)) { | ||
2302 | case RDMA_TRANSPORT_IB: | ||
2303 | ret = cma_ib_listen(id_priv); | ||
2304 | if (ret) | ||
2305 | goto err; | ||
2306 | break; | ||
2307 | case RDMA_TRANSPORT_IWARP: | ||
2308 | ret = cma_iw_listen(id_priv, backlog); | ||
2309 | if (ret) | ||
2310 | goto err; | ||
2311 | break; | ||
2312 | default: | ||
2313 | ret = -ENOSYS; | ||
2314 | goto err; | ||
2315 | } | ||
2316 | } else | ||
2317 | cma_listen_on_all(id_priv); | ||
2318 | |||
2319 | return 0; | ||
2320 | err: | ||
2321 | id_priv->backlog = 0; | ||
2322 | cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); | ||
2323 | return ret; | ||
2324 | } | ||
2325 | EXPORT_SYMBOL(rdma_listen); | ||
2326 | |||
2256 | int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) | 2327 | int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) |
2257 | { | 2328 | { |
2258 | struct rdma_id_private *id_priv; | 2329 | struct rdma_id_private *id_priv; |
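
Note the ordering constraint rdma_set_reuseaddr() imposes: it only succeeds while the id is still in CMA_IDLE, i.e. before binding. A minimal sketch of the intended kernel-side call order (declarations and sockaddr setup not shown):

    /* Sketch: enable address reuse before binding and listening. */
    ret = rdma_set_reuseaddr(id, 1);    /* must precede rdma_bind_addr() */
    if (!ret)
        ret = rdma_bind_addr(id, (struct sockaddr *)&addr);
    if (!ret)
        ret = rdma_listen(id, backlog);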
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 2a1e9ae134b4..a9c042345c6f 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c | |||
@@ -725,7 +725,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv, | |||
725 | */ | 725 | */ |
726 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | 726 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); |
727 | BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); | 727 | BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); |
728 | if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) { | 728 | if (iw_event->status == 0) { |
729 | cm_id_priv->id.local_addr = iw_event->local_addr; | 729 | cm_id_priv->id.local_addr = iw_event->local_addr; |
730 | cm_id_priv->id.remote_addr = iw_event->remote_addr; | 730 | cm_id_priv->id.remote_addr = iw_event->remote_addr; |
731 | cm_id_priv->state = IW_CM_STATE_ESTABLISHED; | 731 | cm_id_priv->state = IW_CM_STATE_ESTABLISHED; |
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index ec1e9da1488b..b3fa798525b2 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -883,6 +883,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname, | |||
883 | } | 883 | } |
884 | rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); | 884 | rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); |
885 | break; | 885 | break; |
886 | case RDMA_OPTION_ID_REUSEADDR: | ||
887 | if (optlen != sizeof(int)) { | ||
888 | ret = -EINVAL; | ||
889 | break; | ||
890 | } | ||
891 | ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); | ||
892 | break; | ||
886 | default: | 893 | default: |
887 | ret = -ENOSYS; | 894 | ret = -ENOSYS; |
888 | } | 895 | } |
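
From user space the same knob is reached through this set-option path; with librdmacm the call would look roughly like the following (assuming a librdmacm of matching vintage that exposes RDMA_OPTION_ID_REUSEADDR):

    /* Sketch: request address reuse on an rdma_cm_id from user space,
     * before rdma_bind_addr()/rdma_listen(). */
    int one = 1;

    if (rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
                        &one, sizeof(one)))
        perror("rdma_set_option(REUSEADDR)");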
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 9d8dcfab2b38..d7ee70fc9173 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -1198,9 +1198,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1198 | } | 1198 | } |
1199 | PDBG("%s ep %p status %d error %d\n", __func__, ep, | 1199 | PDBG("%s ep %p status %d error %d\n", __func__, ep, |
1200 | rpl->status, status2errno(rpl->status)); | 1200 | rpl->status, status2errno(rpl->status)); |
1201 | ep->com.wr_wait.ret = status2errno(rpl->status); | 1201 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); |
1202 | ep->com.wr_wait.done = 1; | ||
1203 | wake_up(&ep->com.wr_wait.wait); | ||
1204 | 1202 | ||
1205 | return 0; | 1203 | return 0; |
1206 | } | 1204 | } |
@@ -1234,9 +1232,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1234 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); | 1232 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); |
1235 | 1233 | ||
1236 | PDBG("%s ep %p\n", __func__, ep); | 1234 | PDBG("%s ep %p\n", __func__, ep); |
1237 | ep->com.wr_wait.ret = status2errno(rpl->status); | 1235 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); |
1238 | ep->com.wr_wait.done = 1; | ||
1239 | wake_up(&ep->com.wr_wait.wait); | ||
1240 | return 0; | 1236 | return 0; |
1241 | } | 1237 | } |
1242 | 1238 | ||
@@ -1466,7 +1462,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1466 | struct c4iw_qp_attributes attrs; | 1462 | struct c4iw_qp_attributes attrs; |
1467 | int disconnect = 1; | 1463 | int disconnect = 1; |
1468 | int release = 0; | 1464 | int release = 0; |
1469 | int closing = 0; | 1465 | int abort = 0; |
1470 | struct tid_info *t = dev->rdev.lldi.tids; | 1466 | struct tid_info *t = dev->rdev.lldi.tids; |
1471 | unsigned int tid = GET_TID(hdr); | 1467 | unsigned int tid = GET_TID(hdr); |
1472 | 1468 | ||
@@ -1492,23 +1488,22 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1492 | * in rdma connection migration (see c4iw_accept_cr()). | 1488 | * in rdma connection migration (see c4iw_accept_cr()). |
1493 | */ | 1489 | */ |
1494 | __state_set(&ep->com, CLOSING); | 1490 | __state_set(&ep->com, CLOSING); |
1495 | ep->com.wr_wait.done = 1; | ||
1496 | ep->com.wr_wait.ret = -ECONNRESET; | ||
1497 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); | 1491 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
1498 | wake_up(&ep->com.wr_wait.wait); | 1492 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
1499 | break; | 1493 | break; |
1500 | case MPA_REP_SENT: | 1494 | case MPA_REP_SENT: |
1501 | __state_set(&ep->com, CLOSING); | 1495 | __state_set(&ep->com, CLOSING); |
1502 | ep->com.wr_wait.done = 1; | ||
1503 | ep->com.wr_wait.ret = -ECONNRESET; | ||
1504 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); | 1496 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
1505 | wake_up(&ep->com.wr_wait.wait); | 1497 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
1506 | break; | 1498 | break; |
1507 | case FPDU_MODE: | 1499 | case FPDU_MODE: |
1508 | start_ep_timer(ep); | 1500 | start_ep_timer(ep); |
1509 | __state_set(&ep->com, CLOSING); | 1501 | __state_set(&ep->com, CLOSING); |
1510 | closing = 1; | 1502 | attrs.next_state = C4IW_QP_STATE_CLOSING; |
1503 | abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | ||
1504 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | ||
1511 | peer_close_upcall(ep); | 1505 | peer_close_upcall(ep); |
1506 | disconnect = 1; | ||
1512 | break; | 1507 | break; |
1513 | case ABORTING: | 1508 | case ABORTING: |
1514 | disconnect = 0; | 1509 | disconnect = 0; |
@@ -1536,11 +1531,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1536 | BUG_ON(1); | 1531 | BUG_ON(1); |
1537 | } | 1532 | } |
1538 | mutex_unlock(&ep->com.mutex); | 1533 | mutex_unlock(&ep->com.mutex); |
1539 | if (closing) { | ||
1540 | attrs.next_state = C4IW_QP_STATE_CLOSING; | ||
1541 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | ||
1542 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | ||
1543 | } | ||
1544 | if (disconnect) | 1534 | if (disconnect) |
1545 | c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | 1535 | c4iw_ep_disconnect(ep, 0, GFP_KERNEL); |
1546 | if (release) | 1536 | if (release) |
@@ -1581,9 +1571,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1581 | /* | 1571 | /* |
1582 | * Wake up any threads in rdma_init() or rdma_fini(). | 1572 | * Wake up any threads in rdma_init() or rdma_fini(). |
1583 | */ | 1573 | */ |
1584 | ep->com.wr_wait.done = 1; | 1574 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
1585 | ep->com.wr_wait.ret = -ECONNRESET; | ||
1586 | wake_up(&ep->com.wr_wait.wait); | ||
1587 | 1575 | ||
1588 | mutex_lock(&ep->com.mutex); | 1576 | mutex_lock(&ep->com.mutex); |
1589 | switch (ep->com.state) { | 1577 | switch (ep->com.state) { |
@@ -1710,14 +1698,14 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1710 | ep = lookup_tid(t, tid); | 1698 | ep = lookup_tid(t, tid); |
1711 | BUG_ON(!ep); | 1699 | BUG_ON(!ep); |
1712 | 1700 | ||
1713 | if (ep->com.qp) { | 1701 | if (ep && ep->com.qp) { |
1714 | printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, | 1702 | printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, |
1715 | ep->com.qp->wq.sq.qid); | 1703 | ep->com.qp->wq.sq.qid); |
1716 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | 1704 | attrs.next_state = C4IW_QP_STATE_TERMINATE; |
1717 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 1705 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
1718 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | 1706 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
1719 | } else | 1707 | } else |
1720 | printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid); | 1708 | printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); |
1721 | 1709 | ||
1722 | return 0; | 1710 | return 0; |
1723 | } | 1711 | } |
@@ -2296,14 +2284,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2296 | ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); | 2284 | ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); |
2297 | wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; | 2285 | wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; |
2298 | PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); | 2286 | PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); |
2299 | if (wr_waitp) { | 2287 | if (wr_waitp) |
2300 | if (ret) | 2288 | c4iw_wake_up(wr_waitp, ret ? -ret : 0); |
2301 | wr_waitp->ret = -ret; | ||
2302 | else | ||
2303 | wr_waitp->ret = 0; | ||
2304 | wr_waitp->done = 1; | ||
2305 | wake_up(&wr_waitp->wait); | ||
2306 | } | ||
2307 | kfree_skb(skb); | 2289 | kfree_skb(skb); |
2308 | break; | 2290 | break; |
2309 | case 2: | 2291 | case 2: |
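
The repeated done/ret/wake_up triplet is folded into a helper across all these call sites; reconstructed from the code removed above (not copied from the header, so treat it as a sketch), c4iw_wake_up() presumably reduces to:

    /* Sketch: the helper the hunks above converge on. */
    static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
    {
        wr_waitp->ret = ret;
        wr_waitp->done = 1;
        wake_up(&wr_waitp->wait);
    }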
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index e29172c2afcb..40a13cc633a3 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver"); | |||
44 | MODULE_LICENSE("Dual BSD/GPL"); | 44 | MODULE_LICENSE("Dual BSD/GPL"); |
45 | MODULE_VERSION(DRV_VERSION); | 45 | MODULE_VERSION(DRV_VERSION); |
46 | 46 | ||
47 | static LIST_HEAD(dev_list); | 47 | static LIST_HEAD(uld_ctx_list); |
48 | static DEFINE_MUTEX(dev_mutex); | 48 | static DEFINE_MUTEX(dev_mutex); |
49 | 49 | ||
50 | static struct dentry *c4iw_debugfs_root; | 50 | static struct dentry *c4iw_debugfs_root; |
@@ -370,18 +370,23 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) | |||
370 | c4iw_destroy_resource(&rdev->resource); | 370 | c4iw_destroy_resource(&rdev->resource); |
371 | } | 371 | } |
372 | 372 | ||
373 | static void c4iw_remove(struct c4iw_dev *dev) | 373 | struct uld_ctx { |
374 | struct list_head entry; | ||
375 | struct cxgb4_lld_info lldi; | ||
376 | struct c4iw_dev *dev; | ||
377 | }; | ||
378 | |||
379 | static void c4iw_remove(struct uld_ctx *ctx) | ||
374 | { | 380 | { |
375 | PDBG("%s c4iw_dev %p\n", __func__, dev); | 381 | PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); |
376 | list_del(&dev->entry); | 382 | c4iw_unregister_device(ctx->dev); |
377 | if (dev->registered) | 383 | c4iw_rdev_close(&ctx->dev->rdev); |
378 | c4iw_unregister_device(dev); | 384 | idr_destroy(&ctx->dev->cqidr); |
379 | c4iw_rdev_close(&dev->rdev); | 385 | idr_destroy(&ctx->dev->qpidr); |
380 | idr_destroy(&dev->cqidr); | 386 | idr_destroy(&ctx->dev->mmidr); |
381 | idr_destroy(&dev->qpidr); | 387 | iounmap(ctx->dev->rdev.oc_mw_kva); |
382 | idr_destroy(&dev->mmidr); | 388 | ib_dealloc_device(&ctx->dev->ibdev); |
383 | iounmap(dev->rdev.oc_mw_kva); | 389 | ctx->dev = NULL; |
384 | ib_dealloc_device(&dev->ibdev); | ||
385 | } | 390 | } |
386 | 391 | ||
387 | static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | 392 | static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) |
@@ -392,7 +397,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
392 | devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); | 397 | devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); |
393 | if (!devp) { | 398 | if (!devp) { |
394 | printk(KERN_ERR MOD "Cannot allocate ib device\n"); | 399 | printk(KERN_ERR MOD "Cannot allocate ib device\n"); |
395 | return NULL; | 400 | return ERR_PTR(-ENOMEM); |
396 | } | 401 | } |
397 | devp->rdev.lldi = *infop; | 402 | devp->rdev.lldi = *infop; |
398 | 403 | ||
@@ -402,27 +407,23 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
402 | devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, | 407 | devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, |
403 | devp->rdev.lldi.vr->ocq.size); | 408 | devp->rdev.lldi.vr->ocq.size); |
404 | 409 | ||
405 | printk(KERN_INFO MOD "ocq memory: " | 410 | PDBG(KERN_INFO MOD "ocq memory: " |
406 | "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", | 411 | "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", |
407 | devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, | 412 | devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, |
408 | devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); | 413 | devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); |
409 | 414 | ||
410 | mutex_lock(&dev_mutex); | ||
411 | |||
412 | ret = c4iw_rdev_open(&devp->rdev); | 415 | ret = c4iw_rdev_open(&devp->rdev); |
413 | if (ret) { | 416 | if (ret) { |
414 | mutex_unlock(&dev_mutex); | 417 | mutex_unlock(&dev_mutex); |
415 | printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); | 418 | printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); |
416 | ib_dealloc_device(&devp->ibdev); | 419 | ib_dealloc_device(&devp->ibdev); |
417 | return NULL; | 420 | return ERR_PTR(ret); |
418 | } | 421 | } |
419 | 422 | ||
420 | idr_init(&devp->cqidr); | 423 | idr_init(&devp->cqidr); |
421 | idr_init(&devp->qpidr); | 424 | idr_init(&devp->qpidr); |
422 | idr_init(&devp->mmidr); | 425 | idr_init(&devp->mmidr); |
423 | spin_lock_init(&devp->lock); | 426 | spin_lock_init(&devp->lock); |
424 | list_add_tail(&devp->entry, &dev_list); | ||
425 | mutex_unlock(&dev_mutex); | ||
426 | 427 | ||
427 | if (c4iw_debugfs_root) { | 428 | if (c4iw_debugfs_root) { |
428 | devp->debugfs_root = debugfs_create_dir( | 429 | devp->debugfs_root = debugfs_create_dir( |
@@ -435,7 +436,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
435 | 436 | ||
436 | static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) | 437 | static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) |
437 | { | 438 | { |
438 | struct c4iw_dev *dev; | 439 | struct uld_ctx *ctx; |
439 | static int vers_printed; | 440 | static int vers_printed; |
440 | int i; | 441 | int i; |
441 | 442 | ||
@@ -443,25 +444,33 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) | |||
443 | printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", | 444 | printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", |
444 | DRV_VERSION); | 445 | DRV_VERSION); |
445 | 446 | ||
446 | dev = c4iw_alloc(infop); | 447 | ctx = kzalloc(sizeof *ctx, GFP_KERNEL); |
447 | if (!dev) | 448 | if (!ctx) { |
449 | ctx = ERR_PTR(-ENOMEM); | ||
448 | goto out; | 450 | goto out; |
451 | } | ||
452 | ctx->lldi = *infop; | ||
449 | 453 | ||
450 | PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n", | 454 | PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n", |
451 | __func__, pci_name(dev->rdev.lldi.pdev), | 455 | __func__, pci_name(ctx->lldi.pdev), |
452 | dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq, | 456 | ctx->lldi.nchan, ctx->lldi.nrxq, |
453 | dev->rdev.lldi.ntxq, dev->rdev.lldi.nports); | 457 | ctx->lldi.ntxq, ctx->lldi.nports); |
458 | |||
459 | mutex_lock(&dev_mutex); | ||
460 | list_add_tail(&ctx->entry, &uld_ctx_list); | ||
461 | mutex_unlock(&dev_mutex); | ||
454 | 462 | ||
455 | for (i = 0; i < dev->rdev.lldi.nrxq; i++) | 463 | for (i = 0; i < ctx->lldi.nrxq; i++) |
456 | PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]); | 464 | PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]); |
457 | out: | 465 | out: |
458 | return dev; | 466 | return ctx; |
459 | } | 467 | } |
460 | 468 | ||
461 | static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, | 469 | static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, |
462 | const struct pkt_gl *gl) | 470 | const struct pkt_gl *gl) |
463 | { | 471 | { |
464 | struct c4iw_dev *dev = handle; | 472 | struct uld_ctx *ctx = handle; |
473 | struct c4iw_dev *dev = ctx->dev; | ||
465 | struct sk_buff *skb; | 474 | struct sk_buff *skb; |
466 | const struct cpl_act_establish *rpl; | 475 | const struct cpl_act_establish *rpl; |
467 | unsigned int opcode; | 476 | unsigned int opcode; |
@@ -503,47 +512,49 @@ nomem: | |||
503 | 512 | ||
504 | static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) | 513 | static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) |
505 | { | 514 | { |
506 | struct c4iw_dev *dev = handle; | 515 | struct uld_ctx *ctx = handle; |
507 | 516 | ||
508 | PDBG("%s new_state %u\n", __func__, new_state); | 517 | PDBG("%s new_state %u\n", __func__, new_state); |
509 | switch (new_state) { | 518 | switch (new_state) { |
510 | case CXGB4_STATE_UP: | 519 | case CXGB4_STATE_UP: |
511 | printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev)); | 520 | printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); |
512 | if (!dev->registered) { | 521 | if (!ctx->dev) { |
513 | int ret; | 522 | int ret = 0; |
514 | ret = c4iw_register_device(dev); | 523 | |
515 | if (ret) | 524 | ctx->dev = c4iw_alloc(&ctx->lldi); |
525 | if (!IS_ERR(ctx->dev)) | ||
526 | ret = c4iw_register_device(ctx->dev); | ||
527 | if (IS_ERR(ctx->dev) || ret) | ||
516 | printk(KERN_ERR MOD | 528 | printk(KERN_ERR MOD |
517 | "%s: RDMA registration failed: %d\n", | 529 | "%s: RDMA registration failed: %d\n", |
518 | pci_name(dev->rdev.lldi.pdev), ret); | 530 | pci_name(ctx->lldi.pdev), ret); |
519 | } | 531 | } |
520 | break; | 532 | break; |
521 | case CXGB4_STATE_DOWN: | 533 | case CXGB4_STATE_DOWN: |
522 | printk(KERN_INFO MOD "%s: Down\n", | 534 | printk(KERN_INFO MOD "%s: Down\n", |
523 | pci_name(dev->rdev.lldi.pdev)); | 535 | pci_name(ctx->lldi.pdev)); |
524 | if (dev->registered) | 536 | if (ctx->dev) |
525 | c4iw_unregister_device(dev); | 537 | c4iw_remove(ctx); |
526 | break; | 538 | break; |
527 | case CXGB4_STATE_START_RECOVERY: | 539 | case CXGB4_STATE_START_RECOVERY: |
528 | printk(KERN_INFO MOD "%s: Fatal Error\n", | 540 | printk(KERN_INFO MOD "%s: Fatal Error\n", |
529 | pci_name(dev->rdev.lldi.pdev)); | 541 | pci_name(ctx->lldi.pdev)); |
530 | dev->rdev.flags |= T4_FATAL_ERROR; | 542 | if (ctx->dev) { |
531 | if (dev->registered) { | ||
532 | struct ib_event event; | 543 | struct ib_event event; |
533 | 544 | ||
545 | ctx->dev->rdev.flags |= T4_FATAL_ERROR; | ||
534 | memset(&event, 0, sizeof event); | 546 | memset(&event, 0, sizeof event); |
535 | event.event = IB_EVENT_DEVICE_FATAL; | 547 | event.event = IB_EVENT_DEVICE_FATAL; |
536 | event.device = &dev->ibdev; | 548 | event.device = &ctx->dev->ibdev; |
537 | ib_dispatch_event(&event); | 549 | ib_dispatch_event(&event); |
538 | c4iw_unregister_device(dev); | 550 | c4iw_remove(ctx); |
539 | } | 551 | } |
540 | break; | 552 | break; |
541 | case CXGB4_STATE_DETACH: | 553 | case CXGB4_STATE_DETACH: |
542 | printk(KERN_INFO MOD "%s: Detach\n", | 554 | printk(KERN_INFO MOD "%s: Detach\n", |
543 | pci_name(dev->rdev.lldi.pdev)); | 555 | pci_name(ctx->lldi.pdev)); |
544 | mutex_lock(&dev_mutex); | 556 | if (ctx->dev) |
545 | c4iw_remove(dev); | 557 | c4iw_remove(ctx); |
546 | mutex_unlock(&dev_mutex); | ||
547 | break; | 558 | break; |
548 | } | 559 | } |
549 | return 0; | 560 | return 0; |
@@ -576,11 +587,13 @@ static int __init c4iw_init_module(void) | |||
576 | 587 | ||
577 | static void __exit c4iw_exit_module(void) | 588 | static void __exit c4iw_exit_module(void) |
578 | { | 589 | { |
579 | struct c4iw_dev *dev, *tmp; | 590 | struct uld_ctx *ctx, *tmp; |
580 | 591 | ||
581 | mutex_lock(&dev_mutex); | 592 | mutex_lock(&dev_mutex); |
582 | list_for_each_entry_safe(dev, tmp, &dev_list, entry) { | 593 | list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) { |
583 | c4iw_remove(dev); | 594 | if (ctx->dev) |
595 | c4iw_remove(ctx); | ||
596 | kfree(ctx); | ||
584 | } | 597 | } |
585 | mutex_unlock(&dev_mutex); | 598 | mutex_unlock(&dev_mutex); |
586 | cxgb4_unregister_uld(CXGB4_ULD_RDMA); | 599 | cxgb4_unregister_uld(CXGB4_ULD_RDMA); |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 9f6166f59268..35d2a5dd9bb4 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -131,42 +131,58 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev) | |||
131 | 131 | ||
132 | #define C4IW_WR_TO (10*HZ) | 132 | #define C4IW_WR_TO (10*HZ) |
133 | 133 | ||
134 | enum { | ||
135 | REPLY_READY = 0, | ||
136 | }; | ||
137 | |||
134 | struct c4iw_wr_wait { | 138 | struct c4iw_wr_wait { |
135 | wait_queue_head_t wait; | 139 | wait_queue_head_t wait; |
136 | int done; | 140 | unsigned long status; |
137 | int ret; | 141 | int ret; |
138 | }; | 142 | }; |
139 | 143 | ||
140 | static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) | 144 | static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) |
141 | { | 145 | { |
142 | wr_waitp->ret = 0; | 146 | wr_waitp->ret = 0; |
143 | wr_waitp->done = 0; | 147 | wr_waitp->status = 0; |
144 | init_waitqueue_head(&wr_waitp->wait); | 148 | init_waitqueue_head(&wr_waitp->wait); |
145 | } | 149 | } |
146 | 150 | ||
151 | static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) | ||
152 | { | ||
153 | wr_waitp->ret = ret; | ||
154 | set_bit(REPLY_READY, &wr_waitp->status); | ||
155 | wake_up(&wr_waitp->wait); | ||
156 | } | ||
157 | |||
147 | static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, | 158 | static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, |
148 | struct c4iw_wr_wait *wr_waitp, | 159 | struct c4iw_wr_wait *wr_waitp, |
149 | u32 hwtid, u32 qpid, | 160 | u32 hwtid, u32 qpid, |
150 | const char *func) | 161 | const char *func) |
151 | { | 162 | { |
152 | unsigned to = C4IW_WR_TO; | 163 | unsigned to = C4IW_WR_TO; |
153 | do { | 164 | int ret; |
154 | 165 | ||
155 | wait_event_timeout(wr_waitp->wait, wr_waitp->done, to); | 166 | do { |
156 | if (!wr_waitp->done) { | 167 | ret = wait_event_timeout(wr_waitp->wait, |
168 | test_and_clear_bit(REPLY_READY, &wr_waitp->status), to); | ||
169 | if (!ret) { | ||
157 | printk(KERN_ERR MOD "%s - Device %s not responding - " | 170 | printk(KERN_ERR MOD "%s - Device %s not responding - " |
158 | "tid %u qpid %u\n", func, | 171 | "tid %u qpid %u\n", func, |
159 | pci_name(rdev->lldi.pdev), hwtid, qpid); | 172 | pci_name(rdev->lldi.pdev), hwtid, qpid); |
173 | if (c4iw_fatal_error(rdev)) { | ||
174 | wr_waitp->ret = -EIO; | ||
175 | break; | ||
176 | } | ||
160 | to = to << 2; | 177 | to = to << 2; |
161 | } | 178 | } |
162 | } while (!wr_waitp->done); | 179 | } while (!ret); |
163 | if (wr_waitp->ret) | 180 | if (wr_waitp->ret) |
164 | printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n", | 181 | PDBG("%s: FW reply %d tid %u qpid %u\n", |
165 | pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); | 182 | pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); |
166 | return wr_waitp->ret; | 183 | return wr_waitp->ret; |
167 | } | 184 | } |
168 | 185 | ||
169 | |||
170 | struct c4iw_dev { | 186 | struct c4iw_dev { |
171 | struct ib_device ibdev; | 187 | struct ib_device ibdev; |
172 | struct c4iw_rdev rdev; | 188 | struct c4iw_rdev rdev; |
@@ -175,9 +191,7 @@ struct c4iw_dev { | |||
175 | struct idr qpidr; | 191 | struct idr qpidr; |
176 | struct idr mmidr; | 192 | struct idr mmidr; |
177 | spinlock_t lock; | 193 | spinlock_t lock; |
178 | struct list_head entry; | ||
179 | struct dentry *debugfs_root; | 194 | struct dentry *debugfs_root; |
180 | u8 registered; | ||
181 | }; | 195 | }; |
182 | 196 | ||
183 | static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) | 197 | static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) |
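The c4iw_wake_up()/c4iw_wait_for_reply() pair introduced above replaces the open-coded done flag with a REPLY_READY bit that the waiter consumes via test_and_clear_bit(), so a stale wake-up from an earlier command cannot satisfy the next wait on the same structure, and a fatal adapter error now breaks the back-off loop with -EIO instead of retrying forever. A stripped-down sketch of the same pattern (the reply_wait* names are hypothetical; the wait-queue and bitop primitives are the real ones):

    #include <linux/wait.h>
    #include <linux/bitops.h>
    #include <linux/jiffies.h>

    enum { REPLY_READY = 0 };

    struct reply_wait {
            wait_queue_head_t wait;
            unsigned long status;
            int ret;
    };

    static void reply_wait_init(struct reply_wait *w)
    {
            w->ret = 0;
            w->status = 0;
            init_waitqueue_head(&w->wait);
    }

    static void reply_wake_up(struct reply_wait *w, int ret)
    {
            w->ret = ret;
            set_bit(REPLY_READY, &w->status);       /* publish result, then wake */
            wake_up(&w->wait);
    }

    static int reply_wait_for(struct reply_wait *w)
    {
            unsigned long to = 10 * HZ;

            /*
             * test_and_clear_bit() consumes the event, so a leftover
             * wake-up cannot satisfy a later wait on the same struct.
             */
            while (!wait_event_timeout(w->wait,
                            test_and_clear_bit(REPLY_READY, &w->status), to))
                    to <<= 2;       /* quadruple the timeout and keep waiting */
            return w->ret;
    }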
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index f66dd8bf5128..5b9e4220ca08 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c | |||
@@ -516,7 +516,6 @@ int c4iw_register_device(struct c4iw_dev *dev) | |||
516 | if (ret) | 516 | if (ret) |
517 | goto bail2; | 517 | goto bail2; |
518 | } | 518 | } |
519 | dev->registered = 1; | ||
520 | return 0; | 519 | return 0; |
521 | bail2: | 520 | bail2: |
522 | ib_unregister_device(&dev->ibdev); | 521 | ib_unregister_device(&dev->ibdev); |
@@ -535,6 +534,5 @@ void c4iw_unregister_device(struct c4iw_dev *dev) | |||
535 | c4iw_class_attributes[i]); | 534 | c4iw_class_attributes[i]); |
536 | ib_unregister_device(&dev->ibdev); | 535 | ib_unregister_device(&dev->ibdev); |
537 | kfree(dev->ibdev.iwcm); | 536 | kfree(dev->ibdev.iwcm); |
538 | dev->registered = 0; | ||
539 | return; | 537 | return; |
540 | } | 538 | } |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 70a5a3c646da..3b773b05a898 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -214,7 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
214 | V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ | 214 | V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ |
215 | V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ | 215 | V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ |
216 | V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ | 216 | V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ |
217 | t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 | | 217 | (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) | |
218 | V_FW_RI_RES_WR_IQID(scq->cqid)); | 218 | V_FW_RI_RES_WR_IQID(scq->cqid)); |
219 | res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( | 219 | res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( |
220 | V_FW_RI_RES_WR_DCAEN(0) | | 220 | V_FW_RI_RES_WR_DCAEN(0) | |
@@ -1210,7 +1210,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
1210 | if (ret) { | 1210 | if (ret) { |
1211 | if (internal) | 1211 | if (internal) |
1212 | c4iw_get_ep(&qhp->ep->com); | 1212 | c4iw_get_ep(&qhp->ep->com); |
1213 | disconnect = abort = 1; | ||
1214 | goto err; | 1213 | goto err; |
1215 | } | 1214 | } |
1216 | break; | 1215 | break; |
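The added parentheses in create_qp() above are a precedence fix: the conditional operator binds more loosely than bitwise OR, so the unparenthesized expression folded V_FW_RI_RES_WR_IQID() into the ternary's else branch and dropped it whenever the SQ was on-chip. A plain C demonstration with illustrative flag values:

    #include <stdio.h>

    #define ONCHIP  0x100
    #define IQID(x) (x)

    int main(void)
    {
            int onchip = 1;

            /*
             * Buggy: parses as onchip ? ONCHIP : (0 | IQID(0xf)),
             * so the IQID field is dropped whenever onchip is true.
             */
            unsigned bad = onchip ? ONCHIP : 0 | IQID(0xf);

            /* Fixed: parenthesize the ternary so both fields OR together. */
            unsigned good = (onchip ? ONCHIP : 0) | IQID(0xf);

            printf("bad=0x%x good=0x%x\n", bad, good);      /* 0x100 vs 0x10f */
            return 0;
    }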
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 58c0e417bc30..be24ac726114 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
@@ -398,7 +398,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
398 | struct ipath_devdata *dd; | 398 | struct ipath_devdata *dd; |
399 | unsigned long long addr; | 399 | unsigned long long addr; |
400 | u32 bar0 = 0, bar1 = 0; | 400 | u32 bar0 = 0, bar1 = 0; |
401 | u8 rev; | ||
402 | 401 | ||
403 | dd = ipath_alloc_devdata(pdev); | 402 | dd = ipath_alloc_devdata(pdev); |
404 | if (IS_ERR(dd)) { | 403 | if (IS_ERR(dd)) { |
@@ -540,13 +539,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
540 | goto bail_regions; | 539 | goto bail_regions; |
541 | } | 540 | } |
542 | 541 | ||
543 | ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); | 542 | dd->ipath_pcirev = pdev->revision; |
544 | if (ret) { | ||
545 | ipath_dev_err(dd, "Failed to read PCI revision ID unit " | ||
546 | "%u: err %d\n", dd->ipath_unit, -ret); | ||
547 | goto bail_regions; /* shouldn't ever happen */ | ||
548 | } | ||
549 | dd->ipath_pcirev = rev; | ||
550 | 543 | ||
551 | #if defined(__powerpc__) | 544 | #if defined(__powerpc__) |
552 | /* There isn't a generic way to specify writethrough mappings */ | 545 | /* There isn't a generic way to specify writethrough mappings */ |
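The ipath hunk above (and the matching qib_pcie change later in this section) replaces a manual pci_read_config_byte(PCI_REVISION_ID) with the revision byte the PCI core already caches in struct pci_dev during enumeration, which also eliminates a cannot-really-fail error path. Both styles, sketched in a hypothetical probe routine:

    #include <linux/pci.h>

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            u8 rev;
            int ret;

            /* Old style: an extra config-space read plus a failure path. */
            ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
            if (ret)
                    return ret;

            /* New style: the core cached the byte at enumeration time. */
            rev = pdev->revision;

            dev_info(&pdev->dev, "revision %#x\n", rev);
            return 0;
    }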
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 33c7eedaba6c..e74cdf9ef471 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -2563,7 +2563,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2563 | u16 last_ae; | 2563 | u16 last_ae; |
2564 | u8 original_hw_tcp_state; | 2564 | u8 original_hw_tcp_state; |
2565 | u8 original_ibqp_state; | 2565 | u8 original_ibqp_state; |
2566 | enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK; | 2566 | int disconn_status = 0; |
2567 | int issue_disconn = 0; | 2567 | int issue_disconn = 0; |
2568 | int issue_close = 0; | 2568 | int issue_close = 0; |
2569 | int issue_flush = 0; | 2569 | int issue_flush = 0; |
@@ -2605,7 +2605,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2605 | (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { | 2605 | (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { |
2606 | issue_disconn = 1; | 2606 | issue_disconn = 1; |
2607 | if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) | 2607 | if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) |
2608 | disconn_status = IW_CM_EVENT_STATUS_RESET; | 2608 | disconn_status = -ECONNRESET; |
2609 | } | 2609 | } |
2610 | 2610 | ||
2611 | if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || | 2611 | if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || |
@@ -2666,7 +2666,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2666 | cm_id->provider_data = nesqp; | 2666 | cm_id->provider_data = nesqp; |
2667 | /* Send up the close complete event */ | 2667 | /* Send up the close complete event */ |
2668 | cm_event.event = IW_CM_EVENT_CLOSE; | 2668 | cm_event.event = IW_CM_EVENT_CLOSE; |
2669 | cm_event.status = IW_CM_EVENT_STATUS_OK; | 2669 | cm_event.status = 0; |
2670 | cm_event.provider_data = cm_id->provider_data; | 2670 | cm_event.provider_data = cm_id->provider_data; |
2671 | cm_event.local_addr = cm_id->local_addr; | 2671 | cm_event.local_addr = cm_id->local_addr; |
2672 | cm_event.remote_addr = cm_id->remote_addr; | 2672 | cm_event.remote_addr = cm_id->remote_addr; |
@@ -2966,7 +2966,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2966 | nes_add_ref(&nesqp->ibqp); | 2966 | nes_add_ref(&nesqp->ibqp); |
2967 | 2967 | ||
2968 | cm_event.event = IW_CM_EVENT_ESTABLISHED; | 2968 | cm_event.event = IW_CM_EVENT_ESTABLISHED; |
2969 | cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; | 2969 | cm_event.status = 0; |
2970 | cm_event.provider_data = (void *)nesqp; | 2970 | cm_event.provider_data = (void *)nesqp; |
2971 | cm_event.local_addr = cm_id->local_addr; | 2971 | cm_event.local_addr = cm_id->local_addr; |
2972 | cm_event.remote_addr = cm_id->remote_addr; | 2972 | cm_event.remote_addr = cm_id->remote_addr; |
@@ -3377,7 +3377,7 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3377 | 3377 | ||
3378 | /* notify OF layer we successfully created the requested connection */ | 3378 | /* notify OF layer we successfully created the requested connection */ |
3379 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; | 3379 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; |
3380 | cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; | 3380 | cm_event.status = 0; |
3381 | cm_event.provider_data = cm_id->provider_data; | 3381 | cm_event.provider_data = cm_id->provider_data; |
3382 | cm_event.local_addr.sin_family = AF_INET; | 3382 | cm_event.local_addr.sin_family = AF_INET; |
3383 | cm_event.local_addr.sin_port = cm_id->local_addr.sin_port; | 3383 | cm_event.local_addr.sin_port = cm_id->local_addr.sin_port; |
@@ -3484,7 +3484,7 @@ static void cm_event_reset(struct nes_cm_event *event) | |||
3484 | nesqp->cm_id = NULL; | 3484 | nesqp->cm_id = NULL; |
3485 | /* cm_id->provider_data = NULL; */ | 3485 | /* cm_id->provider_data = NULL; */ |
3486 | cm_event.event = IW_CM_EVENT_DISCONNECT; | 3486 | cm_event.event = IW_CM_EVENT_DISCONNECT; |
3487 | cm_event.status = IW_CM_EVENT_STATUS_RESET; | 3487 | cm_event.status = -ECONNRESET; |
3488 | cm_event.provider_data = cm_id->provider_data; | 3488 | cm_event.provider_data = cm_id->provider_data; |
3489 | cm_event.local_addr = cm_id->local_addr; | 3489 | cm_event.local_addr = cm_id->local_addr; |
3490 | cm_event.remote_addr = cm_id->remote_addr; | 3490 | cm_event.remote_addr = cm_id->remote_addr; |
@@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event) | |||
3495 | ret = cm_id->event_handler(cm_id, &cm_event); | 3495 | ret = cm_id->event_handler(cm_id, &cm_event); |
3496 | atomic_inc(&cm_closes); | 3496 | atomic_inc(&cm_closes); |
3497 | cm_event.event = IW_CM_EVENT_CLOSE; | 3497 | cm_event.event = IW_CM_EVENT_CLOSE; |
3498 | cm_event.status = IW_CM_EVENT_STATUS_OK; | 3498 | cm_event.status = 0; |
3499 | cm_event.provider_data = cm_id->provider_data; | 3499 | cm_event.provider_data = cm_id->provider_data; |
3500 | cm_event.local_addr = cm_id->local_addr; | 3500 | cm_event.local_addr = cm_id->local_addr; |
3501 | cm_event.remote_addr = cm_id->remote_addr; | 3501 | cm_event.remote_addr = cm_id->remote_addr; |
@@ -3534,7 +3534,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) | |||
3534 | cm_node, cm_id, jiffies); | 3534 | cm_node, cm_id, jiffies); |
3535 | 3535 | ||
3536 | cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; | 3536 | cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; |
3537 | cm_event.status = IW_CM_EVENT_STATUS_OK; | 3537 | cm_event.status = 0; |
3538 | cm_event.provider_data = (void *)cm_node; | 3538 | cm_event.provider_data = (void *)cm_node; |
3539 | 3539 | ||
3540 | cm_event.local_addr.sin_family = AF_INET; | 3540 | cm_event.local_addr.sin_family = AF_INET; |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 26d8018c0a7c..95ca93ceedac 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -1484,7 +1484,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) | |||
1484 | (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) { | 1484 | (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) { |
1485 | cm_id = nesqp->cm_id; | 1485 | cm_id = nesqp->cm_id; |
1486 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; | 1486 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; |
1487 | cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT; | 1487 | cm_event.status = -ETIMEDOUT; |
1488 | cm_event.local_addr = cm_id->local_addr; | 1488 | cm_event.local_addr = cm_id->local_addr; |
1489 | cm_event.remote_addr = cm_id->remote_addr; | 1489 | cm_event.remote_addr = cm_id->remote_addr; |
1490 | cm_event.private_data = NULL; | 1490 | cm_event.private_data = NULL; |
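The nes changes above, like the cxgb4 ones earlier, follow an iWARP CM API cleanup in which enum iw_cm_event_status values gave way to plain kernel error codes: 0 for success, -ECONNRESET for a peer reset, -ETIMEDOUT for a timed-out connect. A hedged sketch of dispatching an event under the errno convention (report_close is hypothetical; struct iw_cm_event is the real <rdma/iw_cm.h> type):

    #include <rdma/iw_cm.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static void report_close(struct iw_cm_id *cm_id, bool reset)
    {
            struct iw_cm_event ev = {
                    .event       = IW_CM_EVENT_DISCONNECT,
                    /* errno-style status replaces IW_CM_EVENT_STATUS_*: */
                    .status      = reset ? -ECONNRESET : 0,
                    .local_addr  = cm_id->local_addr,
                    .remote_addr = cm_id->remote_addr,
            };

            cm_id->event_handler(cm_id, &ev);
    }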
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 6bab3eaea70f..9f53e68a096a 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -7534,7 +7534,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) | |||
7534 | ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); | 7534 | ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); |
7535 | tstart = get_jiffies_64(); | 7535 | tstart = get_jiffies_64(); |
7536 | while (chan_done && | 7536 | while (chan_done && |
7537 | !time_after64(tstart, tstart + msecs_to_jiffies(500))) { | 7537 | !time_after64(get_jiffies_64(), |
7538 | tstart + msecs_to_jiffies(500))) { | ||
7538 | msleep(20); | 7539 | msleep(20); |
7539 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | 7540 | for (chan = 0; chan < SERDES_CHANS; ++chan) { |
7540 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), | 7541 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), |
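The qib_iba7322 fix above repairs a polling loop whose time bound compared tstart against tstart + msecs_to_jiffies(500), a condition that can never become true, so only chan_done ever terminated the loop; the fix samples the clock on each pass. The bounded-poll idiom, sketched (poll_ready and its ready callback are hypothetical stand-ins for the hardware check):

    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/types.h>

    /* Poll ready() for up to 500 ms; returns true on success. */
    static bool poll_ready(bool (*ready)(void))
    {
            u64 tstart = get_jiffies_64();

            while (!ready()) {
                    /*
                     * Re-read the clock each pass: comparing tstart against
                     * tstart + timeout, as the buggy loop did, never fires.
                     */
                    if (time_after64(get_jiffies_64(),
                                     tstart + msecs_to_jiffies(500)))
                            return false;
                    msleep(20);
            }
            return true;
    }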
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 48b6674cbc49..891cc2ff5f00 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c | |||
@@ -526,11 +526,8 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd) | |||
526 | */ | 526 | */ |
527 | devid = parent->device; | 527 | devid = parent->device; |
528 | if (devid >= 0x25e2 && devid <= 0x25fa) { | 528 | if (devid >= 0x25e2 && devid <= 0x25fa) { |
529 | u8 rev; | ||
530 | |||
531 | /* 5000 P/V/X/Z */ | 529 | /* 5000 P/V/X/Z */ |
532 | pci_read_config_byte(parent, PCI_REVISION_ID, &rev); | 530 | if (parent->revision <= 0xb2) |
533 | if (rev <= 0xb2) | ||
534 | bits = 1U << 10; | 531 | bits = 1U << 10; |
535 | else | 532 | else |
536 | bits = 7U << 10; | 533 | bits = 7U << 10; |
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c index 1839194ea987..10bcd4ae5402 100644 --- a/drivers/input/keyboard/atakbd.c +++ b/drivers/input/keyboard/atakbd.c | |||
@@ -223,8 +223,9 @@ static int __init atakbd_init(void) | |||
223 | return -ENODEV; | 223 | return -ENODEV; |
224 | 224 | ||
225 | // need to init core driver if not already done so | 225 | // need to init core driver if not already done so |
226 | if (atari_keyb_init()) | 226 | error = atari_keyb_init(); |
227 | return -ENODEV; | 227 | if (error) |
228 | return error; | ||
228 | 229 | ||
229 | atakbd_dev = input_allocate_device(); | 230 | atakbd_dev = input_allocate_device(); |
230 | if (!atakbd_dev) | 231 | if (!atakbd_dev) |
diff --git a/drivers/input/mouse/atarimouse.c b/drivers/input/mouse/atarimouse.c index adf45b3040e9..5c4a692bf73a 100644 --- a/drivers/input/mouse/atarimouse.c +++ b/drivers/input/mouse/atarimouse.c | |||
@@ -77,15 +77,15 @@ static void atamouse_interrupt(char *buf) | |||
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | /* only relative events get here */ | 79 | /* only relative events get here */ |
80 | dx = buf[1]; | 80 | dx = buf[1]; |
81 | dy = -buf[2]; | 81 | dy = buf[2]; |
82 | 82 | ||
83 | input_report_rel(atamouse_dev, REL_X, dx); | 83 | input_report_rel(atamouse_dev, REL_X, dx); |
84 | input_report_rel(atamouse_dev, REL_Y, dy); | 84 | input_report_rel(atamouse_dev, REL_Y, dy); |
85 | 85 | ||
86 | input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x1); | 86 | input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x4); |
87 | input_report_key(atamouse_dev, BTN_MIDDLE, buttons & 0x2); | 87 | input_report_key(atamouse_dev, BTN_MIDDLE, buttons & 0x2); |
88 | input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x4); | 88 | input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x1); |
89 | 89 | ||
90 | input_sync(atamouse_dev); | 90 | input_sync(atamouse_dev); |
91 | 91 | ||
@@ -108,7 +108,7 @@ static int atamouse_open(struct input_dev *dev) | |||
108 | static void atamouse_close(struct input_dev *dev) | 108 | static void atamouse_close(struct input_dev *dev) |
109 | { | 109 | { |
110 | ikbd_mouse_disable(); | 110 | ikbd_mouse_disable(); |
111 | atari_mouse_interrupt_hook = NULL; | 111 | atari_input_mouse_interrupt_hook = NULL; |
112 | } | 112 | } |
113 | 113 | ||
114 | static int __init atamouse_init(void) | 114 | static int __init atamouse_init(void) |
@@ -118,8 +118,9 @@ static int __init atamouse_init(void) | |||
118 | if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP)) | 118 | if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP)) |
119 | return -ENODEV; | 119 | return -ENODEV; |
120 | 120 | ||
121 | if (!atari_keyb_init()) | 121 | error = atari_keyb_init(); |
122 | return -ENODEV; | 122 | if (error) |
123 | return error; | ||
123 | 124 | ||
124 | atamouse_dev = input_allocate_device(); | 125 | atamouse_dev = input_allocate_device(); |
125 | if (!atamouse_dev) | 126 | if (!atamouse_dev) |
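Both Atari input hunks above stop collapsing the result of atari_keyb_init() into a blanket -ENODEV and forward the helper's own error code instead; the mouse driver had additionally inverted the test (if (!atari_keyb_init())), bailing out on success. The propagation pattern, reduced to a runnable sketch with hypothetical names:

    #include <stdio.h>

    /* Hypothetical core-init helper returning 0 or a negative errno. */
    static int core_init(void)
    {
            return -12;     /* pretend it failed with -ENOMEM */
    }

    static int demo_init(void)
    {
            int error = core_init();

            if (error)
                    return error;   /* forward the real cause, not -ENODEV */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", demo_init());    /* -12, not a blanket -19 */
            return 0;
    }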
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index c24946f51256..1de1c19dad30 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -281,17 +281,24 @@ struct ser_req { | |||
281 | u8 command; | 281 | u8 command; |
282 | u8 ref_off; | 282 | u8 ref_off; |
283 | u16 scratch; | 283 | u16 scratch; |
284 | __be16 sample; | ||
285 | struct spi_message msg; | 284 | struct spi_message msg; |
286 | struct spi_transfer xfer[6]; | 285 | struct spi_transfer xfer[6]; |
286 | /* | ||
287 | * DMA (thus cache coherency maintenance) requires the | ||
288 | * transfer buffers to live in their own cache lines. | ||
289 | */ | ||
290 | __be16 sample ____cacheline_aligned; | ||
287 | }; | 291 | }; |
288 | 292 | ||
289 | struct ads7845_ser_req { | 293 | struct ads7845_ser_req { |
290 | u8 command[3]; | 294 | u8 command[3]; |
291 | u8 pwrdown[3]; | ||
292 | u8 sample[3]; | ||
293 | struct spi_message msg; | 295 | struct spi_message msg; |
294 | struct spi_transfer xfer[2]; | 296 | struct spi_transfer xfer[2]; |
297 | /* | ||
298 | * DMA (thus cache coherency maintenance) requires the | ||
299 | * transfer buffers to live in their own cache lines. | ||
300 | */ | ||
301 | u8 sample[3] ____cacheline_aligned; | ||
295 | }; | 302 | }; |
296 | 303 | ||
297 | static int ads7846_read12_ser(struct device *dev, unsigned command) | 304 | static int ads7846_read12_ser(struct device *dev, unsigned command) |
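The ads7846 hunks above move the SPI transfer buffers to the end of their structures and mark them ____cacheline_aligned: a buffer handed to DMA must not share a cache line with CPU-written fields, or the cache maintenance done around the transfer can corrupt one or the other. A sketch of the layout rule (demo_req is hypothetical):

    #include <linux/cache.h>
    #include <linux/types.h>

    struct demo_req {
            /* CPU-only bookkeeping; these may share cache lines freely. */
            u8  command;
            u16 scratch;

            /*
             * DMA-visible buffer: placed last and cacheline-aligned so it
             * occupies its own line(s) and the cache maintenance done
             * around the transfer cannot touch the fields above. Putting
             * it last matters because any member that followed it could
             * otherwise share its final cache line.
             */
            __be16 sample ____cacheline_aligned;
    };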
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c index e7089a1f6cb6..b37e6186d0fa 100644 --- a/drivers/leds/leds-lm3530.c +++ b/drivers/leds/leds-lm3530.c | |||
@@ -349,6 +349,7 @@ static const struct i2c_device_id lm3530_id[] = { | |||
349 | {LM3530_NAME, 0}, | 349 | {LM3530_NAME, 0}, |
350 | {} | 350 | {} |
351 | }; | 351 | }; |
352 | MODULE_DEVICE_TABLE(i2c, lm3530_id); | ||
352 | 353 | ||
353 | static struct i2c_driver lm3530_i2c_driver = { | 354 | static struct i2c_driver lm3530_i2c_driver = { |
354 | .probe = lm3530_probe, | 355 | .probe = lm3530_probe, |
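The lm3530 one-liner above exports the driver's I2C ID table; without MODULE_DEVICE_TABLE() the module carries no modalias information, so depmod/udev cannot autoload it when a matching device turns up. The shape, with a hypothetical table:

    #include <linux/module.h>
    #include <linux/i2c.h>

    static const struct i2c_device_id demo_id[] = {
            { "demo-chip", 0 },
            { }
    };
    /* Emits the alias table depmod/modprobe use to autoload the module. */
    MODULE_DEVICE_TABLE(i2c, demo_id);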
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig index 0aaa0597a622..34ae49dc557c 100644 --- a/drivers/lguest/Kconfig +++ b/drivers/lguest/Kconfig | |||
@@ -5,8 +5,10 @@ config LGUEST | |||
5 | ---help--- | 5 | ---help--- |
6 | This is a very simple module which allows you to run | 6 | This is a very simple module which allows you to run |
7 | multiple instances of the same Linux kernel, using the | 7 | multiple instances of the same Linux kernel, using the |
8 | "lguest" command found in the Documentation/lguest directory. | 8 | "lguest" command found in the Documentation/virtual/lguest |
9 | directory. | ||
10 | |||
9 | Note that "lguest" is pronounced to rhyme with "fell quest", | 11 | Note that "lguest" is pronounced to rhyme with "fell quest", |
10 | not "rustyvisor". See Documentation/lguest/lguest.txt. | 12 | not "rustyvisor". See Documentation/virtual/lguest/lguest.txt. |
11 | 13 | ||
12 | If unsure, say N. If curious, say M. If masochistic, say Y. | 14 | If unsure, say N. If curious, say M. If masochistic, say Y. |
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile index 7d463c26124f..8ac947c7e7c7 100644 --- a/drivers/lguest/Makefile +++ b/drivers/lguest/Makefile | |||
@@ -18,7 +18,7 @@ Mastery: PREFIX=M | |||
18 | Beer: | 18 | Beer: |
19 | @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}" | 19 | @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}" |
20 | Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery: | 20 | Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery: |
21 | @sh ../../Documentation/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` | 21 | @sh ../../Documentation/virtual/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` |
22 | Puppy: | 22 | Puppy: |
23 | @clear | 23 | @clear |
24 | @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n" | 24 | @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n" |
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c index c820e2f53527..3f442003623d 100644 --- a/drivers/media/video/cx88/cx88-input.c +++ b/drivers/media/video/cx88/cx88-input.c | |||
@@ -524,7 +524,7 @@ void cx88_ir_irq(struct cx88_core *core) | |||
524 | for (todo = 32; todo > 0; todo -= bits) { | 524 | for (todo = 32; todo > 0; todo -= bits) { |
525 | ev.pulse = samples & 0x80000000 ? false : true; | 525 | ev.pulse = samples & 0x80000000 ? false : true; |
526 | bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples)); | 526 | bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples)); |
527 | ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate); | 527 | ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate; |
528 | ir_raw_event_store_with_filter(ir->dev, &ev); | 528 | ir_raw_event_store_with_filter(ir->dev, &ev); |
529 | samples <<= bits; | 529 | samples <<= bits; |
530 | } | 530 | } |
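The cx88 change above reorders an integer expression to dodge 32-bit overflow: with NSEC_PER_SEC = 10^9, bits * NSEC_PER_SEC wraps for any bits above 4, while dividing NSEC_PER_SEC by 1000 first caps the intermediate at 32 * 10^6. Worked through in plain C (the sample values are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000u

    int main(void)
    {
            uint32_t bits = 32, ir_samplerate = 4;  /* kHz; sample values */

            /* Wraps: 32 * 1e9 does not fit in 32 bits. */
            uint32_t bad  = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate);

            /* Safe: 32 * (1e9 / 1000) = 3.2e7 fits comfortably. */
            uint32_t good = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate;

            printf("bad=%u good=%u\n", bad, good);  /* 483807 vs 8000000 */
            return 0;
    }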
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 3973f9a94753..ddb4c091dedc 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c | |||
@@ -136,11 +136,50 @@ unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl, | |||
136 | } | 136 | } |
137 | EXPORT_SYMBOL(soc_camera_apply_sensor_flags); | 137 | EXPORT_SYMBOL(soc_camera_apply_sensor_flags); |
138 | 138 | ||
139 | #define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ | ||
140 | ((x) >> 24) & 0xff | ||
141 | |||
142 | static int soc_camera_try_fmt(struct soc_camera_device *icd, | ||
143 | struct v4l2_format *f) | ||
144 | { | ||
145 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); | ||
146 | struct v4l2_pix_format *pix = &f->fmt.pix; | ||
147 | int ret; | ||
148 | |||
149 | dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n", | ||
150 | pixfmtstr(pix->pixelformat), pix->width, pix->height); | ||
151 | |||
152 | pix->bytesperline = 0; | ||
153 | pix->sizeimage = 0; | ||
154 | |||
155 | ret = ici->ops->try_fmt(icd, f); | ||
156 | if (ret < 0) | ||
157 | return ret; | ||
158 | |||
159 | if (!pix->sizeimage) { | ||
160 | if (!pix->bytesperline) { | ||
161 | const struct soc_camera_format_xlate *xlate; | ||
162 | |||
163 | xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); | ||
164 | if (!xlate) | ||
165 | return -EINVAL; | ||
166 | |||
167 | ret = soc_mbus_bytes_per_line(pix->width, | ||
168 | xlate->host_fmt); | ||
169 | if (ret > 0) | ||
170 | pix->bytesperline = ret; | ||
171 | } | ||
172 | if (pix->bytesperline) | ||
173 | pix->sizeimage = pix->bytesperline * pix->height; | ||
174 | } | ||
175 | |||
176 | return 0; | ||
177 | } | ||
178 | |||
139 | static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, | 179 | static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, |
140 | struct v4l2_format *f) | 180 | struct v4l2_format *f) |
141 | { | 181 | { |
142 | struct soc_camera_device *icd = file->private_data; | 182 | struct soc_camera_device *icd = file->private_data; |
143 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); | ||
144 | 183 | ||
145 | WARN_ON(priv != file->private_data); | 184 | WARN_ON(priv != file->private_data); |
146 | 185 | ||
@@ -149,7 +188,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, | |||
149 | return -EINVAL; | 188 | return -EINVAL; |
150 | 189 | ||
151 | /* limit format to hardware capabilities */ | 190 | /* limit format to hardware capabilities */ |
152 | return ici->ops->try_fmt(icd, f); | 191 | return soc_camera_try_fmt(icd, f); |
153 | } | 192 | } |
154 | 193 | ||
155 | static int soc_camera_enum_input(struct file *file, void *priv, | 194 | static int soc_camera_enum_input(struct file *file, void *priv, |
@@ -362,9 +401,6 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd) | |||
362 | icd->user_formats = NULL; | 401 | icd->user_formats = NULL; |
363 | } | 402 | } |
364 | 403 | ||
365 | #define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ | ||
366 | ((x) >> 24) & 0xff | ||
367 | |||
368 | /* Called with .vb_lock held, or from the first open(2), see comment there */ | 404 | /* Called with .vb_lock held, or from the first open(2), see comment there */ |
369 | static int soc_camera_set_fmt(struct soc_camera_device *icd, | 405 | static int soc_camera_set_fmt(struct soc_camera_device *icd, |
370 | struct v4l2_format *f) | 406 | struct v4l2_format *f) |
@@ -377,7 +413,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd, | |||
377 | pixfmtstr(pix->pixelformat), pix->width, pix->height); | 413 | pixfmtstr(pix->pixelformat), pix->width, pix->height); |
378 | 414 | ||
379 | /* We always call try_fmt() before set_fmt() or set_crop() */ | 415 | /* We always call try_fmt() before set_fmt() or set_crop() */ |
380 | ret = ici->ops->try_fmt(icd, f); | 416 | ret = soc_camera_try_fmt(icd, f); |
381 | if (ret < 0) | 417 | if (ret < 0) |
382 | return ret; | 418 | return ret; |
383 | 419 | ||
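The new soc_camera_try_fmt() wrapper above zeroes bytesperline and sizeimage before calling the host's try_fmt, then fills in whatever the host left unset: bytesperline from the width and the negotiated bus format via soc_mbus_bytes_per_line(), and sizeimage as bytesperline * height. The fallback arithmetic, sketched in plain C under the simplifying assumption of a packed format with a fixed bytes-per-pixel:

    #include <stdio.h>

    /*
     * Illustrative fallback only: the real code derives the line length
     * from the media-bus format via soc_mbus_bytes_per_line().
     */
    static unsigned fallback_sizeimage(unsigned width, unsigned height,
                                       unsigned bpp, unsigned bytesperline)
    {
            if (!bytesperline)
                    bytesperline = width * bpp;     /* host left it unset */
            return bytesperline * height;
    }

    int main(void)
    {
            /* YUYV 640x480, 2 bytes/pixel: 1280 * 480 = 614400 */
            printf("%u\n", fallback_sizeimage(640, 480, 2, 0));
            return 0;
    }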
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index 5aeaf876ba9b..4aae501f02d0 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c | |||
@@ -155,8 +155,10 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, | |||
155 | sd->v4l2_dev = v4l2_dev; | 155 | sd->v4l2_dev = v4l2_dev; |
156 | if (sd->internal_ops && sd->internal_ops->registered) { | 156 | if (sd->internal_ops && sd->internal_ops->registered) { |
157 | err = sd->internal_ops->registered(sd); | 157 | err = sd->internal_ops->registered(sd); |
158 | if (err) | 158 | if (err) { |
159 | module_put(sd->owner); | ||
159 | return err; | 160 | return err; |
161 | } | ||
160 | } | 162 | } |
161 | 163 | ||
162 | /* This just returns 0 if either of the two args is NULL */ | 164 | /* This just returns 0 if either of the two args is NULL */ |
@@ -164,6 +166,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, | |||
164 | if (err) { | 166 | if (err) { |
165 | if (sd->internal_ops && sd->internal_ops->unregistered) | 167 | if (sd->internal_ops && sd->internal_ops->unregistered) |
166 | sd->internal_ops->unregistered(sd); | 168 | sd->internal_ops->unregistered(sd); |
169 | module_put(sd->owner); | ||
167 | return err; | 170 | return err; |
168 | } | 171 | } |
169 | 172 | ||
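The v4l2-device fix above balances the module reference taken earlier in v4l2_device_register_subdev(): the old code leaked a reference on sd->owner whenever internal_ops->registered() or the subsequent registration step failed. The acquire/rollback shape, sketched with hypothetical names:

    #include <linux/module.h>
    #include <linux/errno.h>

    /*
     * Hypothetical registration against a refcounted owner module:
     * every failure path after try_module_get() must module_put().
     */
    static int demo_register(struct module *owner, int (*hook)(void))
    {
            int err;

            if (!try_module_get(owner))
                    return -ENODEV;

            err = hook();
            if (err) {
                    module_put(owner);      /* roll the reference back */
                    return err;
            }
            return 0;                       /* dropped again at unregister */
    }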
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c index 0b8064490676..812729ebf09e 100644 --- a/drivers/media/video/v4l2-subdev.c +++ b/drivers/media/video/v4l2-subdev.c | |||
@@ -155,25 +155,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg) | |||
155 | 155 | ||
156 | switch (cmd) { | 156 | switch (cmd) { |
157 | case VIDIOC_QUERYCTRL: | 157 | case VIDIOC_QUERYCTRL: |
158 | return v4l2_subdev_queryctrl(sd, arg); | 158 | return v4l2_queryctrl(sd->ctrl_handler, arg); |
159 | 159 | ||
160 | case VIDIOC_QUERYMENU: | 160 | case VIDIOC_QUERYMENU: |
161 | return v4l2_subdev_querymenu(sd, arg); | 161 | return v4l2_querymenu(sd->ctrl_handler, arg); |
162 | 162 | ||
163 | case VIDIOC_G_CTRL: | 163 | case VIDIOC_G_CTRL: |
164 | return v4l2_subdev_g_ctrl(sd, arg); | 164 | return v4l2_g_ctrl(sd->ctrl_handler, arg); |
165 | 165 | ||
166 | case VIDIOC_S_CTRL: | 166 | case VIDIOC_S_CTRL: |
167 | return v4l2_subdev_s_ctrl(sd, arg); | 167 | return v4l2_s_ctrl(sd->ctrl_handler, arg); |
168 | 168 | ||
169 | case VIDIOC_G_EXT_CTRLS: | 169 | case VIDIOC_G_EXT_CTRLS: |
170 | return v4l2_subdev_g_ext_ctrls(sd, arg); | 170 | return v4l2_g_ext_ctrls(sd->ctrl_handler, arg); |
171 | 171 | ||
172 | case VIDIOC_S_EXT_CTRLS: | 172 | case VIDIOC_S_EXT_CTRLS: |
173 | return v4l2_subdev_s_ext_ctrls(sd, arg); | 173 | return v4l2_s_ext_ctrls(sd->ctrl_handler, arg); |
174 | 174 | ||
175 | case VIDIOC_TRY_EXT_CTRLS: | 175 | case VIDIOC_TRY_EXT_CTRLS: |
176 | return v4l2_subdev_try_ext_ctrls(sd, arg); | 176 | return v4l2_try_ext_ctrls(sd->ctrl_handler, arg); |
177 | 177 | ||
178 | case VIDIOC_DQEVENT: | 178 | case VIDIOC_DQEVENT: |
179 | if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) | 179 | if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) |
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 643ad52e3ca2..4796bbf0ae4e 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -1000,7 +1000,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void) | |||
1000 | gd->major = I2O_MAJOR; | 1000 | gd->major = I2O_MAJOR; |
1001 | gd->queue = queue; | 1001 | gd->queue = queue; |
1002 | gd->fops = &i2o_block_fops; | 1002 | gd->fops = &i2o_block_fops; |
1003 | gd->events = DISK_EVENT_MEDIA_CHANGE; | ||
1004 | gd->private_data = dev; | 1003 | gd->private_data = dev; |
1005 | 1004 | ||
1006 | dev->gd = gd; | 1005 | dev->gd = gd; |
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index d4a851c6b5bf..0b4d5b23bec9 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
@@ -144,7 +144,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
144 | int iter, i; | 144 | int iter, i; |
145 | unsigned long flags; | 145 | unsigned long flags; |
146 | 146 | ||
147 | data->chip->irq_ack(irq_data); | 147 | data->chip->irq_ack(data); |
148 | 148 | ||
149 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { | 149 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { |
150 | u32 status; | 150 | u32 status; |
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 2e165117457b..3ab9ffa00aad 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c | |||
@@ -717,14 +717,14 @@ static int usbhs_enable(struct device *dev) | |||
717 | gpio_request(pdata->ehci_data->reset_gpio_port[0], | 717 | gpio_request(pdata->ehci_data->reset_gpio_port[0], |
718 | "USB1 PHY reset"); | 718 | "USB1 PHY reset"); |
719 | gpio_direction_output | 719 | gpio_direction_output |
720 | (pdata->ehci_data->reset_gpio_port[0], 1); | 720 | (pdata->ehci_data->reset_gpio_port[0], 0); |
721 | } | 721 | } |
722 | 722 | ||
723 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) { | 723 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) { |
724 | gpio_request(pdata->ehci_data->reset_gpio_port[1], | 724 | gpio_request(pdata->ehci_data->reset_gpio_port[1], |
725 | "USB2 PHY reset"); | 725 | "USB2 PHY reset"); |
726 | gpio_direction_output | 726 | gpio_direction_output |
727 | (pdata->ehci_data->reset_gpio_port[1], 1); | 727 | (pdata->ehci_data->reset_gpio_port[1], 0); |
728 | } | 728 | } |
729 | 729 | ||
730 | /* Hold the PHY in RESET for enough time till DIR is high */ | 730 | /* Hold the PHY in RESET for enough time till DIR is high */ |
@@ -904,11 +904,11 @@ static int usbhs_enable(struct device *dev) | |||
904 | 904 | ||
905 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) | 905 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) |
906 | gpio_set_value | 906 | gpio_set_value |
907 | (pdata->ehci_data->reset_gpio_port[0], 0); | 907 | (pdata->ehci_data->reset_gpio_port[0], 1); |
908 | 908 | ||
909 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) | 909 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) |
910 | gpio_set_value | 910 | gpio_set_value |
911 | (pdata->ehci_data->reset_gpio_port[1], 0); | 911 | (pdata->ehci_data->reset_gpio_port[1], 1); |
912 | } | 912 | } |
913 | 913 | ||
914 | end_count: | 914 | end_count: |
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index 16422de0823a..2c0d4d16491a 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c | |||
@@ -447,12 +447,13 @@ static int __init load_twl4030_script(struct twl4030_script *tscript, | |||
447 | if (err) | 447 | if (err) |
448 | goto out; | 448 | goto out; |
449 | } | 449 | } |
450 | if (tscript->flags & TWL4030_SLEEP_SCRIPT) | 450 | if (tscript->flags & TWL4030_SLEEP_SCRIPT) { |
451 | if (order) | 451 | if (order) |
452 | pr_warning("TWL4030: Bad order of scripts (sleep "\ | 452 | pr_warning("TWL4030: Bad order of scripts (sleep "\ |
453 | "script before wakeup) Leads to boot"\ | 453 | "script before wakeup) Leads to boot"\ |
454 | "failure on some boards\n"); | 454 | "failure on some boards\n"); |
455 | err = twl4030_config_sleep_sequence(address); | 455 | err = twl4030_config_sleep_sequence(address); |
456 | } | ||
456 | out: | 457 | out: |
457 | return err; | 458 | return err; |
458 | } | 459 | } |
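The twl4030-power fix above is a missing-braces bug: without them the outer TWL4030_SLEEP_SCRIPT test guarded only the inner if (order) warning, and err = twl4030_config_sleep_sequence(address) executed for every script type. Reduced to a runnable shape with hypothetical names:

    #include <stdio.h>

    #define SLEEP_SCRIPT 0x1

    static int config_sleep(void)
    {
            return -1;      /* pretend the sleep sequence failed */
    }

    static int load_script(int flags, int order)
    {
            int err = 0;

            /*
             * Buggy shape, for reference: without braces only the warning
             * is guarded and err = config_sleep() runs for every script:
             *
             *      if (flags & SLEEP_SCRIPT)
             *              if (order)
             *                      printf("bad script order\n");
             *      err = config_sleep();
             */

            /* Fixed: braces scope both statements to the sleep case. */
            if (flags & SLEEP_SCRIPT) {
                    if (order)
                            printf("bad script order\n");
                    err = config_sleep();
            }
            return err;
    }

    int main(void)
    {
            printf("%d\n", load_script(0, 0));      /* 0: err not clobbered */
            return 0;
    }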
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig index 2c8c3f39710d..abb5de1afce3 100644 --- a/drivers/misc/ti-st/Kconfig +++ b/drivers/misc/ti-st/Kconfig | |||
@@ -5,7 +5,7 @@ | |||
5 | menu "Texas Instruments shared transport line discipline" | 5 | menu "Texas Instruments shared transport line discipline" |
6 | config TI_ST | 6 | config TI_ST |
7 | tristate "Shared transport core driver" | 7 | tristate "Shared transport core driver" |
8 | depends on RFKILL | 8 | depends on NET && GPIOLIB |
9 | select FW_LOADER | 9 | select FW_LOADER |
10 | help | 10 | help |
11 | This enables the shared transport core driver for TI | 11 | This enables the shared transport core driver for TI |
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index 486117f72c9f..f91f82eabda7 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c | |||
@@ -43,13 +43,15 @@ static void add_channel_to_table(struct st_data_s *st_gdata, | |||
43 | pr_info("%s: id %d\n", __func__, new_proto->chnl_id); | 43 | pr_info("%s: id %d\n", __func__, new_proto->chnl_id); |
44 | /* list now has the channel id as index itself */ | 44 | /* list now has the channel id as index itself */ |
45 | st_gdata->list[new_proto->chnl_id] = new_proto; | 45 | st_gdata->list[new_proto->chnl_id] = new_proto; |
46 | st_gdata->is_registered[new_proto->chnl_id] = true; | ||
46 | } | 47 | } |
47 | 48 | ||
48 | static void remove_channel_from_table(struct st_data_s *st_gdata, | 49 | static void remove_channel_from_table(struct st_data_s *st_gdata, |
49 | struct st_proto_s *proto) | 50 | struct st_proto_s *proto) |
50 | { | 51 | { |
51 | pr_info("%s: id %d\n", __func__, proto->chnl_id); | 52 | pr_info("%s: id %d\n", __func__, proto->chnl_id); |
52 | st_gdata->list[proto->chnl_id] = NULL; | 53 | /* st_gdata->list[proto->chnl_id] = NULL; */ |
54 | st_gdata->is_registered[proto->chnl_id] = false; | ||
53 | } | 55 | } |
54 | 56 | ||
55 | /* | 57 | /* |
@@ -104,7 +106,7 @@ void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata) | |||
104 | 106 | ||
105 | if (unlikely | 107 | if (unlikely |
106 | (st_gdata == NULL || st_gdata->rx_skb == NULL | 108 | (st_gdata == NULL || st_gdata->rx_skb == NULL |
107 | || st_gdata->list[chnl_id] == NULL)) { | 109 | || st_gdata->is_registered[chnl_id] == false)) { |
108 | pr_err("chnl_id %d not registered, no data to send?", | 110 | pr_err("chnl_id %d not registered, no data to send?", |
109 | chnl_id); | 111 | chnl_id); |
110 | kfree_skb(st_gdata->rx_skb); | 112 | kfree_skb(st_gdata->rx_skb); |
@@ -141,14 +143,15 @@ void st_reg_complete(struct st_data_s *st_gdata, char err) | |||
141 | unsigned char i = 0; | 143 | unsigned char i = 0; |
142 | pr_info(" %s ", __func__); | 144 | pr_info(" %s ", __func__); |
143 | for (i = 0; i < ST_MAX_CHANNELS; i++) { | 145 | for (i = 0; i < ST_MAX_CHANNELS; i++) { |
144 | if (likely(st_gdata != NULL && st_gdata->list[i] != NULL && | 146 | if (likely(st_gdata != NULL && |
145 | st_gdata->list[i]->reg_complete_cb != NULL)) { | 147 | st_gdata->is_registered[i] == true && |
148 | st_gdata->list[i]->reg_complete_cb != NULL)) { | ||
146 | st_gdata->list[i]->reg_complete_cb | 149 | st_gdata->list[i]->reg_complete_cb |
147 | (st_gdata->list[i]->priv_data, err); | 150 | (st_gdata->list[i]->priv_data, err); |
148 | pr_info("protocol %d's cb sent %d\n", i, err); | 151 | pr_info("protocol %d's cb sent %d\n", i, err); |
149 | if (err) { /* cleanup registered protocol */ | 152 | if (err) { /* cleanup registered protocol */ |
150 | st_gdata->protos_registered--; | 153 | st_gdata->protos_registered--; |
151 | st_gdata->list[i] = NULL; | 154 | st_gdata->is_registered[i] = false; |
152 | } | 155 | } |
153 | } | 156 | } |
154 | } | 157 | } |
@@ -475,9 +478,9 @@ void kim_st_list_protocols(struct st_data_s *st_gdata, void *buf) | |||
475 | { | 478 | { |
476 | seq_printf(buf, "[%d]\nBT=%c\nFM=%c\nGPS=%c\n", | 479 | seq_printf(buf, "[%d]\nBT=%c\nFM=%c\nGPS=%c\n", |
477 | st_gdata->protos_registered, | 480 | st_gdata->protos_registered, |
478 | st_gdata->list[0x04] != NULL ? 'R' : 'U', | 481 | st_gdata->is_registered[0x04] == true ? 'R' : 'U', |
479 | st_gdata->list[0x08] != NULL ? 'R' : 'U', | 482 | st_gdata->is_registered[0x08] == true ? 'R' : 'U', |
480 | st_gdata->list[0x09] != NULL ? 'R' : 'U'); | 483 | st_gdata->is_registered[0x09] == true ? 'R' : 'U'); |
481 | } | 484 | } |
482 | 485 | ||
483 | /********************************************************************/ | 486 | /********************************************************************/ |
@@ -504,7 +507,7 @@ long st_register(struct st_proto_s *new_proto) | |||
504 | return -EPROTONOSUPPORT; | 507 | return -EPROTONOSUPPORT; |
505 | } | 508 | } |
506 | 509 | ||
507 | if (st_gdata->list[new_proto->chnl_id] != NULL) { | 510 | if (st_gdata->is_registered[new_proto->chnl_id] == true) { |
508 | pr_err("chnl_id %d already registered", new_proto->chnl_id); | 511 | pr_err("chnl_id %d already registered", new_proto->chnl_id); |
509 | return -EALREADY; | 512 | return -EALREADY; |
510 | } | 513 | } |
@@ -563,7 +566,7 @@ long st_register(struct st_proto_s *new_proto) | |||
563 | /* check for already registered once more, | 566 | /* check for already registered once more, |
564 | * since the above check is old | 567 | * since the above check is old |
565 | */ | 568 | */ |
566 | if (st_gdata->list[new_proto->chnl_id] != NULL) { | 569 | if (st_gdata->is_registered[new_proto->chnl_id] == true) { |
567 | pr_err(" proto %d already registered ", | 570 | pr_err(" proto %d already registered ", |
568 | new_proto->chnl_id); | 571 | new_proto->chnl_id); |
569 | return -EALREADY; | 572 | return -EALREADY; |
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index b4488c8f6b23..5da93ee6f6be 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/debugfs.h> | 30 | #include <linux/debugfs.h> |
31 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
32 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
33 | #include <linux/sysfs.h> | ||
33 | #include <linux/tty.h> | 34 | #include <linux/tty.h> |
34 | 35 | ||
35 | #include <linux/skbuff.h> | 36 | #include <linux/skbuff.h> |
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 2b200c1cfbba..461e6a17fb90 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c | |||
@@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host) | |||
94 | spin_unlock_irqrestore(&host->clk_lock, flags); | 94 | spin_unlock_irqrestore(&host->clk_lock, flags); |
95 | return; | 95 | return; |
96 | } | 96 | } |
97 | mmc_claim_host(host); | 97 | mutex_lock(&host->clk_gate_mutex); |
98 | spin_lock_irqsave(&host->clk_lock, flags); | 98 | spin_lock_irqsave(&host->clk_lock, flags); |
99 | if (!host->clk_requests) { | 99 | if (!host->clk_requests) { |
100 | spin_unlock_irqrestore(&host->clk_lock, flags); | 100 | spin_unlock_irqrestore(&host->clk_lock, flags); |
@@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host) | |||
104 | pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); | 104 | pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); |
105 | } | 105 | } |
106 | spin_unlock_irqrestore(&host->clk_lock, flags); | 106 | spin_unlock_irqrestore(&host->clk_lock, flags); |
107 | mmc_release_host(host); | 107 | mutex_unlock(&host->clk_gate_mutex); |
108 | } | 108 | } |
109 | 109 | ||
110 | /* | 110 | /* |
@@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host) | |||
130 | { | 130 | { |
131 | unsigned long flags; | 131 | unsigned long flags; |
132 | 132 | ||
133 | mmc_claim_host(host); | 133 | mutex_lock(&host->clk_gate_mutex); |
134 | spin_lock_irqsave(&host->clk_lock, flags); | 134 | spin_lock_irqsave(&host->clk_lock, flags); |
135 | if (host->clk_gated) { | 135 | if (host->clk_gated) { |
136 | spin_unlock_irqrestore(&host->clk_lock, flags); | 136 | spin_unlock_irqrestore(&host->clk_lock, flags); |
@@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host) | |||
140 | } | 140 | } |
141 | host->clk_requests++; | 141 | host->clk_requests++; |
142 | spin_unlock_irqrestore(&host->clk_lock, flags); | 142 | spin_unlock_irqrestore(&host->clk_lock, flags); |
143 | mmc_release_host(host); | 143 | mutex_unlock(&host->clk_gate_mutex); |
144 | } | 144 | } |
145 | 145 | ||
146 | /** | 146 | /** |
@@ -215,6 +215,7 @@ static inline void mmc_host_clk_init(struct mmc_host *host) | |||
215 | host->clk_gated = false; | 215 | host->clk_gated = false; |
216 | INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); | 216 | INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); |
217 | spin_lock_init(&host->clk_lock); | 217 | spin_lock_init(&host->clk_lock); |
218 | mutex_init(&host->clk_gate_mutex); | ||
218 | } | 219 | } |
219 | 220 | ||
220 | /** | 221 | /** |
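The mmc/core hunks above stop serializing clock gating through mmc_claim_host() and introduce a dedicated clk_gate_mutex: the gating paths can run while the host is already claimed, so claiming again risks deadlock, and a narrow lock covering only the gate/ungate bookkeeping sidesteps the ordering problem. A sketch of the split-lock shape (demo_host and its field layout are illustrative):

    #include <linux/mutex.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_host {
            spinlock_t   clk_lock;          /* protects counters, IRQ-safe */
            struct mutex clk_gate_mutex;    /* serializes gate vs ungate */
            int          clk_requests;
            bool         clk_gated;
    };

    static void demo_clk_ungate(struct demo_host *host)
    {
            unsigned long flags;

            /* A private mutex, not the host claim, avoids the deadlock. */
            mutex_lock(&host->clk_gate_mutex);
            spin_lock_irqsave(&host->clk_lock, flags);
            if (host->clk_gated) {
                    /* ... restore the controller clock here ... */
                    host->clk_gated = false;
            }
            host->clk_requests++;
            spin_unlock_irqrestore(&host->clk_lock, flags);
            mutex_unlock(&host->clk_gate_mutex);
    }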
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c index f9b611fc773e..60e4186a4345 100644 --- a/drivers/mmc/host/sdhci-of-core.c +++ b/drivers/mmc/host/sdhci-of-core.c | |||
@@ -124,8 +124,10 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np) | |||
124 | #endif | 124 | #endif |
125 | } | 125 | } |
126 | 126 | ||
127 | static const struct of_device_id sdhci_of_match[]; | ||
127 | static int __devinit sdhci_of_probe(struct platform_device *ofdev) | 128 | static int __devinit sdhci_of_probe(struct platform_device *ofdev) |
128 | { | 129 | { |
130 | const struct of_device_id *match; | ||
129 | struct device_node *np = ofdev->dev.of_node; | 131 | struct device_node *np = ofdev->dev.of_node; |
130 | struct sdhci_of_data *sdhci_of_data; | 132 | struct sdhci_of_data *sdhci_of_data; |
131 | struct sdhci_host *host; | 133 | struct sdhci_host *host; |
@@ -134,9 +136,10 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev) | |||
134 | int size; | 136 | int size; |
135 | int ret; | 137 | int ret; |
136 | 138 | ||
137 | if (!ofdev->dev.of_match) | 139 | match = of_match_device(sdhci_of_match, &ofdev->dev); |
140 | if (!match) | ||
138 | return -EINVAL; | 141 | return -EINVAL; |
139 | sdhci_of_data = ofdev->dev.of_match->data; | 142 | sdhci_of_data = match->data; |
140 | 143 | ||
141 | if (!of_device_is_available(np)) | 144 | if (!of_device_is_available(np)) |
142 | return -ENODEV; | 145 | return -ENODEV; |
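The sdhci-of-core change above tracks the removal of the of_match shortcut from struct device: drivers now call of_match_device() against their own table at probe time, which requires a forward declaration when, as here, the table is defined below the probe function. The shape, with a hypothetical binding:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>
    #include <linux/errno.h>

    /* Table defined after probe(), hence the forward declaration. */
    static const struct of_device_id demo_of_match[];

    static int demo_probe(struct platform_device *ofdev)
    {
            const struct of_device_id *match;

            match = of_match_device(demo_of_match, &ofdev->dev);
            if (!match)
                    return -EINVAL;
            /* match->data carries the per-compatible driver data. */
            return 0;
    }

    static const struct of_device_id demo_of_match[] = {
            { .compatible = "vendor,demo" },
            { }
    };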
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 44b1f46458ca..5069111c81cc 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -260,6 +260,13 @@ config MTD_BCM963XX | |||
260 | Support for parsing CFE image tag and creating MTD partitions on | 260 | Support for parsing CFE image tag and creating MTD partitions on |
261 | Broadcom BCM63xx boards. | 261 | Broadcom BCM63xx boards. |
262 | 262 | ||
263 | config MTD_LANTIQ | ||
264 | tristate "Lantiq SoC NOR support" | ||
265 | depends on LANTIQ | ||
266 | select MTD_PARTITIONS | ||
267 | help | ||
268 | Support for NOR flash attached to the Lantiq SoC's External Bus Unit. | ||
269 | |||
263 | config MTD_DILNETPC | 270 | config MTD_DILNETPC |
264 | tristate "CFI Flash device mapped on DIL/Net PC" | 271 | tristate "CFI Flash device mapped on DIL/Net PC" |
265 | depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN | 272 | depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN |
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 08533bd5cba7..6adf4c9b9057 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile | |||
@@ -60,3 +60,4 @@ obj-$(CONFIG_MTD_VMU) += vmu-flash.o | |||
60 | obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o | 60 | obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o |
61 | obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o | 61 | obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o |
62 | obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o | 62 | obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o |
63 | obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o | ||
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c new file mode 100644 index 000000000000..a90cabd7b84d --- /dev/null +++ b/drivers/mtd/maps/lantiq-flash.c | |||
@@ -0,0 +1,251 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE | ||
7 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/mtd/mtd.h> | ||
17 | #include <linux/mtd/map.h> | ||
18 | #include <linux/mtd/partitions.h> | ||
19 | #include <linux/mtd/cfi.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/mtd/physmap.h> | ||
22 | |||
23 | #include <lantiq_soc.h> | ||
24 | #include <lantiq_platform.h> | ||
25 | |||
26 | /* | ||
27 | * The NOR flash is connected to the same external bus unit (EBU) as PCI. | ||
28 | * To make PCI work we need to enable the endianness swapping for the address | ||
29 | * written to the EBU. This endianness swapping works for PCI correctly but | ||
30 | fails for attached NOR devices. To work around this we need to use a complex | ||
31 | * map. The workaround involves swapping all addresses whilst probing the chip. | ||
32 | * Once probing is complete we stop swapping the addresses but swizzle the | ||
33 | * unlock addresses to ensure that access to the NOR device works correctly. | ||
34 | */ | ||
35 | |||
36 | enum { | ||
37 | LTQ_NOR_PROBING, | ||
38 | LTQ_NOR_NORMAL | ||
39 | }; | ||
40 | |||
41 | struct ltq_mtd { | ||
42 | struct resource *res; | ||
43 | struct mtd_info *mtd; | ||
44 | struct map_info *map; | ||
45 | }; | ||
46 | |||
47 | static char ltq_map_name[] = "ltq_nor"; | ||
48 | |||
49 | static map_word | ||
50 | ltq_read16(struct map_info *map, unsigned long adr) | ||
51 | { | ||
52 | unsigned long flags; | ||
53 | map_word temp; | ||
54 | |||
55 | if (map->map_priv_1 == LTQ_NOR_PROBING) | ||
56 | adr ^= 2; | ||
57 | spin_lock_irqsave(&ebu_lock, flags); | ||
58 | temp.x[0] = *(u16 *)(map->virt + adr); | ||
59 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
60 | return temp; | ||
61 | } | ||
62 | |||
63 | static void | ||
64 | ltq_write16(struct map_info *map, map_word d, unsigned long adr) | ||
65 | { | ||
66 | unsigned long flags; | ||
67 | |||
68 | if (map->map_priv_1 == LTQ_NOR_PROBING) | ||
69 | adr ^= 2; | ||
70 | spin_lock_irqsave(&ebu_lock, flags); | ||
71 | *(u16 *)(map->virt + adr) = d.x[0]; | ||
72 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
73 | } | ||
74 | |||
75 | /* | ||
76 | * The following 2 functions copy data between iomem and a cached memory | ||
77 | * section. As memcpy() makes use of pre-fetching we cannot use it here. | ||
78 | * The normal alternative of using memcpy_{to,from}io also makes use of | ||
79 | * memcpy() on MIPS so it is not applicable either. We are therefore stuck | ||
80 | * with having to use our own loop. | ||
81 | */ | ||
82 | static void | ||
83 | ltq_copy_from(struct map_info *map, void *to, | ||
84 | unsigned long from, ssize_t len) | ||
85 | { | ||
86 | unsigned char *f = (unsigned char *)map->virt + from; | ||
87 | unsigned char *t = (unsigned char *)to; | ||
88 | unsigned long flags; | ||
89 | |||
90 | spin_lock_irqsave(&ebu_lock, flags); | ||
91 | while (len--) | ||
92 | *t++ = *f++; | ||
93 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
94 | } | ||
95 | |||
96 | static void | ||
97 | ltq_copy_to(struct map_info *map, unsigned long to, | ||
98 | const void *from, ssize_t len) | ||
99 | { | ||
100 | unsigned char *f = (unsigned char *)from; | ||
101 | unsigned char *t = (unsigned char *)map->virt + to; | ||
102 | unsigned long flags; | ||
103 | |||
104 | spin_lock_irqsave(&ebu_lock, flags); | ||
105 | while (len--) | ||
106 | *t++ = *f++; | ||
107 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
108 | } | ||
109 | |||
110 | static const char * const part_probe_types[] = { "cmdlinepart", NULL }; | ||
111 | |||
112 | static int __init | ||
113 | ltq_mtd_probe(struct platform_device *pdev) | ||
114 | { | ||
115 | struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev); | ||
116 | struct ltq_mtd *ltq_mtd; | ||
117 | struct mtd_partition *parts; | ||
118 | struct resource *res; | ||
119 | int nr_parts = 0; | ||
120 | struct cfi_private *cfi; | ||
121 | int err; | ||
122 | |||
123 | ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL); | ||
124 | platform_set_drvdata(pdev, ltq_mtd); | ||
125 | |||
126 | ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
127 | if (!ltq_mtd->res) { | ||
128 | dev_err(&pdev->dev, "failed to get memory resource"); | ||
129 | err = -ENOENT; | ||
130 | goto err_out; | ||
131 | } | ||
132 | |||
133 | res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start, | ||
134 | resource_size(ltq_mtd->res), dev_name(&pdev->dev)); | ||
135 | if (!res) { | ||
136 | dev_err(&pdev->dev, "failed to request mem resource"); | ||
137 | err = -EBUSY; | ||
138 | goto err_out; | ||
139 | } | ||
140 | |||
141 | ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL); | ||
142 | ltq_mtd->map->phys = res->start; | ||
143 | ltq_mtd->map->size = resource_size(res); | ||
144 | ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev, | ||
145 | ltq_mtd->map->phys, ltq_mtd->map->size); | ||
146 | if (!ltq_mtd->map->virt) { | ||
147 | dev_err(&pdev->dev, "failed to ioremap!\n"); | ||
148 | err = -ENOMEM; | ||
149 | goto err_free; | ||
150 | } | ||
151 | |||
152 | ltq_mtd->map->name = ltq_map_name; | ||
153 | ltq_mtd->map->bankwidth = 2; | ||
154 | ltq_mtd->map->read = ltq_read16; | ||
155 | ltq_mtd->map->write = ltq_write16; | ||
156 | ltq_mtd->map->copy_from = ltq_copy_from; | ||
157 | ltq_mtd->map->copy_to = ltq_copy_to; | ||
158 | |||
159 | ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING; | ||
160 | ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map); | ||
161 | ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL; | ||
162 | |||
163 | if (!ltq_mtd->mtd) { | ||
164 | dev_err(&pdev->dev, "probing failed\n"); | ||
165 | err = -ENXIO; | ||
166 | goto err_unmap; | ||
167 | } | ||
168 | |||
169 | ltq_mtd->mtd->owner = THIS_MODULE; | ||
170 | |||
171 | cfi = ltq_mtd->map->fldrv_priv; | ||
172 | cfi->addr_unlock1 ^= 1; | ||
173 | cfi->addr_unlock2 ^= 1; | ||
174 | |||
175 | nr_parts = parse_mtd_partitions(ltq_mtd->mtd, | ||
176 | part_probe_types, &parts, 0); | ||
177 | if (nr_parts > 0) { | ||
178 | dev_info(&pdev->dev, | ||
179 | "using %d partitions from cmdline", nr_parts); | ||
180 | } else { | ||
181 | nr_parts = ltq_mtd_data->nr_parts; | ||
182 | parts = ltq_mtd_data->parts; | ||
183 | } | ||
184 | |||
185 | err = add_mtd_partitions(ltq_mtd->mtd, parts, nr_parts); | ||
186 | if (err) { | ||
187 | dev_err(&pdev->dev, "failed to add partitions\n"); | ||
188 | goto err_destroy; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | |||
193 | err_destroy: | ||
194 | map_destroy(ltq_mtd->mtd); | ||
195 | err_unmap: | ||
196 | iounmap(ltq_mtd->map->virt); | ||
197 | err_free: | ||
198 | kfree(ltq_mtd->map); | ||
199 | err_out: | ||
200 | kfree(ltq_mtd); | ||
201 | return err; | ||
202 | } | ||
203 | |||
204 | static int __devexit | ||
205 | ltq_mtd_remove(struct platform_device *pdev) | ||
206 | { | ||
207 | struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev); | ||
208 | |||
209 | if (ltq_mtd) { | ||
210 | if (ltq_mtd->mtd) { | ||
211 | del_mtd_partitions(ltq_mtd->mtd); | ||
212 | map_destroy(ltq_mtd->mtd); | ||
213 | } | ||
214 | if (ltq_mtd->map->virt) | ||
215 | iounmap(ltq_mtd->map->virt); | ||
216 | kfree(ltq_mtd->map); | ||
217 | kfree(ltq_mtd); | ||
218 | } | ||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | static struct platform_driver ltq_mtd_driver = { | ||
223 | .remove = __devexit_p(ltq_mtd_remove), | ||
224 | .driver = { | ||
225 | .name = "ltq_nor", | ||
226 | .owner = THIS_MODULE, | ||
227 | }, | ||
228 | }; | ||
229 | |||
230 | static int __init | ||
231 | init_ltq_mtd(void) | ||
232 | { | ||
233 | int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe); | ||
234 | |||
235 | if (ret) | ||
236 | pr_err("ltq_nor: error registering platform driver"); | ||
237 | return ret; | ||
238 | } | ||
239 | |||
240 | static void __exit | ||
241 | exit_ltq_mtd(void) | ||
242 | { | ||
243 | platform_driver_unregister(&ltq_mtd_driver); | ||
244 | } | ||
245 | |||
246 | module_init(init_ltq_mtd); | ||
247 | module_exit(exit_ltq_mtd); | ||
248 | |||
249 | MODULE_LICENSE("GPL"); | ||
250 | MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); | ||
251 | MODULE_DESCRIPTION("Lantiq SoC NOR"); | ||
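
To recap the two-phase address handling this file sets up: while map_priv_1 holds LTQ_NOR_PROBING, ltq_read16()/ltq_write16() XOR bit 1 of every offset to undo the EBU endianness swap; once do_map_probe() succeeds the swizzling is switched off and only the CFI unlock offsets are corrected. Condensed from ltq_mtd_probe() above:

    ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING;   /* accessors do adr ^= 2 */
    ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map);
    ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL;    /* raw accesses from here on */

    cfi = ltq_mtd->map->fldrv_priv;
    cfi->addr_unlock1 ^= 1;                       /* keep the unlock cycles */
    cfi->addr_unlock2 ^= 1;                       /* aimed at swapped offsets */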
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index bd483f0c57e1..c1d33464aee8 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c | |||
@@ -214,11 +214,13 @@ static void __devinit of_free_probes(const char **probes) | |||
214 | } | 214 | } |
215 | #endif | 215 | #endif |
216 | 216 | ||
217 | static struct of_device_id of_flash_match[]; | ||
217 | static int __devinit of_flash_probe(struct platform_device *dev) | 218 | static int __devinit of_flash_probe(struct platform_device *dev) |
218 | { | 219 | { |
219 | #ifdef CONFIG_MTD_PARTITIONS | 220 | #ifdef CONFIG_MTD_PARTITIONS |
220 | const char **part_probe_types; | 221 | const char **part_probe_types; |
221 | #endif | 222 | #endif |
223 | const struct of_device_id *match; | ||
222 | struct device_node *dp = dev->dev.of_node; | 224 | struct device_node *dp = dev->dev.of_node; |
223 | struct resource res; | 225 | struct resource res; |
224 | struct of_flash *info; | 226 | struct of_flash *info; |
@@ -232,9 +234,10 @@ static int __devinit of_flash_probe(struct platform_device *dev) | |||
232 | struct mtd_info **mtd_list = NULL; | 234 | struct mtd_info **mtd_list = NULL; |
233 | resource_size_t res_size; | 235 | resource_size_t res_size; |
234 | 236 | ||
235 | if (!dev->dev.of_match) | 237 | match = of_match_device(of_flash_match, &dev->dev); |
238 | if (!match) | ||
236 | return -EINVAL; | 239 | return -EINVAL; |
237 | probe_type = dev->dev.of_match->data; | 240 | probe_type = match->data; |
238 | 241 | ||
239 | reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); | 242 | reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); |
240 | 243 | ||
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 3ffe05db4923..5d513b54a7d7 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/gpio.h> | ||
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
@@ -470,7 +471,7 @@ static int __init au1xxx_nand_init(void) | |||
470 | 471 | ||
471 | #ifdef CONFIG_MIPS_PB1550 | 472 | #ifdef CONFIG_MIPS_PB1550 |
472 | /* set gpio206 high */ | 473 | /* set gpio206 high */ |
473 | au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR); | 474 | gpio_direction_input(206); |
474 | 475 | ||
475 | boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1); | 476 | boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1); |
476 | 477 | ||
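
The au1550nd hunk swaps a direct read-modify-write of the Alchemy GPIO2_DIR register for the generic gpiolib call, which is why linux/gpio.h is now included. A sketch of the usual pairing; the gpio_request() and its label are illustrative additions, the hunk itself calls gpio_direction_input(206) bare:

    #include <linux/gpio.h>

    static int pb1550_gpio206_input(void)
    {
            int err = gpio_request(206, "pb1550-boot-strap");  /* hypothetical label */

            if (err)
                    return err;
            return gpio_direction_input(206);   /* what the hunk does */
    }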
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index dc280bc8eba2..19f04a34783a 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -2017,6 +2017,13 @@ config FTMAC100 | |||
2017 | from Faraday. It is used on Faraday A320, Andes AG101 and some | 2017 | from Faraday. It is used on Faraday A320, Andes AG101 and some |
2018 | other ARM/NDS32 SoC's. | 2018 | other ARM/NDS32 SoC's. |
2019 | 2019 | ||
2020 | config LANTIQ_ETOP | ||
2021 | tristate "Lantiq SoC ETOP driver" | ||
2022 | depends on SOC_TYPE_XWAY | ||
2023 | help | ||
2024 | Support for the MII0 inside the Lantiq SoC. | ||
2025 | |||
2026 | |||
2020 | source "drivers/net/fs_enet/Kconfig" | 2027 | source "drivers/net/fs_enet/Kconfig" |
2021 | 2028 | ||
2022 | source "drivers/net/octeon/Kconfig" | 2029 | source "drivers/net/octeon/Kconfig" |
@@ -2536,7 +2543,7 @@ config S6GMAC | |||
2536 | source "drivers/net/stmmac/Kconfig" | 2543 | source "drivers/net/stmmac/Kconfig" |
2537 | 2544 | ||
2538 | config PCH_GBE | 2545 | config PCH_GBE |
2539 | tristate "PCH Gigabit Ethernet" | 2546 | tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" |
2540 | depends on PCI | 2547 | depends on PCI |
2541 | select MII | 2548 | select MII |
2542 | ---help--- | 2549 | ---help--- |
@@ -2548,6 +2555,12 @@ config PCH_GBE | |||
2548 | to Gigabit Ethernet. | 2555 | to Gigabit Ethernet. |
2549 | This driver enables Gigabit Ethernet function. | 2556 | This driver enables Gigabit Ethernet function. |
2550 | 2557 | ||
2558 | This driver can also be used for the OKI SEMICONDUCTOR ML7223 IOH | ||
2559 | (Input/Output Hub). | ||
2560 | The ML7223 IOH is for MP (Media Phone) use. | ||
2561 | It is a companion chip for the Intel Atom E6xx series and is | ||
2562 | fully compatible with the Intel EG20T PCH. | ||
2563 | |||
2551 | endif # NETDEV_1000 | 2564 | endif # NETDEV_1000 |
2552 | 2565 | ||
2553 | # | 2566 | # |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 01b604ad155e..209fbb70619b 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -144,7 +144,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o | |||
144 | obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o | 144 | obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o |
145 | obj-$(CONFIG_B44) += b44.o | 145 | obj-$(CONFIG_B44) += b44.o |
146 | obj-$(CONFIG_FORCEDETH) += forcedeth.o | 146 | obj-$(CONFIG_FORCEDETH) += forcedeth.o |
147 | obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o | 147 | obj-$(CONFIG_NE_H8300) += ne-h8300.o |
148 | obj-$(CONFIG_AX88796) += ax88796.o | 148 | obj-$(CONFIG_AX88796) += ax88796.o |
149 | obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o | 149 | obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o |
150 | obj-$(CONFIG_FTMAC100) += ftmac100.o | 150 | obj-$(CONFIG_FTMAC100) += ftmac100.o |
@@ -219,7 +219,7 @@ obj-$(CONFIG_SC92031) += sc92031.o | |||
219 | obj-$(CONFIG_LP486E) += lp486e.o | 219 | obj-$(CONFIG_LP486E) += lp486e.o |
220 | 220 | ||
221 | obj-$(CONFIG_ETH16I) += eth16i.o | 221 | obj-$(CONFIG_ETH16I) += eth16i.o |
222 | obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o | 222 | obj-$(CONFIG_ZORRO8390) += zorro8390.o |
223 | obj-$(CONFIG_HPLANCE) += hplance.o 7990.o | 223 | obj-$(CONFIG_HPLANCE) += hplance.o 7990.o |
224 | obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o | 224 | obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o |
225 | obj-$(CONFIG_EQUALIZER) += eql.o | 225 | obj-$(CONFIG_EQUALIZER) += eql.o |
@@ -231,7 +231,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o | |||
231 | obj-$(CONFIG_DECLANCE) += declance.o | 231 | obj-$(CONFIG_DECLANCE) += declance.o |
232 | obj-$(CONFIG_ATARILANCE) += atarilance.o | 232 | obj-$(CONFIG_ATARILANCE) += atarilance.o |
233 | obj-$(CONFIG_A2065) += a2065.o | 233 | obj-$(CONFIG_A2065) += a2065.o |
234 | obj-$(CONFIG_HYDRA) += hydra.o 8390.o | 234 | obj-$(CONFIG_HYDRA) += hydra.o |
235 | obj-$(CONFIG_ARIADNE) += ariadne.o | 235 | obj-$(CONFIG_ARIADNE) += ariadne.o |
236 | obj-$(CONFIG_CS89x0) += cs89x0.o | 236 | obj-$(CONFIG_CS89x0) += cs89x0.o |
237 | obj-$(CONFIG_MACSONIC) += macsonic.o | 237 | obj-$(CONFIG_MACSONIC) += macsonic.o |
@@ -259,6 +259,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/ | |||
259 | obj-$(CONFIG_ENC28J60) += enc28j60.o | 259 | obj-$(CONFIG_ENC28J60) += enc28j60.o |
260 | obj-$(CONFIG_ETHOC) += ethoc.o | 260 | obj-$(CONFIG_ETHOC) += ethoc.o |
261 | obj-$(CONFIG_GRETH) += greth.o | 261 | obj-$(CONFIG_GRETH) += greth.o |
262 | obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o | ||
262 | 263 | ||
263 | obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o | 264 | obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o |
264 | 265 | ||
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index 4af235d41fda..fbfb5b47c506 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c | |||
@@ -527,7 +527,7 @@ static void __init etherh_banner(void) | |||
527 | * Read the ethernet address string from the on board rom. | 527 | * Read the ethernet address string from the on board rom. |
528 | * This is an ascii string... | 528 | * This is an ascii string... |
529 | */ | 529 | */ |
530 | static int __init etherh_addr(char *addr, struct expansion_card *ec) | 530 | static int __devinit etherh_addr(char *addr, struct expansion_card *ec) |
531 | { | 531 | { |
532 | struct in_chunk_dir cd; | 532 | struct in_chunk_dir cd; |
533 | char *s; | 533 | char *s; |
@@ -655,7 +655,7 @@ static const struct net_device_ops etherh_netdev_ops = { | |||
655 | static u32 etherh_regoffsets[16]; | 655 | static u32 etherh_regoffsets[16]; |
656 | static u32 etherm_regoffsets[16]; | 656 | static u32 etherm_regoffsets[16]; |
657 | 657 | ||
658 | static int __init | 658 | static int __devinit |
659 | etherh_probe(struct expansion_card *ec, const struct ecard_id *id) | 659 | etherh_probe(struct expansion_card *ec, const struct ecard_id *id) |
660 | { | 660 | { |
661 | const struct etherh_data *data = id->data; | 661 | const struct etherh_data *data = id->data; |
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index ce0091eb06f5..1264d781b554 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c | |||
@@ -554,7 +554,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, | |||
554 | memaddr == (unsigned short *)0xffe00000) { | 554 | memaddr == (unsigned short *)0xffe00000) { |
555 | /* PAMs card and Riebl on ST use level 5 autovector */ | 555 | /* PAMs card and Riebl on ST use level 5 autovector */ |
556 | if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, | 556 | if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, |
557 | "PAM/Riebl-ST Ethernet", dev)) { | 557 | "PAM,Riebl-ST Ethernet", dev)) { |
558 | printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); | 558 | printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); |
559 | return 0; | 559 | return 0; |
560 | } | 560 | } |
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 66823eded7a3..2353eca32593 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h | |||
@@ -213,7 +213,7 @@ struct be_rx_stats { | |||
213 | 213 | ||
214 | struct be_rx_compl_info { | 214 | struct be_rx_compl_info { |
215 | u32 rss_hash; | 215 | u32 rss_hash; |
216 | u16 vid; | 216 | u16 vlan_tag; |
217 | u16 pkt_size; | 217 | u16 pkt_size; |
218 | u16 rxq_idx; | 218 | u16 rxq_idx; |
219 | u16 mac_id; | 219 | u16 mac_id; |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 1e2d825bb94a..9dc9394fd4ca 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
@@ -132,7 +132,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, | |||
132 | struct be_async_event_grp5_pvid_state *evt) | 132 | struct be_async_event_grp5_pvid_state *evt) |
133 | { | 133 | { |
134 | if (evt->enabled) | 134 | if (evt->enabled) |
135 | adapter->pvid = evt->tag; | 135 | adapter->pvid = le16_to_cpu(evt->tag); |
136 | else | 136 | else |
137 | adapter->pvid = 0; | 137 | adapter->pvid = 0; |
138 | } | 138 | } |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 02a0443d1821..9187fb4e08f1 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
@@ -1018,7 +1018,8 @@ static void be_rx_compl_process(struct be_adapter *adapter, | |||
1018 | kfree_skb(skb); | 1018 | kfree_skb(skb); |
1019 | return; | 1019 | return; |
1020 | } | 1020 | } |
1021 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid); | 1021 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, |
1022 | rxcp->vlan_tag); | ||
1022 | } else { | 1023 | } else { |
1023 | netif_receive_skb(skb); | 1024 | netif_receive_skb(skb); |
1024 | } | 1025 | } |
@@ -1076,7 +1077,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, | |||
1076 | if (likely(!rxcp->vlanf)) | 1077 | if (likely(!rxcp->vlanf)) |
1077 | napi_gro_frags(&eq_obj->napi); | 1078 | napi_gro_frags(&eq_obj->napi); |
1078 | else | 1079 | else |
1079 | vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid); | 1080 | vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, |
1081 | rxcp->vlan_tag); | ||
1080 | } | 1082 | } |
1081 | 1083 | ||
1082 | static void be_parse_rx_compl_v1(struct be_adapter *adapter, | 1084 | static void be_parse_rx_compl_v1(struct be_adapter *adapter, |
@@ -1102,7 +1104,8 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter, | |||
1102 | rxcp->pkt_type = | 1104 | rxcp->pkt_type = |
1103 | AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); | 1105 | AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); |
1104 | rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); | 1106 | rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); |
1105 | rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl); | 1107 | rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, |
1108 | compl); | ||
1106 | } | 1109 | } |
1107 | 1110 | ||
1108 | static void be_parse_rx_compl_v0(struct be_adapter *adapter, | 1111 | static void be_parse_rx_compl_v0(struct be_adapter *adapter, |
@@ -1128,7 +1131,8 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter, | |||
1128 | rxcp->pkt_type = | 1131 | rxcp->pkt_type = |
1129 | AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); | 1132 | AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); |
1130 | rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); | 1133 | rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); |
1131 | rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl); | 1134 | rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, |
1135 | compl); | ||
1132 | } | 1136 | } |
1133 | 1137 | ||
1134 | static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) | 1138 | static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) |
@@ -1155,9 +1159,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) | |||
1155 | rxcp->vlanf = 0; | 1159 | rxcp->vlanf = 0; |
1156 | 1160 | ||
1157 | if (!lancer_chip(adapter)) | 1161 | if (!lancer_chip(adapter)) |
1158 | rxcp->vid = swab16(rxcp->vid); | 1162 | rxcp->vlan_tag = swab16(rxcp->vlan_tag); |
1159 | 1163 | ||
1160 | if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid]) | 1164 | if (((adapter->pvid & VLAN_VID_MASK) == |
1165 | (rxcp->vlan_tag & VLAN_VID_MASK)) && | ||
1166 | !adapter->vlan_tag[rxcp->vlan_tag]) | ||
1161 | rxcp->vlanf = 0; | 1167 | rxcp->vlanf = 0; |
1162 | 1168 | ||
1163 | /* As the compl has been parsed, reset it; we wont touch it again */ | 1169 | /* As the compl has been parsed, reset it; we wont touch it again */ |
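
The benet hunks make three related changes: the completion field is renamed from vid to vlan_tag to reflect that it carries the whole 16-bit TCI, the async PVID event value is converted with le16_to_cpu() before being stored, and the PVID comparison masks both sides with VLAN_VID_MASK so priority/CFI bits cannot defeat the match. A small userspace demo of why the mask matters (values invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define VLAN_VID_MASK 0x0fff    /* low 12 bits of the 16-bit TCI */

    int main(void)
    {
            uint16_t pvid = 0x0064;      /* PVID 100 */
            uint16_t vlan_tag = 0x6064;  /* same VID, priority bits set */

            printf("raw compare:    %d\n", pvid == vlan_tag);               /* 0 */
            printf("masked compare: %d\n",
                   (pvid & VLAN_VID_MASK) == (vlan_tag & VLAN_VID_MASK));   /* 1 */
            return 0;
    }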
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index b28baff70864..01b8a6af275b 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | typedef struct mac_addr { | 40 | typedef struct mac_addr { |
41 | u8 mac_addr_value[ETH_ALEN]; | 41 | u8 mac_addr_value[ETH_ALEN]; |
42 | } mac_addr_t; | 42 | } __packed mac_addr_t; |
43 | 43 | ||
44 | enum { | 44 | enum { |
45 | BOND_AD_STABLE = 0, | 45 | BOND_AD_STABLE = 0, |
@@ -134,12 +134,12 @@ typedef struct lacpdu { | |||
134 | u8 tlv_type_terminator; // = terminator | 134 | u8 tlv_type_terminator; // = terminator |
135 | u8 terminator_length; // = 0 | 135 | u8 terminator_length; // = 0 |
136 | u8 reserved_50[50]; // = 0 | 136 | u8 reserved_50[50]; // = 0 |
137 | } lacpdu_t; | 137 | } __packed lacpdu_t; |
138 | 138 | ||
139 | typedef struct lacpdu_header { | 139 | typedef struct lacpdu_header { |
140 | struct ethhdr hdr; | 140 | struct ethhdr hdr; |
141 | struct lacpdu lacpdu; | 141 | struct lacpdu lacpdu; |
142 | } lacpdu_header_t; | 142 | } __packed lacpdu_header_t; |
143 | 143 | ||
144 | // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) | 144 | // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) |
145 | typedef struct bond_marker { | 145 | typedef struct bond_marker { |
@@ -155,12 +155,12 @@ typedef struct bond_marker { | |||
155 | u8 tlv_type_terminator; // = 0x00 | 155 | u8 tlv_type_terminator; // = 0x00 |
156 | u8 terminator_length; // = 0x00 | 156 | u8 terminator_length; // = 0x00 |
157 | u8 reserved_90[90]; // = 0 | 157 | u8 reserved_90[90]; // = 0 |
158 | } bond_marker_t; | 158 | } __packed bond_marker_t; |
159 | 159 | ||
160 | typedef struct bond_marker_header { | 160 | typedef struct bond_marker_header { |
161 | struct ethhdr hdr; | 161 | struct ethhdr hdr; |
162 | struct bond_marker marker; | 162 | struct bond_marker marker; |
163 | } bond_marker_header_t; | 163 | } __packed bond_marker_header_t; |
164 | 164 | ||
165 | #pragma pack() | 165 | #pragma pack() |
166 | 166 | ||
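
These lacpdu/marker typedefs describe on-the-wire 802.3ad frames, so each one gains __packed in addition to the surrounding #pragma pack() region, pinning the layout even if the pragma scope is ever reshuffled. A runnable illustration of what packing changes:

    #include <stdio.h>
    #include <stdint.h>

    struct loose { uint8_t type; uint16_t len; };
    struct tight { uint8_t type; uint16_t len; } __attribute__((packed));

    int main(void)
    {
            /* the compiler pads 'loose' to align 'len'; 'tight' matches the wire */
            printf("loose: %zu bytes\n", sizeof(struct loose));  /* typically 4 */
            printf("tight: %zu bytes\n", sizeof(struct tight));  /* 3 */
            return 0;
    }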
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index bd1d811c204f..5fedc3375562 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c | |||
@@ -247,8 +247,10 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, | |||
247 | } | 247 | } |
248 | #endif /* CONFIG_PPC_MPC512x */ | 248 | #endif /* CONFIG_PPC_MPC512x */ |
249 | 249 | ||
250 | static struct of_device_id mpc5xxx_can_table[]; | ||
250 | static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) | 251 | static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) |
251 | { | 252 | { |
253 | const struct of_device_id *match; | ||
252 | struct mpc5xxx_can_data *data; | 254 | struct mpc5xxx_can_data *data; |
253 | struct device_node *np = ofdev->dev.of_node; | 255 | struct device_node *np = ofdev->dev.of_node; |
254 | struct net_device *dev; | 256 | struct net_device *dev; |
@@ -258,9 +260,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) | |||
258 | int irq, mscan_clksrc = 0; | 260 | int irq, mscan_clksrc = 0; |
259 | int err = -ENOMEM; | 261 | int err = -ENOMEM; |
260 | 262 | ||
261 | if (!ofdev->dev.of_match) | 263 | match = of_match_device(mpc5xxx_can_table, &ofdev->dev); |
264 | if (!match) | ||
262 | return -EINVAL; | 265 | return -EINVAL; |
263 | data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data; | 266 | data = match->data; |
264 | 267 | ||
265 | base = of_iomap(np, 0); | 268 | base = of_iomap(np, 0); |
266 | if (!base) { | 269 | if (!base) { |
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index a358ea9445a2..f501bba1fc6f 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c | |||
@@ -346,10 +346,10 @@ static void sja1000_rx(struct net_device *dev) | |||
346 | | (priv->read_reg(priv, REG_ID2) >> 5); | 346 | | (priv->read_reg(priv, REG_ID2) >> 5); |
347 | } | 347 | } |
348 | 348 | ||
349 | cf->can_dlc = get_can_dlc(fi & 0x0F); | ||
349 | if (fi & FI_RTR) { | 350 | if (fi & FI_RTR) { |
350 | id |= CAN_RTR_FLAG; | 351 | id |= CAN_RTR_FLAG; |
351 | } else { | 352 | } else { |
352 | cf->can_dlc = get_can_dlc(fi & 0x0F); | ||
353 | for (i = 0; i < cf->can_dlc; i++) | 353 | for (i = 0; i < cf->can_dlc; i++) |
354 | cf->data[i] = priv->read_reg(priv, dreg++); | 354 | cf->data[i] = priv->read_reg(priv, dreg++); |
355 | } | 355 | } |
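
The sja1000 fix hoists the DLC read out of the else branch: a remote (RTR) frame carries a valid length code but no data bytes, so cf->can_dlc was previously left unset for RTR frames. The corrected order, as it reads after the hunk:

    cf->can_dlc = get_can_dlc(fi & 0x0F);   /* valid for data and RTR frames */
    if (fi & FI_RTR) {
            id |= CAN_RTR_FLAG;             /* length is meaningful, data is not */
    } else {
            for (i = 0; i < cf->can_dlc; i++)
                    cf->data[i] = priv->read_reg(priv, dreg++);
    }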
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index b423965a78d1..1b49df6b2470 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c | |||
@@ -583,7 +583,9 @@ static int slcan_open(struct tty_struct *tty) | |||
583 | /* Done. We have linked the TTY line to a channel. */ | 583 | /* Done. We have linked the TTY line to a channel. */ |
584 | rtnl_unlock(); | 584 | rtnl_unlock(); |
585 | tty->receive_room = 65536; /* We don't flow control */ | 585 | tty->receive_room = 65536; /* We don't flow control */ |
586 | return sl->dev->base_addr; | 586 | |
587 | /* TTY layer expects 0 on success */ | ||
588 | return 0; | ||
587 | 589 | ||
588 | err_free_chan: | 590 | err_free_chan: |
589 | sl->tty = NULL; | 591 | sl->tty = NULL; |
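
As the new comment in the slcan hunk says, the TTY layer expects a line discipline's open() to return 0 on success; returning dev->base_addr leaked a driver-private value as the status code. The convention in sketch form:

    /* tty_ldisc_ops.open(): 0 on success, negative errno on failure */
    static int slcan_open_sketch(struct tty_struct *tty)
    {
            /* ... channel allocated and linked to the tty as in the hunk ... */
            tty->receive_room = 65536;      /* we don't flow control */
            return 0;                       /* never a driver cookie */
    }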
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index 3e2e734fecb7..f3bbdcef338c 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ b/drivers/net/ehea/ehea_ethtool.c | |||
@@ -55,15 +55,20 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
55 | cmd->duplex = -1; | 55 | cmd->duplex = -1; |
56 | } | 56 | } |
57 | 57 | ||
58 | cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | 58 | if (cmd->speed == SPEED_10000) { |
59 | | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half | 59 | cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); |
60 | | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half | 60 | cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); |
61 | | SUPPORTED_Autoneg | SUPPORTED_FIBRE); | 61 | cmd->port = PORT_FIBRE; |
62 | 62 | } else { | |
63 | cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg | 63 | cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full |
64 | | ADVERTISED_FIBRE); | 64 | | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full |
65 | | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg | ||
66 | | SUPPORTED_TP); | ||
67 | cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ||
68 | | ADVERTISED_TP); | ||
69 | cmd->port = PORT_TP; | ||
70 | } | ||
65 | 71 | ||
66 | cmd->port = PORT_FIBRE; | ||
67 | cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; | 72 | cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; |
68 | 73 | ||
69 | return 0; | 74 | return 0; |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 53c0f04b1b23..cf79cf759e13 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -2688,9 +2688,6 @@ static int ehea_open(struct net_device *dev) | |||
2688 | netif_start_queue(dev); | 2688 | netif_start_queue(dev); |
2689 | } | 2689 | } |
2690 | 2690 | ||
2691 | init_waitqueue_head(&port->swqe_avail_wq); | ||
2692 | init_waitqueue_head(&port->restart_wq); | ||
2693 | |||
2694 | mutex_unlock(&port->port_lock); | 2691 | mutex_unlock(&port->port_lock); |
2695 | 2692 | ||
2696 | return ret; | 2693 | return ret; |
@@ -3276,6 +3273,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
3276 | 3273 | ||
3277 | INIT_WORK(&port->reset_task, ehea_reset_port); | 3274 | INIT_WORK(&port->reset_task, ehea_reset_port); |
3278 | 3275 | ||
3276 | init_waitqueue_head(&port->swqe_avail_wq); | ||
3277 | init_waitqueue_head(&port->restart_wq); | ||
3278 | |||
3279 | ret = register_netdev(dev); | 3279 | ret = register_netdev(dev); |
3280 | if (ret) { | 3280 | if (ret) { |
3281 | pr_err("register_netdev failed. ret=%d\n", ret); | 3281 | pr_err("register_netdev failed. ret=%d\n", ret); |
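
The ehea hunks move the two init_waitqueue_head() calls from ehea_open() to ehea_setup_single_port(), turning per-open initialization into once-per-port initialization that happens before register_netdev() publishes the device. A plausible reading of the motivation: the reset path can sleep on these queues independently of open(), and a wait queue must not be re-initialized while someone may already be queued on it. The resulting setup order:

    INIT_WORK(&port->reset_task, ehea_reset_port);
    init_waitqueue_head(&port->swqe_avail_wq);   /* once, before publication */
    init_waitqueue_head(&port->restart_wq);

    ret = register_netdev(dev);   /* open()/reset may run from here on */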
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 24cb953900dd..5131e61c358c 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -998,8 +998,10 @@ static const struct net_device_ops fs_enet_netdev_ops = { | |||
998 | #endif | 998 | #endif |
999 | }; | 999 | }; |
1000 | 1000 | ||
1001 | static struct of_device_id fs_enet_match[]; | ||
1001 | static int __devinit fs_enet_probe(struct platform_device *ofdev) | 1002 | static int __devinit fs_enet_probe(struct platform_device *ofdev) |
1002 | { | 1003 | { |
1004 | const struct of_device_id *match; | ||
1003 | struct net_device *ndev; | 1005 | struct net_device *ndev; |
1004 | struct fs_enet_private *fep; | 1006 | struct fs_enet_private *fep; |
1005 | struct fs_platform_info *fpi; | 1007 | struct fs_platform_info *fpi; |
@@ -1007,14 +1009,15 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) | |||
1007 | const u8 *mac_addr; | 1009 | const u8 *mac_addr; |
1008 | int privsize, len, ret = -ENODEV; | 1010 | int privsize, len, ret = -ENODEV; |
1009 | 1011 | ||
1010 | if (!ofdev->dev.of_match) | 1012 | match = of_match_device(fs_enet_match, &ofdev->dev); |
1013 | if (!match) | ||
1011 | return -EINVAL; | 1014 | return -EINVAL; |
1012 | 1015 | ||
1013 | fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); | 1016 | fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); |
1014 | if (!fpi) | 1017 | if (!fpi) |
1015 | return -ENOMEM; | 1018 | return -ENOMEM; |
1016 | 1019 | ||
1017 | if (!IS_FEC(ofdev->dev.of_match)) { | 1020 | if (!IS_FEC(match)) { |
1018 | data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); | 1021 | data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); |
1019 | if (!data || len != 4) | 1022 | if (!data || len != 4) |
1020 | goto out_free_fpi; | 1023 | goto out_free_fpi; |
@@ -1049,7 +1052,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) | |||
1049 | fep->dev = &ofdev->dev; | 1052 | fep->dev = &ofdev->dev; |
1050 | fep->ndev = ndev; | 1053 | fep->ndev = ndev; |
1051 | fep->fpi = fpi; | 1054 | fep->fpi = fpi; |
1052 | fep->ops = ofdev->dev.of_match->data; | 1055 | fep->ops = match->data; |
1053 | 1056 | ||
1054 | ret = fep->ops->setup_data(ndev); | 1057 | ret = fep->ops->setup_data(ndev); |
1055 | if (ret) | 1058 | if (ret) |
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c index 7e840d373ab3..6a2e150e75bb 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c | |||
@@ -101,17 +101,20 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus) | |||
101 | return 0; | 101 | return 0; |
102 | } | 102 | } |
103 | 103 | ||
104 | static struct of_device_id fs_enet_mdio_fec_match[]; | ||
104 | static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) | 105 | static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) |
105 | { | 106 | { |
107 | const struct of_device_id *match; | ||
106 | struct resource res; | 108 | struct resource res; |
107 | struct mii_bus *new_bus; | 109 | struct mii_bus *new_bus; |
108 | struct fec_info *fec; | 110 | struct fec_info *fec; |
109 | int (*get_bus_freq)(struct device_node *); | 111 | int (*get_bus_freq)(struct device_node *); |
110 | int ret = -ENOMEM, clock, speed; | 112 | int ret = -ENOMEM, clock, speed; |
111 | 113 | ||
112 | if (!ofdev->dev.of_match) | 114 | match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); |
115 | if (!match) | ||
113 | return -EINVAL; | 116 | return -EINVAL; |
114 | get_bus_freq = ofdev->dev.of_match->data; | 117 | get_bus_freq = match->data; |
115 | 118 | ||
116 | new_bus = mdiobus_alloc(); | 119 | new_bus = mdiobus_alloc(); |
117 | if (!new_bus) | 120 | if (!new_bus) |
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c index c5ef62ceb840..1cd481c04202 100644 --- a/drivers/net/hydra.c +++ b/drivers/net/hydra.c | |||
@@ -98,15 +98,15 @@ static const struct net_device_ops hydra_netdev_ops = { | |||
98 | .ndo_open = hydra_open, | 98 | .ndo_open = hydra_open, |
99 | .ndo_stop = hydra_close, | 99 | .ndo_stop = hydra_close, |
100 | 100 | ||
101 | .ndo_start_xmit = ei_start_xmit, | 101 | .ndo_start_xmit = __ei_start_xmit, |
102 | .ndo_tx_timeout = ei_tx_timeout, | 102 | .ndo_tx_timeout = __ei_tx_timeout, |
103 | .ndo_get_stats = ei_get_stats, | 103 | .ndo_get_stats = __ei_get_stats, |
104 | .ndo_set_multicast_list = ei_set_multicast_list, | 104 | .ndo_set_multicast_list = __ei_set_multicast_list, |
105 | .ndo_validate_addr = eth_validate_addr, | 105 | .ndo_validate_addr = eth_validate_addr, |
106 | .ndo_set_mac_address = eth_mac_addr, | 106 | .ndo_set_mac_address = eth_mac_addr, |
107 | .ndo_change_mtu = eth_change_mtu, | 107 | .ndo_change_mtu = eth_change_mtu, |
108 | #ifdef CONFIG_NET_POLL_CONTROLLER | 108 | #ifdef CONFIG_NET_POLL_CONTROLLER |
109 | .ndo_poll_controller = ei_poll, | 109 | .ndo_poll_controller = __ei_poll, |
110 | #endif | 110 | #endif |
111 | }; | 111 | }; |
112 | 112 | ||
@@ -125,7 +125,7 @@ static int __devinit hydra_init(struct zorro_dev *z) | |||
125 | 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, | 125 | 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, |
126 | }; | 126 | }; |
127 | 127 | ||
128 | dev = alloc_ei_netdev(); | 128 | dev = ____alloc_ei_netdev(0); |
129 | if (!dev) | 129 | if (!dev) |
130 | return -ENOMEM; | 130 | return -ENOMEM; |
131 | 131 | ||
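
Together with the Makefile hunk earlier that stops linking 8390.o into hydra (and zorro8390/ne-h8300), this converts the driver to consume the 8390 library core directly: the double-underscore entry points and ____alloc_ei_netdev() are generated by lib8390.c, which such drivers pull in as a source include along with their own register accessors. A sketch of that shape; the accessor macro shown is the customary one, not quoted from this hunk:

    /* driver-specific I/O primitives, then the 8390 core as a source include */
    #define EI_SHIFT(x)     (ei_local->reg_offset[(x)])
    #include "lib8390.c"    /* defines __ei_start_xmit, ____alloc_ei_netdev, ... */

    static const struct net_device_ops hydra_netdev_ops = {
            .ndo_start_xmit = __ei_start_xmit,
            .ndo_tx_timeout = __ei_tx_timeout,
            /* ... */
    };

    dev = ____alloc_ei_netdev(0);   /* library variant of alloc_ei_netdev() */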
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 6f8adc7f5d7c..e145f2c455cb 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -5100,11 +5100,6 @@ err_set_interrupt: | |||
5100 | return err; | 5100 | return err; |
5101 | } | 5101 | } |
5102 | 5102 | ||
5103 | static void ring_free_rcu(struct rcu_head *head) | ||
5104 | { | ||
5105 | kfree(container_of(head, struct ixgbe_ring, rcu)); | ||
5106 | } | ||
5107 | |||
5108 | /** | 5103 | /** |
5109 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings | 5104 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings |
5110 | * @adapter: board private structure to clear interrupt scheme on | 5105 | * @adapter: board private structure to clear interrupt scheme on |
@@ -5126,7 +5121,7 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | |||
5126 | /* ixgbe_get_stats64() might access this ring, we must wait | 5121 | /* ixgbe_get_stats64() might access this ring, we must wait |
5127 | * a grace period before freeing it. | 5122 | * a grace period before freeing it. |
5128 | */ | 5123 | */ |
5129 | call_rcu(&ring->rcu, ring_free_rcu); | 5124 | kfree_rcu(ring, rcu); |
5130 | adapter->rx_ring[i] = NULL; | 5125 | adapter->rx_ring[i] = NULL; |
5131 | } | 5126 | } |
5132 | 5127 | ||
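
kfree_rcu() collapses the call_rcu() plus single-purpose-callback pattern: given the object and the name of its embedded rcu_head member, it frees the object after a grace period, which is why ring_free_rcu() can be deleted outright. Before and after, in sketch form:

    /* before: a callback whose only job is container_of() + kfree() */
    static void ring_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct ixgbe_ring, rcu));
    }
    ...
    call_rcu(&ring->rcu, ring_free_rcu);

    /* after: same semantics, derived from the offset of the 'rcu' member */
    kfree_rcu(ring, rcu);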
diff --git a/drivers/net/lantiq_etop.c b/drivers/net/lantiq_etop.c new file mode 100644 index 000000000000..45f252b7da30 --- /dev/null +++ b/drivers/net/lantiq_etop.c | |||
@@ -0,0 +1,805 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. | ||
14 | * | ||
15 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/in.h> | ||
25 | #include <linux/netdevice.h> | ||
26 | #include <linux/etherdevice.h> | ||
27 | #include <linux/phy.h> | ||
28 | #include <linux/ip.h> | ||
29 | #include <linux/tcp.h> | ||
30 | #include <linux/skbuff.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/ethtool.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/io.h> | ||
37 | |||
38 | #include <asm/checksum.h> | ||
39 | |||
40 | #include <lantiq_soc.h> | ||
41 | #include <xway_dma.h> | ||
42 | #include <lantiq_platform.h> | ||
43 | |||
44 | #define LTQ_ETOP_MDIO 0x11804 | ||
45 | #define MDIO_REQUEST 0x80000000 | ||
46 | #define MDIO_READ 0x40000000 | ||
47 | #define MDIO_ADDR_MASK 0x1f | ||
48 | #define MDIO_ADDR_OFFSET 0x15 | ||
49 | #define MDIO_REG_MASK 0x1f | ||
50 | #define MDIO_REG_OFFSET 0x10 | ||
51 | #define MDIO_VAL_MASK 0xffff | ||
52 | |||
53 | #define PPE32_CGEN 0x800 | ||
54 | #define LQ_PPE32_ENET_MAC_CFG 0x1840 | ||
55 | |||
56 | #define LTQ_ETOP_ENETS0 0x11850 | ||
57 | #define LTQ_ETOP_MAC_DA0 0x1186C | ||
58 | #define LTQ_ETOP_MAC_DA1 0x11870 | ||
59 | #define LTQ_ETOP_CFG 0x16020 | ||
60 | #define LTQ_ETOP_IGPLEN 0x16080 | ||
61 | |||
62 | #define MAX_DMA_CHAN 0x8 | ||
63 | #define MAX_DMA_CRC_LEN 0x4 | ||
64 | #define MAX_DMA_DATA_LEN 0x600 | ||
65 | |||
66 | #define ETOP_FTCU BIT(28) | ||
67 | #define ETOP_MII_MASK 0xf | ||
68 | #define ETOP_MII_NORMAL 0xd | ||
69 | #define ETOP_MII_REVERSE 0xe | ||
70 | #define ETOP_PLEN_UNDER 0x40 | ||
71 | #define ETOP_CGEN 0x800 | ||
72 | |||
73 | /* use 2 static channels for TX/RX */ | ||
74 | #define LTQ_ETOP_TX_CHANNEL 1 | ||
75 | #define LTQ_ETOP_RX_CHANNEL 6 | ||
76 | #define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL) | ||
77 | #define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL) | ||
78 | |||
79 | #define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x)) | ||
80 | #define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y)) | ||
81 | #define ltq_etop_w32_mask(x, y, z) \ | ||
82 | ltq_w32_mask(x, y, ltq_etop_membase + (z)) | ||
83 | |||
84 | #define DRV_VERSION "1.0" | ||
85 | |||
86 | static void __iomem *ltq_etop_membase; | ||
87 | |||
88 | struct ltq_etop_chan { | ||
89 | int idx; | ||
90 | int tx_free; | ||
91 | struct net_device *netdev; | ||
92 | struct napi_struct napi; | ||
93 | struct ltq_dma_channel dma; | ||
94 | struct sk_buff *skb[LTQ_DESC_NUM]; | ||
95 | }; | ||
96 | |||
97 | struct ltq_etop_priv { | ||
98 | struct net_device *netdev; | ||
99 | struct ltq_eth_data *pldata; | ||
100 | struct resource *res; | ||
101 | |||
102 | struct mii_bus *mii_bus; | ||
103 | struct phy_device *phydev; | ||
104 | |||
105 | struct ltq_etop_chan ch[MAX_DMA_CHAN]; | ||
106 | int tx_free[MAX_DMA_CHAN >> 1]; | ||
107 | |||
108 | spinlock_t lock; | ||
109 | }; | ||
110 | |||
111 | static int | ||
112 | ltq_etop_alloc_skb(struct ltq_etop_chan *ch) | ||
113 | { | ||
114 | ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN); | ||
115 | if (!ch->skb[ch->dma.desc]) | ||
116 | return -ENOMEM; | ||
117 | ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL, | ||
118 | ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN, | ||
119 | DMA_FROM_DEVICE); | ||
120 | ch->dma.desc_base[ch->dma.desc].addr = | ||
121 | CPHYSADDR(ch->skb[ch->dma.desc]->data); | ||
122 | ch->dma.desc_base[ch->dma.desc].ctl = | ||
123 | LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | | ||
124 | MAX_DMA_DATA_LEN; | ||
125 | skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN); | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static void | ||
130 | ltq_etop_hw_receive(struct ltq_etop_chan *ch) | ||
131 | { | ||
132 | struct ltq_etop_priv *priv = netdev_priv(ch->netdev); | ||
133 | struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; | ||
134 | struct sk_buff *skb = ch->skb[ch->dma.desc]; | ||
135 | int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN; | ||
136 | unsigned long flags; | ||
137 | |||
138 | spin_lock_irqsave(&priv->lock, flags); | ||
139 | if (ltq_etop_alloc_skb(ch)) { | ||
140 | netdev_err(ch->netdev, | ||
141 | "failed to allocate new rx buffer, stopping DMA\n"); | ||
142 | ltq_dma_close(&ch->dma); | ||
143 | } | ||
144 | ch->dma.desc++; | ||
145 | ch->dma.desc %= LTQ_DESC_NUM; | ||
146 | spin_unlock_irqrestore(&priv->lock, flags); | ||
147 | |||
148 | skb_put(skb, len); | ||
149 | skb->dev = ch->netdev; | ||
150 | skb->protocol = eth_type_trans(skb, ch->netdev); | ||
151 | netif_receive_skb(skb); | ||
152 | } | ||
153 | |||
154 | static int | ||
155 | ltq_etop_poll_rx(struct napi_struct *napi, int budget) | ||
156 | { | ||
157 | struct ltq_etop_chan *ch = container_of(napi, | ||
158 | struct ltq_etop_chan, napi); | ||
159 | int rx = 0; | ||
160 | int complete = 0; | ||
161 | |||
162 | while ((rx < budget) && !complete) { | ||
163 | struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; | ||
164 | |||
165 | if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { | ||
166 | ltq_etop_hw_receive(ch); | ||
167 | rx++; | ||
168 | } else { | ||
169 | complete = 1; | ||
170 | } | ||
171 | } | ||
172 | if (complete || !rx) { | ||
173 | napi_complete(&ch->napi); | ||
174 | ltq_dma_ack_irq(&ch->dma); | ||
175 | } | ||
176 | return rx; | ||
177 | } | ||
178 | |||
179 | static int | ||
180 | ltq_etop_poll_tx(struct napi_struct *napi, int budget) | ||
181 | { | ||
182 | struct ltq_etop_chan *ch = | ||
183 | container_of(napi, struct ltq_etop_chan, napi); | ||
184 | struct ltq_etop_priv *priv = netdev_priv(ch->netdev); | ||
185 | struct netdev_queue *txq = | ||
186 | netdev_get_tx_queue(ch->netdev, ch->idx >> 1); | ||
187 | unsigned long flags; | ||
188 | |||
189 | spin_lock_irqsave(&priv->lock, flags); | ||
190 | while ((ch->dma.desc_base[ch->tx_free].ctl & | ||
191 | (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { | ||
192 | dev_kfree_skb_any(ch->skb[ch->tx_free]); | ||
193 | ch->skb[ch->tx_free] = NULL; | ||
194 | memset(&ch->dma.desc_base[ch->tx_free], 0, | ||
195 | sizeof(struct ltq_dma_desc)); | ||
196 | ch->tx_free++; | ||
197 | ch->tx_free %= LTQ_DESC_NUM; | ||
198 | } | ||
199 | spin_unlock_irqrestore(&priv->lock, flags); | ||
200 | |||
201 | if (netif_tx_queue_stopped(txq)) | ||
202 | netif_tx_start_queue(txq); | ||
203 | napi_complete(&ch->napi); | ||
204 | ltq_dma_ack_irq(&ch->dma); | ||
205 | return 1; | ||
206 | } | ||
207 | |||
208 | static irqreturn_t | ||
209 | ltq_etop_dma_irq(int irq, void *_priv) | ||
210 | { | ||
211 | struct ltq_etop_priv *priv = _priv; | ||
212 | int ch = irq - LTQ_DMA_CH0_INT; | ||
213 | |||
214 | napi_schedule(&priv->ch[ch].napi); | ||
215 | return IRQ_HANDLED; | ||
216 | } | ||
217 | |||
218 | static void | ||
219 | ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) | ||
220 | { | ||
221 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
222 | |||
223 | ltq_dma_free(&ch->dma); | ||
224 | if (ch->dma.irq) | ||
225 | free_irq(ch->dma.irq, priv); | ||
226 | if (IS_RX(ch->idx)) { | ||
227 | int desc; | ||
228 | for (desc = 0; desc < LTQ_DESC_NUM; desc++) | ||
229 | dev_kfree_skb_any(ch->skb[desc]); | ||
230 | } | ||
231 | } | ||
232 | |||
233 | static void | ||
234 | ltq_etop_hw_exit(struct net_device *dev) | ||
235 | { | ||
236 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
237 | int i; | ||
238 | |||
239 | ltq_pmu_disable(PMU_PPE); | ||
240 | for (i = 0; i < MAX_DMA_CHAN; i++) | ||
241 | if (IS_TX(i) || IS_RX(i)) | ||
242 | ltq_etop_free_channel(dev, &priv->ch[i]); | ||
243 | } | ||
244 | |||
245 | static int | ||
246 | ltq_etop_hw_init(struct net_device *dev) | ||
247 | { | ||
248 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
249 | int i; | ||
250 | |||
251 | ltq_pmu_enable(PMU_PPE); | ||
252 | |||
253 | switch (priv->pldata->mii_mode) { | ||
254 | case PHY_INTERFACE_MODE_RMII: | ||
255 | ltq_etop_w32_mask(ETOP_MII_MASK, | ||
256 | ETOP_MII_REVERSE, LTQ_ETOP_CFG); | ||
257 | break; | ||
258 | |||
259 | case PHY_INTERFACE_MODE_MII: | ||
260 | ltq_etop_w32_mask(ETOP_MII_MASK, | ||
261 | ETOP_MII_NORMAL, LTQ_ETOP_CFG); | ||
262 | break; | ||
263 | |||
264 | default: | ||
265 | netdev_err(dev, "unknown mii mode %d\n", | ||
266 | priv->pldata->mii_mode); | ||
267 | return -ENOTSUPP; | ||
268 | } | ||
269 | |||
270 | /* enable crc generation */ | ||
271 | ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG); | ||
272 | |||
273 | ltq_dma_init_port(DMA_PORT_ETOP); | ||
274 | |||
275 | for (i = 0; i < MAX_DMA_CHAN; i++) { | ||
276 | int irq = LTQ_DMA_CH0_INT + i; | ||
277 | struct ltq_etop_chan *ch = &priv->ch[i]; | ||
278 | |||
279 | ch->idx = ch->dma.nr = i; | ||
280 | |||
281 | if (IS_TX(i)) { | ||
282 | ltq_dma_alloc_tx(&ch->dma); | ||
283 | request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, | ||
284 | "etop_tx", priv); | ||
285 | } else if (IS_RX(i)) { | ||
286 | ltq_dma_alloc_rx(&ch->dma); | ||
287 | for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; | ||
288 | ch->dma.desc++) | ||
289 | if (ltq_etop_alloc_skb(ch)) | ||
290 | return -ENOMEM; | ||
291 | ch->dma.desc = 0; | ||
292 | request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, | ||
293 | "etop_rx", priv); | ||
294 | } | ||
295 | ch->dma.irq = irq; | ||
296 | } | ||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | static void | ||
301 | ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
302 | { | ||
303 | strcpy(info->driver, "Lantiq ETOP"); | ||
304 | strcpy(info->bus_info, "internal"); | ||
305 | strcpy(info->version, DRV_VERSION); | ||
306 | } | ||
307 | |||
308 | static int | ||
309 | ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
310 | { | ||
311 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
312 | |||
313 | return phy_ethtool_gset(priv->phydev, cmd); | ||
314 | } | ||
315 | |||
316 | static int | ||
317 | ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
318 | { | ||
319 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
320 | |||
321 | return phy_ethtool_sset(priv->phydev, cmd); | ||
322 | } | ||
323 | |||
324 | static int | ||
325 | ltq_etop_nway_reset(struct net_device *dev) | ||
326 | { | ||
327 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
328 | |||
329 | return phy_start_aneg(priv->phydev); | ||
330 | } | ||
331 | |||
332 | static const struct ethtool_ops ltq_etop_ethtool_ops = { | ||
333 | .get_drvinfo = ltq_etop_get_drvinfo, | ||
334 | .get_settings = ltq_etop_get_settings, | ||
335 | .set_settings = ltq_etop_set_settings, | ||
336 | .nway_reset = ltq_etop_nway_reset, | ||
337 | }; | ||
338 | |||
339 | static int | ||
340 | ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data) | ||
341 | { | ||
342 | u32 val = MDIO_REQUEST | | ||
343 | ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | | ||
344 | ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) | | ||
345 | phy_data; | ||
346 | |||
347 | while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) | ||
348 | ; | ||
349 | ltq_etop_w32(val, LTQ_ETOP_MDIO); | ||
350 | return 0; | ||
351 | } | ||
352 | |||
353 | static int | ||
354 | ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg) | ||
355 | { | ||
356 | u32 val = MDIO_REQUEST | MDIO_READ | | ||
357 | ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | | ||
358 | ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET); | ||
359 | |||
360 | while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) | ||
361 | ; | ||
362 | ltq_etop_w32(val, LTQ_ETOP_MDIO); | ||
363 | while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) | ||
364 | ; | ||
365 | val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK; | ||
366 | return val; | ||
367 | } | ||
368 | |||
369 | static void | ||
370 | ltq_etop_mdio_link(struct net_device *dev) | ||
371 | { | ||
372 | /* nothing to do */ | ||
373 | } | ||
374 | |||
375 | static int | ||
376 | ltq_etop_mdio_probe(struct net_device *dev) | ||
377 | { | ||
378 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
379 | struct phy_device *phydev = NULL; | ||
380 | int phy_addr; | ||
381 | |||
382 | for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { | ||
383 | if (priv->mii_bus->phy_map[phy_addr]) { | ||
384 | phydev = priv->mii_bus->phy_map[phy_addr]; | ||
385 | break; | ||
386 | } | ||
387 | } | ||
388 | |||
389 | if (!phydev) { | ||
390 | netdev_err(dev, "no PHY found\n"); | ||
391 | return -ENODEV; | ||
392 | } | ||
393 | |||
394 | phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link, | ||
395 | 0, priv->pldata->mii_mode); | ||
396 | |||
397 | if (IS_ERR(phydev)) { | ||
398 | netdev_err(dev, "Could not attach to PHY\n"); | ||
399 | return PTR_ERR(phydev); | ||
400 | } | ||
401 | |||
402 | phydev->supported &= (SUPPORTED_10baseT_Half | ||
403 | | SUPPORTED_10baseT_Full | ||
404 | | SUPPORTED_100baseT_Half | ||
405 | | SUPPORTED_100baseT_Full | ||
406 | | SUPPORTED_Autoneg | ||
407 | | SUPPORTED_MII | ||
408 | | SUPPORTED_TP); | ||
409 | |||
410 | phydev->advertising = phydev->supported; | ||
411 | priv->phydev = phydev; | ||
412 | pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n", | ||
413 | dev->name, phydev->drv->name, | ||
414 | dev_name(&phydev->dev), phydev->irq); | ||
415 | |||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | static int | ||
420 | ltq_etop_mdio_init(struct net_device *dev) | ||
421 | { | ||
422 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
423 | int i; | ||
424 | int err; | ||
425 | |||
426 | priv->mii_bus = mdiobus_alloc(); | ||
427 | if (!priv->mii_bus) { | ||
428 | netdev_err(dev, "failed to allocate mii bus\n"); | ||
429 | err = -ENOMEM; | ||
430 | goto err_out; | ||
431 | } | ||
432 | |||
433 | priv->mii_bus->priv = dev; | ||
434 | priv->mii_bus->read = ltq_etop_mdio_rd; | ||
435 | priv->mii_bus->write = ltq_etop_mdio_wr; | ||
436 | priv->mii_bus->name = "ltq_mii"; | ||
437 | snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0); | ||
438 | priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | ||
439 | if (!priv->mii_bus->irq) { | ||
440 | err = -ENOMEM; | ||
441 | goto err_out_free_mdiobus; | ||
442 | } | ||
443 | |||
444 | for (i = 0; i < PHY_MAX_ADDR; ++i) | ||
445 | priv->mii_bus->irq[i] = PHY_POLL; | ||
446 | |||
447 | if (mdiobus_register(priv->mii_bus)) { | ||
448 | err = -ENXIO; | ||
449 | goto err_out_free_mdio_irq; | ||
450 | } | ||
451 | |||
452 | if (ltq_etop_mdio_probe(dev)) { | ||
453 | err = -ENXIO; | ||
454 | goto err_out_unregister_bus; | ||
455 | } | ||
456 | return 0; | ||
457 | |||
458 | err_out_unregister_bus: | ||
459 | mdiobus_unregister(priv->mii_bus); | ||
460 | err_out_free_mdio_irq: | ||
461 | kfree(priv->mii_bus->irq); | ||
462 | err_out_free_mdiobus: | ||
463 | mdiobus_free(priv->mii_bus); | ||
464 | err_out: | ||
465 | return err; | ||
466 | } | ||
467 | |||
468 | static void | ||
469 | ltq_etop_mdio_cleanup(struct net_device *dev) | ||
470 | { | ||
471 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
472 | |||
473 | phy_disconnect(priv->phydev); | ||
474 | mdiobus_unregister(priv->mii_bus); | ||
475 | kfree(priv->mii_bus->irq); | ||
476 | mdiobus_free(priv->mii_bus); | ||
477 | } | ||
478 | |||
479 | static int | ||
480 | ltq_etop_open(struct net_device *dev) | ||
481 | { | ||
482 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
483 | int i; | ||
484 | |||
485 | for (i = 0; i < MAX_DMA_CHAN; i++) { | ||
486 | struct ltq_etop_chan *ch = &priv->ch[i]; | ||
487 | |||
488 | if (!IS_TX(i) && (!IS_RX(i))) | ||
489 | continue; | ||
490 | ltq_dma_open(&ch->dma); | ||
491 | napi_enable(&ch->napi); | ||
492 | } | ||
493 | phy_start(priv->phydev); | ||
494 | netif_tx_start_all_queues(dev); | ||
495 | return 0; | ||
496 | } | ||
497 | |||
498 | static int | ||
499 | ltq_etop_stop(struct net_device *dev) | ||
500 | { | ||
501 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
502 | int i; | ||
503 | |||
504 | netif_tx_stop_all_queues(dev); | ||
505 | phy_stop(priv->phydev); | ||
506 | for (i = 0; i < MAX_DMA_CHAN; i++) { | ||
507 | struct ltq_etop_chan *ch = &priv->ch[i]; | ||
508 | |||
509 | if (!IS_RX(i) && !IS_TX(i)) | ||
510 | continue; | ||
511 | napi_disable(&ch->napi); | ||
512 | ltq_dma_close(&ch->dma); | ||
513 | } | ||
514 | return 0; | ||
515 | } | ||
516 | |||
517 | static int | ||
518 | ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) | ||
519 | { | ||
520 | int queue = skb_get_queue_mapping(skb); | ||
521 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue); | ||
522 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
523 | struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1]; | ||
524 | struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; | ||
525 | int len; | ||
526 | unsigned long flags; | ||
527 | u32 byte_offset; | ||
528 | |||
529 | len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; | ||
530 | |||
531 | if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { | ||
532 | dev_kfree_skb_any(skb); | ||
533 | netdev_err(dev, "tx ring full\n"); | ||
534 | netif_tx_stop_queue(txq); | ||
535 | return NETDEV_TX_BUSY; | ||
536 | } | ||
537 | |||
538 | /* DMA needs to start on a 16-byte aligned address */ | ||
539 | byte_offset = CPHYSADDR(skb->data) % 16; | ||
540 | ch->skb[ch->dma.desc] = skb; | ||
541 | |||
542 | dev->trans_start = jiffies; | ||
543 | |||
544 | spin_lock_irqsave(&priv->lock, flags); | ||
545 | desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len, | ||
546 | DMA_TO_DEVICE)) - byte_offset; | ||
547 | wmb(); | ||
548 | desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | | ||
549 | LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK); | ||
550 | ch->dma.desc++; | ||
551 | ch->dma.desc %= LTQ_DESC_NUM; | ||
552 | spin_unlock_irqrestore(&priv->lock, flags); | ||
553 | |||
554 | if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN) | ||
555 | netif_tx_stop_queue(txq); | ||
556 | |||
557 | return NETDEV_TX_OK; | ||
558 | } | ||
559 | |||
560 | static int | ||
561 | ltq_etop_change_mtu(struct net_device *dev, int new_mtu) | ||
562 | { | ||
563 | int ret = eth_change_mtu(dev, new_mtu); | ||
564 | |||
565 | if (!ret) { | ||
566 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
567 | unsigned long flags; | ||
568 | |||
569 | spin_lock_irqsave(&priv->lock, flags); | ||
570 | ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, | ||
571 | LTQ_ETOP_IGPLEN); | ||
572 | spin_unlock_irqrestore(&priv->lock, flags); | ||
573 | } | ||
574 | return ret; | ||
575 | } | ||
576 | |||
577 | static int | ||
578 | ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
579 | { | ||
580 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
581 | |||
582 | /* TODO: mii-tool reports "No MII transceiver present!" ?! */ | ||
583 | return phy_mii_ioctl(priv->phydev, rq, cmd); | ||
584 | } | ||
585 | |||
586 | static int | ||
587 | ltq_etop_set_mac_address(struct net_device *dev, void *p) | ||
588 | { | ||
589 | int ret = eth_mac_addr(dev, p); | ||
590 | |||
591 | if (!ret) { | ||
592 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
593 | unsigned long flags; | ||
594 | |||
595 | /* store the mac for the unicast filter */ | ||
596 | spin_lock_irqsave(&priv->lock, flags); | ||
597 | ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0); | ||
598 | ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16, | ||
599 | LTQ_ETOP_MAC_DA1); | ||
600 | spin_unlock_irqrestore(&priv->lock, flags); | ||
601 | } | ||
602 | return ret; | ||
603 | } | ||
604 | |||
605 | static void | ||
606 | ltq_etop_set_multicast_list(struct net_device *dev) | ||
607 | { | ||
608 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
609 | unsigned long flags; | ||
610 | |||
611 | /* ensure that the unicast filter is not enabled in promiscuous mode */ | ||
612 | spin_lock_irqsave(&priv->lock, flags); | ||
613 | if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) | ||
614 | ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0); | ||
615 | else | ||
616 | ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0); | ||
617 | spin_unlock_irqrestore(&priv->lock, flags); | ||
618 | } | ||
619 | |||
620 | static u16 | ||
621 | ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
622 | { | ||
623 | /* we are currently only using the first queue */ | ||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | static int | ||
628 | ltq_etop_init(struct net_device *dev) | ||
629 | { | ||
630 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
631 | struct sockaddr mac; | ||
632 | int err; | ||
633 | |||
634 | ether_setup(dev); | ||
635 | dev->watchdog_timeo = 10 * HZ; | ||
636 | err = ltq_etop_hw_init(dev); | ||
637 | if (err) | ||
638 | goto err_hw; | ||
639 | ltq_etop_change_mtu(dev, 1500); | ||
640 | |||
641 | memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr)); | ||
642 | if (!is_valid_ether_addr(mac.sa_data)) { | ||
643 | pr_warn("etop: invalid MAC, using random\n"); | ||
644 | random_ether_addr(mac.sa_data); | ||
645 | } | ||
646 | |||
647 | err = ltq_etop_set_mac_address(dev, &mac); | ||
648 | if (err) | ||
649 | goto err_netdev; | ||
650 | ltq_etop_set_multicast_list(dev); | ||
651 | err = ltq_etop_mdio_init(dev); | ||
652 | if (err) | ||
653 | goto err_netdev; | ||
654 | return 0; | ||
655 | |||
656 | err_netdev: | ||
657 | unregister_netdev(dev); | ||
658 | free_netdev(dev); | ||
659 | err_hw: | ||
660 | ltq_etop_hw_exit(dev); | ||
661 | return err; | ||
662 | } | ||
663 | |||
664 | static void | ||
665 | ltq_etop_tx_timeout(struct net_device *dev) | ||
666 | { | ||
667 | int err; | ||
668 | |||
669 | ltq_etop_hw_exit(dev); | ||
670 | err = ltq_etop_hw_init(dev); | ||
671 | if (err) | ||
672 | goto err_hw; | ||
673 | dev->trans_start = jiffies; | ||
674 | netif_wake_queue(dev); | ||
675 | return; | ||
676 | |||
677 | err_hw: | ||
678 | ltq_etop_hw_exit(dev); | ||
679 | netdev_err(dev, "failed to restart etop after TX timeout\n"); | ||
680 | } | ||
681 | |||
682 | static const struct net_device_ops ltq_eth_netdev_ops = { | ||
683 | .ndo_open = ltq_etop_open, | ||
684 | .ndo_stop = ltq_etop_stop, | ||
685 | .ndo_start_xmit = ltq_etop_tx, | ||
686 | .ndo_change_mtu = ltq_etop_change_mtu, | ||
687 | .ndo_do_ioctl = ltq_etop_ioctl, | ||
688 | .ndo_set_mac_address = ltq_etop_set_mac_address, | ||
689 | .ndo_validate_addr = eth_validate_addr, | ||
690 | .ndo_set_multicast_list = ltq_etop_set_multicast_list, | ||
691 | .ndo_select_queue = ltq_etop_select_queue, | ||
692 | .ndo_init = ltq_etop_init, | ||
693 | .ndo_tx_timeout = ltq_etop_tx_timeout, | ||
694 | }; | ||
695 | |||
696 | static int __init | ||
697 | ltq_etop_probe(struct platform_device *pdev) | ||
698 | { | ||
699 | struct net_device *dev; | ||
700 | struct ltq_etop_priv *priv; | ||
701 | struct resource *res; | ||
702 | int err; | ||
703 | int i; | ||
704 | |||
705 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
706 | if (!res) { | ||
707 | dev_err(&pdev->dev, "failed to get etop resource\n"); | ||
708 | err = -ENOENT; | ||
709 | goto err_out; | ||
710 | } | ||
711 | |||
712 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
713 | resource_size(res), dev_name(&pdev->dev)); | ||
714 | if (!res) { | ||
715 | dev_err(&pdev->dev, "failed to request etop resource\n"); | ||
716 | err = -EBUSY; | ||
717 | goto err_out; | ||
718 | } | ||
719 | |||
720 | ltq_etop_membase = devm_ioremap_nocache(&pdev->dev, | ||
721 | res->start, resource_size(res)); | ||
722 | if (!ltq_etop_membase) { | ||
723 | dev_err(&pdev->dev, "failed to remap etop engine %d\n", | ||
724 | pdev->id); | ||
725 | err = -ENOMEM; | ||
726 | goto err_out; | ||
727 | } | ||
728 | |||
729 | dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4); | ||
730 | strcpy(dev->name, "eth%d"); | ||
731 | dev->netdev_ops = <q_eth_netdev_ops; | ||
732 | dev->ethtool_ops = <q_etop_ethtool_ops; | ||
733 | priv = netdev_priv(dev); | ||
734 | priv->res = res; | ||
735 | priv->pldata = dev_get_platdata(&pdev->dev); | ||
736 | priv->netdev = dev; | ||
737 | spin_lock_init(&priv->lock); | ||
738 | |||
739 | for (i = 0; i < MAX_DMA_CHAN; i++) { | ||
740 | if (IS_TX(i)) | ||
741 | netif_napi_add(dev, &priv->ch[i].napi, | ||
742 | ltq_etop_poll_tx, 8); | ||
743 | else if (IS_RX(i)) | ||
744 | netif_napi_add(dev, &priv->ch[i].napi, | ||
745 | ltq_etop_poll_rx, 32); | ||
746 | priv->ch[i].netdev = dev; | ||
747 | } | ||
748 | |||
749 | err = register_netdev(dev); | ||
750 | if (err) | ||
751 | goto err_free; | ||
752 | |||
753 | platform_set_drvdata(pdev, dev); | ||
754 | return 0; | ||
755 | |||
756 | err_free: | ||
757 | free_netdev(dev); | ||
758 | err_out: | ||
759 | return err; | ||
760 | } | ||
761 | |||
762 | static int __devexit | ||
763 | ltq_etop_remove(struct platform_device *pdev) | ||
764 | { | ||
765 | struct net_device *dev = platform_get_drvdata(pdev); | ||
766 | |||
767 | if (dev) { | ||
768 | netif_tx_stop_all_queues(dev); | ||
769 | ltq_etop_hw_exit(dev); | ||
770 | ltq_etop_mdio_cleanup(dev); | ||
771 | unregister_netdev(dev); | ||
772 | } | ||
773 | return 0; | ||
774 | } | ||
775 | |||
776 | static struct platform_driver ltq_mii_driver = { | ||
777 | .remove = __devexit_p(ltq_etop_remove), | ||
778 | .driver = { | ||
779 | .name = "ltq_etop", | ||
780 | .owner = THIS_MODULE, | ||
781 | }, | ||
782 | }; | ||
783 | |||
784 | int __init | ||
785 | init_ltq_etop(void) | ||
786 | { | ||
787 | int ret = platform_driver_probe(<q_mii_driver, ltq_etop_probe); | ||
788 | |||
789 | if (ret) | ||
790 | pr_err("ltq_etop: Error registering platform driver!\n"); | ||
791 | return ret; | ||
792 | } | ||
793 | |||
794 | static void __exit | ||
795 | exit_ltq_etop(void) | ||
796 | { | ||
797 | platform_driver_unregister(<q_mii_driver); | ||
798 | } | ||
799 | |||
800 | module_init(init_ltq_etop); | ||
801 | module_exit(exit_ltq_etop); | ||
802 | |||
803 | MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); | ||
804 | MODULE_DESCRIPTION("Lantiq SoC ETOP"); | ||
805 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 78e34e9e4f00..d8e4e69ad0b9 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -603,21 +603,13 @@ static int macvlan_port_create(struct net_device *dev) | |||
603 | return err; | 603 | return err; |
604 | } | 604 | } |
605 | 605 | ||
606 | static void macvlan_port_rcu_free(struct rcu_head *head) | ||
607 | { | ||
608 | struct macvlan_port *port; | ||
609 | |||
610 | port = container_of(head, struct macvlan_port, rcu); | ||
611 | kfree(port); | ||
612 | } | ||
613 | |||
614 | static void macvlan_port_destroy(struct net_device *dev) | 606 | static void macvlan_port_destroy(struct net_device *dev) |
615 | { | 607 | { |
616 | struct macvlan_port *port = macvlan_port_get(dev); | 608 | struct macvlan_port *port = macvlan_port_get(dev); |
617 | 609 | ||
618 | dev->priv_flags &= ~IFF_MACVLAN_PORT; | 610 | dev->priv_flags &= ~IFF_MACVLAN_PORT; |
619 | netdev_rx_handler_unregister(dev); | 611 | netdev_rx_handler_unregister(dev); |
620 | call_rcu(&port->rcu, macvlan_port_rcu_free); | 612 | kfree_rcu(port, rcu); |
621 | } | 613 | } |
622 | 614 | ||
623 | static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) | 615 | static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) |
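
The macvlan hunk above is a mechanical conversion: kfree_rcu() subsumes a call_rcu() callback whose only job is to kfree() the enclosing object, so the hand-written callback can be deleted. A minimal sketch with an illustrative structure:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_obj {
		int value;
		struct rcu_head rcu;	/* must be embedded in the object */
	};

	static void demo_free(struct demo_obj *obj)
	{
		/*
		 * Frees obj after a grace period; replaces
		 *   call_rcu(&obj->rcu, callback_that_just_kfrees);
		 * The second argument names the rcu_head member.
		 */
		kfree_rcu(obj, rcu);
	}
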
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c index 30be8c634ebd..7298a34bc795 100644 --- a/drivers/net/ne-h8300.c +++ b/drivers/net/ne-h8300.c | |||
@@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev) | |||
167 | #ifndef MODULE | 167 | #ifndef MODULE |
168 | struct net_device * __init ne_probe(int unit) | 168 | struct net_device * __init ne_probe(int unit) |
169 | { | 169 | { |
170 | struct net_device *dev = alloc_ei_netdev(); | 170 | struct net_device *dev = ____alloc_ei_netdev(0); |
171 | int err; | 171 | int err; |
172 | 172 | ||
173 | if (!dev) | 173 | if (!dev) |
@@ -197,15 +197,15 @@ static const struct net_device_ops ne_netdev_ops = { | |||
197 | .ndo_open = ne_open, | 197 | .ndo_open = ne_open, |
198 | .ndo_stop = ne_close, | 198 | .ndo_stop = ne_close, |
199 | 199 | ||
200 | .ndo_start_xmit = ei_start_xmit, | 200 | .ndo_start_xmit = __ei_start_xmit, |
201 | .ndo_tx_timeout = ei_tx_timeout, | 201 | .ndo_tx_timeout = __ei_tx_timeout, |
202 | .ndo_get_stats = ei_get_stats, | 202 | .ndo_get_stats = __ei_get_stats, |
203 | .ndo_set_multicast_list = ei_set_multicast_list, | 203 | .ndo_set_multicast_list = __ei_set_multicast_list, |
204 | .ndo_validate_addr = eth_validate_addr, | 204 | .ndo_validate_addr = eth_validate_addr, |
205 | .ndo_set_mac_address = eth_mac_addr, | 205 | .ndo_set_mac_address = eth_mac_addr, |
206 | .ndo_change_mtu = eth_change_mtu, | 206 | .ndo_change_mtu = eth_change_mtu, |
207 | #ifdef CONFIG_NET_POLL_CONTROLLER | 207 | #ifdef CONFIG_NET_POLL_CONTROLLER |
208 | .ndo_poll_controller = ei_poll, | 208 | .ndo_poll_controller = __ei_poll, |
209 | #endif | 209 | #endif |
210 | }; | 210 | }; |
211 | 211 | ||
@@ -637,7 +637,7 @@ int init_module(void) | |||
637 | int err; | 637 | int err; |
638 | 638 | ||
639 | for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { | 639 | for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { |
640 | struct net_device *dev = alloc_ei_netdev(); | 640 | struct net_device *dev = ____alloc_ei_netdev(0); |
641 | if (!dev) | 641 | if (!dev) |
642 | break; | 642 | break; |
643 | if (io[this_dev]) { | 643 | if (io[this_dev]) { |
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 2ef2f9cdefa6..56d049a472da 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -34,6 +34,10 @@ const char pch_driver_version[] = DRV_VERSION; | |||
34 | #define PCH_GBE_COPYBREAK_DEFAULT 256 | 34 | #define PCH_GBE_COPYBREAK_DEFAULT 256 |
35 | #define PCH_GBE_PCI_BAR 1 | 35 | #define PCH_GBE_PCI_BAR 1 |
36 | 36 | ||
37 | /* Macros for ML7223 */ | ||
38 | #define PCI_VENDOR_ID_ROHM 0x10db | ||
39 | #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 | ||
40 | |||
37 | #define PCH_GBE_TX_WEIGHT 64 | 41 | #define PCH_GBE_TX_WEIGHT 64 |
38 | #define PCH_GBE_RX_WEIGHT 64 | 42 | #define PCH_GBE_RX_WEIGHT 64 |
39 | #define PCH_GBE_RX_BUFFER_WRITE 16 | 43 | #define PCH_GBE_RX_BUFFER_WRITE 16 |
@@ -43,8 +47,7 @@ const char pch_driver_version[] = DRV_VERSION; | |||
43 | 47 | ||
44 | #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \ | 48 | #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \ |
45 | PCH_GBE_CHIP_TYPE_INTERNAL | \ | 49 | PCH_GBE_CHIP_TYPE_INTERNAL | \ |
46 | PCH_GBE_RGMII_MODE_RGMII | \ | 50 | PCH_GBE_RGMII_MODE_RGMII \ |
47 | PCH_GBE_CRS_SEL \ | ||
48 | ) | 51 | ) |
49 | 52 | ||
50 | /* Ethertype field values */ | 53 | /* Ethertype field values */ |
@@ -1494,12 +1497,11 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1494 | /* Write meta date of skb */ | 1497 | /* Write meta date of skb */ |
1495 | skb_put(skb, length); | 1498 | skb_put(skb, length); |
1496 | skb->protocol = eth_type_trans(skb, netdev); | 1499 | skb->protocol = eth_type_trans(skb, netdev); |
1497 | if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) == | 1500 | if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) |
1498 | PCH_GBE_RXD_ACC_STAT_TCPIPOK) { | ||
1499 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1500 | } else { | ||
1501 | skb->ip_summed = CHECKSUM_NONE; | 1501 | skb->ip_summed = CHECKSUM_NONE; |
1502 | } | 1502 | else |
1503 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1504 | |||
1503 | napi_gro_receive(&adapter->napi, skb); | 1505 | napi_gro_receive(&adapter->napi, skb); |
1504 | (*work_done)++; | 1506 | (*work_done)++; |
1505 | pr_debug("Receive skb->ip_summed: %d length: %d\n", | 1507 | pr_debug("Receive skb->ip_summed: %d length: %d\n", |
@@ -2420,6 +2422,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { | |||
2420 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), | 2422 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), |
2421 | .class_mask = (0xFFFF00) | 2423 | .class_mask = (0xFFFF00) |
2422 | }, | 2424 | }, |
2425 | {.vendor = PCI_VENDOR_ID_ROHM, | ||
2426 | .device = PCI_DEVICE_ID_ROHM_ML7223_GBE, | ||
2427 | .subvendor = PCI_ANY_ID, | ||
2428 | .subdevice = PCI_ANY_ID, | ||
2429 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), | ||
2430 | .class_mask = (0xFFFF00) | ||
2431 | }, | ||
2423 | /* required last entry */ | 2432 | /* required last entry */ |
2424 | {0} | 2433 | {0} |
2425 | }; | 2434 | }; |
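
The ML7223 support above amounts to a new entry in the driver's PCI ID table. Reduced to a self-contained sketch (the demo_* names are illustrative; the ID values are the ones added by the patch):

	#include <linux/module.h>
	#include <linux/pci.h>

	#define PCI_VENDOR_ID_ROHM		0x10db
	#define PCI_DEVICE_ID_ROHM_ML7223_GBE	0x8013

	static DEFINE_PCI_DEVICE_TABLE(demo_pci_ids) = {
		{ .vendor	= PCI_VENDOR_ID_ROHM,
		  .device	= PCI_DEVICE_ID_ROHM_ML7223_GBE,
		  .subvendor	= PCI_ANY_ID,
		  .subdevice	= PCI_ANY_ID,
		  .class	= (PCI_CLASS_NETWORK_ETHERNET << 8),
		  .class_mask	= 0xffff00,	/* match base+subclass, ignore prog-if */
		},
		{ 0, }	/* required last entry */
	};
	MODULE_DEVICE_TABLE(pci, demo_pci_ids);
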
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index d98479030ef2..3dd45ed61f0a 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c | |||
@@ -50,6 +50,20 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | |||
50 | return &nic_data->mcdi; | 50 | return &nic_data->mcdi; |
51 | } | 51 | } |
52 | 52 | ||
53 | static inline void | ||
54 | efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) | ||
55 | { | ||
56 | struct siena_nic_data *nic_data = efx->nic_data; | ||
57 | value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); | ||
58 | } | ||
59 | |||
60 | static inline void | ||
61 | efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) | ||
62 | { | ||
63 | struct siena_nic_data *nic_data = efx->nic_data; | ||
64 | __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); | ||
65 | } | ||
66 | |||
53 | void efx_mcdi_init(struct efx_nic *efx) | 67 | void efx_mcdi_init(struct efx_nic *efx) |
54 | { | 68 | { |
55 | struct efx_mcdi_iface *mcdi; | 69 | struct efx_mcdi_iface *mcdi; |
@@ -70,8 +84,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | |||
70 | const u8 *inbuf, size_t inlen) | 84 | const u8 *inbuf, size_t inlen) |
71 | { | 85 | { |
72 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 86 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
73 | unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | 87 | unsigned pdu = MCDI_PDU(efx); |
74 | unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); | 88 | unsigned doorbell = MCDI_DOORBELL(efx); |
75 | unsigned int i; | 89 | unsigned int i; |
76 | efx_dword_t hdr; | 90 | efx_dword_t hdr; |
77 | u32 xflags, seqno; | 91 | u32 xflags, seqno; |
@@ -92,30 +106,28 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | |||
92 | MCDI_HEADER_SEQ, seqno, | 106 | MCDI_HEADER_SEQ, seqno, |
93 | MCDI_HEADER_XFLAGS, xflags); | 107 | MCDI_HEADER_XFLAGS, xflags); |
94 | 108 | ||
95 | efx_writed(efx, &hdr, pdu); | 109 | efx_mcdi_writed(efx, &hdr, pdu); |
96 | 110 | ||
97 | for (i = 0; i < inlen; i += 4) { | 111 | for (i = 0; i < inlen; i += 4) |
98 | _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); | 112 | efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), |
99 | /* use wmb() within loop to inhibit write combining */ | 113 | pdu + 4 + i); |
100 | wmb(); | ||
101 | } | ||
102 | 114 | ||
103 | /* ring the doorbell with a distinctive value */ | 115 | /* ring the doorbell with a distinctive value */ |
104 | _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); | 116 | EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); |
105 | wmb(); | 117 | efx_mcdi_writed(efx, &hdr, doorbell); |
106 | } | 118 | } |
107 | 119 | ||
108 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) | 120 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) |
109 | { | 121 | { |
110 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
111 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | 123 | unsigned int pdu = MCDI_PDU(efx); |
112 | int i; | 124 | int i; |
113 | 125 | ||
114 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | 126 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); |
115 | BUG_ON(outlen & 3 || outlen >= 0x100); | 127 | BUG_ON(outlen & 3 || outlen >= 0x100); |
116 | 128 | ||
117 | for (i = 0; i < outlen; i += 4) | 129 | for (i = 0; i < outlen; i += 4) |
118 | *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); | 130 | efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); |
119 | } | 131 | } |
120 | 132 | ||
121 | static int efx_mcdi_poll(struct efx_nic *efx) | 133 | static int efx_mcdi_poll(struct efx_nic *efx) |
@@ -123,7 +135,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
123 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 135 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
124 | unsigned int time, finish; | 136 | unsigned int time, finish; |
125 | unsigned int respseq, respcmd, error; | 137 | unsigned int respseq, respcmd, error; |
126 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | 138 | unsigned int pdu = MCDI_PDU(efx); |
127 | unsigned int rc, spins; | 139 | unsigned int rc, spins; |
128 | efx_dword_t reg; | 140 | efx_dword_t reg; |
129 | 141 | ||
@@ -149,8 +161,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
149 | 161 | ||
150 | time = get_seconds(); | 162 | time = get_seconds(); |
151 | 163 | ||
152 | rmb(); | 164 | efx_mcdi_readd(efx, ®, pdu); |
153 | efx_readd(efx, ®, pdu); | ||
154 | 165 | ||
155 | /* All 1's indicates that shared memory is in reset (and is | 166 | /* All 1's indicates that shared memory is in reset (and is |
156 | * not a valid header). Wait for it to come out reset before | 167 | * not a valid header). Wait for it to come out reset before |
@@ -177,7 +188,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
177 | respseq, mcdi->seqno); | 188 | respseq, mcdi->seqno); |
178 | rc = EIO; | 189 | rc = EIO; |
179 | } else if (error) { | 190 | } else if (error) { |
180 | efx_readd(efx, ®, pdu + 4); | 191 | efx_mcdi_readd(efx, ®, pdu + 4); |
181 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { | 192 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { |
182 | #define TRANSLATE_ERROR(name) \ | 193 | #define TRANSLATE_ERROR(name) \ |
183 | case MC_CMD_ERR_ ## name: \ | 194 | case MC_CMD_ERR_ ## name: \ |
@@ -211,21 +222,21 @@ out: | |||
211 | /* Test and clear MC-rebooted flag for this port/function */ | 222 | /* Test and clear MC-rebooted flag for this port/function */ |
212 | int efx_mcdi_poll_reboot(struct efx_nic *efx) | 223 | int efx_mcdi_poll_reboot(struct efx_nic *efx) |
213 | { | 224 | { |
214 | unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); | 225 | unsigned int addr = MCDI_REBOOT_FLAG(efx); |
215 | efx_dword_t reg; | 226 | efx_dword_t reg; |
216 | uint32_t value; | 227 | uint32_t value; |
217 | 228 | ||
218 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 229 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
219 | return false; | 230 | return false; |
220 | 231 | ||
221 | efx_readd(efx, ®, addr); | 232 | efx_mcdi_readd(efx, ®, addr); |
222 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); | 233 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); |
223 | 234 | ||
224 | if (value == 0) | 235 | if (value == 0) |
225 | return 0; | 236 | return 0; |
226 | 237 | ||
227 | EFX_ZERO_DWORD(reg); | 238 | EFX_ZERO_DWORD(reg); |
228 | efx_writed(efx, ®, addr); | 239 | efx_mcdi_writed(efx, ®, addr); |
229 | 240 | ||
230 | if (value == MC_STATUS_DWORD_ASSERT) | 241 | if (value == MC_STATUS_DWORD_ASSERT) |
231 | return -EINTR; | 242 | return -EINTR; |
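
The net effect of the mcdi.c changes is that MCDI shared memory is no longer addressed relative to the main BAR mapping; every access funnels through a pair of helpers over a dedicated ioremap()ed region. The shape of that pattern, sketched with illustrative names:

	#include <linux/io.h>
	#include <linux/types.h>

	struct demo_nic {
		void __iomem *smem;	/* separately mapped, uncacheable */
	};

	static inline u32 demo_readd(struct demo_nic *nic, unsigned int reg)
	{
		/* raw accessor: no byte-swap, no barrier; mapping is uncached */
		return __raw_readl(nic->smem + reg);
	}

	static inline void demo_writed(struct demo_nic *nic, u32 value,
				       unsigned int reg)
	{
		__raw_writel(value, nic->smem + reg);
	}
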
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index 10f1cb79c147..9b29a8d7c449 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c | |||
@@ -1937,6 +1937,13 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) | |||
1937 | 1937 | ||
1938 | size = min_t(size_t, table->step, 16); | 1938 | size = min_t(size_t, table->step, 16); |
1939 | 1939 | ||
1940 | if (table->offset >= efx->type->mem_map_size) { | ||
1941 | /* No longer mapped; return dummy data */ | ||
1942 | memcpy(buf, "\xde\xc0\xad\xde", 4); | ||
1943 | buf += table->rows * size; | ||
1944 | continue; | ||
1945 | } | ||
1946 | |||
1940 | for (i = 0; i < table->rows; i++) { | 1947 | for (i = 0; i < table->rows; i++) { |
1941 | switch (table->step) { | 1948 | switch (table->step) { |
1942 | case 4: /* 32-bit register or SRAM */ | 1949 | case 4: /* 32-bit register or SRAM */ |
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index a42db6e35be3..d91701abd331 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h | |||
@@ -143,10 +143,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) | |||
143 | /** | 143 | /** |
144 | * struct siena_nic_data - Siena NIC state | 144 | * struct siena_nic_data - Siena NIC state |
145 | * @mcdi: Management-Controller-to-Driver Interface | 145 | * @mcdi: Management-Controller-to-Driver Interface |
146 | * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. | ||
146 | * @wol_filter_id: Wake-on-LAN packet filter id | 147 | * @wol_filter_id: Wake-on-LAN packet filter id |
147 | */ | 148 | */ |
148 | struct siena_nic_data { | 149 | struct siena_nic_data { |
149 | struct efx_mcdi_iface mcdi; | 150 | struct efx_mcdi_iface mcdi; |
151 | void __iomem *mcdi_smem; | ||
150 | int wol_filter_id; | 152 | int wol_filter_id; |
151 | }; | 153 | }; |
152 | 154 | ||
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index e4dd8986b1fe..837869b71db9 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c | |||
@@ -220,12 +220,26 @@ static int siena_probe_nic(struct efx_nic *efx) | |||
220 | efx_reado(efx, ®, FR_AZ_CS_DEBUG); | 220 | efx_reado(efx, ®, FR_AZ_CS_DEBUG); |
221 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; | 221 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; |
222 | 222 | ||
223 | /* Initialise MCDI */ | ||
224 | nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + | ||
225 | FR_CZ_MC_TREG_SMEM, | ||
226 | FR_CZ_MC_TREG_SMEM_STEP * | ||
227 | FR_CZ_MC_TREG_SMEM_ROWS); | ||
228 | if (!nic_data->mcdi_smem) { | ||
229 | netif_err(efx, probe, efx->net_dev, | ||
230 | "could not map MCDI at %llx+%x\n", | ||
231 | (unsigned long long)efx->membase_phys + | ||
232 | FR_CZ_MC_TREG_SMEM, | ||
233 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); | ||
234 | rc = -ENOMEM; | ||
235 | goto fail1; | ||
236 | } | ||
223 | efx_mcdi_init(efx); | 237 | efx_mcdi_init(efx); |
224 | 238 | ||
225 | /* Recover from a failed assertion before probing */ | 239 | /* Recover from a failed assertion before probing */ |
226 | rc = efx_mcdi_handle_assertion(efx); | 240 | rc = efx_mcdi_handle_assertion(efx); |
227 | if (rc) | 241 | if (rc) |
228 | goto fail1; | 242 | goto fail2; |
229 | 243 | ||
230 | /* Let the BMC know that the driver is now in charge of link and | 244 | /* Let the BMC know that the driver is now in charge of link and |
231 | * filter settings. We must do this before we reset the NIC */ | 245 | * filter settings. We must do this before we reset the NIC */ |
@@ -280,6 +294,7 @@ fail4: | |||
280 | fail3: | 294 | fail3: |
281 | efx_mcdi_drv_attach(efx, false, NULL); | 295 | efx_mcdi_drv_attach(efx, false, NULL); |
282 | fail2: | 296 | fail2: |
297 | iounmap(nic_data->mcdi_smem); | ||
283 | fail1: | 298 | fail1: |
284 | kfree(efx->nic_data); | 299 | kfree(efx->nic_data); |
285 | return rc; | 300 | return rc; |
@@ -359,6 +374,8 @@ static int siena_init_nic(struct efx_nic *efx) | |||
359 | 374 | ||
360 | static void siena_remove_nic(struct efx_nic *efx) | 375 | static void siena_remove_nic(struct efx_nic *efx) |
361 | { | 376 | { |
377 | struct siena_nic_data *nic_data = efx->nic_data; | ||
378 | |||
362 | efx_nic_free_buffer(efx, &efx->irq_status); | 379 | efx_nic_free_buffer(efx, &efx->irq_status); |
363 | 380 | ||
364 | siena_reset_hw(efx, RESET_TYPE_ALL); | 381 | siena_reset_hw(efx, RESET_TYPE_ALL); |
@@ -368,7 +385,8 @@ static void siena_remove_nic(struct efx_nic *efx) | |||
368 | efx_mcdi_drv_attach(efx, false, NULL); | 385 | efx_mcdi_drv_attach(efx, false, NULL); |
369 | 386 | ||
370 | /* Tear down the private nic state */ | 387 | /* Tear down the private nic state */ |
371 | kfree(efx->nic_data); | 388 | iounmap(nic_data->mcdi_smem); |
389 | kfree(nic_data); | ||
372 | efx->nic_data = NULL; | 390 | efx->nic_data = NULL; |
373 | } | 391 | } |
374 | 392 | ||
@@ -606,8 +624,7 @@ struct efx_nic_type siena_a0_nic_type = { | |||
606 | .default_mac_ops = &efx_mcdi_mac_operations, | 624 | .default_mac_ops = &efx_mcdi_mac_operations, |
607 | 625 | ||
608 | .revision = EFX_REV_SIENA_A0, | 626 | .revision = EFX_REV_SIENA_A0, |
609 | .mem_map_size = (FR_CZ_MC_TREG_SMEM + | 627 | .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ |
610 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), | ||
611 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, | 628 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, |
612 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, | 629 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, |
613 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, | 630 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, |
diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 86cbb9ea2f26..8ec1a9a0bb9a 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c | |||
@@ -853,7 +853,9 @@ static int slip_open(struct tty_struct *tty) | |||
853 | /* Done. We have linked the TTY line to a channel. */ | 853 | /* Done. We have linked the TTY line to a channel. */ |
854 | rtnl_unlock(); | 854 | rtnl_unlock(); |
855 | tty->receive_room = 65536; /* We don't flow control */ | 855 | tty->receive_room = 65536; /* We don't flow control */ |
856 | return sl->dev->base_addr; | 856 | |
857 | /* TTY layer expects 0 on success */ | ||
858 | return 0; | ||
857 | 859 | ||
858 | err_free_bufs: | 860 | err_free_bufs: |
859 | sl_free_bufs(sl); | 861 | sl_free_bufs(sl); |
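
The slip change is a one-liner with a contract behind it: a tty line discipline's ->open() must return 0 on success, because the tty layer interprets any nonzero value as an errno. Sketched against that contract (names illustrative):

	#include <linux/tty.h>

	static int demo_ldisc_open(struct tty_struct *tty)
	{
		/* ... allocate per-line state, attach via tty->disc_data ... */
		tty->receive_room = 65536;	/* no flow control */
		return 0;	/* never a driver-private value such as base_addr */
	}
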
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index eb4f59fb01e9..bff2f7999ff0 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c | |||
@@ -3237,15 +3237,18 @@ static void happy_meal_pci_exit(void) | |||
3237 | #endif | 3237 | #endif |
3238 | 3238 | ||
3239 | #ifdef CONFIG_SBUS | 3239 | #ifdef CONFIG_SBUS |
3240 | static const struct of_device_id hme_sbus_match[]; | ||
3240 | static int __devinit hme_sbus_probe(struct platform_device *op) | 3241 | static int __devinit hme_sbus_probe(struct platform_device *op) |
3241 | { | 3242 | { |
3243 | const struct of_device_id *match; | ||
3242 | struct device_node *dp = op->dev.of_node; | 3244 | struct device_node *dp = op->dev.of_node; |
3243 | const char *model = of_get_property(dp, "model", NULL); | 3245 | const char *model = of_get_property(dp, "model", NULL); |
3244 | int is_qfe; | 3246 | int is_qfe; |
3245 | 3247 | ||
3246 | if (!op->dev.of_match) | 3248 | match = of_match_device(hme_sbus_match, &op->dev); |
3249 | if (!match) | ||
3247 | return -EINVAL; | 3250 | return -EINVAL; |
3248 | is_qfe = (op->dev.of_match->data != NULL); | 3251 | is_qfe = (match->data != NULL); |
3249 | 3252 | ||
3250 | if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) | 3253 | if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) |
3251 | is_qfe = 1; | 3254 | is_qfe = 1; |
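
The sunhme fix follows the removal of dev->of_match from struct device: drivers must now call of_match_device() against their own match table, which is why the table gains a forward declaration above the probe routine. The idiom as a sketch with hypothetical names:

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	static const struct of_device_id demo_match[];	/* defined below */

	static int demo_probe(struct platform_device *op)
	{
		const struct of_device_id *match;

		match = of_match_device(demo_match, &op->dev);
		if (!match)
			return -EINVAL;
		/* match->data carries the per-variant private data */
		dev_info(&op->dev, "variant data %p\n", match->data);
		return 0;
	}

	static const struct of_device_id demo_match[] = {
		{ .name = "demo", .data = (void *)1, },
		{ /* sentinel */ },
	};
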
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index a301479ecc60..c924ea2bce07 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -567,7 +567,7 @@ static const struct usb_device_id products [] = { | |||
567 | { | 567 | { |
568 | USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, | 568 | USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, |
569 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | 569 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), |
570 | .driver_info = 0, | 570 | .driver_info = (unsigned long)&wwan_info, |
571 | }, | 571 | }, |
572 | 572 | ||
573 | /* | 573 | /* |
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 7d42f9a2c068..81126ff85e05 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c | |||
@@ -65,6 +65,7 @@ | |||
65 | #define IPHETH_USBINTF_PROTO 1 | 65 | #define IPHETH_USBINTF_PROTO 1 |
66 | 66 | ||
67 | #define IPHETH_BUF_SIZE 1516 | 67 | #define IPHETH_BUF_SIZE 1516 |
68 | #define IPHETH_IP_ALIGN 2 /* padding at front of URB */ | ||
68 | #define IPHETH_TX_TIMEOUT (5 * HZ) | 69 | #define IPHETH_TX_TIMEOUT (5 * HZ) |
69 | 70 | ||
70 | #define IPHETH_INTFNUM 2 | 71 | #define IPHETH_INTFNUM 2 |
@@ -202,18 +203,21 @@ static void ipheth_rcvbulk_callback(struct urb *urb) | |||
202 | return; | 203 | return; |
203 | } | 204 | } |
204 | 205 | ||
205 | len = urb->actual_length; | 206 | if (urb->actual_length <= IPHETH_IP_ALIGN) { |
206 | buf = urb->transfer_buffer; | 207 | dev->net->stats.rx_length_errors++; |
208 | return; | ||
209 | } | ||
210 | len = urb->actual_length - IPHETH_IP_ALIGN; | ||
211 | buf = urb->transfer_buffer + IPHETH_IP_ALIGN; | ||
207 | 212 | ||
208 | skb = dev_alloc_skb(NET_IP_ALIGN + len); | 213 | skb = dev_alloc_skb(len); |
209 | if (!skb) { | 214 | if (!skb) { |
210 | err("%s: dev_alloc_skb: -ENOMEM", __func__); | 215 | err("%s: dev_alloc_skb: -ENOMEM", __func__); |
211 | dev->net->stats.rx_dropped++; | 216 | dev->net->stats.rx_dropped++; |
212 | return; | 217 | return; |
213 | } | 218 | } |
214 | 219 | ||
215 | skb_reserve(skb, NET_IP_ALIGN); | 220 | memcpy(skb_put(skb, len), buf, len); |
216 | memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN); | ||
217 | skb->dev = dev->net; | 221 | skb->dev = dev->net; |
218 | skb->protocol = eth_type_trans(skb, dev->net); | 222 | skb->protocol = eth_type_trans(skb, dev->net); |
219 | 223 | ||
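
The ipheth receive fix accounts for two bytes of device-supplied padding at the front of every URB, instead of the host-side NET_IP_ALIGN reserve, and rejects URBs too short to contain anything else. A reduced sketch of the fixed path (demo_* names are illustrative):

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <linux/usb.h>

	#define DEMO_IP_ALIGN 2		/* padding at front of URB */

	static void demo_rx(struct net_device *net, struct urb *urb)
	{
		struct sk_buff *skb;
		int len;

		if (urb->actual_length <= DEMO_IP_ALIGN) {
			net->stats.rx_length_errors++;	/* nothing but padding */
			return;
		}
		len = urb->actual_length - DEMO_IP_ALIGN;

		skb = dev_alloc_skb(len);
		if (!skb) {
			net->stats.rx_dropped++;
			return;
		}

		memcpy(skb_put(skb, len),
		       (u8 *)urb->transfer_buffer + DEMO_IP_ALIGN, len);
		skb->protocol = eth_type_trans(skb, net);
		netif_rx(skb);
	}
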
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 009bba3d753e..9ab439d144ed 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -645,6 +645,7 @@ int usbnet_stop (struct net_device *net) | |||
645 | struct driver_info *info = dev->driver_info; | 645 | struct driver_info *info = dev->driver_info; |
646 | int retval; | 646 | int retval; |
647 | 647 | ||
648 | clear_bit(EVENT_DEV_OPEN, &dev->flags); | ||
648 | netif_stop_queue (net); | 649 | netif_stop_queue (net); |
649 | 650 | ||
650 | netif_info(dev, ifdown, dev->net, | 651 | netif_info(dev, ifdown, dev->net, |
@@ -1524,9 +1525,12 @@ int usbnet_resume (struct usb_interface *intf) | |||
1524 | smp_mb(); | 1525 | smp_mb(); |
1525 | clear_bit(EVENT_DEV_ASLEEP, &dev->flags); | 1526 | clear_bit(EVENT_DEV_ASLEEP, &dev->flags); |
1526 | spin_unlock_irq(&dev->txq.lock); | 1527 | spin_unlock_irq(&dev->txq.lock); |
1527 | if (!(dev->txq.qlen >= TX_QLEN(dev))) | 1528 | |
1528 | netif_start_queue(dev->net); | 1529 | if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { |
1529 | tasklet_schedule (&dev->bh); | 1530 | if (!(dev->txq.qlen >= TX_QLEN(dev))) |
1531 | netif_start_queue(dev->net); | ||
1532 | tasklet_schedule (&dev->bh); | ||
1533 | } | ||
1530 | } | 1534 | } |
1531 | return 0; | 1535 | return 0; |
1532 | } | 1536 | } |
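
The usbnet change pairs a clear_bit() in stop() with a test_bit() in resume(), so an interface that was closed while suspended is not spuriously restarted on resume. The interlock reduced to a sketch (the bit index here is arbitrary, not usbnet's):

	#include <linux/bitops.h>

	#define DEMO_DEV_OPEN	0	/* illustrative bit index */

	static unsigned long demo_flags;

	static void demo_stop(void)
	{
		clear_bit(DEMO_DEV_OPEN, &demo_flags);
		/* ... stop queues, kill URBs ... */
	}

	static void demo_resume(void)
	{
		if (test_bit(DEMO_DEV_OPEN, &demo_flags)) {
			/* only a still-open interface restarts its TX
			 * queue and reschedules its bottom half */
		}
	}
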
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 0d47c3a05307..c16ed961153a 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -178,6 +178,7 @@ static void | |||
178 | vmxnet3_process_events(struct vmxnet3_adapter *adapter) | 178 | vmxnet3_process_events(struct vmxnet3_adapter *adapter) |
179 | { | 179 | { |
180 | int i; | 180 | int i; |
181 | unsigned long flags; | ||
181 | u32 events = le32_to_cpu(adapter->shared->ecr); | 182 | u32 events = le32_to_cpu(adapter->shared->ecr); |
182 | if (!events) | 183 | if (!events) |
183 | return; | 184 | return; |
@@ -190,10 +191,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) | |||
190 | 191 | ||
191 | /* Check if there is an error on xmit/recv queues */ | 192 | /* Check if there is an error on xmit/recv queues */ |
192 | if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { | 193 | if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { |
193 | spin_lock(&adapter->cmd_lock); | 194 | spin_lock_irqsave(&adapter->cmd_lock, flags); |
194 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 195 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
195 | VMXNET3_CMD_GET_QUEUE_STATUS); | 196 | VMXNET3_CMD_GET_QUEUE_STATUS); |
196 | spin_unlock(&adapter->cmd_lock); | 197 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); |
197 | 198 | ||
198 | for (i = 0; i < adapter->num_tx_queues; i++) | 199 | for (i = 0; i < adapter->num_tx_queues; i++) |
199 | if (adapter->tqd_start[i].status.stopped) | 200 | if (adapter->tqd_start[i].status.stopped) |
@@ -2733,13 +2734,14 @@ static void | |||
2733 | vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) | 2734 | vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) |
2734 | { | 2735 | { |
2735 | u32 cfg; | 2736 | u32 cfg; |
2737 | unsigned long flags; | ||
2736 | 2738 | ||
2737 | /* intr settings */ | 2739 | /* intr settings */ |
2738 | spin_lock(&adapter->cmd_lock); | 2740 | spin_lock_irqsave(&adapter->cmd_lock, flags); |
2739 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2741 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2740 | VMXNET3_CMD_GET_CONF_INTR); | 2742 | VMXNET3_CMD_GET_CONF_INTR); |
2741 | cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | 2743 | cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); |
2742 | spin_unlock(&adapter->cmd_lock); | 2744 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); |
2743 | adapter->intr.type = cfg & 0x3; | 2745 | adapter->intr.type = cfg & 0x3; |
2744 | adapter->intr.mask_mode = (cfg >> 2) & 0x3; | 2746 | adapter->intr.mask_mode = (cfg >> 2) & 0x3; |
2745 | 2747 | ||
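
Both vmxnet3_drv.c hunks make the same correction: cmd_lock is also taken on an interrupt path, so process-context users must disable local interrupts while holding it, or the interrupt can deadlock against the lock on the same CPU. The rule as a sketch:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_cmd_lock);

	static void demo_issue_cmd(void)
	{
		unsigned long flags;

		/* plain spin_lock() here would deadlock if the IRQ handler
		 * that also takes demo_cmd_lock fired on this CPU */
		spin_lock_irqsave(&demo_cmd_lock, flags);
		/* ... write command register, read back status ... */
		spin_unlock_irqrestore(&demo_cmd_lock, flags);
	}
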
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 51f2ef142a5b..976467253d20 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c | |||
@@ -311,6 +311,9 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) | |||
311 | /* toggle the LRO feature*/ | 311 | /* toggle the LRO feature*/ |
312 | netdev->features ^= NETIF_F_LRO; | 312 | netdev->features ^= NETIF_F_LRO; |
313 | 313 | ||
314 | /* Update private LRO flag */ | ||
315 | adapter->lro = lro_requested; | ||
316 | |||
314 | /* update harware LRO capability accordingly */ | 317 | /* update harware LRO capability accordingly */ |
315 | if (lro_requested) | 318 | if (lro_requested) |
316 | adapter->shared->devRead.misc.uptFeatures |= | 319 | adapter->shared->devRead.misc.uptFeatures |= |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 17d04ff8d678..1482fa650833 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -2141,6 +2141,8 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) | |||
2141 | static void ath9k_flush(struct ieee80211_hw *hw, bool drop) | 2141 | static void ath9k_flush(struct ieee80211_hw *hw, bool drop) |
2142 | { | 2142 | { |
2143 | struct ath_softc *sc = hw->priv; | 2143 | struct ath_softc *sc = hw->priv; |
2144 | struct ath_hw *ah = sc->sc_ah; | ||
2145 | struct ath_common *common = ath9k_hw_common(ah); | ||
2144 | int timeout = 200; /* ms */ | 2146 | int timeout = 200; /* ms */ |
2145 | int i, j; | 2147 | int i, j; |
2146 | 2148 | ||
@@ -2149,6 +2151,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) | |||
2149 | 2151 | ||
2150 | cancel_delayed_work_sync(&sc->tx_complete_work); | 2152 | cancel_delayed_work_sync(&sc->tx_complete_work); |
2151 | 2153 | ||
2154 | if (sc->sc_flags & SC_OP_INVALID) { | ||
2155 | ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); | ||
2156 | mutex_unlock(&sc->mutex); | ||
2157 | return; | ||
2158 | } | ||
2159 | |||
2152 | if (drop) | 2160 | if (drop) |
2153 | timeout = 1; | 2161 | timeout = 1; |
2154 | 2162 | ||
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c index c1511b14b239..42db0fc8b921 100644 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c | |||
@@ -2155,6 +2155,13 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed) | |||
2155 | goto set_ch_out; | 2155 | goto set_ch_out; |
2156 | } | 2156 | } |
2157 | 2157 | ||
2158 | if (priv->iw_mode == NL80211_IFTYPE_ADHOC && | ||
2159 | !iwl_legacy_is_channel_ibss(ch_info)) { | ||
2160 | IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n"); | ||
2161 | ret = -EINVAL; | ||
2162 | goto set_ch_out; | ||
2163 | } | ||
2164 | |||
2158 | spin_lock_irqsave(&priv->lock, flags); | 2165 | spin_lock_irqsave(&priv->lock, flags); |
2159 | 2166 | ||
2160 | for_each_context(priv, ctx) { | 2167 | for_each_context(priv, ctx) { |
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h index 9ee849d669f3..f43ac1eb9014 100644 --- a/drivers/net/wireless/iwlegacy/iwl-dev.h +++ b/drivers/net/wireless/iwlegacy/iwl-dev.h | |||
@@ -1411,6 +1411,12 @@ iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch) | |||
1411 | return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; | 1411 | return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; |
1412 | } | 1412 | } |
1413 | 1413 | ||
1414 | static inline int | ||
1415 | iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch) | ||
1416 | { | ||
1417 | return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0; | ||
1418 | } | ||
1419 | |||
1414 | static inline void | 1420 | static inline void |
1415 | __iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) | 1421 | __iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) |
1416 | { | 1422 | { |
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 7e8a658b7670..f3ac62431a30 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c | |||
@@ -1339,8 +1339,8 @@ int lbs_execute_next_command(struct lbs_private *priv) | |||
1339 | cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { | 1339 | cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { |
1340 | lbs_deb_host( | 1340 | lbs_deb_host( |
1341 | "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); | 1341 | "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); |
1342 | list_del(&cmdnode->list); | ||
1343 | spin_lock_irqsave(&priv->driver_lock, flags); | 1342 | spin_lock_irqsave(&priv->driver_lock, flags); |
1343 | list_del(&cmdnode->list); | ||
1344 | lbs_complete_command(priv, cmdnode, 0); | 1344 | lbs_complete_command(priv, cmdnode, 0); |
1345 | spin_unlock_irqrestore(&priv->driver_lock, flags); | 1345 | spin_unlock_irqrestore(&priv->driver_lock, flags); |
1346 | 1346 | ||
@@ -1352,8 +1352,8 @@ int lbs_execute_next_command(struct lbs_private *priv) | |||
1352 | (priv->psstate == PS_STATE_PRE_SLEEP)) { | 1352 | (priv->psstate == PS_STATE_PRE_SLEEP)) { |
1353 | lbs_deb_host( | 1353 | lbs_deb_host( |
1354 | "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); | 1354 | "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); |
1355 | list_del(&cmdnode->list); | ||
1356 | spin_lock_irqsave(&priv->driver_lock, flags); | 1355 | spin_lock_irqsave(&priv->driver_lock, flags); |
1356 | list_del(&cmdnode->list); | ||
1357 | lbs_complete_command(priv, cmdnode, 0); | 1357 | lbs_complete_command(priv, cmdnode, 0); |
1358 | spin_unlock_irqrestore(&priv->driver_lock, flags); | 1358 | spin_unlock_irqrestore(&priv->driver_lock, flags); |
1359 | priv->needtowakeup = 1; | 1359 | priv->needtowakeup = 1; |
@@ -1366,7 +1366,9 @@ int lbs_execute_next_command(struct lbs_private *priv) | |||
1366 | "EXEC_NEXT_CMD: sending EXIT_PS\n"); | 1366 | "EXEC_NEXT_CMD: sending EXIT_PS\n"); |
1367 | } | 1367 | } |
1368 | } | 1368 | } |
1369 | spin_lock_irqsave(&priv->driver_lock, flags); | ||
1369 | list_del(&cmdnode->list); | 1370 | list_del(&cmdnode->list); |
1371 | spin_unlock_irqrestore(&priv->driver_lock, flags); | ||
1370 | lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", | 1372 | lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", |
1371 | le16_to_cpu(cmd->command)); | 1373 | le16_to_cpu(cmd->command)); |
1372 | lbs_submit_command(priv, cmdnode); | 1374 | lbs_submit_command(priv, cmdnode); |
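
All three libertas hunks move list_del() inside driver_lock: a node must not be unlinked from a list that another context can traverse unless the lock protecting that traversal is held. The invariant as a sketch:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_cmd {
		struct list_head list;
	};

	static LIST_HEAD(demo_queue);
	static DEFINE_SPINLOCK(demo_lock);

	static void demo_dequeue(struct demo_cmd *cmd)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		list_del(&cmd->list);	/* atomic w.r.t. every other list user */
		spin_unlock_irqrestore(&demo_lock, flags);
	}
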
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index b78a38d9172a..8c7c522a056a 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c | |||
@@ -126,7 +126,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, | |||
126 | 126 | ||
127 | board = z->resource.start; | 127 | board = z->resource.start; |
128 | ioaddr = board+cards[i].offset; | 128 | ioaddr = board+cards[i].offset; |
129 | dev = alloc_ei_netdev(); | 129 | dev = ____alloc_ei_netdev(0); |
130 | if (!dev) | 130 | if (!dev) |
131 | return -ENOMEM; | 131 | return -ENOMEM; |
132 | if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { | 132 | if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { |
@@ -146,15 +146,15 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, | |||
146 | static const struct net_device_ops zorro8390_netdev_ops = { | 146 | static const struct net_device_ops zorro8390_netdev_ops = { |
147 | .ndo_open = zorro8390_open, | 147 | .ndo_open = zorro8390_open, |
148 | .ndo_stop = zorro8390_close, | 148 | .ndo_stop = zorro8390_close, |
149 | .ndo_start_xmit = ei_start_xmit, | 149 | .ndo_start_xmit = __ei_start_xmit, |
150 | .ndo_tx_timeout = ei_tx_timeout, | 150 | .ndo_tx_timeout = __ei_tx_timeout, |
151 | .ndo_get_stats = ei_get_stats, | 151 | .ndo_get_stats = __ei_get_stats, |
152 | .ndo_set_multicast_list = ei_set_multicast_list, | 152 | .ndo_set_multicast_list = __ei_set_multicast_list, |
153 | .ndo_validate_addr = eth_validate_addr, | 153 | .ndo_validate_addr = eth_validate_addr, |
154 | .ndo_set_mac_address = eth_mac_addr, | 154 | .ndo_set_mac_address = eth_mac_addr, |
155 | .ndo_change_mtu = eth_change_mtu, | 155 | .ndo_change_mtu = eth_change_mtu, |
156 | #ifdef CONFIG_NET_POLL_CONTROLLER | 156 | #ifdef CONFIG_NET_POLL_CONTROLLER |
157 | .ndo_poll_controller = ei_poll, | 157 | .ndo_poll_controller = __ei_poll, |
158 | #endif | 158 | #endif |
159 | }; | 159 | }; |
160 | 160 | ||
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index d552d2c77844..6af6b628175b 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/syscore_ops.h> | 39 | #include <linux/syscore_ops.h> |
40 | #include <linux/tboot.h> | 40 | #include <linux/tboot.h> |
41 | #include <linux/dmi.h> | 41 | #include <linux/dmi.h> |
42 | #include <linux/pci-ats.h> | ||
42 | #include <asm/cacheflush.h> | 43 | #include <asm/cacheflush.h> |
43 | #include <asm/iommu.h> | 44 | #include <asm/iommu.h> |
44 | #include "pci.h" | 45 | #include "pci.h" |
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 553d8ee55c1c..42fae4776515 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/pci-ats.h> | ||
16 | #include "pci.h" | 17 | #include "pci.h" |
17 | 18 | ||
18 | #define VIRTFN_ID_LEN 16 | 19 | #define VIRTFN_ID_LEN 16 |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index a6ec200fe5ee..4020025f854e 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -250,15 +250,6 @@ struct pci_sriov { | |||
250 | u8 __iomem *mstate; /* VF Migration State Array */ | 250 | u8 __iomem *mstate; /* VF Migration State Array */ |
251 | }; | 251 | }; |
252 | 252 | ||
253 | /* Address Translation Service */ | ||
254 | struct pci_ats { | ||
255 | int pos; /* capability position */ | ||
256 | int stu; /* Smallest Translation Unit */ | ||
257 | int qdep; /* Invalidate Queue Depth */ | ||
258 | int ref_cnt; /* Physical Function reference count */ | ||
259 | unsigned int is_enabled:1; /* Enable bit is set */ | ||
260 | }; | ||
261 | |||
262 | #ifdef CONFIG_PCI_IOV | 253 | #ifdef CONFIG_PCI_IOV |
263 | extern int pci_iov_init(struct pci_dev *dev); | 254 | extern int pci_iov_init(struct pci_dev *dev); |
264 | extern void pci_iov_release(struct pci_dev *dev); | 255 | extern void pci_iov_release(struct pci_dev *dev); |
@@ -269,19 +260,6 @@ extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, | |||
269 | extern void pci_restore_iov_state(struct pci_dev *dev); | 260 | extern void pci_restore_iov_state(struct pci_dev *dev); |
270 | extern int pci_iov_bus_range(struct pci_bus *bus); | 261 | extern int pci_iov_bus_range(struct pci_bus *bus); |
271 | 262 | ||
272 | extern int pci_enable_ats(struct pci_dev *dev, int ps); | ||
273 | extern void pci_disable_ats(struct pci_dev *dev); | ||
274 | extern int pci_ats_queue_depth(struct pci_dev *dev); | ||
275 | /** | ||
276 | * pci_ats_enabled - query the ATS status | ||
277 | * @dev: the PCI device | ||
278 | * | ||
279 | * Returns 1 if ATS capability is enabled, or 0 if not. | ||
280 | */ | ||
281 | static inline int pci_ats_enabled(struct pci_dev *dev) | ||
282 | { | ||
283 | return dev->ats && dev->ats->is_enabled; | ||
284 | } | ||
285 | #else | 263 | #else |
286 | static inline int pci_iov_init(struct pci_dev *dev) | 264 | static inline int pci_iov_init(struct pci_dev *dev) |
287 | { | 265 | { |
@@ -304,21 +282,6 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) | |||
304 | return 0; | 282 | return 0; |
305 | } | 283 | } |
306 | 284 | ||
307 | static inline int pci_enable_ats(struct pci_dev *dev, int ps) | ||
308 | { | ||
309 | return -ENODEV; | ||
310 | } | ||
311 | static inline void pci_disable_ats(struct pci_dev *dev) | ||
312 | { | ||
313 | } | ||
314 | static inline int pci_ats_queue_depth(struct pci_dev *dev) | ||
315 | { | ||
316 | return -ENODEV; | ||
317 | } | ||
318 | static inline int pci_ats_enabled(struct pci_dev *dev) | ||
319 | { | ||
320 | return 0; | ||
321 | } | ||
322 | #endif /* CONFIG_PCI_IOV */ | 285 | #endif /* CONFIG_PCI_IOV */ |
323 | 286 | ||
324 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, | 287 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index ebf51ad1b714..a806cb321d2e 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -579,7 +579,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
579 | } | 579 | } |
580 | size0 = calculate_iosize(size, min_size, size1, | 580 | size0 = calculate_iosize(size, min_size, size1, |
581 | resource_size(b_res), 4096); | 581 | resource_size(b_res), 4096); |
582 | size1 = !add_size? size0: | 582 | size1 = (!add_head || (add_head && !add_size)) ? size0 : |
583 | calculate_iosize(size, min_size+add_size, size1, | 583 | calculate_iosize(size, min_size+add_size, size1, |
584 | resource_size(b_res), 4096); | 584 | resource_size(b_res), 4096); |
585 | if (!size0 && !size1) { | 585 | if (!size0 && !size1) { |
@@ -677,7 +677,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
677 | align += aligns[order]; | 677 | align += aligns[order]; |
678 | } | 678 | } |
679 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); | 679 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); |
680 | size1 = !add_size ? size : | 680 | size1 = (!add_head || (add_head && !add_size)) ? size0 : |
681 | calculate_memsize(size, min_size+add_size, 0, | 681 | calculate_memsize(size, min_size+add_size, 0, |
682 | resource_size(b_res), min_align); | 682 | resource_size(b_res), min_align); |
683 | if (!size0 && !size1) { | 683 | if (!size0 && !size1) { |
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index ac2701b22e71..043ee3136e40 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c | |||
@@ -95,6 +95,9 @@ idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, | |||
95 | else | 95 | else |
96 | table++; | 96 | table++; |
97 | 97 | ||
98 | if (route_port == RIO_INVALID_ROUTE) | ||
99 | route_port = IDT_DEFAULT_ROUTE; | ||
100 | |||
98 | rio_mport_write_config_32(mport, destid, hopcount, | 101 | rio_mport_write_config_32(mport, destid, hopcount, |
99 | LOCAL_RTE_CONF_DESTID_SEL, table); | 102 | LOCAL_RTE_CONF_DESTID_SEL, table); |
100 | 103 | ||
@@ -411,6 +414,12 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum) | |||
411 | rdev->rswitch->em_handle = idtg2_em_handler; | 414 | rdev->rswitch->em_handle = idtg2_em_handler; |
412 | rdev->rswitch->sw_sysfs = idtg2_sysfs; | 415 | rdev->rswitch->sw_sysfs = idtg2_sysfs; |
413 | 416 | ||
417 | if (do_enum) { | ||
418 | /* Ensure that default routing is disabled on startup */ | ||
419 | rio_write_config_32(rdev, | ||
420 | RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); | ||
421 | } | ||
422 | |||
414 | return 0; | 423 | return 0; |
415 | } | 424 | } |
416 | 425 | ||
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c index 3a971077e7bf..d06ee2d44b44 100644 --- a/drivers/rapidio/switches/idtcps.c +++ b/drivers/rapidio/switches/idtcps.c | |||
@@ -26,6 +26,9 @@ idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, | |||
26 | { | 26 | { |
27 | u32 result; | 27 | u32 result; |
28 | 28 | ||
29 | if (route_port == RIO_INVALID_ROUTE) | ||
30 | route_port = CPS_DEFAULT_ROUTE; | ||
31 | |||
29 | if (table == RIO_GLOBAL_TABLE) { | 32 | if (table == RIO_GLOBAL_TABLE) { |
30 | rio_mport_write_config_32(mport, destid, hopcount, | 33 | rio_mport_write_config_32(mport, destid, hopcount, |
31 | RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); | 34 | RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); |
@@ -130,6 +133,9 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) | |||
130 | /* set TVAL = ~50us */ | 133 | /* set TVAL = ~50us */ |
131 | rio_write_config_32(rdev, | 134 | rio_write_config_32(rdev, |
132 | rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); | 135 | rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); |
136 | /* Ensure that default routing is disabled on startup */ | ||
137 | rio_write_config_32(rdev, | ||
138 | RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); | ||
133 | } | 139 | } |
134 | 140 | ||
135 | return 0; | 141 | return 0; |
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c index 1a62934bfebc..db8b8028988d 100644 --- a/drivers/rapidio/switches/tsi57x.c +++ b/drivers/rapidio/switches/tsi57x.c | |||
@@ -303,6 +303,12 @@ static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum) | |||
303 | rdev->rswitch->em_init = tsi57x_em_init; | 303 | rdev->rswitch->em_init = tsi57x_em_init; |
304 | rdev->rswitch->em_handle = tsi57x_em_handler; | 304 | rdev->rswitch->em_handle = tsi57x_em_handler; |
305 | 305 | ||
306 | if (do_enum) { | ||
307 | /* Ensure that default routing is disabled on startup */ | ||
308 | rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, | ||
309 | RIO_INVALID_ROUTE); | ||
310 | } | ||
311 | |||
306 | return 0; | 312 | return 0; |
307 | } | 313 | } |
308 | 314 | ||
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index e1878877399c..42891726ea72 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -3,10 +3,10 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | config RTC_LIB | 5 | config RTC_LIB |
6 | tristate | 6 | bool |
7 | 7 | ||
8 | menuconfig RTC_CLASS | 8 | menuconfig RTC_CLASS |
9 | tristate "Real Time Clock" | 9 | bool "Real Time Clock" |
10 | default n | 10 | default n |
11 | depends on !S390 | 11 | depends on !S390 |
12 | select RTC_LIB | 12 | select RTC_LIB |
@@ -15,9 +15,6 @@ menuconfig RTC_CLASS | |||
15 | be allowed to plug one or more RTCs to your system. You will | 15 | be allowed to plug one or more RTCs to your system. You will |
16 | probably want to enable one or more of the interfaces below. | 16 | probably want to enable one or more of the interfaces below. |
17 | 17 | ||
18 | This driver can also be built as a module. If so, the module | ||
19 | will be called rtc-core. | ||
20 | |||
21 | if RTC_CLASS | 18 | if RTC_CLASS |
22 | 19 | ||
23 | config RTC_HCTOSYS | 20 | config RTC_HCTOSYS |
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 39013867cbd6..4194e59e14cd 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c | |||
@@ -41,26 +41,21 @@ static void rtc_device_release(struct device *dev) | |||
41 | * system's wall clock; restore it on resume(). | 41 | * system's wall clock; restore it on resume(). |
42 | */ | 42 | */ |
43 | 43 | ||
44 | static struct timespec delta; | ||
45 | static time_t oldtime; | 44 | static time_t oldtime; |
45 | static struct timespec oldts; | ||
46 | 46 | ||
47 | static int rtc_suspend(struct device *dev, pm_message_t mesg) | 47 | static int rtc_suspend(struct device *dev, pm_message_t mesg) |
48 | { | 48 | { |
49 | struct rtc_device *rtc = to_rtc_device(dev); | 49 | struct rtc_device *rtc = to_rtc_device(dev); |
50 | struct rtc_time tm; | 50 | struct rtc_time tm; |
51 | struct timespec ts = current_kernel_time(); | ||
52 | 51 | ||
53 | if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) | 52 | if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) |
54 | return 0; | 53 | return 0; |
55 | 54 | ||
56 | rtc_read_time(rtc, &tm); | 55 | rtc_read_time(rtc, &tm); |
56 | ktime_get_ts(&oldts); | ||
57 | rtc_tm_to_time(&tm, &oldtime); | 57 | rtc_tm_to_time(&tm, &oldtime); |
58 | 58 | ||
59 | /* RTC precision is 1 second; adjust delta for avg 1/2 sec err */ | ||
60 | set_normalized_timespec(&delta, | ||
61 | ts.tv_sec - oldtime, | ||
62 | ts.tv_nsec - (NSEC_PER_SEC >> 1)); | ||
63 | |||
64 | return 0; | 59 | return 0; |
65 | } | 60 | } |
66 | 61 | ||
@@ -70,10 +65,12 @@ static int rtc_resume(struct device *dev) | |||
70 | struct rtc_time tm; | 65 | struct rtc_time tm; |
71 | time_t newtime; | 66 | time_t newtime; |
72 | struct timespec time; | 67 | struct timespec time; |
68 | struct timespec newts; | ||
73 | 69 | ||
74 | if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) | 70 | if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) |
75 | return 0; | 71 | return 0; |
76 | 72 | ||
73 | ktime_get_ts(&newts); | ||
77 | rtc_read_time(rtc, &tm); | 74 | rtc_read_time(rtc, &tm); |
78 | if (rtc_valid_tm(&tm) != 0) { | 75 | if (rtc_valid_tm(&tm) != 0) { |
79 | pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev)); | 76 | pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev)); |
@@ -85,15 +82,13 @@ static int rtc_resume(struct device *dev) | |||
85 | pr_debug("%s: time travel!\n", dev_name(&rtc->dev)); | 82 | pr_debug("%s: time travel!\n", dev_name(&rtc->dev)); |
86 | return 0; | 83 | return 0; |
87 | } | 84 | } |
85 | /* calculate the RTC time delta */ | ||
86 | set_normalized_timespec(&time, newtime - oldtime, 0); | ||
88 | 87 | ||
89 | /* restore wall clock using delta against this RTC; | 88 | /* subtract kernel time between rtc_suspend and rtc_resume */ |
90 | * adjust again for avg 1/2 second RTC sampling error | 89 | time = timespec_sub(time, timespec_sub(newts, oldts)); |
91 | */ | ||
92 | set_normalized_timespec(&time, | ||
93 | newtime + delta.tv_sec, | ||
94 | (NSEC_PER_SEC >> 1) + delta.tv_nsec); | ||
95 | do_settimeofday(&time); | ||
96 | 90 | ||
91 | timekeeping_inject_sleeptime(&time); | ||
97 | return 0; | 92 | return 0; |
98 | } | 93 | } |
99 | 94 | ||
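The suspend/resume rework drops the old delta that was fudged by half a second and instead samples both the RTC and the monotonic clock (ktime_get_ts()) in each hook. The time spent asleep is the RTC delta minus the kernel time that elapsed between rtc_suspend() and rtc_resume(), and it is handed to the timekeeping core rather than written back with do_settimeofday(). A condensed sketch of the arithmetic, using the variables from the hunk:

	/* Sketch of the new sleep-time accounting. */
	struct timespec sleep;

	set_normalized_timespec(&sleep, newtime - oldtime, 0); /* RTC delta */
	/* remove the portion during which the kernel was running */
	sleep = timespec_sub(sleep, timespec_sub(newts, oldts));
	timekeeping_inject_sleeptime(&sleep);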
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c index 8d46838dff8a..755e1fe914af 100644 --- a/drivers/rtc/rtc-davinci.c +++ b/drivers/rtc/rtc-davinci.c | |||
@@ -524,6 +524,8 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) | |||
524 | goto fail2; | 524 | goto fail2; |
525 | } | 525 | } |
526 | 526 | ||
527 | platform_set_drvdata(pdev, davinci_rtc); | ||
528 | |||
527 | davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, | 529 | davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, |
528 | &davinci_rtc_ops, THIS_MODULE); | 530 | &davinci_rtc_ops, THIS_MODULE); |
529 | if (IS_ERR(davinci_rtc->rtc)) { | 531 | if (IS_ERR(davinci_rtc->rtc)) { |
@@ -553,8 +555,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) | |||
553 | 555 | ||
554 | rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); | 556 | rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); |
555 | 557 | ||
556 | platform_set_drvdata(pdev, davinci_rtc); | ||
557 | |||
558 | device_init_wakeup(&pdev->dev, 0); | 558 | device_init_wakeup(&pdev->dev, 0); |
559 | 559 | ||
560 | return 0; | 560 | return 0; |
@@ -562,6 +562,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) | |||
562 | fail4: | 562 | fail4: |
563 | rtc_device_unregister(davinci_rtc->rtc); | 563 | rtc_device_unregister(davinci_rtc->rtc); |
564 | fail3: | 564 | fail3: |
565 | platform_set_drvdata(pdev, NULL); | ||
565 | iounmap(davinci_rtc->base); | 566 | iounmap(davinci_rtc->base); |
566 | fail2: | 567 | fail2: |
567 | release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size); | 568 | release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size); |
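This hunk and the similar ones that follow (ds1286, ep93xx, m41t80, max8925, max8998, msm6242, mxc, pcap, rp5c01) all close the same race: rtc_device_register() can invoke the driver's rtc_class_ops before it returns (for instance to read the time for RTC_HCTOSYS), and those callbacks look up the driver state through drvdata. If drvdata is set only after registration, an early callback dereferences NULL; so the assignment is moved ahead of the call and the error paths clear it again. A sketch of the safe ordering, with illustrative example_* names:

	/* Sketch (example_* names are illustrative, not from the tree). */
	static int __devinit example_rtc_probe(struct platform_device *pdev)
	{
		struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

		if (!priv)
			return -ENOMEM;

		platform_set_drvdata(pdev, priv);	/* before registering */

		priv->rtc = rtc_device_register(pdev->name, &pdev->dev,
						&example_rtc_ops, THIS_MODULE);
		if (IS_ERR(priv->rtc)) {
			int ret = PTR_ERR(priv->rtc);

			platform_set_drvdata(pdev, NULL);
			kfree(priv);
			return ret;
		}
		return 0;
	}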
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c index 60ce69600828..47e681df31e2 100644 --- a/drivers/rtc/rtc-ds1286.c +++ b/drivers/rtc/rtc-ds1286.c | |||
@@ -355,6 +355,7 @@ static int __devinit ds1286_probe(struct platform_device *pdev) | |||
355 | goto out; | 355 | goto out; |
356 | } | 356 | } |
357 | spin_lock_init(&priv->lock); | 357 | spin_lock_init(&priv->lock); |
358 | platform_set_drvdata(pdev, priv); | ||
358 | rtc = rtc_device_register("ds1286", &pdev->dev, | 359 | rtc = rtc_device_register("ds1286", &pdev->dev, |
359 | &ds1286_ops, THIS_MODULE); | 360 | &ds1286_ops, THIS_MODULE); |
360 | if (IS_ERR(rtc)) { | 361 | if (IS_ERR(rtc)) { |
@@ -362,7 +363,6 @@ static int __devinit ds1286_probe(struct platform_device *pdev) | |||
362 | goto out; | 363 | goto out; |
363 | } | 364 | } |
364 | priv->rtc = rtc; | 365 | priv->rtc = rtc; |
365 | platform_set_drvdata(pdev, priv); | ||
366 | return 0; | 366 | return 0; |
367 | 367 | ||
368 | out: | 368 | out: |
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c index 11ae64dcbf3c..335551d333b2 100644 --- a/drivers/rtc/rtc-ep93xx.c +++ b/drivers/rtc/rtc-ep93xx.c | |||
@@ -151,6 +151,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) | |||
151 | return -ENXIO; | 151 | return -ENXIO; |
152 | 152 | ||
153 | pdev->dev.platform_data = ep93xx_rtc; | 153 | pdev->dev.platform_data = ep93xx_rtc; |
154 | platform_set_drvdata(pdev, rtc); | ||
154 | 155 | ||
155 | rtc = rtc_device_register(pdev->name, | 156 | rtc = rtc_device_register(pdev->name, |
156 | &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); | 157 | &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); |
@@ -159,8 +160,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) | |||
159 | goto exit; | 160 | goto exit; |
160 | } | 161 | } |
161 | 162 | ||
162 | platform_set_drvdata(pdev, rtc); | ||
163 | |||
164 | err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); | 163 | err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); |
165 | if (err) | 164 | if (err) |
166 | goto fail; | 165 | goto fail; |
@@ -168,9 +167,9 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) | |||
168 | return 0; | 167 | return 0; |
169 | 168 | ||
170 | fail: | 169 | fail: |
171 | platform_set_drvdata(pdev, NULL); | ||
172 | rtc_device_unregister(rtc); | 170 | rtc_device_unregister(rtc); |
173 | exit: | 171 | exit: |
172 | platform_set_drvdata(pdev, NULL); | ||
174 | pdev->dev.platform_data = NULL; | 173 | pdev->dev.platform_data = NULL; |
175 | return err; | 174 | return err; |
176 | } | 175 | } |
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 69fe664a2228..eda128fc1d38 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c | |||
@@ -783,6 +783,9 @@ static int m41t80_probe(struct i2c_client *client, | |||
783 | goto exit; | 783 | goto exit; |
784 | } | 784 | } |
785 | 785 | ||
786 | clientdata->features = id->driver_data; | ||
787 | i2c_set_clientdata(client, clientdata); | ||
788 | |||
786 | rtc = rtc_device_register(client->name, &client->dev, | 789 | rtc = rtc_device_register(client->name, &client->dev, |
787 | &m41t80_rtc_ops, THIS_MODULE); | 790 | &m41t80_rtc_ops, THIS_MODULE); |
788 | if (IS_ERR(rtc)) { | 791 | if (IS_ERR(rtc)) { |
@@ -792,8 +795,6 @@ static int m41t80_probe(struct i2c_client *client, | |||
792 | } | 795 | } |
793 | 796 | ||
794 | clientdata->rtc = rtc; | 797 | clientdata->rtc = rtc; |
795 | clientdata->features = id->driver_data; | ||
796 | i2c_set_clientdata(client, clientdata); | ||
797 | 798 | ||
798 | /* Make sure HT (Halt Update) bit is cleared */ | 799 | /* Make sure HT (Halt Update) bit is cleared */ |
799 | rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); | 800 | rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); |
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c index 20494b5edc3c..3bc046f427e0 100644 --- a/drivers/rtc/rtc-max8925.c +++ b/drivers/rtc/rtc-max8925.c | |||
@@ -258,6 +258,8 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) | |||
258 | } | 258 | } |
259 | 259 | ||
260 | dev_set_drvdata(&pdev->dev, info); | 260 | dev_set_drvdata(&pdev->dev, info); |
261 | /* XXX - isn't this redundant? */ | ||
262 | platform_set_drvdata(pdev, info); | ||
261 | 263 | ||
262 | info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, | 264 | info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, |
263 | &max8925_rtc_ops, THIS_MODULE); | 265 | &max8925_rtc_ops, THIS_MODULE); |
@@ -267,10 +269,9 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) | |||
267 | goto out_rtc; | 269 | goto out_rtc; |
268 | } | 270 | } |
269 | 271 | ||
270 | platform_set_drvdata(pdev, info); | ||
271 | |||
272 | return 0; | 272 | return 0; |
273 | out_rtc: | 273 | out_rtc: |
274 | platform_set_drvdata(pdev, NULL); | ||
274 | free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); | 275 | free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); |
275 | out_irq: | 276 | out_irq: |
276 | kfree(info); | 277 | kfree(info); |
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c index 3f7bc6b9fefa..2e48aa604273 100644 --- a/drivers/rtc/rtc-max8998.c +++ b/drivers/rtc/rtc-max8998.c | |||
@@ -265,6 +265,8 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) | |||
265 | info->rtc = max8998->rtc; | 265 | info->rtc = max8998->rtc; |
266 | info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0; | 266 | info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0; |
267 | 267 | ||
268 | platform_set_drvdata(pdev, info); | ||
269 | |||
268 | info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev, | 270 | info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev, |
269 | &max8998_rtc_ops, THIS_MODULE); | 271 | &max8998_rtc_ops, THIS_MODULE); |
270 | 272 | ||
@@ -274,8 +276,6 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) | |||
274 | goto out_rtc; | 276 | goto out_rtc; |
275 | } | 277 | } |
276 | 278 | ||
277 | platform_set_drvdata(pdev, info); | ||
278 | |||
279 | ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, | 279 | ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, |
280 | "rtc-alarm0", info); | 280 | "rtc-alarm0", info); |
281 | 281 | ||
@@ -293,6 +293,7 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) | |||
293 | return 0; | 293 | return 0; |
294 | 294 | ||
295 | out_rtc: | 295 | out_rtc: |
296 | platform_set_drvdata(pdev, NULL); | ||
296 | kfree(info); | 297 | kfree(info); |
297 | return ret; | 298 | return ret; |
298 | } | 299 | } |
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c index c5ac03793e79..a1a278bc340d 100644 --- a/drivers/rtc/rtc-mc13xxx.c +++ b/drivers/rtc/rtc-mc13xxx.c | |||
@@ -349,11 +349,15 @@ static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev) | |||
349 | if (ret) | 349 | if (ret) |
350 | goto err_alarm_irq_request; | 350 | goto err_alarm_irq_request; |
351 | 351 | ||
352 | mc13xxx_unlock(mc13xxx); | ||
353 | |||
352 | priv->rtc = rtc_device_register(pdev->name, | 354 | priv->rtc = rtc_device_register(pdev->name, |
353 | &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); | 355 | &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); |
354 | if (IS_ERR(priv->rtc)) { | 356 | if (IS_ERR(priv->rtc)) { |
355 | ret = PTR_ERR(priv->rtc); | 357 | ret = PTR_ERR(priv->rtc); |
356 | 358 | ||
359 | mc13xxx_lock(mc13xxx); | ||
360 | |||
357 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); | 361 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); |
358 | err_alarm_irq_request: | 362 | err_alarm_irq_request: |
359 | 363 | ||
@@ -365,12 +369,12 @@ err_reset_irq_status: | |||
365 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); | 369 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); |
366 | err_reset_irq_request: | 370 | err_reset_irq_request: |
367 | 371 | ||
372 | mc13xxx_unlock(mc13xxx); | ||
373 | |||
368 | platform_set_drvdata(pdev, NULL); | 374 | platform_set_drvdata(pdev, NULL); |
369 | kfree(priv); | 375 | kfree(priv); |
370 | } | 376 | } |
371 | 377 | ||
372 | mc13xxx_unlock(mc13xxx); | ||
373 | |||
374 | return ret; | 378 | return ret; |
375 | } | 379 | } |
376 | 380 | ||
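The mc13xxx change is about lock ordering rather than drvdata: the rtc_class_ops registered here take the mc13xxx bus lock themselves, and rtc_device_register() may call into them before it returns. Registering while probe still holds the lock could therefore deadlock, so the bus is unlocked first and re-locked only where the failure path frees the IRQs. In shape:

	/* Sketch: don't hold a lock the registered ops will retake. */
	mc13xxx_unlock(mc13xxx);
	priv->rtc = rtc_device_register(pdev->name, &pdev->dev,
					&mc13xxx_rtc_ops, THIS_MODULE);
	if (IS_ERR(priv->rtc)) {
		mc13xxx_lock(mc13xxx);	/* IRQ teardown needs the lock */
		mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv);
		/* remaining error unwinding as in the hunk */
	}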
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c index 67820626e18f..fcb113c11122 100644 --- a/drivers/rtc/rtc-msm6242.c +++ b/drivers/rtc/rtc-msm6242.c | |||
@@ -214,6 +214,7 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) | |||
214 | error = -ENOMEM; | 214 | error = -ENOMEM; |
215 | goto out_free_priv; | 215 | goto out_free_priv; |
216 | } | 216 | } |
217 | platform_set_drvdata(dev, priv); | ||
217 | 218 | ||
218 | rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops, | 219 | rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops, |
219 | THIS_MODULE); | 220 | THIS_MODULE); |
@@ -223,10 +224,10 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) | |||
223 | } | 224 | } |
224 | 225 | ||
225 | priv->rtc = rtc; | 226 | priv->rtc = rtc; |
226 | platform_set_drvdata(dev, priv); | ||
227 | return 0; | 227 | return 0; |
228 | 228 | ||
229 | out_unmap: | 229 | out_unmap: |
230 | platform_set_drvdata(dev, NULL); | ||
230 | iounmap(priv->regs); | 231 | iounmap(priv->regs); |
231 | out_free_priv: | 232 | out_free_priv: |
232 | kfree(priv); | 233 | kfree(priv); |
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 826ab64a8fa9..d814417bee8c 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c | |||
@@ -418,14 +418,6 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) | |||
418 | goto exit_put_clk; | 418 | goto exit_put_clk; |
419 | } | 419 | } |
420 | 420 | ||
421 | rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, | ||
422 | THIS_MODULE); | ||
423 | if (IS_ERR(rtc)) { | ||
424 | ret = PTR_ERR(rtc); | ||
425 | goto exit_put_clk; | ||
426 | } | ||
427 | |||
428 | pdata->rtc = rtc; | ||
429 | platform_set_drvdata(pdev, pdata); | 421 | platform_set_drvdata(pdev, pdata); |
430 | 422 | ||
431 | /* Configure and enable the RTC */ | 423 | /* Configure and enable the RTC */ |
@@ -438,8 +430,19 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) | |||
438 | pdata->irq = -1; | 430 | pdata->irq = -1; |
439 | } | 431 | } |
440 | 432 | ||
433 | rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, | ||
434 | THIS_MODULE); | ||
435 | if (IS_ERR(rtc)) { | ||
436 | ret = PTR_ERR(rtc); | ||
437 | goto exit_clr_drvdata; | ||
438 | } | ||
439 | |||
440 | pdata->rtc = rtc; | ||
441 | |||
441 | return 0; | 442 | return 0; |
442 | 443 | ||
444 | exit_clr_drvdata: | ||
445 | platform_set_drvdata(pdev, NULL); | ||
443 | exit_put_clk: | 446 | exit_put_clk: |
444 | clk_disable(pdata->clk); | 447 | clk_disable(pdata->clk); |
445 | clk_put(pdata->clk); | 448 | clk_put(pdata->clk); |
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c index a633abc42896..cd4f198cc2ef 100644 --- a/drivers/rtc/rtc-pcap.c +++ b/drivers/rtc/rtc-pcap.c | |||
@@ -151,6 +151,8 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) | |||
151 | 151 | ||
152 | pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent); | 152 | pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent); |
153 | 153 | ||
154 | platform_set_drvdata(pdev, pcap_rtc); | ||
155 | |||
154 | pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev, | 156 | pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev, |
155 | &pcap_rtc_ops, THIS_MODULE); | 157 | &pcap_rtc_ops, THIS_MODULE); |
156 | if (IS_ERR(pcap_rtc->rtc)) { | 158 | if (IS_ERR(pcap_rtc->rtc)) { |
@@ -158,7 +160,6 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) | |||
158 | goto fail_rtc; | 160 | goto fail_rtc; |
159 | } | 161 | } |
160 | 162 | ||
161 | platform_set_drvdata(pdev, pcap_rtc); | ||
162 | 163 | ||
163 | timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); | 164 | timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); |
164 | alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); | 165 | alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); |
@@ -177,6 +178,7 @@ fail_alarm: | |||
177 | fail_timer: | 178 | fail_timer: |
178 | rtc_device_unregister(pcap_rtc->rtc); | 179 | rtc_device_unregister(pcap_rtc->rtc); |
179 | fail_rtc: | 180 | fail_rtc: |
181 | platform_set_drvdata(pdev, NULL); | ||
180 | kfree(pcap_rtc); | 182 | kfree(pcap_rtc); |
181 | return err; | 183 | return err; |
182 | } | 184 | } |
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c index 694da39b6dd2..359da6d020b9 100644 --- a/drivers/rtc/rtc-rp5c01.c +++ b/drivers/rtc/rtc-rp5c01.c | |||
@@ -249,15 +249,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) | |||
249 | 249 | ||
250 | spin_lock_init(&priv->lock); | 250 | spin_lock_init(&priv->lock); |
251 | 251 | ||
252 | platform_set_drvdata(dev, priv); | ||
253 | |||
252 | rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops, | 254 | rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops, |
253 | THIS_MODULE); | 255 | THIS_MODULE); |
254 | if (IS_ERR(rtc)) { | 256 | if (IS_ERR(rtc)) { |
255 | error = PTR_ERR(rtc); | 257 | error = PTR_ERR(rtc); |
256 | goto out_unmap; | 258 | goto out_unmap; |
257 | } | 259 | } |
258 | |||
259 | priv->rtc = rtc; | 260 | priv->rtc = rtc; |
260 | platform_set_drvdata(dev, priv); | ||
261 | 261 | ||
262 | error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); | 262 | error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); |
263 | if (error) | 263 | if (error) |
@@ -268,6 +268,7 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) | |||
268 | out_unregister: | 268 | out_unregister: |
269 | rtc_device_unregister(rtc); | 269 | rtc_device_unregister(rtc); |
270 | out_unmap: | 270 | out_unmap: |
271 | platform_set_drvdata(dev, NULL); | ||
271 | iounmap(priv->regs); | 272 | iounmap(priv->regs); |
272 | out_free_priv: | 273 | out_free_priv: |
273 | kfree(priv); | 274 | kfree(priv); |
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index b3466c491cd3..16512ecae31a 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -46,6 +46,7 @@ static struct clk *rtc_clk; | |||
46 | static void __iomem *s3c_rtc_base; | 46 | static void __iomem *s3c_rtc_base; |
47 | static int s3c_rtc_alarmno = NO_IRQ; | 47 | static int s3c_rtc_alarmno = NO_IRQ; |
48 | static int s3c_rtc_tickno = NO_IRQ; | 48 | static int s3c_rtc_tickno = NO_IRQ; |
49 | static bool wake_en; | ||
49 | static enum s3c_cpu_type s3c_rtc_cpu_type; | 50 | static enum s3c_cpu_type s3c_rtc_cpu_type; |
50 | 51 | ||
51 | static DEFINE_SPINLOCK(s3c_rtc_pie_lock); | 52 | static DEFINE_SPINLOCK(s3c_rtc_pie_lock); |
@@ -562,8 +563,12 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) | |||
562 | } | 563 | } |
563 | s3c_rtc_enable(pdev, 0); | 564 | s3c_rtc_enable(pdev, 0); |
564 | 565 | ||
565 | if (device_may_wakeup(&pdev->dev)) | 566 | if (device_may_wakeup(&pdev->dev) && !wake_en) { |
566 | enable_irq_wake(s3c_rtc_alarmno); | 567 | if (enable_irq_wake(s3c_rtc_alarmno) == 0) |
568 | wake_en = true; | ||
569 | else | ||
570 | dev_err(&pdev->dev, "enable_irq_wake failed\n"); | ||
571 | } | ||
567 | 572 | ||
568 | return 0; | 573 | return 0; |
569 | } | 574 | } |
@@ -579,8 +584,10 @@ static int s3c_rtc_resume(struct platform_device *pdev) | |||
579 | writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); | 584 | writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); |
580 | } | 585 | } |
581 | 586 | ||
582 | if (device_may_wakeup(&pdev->dev)) | 587 | if (device_may_wakeup(&pdev->dev) && wake_en) { |
583 | disable_irq_wake(s3c_rtc_alarmno); | 588 | disable_irq_wake(s3c_rtc_alarmno); |
589 | wake_en = false; | ||
590 | } | ||
584 | 591 | ||
585 | return 0; | 592 | return 0; |
586 | } | 593 | } |
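enable_irq_wake() and disable_irq_wake() must stay balanced: disabling wake on an IRQ whose enable never succeeded trips the "Unbalanced IRQ ... wake disable" warning. The new wake_en flag records whether the enable in suspend actually took effect, so resume only undoes what suspend did. The pattern, reduced to its core (irq and dev stand in for the driver's own variables):

	static bool wake_en;	/* true only after a successful enable */

	/* suspend path */
	if (device_may_wakeup(dev) && !wake_en)
		wake_en = (enable_irq_wake(irq) == 0);

	/* resume path */
	if (device_may_wakeup(dev) && wake_en) {
		disable_irq_wake(irq);
		wake_en = false;
	}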
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 475e603fc584..86b6f1cc1b10 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1742,11 +1742,20 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) | |||
1742 | static inline int _dasd_term_running_cqr(struct dasd_device *device) | 1742 | static inline int _dasd_term_running_cqr(struct dasd_device *device) |
1743 | { | 1743 | { |
1744 | struct dasd_ccw_req *cqr; | 1744 | struct dasd_ccw_req *cqr; |
1745 | int rc; | ||
1745 | 1746 | ||
1746 | if (list_empty(&device->ccw_queue)) | 1747 | if (list_empty(&device->ccw_queue)) |
1747 | return 0; | 1748 | return 0; |
1748 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); | 1749 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); |
1749 | return device->discipline->term_IO(cqr); | 1750 | rc = device->discipline->term_IO(cqr); |
1751 | if (!rc) | ||
1752 | /* | ||
1753 | * CQR terminated because a more important request is pending. | ||
1754 | * Undo decreasing of retry counter because this is | ||
1755 | * not an error case. | ||
1756 | */ | ||
1757 | cqr->retries++; | ||
1758 | return rc; | ||
1750 | } | 1759 | } |
1751 | 1760 | ||
1752 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) | 1761 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) |
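term_IO() consumes one unit of the request's retry budget as a side effect of terminating it, but _dasd_term_running_cqr() cancels the CQR only to make room for a more urgent request, not because it failed. Incrementing cqr->retries after a successful termination therefore restores the counter, so the request restarts with its budget intact:

	/* Sketch: compensate the retry consumed by term_IO(). */
	rc = device->discipline->term_IO(cqr);
	if (!rc)
		cqr->retries++;	/* cancellation, not an error */
	return rc;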
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 4b60ede07f0e..be55fb2b1b1c 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -518,6 +518,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned) | |||
518 | return; | 518 | return; |
519 | new_incr->rn = rn; | 519 | new_incr->rn = rn; |
520 | new_incr->standby = standby; | 520 | new_incr->standby = standby; |
521 | if (!standby) | ||
522 | new_incr->usecount = 1; | ||
521 | last_rn = 0; | 523 | last_rn = 0; |
522 | prev = &sclp_mem_list; | 524 | prev = &sclp_mem_list; |
523 | list_for_each_entry(incr, &sclp_mem_list, list) { | 525 | list_for_each_entry(incr, &sclp_mem_list, list) { |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 83cea9a55e2f..1b3924c2fffd 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
@@ -236,7 +236,6 @@ tapeblock_setup_device(struct tape_device * device) | |||
236 | disk->major = tapeblock_major; | 236 | disk->major = tapeblock_major; |
237 | disk->first_minor = device->first_minor; | 237 | disk->first_minor = device->first_minor; |
238 | disk->fops = &tapeblock_fops; | 238 | disk->fops = &tapeblock_fops; |
239 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
240 | disk->private_data = tape_get_device(device); | 239 | disk->private_data = tape_get_device(device); |
241 | disk->queue = blkdat->request_queue; | 240 | disk->queue = blkdat->request_queue; |
242 | set_capacity(disk, 0); | 241 | set_capacity(disk, 0); |
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index e2d45c91b8e8..9689d41c7888 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -1292,8 +1292,10 @@ static struct scsi_host_template qpti_template = { | |||
1292 | .use_clustering = ENABLE_CLUSTERING, | 1292 | .use_clustering = ENABLE_CLUSTERING, |
1293 | }; | 1293 | }; |
1294 | 1294 | ||
1295 | static const struct of_device_id qpti_match[]; | ||
1295 | static int __devinit qpti_sbus_probe(struct platform_device *op) | 1296 | static int __devinit qpti_sbus_probe(struct platform_device *op) |
1296 | { | 1297 | { |
1298 | const struct of_device_id *match; | ||
1297 | struct scsi_host_template *tpnt; | 1299 | struct scsi_host_template *tpnt; |
1298 | struct device_node *dp = op->dev.of_node; | 1300 | struct device_node *dp = op->dev.of_node; |
1299 | struct Scsi_Host *host; | 1301 | struct Scsi_Host *host; |
@@ -1301,9 +1303,10 @@ static int __devinit qpti_sbus_probe(struct platform_device *op) | |||
1301 | static int nqptis; | 1303 | static int nqptis; |
1302 | const char *fcode; | 1304 | const char *fcode; |
1303 | 1305 | ||
1304 | if (!op->dev.of_match) | 1306 | match = of_match_device(qpti_match, &op->dev); |
1307 | if (!match) | ||
1305 | return -EINVAL; | 1308 | return -EINVAL; |
1306 | tpnt = op->dev.of_match->data; | 1309 | tpnt = match->data; |
1307 | 1310 | ||
1308 | /* Sometimes Antares cards come up not completely | 1311 | /* Sometimes Antares cards come up not completely |
1309 | * setup, and we get a report of a zero IRQ. | 1312 | * setup, and we get a report of a zero IRQ. |
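This is one instance of the tree-wide removal of the cached dev.of_match pointer, which could be stale or unset by probe time. The replacement re-runs matching with of_match_device() against the driver's own match table; since qpti_match is defined after the probe routine, a forward declaration is added. The idiom, with illustrative example_* names:

	static const struct of_device_id example_match[];	/* defined later */

	static int __devinit example_probe(struct platform_device *op)
	{
		const struct of_device_id *match;

		match = of_match_device(example_match, &op->dev);
		if (!match)
			return -EINVAL;
		/* per-device data is carried in match->data */
		return 0;
	}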
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0bac91e72370..ec1803a48723 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -74,8 +74,6 @@ struct kmem_cache *scsi_sdb_cache; | |||
74 | */ | 74 | */ |
75 | #define SCSI_QUEUE_DELAY 3 | 75 | #define SCSI_QUEUE_DELAY 3 |
76 | 76 | ||
77 | static void scsi_run_queue(struct request_queue *q); | ||
78 | |||
79 | /* | 77 | /* |
80 | * Function: scsi_unprep_request() | 78 | * Function: scsi_unprep_request() |
81 | * | 79 | * |
@@ -161,7 +159,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) | |||
161 | blk_requeue_request(q, cmd->request); | 159 | blk_requeue_request(q, cmd->request); |
162 | spin_unlock_irqrestore(q->queue_lock, flags); | 160 | spin_unlock_irqrestore(q->queue_lock, flags); |
163 | 161 | ||
164 | scsi_run_queue(q); | 162 | kblockd_schedule_work(q, &device->requeue_work); |
165 | 163 | ||
166 | return 0; | 164 | return 0; |
167 | } | 165 | } |
@@ -438,7 +436,11 @@ static void scsi_run_queue(struct request_queue *q) | |||
438 | continue; | 436 | continue; |
439 | } | 437 | } |
440 | 438 | ||
441 | blk_run_queue_async(sdev->request_queue); | 439 | spin_unlock(shost->host_lock); |
440 | spin_lock(sdev->request_queue->queue_lock); | ||
441 | __blk_run_queue(sdev->request_queue); | ||
442 | spin_unlock(sdev->request_queue->queue_lock); | ||
443 | spin_lock(shost->host_lock); | ||
442 | } | 444 | } |
443 | /* put any unprocessed entries back */ | 445 | /* put any unprocessed entries back */ |
444 | list_splice(&starved_list, &shost->starved_list); | 446 | list_splice(&starved_list, &shost->starved_list); |
@@ -447,6 +449,16 @@ static void scsi_run_queue(struct request_queue *q) | |||
447 | blk_run_queue(q); | 449 | blk_run_queue(q); |
448 | } | 450 | } |
449 | 451 | ||
452 | void scsi_requeue_run_queue(struct work_struct *work) | ||
453 | { | ||
454 | struct scsi_device *sdev; | ||
455 | struct request_queue *q; | ||
456 | |||
457 | sdev = container_of(work, struct scsi_device, requeue_work); | ||
458 | q = sdev->request_queue; | ||
459 | scsi_run_queue(q); | ||
460 | } | ||
461 | |||
450 | /* | 462 | /* |
451 | * Function: scsi_requeue_command() | 463 | * Function: scsi_requeue_command() |
452 | * | 464 | * |
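__scsi_queue_insert() can run from the command completion path, so calling scsi_run_queue() there directly invites recursion and stack overruns (requeue, run queue, dispatch, requeue again). The requeue is instead punted to a per-device work item executed by kblockd; scsi_run_queue() itself now drops host_lock and takes the queue lock around __blk_run_queue(), since the async run helper is no longer used here. The work item is wired up in scsi_alloc_sdev() in the next hunk; in outline:

	/* Sketch: defer the queue run to process context. */
	INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); /* at alloc */

	/* in __scsi_queue_insert(), instead of scsi_run_queue(q): */
	kblockd_schedule_work(q, &device->requeue_work);

	/* the handler, as added above: */
	void scsi_requeue_run_queue(struct work_struct *work)
	{
		struct scsi_device *sdev =
			container_of(work, struct scsi_device, requeue_work);

		scsi_run_queue(sdev->request_queue);
	}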
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 087821fac8fe..58584dc0724a 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -242,6 +242,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | |||
242 | int display_failure_msg = 1, ret; | 242 | int display_failure_msg = 1, ret; |
243 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 243 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
244 | extern void scsi_evt_thread(struct work_struct *work); | 244 | extern void scsi_evt_thread(struct work_struct *work); |
245 | extern void scsi_requeue_run_queue(struct work_struct *work); | ||
245 | 246 | ||
246 | sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, | 247 | sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, |
247 | GFP_ATOMIC); | 248 | GFP_ATOMIC); |
@@ -264,6 +265,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | |||
264 | INIT_LIST_HEAD(&sdev->event_list); | 265 | INIT_LIST_HEAD(&sdev->event_list); |
265 | spin_lock_init(&sdev->list_lock); | 266 | spin_lock_init(&sdev->list_lock); |
266 | INIT_WORK(&sdev->event_work, scsi_evt_thread); | 267 | INIT_WORK(&sdev->event_work, scsi_evt_thread); |
268 | INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); | ||
267 | 269 | ||
268 | sdev->sdev_gendev.parent = get_device(&starget->dev); | 270 | sdev->sdev_gendev.parent = get_device(&starget->dev); |
269 | sdev->sdev_target = starget; | 271 | sdev->sdev_target = starget; |
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 6f34963b3c64..7ad48585c5e6 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c | |||
@@ -662,7 +662,6 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, | |||
662 | static int ssb_pci_sprom_get(struct ssb_bus *bus, | 662 | static int ssb_pci_sprom_get(struct ssb_bus *bus, |
663 | struct ssb_sprom *sprom) | 663 | struct ssb_sprom *sprom) |
664 | { | 664 | { |
665 | const struct ssb_sprom *fallback; | ||
666 | int err; | 665 | int err; |
667 | u16 *buf; | 666 | u16 *buf; |
668 | 667 | ||
@@ -707,10 +706,17 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, | |||
707 | if (err) { | 706 | if (err) { |
708 | /* All CRC attempts failed. | 707 | /* All CRC attempts failed. |
709 | * Maybe there is no SPROM on the device? | 708 | * Maybe there is no SPROM on the device? |
710 | * If we have a fallback, use that. */ | 709 | * Now we ask the arch code if there is some sprom |
711 | fallback = ssb_get_fallback_sprom(); | 710 | * available for this device in some other storage */ |
712 | if (fallback) { | 711 | err = ssb_fill_sprom_with_fallback(bus, sprom); |
713 | memcpy(sprom, fallback, sizeof(*sprom)); | 712 | if (err) { |
713 | ssb_printk(KERN_WARNING PFX "WARNING: Using" | ||
714 | " fallback SPROM failed (err %d)\n", | ||
715 | err); | ||
716 | } else { | ||
717 | ssb_dprintk(KERN_DEBUG PFX "Using SPROM" | ||
718 | " revision %d provided by" | ||
719 | " platform.\n", sprom->revision); | ||
714 | err = 0; | 720 | err = 0; |
715 | goto out_free; | 721 | goto out_free; |
716 | } | 722 | } |
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c index 5f34d7a3e3a5..45ff0e3a3828 100644 --- a/drivers/ssb/sprom.c +++ b/drivers/ssb/sprom.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | 18 | ||
19 | 19 | ||
20 | static const struct ssb_sprom *fallback_sprom; | 20 | static int(*get_fallback_sprom)(struct ssb_bus *dev, struct ssb_sprom *out); |
21 | 21 | ||
22 | 22 | ||
23 | static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, | 23 | static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, |
@@ -145,36 +145,43 @@ out: | |||
145 | } | 145 | } |
146 | 146 | ||
147 | /** | 147 | /** |
148 | * ssb_arch_set_fallback_sprom - Set a fallback SPROM for use if no SPROM is found. | 148 | * ssb_arch_register_fallback_sprom - Registers a method providing a |
149 | * fallback SPROM if no SPROM is found. | ||
149 | * | 150 | * |
150 | * @sprom: The SPROM data structure to register. | 151 | * @sprom_callback: The callback function. |
151 | * | 152 | * |
152 | * With this function the architecture implementation may register a fallback | 153 | * With this function the architecture implementation may register a |
153 | * SPROM data structure. The fallback is only used for PCI based SSB devices, | 154 | * callback handler which fills the SPROM data structure. The fallback is |
154 | * where no valid SPROM can be found in the shadow registers. | 155 | * only used for PCI based SSB devices, where no valid SPROM can be found |
156 | * in the shadow registers. | ||
155 | * | 157 | * |
156 | * This function is useful for weird architectures that have a half-assed SSB device | 158 | * This function is useful for weird architectures that have a half-assed |
157 | * hardwired to their PCI bus. | 159 | * SSB device hardwired to their PCI bus. |
158 | * | 160 | * |
159 | * Note that it does only work with PCI attached SSB devices. PCMCIA devices currently | 161 | * Note that it does only work with PCI attached SSB devices. PCMCIA |
160 | * don't use this fallback. | 162 | * devices currently don't use this fallback. |
161 | * Architectures must provide the SPROM for native SSB devices anyway, | 163 | * Architectures must provide the SPROM for native SSB devices anyway, so |
162 | * so the fallback also isn't used for native devices. | 164 | * the fallback also isn't used for native devices. |
163 | * | 165 | * |
164 | * This function is available for architecture code, only. So it is not exported. | 166 | * This function is available for architecture code, only. So it is not |
167 | * exported. | ||
165 | */ | 168 | */ |
166 | int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom) | 169 | int ssb_arch_register_fallback_sprom(int (*sprom_callback)(struct ssb_bus *bus, |
170 | struct ssb_sprom *out)) | ||
167 | { | 171 | { |
168 | if (fallback_sprom) | 172 | if (get_fallback_sprom) |
169 | return -EEXIST; | 173 | return -EEXIST; |
170 | fallback_sprom = sprom; | 174 | get_fallback_sprom = sprom_callback; |
171 | 175 | ||
172 | return 0; | 176 | return 0; |
173 | } | 177 | } |
174 | 178 | ||
175 | const struct ssb_sprom *ssb_get_fallback_sprom(void) | 179 | int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out) |
176 | { | 180 | { |
177 | return fallback_sprom; | 181 | if (!get_fallback_sprom) |
182 | return -ENOENT; | ||
183 | |||
184 | return get_fallback_sprom(bus, out); | ||
178 | } | 185 | } |
179 | 186 | ||
180 | /* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */ | 187 | /* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */ |
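Replacing the static fallback struct with a registered callback lets the architecture fill the SPROM on demand and per bus, instead of committing to one copy at init time. A hypothetical arch-side registration, assuming a platform helper that pulls SPROM contents out of NVRAM (the myboard_* names are illustrative, not from the tree):

	/* Hypothetical fallback provider; helper names are illustrative. */
	static int myboard_get_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
	{
		if (bus->bustype != SSB_BUSTYPE_PCI)
			return -ENODEV;
		return myboard_fill_sprom_from_nvram(out);	/* assumed helper */
	}

	/* during early arch setup: */
	ssb_arch_register_fallback_sprom(&myboard_get_sprom);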
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index 0331139a726f..77653014db0b 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h | |||
@@ -171,7 +171,8 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus, | |||
171 | const char *buf, size_t count, | 171 | const char *buf, size_t count, |
172 | int (*sprom_check_crc)(const u16 *sprom, size_t size), | 172 | int (*sprom_check_crc)(const u16 *sprom, size_t size), |
173 | int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom)); | 173 | int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom)); |
174 | extern const struct ssb_sprom *ssb_get_fallback_sprom(void); | 174 | extern int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, |
175 | struct ssb_sprom *out); | ||
175 | 176 | ||
176 | 177 | ||
177 | /* core.c */ | 178 | /* core.c */ |
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 80484af781e1..b1f0f83b870d 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig | |||
@@ -1391,6 +1391,14 @@ config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE | |||
1391 | help | 1391 | help |
1392 | Support for Console on the NWP serial ports. | 1392 | Support for Console on the NWP serial ports. |
1393 | 1393 | ||
1394 | config SERIAL_LANTIQ | ||
1395 | bool "Lantiq serial driver" | ||
1396 | depends on LANTIQ | ||
1397 | select SERIAL_CORE | ||
1398 | select SERIAL_CORE_CONSOLE | ||
1399 | help | ||
1400 | Support for console and UART on Lantiq SoCs. | ||
1401 | |||
1394 | config SERIAL_QE | 1402 | config SERIAL_QE |
1395 | tristate "Freescale QUICC Engine serial port support" | 1403 | tristate "Freescale QUICC Engine serial port support" |
1396 | depends on QUICC_ENGINE | 1404 | depends on QUICC_ENGINE |
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index fee0690ef8e3..35276043d9d1 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile | |||
@@ -94,3 +94,4 @@ obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o | |||
94 | obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o | 94 | obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o |
95 | obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o | 95 | obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o |
96 | obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o | 96 | obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o |
97 | obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o | ||
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c new file mode 100644 index 000000000000..58cf279ed879 --- /dev/null +++ b/drivers/tty/serial/lantiq.c | |||
@@ -0,0 +1,756 @@ | |||
1 | /* | ||
2 | * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License version 2 as published | ||
6 | * by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | * | ||
17 | * Copyright (C) 2004 Infineon IFAP DC COM CPE | ||
18 | * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org> | ||
19 | * Copyright (C) 2007 John Crispin <blogic@openwrt.org> | ||
20 | * Copyright (C) 2010 Thomas Langer, <thomas.langer@lantiq.com> | ||
21 | */ | ||
22 | |||
23 | #include <linux/slab.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/ioport.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/console.h> | ||
28 | #include <linux/sysrq.h> | ||
29 | #include <linux/device.h> | ||
30 | #include <linux/tty.h> | ||
31 | #include <linux/tty_flip.h> | ||
32 | #include <linux/serial_core.h> | ||
33 | #include <linux/serial.h> | ||
34 | #include <linux/platform_device.h> | ||
35 | #include <linux/io.h> | ||
36 | #include <linux/clk.h> | ||
37 | |||
38 | #include <lantiq_soc.h> | ||
39 | |||
40 | #define PORT_LTQ_ASC 111 | ||
41 | #define MAXPORTS 2 | ||
42 | #define UART_DUMMY_UER_RX 1 | ||
43 | #define DRVNAME "ltq_asc" | ||
44 | #ifdef __BIG_ENDIAN | ||
45 | #define LTQ_ASC_TBUF (0x0020 + 3) | ||
46 | #define LTQ_ASC_RBUF (0x0024 + 3) | ||
47 | #else | ||
48 | #define LTQ_ASC_TBUF 0x0020 | ||
49 | #define LTQ_ASC_RBUF 0x0024 | ||
50 | #endif | ||
51 | #define LTQ_ASC_FSTAT 0x0048 | ||
52 | #define LTQ_ASC_WHBSTATE 0x0018 | ||
53 | #define LTQ_ASC_STATE 0x0014 | ||
54 | #define LTQ_ASC_IRNCR 0x00F8 | ||
55 | #define LTQ_ASC_CLC 0x0000 | ||
56 | #define LTQ_ASC_ID 0x0008 | ||
57 | #define LTQ_ASC_PISEL 0x0004 | ||
58 | #define LTQ_ASC_TXFCON 0x0044 | ||
59 | #define LTQ_ASC_RXFCON 0x0040 | ||
60 | #define LTQ_ASC_CON 0x0010 | ||
61 | #define LTQ_ASC_BG 0x0050 | ||
62 | #define LTQ_ASC_IRNREN 0x00F4 | ||
63 | |||
64 | #define ASC_IRNREN_TX 0x1 | ||
65 | #define ASC_IRNREN_RX 0x2 | ||
66 | #define ASC_IRNREN_ERR 0x4 | ||
67 | #define ASC_IRNREN_TX_BUF 0x8 | ||
68 | #define ASC_IRNCR_TIR 0x1 | ||
69 | #define ASC_IRNCR_RIR 0x2 | ||
70 | #define ASC_IRNCR_EIR 0x4 | ||
71 | |||
72 | #define ASCOPT_CSIZE 0x3 | ||
73 | #define TXFIFO_FL 1 | ||
74 | #define RXFIFO_FL 1 | ||
75 | #define ASCCLC_DISS 0x2 | ||
76 | #define ASCCLC_RMCMASK 0x0000FF00 | ||
77 | #define ASCCLC_RMCOFFSET 8 | ||
78 | #define ASCCON_M_8ASYNC 0x0 | ||
79 | #define ASCCON_M_7ASYNC 0x2 | ||
80 | #define ASCCON_ODD 0x00000020 | ||
81 | #define ASCCON_STP 0x00000080 | ||
82 | #define ASCCON_BRS 0x00000100 | ||
83 | #define ASCCON_FDE 0x00000200 | ||
84 | #define ASCCON_R 0x00008000 | ||
85 | #define ASCCON_FEN 0x00020000 | ||
86 | #define ASCCON_ROEN 0x00080000 | ||
87 | #define ASCCON_TOEN 0x00100000 | ||
88 | #define ASCSTATE_PE 0x00010000 | ||
89 | #define ASCSTATE_FE 0x00020000 | ||
90 | #define ASCSTATE_ROE 0x00080000 | ||
91 | #define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE) | ||
92 | #define ASCWHBSTATE_CLRREN 0x00000001 | ||
93 | #define ASCWHBSTATE_SETREN 0x00000002 | ||
94 | #define ASCWHBSTATE_CLRPE 0x00000004 | ||
95 | #define ASCWHBSTATE_CLRFE 0x00000008 | ||
96 | #define ASCWHBSTATE_CLRROE 0x00000020 | ||
97 | #define ASCTXFCON_TXFEN 0x0001 | ||
98 | #define ASCTXFCON_TXFFLU 0x0002 | ||
99 | #define ASCTXFCON_TXFITLMASK 0x3F00 | ||
100 | #define ASCTXFCON_TXFITLOFF 8 | ||
101 | #define ASCRXFCON_RXFEN 0x0001 | ||
102 | #define ASCRXFCON_RXFFLU 0x0002 | ||
103 | #define ASCRXFCON_RXFITLMASK 0x3F00 | ||
104 | #define ASCRXFCON_RXFITLOFF 8 | ||
105 | #define ASCFSTAT_RXFFLMASK 0x003F | ||
106 | #define ASCFSTAT_TXFFLMASK 0x3F00 | ||
107 | #define ASCFSTAT_TXFREEMASK 0x3F000000 | ||
108 | #define ASCFSTAT_TXFREEOFF 24 | ||
109 | |||
110 | static void lqasc_tx_chars(struct uart_port *port); | ||
111 | static struct ltq_uart_port *lqasc_port[MAXPORTS]; | ||
112 | static struct uart_driver lqasc_reg; | ||
113 | static DEFINE_SPINLOCK(ltq_asc_lock); | ||
114 | |||
115 | struct ltq_uart_port { | ||
116 | struct uart_port port; | ||
117 | struct clk *clk; | ||
118 | unsigned int tx_irq; | ||
119 | unsigned int rx_irq; | ||
120 | unsigned int err_irq; | ||
121 | }; | ||
122 | |||
123 | static inline struct | ||
124 | ltq_uart_port *to_ltq_uart_port(struct uart_port *port) | ||
125 | { | ||
126 | return container_of(port, struct ltq_uart_port, port); | ||
127 | } | ||
128 | |||
129 | static void | ||
130 | lqasc_stop_tx(struct uart_port *port) | ||
131 | { | ||
132 | return; | ||
133 | } | ||
134 | |||
135 | static void | ||
136 | lqasc_start_tx(struct uart_port *port) | ||
137 | { | ||
138 | unsigned long flags; | ||
139 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
140 | lqasc_tx_chars(port); | ||
141 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
142 | return; | ||
143 | } | ||
144 | |||
145 | static void | ||
146 | lqasc_stop_rx(struct uart_port *port) | ||
147 | { | ||
148 | ltq_w32(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); | ||
149 | } | ||
150 | |||
151 | static void | ||
152 | lqasc_enable_ms(struct uart_port *port) | ||
153 | { | ||
154 | } | ||
155 | |||
156 | static int | ||
157 | lqasc_rx_chars(struct uart_port *port) | ||
158 | { | ||
159 | struct tty_struct *tty = tty_port_tty_get(&port->state->port); | ||
160 | unsigned int ch = 0, rsr = 0, fifocnt; | ||
161 | |||
162 | if (!tty) { | ||
163 | dev_dbg(port->dev, "%s:tty is busy now", __func__); | ||
164 | return -EBUSY; | ||
165 | } | ||
166 | fifocnt = | ||
167 | ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; | ||
168 | while (fifocnt--) { | ||
169 | u8 flag = TTY_NORMAL; | ||
170 | ch = ltq_r8(port->membase + LTQ_ASC_RBUF); | ||
171 | rsr = (ltq_r32(port->membase + LTQ_ASC_STATE) | ||
172 | & ASCSTATE_ANY) | UART_DUMMY_UER_RX; | ||
173 | tty_flip_buffer_push(tty); | ||
174 | port->icount.rx++; | ||
175 | |||
176 | /* | ||
177 | * Note that the error handling code is | ||
178 | * out of the main execution path | ||
179 | */ | ||
180 | if (rsr & ASCSTATE_ANY) { | ||
181 | if (rsr & ASCSTATE_PE) { | ||
182 | port->icount.parity++; | ||
183 | ltq_w32_mask(0, ASCWHBSTATE_CLRPE, | ||
184 | port->membase + LTQ_ASC_WHBSTATE); | ||
185 | } else if (rsr & ASCSTATE_FE) { | ||
186 | port->icount.frame++; | ||
187 | ltq_w32_mask(0, ASCWHBSTATE_CLRFE, | ||
188 | port->membase + LTQ_ASC_WHBSTATE); | ||
189 | } | ||
190 | if (rsr & ASCSTATE_ROE) { | ||
191 | port->icount.overrun++; | ||
192 | ltq_w32_mask(0, ASCWHBSTATE_CLRROE, | ||
193 | port->membase + LTQ_ASC_WHBSTATE); | ||
194 | } | ||
195 | |||
196 | rsr &= port->read_status_mask; | ||
197 | |||
198 | if (rsr & ASCSTATE_PE) | ||
199 | flag = TTY_PARITY; | ||
200 | else if (rsr & ASCSTATE_FE) | ||
201 | flag = TTY_FRAME; | ||
202 | } | ||
203 | |||
204 | if ((rsr & port->ignore_status_mask) == 0) | ||
205 | tty_insert_flip_char(tty, ch, flag); | ||
206 | |||
207 | if (rsr & ASCSTATE_ROE) | ||
208 | /* | ||
209 | * Overrun is special, since it's reported | ||
210 | * immediately, and doesn't affect the current | ||
211 | * character | ||
212 | */ | ||
213 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); | ||
214 | } | ||
215 | if (ch != 0) | ||
216 | tty_flip_buffer_push(tty); | ||
217 | tty_kref_put(tty); | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static void | ||
222 | lqasc_tx_chars(struct uart_port *port) | ||
223 | { | ||
224 | struct circ_buf *xmit = &port->state->xmit; | ||
225 | if (uart_tx_stopped(port)) { | ||
226 | lqasc_stop_tx(port); | ||
227 | return; | ||
228 | } | ||
229 | |||
230 | while (((ltq_r32(port->membase + LTQ_ASC_FSTAT) & | ||
231 | ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { | ||
232 | if (port->x_char) { | ||
233 | ltq_w8(port->x_char, port->membase + LTQ_ASC_TBUF); | ||
234 | port->icount.tx++; | ||
235 | port->x_char = 0; | ||
236 | continue; | ||
237 | } | ||
238 | |||
239 | if (uart_circ_empty(xmit)) | ||
240 | break; | ||
241 | |||
242 | ltq_w8(port->state->xmit.buf[port->state->xmit.tail], | ||
243 | port->membase + LTQ_ASC_TBUF); | ||
244 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); | ||
245 | port->icount.tx++; | ||
246 | } | ||
247 | |||
248 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | ||
249 | uart_write_wakeup(port); | ||
250 | } | ||
251 | |||
252 | static irqreturn_t | ||
253 | lqasc_tx_int(int irq, void *_port) | ||
254 | { | ||
255 | unsigned long flags; | ||
256 | struct uart_port *port = (struct uart_port *)_port; | ||
257 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
258 | ltq_w32(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); | ||
259 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
260 | lqasc_start_tx(port); | ||
261 | return IRQ_HANDLED; | ||
262 | } | ||
263 | |||
264 | static irqreturn_t | ||
265 | lqasc_err_int(int irq, void *_port) | ||
266 | { | ||
267 | unsigned long flags; | ||
268 | struct uart_port *port = (struct uart_port *)_port; | ||
269 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
270 | /* clear any pending interrupts */ | ||
271 | ltq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE | | ||
272 | ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); | ||
273 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
274 | return IRQ_HANDLED; | ||
275 | } | ||
276 | |||
277 | static irqreturn_t | ||
278 | lqasc_rx_int(int irq, void *_port) | ||
279 | { | ||
280 | unsigned long flags; | ||
281 | struct uart_port *port = (struct uart_port *)_port; | ||
282 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
283 | ltq_w32(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); | ||
284 | lqasc_rx_chars(port); | ||
285 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
286 | return IRQ_HANDLED; | ||
287 | } | ||
288 | |||
289 | static unsigned int | ||
290 | lqasc_tx_empty(struct uart_port *port) | ||
291 | { | ||
292 | int status; | ||
293 | status = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; | ||
294 | return status ? 0 : TIOCSER_TEMT; | ||
295 | } | ||
296 | |||
297 | static unsigned int | ||
298 | lqasc_get_mctrl(struct uart_port *port) | ||
299 | { | ||
300 | return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR; | ||
301 | } | ||
302 | |||
303 | static void | ||
304 | lqasc_set_mctrl(struct uart_port *port, u_int mctrl) | ||
305 | { | ||
306 | } | ||
307 | |||
308 | static void | ||
309 | lqasc_break_ctl(struct uart_port *port, int break_state) | ||
310 | { | ||
311 | } | ||
312 | |||
313 | static int | ||
314 | lqasc_startup(struct uart_port *port) | ||
315 | { | ||
316 | struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); | ||
317 | int retval; | ||
318 | |||
319 | port->uartclk = clk_get_rate(ltq_port->clk); | ||
320 | |||
321 | ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), | ||
322 | port->membase + LTQ_ASC_CLC); | ||
323 | |||
324 | ltq_w32(0, port->membase + LTQ_ASC_PISEL); | ||
325 | ltq_w32( | ||
326 | ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | | ||
327 | ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, | ||
328 | port->membase + LTQ_ASC_TXFCON); | ||
329 | ltq_w32( | ||
330 | ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) | ||
331 | | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, | ||
332 | port->membase + LTQ_ASC_RXFCON); | ||
333 | /* make sure other settings are written to hardware before | ||
334 | * setting enable bits | ||
335 | */ | ||
336 | wmb(); | ||
337 | ltq_w32_mask(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN | | ||
338 | ASCCON_ROEN, port->membase + LTQ_ASC_CON); | ||
339 | |||
340 | retval = request_irq(ltq_port->tx_irq, lqasc_tx_int, | ||
341 | IRQF_DISABLED, "asc_tx", port); | ||
342 | if (retval) { | ||
343 | pr_err("failed to request lqasc_tx_int\n"); | ||
344 | return retval; | ||
345 | } | ||
346 | |||
347 | retval = request_irq(ltq_port->rx_irq, lqasc_rx_int, | ||
348 | IRQF_DISABLED, "asc_rx", port); | ||
349 | if (retval) { | ||
350 | pr_err("failed to request lqasc_rx_int\n"); | ||
351 | goto err1; | ||
352 | } | ||
353 | |||
354 | retval = request_irq(ltq_port->err_irq, lqasc_err_int, | ||
355 | IRQF_DISABLED, "asc_err", port); | ||
356 | if (retval) { | ||
357 | pr_err("failed to request lqasc_err_int\n"); | ||
358 | goto err2; | ||
359 | } | ||
360 | |||
361 | ltq_w32(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, | ||
362 | port->membase + LTQ_ASC_IRNREN); | ||
363 | return 0; | ||
364 | |||
365 | err2: | ||
366 | free_irq(ltq_port->rx_irq, port); | ||
367 | err1: | ||
368 | free_irq(ltq_port->tx_irq, port); | ||
369 | return retval; | ||
370 | } | ||
371 | |||
372 | static void | ||
373 | lqasc_shutdown(struct uart_port *port) | ||
374 | { | ||
375 | struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); | ||
376 | free_irq(ltq_port->tx_irq, port); | ||
377 | free_irq(ltq_port->rx_irq, port); | ||
378 | free_irq(ltq_port->err_irq, port); | ||
379 | |||
380 | ltq_w32(0, port->membase + LTQ_ASC_CON); | ||
381 | ltq_w32_mask(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, | ||
382 | port->membase + LTQ_ASC_RXFCON); | ||
383 | ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, | ||
384 | port->membase + LTQ_ASC_TXFCON); | ||
385 | } | ||
386 | |||
387 | static void | ||
388 | lqasc_set_termios(struct uart_port *port, | ||
389 | struct ktermios *new, struct ktermios *old) | ||
390 | { | ||
391 | unsigned int cflag; | ||
392 | unsigned int iflag; | ||
393 | unsigned int divisor; | ||
394 | unsigned int baud; | ||
395 | unsigned int con = 0; | ||
396 | unsigned long flags; | ||
397 | |||
398 | cflag = new->c_cflag; | ||
399 | iflag = new->c_iflag; | ||
400 | |||
401 | switch (cflag & CSIZE) { | ||
402 | case CS7: | ||
403 | con = ASCCON_M_7ASYNC; | ||
404 | break; | ||
405 | |||
406 | case CS5: | ||
407 | case CS6: | ||
408 | default: | ||
409 | new->c_cflag &= ~CSIZE; | ||
410 | new->c_cflag |= CS8; | ||
411 | con = ASCCON_M_8ASYNC; | ||
412 | break; | ||
413 | } | ||
414 | |||
415 | cflag &= ~CMSPAR; /* Mark/Space parity is not supported */ | ||
416 | |||
417 | if (cflag & CSTOPB) | ||
418 | con |= ASCCON_STP; | ||
419 | |||
420 | if (cflag & PARENB) { | ||
421 | if (!(cflag & PARODD)) | ||
422 | con &= ~ASCCON_ODD; | ||
423 | else | ||
424 | con |= ASCCON_ODD; | ||
425 | } | ||
426 | |||
427 | port->read_status_mask = ASCSTATE_ROE; | ||
428 | if (iflag & INPCK) | ||
429 | port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE; | ||
430 | |||
431 | port->ignore_status_mask = 0; | ||
432 | if (iflag & IGNPAR) | ||
433 | port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE; | ||
434 | |||
435 | if (iflag & IGNBRK) { | ||
436 | /* | ||
437 | * If we're ignoring parity and break indicators, | ||
438 | * ignore overruns too (for real raw support). | ||
439 | */ | ||
440 | if (iflag & IGNPAR) | ||
441 | port->ignore_status_mask |= ASCSTATE_ROE; | ||
442 | } | ||
443 | |||
444 | if ((cflag & CREAD) == 0) | ||
445 | port->ignore_status_mask |= UART_DUMMY_UER_RX; | ||
446 | |||
447 | /* set error signals - framing, parity and overrun, enable receiver */ | ||
448 | con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN; | ||
449 | |||
450 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
451 | |||
452 | /* set up CON */ | ||
453 | ltq_w32_mask(0, con, port->membase + LTQ_ASC_CON); | ||
454 | |||
455 | /* Set baud rate - take a divider of 2 into account */ | ||
456 | baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); | ||
457 | divisor = uart_get_divisor(port, baud); | ||
458 | divisor = divisor / 2 - 1; | ||
459 | |||
460 | /* disable the baudrate generator */ | ||
461 | ltq_w32_mask(ASCCON_R, 0, port->membase + LTQ_ASC_CON); | ||
462 | |||
463 | /* make sure the fractional divider is off */ | ||
464 | ltq_w32_mask(ASCCON_FDE, 0, port->membase + LTQ_ASC_CON); | ||
465 | |||
466 | /* set up to use divisor of 2 */ | ||
467 | ltq_w32_mask(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); | ||
468 | |||
469 | /* now we can write the new baudrate into the register */ | ||
470 | ltq_w32(divisor, port->membase + LTQ_ASC_BG); | ||
471 | |||
472 | /* turn the baudrate generator back on */ | ||
473 | ltq_w32_mask(0, ASCCON_R, port->membase + LTQ_ASC_CON); | ||
474 | |||
475 | /* enable rx */ | ||
476 | ltq_w32(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); | ||
477 | |||
478 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
479 | |||
480 | /* Don't rewrite B0 */ | ||
481 | if (tty_termios_baud_rate(new)) | ||
482 | tty_termios_encode_baud_rate(new, baud, baud); | ||
483 | } | ||
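For reference on the divisor math above: uart_get_divisor() returns uartclk / (16 * baud), rounded; the generator runs with a fixed /2 prescaler (ASCCON_BRS cleared); and the BG register holds N with an effective divide of N + 1, hence divisor / 2 - 1. A worked example with a hypothetical 100 MHz FPI clock:

	/* Hypothetical numbers, for illustration only:
	 *   uartclk = 100000000, baud = 115200
	 *   uart_get_divisor() -> 100000000 / (16 * 115200) ~= 54
	 *   written to LTQ_ASC_BG: 54 / 2 - 1 = 26
	 *   resulting rate: 100000000 / (16 * 2 * (26 + 1)) ~= 115741
	 */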
484 | |||
485 | static const char* | ||
486 | lqasc_type(struct uart_port *port) | ||
487 | { | ||
488 | if (port->type == PORT_LTQ_ASC) | ||
489 | return DRVNAME; | ||
490 | else | ||
491 | return NULL; | ||
492 | } | ||
493 | |||
494 | static void | ||
495 | lqasc_release_port(struct uart_port *port) | ||
496 | { | ||
497 | if (port->flags & UPF_IOREMAP) { | ||
498 | iounmap(port->membase); | ||
499 | port->membase = NULL; | ||
500 | } | ||
501 | } | ||
502 | |||
503 | static int | ||
504 | lqasc_request_port(struct uart_port *port) | ||
505 | { | ||
506 | struct platform_device *pdev = to_platform_device(port->dev); | ||
507 | struct resource *res; | ||
508 | int size; | ||
509 | |||
510 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
511 | if (!res) { | ||
512 | dev_err(&pdev->dev, "cannot obtain I/O memory region"); | ||
513 | return -ENODEV; | ||
514 | } | ||
515 | size = resource_size(res); | ||
516 | |||
517 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
518 | size, dev_name(&pdev->dev)); | ||
519 | if (!res) { | ||
520 | dev_err(&pdev->dev, "cannot request I/O memory region"); | ||
521 | return -EBUSY; | ||
522 | } | ||
523 | |||
524 | if (port->flags & UPF_IOREMAP) { | ||
525 | port->membase = devm_ioremap_nocache(&pdev->dev, | ||
526 | port->mapbase, size); | ||
527 | if (port->membase == NULL) | ||
528 | return -ENOMEM; | ||
529 | } | ||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | static void | ||
534 | lqasc_config_port(struct uart_port *port, int flags) | ||
535 | { | ||
536 | if (flags & UART_CONFIG_TYPE) { | ||
537 | port->type = PORT_LTQ_ASC; | ||
538 | lqasc_request_port(port); | ||
539 | } | ||
540 | } | ||
541 | |||
542 | static int | ||
543 | lqasc_verify_port(struct uart_port *port, | ||
544 | struct serial_struct *ser) | ||
545 | { | ||
546 | int ret = 0; | ||
547 | if (ser->type != PORT_UNKNOWN && ser->type != PORT_LTQ_ASC) | ||
548 | ret = -EINVAL; | ||
549 | if (ser->irq < 0 || ser->irq >= NR_IRQS) | ||
550 | ret = -EINVAL; | ||
551 | if (ser->baud_base < 9600) | ||
552 | ret = -EINVAL; | ||
553 | return ret; | ||
554 | } | ||
555 | |||
556 | static struct uart_ops lqasc_pops = { | ||
557 | .tx_empty = lqasc_tx_empty, | ||
558 | .set_mctrl = lqasc_set_mctrl, | ||
559 | .get_mctrl = lqasc_get_mctrl, | ||
560 | .stop_tx = lqasc_stop_tx, | ||
561 | .start_tx = lqasc_start_tx, | ||
562 | .stop_rx = lqasc_stop_rx, | ||
563 | .enable_ms = lqasc_enable_ms, | ||
564 | .break_ctl = lqasc_break_ctl, | ||
565 | .startup = lqasc_startup, | ||
566 | .shutdown = lqasc_shutdown, | ||
567 | .set_termios = lqasc_set_termios, | ||
568 | .type = lqasc_type, | ||
569 | .release_port = lqasc_release_port, | ||
570 | .request_port = lqasc_request_port, | ||
571 | .config_port = lqasc_config_port, | ||
572 | .verify_port = lqasc_verify_port, | ||
573 | }; | ||
574 | |||
575 | static void | ||
576 | lqasc_console_putchar(struct uart_port *port, int ch) | ||
577 | { | ||
578 | int fifofree; | ||
579 | |||
580 | if (!port->membase) | ||
581 | return; | ||
582 | |||
583 | do { | ||
584 | fifofree = (ltq_r32(port->membase + LTQ_ASC_FSTAT) | ||
585 | & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; | ||
586 | } while (fifofree == 0); | ||
587 | ltq_w8(ch, port->membase + LTQ_ASC_TBUF); | ||
588 | } | ||
589 | |||
590 | |||
591 | static void | ||
592 | lqasc_console_write(struct console *co, const char *s, u_int count) | ||
593 | { | ||
594 | struct ltq_uart_port *ltq_port; | ||
595 | struct uart_port *port; | ||
596 | unsigned long flags; | ||
597 | |||
598 | if (co->index >= MAXPORTS) | ||
599 | return; | ||
600 | |||
601 | ltq_port = lqasc_port[co->index]; | ||
602 | if (!ltq_port) | ||
603 | return; | ||
604 | |||
605 | port = &ltq_port->port; | ||
606 | | ||
607 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
608 | uart_console_write(port, s, count, lqasc_console_putchar); | ||
609 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
610 | } | ||
611 | |||
612 | static int __init | ||
613 | lqasc_console_setup(struct console *co, char *options) | ||
614 | { | ||
615 | struct ltq_uart_port *ltq_port; | ||
616 | struct uart_port *port; | ||
617 | int baud = 115200; | ||
618 | int bits = 8; | ||
619 | int parity = 'n'; | ||
620 | int flow = 'n'; | ||
621 | |||
622 | if (co->index >= MAXPORTS) | ||
623 | return -ENODEV; | ||
624 | |||
625 | ltq_port = lqasc_port[co->index]; | ||
626 | if (!ltq_port) | ||
627 | return -ENODEV; | ||
628 | |||
629 | port = &ltq_port->port; | ||
630 | |||
631 | port->uartclk = clk_get_rate(ltq_port->clk); | ||
632 | |||
633 | if (options) | ||
634 | uart_parse_options(options, &baud, &parity, &bits, &flow); | ||
635 | return uart_set_options(port, co, baud, parity, bits, flow); | ||
636 | } | ||
637 | |||
638 | static struct console lqasc_console = { | ||
639 | .name = "ttyLTQ", | ||
640 | .write = lqasc_console_write, | ||
641 | .device = uart_console_device, | ||
642 | .setup = lqasc_console_setup, | ||
643 | .flags = CON_PRINTBUFFER, | ||
644 | .index = -1, | ||
645 | .data = &lqasc_reg, | ||
646 | }; | ||
647 | |||
648 | static int __init | ||
649 | lqasc_console_init(void) | ||
650 | { | ||
651 | register_console(&lqasc_console); | ||
652 | return 0; | ||
653 | } | ||
654 | console_initcall(lqasc_console_init); | ||
655 | |||
656 | static struct uart_driver lqasc_reg = { | ||
657 | .owner = THIS_MODULE, | ||
658 | .driver_name = DRVNAME, | ||
659 | .dev_name = "ttyLTQ", | ||
660 | .major = 0, | ||
661 | .minor = 0, | ||
662 | .nr = MAXPORTS, | ||
663 | .cons = &lqasc_console, | ||
664 | }; | ||
665 | |||
666 | static int __init | ||
667 | lqasc_probe(struct platform_device *pdev) | ||
668 | { | ||
669 | struct ltq_uart_port *ltq_port; | ||
670 | struct uart_port *port; | ||
671 | struct resource *mmres, *irqres; | ||
672 | int tx_irq, rx_irq, err_irq; | ||
673 | struct clk *clk; | ||
674 | int ret; | ||
675 | |||
676 | mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
677 | irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
678 | if (!mmres || !irqres) | ||
679 | return -ENODEV; | ||
680 | |||
681 | if (pdev->id >= MAXPORTS) | ||
682 | return -EBUSY; | ||
683 | |||
684 | if (lqasc_port[pdev->id] != NULL) | ||
685 | return -EBUSY; | ||
686 | |||
687 | tx_irq = platform_get_irq_byname(pdev, "tx"); | ||
688 | rx_irq = platform_get_irq_byname(pdev, "rx"); | ||
689 | err_irq = platform_get_irq_byname(pdev, "err"); | ||
690 | if (tx_irq < 0 || rx_irq < 0 || err_irq < 0) | ||
691 | return -ENODEV; | ||
692 | |||
693 | clk = clk_get(&pdev->dev, "fpi"); /* after IRQ checks: no clk leak */ | ||
694 | if (IS_ERR(clk)) { | ||
695 | pr_err("failed to get fpi clk\n"); | ||
696 | return -ENOENT; | ||
697 | } | ||
698 | |||
699 | ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL); | ||
700 | if (!ltq_port) | ||
701 | return -ENOMEM; | ||
702 | |||
703 | port = &ltq_port->port; | ||
704 | |||
705 | port->iotype = UPIO_MEM; | ||
706 | port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP; | ||
707 | port->ops = &lqasc_pops; | ||
708 | port->fifosize = 16; | ||
709 | port->type = PORT_LTQ_ASC; | ||
710 | port->line = pdev->id; | ||
711 | port->dev = &pdev->dev; | ||
712 | |||
713 | port->irq = tx_irq; /* unused, just to be backward-compatible */ | ||
714 | port->mapbase = mmres->start; | ||
715 | |||
716 | ltq_port->clk = clk; | ||
717 | |||
718 | ltq_port->tx_irq = tx_irq; | ||
719 | ltq_port->rx_irq = rx_irq; | ||
720 | ltq_port->err_irq = err_irq; | ||
721 | |||
722 | lqasc_port[pdev->id] = ltq_port; | ||
723 | platform_set_drvdata(pdev, ltq_port); | ||
724 | |||
725 | ret = uart_add_one_port(&lqasc_reg, port); | ||
726 | |||
727 | return ret; | ||
728 | } | ||
729 | |||
730 | static struct platform_driver lqasc_driver = { | ||
731 | .driver = { | ||
732 | .name = DRVNAME, | ||
733 | .owner = THIS_MODULE, | ||
734 | }, | ||
735 | }; | ||
736 | |||
737 | int __init | ||
738 | init_lqasc(void) | ||
739 | { | ||
740 | int ret; | ||
741 | |||
742 | ret = uart_register_driver(&lqasc_reg); | ||
743 | if (ret != 0) | ||
744 | return ret; | ||
745 | |||
746 | ret = platform_driver_probe(&lqasc_driver, lqasc_probe); | ||
747 | if (ret != 0) | ||
748 | uart_unregister_driver(&lqasc_reg); | ||
749 | |||
750 | return ret; | ||
751 | } | ||
752 | |||
753 | module_init(init_lqasc); | ||
754 | |||
755 | MODULE_DESCRIPTION("Lantiq serial port driver"); | ||
756 | MODULE_LICENSE("GPL"); | ||
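The console write path above (lqasc_console_putchar) busy-waits on the ASC FIFO status register until the TX FIFO reports free space, then writes a single byte. A minimal standalone sketch of that polled-FIFO pattern, with the MMIO accessors replaced by simulated stubs — read_fstat/write_tbuf are illustrative names, not the driver's API, and the mask/shift mirror ASCFSTAT_TXFREEMASK/ASCFSTAT_TXFREEOFF:

    #include <stdint.h>
    #include <stdio.h>

    #define TXFREE_MASK 0x3F00u   /* mirrors ASCFSTAT_TXFREEMASK */
    #define TXFREE_OFF  8         /* mirrors ASCFSTAT_TXFREEOFF  */

    /* Simulated FIFO status: pretend 16 slots are always free. */
    static uint32_t read_fstat(void)  { return 16u << TXFREE_OFF; }
    static void write_tbuf(uint8_t c) { putchar(c); }

    /* Same busy-wait shape as lqasc_console_putchar(). */
    static void putchar_polled(uint8_t ch)
    {
        while (((read_fstat() & TXFREE_MASK) >> TXFREE_OFF) == 0)
            ; /* spin until the TX FIFO has room */
        write_tbuf(ch);
    }

    int main(void)
    {
        const char *s = "hello\n";

        while (*s)
            putchar_polled((uint8_t)*s++);
        return 0;
    }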
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 0e8eec516df4..c911b2419abb 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c | |||
@@ -80,14 +80,17 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev, | |||
80 | /* | 80 | /* |
81 | * Try to register a serial port | 81 | * Try to register a serial port |
82 | */ | 82 | */ |
83 | static struct of_device_id of_platform_serial_table[]; | ||
83 | static int __devinit of_platform_serial_probe(struct platform_device *ofdev) | 84 | static int __devinit of_platform_serial_probe(struct platform_device *ofdev) |
84 | { | 85 | { |
86 | const struct of_device_id *match; | ||
85 | struct of_serial_info *info; | 87 | struct of_serial_info *info; |
86 | struct uart_port port; | 88 | struct uart_port port; |
87 | int port_type; | 89 | int port_type; |
88 | int ret; | 90 | int ret; |
89 | 91 | ||
90 | if (!ofdev->dev.of_match) | 92 | match = of_match_device(of_platform_serial_table, &ofdev->dev); |
93 | if (!match) | ||
91 | return -EINVAL; | 94 | return -EINVAL; |
92 | 95 | ||
93 | if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) | 96 | if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) |
@@ -97,7 +100,7 @@ static int __devinit of_platform_serial_probe(struct platform_device *ofdev) | |||
97 | if (info == NULL) | 100 | if (info == NULL) |
98 | return -ENOMEM; | 101 | return -ENOMEM; |
99 | 102 | ||
100 | port_type = (unsigned long)ofdev->dev.of_match->data; | 103 | port_type = (unsigned long)match->data; |
101 | ret = of_platform_serial_setup(ofdev, port_type, &port); | 104 | ret = of_platform_serial_setup(ofdev, port_type, &port); |
102 | if (ret) | 105 | if (ret) |
103 | goto out; | 106 | goto out; |
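The of_serial change above swaps the stale ofdev->dev.of_match pointer for an explicit of_match_device() lookup against a forward-declared match table. A standalone analogue of that pattern in plain C — my_id/my_dev and the table entries are made up for illustration, and the forward array declaration is the same GCC idiom the kernel code relies on:

    #include <stdio.h>
    #include <string.h>

    struct my_id  { const char *compatible; const void *data; };
    struct my_dev { const char *compatible; };

    static const struct my_id my_match[];      /* forward declaration */

    static const struct my_id *match_device(const struct my_dev *dev)
    {
        const struct my_id *id;

        for (id = my_match; id->compatible; id++)
            if (!strcmp(id->compatible, dev->compatible))
                return id;
        return NULL;                           /* probe returns -EINVAL */
    }

    static const struct my_id my_match[] = {
        { .compatible = "ns16550", .data = "type A" },
        { .compatible = "ns8250",  .data = "type B" },
        { }                                    /* sentinel */
    };

    int main(void)
    {
        struct my_dev dev = { .compatible = "ns8250" };
        const struct my_id *m = match_device(&dev);

        printf("%s\n", m ? (const char *)m->data : "no match");
        return 0;
    }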
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 51fe1795d5a8..d2efe823c20d 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c | |||
@@ -381,7 +381,13 @@ static int uio_get_minor(struct uio_device *idev) | |||
381 | retval = -ENOMEM; | 381 | retval = -ENOMEM; |
382 | goto exit; | 382 | goto exit; |
383 | } | 383 | } |
384 | idev->minor = id & MAX_ID_MASK; | 384 | if (id < UIO_MAX_DEVICES) { |
385 | idev->minor = id; | ||
386 | } else { | ||
387 | dev_err(idev->dev, "too many uio devices\n"); | ||
388 | retval = -EINVAL; | ||
389 | idr_remove(&uio_idr, id); | ||
390 | } | ||
385 | exit: | 391 | exit: |
386 | mutex_unlock(&minor_lock); | 392 | mutex_unlock(&minor_lock); |
387 | return retval; | 393 | return retval; |
@@ -587,14 +593,12 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, | |||
587 | 593 | ||
588 | static int uio_find_mem_index(struct vm_area_struct *vma) | 594 | static int uio_find_mem_index(struct vm_area_struct *vma) |
589 | { | 595 | { |
590 | int mi; | ||
591 | struct uio_device *idev = vma->vm_private_data; | 596 | struct uio_device *idev = vma->vm_private_data; |
592 | 597 | ||
593 | for (mi = 0; mi < MAX_UIO_MAPS; mi++) { | 598 | if (vma->vm_pgoff < MAX_UIO_MAPS) { |
594 | if (idev->info->mem[mi].size == 0) | 599 | if (idev->info->mem[vma->vm_pgoff].size == 0) |
595 | return -1; | 600 | return -1; |
596 | if (vma->vm_pgoff == mi) | 601 | return (int)vma->vm_pgoff; |
597 | return mi; | ||
598 | } | 602 | } |
599 | return -1; | 603 | return -1; |
600 | } | 604 | } |
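The rewritten uio_find_mem_index() above drops the linear scan because each UIO mapping is mmap'd at an offset that encodes its index, so vm_pgoff can be range-checked directly. A plain-C sketch under that assumption, where map_size stands in for idev->info->mem[].size:

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_MAPS 5

    static const size_t map_size[MAX_MAPS] = { 4096, 8192, 0, 0, 0 };

    /* pgoff is the map index itself, so a bounds check plus a
     * non-empty-slot check replaces the old loop. */
    static int find_mem_index(unsigned long pgoff)
    {
        if (pgoff < MAX_MAPS && map_size[pgoff] != 0)
            return (int)pgoff;    /* valid, non-empty slot */
        return -1;                /* out of range or unused slot */
    }

    int main(void)
    {
        printf("%d %d %d\n", find_mem_index(1), find_mem_index(2),
               find_mem_index(7));                /* prints: 1 -1 -1 */
        return 0;
    }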
diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c index 5ffdb483b015..a879fd5741f8 100644 --- a/drivers/uio/uio_netx.c +++ b/drivers/uio/uio_netx.c | |||
@@ -18,6 +18,9 @@ | |||
18 | 18 | ||
19 | #define PCI_VENDOR_ID_HILSCHER 0x15CF | 19 | #define PCI_VENDOR_ID_HILSCHER 0x15CF |
20 | #define PCI_DEVICE_ID_HILSCHER_NETX 0x0000 | 20 | #define PCI_DEVICE_ID_HILSCHER_NETX 0x0000 |
21 | #define PCI_DEVICE_ID_HILSCHER_NETPLC 0x0010 | ||
22 | #define PCI_SUBDEVICE_ID_NETPLC_RAM 0x0000 | ||
23 | #define PCI_SUBDEVICE_ID_NETPLC_FLASH 0x0001 | ||
21 | #define PCI_SUBDEVICE_ID_NXSB_PCA 0x3235 | 24 | #define PCI_SUBDEVICE_ID_NXSB_PCA 0x3235 |
22 | #define PCI_SUBDEVICE_ID_NXPCA 0x3335 | 25 | #define PCI_SUBDEVICE_ID_NXPCA 0x3335 |
23 | 26 | ||
@@ -66,6 +69,10 @@ static int __devinit netx_pci_probe(struct pci_dev *dev, | |||
66 | bar = 0; | 69 | bar = 0; |
67 | info->name = "netx"; | 70 | info->name = "netx"; |
68 | break; | 71 | break; |
72 | case PCI_DEVICE_ID_HILSCHER_NETPLC: | ||
73 | bar = 0; | ||
74 | info->name = "netplc"; | ||
75 | break; | ||
69 | default: | 76 | default: |
70 | bar = 2; | 77 | bar = 2; |
71 | info->name = "netx_plx"; | 78 | info->name = "netx_plx"; |
@@ -134,6 +141,18 @@ static struct pci_device_id netx_pci_ids[] = { | |||
134 | .subdevice = 0, | 141 | .subdevice = 0, |
135 | }, | 142 | }, |
136 | { | 143 | { |
144 | .vendor = PCI_VENDOR_ID_HILSCHER, | ||
145 | .device = PCI_DEVICE_ID_HILSCHER_NETPLC, | ||
146 | .subvendor = PCI_VENDOR_ID_HILSCHER, | ||
147 | .subdevice = PCI_SUBDEVICE_ID_NETPLC_RAM, | ||
148 | }, | ||
149 | { | ||
150 | .vendor = PCI_VENDOR_ID_HILSCHER, | ||
151 | .device = PCI_DEVICE_ID_HILSCHER_NETPLC, | ||
152 | .subvendor = PCI_VENDOR_ID_HILSCHER, | ||
153 | .subdevice = PCI_SUBDEVICE_ID_NETPLC_FLASH, | ||
154 | }, | ||
155 | { | ||
137 | .vendor = PCI_VENDOR_ID_PLX, | 156 | .vendor = PCI_VENDOR_ID_PLX, |
138 | .device = PCI_DEVICE_ID_PLX_9030, | 157 | .device = PCI_DEVICE_ID_PLX_9030, |
139 | .subvendor = PCI_VENDOR_ID_PLX, | 158 | .subvendor = PCI_VENDOR_ID_PLX, |
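The two new netPLC id-table entries above share one device id and are told apart by subdevice (RAM vs flash variant). A plain-C sketch of that vendor/device/subvendor/subdevice discrimination, using the ids from the hunk itself (the struct and helper names are made up):

    #include <stdio.h>

    struct pci_id { unsigned vendor, device, subvendor, subdevice; };

    static const struct pci_id ids[] = {
        { 0x15CF, 0x0010, 0x15CF, 0x0000 },   /* netPLC, RAM variant   */
        { 0x15CF, 0x0010, 0x15CF, 0x0001 },   /* netPLC, flash variant */
    };

    /* Return the index of the first entry matching all four fields. */
    static int match(const struct pci_id *dev)
    {
        for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
            if (ids[i].vendor == dev->vendor &&
                ids[i].device == dev->device &&
                ids[i].subvendor == dev->subvendor &&
                ids[i].subdevice == dev->subdevice)
                return (int)i;
        return -1;
    }

    int main(void)
    {
        struct pci_id flash = { 0x15CF, 0x0010, 0x15CF, 0x0001 };

        printf("matched entry %d\n", match(&flash));   /* prints: 1 */
        return 0;
    }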
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c index 7174d518b8a6..0f424af7f109 100644 --- a/drivers/uio/uio_pdrv_genirq.c +++ b/drivers/uio/uio_pdrv_genirq.c | |||
@@ -189,6 +189,10 @@ static int uio_pdrv_genirq_remove(struct platform_device *pdev) | |||
189 | 189 | ||
190 | uio_unregister_device(priv->uioinfo); | 190 | uio_unregister_device(priv->uioinfo); |
191 | pm_runtime_disable(&pdev->dev); | 191 | pm_runtime_disable(&pdev->dev); |
192 | |||
193 | priv->uioinfo->handler = NULL; | ||
194 | priv->uioinfo->irqcontrol = NULL; | ||
195 | |||
192 | kfree(priv); | 196 | kfree(priv); |
193 | return 0; | 197 | return 0; |
194 | } | 198 | } |
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 36613b37c504..3a68e09309f7 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c | |||
@@ -2539,15 +2539,18 @@ static void qe_udc_release(struct device *dev) | |||
2539 | } | 2539 | } |
2540 | 2540 | ||
2541 | /* Driver probe functions */ | 2541 | /* Driver probe functions */ |
2542 | static const struct of_device_id qe_udc_match[]; | ||
2542 | static int __devinit qe_udc_probe(struct platform_device *ofdev) | 2543 | static int __devinit qe_udc_probe(struct platform_device *ofdev) |
2543 | { | 2544 | { |
2545 | const struct of_device_id *match; | ||
2544 | struct device_node *np = ofdev->dev.of_node; | 2546 | struct device_node *np = ofdev->dev.of_node; |
2545 | struct qe_ep *ep; | 2547 | struct qe_ep *ep; |
2546 | unsigned int ret = 0; | 2548 | unsigned int ret = 0; |
2547 | unsigned int i; | 2549 | unsigned int i; |
2548 | const void *prop; | 2550 | const void *prop; |
2549 | 2551 | ||
2550 | if (!ofdev->dev.of_match) | 2552 | match = of_match_device(qe_udc_match, &ofdev->dev); |
2553 | if (!match) | ||
2551 | return -EINVAL; | 2554 | return -EINVAL; |
2552 | 2555 | ||
2553 | prop = of_get_property(np, "mode", NULL); | 2556 | prop = of_get_property(np, "mode", NULL); |
@@ -2561,7 +2564,7 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev) | |||
2561 | return -ENOMEM; | 2564 | return -ENOMEM; |
2562 | } | 2565 | } |
2563 | 2566 | ||
2564 | udc_controller->soc_type = (unsigned long)ofdev->dev.of_match->data; | 2567 | udc_controller->soc_type = (unsigned long)match->data; |
2565 | udc_controller->usb_regs = of_iomap(np, 0); | 2568 | udc_controller->usb_regs = of_iomap(np, 0); |
2566 | if (!udc_controller->usb_regs) { | 2569 | if (!udc_controller->usb_regs) { |
2567 | ret = -ENOMEM; | 2570 | ret = -ENOMEM; |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 2ab291241635..7aa4eea930f1 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Author: Michael S. Tsirkin <mst@redhat.com> | 4 | * Author: Michael S. Tsirkin <mst@redhat.com> |
5 | * | 5 | * |
6 | * Inspiration, some code, and most witty comments come from | 6 | * Inspiration, some code, and most witty comments come from |
7 | * Documentation/lguest/lguest.c, by Rusty Russell | 7 | * Documentation/virtual/lguest/lguest.c, by Rusty Russell |
8 | * | 8 | * |
9 | * This work is licensed under the terms of the GNU GPL, version 2. | 9 | * This work is licensed under the terms of the GNU GPL, version 2. |
10 | * | 10 | * |
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c index 82acb8dc4aa1..6183a57eb69d 100644 --- a/drivers/video/acornfb.c +++ b/drivers/video/acornfb.c | |||
@@ -66,7 +66,7 @@ | |||
66 | * have. Allow 1% either way on the nominal for TVs. | 66 | * have. Allow 1% either way on the nominal for TVs. |
67 | */ | 67 | */ |
68 | #define NR_MONTYPES 6 | 68 | #define NR_MONTYPES 6 |
69 | static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = { | 69 | static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = { |
70 | { /* TV */ | 70 | { /* TV */ |
71 | .hfmin = 15469, | 71 | .hfmin = 15469, |
72 | .hfmax = 15781, | 72 | .hfmax = 15781, |
@@ -873,7 +873,7 @@ static struct fb_ops acornfb_ops = { | |||
873 | /* | 873 | /* |
874 | * Everything after here is initialisation!!! | 874 | * Everything after here is initialisation!!! |
875 | */ | 875 | */ |
876 | static struct fb_videomode modedb[] __initdata = { | 876 | static struct fb_videomode modedb[] __devinitdata = { |
877 | { /* 320x256 @ 50Hz */ | 877 | { /* 320x256 @ 50Hz */ |
878 | NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2, | 878 | NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2, |
879 | FB_SYNC_COMP_HIGH_ACT, | 879 | FB_SYNC_COMP_HIGH_ACT, |
@@ -925,8 +925,7 @@ static struct fb_videomode modedb[] __initdata = { | |||
925 | } | 925 | } |
926 | }; | 926 | }; |
927 | 927 | ||
928 | static struct fb_videomode __initdata | 928 | static struct fb_videomode acornfb_default_mode __devinitdata = { |
929 | acornfb_default_mode = { | ||
930 | .name = NULL, | 929 | .name = NULL, |
931 | .refresh = 60, | 930 | .refresh = 60, |
932 | .xres = 640, | 931 | .xres = 640, |
@@ -942,7 +941,7 @@ acornfb_default_mode = { | |||
942 | .vmode = FB_VMODE_NONINTERLACED | 941 | .vmode = FB_VMODE_NONINTERLACED |
943 | }; | 942 | }; |
944 | 943 | ||
945 | static void __init acornfb_init_fbinfo(void) | 944 | static void __devinit acornfb_init_fbinfo(void) |
946 | { | 945 | { |
947 | static int first = 1; | 946 | static int first = 1; |
948 | 947 | ||
@@ -1018,8 +1017,7 @@ static void __init acornfb_init_fbinfo(void) | |||
1018 | * size can optionally be followed by 'M' or 'K' for | 1017 | * size can optionally be followed by 'M' or 'K' for |
1019 | * MB or KB respectively. | 1018 | * MB or KB respectively. |
1020 | */ | 1019 | */ |
1021 | static void __init | 1020 | static void __devinit acornfb_parse_mon(char *opt) |
1022 | acornfb_parse_mon(char *opt) | ||
1023 | { | 1021 | { |
1024 | char *p = opt; | 1022 | char *p = opt; |
1025 | 1023 | ||
@@ -1066,8 +1064,7 @@ bad: | |||
1066 | current_par.montype = -1; | 1064 | current_par.montype = -1; |
1067 | } | 1065 | } |
1068 | 1066 | ||
1069 | static void __init | 1067 | static void __devinit acornfb_parse_montype(char *opt) |
1070 | acornfb_parse_montype(char *opt) | ||
1071 | { | 1068 | { |
1072 | current_par.montype = -2; | 1069 | current_par.montype = -2; |
1073 | 1070 | ||
@@ -1108,8 +1105,7 @@ acornfb_parse_montype(char *opt) | |||
1108 | } | 1105 | } |
1109 | } | 1106 | } |
1110 | 1107 | ||
1111 | static void __init | 1108 | static void __devinit acornfb_parse_dram(char *opt) |
1112 | acornfb_parse_dram(char *opt) | ||
1113 | { | 1109 | { |
1114 | unsigned int size; | 1110 | unsigned int size; |
1115 | 1111 | ||
@@ -1134,15 +1130,14 @@ acornfb_parse_dram(char *opt) | |||
1134 | static struct options { | 1130 | static struct options { |
1135 | char *name; | 1131 | char *name; |
1136 | void (*parse)(char *opt); | 1132 | void (*parse)(char *opt); |
1137 | } opt_table[] __initdata = { | 1133 | } opt_table[] __devinitdata = { |
1138 | { "mon", acornfb_parse_mon }, | 1134 | { "mon", acornfb_parse_mon }, |
1139 | { "montype", acornfb_parse_montype }, | 1135 | { "montype", acornfb_parse_montype }, |
1140 | { "dram", acornfb_parse_dram }, | 1136 | { "dram", acornfb_parse_dram }, |
1141 | { NULL, NULL } | 1137 | { NULL, NULL } |
1142 | }; | 1138 | }; |
1143 | 1139 | ||
1144 | int __init | 1140 | static int __devinit acornfb_setup(char *options) |
1145 | acornfb_setup(char *options) | ||
1146 | { | 1141 | { |
1147 | struct options *optp; | 1142 | struct options *optp; |
1148 | char *opt; | 1143 | char *opt; |
@@ -1179,8 +1174,7 @@ acornfb_setup(char *options) | |||
1179 | * Detect type of monitor connected | 1174 | * Detect type of monitor connected |
1180 | * For now, we just assume SVGA | 1175 | * For now, we just assume SVGA |
1181 | */ | 1176 | */ |
1182 | static int __init | 1177 | static int __devinit acornfb_detect_monitortype(void) |
1183 | acornfb_detect_monitortype(void) | ||
1184 | { | 1178 | { |
1185 | return 4; | 1179 | return 4; |
1186 | } | 1180 | } |
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c index 5b2b5ef4edba..64e41f5448c4 100644 --- a/drivers/video/atafb.c +++ b/drivers/video/atafb.c | |||
@@ -3117,7 +3117,7 @@ int __init atafb_init(void) | |||
3117 | atafb_ops.fb_setcolreg = &falcon_setcolreg; | 3117 | atafb_ops.fb_setcolreg = &falcon_setcolreg; |
3118 | error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, | 3118 | error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, |
3119 | IRQ_TYPE_PRIO, | 3119 | IRQ_TYPE_PRIO, |
3120 | "framebuffer/modeswitch", | 3120 | "framebuffer:modeswitch", |
3121 | falcon_vbl_switcher); | 3121 | falcon_vbl_switcher); |
3122 | if (error) | 3122 | if (error) |
3123 | return error; | 3123 | return error; |
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index e0c2284924b6..5aac00eb1830 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c | |||
@@ -42,9 +42,34 @@ | |||
42 | 42 | ||
43 | #define FBPIXMAPSIZE (1024 * 8) | 43 | #define FBPIXMAPSIZE (1024 * 8) |
44 | 44 | ||
45 | static DEFINE_MUTEX(registration_lock); | ||
45 | struct fb_info *registered_fb[FB_MAX] __read_mostly; | 46 | struct fb_info *registered_fb[FB_MAX] __read_mostly; |
46 | int num_registered_fb __read_mostly; | 47 | int num_registered_fb __read_mostly; |
47 | 48 | ||
49 | static struct fb_info *get_fb_info(unsigned int idx) | ||
50 | { | ||
51 | struct fb_info *fb_info; | ||
52 | |||
53 | if (idx >= FB_MAX) | ||
54 | return ERR_PTR(-ENODEV); | ||
55 | |||
56 | mutex_lock(&registration_lock); | ||
57 | fb_info = registered_fb[idx]; | ||
58 | if (fb_info) | ||
59 | atomic_inc(&fb_info->count); | ||
60 | mutex_unlock(&registration_lock); | ||
61 | |||
62 | return fb_info; | ||
63 | } | ||
64 | |||
65 | static void put_fb_info(struct fb_info *fb_info) | ||
66 | { | ||
67 | if (!atomic_dec_and_test(&fb_info->count)) | ||
68 | return; | ||
69 | if (fb_info->fbops->fb_destroy) | ||
70 | fb_info->fbops->fb_destroy(fb_info); | ||
71 | } | ||
72 | |||
48 | int lock_fb_info(struct fb_info *info) | 73 | int lock_fb_info(struct fb_info *info) |
49 | { | 74 | { |
50 | mutex_lock(&info->lock); | 75 | mutex_lock(&info->lock); |
@@ -647,6 +672,7 @@ int fb_show_logo(struct fb_info *info, int rotate) { return 0; } | |||
647 | 672 | ||
648 | static void *fb_seq_start(struct seq_file *m, loff_t *pos) | 673 | static void *fb_seq_start(struct seq_file *m, loff_t *pos) |
649 | { | 674 | { |
675 | mutex_lock(&registration_lock); | ||
650 | return (*pos < FB_MAX) ? pos : NULL; | 676 | return (*pos < FB_MAX) ? pos : NULL; |
651 | } | 677 | } |
652 | 678 | ||
@@ -658,6 +684,7 @@ static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos) | |||
658 | 684 | ||
659 | static void fb_seq_stop(struct seq_file *m, void *v) | 685 | static void fb_seq_stop(struct seq_file *m, void *v) |
660 | { | 686 | { |
687 | mutex_unlock(&registration_lock); | ||
661 | } | 688 | } |
662 | 689 | ||
663 | static int fb_seq_show(struct seq_file *m, void *v) | 690 | static int fb_seq_show(struct seq_file *m, void *v) |
@@ -690,13 +717,30 @@ static const struct file_operations fb_proc_fops = { | |||
690 | .release = seq_release, | 717 | .release = seq_release, |
691 | }; | 718 | }; |
692 | 719 | ||
693 | static ssize_t | 720 | /* |
694 | fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | 721 | * We hold a reference to the fb_info in file->private_data, |
722 | * but if the current registered fb has changed, we don't | ||
723 | * actually want to use it. | ||
724 | * | ||
725 | * So look up the fb_info using the inode minor number, | ||
726 | * and just verify it against the reference we have. | ||
727 | */ | ||
728 | static struct fb_info *file_fb_info(struct file *file) | ||
695 | { | 729 | { |
696 | unsigned long p = *ppos; | ||
697 | struct inode *inode = file->f_path.dentry->d_inode; | 730 | struct inode *inode = file->f_path.dentry->d_inode; |
698 | int fbidx = iminor(inode); | 731 | int fbidx = iminor(inode); |
699 | struct fb_info *info = registered_fb[fbidx]; | 732 | struct fb_info *info = registered_fb[fbidx]; |
733 | |||
734 | if (info != file->private_data) | ||
735 | info = NULL; | ||
736 | return info; | ||
737 | } | ||
738 | |||
739 | static ssize_t | ||
740 | fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | ||
741 | { | ||
742 | unsigned long p = *ppos; | ||
743 | struct fb_info *info = file_fb_info(file); | ||
700 | u8 *buffer, *dst; | 744 | u8 *buffer, *dst; |
701 | u8 __iomem *src; | 745 | u8 __iomem *src; |
702 | int c, cnt = 0, err = 0; | 746 | int c, cnt = 0, err = 0; |
@@ -761,9 +805,7 @@ static ssize_t | |||
761 | fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) | 805 | fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) |
762 | { | 806 | { |
763 | unsigned long p = *ppos; | 807 | unsigned long p = *ppos; |
764 | struct inode *inode = file->f_path.dentry->d_inode; | 808 | struct fb_info *info = file_fb_info(file); |
765 | int fbidx = iminor(inode); | ||
766 | struct fb_info *info = registered_fb[fbidx]; | ||
767 | u8 *buffer, *src; | 809 | u8 *buffer, *src; |
768 | u8 __iomem *dst; | 810 | u8 __iomem *dst; |
769 | int c, cnt = 0, err = 0; | 811 | int c, cnt = 0, err = 0; |
@@ -1141,10 +1183,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, | |||
1141 | 1183 | ||
1142 | static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 1184 | static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
1143 | { | 1185 | { |
1144 | struct inode *inode = file->f_path.dentry->d_inode; | 1186 | struct fb_info *info = file_fb_info(file); |
1145 | int fbidx = iminor(inode); | ||
1146 | struct fb_info *info = registered_fb[fbidx]; | ||
1147 | 1187 | ||
1188 | if (!info) | ||
1189 | return -ENODEV; | ||
1148 | return do_fb_ioctl(info, cmd, arg); | 1190 | return do_fb_ioctl(info, cmd, arg); |
1149 | } | 1191 | } |
1150 | 1192 | ||
@@ -1265,12 +1307,13 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd, | |||
1265 | static long fb_compat_ioctl(struct file *file, unsigned int cmd, | 1307 | static long fb_compat_ioctl(struct file *file, unsigned int cmd, |
1266 | unsigned long arg) | 1308 | unsigned long arg) |
1267 | { | 1309 | { |
1268 | struct inode *inode = file->f_path.dentry->d_inode; | 1310 | struct fb_info *info = file_fb_info(file); |
1269 | int fbidx = iminor(inode); | 1311 | struct fb_ops *fb; |
1270 | struct fb_info *info = registered_fb[fbidx]; | ||
1271 | struct fb_ops *fb = info->fbops; | ||
1272 | long ret = -ENOIOCTLCMD; | 1312 | long ret = -ENOIOCTLCMD; |
1273 | 1313 | ||
1314 | if (!info) | ||
1315 | return -ENODEV; | ||
1316 | fb = info->fbops; | ||
1274 | switch(cmd) { | 1317 | switch(cmd) { |
1275 | case FBIOGET_VSCREENINFO: | 1318 | case FBIOGET_VSCREENINFO: |
1276 | case FBIOPUT_VSCREENINFO: | 1319 | case FBIOPUT_VSCREENINFO: |
@@ -1303,16 +1346,18 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd, | |||
1303 | static int | 1346 | static int |
1304 | fb_mmap(struct file *file, struct vm_area_struct * vma) | 1347 | fb_mmap(struct file *file, struct vm_area_struct * vma) |
1305 | { | 1348 | { |
1306 | int fbidx = iminor(file->f_path.dentry->d_inode); | 1349 | struct fb_info *info = file_fb_info(file); |
1307 | struct fb_info *info = registered_fb[fbidx]; | 1350 | struct fb_ops *fb; |
1308 | struct fb_ops *fb = info->fbops; | ||
1309 | unsigned long off; | 1351 | unsigned long off; |
1310 | unsigned long start; | 1352 | unsigned long start; |
1311 | u32 len; | 1353 | u32 len; |
1312 | 1354 | ||
1355 | if (!info) | ||
1356 | return -ENODEV; | ||
1313 | if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) | 1357 | if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) |
1314 | return -EINVAL; | 1358 | return -EINVAL; |
1315 | off = vma->vm_pgoff << PAGE_SHIFT; | 1359 | off = vma->vm_pgoff << PAGE_SHIFT; |
1360 | fb = info->fbops; | ||
1316 | if (!fb) | 1361 | if (!fb) |
1317 | return -ENODEV; | 1362 | return -ENODEV; |
1318 | mutex_lock(&info->mm_lock); | 1363 | mutex_lock(&info->mm_lock); |
@@ -1361,14 +1406,16 @@ __releases(&info->lock) | |||
1361 | struct fb_info *info; | 1406 | struct fb_info *info; |
1362 | int res = 0; | 1407 | int res = 0; |
1363 | 1408 | ||
1364 | if (fbidx >= FB_MAX) | 1409 | info = get_fb_info(fbidx); |
1365 | return -ENODEV; | 1410 | if (!info) { |
1366 | info = registered_fb[fbidx]; | ||
1367 | if (!info) | ||
1368 | request_module("fb%d", fbidx); | 1411 | request_module("fb%d", fbidx); |
1369 | info = registered_fb[fbidx]; | 1412 | info = get_fb_info(fbidx); |
1370 | if (!info) | 1413 | if (!info) |
1371 | return -ENODEV; | 1414 | return -ENODEV; |
1415 | } | ||
1416 | if (IS_ERR(info)) | ||
1417 | return PTR_ERR(info); | ||
1418 | |||
1372 | mutex_lock(&info->lock); | 1419 | mutex_lock(&info->lock); |
1373 | if (!try_module_get(info->fbops->owner)) { | 1420 | if (!try_module_get(info->fbops->owner)) { |
1374 | res = -ENODEV; | 1421 | res = -ENODEV; |
@@ -1386,6 +1433,8 @@ __releases(&info->lock) | |||
1386 | #endif | 1433 | #endif |
1387 | out: | 1434 | out: |
1388 | mutex_unlock(&info->lock); | 1435 | mutex_unlock(&info->lock); |
1436 | if (res) | ||
1437 | put_fb_info(info); | ||
1389 | return res; | 1438 | return res; |
1390 | } | 1439 | } |
1391 | 1440 | ||
@@ -1401,6 +1450,7 @@ __releases(&info->lock) | |||
1401 | info->fbops->fb_release(info,1); | 1450 | info->fbops->fb_release(info,1); |
1402 | module_put(info->fbops->owner); | 1451 | module_put(info->fbops->owner); |
1403 | mutex_unlock(&info->lock); | 1452 | mutex_unlock(&info->lock); |
1453 | put_fb_info(info); | ||
1404 | return 0; | 1454 | return 0; |
1405 | } | 1455 | } |
1406 | 1456 | ||
@@ -1487,8 +1537,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena, | |||
1487 | return false; | 1537 | return false; |
1488 | } | 1538 | } |
1489 | 1539 | ||
1540 | static int do_unregister_framebuffer(struct fb_info *fb_info); | ||
1541 | |||
1490 | #define VGA_FB_PHYS 0xA0000 | 1542 | #define VGA_FB_PHYS 0xA0000 |
1491 | void remove_conflicting_framebuffers(struct apertures_struct *a, | 1543 | static void do_remove_conflicting_framebuffers(struct apertures_struct *a, |
1492 | const char *name, bool primary) | 1544 | const char *name, bool primary) |
1493 | { | 1545 | { |
1494 | int i; | 1546 | int i; |
@@ -1510,43 +1562,32 @@ void remove_conflicting_framebuffers(struct apertures_struct *a, | |||
1510 | printk(KERN_INFO "fb: conflicting fb hw usage " | 1562 | printk(KERN_INFO "fb: conflicting fb hw usage " |
1511 | "%s vs %s - removing generic driver\n", | 1563 | "%s vs %s - removing generic driver\n", |
1512 | name, registered_fb[i]->fix.id); | 1564 | name, registered_fb[i]->fix.id); |
1513 | unregister_framebuffer(registered_fb[i]); | 1565 | do_unregister_framebuffer(registered_fb[i]); |
1514 | } | 1566 | } |
1515 | } | 1567 | } |
1516 | } | 1568 | } |
1517 | EXPORT_SYMBOL(remove_conflicting_framebuffers); | ||
1518 | 1569 | ||
1519 | /** | 1570 | static int do_register_framebuffer(struct fb_info *fb_info) |
1520 | * register_framebuffer - registers a frame buffer device | ||
1521 | * @fb_info: frame buffer info structure | ||
1522 | * | ||
1523 | * Registers a frame buffer device @fb_info. | ||
1524 | * | ||
1525 | * Returns negative errno on error, or zero for success. | ||
1526 | * | ||
1527 | */ | ||
1528 | |||
1529 | int | ||
1530 | register_framebuffer(struct fb_info *fb_info) | ||
1531 | { | 1571 | { |
1532 | int i; | 1572 | int i; |
1533 | struct fb_event event; | 1573 | struct fb_event event; |
1534 | struct fb_videomode mode; | 1574 | struct fb_videomode mode; |
1535 | 1575 | ||
1536 | if (num_registered_fb == FB_MAX) | ||
1537 | return -ENXIO; | ||
1538 | |||
1539 | if (fb_check_foreignness(fb_info)) | 1576 | if (fb_check_foreignness(fb_info)) |
1540 | return -ENOSYS; | 1577 | return -ENOSYS; |
1541 | 1578 | ||
1542 | remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, | 1579 | do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, |
1543 | fb_is_primary_device(fb_info)); | 1580 | fb_is_primary_device(fb_info)); |
1544 | 1581 | ||
1582 | if (num_registered_fb == FB_MAX) | ||
1583 | return -ENXIO; | ||
1584 | |||
1545 | num_registered_fb++; | 1585 | num_registered_fb++; |
1546 | for (i = 0 ; i < FB_MAX; i++) | 1586 | for (i = 0 ; i < FB_MAX; i++) |
1547 | if (!registered_fb[i]) | 1587 | if (!registered_fb[i]) |
1548 | break; | 1588 | break; |
1549 | fb_info->node = i; | 1589 | fb_info->node = i; |
1590 | atomic_set(&fb_info->count, 1); | ||
1550 | mutex_init(&fb_info->lock); | 1591 | mutex_init(&fb_info->lock); |
1551 | mutex_init(&fb_info->mm_lock); | 1592 | mutex_init(&fb_info->mm_lock); |
1552 | 1593 | ||
@@ -1592,36 +1633,14 @@ register_framebuffer(struct fb_info *fb_info) | |||
1592 | return 0; | 1633 | return 0; |
1593 | } | 1634 | } |
1594 | 1635 | ||
1595 | 1636 | static int do_unregister_framebuffer(struct fb_info *fb_info) | |
1596 | /** | ||
1597 | * unregister_framebuffer - releases a frame buffer device | ||
1598 | * @fb_info: frame buffer info structure | ||
1599 | * | ||
1600 | * Unregisters a frame buffer device @fb_info. | ||
1601 | * | ||
1602 | * Returns negative errno on error, or zero for success. | ||
1603 | * | ||
1604 | * This function will also notify the framebuffer console | ||
1605 | * to release the driver. | ||
1606 | * | ||
1607 | * This is meant to be called within a driver's module_exit() | ||
1608 | * function. If this is called outside module_exit(), ensure | ||
1609 | * that the driver implements fb_open() and fb_release() to | ||
1610 | * check that no processes are using the device. | ||
1611 | */ | ||
1612 | |||
1613 | int | ||
1614 | unregister_framebuffer(struct fb_info *fb_info) | ||
1615 | { | 1637 | { |
1616 | struct fb_event event; | 1638 | struct fb_event event; |
1617 | int i, ret = 0; | 1639 | int i, ret = 0; |
1618 | 1640 | ||
1619 | i = fb_info->node; | 1641 | i = fb_info->node; |
1620 | if (!registered_fb[i]) { | 1642 | if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) |
1621 | ret = -EINVAL; | 1643 | return -EINVAL; |
1622 | goto done; | ||
1623 | } | ||
1624 | |||
1625 | 1644 | ||
1626 | if (!lock_fb_info(fb_info)) | 1645 | if (!lock_fb_info(fb_info)) |
1627 | return -ENODEV; | 1646 | return -ENODEV; |
@@ -1629,16 +1648,14 @@ unregister_framebuffer(struct fb_info *fb_info) | |||
1629 | ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); | 1648 | ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); |
1630 | unlock_fb_info(fb_info); | 1649 | unlock_fb_info(fb_info); |
1631 | 1650 | ||
1632 | if (ret) { | 1651 | if (ret) |
1633 | ret = -EINVAL; | 1652 | return -EINVAL; |
1634 | goto done; | ||
1635 | } | ||
1636 | 1653 | ||
1637 | if (fb_info->pixmap.addr && | 1654 | if (fb_info->pixmap.addr && |
1638 | (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) | 1655 | (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) |
1639 | kfree(fb_info->pixmap.addr); | 1656 | kfree(fb_info->pixmap.addr); |
1640 | fb_destroy_modelist(&fb_info->modelist); | 1657 | fb_destroy_modelist(&fb_info->modelist); |
1641 | registered_fb[i]=NULL; | 1658 | registered_fb[i] = NULL; |
1642 | num_registered_fb--; | 1659 | num_registered_fb--; |
1643 | fb_cleanup_device(fb_info); | 1660 | fb_cleanup_device(fb_info); |
1644 | device_destroy(fb_class, MKDEV(FB_MAJOR, i)); | 1661 | device_destroy(fb_class, MKDEV(FB_MAJOR, i)); |
@@ -1646,9 +1663,65 @@ unregister_framebuffer(struct fb_info *fb_info) | |||
1646 | fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); | 1663 | fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); |
1647 | 1664 | ||
1648 | /* this may free fb info */ | 1665 | /* this may free fb info */ |
1649 | if (fb_info->fbops->fb_destroy) | 1666 | put_fb_info(fb_info); |
1650 | fb_info->fbops->fb_destroy(fb_info); | 1667 | return 0; |
1651 | done: | 1668 | } |
1669 | |||
1670 | void remove_conflicting_framebuffers(struct apertures_struct *a, | ||
1671 | const char *name, bool primary) | ||
1672 | { | ||
1673 | mutex_lock(&registration_lock); | ||
1674 | do_remove_conflicting_framebuffers(a, name, primary); | ||
1675 | mutex_unlock(&registration_lock); | ||
1676 | } | ||
1677 | EXPORT_SYMBOL(remove_conflicting_framebuffers); | ||
1678 | |||
1679 | /** | ||
1680 | * register_framebuffer - registers a frame buffer device | ||
1681 | * @fb_info: frame buffer info structure | ||
1682 | * | ||
1683 | * Registers a frame buffer device @fb_info. | ||
1684 | * | ||
1685 | * Returns negative errno on error, or zero for success. | ||
1686 | * | ||
1687 | */ | ||
1688 | int | ||
1689 | register_framebuffer(struct fb_info *fb_info) | ||
1690 | { | ||
1691 | int ret; | ||
1692 | |||
1693 | mutex_lock(&registration_lock); | ||
1694 | ret = do_register_framebuffer(fb_info); | ||
1695 | mutex_unlock(&registration_lock); | ||
1696 | |||
1697 | return ret; | ||
1698 | } | ||
1699 | |||
1700 | /** | ||
1701 | * unregister_framebuffer - releases a frame buffer device | ||
1702 | * @fb_info: frame buffer info structure | ||
1703 | * | ||
1704 | * Unregisters a frame buffer device @fb_info. | ||
1705 | * | ||
1706 | * Returns negative errno on error, or zero for success. | ||
1707 | * | ||
1708 | * This function will also notify the framebuffer console | ||
1709 | * to release the driver. | ||
1710 | * | ||
1711 | * This is meant to be called within a driver's module_exit() | ||
1712 | * function. If this is called outside module_exit(), ensure | ||
1713 | * that the driver implements fb_open() and fb_release() to | ||
1714 | * check that no processes are using the device. | ||
1715 | */ | ||
1716 | int | ||
1717 | unregister_framebuffer(struct fb_info *fb_info) | ||
1718 | { | ||
1719 | int ret; | ||
1720 | |||
1721 | mutex_lock(&registration_lock); | ||
1722 | ret = do_unregister_framebuffer(fb_info); | ||
1723 | mutex_unlock(&registration_lock); | ||
1724 | |||
1652 | return ret; | 1725 | return ret; |
1653 | } | 1726 | } |
1654 | 1727 | ||
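The fbmem changes above hang together as one pattern: lookups take a reference under registration_lock, and the final put_fb_info() runs fb_destroy. A minimal userspace sketch of that get/put shape using pthreads — obj/table are illustrative names, and unlike the kernel code (which uses atomic_dec_and_test) this sketch reuses the registration lock for the decrement:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refs;
        void (*destroy)(struct obj *);
    };

    #define MAX_OBJS 4
    static struct obj *table[MAX_OBJS];
    static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct obj *get_obj(unsigned int idx)
    {
        struct obj *o = NULL;

        if (idx >= MAX_OBJS)
            return NULL;
        pthread_mutex_lock(&reg_lock);
        o = table[idx];
        if (o)
            o->refs++;        /* reference taken while the table is stable */
        pthread_mutex_unlock(&reg_lock);
        return o;
    }

    static void put_obj(struct obj *o)
    {
        int last;

        pthread_mutex_lock(&reg_lock);
        last = (--o->refs == 0);
        pthread_mutex_unlock(&reg_lock);
        if (last && o->destroy)
            o->destroy(o);    /* may free o, just as fb_destroy may */
    }

    static void destroy(struct obj *o) { printf("destroyed\n"); free(o); }

    int main(void)
    {
        struct obj *o;

        table[0] = calloc(1, sizeof(*table[0]));
        if (!table[0])
            return 1;
        table[0]->refs = 1;              /* registration holds one ref */
        table[0]->destroy = destroy;

        o = get_obj(0);                  /* refs: 2 */
        put_obj(o);                      /* refs: 1 */
        put_obj(table[0]);               /* refs: 0 -> destroyed */
        table[0] = NULL;                 /* "unregister" the slot */
        return 0;
    }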
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 1b0f98bc51b5..022f9eb0b7bf 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -990,6 +990,12 @@ config BCM63XX_WDT | |||
990 | To compile this driver as a loadable module, choose M here. | 990 | To compile this driver as a loadable module, choose M here. |
991 | The module will be called bcm63xx_wdt. | 991 | The module will be called bcm63xx_wdt. |
992 | 992 | ||
993 | config LANTIQ_WDT | ||
994 | tristate "Lantiq SoC watchdog" | ||
995 | depends on LANTIQ | ||
996 | help | ||
997 | Hardware driver for the Lantiq SoC Watchdog Timer. | ||
998 | |||
993 | # PARISC Architecture | 999 | # PARISC Architecture |
994 | 1000 | ||
995 | # POWERPC Architecture | 1001 | # POWERPC Architecture |
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 3f8608b922a7..ed26f7094e47 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
@@ -123,6 +123,7 @@ obj-$(CONFIG_AR7_WDT) += ar7_wdt.o | |||
123 | obj-$(CONFIG_TXX9_WDT) += txx9wdt.o | 123 | obj-$(CONFIG_TXX9_WDT) += txx9wdt.o |
124 | obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o | 124 | obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o |
125 | octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o | 125 | octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o |
126 | obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o | ||
126 | 127 | ||
127 | # PARISC Architecture | 128 | # PARISC Architecture |
128 | 129 | ||
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c new file mode 100644 index 000000000000..7d82adac1cb2 --- /dev/null +++ b/drivers/watchdog/lantiq_wdt.c | |||
@@ -0,0 +1,261 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | * Based on EP93xx wdt driver | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/miscdevice.h> | ||
13 | #include <linux/watchdog.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/uaccess.h> | ||
16 | #include <linux/clk.h> | ||
17 | #include <linux/io.h> | ||
18 | |||
19 | #include <lantiq.h> | ||
20 | |||
21 | /* Section 3.4 of the datasheet | ||
22 | * The password sequence protects the WDT control register from unintended | ||
23 | * write actions, which might cause malfunction of the WDT. | ||
24 | * | ||
25 | * Essentially, the following two magic passwords need to be written to | ||
26 | * allow I/O access to the WDT core. | ||
27 | */ | ||
28 | #define LTQ_WDT_PW1 0x00BE0000 | ||
29 | #define LTQ_WDT_PW2 0x00DC0000 | ||
30 | |||
31 | #define LTQ_WDT_CR 0x0 /* watchdog control register */ | ||
32 | #define LTQ_WDT_SR 0x8 /* watchdog status register */ | ||
33 | |||
34 | #define LTQ_WDT_SR_EN (0x1 << 31) /* enable bit */ | ||
35 | #define LTQ_WDT_SR_PWD (0x3 << 26) /* turn on power */ | ||
36 | #define LTQ_WDT_SR_CLKDIV (0x3 << 24) /* turn on clock and set */ | ||
37 | /* divider to 0x40000 */ | ||
38 | #define LTQ_WDT_DIVIDER 0x40000 | ||
39 | #define LTQ_MAX_TIMEOUT ((1 << 16) - 1) /* the reload field is 16 bit */ | ||
40 | |||
41 | static int nowayout = WATCHDOG_NOWAYOUT; | ||
42 | |||
43 | static void __iomem *ltq_wdt_membase; | ||
44 | static unsigned long ltq_io_region_clk_rate; | ||
45 | |||
46 | static unsigned long ltq_wdt_bootstatus; | ||
47 | static unsigned long ltq_wdt_in_use; | ||
48 | static int ltq_wdt_timeout = 30; | ||
49 | static int ltq_wdt_ok_to_close; | ||
50 | |||
51 | static void | ||
52 | ltq_wdt_enable(void) | ||
53 | { | ||
54 | unsigned long timeout = ltq_wdt_timeout * | ||
55 | (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000; | ||
56 | if (timeout > LTQ_MAX_TIMEOUT) | ||
57 | timeout = LTQ_MAX_TIMEOUT; | ||
58 | |||
59 | /* write the first password magic */ | ||
60 | ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); | ||
61 | /* write the second magic plus the configuration and new timeout */ | ||
62 | ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV | | ||
63 | LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR); | ||
64 | } | ||
65 | |||
66 | static void | ||
67 | ltq_wdt_disable(void) | ||
68 | { | ||
69 | /* write the first password magic */ | ||
70 | ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); | ||
71 | /* write the second password magic with no config; | ||
72 | * this turns the watchdog off | ||
73 | */ | ||
74 | ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR); | ||
75 | } | ||
76 | |||
77 | static ssize_t | ||
78 | ltq_wdt_write(struct file *file, const char __user *data, | ||
79 | size_t len, loff_t *ppos) | ||
80 | { | ||
81 | if (len) { | ||
82 | if (!nowayout) { | ||
83 | size_t i; | ||
84 | |||
85 | ltq_wdt_ok_to_close = 0; | ||
86 | for (i = 0; i != len; i++) { | ||
87 | char c; | ||
88 | |||
89 | if (get_user(c, data + i)) | ||
90 | return -EFAULT; | ||
91 | if (c == 'V') | ||
92 | ltq_wdt_ok_to_close = 1; | ||
93 | else | ||
94 | ltq_wdt_ok_to_close = 0; | ||
95 | } | ||
96 | } | ||
97 | ltq_wdt_enable(); | ||
98 | } | ||
99 | |||
100 | return len; | ||
101 | } | ||
102 | |||
103 | static struct watchdog_info ident = { | ||
104 | .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | | ||
105 | WDIOF_CARDRESET, | ||
106 | .identity = "ltq_wdt", | ||
107 | }; | ||
108 | |||
109 | static long | ||
110 | ltq_wdt_ioctl(struct file *file, | ||
111 | unsigned int cmd, unsigned long arg) | ||
112 | { | ||
113 | int ret = -ENOTTY; | ||
114 | |||
115 | switch (cmd) { | ||
116 | case WDIOC_GETSUPPORT: | ||
117 | ret = copy_to_user((struct watchdog_info __user *)arg, &ident, | ||
118 | sizeof(ident)) ? -EFAULT : 0; | ||
119 | break; | ||
120 | |||
121 | case WDIOC_GETBOOTSTATUS: | ||
122 | ret = put_user(ltq_wdt_bootstatus, (int __user *)arg); | ||
123 | break; | ||
124 | |||
125 | case WDIOC_GETSTATUS: | ||
126 | ret = put_user(0, (int __user *)arg); | ||
127 | break; | ||
128 | |||
129 | case WDIOC_SETTIMEOUT: | ||
130 | ret = get_user(ltq_wdt_timeout, (int __user *)arg); | ||
131 | if (!ret) | ||
132 | ltq_wdt_enable(); | ||
133 | /* intentional drop through */ | ||
134 | case WDIOC_GETTIMEOUT: | ||
135 | ret = put_user(ltq_wdt_timeout, (int __user *)arg); | ||
136 | break; | ||
137 | |||
138 | case WDIOC_KEEPALIVE: | ||
139 | ltq_wdt_enable(); | ||
140 | ret = 0; | ||
141 | break; | ||
142 | } | ||
143 | return ret; | ||
144 | } | ||
145 | |||
146 | static int | ||
147 | ltq_wdt_open(struct inode *inode, struct file *file) | ||
148 | { | ||
149 | if (test_and_set_bit(0, &ltq_wdt_in_use)) | ||
150 | return -EBUSY; | ||
151 | |||
152 | ltq_wdt_enable(); | ||
153 | |||
154 | return nonseekable_open(inode, file); | ||
155 | } | ||
156 | |||
157 | static int | ||
158 | ltq_wdt_release(struct inode *inode, struct file *file) | ||
159 | { | ||
160 | if (ltq_wdt_ok_to_close) | ||
161 | ltq_wdt_disable(); | ||
162 | else | ||
163 | pr_err("ltq_wdt: watchdog closed without warning\n"); | ||
164 | ltq_wdt_ok_to_close = 0; | ||
165 | clear_bit(0, &ltq_wdt_in_use); | ||
166 | |||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | static const struct file_operations ltq_wdt_fops = { | ||
171 | .owner = THIS_MODULE, | ||
172 | .write = ltq_wdt_write, | ||
173 | .unlocked_ioctl = ltq_wdt_ioctl, | ||
174 | .open = ltq_wdt_open, | ||
175 | .release = ltq_wdt_release, | ||
176 | .llseek = no_llseek, | ||
177 | }; | ||
178 | |||
179 | static struct miscdevice ltq_wdt_miscdev = { | ||
180 | .minor = WATCHDOG_MINOR, | ||
181 | .name = "watchdog", | ||
182 | .fops = &ltq_wdt_fops, | ||
183 | }; | ||
184 | |||
185 | static int __init | ||
186 | ltq_wdt_probe(struct platform_device *pdev) | ||
187 | { | ||
188 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
189 | struct clk *clk; | ||
190 | |||
191 | if (!res) { | ||
192 | dev_err(&pdev->dev, "cannot obtain I/O memory region\n"); | ||
193 | return -ENOENT; | ||
194 | } | ||
195 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
196 | resource_size(res), dev_name(&pdev->dev)); | ||
197 | if (!res) { | ||
198 | dev_err(&pdev->dev, "cannot request I/O memory region\n"); | ||
199 | return -EBUSY; | ||
200 | } | ||
201 | ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start, | ||
202 | resource_size(res)); | ||
203 | if (!ltq_wdt_membase) { | ||
204 | dev_err(&pdev->dev, "cannot remap I/O memory region\n"); | ||
205 | return -ENOMEM; | ||
206 | } | ||
207 | |||
208 | /* we do not need to enable the clock as it is always running */ | ||
209 | clk = clk_get(&pdev->dev, "io"); | ||
210 | WARN_ON(!clk); | ||
211 | ltq_io_region_clk_rate = clk_get_rate(clk); | ||
212 | clk_put(clk); | ||
213 | |||
214 | if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST) | ||
215 | ltq_wdt_bootstatus = WDIOF_CARDRESET; | ||
216 | |||
217 | return misc_register(&ltq_wdt_miscdev); | ||
218 | } | ||
219 | |||
220 | static int __devexit | ||
221 | ltq_wdt_remove(struct platform_device *pdev) | ||
222 | { | ||
223 | misc_deregister(&ltq_wdt_miscdev); | ||
224 | |||
225 | /* ltq_wdt_membase is devm-managed; no explicit iounmap() here */ | ||
226 | |||
227 | return 0; | ||
229 | } | ||
230 | |||
231 | |||
232 | static struct platform_driver ltq_wdt_driver = { | ||
233 | .remove = __devexit_p(ltq_wdt_remove), | ||
234 | .driver = { | ||
235 | .name = "ltq_wdt", | ||
236 | .owner = THIS_MODULE, | ||
237 | }, | ||
238 | }; | ||
239 | |||
240 | static int __init | ||
241 | init_ltq_wdt(void) | ||
242 | { | ||
243 | return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe); | ||
244 | } | ||
245 | |||
246 | static void __exit | ||
247 | exit_ltq_wdt(void) | ||
248 | { | ||
249 | platform_driver_unregister(&ltq_wdt_driver); | ||
250 | } | ||
251 | |||
252 | module_init(init_ltq_wdt); | ||
253 | module_exit(exit_ltq_wdt); | ||
254 | |||
255 | module_param(nowayout, int, 0); | ||
256 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); | ||
257 | |||
258 | MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); | ||
259 | MODULE_DESCRIPTION("Lantiq SoC Watchdog"); | ||
260 | MODULE_LICENSE("GPL"); | ||
261 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
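From userspace, the driver above implements the standard watchdog magic-close protocol: any write pats the dog, and a trailing 'V' tells release() the close is intentional so it may disable the timer instead of logging an error. A small usage sketch — the /dev/watchdog node and the loop count are conventional choices, not anything specific to this driver:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/watchdog", O_WRONLY);

        if (fd < 0) {
            perror("open /dev/watchdog");
            return 1;
        }
        for (int i = 0; i < 3; i++) {
            write(fd, "\0", 1);   /* keepalive: any write re-arms the timer */
            sleep(1);
        }
        write(fd, "V", 1);        /* magic close: permit disabling on close */
        close(fd);
        return 0;
    }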
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index 528bceb220fd..eed5436ffb51 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c | |||
@@ -185,17 +185,20 @@ static struct miscdevice mpc8xxx_wdt_miscdev = { | |||
185 | .fops = &mpc8xxx_wdt_fops, | 185 | .fops = &mpc8xxx_wdt_fops, |
186 | }; | 186 | }; |
187 | 187 | ||
188 | static const struct of_device_id mpc8xxx_wdt_match[]; | ||
188 | static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev) | 189 | static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev) |
189 | { | 190 | { |
190 | int ret; | 191 | int ret; |
192 | const struct of_device_id *match; | ||
191 | struct device_node *np = ofdev->dev.of_node; | 193 | struct device_node *np = ofdev->dev.of_node; |
192 | struct mpc8xxx_wdt_type *wdt_type; | 194 | struct mpc8xxx_wdt_type *wdt_type; |
193 | u32 freq = fsl_get_sys_freq(); | 195 | u32 freq = fsl_get_sys_freq(); |
194 | bool enabled; | 196 | bool enabled; |
195 | 197 | ||
196 | if (!ofdev->dev.of_match) | 198 | match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev); |
199 | if (!match) | ||
197 | return -EINVAL; | 200 | return -EINVAL; |
198 | wdt_type = ofdev->dev.of_match->data; | 201 | wdt_type = match->data; |
199 | 202 | ||
200 | if (!freq || freq == -1) | 203 | if (!freq || freq == -1) |
201 | return -EINVAL; | 204 | return -EINVAL; |
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c index 5ec5ac1f7878..1479dc4d6129 100644 --- a/drivers/watchdog/mtx-1_wdt.c +++ b/drivers/watchdog/mtx-1_wdt.c | |||
@@ -66,6 +66,7 @@ static struct { | |||
66 | int default_ticks; | 66 | int default_ticks; |
67 | unsigned long inuse; | 67 | unsigned long inuse; |
68 | unsigned gpio; | 68 | unsigned gpio; |
69 | int gstate; | ||
69 | } mtx1_wdt_device; | 70 | } mtx1_wdt_device; |
70 | 71 | ||
71 | static void mtx1_wdt_trigger(unsigned long unused) | 72 | static void mtx1_wdt_trigger(unsigned long unused) |
@@ -75,13 +76,13 @@ static void mtx1_wdt_trigger(unsigned long unused) | |||
75 | spin_lock(&mtx1_wdt_device.lock); | 76 | spin_lock(&mtx1_wdt_device.lock); |
76 | if (mtx1_wdt_device.running) | 77 | if (mtx1_wdt_device.running) |
77 | ticks--; | 78 | ticks--; |
78 | /* | 79 | |
79 | * toggle GPIO2_15 | 80 | /* toggle wdt gpio */ |
80 | */ | 81 | mtx1_wdt_device.gstate = ~mtx1_wdt_device.gstate; |
81 | tmp = au_readl(GPIO2_DIR); | 82 | if (mtx1_wdt_device.gstate) |
82 | tmp = (tmp & ~(1 << mtx1_wdt_device.gpio)) | | 83 | gpio_direction_output(mtx1_wdt_device.gpio, 1); |
83 | ((~tmp) & (1 << mtx1_wdt_device.gpio)); | 84 | else |
84 | au_writel(tmp, GPIO2_DIR); | 85 | gpio_direction_input(mtx1_wdt_device.gpio); |
85 | 86 | ||
86 | if (mtx1_wdt_device.queue && ticks) | 87 | if (mtx1_wdt_device.queue && ticks) |
87 | mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); | 88 | mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); |
@@ -103,7 +104,8 @@ static void mtx1_wdt_start(void) | |||
103 | spin_lock_irqsave(&mtx1_wdt_device.lock, flags); | 104 | spin_lock_irqsave(&mtx1_wdt_device.lock, flags); |
104 | if (!mtx1_wdt_device.queue) { | 105 | if (!mtx1_wdt_device.queue) { |
105 | mtx1_wdt_device.queue = 1; | 106 | mtx1_wdt_device.queue = 1; |
106 | gpio_set_value(mtx1_wdt_device.gpio, 1); | 107 | mtx1_wdt_device.gstate = 1; |
108 | gpio_direction_output(mtx1_wdt_device.gpio, 1); | ||
107 | mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); | 109 | mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); |
108 | } | 110 | } |
109 | mtx1_wdt_device.running++; | 111 | mtx1_wdt_device.running++; |
@@ -117,7 +119,8 @@ static int mtx1_wdt_stop(void) | |||
117 | spin_lock_irqsave(&mtx1_wdt_device.lock, flags); | 119 | spin_lock_irqsave(&mtx1_wdt_device.lock, flags); |
118 | if (mtx1_wdt_device.queue) { | 120 | if (mtx1_wdt_device.queue) { |
119 | mtx1_wdt_device.queue = 0; | 121 | mtx1_wdt_device.queue = 0; |
120 | gpio_set_value(mtx1_wdt_device.gpio, 0); | 122 | mtx1_wdt_device.gstate = 0; |
123 | gpio_direction_output(mtx1_wdt_device.gpio, 0); | ||
121 | } | 124 | } |
122 | ticks = mtx1_wdt_device.default_ticks; | 125 | ticks = mtx1_wdt_device.default_ticks; |
123 | spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags); | 126 | spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags); |
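The mtx-1 hunks above replace raw GPIO2_DIR register pokes with gpiolib calls that toggle the watchdog line between driven-high and floating input, tracking the phase in gstate. A standalone sketch of that toggle — the gpio_* functions here are print stubs, and it uses logical ! where the driver toggles with bitwise ~ (for a zero/non-zero flag the effect is the same):

    #include <stdio.h>

    static int gstate;

    /* Stubs standing in for gpiolib; they just trace the calls. */
    static void gpio_direction_output(unsigned gpio, int value)
    {
        printf("gpio %u: drive %d\n", gpio, value);
    }
    static void gpio_direction_input(unsigned gpio)
    {
        printf("gpio %u: float (input)\n", gpio);
    }

    /* One watchdog-timer tick: alternate drive-high / float. */
    static void wdt_trigger(unsigned gpio)
    {
        gstate = !gstate;
        if (gstate)
            gpio_direction_output(gpio, 1);
        else
            gpio_direction_input(gpio);
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            wdt_trigger(15);
        return 0;
    }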
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index f420f1ff7f13..4781f806701d 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -4,21 +4,21 @@ obj-y += xenbus/ | |||
4 | nostackp := $(call cc-option, -fno-stack-protector) | 4 | nostackp := $(call cc-option, -fno-stack-protector) |
5 | CFLAGS_features.o := $(nostackp) | 5 | CFLAGS_features.o := $(nostackp) |
6 | 6 | ||
7 | obj-$(CONFIG_BLOCK) += biomerge.o | 7 | obj-$(CONFIG_BLOCK) += biomerge.o |
8 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o | 8 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o |
9 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o | 9 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o |
10 | obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o | 10 | obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o |
11 | obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o | 11 | obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o |
12 | obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o | 12 | obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o |
13 | obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o | 13 | obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o |
14 | obj-$(CONFIG_XENFS) += xenfs/ | 14 | obj-$(CONFIG_XENFS) += xenfs/ |
15 | obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o | 15 | obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o |
16 | obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o | 16 | obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o |
17 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o | 17 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o |
18 | obj-$(CONFIG_XEN_DOM0) += pci.o | 18 | obj-$(CONFIG_XEN_DOM0) += pci.o |
19 | 19 | ||
20 | xen-evtchn-y := evtchn.o | 20 | xen-evtchn-y := evtchn.o |
21 | xen-gntdev-y := gntdev.o | 21 | xen-gntdev-y := gntdev.o |
22 | xen-gntalloc-y := gntalloc.o | 22 | xen-gntalloc-y := gntalloc.o |
23 | 23 | ||
24 | xen-platform-pci-y := platform-pci.o | 24 | xen-platform-pci-y := platform-pci.o |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 043af8ad6b60..f54290baa3db 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -114,7 +114,6 @@ static void __balloon_append(struct page *page) | |||
114 | if (PageHighMem(page)) { | 114 | if (PageHighMem(page)) { |
115 | list_add_tail(&page->lru, &ballooned_pages); | 115 | list_add_tail(&page->lru, &ballooned_pages); |
116 | balloon_stats.balloon_high++; | 116 | balloon_stats.balloon_high++; |
117 | dec_totalhigh_pages(); | ||
118 | } else { | 117 | } else { |
119 | list_add(&page->lru, &ballooned_pages); | 118 | list_add(&page->lru, &ballooned_pages); |
120 | balloon_stats.balloon_low++; | 119 | balloon_stats.balloon_low++; |
@@ -124,6 +123,8 @@ static void __balloon_append(struct page *page) | |||
124 | static void balloon_append(struct page *page) | 123 | static void balloon_append(struct page *page) |
125 | { | 124 | { |
126 | __balloon_append(page); | 125 | __balloon_append(page); |
126 | if (PageHighMem(page)) | ||
127 | dec_totalhigh_pages(); | ||
127 | totalram_pages--; | 128 | totalram_pages--; |
128 | } | 129 | } |
129 | 130 | ||
@@ -193,7 +194,7 @@ static enum bp_state update_schedule(enum bp_state state) | |||
193 | return BP_EAGAIN; | 194 | return BP_EAGAIN; |
194 | } | 195 | } |
195 | 196 | ||
196 | static unsigned long current_target(void) | 197 | static long current_credit(void) |
197 | { | 198 | { |
198 | unsigned long target = balloon_stats.target_pages; | 199 | unsigned long target = balloon_stats.target_pages; |
199 | 200 | ||
@@ -202,7 +203,7 @@ static unsigned long current_target(void) | |||
202 | balloon_stats.balloon_low + | 203 | balloon_stats.balloon_low + |
203 | balloon_stats.balloon_high); | 204 | balloon_stats.balloon_high); |
204 | 205 | ||
205 | return target; | 206 | return target - balloon_stats.current_pages; |
206 | } | 207 | } |
207 | 208 | ||
208 | static enum bp_state increase_reservation(unsigned long nr_pages) | 209 | static enum bp_state increase_reservation(unsigned long nr_pages) |
@@ -246,7 +247,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages) | |||
246 | set_phys_to_machine(pfn, frame_list[i]); | 247 | set_phys_to_machine(pfn, frame_list[i]); |
247 | 248 | ||
248 | /* Link back into the page tables if not highmem. */ | 249 | /* Link back into the page tables if not highmem. */ |
249 | if (!xen_hvm_domain() && pfn < max_low_pfn) { | 250 | if (xen_pv_domain() && !PageHighMem(page)) { |
250 | int ret; | 251 | int ret; |
251 | ret = HYPERVISOR_update_va_mapping( | 252 | ret = HYPERVISOR_update_va_mapping( |
252 | (unsigned long)__va(pfn << PAGE_SHIFT), | 253 | (unsigned long)__va(pfn << PAGE_SHIFT), |
@@ -293,7 +294,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
293 | 294 | ||
294 | scrub_page(page); | 295 | scrub_page(page); |
295 | 296 | ||
296 | if (!xen_hvm_domain() && !PageHighMem(page)) { | 297 | if (xen_pv_domain() && !PageHighMem(page)) { |
297 | ret = HYPERVISOR_update_va_mapping( | 298 | ret = HYPERVISOR_update_va_mapping( |
298 | (unsigned long)__va(pfn << PAGE_SHIFT), | 299 | (unsigned long)__va(pfn << PAGE_SHIFT), |
299 | __pte_ma(0), 0); | 300 | __pte_ma(0), 0); |
@@ -337,7 +338,7 @@ static void balloon_process(struct work_struct *work) | |||
337 | mutex_lock(&balloon_mutex); | 338 | mutex_lock(&balloon_mutex); |
338 | 339 | ||
339 | do { | 340 | do { |
340 | credit = current_target() - balloon_stats.current_pages; | 341 | credit = current_credit(); |
341 | 342 | ||
342 | if (credit > 0) | 343 | if (credit > 0) |
343 | state = increase_reservation(credit); | 344 | state = increase_reservation(credit); |
@@ -420,7 +421,7 @@ void free_xenballooned_pages(int nr_pages, struct page** pages) | |||
420 | } | 421 | } |
421 | 422 | ||
422 | /* The balloon may be too large now. Shrink it if needed. */ | 423 | /* The balloon may be too large now. Shrink it if needed. */ |
423 | if (current_target() != balloon_stats.current_pages) | 424 | if (current_credit()) |
424 | schedule_delayed_work(&balloon_worker, 0); | 425 | schedule_delayed_work(&balloon_worker, 0); |
425 | 426 | ||
426 | mutex_unlock(&balloon_mutex); | 427 | mutex_unlock(&balloon_mutex); |
@@ -429,7 +430,7 @@ EXPORT_SYMBOL(free_xenballooned_pages); | |||
429 | 430 | ||
430 | static int __init balloon_init(void) | 431 | static int __init balloon_init(void) |
431 | { | 432 | { |
432 | unsigned long pfn, nr_pages, extra_pfn_end; | 433 | unsigned long pfn, extra_pfn_end; |
433 | struct page *page; | 434 | struct page *page; |
434 | 435 | ||
435 | if (!xen_domain()) | 436 | if (!xen_domain()) |
@@ -437,11 +438,7 @@ static int __init balloon_init(void) | |||
437 | 438 | ||
438 | pr_info("xen/balloon: Initialising balloon driver.\n"); | 439 | pr_info("xen/balloon: Initialising balloon driver.\n"); |
439 | 440 | ||
440 | if (xen_pv_domain()) | 441 | balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn; |
441 | nr_pages = xen_start_info->nr_pages; | ||
442 | else | ||
443 | nr_pages = max_pfn; | ||
444 | balloon_stats.current_pages = min(nr_pages, max_pfn); | ||
445 | balloon_stats.target_pages = balloon_stats.current_pages; | 442 | balloon_stats.target_pages = balloon_stats.current_pages; |
446 | balloon_stats.balloon_low = 0; | 443 | balloon_stats.balloon_low = 0; |
447 | balloon_stats.balloon_high = 0; | 444 | balloon_stats.balloon_high = 0; |
@@ -466,7 +463,7 @@ static int __init balloon_init(void) | |||
466 | pfn < extra_pfn_end; | 463 | pfn < extra_pfn_end; |
467 | pfn++) { | 464 | pfn++) { |
468 | page = pfn_to_page(pfn); | 465 | page = pfn_to_page(pfn); |
469 | /* totalram_pages doesn't include the boot-time | 466 | /* totalram_pages and totalhigh_pages do not include the boot-time |
470 | balloon extension, so don't subtract from it. */ | 467 | balloon extension, so don't subtract from them. */
471 | __balloon_append(page); | 468 | __balloon_append(page); |
472 | } | 469 | } |
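
Taken together, the balloon hunks replace the absolute-valued current_target() with a signed helper that callers can test directly: positive means the reservation should grow, negative means it should shrink, zero means nothing to do. A minimal sketch of the resulting helper, reconstructed from the subtraction and call sites shown above (the full body is not in this patch, so treat it as an assumption):

    /* Sketch: pages of credit relative to the current reservation.
     * Positive => inflate via increase_reservation(),
     * negative => deflate via decrease_reservation(). */
    static long current_credit(void)
    {
            unsigned long target = balloon_stats.target_pages;

            target = min(target,
                         balloon_stats.current_pages +
                         balloon_stats.balloon_low +
                         balloon_stats.balloon_high);

            return target - balloon_stats.current_pages;
    }

This is why free_xenballooned_pages() can now schedule work on a bare if (current_credit()) instead of comparing two counters itself.
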
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 33167b43ac7e..3ff822b48145 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -101,6 +101,7 @@ struct irq_info | |||
101 | unsigned short gsi; | 101 | unsigned short gsi; |
102 | unsigned char vector; | 102 | unsigned char vector; |
103 | unsigned char flags; | 103 | unsigned char flags; |
104 | uint16_t domid; | ||
104 | } pirq; | 105 | } pirq; |
105 | } u; | 106 | } u; |
106 | }; | 107 | }; |
@@ -118,6 +119,8 @@ static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG], | |||
118 | static struct irq_chip xen_dynamic_chip; | 119 | static struct irq_chip xen_dynamic_chip; |
119 | static struct irq_chip xen_percpu_chip; | 120 | static struct irq_chip xen_percpu_chip; |
120 | static struct irq_chip xen_pirq_chip; | 121 | static struct irq_chip xen_pirq_chip; |
122 | static void enable_dynirq(struct irq_data *data); | ||
123 | static void disable_dynirq(struct irq_data *data); | ||
121 | 124 | ||
122 | /* Get info for IRQ */ | 125 | /* Get info for IRQ */ |
123 | static struct irq_info *info_for_irq(unsigned irq) | 126 | static struct irq_info *info_for_irq(unsigned irq) |
@@ -184,6 +187,7 @@ static void xen_irq_info_pirq_init(unsigned irq, | |||
184 | unsigned short pirq, | 187 | unsigned short pirq, |
185 | unsigned short gsi, | 188 | unsigned short gsi, |
186 | unsigned short vector, | 189 | unsigned short vector, |
190 | uint16_t domid, | ||
187 | unsigned char flags) | 191 | unsigned char flags) |
188 | { | 192 | { |
189 | struct irq_info *info = info_for_irq(irq); | 193 | struct irq_info *info = info_for_irq(irq); |
@@ -193,6 +197,7 @@ static void xen_irq_info_pirq_init(unsigned irq, | |||
193 | info->u.pirq.pirq = pirq; | 197 | info->u.pirq.pirq = pirq; |
194 | info->u.pirq.gsi = gsi; | 198 | info->u.pirq.gsi = gsi; |
195 | info->u.pirq.vector = vector; | 199 | info->u.pirq.vector = vector; |
200 | info->u.pirq.domid = domid; | ||
196 | info->u.pirq.flags = flags; | 201 | info->u.pirq.flags = flags; |
197 | } | 202 | } |
198 | 203 | ||
@@ -473,16 +478,6 @@ static void xen_free_irq(unsigned irq) | |||
473 | irq_free_desc(irq); | 478 | irq_free_desc(irq); |
474 | } | 479 | } |
475 | 480 | ||
476 | static void pirq_unmask_notify(int irq) | ||
477 | { | ||
478 | struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) }; | ||
479 | |||
480 | if (unlikely(pirq_needs_eoi(irq))) { | ||
481 | int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); | ||
482 | WARN_ON(rc); | ||
483 | } | ||
484 | } | ||
485 | |||
486 | static void pirq_query_unmask(int irq) | 481 | static void pirq_query_unmask(int irq) |
487 | { | 482 | { |
488 | struct physdev_irq_status_query irq_status; | 483 | struct physdev_irq_status_query irq_status; |
@@ -506,6 +501,29 @@ static bool probing_irq(int irq) | |||
506 | return desc && desc->action == NULL; | 501 | return desc && desc->action == NULL; |
507 | } | 502 | } |
508 | 503 | ||
504 | static void eoi_pirq(struct irq_data *data) | ||
505 | { | ||
506 | int evtchn = evtchn_from_irq(data->irq); | ||
507 | struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; | ||
508 | int rc = 0; | ||
509 | |||
510 | irq_move_irq(data); | ||
511 | |||
512 | if (VALID_EVTCHN(evtchn)) | ||
513 | clear_evtchn(evtchn); | ||
514 | |||
515 | if (pirq_needs_eoi(data->irq)) { | ||
516 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); | ||
517 | WARN_ON(rc); | ||
518 | } | ||
519 | } | ||
520 | |||
521 | static void mask_ack_pirq(struct irq_data *data) | ||
522 | { | ||
523 | disable_dynirq(data); | ||
524 | eoi_pirq(data); | ||
525 | } | ||
526 | |||
509 | static unsigned int __startup_pirq(unsigned int irq) | 527 | static unsigned int __startup_pirq(unsigned int irq) |
510 | { | 528 | { |
511 | struct evtchn_bind_pirq bind_pirq; | 529 | struct evtchn_bind_pirq bind_pirq; |
@@ -539,7 +557,7 @@ static unsigned int __startup_pirq(unsigned int irq) | |||
539 | 557 | ||
540 | out: | 558 | out: |
541 | unmask_evtchn(evtchn); | 559 | unmask_evtchn(evtchn); |
542 | pirq_unmask_notify(irq); | 560 | eoi_pirq(irq_get_irq_data(irq)); |
543 | 561 | ||
544 | return 0; | 562 | return 0; |
545 | } | 563 | } |
@@ -579,18 +597,7 @@ static void enable_pirq(struct irq_data *data) | |||
579 | 597 | ||
580 | static void disable_pirq(struct irq_data *data) | 598 | static void disable_pirq(struct irq_data *data) |
581 | { | 599 | { |
582 | } | 600 | disable_dynirq(data); |
583 | |||
584 | static void ack_pirq(struct irq_data *data) | ||
585 | { | ||
586 | int evtchn = evtchn_from_irq(data->irq); | ||
587 | |||
588 | irq_move_irq(data); | ||
589 | |||
590 | if (VALID_EVTCHN(evtchn)) { | ||
591 | mask_evtchn(evtchn); | ||
592 | clear_evtchn(evtchn); | ||
593 | } | ||
594 | } | 601 | } |
595 | 602 | ||
596 | static int find_irq_by_gsi(unsigned gsi) | 603 | static int find_irq_by_gsi(unsigned gsi) |
@@ -639,9 +646,6 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
639 | if (irq < 0) | 646 | if (irq < 0) |
640 | goto out; | 647 | goto out; |
641 | 648 | ||
642 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, | ||
643 | name); | ||
644 | |||
645 | irq_op.irq = irq; | 649 | irq_op.irq = irq; |
646 | irq_op.vector = 0; | 650 | irq_op.vector = 0; |
647 | 651 | ||
@@ -655,9 +659,35 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
655 | goto out; | 659 | goto out; |
656 | } | 660 | } |
657 | 661 | ||
658 | xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, | 662 | xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF, |
659 | shareable ? PIRQ_SHAREABLE : 0); | 663 | shareable ? PIRQ_SHAREABLE : 0); |
660 | 664 | ||
665 | pirq_query_unmask(irq); | ||
666 | /* We try to use the handler with the appropriate semantic for the | ||
667 | * type of interrupt: if the interrupt doesn't need an eoi | ||
668 | * (pirq_needs_eoi returns false), we treat it like an edge | ||
669 | * triggered interrupt so we use handle_edge_irq. | ||
670 | * As a matter of fact, this only happens when the corresponding | ||
671 | * physical interrupt is edge triggered or an msi. | ||
672 | * | ||
673 | * On the other hand if the interrupt needs an eoi (pirq_needs_eoi | ||
674 | * returns true) we treat it like a level triggered interrupt so we | ||
675 | * use handle_fasteoi_irq like the native code does for this kind of | ||
676 | * interrupts. | ||
677 | * Depending on the Xen version, pirq_needs_eoi might return true | ||
678 | * not only for level triggered interrupts but for edge triggered | ||
679 | * interrupts too. In any case Xen always honors the eoi mechanism, | ||
680 | * not injecting any more pirqs of the same kind if the first one | ||
681 | * hasn't received an eoi yet. Therefore using the fasteoi handler | ||
682 | * is the right choice either way. | ||
683 | */ | ||
684 | if (pirq_needs_eoi(irq)) | ||
685 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, | ||
686 | handle_fasteoi_irq, name); | ||
687 | else | ||
688 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, | ||
689 | handle_edge_irq, name); | ||
690 | |||
661 | out: | 691 | out: |
662 | spin_unlock(&irq_mapping_update_lock); | 692 | spin_unlock(&irq_mapping_update_lock); |
663 | 693 | ||
@@ -680,7 +710,8 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) | |||
680 | } | 710 | } |
681 | 711 | ||
682 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | 712 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
683 | int pirq, int vector, const char *name) | 713 | int pirq, int vector, const char *name, |
714 | domid_t domid) | ||
684 | { | 715 | { |
685 | int irq, ret; | 716 | int irq, ret; |
686 | 717 | ||
@@ -690,10 +721,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | |||
690 | if (irq == -1) | 721 | if (irq == -1) |
691 | goto out; | 722 | goto out; |
692 | 723 | ||
693 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, | 724 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, |
694 | name); | 725 | name); |
695 | 726 | ||
696 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); | 727 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0); |
697 | ret = irq_set_msi_desc(irq, msidesc); | 728 | ret = irq_set_msi_desc(irq, msidesc); |
698 | if (ret < 0) | 729 | if (ret < 0) |
699 | goto error_irq; | 730 | goto error_irq; |
@@ -722,9 +753,16 @@ int xen_destroy_irq(int irq) | |||
722 | 753 | ||
723 | if (xen_initial_domain()) { | 754 | if (xen_initial_domain()) { |
724 | unmap_irq.pirq = info->u.pirq.pirq; | 755 | unmap_irq.pirq = info->u.pirq.pirq; |
725 | unmap_irq.domid = DOMID_SELF; | 756 | unmap_irq.domid = info->u.pirq.domid; |
726 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); | 757 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); |
727 | if (rc) { | 758 | /* If another domain quits without making the pci_disable_msix |
759 | * call, the Xen hypervisor takes care of freeing the PIRQs | ||
760 | * (free_domain_pirqs). | ||
761 | */ | ||
762 | if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)) | ||
763 | printk(KERN_INFO "domain %d does not have %d anymore\n", | ||
764 | info->u.pirq.domid, info->u.pirq.pirq); | ||
765 | else if (rc) { | ||
728 | printk(KERN_WARNING "unmap irq failed %d\n", rc); | 766 | printk(KERN_WARNING "unmap irq failed %d\n", rc); |
729 | goto out; | 767 | goto out; |
730 | } | 768 | } |
@@ -759,6 +797,12 @@ out: | |||
759 | return irq; | 797 | return irq; |
760 | } | 798 | } |
761 | 799 | ||
800 | |||
801 | int xen_pirq_from_irq(unsigned irq) | ||
802 | { | ||
803 | return pirq_from_irq(irq); | ||
804 | } | ||
805 | EXPORT_SYMBOL_GPL(xen_pirq_from_irq); | ||
762 | int bind_evtchn_to_irq(unsigned int evtchn) | 806 | int bind_evtchn_to_irq(unsigned int evtchn) |
763 | { | 807 | { |
764 | int irq; | 808 | int irq; |
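
xen_pirq_from_irq() simply exports the existing pirq_from_irq() lookup so code outside events.c can translate a Linux irq number back into the Xen PIRQ it is bound to. A hypothetical caller (the function name and error handling here are illustrative, not taken from this patch):

    /* Illustrative caller: ask Xen about the pirq behind a Linux irq. */
    static int example_irq_status(int irq)
    {
            struct physdev_irq_status_query q = {
                    .irq = xen_pirq_from_irq(irq),
            };

            if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &q))
                    return -EIO;
            return q.flags;
    }
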
@@ -773,7 +817,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
773 | goto out; | 817 | goto out; |
774 | 818 | ||
775 | irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, | 819 | irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, |
776 | handle_fasteoi_irq, "event"); | 820 | handle_edge_irq, "event"); |
777 | 821 | ||
778 | xen_irq_info_evtchn_init(irq, evtchn); | 822 | xen_irq_info_evtchn_init(irq, evtchn); |
779 | } | 823 | } |
@@ -1179,9 +1223,6 @@ static void __xen_evtchn_do_upcall(void) | |||
1179 | port = (word_idx * BITS_PER_LONG) + bit_idx; | 1223 | port = (word_idx * BITS_PER_LONG) + bit_idx; |
1180 | irq = evtchn_to_irq[port]; | 1224 | irq = evtchn_to_irq[port]; |
1181 | 1225 | ||
1182 | mask_evtchn(port); | ||
1183 | clear_evtchn(port); | ||
1184 | |||
1185 | if (irq != -1) { | 1226 | if (irq != -1) { |
1186 | desc = irq_to_desc(irq); | 1227 | desc = irq_to_desc(irq); |
1187 | if (desc) | 1228 | if (desc) |
@@ -1337,10 +1378,16 @@ static void ack_dynirq(struct irq_data *data) | |||
1337 | { | 1378 | { |
1338 | int evtchn = evtchn_from_irq(data->irq); | 1379 | int evtchn = evtchn_from_irq(data->irq); |
1339 | 1380 | ||
1340 | irq_move_masked_irq(data); | 1381 | irq_move_irq(data); |
1341 | 1382 | ||
1342 | if (VALID_EVTCHN(evtchn)) | 1383 | if (VALID_EVTCHN(evtchn)) |
1343 | unmask_evtchn(evtchn); | 1384 | clear_evtchn(evtchn); |
1385 | } | ||
1386 | |||
1387 | static void mask_ack_dynirq(struct irq_data *data) | ||
1388 | { | ||
1389 | disable_dynirq(data); | ||
1390 | ack_dynirq(data); | ||
1344 | } | 1391 | } |
1345 | 1392 | ||
1346 | static int retrigger_dynirq(struct irq_data *data) | 1393 | static int retrigger_dynirq(struct irq_data *data) |
@@ -1502,6 +1549,18 @@ void xen_poll_irq(int irq) | |||
1502 | xen_poll_irq_timeout(irq, 0 /* no timeout */); | 1549 | xen_poll_irq_timeout(irq, 0 /* no timeout */); |
1503 | } | 1550 | } |
1504 | 1551 | ||
1552 | /* Check whether the IRQ line is shared with other guests. */ | ||
1553 | int xen_test_irq_shared(int irq) | ||
1554 | { | ||
1555 | struct irq_info *info = info_for_irq(irq); | ||
1556 | struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq }; | ||
1557 | |||
1558 | if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) | ||
1559 | return 0; | ||
1560 | return !(irq_status.flags & XENIRQSTAT_shared); | ||
1561 | } | ||
1562 | EXPORT_SYMBOL_GPL(xen_test_irq_shared); | ||
1563 | |||
1505 | void xen_irq_resume(void) | 1564 | void xen_irq_resume(void) |
1506 | { | 1565 | { |
1507 | unsigned int cpu, evtchn; | 1566 | unsigned int cpu, evtchn; |
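
Note the return convention of xen_test_irq_shared() as written: it returns nonzero when the XENIRQSTAT_shared flag is clear, i.e. when the line is not shared, and 0 both when the flag is set and when the hypercall fails. A caller sketch (illustrative only):

    /* Mind the inverted sense relative to the function's name. */
    if (xen_test_irq_shared(irq))
            printk(KERN_DEBUG "irq %d: Xen reports it as not shared\n", irq);
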
@@ -1535,7 +1594,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { | |||
1535 | .irq_mask = disable_dynirq, | 1594 | .irq_mask = disable_dynirq, |
1536 | .irq_unmask = enable_dynirq, | 1595 | .irq_unmask = enable_dynirq, |
1537 | 1596 | ||
1538 | .irq_eoi = ack_dynirq, | 1597 | .irq_ack = ack_dynirq, |
1598 | .irq_mask_ack = mask_ack_dynirq, | ||
1599 | |||
1539 | .irq_set_affinity = set_affinity_irq, | 1600 | .irq_set_affinity = set_affinity_irq, |
1540 | .irq_retrigger = retrigger_dynirq, | 1601 | .irq_retrigger = retrigger_dynirq, |
1541 | }; | 1602 | }; |
@@ -1545,14 +1606,15 @@ static struct irq_chip xen_pirq_chip __read_mostly = { | |||
1545 | 1606 | ||
1546 | .irq_startup = startup_pirq, | 1607 | .irq_startup = startup_pirq, |
1547 | .irq_shutdown = shutdown_pirq, | 1608 | .irq_shutdown = shutdown_pirq, |
1548 | |||
1549 | .irq_enable = enable_pirq, | 1609 | .irq_enable = enable_pirq, |
1550 | .irq_unmask = enable_pirq, | ||
1551 | |||
1552 | .irq_disable = disable_pirq, | 1610 | .irq_disable = disable_pirq, |
1553 | .irq_mask = disable_pirq, | ||
1554 | 1611 | ||
1555 | .irq_ack = ack_pirq, | 1612 | .irq_mask = disable_dynirq, |
1613 | .irq_unmask = enable_dynirq, | ||
1614 | |||
1615 | .irq_ack = eoi_pirq, | ||
1616 | .irq_eoi = eoi_pirq, | ||
1617 | .irq_mask_ack = mask_ack_pirq, | ||
1556 | 1618 | ||
1557 | .irq_set_affinity = set_affinity_irq, | 1619 | .irq_set_affinity = set_affinity_irq, |
1558 | 1620 | ||
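
The chip tables complete the switch from the fasteoi flow to the edge flow: the upcall loop no longer masks and clears every port itself (the @@ -1179 hunk), and the chips instead supply .irq_ack plus .irq_mask_ack so the genirq core can do it at the right moment. Roughly what handle_edge_irq asks of these callbacks (a paraphrase of the edge flow with illustrative helper names, not the real function body):

    /* Paraphrased edge flow (illustrative only): */
    if (irq_in_progress_or_disabled(desc)) {
            chip->irq_mask_ack(data);   /* mask_ack_dynirq / mask_ack_pirq */
            /* ...event marked pending, replayed on unmask... */
    } else {
            chip->irq_ack(data);        /* ack_dynirq / eoi_pirq: clear_evtchn() */
            handle_irq_event(desc);     /* handler runs with the channel open */
    }

Clearing the event channel in .irq_ack before the handler runs is what lets a new event arrive while the handler is still executing, which the old unconditional mask in the upcall path prevented.
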
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index a7ffdfe19fc9..f6832f46aea4 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c | |||
@@ -427,6 +427,17 @@ static long gntalloc_ioctl(struct file *filp, unsigned int cmd, | |||
427 | return 0; | 427 | return 0; |
428 | } | 428 | } |
429 | 429 | ||
430 | static void gntalloc_vma_open(struct vm_area_struct *vma) | ||
431 | { | ||
432 | struct gntalloc_gref *gref = vma->vm_private_data; | ||
433 | if (!gref) | ||
434 | return; | ||
435 | |||
436 | spin_lock(&gref_lock); | ||
437 | gref->users++; | ||
438 | spin_unlock(&gref_lock); | ||
439 | } | ||
440 | |||
430 | static void gntalloc_vma_close(struct vm_area_struct *vma) | 441 | static void gntalloc_vma_close(struct vm_area_struct *vma) |
431 | { | 442 | { |
432 | struct gntalloc_gref *gref = vma->vm_private_data; | 443 | struct gntalloc_gref *gref = vma->vm_private_data; |
@@ -441,6 +452,7 @@ static void gntalloc_vma_close(struct vm_area_struct *vma) | |||
441 | } | 452 | } |
442 | 453 | ||
443 | static struct vm_operations_struct gntalloc_vmops = { | 454 | static struct vm_operations_struct gntalloc_vmops = { |
455 | .open = gntalloc_vma_open, | ||
444 | .close = gntalloc_vma_close, | 456 | .close = gntalloc_vma_close, |
445 | }; | 457 | }; |
446 | 458 | ||
@@ -471,8 +483,6 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma) | |||
471 | vma->vm_private_data = gref; | 483 | vma->vm_private_data = gref; |
472 | 484 | ||
473 | vma->vm_flags |= VM_RESERVED; | 485 | vma->vm_flags |= VM_RESERVED; |
474 | vma->vm_flags |= VM_DONTCOPY; | ||
475 | vma->vm_flags |= VM_PFNMAP | VM_PFN_AT_MMAP; | ||
476 | 486 | ||
477 | vma->vm_ops = &gntalloc_vmops; | 487 | vma->vm_ops = &gntalloc_vmops; |
478 | 488 | ||
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index b0f9e8fb0052..f914b26cf0c2 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -330,17 +330,26 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages) | |||
330 | 330 | ||
331 | /* ------------------------------------------------------------------ */ | 331 | /* ------------------------------------------------------------------ */ |
332 | 332 | ||
333 | static void gntdev_vma_open(struct vm_area_struct *vma) | ||
334 | { | ||
335 | struct grant_map *map = vma->vm_private_data; | ||
336 | |||
337 | pr_debug("gntdev_vma_open %p\n", vma); | ||
338 | atomic_inc(&map->users); | ||
339 | } | ||
340 | |||
333 | static void gntdev_vma_close(struct vm_area_struct *vma) | 341 | static void gntdev_vma_close(struct vm_area_struct *vma) |
334 | { | 342 | { |
335 | struct grant_map *map = vma->vm_private_data; | 343 | struct grant_map *map = vma->vm_private_data; |
336 | 344 | ||
337 | pr_debug("close %p\n", vma); | 345 | pr_debug("gntdev_vma_close %p\n", vma); |
338 | map->vma = NULL; | 346 | map->vma = NULL; |
339 | vma->vm_private_data = NULL; | 347 | vma->vm_private_data = NULL; |
340 | gntdev_put_map(map); | 348 | gntdev_put_map(map); |
341 | } | 349 | } |
342 | 350 | ||
343 | static struct vm_operations_struct gntdev_vmops = { | 351 | static struct vm_operations_struct gntdev_vmops = { |
352 | .open = gntdev_vma_open, | ||
344 | .close = gntdev_vma_close, | 353 | .close = gntdev_vma_close, |
345 | }; | 354 | }; |
346 | 355 | ||
@@ -652,7 +661,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
652 | 661 | ||
653 | vma->vm_ops = &gntdev_vmops; | 662 | vma->vm_ops = &gntdev_vmops; |
654 | 663 | ||
655 | vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP; | 664 | vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND; |
665 | |||
666 | if (use_ptemod) | ||
667 | vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP; | ||
656 | 668 | ||
657 | vma->vm_private_data = map; | 669 | vma->vm_private_data = map; |
658 | 670 | ||
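
The gntalloc and gntdev changes are the same fix: both drivers previously set VM_DONTCOPY so a mapping could never be duplicated, and their .close callbacks assumed exactly one VMA per grant. Once VM_DONTCOPY is dropped (unconditionally for gntalloc, and for gntdev whenever use_ptemod is off), fork() can duplicate the VMA, so a matching .open must take a reference for the child's copy. The invariant, in miniature (hypothetical helper names):

    /* One reference per VMA: fork() duplicates the VMA and calls
     * .open; munmap()/exit tear each copy down through .close. */
    static void example_vma_open(struct vm_area_struct *vma)
    {
            example_get(vma->vm_private_data);  /* gref->users++ / atomic_inc */
    }

    static void example_vma_close(struct vm_area_struct *vma)
    {
            example_put(vma->vm_private_data);  /* drop the ref, free at zero */
    }
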
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 3745a318defc..fd725cde6ad1 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
@@ -466,13 +466,30 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
466 | if (map_ops[i].status) | 466 | if (map_ops[i].status) |
467 | continue; | 467 | continue; |
468 | 468 | ||
469 | /* m2p override only supported for GNTMAP_contains_pte mappings */ | 469 | if (map_ops[i].flags & GNTMAP_contains_pte) { |
470 | if (!(map_ops[i].flags & GNTMAP_contains_pte)) | 470 | pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + |
471 | continue; | ||
472 | pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + | ||
473 | (map_ops[i].host_addr & ~PAGE_MASK)); | 471 | (map_ops[i].host_addr & ~PAGE_MASK)); |
474 | mfn = pte_mfn(*pte); | 472 | mfn = pte_mfn(*pte); |
475 | ret = m2p_add_override(mfn, pages[i]); | 473 | } else { |
474 | /* If you really wanted to do this: | ||
475 | * mfn = PFN_DOWN(map_ops[i].dev_bus_addr); | ||
476 | * | ||
477 | * The reason we do not implement it is b/c on the | ||
478 | * unmap path (gnttab_unmap_refs) we have no means of | ||
479 | * checking whether the page is !GNTMAP_contains_pte. | ||
480 | * | ||
481 | * That is without some extra data-structure to carry | ||
482 | * the struct page, bool clear_pte, and list_head next | ||
483 | * tuples and deal with allocation/deallocation, etc. | ||
484 | * | ||
485 | * The users of this API set the GNTMAP_contains_pte | ||
486 | * flag so let's just return not supported until it | ||
487 | * becomes necessary to implement. | ||
488 | */ | ||
489 | return -EOPNOTSUPP; | ||
490 | } | ||
491 | ret = m2p_add_override(mfn, pages[i], | ||
492 | map_ops[i].flags & GNTMAP_contains_pte); | ||
476 | if (ret) | 493 | if (ret) |
477 | return ret; | 494 | return ret; |
478 | } | 495 | } |
@@ -494,7 +511,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, | |||
494 | return ret; | 511 | return ret; |
495 | 512 | ||
496 | for (i = 0; i < count; i++) { | 513 | for (i = 0; i < count; i++) { |
497 | ret = m2p_remove_override(pages[i]); | 514 | ret = m2p_remove_override(pages[i], true /* clear the PTE */); |
498 | if (ret) | 515 | if (ret) |
499 | return ret; | 516 | return ret; |
500 | } | 517 | } |
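
The grant-table hunks change the m2p override API so the unmap side knows whether a PTE needs clearing; the signatures implied by these call sites (an assumption read off the callers above, not quoted from the arch headers):

    /* Implied signatures (assumed from the call sites): */
    int m2p_add_override(unsigned long mfn, struct page *page,
                         bool clear_pte);
    int m2p_remove_override(struct page *page, bool clear_pte);

gnttab_map_refs() passes map_ops[i].flags & GNTMAP_contains_pte, while gnttab_unmap_refs() passes true unconditionally, which is exactly why the non-contains_pte map path has to bail out with -EOPNOTSUPP for now.
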
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index a2eee574784e..0b5366b5be20 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -70,12 +70,7 @@ static int xen_suspend(void *data) | |||
70 | 70 | ||
71 | BUG_ON(!irqs_disabled()); | 71 | BUG_ON(!irqs_disabled()); |
72 | 72 | ||
73 | err = sysdev_suspend(PMSG_FREEZE); | 73 | err = syscore_suspend(); |
74 | if (!err) { | ||
75 | err = syscore_suspend(); | ||
76 | if (err) | ||
77 | sysdev_resume(); | ||
78 | } | ||
79 | if (err) { | 74 | if (err) { |
80 | printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n", | 75 | printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n", |
81 | err); | 76 | err); |
@@ -102,7 +97,6 @@ static int xen_suspend(void *data) | |||
102 | } | 97 | } |
103 | 98 | ||
104 | syscore_resume(); | 99 | syscore_resume(); |
105 | sysdev_resume(); | ||
106 | 100 | ||
107 | return 0; | 101 | return 0; |
108 | } | 102 | } |
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c index 60f1827a32cb..1e0fe01eb670 100644 --- a/drivers/xen/sys-hypervisor.c +++ b/drivers/xen/sys-hypervisor.c | |||
@@ -215,7 +215,7 @@ static struct attribute_group xen_compilation_group = { | |||
215 | .attrs = xen_compile_attrs, | 215 | .attrs = xen_compile_attrs, |
216 | }; | 216 | }; |
217 | 217 | ||
218 | int __init static xen_compilation_init(void) | 218 | static int __init xen_compilation_init(void) |
219 | { | 219 | { |
220 | return sysfs_create_group(hypervisor_kobj, &xen_compilation_group); | 220 | return sysfs_create_group(hypervisor_kobj, &xen_compilation_group); |
221 | } | 221 | } |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 5147bdd3b8e1..257b00e98428 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -1102,6 +1102,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1102 | if (!bdev->bd_part) | 1102 | if (!bdev->bd_part) |
1103 | goto out_clear; | 1103 | goto out_clear; |
1104 | 1104 | ||
1105 | ret = 0; | ||
1105 | if (disk->fops->open) { | 1106 | if (disk->fops->open) { |
1106 | ret = disk->fops->open(bdev, mode); | 1107 | ret = disk->fops->open(bdev, mode); |
1107 | if (ret == -ERESTARTSYS) { | 1108 | if (ret == -ERESTARTSYS) { |
@@ -1118,9 +1119,18 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1118 | put_disk(disk); | 1119 | put_disk(disk); |
1119 | goto restart; | 1120 | goto restart; |
1120 | } | 1121 | } |
1121 | if (ret) | ||
1122 | goto out_clear; | ||
1123 | } | 1122 | } |
1123 | /* | ||
1124 | * If the device is invalidated, rescan partitions | ||
1125 | * if open succeeded or failed with -ENOMEDIUM. | ||
1126 | * The latter is necessary to prevent ghost | ||
1127 | * partitions on a removed medium. | ||
1128 | */ | ||
1129 | if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) | ||
1130 | rescan_partitions(disk, bdev); | ||
1131 | if (ret) | ||
1132 | goto out_clear; | ||
1133 | |||
1124 | if (!bdev->bd_openers) { | 1134 | if (!bdev->bd_openers) { |
1125 | bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); | 1135 | bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); |
1126 | bdi = blk_get_backing_dev_info(bdev); | 1136 | bdi = blk_get_backing_dev_info(bdev); |
@@ -1128,8 +1138,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1128 | bdi = &default_backing_dev_info; | 1138 | bdi = &default_backing_dev_info; |
1129 | bdev_inode_switch_bdi(bdev->bd_inode, bdi); | 1139 | bdev_inode_switch_bdi(bdev->bd_inode, bdi); |
1130 | } | 1140 | } |
1131 | if (bdev->bd_invalidated) | ||
1132 | rescan_partitions(disk, bdev); | ||
1133 | } else { | 1141 | } else { |
1134 | struct block_device *whole; | 1142 | struct block_device *whole; |
1135 | whole = bdget_disk(disk, 0); | 1143 | whole = bdget_disk(disk, 0); |
@@ -1153,13 +1161,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1153 | } | 1161 | } |
1154 | } else { | 1162 | } else { |
1155 | if (bdev->bd_contains == bdev) { | 1163 | if (bdev->bd_contains == bdev) { |
1156 | if (bdev->bd_disk->fops->open) { | 1164 | ret = 0; |
1165 | if (bdev->bd_disk->fops->open) | ||
1157 | ret = bdev->bd_disk->fops->open(bdev, mode); | 1166 | ret = bdev->bd_disk->fops->open(bdev, mode); |
1158 | if (ret) | 1167 | /* the same as first opener case, read comment there */ |
1159 | goto out_unlock_bdev; | 1168 | if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) |
1160 | } | ||
1161 | if (bdev->bd_invalidated) | ||
1162 | rescan_partitions(bdev->bd_disk, bdev); | 1169 | rescan_partitions(bdev->bd_disk, bdev); |
1170 | if (ret) | ||
1171 | goto out_unlock_bdev; | ||
1163 | } | 1172 | } |
1164 | /* only one opener holds refs to the module and disk */ | 1173 | /* only one opener holds refs to the module and disk */ |
1165 | module_put(disk->fops->owner); | 1174 | module_put(disk->fops->owner); |
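
Both branches of __blkdev_get() now apply the same rule, and the error check moves after the rescan so a failed open can still clear stale partition state. The shared condition, restated as a predicate (the helper name is illustrative; the patch open-codes it in both places):

    /* Rescan if the device was invalidated and the open either
     * succeeded or reported no medium (the ghost-partition case). */
    static bool example_should_rescan(struct block_device *bdev, int open_ret)
    {
            return bdev->bd_invalidated &&
                   (open_ret == 0 || open_ret == -ENOMEDIUM);
    }
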
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 5d505aaa72fb..44ea5b92e1ba 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c | |||
@@ -178,12 +178,13 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, | |||
178 | 178 | ||
179 | if (value) { | 179 | if (value) { |
180 | acl = posix_acl_from_xattr(value, size); | 180 | acl = posix_acl_from_xattr(value, size); |
181 | if (IS_ERR(acl)) | ||
182 | return PTR_ERR(acl); | ||
183 | |||
181 | if (acl) { | 184 | if (acl) { |
182 | ret = posix_acl_valid(acl); | 185 | ret = posix_acl_valid(acl); |
183 | if (ret) | 186 | if (ret) |
184 | goto out; | 187 | goto out; |
185 | } else if (IS_ERR(acl)) { | ||
186 | return PTR_ERR(acl); | ||
187 | } | 188 | } |
188 | } | 189 | } |
189 | 190 | ||
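
The acl fix is purely about ordering: posix_acl_from_xattr() can return an ERR_PTR, a valid ACL, or NULL (no ACL, which is legal), and an ERR_PTR is non-NULL, so the old if (acl) test matched error pointers and could feed one to posix_acl_valid(). The corrected shape, annotated (restating the hunk, not new code):

    acl = posix_acl_from_xattr(value, size);
    if (IS_ERR(acl))             /* error pointers are non-NULL...  */
            return PTR_ERR(acl); /* ...so this test must come first */
    if (acl) {                   /* NULL simply means "no ACL"      */
            ret = posix_acl_valid(acl);
            ...
    }
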
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cd52f7f556ef..9ee6bd55e16c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -8856,23 +8856,38 @@ out: | |||
8856 | int btrfs_init_space_info(struct btrfs_fs_info *fs_info) | 8856 | int btrfs_init_space_info(struct btrfs_fs_info *fs_info) |
8857 | { | 8857 | { |
8858 | struct btrfs_space_info *space_info; | 8858 | struct btrfs_space_info *space_info; |
8859 | struct btrfs_super_block *disk_super; | ||
8860 | u64 features; | ||
8861 | u64 flags; | ||
8862 | int mixed = 0; | ||
8859 | int ret; | 8863 | int ret; |
8860 | 8864 | ||
8861 | ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0, | 8865 | disk_super = &fs_info->super_copy; |
8862 | &space_info); | 8866 | if (!btrfs_super_root(disk_super)) |
8863 | if (ret) | 8867 | return 1; |
8864 | return ret; | ||
8865 | 8868 | ||
8866 | ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0, | 8869 | features = btrfs_super_incompat_flags(disk_super); |
8867 | &space_info); | 8870 | if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) |
8868 | if (ret) | 8871 | mixed = 1; |
8869 | return ret; | ||
8870 | 8872 | ||
8871 | ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0, | 8873 | flags = BTRFS_BLOCK_GROUP_SYSTEM; |
8872 | &space_info); | 8874 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); |
8873 | if (ret) | 8875 | if (ret) |
8874 | return ret; | 8876 | goto out; |
8875 | 8877 | ||
8878 | if (mixed) { | ||
8879 | flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; | ||
8880 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); | ||
8881 | } else { | ||
8882 | flags = BTRFS_BLOCK_GROUP_METADATA; | ||
8883 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); | ||
8884 | if (ret) | ||
8885 | goto out; | ||
8886 | |||
8887 | flags = BTRFS_BLOCK_GROUP_DATA; | ||
8888 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); | ||
8889 | } | ||
8890 | out: | ||
8876 | return ret; | 8891 | return ret; |
8877 | } | 8892 | } |
8878 | 8893 | ||
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ffb48d6c5433..2616f7ed4799 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -81,6 +81,13 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags) | |||
81 | iflags |= FS_NOATIME_FL; | 81 | iflags |= FS_NOATIME_FL; |
82 | if (flags & BTRFS_INODE_DIRSYNC) | 82 | if (flags & BTRFS_INODE_DIRSYNC) |
83 | iflags |= FS_DIRSYNC_FL; | 83 | iflags |= FS_DIRSYNC_FL; |
84 | if (flags & BTRFS_INODE_NODATACOW) | ||
85 | iflags |= FS_NOCOW_FL; | ||
86 | |||
87 | if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS)) | ||
88 | iflags |= FS_COMPR_FL; | ||
89 | else if (flags & BTRFS_INODE_NOCOMPRESS) | ||
90 | iflags |= FS_NOCOMP_FL; | ||
84 | 91 | ||
85 | return iflags; | 92 | return iflags; |
86 | } | 93 | } |
@@ -144,16 +151,13 @@ static int check_flags(unsigned int flags) | |||
144 | if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ | 151 | if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ |
145 | FS_NOATIME_FL | FS_NODUMP_FL | \ | 152 | FS_NOATIME_FL | FS_NODUMP_FL | \ |
146 | FS_SYNC_FL | FS_DIRSYNC_FL | \ | 153 | FS_SYNC_FL | FS_DIRSYNC_FL | \ |
147 | FS_NOCOMP_FL | FS_COMPR_FL | \ | 154 | FS_NOCOMP_FL | FS_COMPR_FL | |
148 | FS_NOCOW_FL | FS_COW_FL)) | 155 | FS_NOCOW_FL)) |
149 | return -EOPNOTSUPP; | 156 | return -EOPNOTSUPP; |
150 | 157 | ||
151 | if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) | 158 | if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) |
152 | return -EINVAL; | 159 | return -EINVAL; |
153 | 160 | ||
154 | if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL)) | ||
155 | return -EINVAL; | ||
156 | |||
157 | return 0; | 161 | return 0; |
158 | } | 162 | } |
159 | 163 | ||
@@ -218,6 +222,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
218 | ip->flags |= BTRFS_INODE_DIRSYNC; | 222 | ip->flags |= BTRFS_INODE_DIRSYNC; |
219 | else | 223 | else |
220 | ip->flags &= ~BTRFS_INODE_DIRSYNC; | 224 | ip->flags &= ~BTRFS_INODE_DIRSYNC; |
225 | if (flags & FS_NOCOW_FL) | ||
226 | ip->flags |= BTRFS_INODE_NODATACOW; | ||
227 | else | ||
228 | ip->flags &= ~BTRFS_INODE_NODATACOW; | ||
221 | 229 | ||
222 | /* | 230 | /* |
223 | * The COMPRESS flag can only be changed by users, while the NOCOMPRESS | 231 | * The COMPRESS flag can only be changed by users, while the NOCOMPRESS |
@@ -230,11 +238,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
230 | } else if (flags & FS_COMPR_FL) { | 238 | } else if (flags & FS_COMPR_FL) { |
231 | ip->flags |= BTRFS_INODE_COMPRESS; | 239 | ip->flags |= BTRFS_INODE_COMPRESS; |
232 | ip->flags &= ~BTRFS_INODE_NOCOMPRESS; | 240 | ip->flags &= ~BTRFS_INODE_NOCOMPRESS; |
241 | } else { | ||
242 | ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); | ||
233 | } | 243 | } |
234 | if (flags & FS_NOCOW_FL) | ||
235 | ip->flags |= BTRFS_INODE_NODATACOW; | ||
236 | else if (flags & FS_COW_FL) | ||
237 | ip->flags &= ~BTRFS_INODE_NODATACOW; | ||
238 | 244 | ||
239 | trans = btrfs_join_transaction(root, 1); | 245 | trans = btrfs_join_transaction(root, 1); |
240 | BUG_ON(IS_ERR(trans)); | 246 | BUG_ON(IS_ERR(trans)); |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 9fa08662a88d..2a5404c1c42f 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -819,7 +819,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci) | |||
819 | used |= CEPH_CAP_FILE_CACHE; | 819 | used |= CEPH_CAP_FILE_CACHE; |
820 | if (ci->i_wr_ref) | 820 | if (ci->i_wr_ref) |
821 | used |= CEPH_CAP_FILE_WR; | 821 | used |= CEPH_CAP_FILE_WR; |
822 | if (ci->i_wrbuffer_ref) | 822 | if (ci->i_wb_ref || ci->i_wrbuffer_ref) |
823 | used |= CEPH_CAP_FILE_BUFFER; | 823 | used |= CEPH_CAP_FILE_BUFFER; |
824 | return used; | 824 | return used; |
825 | } | 825 | } |
@@ -1990,11 +1990,11 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got) | |||
1990 | if (got & CEPH_CAP_FILE_WR) | 1990 | if (got & CEPH_CAP_FILE_WR) |
1991 | ci->i_wr_ref++; | 1991 | ci->i_wr_ref++; |
1992 | if (got & CEPH_CAP_FILE_BUFFER) { | 1992 | if (got & CEPH_CAP_FILE_BUFFER) { |
1993 | if (ci->i_wrbuffer_ref == 0) | 1993 | if (ci->i_wb_ref == 0) |
1994 | ihold(&ci->vfs_inode); | 1994 | ihold(&ci->vfs_inode); |
1995 | ci->i_wrbuffer_ref++; | 1995 | ci->i_wb_ref++; |
1996 | dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n", | 1996 | dout("__take_cap_refs %p wb %d -> %d (?)\n", |
1997 | &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref); | 1997 | &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref); |
1998 | } | 1998 | } |
1999 | } | 1999 | } |
2000 | 2000 | ||
@@ -2169,12 +2169,12 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) | |||
2169 | if (--ci->i_rdcache_ref == 0) | 2169 | if (--ci->i_rdcache_ref == 0) |
2170 | last++; | 2170 | last++; |
2171 | if (had & CEPH_CAP_FILE_BUFFER) { | 2171 | if (had & CEPH_CAP_FILE_BUFFER) { |
2172 | if (--ci->i_wrbuffer_ref == 0) { | 2172 | if (--ci->i_wb_ref == 0) { |
2173 | last++; | 2173 | last++; |
2174 | put++; | 2174 | put++; |
2175 | } | 2175 | } |
2176 | dout("put_cap_refs %p wrbuffer %d -> %d (?)\n", | 2176 | dout("put_cap_refs %p wb %d -> %d (?)\n", |
2177 | inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref); | 2177 | inode, ci->i_wb_ref+1, ci->i_wb_ref); |
2178 | } | 2178 | } |
2179 | if (had & CEPH_CAP_FILE_WR) | 2179 | if (had & CEPH_CAP_FILE_WR) |
2180 | if (--ci->i_wr_ref == 0) { | 2180 | if (--ci->i_wr_ref == 0) { |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 03d6dafda61f..70b6a4839c38 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -355,6 +355,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb) | |||
355 | ci->i_rd_ref = 0; | 355 | ci->i_rd_ref = 0; |
356 | ci->i_rdcache_ref = 0; | 356 | ci->i_rdcache_ref = 0; |
357 | ci->i_wr_ref = 0; | 357 | ci->i_wr_ref = 0; |
358 | ci->i_wb_ref = 0; | ||
358 | ci->i_wrbuffer_ref = 0; | 359 | ci->i_wrbuffer_ref = 0; |
359 | ci->i_wrbuffer_ref_head = 0; | 360 | ci->i_wrbuffer_ref_head = 0; |
360 | ci->i_shared_gen = 0; | 361 | ci->i_shared_gen = 0; |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index f60b07b0feb0..d0fae4ce9ba5 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -3304,8 +3304,8 @@ static void con_put(struct ceph_connection *con) | |||
3304 | { | 3304 | { |
3305 | struct ceph_mds_session *s = con->private; | 3305 | struct ceph_mds_session *s = con->private; |
3306 | 3306 | ||
3307 | dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1); | ||
3307 | ceph_put_mds_session(s); | 3308 | ceph_put_mds_session(s); |
3308 | dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref)); | ||
3309 | } | 3309 | } |
3310 | 3310 | ||
3311 | /* | 3311 | /* |
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index e86ec1155f8f..24067d68a554 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
@@ -206,7 +206,7 @@ void ceph_put_snap_realm(struct ceph_mds_client *mdsc, | |||
206 | up_write(&mdsc->snap_rwsem); | 206 | up_write(&mdsc->snap_rwsem); |
207 | } else { | 207 | } else { |
208 | spin_lock(&mdsc->snap_empty_lock); | 208 | spin_lock(&mdsc->snap_empty_lock); |
209 | list_add(&mdsc->snap_empty, &realm->empty_item); | 209 | list_add(&realm->empty_item, &mdsc->snap_empty); |
210 | spin_unlock(&mdsc->snap_empty_lock); | 210 | spin_unlock(&mdsc->snap_empty_lock); |
211 | } | 211 | } |
212 | } | 212 | } |
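
The snap.c change is a one-line argument swap: list_add(new, head) inserts NEW after HEAD, and the old code passed the two the wrong way round, so the global snap_empty list head itself got spliced after the realm's node instead of the realm being queued on the empty list. In isolation:

    /* list_add(new, head): the entry comes first, the head second. */
    list_add(&realm->empty_item, &mdsc->snap_empty);  /* correct   */
    list_add(&mdsc->snap_empty, &realm->empty_item);  /* corrupts! */
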
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index b1f1b8bb1271..f5cabefa98dc 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -293,7 +293,7 @@ struct ceph_inode_info { | |||
293 | 293 | ||
294 | /* held references to caps */ | 294 | /* held references to caps */ |
295 | int i_pin_ref; | 295 | int i_pin_ref; |
296 | int i_rd_ref, i_rdcache_ref, i_wr_ref; | 296 | int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref; |
297 | int i_wrbuffer_ref, i_wrbuffer_ref_head; | 297 | int i_wrbuffer_ref, i_wrbuffer_ref_head; |
298 | u32 i_shared_gen; /* increment each time we get FILE_SHARED */ | 298 | u32 i_shared_gen; /* increment each time we get FILE_SHARED */ |
299 | u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */ | 299 | u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */ |
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 23d43cde4306..1b2e180b018d 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c | |||
@@ -277,6 +277,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen, | |||
277 | 277 | ||
278 | for (i = 0, j = 0; i < srclen; j++) { | 278 | for (i = 0, j = 0; i < srclen; j++) { |
279 | src_char = source[i]; | 279 | src_char = source[i]; |
280 | charlen = 1; | ||
280 | switch (src_char) { | 281 | switch (src_char) { |
281 | case 0: | 282 | case 0: |
282 | put_unaligned(0, &target[j]); | 283 | put_unaligned(0, &target[j]); |
@@ -316,16 +317,13 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen, | |||
316 | dst_char = cpu_to_le16(0x003f); | 317 | dst_char = cpu_to_le16(0x003f); |
317 | charlen = 1; | 318 | charlen = 1; |
318 | } | 319 | } |
319 | /* | ||
320 | * character may take more than one byte in the source | ||
321 | * string, but will take exactly two bytes in the | ||
322 | * target string | ||
323 | */ | ||
324 | i += charlen; | ||
325 | continue; | ||
326 | } | 320 | } |
321 | /* | ||
322 | * character may take more than one byte in the source string, | ||
323 | * but will take exactly two bytes in the target string | ||
324 | */ | ||
325 | i += charlen; | ||
327 | put_unaligned(dst_char, &target[j]); | 326 | put_unaligned(dst_char, &target[j]); |
328 | i++; /* move to next char in source string */ | ||
329 | } | 327 | } |
330 | 328 | ||
331 | ctoUCS_out: | 329 | ctoUCS_out: |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 05f1dcf7d79a..277262a8e82f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -2673,6 +2673,11 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon, | |||
2673 | 0 /* not legacy */, cifs_sb->local_nls, | 2673 | 0 /* not legacy */, cifs_sb->local_nls, |
2674 | cifs_sb->mnt_cifs_flags & | 2674 | cifs_sb->mnt_cifs_flags & |
2675 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 2675 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
2676 | |||
2677 | if (rc == -EOPNOTSUPP || rc == -EINVAL) | ||
2678 | rc = SMBQueryInformation(xid, tcon, full_path, pfile_info, | ||
2679 | cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & | ||
2680 | CIFS_MOUNT_MAP_SPECIAL_CHR); | ||
2676 | kfree(pfile_info); | 2681 | kfree(pfile_info); |
2677 | return rc; | 2682 | return rc; |
2678 | } | 2683 | } |
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 3313dd19f543..9a37a9b6de3a 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c | |||
@@ -53,11 +53,14 @@ DEFINE_SPINLOCK(configfs_dirent_lock); | |||
53 | static void configfs_d_iput(struct dentry * dentry, | 53 | static void configfs_d_iput(struct dentry * dentry, |
54 | struct inode * inode) | 54 | struct inode * inode) |
55 | { | 55 | { |
56 | struct configfs_dirent * sd = dentry->d_fsdata; | 56 | struct configfs_dirent *sd = dentry->d_fsdata; |
57 | 57 | ||
58 | if (sd) { | 58 | if (sd) { |
59 | BUG_ON(sd->s_dentry != dentry); | 59 | BUG_ON(sd->s_dentry != dentry); |
60 | /* Coordinate with configfs_readdir */ | ||
61 | spin_lock(&configfs_dirent_lock); | ||
60 | sd->s_dentry = NULL; | 62 | sd->s_dentry = NULL; |
63 | spin_unlock(&configfs_dirent_lock); | ||
61 | configfs_put(sd); | 64 | configfs_put(sd); |
62 | } | 65 | } |
63 | iput(inode); | 66 | iput(inode); |
@@ -689,7 +692,8 @@ static int create_default_group(struct config_group *parent_group, | |||
689 | sd = child->d_fsdata; | 692 | sd = child->d_fsdata; |
690 | sd->s_type |= CONFIGFS_USET_DEFAULT; | 693 | sd->s_type |= CONFIGFS_USET_DEFAULT; |
691 | } else { | 694 | } else { |
692 | d_delete(child); | 695 | BUG_ON(child->d_inode); |
696 | d_drop(child); | ||
693 | dput(child); | 697 | dput(child); |
694 | } | 698 | } |
695 | } | 699 | } |
@@ -1545,7 +1549,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir | |||
1545 | struct configfs_dirent * parent_sd = dentry->d_fsdata; | 1549 | struct configfs_dirent * parent_sd = dentry->d_fsdata; |
1546 | struct configfs_dirent *cursor = filp->private_data; | 1550 | struct configfs_dirent *cursor = filp->private_data; |
1547 | struct list_head *p, *q = &cursor->s_sibling; | 1551 | struct list_head *p, *q = &cursor->s_sibling; |
1548 | ino_t ino; | 1552 | ino_t ino = 0; |
1549 | int i = filp->f_pos; | 1553 | int i = filp->f_pos; |
1550 | 1554 | ||
1551 | switch (i) { | 1555 | switch (i) { |
@@ -1573,6 +1577,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir | |||
1573 | struct configfs_dirent *next; | 1577 | struct configfs_dirent *next; |
1574 | const char * name; | 1578 | const char * name; |
1575 | int len; | 1579 | int len; |
1580 | struct inode *inode = NULL; | ||
1576 | 1581 | ||
1577 | next = list_entry(p, struct configfs_dirent, | 1582 | next = list_entry(p, struct configfs_dirent, |
1578 | s_sibling); | 1583 | s_sibling); |
@@ -1581,9 +1586,28 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir | |||
1581 | 1586 | ||
1582 | name = configfs_get_name(next); | 1587 | name = configfs_get_name(next); |
1583 | len = strlen(name); | 1588 | len = strlen(name); |
1584 | if (next->s_dentry) | 1589 | |
1585 | ino = next->s_dentry->d_inode->i_ino; | 1590 | /* |
1586 | else | 1591 | * We'll have a dentry and an inode for |
1592 | * PINNED items and for open attribute | ||
1593 | * files. We lock here to prevent a race | ||
1594 | * with configfs_d_iput() clearing | ||
1595 | * s_dentry before calling iput(). | ||
1596 | * | ||
1597 | * Why do we go to the trouble? If | ||
1598 | * someone has an attribute file open, | ||
1599 | * the inode number should match until | ||
1600 | * they close it. Beyond that, we don't | ||
1601 | * care. | ||
1602 | */ | ||
1603 | spin_lock(&configfs_dirent_lock); | ||
1604 | dentry = next->s_dentry; | ||
1605 | if (dentry) | ||
1606 | inode = dentry->d_inode; | ||
1607 | if (inode) | ||
1608 | ino = inode->i_ino; | ||
1609 | spin_unlock(&configfs_dirent_lock); | ||
1610 | if (!inode) | ||
1587 | ino = iunique(configfs_sb, 2); | 1611 | ino = iunique(configfs_sb, 2); |
1588 | 1612 | ||
1589 | if (filldir(dirent, name, len, filp->f_pos, ino, | 1613 | if (filldir(dirent, name, len, filp->f_pos, ino, |
@@ -1683,7 +1707,8 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) | |||
1683 | err = configfs_attach_group(sd->s_element, &group->cg_item, | 1707 | err = configfs_attach_group(sd->s_element, &group->cg_item, |
1684 | dentry); | 1708 | dentry); |
1685 | if (err) { | 1709 | if (err) { |
1686 | d_delete(dentry); | 1710 | BUG_ON(dentry->d_inode); |
1711 | d_drop(dentry); | ||
1687 | dput(dentry); | 1712 | dput(dentry); |
1688 | } else { | 1713 | } else { |
1689 | spin_lock(&configfs_dirent_lock); | 1714 | spin_lock(&configfs_dirent_lock); |
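
The two configfs locking hunks close one race: configfs_d_iput() can clear sd->s_dentry while configfs_readdir() is chasing it to an inode number. The fix is the usual snapshot-under-lock pattern, condensed here from the hunks above:

    /* Writer (configfs_d_iput): publish NULL under the lock. */
    spin_lock(&configfs_dirent_lock);
    sd->s_dentry = NULL;
    spin_unlock(&configfs_dirent_lock);

    /* Reader (configfs_readdir): snapshot dentry and inode under
     * the same lock; fall back to iunique() when nothing is pinned. */
    spin_lock(&configfs_dirent_lock);
    dentry = next->s_dentry;
    inode = dentry ? dentry->d_inode : NULL;
    spin_unlock(&configfs_dirent_lock);
    ino = inode ? inode->i_ino : iunique(configfs_sb, 2);
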
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 89d394d8fe24..90f76575c056 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c | |||
@@ -428,26 +428,17 @@ static ssize_t write_file_bool(struct file *file, const char __user *user_buf, | |||
428 | size_t count, loff_t *ppos) | 428 | size_t count, loff_t *ppos) |
429 | { | 429 | { |
430 | char buf[32]; | 430 | char buf[32]; |
431 | int buf_size; | 431 | size_t buf_size; |
432 | bool bv; | ||
432 | u32 *val = file->private_data; | 433 | u32 *val = file->private_data; |
433 | 434 | ||
434 | buf_size = min(count, (sizeof(buf)-1)); | 435 | buf_size = min(count, (sizeof(buf)-1)); |
435 | if (copy_from_user(buf, user_buf, buf_size)) | 436 | if (copy_from_user(buf, user_buf, buf_size)) |
436 | return -EFAULT; | 437 | return -EFAULT; |
437 | 438 | ||
438 | switch (buf[0]) { | 439 | if (strtobool(buf, &bv) == 0) |
439 | case 'y': | 440 | *val = bv; |
440 | case 'Y': | 441 | |
441 | case '1': | ||
442 | *val = 1; | ||
443 | break; | ||
444 | case 'n': | ||
445 | case 'N': | ||
446 | case '0': | ||
447 | *val = 0; | ||
448 | break; | ||
449 | } | ||
450 | |||
451 | return count; | 442 | return count; |
452 | } | 443 | } |
453 | 444 | ||
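
write_file_bool() now defers to strtobool() from lib/string.c, which accepts the same inputs the removed switch did (leading y/Y/1 for true, n/N/0 for false) and returns 0 on success or -EINVAL otherwise. Minimal usage:

    bool bv;

    /* *val is updated only when the string parses; unrecognized
     * input is silently ignored, matching the old switch. */
    if (strtobool(buf, &bv) == 0)
            *val = bv;
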
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index c6ba49bd95b3..b32eb29a4e6f 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -174,7 +174,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) | |||
174 | if (!inode) | 174 | if (!inode) |
175 | return 0; | 175 | return 0; |
176 | 176 | ||
177 | if (nd->flags & LOOKUP_RCU) | 177 | if (nd && (nd->flags & LOOKUP_RCU)) |
178 | return -ECHILD; | 178 | return -ECHILD; |
179 | 179 | ||
180 | fc = get_fuse_conn(inode); | 180 | fc = get_fuse_conn(inode); |
diff --git a/fs/namei.c b/fs/namei.c index 54fc993e3027..e3c4f112ebf7 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -179,7 +179,7 @@ EXPORT_SYMBOL(putname); | |||
179 | static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, | 179 | static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, |
180 | int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) | 180 | int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) |
181 | { | 181 | { |
182 | umode_t mode = inode->i_mode; | 182 | unsigned int mode = inode->i_mode; |
183 | 183 | ||
184 | mask &= MAY_READ | MAY_WRITE | MAY_EXEC; | 184 | mask &= MAY_READ | MAY_WRITE | MAY_EXEC; |
185 | 185 | ||
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 6f8192f4cfc7..be79dc9f386d 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c | |||
@@ -117,6 +117,8 @@ static int filelayout_async_handle_error(struct rpc_task *task, | |||
117 | case -EKEYEXPIRED: | 117 | case -EKEYEXPIRED: |
118 | rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); | 118 | rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); |
119 | break; | 119 | break; |
120 | case -NFS4ERR_RETRY_UNCACHED_REP: | ||
121 | break; | ||
120 | default: | 122 | default: |
121 | dprintk("%s DS error. Retry through MDS %d\n", __func__, | 123 | dprintk("%s DS error. Retry through MDS %d\n", __func__, |
122 | task->tk_status); | 124 | task->tk_status); |
@@ -416,7 +418,8 @@ static int | |||
416 | filelayout_check_layout(struct pnfs_layout_hdr *lo, | 418 | filelayout_check_layout(struct pnfs_layout_hdr *lo, |
417 | struct nfs4_filelayout_segment *fl, | 419 | struct nfs4_filelayout_segment *fl, |
418 | struct nfs4_layoutget_res *lgr, | 420 | struct nfs4_layoutget_res *lgr, |
419 | struct nfs4_deviceid *id) | 421 | struct nfs4_deviceid *id, |
422 | gfp_t gfp_flags) | ||
420 | { | 423 | { |
421 | struct nfs4_file_layout_dsaddr *dsaddr; | 424 | struct nfs4_file_layout_dsaddr *dsaddr; |
422 | int status = -EINVAL; | 425 | int status = -EINVAL; |
@@ -439,7 +442,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo, | |||
439 | /* find and reference the deviceid */ | 442 | /* find and reference the deviceid */ |
440 | dsaddr = nfs4_fl_find_get_deviceid(id); | 443 | dsaddr = nfs4_fl_find_get_deviceid(id); |
441 | if (dsaddr == NULL) { | 444 | if (dsaddr == NULL) { |
442 | dsaddr = get_device_info(lo->plh_inode, id); | 445 | dsaddr = get_device_info(lo->plh_inode, id, gfp_flags); |
443 | if (dsaddr == NULL) | 446 | if (dsaddr == NULL) |
444 | goto out; | 447 | goto out; |
445 | } | 448 | } |
@@ -500,7 +503,8 @@ static int | |||
500 | filelayout_decode_layout(struct pnfs_layout_hdr *flo, | 503 | filelayout_decode_layout(struct pnfs_layout_hdr *flo, |
501 | struct nfs4_filelayout_segment *fl, | 504 | struct nfs4_filelayout_segment *fl, |
502 | struct nfs4_layoutget_res *lgr, | 505 | struct nfs4_layoutget_res *lgr, |
503 | struct nfs4_deviceid *id) | 506 | struct nfs4_deviceid *id, |
507 | gfp_t gfp_flags) | ||
504 | { | 508 | { |
505 | struct xdr_stream stream; | 509 | struct xdr_stream stream; |
506 | struct xdr_buf buf = { | 510 | struct xdr_buf buf = { |
@@ -516,7 +520,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, | |||
516 | 520 | ||
517 | dprintk("%s: set_layout_map Begin\n", __func__); | 521 | dprintk("%s: set_layout_map Begin\n", __func__); |
518 | 522 | ||
519 | scratch = alloc_page(GFP_KERNEL); | 523 | scratch = alloc_page(gfp_flags); |
520 | if (!scratch) | 524 | if (!scratch) |
521 | return -ENOMEM; | 525 | return -ENOMEM; |
522 | 526 | ||
@@ -554,13 +558,13 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, | |||
554 | goto out_err; | 558 | goto out_err; |
555 | 559 | ||
556 | fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), | 560 | fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), |
557 | GFP_KERNEL); | 561 | gfp_flags); |
558 | if (!fl->fh_array) | 562 | if (!fl->fh_array) |
559 | goto out_err; | 563 | goto out_err; |
560 | 564 | ||
561 | for (i = 0; i < fl->num_fh; i++) { | 565 | for (i = 0; i < fl->num_fh; i++) { |
562 | /* Do we want to use a mempool here? */ | 566 | /* Do we want to use a mempool here? */ |
563 | fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL); | 567 | fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags); |
564 | if (!fl->fh_array[i]) | 568 | if (!fl->fh_array[i]) |
565 | goto out_err_free; | 569 | goto out_err_free; |
566 | 570 | ||
@@ -605,19 +609,20 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg) | |||
605 | 609 | ||
606 | static struct pnfs_layout_segment * | 610 | static struct pnfs_layout_segment * |
607 | filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, | 611 | filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, |
608 | struct nfs4_layoutget_res *lgr) | 612 | struct nfs4_layoutget_res *lgr, |
613 | gfp_t gfp_flags) | ||
609 | { | 614 | { |
610 | struct nfs4_filelayout_segment *fl; | 615 | struct nfs4_filelayout_segment *fl; |
611 | int rc; | 616 | int rc; |
612 | struct nfs4_deviceid id; | 617 | struct nfs4_deviceid id; |
613 | 618 | ||
614 | dprintk("--> %s\n", __func__); | 619 | dprintk("--> %s\n", __func__); |
615 | fl = kzalloc(sizeof(*fl), GFP_KERNEL); | 620 | fl = kzalloc(sizeof(*fl), gfp_flags); |
616 | if (!fl) | 621 | if (!fl) |
617 | return NULL; | 622 | return NULL; |
618 | 623 | ||
619 | rc = filelayout_decode_layout(layoutid, fl, lgr, &id); | 624 | rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags); |
620 | if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) { | 625 | if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) { |
621 | _filelayout_free_lseg(fl); | 626 | _filelayout_free_lseg(fl); |
622 | return NULL; | 627 | return NULL; |
623 | } | 628 | } |
@@ -633,7 +638,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, | |||
633 | int size = (fl->stripe_type == STRIPE_SPARSE) ? | 638 | int size = (fl->stripe_type == STRIPE_SPARSE) ? |
634 | fl->dsaddr->ds_num : fl->dsaddr->stripe_count; | 639 | fl->dsaddr->ds_num : fl->dsaddr->stripe_count; |
635 | 640 | ||
636 | fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL); | 641 | fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags); |
637 | if (!fl->commit_buckets) { | 642 | if (!fl->commit_buckets) { |
638 | filelayout_free_lseg(&fl->generic_hdr); | 643 | filelayout_free_lseg(&fl->generic_hdr); |
639 | return NULL; | 644 | return NULL; |
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h index 7c44579f5832..2b461d77b43a 100644 --- a/fs/nfs/nfs4filelayout.h +++ b/fs/nfs/nfs4filelayout.h | |||
@@ -104,6 +104,6 @@ extern struct nfs4_file_layout_dsaddr * | |||
104 | nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id); | 104 | nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id); |
105 | extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); | 105 | extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); |
106 | struct nfs4_file_layout_dsaddr * | 106 | struct nfs4_file_layout_dsaddr * |
107 | get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id); | 107 | get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags); |
108 | 108 | ||
109 | #endif /* FS_NFS_NFS4FILELAYOUT_H */ | 109 | #endif /* FS_NFS_NFS4FILELAYOUT_H */ |
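
All of the filelayout hunks are one mechanical change: a gfp_t parameter is threaded from the lseg allocation entry points down through layout decoding and device lookup to every kzalloc/kmalloc/alloc_page, so callers on the writeback path can pass GFP_NOFS instead of the previously hard-coded GFP_KERNEL. The shape of the refactor, in miniature (hypothetical names):

    /* Before: allocation policy fixed at the leaf. */
    static struct foo *leaf_alloc(void)
    {
            return kzalloc(sizeof(struct foo), GFP_KERNEL);
    }

    /* After: the entry point decides and threads it through. */
    static struct foo *leaf_alloc(gfp_t gfp_flags)
    {
            return kzalloc(sizeof(struct foo), gfp_flags);
    }
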
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index de5350f2b249..db07c7af1395 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c | |||
@@ -225,11 +225,11 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr) | |||
225 | } | 225 | } |
226 | 226 | ||
227 | static struct nfs4_pnfs_ds * | 227 | static struct nfs4_pnfs_ds * |
228 | nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port) | 228 | nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags) |
229 | { | 229 | { |
230 | struct nfs4_pnfs_ds *tmp_ds, *ds; | 230 | struct nfs4_pnfs_ds *tmp_ds, *ds; |
231 | 231 | ||
232 | ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL); | 232 | ds = kzalloc(sizeof(*tmp_ds), gfp_flags); |
233 | if (!ds) | 233 | if (!ds) |
234 | goto out; | 234 | goto out; |
235 | 235 | ||
@@ -261,7 +261,7 @@ out: | |||
261 | * Currently only support ipv4, and one multi-path address. | 261 | * Currently only support ipv4, and one multi-path address. |
262 | */ | 262 | */ |
263 | static struct nfs4_pnfs_ds * | 263 | static struct nfs4_pnfs_ds * |
264 | decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) | 264 | decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags) |
265 | { | 265 | { |
266 | struct nfs4_pnfs_ds *ds = NULL; | 266 | struct nfs4_pnfs_ds *ds = NULL; |
267 | char *buf; | 267 | char *buf; |
@@ -303,7 +303,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) | |||
303 | rlen); | 303 | rlen); |
304 | goto out_err; | 304 | goto out_err; |
305 | } | 305 | } |
306 | buf = kmalloc(rlen + 1, GFP_KERNEL); | 306 | buf = kmalloc(rlen + 1, gfp_flags); |
307 | if (!buf) { | 307 | if (!buf) { |
308 | dprintk("%s: Not enough memory\n", __func__); | 308 | dprintk("%s: Not enough memory\n", __func__); |
309 | goto out_err; | 309 | goto out_err; |
@@ -333,7 +333,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) | |||
333 | sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]); | 333 | sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]); |
334 | port = htons((tmp[0] << 8) | (tmp[1])); | 334 | port = htons((tmp[0] << 8) | (tmp[1])); |
335 | 335 | ||
336 | ds = nfs4_pnfs_ds_add(inode, ip_addr, port); | 336 | ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags); |
337 | dprintk("%s: Decoded address and port %s\n", __func__, buf); | 337 | dprintk("%s: Decoded address and port %s\n", __func__, buf); |
338 | out_free: | 338 | out_free: |
339 | kfree(buf); | 339 | kfree(buf); |
@@ -343,7 +343,7 @@ out_err: | |||
343 | 343 | ||
344 | /* Decode opaque device data and return the result */ | 344 | /* Decode opaque device data and return the result */ |
345 | static struct nfs4_file_layout_dsaddr* | 345 | static struct nfs4_file_layout_dsaddr* |
346 | decode_device(struct inode *ino, struct pnfs_device *pdev) | 346 | decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) |
347 | { | 347 | { |
348 | int i; | 348 | int i; |
349 | u32 cnt, num; | 349 | u32 cnt, num; |
@@ -362,7 +362,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) | |||
362 | struct page *scratch; | 362 | struct page *scratch; |
363 | 363 | ||
364 | /* set up xdr stream */ | 364 | /* set up xdr stream */ |
365 | scratch = alloc_page(GFP_KERNEL); | 365 | scratch = alloc_page(gfp_flags); |
366 | if (!scratch) | 366 | if (!scratch) |
367 | goto out_err; | 367 | goto out_err; |
368 | 368 | ||
@@ -384,7 +384,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) | |||
384 | } | 384 | } |
385 | 385 | ||
386 | /* read stripe indices */ | 386 | /* read stripe indices */ |
387 | stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL); | 387 | stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags); |
388 | if (!stripe_indices) | 388 | if (!stripe_indices) |
389 | goto out_err_free_scratch; | 389 | goto out_err_free_scratch; |
390 | 390 | ||
@@ -423,7 +423,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) | |||
423 | 423 | ||
424 | dsaddr = kzalloc(sizeof(*dsaddr) + | 424 | dsaddr = kzalloc(sizeof(*dsaddr) + |
425 | (sizeof(struct nfs4_pnfs_ds *) * (num - 1)), | 425 | (sizeof(struct nfs4_pnfs_ds *) * (num - 1)), |
426 | GFP_KERNEL); | 426 | gfp_flags); |
427 | if (!dsaddr) | 427 | if (!dsaddr) |
428 | goto out_err_free_stripe_indices; | 428 | goto out_err_free_stripe_indices; |
429 | 429 | ||
@@ -452,7 +452,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) | |||
452 | for (j = 0; j < mp_count; j++) { | 452 | for (j = 0; j < mp_count; j++) { |
453 | if (j == 0) { | 453 | if (j == 0) { |
454 | dsaddr->ds_list[i] = decode_and_add_ds(&stream, | 454 | dsaddr->ds_list[i] = decode_and_add_ds(&stream, |
455 | ino); | 455 | ino, gfp_flags); |
456 | if (dsaddr->ds_list[i] == NULL) | 456 | if (dsaddr->ds_list[i] == NULL) |
457 | goto out_err_free_deviceid; | 457 | goto out_err_free_deviceid; |
458 | } else { | 458 | } else { |
@@ -503,12 +503,12 @@ out_err: | |||
503 | * available devices. | 503 | * available devices. |
504 | */ | 504 | */ |
505 | static struct nfs4_file_layout_dsaddr * | 505 | static struct nfs4_file_layout_dsaddr * |
506 | decode_and_add_device(struct inode *inode, struct pnfs_device *dev) | 506 | decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags) |
507 | { | 507 | { |
508 | struct nfs4_file_layout_dsaddr *d, *new; | 508 | struct nfs4_file_layout_dsaddr *d, *new; |
509 | long hash; | 509 | long hash; |
510 | 510 | ||
511 | new = decode_device(inode, dev); | 511 | new = decode_device(inode, dev, gfp_flags); |
512 | if (!new) { | 512 | if (!new) { |
513 | printk(KERN_WARNING "%s: Could not decode or add device\n", | 513 | printk(KERN_WARNING "%s: Could not decode or add device\n", |
514 | __func__); | 514 | __func__); |
@@ -537,7 +537,7 @@ decode_and_add_device(struct inode *inode, struct pnfs_device *dev) | |||
537 | * of available devices, and return it. | 537 | * of available devices, and return it. |
538 | */ | 538 | */ |
539 | struct nfs4_file_layout_dsaddr * | 539 | struct nfs4_file_layout_dsaddr * |
540 | get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) | 540 | get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags) |
541 | { | 541 | { |
542 | struct pnfs_device *pdev = NULL; | 542 | struct pnfs_device *pdev = NULL; |
543 | u32 max_resp_sz; | 543 | u32 max_resp_sz; |
@@ -556,17 +556,17 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) | |||
556 | dprintk("%s inode %p max_resp_sz %u max_pages %d\n", | 556 | dprintk("%s inode %p max_resp_sz %u max_pages %d\n", |
557 | __func__, inode, max_resp_sz, max_pages); | 557 | __func__, inode, max_resp_sz, max_pages); |
558 | 558 | ||
559 | pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL); | 559 | pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags); |
560 | if (pdev == NULL) | 560 | if (pdev == NULL) |
561 | return NULL; | 561 | return NULL; |
562 | 562 | ||
563 | pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); | 563 | pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); |
564 | if (pages == NULL) { | 564 | if (pages == NULL) { |
565 | kfree(pdev); | 565 | kfree(pdev); |
566 | return NULL; | 566 | return NULL; |
567 | } | 567 | } |
568 | for (i = 0; i < max_pages; i++) { | 568 | for (i = 0; i < max_pages; i++) { |
569 | pages[i] = alloc_page(GFP_KERNEL); | 569 | pages[i] = alloc_page(gfp_flags); |
570 | if (!pages[i]) | 570 | if (!pages[i]) |
571 | goto out_free; | 571 | goto out_free; |
572 | } | 572 | } |
@@ -587,7 +587,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) | |||
587 | * Found new device, need to decode it and then add it to the | 587 | * Found new device, need to decode it and then add it to the |
588 | * list of known devices for this mountpoint. | 588 | * list of known devices for this mountpoint. |
589 | */ | 589 | */ |
590 | dsaddr = decode_and_add_device(inode, pdev); | 590 | dsaddr = decode_and_add_device(inode, pdev, gfp_flags); |
591 | out_free: | 591 | out_free: |
592 | for (i = 0; i < max_pages; i++) | 592 | for (i = 0; i < max_pages; i++) |
593 | __free_page(pages[i]); | 593 | __free_page(pages[i]); |
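The hunks above all make the same change: every allocation in the device-decoding chain takes its flags from a gfp_t argument threaded down from the caller instead of hard-coding GFP_KERNEL, so the write path can pass GFP_NOFS and avoid re-entering the filesystem from memory reclaim. A minimal sketch of the pattern, with illustrative names rather than the patch's functions:

	/* Sketch: thread one allocation context through a call chain. */
	#include <linux/gfp.h>
	#include <linux/slab.h>

	struct blob {
		char *buf;
	};

	static struct blob *blob_alloc(size_t len, gfp_t gfp_flags)
	{
		struct blob *b = kzalloc(sizeof(*b), gfp_flags);

		if (!b)
			return NULL;
		b->buf = kmalloc(len, gfp_flags);  /* same flags all the way down */
		if (!b->buf) {
			kfree(b);
			return NULL;
		}
		return b;
	}

	/* Read path: reclaim may re-enter the fs, so GFP_KERNEL is fine. */
	/* Write path: it must not, so the caller passes GFP_NOFS.        */

The choice is made once, at the top of the I/O path, and every helper below simply forwards it.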
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 69c0f3c5ee7a..cf1b339c3937 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -300,6 +300,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc | |||
300 | ret = nfs4_delay(server->client, &exception->timeout); | 300 | ret = nfs4_delay(server->client, &exception->timeout); |
301 | if (ret != 0) | 301 | if (ret != 0) |
302 | break; | 302 | break; |
303 | case -NFS4ERR_RETRY_UNCACHED_REP: | ||
303 | case -NFS4ERR_OLD_STATEID: | 304 | case -NFS4ERR_OLD_STATEID: |
304 | exception->retry = 1; | 305 | exception->retry = 1; |
305 | break; | 306 | break; |
@@ -3695,6 +3696,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, | |||
3695 | rpc_delay(task, NFS4_POLL_RETRY_MAX); | 3696 | rpc_delay(task, NFS4_POLL_RETRY_MAX); |
3696 | task->tk_status = 0; | 3697 | task->tk_status = 0; |
3697 | return -EAGAIN; | 3698 | return -EAGAIN; |
3699 | case -NFS4ERR_RETRY_UNCACHED_REP: | ||
3698 | case -NFS4ERR_OLD_STATEID: | 3700 | case -NFS4ERR_OLD_STATEID: |
3699 | task->tk_status = 0; | 3701 | task->tk_status = 0; |
3700 | return -EAGAIN; | 3702 | return -EAGAIN; |
@@ -4844,6 +4846,8 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) | |||
4844 | dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); | 4846 | dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); |
4845 | rpc_delay(task, NFS4_POLL_RETRY_MIN); | 4847 | rpc_delay(task, NFS4_POLL_RETRY_MIN); |
4846 | task->tk_status = 0; | 4848 | task->tk_status = 0; |
4849 | /* fall through */ | ||
4850 | case -NFS4ERR_RETRY_UNCACHED_REP: | ||
4847 | nfs_restart_rpc(task, data->clp); | 4851 | nfs_restart_rpc(task, data->clp); |
4848 | return; | 4852 | return; |
4849 | } | 4853 | } |
@@ -5479,6 +5483,8 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf | |||
5479 | break; | 5483 | break; |
5480 | case -NFS4ERR_DELAY: | 5484 | case -NFS4ERR_DELAY: |
5481 | rpc_delay(task, NFS4_POLL_RETRY_MAX); | 5485 | rpc_delay(task, NFS4_POLL_RETRY_MAX); |
5486 | /* fall through */ | ||
5487 | case -NFS4ERR_RETRY_UNCACHED_REP: | ||
5482 | return -EAGAIN; | 5488 | return -EAGAIN; |
5483 | default: | 5489 | default: |
5484 | nfs4_schedule_lease_recovery(clp); | 5490 | nfs4_schedule_lease_recovery(clp); |
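All the nfs4proc.c hunks add -NFS4ERR_RETRY_UNCACHED_REP by placing the new case label directly in front of a case that already retries, reusing its action via switch fall-through (annotated with a /* fall through */ comment where the label follows executable code). A condensed sketch of the shape, collapsing the call sites into one illustrative switch:

	switch (errorcode) {
	case -NFS4ERR_DELAY:
		/* back off before retrying */
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		/* fall through */
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;	/* retry the request */
		break;
	default:
		break;
	}

An uncached-reply retry needs no delay, which is why the new label sits after the rpc_delay() call rather than before it.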
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index ff681ab65d31..f57f5281a520 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -383,6 +383,7 @@ pnfs_destroy_all_layouts(struct nfs_client *clp) | |||
383 | plh_layouts); | 383 | plh_layouts); |
384 | dprintk("%s freeing layout for inode %lu\n", __func__, | 384 | dprintk("%s freeing layout for inode %lu\n", __func__, |
385 | lo->plh_inode->i_ino); | 385 | lo->plh_inode->i_ino); |
386 | list_del_init(&lo->plh_layouts); | ||
386 | pnfs_destroy_layout(NFS_I(lo->plh_inode)); | 387 | pnfs_destroy_layout(NFS_I(lo->plh_inode)); |
387 | } | 388 | } |
388 | } | 389 | } |
@@ -466,7 +467,8 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, | |||
466 | static struct pnfs_layout_segment * | 467 | static struct pnfs_layout_segment * |
467 | send_layoutget(struct pnfs_layout_hdr *lo, | 468 | send_layoutget(struct pnfs_layout_hdr *lo, |
468 | struct nfs_open_context *ctx, | 469 | struct nfs_open_context *ctx, |
469 | u32 iomode) | 470 | u32 iomode, |
471 | gfp_t gfp_flags) | ||
470 | { | 472 | { |
471 | struct inode *ino = lo->plh_inode; | 473 | struct inode *ino = lo->plh_inode; |
472 | struct nfs_server *server = NFS_SERVER(ino); | 474 | struct nfs_server *server = NFS_SERVER(ino); |
@@ -479,7 +481,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, | |||
479 | dprintk("--> %s\n", __func__); | 481 | dprintk("--> %s\n", __func__); |
480 | 482 | ||
481 | BUG_ON(ctx == NULL); | 483 | BUG_ON(ctx == NULL); |
482 | lgp = kzalloc(sizeof(*lgp), GFP_KERNEL); | 484 | lgp = kzalloc(sizeof(*lgp), gfp_flags); |
483 | if (lgp == NULL) | 485 | if (lgp == NULL) |
484 | return NULL; | 486 | return NULL; |
485 | 487 | ||
@@ -487,12 +489,12 @@ send_layoutget(struct pnfs_layout_hdr *lo, | |||
487 | max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; | 489 | max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; |
488 | max_pages = max_resp_sz >> PAGE_SHIFT; | 490 | max_pages = max_resp_sz >> PAGE_SHIFT; |
489 | 491 | ||
490 | pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); | 492 | pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); |
491 | if (!pages) | 493 | if (!pages) |
492 | goto out_err_free; | 494 | goto out_err_free; |
493 | 495 | ||
494 | for (i = 0; i < max_pages; i++) { | 496 | for (i = 0; i < max_pages; i++) { |
495 | pages[i] = alloc_page(GFP_KERNEL); | 497 | pages[i] = alloc_page(gfp_flags); |
496 | if (!pages[i]) | 498 | if (!pages[i]) |
497 | goto out_err_free; | 499 | goto out_err_free; |
498 | } | 500 | } |
@@ -508,6 +510,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, | |||
508 | lgp->args.layout.pages = pages; | 510 | lgp->args.layout.pages = pages; |
509 | lgp->args.layout.pglen = max_pages * PAGE_SIZE; | 511 | lgp->args.layout.pglen = max_pages * PAGE_SIZE; |
510 | lgp->lsegpp = &lseg; | 512 | lgp->lsegpp = &lseg; |
513 | lgp->gfp_flags = gfp_flags; | ||
511 | 514 | ||
512 | /* Synchronously retrieve layout information from server and | 515 | /* Synchronously retrieve layout information from server and |
513 | * store in lseg. | 516 | * store in lseg. |
@@ -665,11 +668,11 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo, | |||
665 | } | 668 | } |
666 | 669 | ||
667 | static struct pnfs_layout_hdr * | 670 | static struct pnfs_layout_hdr * |
668 | alloc_init_layout_hdr(struct inode *ino) | 671 | alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags) |
669 | { | 672 | { |
670 | struct pnfs_layout_hdr *lo; | 673 | struct pnfs_layout_hdr *lo; |
671 | 674 | ||
672 | lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL); | 675 | lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags); |
673 | if (!lo) | 676 | if (!lo) |
674 | return NULL; | 677 | return NULL; |
675 | atomic_set(&lo->plh_refcount, 1); | 678 | atomic_set(&lo->plh_refcount, 1); |
@@ -681,7 +684,7 @@ alloc_init_layout_hdr(struct inode *ino) | |||
681 | } | 684 | } |
682 | 685 | ||
683 | static struct pnfs_layout_hdr * | 686 | static struct pnfs_layout_hdr * |
684 | pnfs_find_alloc_layout(struct inode *ino) | 687 | pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags) |
685 | { | 688 | { |
686 | struct nfs_inode *nfsi = NFS_I(ino); | 689 | struct nfs_inode *nfsi = NFS_I(ino); |
687 | struct pnfs_layout_hdr *new = NULL; | 690 | struct pnfs_layout_hdr *new = NULL; |
@@ -696,7 +699,7 @@ pnfs_find_alloc_layout(struct inode *ino) | |||
696 | return nfsi->layout; | 699 | return nfsi->layout; |
697 | } | 700 | } |
698 | spin_unlock(&ino->i_lock); | 701 | spin_unlock(&ino->i_lock); |
699 | new = alloc_init_layout_hdr(ino); | 702 | new = alloc_init_layout_hdr(ino, gfp_flags); |
700 | spin_lock(&ino->i_lock); | 703 | spin_lock(&ino->i_lock); |
701 | 704 | ||
702 | if (likely(nfsi->layout == NULL)) /* Won the race? */ | 705 | if (likely(nfsi->layout == NULL)) /* Won the race? */ |
@@ -756,7 +759,8 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode) | |||
756 | struct pnfs_layout_segment * | 759 | struct pnfs_layout_segment * |
757 | pnfs_update_layout(struct inode *ino, | 760 | pnfs_update_layout(struct inode *ino, |
758 | struct nfs_open_context *ctx, | 761 | struct nfs_open_context *ctx, |
759 | enum pnfs_iomode iomode) | 762 | enum pnfs_iomode iomode, |
763 | gfp_t gfp_flags) | ||
760 | { | 764 | { |
761 | struct nfs_inode *nfsi = NFS_I(ino); | 765 | struct nfs_inode *nfsi = NFS_I(ino); |
762 | struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; | 766 | struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; |
@@ -767,7 +771,7 @@ pnfs_update_layout(struct inode *ino, | |||
767 | if (!pnfs_enabled_sb(NFS_SERVER(ino))) | 771 | if (!pnfs_enabled_sb(NFS_SERVER(ino))) |
768 | return NULL; | 772 | return NULL; |
769 | spin_lock(&ino->i_lock); | 773 | spin_lock(&ino->i_lock); |
770 | lo = pnfs_find_alloc_layout(ino); | 774 | lo = pnfs_find_alloc_layout(ino, gfp_flags); |
771 | if (lo == NULL) { | 775 | if (lo == NULL) { |
772 | dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__); | 776 | dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__); |
773 | goto out_unlock; | 777 | goto out_unlock; |
@@ -807,7 +811,7 @@ pnfs_update_layout(struct inode *ino, | |||
807 | spin_unlock(&clp->cl_lock); | 811 | spin_unlock(&clp->cl_lock); |
808 | } | 812 | } |
809 | 813 | ||
810 | lseg = send_layoutget(lo, ctx, iomode); | 814 | lseg = send_layoutget(lo, ctx, iomode, gfp_flags); |
811 | if (!lseg && first) { | 815 | if (!lseg && first) { |
812 | spin_lock(&clp->cl_lock); | 816 | spin_lock(&clp->cl_lock); |
813 | list_del_init(&lo->plh_layouts); | 817 | list_del_init(&lo->plh_layouts); |
@@ -846,7 +850,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) | |||
846 | goto out; | 850 | goto out; |
847 | } | 851 | } |
848 | /* Inject layout blob into I/O device driver */ | 852 | /* Inject layout blob into I/O device driver */ |
849 | lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res); | 853 | lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags); |
850 | if (!lseg || IS_ERR(lseg)) { | 854 | if (!lseg || IS_ERR(lseg)) { |
851 | if (!lseg) | 855 | if (!lseg) |
852 | status = -ENOMEM; | 856 | status = -ENOMEM; |
@@ -899,7 +903,8 @@ static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio, | |||
899 | /* This is the first coalesce call for a series of nfs_pages */ | 903 | /* This is the first coalesce call for a series of nfs_pages */ |
900 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, | 904 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, |
901 | prev->wb_context, | 905 | prev->wb_context, |
902 | IOMODE_READ); | 906 | IOMODE_READ, |
907 | GFP_KERNEL); | ||
903 | } | 908 | } |
904 | return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); | 909 | return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); |
905 | } | 910 | } |
@@ -921,7 +926,8 @@ static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio, | |||
921 | /* This is the first coalesce call for a series of nfs_pages */ | 926 | /* This is the first coalesce call for a series of nfs_pages */ |
922 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, | 927 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, |
923 | prev->wb_context, | 928 | prev->wb_context, |
924 | IOMODE_RW); | 929 | IOMODE_RW, |
930 | GFP_NOFS); | ||
925 | } | 931 | } |
926 | return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); | 932 | return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); |
927 | } | 933 | } |
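Besides converting its own allocations, send_layoutget() records the flags in the request (lgp->gfp_flags) so that pnfs_layout_process(), which runs when the LAYOUTGET reply comes back, can hand the same context to the layout driver's alloc_lseg(). Capturing the submitter's allocation constraints in the request object is the general pattern; a sketch with illustrative names:

	#include <linux/slab.h>

	struct request {
		gfp_t gfp_flags;	/* captured when the request is built */
		/* ... */
	};

	static struct request *submit(gfp_t gfp_flags)
	{
		struct request *req = kzalloc(sizeof(*req), gfp_flags);

		if (!req)
			return NULL;
		req->gfp_flags = gfp_flags;	/* completion path reuses it */
		/* queue req ... */
		return req;
	}

	static void *on_reply(struct request *req, size_t len)
	{
		/* runs later, possibly under writeback: honor the saved context */
		return kmalloc(len, req->gfp_flags);
	}

The read-side callers pass GFP_KERNEL and the write-side callers GFP_NOFS, as the pg_test hunks above show.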
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index bc4827202e7a..0c015bad9e7a 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
@@ -70,7 +70,7 @@ struct pnfs_layoutdriver_type { | |||
70 | const u32 id; | 70 | const u32 id; |
71 | const char *name; | 71 | const char *name; |
72 | struct module *owner; | 72 | struct module *owner; |
73 | struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr); | 73 | struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags); |
74 | void (*free_lseg) (struct pnfs_layout_segment *lseg); | 74 | void (*free_lseg) (struct pnfs_layout_segment *lseg); |
75 | 75 | ||
76 | /* test for nfs page cache coalescing */ | 76 | /* test for nfs page cache coalescing */ |
@@ -126,7 +126,7 @@ void get_layout_hdr(struct pnfs_layout_hdr *lo); | |||
126 | void put_lseg(struct pnfs_layout_segment *lseg); | 126 | void put_lseg(struct pnfs_layout_segment *lseg); |
127 | struct pnfs_layout_segment * | 127 | struct pnfs_layout_segment * |
128 | pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, | 128 | pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, |
129 | enum pnfs_iomode access_type); | 129 | enum pnfs_iomode access_type, gfp_t gfp_flags); |
130 | void set_pnfs_layoutdriver(struct nfs_server *, u32 id); | 130 | void set_pnfs_layoutdriver(struct nfs_server *, u32 id); |
131 | void unset_pnfs_layoutdriver(struct nfs_server *); | 131 | void unset_pnfs_layoutdriver(struct nfs_server *); |
132 | enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *, | 132 | enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *, |
@@ -245,7 +245,7 @@ static inline void put_lseg(struct pnfs_layout_segment *lseg) | |||
245 | 245 | ||
246 | static inline struct pnfs_layout_segment * | 246 | static inline struct pnfs_layout_segment * |
247 | pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, | 247 | pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, |
248 | enum pnfs_iomode access_type) | 248 | enum pnfs_iomode access_type, gfp_t gfp_flags) |
249 | { | 249 | { |
250 | return NULL; | 250 | return NULL; |
251 | } | 251 | } |
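The header keeps a single prototype for the real pnfs_update_layout() and, in the non-pNFS half, a static inline stub with the identical (updated) signature that returns NULL, so no caller needs an #ifdef. A hedged sketch of the idiom with generic names:

	#ifdef CONFIG_FOO
	struct seg *get_seg(struct inode *ino, gfp_t gfp_flags);
	#else
	static inline struct seg *get_seg(struct inode *ino, gfp_t gfp_flags)
	{
		return NULL;	/* feature compiled out; callers stay unchanged */
	}
	#endif

When a parameter is added to the real function, the stub must change in the same patch, which is exactly what this hunk does.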
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 7cded2b12a05..2bcf0dc306a1 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -288,7 +288,7 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc) | |||
288 | atomic_set(&req->wb_complete, requests); | 288 | atomic_set(&req->wb_complete, requests); |
289 | 289 | ||
290 | BUG_ON(desc->pg_lseg != NULL); | 290 | BUG_ON(desc->pg_lseg != NULL); |
291 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); | 291 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL); |
292 | ClearPageError(page); | 292 | ClearPageError(page); |
293 | offset = 0; | 293 | offset = 0; |
294 | nbytes = desc->pg_count; | 294 | nbytes = desc->pg_count; |
@@ -351,7 +351,7 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc) | |||
351 | } | 351 | } |
352 | req = nfs_list_entry(data->pages.next); | 352 | req = nfs_list_entry(data->pages.next); |
353 | if ((!lseg) && list_is_singular(&data->pages)) | 353 | if ((!lseg) && list_is_singular(&data->pages)) |
354 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); | 354 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL); |
355 | 355 | ||
356 | ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count, | 356 | ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count, |
357 | 0, lseg); | 357 | 0, lseg); |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 3bd5d7e80f6c..49c715b4ac92 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -939,7 +939,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc) | |||
939 | atomic_set(&req->wb_complete, requests); | 939 | atomic_set(&req->wb_complete, requests); |
940 | 940 | ||
941 | BUG_ON(desc->pg_lseg); | 941 | BUG_ON(desc->pg_lseg); |
942 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); | 942 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS); |
943 | ClearPageError(page); | 943 | ClearPageError(page); |
944 | offset = 0; | 944 | offset = 0; |
945 | nbytes = desc->pg_count; | 945 | nbytes = desc->pg_count; |
@@ -1013,7 +1013,7 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc) | |||
1013 | } | 1013 | } |
1014 | req = nfs_list_entry(data->pages.next); | 1014 | req = nfs_list_entry(data->pages.next); |
1015 | if ((!lseg) && list_is_singular(&data->pages)) | 1015 | if ((!lseg) && list_is_singular(&data->pages)) |
1016 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); | 1016 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS); |
1017 | 1017 | ||
1018 | if ((desc->pg_ioflags & FLUSH_COND_STABLE) && | 1018 | if ((desc->pg_ioflags & FLUSH_COND_STABLE) && |
1019 | (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit)) | 1019 | (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit)) |
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 0a0a66d98cce..f7684483785e 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c | |||
@@ -646,7 +646,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) | |||
646 | unsigned long group, group_offset; | 646 | unsigned long group, group_offset; |
647 | int i, j, n, ret; | 647 | int i, j, n, ret; |
648 | 648 | ||
649 | for (i = 0; i < nitems; i += n) { | 649 | for (i = 0; i < nitems; i = j) { |
650 | group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset); | 650 | group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset); |
651 | ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh); | 651 | ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh); |
652 | if (ret < 0) | 652 | if (ret < 0) |
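The one-line nilfs change replaces the outer stride `i += n` with `i = j`: j is the inner loop's scan cursor over the entries in the current group, while n counts only what the most recent inner pass handled, so if the two ever diverge the outer loop advances by the wrong amount and entries get skipped or reprocessed. Resuming from the cursor removes that dependency. A reduced, hypothetical sketch of the shape (not the nilfs internals):

	for (i = 0; i < nitems; i = j) {	/* was: i += n */
		group = group_of(items[i]);
		j = i;
		while (j < nitems && group_of(items[j]) == group) {
			/* each inner pass handles at most CHUNK entries */
			for (n = 0; n < CHUNK && j < nitems &&
				    group_of(items[j]) == group; j++, n++)
				free_entry(items[j]);
			flush_block();
		}
		/* n counts only the last chunk; i = j resumes after
		 * the whole group, whatever the batching did. */
	}

The safe invariant is that j, not n, tracks how far the scan actually got.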
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 643720209a98..9a3e6bbff27b 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -539,25 +539,41 @@ static int o2hb_verify_crc(struct o2hb_region *reg, | |||
539 | 539 | ||
540 | /* We want to make sure that nobody is heartbeating on top of us -- | 540 | /* We want to make sure that nobody is heartbeating on top of us -- |
541 | * this will help detect an invalid configuration. */ | 541 | * this will help detect an invalid configuration. */ |
542 | static int o2hb_check_last_timestamp(struct o2hb_region *reg) | 542 | static void o2hb_check_last_timestamp(struct o2hb_region *reg) |
543 | { | 543 | { |
544 | int node_num, ret; | ||
545 | struct o2hb_disk_slot *slot; | 544 | struct o2hb_disk_slot *slot; |
546 | struct o2hb_disk_heartbeat_block *hb_block; | 545 | struct o2hb_disk_heartbeat_block *hb_block; |
546 | char *errstr; | ||
547 | 547 | ||
548 | node_num = o2nm_this_node(); | 548 | slot = ®->hr_slots[o2nm_this_node()]; |
549 | |||
550 | ret = 1; | ||
551 | slot = ®->hr_slots[node_num]; | ||
552 | /* Don't check on our 1st timestamp */ | 549 | /* Don't check on our 1st timestamp */ |
553 | if (slot->ds_last_time) { | 550 | if (!slot->ds_last_time) |
554 | hb_block = slot->ds_raw_block; | 551 | return; |
555 | 552 | ||
556 | if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time) | 553 | hb_block = slot->ds_raw_block; |
557 | ret = 0; | 554 | if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && |
558 | } | 555 | le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && |
556 | hb_block->hb_node == slot->ds_node_num) | ||
557 | return; | ||
559 | 558 | ||
560 | return ret; | 559 | #define ERRSTR1 "Another node is heartbeating on device" |
560 | #define ERRSTR2 "Heartbeat generation mismatch on device" | ||
561 | #define ERRSTR3 "Heartbeat sequence mismatch on device" | ||
562 | |||
563 | if (hb_block->hb_node != slot->ds_node_num) | ||
564 | errstr = ERRSTR1; | ||
565 | else if (le64_to_cpu(hb_block->hb_generation) != | ||
566 | slot->ds_last_generation) | ||
567 | errstr = ERRSTR2; | ||
568 | else | ||
569 | errstr = ERRSTR3; | ||
570 | |||
571 | mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), " | ||
572 | "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name, | ||
573 | slot->ds_node_num, (unsigned long long)slot->ds_last_generation, | ||
574 | (unsigned long long)slot->ds_last_time, hb_block->hb_node, | ||
575 | (unsigned long long)le64_to_cpu(hb_block->hb_generation), | ||
576 | (unsigned long long)le64_to_cpu(hb_block->hb_seq)); | ||
561 | } | 577 | } |
562 | 578 | ||
563 | static inline void o2hb_prepare_block(struct o2hb_region *reg, | 579 | static inline void o2hb_prepare_block(struct o2hb_region *reg, |
@@ -983,9 +999,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
983 | /* With an up to date view of the slots, we can check that no | 999 | /* With an up to date view of the slots, we can check that no |
984 | * other node has been improperly configured to heartbeat in | 1000 | * other node has been improperly configured to heartbeat in |
985 | * our slot. */ | 1001 | * our slot. */ |
986 | if (!o2hb_check_last_timestamp(reg)) | 1002 | o2hb_check_last_timestamp(reg); |
987 | mlog(ML_ERROR, "Device \"%s\": another node is heartbeating " | ||
988 | "in our slot!\n", reg->hr_dev_name); | ||
989 | 1003 | ||
990 | /* fill in the proper info for our next heartbeat */ | 1004 | /* fill in the proper info for our next heartbeat */ |
991 | o2hb_prepare_block(reg, reg->hr_generation); | 1005 | o2hb_prepare_block(reg, reg->hr_generation); |
@@ -999,8 +1013,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
999 | } | 1013 | } |
1000 | 1014 | ||
1001 | i = -1; | 1015 | i = -1; |
1002 | while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { | 1016 | while((i = find_next_bit(configured_nodes, |
1003 | 1017 | O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { | |
1004 | change |= o2hb_check_slot(reg, ®->hr_slots[i]); | 1018 | change |= o2hb_check_slot(reg, ®->hr_slots[i]); |
1005 | } | 1019 | } |
1006 | 1020 | ||
@@ -1690,6 +1704,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1690 | struct file *filp = NULL; | 1704 | struct file *filp = NULL; |
1691 | struct inode *inode = NULL; | 1705 | struct inode *inode = NULL; |
1692 | ssize_t ret = -EINVAL; | 1706 | ssize_t ret = -EINVAL; |
1707 | int live_threshold; | ||
1693 | 1708 | ||
1694 | if (reg->hr_bdev) | 1709 | if (reg->hr_bdev) |
1695 | goto out; | 1710 | goto out; |
@@ -1766,8 +1781,18 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1766 | * A node is considered live after it has beat LIVE_THRESHOLD | 1781 | * A node is considered live after it has beat LIVE_THRESHOLD |
1767 | * times. We're not steady until we've given them a chance | 1782 | * times. We're not steady until we've given them a chance |
1768 | * _after_ our first read. | 1783 | * _after_ our first read. |
1784 | * The default threshold is the bare minimum so as to limit the | ||
1785 | * delay during mounts. For global heartbeat, the threshold is | ||
1786 | * doubled for the first region. | ||
1769 | */ | 1787 | */ |
1770 | atomic_set(®->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1); | 1788 | live_threshold = O2HB_LIVE_THRESHOLD; |
1789 | if (o2hb_global_heartbeat_active()) { | ||
1790 | spin_lock(&o2hb_live_lock); | ||
1791 | if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1) | ||
1792 | live_threshold <<= 1; | ||
1793 | spin_unlock(&o2hb_live_lock); | ||
1794 | } | ||
1795 | atomic_set(®->hr_steady_iterations, live_threshold + 1); | ||
1771 | 1796 | ||
1772 | hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", | 1797 | hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", |
1773 | reg->hr_item.ci_name); | 1798 | reg->hr_item.ci_name); |
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 9fe5b8fd658f..8582e3f4f120 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
@@ -2868,7 +2868,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, | |||
2868 | bytes = blocks_wanted << sb->s_blocksize_bits; | 2868 | bytes = blocks_wanted << sb->s_blocksize_bits; |
2869 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); | 2869 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); |
2870 | struct ocfs2_inode_info *oi = OCFS2_I(dir); | 2870 | struct ocfs2_inode_info *oi = OCFS2_I(dir); |
2871 | struct ocfs2_alloc_context *data_ac; | 2871 | struct ocfs2_alloc_context *data_ac = NULL; |
2872 | struct ocfs2_alloc_context *meta_ac = NULL; | 2872 | struct ocfs2_alloc_context *meta_ac = NULL; |
2873 | struct buffer_head *dirdata_bh = NULL; | 2873 | struct buffer_head *dirdata_bh = NULL; |
2874 | struct buffer_head *dx_root_bh = NULL; | 2874 | struct buffer_head *dx_root_bh = NULL; |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 7540a492eaba..3b179d6cbde0 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -1614,7 +1614,8 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) | |||
1614 | spin_unlock(&dlm->spinlock); | 1614 | spin_unlock(&dlm->spinlock); |
1615 | 1615 | ||
1616 | /* Support for global heartbeat and node info was added in 1.1 */ | 1616 | /* Support for global heartbeat and node info was added in 1.1 */ |
1617 | if (dlm_protocol.pv_major > 1 || dlm_protocol.pv_minor > 0) { | 1617 | if (dlm->dlm_locking_proto.pv_major > 1 || |
1618 | dlm->dlm_locking_proto.pv_minor > 0) { | ||
1618 | status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map); | 1619 | status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map); |
1619 | if (status) { | 1620 | if (status) { |
1620 | mlog_errno(status); | 1621 | mlog_errno(status); |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index fede57ed005f..84d166328cf7 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -2574,6 +2574,9 @@ fail: | |||
2574 | res->state &= ~DLM_LOCK_RES_MIGRATING; | 2574 | res->state &= ~DLM_LOCK_RES_MIGRATING; |
2575 | wake = 1; | 2575 | wake = 1; |
2576 | spin_unlock(&res->spinlock); | 2576 | spin_unlock(&res->spinlock); |
2577 | if (dlm_is_host_down(ret)) | ||
2578 | dlm_wait_for_node_death(dlm, target, | ||
2579 | DLM_NODE_DEATH_WAIT_MAX); | ||
2577 | goto leave; | 2580 | goto leave; |
2578 | } | 2581 | } |
2579 | 2582 | ||
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 41565ae52856..89659d6dc206 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -1607,6 +1607,9 @@ static void ocfs2_calc_trunc_pos(struct inode *inode, | |||
1607 | range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); | 1607 | range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); |
1608 | 1608 | ||
1609 | if (le32_to_cpu(rec->e_cpos) >= trunc_start) { | 1609 | if (le32_to_cpu(rec->e_cpos) >= trunc_start) { |
1610 | /* | ||
1611 | * remove an entire extent record. | ||
1612 | */ | ||
1610 | *trunc_cpos = le32_to_cpu(rec->e_cpos); | 1613 | *trunc_cpos = le32_to_cpu(rec->e_cpos); |
1611 | /* | 1614 | /* |
1612 | * Skip holes if any. | 1615 | * Skip holes if any. |
@@ -1617,7 +1620,16 @@ static void ocfs2_calc_trunc_pos(struct inode *inode, | |||
1617 | *blkno = le64_to_cpu(rec->e_blkno); | 1620 | *blkno = le64_to_cpu(rec->e_blkno); |
1618 | *trunc_end = le32_to_cpu(rec->e_cpos); | 1621 | *trunc_end = le32_to_cpu(rec->e_cpos); |
1619 | } else if (range > trunc_start) { | 1622 | } else if (range > trunc_start) { |
1623 | /* | ||
1624 | * remove a partial extent record, which means we're | ||
1625 | * removing the last extent record. | ||
1626 | */ | ||
1620 | *trunc_cpos = trunc_start; | 1627 | *trunc_cpos = trunc_start; |
1628 | /* | ||
1629 | * skip hole if any. | ||
1630 | */ | ||
1631 | if (range < *trunc_end) | ||
1632 | *trunc_end = range; | ||
1621 | *trunc_len = *trunc_end - trunc_start; | 1633 | *trunc_len = *trunc_end - trunc_start; |
1622 | coff = trunc_start - le32_to_cpu(rec->e_cpos); | 1634 | coff = trunc_start - le32_to_cpu(rec->e_cpos); |
1623 | *blkno = le64_to_cpu(rec->e_blkno) + | 1635 | *blkno = le64_to_cpu(rec->e_blkno) + |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index b141a44605ca..295d56454e8b 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -1260,6 +1260,9 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) | |||
1260 | { | 1260 | { |
1261 | struct ocfs2_journal *journal = osb->journal; | 1261 | struct ocfs2_journal *journal = osb->journal; |
1262 | 1262 | ||
1263 | if (ocfs2_is_hard_readonly(osb)) | ||
1264 | return; | ||
1265 | |||
1263 | /* No need to queue up our truncate_log as regular cleanup will catch | 1266 | /* No need to queue up our truncate_log as regular cleanup will catch |
1264 | * that */ | 1267 | * that */ |
1265 | ocfs2_queue_recovery_completion(journal, osb->slot_num, | 1268 | ocfs2_queue_recovery_completion(journal, osb->slot_num, |
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index da3fefe91a8f..1ad8c93c1b85 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c | |||
@@ -24,13 +24,6 @@ | |||
24 | 24 | ||
25 | #include "sysfs.h" | 25 | #include "sysfs.h" |
26 | 26 | ||
27 | /* used in crash dumps to help with debugging */ | ||
28 | static char last_sysfs_file[PATH_MAX]; | ||
29 | void sysfs_printk_last_file(void) | ||
30 | { | ||
31 | printk(KERN_EMERG "last sysfs file: %s\n", last_sysfs_file); | ||
32 | } | ||
33 | |||
34 | /* | 27 | /* |
35 | * There's one sysfs_buffer for each open file and one | 28 | * There's one sysfs_buffer for each open file and one |
36 | * sysfs_open_dirent for each sysfs_dirent with one or more open | 29 | * sysfs_open_dirent for each sysfs_dirent with one or more open |
@@ -337,11 +330,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file) | |||
337 | struct sysfs_buffer *buffer; | 330 | struct sysfs_buffer *buffer; |
338 | const struct sysfs_ops *ops; | 331 | const struct sysfs_ops *ops; |
339 | int error = -EACCES; | 332 | int error = -EACCES; |
340 | char *p; | ||
341 | |||
342 | p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file)); | ||
343 | if (!IS_ERR(p)) | ||
344 | memmove(last_sysfs_file, p, strlen(p) + 1); | ||
345 | 333 | ||
346 | /* need attr_sd for attr and ops, its parent for kobj */ | 334 | /* need attr_sd for attr and ops, its parent for kobj */ |
347 | if (!sysfs_get_active(attr_sd)) | 335 | if (!sysfs_get_active(attr_sd)) |
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c index c8769dc222d8..194414f8298c 100644 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c | |||
@@ -101,9 +101,9 @@ int sysfs_create_group(struct kobject *kobj, | |||
101 | } | 101 | } |
102 | 102 | ||
103 | /** | 103 | /** |
104 | * sysfs_update_group - given a directory kobject, create an attribute group | 104 | * sysfs_update_group - given a directory kobject, update an attribute group |
105 | * @kobj: The kobject to create the group on | 105 | * @kobj: The kobject to update the group on |
106 | * @grp: The attribute group to create | 106 | * @grp: The attribute group to update |
107 | * | 107 | * |
108 | * This function updates an attribute group. Unlike | 108 | * This function updates an attribute group. Unlike |
109 | * sysfs_create_group(), it will explicitly not warn or error if any | 109 | * sysfs_create_group(), it will explicitly not warn or error if any |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index e4f9c1b0836c..3e898a48122d 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
@@ -926,6 +926,7 @@ restart: | |||
926 | XFS_LOOKUP_BATCH, | 926 | XFS_LOOKUP_BATCH, |
927 | XFS_ICI_RECLAIM_TAG); | 927 | XFS_ICI_RECLAIM_TAG); |
928 | if (!nr_found) { | 928 | if (!nr_found) { |
929 | done = 1; | ||
929 | rcu_read_unlock(); | 930 | rcu_read_unlock(); |
930 | break; | 931 | break; |
931 | } | 932 | } |
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index acdb92f14d51..5fc2380092c8 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c | |||
@@ -346,20 +346,23 @@ xfs_ail_delete( | |||
346 | */ | 346 | */ |
347 | STATIC void | 347 | STATIC void |
348 | xfs_ail_worker( | 348 | xfs_ail_worker( |
349 | struct work_struct *work) | 349 | struct work_struct *work) |
350 | { | 350 | { |
351 | struct xfs_ail *ailp = container_of(to_delayed_work(work), | 351 | struct xfs_ail *ailp = container_of(to_delayed_work(work), |
352 | struct xfs_ail, xa_work); | 352 | struct xfs_ail, xa_work); |
353 | long tout; | 353 | xfs_mount_t *mp = ailp->xa_mount; |
354 | xfs_lsn_t target = ailp->xa_target; | ||
355 | xfs_lsn_t lsn; | ||
356 | xfs_log_item_t *lip; | ||
357 | int flush_log, count, stuck; | ||
358 | xfs_mount_t *mp = ailp->xa_mount; | ||
359 | struct xfs_ail_cursor *cur = &ailp->xa_cursors; | 354 | struct xfs_ail_cursor *cur = &ailp->xa_cursors; |
360 | int push_xfsbufd = 0; | 355 | xfs_log_item_t *lip; |
356 | xfs_lsn_t lsn; | ||
357 | xfs_lsn_t target; | ||
358 | long tout = 10; | ||
359 | int flush_log = 0; | ||
360 | int stuck = 0; | ||
361 | int count = 0; | ||
362 | int push_xfsbufd = 0; | ||
361 | 363 | ||
362 | spin_lock(&ailp->xa_lock); | 364 | spin_lock(&ailp->xa_lock); |
365 | target = ailp->xa_target; | ||
363 | xfs_trans_ail_cursor_init(ailp, cur); | 366 | xfs_trans_ail_cursor_init(ailp, cur); |
364 | lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); | 367 | lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); |
365 | if (!lip || XFS_FORCED_SHUTDOWN(mp)) { | 368 | if (!lip || XFS_FORCED_SHUTDOWN(mp)) { |
@@ -368,8 +371,7 @@ xfs_ail_worker( | |||
368 | */ | 371 | */ |
369 | xfs_trans_ail_cursor_done(ailp, cur); | 372 | xfs_trans_ail_cursor_done(ailp, cur); |
370 | spin_unlock(&ailp->xa_lock); | 373 | spin_unlock(&ailp->xa_lock); |
371 | ailp->xa_last_pushed_lsn = 0; | 374 | goto out_done; |
372 | return; | ||
373 | } | 375 | } |
374 | 376 | ||
375 | XFS_STATS_INC(xs_push_ail); | 377 | XFS_STATS_INC(xs_push_ail); |
@@ -386,8 +388,7 @@ xfs_ail_worker( | |||
386 | * lots of contention on the AIL lists. | 388 | * lots of contention on the AIL lists. |
387 | */ | 389 | */ |
388 | lsn = lip->li_lsn; | 390 | lsn = lip->li_lsn; |
389 | flush_log = stuck = count = 0; | 391 | while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) { |
390 | while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) { | ||
391 | int lock_result; | 392 | int lock_result; |
392 | /* | 393 | /* |
393 | * If we can lock the item without sleeping, unlock the AIL | 394 | * If we can lock the item without sleeping, unlock the AIL |
@@ -480,21 +481,25 @@ xfs_ail_worker( | |||
480 | } | 481 | } |
481 | 482 | ||
482 | /* assume we have more work to do in a short while */ | 483 | /* assume we have more work to do in a short while */ |
483 | tout = 10; | 484 | out_done: |
484 | if (!count) { | 485 | if (!count) { |
485 | /* We're past our target or empty, so idle */ | 486 | /* We're past our target or empty, so idle */ |
486 | ailp->xa_last_pushed_lsn = 0; | 487 | ailp->xa_last_pushed_lsn = 0; |
487 | 488 | ||
488 | /* | 489 | /* |
489 | * Check for an updated push target before clearing the | 490 | * We clear the XFS_AIL_PUSHING_BIT first before checking |
490 | * XFS_AIL_PUSHING_BIT. If the target changed, we've got more | 491 | * whether the target has changed. If the target has changed, |
491 | * work to do. Wait a bit longer before starting that work. | 492 | * this pushes the requeue race directly onto the result of the |
493 | * atomic test/set bit, so we are guaranteed that either the | ||
494 | * pusher that changed the target or ourselves will requeue | ||
495 | * the work (but not both). | ||
492 | */ | 496 | */ |
497 | clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); | ||
493 | smp_rmb(); | 498 | smp_rmb(); |
494 | if (ailp->xa_target == target) { | 499 | if (XFS_LSN_CMP(ailp->xa_target, target) == 0 || |
495 | clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); | 500 | test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) |
496 | return; | 501 | return; |
497 | } | 502 | |
498 | tout = 50; | 503 | tout = 50; |
499 | } else if (XFS_LSN_CMP(lsn, target) >= 0) { | 504 | } else if (XFS_LSN_CMP(lsn, target) >= 0) { |
500 | /* | 505 | /* |
@@ -553,7 +558,7 @@ xfs_ail_push( | |||
553 | * the XFS_AIL_PUSHING_BIT. | 558 | * the XFS_AIL_PUSHING_BIT. |
554 | */ | 559 | */ |
555 | smp_wmb(); | 560 | smp_wmb(); |
556 | ailp->xa_target = threshold_lsn; | 561 | xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); |
557 | if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) | 562 | if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) |
558 | queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); | 563 | queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); |
559 | } | 564 | } |
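The interesting part of the AIL rework is the idle path: the worker now clears XFS_AIL_PUSHING_BIT before re-checking the target, so a concurrent xfs_ail_push() that moves xa_target either observes the bit still set (its test_and_set_bit fails, and the worker, seeing the changed target, requeues itself) or wins the bit and queues the work itself; exactly one side requeues. Condensed from the two hunks above:

	/* worker, about to go idle */
	clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
	smp_rmb();
	if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
	    test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
		return;		/* nothing new, or the pusher requeued */
	tout = 50;		/* target moved and we own the bit: requeue */

	/* pusher (xfs_ail_push) */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
	if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);

The old code cleared the bit only after the target check, leaving a window in which a push could update the target, lose test_and_set_bit, and nobody would requeue the work.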
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 75a8692d144f..077c00d94f6e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -276,70 +276,70 @@ | |||
276 | /* Kernel symbol table: Normal symbols */ \ | 276 | /* Kernel symbol table: Normal symbols */ \ |
277 | __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ | 277 | __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ |
278 | VMLINUX_SYMBOL(__start___ksymtab) = .; \ | 278 | VMLINUX_SYMBOL(__start___ksymtab) = .; \ |
279 | *(__ksymtab) \ | 279 | *(SORT(___ksymtab+*)) \ |
280 | VMLINUX_SYMBOL(__stop___ksymtab) = .; \ | 280 | VMLINUX_SYMBOL(__stop___ksymtab) = .; \ |
281 | } \ | 281 | } \ |
282 | \ | 282 | \ |
283 | /* Kernel symbol table: GPL-only symbols */ \ | 283 | /* Kernel symbol table: GPL-only symbols */ \ |
284 | __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ | 284 | __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ |
285 | VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ | 285 | VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ |
286 | *(__ksymtab_gpl) \ | 286 | *(SORT(___ksymtab_gpl+*)) \ |
287 | VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ | 287 | VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ |
288 | } \ | 288 | } \ |
289 | \ | 289 | \ |
290 | /* Kernel symbol table: Normal unused symbols */ \ | 290 | /* Kernel symbol table: Normal unused symbols */ \ |
291 | __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ | 291 | __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ |
292 | VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ | 292 | VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ |
293 | *(__ksymtab_unused) \ | 293 | *(SORT(___ksymtab_unused+*)) \ |
294 | VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ | 294 | VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ |
295 | } \ | 295 | } \ |
296 | \ | 296 | \ |
297 | /* Kernel symbol table: GPL-only unused symbols */ \ | 297 | /* Kernel symbol table: GPL-only unused symbols */ \ |
298 | __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ | 298 | __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ |
299 | VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ | 299 | VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ |
300 | *(__ksymtab_unused_gpl) \ | 300 | *(SORT(___ksymtab_unused_gpl+*)) \ |
301 | VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ | 301 | VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ |
302 | } \ | 302 | } \ |
303 | \ | 303 | \ |
304 | /* Kernel symbol table: GPL-future-only symbols */ \ | 304 | /* Kernel symbol table: GPL-future-only symbols */ \ |
305 | __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ | 305 | __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ |
306 | VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ | 306 | VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ |
307 | *(__ksymtab_gpl_future) \ | 307 | *(SORT(___ksymtab_gpl_future+*)) \ |
308 | VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ | 308 | VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ |
309 | } \ | 309 | } \ |
310 | \ | 310 | \ |
311 | /* Kernel symbol table: Normal symbols */ \ | 311 | /* Kernel symbol table: Normal symbols */ \ |
312 | __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ | 312 | __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ |
313 | VMLINUX_SYMBOL(__start___kcrctab) = .; \ | 313 | VMLINUX_SYMBOL(__start___kcrctab) = .; \ |
314 | *(__kcrctab) \ | 314 | *(SORT(___kcrctab+*)) \ |
315 | VMLINUX_SYMBOL(__stop___kcrctab) = .; \ | 315 | VMLINUX_SYMBOL(__stop___kcrctab) = .; \ |
316 | } \ | 316 | } \ |
317 | \ | 317 | \ |
318 | /* Kernel symbol table: GPL-only symbols */ \ | 318 | /* Kernel symbol table: GPL-only symbols */ \ |
319 | __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ | 319 | __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ |
320 | VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ | 320 | VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ |
321 | *(__kcrctab_gpl) \ | 321 | *(SORT(___kcrctab_gpl+*)) \ |
322 | VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ | 322 | VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ |
323 | } \ | 323 | } \ |
324 | \ | 324 | \ |
325 | /* Kernel symbol table: Normal unused symbols */ \ | 325 | /* Kernel symbol table: Normal unused symbols */ \ |
326 | __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ | 326 | __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ |
327 | VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ | 327 | VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ |
328 | *(__kcrctab_unused) \ | 328 | *(SORT(___kcrctab_unused+*)) \ |
329 | VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ | 329 | VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ |
330 | } \ | 330 | } \ |
331 | \ | 331 | \ |
332 | /* Kernel symbol table: GPL-only unused symbols */ \ | 332 | /* Kernel symbol table: GPL-only unused symbols */ \ |
333 | __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ | 333 | __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ |
334 | VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ | 334 | VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ |
335 | *(__kcrctab_unused_gpl) \ | 335 | *(SORT(___kcrctab_unused_gpl+*)) \ |
336 | VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ | 336 | VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ |
337 | } \ | 337 | } \ |
338 | \ | 338 | \ |
339 | /* Kernel symbol table: GPL-future-only symbols */ \ | 339 | /* Kernel symbol table: GPL-future-only symbols */ \ |
340 | __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ | 340 | __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ |
341 | VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ | 341 | VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ |
342 | *(__kcrctab_gpl_future) \ | 342 | *(SORT(___kcrctab_gpl_future+*)) \ |
343 | VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ | 343 | VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ |
344 | } \ | 344 | } \ |
345 | \ | 345 | \ |
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index ade09d7b4271..c99c3d3e7811 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h | |||
@@ -127,7 +127,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, | |||
127 | 127 | ||
128 | int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); | 128 | int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); |
129 | 129 | ||
130 | bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); | 130 | int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); |
131 | bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); | 131 | bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); |
132 | int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); | 132 | int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); |
133 | int drm_fb_helper_debug_enter(struct fb_info *info); | 133 | int drm_fb_helper_debug_enter(struct fb_info *info); |
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h new file mode 100644 index 000000000000..c5d6095b46f8 --- /dev/null +++ b/include/linux/alarmtimer.h | |||
@@ -0,0 +1,40 @@ | |||
1 | #ifndef _LINUX_ALARMTIMER_H | ||
2 | #define _LINUX_ALARMTIMER_H | ||
3 | |||
4 | #include <linux/time.h> | ||
5 | #include <linux/hrtimer.h> | ||
6 | #include <linux/timerqueue.h> | ||
7 | #include <linux/rtc.h> | ||
8 | |||
9 | enum alarmtimer_type { | ||
10 | ALARM_REALTIME, | ||
11 | ALARM_BOOTTIME, | ||
12 | |||
13 | ALARM_NUMTYPE, | ||
14 | }; | ||
15 | |||
16 | /** | ||
17 | * struct alarm - Alarm timer structure | ||
18 | * @node: timerqueue node for adding to the event list; this value | ||
19 | * also includes the expiration time. | ||
20 | * @period: Period for recurring alarms | ||
21 | * @function: Function pointer to be executed when the timer fires. | ||
22 | * @type: Alarm type (BOOTTIME/REALTIME) | ||
23 | * @enabled: Flag indicating whether the alarm is set to fire | ||
24 | * @data: Internal data value. | ||
25 | */ | ||
26 | struct alarm { | ||
27 | struct timerqueue_node node; | ||
28 | ktime_t period; | ||
29 | void (*function)(struct alarm *); | ||
30 | enum alarmtimer_type type; | ||
31 | bool enabled; | ||
32 | void *data; | ||
33 | }; | ||
34 | |||
35 | void alarm_init(struct alarm *alarm, enum alarmtimer_type type, | ||
36 | void (*function)(struct alarm *)); | ||
37 | void alarm_start(struct alarm *alarm, ktime_t start, ktime_t period); | ||
38 | void alarm_cancel(struct alarm *alarm); | ||
39 | |||
40 | #endif | ||
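The new header declares the whole client-facing API: initialize an alarm with a type and callback, start it with an absolute expiry and an optional period, and cancel it. A usage sketch against exactly these declarations (the callback body and timing values are illustrative):

	#include <linux/alarmtimer.h>
	#include <linux/ktime.h>

	static void my_alarm_fn(struct alarm *a)
	{
		/* runs when the alarm fires, possibly after an RTC wakeup */
	}

	static struct alarm my_alarm;

	static void example(void)
	{
		/* fire 5 s from now on the wall clock, repeat every 5 s */
		ktime_t period = ktime_set(5, 0);

		alarm_init(&my_alarm, ALARM_REALTIME, my_alarm_fn);
		alarm_start(&my_alarm, ktime_add(ktime_get_real(), period),
			    period);
		/* ... */
		alarm_cancel(&my_alarm);
	}

ALARM_BOOTTIME works the same way but measures against boot time including suspend.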
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index b8613e806aa9..01eca1794e14 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h | |||
@@ -111,6 +111,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, | |||
111 | __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) | 111 | __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) |
112 | #define alloc_bootmem_node(pgdat, x) \ | 112 | #define alloc_bootmem_node(pgdat, x) \ |
113 | __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) | 113 | __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) |
114 | #define alloc_bootmem_node_nopanic(pgdat, x) \ | ||
115 | __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) | ||
114 | #define alloc_bootmem_pages_node(pgdat, x) \ | 116 | #define alloc_bootmem_pages_node(pgdat, x) \ |
115 | __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) | 117 | __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) |
116 | #define alloc_bootmem_pages_node_nopanic(pgdat, x) \ | 118 | #define alloc_bootmem_pages_node_nopanic(pgdat, x) \ |
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h new file mode 100644 index 000000000000..90b1aa867224 --- /dev/null +++ b/include/linux/bsearch.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _LINUX_BSEARCH_H | ||
2 | #define _LINUX_BSEARCH_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | void *bsearch(const void *key, const void *base, size_t num, size_t size, | ||
7 | int (*cmp)(const void *key, const void *elt)); | ||
8 | |||
9 | #endif /* _LINUX_BSEARCH_H */ | ||
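The new library routine mirrors userspace bsearch(3): the array must already be sorted under the same ordering the comparison callback implements. A small usage sketch against the declared signature:

	#include <linux/bsearch.h>
	#include <linux/kernel.h>

	static int cmp_int(const void *key, const void *elt)
	{
		int k = *(const int *)key, e = *(const int *)elt;

		return (k > e) - (k < e);	/* <0, 0, >0 like strcmp */
	}

	static bool contains(const int *sorted, size_t n, int needle)
	{
		return bsearch(&needle, sorted, n, sizeof(*sorted),
			       cmp_int) != NULL;
	}

The sorted ___ksymtab+* sections in the vmlinux.lds.h hunk above are part of the same series: sorting the export tables at link time is what makes this kind of binary search usable for symbol resolution.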
diff --git a/include/linux/capability.h b/include/linux/capability.h index 16ee8b49a200..4554db0cde86 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h | |||
@@ -355,7 +355,12 @@ struct cpu_vfs_cap_data { | |||
355 | 355 | ||
356 | #define CAP_SYSLOG 34 | 356 | #define CAP_SYSLOG 34 |
357 | 357 | ||
358 | #define CAP_LAST_CAP CAP_SYSLOG | 358 | /* Allow triggering something that will wake the system */ |
359 | |||
360 | #define CAP_WAKE_ALARM 35 | ||
361 | |||
362 | |||
363 | #define CAP_LAST_CAP CAP_WAKE_ALARM | ||
359 | 364 | ||
360 | #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) | 365 | #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) |
361 | 366 | ||
@@ -546,18 +551,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap); | |||
546 | extern bool capable(int cap); | 551 | extern bool capable(int cap); |
547 | extern bool ns_capable(struct user_namespace *ns, int cap); | 552 | extern bool ns_capable(struct user_namespace *ns, int cap); |
548 | extern bool task_ns_capable(struct task_struct *t, int cap); | 553 | extern bool task_ns_capable(struct task_struct *t, int cap); |
549 | 554 | extern bool nsown_capable(int cap); | |
550 | /** | ||
551 | * nsown_capable - Check superior capability to one's own user_ns | ||
552 | * @cap: The capability in question | ||
553 | * | ||
554 | * Return true if the current task has the given superior capability | ||
555 | * targeted at its own user namespace. | ||
556 | */ | ||
557 | static inline bool nsown_capable(int cap) | ||
558 | { | ||
559 | return ns_capable(current_user_ns(), cap); | ||
560 | } | ||
561 | 555 | ||
562 | /* audit system wants to get cap info from files as well */ | 556 | /* audit system wants to get cap info from files as well */ |
563 | extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); | 557 | extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); |
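CAP_WAKE_ALARM gates operations that can bring a suspended system back up, such as arming the alarm timers declared in alarmtimer.h above. A typical gate, sketched (illustrative placement, not a quote from the timer code):

	if (!capable(CAP_WAKE_ALARM))
		return -EPERM;	/* only privileged tasks may arm a wakeup */

The hunk also un-inlines nsown_capable(); its behavior (check a capability against the task's own user namespace) is unchanged, only the definition moves out of the header.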
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index fc53492b6ad7..d6733e27af34 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h | |||
@@ -56,46 +56,52 @@ enum clock_event_nofitiers { | |||
56 | 56 | ||
57 | /** | 57 | /** |
58 | * struct clock_event_device - clock event device descriptor | 58 | * struct clock_event_device - clock event device descriptor |
59 | * @name: ptr to clock event name | 59 | * @event_handler: Assigned by the framework to be called by the low |
60 | * @features: features | 60 | * level handler of the event source |
61 | * @set_next_event: set next event function | ||
62 | * @next_event: local storage for the next event in oneshot mode | ||
61 | * @max_delta_ns: maximum delta value in ns | 63 | * @max_delta_ns: maximum delta value in ns |
62 | * @min_delta_ns: minimum delta value in ns | 64 | * @min_delta_ns: minimum delta value in ns |
63 | * @mult: nanosecond to cycles multiplier | 65 | * @mult: nanosecond to cycles multiplier |
64 | * @shift: nanoseconds to cycles divisor (power of two) | 66 | * @shift: nanoseconds to cycles divisor (power of two) |
67 | * @mode: operating mode assigned by the management code | ||
68 | * @features: features | ||
69 | * @retries: number of forced programming retries | ||
70 | * @set_mode: set mode function | ||
71 | * @broadcast: function to broadcast events | ||
72 | * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration | ||
73 | * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration | ||
74 | * @name: ptr to clock event name | ||
65 | * @rating: variable to rate clock event devices | 75 | * @rating: variable to rate clock event devices |
66 | * @irq: IRQ number (only for non CPU local devices) | 76 | * @irq: IRQ number (only for non CPU local devices) |
67 | * @cpumask: cpumask to indicate for which CPUs this device works | 77 | * @cpumask: cpumask to indicate for which CPUs this device works |
68 | * @set_next_event: set next event function | ||
69 | * @set_mode: set mode function | ||
70 | * @event_handler: Assigned by the framework to be called by the low | ||
71 | * level handler of the event source | ||
72 | * @broadcast: function to broadcast events | ||
73 | * @list: list head for the management code | 78 | * @list: list head for the management code |
74 | * @mode: operating mode assigned by the management code | ||
75 | * @next_event: local storage for the next event in oneshot mode | ||
76 | * @retries: number of forced programming retries | ||
77 | */ | 79 | */ |
78 | struct clock_event_device { | 80 | struct clock_event_device { |
79 | const char *name; | 81 | void (*event_handler)(struct clock_event_device *); |
80 | unsigned int features; | 82 | int (*set_next_event)(unsigned long evt, |
83 | struct clock_event_device *); | ||
84 | ktime_t next_event; | ||
81 | u64 max_delta_ns; | 85 | u64 max_delta_ns; |
82 | u64 min_delta_ns; | 86 | u64 min_delta_ns; |
83 | u32 mult; | 87 | u32 mult; |
84 | u32 shift; | 88 | u32 shift; |
89 | enum clock_event_mode mode; | ||
90 | unsigned int features; | ||
91 | unsigned long retries; | ||
92 | |||
93 | void (*broadcast)(const struct cpumask *mask); | ||
94 | void (*set_mode)(enum clock_event_mode mode, | ||
95 | struct clock_event_device *); | ||
96 | unsigned long min_delta_ticks; | ||
97 | unsigned long max_delta_ticks; | ||
98 | |||
99 | const char *name; | ||
85 | int rating; | 100 | int rating; |
86 | int irq; | 101 | int irq; |
87 | const struct cpumask *cpumask; | 102 | const struct cpumask *cpumask; |
88 | int (*set_next_event)(unsigned long evt, | ||
89 | struct clock_event_device *); | ||
90 | void (*set_mode)(enum clock_event_mode mode, | ||
91 | struct clock_event_device *); | ||
92 | void (*event_handler)(struct clock_event_device *); | ||
93 | void (*broadcast)(const struct cpumask *mask); | ||
94 | struct list_head list; | 103 | struct list_head list; |
95 | enum clock_event_mode mode; | 104 | } ____cacheline_aligned; |
96 | ktime_t next_event; | ||
97 | unsigned long retries; | ||
98 | }; | ||
99 | 105 | ||
100 | /* | 106 | /* |
101 | * Calculate a multiplication factor for scaled math, which is used to convert | 107 | * Calculate a multiplication factor for scaled math, which is used to convert |
@@ -122,6 +128,12 @@ extern u64 clockevent_delta2ns(unsigned long latch, | |||
122 | struct clock_event_device *evt); | 128 | struct clock_event_device *evt); |
123 | extern void clockevents_register_device(struct clock_event_device *dev); | 129 | extern void clockevents_register_device(struct clock_event_device *dev); |
124 | 130 | ||
131 | extern void clockevents_config_and_register(struct clock_event_device *dev, | ||
132 | u32 freq, unsigned long min_delta, | ||
133 | unsigned long max_delta); | ||
134 | |||
135 | extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); | ||
136 | |||
125 | extern void clockevents_exchange_device(struct clock_event_device *old, | 137 | extern void clockevents_exchange_device(struct clock_event_device *old, |
126 | struct clock_event_device *new); | 138 | struct clock_event_device *new); |
127 | extern void clockevents_set_mode(struct clock_event_device *dev, | 139 | extern void clockevents_set_mode(struct clock_event_device *dev, |
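For reference, a minimal sketch of how a timer driver might use the new clockevents_config_and_register() helper, which folds the mult/shift and min/max_delta_ns computation into registration. All "my_*" names, the register offsets, and the 1 MHz rate are invented for illustration:

    #define TIMER_CNT 0x00                  /* hypothetical register offsets */
    #define TIMER_CMP 0x04

    static void __iomem *timer_base;        /* ioremap()ed elsewhere */

    static int my_timer_set_next(unsigned long delta,
                                 struct clock_event_device *ce)
    {
            /* program the comparator 'delta' ticks ahead of the counter */
            writel(readl(timer_base + TIMER_CNT) + delta,
                   timer_base + TIMER_CMP);
            return 0;
    }

    static void my_timer_set_mode(enum clock_event_mode mode,
                                  struct clock_event_device *ce)
    {
            /* nothing to do for this hypothetical oneshot-only timer */
    }

    static struct clock_event_device my_ce = {
            .name           = "my-timer",
            .features       = CLOCK_EVT_FEAT_ONESHOT,
            .rating         = 300,
            .set_next_event = my_timer_set_next,
            .set_mode       = my_timer_set_mode,
    };

    static void __init my_timer_init(void)
    {
            my_ce.cpumask = cpumask_of(0);
            /* 1 MHz input clock; deltas of 2..0xffffffff ticks are valid */
            clockevents_config_and_register(&my_ce, 1000000, 2, 0xffffffff);
    }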
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index c37b21ad5a3b..c918fbd33ee5 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h | |||
@@ -159,42 +159,38 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, | |||
159 | */ | 159 | */ |
160 | struct clocksource { | 160 | struct clocksource { |
161 | /* | 161 | /* |
162 | * First part of structure is read mostly | 162 | * Hotpath data, fits in a single cache line when the |
163 | * clocksource itself is cacheline aligned. | ||
163 | */ | 164 | */ |
164 | char *name; | ||
165 | struct list_head list; | ||
166 | int rating; | ||
167 | cycle_t (*read)(struct clocksource *cs); | 165 | cycle_t (*read)(struct clocksource *cs); |
168 | int (*enable)(struct clocksource *cs); | 166 | cycle_t cycle_last; |
169 | void (*disable)(struct clocksource *cs); | ||
170 | cycle_t mask; | 167 | cycle_t mask; |
171 | u32 mult; | 168 | u32 mult; |
172 | u32 shift; | 169 | u32 shift; |
173 | u64 max_idle_ns; | 170 | u64 max_idle_ns; |
174 | unsigned long flags; | 171 | |
175 | cycle_t (*vread)(void); | ||
176 | void (*suspend)(struct clocksource *cs); | ||
177 | void (*resume)(struct clocksource *cs); | ||
178 | #ifdef CONFIG_IA64 | 172 | #ifdef CONFIG_IA64 |
179 | void *fsys_mmio; /* used by fsyscall asm code */ | 173 | void *fsys_mmio; /* used by fsyscall asm code */ |
180 | #define CLKSRC_FSYS_MMIO_SET(mmio, addr) ((mmio) = (addr)) | 174 | #define CLKSRC_FSYS_MMIO_SET(mmio, addr) ((mmio) = (addr)) |
181 | #else | 175 | #else |
182 | #define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0) | 176 | #define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0) |
183 | #endif | 177 | #endif |
184 | 178 | const char *name; | |
185 | /* | 179 | struct list_head list; |
186 | * Second part is written at each timer interrupt | 180 | int rating; |
187 | * Keep it in a different cache line to dirty no | 181 | cycle_t (*vread)(void); |
188 | * more than one cache line. | 182 | int (*enable)(struct clocksource *cs); |
189 | */ | 183 | void (*disable)(struct clocksource *cs); |
190 | cycle_t cycle_last ____cacheline_aligned_in_smp; | 184 | unsigned long flags; |
185 | void (*suspend)(struct clocksource *cs); | ||
186 | void (*resume)(struct clocksource *cs); | ||
191 | 187 | ||
192 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG | 188 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG |
193 | /* Watchdog related data, used by the framework */ | 189 | /* Watchdog related data, used by the framework */ |
194 | struct list_head wd_list; | 190 | struct list_head wd_list; |
195 | cycle_t wd_last; | 191 | cycle_t wd_last; |
196 | #endif | 192 | #endif |
197 | }; | 193 | } ____cacheline_aligned; |
198 | 194 | ||
199 | /* | 195 | /* |
200 | * Clock source flags bits:: | 196 | * Clock source flags bits:: |
@@ -341,4 +337,6 @@ static inline void update_vsyscall_tz(void) | |||
341 | 337 | ||
342 | extern void timekeeping_notify(struct clocksource *clock); | 338 | extern void timekeeping_notify(struct clocksource *clock); |
343 | 339 | ||
340 | extern int clocksource_i8253_init(void); | ||
341 | |||
344 | #endif /* _LINUX_CLOCKSOURCE_H */ | 342 | #endif /* _LINUX_CLOCKSOURCE_H */ |
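The reshuffled struct clocksource keeps only the read-side hotpath fields in the first cache line; a driver still fills in the same handful of members. A minimal sketch (the counter register and 24 MHz rate are invented), using clocksource_register_hz() to derive mult/shift:

    static void __iomem *cnt_base;  /* hypothetical free-running counter */

    static cycle_t my_cs_read(struct clocksource *cs)
    {
            return (cycle_t)readl(cnt_base);
    }

    static struct clocksource my_cs = {
            .name   = "my-counter",
            .rating = 300,
            .read   = my_cs_read,
            .mask   = CLOCKSOURCE_MASK(32),
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };

    static void __init my_cs_init(void)
    {
            clocksource_register_hz(&my_cs, 24000000);      /* 24 MHz */
    }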
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 9343dd3de858..11be48e0d168 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2001 Russell King | 4 | * Copyright (C) 2001 Russell King |
5 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | 5 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
@@ -56,9 +56,9 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb, | |||
56 | #define CPUFREQ_POLICY_POWERSAVE (1) | 56 | #define CPUFREQ_POLICY_POWERSAVE (1) |
57 | #define CPUFREQ_POLICY_PERFORMANCE (2) | 57 | #define CPUFREQ_POLICY_PERFORMANCE (2) |
58 | 58 | ||
59 | /* Frequency values here are CPU kHz so that hardware which doesn't run | 59 | /* Frequency values here are CPU kHz so that hardware which doesn't run |
60 | * with some frequencies can complain without having to guess what per | 60 | * with some frequencies can complain without having to guess what per |
61 | * cent / per mille means. | 61 | * cent / per mille means. |
62 | * Maximum transition latency is in nanoseconds - if it's unknown, | 62 | * Maximum transition latency is in nanoseconds - if it's unknown, |
63 | * CPUFREQ_ETERNAL shall be used. | 63 | * CPUFREQ_ETERNAL shall be used. |
64 | */ | 64 | */ |
@@ -72,13 +72,15 @@ extern struct kobject *cpufreq_global_kobject; | |||
72 | struct cpufreq_cpuinfo { | 72 | struct cpufreq_cpuinfo { |
73 | unsigned int max_freq; | 73 | unsigned int max_freq; |
74 | unsigned int min_freq; | 74 | unsigned int min_freq; |
75 | unsigned int transition_latency; /* in 10^(-9) s = nanoseconds */ | 75 | |
76 | /* in 10^(-9) s = nanoseconds */ | ||
77 | unsigned int transition_latency; | ||
76 | }; | 78 | }; |
77 | 79 | ||
78 | struct cpufreq_real_policy { | 80 | struct cpufreq_real_policy { |
79 | unsigned int min; /* in kHz */ | 81 | unsigned int min; /* in kHz */ |
80 | unsigned int max; /* in kHz */ | 82 | unsigned int max; /* in kHz */ |
81 | unsigned int policy; /* see above */ | 83 | unsigned int policy; /* see above */ |
82 | struct cpufreq_governor *governor; /* see below */ | 84 | struct cpufreq_governor *governor; /* see below */ |
83 | }; | 85 | }; |
84 | 86 | ||
@@ -94,7 +96,7 @@ struct cpufreq_policy { | |||
94 | unsigned int max; /* in kHz */ | 96 | unsigned int max; /* in kHz */ |
95 | unsigned int cur; /* in kHz, only needed if cpufreq | 97 | unsigned int cur; /* in kHz, only needed if cpufreq |
96 | * governors are used */ | 98 | * governors are used */ |
97 | unsigned int policy; /* see above */ | 99 | unsigned int policy; /* see above */ |
98 | struct cpufreq_governor *governor; /* see below */ | 100 | struct cpufreq_governor *governor; /* see below */ |
99 | 101 | ||
100 | struct work_struct update; /* if update_policy() needs to be | 102 | struct work_struct update; /* if update_policy() needs to be |
@@ -167,11 +169,11 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu | |||
167 | 169 | ||
168 | struct cpufreq_governor { | 170 | struct cpufreq_governor { |
169 | char name[CPUFREQ_NAME_LEN]; | 171 | char name[CPUFREQ_NAME_LEN]; |
170 | int (*governor) (struct cpufreq_policy *policy, | 172 | int (*governor) (struct cpufreq_policy *policy, |
171 | unsigned int event); | 173 | unsigned int event); |
172 | ssize_t (*show_setspeed) (struct cpufreq_policy *policy, | 174 | ssize_t (*show_setspeed) (struct cpufreq_policy *policy, |
173 | char *buf); | 175 | char *buf); |
174 | int (*store_setspeed) (struct cpufreq_policy *policy, | 176 | int (*store_setspeed) (struct cpufreq_policy *policy, |
175 | unsigned int freq); | 177 | unsigned int freq); |
176 | unsigned int max_transition_latency; /* HW must be able to switch to | 178 | unsigned int max_transition_latency; /* HW must be able to switch to |
177 | next freq faster than this value in nano secs or we | 179 | next freq faster than this value in nano secs or we |
@@ -180,7 +182,8 @@ struct cpufreq_governor { | |||
180 | struct module *owner; | 182 | struct module *owner; |
181 | }; | 183 | }; |
182 | 184 | ||
183 | /* pass a target to the cpufreq driver | 185 | /* |
186 | * Pass a target to the cpufreq driver. | ||
184 | */ | 187 | */ |
185 | extern int cpufreq_driver_target(struct cpufreq_policy *policy, | 188 | extern int cpufreq_driver_target(struct cpufreq_policy *policy, |
186 | unsigned int target_freq, | 189 | unsigned int target_freq, |
@@ -237,9 +240,9 @@ struct cpufreq_driver { | |||
237 | 240 | ||
238 | /* flags */ | 241 | /* flags */ |
239 | 242 | ||
240 | #define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if | 243 | #define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if |
241 | * all ->init() calls failed */ | 244 | * all ->init() calls failed */ |
242 | #define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel | 245 | #define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel |
243 | * "constants" aren't affected by | 246 | * "constants" aren't affected by |
244 | * frequency transitions */ | 247 | * frequency transitions */ |
245 | #define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed | 248 | #define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed |
@@ -252,7 +255,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); | |||
252 | void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); | 255 | void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); |
253 | 256 | ||
254 | 257 | ||
255 | static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) | 258 | static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) |
256 | { | 259 | { |
257 | if (policy->min < min) | 260 | if (policy->min < min) |
258 | policy->min = min; | 261 | policy->min = min; |
@@ -386,34 +389,15 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | |||
386 | /* the following 3 functions are for cpufreq core use only */ | 389 | /* the following 3 functions are for cpufreq core use only */ |
387 | struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); | 390 | struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); |
388 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); | 391 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); |
389 | void cpufreq_cpu_put (struct cpufreq_policy *data); | 392 | void cpufreq_cpu_put(struct cpufreq_policy *data); |
390 | 393 | ||
391 | /* the following are really really optional */ | 394 | /* the following are really really optional */ |
392 | extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; | 395 | extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; |
393 | 396 | ||
394 | void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, | 397 | void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, |
395 | unsigned int cpu); | 398 | unsigned int cpu); |
396 | 399 | ||
397 | void cpufreq_frequency_table_put_attr(unsigned int cpu); | 400 | void cpufreq_frequency_table_put_attr(unsigned int cpu); |
398 | 401 | ||
399 | 402 | ||
400 | /********************************************************************* | ||
401 | * UNIFIED DEBUG HELPERS * | ||
402 | *********************************************************************/ | ||
403 | |||
404 | #define CPUFREQ_DEBUG_CORE 1 | ||
405 | #define CPUFREQ_DEBUG_DRIVER 2 | ||
406 | #define CPUFREQ_DEBUG_GOVERNOR 4 | ||
407 | |||
408 | #ifdef CONFIG_CPU_FREQ_DEBUG | ||
409 | |||
410 | extern void cpufreq_debug_printk(unsigned int type, const char *prefix, | ||
411 | const char *fmt, ...); | ||
412 | |||
413 | #else | ||
414 | |||
415 | #define cpufreq_debug_printk(msg...) do { } while(0) | ||
416 | |||
417 | #endif /* CONFIG_CPU_FREQ_DEBUG */ | ||
418 | |||
419 | #endif /* _LINUX_CPUFREQ_H */ | 403 | #endif /* _LINUX_CPUFREQ_H */ |
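cpufreq_verify_within_limits(), shown inline above, is what a driver's ->verify() hook typically calls to clamp a proposed policy to what the hardware supports; a sketch with invented 200 MHz..1 GHz bounds:

    static int my_cpufreq_verify(struct cpufreq_policy *policy)
    {
            /* clamp the requested range to the hardware's limits (kHz) */
            cpufreq_verify_within_limits(policy, 200000, 1000000);
            return 0;
    }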
diff --git a/include/linux/cred.h b/include/linux/cred.h index 9aeeb0ba2003..be16b61283cc 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
@@ -146,6 +146,7 @@ struct cred { | |||
146 | void *security; /* subjective LSM security */ | 146 | void *security; /* subjective LSM security */ |
147 | #endif | 147 | #endif |
148 | struct user_struct *user; /* real user ID subscription */ | 148 | struct user_struct *user; /* real user ID subscription */ |
149 | struct user_namespace *user_ns; /* cached user->user_ns */ | ||
149 | struct group_info *group_info; /* supplementary groups for euid/fsgid */ | 150 | struct group_info *group_info; /* supplementary groups for euid/fsgid */ |
150 | struct rcu_head rcu; /* RCU deletion hook */ | 151 | struct rcu_head rcu; /* RCU deletion hook */ |
151 | }; | 152 | }; |
@@ -354,10 +355,15 @@ static inline void put_cred(const struct cred *_cred) | |||
354 | #define current_fsgid() (current_cred_xxx(fsgid)) | 355 | #define current_fsgid() (current_cred_xxx(fsgid)) |
355 | #define current_cap() (current_cred_xxx(cap_effective)) | 356 | #define current_cap() (current_cred_xxx(cap_effective)) |
356 | #define current_user() (current_cred_xxx(user)) | 357 | #define current_user() (current_cred_xxx(user)) |
357 | #define _current_user_ns() (current_cred_xxx(user)->user_ns) | ||
358 | #define current_security() (current_cred_xxx(security)) | 358 | #define current_security() (current_cred_xxx(security)) |
359 | 359 | ||
360 | extern struct user_namespace *current_user_ns(void); | 360 | #ifdef CONFIG_USER_NS |
361 | #define current_user_ns() (current_cred_xxx(user_ns)) | ||
362 | #else | ||
363 | extern struct user_namespace init_user_ns; | ||
364 | #define current_user_ns() (&init_user_ns) | ||
365 | #endif | ||
366 | |||
361 | 367 | ||
362 | #define current_uid_gid(_uid, _gid) \ | 368 | #define current_uid_gid(_uid, _gid) \ |
363 | do { \ | 369 | do { \ |
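With user_ns now cached in struct cred, current_user_ns() under CONFIG_USER_NS becomes a plain field read instead of an out-of-line call chasing current->cred->user->user_ns. A sketch of the common check it accelerates (the helper name is invented):

    static bool in_init_userns(void)
    {
            /* now a cached-field read rather than a function call */
            return current_user_ns() == &init_user_ns;
    }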
diff --git a/include/linux/device.h b/include/linux/device.h index ab8dfc095709..c66111affca9 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -47,6 +47,38 @@ extern int __must_check bus_create_file(struct bus_type *, | |||
47 | struct bus_attribute *); | 47 | struct bus_attribute *); |
48 | extern void bus_remove_file(struct bus_type *, struct bus_attribute *); | 48 | extern void bus_remove_file(struct bus_type *, struct bus_attribute *); |
49 | 49 | ||
50 | /** | ||
51 | * struct bus_type - The bus type of the device | ||
52 | * | ||
53 | * @name: The name of the bus. | ||
54 | * @bus_attrs: Default attributes of the bus. | ||
55 | * @dev_attrs: Default attributes of the devices on the bus. | ||
56 | * @drv_attrs: Default attributes of the device drivers on the bus. | ||
57 | * @match: Called, perhaps multiple times, whenever a new device or driver | ||
58 | * is added for this bus. It should return a nonzero value if the | ||
59 | * given device can be handled by the given driver. | ||
60 | * @uevent: Called when a device is added or removed, or for a few other | ||
61 | * things that generate uevents, to add the environment variables. | ||
62 | * @probe: Called when a new device or driver add to this bus, and callback | ||
63 | * the specific driver's probe to initial the matched device. | ||
64 | * @remove: Called when a device removed from this bus. | ||
65 | * @shutdown: Called at shut-down time to quiesce the device. | ||
66 | * @suspend: Called when a device on this bus wants to go to sleep mode. | ||
67 | * @resume: Called to bring a device on this bus out of sleep mode. | ||
68 | * @pm: Power management operations of this bus, which call the specific | ||
69 | * device driver's pm-ops. | ||
70 | * @p: The private data of the driver core, only the driver core can | ||
71 | * touch this. | ||
72 | * | ||
73 | * A bus is a channel between the processor and one or more devices. For the | ||
74 | * purposes of the device model, all devices are connected via a bus, even if | ||
75 | * it is an internal, virtual, "platform" bus. Buses can plug into each other. | ||
76 | * A USB controller is usually a PCI device, for example. The device model | ||
77 | * represents the actual connections between buses and the devices they control. | ||
78 | * A bus is represented by the bus_type structure. It contains the name, the | ||
79 | * default attributes, the bus' methods, PM operations, and the driver core's | ||
80 | * private data. | ||
81 | */ | ||
50 | struct bus_type { | 82 | struct bus_type { |
51 | const char *name; | 83 | const char *name; |
52 | struct bus_attribute *bus_attrs; | 84 | struct bus_attribute *bus_attrs; |
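To make the newly documented callbacks concrete: a minimal bus can get by with a name and a match() hook, with everything else defaulted. A sketch (the "frob" bus is invented):

    static int frob_match(struct device *dev, struct device_driver *drv)
    {
            /* simplest possible policy: match purely on name */
            return strcmp(dev_name(dev), drv->name) == 0;
    }

    static struct bus_type frob_bus_type = {
            .name  = "frob",
            .match = frob_match,
    };

    /* registered once at init time with bus_register(&frob_bus_type) */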
@@ -119,6 +151,37 @@ extern int bus_unregister_notifier(struct bus_type *bus, | |||
119 | extern struct kset *bus_get_kset(struct bus_type *bus); | 151 | extern struct kset *bus_get_kset(struct bus_type *bus); |
120 | extern struct klist *bus_get_device_klist(struct bus_type *bus); | 152 | extern struct klist *bus_get_device_klist(struct bus_type *bus); |
121 | 153 | ||
154 | /** | ||
155 | * struct device_driver - The basic device driver structure | ||
156 | * @name: Name of the device driver. | ||
157 | * @bus: The bus which the device of this driver belongs to. | ||
158 | * @owner: The module owner. | ||
159 | * @mod_name: Used for built-in modules. | ||
160 | * @suppress_bind_attrs: Disables bind/unbind via sysfs. | ||
161 | * @of_match_table: The open firmware table. | ||
162 | * @probe: Called to query the existence of a specific device, | ||
163 | * whether this driver can work with it, and bind the driver | ||
164 | * to a specific device. | ||
165 | * @remove: Called when the device is removed from the system to | ||
166 | * unbind a device from this driver. | ||
167 | * @shutdown: Called at shut-down time to quiesce the device. | ||
168 | * @suspend: Called to put the device to sleep mode. Usually to a | ||
169 | * low power state. | ||
170 | * @resume: Called to bring a device out of sleep mode. | ||
171 | * @groups: Default attributes that get created by the driver core | ||
172 | * automatically. | ||
173 | * @pm: Power management operations of the device which matched | ||
174 | * this driver. | ||
175 | * @p: Driver core's private data; no one other than the driver | ||
176 | * core can touch this. | ||
177 | * | ||
178 | * The device driver-model tracks all of the drivers known to the system. | ||
179 | * The main reason for this tracking is to enable the driver core to match | ||
180 | * up drivers with new devices. Once drivers are known objects within the | ||
181 | * system, however, a number of other things become possible. Device drivers | ||
182 | * can export information and configuration variables that are independent | ||
183 | * of any specific device. | ||
184 | */ | ||
122 | struct device_driver { | 185 | struct device_driver { |
123 | const char *name; | 186 | const char *name; |
124 | struct bus_type *bus; | 187 | struct bus_type *bus; |
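A driver hanging off such a bus likewise needs little beyond a name, the bus pointer, and probe/remove; a sketch reusing the hypothetical frob bus from above:

    static int frob_probe(struct device *dev)
    {
            dev_info(dev, "bound\n");
            return 0;               /* nonzero would fail the binding */
    }

    static struct device_driver frob_driver = {
            .name  = "frob0",
            .bus   = &frob_bus_type,
            .owner = THIS_MODULE,
            .probe = frob_probe,
    };

    /* driver_register(&frob_driver) attaches it; the bus' match()
     * then pairs it with any device named "frob0". */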
@@ -185,8 +248,34 @@ struct device *driver_find_device(struct device_driver *drv, | |||
185 | struct device *start, void *data, | 248 | struct device *start, void *data, |
186 | int (*match)(struct device *dev, void *data)); | 249 | int (*match)(struct device *dev, void *data)); |
187 | 250 | ||
188 | /* | 251 | /** |
189 | * device classes | 252 | * struct class - device classes |
253 | * @name: Name of the class. | ||
254 | * @owner: The module owner. | ||
255 | * @class_attrs: Default attributes of this class. | ||
256 | * @dev_attrs: Default attributes of the devices belonging to the class. | ||
257 | * @dev_bin_attrs: Default binary attributes of the devices belonging to the class. | ||
258 | * @dev_kobj: The kobject that represents this class and links it into the hierarchy. | ||
259 | * @dev_uevent: Called when a device is added to or removed from this class, | ||
260 | * or for a few other things that generate uevents, to add the | ||
261 | * environment variables. | ||
262 | * @devnode: Callback to provide the devtmpfs. | ||
263 | * @class_release: Called to release this class. | ||
264 | * @dev_release: Called to release the device. | ||
265 | * @suspend: Used to put the device to sleep mode, usually to a low power | ||
266 | * state. | ||
267 | * @resume: Used to bring the device out of sleep mode. | ||
268 | * @ns_type: Callbacks so sysfs can determine namespaces. | ||
269 | * @namespace: Namespace of the devices that belong to this class. | ||
270 | * @pm: The default device power management operations of this class. | ||
271 | * @p: The private data of the driver core, no one other than the | ||
272 | * driver core can touch this. | ||
273 | * | ||
274 | * A class is a higher-level view of a device that abstracts out low-level | ||
275 | * implementation details. Drivers may see a SCSI disk or an ATA disk, but, | ||
276 | * at the class level, they are all simply disks. Classes allow user space | ||
277 | * to work with devices based on what they do, rather than how they are | ||
278 | * connected or how they work. | ||
190 | */ | 279 | */ |
191 | struct class { | 280 | struct class { |
192 | const char *name; | 281 | const char *name; |
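Few drivers fill in a struct class by hand; class_create()/device_create() cover the usual char-device case. A sketch (the class name and the 240 major are invented):

    static struct class *frob_class;

    static int __init frob_class_init(void)
    {
            frob_class = class_create(THIS_MODULE, "frob");
            if (IS_ERR(frob_class))
                    return PTR_ERR(frob_class);
            /* creates /sys/class/frob/frob0 and, via udev, /dev/frob0 */
            device_create(frob_class, NULL, MKDEV(240, 0), NULL, "frob0");
            return 0;
    }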
@@ -401,6 +490,65 @@ struct device_dma_parameters { | |||
401 | unsigned long segment_boundary_mask; | 490 | unsigned long segment_boundary_mask; |
402 | }; | 491 | }; |
403 | 492 | ||
493 | /** | ||
494 | * struct device - The basic device structure | ||
495 | * @parent: The device's "parent" device, the device to which it is attached. | ||
496 | * In most cases, a parent device is some sort of bus or host | ||
497 | * controller. If parent is NULL, the device is a top-level device, | ||
498 | * which is not usually what you want. | ||
499 | * @p: Holds the private data of the driver core portions of the device. | ||
500 | * See the comment of the struct device_private for detail. | ||
501 | * @kobj: A top-level, abstract class from which other classes are derived. | ||
502 | * @init_name: Initial name of the device. | ||
503 | * @type: The type of device. | ||
504 | * This identifies the device type and carries type-specific | ||
505 | * information. | ||
506 | * @mutex: Mutex to synchronize calls to its driver. | ||
507 | * @bus: Type of bus device is on. | ||
508 | * @driver: Which driver has allocated this device. | ||
509 | * @platform_data: Platform data specific to the device. | ||
510 | * Example: For devices on custom boards, as typical of embedded | ||
511 | * and SOC based hardware, Linux often uses platform_data to point | ||
512 | * to board-specific structures describing devices and how they | ||
513 | * are wired. That can include what ports are available, chip | ||
514 | * variants, which GPIO pins act in what additional roles, and so | ||
515 | * on. This shrinks the "Board Support Packages" (BSPs) and | ||
516 | * minimizes board-specific #ifdefs in drivers. | ||
517 | * @power: For device power management. | ||
518 | * See Documentation/power/devices.txt for details. | ||
519 | * @pwr_domain: Provide callbacks that are executed during system suspend, | ||
520 | * hibernation, system resume and during runtime PM transitions | ||
521 | * along with subsystem-level and driver-level callbacks. | ||
522 | * @numa_node: NUMA node this device is close to. | ||
523 | * @dma_mask: Dma mask (if dma'ble device). | ||
524 | * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all | ||
525 | * hardware supports 64-bit addresses for consistent allocations | ||
526 | * such as descriptors. | ||
527 | * @dma_parms: A low level driver may set these to teach IOMMU code about | ||
528 | * segment limitations. | ||
529 | * @dma_pools: Dma pools (if dma'ble device). | ||
530 | * @dma_mem: Internal for coherent mem override. | ||
531 | * @archdata: For arch-specific additions. | ||
532 | * @of_node: Associated device tree node. | ||
533 | * @of_match: Matching of_device_id from driver. | ||
534 | * @devt: For creating the sysfs "dev". | ||
535 | * @devres_lock: Spinlock to protect the resource of the device. | ||
536 | * @devres_head: The resources list of the device. | ||
537 | * @knode_class: The node used to add the device to the class list. | ||
538 | * @class: The class of the device. | ||
539 | * @groups: Optional attribute groups. | ||
540 | * @release: Callback to free the device after all references have | ||
541 | * gone away. This should be set by the allocator of the | ||
542 | * device (i.e. the bus driver that discovered the device). | ||
543 | * | ||
544 | * At the lowest level, every device in a Linux system is represented by an | ||
545 | * instance of struct device. The device structure contains the information | ||
546 | * that the device model core needs to model the system. Most subsystems, | ||
547 | * however, track additional information about the devices they host. As a | ||
548 | * result, it is rare for devices to be represented by bare device structures; | ||
549 | * instead, that structure, like kobject structures, is usually embedded within | ||
550 | * a higher-level representation of the device. | ||
551 | */ | ||
404 | struct device { | 552 | struct device { |
405 | struct device *parent; | 553 | struct device *parent; |
406 | 554 | ||
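The "usually embedded" point in the comment above deserves a sketch: subsystems wrap struct device in their own type and recover the container with container_of() in the release callback (the frob_device type is invented):

    struct frob_device {
            struct device dev;      /* embedded, never a bare pointer */
            int hw_id;
    };

    static void frob_release(struct device *dev)
    {
            struct frob_device *fd =
                    container_of(dev, struct frob_device, dev);

            kfree(fd);              /* runs once the last reference drops */
    }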
@@ -408,7 +556,7 @@ struct device { | |||
408 | 556 | ||
409 | struct kobject kobj; | 557 | struct kobject kobj; |
410 | const char *init_name; /* initial name of the device */ | 558 | const char *init_name; /* initial name of the device */ |
411 | struct device_type *type; | 559 | const struct device_type *type; |
412 | 560 | ||
413 | struct mutex mutex; /* mutex to synchronize calls to | 561 | struct mutex mutex; /* mutex to synchronize calls to |
414 | * its driver. | 562 | * its driver. |
@@ -442,7 +590,6 @@ struct device { | |||
442 | struct dev_archdata archdata; | 590 | struct dev_archdata archdata; |
443 | 591 | ||
444 | struct device_node *of_node; /* associated device tree node */ | 592 | struct device_node *of_node; /* associated device tree node */ |
445 | const struct of_device_id *of_match; /* matching of_device_id from driver */ | ||
446 | 593 | ||
447 | dev_t devt; /* dev_t, creates the sysfs "dev" */ | 594 | dev_t devt; /* dev_t, creates the sysfs "dev" */ |
448 | 595 | ||
@@ -557,7 +704,7 @@ extern int device_move(struct device *dev, struct device *new_parent, | |||
557 | extern const char *device_get_devnode(struct device *dev, | 704 | extern const char *device_get_devnode(struct device *dev, |
558 | mode_t *mode, const char **tmp); | 705 | mode_t *mode, const char **tmp); |
559 | extern void *dev_get_drvdata(const struct device *dev); | 706 | extern void *dev_get_drvdata(const struct device *dev); |
560 | extern void dev_set_drvdata(struct device *dev, void *data); | 707 | extern int dev_set_drvdata(struct device *dev, void *data); |
561 | 708 | ||
562 | /* | 709 | /* |
563 | * Root device objects for grouping under /sys/devices | 710 | * Root device objects for grouping under /sys/devices |
@@ -611,7 +758,7 @@ extern int (*platform_notify)(struct device *dev); | |||
611 | extern int (*platform_notify_remove)(struct device *dev); | 758 | extern int (*platform_notify_remove)(struct device *dev); |
612 | 759 | ||
613 | 760 | ||
614 | /** | 761 | /* |
615 | * get_device - atomically increment the reference count for the device. | 762 | * get_device - atomically increment the reference count for the device. |
616 | * | 763 | * |
617 | */ | 764 | */ |
@@ -633,13 +780,6 @@ static inline int devtmpfs_mount(const char *mountpoint) { return 0; } | |||
633 | /* drivers/base/power/shutdown.c */ | 780 | /* drivers/base/power/shutdown.c */ |
634 | extern void device_shutdown(void); | 781 | extern void device_shutdown(void); |
635 | 782 | ||
636 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | ||
637 | /* drivers/base/sys.c */ | ||
638 | extern void sysdev_shutdown(void); | ||
639 | #else | ||
640 | static inline void sysdev_shutdown(void) { } | ||
641 | #endif | ||
642 | |||
643 | /* debugging and troubleshooting/diagnostic helpers. */ | 783 | /* debugging and troubleshooting/diagnostic helpers. */ |
644 | extern const char *dev_driver_string(const struct device *dev); | 784 | extern const char *dev_driver_string(const struct device *dev); |
645 | 785 | ||
@@ -742,13 +882,17 @@ do { \ | |||
742 | #endif | 882 | #endif |
743 | 883 | ||
744 | /* | 884 | /* |
745 | * dev_WARN() acts like dev_printk(), but with the key difference | 885 | * dev_WARN*() acts like dev_printk(), but with the key difference |
746 | * of using a WARN/WARN_ON to get the message out, including the | 886 | * of using a WARN/WARN_ON to get the message out, including the |
747 | * file/line information and a backtrace. | 887 | * file/line information and a backtrace. |
748 | */ | 888 | */ |
749 | #define dev_WARN(dev, format, arg...) \ | 889 | #define dev_WARN(dev, format, arg...) \ |
750 | WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg); | 890 | WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg); |
751 | 891 | ||
892 | #define dev_WARN_ONCE(dev, condition, format, arg...) \ | ||
893 | WARN_ONCE(condition, "Device %s\n" format, \ | ||
894 | dev_driver_string(dev), ## arg) | ||
895 | |||
752 | /* Create alias, so I can be autoloaded. */ | 896 | /* Create alias, so I can be autoloaded. */ |
753 | #define MODULE_ALIAS_CHARDEV(major,minor) \ | 897 | #define MODULE_ALIAS_CHARDEV(major,minor) \ |
754 | MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) | 898 | MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) |
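The new dev_WARN_ONCE() mirrors WARN_ONCE() with the device name prefixed, so a hot path can emit a single backtraced diagnostic; a sketch (the length check is invented):

    static int frob_start_dma(struct device *dev, size_t len)
    {
            /* one backtraced warning on first offense, then silence */
            dev_WARN_ONCE(dev, len > 65536,
                          "oversized DMA request (%zu bytes)\n", len);
            return len > 65536 ? -EINVAL : 0;
    }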
diff --git a/include/linux/fb.h b/include/linux/fb.h index df728c1c29ed..6a8274877171 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -832,6 +832,7 @@ struct fb_tile_ops { | |||
832 | #define FBINFO_CAN_FORCE_OUTPUT 0x200000 | 832 | #define FBINFO_CAN_FORCE_OUTPUT 0x200000 |
833 | 833 | ||
834 | struct fb_info { | 834 | struct fb_info { |
835 | atomic_t count; | ||
835 | int node; | 836 | int node; |
836 | int flags; | 837 | int flags; |
837 | struct mutex lock; /* Lock for open/release/ioctl funcs */ | 838 | struct mutex lock; /* Lock for open/release/ioctl funcs */ |
diff --git a/include/linux/fs.h b/include/linux/fs.h index dbd860af0804..cdf9495df204 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -358,7 +358,6 @@ struct inodes_stat_t { | |||
358 | #define FS_EXTENT_FL 0x00080000 /* Extents */ | 358 | #define FS_EXTENT_FL 0x00080000 /* Extents */ |
359 | #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ | 359 | #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ |
360 | #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ | 360 | #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ |
361 | #define FS_COW_FL 0x02000000 /* Cow file */ | ||
362 | #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ | 361 | #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ |
363 | 362 | ||
364 | #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ | 363 | #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index bfb8f934521e..56d8fc87fbbc 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -353,6 +353,8 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask); | |||
353 | 353 | ||
354 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask); | 354 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask); |
355 | void free_pages_exact(void *virt, size_t size); | 355 | void free_pages_exact(void *virt, size_t size); |
356 | /* This is different from alloc_pages_exact_node !!! */ | ||
357 | void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); | ||
356 | 358 | ||
357 | #define __get_free_page(gfp_mask) \ | 359 | #define __get_free_page(gfp_mask) \ |
358 | __get_free_pages((gfp_mask), 0) | 360 | __get_free_pages((gfp_mask), 0) |
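alloc_pages_exact_nid() parallels alloc_pages_exact() but allocates on an explicit node; as the comment stresses, it is not alloc_pages_exact_node(), which returns a struct page rather than a kernel virtual address. A sketch (the size is arbitrary):

    static void *alloc_local_buf(void)
    {
            /* exact-size, node-local buffer; pair with free_pages_exact() */
            return alloc_pages_exact_nid(numa_node_id(), 48 * 1024,
                                         GFP_KERNEL);
    }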
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index caa151fbebb7..689496bb6654 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -134,7 +134,6 @@ extern struct cred init_cred; | |||
134 | .stack = &init_thread_info, \ | 134 | .stack = &init_thread_info, \ |
135 | .usage = ATOMIC_INIT(2), \ | 135 | .usage = ATOMIC_INIT(2), \ |
136 | .flags = PF_KTHREAD, \ | 136 | .flags = PF_KTHREAD, \ |
137 | .lock_depth = -1, \ | ||
138 | .prio = MAX_PRIO-20, \ | 137 | .prio = MAX_PRIO-20, \ |
139 | .static_prio = MAX_PRIO-20, \ | 138 | .static_prio = MAX_PRIO-20, \ |
140 | .normal_prio = MAX_PRIO-20, \ | 139 | .normal_prio = MAX_PRIO-20, \ |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index bea0ac750712..6c12989839d9 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -414,7 +414,6 @@ enum | |||
414 | TASKLET_SOFTIRQ, | 414 | TASKLET_SOFTIRQ, |
415 | SCHED_SOFTIRQ, | 415 | SCHED_SOFTIRQ, |
416 | HRTIMER_SOFTIRQ, | 416 | HRTIMER_SOFTIRQ, |
417 | RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ | ||
418 | 417 | ||
419 | NR_SOFTIRQS | 418 | NR_SOFTIRQS |
420 | }; | 419 | }; |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 09a308072f56..8b4538446636 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -53,12 +53,13 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data); | |||
53 | * Bits which can be modified via irq_set/clear/modify_status_flags() | 53 | * Bits which can be modified via irq_set/clear/modify_status_flags() |
54 | * IRQ_LEVEL - Interrupt is level type. Will be also | 54 | * IRQ_LEVEL - Interrupt is level type. Will be also |
55 | * updated in the code when the above trigger | 55 | * updated in the code when the above trigger |
56 | * bits are modified via set_irq_type() | 56 | * bits are modified via irq_set_irq_type() |
57 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect | 57 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect |
58 | * it from affinity setting | 58 | * it from affinity setting |
59 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing | 59 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing |
60 | * IRQ_NOREQUEST - Interrupt cannot be requested via | 60 | * IRQ_NOREQUEST - Interrupt cannot be requested via |
61 | * request_irq() | 61 | * request_irq() |
62 | * IRQ_NOTHREAD - Interrupt cannot be threaded | ||
62 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in | 63 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in |
63 | * request/setup_irq() | 64 | * request/setup_irq() |
64 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) | 65 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) |
@@ -85,6 +86,7 @@ enum { | |||
85 | IRQ_NO_BALANCING = (1 << 13), | 86 | IRQ_NO_BALANCING = (1 << 13), |
86 | IRQ_MOVE_PCNTXT = (1 << 14), | 87 | IRQ_MOVE_PCNTXT = (1 << 14), |
87 | IRQ_NESTED_THREAD = (1 << 15), | 88 | IRQ_NESTED_THREAD = (1 << 15), |
89 | IRQ_NOTHREAD = (1 << 16), | ||
88 | }; | 90 | }; |
89 | 91 | ||
90 | #define IRQF_MODIFY_MASK \ | 92 | #define IRQF_MODIFY_MASK \ |
@@ -261,23 +263,6 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) | |||
261 | * struct irq_chip - hardware interrupt chip descriptor | 263 | * struct irq_chip - hardware interrupt chip descriptor |
262 | * | 264 | * |
263 | * @name: name for /proc/interrupts | 265 | * @name: name for /proc/interrupts |
264 | * @startup: deprecated, replaced by irq_startup | ||
265 | * @shutdown: deprecated, replaced by irq_shutdown | ||
266 | * @enable: deprecated, replaced by irq_enable | ||
267 | * @disable: deprecated, replaced by irq_disable | ||
268 | * @ack: deprecated, replaced by irq_ack | ||
269 | * @mask: deprecated, replaced by irq_mask | ||
270 | * @mask_ack: deprecated, replaced by irq_mask_ack | ||
271 | * @unmask: deprecated, replaced by irq_unmask | ||
272 | * @eoi: deprecated, replaced by irq_eoi | ||
273 | * @end: deprecated, will go away with __do_IRQ() | ||
274 | * @set_affinity: deprecated, replaced by irq_set_affinity | ||
275 | * @retrigger: deprecated, replaced by irq_retrigger | ||
276 | * @set_type: deprecated, replaced by irq_set_type | ||
277 | * @set_wake: deprecated, replaced by irq_wake | ||
278 | * @bus_lock: deprecated, replaced by irq_bus_lock | ||
279 | * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock | ||
280 | * | ||
281 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) | 266 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) |
282 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) | 267 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) |
283 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) | 268 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) |
@@ -295,6 +280,9 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) | |||
295 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips | 280 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips |
296 | * @irq_cpu_online: configure an interrupt source for a secondary CPU | 281 | * @irq_cpu_online: configure an interrupt source for a secondary CPU |
297 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU | 282 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU |
283 | * @irq_suspend: function called from core code on suspend once per chip | ||
284 | * @irq_resume: function called from core code on resume once per chip | ||
285 | * @irq_pm_shutdown: function called from core code on shutdown once per chip | ||
298 | * @irq_print_chip: optional to print special chip info in show_interrupts | 286 | * @irq_print_chip: optional to print special chip info in show_interrupts |
299 | * @flags: chip specific flags | 287 | * @flags: chip specific flags |
300 | * | 288 | * |
@@ -324,6 +312,10 @@ struct irq_chip { | |||
324 | void (*irq_cpu_online)(struct irq_data *data); | 312 | void (*irq_cpu_online)(struct irq_data *data); |
325 | void (*irq_cpu_offline)(struct irq_data *data); | 313 | void (*irq_cpu_offline)(struct irq_data *data); |
326 | 314 | ||
315 | void (*irq_suspend)(struct irq_data *data); | ||
316 | void (*irq_resume)(struct irq_data *data); | ||
317 | void (*irq_pm_shutdown)(struct irq_data *data); | ||
318 | |||
327 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); | 319 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); |
328 | 320 | ||
329 | unsigned long flags; | 321 | unsigned long flags; |
@@ -439,7 +431,7 @@ irq_set_handler(unsigned int irq, irq_flow_handler_t handle) | |||
439 | /* | 431 | /* |
440 | * Set a highlevel chained flow handler for a given IRQ. | 432 | * Set a highlevel chained flow handler for a given IRQ. |
441 | * (a chained handler is automatically enabled and set to | 433 | * (a chained handler is automatically enabled and set to |
442 | * IRQ_NOREQUEST and IRQ_NOPROBE) | 434 | * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) |
443 | */ | 435 | */ |
444 | static inline void | 436 | static inline void |
445 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) | 437 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) |
@@ -469,6 +461,16 @@ static inline void irq_set_probe(unsigned int irq) | |||
469 | irq_modify_status(irq, IRQ_NOPROBE, 0); | 461 | irq_modify_status(irq, IRQ_NOPROBE, 0); |
470 | } | 462 | } |
471 | 463 | ||
464 | static inline void irq_set_nothread(unsigned int irq) | ||
465 | { | ||
466 | irq_modify_status(irq, 0, IRQ_NOTHREAD); | ||
467 | } | ||
468 | |||
469 | static inline void irq_set_thread(unsigned int irq) | ||
470 | { | ||
471 | irq_modify_status(irq, IRQ_NOTHREAD, 0); | ||
472 | } | ||
473 | |||
472 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) | 474 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) |
473 | { | 475 | { |
474 | if (nest) | 476 | if (nest) |
@@ -573,6 +575,145 @@ static inline int irq_reserve_irq(unsigned int irq) | |||
573 | return irq_reserve_irqs(irq, 1); | 575 | return irq_reserve_irqs(irq, 1); |
574 | } | 576 | } |
575 | 577 | ||
578 | #ifndef irq_reg_writel | ||
579 | # define irq_reg_writel(val, addr) writel(val, addr) | ||
580 | #endif | ||
581 | #ifndef irq_reg_readl | ||
582 | # define irq_reg_readl(addr) readl(addr) | ||
583 | #endif | ||
584 | |||
585 | /** | ||
586 | * struct irq_chip_regs - register offsets for struct irq_chip_generic | ||
587 | * @enable: Enable register offset to reg_base | ||
588 | * @disable: Disable register offset to reg_base | ||
589 | * @mask: Mask register offset to reg_base | ||
590 | * @ack: Ack register offset to reg_base | ||
591 | * @eoi: Eoi register offset to reg_base | ||
592 | * @type: Type configuration register offset to reg_base | ||
593 | * @polarity: Polarity configuration register offset to reg_base | ||
594 | */ | ||
595 | struct irq_chip_regs { | ||
596 | unsigned long enable; | ||
597 | unsigned long disable; | ||
598 | unsigned long mask; | ||
599 | unsigned long ack; | ||
600 | unsigned long eoi; | ||
601 | unsigned long type; | ||
602 | unsigned long polarity; | ||
603 | }; | ||
604 | |||
605 | /** | ||
606 | * struct irq_chip_type - Generic interrupt chip instance for a flow type | ||
607 | * @chip: The real interrupt chip which provides the callbacks | ||
608 | * @regs: Register offsets for this chip | ||
609 | * @handler: Flow handler associated with this chip | ||
610 | * @type: Chip can handle these flow types | ||
611 | * | ||
612 | * An irq_chip_generic can have several instances of irq_chip_type when | ||
613 | * it requires different functions and register offsets for different | ||
614 | * flow types. | ||
615 | */ | ||
616 | struct irq_chip_type { | ||
617 | struct irq_chip chip; | ||
618 | struct irq_chip_regs regs; | ||
619 | irq_flow_handler_t handler; | ||
620 | u32 type; | ||
621 | }; | ||
622 | |||
623 | /** | ||
624 | * struct irq_chip_generic - Generic irq chip data structure | ||
625 | * @lock: Lock to protect register and cache data access | ||
626 | * @reg_base: Register base address (virtual) | ||
627 | * @irq_base: Interrupt base nr for this chip | ||
628 | * @irq_cnt: Number of interrupts handled by this chip | ||
629 | * @mask_cache: Cached mask register | ||
630 | * @type_cache: Cached type register | ||
631 | * @polarity_cache: Cached polarity register | ||
632 | * @wake_enabled: Interrupt can wakeup from suspend | ||
633 | * @wake_active: Interrupt is marked as a wakeup from suspend source | ||
634 | * @num_ct: Number of available irq_chip_type instances (usually 1) | ||
635 | * @private: Private data for non generic chip callbacks | ||
636 | * @list: List head for keeping track of instances | ||
637 | * @chip_types: Array of interrupt irq_chip_types | ||
638 | * | ||
639 | * Note, that irq_chip_generic can have multiple irq_chip_type | ||
640 | * implementations which can be associated with a particular irq line of | ||
641 | * an irq_chip_generic instance. That allows sharing and protecting | ||
642 | * state in an irq_chip_generic instance when we need to implement | ||
643 | * different flow mechanisms (level/edge) for it. | ||
644 | */ | ||
645 | struct irq_chip_generic { | ||
646 | raw_spinlock_t lock; | ||
647 | void __iomem *reg_base; | ||
648 | unsigned int irq_base; | ||
649 | unsigned int irq_cnt; | ||
650 | u32 mask_cache; | ||
651 | u32 type_cache; | ||
652 | u32 polarity_cache; | ||
653 | u32 wake_enabled; | ||
654 | u32 wake_active; | ||
655 | unsigned int num_ct; | ||
656 | void *private; | ||
657 | struct list_head list; | ||
658 | struct irq_chip_type chip_types[0]; | ||
659 | }; | ||
660 | |||
661 | /** | ||
662 | * enum irq_gc_flags - Initialization flags for generic irq chips | ||
663 | * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg | ||
664 | * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for | ||
665 | * irq chips which need to call irq_set_wake() on | ||
666 | * the parent irq. Usually GPIO implementations | ||
667 | */ | ||
668 | enum irq_gc_flags { | ||
669 | IRQ_GC_INIT_MASK_CACHE = 1 << 0, | ||
670 | IRQ_GC_INIT_NESTED_LOCK = 1 << 1, | ||
671 | }; | ||
672 | |||
673 | /* Generic chip callback functions */ | ||
674 | void irq_gc_noop(struct irq_data *d); | ||
675 | void irq_gc_mask_disable_reg(struct irq_data *d); | ||
676 | void irq_gc_mask_set_bit(struct irq_data *d); | ||
677 | void irq_gc_mask_clr_bit(struct irq_data *d); | ||
678 | void irq_gc_unmask_enable_reg(struct irq_data *d); | ||
679 | void irq_gc_ack(struct irq_data *d); | ||
680 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); | ||
681 | void irq_gc_eoi(struct irq_data *d); | ||
682 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); | ||
683 | |||
684 | /* Setup functions for irq_chip_generic */ | ||
685 | struct irq_chip_generic * | ||
686 | irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, | ||
687 | void __iomem *reg_base, irq_flow_handler_t handler); | ||
688 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, | ||
689 | enum irq_gc_flags flags, unsigned int clr, | ||
690 | unsigned int set); | ||
691 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type); | ||
692 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, | ||
693 | unsigned int clr, unsigned int set); | ||
694 | |||
695 | static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) | ||
696 | { | ||
697 | return container_of(d->chip, struct irq_chip_type, chip); | ||
698 | } | ||
699 | |||
700 | #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) | ||
701 | |||
702 | #ifdef CONFIG_SMP | ||
703 | static inline void irq_gc_lock(struct irq_chip_generic *gc) | ||
704 | { | ||
705 | raw_spin_lock(&gc->lock); | ||
706 | } | ||
707 | |||
708 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) | ||
709 | { | ||
710 | raw_spin_unlock(&gc->lock); | ||
711 | } | ||
712 | #else | ||
713 | static inline void irq_gc_lock(struct irq_chip_generic *gc) { } | ||
714 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } | ||
715 | #endif | ||
716 | |||
576 | #endif /* CONFIG_GENERIC_HARDIRQS */ | 717 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
577 | 718 | ||
578 | #endif /* !CONFIG_S390 */ | 719 | #endif /* !CONFIG_S390 */ |
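The generic-chip helpers above replace hand-rolled mask/ack irq_chips for simple register-based controllers. A sketch for a hypothetical 32-line controller with separate enable/disable registers (the offsets are invented), wired to the callbacks declared above:

    static void __init my_intc_init(void __iomem *base, unsigned int irq_base)
    {
            struct irq_chip_generic *gc;
            struct irq_chip_type *ct;

            gc = irq_alloc_generic_chip("my-intc", 1, irq_base, base,
                                        handle_level_irq);
            ct = gc->chip_types;
            ct->chip.irq_mask   = irq_gc_mask_disable_reg;
            ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
            ct->regs.enable  = 0x08;        /* hypothetical offsets */
            ct->regs.disable = 0x0c;

            /* hook up all 32 lines and make them requestable */
            irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
                                   IRQ_NOREQUEST | IRQ_NOPROBE, 0);
    }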
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index a082905b5ebe..2d921b35212c 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
@@ -16,16 +16,18 @@ struct timer_rand_state; | |||
16 | * @irq_data: per irq and chip data passed down to chip functions | 16 | * @irq_data: per irq and chip data passed down to chip functions |
17 | * @timer_rand_state: pointer to timer rand state struct | 17 | * @timer_rand_state: pointer to timer rand state struct |
18 | * @kstat_irqs: irq stats per cpu | 18 | * @kstat_irqs: irq stats per cpu |
19 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] | 19 | * @handle_irq: highlevel irq-events handler |
20 | * @preflow_handler: handler called before the flow handler (currently used by sparc) | ||
20 | * @action: the irq action chain | 21 | * @action: the irq action chain |
21 | * @status: status information | 22 | * @status: status information |
22 | * @core_internal_state__do_not_mess_with_it: core internal status information | 23 | * @core_internal_state__do_not_mess_with_it: core internal status information |
23 | * @depth: disable-depth, for nested irq_disable() calls | 24 | * @depth: disable-depth, for nested irq_disable() calls |
24 | * @wake_depth: enable depth, for multiple set_irq_wake() callers | 25 | * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers |
25 | * @irq_count: stats field to detect stalled irqs | 26 | * @irq_count: stats field to detect stalled irqs |
26 | * @last_unhandled: aging timer for unhandled count | 27 | * @last_unhandled: aging timer for unhandled count |
27 | * @irqs_unhandled: stats field for spurious unhandled interrupts | 28 | * @irqs_unhandled: stats field for spurious unhandled interrupts |
28 | * @lock: locking for SMP | 29 | * @lock: locking for SMP |
30 | * @affinity_hint: hint to user space for preferred irq affinity | ||
29 | * @affinity_notify: context for notification of affinity changes | 31 | * @affinity_notify: context for notification of affinity changes |
30 | * @pending_mask: pending rebalanced interrupts | 32 | * @pending_mask: pending rebalanced interrupts |
31 | * @threads_oneshot: bitfield to handle shared oneshot threads | 33 | * @threads_oneshot: bitfield to handle shared oneshot threads |
@@ -109,10 +111,7 @@ static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *de | |||
109 | desc->handle_irq(irq, desc); | 111 | desc->handle_irq(irq, desc); |
110 | } | 112 | } |
111 | 113 | ||
112 | static inline void generic_handle_irq(unsigned int irq) | 114 | int generic_handle_irq(unsigned int irq); |
113 | { | ||
114 | generic_handle_irq_desc(irq, irq_to_desc(irq)); | ||
115 | } | ||
116 | 115 | ||
117 | /* Test to see if a driver has successfully requested an irq */ | 116 | /* Test to see if a driver has successfully requested an irq */ |
118 | static inline int irq_has_action(unsigned int irq) | 117 | static inline int irq_has_action(unsigned int irq) |
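generic_handle_irq() is what chained demultiplexing handlers call per child line; it is now out of line and returns an error for nonexistent irqs instead of dereferencing a NULL descriptor. A sketch of a typical GPIO demux (the register and bases are invented):

    static void __iomem *gpio_base;         /* hypothetical */
    static unsigned int my_gpio_irq_base;

    static void my_gpio_demux(unsigned int irq, struct irq_desc *desc)
    {
            u32 pending = readl(gpio_base + 0x10);  /* pending register */

            while (pending) {
                    int bit = __ffs(pending);

                    generic_handle_irq(my_gpio_irq_base + bit);
                    pending &= ~(1U << bit);
            }
    }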
diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 6efd7a78de6a..310231823852 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h | |||
@@ -113,5 +113,6 @@ extern void usermodehelper_init(void); | |||
113 | 113 | ||
114 | extern int usermodehelper_disable(void); | 114 | extern int usermodehelper_disable(void); |
115 | extern void usermodehelper_enable(void); | 115 | extern void usermodehelper_enable(void); |
116 | extern bool usermodehelper_is_disabled(void); | ||
116 | 117 | ||
117 | #endif /* __LINUX_KMOD_H__ */ | 118 | #endif /* __LINUX_KMOD_H__ */ |
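usermodehelper_is_disabled() lets callers fail fast instead of queuing helpers that cannot run (e.g. during suspend/resume); a sketch in the style of the firmware loader (the helper path is invented):

    static int run_frob_helper(void)
    {
            char *argv[] = { "/sbin/frob-helper", NULL };   /* hypothetical */

            if (usermodehelper_is_disabled())
                    return -EBUSY;  /* e.g. mid-suspend: don't even try */

            return call_usermodehelper(argv[0], argv, NULL, UMH_WAIT_EXEC);
    }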
diff --git a/include/linux/list.h b/include/linux/list.h index 3a54266a1e85..cc6d2aa6b415 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
6 | #include <linux/poison.h> | 6 | #include <linux/poison.h> |
7 | #include <linux/prefetch.h> | 7 | #include <linux/const.h> |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * Simple doubly linked list implementation. | 10 | * Simple doubly linked list implementation. |
@@ -367,18 +367,15 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
367 | * @head: the head for your list. | 367 | * @head: the head for your list. |
368 | */ | 368 | */ |
369 | #define list_for_each(pos, head) \ | 369 | #define list_for_each(pos, head) \ |
370 | for (pos = (head)->next; prefetch(pos->next), pos != (head); \ | 370 | for (pos = (head)->next; pos != (head); pos = pos->next) |
371 | pos = pos->next) | ||
372 | 371 | ||
373 | /** | 372 | /** |
374 | * __list_for_each - iterate over a list | 373 | * __list_for_each - iterate over a list |
375 | * @pos: the &struct list_head to use as a loop cursor. | 374 | * @pos: the &struct list_head to use as a loop cursor. |
376 | * @head: the head for your list. | 375 | * @head: the head for your list. |
377 | * | 376 | * |
378 | * This variant differs from list_for_each() in that it's the | 377 | * This variant doesn't differ from list_for_each() any more. |
379 | * simplest possible list iteration code, no prefetching is done. | 378 | * We don't do prefetching in either case. |
380 | * Use this for code that knows the list to be very short (empty | ||
381 | * or 1 entry) most of the time. | ||
382 | */ | 379 | */ |
383 | #define __list_for_each(pos, head) \ | 380 | #define __list_for_each(pos, head) \ |
384 | for (pos = (head)->next; pos != (head); pos = pos->next) | 381 | for (pos = (head)->next; pos != (head); pos = pos->next) |
@@ -389,8 +386,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
389 | * @head: the head for your list. | 386 | * @head: the head for your list. |
390 | */ | 387 | */ |
391 | #define list_for_each_prev(pos, head) \ | 388 | #define list_for_each_prev(pos, head) \ |
392 | for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ | 389 | for (pos = (head)->prev; pos != (head); pos = pos->prev) |
393 | pos = pos->prev) | ||
394 | 390 | ||
395 | /** | 391 | /** |
396 | * list_for_each_safe - iterate over a list safe against removal of list entry | 392 | * list_for_each_safe - iterate over a list safe against removal of list entry |
@@ -410,7 +406,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
410 | */ | 406 | */ |
411 | #define list_for_each_prev_safe(pos, n, head) \ | 407 | #define list_for_each_prev_safe(pos, n, head) \ |
412 | for (pos = (head)->prev, n = pos->prev; \ | 408 | for (pos = (head)->prev, n = pos->prev; \ |
413 | prefetch(pos->prev), pos != (head); \ | 409 | pos != (head); \ |
414 | pos = n, n = pos->prev) | 410 | pos = n, n = pos->prev) |
415 | 411 | ||
416 | /** | 412 | /** |
@@ -421,7 +417,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
421 | */ | 417 | */ |
422 | #define list_for_each_entry(pos, head, member) \ | 418 | #define list_for_each_entry(pos, head, member) \ |
423 | for (pos = list_entry((head)->next, typeof(*pos), member); \ | 419 | for (pos = list_entry((head)->next, typeof(*pos), member); \ |
424 | prefetch(pos->member.next), &pos->member != (head); \ | 420 | &pos->member != (head); \ |
425 | pos = list_entry(pos->member.next, typeof(*pos), member)) | 421 | pos = list_entry(pos->member.next, typeof(*pos), member)) |
426 | 422 | ||
427 | /** | 423 | /** |
@@ -432,7 +428,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
432 | */ | 428 | */ |
433 | #define list_for_each_entry_reverse(pos, head, member) \ | 429 | #define list_for_each_entry_reverse(pos, head, member) \ |
434 | for (pos = list_entry((head)->prev, typeof(*pos), member); \ | 430 | for (pos = list_entry((head)->prev, typeof(*pos), member); \ |
435 | prefetch(pos->member.prev), &pos->member != (head); \ | 431 | &pos->member != (head); \ |
436 | pos = list_entry(pos->member.prev, typeof(*pos), member)) | 432 | pos = list_entry(pos->member.prev, typeof(*pos), member)) |
437 | 433 | ||
438 | /** | 434 | /** |
@@ -457,7 +453,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
457 | */ | 453 | */ |
458 | #define list_for_each_entry_continue(pos, head, member) \ | 454 | #define list_for_each_entry_continue(pos, head, member) \ |
459 | for (pos = list_entry(pos->member.next, typeof(*pos), member); \ | 455 | for (pos = list_entry(pos->member.next, typeof(*pos), member); \ |
460 | prefetch(pos->member.next), &pos->member != (head); \ | 456 | &pos->member != (head); \ |
461 | pos = list_entry(pos->member.next, typeof(*pos), member)) | 457 | pos = list_entry(pos->member.next, typeof(*pos), member)) |
462 | 458 | ||
463 | /** | 459 | /** |
@@ -471,7 +467,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
471 | */ | 467 | */ |
472 | #define list_for_each_entry_continue_reverse(pos, head, member) \ | 468 | #define list_for_each_entry_continue_reverse(pos, head, member) \ |
473 | for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ | 469 | for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ |
474 | prefetch(pos->member.prev), &pos->member != (head); \ | 470 | &pos->member != (head); \ |
475 | pos = list_entry(pos->member.prev, typeof(*pos), member)) | 471 | pos = list_entry(pos->member.prev, typeof(*pos), member)) |
476 | 472 | ||
477 | /** | 473 | /** |
@@ -483,7 +479,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
483 | * Iterate over list of given type, continuing from current position. | 479 | * Iterate over list of given type, continuing from current position. |
484 | */ | 480 | */ |
485 | #define list_for_each_entry_from(pos, head, member) \ | 481 | #define list_for_each_entry_from(pos, head, member) \ |
486 | for (; prefetch(pos->member.next), &pos->member != (head); \ | 482 | for (; &pos->member != (head); \ |
487 | pos = list_entry(pos->member.next, typeof(*pos), member)) | 483 | pos = list_entry(pos->member.next, typeof(*pos), member)) |
488 | 484 | ||
489 | /** | 485 | /** |
@@ -664,8 +660,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
664 | #define hlist_entry(ptr, type, member) container_of(ptr,type,member) | 660 | #define hlist_entry(ptr, type, member) container_of(ptr,type,member) |
665 | 661 | ||
666 | #define hlist_for_each(pos, head) \ | 662 | #define hlist_for_each(pos, head) \ |
667 | for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ | 663 | for (pos = (head)->first; pos ; pos = pos->next) |
668 | pos = pos->next) | ||
669 | 664 | ||
670 | #define hlist_for_each_safe(pos, n, head) \ | 665 | #define hlist_for_each_safe(pos, n, head) \ |
671 | for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ | 666 | for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ |
@@ -680,7 +675,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
680 | */ | 675 | */ |
681 | #define hlist_for_each_entry(tpos, pos, head, member) \ | 676 | #define hlist_for_each_entry(tpos, pos, head, member) \ |
682 | for (pos = (head)->first; \ | 677 | for (pos = (head)->first; \ |
683 | pos && ({ prefetch(pos->next); 1;}) && \ | 678 | pos && \ |
684 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | 679 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
685 | pos = pos->next) | 680 | pos = pos->next) |
686 | 681 | ||
@@ -692,7 +687,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
692 | */ | 687 | */ |
693 | #define hlist_for_each_entry_continue(tpos, pos, member) \ | 688 | #define hlist_for_each_entry_continue(tpos, pos, member) \ |
694 | for (pos = (pos)->next; \ | 689 | for (pos = (pos)->next; \ |
695 | pos && ({ prefetch(pos->next); 1;}) && \ | 690 | pos && \ |
696 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | 691 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
697 | pos = pos->next) | 692 | pos = pos->next) |
698 | 693 | ||
@@ -703,7 +698,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
703 | * @member: the name of the hlist_node within the struct. | 698 | * @member: the name of the hlist_node within the struct. |
704 | */ | 699 | */ |
705 | #define hlist_for_each_entry_from(tpos, pos, member) \ | 700 | #define hlist_for_each_entry_from(tpos, pos, member) \ |
706 | for (; pos && ({ prefetch(pos->next); 1;}) && \ | 701 | for (; pos && \ |
707 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | 702 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
708 | pos = pos->next) | 703 | pos = pos->next) |
709 | 704 | ||
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index eb792cb6d745..bcb793ec7374 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -183,6 +183,7 @@ struct mmc_host { | |||
183 | struct work_struct clk_gate_work; /* delayed clock gate */ | 183 | struct work_struct clk_gate_work; /* delayed clock gate */ |
184 | unsigned int clk_old; /* old clock value cache */ | 184 | unsigned int clk_old; /* old clock value cache */ |
185 | spinlock_t clk_lock; /* lock for clk fields */ | 185 | spinlock_t clk_lock; /* lock for clk fields */ |
186 | struct mutex clk_gate_mutex; /* mutex for clock gating */ | ||
186 | #endif | 187 | #endif |
187 | 188 | ||
188 | /* host specific block data */ | 189 | /* host specific block data */ |
diff --git a/include/linux/module.h b/include/linux/module.h index 5de42043dff0..d9ca2d5dc6d0 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -64,6 +64,9 @@ struct module_version_attribute { | |||
64 | const char *version; | 64 | const char *version; |
65 | } __attribute__ ((__aligned__(sizeof(void *)))); | 65 | } __attribute__ ((__aligned__(sizeof(void *)))); |
66 | 66 | ||
67 | extern ssize_t __modver_version_show(struct module_attribute *, | ||
68 | struct module *, char *); | ||
69 | |||
67 | struct module_kobject | 70 | struct module_kobject |
68 | { | 71 | { |
69 | struct kobject kobj; | 72 | struct kobject kobj; |
@@ -172,12 +175,7 @@ extern struct module __this_module; | |||
172 | #define MODULE_VERSION(_version) MODULE_INFO(version, _version) | 175 | #define MODULE_VERSION(_version) MODULE_INFO(version, _version) |
173 | #else | 176 | #else |
174 | #define MODULE_VERSION(_version) \ | 177 | #define MODULE_VERSION(_version) \ |
175 | extern ssize_t __modver_version_show(struct module_attribute *, \ | 178 | static struct module_version_attribute ___modver_attr = { \ |
176 | struct module *, char *); \ | ||
177 | static struct module_version_attribute __modver_version_attr \ | ||
178 | __used \ | ||
179 | __attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \ | ||
180 | = { \ | ||
181 | .mattr = { \ | 179 | .mattr = { \ |
182 | .attr = { \ | 180 | .attr = { \ |
183 | .name = "version", \ | 181 | .name = "version", \ |
@@ -187,7 +185,10 @@ extern struct module __this_module; | |||
187 | }, \ | 185 | }, \ |
188 | .module_name = KBUILD_MODNAME, \ | 186 | .module_name = KBUILD_MODNAME, \ |
189 | .version = _version, \ | 187 | .version = _version, \ |
190 | } | 188 | }; \ |
189 | static const struct module_version_attribute \ | ||
190 | __used __attribute__ ((__section__ ("__modver"))) \ | ||
191 | * __moduleparam_const __modver_attr = &___modver_attr | ||
191 | #endif | 192 | #endif |
192 | 193 | ||
193 | /* Optional firmware file (or files) needed by the module | 194 | /* Optional firmware file (or files) needed by the module |
@@ -223,7 +224,7 @@ struct module_use { | |||
223 | extern void *__crc_##sym __attribute__((weak)); \ | 224 | extern void *__crc_##sym __attribute__((weak)); \ |
224 | static const unsigned long __kcrctab_##sym \ | 225 | static const unsigned long __kcrctab_##sym \ |
225 | __used \ | 226 | __used \ |
226 | __attribute__((section("__kcrctab" sec), unused)) \ | 227 | __attribute__((section("___kcrctab" sec "+" #sym), unused)) \ |
227 | = (unsigned long) &__crc_##sym; | 228 | = (unsigned long) &__crc_##sym; |
228 | #else | 229 | #else |
229 | #define __CRC_SYMBOL(sym, sec) | 230 | #define __CRC_SYMBOL(sym, sec) |
@@ -238,7 +239,7 @@ struct module_use { | |||
238 | = MODULE_SYMBOL_PREFIX #sym; \ | 239 | = MODULE_SYMBOL_PREFIX #sym; \ |
239 | static const struct kernel_symbol __ksymtab_##sym \ | 240 | static const struct kernel_symbol __ksymtab_##sym \ |
240 | __used \ | 241 | __used \ |
241 | __attribute__((section("__ksymtab" sec), unused)) \ | 242 | __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ |
242 | = { (unsigned long)&sym, __kstrtab_##sym } | 243 | = { (unsigned long)&sym, __kstrtab_##sym } |
243 | 244 | ||
244 | #define EXPORT_SYMBOL(sym) \ | 245 | #define EXPORT_SYMBOL(sym) \ |
@@ -367,34 +368,35 @@ struct module | |||
367 | struct module_notes_attrs *notes_attrs; | 368 | struct module_notes_attrs *notes_attrs; |
368 | #endif | 369 | #endif |
369 | 370 | ||
371 | /* The command line arguments (may be mangled). People like | ||
372 | keeping pointers to this stuff */ | ||
373 | char *args; | ||
374 | |||
370 | #ifdef CONFIG_SMP | 375 | #ifdef CONFIG_SMP |
371 | /* Per-cpu data. */ | 376 | /* Per-cpu data. */ |
372 | void __percpu *percpu; | 377 | void __percpu *percpu; |
373 | unsigned int percpu_size; | 378 | unsigned int percpu_size; |
374 | #endif | 379 | #endif |
375 | 380 | ||
376 | /* The command line arguments (may be mangled). People like | ||
377 | keeping pointers to this stuff */ | ||
378 | char *args; | ||
379 | #ifdef CONFIG_TRACEPOINTS | 381 | #ifdef CONFIG_TRACEPOINTS |
380 | struct tracepoint * const *tracepoints_ptrs; | ||
381 | unsigned int num_tracepoints; | 382 | unsigned int num_tracepoints; |
383 | struct tracepoint * const *tracepoints_ptrs; | ||
382 | #endif | 384 | #endif |
383 | #ifdef HAVE_JUMP_LABEL | 385 | #ifdef HAVE_JUMP_LABEL |
384 | struct jump_entry *jump_entries; | 386 | struct jump_entry *jump_entries; |
385 | unsigned int num_jump_entries; | 387 | unsigned int num_jump_entries; |
386 | #endif | 388 | #endif |
387 | #ifdef CONFIG_TRACING | 389 | #ifdef CONFIG_TRACING |
388 | const char **trace_bprintk_fmt_start; | ||
389 | unsigned int num_trace_bprintk_fmt; | 390 | unsigned int num_trace_bprintk_fmt; |
391 | const char **trace_bprintk_fmt_start; | ||
390 | #endif | 392 | #endif |
391 | #ifdef CONFIG_EVENT_TRACING | 393 | #ifdef CONFIG_EVENT_TRACING |
392 | struct ftrace_event_call **trace_events; | 394 | struct ftrace_event_call **trace_events; |
393 | unsigned int num_trace_events; | 395 | unsigned int num_trace_events; |
394 | #endif | 396 | #endif |
395 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 397 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
396 | unsigned long *ftrace_callsites; | ||
397 | unsigned int num_ftrace_callsites; | 398 | unsigned int num_ftrace_callsites; |
399 | unsigned long *ftrace_callsites; | ||
398 | #endif | 400 | #endif |
399 | 401 | ||
400 | #ifdef CONFIG_MODULE_UNLOAD | 402 | #ifdef CONFIG_MODULE_UNLOAD |
@@ -475,8 +477,9 @@ const struct kernel_symbol *find_symbol(const char *name, | |||
475 | bool warn); | 477 | bool warn); |
476 | 478 | ||
477 | /* Walk the exported symbol table */ | 479 | /* Walk the exported symbol table */ |
478 | bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, | 480 | bool each_symbol_section(bool (*fn)(const struct symsearch *arr, |
479 | unsigned int symnum, void *data), void *data); | 481 | struct module *owner, |
482 | void *data), void *data); | ||
480 | 483 | ||
481 | /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if | 484 | /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if |
482 | symnum out of range. */ | 485 | symnum out of range. */ |
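The MODULE_VERSION() rework above stops open-coding the attribute in every expansion: __modver_version_show() is now declared once in the header, and the __modver section records a pointer to the attribute rather than the attribute itself. For module authors the macro is used exactly as before; a minimal sketch:

#include <linux/module.h>

MODULE_LICENSE("GPL");
MODULE_VERSION("1.2.3");	/* now emits a section pointer internally */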
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 07b41951e3fa..ddaae98c53f9 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
@@ -67,9 +67,9 @@ struct kparam_string { | |||
67 | struct kparam_array | 67 | struct kparam_array |
68 | { | 68 | { |
69 | unsigned int max; | 69 | unsigned int max; |
70 | unsigned int elemsize; | ||
70 | unsigned int *num; | 71 | unsigned int *num; |
71 | const struct kernel_param_ops *ops; | 72 | const struct kernel_param_ops *ops; |
72 | unsigned int elemsize; | ||
73 | void *elem; | 73 | void *elem; |
74 | }; | 74 | }; |
75 | 75 | ||
@@ -371,8 +371,9 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp); | |||
371 | */ | 371 | */ |
372 | #define module_param_array_named(name, array, type, nump, perm) \ | 372 | #define module_param_array_named(name, array, type, nump, perm) \ |
373 | static const struct kparam_array __param_arr_##name \ | 373 | static const struct kparam_array __param_arr_##name \ |
374 | = { ARRAY_SIZE(array), nump, ¶m_ops_##type, \ | 374 | = { .max = ARRAY_SIZE(array), .num = nump, \ |
375 | sizeof(array[0]), array }; \ | 375 | .ops = ¶m_ops_##type, \ |
376 | .elemsize = sizeof(array[0]), .elem = array }; \ | ||
376 | __module_param_call(MODULE_PARAM_PREFIX, name, \ | 377 | __module_param_call(MODULE_PARAM_PREFIX, name, \ |
377 | ¶m_array_ops, \ | 378 | ¶m_array_ops, \ |
378 | .arr = &__param_arr_##name, \ | 379 | .arr = &__param_arr_##name, \ |
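With kparam_array switched to designated initializers above, the initializer no longer has to track the struct's field order, which is what makes the elemsize move safe. A usage sketch of an array parameter; the names are illustrative, not from this patch:

#include <linux/moduleparam.h>

/* Illustrative module parameter array. */
static int channels[4];
static unsigned int nchannels;

/* Readable at /sys/module/<name>/parameters/channels; nchannels is
 * updated with the number of values supplied on the command line. */
module_param_array(channels, int, &nchannels, 0444);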
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 94b48bd40dd7..c75471db576e 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -51,7 +51,7 @@ struct mutex { | |||
51 | spinlock_t wait_lock; | 51 | spinlock_t wait_lock; |
52 | struct list_head wait_list; | 52 | struct list_head wait_list; |
53 | #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) | 53 | #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) |
54 | struct thread_info *owner; | 54 | struct task_struct *owner; |
55 | #endif | 55 | #endif |
56 | #ifdef CONFIG_DEBUG_MUTEXES | 56 | #ifdef CONFIG_DEBUG_MUTEXES |
57 | const char *name; | 57 | const char *name; |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 890dce242639..7e371f7df9c4 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -233,6 +233,7 @@ struct nfs4_layoutget { | |||
233 | struct nfs4_layoutget_args args; | 233 | struct nfs4_layoutget_args args; |
234 | struct nfs4_layoutget_res res; | 234 | struct nfs4_layoutget_res res; |
235 | struct pnfs_layout_segment **lsegpp; | 235 | struct pnfs_layout_segment **lsegpp; |
236 | gfp_t gfp_flags; | ||
236 | }; | 237 | }; |
237 | 238 | ||
238 | struct nfs4_getdeviceinfo_args { | 239 | struct nfs4_getdeviceinfo_args { |
diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 8bfe6c1d4365..ae5638480ef2 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h | |||
@@ -21,8 +21,7 @@ extern void of_device_make_bus_id(struct device *dev); | |||
21 | static inline int of_driver_match_device(struct device *dev, | 21 | static inline int of_driver_match_device(struct device *dev, |
22 | const struct device_driver *drv) | 22 | const struct device_driver *drv) |
23 | { | 23 | { |
24 | dev->of_match = of_match_device(drv->of_match_table, dev); | 24 | return of_match_device(drv->of_match_table, dev) != NULL; |
25 | return dev->of_match != NULL; | ||
26 | } | 25 | } |
27 | 26 | ||
28 | extern struct platform_device *of_dev_get(struct platform_device *dev); | 27 | extern struct platform_device *of_dev_get(struct platform_device *dev); |
@@ -58,6 +57,11 @@ static inline int of_device_uevent(struct device *dev, | |||
58 | 57 | ||
59 | static inline void of_device_node_put(struct device *dev) { } | 58 | static inline void of_device_node_put(struct device *dev) { } |
60 | 59 | ||
60 | static inline const struct of_device_id *of_match_device( | ||
61 | const struct of_device_id *matches, const struct device *dev) | ||
62 | { | ||
63 | return NULL; | ||
64 | } | ||
61 | #endif /* CONFIG_OF_DEVICE */ | 65 | #endif /* CONFIG_OF_DEVICE */ |
62 | 66 | ||
63 | #endif /* _LINUX_OF_DEVICE_H */ | 67 | #endif /* _LINUX_OF_DEVICE_H */ |
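The of_device.h change drops the cached dev->of_match assignment and, for !CONFIG_OF_DEVICE builds, adds an of_match_device() stub returning NULL so callers need no #ifdef. A small sketch of a caller; the compatible string is hypothetical:

#include <linux/of_device.h>

/* Illustrative match table; the compatible string is hypothetical. */
static const struct of_device_id foo_of_match[] = {
	{ .compatible = "acme,foo" },
	{ /* sentinel */ }
};

static bool foo_matches(struct device *dev)
{
	/* With !CONFIG_OF_DEVICE the new stub simply returns NULL. */
	return of_match_device(foo_of_match, dev) != NULL;
}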
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h new file mode 100644 index 000000000000..655824fa4c76 --- /dev/null +++ b/include/linux/pci-ats.h | |||
@@ -0,0 +1,52 @@ | |||
1 | #ifndef LINUX_PCI_ATS_H | ||
2 | #define LINUX_PCI_ATS_H | ||
3 | |||
4 | /* Address Translation Service */ | ||
5 | struct pci_ats { | ||
6 | int pos; /* capability position */ | ||
7 | int stu; /* Smallest Translation Unit */ | ||
8 | int qdep; /* Invalidate Queue Depth */ | ||
9 | int ref_cnt; /* Physical Function reference count */ | ||
10 | unsigned int is_enabled:1; /* Enable bit is set */ | ||
11 | }; | ||
12 | |||
13 | #ifdef CONFIG_PCI_IOV | ||
14 | |||
15 | extern int pci_enable_ats(struct pci_dev *dev, int ps); | ||
16 | extern void pci_disable_ats(struct pci_dev *dev); | ||
17 | extern int pci_ats_queue_depth(struct pci_dev *dev); | ||
18 | /** | ||
19 | * pci_ats_enabled - query the ATS status | ||
20 | * @dev: the PCI device | ||
21 | * | ||
22 | * Returns 1 if the ATS capability is enabled, or 0 if not. | ||
23 | */ | ||
24 | static inline int pci_ats_enabled(struct pci_dev *dev) | ||
25 | { | ||
26 | return dev->ats && dev->ats->is_enabled; | ||
27 | } | ||
28 | |||
29 | #else /* CONFIG_PCI_IOV */ | ||
30 | |||
31 | static inline int pci_enable_ats(struct pci_dev *dev, int ps) | ||
32 | { | ||
33 | return -ENODEV; | ||
34 | } | ||
35 | |||
36 | static inline void pci_disable_ats(struct pci_dev *dev) | ||
37 | { | ||
38 | } | ||
39 | |||
40 | static inline int pci_ats_queue_depth(struct pci_dev *dev) | ||
41 | { | ||
42 | return -ENODEV; | ||
43 | } | ||
44 | |||
45 | static inline int pci_ats_enabled(struct pci_dev *dev) | ||
46 | { | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | #endif /* CONFIG_PCI_IOV */ | ||
51 | |||
52 | #endif /* LINUX_PCI_ATS_H*/ | ||
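The new pci-ats.h moves the ATS interface into its own header, with stubs when CONFIG_PCI_IOV is off. A hedged sketch of a caller such as an IOMMU driver; the function name is hypothetical, and passing PAGE_SHIFT as the smallest-translation-unit argument follows common usage rather than anything in this header:

#include <linux/pci.h>
#include <linux/pci-ats.h>

static int example_setup_ats(struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_ats(pdev, PAGE_SHIFT);
	if (ret)
		return ret;		/* -ENODEV without CONFIG_PCI_IOV */

	if (pci_ats_enabled(pdev))
		dev_info(&pdev->dev, "ATS enabled, queue depth %d\n",
			 pci_ats_queue_depth(pdev));
	return 0;
}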
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 744942c95fec..ede1a80e3358 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -150,9 +150,6 @@ extern struct platform_device *platform_create_bundle(struct platform_driver *dr | |||
150 | struct resource *res, unsigned int n_res, | 150 | struct resource *res, unsigned int n_res, |
151 | const void *data, size_t size); | 151 | const void *data, size_t size); |
152 | 152 | ||
153 | extern const struct dev_pm_ops * platform_bus_get_pm_ops(void); | ||
154 | extern void platform_bus_set_pm_ops(const struct dev_pm_ops *pm); | ||
155 | |||
156 | /* early platform driver interface */ | 153 | /* early platform driver interface */ |
157 | struct early_platform_driver { | 154 | struct early_platform_driver { |
158 | const char *class_str; | 155 | const char *class_str; |
@@ -205,4 +202,64 @@ static inline char *early_platform_driver_setup_func(void) \ | |||
205 | } | 202 | } |
206 | #endif /* MODULE */ | 203 | #endif /* MODULE */ |
207 | 204 | ||
205 | #ifdef CONFIG_PM_SLEEP | ||
206 | extern int platform_pm_prepare(struct device *dev); | ||
207 | extern void platform_pm_complete(struct device *dev); | ||
208 | #else | ||
209 | #define platform_pm_prepare NULL | ||
210 | #define platform_pm_complete NULL | ||
211 | #endif | ||
212 | |||
213 | #ifdef CONFIG_SUSPEND | ||
214 | extern int platform_pm_suspend(struct device *dev); | ||
215 | extern int platform_pm_suspend_noirq(struct device *dev); | ||
216 | extern int platform_pm_resume(struct device *dev); | ||
217 | extern int platform_pm_resume_noirq(struct device *dev); | ||
218 | #else | ||
219 | #define platform_pm_suspend NULL | ||
220 | #define platform_pm_resume NULL | ||
221 | #define platform_pm_suspend_noirq NULL | ||
222 | #define platform_pm_resume_noirq NULL | ||
223 | #endif | ||
224 | |||
225 | #ifdef CONFIG_HIBERNATE_CALLBACKS | ||
226 | extern int platform_pm_freeze(struct device *dev); | ||
227 | extern int platform_pm_freeze_noirq(struct device *dev); | ||
228 | extern int platform_pm_thaw(struct device *dev); | ||
229 | extern int platform_pm_thaw_noirq(struct device *dev); | ||
230 | extern int platform_pm_poweroff(struct device *dev); | ||
231 | extern int platform_pm_poweroff_noirq(struct device *dev); | ||
232 | extern int platform_pm_restore(struct device *dev); | ||
233 | extern int platform_pm_restore_noirq(struct device *dev); | ||
234 | #else | ||
235 | #define platform_pm_freeze NULL | ||
236 | #define platform_pm_thaw NULL | ||
237 | #define platform_pm_poweroff NULL | ||
238 | #define platform_pm_restore NULL | ||
239 | #define platform_pm_freeze_noirq NULL | ||
240 | #define platform_pm_thaw_noirq NULL | ||
241 | #define platform_pm_poweroff_noirq NULL | ||
242 | #define platform_pm_restore_noirq NULL | ||
243 | #endif | ||
244 | |||
245 | #ifdef CONFIG_PM_SLEEP | ||
246 | #define USE_PLATFORM_PM_SLEEP_OPS \ | ||
247 | .prepare = platform_pm_prepare, \ | ||
248 | .complete = platform_pm_complete, \ | ||
249 | .suspend = platform_pm_suspend, \ | ||
250 | .resume = platform_pm_resume, \ | ||
251 | .freeze = platform_pm_freeze, \ | ||
252 | .thaw = platform_pm_thaw, \ | ||
253 | .poweroff = platform_pm_poweroff, \ | ||
254 | .restore = platform_pm_restore, \ | ||
255 | .suspend_noirq = platform_pm_suspend_noirq, \ | ||
256 | .resume_noirq = platform_pm_resume_noirq, \ | ||
257 | .freeze_noirq = platform_pm_freeze_noirq, \ | ||
258 | .thaw_noirq = platform_pm_thaw_noirq, \ | ||
259 | .poweroff_noirq = platform_pm_poweroff_noirq, \ | ||
260 | .restore_noirq = platform_pm_restore_noirq, | ||
261 | #else | ||
262 | #define USE_PLATFORM_PM_SLEEP_OPS | ||
263 | #endif | ||
264 | |||
208 | #endif /* _PLATFORM_DEVICE_H_ */ | 265 | #endif /* _PLATFORM_DEVICE_H_ */ |
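The exported platform_pm_* callbacks and the USE_PLATFORM_PM_SLEEP_OPS macro let a platform driver that supplies its own dev_pm_ops (for example, to add runtime PM methods) still inherit the bus's default sleep callbacks. A sketch under that assumption, with hypothetical driver names:

#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hypothetical driver-specific runtime PM hooks. */
static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	USE_PLATFORM_PM_SLEEP_OPS	/* fills in the platform_pm_* sleep callbacks */
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume = foo_runtime_resume,
};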
diff --git a/include/linux/pm.h b/include/linux/pm.h index 512e09177e57..3160648ccdda 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -460,6 +460,7 @@ struct dev_pm_info { | |||
460 | unsigned long active_jiffies; | 460 | unsigned long active_jiffies; |
461 | unsigned long suspended_jiffies; | 461 | unsigned long suspended_jiffies; |
462 | unsigned long accounting_timestamp; | 462 | unsigned long accounting_timestamp; |
463 | void *subsys_data; /* Owned by the subsystem. */ | ||
463 | #endif | 464 | #endif |
464 | }; | 465 | }; |
465 | 466 | ||
@@ -529,21 +530,17 @@ struct dev_power_domain { | |||
529 | */ | 530 | */ |
530 | 531 | ||
531 | #ifdef CONFIG_PM_SLEEP | 532 | #ifdef CONFIG_PM_SLEEP |
532 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | ||
533 | extern int sysdev_suspend(pm_message_t state); | ||
534 | extern int sysdev_resume(void); | ||
535 | #else | ||
536 | static inline int sysdev_suspend(pm_message_t state) { return 0; } | ||
537 | static inline int sysdev_resume(void) { return 0; } | ||
538 | #endif | ||
539 | |||
540 | extern void device_pm_lock(void); | 533 | extern void device_pm_lock(void); |
541 | extern void dpm_resume_noirq(pm_message_t state); | 534 | extern void dpm_resume_noirq(pm_message_t state); |
542 | extern void dpm_resume_end(pm_message_t state); | 535 | extern void dpm_resume_end(pm_message_t state); |
536 | extern void dpm_resume(pm_message_t state); | ||
537 | extern void dpm_complete(pm_message_t state); | ||
543 | 538 | ||
544 | extern void device_pm_unlock(void); | 539 | extern void device_pm_unlock(void); |
545 | extern int dpm_suspend_noirq(pm_message_t state); | 540 | extern int dpm_suspend_noirq(pm_message_t state); |
546 | extern int dpm_suspend_start(pm_message_t state); | 541 | extern int dpm_suspend_start(pm_message_t state); |
542 | extern int dpm_suspend(pm_message_t state); | ||
543 | extern int dpm_prepare(pm_message_t state); | ||
547 | 544 | ||
548 | extern void __suspend_report_result(const char *function, void *fn, int ret); | 545 | extern void __suspend_report_result(const char *function, void *fn, int ret); |
549 | 546 | ||
@@ -553,6 +550,16 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); | |||
553 | } while (0) | 550 | } while (0) |
554 | 551 | ||
555 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); | 552 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); |
553 | |||
554 | extern int pm_generic_prepare(struct device *dev); | ||
555 | extern int pm_generic_suspend(struct device *dev); | ||
556 | extern int pm_generic_resume(struct device *dev); | ||
557 | extern int pm_generic_freeze(struct device *dev); | ||
558 | extern int pm_generic_thaw(struct device *dev); | ||
559 | extern int pm_generic_restore(struct device *dev); | ||
560 | extern int pm_generic_poweroff(struct device *dev); | ||
561 | extern void pm_generic_complete(struct device *dev); | ||
562 | |||
556 | #else /* !CONFIG_PM_SLEEP */ | 563 | #else /* !CONFIG_PM_SLEEP */ |
557 | 564 | ||
558 | #define device_pm_lock() do {} while (0) | 565 | #define device_pm_lock() do {} while (0) |
@@ -569,6 +576,15 @@ static inline int device_pm_wait_for_dev(struct device *a, struct device *b) | |||
569 | { | 576 | { |
570 | return 0; | 577 | return 0; |
571 | } | 578 | } |
579 | |||
580 | #define pm_generic_prepare NULL | ||
581 | #define pm_generic_suspend NULL | ||
582 | #define pm_generic_resume NULL | ||
583 | #define pm_generic_freeze NULL | ||
584 | #define pm_generic_thaw NULL | ||
585 | #define pm_generic_restore NULL | ||
586 | #define pm_generic_poweroff NULL | ||
587 | #define pm_generic_complete NULL | ||
572 | #endif /* !CONFIG_PM_SLEEP */ | 588 | #endif /* !CONFIG_PM_SLEEP */ |
573 | 589 | ||
574 | /* How to reorder dpm_list after device_move() */ | 590 | /* How to reorder dpm_list after device_move() */ |
@@ -579,11 +595,4 @@ enum dpm_order { | |||
579 | DPM_ORDER_DEV_LAST, | 595 | DPM_ORDER_DEV_LAST, |
580 | }; | 596 | }; |
581 | 597 | ||
582 | extern int pm_generic_suspend(struct device *dev); | ||
583 | extern int pm_generic_resume(struct device *dev); | ||
584 | extern int pm_generic_freeze(struct device *dev); | ||
585 | extern int pm_generic_thaw(struct device *dev); | ||
586 | extern int pm_generic_restore(struct device *dev); | ||
587 | extern int pm_generic_poweroff(struct device *dev); | ||
588 | |||
589 | #endif /* _LINUX_PM_H */ | 598 | #endif /* _LINUX_PM_H */ |
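Moving the pm_generic_* declarations under CONFIG_PM_SLEEP, with NULL fallbacks otherwise, means subsystems can reference them unconditionally in static initializers. A brief sketch; the ops structure name is illustrative:

#include <linux/pm.h>

/* With !CONFIG_PM_SLEEP these names are NULL, which remains a valid
 * static initializer, so no #ifdef is needed around the table. */
static const struct dev_pm_ops example_class_pm_ops = {
	.prepare = pm_generic_prepare,
	.suspend = pm_generic_suspend,
	.resume  = pm_generic_resume,
};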
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 8de9aa6e7def..878cf84baeb1 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
@@ -245,4 +245,46 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) | |||
245 | __pm_runtime_use_autosuspend(dev, false); | 245 | __pm_runtime_use_autosuspend(dev, false); |
246 | } | 246 | } |
247 | 247 | ||
248 | struct pm_clk_notifier_block { | ||
249 | struct notifier_block nb; | ||
250 | struct dev_power_domain *pwr_domain; | ||
251 | char *con_ids[]; | ||
252 | }; | ||
253 | |||
254 | #ifdef CONFIG_PM_RUNTIME_CLK | ||
255 | extern int pm_runtime_clk_init(struct device *dev); | ||
256 | extern void pm_runtime_clk_destroy(struct device *dev); | ||
257 | extern int pm_runtime_clk_add(struct device *dev, const char *con_id); | ||
258 | extern void pm_runtime_clk_remove(struct device *dev, const char *con_id); | ||
259 | extern int pm_runtime_clk_suspend(struct device *dev); | ||
260 | extern int pm_runtime_clk_resume(struct device *dev); | ||
261 | #else | ||
262 | static inline int pm_runtime_clk_init(struct device *dev) | ||
263 | { | ||
264 | return -EINVAL; | ||
265 | } | ||
266 | static inline void pm_runtime_clk_destroy(struct device *dev) | ||
267 | { | ||
268 | } | ||
269 | static inline int pm_runtime_clk_add(struct device *dev, const char *con_id) | ||
270 | { | ||
271 | return -EINVAL; | ||
272 | } | ||
273 | static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id) | ||
274 | { | ||
275 | } | ||
276 | #define pm_runtime_clk_suspend NULL | ||
277 | #define pm_runtime_clk_resume NULL | ||
278 | #endif | ||
279 | |||
280 | #ifdef CONFIG_HAVE_CLK | ||
281 | extern void pm_runtime_clk_add_notifier(struct bus_type *bus, | ||
282 | struct pm_clk_notifier_block *clknb); | ||
283 | #else | ||
284 | static inline void pm_runtime_clk_add_notifier(struct bus_type *bus, | ||
285 | struct pm_clk_notifier_block *clknb) | ||
286 | { | ||
287 | } | ||
288 | #endif | ||
289 | |||
248 | #endif | 290 | #endif |
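The new pm_clk_notifier_block lets platform code hand clock management of a whole bus to the runtime PM core. A hedged sketch under the assumption that pm_runtime_clk_add_notifier() installs the notifier callback itself, so users only list the connection IDs; the clock names below are illustrative:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Manage the "fck" and "ick" clocks of every device added to the
 * platform bus; the con_ids list is NULL-terminated. */
static struct pm_clk_notifier_block platform_bus_clknb = {
	.con_ids = { "fck", "ick", NULL },
};

static int __init example_pm_clk_setup(void)
{
	pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_clknb);
	return 0;
}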
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index d51243ae0726..808227d40a64 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/list.h> | 5 | #include <linux/list.h> |
6 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
7 | #include <linux/timex.h> | 7 | #include <linux/timex.h> |
8 | #include <linux/alarmtimer.h> | ||
8 | 9 | ||
9 | union cpu_time_count { | 10 | union cpu_time_count { |
10 | cputime_t cpu; | 11 | cputime_t cpu; |
@@ -80,6 +81,7 @@ struct k_itimer { | |||
80 | unsigned long incr; | 81 | unsigned long incr; |
81 | unsigned long expires; | 82 | unsigned long expires; |
82 | } mmtimer; | 83 | } mmtimer; |
84 | struct alarm alarmtimer; | ||
83 | } it; | 85 | } it; |
84 | }; | 86 | }; |
85 | 87 | ||
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 838c1149251a..eaf4350c0f90 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -208,6 +208,8 @@ static inline struct proc_dir_entry *proc_symlink(const char *name, | |||
208 | struct proc_dir_entry *parent,const char *dest) {return NULL;} | 208 | struct proc_dir_entry *parent,const char *dest) {return NULL;} |
209 | static inline struct proc_dir_entry *proc_mkdir(const char *name, | 209 | static inline struct proc_dir_entry *proc_mkdir(const char *name, |
210 | struct proc_dir_entry *parent) {return NULL;} | 210 | struct proc_dir_entry *parent) {return NULL;} |
211 | static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, | ||
212 | mode_t mode, struct proc_dir_entry *parent) { return NULL; } | ||
211 | 213 | ||
212 | static inline struct proc_dir_entry *create_proc_read_entry(const char *name, | 214 | static inline struct proc_dir_entry *create_proc_read_entry(const char *name, |
213 | mode_t mode, struct proc_dir_entry *base, | 215 | mode_t mode, struct proc_dir_entry *base, |
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 7066acb2c530..033b507b33b1 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h | |||
@@ -136,6 +136,14 @@ static inline void rb_set_color(struct rb_node *rb, int color) | |||
136 | #define RB_EMPTY_NODE(node) (rb_parent(node) == node) | 136 | #define RB_EMPTY_NODE(node) (rb_parent(node) == node) |
137 | #define RB_CLEAR_NODE(node) (rb_set_parent(node, node)) | 137 | #define RB_CLEAR_NODE(node) (rb_set_parent(node, node)) |
138 | 138 | ||
139 | static inline void rb_init_node(struct rb_node *rb) | ||
140 | { | ||
141 | rb->rb_parent_color = 0; | ||
142 | rb->rb_right = NULL; | ||
143 | rb->rb_left = NULL; | ||
144 | RB_CLEAR_NODE(rb); | ||
145 | } | ||
146 | |||
139 | extern void rb_insert_color(struct rb_node *, struct rb_root *); | 147 | extern void rb_insert_color(struct rb_node *, struct rb_root *); |
140 | extern void rb_erase(struct rb_node *, struct rb_root *); | 148 | extern void rb_erase(struct rb_node *, struct rb_root *); |
141 | 149 | ||
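rb_init_node() gives callers a way to put a node into the state that RB_EMPTY_NODE() tests for before the node is ever inserted. A small sketch; the entry type is illustrative:

#include <linux/rbtree.h>

struct entry {
	int key;			/* illustrative payload */
	struct rb_node rb;
};

static void entry_prepare(struct entry *e, int key)
{
	e->key = key;
	rb_init_node(&e->rb);		/* RB_EMPTY_NODE(&e->rb) is now true */
}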
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 2dea94fc4402..e3beb315517a 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -253,7 +253,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
253 | */ | 253 | */ |
254 | #define list_for_each_entry_rcu(pos, head, member) \ | 254 | #define list_for_each_entry_rcu(pos, head, member) \ |
255 | for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ | 255 | for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ |
256 | prefetch(pos->member.next), &pos->member != (head); \ | 256 | &pos->member != (head); \ |
257 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) | 257 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
258 | 258 | ||
259 | 259 | ||
@@ -270,7 +270,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
270 | */ | 270 | */ |
271 | #define list_for_each_continue_rcu(pos, head) \ | 271 | #define list_for_each_continue_rcu(pos, head) \ |
272 | for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ | 272 | for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ |
273 | prefetch((pos)->next), (pos) != (head); \ | 273 | (pos) != (head); \ |
274 | (pos) = rcu_dereference_raw(list_next_rcu(pos))) | 274 | (pos) = rcu_dereference_raw(list_next_rcu(pos))) |
275 | 275 | ||
276 | /** | 276 | /** |
@@ -284,7 +284,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
284 | */ | 284 | */ |
285 | #define list_for_each_entry_continue_rcu(pos, head, member) \ | 285 | #define list_for_each_entry_continue_rcu(pos, head, member) \ |
286 | for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ | 286 | for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ |
287 | prefetch(pos->member.next), &pos->member != (head); \ | 287 | &pos->member != (head); \ |
288 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) | 288 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
289 | 289 | ||
290 | /** | 290 | /** |
@@ -427,7 +427,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
427 | 427 | ||
428 | #define __hlist_for_each_rcu(pos, head) \ | 428 | #define __hlist_for_each_rcu(pos, head) \ |
429 | for (pos = rcu_dereference(hlist_first_rcu(head)); \ | 429 | for (pos = rcu_dereference(hlist_first_rcu(head)); \ |
430 | pos && ({ prefetch(pos->next); 1; }); \ | 430 | pos; \ |
431 | pos = rcu_dereference(hlist_next_rcu(pos))) | 431 | pos = rcu_dereference(hlist_next_rcu(pos))) |
432 | 432 | ||
433 | /** | 433 | /** |
@@ -443,7 +443,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
443 | */ | 443 | */ |
444 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ | 444 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ |
445 | for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ | 445 | for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ |
446 | pos && ({ prefetch(pos->next); 1; }) && \ | 446 | pos && \ |
447 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 447 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
448 | pos = rcu_dereference_raw(hlist_next_rcu(pos))) | 448 | pos = rcu_dereference_raw(hlist_next_rcu(pos))) |
449 | 449 | ||
@@ -460,7 +460,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
460 | */ | 460 | */ |
461 | #define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ | 461 | #define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ |
462 | for (pos = rcu_dereference_bh((head)->first); \ | 462 | for (pos = rcu_dereference_bh((head)->first); \ |
463 | pos && ({ prefetch(pos->next); 1; }) && \ | 463 | pos && \ |
464 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 464 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
465 | pos = rcu_dereference_bh(pos->next)) | 465 | pos = rcu_dereference_bh(pos->next)) |
466 | 466 | ||
@@ -472,7 +472,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
472 | */ | 472 | */ |
473 | #define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ | 473 | #define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ |
474 | for (pos = rcu_dereference((pos)->next); \ | 474 | for (pos = rcu_dereference((pos)->next); \ |
475 | pos && ({ prefetch(pos->next); 1; }) && \ | 475 | pos && \ |
476 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 476 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
477 | pos = rcu_dereference(pos->next)) | 477 | pos = rcu_dereference(pos->next)) |
478 | 478 | ||
@@ -484,7 +484,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
484 | */ | 484 | */ |
485 | #define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ | 485 | #define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ |
486 | for (pos = rcu_dereference_bh((pos)->next); \ | 486 | for (pos = rcu_dereference_bh((pos)->next); \ |
487 | pos && ({ prefetch(pos->next); 1; }) && \ | 487 | pos && \ |
488 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 488 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
489 | pos = rcu_dereference_bh(pos->next)) | 489 | pos = rcu_dereference_bh(pos->next)) |
490 | 490 | ||
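The RCU hlist iterators above lose their prefetch() calls the same way, and the reader-side pattern is unchanged. A sketch of a typical lookup; the types and the hash are illustrative:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct obj {
	int key;
	struct hlist_node link;		/* illustrative */
};

static struct hlist_head table[64];	/* illustrative hash table */

static struct obj *obj_lookup(int key)
{
	struct obj *tpos;
	struct hlist_node *pos;

	/* Caller must be inside rcu_read_lock()/rcu_read_unlock(). */
	hlist_for_each_entry_rcu(tpos, pos, &table[key & 63], link)
		if (tpos->key == key)
			return tpos;
	return NULL;
}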
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff422d2b7f90..99f9aa7c2804 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -47,6 +47,18 @@ | |||
47 | extern int rcutorture_runnable; /* for sysctl */ | 47 | extern int rcutorture_runnable; /* for sysctl */ |
48 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | 48 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ |
49 | 49 | ||
50 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | ||
51 | extern void rcutorture_record_test_transition(void); | ||
52 | extern void rcutorture_record_progress(unsigned long vernum); | ||
53 | #else | ||
54 | static inline void rcutorture_record_test_transition(void) | ||
55 | { | ||
56 | } | ||
57 | static inline void rcutorture_record_progress(unsigned long vernum) | ||
58 | { | ||
59 | } | ||
60 | #endif | ||
61 | |||
50 | #define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) | 62 | #define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) |
51 | #define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b)) | 63 | #define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b)) |
52 | #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) | 64 | #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) |
@@ -68,7 +80,6 @@ extern void call_rcu_sched(struct rcu_head *head, | |||
68 | extern void synchronize_sched(void); | 80 | extern void synchronize_sched(void); |
69 | extern void rcu_barrier_bh(void); | 81 | extern void rcu_barrier_bh(void); |
70 | extern void rcu_barrier_sched(void); | 82 | extern void rcu_barrier_sched(void); |
71 | extern int sched_expedited_torture_stats(char *page); | ||
72 | 83 | ||
73 | static inline void __rcu_read_lock_bh(void) | 84 | static inline void __rcu_read_lock_bh(void) |
74 | { | 85 | { |
@@ -774,6 +785,7 @@ extern struct debug_obj_descr rcuhead_debug_descr; | |||
774 | 785 | ||
775 | static inline void debug_rcu_head_queue(struct rcu_head *head) | 786 | static inline void debug_rcu_head_queue(struct rcu_head *head) |
776 | { | 787 | { |
788 | WARN_ON_ONCE((unsigned long)head & 0x3); | ||
777 | debug_object_activate(head, &rcuhead_debug_descr); | 789 | debug_object_activate(head, &rcuhead_debug_descr); |
778 | debug_object_active_state(head, &rcuhead_debug_descr, | 790 | debug_object_active_state(head, &rcuhead_debug_descr, |
779 | STATE_RCU_HEAD_READY, | 791 | STATE_RCU_HEAD_READY, |
@@ -797,4 +809,60 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head) | |||
797 | } | 809 | } |
798 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | 810 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
799 | 811 | ||
812 | static __always_inline bool __is_kfree_rcu_offset(unsigned long offset) | ||
813 | { | ||
814 | return offset < 4096; | ||
815 | } | ||
816 | |||
817 | static __always_inline | ||
818 | void __kfree_rcu(struct rcu_head *head, unsigned long offset) | ||
819 | { | ||
820 | typedef void (*rcu_callback)(struct rcu_head *); | ||
821 | |||
822 | BUILD_BUG_ON(!__builtin_constant_p(offset)); | ||
823 | |||
824 | /* See the kfree_rcu() header comment. */ | ||
825 | BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); | ||
826 | |||
827 | call_rcu(head, (rcu_callback)offset); | ||
828 | } | ||
829 | |||
830 | extern void kfree(const void *); | ||
831 | |||
832 | static inline void __rcu_reclaim(struct rcu_head *head) | ||
833 | { | ||
834 | unsigned long offset = (unsigned long)head->func; | ||
835 | |||
836 | if (__is_kfree_rcu_offset(offset)) | ||
837 | kfree((void *)head - offset); | ||
838 | else | ||
839 | head->func(head); | ||
840 | } | ||
841 | |||
842 | /** | ||
843 | * kfree_rcu() - kfree an object after a grace period. | ||
844 | * @ptr: pointer to kfree | ||
845 | * @rcu_head: the name of the struct rcu_head within the type of @ptr. | ||
846 | * | ||
847 | * Many rcu callback functions just call kfree() on the base structure. | ||
848 | * These functions are trivial, but their size adds up, and furthermore | ||
849 | * when they are used in a kernel module, that module must invoke the | ||
850 | * high-latency rcu_barrier() function at module-unload time. | ||
851 | * | ||
852 | * The kfree_rcu() function handles this issue. Rather than encoding a | ||
853 | * function address in the embedded rcu_head structure, kfree_rcu() instead | ||
854 | * encodes the offset of the rcu_head structure within the base structure. | ||
855 | * Because the functions are not allowed in the low-order 4096 bytes of | ||
856 | * kernel virtual memory, offsets up to 4095 bytes can be accommodated. | ||
857 | * If the offset is larger than 4095 bytes, a compile-time error will | ||
858 | * be generated in __kfree_rcu(). If this error is triggered, you can | ||
859 | * either fall back to use of call_rcu() or rearrange the structure to | ||
860 | * position the rcu_head structure into the first 4096 bytes. | ||
861 | * | ||
862 | * Note that the allowable offset might decrease in the future, for example, | ||
863 | * to allow something like kmem_cache_free_rcu(). | ||
864 | */ | ||
865 | #define kfree_rcu(ptr, rcu_head) \ | ||
866 | __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) | ||
867 | |||
800 | #endif /* __LINUX_RCUPDATE_H */ | 868 | #endif /* __LINUX_RCUPDATE_H */ |
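Putting the header comment into practice, a minimal kfree_rcu() sketch; the structure and field names are illustrative, not from this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;	/* must lie within the first 4096 bytes */
};

static void foo_release(struct foo *f)
{
	/* Replaces call_rcu(&f->rcu, <kfree-only callback>); no callback
	 * body and no rcu_barrier() at module unload are required. */
	kfree_rcu(f, rcu);
}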
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 30ebd7c8d874..52b3e0281fd0 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -100,6 +100,14 @@ static inline void rcu_note_context_switch(int cpu) | |||
100 | } | 100 | } |
101 | 101 | ||
102 | /* | 102 | /* |
103 | * Take advantage of the fact that there is only one CPU, which | ||
104 | * allows us to ignore virtualization-based context switches. | ||
105 | */ | ||
106 | static inline void rcu_virt_note_context_switch(int cpu) | ||
107 | { | ||
108 | } | ||
109 | |||
110 | /* | ||
103 | * Return the number of grace periods. | 111 | * Return the number of grace periods. |
104 | */ | 112 | */ |
105 | static inline long rcu_batches_completed(void) | 113 | static inline long rcu_batches_completed(void) |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 3a933482734a..e65d06634dd8 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -35,6 +35,16 @@ extern void rcu_note_context_switch(int cpu); | |||
35 | extern int rcu_needs_cpu(int cpu); | 35 | extern int rcu_needs_cpu(int cpu); |
36 | extern void rcu_cpu_stall_reset(void); | 36 | extern void rcu_cpu_stall_reset(void); |
37 | 37 | ||
38 | /* | ||
39 | * Note a virtualization-based context switch. This is simply a | ||
40 | * wrapper around rcu_note_context_switch(), which allows TINY_RCU | ||
41 | * to save a few bytes. | ||
42 | */ | ||
43 | static inline void rcu_virt_note_context_switch(int cpu) | ||
44 | { | ||
45 | rcu_note_context_switch(cpu); | ||
46 | } | ||
47 | |||
38 | #ifdef CONFIG_TREE_PREEMPT_RCU | 48 | #ifdef CONFIG_TREE_PREEMPT_RCU |
39 | 49 | ||
40 | extern void exit_rcu(void); | 50 | extern void exit_rcu(void); |
@@ -58,9 +68,12 @@ static inline void synchronize_rcu_bh_expedited(void) | |||
58 | 68 | ||
59 | extern void rcu_barrier(void); | 69 | extern void rcu_barrier(void); |
60 | 70 | ||
71 | extern unsigned long rcutorture_testseq; | ||
72 | extern unsigned long rcutorture_vernum; | ||
61 | extern long rcu_batches_completed(void); | 73 | extern long rcu_batches_completed(void); |
62 | extern long rcu_batches_completed_bh(void); | 74 | extern long rcu_batches_completed_bh(void); |
63 | extern long rcu_batches_completed_sched(void); | 75 | extern long rcu_batches_completed_sched(void); |
76 | |||
64 | extern void rcu_force_quiescent_state(void); | 77 | extern void rcu_force_quiescent_state(void); |
65 | extern void rcu_bh_force_quiescent_state(void); | 78 | extern void rcu_bh_force_quiescent_state(void); |
66 | extern void rcu_sched_force_quiescent_state(void); | 79 | extern void rcu_sched_force_quiescent_state(void); |
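rcu_virt_note_context_switch() exists in both rcutiny.h and rcutree.h so hypervisor code can report guest entry as a quiescent state without caring which RCU flavor is built in. A hedged sketch of the intended call site; the surrounding function is hypothetical:

#include <linux/rcupdate.h>	/* pulls in rcutiny.h or rcutree.h */

static void example_enter_guest(int cpu)
{
	/* Entering the guest counts as a context switch on this CPU:
	 * a no-op under TINY_RCU, rcu_note_context_switch() under
	 * TREE_RCU. */
	rcu_virt_note_context_switch(cpu);
	/* ... world switch into the guest ... */
}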
diff --git a/include/linux/sched.h b/include/linux/sched.h index 781abd137673..12211e1666e2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -360,7 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout); | |||
360 | extern signed long schedule_timeout_killable(signed long timeout); | 360 | extern signed long schedule_timeout_killable(signed long timeout); |
361 | extern signed long schedule_timeout_uninterruptible(signed long timeout); | 361 | extern signed long schedule_timeout_uninterruptible(signed long timeout); |
362 | asmlinkage void schedule(void); | 362 | asmlinkage void schedule(void); |
363 | extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); | 363 | extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); |
364 | 364 | ||
365 | struct nsproxy; | 365 | struct nsproxy; |
366 | struct user_namespace; | 366 | struct user_namespace; |
@@ -731,10 +731,6 @@ struct sched_info { | |||
731 | /* timestamps */ | 731 | /* timestamps */ |
732 | unsigned long long last_arrival,/* when we last ran on a cpu */ | 732 | unsigned long long last_arrival,/* when we last ran on a cpu */ |
733 | last_queued; /* when we were last queued to run */ | 733 | last_queued; /* when we were last queued to run */ |
734 | #ifdef CONFIG_SCHEDSTATS | ||
735 | /* BKL stats */ | ||
736 | unsigned int bkl_count; | ||
737 | #endif | ||
738 | }; | 734 | }; |
739 | #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ | 735 | #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ |
740 | 736 | ||
@@ -868,6 +864,7 @@ static inline int sd_power_saving_flags(void) | |||
868 | 864 | ||
869 | struct sched_group { | 865 | struct sched_group { |
870 | struct sched_group *next; /* Must be a circular list */ | 866 | struct sched_group *next; /* Must be a circular list */ |
867 | atomic_t ref; | ||
871 | 868 | ||
872 | /* | 869 | /* |
873 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a | 870 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a |
@@ -882,9 +879,6 @@ struct sched_group { | |||
882 | * NOTE: this field is variable length. (Allocated dynamically | 879 | * NOTE: this field is variable length. (Allocated dynamically |
883 | * by attaching extra space to the end of the structure, | 880 | * by attaching extra space to the end of the structure, |
884 | * depending on how many CPUs the kernel has booted up with) | 881 | * depending on how many CPUs the kernel has booted up with) |
885 | * | ||
886 | * It can also be embedded into static data structures at build | ||
887 | * time. (See 'struct static_sched_group' in kernel/sched.c) | ||
888 | */ | 882 | */ |
889 | unsigned long cpumask[0]; | 883 | unsigned long cpumask[0]; |
890 | }; | 884 | }; |
@@ -894,17 +888,6 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | |||
894 | return to_cpumask(sg->cpumask); | 888 | return to_cpumask(sg->cpumask); |
895 | } | 889 | } |
896 | 890 | ||
897 | enum sched_domain_level { | ||
898 | SD_LV_NONE = 0, | ||
899 | SD_LV_SIBLING, | ||
900 | SD_LV_MC, | ||
901 | SD_LV_BOOK, | ||
902 | SD_LV_CPU, | ||
903 | SD_LV_NODE, | ||
904 | SD_LV_ALLNODES, | ||
905 | SD_LV_MAX | ||
906 | }; | ||
907 | |||
908 | struct sched_domain_attr { | 891 | struct sched_domain_attr { |
909 | int relax_domain_level; | 892 | int relax_domain_level; |
910 | }; | 893 | }; |
@@ -913,6 +896,8 @@ struct sched_domain_attr { | |||
913 | .relax_domain_level = -1, \ | 896 | .relax_domain_level = -1, \ |
914 | } | 897 | } |
915 | 898 | ||
899 | extern int sched_domain_level_max; | ||
900 | |||
916 | struct sched_domain { | 901 | struct sched_domain { |
917 | /* These fields must be setup */ | 902 | /* These fields must be setup */ |
918 | struct sched_domain *parent; /* top domain must be null terminated */ | 903 | struct sched_domain *parent; /* top domain must be null terminated */ |
@@ -930,7 +915,7 @@ struct sched_domain { | |||
930 | unsigned int forkexec_idx; | 915 | unsigned int forkexec_idx; |
931 | unsigned int smt_gain; | 916 | unsigned int smt_gain; |
932 | int flags; /* See SD_* */ | 917 | int flags; /* See SD_* */ |
933 | enum sched_domain_level level; | 918 | int level; |
934 | 919 | ||
935 | /* Runtime fields. */ | 920 | /* Runtime fields. */ |
936 | unsigned long last_balance; /* init to jiffies. units in jiffies */ | 921 | unsigned long last_balance; /* init to jiffies. units in jiffies */ |
@@ -973,6 +958,10 @@ struct sched_domain { | |||
973 | #ifdef CONFIG_SCHED_DEBUG | 958 | #ifdef CONFIG_SCHED_DEBUG |
974 | char *name; | 959 | char *name; |
975 | #endif | 960 | #endif |
961 | union { | ||
962 | void *private; /* used during construction */ | ||
963 | struct rcu_head rcu; /* used during destruction */ | ||
964 | }; | ||
976 | 965 | ||
977 | unsigned int span_weight; | 966 | unsigned int span_weight; |
978 | /* | 967 | /* |
@@ -981,9 +970,6 @@ struct sched_domain { | |||
981 | * NOTE: this field is variable length. (Allocated dynamically | 970 | * NOTE: this field is variable length. (Allocated dynamically |
982 | * by attaching extra space to the end of the structure, | 971 | * by attaching extra space to the end of the structure, |
983 | * depending on how many CPUs the kernel has booted up with) | 972 | * depending on how many CPUs the kernel has booted up with) |
984 | * | ||
985 | * It can also be embedded into static data structures at build | ||
986 | * time. (See 'struct static_sched_domain' in kernel/sched.c) | ||
987 | */ | 973 | */ |
988 | unsigned long span[0]; | 974 | unsigned long span[0]; |
989 | }; | 975 | }; |
@@ -1048,8 +1034,12 @@ struct sched_domain; | |||
1048 | #define WF_FORK 0x02 /* child wakeup after fork */ | 1034 | #define WF_FORK 0x02 /* child wakeup after fork */ |
1049 | 1035 | ||
1050 | #define ENQUEUE_WAKEUP 1 | 1036 | #define ENQUEUE_WAKEUP 1 |
1051 | #define ENQUEUE_WAKING 2 | 1037 | #define ENQUEUE_HEAD 2 |
1052 | #define ENQUEUE_HEAD 4 | 1038 | #ifdef CONFIG_SMP |
1039 | #define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */ | ||
1040 | #else | ||
1041 | #define ENQUEUE_WAKING 0 | ||
1042 | #endif | ||
1053 | 1043 | ||
1054 | #define DEQUEUE_SLEEP 1 | 1044 | #define DEQUEUE_SLEEP 1 |
1055 | 1045 | ||
@@ -1067,12 +1057,11 @@ struct sched_class { | |||
1067 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); | 1057 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
1068 | 1058 | ||
1069 | #ifdef CONFIG_SMP | 1059 | #ifdef CONFIG_SMP |
1070 | int (*select_task_rq)(struct rq *rq, struct task_struct *p, | 1060 | int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); |
1071 | int sd_flag, int flags); | ||
1072 | 1061 | ||
1073 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | 1062 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); |
1074 | void (*post_schedule) (struct rq *this_rq); | 1063 | void (*post_schedule) (struct rq *this_rq); |
1075 | void (*task_waking) (struct rq *this_rq, struct task_struct *task); | 1064 | void (*task_waking) (struct task_struct *task); |
1076 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); | 1065 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); |
1077 | 1066 | ||
1078 | void (*set_cpus_allowed)(struct task_struct *p, | 1067 | void (*set_cpus_allowed)(struct task_struct *p, |
@@ -1197,13 +1186,11 @@ struct task_struct { | |||
1197 | unsigned int flags; /* per process flags, defined below */ | 1186 | unsigned int flags; /* per process flags, defined below */ |
1198 | unsigned int ptrace; | 1187 | unsigned int ptrace; |
1199 | 1188 | ||
1200 | int lock_depth; /* BKL lock depth */ | ||
1201 | |||
1202 | #ifdef CONFIG_SMP | 1189 | #ifdef CONFIG_SMP |
1203 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW | 1190 | struct task_struct *wake_entry; |
1204 | int oncpu; | 1191 | int on_cpu; |
1205 | #endif | ||
1206 | #endif | 1192 | #endif |
1193 | int on_rq; | ||
1207 | 1194 | ||
1208 | int prio, static_prio, normal_prio; | 1195 | int prio, static_prio, normal_prio; |
1209 | unsigned int rt_priority; | 1196 | unsigned int rt_priority; |
@@ -1274,6 +1261,7 @@ struct task_struct { | |||
1274 | 1261 | ||
1275 | /* Revert to default priority/policy when forking */ | 1262 | /* Revert to default priority/policy when forking */ |
1276 | unsigned sched_reset_on_fork:1; | 1263 | unsigned sched_reset_on_fork:1; |
1264 | unsigned sched_contributes_to_load:1; | ||
1277 | 1265 | ||
1278 | pid_t pid; | 1266 | pid_t pid; |
1279 | pid_t tgid; | 1267 | pid_t tgid; |
@@ -2063,14 +2051,13 @@ extern void xtime_update(unsigned long ticks); | |||
2063 | 2051 | ||
2064 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); | 2052 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
2065 | extern int wake_up_process(struct task_struct *tsk); | 2053 | extern int wake_up_process(struct task_struct *tsk); |
2066 | extern void wake_up_new_task(struct task_struct *tsk, | 2054 | extern void wake_up_new_task(struct task_struct *tsk); |
2067 | unsigned long clone_flags); | ||
2068 | #ifdef CONFIG_SMP | 2055 | #ifdef CONFIG_SMP |
2069 | extern void kick_process(struct task_struct *tsk); | 2056 | extern void kick_process(struct task_struct *tsk); |
2070 | #else | 2057 | #else |
2071 | static inline void kick_process(struct task_struct *tsk) { } | 2058 | static inline void kick_process(struct task_struct *tsk) { } |
2072 | #endif | 2059 | #endif |
2073 | extern void sched_fork(struct task_struct *p, int clone_flags); | 2060 | extern void sched_fork(struct task_struct *p); |
2074 | extern void sched_dead(struct task_struct *p); | 2061 | extern void sched_dead(struct task_struct *p); |
2075 | 2062 | ||
2076 | extern void proc_caches_init(void); | 2063 | extern void proc_caches_init(void); |
@@ -2195,8 +2182,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from); | |||
2195 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 2182 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
2196 | 2183 | ||
2197 | #ifdef CONFIG_SMP | 2184 | #ifdef CONFIG_SMP |
2185 | void scheduler_ipi(void); | ||
2198 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); | 2186 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
2199 | #else | 2187 | #else |
2188 | static inline void scheduler_ipi(void) { } | ||
2200 | static inline unsigned long wait_task_inactive(struct task_struct *p, | 2189 | static inline unsigned long wait_task_inactive(struct task_struct *p, |
2201 | long match_state) | 2190 | long match_state) |
2202 | { | 2191 | { |
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index e98cd2e57194..06d69648fc86 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
@@ -88,12 +88,12 @@ static __always_inline unsigned read_seqbegin(const seqlock_t *sl) | |||
88 | unsigned ret; | 88 | unsigned ret; |
89 | 89 | ||
90 | repeat: | 90 | repeat: |
91 | ret = sl->sequence; | 91 | ret = ACCESS_ONCE(sl->sequence); |
92 | smp_rmb(); | ||
93 | if (unlikely(ret & 1)) { | 92 | if (unlikely(ret & 1)) { |
94 | cpu_relax(); | 93 | cpu_relax(); |
95 | goto repeat; | 94 | goto repeat; |
96 | } | 95 | } |
96 | smp_rmb(); | ||
97 | 97 | ||
98 | return ret; | 98 | return ret; |
99 | } | 99 | } |
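The read_seqbegin() change above makes the retry-on-odd check use a single ACCESS_ONCE() load and moves the smp_rmb() after it, so the barrier is only paid once a stable (even) sequence has been observed. The reader/writer pattern itself is unchanged; a sketch with illustrative shared state:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(state_lock);
static u64 state_a, state_b;		/* illustrative shared state */

static u64 state_read(void)
{
	unsigned seq;
	u64 a, b;

	do {
		seq = read_seqbegin(&state_lock); /* spins while a write is in flight */
		a = state_a;
		b = state_b;
	} while (read_seqretry(&state_lock, seq));
	return a + b;
}

static void state_update(void)
{
	write_seqlock(&state_lock);	/* makes the sequence odd */
	state_a++;
	state_b++;
	write_sequnlock(&state_lock);	/* makes it even again */
}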
diff --git a/include/linux/signal.h b/include/linux/signal.h index fcd2b14b1932..29a68ac7af83 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -7,6 +7,8 @@ | |||
7 | #ifdef __KERNEL__ | 7 | #ifdef __KERNEL__ |
8 | #include <linux/list.h> | 8 | #include <linux/list.h> |
9 | 9 | ||
10 | struct task_struct; | ||
11 | |||
10 | /* for sysctl */ | 12 | /* for sysctl */ |
11 | extern int print_fatal_signals; | 13 | extern int print_fatal_signals; |
12 | /* | 14 | /* |
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 9659eff52ca2..045f72ab5dfd 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h | |||
@@ -404,7 +404,9 @@ extern bool ssb_is_sprom_available(struct ssb_bus *bus); | |||
404 | 404 | ||
405 | /* Set a fallback SPROM. | 405 | /* Set a fallback SPROM. |
406 | * See kdoc at the function definition for complete documentation. */ | 406 | * See kdoc at the function definition for complete documentation. */ |
407 | extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom); | 407 | extern int ssb_arch_register_fallback_sprom( |
408 | int (*sprom_callback)(struct ssb_bus *bus, | ||
409 | struct ssb_sprom *out)); | ||
408 | 410 | ||
409 | /* Suspend a SSB bus. | 411 | /* Suspend a SSB bus. |
410 | * Call this from the parent bus suspend routine. */ | 412 | * Call this from the parent bus suspend routine. */ |
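The ssb change replaces a one-shot fallback SPROM with a registered callback, letting the architecture supply per-bus data on demand. A hedged sketch of a registrant; the functions and the data source are hypothetical:

#include <linux/ssb/ssb.h>

/* Hypothetical board code: supply SPROM contents when the bus has none. */
static int example_sprom_fill(struct ssb_bus *bus, struct ssb_sprom *out)
{
	/* ... fill *out from board-specific storage ... */
	return 0;	/* or a negative errno if no data for this bus */
}

static int __init example_register_sprom(void)
{
	return ssb_arch_register_fallback_sprom(example_sprom_fill);
}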
diff --git a/include/linux/string.h b/include/linux/string.h index a716ee2a8adb..a176db2f2c85 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -123,6 +123,7 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); | |||
123 | extern void argv_free(char **argv); | 123 | extern void argv_free(char **argv); |
124 | 124 | ||
125 | extern bool sysfs_streq(const char *s1, const char *s2); | 125 | extern bool sysfs_streq(const char *s1, const char *s2); |
126 | extern int strtobool(const char *s, bool *res); | ||
126 | 127 | ||
127 | #ifdef CONFIG_BINARY_PRINTF | 128 | #ifdef CONFIG_BINARY_PRINTF |
128 | int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); | 129 | int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); |
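strtobool() gives callers a single parser for user-supplied boolean strings. A hedged sketch of a caller; the helper name is hypothetical:

#include <linux/string.h>
#include <linux/types.h>

static int example_parse_flag(const char *buf, bool *res)
{
	/* Accepts "y", "Y", "1", "n", "N" and "0"; returns -EINVAL for
	 * anything else, leaving *res untouched. */
	return strtobool(buf, res);
}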
diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h index dfb078db8ebb..d35e783a598c 100644 --- a/include/linux/sysdev.h +++ b/include/linux/sysdev.h | |||
@@ -34,12 +34,6 @@ struct sysdev_class { | |||
34 | struct list_head drivers; | 34 | struct list_head drivers; |
35 | struct sysdev_class_attribute **attrs; | 35 | struct sysdev_class_attribute **attrs; |
36 | struct kset kset; | 36 | struct kset kset; |
37 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | ||
38 | /* Default operations for these types of devices */ | ||
39 | int (*shutdown)(struct sys_device *); | ||
40 | int (*suspend)(struct sys_device *, pm_message_t state); | ||
41 | int (*resume)(struct sys_device *); | ||
42 | #endif | ||
43 | }; | 37 | }; |
44 | 38 | ||
45 | struct sysdev_class_attribute { | 39 | struct sysdev_class_attribute { |
@@ -77,11 +71,6 @@ struct sysdev_driver { | |||
77 | struct list_head entry; | 71 | struct list_head entry; |
78 | int (*add)(struct sys_device *); | 72 | int (*add)(struct sys_device *); |
79 | int (*remove)(struct sys_device *); | 73 | int (*remove)(struct sys_device *); |
80 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | ||
81 | int (*shutdown)(struct sys_device *); | ||
82 | int (*suspend)(struct sys_device *, pm_message_t state); | ||
83 | int (*resume)(struct sys_device *); | ||
84 | #endif | ||
85 | }; | 74 | }; |
86 | 75 | ||
87 | 76 | ||
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 30b881555fa5..c3acda60eee0 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
@@ -176,7 +176,6 @@ struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd, | |||
176 | const unsigned char *name); | 176 | const unsigned char *name); |
177 | struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd); | 177 | struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd); |
178 | void sysfs_put(struct sysfs_dirent *sd); | 178 | void sysfs_put(struct sysfs_dirent *sd); |
179 | void sysfs_printk_last_file(void); | ||
180 | 179 | ||
181 | /* Called to clear a ns tag when it is no longer valid */ | 180 | /* Called to clear a ns tag when it is no longer valid */ |
182 | void sysfs_exit_ns(enum kobj_ns_type type, const void *tag); | 181 | void sysfs_exit_ns(enum kobj_ns_type type, const void *tag); |
@@ -348,10 +347,6 @@ static inline int __must_check sysfs_init(void) | |||
348 | return 0; | 347 | return 0; |
349 | } | 348 | } |
350 | 349 | ||
351 | static inline void sysfs_printk_last_file(void) | ||
352 | { | ||
353 | } | ||
354 | |||
355 | #endif /* CONFIG_SYSFS */ | 350 | #endif /* CONFIG_SYSFS */ |
356 | 351 | ||
357 | #endif /* _SYSFS_H_ */ | 352 | #endif /* _SYSFS_H_ */ |
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h index 7071ec5d0118..b004e557caa9 100644 --- a/include/linux/ti_wilink_st.h +++ b/include/linux/ti_wilink_st.h | |||
@@ -140,12 +140,12 @@ extern long st_unregister(struct st_proto_s *); | |||
140 | */ | 140 | */ |
141 | struct st_data_s { | 141 | struct st_data_s { |
142 | unsigned long st_state; | 142 | unsigned long st_state; |
143 | struct tty_struct *tty; | ||
144 | struct sk_buff *tx_skb; | 143 | struct sk_buff *tx_skb; |
145 | #define ST_TX_SENDING 1 | 144 | #define ST_TX_SENDING 1 |
146 | #define ST_TX_WAKEUP 2 | 145 | #define ST_TX_WAKEUP 2 |
147 | unsigned long tx_state; | 146 | unsigned long tx_state; |
148 | struct st_proto_s *list[ST_MAX_CHANNELS]; | 147 | struct st_proto_s *list[ST_MAX_CHANNELS]; |
148 | bool is_registered[ST_MAX_CHANNELS]; | ||
149 | unsigned long rx_state; | 149 | unsigned long rx_state; |
150 | unsigned long rx_count; | 150 | unsigned long rx_count; |
151 | struct sk_buff *rx_skb; | 151 | struct sk_buff *rx_skb; |
@@ -155,6 +155,7 @@ struct st_data_s { | |||
155 | unsigned char protos_registered; | 155 | unsigned char protos_registered; |
156 | unsigned long ll_state; | 156 | unsigned long ll_state; |
157 | void *kim_data; | 157 | void *kim_data; |
158 | struct tty_struct *tty; | ||
158 | }; | 159 | }; |
159 | 160 | ||
160 | /* | 161 | /* |
diff --git a/include/linux/time.h b/include/linux/time.h index 454a26205787..b3061782dec3 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -126,6 +126,7 @@ struct timespec __current_kernel_time(void); /* does not take xtime_lock */ | |||
126 | struct timespec get_monotonic_coarse(void); | 126 | struct timespec get_monotonic_coarse(void); |
127 | void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, | 127 | void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, |
128 | struct timespec *wtom, struct timespec *sleep); | 128 | struct timespec *wtom, struct timespec *sleep); |
129 | void timekeeping_inject_sleeptime(struct timespec *delta); | ||
129 | 130 | ||
130 | #define CURRENT_TIME (current_kernel_time()) | 131 | #define CURRENT_TIME (current_kernel_time()) |
131 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) | 132 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) |
@@ -294,6 +295,8 @@ struct itimerval { | |||
294 | #define CLOCK_REALTIME_COARSE 5 | 295 | #define CLOCK_REALTIME_COARSE 5 |
295 | #define CLOCK_MONOTONIC_COARSE 6 | 296 | #define CLOCK_MONOTONIC_COARSE 6 |
296 | #define CLOCK_BOOTTIME 7 | 297 | #define CLOCK_BOOTTIME 7 |
298 | #define CLOCK_REALTIME_ALARM 8 | ||
299 | #define CLOCK_BOOTTIME_ALARM 9 | ||
297 | 300 | ||
298 | /* | 301 | /* |
299 | * The IDs of various hardware clocks: | 302 | * The IDs of various hardware clocks: |
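The two new IDs expose the alarmtimer framework as POSIX clocks: timers armed on them can wake the system from suspend, and arming one requires CAP_WAKE_ALARM. A hedged userspace sketch using timer_create() -- error handling trimmed, and older libcs may not define the constant yet:

#include <signal.h>
#include <time.h>

#ifndef CLOCK_BOOTTIME_ALARM
#define CLOCK_BOOTTIME_ALARM 9		/* matches the header above */
#endif

/* Arm a suspend-wakeup timer 60 seconds out; needs CAP_WAKE_ALARM. */
static int arm_wakeup_timer(void)
{
	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
				.sigev_signo  = SIGALRM };
	struct itimerspec its = { .it_value = { .tv_sec = 60 } };
	timer_t timerid;

	if (timer_create(CLOCK_BOOTTIME_ALARM, &sev, &timerid))
		return -1;
	return timer_settime(timerid, 0, &its, NULL);
}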
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index a520fd70a59f..5088727478fd 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h | |||
@@ -39,7 +39,7 @@ struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) | |||
39 | 39 | ||
40 | static inline void timerqueue_init(struct timerqueue_node *node) | 40 | static inline void timerqueue_init(struct timerqueue_node *node) |
41 | { | 41 | { |
42 | RB_CLEAR_NODE(&node->node); | 42 | rb_init_node(&node->node); |
43 | } | 43 | } |
44 | 44 | ||
45 | static inline void timerqueue_init_head(struct timerqueue_head *head) | 45 | static inline void timerqueue_init_head(struct timerqueue_head *head) |
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 88bdd010d65d..2fa8d1341a0a 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h | |||
@@ -38,9 +38,19 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) | |||
38 | return outer; | 38 | return outer; |
39 | } | 39 | } |
40 | 40 | ||
41 | #define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0) | 41 | static inline void INET_ECN_xmit(struct sock *sk) |
42 | #define INET_ECN_dontxmit(sk) \ | 42 | { |
43 | do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0) | 43 | inet_sk(sk)->tos |= INET_ECN_ECT_0; |
44 | if (inet6_sk(sk) != NULL) | ||
45 | inet6_sk(sk)->tclass |= INET_ECN_ECT_0; | ||
46 | } | ||
47 | |||
48 | static inline void INET_ECN_dontxmit(struct sock *sk) | ||
49 | { | ||
50 | inet_sk(sk)->tos &= ~INET_ECN_MASK; | ||
51 | if (inet6_sk(sk) != NULL) | ||
52 | inet6_sk(sk)->tclass &= ~INET_ECN_MASK; | ||
53 | } | ||
44 | 54 | ||
45 | #define IP6_ECN_flow_init(label) do { \ | 55 | #define IP6_ECN_flow_init(label) do { \ |
46 | (label) &= ~htonl(INET_ECN_MASK << 20); \ | 56 | (label) &= ~htonl(INET_ECN_MASK << 20); \ |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index d516f00c8e0f..86aefed6140b 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -791,6 +791,7 @@ struct ip_vs_app { | |||
791 | /* IPVS in network namespace */ | 791 | /* IPVS in network namespace */ |
792 | struct netns_ipvs { | 792 | struct netns_ipvs { |
793 | int gen; /* Generation */ | 793 | int gen; /* Generation */ |
794 | int enable; /* enable like nf_hooks do */ | ||
794 | /* | 795 | /* |
795 | * Hash table: for real service lookups | 796 | * Hash table: for real service lookups |
796 | */ | 797 | */ |
@@ -1089,6 +1090,22 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) | |||
1089 | atomic_inc(&ctl_cp->n_control); | 1090 | atomic_inc(&ctl_cp->n_control); |
1090 | } | 1091 | } |
1091 | 1092 | ||
1093 | /* | ||
1094 | * IPVS netns init & cleanup functions | ||
1095 | */ | ||
1096 | extern int __ip_vs_estimator_init(struct net *net); | ||
1097 | extern int __ip_vs_control_init(struct net *net); | ||
1098 | extern int __ip_vs_protocol_init(struct net *net); | ||
1099 | extern int __ip_vs_app_init(struct net *net); | ||
1100 | extern int __ip_vs_conn_init(struct net *net); | ||
1101 | extern int __ip_vs_sync_init(struct net *net); | ||
1102 | extern void __ip_vs_conn_cleanup(struct net *net); | ||
1103 | extern void __ip_vs_app_cleanup(struct net *net); | ||
1104 | extern void __ip_vs_protocol_cleanup(struct net *net); | ||
1105 | extern void __ip_vs_control_cleanup(struct net *net); | ||
1106 | extern void __ip_vs_estimator_cleanup(struct net *net); | ||
1107 | extern void __ip_vs_sync_cleanup(struct net *net); | ||
1108 | extern void __ip_vs_service_cleanup(struct net *net); | ||
1092 | 1109 | ||
1093 | /* | 1110 | /* |
1094 | * IPVS application functions | 1111 | * IPVS application functions |
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 75b8e2968c9b..f57e7d46a453 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h | |||
@@ -199,7 +199,7 @@ struct llc_pdu_sn { | |||
199 | u8 ssap; | 199 | u8 ssap; |
200 | u8 ctrl_1; | 200 | u8 ctrl_1; |
201 | u8 ctrl_2; | 201 | u8 ctrl_2; |
202 | }; | 202 | } __packed; |
203 | 203 | ||
204 | static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) | 204 | static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) |
205 | { | 205 | { |
@@ -211,7 +211,7 @@ struct llc_pdu_un { | |||
211 | u8 dsap; | 211 | u8 dsap; |
212 | u8 ssap; | 212 | u8 ssap; |
213 | u8 ctrl_1; | 213 | u8 ctrl_1; |
214 | }; | 214 | } __packed; |
215 | 215 | ||
216 | static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) | 216 | static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) |
217 | { | 217 | { |
@@ -359,7 +359,7 @@ struct llc_xid_info { | |||
359 | u8 fmt_id; /* always 0x81 for LLC */ | 359 | u8 fmt_id; /* always 0x81 for LLC */ |
360 | u8 type; /* different if NULL/non-NULL LSAP */ | 360 | u8 type; /* different if NULL/non-NULL LSAP */ |
361 | u8 rw; /* sender receive window */ | 361 | u8 rw; /* sender receive window */ |
362 | }; | 362 | } __packed; |
363 | 363 | ||
364 | /** | 364 | /** |
365 | * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID | 365 | * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID |
@@ -415,7 +415,7 @@ struct llc_frmr_info { | |||
415 | u8 curr_ssv; /* current send state variable val */ | 415 | u8 curr_ssv; /* current send state variable val */ |
416 | u8 curr_rsv; /* current receive state variable */ | 416 | u8 curr_rsv; /* current receive state variable */ |
417 | u8 ind_bits; /* indicator bits set with macro */ | 417 | u8 ind_bits; /* indicator bits set with macro */ |
418 | }; | 418 | } __packed; |
419 | 419 | ||
420 | extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); | 420 | extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); |
421 | extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); | 421 | extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); |
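Adding __packed to the on-wire LLC headers guarantees that no compiler inserts padding or over-aligns these structures, which are cast directly onto skb data at 802.2 header offsets. A compile-time check in the same spirit, as an illustrative sketch to be placed inside some init function:

	/* 802.2 U-format header: DSAP, SSAP, ctrl_1 = 3 octets */
	BUILD_BUG_ON(sizeof(struct llc_pdu_un) != 3);
	/* S/I-format header adds ctrl_2 = 4 octets */
	BUILD_BUG_ON(sizeof(struct llc_pdu_sn) != 4);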
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 505845ddb0be..01e094c6d0ae 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -115,7 +115,6 @@ | |||
115 | * sctp/protocol.c | 115 | * sctp/protocol.c |
116 | */ | 116 | */ |
117 | extern struct sock *sctp_get_ctl_sock(void); | 117 | extern struct sock *sctp_get_ctl_sock(void); |
118 | extern void sctp_local_addr_free(struct rcu_head *head); | ||
119 | extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, | 118 | extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, |
120 | sctp_scope_t, gfp_t gfp, | 119 | sctp_scope_t, gfp_t gfp, |
121 | int flags); | 120 | int flags); |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6ae4bc5ce8a7..20afeaa39395 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -324,6 +324,7 @@ struct xfrm_state_afinfo { | |||
324 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); | 324 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); |
325 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); | 325 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); |
326 | int (*output)(struct sk_buff *skb); | 326 | int (*output)(struct sk_buff *skb); |
327 | int (*output_finish)(struct sk_buff *skb); | ||
327 | int (*extract_input)(struct xfrm_state *x, | 328 | int (*extract_input)(struct xfrm_state *x, |
328 | struct sk_buff *skb); | 329 | struct sk_buff *skb); |
329 | int (*extract_output)(struct xfrm_state *x, | 330 | int (*extract_output)(struct xfrm_state *x, |
@@ -1454,6 +1455,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) | |||
1454 | extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); | 1455 | extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
1455 | extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); | 1456 | extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); |
1456 | extern int xfrm4_output(struct sk_buff *skb); | 1457 | extern int xfrm4_output(struct sk_buff *skb); |
1458 | extern int xfrm4_output_finish(struct sk_buff *skb); | ||
1457 | extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); | 1459 | extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); |
1458 | extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); | 1460 | extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); |
1459 | extern int xfrm6_extract_header(struct sk_buff *skb); | 1461 | extern int xfrm6_extract_header(struct sk_buff *skb); |
@@ -1470,6 +1472,7 @@ extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr); | |||
1470 | extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); | 1472 | extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
1471 | extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); | 1473 | extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); |
1472 | extern int xfrm6_output(struct sk_buff *skb); | 1474 | extern int xfrm6_output(struct sk_buff *skb); |
1475 | extern int xfrm6_output_finish(struct sk_buff *skb); | ||
1473 | extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, | 1476 | extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, |
1474 | u8 **prevhdr); | 1477 | u8 **prevhdr); |
1475 | 1478 | ||
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h index cbb822e8d791..2d0191c90f9e 100644 --- a/include/rdma/iw_cm.h +++ b/include/rdma/iw_cm.h | |||
@@ -46,18 +46,9 @@ enum iw_cm_event_type { | |||
46 | IW_CM_EVENT_CLOSE /* close complete */ | 46 | IW_CM_EVENT_CLOSE /* close complete */ |
47 | }; | 47 | }; |
48 | 48 | ||
49 | enum iw_cm_event_status { | ||
50 | IW_CM_EVENT_STATUS_OK = 0, /* request successful */ | ||
51 | IW_CM_EVENT_STATUS_ACCEPTED = 0, /* connect request accepted */ | ||
52 | IW_CM_EVENT_STATUS_REJECTED, /* connect request rejected */ | ||
53 | IW_CM_EVENT_STATUS_TIMEOUT, /* the operation timed out */ | ||
54 | IW_CM_EVENT_STATUS_RESET, /* reset from remote peer */ | ||
55 | IW_CM_EVENT_STATUS_EINVAL, /* asynchronous failure for bad parm */ | ||
56 | }; | ||
57 | |||
58 | struct iw_cm_event { | 49 | struct iw_cm_event { |
59 | enum iw_cm_event_type event; | 50 | enum iw_cm_event_type event; |
60 | enum iw_cm_event_status status; | 51 | int status; |
61 | struct sockaddr_in local_addr; | 52 | struct sockaddr_in local_addr; |
62 | struct sockaddr_in remote_addr; | 53 | struct sockaddr_in remote_addr; |
63 | void *private_data; | 54 | void *private_data; |
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 4fae90304648..169f7a53fb0c 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h | |||
@@ -329,4 +329,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr); | |||
329 | */ | 329 | */ |
330 | void rdma_set_service_type(struct rdma_cm_id *id, int tos); | 330 | void rdma_set_service_type(struct rdma_cm_id *id, int tos); |
331 | 331 | ||
332 | /** | ||
333 | * rdma_set_reuseaddr - Allow the reuse of local addresses when binding | ||
334 | * the rdma_cm_id. | ||
335 | * @id: Communication identifier to configure. | ||
336 | * @reuse: Value indicating if the bound address is reusable. | ||
337 | * | ||
338 | * Reuse must be set before an address is bound to the id. | ||
339 | */ | ||
340 | int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse); | ||
341 | |||
332 | #endif /* RDMA_CM_H */ | 342 | #endif /* RDMA_CM_H */ |
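From userspace the same knob is reachable through rdma_set_option() with the RDMA_OPTION_ID_REUSEADDR value added in the rdma_user_cm.h hunk below. A hedged librdmacm sketch, mirroring the SO_REUSEADDR idiom and assuming a matching librdmacm:

	int reuse = 1;

	/* Per the kdoc above, this must precede rdma_bind_addr(). */
	if (rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
			    &reuse, sizeof(reuse)))
		perror("rdma_set_option(REUSEADDR)");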
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h index 1d165022c02d..fc82c1896f75 100644 --- a/include/rdma/rdma_user_cm.h +++ b/include/rdma/rdma_user_cm.h | |||
@@ -221,8 +221,9 @@ enum { | |||
221 | 221 | ||
222 | /* Option details */ | 222 | /* Option details */ |
223 | enum { | 223 | enum { |
224 | RDMA_OPTION_ID_TOS = 0, | 224 | RDMA_OPTION_ID_TOS = 0, |
225 | RDMA_OPTION_IB_PATH = 1 | 225 | RDMA_OPTION_ID_REUSEADDR = 1, |
226 | RDMA_OPTION_IB_PATH = 1 | ||
226 | }; | 227 | }; |
227 | 228 | ||
228 | struct rdma_ucm_set_option { | 229 | struct rdma_ucm_set_option { |
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 2d3ec5094685..dd82e02ddde3 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h | |||
@@ -169,6 +169,7 @@ struct scsi_device { | |||
169 | sdev_dev; | 169 | sdev_dev; |
170 | 170 | ||
171 | struct execute_work ew; /* used to get process context on put */ | 171 | struct execute_work ew; /* used to get process context on put */ |
172 | struct work_struct requeue_work; | ||
172 | 173 | ||
173 | struct scsi_dh_data *scsi_dh_data; | 174 | struct scsi_dh_data *scsi_dh_data; |
174 | enum scsi_device_state sdev_state; | 175 | enum scsi_device_state sdev_state; |
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h index e3615c093741..9fe3a36646e9 100644 --- a/include/trace/events/gfpflags.h +++ b/include/trace/events/gfpflags.h | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | #define show_gfp_flags(flags) \ | 11 | #define show_gfp_flags(flags) \ |
12 | (flags) ? __print_flags(flags, "|", \ | 12 | (flags) ? __print_flags(flags, "|", \ |
13 | {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \ | ||
13 | {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ | 14 | {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ |
14 | {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ | 15 | {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ |
15 | {(unsigned long)GFP_USER, "GFP_USER"}, \ | 16 | {(unsigned long)GFP_USER, "GFP_USER"}, \ |
@@ -32,6 +33,9 @@ | |||
32 | {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ | 33 | {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ |
33 | {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ | 34 | {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ |
34 | {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ | 35 | {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ |
35 | {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ | 36 | {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ |
37 | {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ | ||
38 | {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \ | ||
39 | {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \ | ||
36 | ) : "GFP_NOWAIT" | 40 | ) : "GFP_NOWAIT" |
37 | 41 | ||
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index 1c09820df585..ae045ca7d356 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h | |||
@@ -20,8 +20,7 @@ struct softirq_action; | |||
20 | softirq_name(BLOCK_IOPOLL), \ | 20 | softirq_name(BLOCK_IOPOLL), \ |
21 | softirq_name(TASKLET), \ | 21 | softirq_name(TASKLET), \ |
22 | softirq_name(SCHED), \ | 22 | softirq_name(SCHED), \ |
23 | softirq_name(HRTIMER), \ | 23 | softirq_name(HRTIMER)) |
24 | softirq_name(RCU)) | ||
25 | 24 | ||
26 | /** | 25 | /** |
27 | * irq_handler_entry - called immediately before the irq action handler | 26 | * irq_handler_entry - called immediately before the irq action handler |
diff --git a/include/xen/events.h b/include/xen/events.h index f1b87ad48ac7..9af21e19545a 100644 --- a/include/xen/events.h +++ b/include/xen/events.h | |||
@@ -85,7 +85,8 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
85 | int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); | 85 | int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); |
86 | /* Bind an MSI pirq to an irq. */ | 86 |
87 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | 87 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
88 | int pirq, int vector, const char *name); | 88 | int pirq, int vector, const char *name, |
89 | domid_t domid); | ||
89 | #endif | 90 | #endif |
90 | 91 | ||
91 | /* De-allocates the above mentioned physical interrupt. */ | 92 | /* De-allocates the above mentioned physical interrupt. */ |
@@ -94,4 +95,10 @@ int xen_destroy_irq(int irq); | |||
94 | /* Return irq from pirq */ | 95 | /* Return irq from pirq */ |
95 | int xen_irq_from_pirq(unsigned pirq); | 96 | int xen_irq_from_pirq(unsigned pirq); |
96 | 97 | ||
98 | /* Return the pirq allocated to the irq. */ | ||
99 | int xen_pirq_from_irq(unsigned irq); | ||
100 | |||
101 | /* Determine whether to ignore this IRQ if it is passed to a guest. */ | ||
102 | int xen_test_irq_shared(int irq); | ||
103 | |||
97 | #endif /* _XEN_EVENTS_H */ | 104 | #endif /* _XEN_EVENTS_H */ |
diff --git a/init/Kconfig b/init/Kconfig index d886b1e9278e..4986ecc49e65 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -485,7 +485,7 @@ config TREE_RCU_TRACE | |||
485 | 485 | ||
486 | config RCU_BOOST | 486 | config RCU_BOOST |
487 | bool "Enable RCU priority boosting" | 487 | bool "Enable RCU priority boosting" |
488 | depends on RT_MUTEXES && TINY_PREEMPT_RCU | 488 | depends on RT_MUTEXES && PREEMPT_RCU |
489 | default n | 489 | default n |
490 | help | 490 | help |
491 | This option boosts the priority of preempted RCU readers that | 491 | This option boosts the priority of preempted RCU readers that |
@@ -827,6 +827,11 @@ config SCHED_AUTOGROUP | |||
827 | desktop applications. Task group autogeneration is currently based | 827 | desktop applications. Task group autogeneration is currently based |
828 | upon task session. | 828 | upon task session. |
829 | 829 | ||
830 | config SCHED_TTWU_QUEUE | ||
831 | bool | ||
832 | depends on !SPARC32 | ||
833 | default y | ||
834 | |||
830 | config MM_OWNER | 835 | config MM_OWNER |
831 | bool | 836 | bool |
832 | 837 | ||
@@ -1226,7 +1231,6 @@ config SLAB | |||
1226 | per cpu and per node queues. | 1231 | per cpu and per node queues. |
1227 | 1232 | ||
1228 | config SLUB | 1233 | config SLUB |
1229 | depends on BROKEN || NUMA || !DISCONTIGMEM | ||
1230 | bool "SLUB (Unqueued Allocator)" | 1234 | bool "SLUB (Unqueued Allocator)" |
1231 | help | 1235 | help |
1232 | SLUB is a slab allocator that minimizes cache line usage | 1236 | SLUB is a slab allocator that minimizes cache line usage |
diff --git a/init/main.c b/init/main.c index 4a9479ef4540..48df882d51d2 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -580,8 +580,8 @@ asmlinkage void __init start_kernel(void) | |||
580 | #endif | 580 | #endif |
581 | page_cgroup_init(); | 581 | page_cgroup_init(); |
582 | enable_debug_pagealloc(); | 582 | enable_debug_pagealloc(); |
583 | kmemleak_init(); | ||
584 | debug_objects_mem_init(); | 583 | debug_objects_mem_init(); |
584 | kmemleak_init(); | ||
585 | setup_per_cpu_pageset(); | 585 | setup_per_cpu_pageset(); |
586 | numa_policy_init(); | 586 | numa_policy_init(); |
587 | if (late_time_init) | 587 | if (late_time_init) |
diff --git a/kernel/capability.c b/kernel/capability.c index bf0c734d0c12..32a80e08ff4b 100644 --- a/kernel/capability.c +++ b/kernel/capability.c | |||
@@ -399,3 +399,15 @@ bool task_ns_capable(struct task_struct *t, int cap) | |||
399 | return ns_capable(task_cred_xxx(t, user)->user_ns, cap); | 399 | return ns_capable(task_cred_xxx(t, user)->user_ns, cap); |
400 | } | 400 | } |
401 | EXPORT_SYMBOL(task_ns_capable); | 401 | EXPORT_SYMBOL(task_ns_capable); |
402 | |||
403 | /** | ||
404 | * nsown_capable - Check superior capability to one's own user_ns | ||
405 | * @cap: The capability in question | ||
406 | * | ||
407 | * Return true if the current task has the given superior capability | ||
408 | * targeted at its own user namespace. | ||
409 | */ | ||
410 | bool nsown_capable(int cap) | ||
411 | { | ||
412 | return ns_capable(current_user_ns(), cap); | ||
413 | } | ||
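nsown_capable(cap) is shorthand for ns_capable(current_user_ns(), cap), so permission checks can honor privilege inside the caller's own user namespace rather than only global root. A typical call-site sketch; the specific check is hypothetical:

	/* Privileged within its own user namespace is enough here. */
	if (!nsown_capable(CAP_SETUID))
		return -EPERM;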
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 25c7eb52de1a..909a35510af5 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -326,12 +326,6 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[]) | |||
326 | return &css_set_table[index]; | 326 | return &css_set_table[index]; |
327 | } | 327 | } |
328 | 328 | ||
329 | static void free_css_set_rcu(struct rcu_head *obj) | ||
330 | { | ||
331 | struct css_set *cg = container_of(obj, struct css_set, rcu_head); | ||
332 | kfree(cg); | ||
333 | } | ||
334 | |||
335 | /* We don't maintain the lists running through each css_set to its | 329 | /* We don't maintain the lists running through each css_set to its |
336 | * task until after the first call to cgroup_iter_start(). This | 330 | * task until after the first call to cgroup_iter_start(). This |
337 | * reduces the fork()/exit() overhead for people who have cgroups | 331 | * reduces the fork()/exit() overhead for people who have cgroups |
@@ -375,7 +369,7 @@ static void __put_css_set(struct css_set *cg, int taskexit) | |||
375 | } | 369 | } |
376 | 370 | ||
377 | write_unlock(&css_set_lock); | 371 | write_unlock(&css_set_lock); |
378 | call_rcu(&cg->rcu_head, free_css_set_rcu); | 372 | kfree_rcu(cg, rcu_head); |
379 | } | 373 | } |
380 | 374 | ||
381 | /* | 375 | /* |
@@ -812,13 +806,6 @@ static int cgroup_call_pre_destroy(struct cgroup *cgrp) | |||
812 | return ret; | 806 | return ret; |
813 | } | 807 | } |
814 | 808 | ||
815 | static void free_cgroup_rcu(struct rcu_head *obj) | ||
816 | { | ||
817 | struct cgroup *cgrp = container_of(obj, struct cgroup, rcu_head); | ||
818 | |||
819 | kfree(cgrp); | ||
820 | } | ||
821 | |||
822 | static void cgroup_diput(struct dentry *dentry, struct inode *inode) | 809 | static void cgroup_diput(struct dentry *dentry, struct inode *inode) |
823 | { | 810 | { |
824 | /* is dentry a directory ? if so, kfree() associated cgroup */ | 811 | /* is dentry a directory ? if so, kfree() associated cgroup */ |
@@ -856,7 +843,7 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) | |||
856 | */ | 843 | */ |
857 | BUG_ON(!list_empty(&cgrp->pidlists)); | 844 | BUG_ON(!list_empty(&cgrp->pidlists)); |
858 | 845 | ||
859 | call_rcu(&cgrp->rcu_head, free_cgroup_rcu); | 846 | kfree_rcu(cgrp, rcu_head); |
860 | } | 847 | } |
861 | iput(inode); | 848 | iput(inode); |
862 | } | 849 | } |
@@ -4623,14 +4610,6 @@ bool css_is_ancestor(struct cgroup_subsys_state *child, | |||
4623 | return ret; | 4610 | return ret; |
4624 | } | 4611 | } |
4625 | 4612 | ||
4626 | static void __free_css_id_cb(struct rcu_head *head) | ||
4627 | { | ||
4628 | struct css_id *id; | ||
4629 | |||
4630 | id = container_of(head, struct css_id, rcu_head); | ||
4631 | kfree(id); | ||
4632 | } | ||
4633 | |||
4634 | void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css) | 4613 | void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css) |
4635 | { | 4614 | { |
4636 | struct css_id *id = css->id; | 4615 | struct css_id *id = css->id; |
@@ -4645,7 +4624,7 @@ void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css) | |||
4645 | spin_lock(&ss->id_lock); | 4624 | spin_lock(&ss->id_lock); |
4646 | idr_remove(&ss->idr, id->id); | 4625 | idr_remove(&ss->idr, id->id); |
4647 | spin_unlock(&ss->id_lock); | 4626 | spin_unlock(&ss->id_lock); |
4648 | call_rcu(&id->rcu_head, __free_css_id_cb); | 4627 | kfree_rcu(id, rcu_head); |
4649 | } | 4628 | } |
4650 | EXPORT_SYMBOL_GPL(free_css_id); | 4629 | EXPORT_SYMBOL_GPL(free_css_id); |
4651 | 4630 | ||
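The conversions above replace single-purpose RCU callbacks -- container_of() plus kfree() and nothing else -- with kfree_rcu(). The pattern only requires the struct to embed a struct rcu_head; sketched on a hypothetical type:

struct foo {
	int data;
	struct rcu_head rcu_head;
};

static void foo_release(struct foo *p)
{
	/* Frees p after a grace period; no dedicated callback needed. */
	kfree_rcu(p, rcu_head);
}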
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 33eee16addb8..2bb8c2e98fff 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -1159,7 +1159,7 @@ int current_cpuset_is_being_rebound(void) | |||
1159 | static int update_relax_domain_level(struct cpuset *cs, s64 val) | 1159 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
1160 | { | 1160 | { |
1161 | #ifdef CONFIG_SMP | 1161 | #ifdef CONFIG_SMP |
1162 | if (val < -1 || val >= SD_LV_MAX) | 1162 | if (val < -1 || val >= sched_domain_level_max) |
1163 | return -EINVAL; | 1163 | return -EINVAL; |
1164 | #endif | 1164 | #endif |
1165 | 1165 | ||
diff --git a/kernel/cred.c b/kernel/cred.c index 5557b55048df..8093c16b84b1 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
@@ -54,6 +54,7 @@ struct cred init_cred = { | |||
54 | .cap_effective = CAP_INIT_EFF_SET, | 54 | .cap_effective = CAP_INIT_EFF_SET, |
55 | .cap_bset = CAP_INIT_BSET, | 55 | .cap_bset = CAP_INIT_BSET, |
56 | .user = INIT_USER, | 56 | .user = INIT_USER, |
57 | .user_ns = &init_user_ns, | ||
57 | .group_info = &init_groups, | 58 | .group_info = &init_groups, |
58 | #ifdef CONFIG_KEYS | 59 | #ifdef CONFIG_KEYS |
59 | .tgcred = &init_tgcred, | 60 | .tgcred = &init_tgcred, |
@@ -410,6 +411,11 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) | |||
410 | goto error_put; | 411 | goto error_put; |
411 | } | 412 | } |
412 | 413 | ||
414 | /* cache user_ns in cred. Doesn't need a refcount because it will | ||
415 | * stay pinned by cred->user | ||
416 | */ | ||
417 | new->user_ns = new->user->user_ns; | ||
418 | |||
413 | #ifdef CONFIG_KEYS | 419 | #ifdef CONFIG_KEYS |
414 | /* new threads get their own thread keyrings if their parent already | 420 | /* new threads get their own thread keyrings if their parent already |
415 | * had one */ | 421 | * had one */ |
@@ -741,12 +747,6 @@ int set_create_files_as(struct cred *new, struct inode *inode) | |||
741 | } | 747 | } |
742 | EXPORT_SYMBOL(set_create_files_as); | 748 | EXPORT_SYMBOL(set_create_files_as); |
743 | 749 | ||
744 | struct user_namespace *current_user_ns(void) | ||
745 | { | ||
746 | return _current_user_ns(); | ||
747 | } | ||
748 | EXPORT_SYMBOL(current_user_ns); | ||
749 | |||
750 | #ifdef CONFIG_DEBUG_CREDENTIALS | 750 | #ifdef CONFIG_DEBUG_CREDENTIALS |
751 | 751 | ||
752 | bool creds_are_invalid(const struct cred *cred) | 752 | bool creds_are_invalid(const struct cred *cred) |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 0fc34a370ba4..c09767f7db3e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -586,14 +586,6 @@ static void get_ctx(struct perf_event_context *ctx) | |||
586 | WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); | 586 | WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); |
587 | } | 587 | } |
588 | 588 | ||
589 | static void free_ctx(struct rcu_head *head) | ||
590 | { | ||
591 | struct perf_event_context *ctx; | ||
592 | |||
593 | ctx = container_of(head, struct perf_event_context, rcu_head); | ||
594 | kfree(ctx); | ||
595 | } | ||
596 | |||
597 | static void put_ctx(struct perf_event_context *ctx) | 589 | static void put_ctx(struct perf_event_context *ctx) |
598 | { | 590 | { |
599 | if (atomic_dec_and_test(&ctx->refcount)) { | 591 | if (atomic_dec_and_test(&ctx->refcount)) { |
@@ -601,7 +593,7 @@ static void put_ctx(struct perf_event_context *ctx) | |||
601 | put_ctx(ctx->parent_ctx); | 593 | put_ctx(ctx->parent_ctx); |
602 | if (ctx->task) | 594 | if (ctx->task) |
603 | put_task_struct(ctx->task); | 595 | put_task_struct(ctx->task); |
604 | call_rcu(&ctx->rcu_head, free_ctx); | 596 | kfree_rcu(ctx, rcu_head); |
605 | } | 597 | } |
606 | } | 598 | } |
607 | 599 | ||
@@ -5331,14 +5323,6 @@ swevent_hlist_deref(struct swevent_htable *swhash) | |||
5331 | lockdep_is_held(&swhash->hlist_mutex)); | 5323 | lockdep_is_held(&swhash->hlist_mutex)); |
5332 | } | 5324 | } |
5333 | 5325 | ||
5334 | static void swevent_hlist_release_rcu(struct rcu_head *rcu_head) | ||
5335 | { | ||
5336 | struct swevent_hlist *hlist; | ||
5337 | |||
5338 | hlist = container_of(rcu_head, struct swevent_hlist, rcu_head); | ||
5339 | kfree(hlist); | ||
5340 | } | ||
5341 | |||
5342 | static void swevent_hlist_release(struct swevent_htable *swhash) | 5326 | static void swevent_hlist_release(struct swevent_htable *swhash) |
5343 | { | 5327 | { |
5344 | struct swevent_hlist *hlist = swevent_hlist_deref(swhash); | 5328 | struct swevent_hlist *hlist = swevent_hlist_deref(swhash); |
@@ -5347,7 +5331,7 @@ static void swevent_hlist_release(struct swevent_htable *swhash) | |||
5347 | return; | 5331 | return; |
5348 | 5332 | ||
5349 | rcu_assign_pointer(swhash->swevent_hlist, NULL); | 5333 | rcu_assign_pointer(swhash->swevent_hlist, NULL); |
5350 | call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu); | 5334 | kfree_rcu(hlist, rcu_head); |
5351 | } | 5335 | } |
5352 | 5336 | ||
5353 | static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) | 5337 | static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) |
diff --git a/kernel/fork.c b/kernel/fork.c index e7548dee636b..2b44d82b8237 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1103,7 +1103,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1103 | 1103 | ||
1104 | posix_cpu_timers_init(p); | 1104 | posix_cpu_timers_init(p); |
1105 | 1105 | ||
1106 | p->lock_depth = -1; /* -1 = no lock */ | ||
1107 | do_posix_clock_monotonic_gettime(&p->start_time); | 1106 | do_posix_clock_monotonic_gettime(&p->start_time); |
1108 | p->real_start_time = p->start_time; | 1107 | p->real_start_time = p->start_time; |
1109 | monotonic_to_bootbased(&p->real_start_time); | 1108 | monotonic_to_bootbased(&p->real_start_time); |
@@ -1153,7 +1152,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1153 | #endif | 1152 | #endif |
1154 | 1153 | ||
1155 | /* Perform scheduler related setup. Assign this task to a CPU. */ | 1154 | /* Perform scheduler related setup. Assign this task to a CPU. */ |
1156 | sched_fork(p, clone_flags); | 1155 | sched_fork(p); |
1157 | 1156 | ||
1158 | retval = perf_event_init_task(p); | 1157 | retval = perf_event_init_task(p); |
1159 | if (retval) | 1158 | if (retval) |
@@ -1464,7 +1463,7 @@ long do_fork(unsigned long clone_flags, | |||
1464 | */ | 1463 | */ |
1465 | p->flags &= ~PF_STARTING; | 1464 | p->flags &= ~PF_STARTING; |
1466 | 1465 | ||
1467 | wake_up_new_task(p, clone_flags); | 1466 | wake_up_new_task(p); |
1468 | 1467 | ||
1469 | tracehook_report_clone_complete(trace, regs, | 1468 | tracehook_report_clone_complete(trace, regs, |
1470 | clone_flags, nr, p); | 1469 | clone_flags, nr, p); |
diff --git a/kernel/freezer.c b/kernel/freezer.c index 66ecd2ead215..7b01de98bb6a 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c | |||
@@ -17,7 +17,7 @@ static inline void frozen_process(void) | |||
17 | { | 17 | { |
18 | if (!unlikely(current->flags & PF_NOFREEZE)) { | 18 | if (!unlikely(current->flags & PF_NOFREEZE)) { |
19 | current->flags |= PF_FROZEN; | 19 | current->flags |= PF_FROZEN; |
20 | wmb(); | 20 | smp_wmb(); |
21 | } | 21 | } |
22 | clear_freeze_flag(current); | 22 | clear_freeze_flag(current); |
23 | } | 23 | } |
@@ -93,7 +93,7 @@ bool freeze_task(struct task_struct *p, bool sig_only) | |||
93 | * the task as frozen and next clears its TIF_FREEZE. | 93 | * the task as frozen and next clears its TIF_FREEZE. |
94 | */ | 94 | */ |
95 | if (!freezing(p)) { | 95 | if (!freezing(p)) { |
96 | rmb(); | 96 | smp_rmb(); |
97 | if (frozen(p)) | 97 | if (frozen(p)) |
98 | return false; | 98 | return false; |
99 | 99 | ||
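smp_wmb()/smp_rmb() preserve the SMP ordering guarantee while compiling to nothing on uniprocessor builds, where the mandatory wmb()/rmb() would still emit needless hardware barriers. The producer/consumer pairing the freezer relies on, reduced to a generic sketch:

static int data, ready;

static void writer(void)
{
	data = 42;
	smp_wmb();	/* order the data store before the flag store */
	ready = 1;
}

static int reader(void)
{
	if (ready) {
		smp_rmb();	/* order the flag load before the data load */
		return data;	/* sees 42 if "ready" was seen */
	}
	return -1;
}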
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 87fdb3f8db14..dbbbf7d43080 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -81,7 +81,7 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = | |||
81 | } | 81 | } |
82 | }; | 82 | }; |
83 | 83 | ||
84 | static int hrtimer_clock_to_base_table[MAX_CLOCKS] = { | 84 | static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { |
85 | [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, | 85 | [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, |
86 | [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, | 86 | [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, |
87 | [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME, | 87 | [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME, |
diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 53ead174da2f..ea640120ab86 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c | |||
@@ -33,7 +33,7 @@ unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT; | |||
33 | /* | 33 | /* |
34 | * Zero means infinite timeout - no checking done: | 34 | * Zero means infinite timeout - no checking done: |
35 | */ | 35 | */ |
36 | unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; | 36 | unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT; |
37 | 37 | ||
38 | unsigned long __read_mostly sysctl_hung_task_warnings = 10; | 38 | unsigned long __read_mostly sysctl_hung_task_warnings = 10; |
39 | 39 | ||
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index c574f9a12c48..d1d051b38e0b 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig | |||
@@ -48,6 +48,10 @@ config IRQ_PREFLOW_FASTEOI | |||
48 | config IRQ_EDGE_EOI_HANDLER | 48 | config IRQ_EDGE_EOI_HANDLER |
49 | bool | 49 | bool |
50 | 50 | ||
51 | # Generic configurable interrupt chip implementation | ||
52 | config GENERIC_IRQ_CHIP | ||
53 | bool | ||
54 | |||
51 | # Support forced irq threading | 55 | # Support forced irq threading |
52 | config IRQ_FORCED_THREADING | 56 | config IRQ_FORCED_THREADING |
53 | bool | 57 | bool |
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile index 54329cd7b3ee..73290056cfb6 100644 --- a/kernel/irq/Makefile +++ b/kernel/irq/Makefile | |||
@@ -1,5 +1,6 @@ | |||
1 | 1 | ||
2 | obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o | 2 | obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o |
3 | obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o | ||
3 | obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o | 4 | obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o |
4 | obj-$(CONFIG_PROC_FS) += proc.o | 5 | obj-$(CONFIG_PROC_FS) += proc.o |
5 | obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o | 6 | obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 4af1e2b244cb..d5a3009da71a 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -310,6 +310,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) | |||
310 | out_unlock: | 310 | out_unlock: |
311 | raw_spin_unlock(&desc->lock); | 311 | raw_spin_unlock(&desc->lock); |
312 | } | 312 | } |
313 | EXPORT_SYMBOL_GPL(handle_simple_irq); | ||
313 | 314 | ||
314 | /** | 315 | /** |
315 | * handle_level_irq - Level type irq handler | 316 | * handle_level_irq - Level type irq handler |
@@ -573,6 +574,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
573 | if (handle != handle_bad_irq && is_chained) { | 574 | if (handle != handle_bad_irq && is_chained) { |
574 | irq_settings_set_noprobe(desc); | 575 | irq_settings_set_noprobe(desc); |
575 | irq_settings_set_norequest(desc); | 576 | irq_settings_set_norequest(desc); |
577 | irq_settings_set_nothread(desc); | ||
576 | irq_startup(desc); | 578 | irq_startup(desc); |
577 | } | 579 | } |
578 | out: | 580 | out: |
@@ -612,6 +614,7 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) | |||
612 | 614 | ||
613 | irq_put_desc_unlock(desc, flags); | 615 | irq_put_desc_unlock(desc, flags); |
614 | } | 616 | } |
617 | EXPORT_SYMBOL_GPL(irq_modify_status); | ||
615 | 618 | ||
616 | /** | 619 | /** |
617 | * irq_cpu_online - Invoke all irq_cpu_online functions. | 620 | * irq_cpu_online - Invoke all irq_cpu_online functions. |
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h index 306cba37e9a5..97a8bfadc88a 100644 --- a/kernel/irq/debug.h +++ b/kernel/irq/debug.h | |||
@@ -27,6 +27,7 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | |||
27 | P(IRQ_PER_CPU); | 27 | P(IRQ_PER_CPU); |
28 | P(IRQ_NOPROBE); | 28 | P(IRQ_NOPROBE); |
29 | P(IRQ_NOREQUEST); | 29 | P(IRQ_NOREQUEST); |
30 | P(IRQ_NOTHREAD); | ||
30 | P(IRQ_NOAUTOEN); | 31 | P(IRQ_NOAUTOEN); |
31 | 32 | ||
32 | PS(IRQS_AUTODETECT); | 33 | PS(IRQS_AUTODETECT); |
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c new file mode 100644 index 000000000000..31a9db711906 --- /dev/null +++ b/kernel/irq/generic-chip.c | |||
@@ -0,0 +1,354 @@ | |||
1 | /* | ||
2 | * Library implementing the most common irq chip callback functions | ||
3 | * | ||
4 | * Copyright (C) 2011, Thomas Gleixner | ||
5 | */ | ||
6 | #include <linux/io.h> | ||
7 | #include <linux/irq.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/kernel_stat.h> | ||
11 | #include <linux/syscore_ops.h> | ||
12 | |||
13 | #include "internals.h" | ||
14 | |||
15 | static LIST_HEAD(gc_list); | ||
16 | static DEFINE_RAW_SPINLOCK(gc_lock); | ||
17 | |||
18 | static inline struct irq_chip_regs *cur_regs(struct irq_data *d) | ||
19 | { | ||
20 | return &container_of(d->chip, struct irq_chip_type, chip)->regs; | ||
21 | } | ||
22 | |||
23 | /** | ||
24 | * irq_gc_noop - NOOP function | ||
25 | * @d: irq_data | ||
26 | */ | ||
27 | void irq_gc_noop(struct irq_data *d) | ||
28 | { | ||
29 | } | ||
30 | |||
31 | /** | ||
32 | * irq_gc_mask_disable_reg - Mask chip via disable register | ||
33 | * @d: irq_data | ||
34 | * | ||
35 | * Chip has separate enable/disable registers instead of a single mask | ||
36 | * register. | ||
37 | */ | ||
38 | void irq_gc_mask_disable_reg(struct irq_data *d) | ||
39 | { | ||
40 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
41 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
42 | |||
43 | irq_gc_lock(gc); | ||
44 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable); | ||
45 | gc->mask_cache &= ~mask; | ||
46 | irq_gc_unlock(gc); | ||
47 | } | ||
48 | |||
49 | /** | ||
50 | * irq_gc_mask_set_mask_bit - Mask chip via setting bit in mask register | ||
51 | * @d: irq_data | ||
52 | * | ||
53 | * Chip has a single mask register. Values of this register are cached | ||
54 | * and protected by gc->lock | ||
55 | */ | ||
56 | void irq_gc_mask_set_bit(struct irq_data *d) | ||
57 | { | ||
58 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
59 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
60 | |||
61 | irq_gc_lock(gc); | ||
62 | gc->mask_cache |= mask; | ||
63 | irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); | ||
64 | irq_gc_unlock(gc); | ||
65 | } | ||
66 | |||
67 | /** | ||
68 | * irq_gc_mask_set_mask_bit - Mask chip via clearing bit in mask register | ||
69 | * @d: irq_data | ||
70 | * | ||
71 | * Chip has a single mask register. Values of this register are cached | ||
72 | * and protected by gc->lock | ||
73 | */ | ||
74 | void irq_gc_mask_clr_bit(struct irq_data *d) | ||
75 | { | ||
76 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
77 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
78 | |||
79 | irq_gc_lock(gc); | ||
80 | gc->mask_cache &= ~mask; | ||
81 | irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); | ||
82 | irq_gc_unlock(gc); | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * irq_gc_unmask_enable_reg - Unmask chip via enable register | ||
87 | * @d: irq_data | ||
88 | * | ||
89 | * Chip has separate enable/disable registers instead of a single mask | ||
90 | * register. | ||
91 | */ | ||
92 | void irq_gc_unmask_enable_reg(struct irq_data *d) | ||
93 | { | ||
94 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
95 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
96 | |||
97 | irq_gc_lock(gc); | ||
98 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable); | ||
99 | gc->mask_cache |= mask; | ||
100 | irq_gc_unlock(gc); | ||
101 | } | ||
102 | |||
103 | /** | ||
104 | * irq_gc_ack - Ack pending interrupt | ||
105 | * @d: irq_data | ||
106 | */ | ||
107 | void irq_gc_ack(struct irq_data *d) | ||
108 | { | ||
109 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
110 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
111 | |||
112 | irq_gc_lock(gc); | ||
113 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); | ||
114 | irq_gc_unlock(gc); | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt | ||
119 | * @d: irq_data | ||
120 | */ | ||
121 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) | ||
122 | { | ||
123 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
124 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
125 | |||
126 | irq_gc_lock(gc); | ||
127 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask); | ||
128 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); | ||
129 | irq_gc_unlock(gc); | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * irq_gc_eoi - EOI interrupt | ||
134 | * @d: irq_data | ||
135 | */ | ||
136 | void irq_gc_eoi(struct irq_data *d) | ||
137 | { | ||
138 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
139 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
140 | |||
141 | irq_gc_lock(gc); | ||
142 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi); | ||
143 | irq_gc_unlock(gc); | ||
144 | } | ||
145 | |||
146 | /** | ||
147 | * irq_gc_set_wake - Set/clr wake bit for an interrupt | ||
148 | * @d: irq_data | ||
149 | * | ||
150 | * For chips where the wake from suspend functionality is not | ||
151 | * configured in a separate register and the wakeup active state is | ||
152 | * just stored in a bitmask. | ||
153 | */ | ||
154 | int irq_gc_set_wake(struct irq_data *d, unsigned int on) | ||
155 | { | ||
156 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
157 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
158 | |||
159 | if (!(mask & gc->wake_enabled)) | ||
160 | return -EINVAL; | ||
161 | |||
162 | irq_gc_lock(gc); | ||
163 | if (on) | ||
164 | gc->wake_active |= mask; | ||
165 | else | ||
166 | gc->wake_active &= ~mask; | ||
167 | irq_gc_unlock(gc); | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | /** | ||
172 | * irq_alloc_generic_chip - Allocate a generic chip and initialize it | ||
173 | * @name: Name of the irq chip | ||
174 | * @num_ct: Number of irq_chip_type instances associated with this chip | ||
175 | * @irq_base: Interrupt base nr for this chip | ||
176 | * @reg_base: Register base address (virtual) | ||
177 | * @handler: Default flow handler associated with this chip | ||
178 | * | ||
179 | * Returns an initialized irq_chip_generic structure. The chip defaults | ||
180 | * to the primary (index 0) irq_chip_type and @handler | ||
181 | */ | ||
182 | struct irq_chip_generic * | ||
183 | irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base, | ||
184 | void __iomem *reg_base, irq_flow_handler_t handler) | ||
185 | { | ||
186 | struct irq_chip_generic *gc; | ||
187 | unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type); | ||
188 | |||
189 | gc = kzalloc(sz, GFP_KERNEL); | ||
190 | if (gc) { | ||
191 | raw_spin_lock_init(&gc->lock); | ||
192 | gc->num_ct = num_ct; | ||
193 | gc->irq_base = irq_base; | ||
194 | gc->reg_base = reg_base; | ||
195 | gc->chip_types->chip.name = name; | ||
196 | gc->chip_types->handler = handler; | ||
197 | } | ||
198 | return gc; | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * Separate lockdep class for interrupt chip which can nest irq_desc | ||
203 | * lock. | ||
204 | */ | ||
205 | static struct lock_class_key irq_nested_lock_class; | ||
206 | |||
207 | /** | ||
208 | * irq_setup_generic_chip - Setup a range of interrupts with a generic chip | ||
209 | * @gc: Generic irq chip holding all data | ||
210 | * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base | ||
211 | * @flags: Flags for initialization | ||
212 | * @clr: IRQ_* bits to clear | ||
213 | * @set: IRQ_* bits to set | ||
214 | * | ||
215 | * Set up max. 32 interrupts starting from gc->irq_base. Note, this | ||
216 | * initializes all interrupts to the primary irq_chip_type and its | ||
217 | * associated handler. | ||
218 | */ | ||
219 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, | ||
220 | enum irq_gc_flags flags, unsigned int clr, | ||
221 | unsigned int set) | ||
222 | { | ||
223 | struct irq_chip_type *ct = gc->chip_types; | ||
224 | unsigned int i; | ||
225 | |||
226 | raw_spin_lock(&gc_lock); | ||
227 | list_add_tail(&gc->list, &gc_list); | ||
228 | raw_spin_unlock(&gc_lock); | ||
229 | |||
230 | /* Init mask cache ? */ | ||
231 | if (flags & IRQ_GC_INIT_MASK_CACHE) | ||
232 | gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask); | ||
233 | |||
234 | for (i = gc->irq_base; msk; msk >>= 1, i++) { | ||
235 | if (!(msk & 0x01)) | ||
236 | continue; | ||
237 | |||
238 | if (flags & IRQ_GC_INIT_NESTED_LOCK) | ||
239 | irq_set_lockdep_class(i, &irq_nested_lock_class); | ||
240 | |||
241 | irq_set_chip_and_handler(i, &ct->chip, ct->handler); | ||
242 | irq_set_chip_data(i, gc); | ||
243 | irq_modify_status(i, clr, set); | ||
244 | } | ||
245 | gc->irq_cnt = i - gc->irq_base; | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * irq_setup_alt_chip - Switch to alternative chip | ||
250 | * @d: irq_data for this interrupt | ||
251 | * @type: Flow type to be initialized | ||
252 | * | ||
253 | * Only to be called from chip->irq_set_type() callbacks. | ||
254 | */ | ||
255 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type) | ||
256 | { | ||
257 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
258 | struct irq_chip_type *ct = gc->chip_types; | ||
259 | unsigned int i; | ||
260 | |||
261 | for (i = 0; i < gc->num_ct; i++, ct++) { | ||
262 | if (ct->type & type) { | ||
263 | d->chip = &ct->chip; | ||
264 | irq_data_to_desc(d)->handle_irq = ct->handler; | ||
265 | return 0; | ||
266 | } | ||
267 | } | ||
268 | return -EINVAL; | ||
269 | } | ||
270 | |||
271 | /** | ||
272 | * irq_remove_generic_chip - Remove a chip | ||
273 | * @gc: Generic irq chip holding all data | ||
274 | * @msk: Bitmask holding the irqs to remove relative to gc->irq_base | ||
275 | * @clr: IRQ_* bits to clear | ||
276 | * @set: IRQ_* bits to set | ||
277 | * | ||
278 | * Remove up to 32 interrupts starting from gc->irq_base. | ||
279 | */ | ||
280 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, | ||
281 | unsigned int clr, unsigned int set) | ||
282 | { | ||
283 | unsigned int i = gc->irq_base; | ||
284 | |||
285 | raw_spin_lock(&gc_lock); | ||
286 | list_del(&gc->list); | ||
287 | raw_spin_unlock(&gc_lock); | ||
288 | |||
289 | for (; msk; msk >>= 1, i++) { | ||
290 | if (!(msk & 0x01)) | ||
291 | continue; | ||
292 | |||
293 | /* Remove handler first. That will mask the irq line */ | ||
294 | irq_set_handler(i, NULL); | ||
295 | irq_set_chip(i, &no_irq_chip); | ||
296 | irq_set_chip_data(i, NULL); | ||
297 | irq_modify_status(i, clr, set); | ||
298 | } | ||
299 | } | ||
300 | |||
301 | #ifdef CONFIG_PM | ||
302 | static int irq_gc_suspend(void) | ||
303 | { | ||
304 | struct irq_chip_generic *gc; | ||
305 | |||
306 | list_for_each_entry(gc, &gc_list, list) { | ||
307 | struct irq_chip_type *ct = gc->chip_types; | ||
308 | |||
309 | if (ct->chip.irq_suspend) | ||
310 | ct->chip.irq_suspend(irq_get_irq_data(gc->irq_base)); | ||
311 | } | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static void irq_gc_resume(void) | ||
316 | { | ||
317 | struct irq_chip_generic *gc; | ||
318 | |||
319 | list_for_each_entry(gc, &gc_list, list) { | ||
320 | struct irq_chip_type *ct = gc->chip_types; | ||
321 | |||
322 | if (ct->chip.irq_resume) | ||
323 | ct->chip.irq_resume(irq_get_irq_data(gc->irq_base)); | ||
324 | } | ||
325 | } | ||
326 | #else | ||
327 | #define irq_gc_suspend NULL | ||
328 | #define irq_gc_resume NULL | ||
329 | #endif | ||
330 | |||
331 | static void irq_gc_shutdown(void) | ||
332 | { | ||
333 | struct irq_chip_generic *gc; | ||
334 | |||
335 | list_for_each_entry(gc, &gc_list, list) { | ||
336 | struct irq_chip_type *ct = gc->chip_types; | ||
337 | |||
338 | if (ct->chip.irq_pm_shutdown) | ||
339 | ct->chip.irq_pm_shutdown(irq_get_irq_data(gc->irq_base)); | ||
340 | } | ||
341 | } | ||
342 | |||
343 | static struct syscore_ops irq_gc_syscore_ops = { | ||
344 | .suspend = irq_gc_suspend, | ||
345 | .resume = irq_gc_resume, | ||
346 | .shutdown = irq_gc_shutdown, | ||
347 | }; | ||
348 | |||
349 | static int __init irq_gc_init_ops(void) | ||
350 | { | ||
351 | register_syscore_ops(&irq_gc_syscore_ops); | ||
352 | return 0; | ||
353 | } | ||
354 | device_initcall(irq_gc_init_ops); | ||
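A hedged driver-side sketch of the new library -- register offsets, irq base and count are illustrative: allocate a chip, point the primary irq_chip_type at the library callbacks, then instantiate the descriptors in one call:

static void __init foo_init_irq(void __iomem *base, unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("FOO", 1, irq_base, base,
				    handle_level_irq);
	if (!gc)
		return;
	ct = gc->chip_types;
	ct->chip.irq_mask   = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_ack    = irq_gc_ack;
	ct->regs.mask = 0x04;			/* hypothetical offsets */
	ct->regs.ack  = 0x08;

	/* IRQ_MSK(16): lowest 16 irqs of the chip; make them requestable */
	irq_setup_generic_chip(gc, IRQ_MSK(16), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, 0);
}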
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 2c039c9b9383..886e80347b32 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
@@ -22,7 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | static struct lock_class_key irq_desc_lock_class; | 23 | static struct lock_class_key irq_desc_lock_class; |
24 | 24 | ||
25 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) | 25 | #if defined(CONFIG_SMP) |
26 | static void __init init_irq_default_affinity(void) | 26 | static void __init init_irq_default_affinity(void) |
27 | { | 27 | { |
28 | alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); | 28 | alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); |
@@ -290,6 +290,22 @@ static int irq_expand_nr_irqs(unsigned int nr) | |||
290 | 290 | ||
291 | #endif /* !CONFIG_SPARSE_IRQ */ | 291 | #endif /* !CONFIG_SPARSE_IRQ */ |
292 | 292 | ||
293 | /** | ||
294 | * generic_handle_irq - Invoke the handler for a particular irq | ||
295 | * @irq: The irq number to handle | ||
296 | * | ||
297 | */ | ||
298 | int generic_handle_irq(unsigned int irq) | ||
299 | { | ||
300 | struct irq_desc *desc = irq_to_desc(irq); | ||
301 | |||
302 | if (!desc) | ||
303 | return -EINVAL; | ||
304 | generic_handle_irq_desc(irq, desc); | ||
305 | return 0; | ||
306 | } | ||
307 | EXPORT_SYMBOL_GPL(generic_handle_irq); | ||
308 | |||
293 | /* Dynamic interrupt handling */ | 309 | /* Dynamic interrupt handling */ |
294 | 310 | ||
295 | /** | 311 | /** |
@@ -311,6 +327,7 @@ void irq_free_descs(unsigned int from, unsigned int cnt) | |||
311 | bitmap_clear(allocated_irqs, from, cnt); | 327 | bitmap_clear(allocated_irqs, from, cnt); |
312 | mutex_unlock(&sparse_irq_lock); | 328 | mutex_unlock(&sparse_irq_lock); |
313 | } | 329 | } |
330 | EXPORT_SYMBOL_GPL(irq_free_descs); | ||
314 | 331 | ||
315 | /** | 332 | /** |
316 | * irq_alloc_descs - allocate and initialize a range of irq descriptors | 333 | * irq_alloc_descs - allocate and initialize a range of irq descriptors |
@@ -351,6 +368,7 @@ err: | |||
351 | mutex_unlock(&sparse_irq_lock); | 368 | mutex_unlock(&sparse_irq_lock); |
352 | return ret; | 369 | return ret; |
353 | } | 370 | } |
371 | EXPORT_SYMBOL_GPL(irq_alloc_descs); | ||
354 | 372 | ||
355 | /** | 373 | /** |
356 | * irq_reserve_irqs - mark irqs allocated | 374 | * irq_reserve_irqs - mark irqs allocated |
@@ -430,7 +448,6 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | |||
430 | *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; | 448 | *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; |
431 | } | 449 | } |
432 | 450 | ||
433 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
434 | unsigned int kstat_irqs(unsigned int irq) | 451 | unsigned int kstat_irqs(unsigned int irq) |
435 | { | 452 | { |
436 | struct irq_desc *desc = irq_to_desc(irq); | 453 | struct irq_desc *desc = irq_to_desc(irq); |
@@ -443,4 +460,3 @@ unsigned int kstat_irqs(unsigned int irq) | |||
443 | sum += *per_cpu_ptr(desc->kstat_irqs, cpu); | 460 | sum += *per_cpu_ptr(desc->kstat_irqs, cpu); |
444 | return sum; | 461 | return sum; |
445 | } | 462 | } |
446 | #endif /* CONFIG_GENERIC_HARDIRQS */ | ||
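With generic_handle_irq() now an exported function rather than a static inline, modular demultiplexing handlers can fan a parent interrupt out to the per-bit descriptors. A sketch; the device struct and status register are hypothetical:

struct foo_intc {
	void __iomem *base;
	unsigned int irq_base;
};

static irqreturn_t foo_demux(int irq, void *data)
{
	struct foo_intc *f = data;
	u32 pending = readl(f->base + 0x10);	/* hypothetical status reg */

	while (pending) {
		unsigned int bit = __ffs(pending);

		generic_handle_irq(f->irq_base + bit);
		pending &= ~(1u << bit);
	}
	return IRQ_HANDLED;
}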
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 07c1611f3899..f7ce0021e1c4 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -900,7 +900,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
900 | */ | 900 | */ |
901 | new->handler = irq_nested_primary_handler; | 901 | new->handler = irq_nested_primary_handler; |
902 | } else { | 902 | } else { |
903 | irq_setup_forced_threading(new); | 903 | if (irq_settings_can_thread(desc)) |
904 | irq_setup_forced_threading(new); | ||
904 | } | 905 | } |
905 | 906 | ||
906 | /* | 907 | /* |
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 0d91730b6330..f1667833d444 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h | |||
@@ -8,6 +8,7 @@ enum { | |||
8 | _IRQ_LEVEL = IRQ_LEVEL, | 8 | _IRQ_LEVEL = IRQ_LEVEL, |
9 | _IRQ_NOPROBE = IRQ_NOPROBE, | 9 | _IRQ_NOPROBE = IRQ_NOPROBE, |
10 | _IRQ_NOREQUEST = IRQ_NOREQUEST, | 10 | _IRQ_NOREQUEST = IRQ_NOREQUEST, |
11 | _IRQ_NOTHREAD = IRQ_NOTHREAD, | ||
11 | _IRQ_NOAUTOEN = IRQ_NOAUTOEN, | 12 | _IRQ_NOAUTOEN = IRQ_NOAUTOEN, |
12 | _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT, | 13 | _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT, |
13 | _IRQ_NO_BALANCING = IRQ_NO_BALANCING, | 14 | _IRQ_NO_BALANCING = IRQ_NO_BALANCING, |
@@ -20,6 +21,7 @@ enum { | |||
20 | #define IRQ_LEVEL GOT_YOU_MORON | 21 | #define IRQ_LEVEL GOT_YOU_MORON |
21 | #define IRQ_NOPROBE GOT_YOU_MORON | 22 | #define IRQ_NOPROBE GOT_YOU_MORON |
22 | #define IRQ_NOREQUEST GOT_YOU_MORON | 23 | #define IRQ_NOREQUEST GOT_YOU_MORON |
24 | #define IRQ_NOTHREAD GOT_YOU_MORON | ||
23 | #define IRQ_NOAUTOEN GOT_YOU_MORON | 25 | #define IRQ_NOAUTOEN GOT_YOU_MORON |
24 | #define IRQ_NESTED_THREAD GOT_YOU_MORON | 26 | #define IRQ_NESTED_THREAD GOT_YOU_MORON |
25 | #undef IRQF_MODIFY_MASK | 27 | #undef IRQF_MODIFY_MASK |
@@ -94,6 +96,21 @@ static inline void irq_settings_set_norequest(struct irq_desc *desc) | |||
94 | desc->status_use_accessors |= _IRQ_NOREQUEST; | 96 | desc->status_use_accessors |= _IRQ_NOREQUEST; |
95 | } | 97 | } |
96 | 98 | ||
99 | static inline bool irq_settings_can_thread(struct irq_desc *desc) | ||
100 | { | ||
101 | return !(desc->status_use_accessors & _IRQ_NOTHREAD); | ||
102 | } | ||
103 | |||
104 | static inline void irq_settings_clr_nothread(struct irq_desc *desc) | ||
105 | { | ||
106 | desc->status_use_accessors &= ~_IRQ_NOTHREAD; | ||
107 | } | ||
108 | |||
109 | static inline void irq_settings_set_nothread(struct irq_desc *desc) | ||
110 | { | ||
111 | desc->status_use_accessors |= _IRQ_NOTHREAD; | ||
112 | } | ||
113 | |||
97 | static inline bool irq_settings_can_probe(struct irq_desc *desc) | 114 | static inline bool irq_settings_can_probe(struct irq_desc *desc) |
98 | { | 115 | { |
99 | return !(desc->status_use_accessors & _IRQ_NOPROBE); | 116 | return !(desc->status_use_accessors & _IRQ_NOPROBE); |
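The three new accessors follow this header's existing pattern: each _IRQ_* bit in status_use_accessors gets a can/clr/set triple. From driver context the bit is reachable through the generic status helper; a minimal sketch, assuming a driver that wants to keep its handler out of the forced-threading path (the function name and irq parameter are illustrative):

    #include <linux/irq.h>

    /* hypothetical: opt one interrupt line out of forced irq threading
     * by setting IRQ_NOTHREAD (clear mask 0, set mask IRQ_NOTHREAD) */
    static void demo_mark_irq_unthreadable(unsigned int irq)
    {
            irq_modify_status(irq, 0, IRQ_NOTHREAD);
    }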
diff --git a/kernel/kexec.c b/kernel/kexec.c index 87b77de03dd3..8d814cbc8109 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -1531,13 +1531,7 @@ int kernel_kexec(void) | |||
1531 | if (error) | 1531 | if (error) |
1532 | goto Enable_cpus; | 1532 | goto Enable_cpus; |
1533 | local_irq_disable(); | 1533 | local_irq_disable(); |
1534 | /* Suspend system devices */ | 1534 | error = syscore_suspend(); |
1535 | error = sysdev_suspend(PMSG_FREEZE); | ||
1536 | if (!error) { | ||
1537 | error = syscore_suspend(); | ||
1538 | if (error) | ||
1539 | sysdev_resume(); | ||
1540 | } | ||
1541 | if (error) | 1535 | if (error) |
1542 | goto Enable_irqs; | 1536 | goto Enable_irqs; |
1543 | } else | 1537 | } else |
@@ -1553,7 +1547,6 @@ int kernel_kexec(void) | |||
1553 | #ifdef CONFIG_KEXEC_JUMP | 1547 | #ifdef CONFIG_KEXEC_JUMP |
1554 | if (kexec_image->preserve_context) { | 1548 | if (kexec_image->preserve_context) { |
1555 | syscore_resume(); | 1549 | syscore_resume(); |
1556 | sysdev_resume(); | ||
1557 | Enable_irqs: | 1550 | Enable_irqs: |
1558 | local_irq_enable(); | 1551 | local_irq_enable(); |
1559 | Enable_cpus: | 1552 | Enable_cpus: |
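This kexec change (and the matching ones in kernel/power further down) drops the sysdev suspend/resume pass entirely; core code that still needs very-late callbacks registers struct syscore_ops instead. A minimal sketch of such a registration, with made-up function names:

    #include <linux/init.h>
    #include <linux/syscore_ops.h>

    /* hypothetical very-late callbacks: run on one CPU, irqs disabled */
    static int demo_suspend(void)
    {
            /* quiesce some core resource; must not sleep */
            return 0;
    }

    static void demo_resume(void)
    {
            /* undo demo_suspend() */
    }

    static struct syscore_ops demo_syscore_ops = {
            .suspend = demo_suspend,
            .resume  = demo_resume,
    };

    static int __init demo_init(void)
    {
            register_syscore_ops(&demo_syscore_ops);
            return 0;
    }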
diff --git a/kernel/kmod.c b/kernel/kmod.c index 9cd0591c96a2..5ae0ff38425f 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
@@ -245,7 +245,6 @@ static void __call_usermodehelper(struct work_struct *work) | |||
245 | } | 245 | } |
246 | } | 246 | } |
247 | 247 | ||
248 | #ifdef CONFIG_PM_SLEEP | ||
249 | /* | 248 | /* |
250 | * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY | 249 | * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY |
251 | * (used for preventing user land processes from being created after the user | 250 | * (used for preventing user land processes from being created after the user |
@@ -301,6 +300,15 @@ void usermodehelper_enable(void) | |||
301 | usermodehelper_disabled = 0; | 300 | usermodehelper_disabled = 0; |
302 | } | 301 | } |
303 | 302 | ||
303 | /** | ||
304 | * usermodehelper_is_disabled - check if new helpers are allowed to be started | ||
305 | */ | ||
306 | bool usermodehelper_is_disabled(void) | ||
307 | { | ||
308 | return usermodehelper_disabled; | ||
309 | } | ||
310 | EXPORT_SYMBOL_GPL(usermodehelper_is_disabled); | ||
311 | |||
304 | static void helper_lock(void) | 312 | static void helper_lock(void) |
305 | { | 313 | { |
306 | atomic_inc(&running_helpers); | 314 | atomic_inc(&running_helpers); |
@@ -312,12 +320,6 @@ static void helper_unlock(void) | |||
312 | if (atomic_dec_and_test(&running_helpers)) | 320 | if (atomic_dec_and_test(&running_helpers)) |
313 | wake_up(&running_helpers_waitq); | 321 | wake_up(&running_helpers_waitq); |
314 | } | 322 | } |
315 | #else /* CONFIG_PM_SLEEP */ | ||
316 | #define usermodehelper_disabled 0 | ||
317 | |||
318 | static inline void helper_lock(void) {} | ||
319 | static inline void helper_unlock(void) {} | ||
320 | #endif /* CONFIG_PM_SLEEP */ | ||
321 | 323 | ||
322 | /** | 324 | /** |
323 | * call_usermodehelper_setup - prepare to call a usermode helper | 325 | * call_usermodehelper_setup - prepare to call a usermode helper |
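With the #ifdef gone, the disable flag and the running-helpers refcount are compiled in unconditionally, and usermodehelper_is_disabled() lets other subsystems poll the flag. The underlying pattern is a refcount whose final decrement wakes anyone waiting for quiescence; a freestanding userspace sketch of that shape (names are illustrative, not kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int running_helpers;
    static atomic_bool helpers_disabled;
    static pthread_mutex_t waitq_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;

    static bool helper_is_disabled(void)   /* cf. usermodehelper_is_disabled() */
    {
            return atomic_load(&helpers_disabled);
    }

    static void helper_lock(void)          /* one more helper in flight */
    {
            atomic_fetch_add(&running_helpers, 1);
    }

    static void helper_unlock(void)        /* last one out wakes the waiters */
    {
            if (atomic_fetch_sub(&running_helpers, 1) == 1) {
                    pthread_mutex_lock(&waitq_lock);
                    pthread_cond_broadcast(&waitq);
                    pthread_mutex_unlock(&waitq_lock);
            }
    }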
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index 0b624e791805..3b053c04dd86 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/kexec.h> | 16 | #include <linux/kexec.h> |
17 | #include <linux/profile.h> | 17 | #include <linux/profile.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/capability.h> | ||
19 | 20 | ||
20 | #define KERNEL_ATTR_RO(_name) \ | 21 | #define KERNEL_ATTR_RO(_name) \ |
21 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) | 22 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) |
@@ -131,6 +132,14 @@ KERNEL_ATTR_RO(vmcoreinfo); | |||
131 | 132 | ||
132 | #endif /* CONFIG_KEXEC */ | 133 | #endif /* CONFIG_KEXEC */ |
133 | 134 | ||
135 | /* whether file capabilities are enabled */ | ||
136 | static ssize_t fscaps_show(struct kobject *kobj, | ||
137 | struct kobj_attribute *attr, char *buf) | ||
138 | { | ||
139 | return sprintf(buf, "%d\n", file_caps_enabled); | ||
140 | } | ||
141 | KERNEL_ATTR_RO(fscaps); | ||
142 | |||
134 | /* | 143 | /* |
135 | * Make /sys/kernel/notes give the raw contents of our kernel .notes section. | 144 | * Make /sys/kernel/notes give the raw contents of our kernel .notes section. |
136 | */ | 145 | */ |
@@ -158,6 +167,7 @@ struct kobject *kernel_kobj; | |||
158 | EXPORT_SYMBOL_GPL(kernel_kobj); | 167 | EXPORT_SYMBOL_GPL(kernel_kobj); |
159 | 168 | ||
160 | static struct attribute * kernel_attrs[] = { | 169 | static struct attribute * kernel_attrs[] = { |
170 | &fscaps_attr.attr, | ||
161 | #if defined(CONFIG_HOTPLUG) | 171 | #if defined(CONFIG_HOTPLUG) |
162 | &uevent_seqnum_attr.attr, | 172 | &uevent_seqnum_attr.attr, |
163 | &uevent_helper_attr.attr, | 173 | &uevent_helper_attr.attr, |
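Given the KERNEL_ATTR_RO() macro visible in the context above, the new attribute boils down to one static definition wiring fscaps_show() as the read callback. Assuming the usual __ATTR_RO() definition, the expansion is:

    static struct kobj_attribute fscaps_attr = __ATTR_RO(fscaps);
    /* i.e. { .attr = { .name = "fscaps", .mode = 0444 }, .show = fscaps_show } */

Reading /sys/kernel/fscaps therefore reports whether file capabilities were enabled at boot.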
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 53a68956f131..63437d065ac8 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -490,6 +490,18 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) | |||
490 | usage[i] = '\0'; | 490 | usage[i] = '\0'; |
491 | } | 491 | } |
492 | 492 | ||
493 | static int __print_lock_name(struct lock_class *class) | ||
494 | { | ||
495 | char str[KSYM_NAME_LEN]; | ||
496 | const char *name; | ||
497 | |||
498 | name = class->name; | ||
499 | if (!name) | ||
500 | name = __get_key_name(class->key, str); | ||
501 | |||
502 | return printk("%s", name); | ||
503 | } | ||
504 | |||
493 | static void print_lock_name(struct lock_class *class) | 505 | static void print_lock_name(struct lock_class *class) |
494 | { | 506 | { |
495 | char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS]; | 507 | char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS]; |
@@ -1053,6 +1065,56 @@ print_circular_bug_entry(struct lock_list *target, int depth) | |||
1053 | return 0; | 1065 | return 0; |
1054 | } | 1066 | } |
1055 | 1067 | ||
1068 | static void | ||
1069 | print_circular_lock_scenario(struct held_lock *src, | ||
1070 | struct held_lock *tgt, | ||
1071 | struct lock_list *prt) | ||
1072 | { | ||
1073 | struct lock_class *source = hlock_class(src); | ||
1074 | struct lock_class *target = hlock_class(tgt); | ||
1075 | struct lock_class *parent = prt->class; | ||
1076 | |||
1077 | /* | ||
1078 | * A direct locking problem where unsafe_class lock is taken | ||
1079 | * directly by safe_class lock, then all we need to show | ||
1080 | * is the deadlock scenario, as it is obvious that the | ||
1081 | * unsafe lock is taken under the safe lock. | ||
1082 | * | ||
1083 | * But if there is a chain instead, where the safe lock takes | ||
1084 | * an intermediate lock (middle_class) where this lock is | ||
1085 | * not the same as the safe lock, then the lock chain is | ||
1086 | * used to describe the problem. Otherwise we would need | ||
1087 | * to show a different CPU case for each link in the chain | ||
1088 | * from the safe_class lock to the unsafe_class lock. | ||
1089 | */ | ||
1090 | if (parent != source) { | ||
1091 | printk("Chain exists of:\n "); | ||
1092 | __print_lock_name(source); | ||
1093 | printk(" --> "); | ||
1094 | __print_lock_name(parent); | ||
1095 | printk(" --> "); | ||
1096 | __print_lock_name(target); | ||
1097 | printk("\n\n"); | ||
1098 | } | ||
1099 | |||
1100 | printk(" Possible unsafe locking scenario:\n\n"); | ||
1101 | printk(" CPU0 CPU1\n"); | ||
1102 | printk(" ---- ----\n"); | ||
1103 | printk(" lock("); | ||
1104 | __print_lock_name(target); | ||
1105 | printk(");\n"); | ||
1106 | printk(" lock("); | ||
1107 | __print_lock_name(parent); | ||
1108 | printk(");\n"); | ||
1109 | printk(" lock("); | ||
1110 | __print_lock_name(target); | ||
1111 | printk(");\n"); | ||
1112 | printk(" lock("); | ||
1113 | __print_lock_name(source); | ||
1114 | printk(");\n"); | ||
1115 | printk("\n *** DEADLOCK ***\n\n"); | ||
1116 | } | ||
1117 | |||
1056 | /* | 1118 | /* |
1057 | * When a circular dependency is detected, print the | 1119 | * When a circular dependency is detected, print the |
1058 | * header first: | 1120 | * header first: |
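For a three-lock cycle the new helper renders a report along these lines (lock class names A, B, C are hypothetical, and the column indentation shown here is approximate):

    Chain exists of:
      A --> B --> C

     Possible unsafe locking scenario:

           CPU0                    CPU1
           ----                    ----
      lock(C);
                                   lock(B);
                                   lock(C);
      lock(A);

     *** DEADLOCK ***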
@@ -1096,6 +1158,7 @@ static noinline int print_circular_bug(struct lock_list *this, | |||
1096 | { | 1158 | { |
1097 | struct task_struct *curr = current; | 1159 | struct task_struct *curr = current; |
1098 | struct lock_list *parent; | 1160 | struct lock_list *parent; |
1161 | struct lock_list *first_parent; | ||
1099 | int depth; | 1162 | int depth; |
1100 | 1163 | ||
1101 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) | 1164 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
@@ -1109,6 +1172,7 @@ static noinline int print_circular_bug(struct lock_list *this, | |||
1109 | print_circular_bug_header(target, depth, check_src, check_tgt); | 1172 | print_circular_bug_header(target, depth, check_src, check_tgt); |
1110 | 1173 | ||
1111 | parent = get_lock_parent(target); | 1174 | parent = get_lock_parent(target); |
1175 | first_parent = parent; | ||
1112 | 1176 | ||
1113 | while (parent) { | 1177 | while (parent) { |
1114 | print_circular_bug_entry(parent, --depth); | 1178 | print_circular_bug_entry(parent, --depth); |
@@ -1116,6 +1180,9 @@ static noinline int print_circular_bug(struct lock_list *this, | |||
1116 | } | 1180 | } |
1117 | 1181 | ||
1118 | printk("\nother info that might help us debug this:\n\n"); | 1182 | printk("\nother info that might help us debug this:\n\n"); |
1183 | print_circular_lock_scenario(check_src, check_tgt, | ||
1184 | first_parent); | ||
1185 | |||
1119 | lockdep_print_held_locks(curr); | 1186 | lockdep_print_held_locks(curr); |
1120 | 1187 | ||
1121 | printk("\nstack backtrace:\n"); | 1188 | printk("\nstack backtrace:\n"); |
@@ -1314,7 +1381,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf, | |||
1314 | printk("\n"); | 1381 | printk("\n"); |
1315 | 1382 | ||
1316 | if (depth == 0 && (entry != root)) { | 1383 | if (depth == 0 && (entry != root)) { |
1317 | printk("lockdep:%s bad BFS generated tree\n", __func__); | 1384 | printk("lockdep:%s bad path found in chain graph\n", __func__); |
1318 | break; | 1385 | break; |
1319 | } | 1386 | } |
1320 | 1387 | ||
@@ -1325,6 +1392,62 @@ print_shortest_lock_dependencies(struct lock_list *leaf, | |||
1325 | return; | 1392 | return; |
1326 | } | 1393 | } |
1327 | 1394 | ||
1395 | static void | ||
1396 | print_irq_lock_scenario(struct lock_list *safe_entry, | ||
1397 | struct lock_list *unsafe_entry, | ||
1398 | struct lock_class *prev_class, | ||
1399 | struct lock_class *next_class) | ||
1400 | { | ||
1401 | struct lock_class *safe_class = safe_entry->class; | ||
1402 | struct lock_class *unsafe_class = unsafe_entry->class; | ||
1403 | struct lock_class *middle_class = prev_class; | ||
1404 | |||
1405 | if (middle_class == safe_class) | ||
1406 | middle_class = next_class; | ||
1407 | |||
1408 | /* | ||
1409 | * A direct locking problem where unsafe_class lock is taken | ||
1410 | * directly by safe_class lock, then all we need to show | ||
1411 | * is the deadlock scenario, as it is obvious that the | ||
1412 | * unsafe lock is taken under the safe lock. | ||
1413 | * | ||
1414 | * But if there is a chain instead, where the safe lock takes | ||
1415 | * an intermediate lock (middle_class) where this lock is | ||
1416 | * not the same as the safe lock, then the lock chain is | ||
1417 | * used to describe the problem. Otherwise we would need | ||
1418 | * to show a different CPU case for each link in the chain | ||
1419 | * from the safe_class lock to the unsafe_class lock. | ||
1420 | */ | ||
1421 | if (middle_class != unsafe_class) { | ||
1422 | printk("Chain exists of:\n "); | ||
1423 | __print_lock_name(safe_class); | ||
1424 | printk(" --> "); | ||
1425 | __print_lock_name(middle_class); | ||
1426 | printk(" --> "); | ||
1427 | __print_lock_name(unsafe_class); | ||
1428 | printk("\n\n"); | ||
1429 | } | ||
1430 | |||
1431 | printk(" Possible interrupt unsafe locking scenario:\n\n"); | ||
1432 | printk(" CPU0 CPU1\n"); | ||
1433 | printk(" ---- ----\n"); | ||
1434 | printk(" lock("); | ||
1435 | __print_lock_name(unsafe_class); | ||
1436 | printk(");\n"); | ||
1437 | printk(" local_irq_disable();\n"); | ||
1438 | printk(" lock("); | ||
1439 | __print_lock_name(safe_class); | ||
1440 | printk(");\n"); | ||
1441 | printk(" lock("); | ||
1442 | __print_lock_name(middle_class); | ||
1443 | printk(");\n"); | ||
1444 | printk(" <Interrupt>\n"); | ||
1445 | printk(" lock("); | ||
1446 | __print_lock_name(safe_class); | ||
1447 | printk(");\n"); | ||
1448 | printk("\n *** DEADLOCK ***\n\n"); | ||
1449 | } | ||
1450 | |||
1328 | static int | 1451 | static int |
1329 | print_bad_irq_dependency(struct task_struct *curr, | 1452 | print_bad_irq_dependency(struct task_struct *curr, |
1330 | struct lock_list *prev_root, | 1453 | struct lock_list *prev_root, |
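The interrupt flavour, which print_irq_inversion_bug() below also reuses, reads similarly; with hypothetical classes S (irq-safe), M (middle), U (irq-unsafe) it would print roughly:

    Chain exists of:
      S --> M --> U

     Possible interrupt unsafe locking scenario:

           CPU0                    CPU1
           ----                    ----
      lock(U);
                                   local_irq_disable();
                                   lock(S);
                                   lock(M);
      <Interrupt>
        lock(S);

     *** DEADLOCK ***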
@@ -1376,6 +1499,9 @@ print_bad_irq_dependency(struct task_struct *curr, | |||
1376 | print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); | 1499 | print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); |
1377 | 1500 | ||
1378 | printk("\nother info that might help us debug this:\n\n"); | 1501 | printk("\nother info that might help us debug this:\n\n"); |
1502 | print_irq_lock_scenario(backwards_entry, forwards_entry, | ||
1503 | hlock_class(prev), hlock_class(next)); | ||
1504 | |||
1379 | lockdep_print_held_locks(curr); | 1505 | lockdep_print_held_locks(curr); |
1380 | 1506 | ||
1381 | printk("\nthe dependencies between %s-irq-safe lock", irqclass); | 1507 | printk("\nthe dependencies between %s-irq-safe lock", irqclass); |
@@ -1539,6 +1665,26 @@ static inline void inc_chains(void) | |||
1539 | 1665 | ||
1540 | #endif | 1666 | #endif |
1541 | 1667 | ||
1668 | static void | ||
1669 | print_deadlock_scenario(struct held_lock *nxt, | ||
1670 | struct held_lock *prv) | ||
1671 | { | ||
1672 | struct lock_class *next = hlock_class(nxt); | ||
1673 | struct lock_class *prev = hlock_class(prv); | ||
1674 | |||
1675 | printk(" Possible unsafe locking scenario:\n\n"); | ||
1676 | printk(" CPU0\n"); | ||
1677 | printk(" ----\n"); | ||
1678 | printk(" lock("); | ||
1679 | __print_lock_name(prev); | ||
1680 | printk(");\n"); | ||
1681 | printk(" lock("); | ||
1682 | __print_lock_name(next); | ||
1683 | printk(");\n"); | ||
1684 | printk("\n *** DEADLOCK ***\n\n"); | ||
1685 | printk(" May be due to missing lock nesting notation\n\n"); | ||
1686 | } | ||
1687 | |||
1542 | static int | 1688 | static int |
1543 | print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, | 1689 | print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, |
1544 | struct held_lock *next) | 1690 | struct held_lock *next) |
@@ -1557,6 +1703,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, | |||
1557 | print_lock(prev); | 1703 | print_lock(prev); |
1558 | 1704 | ||
1559 | printk("\nother info that might help us debug this:\n"); | 1705 | printk("\nother info that might help us debug this:\n"); |
1706 | print_deadlock_scenario(next, prev); | ||
1560 | lockdep_print_held_locks(curr); | 1707 | lockdep_print_held_locks(curr); |
1561 | 1708 | ||
1562 | printk("\nstack backtrace:\n"); | 1709 | printk("\nstack backtrace:\n"); |
@@ -1826,7 +1973,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, | |||
1826 | struct list_head *hash_head = chainhashentry(chain_key); | 1973 | struct list_head *hash_head = chainhashentry(chain_key); |
1827 | struct lock_chain *chain; | 1974 | struct lock_chain *chain; |
1828 | struct held_lock *hlock_curr, *hlock_next; | 1975 | struct held_lock *hlock_curr, *hlock_next; |
1829 | int i, j, n, cn; | 1976 | int i, j; |
1830 | 1977 | ||
1831 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 1978 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
1832 | return 0; | 1979 | return 0; |
@@ -1886,15 +2033,9 @@ cache_hit: | |||
1886 | } | 2033 | } |
1887 | i++; | 2034 | i++; |
1888 | chain->depth = curr->lockdep_depth + 1 - i; | 2035 | chain->depth = curr->lockdep_depth + 1 - i; |
1889 | cn = nr_chain_hlocks; | 2036 | if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { |
1890 | while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) { | 2037 | chain->base = nr_chain_hlocks; |
1891 | n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth); | 2038 | nr_chain_hlocks += chain->depth; |
1892 | if (n == cn) | ||
1893 | break; | ||
1894 | cn = n; | ||
1895 | } | ||
1896 | if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { | ||
1897 | chain->base = cn; | ||
1898 | for (j = 0; j < chain->depth - 1; j++, i++) { | 2039 | for (j = 0; j < chain->depth - 1; j++, i++) { |
1899 | int lock_id = curr->held_locks[i].class_idx - 1; | 2040 | int lock_id = curr->held_locks[i].class_idx - 1; |
1900 | chain_hlocks[chain->base + j] = lock_id; | 2041 | chain_hlocks[chain->base + j] = lock_id; |
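The dropped loop was the classic lock-free bump allocation; it was dead weight here because lookup_chain_cache() already runs under the graph lock, so a plain read-modify-write suffices. For reference, the removed idiom in freestanding C11 form (names are illustrative):

    #include <stdatomic.h>

    static atomic_uint nr_slots;

    /* try to reserve `depth` consecutive slots out of `max`; returns 0 and
     * stores the starting index in *base on success, -1 if the table is full */
    static int reserve_slots(unsigned int depth, unsigned int max,
                             unsigned int *base)
    {
            unsigned int cn = atomic_load(&nr_slots);

            while (cn + depth <= max) {
                    /* on failure, cn is refreshed with the current counter */
                    if (atomic_compare_exchange_weak(&nr_slots, &cn, cn + depth)) {
                            *base = cn;
                            return 0;
                    }
            }
            return -1;
    }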
@@ -2011,6 +2152,24 @@ static void check_chain_key(struct task_struct *curr) | |||
2011 | #endif | 2152 | #endif |
2012 | } | 2153 | } |
2013 | 2154 | ||
2155 | static void | ||
2156 | print_usage_bug_scenario(struct held_lock *lock) | ||
2157 | { | ||
2158 | struct lock_class *class = hlock_class(lock); | ||
2159 | |||
2160 | printk(" Possible unsafe locking scenario:\n\n"); | ||
2161 | printk(" CPU0\n"); | ||
2162 | printk(" ----\n"); | ||
2163 | printk(" lock("); | ||
2164 | __print_lock_name(class); | ||
2165 | printk(");\n"); | ||
2166 | printk(" <Interrupt>\n"); | ||
2167 | printk(" lock("); | ||
2168 | __print_lock_name(class); | ||
2169 | printk(");\n"); | ||
2170 | printk("\n *** DEADLOCK ***\n\n"); | ||
2171 | } | ||
2172 | |||
2014 | static int | 2173 | static int |
2015 | print_usage_bug(struct task_struct *curr, struct held_lock *this, | 2174 | print_usage_bug(struct task_struct *curr, struct held_lock *this, |
2016 | enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) | 2175 | enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) |
@@ -2039,6 +2198,8 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, | |||
2039 | 2198 | ||
2040 | print_irqtrace_events(curr); | 2199 | print_irqtrace_events(curr); |
2041 | printk("\nother info that might help us debug this:\n"); | 2200 | printk("\nother info that might help us debug this:\n"); |
2201 | print_usage_bug_scenario(this); | ||
2202 | |||
2042 | lockdep_print_held_locks(curr); | 2203 | lockdep_print_held_locks(curr); |
2043 | 2204 | ||
2044 | printk("\nstack backtrace:\n"); | 2205 | printk("\nstack backtrace:\n"); |
@@ -2073,6 +2234,10 @@ print_irq_inversion_bug(struct task_struct *curr, | |||
2073 | struct held_lock *this, int forwards, | 2234 | struct held_lock *this, int forwards, |
2074 | const char *irqclass) | 2235 | const char *irqclass) |
2075 | { | 2236 | { |
2237 | struct lock_list *entry = other; | ||
2238 | struct lock_list *middle = NULL; | ||
2239 | int depth; | ||
2240 | |||
2076 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) | 2241 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
2077 | return 0; | 2242 | return 0; |
2078 | 2243 | ||
@@ -2091,6 +2256,25 @@ print_irq_inversion_bug(struct task_struct *curr, | |||
2091 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); | 2256 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); |
2092 | 2257 | ||
2093 | printk("\nother info that might help us debug this:\n"); | 2258 | printk("\nother info that might help us debug this:\n"); |
2259 | |||
2260 | /* Find a middle lock (if one exists) */ | ||
2261 | depth = get_lock_depth(other); | ||
2262 | do { | ||
2263 | if (depth == 0 && (entry != root)) { | ||
2264 | printk("lockdep:%s bad path found in chain graph\n", __func__); | ||
2265 | break; | ||
2266 | } | ||
2267 | middle = entry; | ||
2268 | entry = get_lock_parent(entry); | ||
2269 | depth--; | ||
2270 | } while (entry && entry != root && (depth >= 0)); | ||
2271 | if (forwards) | ||
2272 | print_irq_lock_scenario(root, other, | ||
2273 | middle ? middle->class : root->class, other->class); | ||
2274 | else | ||
2275 | print_irq_lock_scenario(other, root, | ||
2276 | middle ? middle->class : other->class, root->class); | ||
2277 | |||
2094 | lockdep_print_held_locks(curr); | 2278 | lockdep_print_held_locks(curr); |
2095 | 2279 | ||
2096 | printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); | 2280 | printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); |
diff --git a/kernel/module.c b/kernel/module.c index d5938a5c19c4..22879725678d 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/kmemleak.h> | 57 | #include <linux/kmemleak.h> |
58 | #include <linux/jump_label.h> | 58 | #include <linux/jump_label.h> |
59 | #include <linux/pfn.h> | 59 | #include <linux/pfn.h> |
60 | #include <linux/bsearch.h> | ||
60 | 61 | ||
61 | #define CREATE_TRACE_POINTS | 62 | #define CREATE_TRACE_POINTS |
62 | #include <trace/events/module.h> | 63 | #include <trace/events/module.h> |
@@ -240,23 +241,24 @@ static bool each_symbol_in_section(const struct symsearch *arr, | |||
240 | struct module *owner, | 241 | struct module *owner, |
241 | bool (*fn)(const struct symsearch *syms, | 242 | bool (*fn)(const struct symsearch *syms, |
242 | struct module *owner, | 243 | struct module *owner, |
243 | unsigned int symnum, void *data), | 244 | void *data), |
244 | void *data) | 245 | void *data) |
245 | { | 246 | { |
246 | unsigned int i, j; | 247 | unsigned int j; |
247 | 248 | ||
248 | for (j = 0; j < arrsize; j++) { | 249 | for (j = 0; j < arrsize; j++) { |
249 | for (i = 0; i < arr[j].stop - arr[j].start; i++) | 250 | if (fn(&arr[j], owner, data)) |
250 | if (fn(&arr[j], owner, i, data)) | 251 | return true; |
251 | return true; | ||
252 | } | 252 | } |
253 | 253 | ||
254 | return false; | 254 | return false; |
255 | } | 255 | } |
256 | 256 | ||
257 | /* Returns true as soon as fn returns true, otherwise false. */ | 257 | /* Returns true as soon as fn returns true, otherwise false. */ |
258 | bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, | 258 | bool each_symbol_section(bool (*fn)(const struct symsearch *arr, |
259 | unsigned int symnum, void *data), void *data) | 259 | struct module *owner, |
260 | void *data), | ||
261 | void *data) | ||
260 | { | 262 | { |
261 | struct module *mod; | 263 | struct module *mod; |
262 | static const struct symsearch arr[] = { | 264 | static const struct symsearch arr[] = { |
@@ -309,7 +311,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, | |||
309 | } | 311 | } |
310 | return false; | 312 | return false; |
311 | } | 313 | } |
312 | EXPORT_SYMBOL_GPL(each_symbol); | 314 | EXPORT_SYMBOL_GPL(each_symbol_section); |
313 | 315 | ||
314 | struct find_symbol_arg { | 316 | struct find_symbol_arg { |
315 | /* Input */ | 317 | /* Input */ |
@@ -323,15 +325,12 @@ struct find_symbol_arg { | |||
323 | const struct kernel_symbol *sym; | 325 | const struct kernel_symbol *sym; |
324 | }; | 326 | }; |
325 | 327 | ||
326 | static bool find_symbol_in_section(const struct symsearch *syms, | 328 | static bool check_symbol(const struct symsearch *syms, |
327 | struct module *owner, | 329 | struct module *owner, |
328 | unsigned int symnum, void *data) | 330 | unsigned int symnum, void *data) |
329 | { | 331 | { |
330 | struct find_symbol_arg *fsa = data; | 332 | struct find_symbol_arg *fsa = data; |
331 | 333 | ||
332 | if (strcmp(syms->start[symnum].name, fsa->name) != 0) | ||
333 | return false; | ||
334 | |||
335 | if (!fsa->gplok) { | 334 | if (!fsa->gplok) { |
336 | if (syms->licence == GPL_ONLY) | 335 | if (syms->licence == GPL_ONLY) |
337 | return false; | 336 | return false; |
@@ -365,6 +364,30 @@ static bool find_symbol_in_section(const struct symsearch *syms, | |||
365 | return true; | 364 | return true; |
366 | } | 365 | } |
367 | 366 | ||
367 | static int cmp_name(const void *va, const void *vb) | ||
368 | { | ||
369 | const char *a; | ||
370 | const struct kernel_symbol *b; | ||
371 | a = va; b = vb; | ||
372 | return strcmp(a, b->name); | ||
373 | } | ||
374 | |||
375 | static bool find_symbol_in_section(const struct symsearch *syms, | ||
376 | struct module *owner, | ||
377 | void *data) | ||
378 | { | ||
379 | struct find_symbol_arg *fsa = data; | ||
380 | struct kernel_symbol *sym; | ||
381 | |||
382 | sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, | ||
383 | sizeof(struct kernel_symbol), cmp_name); | ||
384 | |||
385 | if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data)) | ||
386 | return true; | ||
387 | |||
388 | return false; | ||
389 | } | ||
390 | |||
368 | /* Find a symbol and return it, along with, (optional) crc and | 391 | /* Find a symbol and return it, along with, (optional) crc and |
369 | * (optional) module which owns it. Needs preempt disabled or module_mutex. */ | 392 | * (optional) module which owns it. Needs preempt disabled or module_mutex. */ |
370 | const struct kernel_symbol *find_symbol(const char *name, | 393 | const struct kernel_symbol *find_symbol(const char *name, |
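The comparator's asymmetry is the one subtlety: a bsearch()-style lookup passes the bare key as the first argument and a table element as the second, which is why cmp_name() treats its two sides differently. A userspace sketch of the same shape, using bsearch(3) (symbol names and addresses are made up); it also shows the precondition that the table be sorted by name, which the kernel's export tables must likewise satisfy for this change to work:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct sym { const char *name; unsigned long addr; };

    /* must be sorted by name */
    static const struct sym tab[] = {
            { "bar", 0x1000 }, { "baz", 0x2000 }, { "foo", 0x3000 },
    };

    static int cmp_name(const void *key, const void *elt)
    {
            return strcmp(key, ((const struct sym *)elt)->name);
    }

    int main(void)
    {
            const struct sym *s = bsearch("baz", tab,
                                          sizeof(tab) / sizeof(tab[0]),
                                          sizeof(tab[0]), cmp_name);
            printf("%s -> %#lx\n", s ? s->name : "?", s ? s->addr : 0);
            return 0;
    }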
@@ -379,7 +402,7 @@ const struct kernel_symbol *find_symbol(const char *name, | |||
379 | fsa.gplok = gplok; | 402 | fsa.gplok = gplok; |
380 | fsa.warn = warn; | 403 | fsa.warn = warn; |
381 | 404 | ||
382 | if (each_symbol(find_symbol_in_section, &fsa)) { | 405 | if (each_symbol_section(find_symbol_in_section, &fsa)) { |
383 | if (owner) | 406 | if (owner) |
384 | *owner = fsa.owner; | 407 | *owner = fsa.owner; |
385 | if (crc) | 408 | if (crc) |
@@ -1607,27 +1630,28 @@ static void set_section_ro_nx(void *base, | |||
1607 | } | 1630 | } |
1608 | } | 1631 | } |
1609 | 1632 | ||
1610 | /* Setting memory back to RW+NX before releasing it */ | 1633 | static void unset_module_core_ro_nx(struct module *mod) |
1611 | void unset_section_ro_nx(struct module *mod, void *module_region) | ||
1612 | { | 1634 | { |
1613 | unsigned long total_pages; | 1635 | set_page_attributes(mod->module_core + mod->core_text_size, |
1614 | 1636 | mod->module_core + mod->core_size, | |
1615 | if (mod->module_core == module_region) { | 1637 | set_memory_x); |
1616 | /* Set core as NX+RW */ | 1638 | set_page_attributes(mod->module_core, |
1617 | total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size); | 1639 | mod->module_core + mod->core_ro_size, |
1618 | set_memory_nx((unsigned long)mod->module_core, total_pages); | 1640 | set_memory_rw); |
1619 | set_memory_rw((unsigned long)mod->module_core, total_pages); | 1641 | } |
1620 | 1642 | ||
1621 | } else if (mod->module_init == module_region) { | 1643 | static void unset_module_init_ro_nx(struct module *mod) |
1622 | /* Set init as NX+RW */ | 1644 | { |
1623 | total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size); | 1645 | set_page_attributes(mod->module_init + mod->init_text_size, |
1624 | set_memory_nx((unsigned long)mod->module_init, total_pages); | 1646 | mod->module_init + mod->init_size, |
1625 | set_memory_rw((unsigned long)mod->module_init, total_pages); | 1647 | set_memory_x); |
1626 | } | 1648 | set_page_attributes(mod->module_init, |
1649 | mod->module_init + mod->init_ro_size, | ||
1650 | set_memory_rw); | ||
1627 | } | 1651 | } |
1628 | 1652 | ||
1629 | /* Iterate through all modules and set each module's text as RW */ | 1653 | /* Iterate through all modules and set each module's text as RW */ |
1630 | void set_all_modules_text_rw() | 1654 | void set_all_modules_text_rw(void) |
1631 | { | 1655 | { |
1632 | struct module *mod; | 1656 | struct module *mod; |
1633 | 1657 | ||
@@ -1648,7 +1672,7 @@ void set_all_modules_text_rw() | |||
1648 | } | 1672 | } |
1649 | 1673 | ||
1650 | /* Iterate through all modules and set each module's text as RO */ | 1674 | /* Iterate through all modules and set each module's text as RO */ |
1651 | void set_all_modules_text_ro() | 1675 | void set_all_modules_text_ro(void) |
1652 | { | 1676 | { |
1653 | struct module *mod; | 1677 | struct module *mod; |
1654 | 1678 | ||
@@ -1669,7 +1693,8 @@ void set_all_modules_text_ro() | |||
1669 | } | 1693 | } |
1670 | #else | 1694 | #else |
1671 | static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { } | 1695 | static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { } |
1672 | static inline void unset_section_ro_nx(struct module *mod, void *module_region) { } | 1696 | static void unset_module_core_ro_nx(struct module *mod) { } |
1697 | static void unset_module_init_ro_nx(struct module *mod) { } | ||
1673 | #endif | 1698 | #endif |
1674 | 1699 | ||
1675 | /* Free a module, remove from lists, etc. */ | 1700 | /* Free a module, remove from lists, etc. */ |
@@ -1696,7 +1721,7 @@ static void free_module(struct module *mod) | |||
1696 | destroy_params(mod->kp, mod->num_kp); | 1721 | destroy_params(mod->kp, mod->num_kp); |
1697 | 1722 | ||
1698 | /* This may be NULL, but that's OK */ | 1723 | /* This may be NULL, but that's OK */ |
1699 | unset_section_ro_nx(mod, mod->module_init); | 1724 | unset_module_init_ro_nx(mod); |
1700 | module_free(mod, mod->module_init); | 1725 | module_free(mod, mod->module_init); |
1701 | kfree(mod->args); | 1726 | kfree(mod->args); |
1702 | percpu_modfree(mod); | 1727 | percpu_modfree(mod); |
@@ -1705,7 +1730,7 @@ static void free_module(struct module *mod) | |||
1705 | lockdep_free_key_range(mod->module_core, mod->core_size); | 1730 | lockdep_free_key_range(mod->module_core, mod->core_size); |
1706 | 1731 | ||
1707 | /* Finally, free the core (containing the module structure) */ | 1732 | /* Finally, free the core (containing the module structure) */ |
1708 | unset_section_ro_nx(mod, mod->module_core); | 1733 | unset_module_core_ro_nx(mod); |
1709 | module_free(mod, mod->module_core); | 1734 | module_free(mod, mod->module_core); |
1710 | 1735 | ||
1711 | #ifdef CONFIG_MPU | 1736 | #ifdef CONFIG_MPU |
@@ -2030,11 +2055,8 @@ static const struct kernel_symbol *lookup_symbol(const char *name, | |||
2030 | const struct kernel_symbol *start, | 2055 | const struct kernel_symbol *start, |
2031 | const struct kernel_symbol *stop) | 2056 | const struct kernel_symbol *stop) |
2032 | { | 2057 | { |
2033 | const struct kernel_symbol *ks = start; | 2058 | return bsearch(name, start, stop - start, |
2034 | for (; ks < stop; ks++) | 2059 | sizeof(struct kernel_symbol), cmp_name); |
2035 | if (strcmp(ks->name, name) == 0) | ||
2036 | return ks; | ||
2037 | return NULL; | ||
2038 | } | 2060 | } |
2039 | 2061 | ||
2040 | static int is_exported(const char *name, unsigned long value, | 2062 | static int is_exported(const char *name, unsigned long value, |
@@ -2931,10 +2953,11 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, | |||
2931 | mod->symtab = mod->core_symtab; | 2953 | mod->symtab = mod->core_symtab; |
2932 | mod->strtab = mod->core_strtab; | 2954 | mod->strtab = mod->core_strtab; |
2933 | #endif | 2955 | #endif |
2934 | unset_section_ro_nx(mod, mod->module_init); | 2956 | unset_module_init_ro_nx(mod); |
2935 | module_free(mod, mod->module_init); | 2957 | module_free(mod, mod->module_init); |
2936 | mod->module_init = NULL; | 2958 | mod->module_init = NULL; |
2937 | mod->init_size = 0; | 2959 | mod->init_size = 0; |
2960 | mod->init_ro_size = 0; | ||
2938 | mod->init_text_size = 0; | 2961 | mod->init_text_size = 0; |
2939 | mutex_unlock(&module_mutex); | 2962 | mutex_unlock(&module_mutex); |
2940 | 2963 | ||
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c index ec815a960b5d..73da83aff418 100644 --- a/kernel/mutex-debug.c +++ b/kernel/mutex-debug.c | |||
@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock) | |||
75 | return; | 75 | return; |
76 | 76 | ||
77 | DEBUG_LOCKS_WARN_ON(lock->magic != lock); | 77 | DEBUG_LOCKS_WARN_ON(lock->magic != lock); |
78 | DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); | 78 | DEBUG_LOCKS_WARN_ON(lock->owner != current); |
79 | DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); | 79 | DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); |
80 | mutex_clear_owner(lock); | 80 | mutex_clear_owner(lock); |
81 | } | 81 | } |
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h index 57d527a16f9d..0799fd3e4cfa 100644 --- a/kernel/mutex-debug.h +++ b/kernel/mutex-debug.h | |||
@@ -29,7 +29,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name, | |||
29 | 29 | ||
30 | static inline void mutex_set_owner(struct mutex *lock) | 30 | static inline void mutex_set_owner(struct mutex *lock) |
31 | { | 31 | { |
32 | lock->owner = current_thread_info(); | 32 | lock->owner = current; |
33 | } | 33 | } |
34 | 34 | ||
35 | static inline void mutex_clear_owner(struct mutex *lock) | 35 | static inline void mutex_clear_owner(struct mutex *lock) |
diff --git a/kernel/mutex.c b/kernel/mutex.c index c4195fa98900..2c938e2337cd 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c | |||
@@ -160,14 +160,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
160 | */ | 160 | */ |
161 | 161 | ||
162 | for (;;) { | 162 | for (;;) { |
163 | struct thread_info *owner; | 163 | struct task_struct *owner; |
164 | |||
165 | /* | ||
166 | * If we own the BKL, then don't spin. The owner of | ||
167 | * the mutex might be waiting on us to release the BKL. | ||
168 | */ | ||
169 | if (unlikely(current->lock_depth >= 0)) | ||
170 | break; | ||
171 | 164 | ||
172 | /* | 165 | /* |
173 | * If there's an owner, wait for it to either | 166 | * If there's an owner, wait for it to either |
diff --git a/kernel/mutex.h b/kernel/mutex.h index 67578ca48f94..4115fbf83b12 100644 --- a/kernel/mutex.h +++ b/kernel/mutex.h | |||
@@ -19,7 +19,7 @@ | |||
19 | #ifdef CONFIG_SMP | 19 | #ifdef CONFIG_SMP |
20 | static inline void mutex_set_owner(struct mutex *lock) | 20 | static inline void mutex_set_owner(struct mutex *lock) |
21 | { | 21 | { |
22 | lock->owner = current_thread_info(); | 22 | lock->owner = current; |
23 | } | 23 | } |
24 | 24 | ||
25 | static inline void mutex_clear_owner(struct mutex *lock) | 25 | static inline void mutex_clear_owner(struct mutex *lock) |
diff --git a/kernel/params.c b/kernel/params.c index 7ab388a48a2e..ed72e1330862 100644 --- a/kernel/params.c +++ b/kernel/params.c | |||
@@ -297,21 +297,15 @@ EXPORT_SYMBOL(param_ops_charp); | |||
297 | int param_set_bool(const char *val, const struct kernel_param *kp) | 297 | int param_set_bool(const char *val, const struct kernel_param *kp) |
298 | { | 298 | { |
299 | bool v; | 299 | bool v; |
300 | int ret; | ||
300 | 301 | ||
301 | /* No equals means "set"... */ | 302 | /* No equals means "set"... */ |
302 | if (!val) val = "1"; | 303 | if (!val) val = "1"; |
303 | 304 | ||
304 | /* One of =[yYnN01] */ | 305 | /* One of =[yYnN01] */ |
305 | switch (val[0]) { | 306 | ret = strtobool(val, &v); |
306 | case 'y': case 'Y': case '1': | 307 | if (ret) |
307 | v = true; | 308 | return ret; |
308 | break; | ||
309 | case 'n': case 'N': case '0': | ||
310 | v = false; | ||
311 | break; | ||
312 | default: | ||
313 | return -EINVAL; | ||
314 | } | ||
315 | 309 | ||
316 | if (kp->flags & KPARAM_ISBOOL) | 310 | if (kp->flags & KPARAM_ISBOOL) |
317 | *(bool *)kp->arg = v; | 311 | *(bool *)kp->arg = v; |
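param_set_bool() now delegates the [yYnN01] parsing to strtobool(); its behaviour, sketched freestanding (the in-kernel helper lives in lib/string.c, and only the first character matters):

    #include <errno.h>
    #include <stdbool.h>

    /* returns 0 and sets *res on success, -EINVAL otherwise */
    static int strtobool_sketch(const char *s, bool *res)
    {
            switch (s[0]) {
            case 'y': case 'Y': case '1':
                    *res = true;
                    return 0;
            case 'n': case 'N': case '0':
                    *res = false;
                    return 0;
            default:
                    return -EINVAL;
            }
    }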
@@ -821,15 +815,18 @@ ssize_t __modver_version_show(struct module_attribute *mattr, | |||
821 | return sprintf(buf, "%s\n", vattr->version); | 815 | return sprintf(buf, "%s\n", vattr->version); |
822 | } | 816 | } |
823 | 817 | ||
824 | extern struct module_version_attribute __start___modver[], __stop___modver[]; | 818 | extern const struct module_version_attribute *__start___modver[]; |
819 | extern const struct module_version_attribute *__stop___modver[]; | ||
825 | 820 | ||
826 | static void __init version_sysfs_builtin(void) | 821 | static void __init version_sysfs_builtin(void) |
827 | { | 822 | { |
828 | const struct module_version_attribute *vattr; | 823 | const struct module_version_attribute **p; |
829 | struct module_kobject *mk; | 824 | struct module_kobject *mk; |
830 | int err; | 825 | int err; |
831 | 826 | ||
832 | for (vattr = __start___modver; vattr < __stop___modver; vattr++) { | 827 | for (p = __start___modver; p < __stop___modver; p++) { |
828 | const struct module_version_attribute *vattr = *p; | ||
829 | |||
833 | mk = locate_module_kobject(vattr->module_name); | 830 | mk = locate_module_kobject(vattr->module_name); |
834 | if (mk) { | 831 | if (mk) { |
835 | err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr); | 832 | err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr); |
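Switching __modver from an array of structs to an array of pointers sidesteps an alignment trap: the compiler may give each object it places in a section extra alignment padding, so walking the section as struct-sized elements can step into the gaps, whereas pointers pack uniformly. A freestanding GCC/GNU-ld sketch of the same __start_/__stop_ section-bounds idiom (section name and types are made up):

    #include <stdio.h>

    struct ver_attr { const char *module_name; const char *version; };

    static const struct ver_attr foo_ver = { "foo", "1.0" };

    /* place a *pointer* in the section; the struct itself can live
     * anywhere with any alignment */
    static const struct ver_attr *foo_ver_ptr
            __attribute__((section("modver_demo"), used)) = &foo_ver;

    /* the linker synthesizes bounds for any C-identifier section name */
    extern const struct ver_attr *__start_modver_demo[];
    extern const struct ver_attr *__stop_modver_demo[];

    int main(void)
    {
            for (const struct ver_attr **p = __start_modver_demo;
                 p < __stop_modver_demo; p++)
                    printf("%s: %s\n", (*p)->module_name, (*p)->version);
            return 0;
    }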
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 6de9a8fc3417..87f4d24b55b0 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
@@ -125,12 +125,6 @@ config PM_DEBUG | |||
125 | code. This is helpful when debugging and reporting PM bugs, like | 125 | code. This is helpful when debugging and reporting PM bugs, like |
126 | suspend support. | 126 | suspend support. |
127 | 127 | ||
128 | config PM_VERBOSE | ||
129 | bool "Verbose Power Management debugging" | ||
130 | depends on PM_DEBUG | ||
131 | ---help--- | ||
132 | This option enables verbose messages from the Power Management code. | ||
133 | |||
134 | config PM_ADVANCED_DEBUG | 128 | config PM_ADVANCED_DEBUG |
135 | bool "Extra PM attributes in sysfs for low-level debugging/testing" | 129 | bool "Extra PM attributes in sysfs for low-level debugging/testing" |
136 | depends on PM_DEBUG | 130 | depends on PM_DEBUG |
@@ -229,3 +223,7 @@ config PM_OPP | |||
229 | representing individual voltage domains and provides SOC | 223 | representing individual voltage domains and provides SOC |
230 | implementations a ready to use framework to manage OPPs. | 224 | implementations a ready to use framework to manage OPPs. |
231 | For more information, read <file:Documentation/power/opp.txt> | 225 | For more information, read <file:Documentation/power/opp.txt> |
226 | |||
227 | config PM_RUNTIME_CLK | ||
228 | def_bool y | ||
229 | depends on PM_RUNTIME && HAVE_CLK | ||
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 50aae660174d..f9bec56d8825 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -272,12 +272,7 @@ static int create_image(int platform_mode) | |||
272 | 272 | ||
273 | local_irq_disable(); | 273 | local_irq_disable(); |
274 | 274 | ||
275 | error = sysdev_suspend(PMSG_FREEZE); | 275 | error = syscore_suspend(); |
276 | if (!error) { | ||
277 | error = syscore_suspend(); | ||
278 | if (error) | ||
279 | sysdev_resume(); | ||
280 | } | ||
281 | if (error) { | 276 | if (error) { |
282 | printk(KERN_ERR "PM: Some system devices failed to power down, " | 277 | printk(KERN_ERR "PM: Some system devices failed to power down, " |
283 | "aborting hibernation\n"); | 278 | "aborting hibernation\n"); |
@@ -302,7 +297,6 @@ static int create_image(int platform_mode) | |||
302 | 297 | ||
303 | Power_up: | 298 | Power_up: |
304 | syscore_resume(); | 299 | syscore_resume(); |
305 | sysdev_resume(); | ||
306 | /* NOTE: dpm_resume_noirq() is just a resume() for devices | 300 | /* NOTE: dpm_resume_noirq() is just a resume() for devices |
307 | * that suspended with irqs off ... no overall powerup. | 301 | * that suspended with irqs off ... no overall powerup. |
308 | */ | 302 | */ |
@@ -333,20 +327,25 @@ static int create_image(int platform_mode) | |||
333 | 327 | ||
334 | int hibernation_snapshot(int platform_mode) | 328 | int hibernation_snapshot(int platform_mode) |
335 | { | 329 | { |
330 | pm_message_t msg = PMSG_RECOVER; | ||
336 | int error; | 331 | int error; |
337 | 332 | ||
338 | error = platform_begin(platform_mode); | 333 | error = platform_begin(platform_mode); |
339 | if (error) | 334 | if (error) |
340 | goto Close; | 335 | goto Close; |
341 | 336 | ||
337 | error = dpm_prepare(PMSG_FREEZE); | ||
338 | if (error) | ||
339 | goto Complete_devices; | ||
340 | |||
342 | /* Preallocate image memory before shutting down devices. */ | 341 | /* Preallocate image memory before shutting down devices. */ |
343 | error = hibernate_preallocate_memory(); | 342 | error = hibernate_preallocate_memory(); |
344 | if (error) | 343 | if (error) |
345 | goto Close; | 344 | goto Complete_devices; |
346 | 345 | ||
347 | suspend_console(); | 346 | suspend_console(); |
348 | pm_restrict_gfp_mask(); | 347 | pm_restrict_gfp_mask(); |
349 | error = dpm_suspend_start(PMSG_FREEZE); | 348 | error = dpm_suspend(PMSG_FREEZE); |
350 | if (error) | 349 | if (error) |
351 | goto Recover_platform; | 350 | goto Recover_platform; |
352 | 351 | ||
@@ -364,13 +363,17 @@ int hibernation_snapshot(int platform_mode) | |||
364 | if (error || !in_suspend) | 363 | if (error || !in_suspend) |
365 | swsusp_free(); | 364 | swsusp_free(); |
366 | 365 | ||
367 | dpm_resume_end(in_suspend ? | 366 | msg = in_suspend ? (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE; |
368 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | 367 | dpm_resume(msg); |
369 | 368 | ||
370 | if (error || !in_suspend) | 369 | if (error || !in_suspend) |
371 | pm_restore_gfp_mask(); | 370 | pm_restore_gfp_mask(); |
372 | 371 | ||
373 | resume_console(); | 372 | resume_console(); |
373 | |||
374 | Complete_devices: | ||
375 | dpm_complete(msg); | ||
376 | |||
374 | Close: | 377 | Close: |
375 | platform_end(platform_mode); | 378 | platform_end(platform_mode); |
376 | return error; | 379 | return error; |
@@ -409,12 +412,7 @@ static int resume_target_kernel(bool platform_mode) | |||
409 | 412 | ||
410 | local_irq_disable(); | 413 | local_irq_disable(); |
411 | 414 | ||
412 | error = sysdev_suspend(PMSG_QUIESCE); | 415 | error = syscore_suspend(); |
413 | if (!error) { | ||
414 | error = syscore_suspend(); | ||
415 | if (error) | ||
416 | sysdev_resume(); | ||
417 | } | ||
418 | if (error) | 416 | if (error) |
419 | goto Enable_irqs; | 417 | goto Enable_irqs; |
420 | 418 | ||
@@ -442,7 +440,6 @@ static int resume_target_kernel(bool platform_mode) | |||
442 | touch_softlockup_watchdog(); | 440 | touch_softlockup_watchdog(); |
443 | 441 | ||
444 | syscore_resume(); | 442 | syscore_resume(); |
445 | sysdev_resume(); | ||
446 | 443 | ||
447 | Enable_irqs: | 444 | Enable_irqs: |
448 | local_irq_enable(); | 445 | local_irq_enable(); |
@@ -528,7 +525,6 @@ int hibernation_platform_enter(void) | |||
528 | goto Platform_finish; | 525 | goto Platform_finish; |
529 | 526 | ||
530 | local_irq_disable(); | 527 | local_irq_disable(); |
531 | sysdev_suspend(PMSG_HIBERNATE); | ||
532 | syscore_suspend(); | 528 | syscore_suspend(); |
533 | if (pm_wakeup_pending()) { | 529 | if (pm_wakeup_pending()) { |
534 | error = -EAGAIN; | 530 | error = -EAGAIN; |
@@ -541,7 +537,6 @@ int hibernation_platform_enter(void) | |||
541 | 537 | ||
542 | Power_up: | 538 | Power_up: |
543 | syscore_resume(); | 539 | syscore_resume(); |
544 | sysdev_resume(); | ||
545 | local_irq_enable(); | 540 | local_irq_enable(); |
546 | enable_nonboot_cpus(); | 541 | enable_nonboot_cpus(); |
547 | 542 | ||
@@ -982,10 +977,33 @@ static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *att | |||
982 | 977 | ||
983 | power_attr(image_size); | 978 | power_attr(image_size); |
984 | 979 | ||
980 | static ssize_t reserved_size_show(struct kobject *kobj, | ||
981 | struct kobj_attribute *attr, char *buf) | ||
982 | { | ||
983 | return sprintf(buf, "%lu\n", reserved_size); | ||
984 | } | ||
985 | |||
986 | static ssize_t reserved_size_store(struct kobject *kobj, | ||
987 | struct kobj_attribute *attr, | ||
988 | const char *buf, size_t n) | ||
989 | { | ||
990 | unsigned long size; | ||
991 | |||
992 | if (sscanf(buf, "%lu", &size) == 1) { | ||
993 | reserved_size = size; | ||
994 | return n; | ||
995 | } | ||
996 | |||
997 | return -EINVAL; | ||
998 | } | ||
999 | |||
1000 | power_attr(reserved_size); | ||
1001 | |||
985 | static struct attribute * g[] = { | 1002 | static struct attribute * g[] = { |
986 | &disk_attr.attr, | 1003 | &disk_attr.attr, |
987 | &resume_attr.attr, | 1004 | &resume_attr.attr, |
988 | &image_size_attr.attr, | 1005 | &image_size_attr.attr, |
1006 | &reserved_size_attr.attr, | ||
989 | NULL, | 1007 | NULL, |
990 | }; | 1008 | }; |
991 | 1009 | ||
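The new attribute is read/write: reading /sys/power/reserved_size returns the current byte reserve (assuming the usual SPARE_PAGES definition of one MiB worth of pages, the boot-time default would read back as 1048576), and writing a decimal byte count, e.g. echo 2097152 into the file, resizes it; anything sscanf() cannot parse as %lu is rejected with -EINVAL.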
diff --git a/kernel/power/main.c b/kernel/power/main.c index de9aef8742f4..2981af4ce7cb 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -337,6 +337,7 @@ static int __init pm_init(void) | |||
337 | if (error) | 337 | if (error) |
338 | return error; | 338 | return error; |
339 | hibernate_image_size_init(); | 339 | hibernate_image_size_init(); |
340 | hibernate_reserved_size_init(); | ||
340 | power_kobj = kobject_create_and_add("power", NULL); | 341 | power_kobj = kobject_create_and_add("power", NULL); |
341 | if (!power_kobj) | 342 | if (!power_kobj) |
342 | return -ENOMEM; | 343 | return -ENOMEM; |
diff --git a/kernel/power/power.h b/kernel/power/power.h index 03634be55f62..9a00a0a26280 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h | |||
@@ -15,6 +15,7 @@ struct swsusp_info { | |||
15 | 15 | ||
16 | #ifdef CONFIG_HIBERNATION | 16 | #ifdef CONFIG_HIBERNATION |
17 | /* kernel/power/snapshot.c */ | 17 | /* kernel/power/snapshot.c */ |
18 | extern void __init hibernate_reserved_size_init(void); | ||
18 | extern void __init hibernate_image_size_init(void); | 19 | extern void __init hibernate_image_size_init(void); |
19 | 20 | ||
20 | #ifdef CONFIG_ARCH_HIBERNATION_HEADER | 21 | #ifdef CONFIG_ARCH_HIBERNATION_HEADER |
@@ -55,6 +56,7 @@ extern int hibernation_platform_enter(void); | |||
55 | 56 | ||
56 | #else /* !CONFIG_HIBERNATION */ | 57 | #else /* !CONFIG_HIBERNATION */ |
57 | 58 | ||
59 | static inline void hibernate_reserved_size_init(void) {} | ||
58 | static inline void hibernate_image_size_init(void) {} | 60 | static inline void hibernate_image_size_init(void) {} |
59 | #endif /* !CONFIG_HIBERNATION */ | 61 | #endif /* !CONFIG_HIBERNATION */ |
60 | 62 | ||
@@ -72,6 +74,8 @@ static struct kobj_attribute _name##_attr = { \ | |||
72 | 74 | ||
73 | /* Preferred image size in bytes (default 500 MB) */ | 75 | /* Preferred image size in bytes (default 500 MB) */ |
74 | extern unsigned long image_size; | 76 | extern unsigned long image_size; |
77 | /* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */ | ||
78 | extern unsigned long reserved_size; | ||
75 | extern int in_suspend; | 79 | extern int in_suspend; |
76 | extern dev_t swsusp_resume_device; | 80 | extern dev_t swsusp_resume_device; |
77 | extern sector_t swsusp_resume_block; | 81 | extern sector_t swsusp_resume_block; |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index ca0aacc24874..ace55889f702 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -41,16 +41,28 @@ static void swsusp_set_page_forbidden(struct page *); | |||
41 | static void swsusp_unset_page_forbidden(struct page *); | 41 | static void swsusp_unset_page_forbidden(struct page *); |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * Number of bytes to reserve for memory allocations made by device drivers | ||
45 | * from their ->freeze() and ->freeze_noirq() callbacks so that they don't | ||
46 | * cause image creation to fail (tunable via /sys/power/reserved_size). | ||
47 | */ | ||
48 | unsigned long reserved_size; | ||
49 | |||
50 | void __init hibernate_reserved_size_init(void) | ||
51 | { | ||
52 | reserved_size = SPARE_PAGES * PAGE_SIZE; | ||
53 | } | ||
54 | |||
55 | /* | ||
44 | * Preferred image size in bytes (tunable via /sys/power/image_size). | 56 | * Preferred image size in bytes (tunable via /sys/power/image_size). |
45 | * When it is set to N, the image creating code will do its best to | 57 | * When it is set to N, swsusp will do its best to ensure the image |
46 | * ensure the image size will not exceed N bytes, but if that is | 58 | * size will not exceed N bytes, but if that is impossible, it will |
47 | * impossible, it will try to create the smallest image possible. | 59 | * try to create the smallest image possible. |
48 | */ | 60 | */ |
49 | unsigned long image_size; | 61 | unsigned long image_size; |
50 | 62 | ||
51 | void __init hibernate_image_size_init(void) | 63 | void __init hibernate_image_size_init(void) |
52 | { | 64 | { |
53 | image_size = (totalram_pages / 3) * PAGE_SIZE; | 65 | image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE; |
54 | } | 66 | } |
55 | 67 | ||
56 | /* List of PBEs needed for restoring the pages that were allocated before | 68 | /* List of PBEs needed for restoring the pages that were allocated before |
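Worked example for the new default: on a machine with 4 GiB of RAM and 4 KiB pages, totalram_pages is roughly 1,048,576, so the default image_size grows from (1,048,576 / 3) pages, about 1.33 GiB, to ((1,048,576 * 2) / 5) pages, about 1.60 GiB — two fifths of RAM instead of one third.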
@@ -1263,11 +1275,13 @@ static unsigned long minimum_image_size(unsigned long saveable) | |||
1263 | * frame in use. We also need a number of page frames to be free during | 1275 | * frame in use. We also need a number of page frames to be free during |
1264 | * hibernation for allocations made while saving the image and for device | 1276 | * hibernation for allocations made while saving the image and for device |
1265 | * drivers, in case they need to allocate memory from their hibernation | 1277 | * drivers, in case they need to allocate memory from their hibernation |
1266 | * callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES, | 1278 | * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough |
1267 | * respectively, both of which are rough estimates). To make this happen, we | 1279 | * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through |
1268 | * compute the total number of available page frames and allocate at least | 1280 | * /sys/power/reserved_size, respectively). To make this happen, we compute the |
1281 | * total number of available page frames and allocate at least | ||
1269 | * | 1282 | * |
1270 | * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 + 2 * SPARE_PAGES | 1283 | * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 |
1284 | * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE) | ||
1271 | * | 1285 | * |
1272 | * of them, which corresponds to the maximum size of a hibernation image. | 1286 | * of them, which corresponds to the maximum size of a hibernation image. |
1273 | * | 1287 | * |
@@ -1322,7 +1336,8 @@ int hibernate_preallocate_memory(void) | |||
1322 | count -= totalreserve_pages; | 1336 | count -= totalreserve_pages; |
1323 | 1337 | ||
1324 | /* Compute the maximum number of saveable pages to leave in memory. */ | 1338 | /* Compute the maximum number of saveable pages to leave in memory. */ |
1325 | max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES; | 1339 | max_size = (count - (size + PAGES_FOR_IO)) / 2 |
1340 | - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE); | ||
1326 | /* Compute the desired number of image pages specified by image_size. */ | 1341 | /* Compute the desired number of image pages specified by image_size. */ |
1327 | size = DIV_ROUND_UP(image_size, PAGE_SIZE); | 1342 | size = DIV_ROUND_UP(image_size, PAGE_SIZE); |
1328 | if (size > max_size) | 1343 | if (size > max_size) |
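Note that with the default set by hibernate_reserved_size_init() above, reserved_size is exactly SPARE_PAGES * PAGE_SIZE, so 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE) reduces to the 2 * SPARE_PAGES term it replaces; behaviour only changes once a different value is written to /sys/power/reserved_size.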
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 8935369d503a..1c41ba215419 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -163,19 +163,13 @@ static int suspend_enter(suspend_state_t state) | |||
163 | arch_suspend_disable_irqs(); | 163 | arch_suspend_disable_irqs(); |
164 | BUG_ON(!irqs_disabled()); | 164 | BUG_ON(!irqs_disabled()); |
165 | 165 | ||
166 | error = sysdev_suspend(PMSG_SUSPEND); | 166 | error = syscore_suspend(); |
167 | if (!error) { | ||
168 | error = syscore_suspend(); | ||
169 | if (error) | ||
170 | sysdev_resume(); | ||
171 | } | ||
172 | if (!error) { | 167 | if (!error) { |
173 | if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { | 168 | if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { |
174 | error = suspend_ops->enter(state); | 169 | error = suspend_ops->enter(state); |
175 | events_check_enabled = false; | 170 | events_check_enabled = false; |
176 | } | 171 | } |
177 | syscore_resume(); | 172 | syscore_resume(); |
178 | sysdev_resume(); | ||
179 | } | 173 | } |
180 | 174 | ||
181 | arch_suspend_enable_irqs(); | 175 | arch_suspend_enable_irqs(); |
@@ -216,7 +210,6 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
216 | goto Close; | 210 | goto Close; |
217 | } | 211 | } |
218 | suspend_console(); | 212 | suspend_console(); |
219 | pm_restrict_gfp_mask(); | ||
220 | suspend_test_start(); | 213 | suspend_test_start(); |
221 | error = dpm_suspend_start(PMSG_SUSPEND); | 214 | error = dpm_suspend_start(PMSG_SUSPEND); |
222 | if (error) { | 215 | if (error) { |
@@ -227,13 +220,12 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
227 | if (suspend_test(TEST_DEVICES)) | 220 | if (suspend_test(TEST_DEVICES)) |
228 | goto Recover_platform; | 221 | goto Recover_platform; |
229 | 222 | ||
230 | suspend_enter(state); | 223 | error = suspend_enter(state); |
231 | 224 | ||
232 | Resume_devices: | 225 | Resume_devices: |
233 | suspend_test_start(); | 226 | suspend_test_start(); |
234 | dpm_resume_end(PMSG_RESUME); | 227 | dpm_resume_end(PMSG_RESUME); |
235 | suspend_test_finish("resume devices"); | 228 | suspend_test_finish("resume devices"); |
236 | pm_restore_gfp_mask(); | ||
237 | resume_console(); | 229 | resume_console(); |
238 | Close: | 230 | Close: |
239 | if (suspend_ops->end) | 231 | if (suspend_ops->end) |
@@ -294,7 +286,9 @@ int enter_state(suspend_state_t state) | |||
294 | goto Finish; | 286 | goto Finish; |
295 | 287 | ||
296 | pr_debug("PM: Entering %s sleep\n", pm_states[state]); | 288 | pr_debug("PM: Entering %s sleep\n", pm_states[state]); |
289 | pm_restrict_gfp_mask(); | ||
297 | error = suspend_devices_and_enter(state); | 290 | error = suspend_devices_and_enter(state); |
291 | pm_restore_gfp_mask(); | ||
298 | 292 | ||
299 | Finish: | 293 | Finish: |
300 | pr_debug("PM: Finishing wakeup.\n"); | 294 | pr_debug("PM: Finishing wakeup.\n"); |
diff --git a/kernel/power/user.c b/kernel/power/user.c index c36c3b9e8a84..7d02d33be699 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
@@ -135,8 +135,10 @@ static int snapshot_release(struct inode *inode, struct file *filp) | |||
135 | free_basic_memory_bitmaps(); | 135 | free_basic_memory_bitmaps(); |
136 | data = filp->private_data; | 136 | data = filp->private_data; |
137 | free_all_swap_pages(data->swap); | 137 | free_all_swap_pages(data->swap); |
138 | if (data->frozen) | 138 | if (data->frozen) { |
139 | pm_restore_gfp_mask(); | ||
139 | thaw_processes(); | 140 | thaw_processes(); |
141 | } | ||
140 | pm_notifier_call_chain(data->mode == O_RDONLY ? | 142 | pm_notifier_call_chain(data->mode == O_RDONLY ? |
141 | PM_POST_HIBERNATION : PM_POST_RESTORE); | 143 | PM_POST_HIBERNATION : PM_POST_RESTORE); |
142 | atomic_inc(&snapshot_device_available); | 144 | atomic_inc(&snapshot_device_available); |
@@ -379,6 +381,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
379 | * PM_HIBERNATION_PREPARE | 381 | * PM_HIBERNATION_PREPARE |
380 | */ | 382 | */ |
381 | error = suspend_devices_and_enter(PM_SUSPEND_MEM); | 383 | error = suspend_devices_and_enter(PM_SUSPEND_MEM); |
384 | data->ready = 0; | ||
382 | break; | 385 | break; |
383 | 386 | ||
384 | case SNAPSHOT_PLATFORM_SUPPORT: | 387 | case SNAPSHOT_PLATFORM_SUPPORT: |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index f3240e987928..7784bd216b6a 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -142,10 +142,17 @@ static int rcuhead_fixup_init(void *addr, enum debug_obj_state state) | |||
142 | * Ensure that queued callbacks are all executed. | 142 | * Ensure that queued callbacks are all executed. |
143 | * If we detect that we are nested in a RCU read-side critical | 143 | * If we detect that we are nested in a RCU read-side critical |
144 | * section, we should simply fail, otherwise we would deadlock. | 144 | * section, we should simply fail, otherwise we would deadlock. |
145 | * In !PREEMPT configurations, there is no way to tell if we are | ||
146 | * in an RCU read-side critical section or not, so we never | ||
147 | * attempt any fixup and just print a warning. | ||
145 | */ | 148 | */ |
149 | #ifndef CONFIG_PREEMPT | ||
150 | WARN_ON_ONCE(1); | ||
151 | return 0; | ||
152 | #endif | ||
146 | if (rcu_preempt_depth() != 0 || preempt_count() != 0 || | 153 | if (rcu_preempt_depth() != 0 || preempt_count() != 0 || |
147 | irqs_disabled()) { | 154 | irqs_disabled()) { |
148 | WARN_ON(1); | 155 | WARN_ON_ONCE(1); |
149 | return 0; | 156 | return 0; |
150 | } | 157 | } |
151 | rcu_barrier(); | 158 | rcu_barrier(); |
@@ -184,10 +191,17 @@ static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state) | |||
184 | * Ensure that queued callbacks are all executed. | 191 | * Ensure that queued callbacks are all executed. |
185 | * If we detect that we are nested in a RCU read-side critical | 192 | * If we detect that we are nested in a RCU read-side critical |
186 | * section, we should simply fail, otherwise we would deadlock. | 193 | * section, we should simply fail, otherwise we would deadlock. |
194 | * In !PREEMPT configurations, there is no way to tell if we are | ||
195 | * in an RCU read-side critical section or not, so we never | ||
196 | * attempt any fixup and just print a warning. | ||
187 | */ | 197 | */ |
198 | #ifndef CONFIG_PREEMPT | ||
199 | WARN_ON_ONCE(1); | ||
200 | return 0; | ||
201 | #endif | ||
188 | if (rcu_preempt_depth() != 0 || preempt_count() != 0 || | 202 | if (rcu_preempt_depth() != 0 || preempt_count() != 0 || |
189 | irqs_disabled()) { | 203 | irqs_disabled()) { |
190 | WARN_ON(1); | 204 | WARN_ON_ONCE(1); |
191 | return 0; | 205 | return 0; |
192 | } | 206 | } |
193 | rcu_barrier(); | 207 | rcu_barrier(); |
@@ -214,15 +228,17 @@ static int rcuhead_fixup_free(void *addr, enum debug_obj_state state) | |||
214 | * Ensure that queued callbacks are all executed. | 228 | * Ensure that queued callbacks are all executed. |
215 | * If we detect that we are nested in a RCU read-side critical | 229 | * If we detect that we are nested in a RCU read-side critical |
216 | * section, we should simply fail, otherwise we would deadlock. | 230 | * section, we should simply fail, otherwise we would deadlock. |
217 | * Note that the machinery to reliably determine whether | 231 | * In !PREEMPT configurations, there is no way to tell if we are |
218 | * or not we are in an RCU read-side critical section | 232 | * in an RCU read-side critical section or not, so we never |
219 | * exists only in the preemptible RCU implementations | 233 | * attempt any fixup and just print a warning. |
220 | * (TINY_PREEMPT_RCU and TREE_PREEMPT_RCU), which is why | ||
221 | * DEBUG_OBJECTS_RCU_HEAD is disallowed if !PREEMPT. | ||
222 | */ | 234 | */ |
235 | #ifndef CONFIG_PREEMPT | ||
236 | WARN_ON_ONCE(1); | ||
237 | return 0; | ||
238 | #endif | ||
223 | if (rcu_preempt_depth() != 0 || preempt_count() != 0 || | 239 | if (rcu_preempt_depth() != 0 || preempt_count() != 0 || |
224 | irqs_disabled()) { | 240 | irqs_disabled()) { |
225 | WARN_ON(1); | 241 | WARN_ON_ONCE(1); |
226 | return 0; | 242 | return 0; |
227 | } | 243 | } |
228 | rcu_barrier(); | 244 | rcu_barrier(); |
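All three fixup handlers above gain the same !PREEMPT guard because only the preemptible RCU implementations leave any runtime trace of a read-side critical section. A sketch of the difference (simplified from the rcu_read_lock() implementations of this era; not part of the patch):

	/* Preemptible RCU (TINY_PREEMPT_RCU/TREE_PREEMPT_RCU): readers count. */
	void __rcu_read_lock(void)
	{
		current->rcu_read_lock_nesting++;	/* rcu_preempt_depth() reads this */
		barrier();
	}

	/*
	 * !PREEMPT RCU: rcu_read_lock() maps to preempt_disable(), which
	 * compiles away entirely when CONFIG_PREEMPT=n, so neither
	 * rcu_preempt_depth() nor preempt_count() can reveal a reader.
	 * Hence the unconditional WARN_ON_ONCE() and early return above.
	 */
	static inline void rcu_read_lock(void)
	{
		preempt_disable();	/* no-op in !PREEMPT kernels */
	}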
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index 0c343b9a46d5..421abfd3641d 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c | |||
@@ -40,10 +40,10 @@ | |||
40 | static struct task_struct *rcu_kthread_task; | 40 | static struct task_struct *rcu_kthread_task; |
41 | static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq); | 41 | static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq); |
42 | static unsigned long have_rcu_kthread_work; | 42 | static unsigned long have_rcu_kthread_work; |
43 | static void invoke_rcu_kthread(void); | ||
44 | 43 | ||
45 | /* Forward declarations for rcutiny_plugin.h. */ | 44 | /* Forward declarations for rcutiny_plugin.h. */ |
46 | struct rcu_ctrlblk; | 45 | struct rcu_ctrlblk; |
46 | static void invoke_rcu_kthread(void); | ||
47 | static void rcu_process_callbacks(struct rcu_ctrlblk *rcp); | 47 | static void rcu_process_callbacks(struct rcu_ctrlblk *rcp); |
48 | static int rcu_kthread(void *arg); | 48 | static int rcu_kthread(void *arg); |
49 | static void __call_rcu(struct rcu_head *head, | 49 | static void __call_rcu(struct rcu_head *head, |
@@ -79,36 +79,45 @@ void rcu_exit_nohz(void) | |||
79 | #endif /* #ifdef CONFIG_NO_HZ */ | 79 | #endif /* #ifdef CONFIG_NO_HZ */ |
80 | 80 | ||
81 | /* | 81 | /* |
82 | * Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc(). | 82 | * Helper function for rcu_sched_qs() and rcu_bh_qs(). |
83 | * Also disable irqs to avoid confusion due to interrupt handlers | 83 | * Also irqs are disabled to avoid confusion due to interrupt handlers |
84 | * invoking call_rcu(). | 84 | * invoking call_rcu(). |
85 | */ | 85 | */ |
86 | static int rcu_qsctr_help(struct rcu_ctrlblk *rcp) | 86 | static int rcu_qsctr_help(struct rcu_ctrlblk *rcp) |
87 | { | 87 | { |
88 | unsigned long flags; | ||
89 | |||
90 | local_irq_save(flags); | ||
91 | if (rcp->rcucblist != NULL && | 88 | if (rcp->rcucblist != NULL && |
92 | rcp->donetail != rcp->curtail) { | 89 | rcp->donetail != rcp->curtail) { |
93 | rcp->donetail = rcp->curtail; | 90 | rcp->donetail = rcp->curtail; |
94 | local_irq_restore(flags); | ||
95 | return 1; | 91 | return 1; |
96 | } | 92 | } |
97 | local_irq_restore(flags); | ||
98 | 93 | ||
99 | return 0; | 94 | return 0; |
100 | } | 95 | } |
101 | 96 | ||
102 | /* | 97 | /* |
98 | * Wake up rcu_kthread() to process callbacks now eligible for invocation | ||
99 | * or to boost readers. | ||
100 | */ | ||
101 | static void invoke_rcu_kthread(void) | ||
102 | { | ||
103 | have_rcu_kthread_work = 1; | ||
104 | wake_up(&rcu_kthread_wq); | ||
105 | } | ||
106 | |||
107 | /* | ||
103 | * Record an rcu quiescent state. And an rcu_bh quiescent state while we | 108 | * Record an rcu quiescent state. And an rcu_bh quiescent state while we |
104 | * are at it, given that any rcu quiescent state is also an rcu_bh | 109 | * are at it, given that any rcu quiescent state is also an rcu_bh |
105 | * quiescent state. Use "+" instead of "||" to defeat short circuiting. | 110 | * quiescent state. Use "+" instead of "||" to defeat short circuiting. |
106 | */ | 111 | */ |
107 | void rcu_sched_qs(int cpu) | 112 | void rcu_sched_qs(int cpu) |
108 | { | 113 | { |
114 | unsigned long flags; | ||
115 | |||
116 | local_irq_save(flags); | ||
109 | if (rcu_qsctr_help(&rcu_sched_ctrlblk) + | 117 | if (rcu_qsctr_help(&rcu_sched_ctrlblk) + |
110 | rcu_qsctr_help(&rcu_bh_ctrlblk)) | 118 | rcu_qsctr_help(&rcu_bh_ctrlblk)) |
111 | invoke_rcu_kthread(); | 119 | invoke_rcu_kthread(); |
120 | local_irq_restore(flags); | ||
112 | } | 121 | } |
113 | 122 | ||
114 | /* | 123 | /* |
@@ -116,8 +125,12 @@ void rcu_sched_qs(int cpu) | |||
116 | */ | 125 | */ |
117 | void rcu_bh_qs(int cpu) | 126 | void rcu_bh_qs(int cpu) |
118 | { | 127 | { |
128 | unsigned long flags; | ||
129 | |||
130 | local_irq_save(flags); | ||
119 | if (rcu_qsctr_help(&rcu_bh_ctrlblk)) | 131 | if (rcu_qsctr_help(&rcu_bh_ctrlblk)) |
120 | invoke_rcu_kthread(); | 132 | invoke_rcu_kthread(); |
133 | local_irq_restore(flags); | ||
121 | } | 134 | } |
122 | 135 | ||
123 | /* | 136 | /* |
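The rcutiny.c hunks above hoist interrupt disabling out of rcu_qsctr_help() and into rcu_sched_qs() and rcu_bh_qs(), so the flags are saved and restored once per quiescent state rather than once per helper call; the relocated invoke_rcu_kthread() sheds its own irq fiddling for the same reason. The resulting calling convention, condensed (annotated sketch of the code above):

	void rcu_bh_qs(int cpu)
	{
		unsigned long flags;

		local_irq_save(flags);			/* one bracket covers... */
		if (rcu_qsctr_help(&rcu_bh_ctrlblk))	/* ...the plain-predicate helper... */
			invoke_rcu_kthread();		/* ...and the flag-set-plus-wakeup, */
		local_irq_restore(flags);		/* keeping both atomic with respect to
							   call_rcu() from interrupt handlers */
	}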
@@ -167,7 +180,7 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp) | |||
167 | prefetch(next); | 180 | prefetch(next); |
168 | debug_rcu_head_unqueue(list); | 181 | debug_rcu_head_unqueue(list); |
169 | local_bh_disable(); | 182 | local_bh_disable(); |
170 | list->func(list); | 183 | __rcu_reclaim(list); |
171 | local_bh_enable(); | 184 | local_bh_enable(); |
172 | list = next; | 185 | list = next; |
173 | RCU_TRACE(cb_count++); | 186 | RCU_TRACE(cb_count++); |
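Here, and in the identical rcu_do_batch() change in kernel/rcutree.c further below, the direct list->func(list) invocation becomes __rcu_reclaim(list). That helper (introduced alongside kfree_rcu(); roughly as follows, per kernel/rcu.h of this series) lets a callback "pointer" instead encode the offset of the rcu_head within its enclosing structure:

	static inline void __rcu_reclaim(struct rcu_head *head)
	{
		unsigned long offset = (unsigned long)head->func;

		if (__is_kfree_rcu_offset(offset))	/* small value: an offset, */
			kfree((void *)head - offset);	/* so free the whole object */
		else
			head->func(head);		/* otherwise a real callback */
	}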
@@ -208,20 +221,6 @@ static int rcu_kthread(void *arg) | |||
208 | } | 221 | } |
209 | 222 | ||
210 | /* | 223 | /* |
211 | * Wake up rcu_kthread() to process callbacks now eligible for invocation | ||
212 | * or to boost readers. | ||
213 | */ | ||
214 | static void invoke_rcu_kthread(void) | ||
215 | { | ||
216 | unsigned long flags; | ||
217 | |||
218 | local_irq_save(flags); | ||
219 | have_rcu_kthread_work = 1; | ||
220 | wake_up(&rcu_kthread_wq); | ||
221 | local_irq_restore(flags); | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * Wait for a grace period to elapse. But it is illegal to invoke | 224 | * Wait for a grace period to elapse. But it is illegal to invoke |
226 | * synchronize_sched() from within an RCU read-side critical section. | 225 | * synchronize_sched() from within an RCU read-side critical section. |
227 | * Therefore, any legal call to synchronize_sched() is a quiescent | 226 | * Therefore, any legal call to synchronize_sched() is a quiescent |
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h index 3cb8e362e883..f259c676195f 100644 --- a/kernel/rcutiny_plugin.h +++ b/kernel/rcutiny_plugin.h | |||
@@ -100,23 +100,28 @@ struct rcu_preempt_ctrlblk { | |||
100 | u8 completed; /* Last grace period completed. */ | 100 | u8 completed; /* Last grace period completed. */ |
101 | /* If all three are equal, RCU is idle. */ | 101 | /* If all three are equal, RCU is idle. */ |
102 | #ifdef CONFIG_RCU_BOOST | 102 | #ifdef CONFIG_RCU_BOOST |
103 | s8 boosted_this_gp; /* Has boosting already happened? */ | ||
104 | unsigned long boost_time; /* When to start boosting (jiffies) */ | 103 | unsigned long boost_time; /* When to start boosting (jiffies) */ |
105 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 104 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
106 | #ifdef CONFIG_RCU_TRACE | 105 | #ifdef CONFIG_RCU_TRACE |
107 | unsigned long n_grace_periods; | 106 | unsigned long n_grace_periods; |
108 | #ifdef CONFIG_RCU_BOOST | 107 | #ifdef CONFIG_RCU_BOOST |
109 | unsigned long n_tasks_boosted; | 108 | unsigned long n_tasks_boosted; |
109 | /* Total number of tasks boosted. */ | ||
110 | unsigned long n_exp_boosts; | 110 | unsigned long n_exp_boosts; |
111 | /* Number of tasks boosted for expedited GP. */ | ||
111 | unsigned long n_normal_boosts; | 112 | unsigned long n_normal_boosts; |
112 | unsigned long n_normal_balk_blkd_tasks; | 113 | /* Number of tasks boosted for normal GP. */ |
113 | unsigned long n_normal_balk_gp_tasks; | 114 | unsigned long n_balk_blkd_tasks; |
114 | unsigned long n_normal_balk_boost_tasks; | 115 | /* Refused to boost: no blocked tasks. */ |
115 | unsigned long n_normal_balk_boosted; | 116 | unsigned long n_balk_exp_gp_tasks; |
116 | unsigned long n_normal_balk_notyet; | 117 | /* Refused to boost: nothing blocking GP. */ |
117 | unsigned long n_normal_balk_nos; | 118 | unsigned long n_balk_boost_tasks; |
118 | unsigned long n_exp_balk_blkd_tasks; | 119 | /* Refused to boost: already boosting. */ |
119 | unsigned long n_exp_balk_nos; | 120 | unsigned long n_balk_notyet; |
121 | /* Refused to boost: not yet time. */ | ||
122 | unsigned long n_balk_nos; | ||
123 | /* Refused to boost: not sure why, though. */ | ||
124 | /* This can happen due to race conditions. */ | ||
120 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 125 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
121 | #endif /* #ifdef CONFIG_RCU_TRACE */ | 126 | #endif /* #ifdef CONFIG_RCU_TRACE */ |
122 | }; | 127 | }; |
@@ -201,7 +206,6 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t) | |||
201 | 206 | ||
202 | #ifdef CONFIG_RCU_BOOST | 207 | #ifdef CONFIG_RCU_BOOST |
203 | static void rcu_initiate_boost_trace(void); | 208 | static void rcu_initiate_boost_trace(void); |
204 | static void rcu_initiate_exp_boost_trace(void); | ||
205 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 209 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
206 | 210 | ||
207 | /* | 211 | /* |
@@ -219,41 +223,21 @@ static void show_tiny_preempt_stats(struct seq_file *m) | |||
219 | "N."[!rcu_preempt_ctrlblk.gp_tasks], | 223 | "N."[!rcu_preempt_ctrlblk.gp_tasks], |
220 | "E."[!rcu_preempt_ctrlblk.exp_tasks]); | 224 | "E."[!rcu_preempt_ctrlblk.exp_tasks]); |
221 | #ifdef CONFIG_RCU_BOOST | 225 | #ifdef CONFIG_RCU_BOOST |
222 | seq_printf(m, " ttb=%c btg=", | 226 | seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n", |
223 | "B."[!rcu_preempt_ctrlblk.boost_tasks]); | 227 | " ", |
224 | switch (rcu_preempt_ctrlblk.boosted_this_gp) { | 228 | "B."[!rcu_preempt_ctrlblk.boost_tasks], |
225 | case -1: | ||
226 | seq_puts(m, "exp"); | ||
227 | break; | ||
228 | case 0: | ||
229 | seq_puts(m, "no"); | ||
230 | break; | ||
231 | case 1: | ||
232 | seq_puts(m, "begun"); | ||
233 | break; | ||
234 | case 2: | ||
235 | seq_puts(m, "done"); | ||
236 | break; | ||
237 | default: | ||
238 | seq_printf(m, "?%d?", rcu_preempt_ctrlblk.boosted_this_gp); | ||
239 | } | ||
240 | seq_printf(m, " ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n", | ||
241 | rcu_preempt_ctrlblk.n_tasks_boosted, | 229 | rcu_preempt_ctrlblk.n_tasks_boosted, |
242 | rcu_preempt_ctrlblk.n_exp_boosts, | 230 | rcu_preempt_ctrlblk.n_exp_boosts, |
243 | rcu_preempt_ctrlblk.n_normal_boosts, | 231 | rcu_preempt_ctrlblk.n_normal_boosts, |
244 | (int)(jiffies & 0xffff), | 232 | (int)(jiffies & 0xffff), |
245 | (int)(rcu_preempt_ctrlblk.boost_time & 0xffff)); | 233 | (int)(rcu_preempt_ctrlblk.boost_time & 0xffff)); |
246 | seq_printf(m, " %s: nt=%lu gt=%lu bt=%lu b=%lu ny=%lu nos=%lu\n", | 234 | seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n", |
247 | "normal balk", | 235 | " balk", |
248 | rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks, | 236 | rcu_preempt_ctrlblk.n_balk_blkd_tasks, |
249 | rcu_preempt_ctrlblk.n_normal_balk_gp_tasks, | 237 | rcu_preempt_ctrlblk.n_balk_exp_gp_tasks, |
250 | rcu_preempt_ctrlblk.n_normal_balk_boost_tasks, | 238 | rcu_preempt_ctrlblk.n_balk_boost_tasks, |
251 | rcu_preempt_ctrlblk.n_normal_balk_boosted, | 239 | rcu_preempt_ctrlblk.n_balk_notyet, |
252 | rcu_preempt_ctrlblk.n_normal_balk_notyet, | 240 | rcu_preempt_ctrlblk.n_balk_nos); |
253 | rcu_preempt_ctrlblk.n_normal_balk_nos); | ||
254 | seq_printf(m, " exp balk: bt=%lu nos=%lu\n", | ||
255 | rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks, | ||
256 | rcu_preempt_ctrlblk.n_exp_balk_nos); | ||
257 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 241 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
258 | } | 242 | } |
259 | 243 | ||
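The consolidated seq_printf() above leans on a compact C idiom: "B."[!p] indexes a two-character string literal, yielding 'B' when p is non-NULL and '.' when it is NULL. A standalone illustration with made-up values:

	#include <stdio.h>

	int main(void)
	{
		void *boost_tasks = (void *)1;	/* pretend boosting is queued */
		void *exp_tasks = NULL;		/* no expedited readers */

		/* !p is 0 for non-NULL and 1 for NULL; the literal supplies the glyphs. */
		printf("%c%c\n", "B."[!boost_tasks], "E."[!exp_tasks]);	/* prints "B." */
		return 0;
	}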
@@ -271,25 +255,59 @@ static int rcu_boost(void) | |||
271 | { | 255 | { |
272 | unsigned long flags; | 256 | unsigned long flags; |
273 | struct rt_mutex mtx; | 257 | struct rt_mutex mtx; |
274 | struct list_head *np; | ||
275 | struct task_struct *t; | 258 | struct task_struct *t; |
259 | struct list_head *tb; | ||
276 | 260 | ||
277 | if (rcu_preempt_ctrlblk.boost_tasks == NULL) | 261 | if (rcu_preempt_ctrlblk.boost_tasks == NULL && |
262 | rcu_preempt_ctrlblk.exp_tasks == NULL) | ||
278 | return 0; /* Nothing to boost. */ | 263 | return 0; /* Nothing to boost. */ |
264 | |||
279 | raw_local_irq_save(flags); | 265 | raw_local_irq_save(flags); |
280 | rcu_preempt_ctrlblk.boosted_this_gp++; | 266 | |
281 | t = container_of(rcu_preempt_ctrlblk.boost_tasks, struct task_struct, | 267 | /* |
282 | rcu_node_entry); | 268 | * Recheck with irqs disabled: all tasks in need of boosting |
283 | np = rcu_next_node_entry(t); | 269 | * might exit their RCU read-side critical sections on their own |
270 | * if we are preempted just before disabling irqs. | ||
271 | */ | ||
272 | if (rcu_preempt_ctrlblk.boost_tasks == NULL && | ||
273 | rcu_preempt_ctrlblk.exp_tasks == NULL) { | ||
274 | raw_local_irq_restore(flags); | ||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * Preferentially boost tasks blocking expedited grace periods. | ||
280 | * This cannot starve the normal grace periods because a second | ||
281 | * expedited grace period must boost all blocked tasks, including | ||
282 | * those blocking the pre-existing normal grace period. | ||
283 | */ | ||
284 | if (rcu_preempt_ctrlblk.exp_tasks != NULL) { | ||
285 | tb = rcu_preempt_ctrlblk.exp_tasks; | ||
286 | RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++); | ||
287 | } else { | ||
288 | tb = rcu_preempt_ctrlblk.boost_tasks; | ||
289 | RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++); | ||
290 | } | ||
291 | RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++); | ||
292 | |||
293 | /* | ||
294 | * We boost task t by manufacturing an rt_mutex that appears to | ||
295 | * be held by task t. We leave a pointer to that rt_mutex where | ||
296 | * task t can find it, and task t will release the mutex when it | ||
297 | * exits its outermost RCU read-side critical section. Then | ||
298 | * simply acquiring this artificial rt_mutex will boost task | ||
299 | * t's priority. (Thanks to tglx for suggesting this approach!) | ||
300 | */ | ||
301 | t = container_of(tb, struct task_struct, rcu_node_entry); | ||
284 | rt_mutex_init_proxy_locked(&mtx, t); | 302 | rt_mutex_init_proxy_locked(&mtx, t); |
285 | t->rcu_boost_mutex = &mtx; | 303 | t->rcu_boost_mutex = &mtx; |
286 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED; | 304 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED; |
287 | raw_local_irq_restore(flags); | 305 | raw_local_irq_restore(flags); |
288 | rt_mutex_lock(&mtx); | 306 | rt_mutex_lock(&mtx); |
289 | RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++); | 307 | rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ |
290 | rcu_preempt_ctrlblk.boosted_this_gp++; | 308 | |
291 | rt_mutex_unlock(&mtx); | 309 | return rcu_preempt_ctrlblk.boost_tasks != NULL || |
292 | return rcu_preempt_ctrlblk.boost_tasks != NULL; | 310 | rcu_preempt_ctrlblk.exp_tasks != NULL; |
293 | } | 311 | } |
294 | 312 | ||
295 | /* | 313 | /* |
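The comment block above documents the boost half of the handshake. The deboost half runs in the context of the boosted reader itself; a simplified sketch of what rcu_read_unlock_special() does when task t leaves its outermost read-side critical section (not part of this hunk):

	if (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BOOSTED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
		rt_mutex_unlock(t->rcu_boost_mutex);	/* priority-inheritance deboost */
		t->rcu_boost_mutex = NULL;
	}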
@@ -304,42 +322,25 @@ static int rcu_boost(void) | |||
304 | */ | 322 | */ |
305 | static int rcu_initiate_boost(void) | 323 | static int rcu_initiate_boost(void) |
306 | { | 324 | { |
307 | if (!rcu_preempt_blocked_readers_cgp()) { | 325 | if (!rcu_preempt_blocked_readers_cgp() && |
308 | RCU_TRACE(rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks++); | 326 | rcu_preempt_ctrlblk.exp_tasks == NULL) { |
327 | RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++); | ||
309 | return 0; | 328 | return 0; |
310 | } | 329 | } |
311 | if (rcu_preempt_ctrlblk.gp_tasks != NULL && | 330 | if (rcu_preempt_ctrlblk.exp_tasks != NULL || |
312 | rcu_preempt_ctrlblk.boost_tasks == NULL && | 331 | (rcu_preempt_ctrlblk.gp_tasks != NULL && |
313 | rcu_preempt_ctrlblk.boosted_this_gp == 0 && | 332 | rcu_preempt_ctrlblk.boost_tasks == NULL && |
314 | ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) { | 333 | ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) { |
315 | rcu_preempt_ctrlblk.boost_tasks = rcu_preempt_ctrlblk.gp_tasks; | 334 | if (rcu_preempt_ctrlblk.exp_tasks == NULL) |
335 | rcu_preempt_ctrlblk.boost_tasks = | ||
336 | rcu_preempt_ctrlblk.gp_tasks; | ||
316 | invoke_rcu_kthread(); | 337 | invoke_rcu_kthread(); |
317 | RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++); | ||
318 | } else | 338 | } else |
319 | RCU_TRACE(rcu_initiate_boost_trace()); | 339 | RCU_TRACE(rcu_initiate_boost_trace()); |
320 | return 1; | 340 | return 1; |
321 | } | 341 | } |
322 | 342 | ||
323 | /* | 343 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) |
324 | * Initiate boosting for an expedited grace period. | ||
325 | */ | ||
326 | static void rcu_initiate_expedited_boost(void) | ||
327 | { | ||
328 | unsigned long flags; | ||
329 | |||
330 | raw_local_irq_save(flags); | ||
331 | if (!list_empty(&rcu_preempt_ctrlblk.blkd_tasks)) { | ||
332 | rcu_preempt_ctrlblk.boost_tasks = | ||
333 | rcu_preempt_ctrlblk.blkd_tasks.next; | ||
334 | rcu_preempt_ctrlblk.boosted_this_gp = -1; | ||
335 | invoke_rcu_kthread(); | ||
336 | RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++); | ||
337 | } else | ||
338 | RCU_TRACE(rcu_initiate_exp_boost_trace()); | ||
339 | raw_local_irq_restore(flags); | ||
340 | } | ||
341 | |||
342 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000); | ||
343 | 344 | ||
344 | /* | 345 | /* |
345 | * Do priority-boost accounting for the start of a new grace period. | 346 | * Do priority-boost accounting for the start of a new grace period. |
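Two details of the relocated RCU_BOOST_DELAY_JIFFIES definition are easy to miss: the deleted copy carried a stray trailing semicolon (harmless in its single statement-position use, but a latent hazard inside larger expressions), which the new copy drops, and the DIV_ROUND_UP() form rounds the millisecond delay up to whole jiffies. A worked instance with illustrative config values, runnable in userspace:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define HZ			250	/* illustrative */
	#define CONFIG_RCU_BOOST_DELAY	500	/* milliseconds, illustrative */
	#define RCU_BOOST_DELAY_JIFFIES	DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

	int main(void)
	{
		/* (500 * 250 + 999) / 1000 = 125 jiffies, i.e. 500 ms at HZ=250. */
		printf("%d\n", RCU_BOOST_DELAY_JIFFIES);
		return 0;
	}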
@@ -347,8 +348,6 @@ static void rcu_initiate_expedited_boost(void) | |||
347 | static void rcu_preempt_boost_start_gp(void) | 348 | static void rcu_preempt_boost_start_gp(void) |
348 | { | 349 | { |
349 | rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; | 350 | rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; |
350 | if (rcu_preempt_ctrlblk.boosted_this_gp > 0) | ||
351 | rcu_preempt_ctrlblk.boosted_this_gp = 0; | ||
352 | } | 351 | } |
353 | 352 | ||
354 | #else /* #ifdef CONFIG_RCU_BOOST */ | 353 | #else /* #ifdef CONFIG_RCU_BOOST */ |
@@ -372,13 +371,6 @@ static int rcu_initiate_boost(void) | |||
372 | } | 371 | } |
373 | 372 | ||
374 | /* | 373 | /* |
375 | * If there is no RCU priority boosting, we don't initiate expedited boosting. | ||
376 | */ | ||
377 | static void rcu_initiate_expedited_boost(void) | ||
378 | { | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * If there is no RCU priority boosting, nothing to do at grace-period start. | 374 | * If there is no RCU priority boosting, nothing to do at grace-period start. |
383 | */ | 375 | */ |
384 | static void rcu_preempt_boost_start_gp(void) | 376 | static void rcu_preempt_boost_start_gp(void) |
@@ -418,7 +410,7 @@ static void rcu_preempt_cpu_qs(void) | |||
418 | if (!rcu_preempt_gp_in_progress()) | 410 | if (!rcu_preempt_gp_in_progress()) |
419 | return; | 411 | return; |
420 | /* | 412 | /* |
421 | * Check up on boosting. If there are no readers blocking the | 413 | * Check up on boosting. If there are readers blocking the |
422 | * current grace period, leave. | 414 | * current grace period, leave. |
423 | */ | 415 | */ |
424 | if (rcu_initiate_boost()) | 416 | if (rcu_initiate_boost()) |
@@ -578,7 +570,7 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
578 | empty = !rcu_preempt_blocked_readers_cgp(); | 570 | empty = !rcu_preempt_blocked_readers_cgp(); |
579 | empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL; | 571 | empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL; |
580 | np = rcu_next_node_entry(t); | 572 | np = rcu_next_node_entry(t); |
581 | list_del(&t->rcu_node_entry); | 573 | list_del_init(&t->rcu_node_entry); |
582 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks) | 574 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks) |
583 | rcu_preempt_ctrlblk.gp_tasks = np; | 575 | rcu_preempt_ctrlblk.gp_tasks = np; |
584 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks) | 576 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks) |
@@ -587,7 +579,6 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
587 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks) | 579 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks) |
588 | rcu_preempt_ctrlblk.boost_tasks = np; | 580 | rcu_preempt_ctrlblk.boost_tasks = np; |
589 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 581 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
590 | INIT_LIST_HEAD(&t->rcu_node_entry); | ||
591 | 582 | ||
592 | /* | 583 | /* |
593 | * If this was the last task on the current list, and if | 584 | * If this was the last task on the current list, and if |
@@ -812,13 +803,16 @@ void synchronize_rcu_expedited(void) | |||
812 | rpcp->exp_tasks = rpcp->blkd_tasks.next; | 803 | rpcp->exp_tasks = rpcp->blkd_tasks.next; |
813 | if (rpcp->exp_tasks == &rpcp->blkd_tasks) | 804 | if (rpcp->exp_tasks == &rpcp->blkd_tasks) |
814 | rpcp->exp_tasks = NULL; | 805 | rpcp->exp_tasks = NULL; |
815 | local_irq_restore(flags); | ||
816 | 806 | ||
817 | /* Wait for tail of ->blkd_tasks list to drain. */ | 807 | /* Wait for tail of ->blkd_tasks list to drain. */ |
818 | if (rcu_preempted_readers_exp()) | 808 | if (!rcu_preempted_readers_exp()) |
819 | rcu_initiate_expedited_boost(); | 809 | local_irq_restore(flags); |
810 | else { | ||
811 | rcu_initiate_boost(); | ||
812 | local_irq_restore(flags); | ||
820 | wait_event(sync_rcu_preempt_exp_wq, | 813 | wait_event(sync_rcu_preempt_exp_wq, |
821 | !rcu_preempted_readers_exp()); | 814 | !rcu_preempted_readers_exp()); |
815 | } | ||
822 | 816 | ||
823 | /* Clean up and exit. */ | 817 | /* Clean up and exit. */ |
824 | barrier(); /* ensure expedited GP seen before counter increment. */ | 818 | barrier(); /* ensure expedited GP seen before counter increment. */ |
@@ -931,24 +925,17 @@ void __init rcu_scheduler_starting(void) | |||
931 | 925 | ||
932 | static void rcu_initiate_boost_trace(void) | 926 | static void rcu_initiate_boost_trace(void) |
933 | { | 927 | { |
934 | if (rcu_preempt_ctrlblk.gp_tasks == NULL) | 928 | if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks)) |
935 | rcu_preempt_ctrlblk.n_normal_balk_gp_tasks++; | 929 | rcu_preempt_ctrlblk.n_balk_blkd_tasks++; |
930 | else if (rcu_preempt_ctrlblk.gp_tasks == NULL && | ||
931 | rcu_preempt_ctrlblk.exp_tasks == NULL) | ||
932 | rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++; | ||
936 | else if (rcu_preempt_ctrlblk.boost_tasks != NULL) | 933 | else if (rcu_preempt_ctrlblk.boost_tasks != NULL) |
937 | rcu_preempt_ctrlblk.n_normal_balk_boost_tasks++; | 934 | rcu_preempt_ctrlblk.n_balk_boost_tasks++; |
938 | else if (rcu_preempt_ctrlblk.boosted_this_gp != 0) | ||
939 | rcu_preempt_ctrlblk.n_normal_balk_boosted++; | ||
940 | else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) | 935 | else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) |
941 | rcu_preempt_ctrlblk.n_normal_balk_notyet++; | 936 | rcu_preempt_ctrlblk.n_balk_notyet++; |
942 | else | ||
943 | rcu_preempt_ctrlblk.n_normal_balk_nos++; | ||
944 | } | ||
945 | |||
946 | static void rcu_initiate_exp_boost_trace(void) | ||
947 | { | ||
948 | if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks)) | ||
949 | rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks++; | ||
950 | else | 937 | else |
951 | rcu_preempt_ctrlblk.n_exp_balk_nos++; | 938 | rcu_preempt_ctrlblk.n_balk_nos++; |
952 | } | 939 | } |
953 | 940 | ||
954 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 941 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index c224da41890c..2e138db03382 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -131,7 +131,7 @@ struct rcu_torture { | |||
131 | 131 | ||
132 | static LIST_HEAD(rcu_torture_freelist); | 132 | static LIST_HEAD(rcu_torture_freelist); |
133 | static struct rcu_torture __rcu *rcu_torture_current; | 133 | static struct rcu_torture __rcu *rcu_torture_current; |
134 | static long rcu_torture_current_version; | 134 | static unsigned long rcu_torture_current_version; |
135 | static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; | 135 | static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; |
136 | static DEFINE_SPINLOCK(rcu_torture_lock); | 136 | static DEFINE_SPINLOCK(rcu_torture_lock); |
137 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = | 137 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = |
@@ -146,8 +146,6 @@ static atomic_t n_rcu_torture_mberror; | |||
146 | static atomic_t n_rcu_torture_error; | 146 | static atomic_t n_rcu_torture_error; |
147 | static long n_rcu_torture_boost_ktrerror; | 147 | static long n_rcu_torture_boost_ktrerror; |
148 | static long n_rcu_torture_boost_rterror; | 148 | static long n_rcu_torture_boost_rterror; |
149 | static long n_rcu_torture_boost_allocerror; | ||
150 | static long n_rcu_torture_boost_afferror; | ||
151 | static long n_rcu_torture_boost_failure; | 149 | static long n_rcu_torture_boost_failure; |
152 | static long n_rcu_torture_boosts; | 150 | static long n_rcu_torture_boosts; |
153 | static long n_rcu_torture_timers; | 151 | static long n_rcu_torture_timers; |
@@ -163,11 +161,11 @@ static int stutter_pause_test; | |||
163 | #endif | 161 | #endif |
164 | int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; | 162 | int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; |
165 | 163 | ||
166 | #ifdef CONFIG_RCU_BOOST | 164 | #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) |
167 | #define rcu_can_boost() 1 | 165 | #define rcu_can_boost() 1 |
168 | #else /* #ifdef CONFIG_RCU_BOOST */ | 166 | #else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ |
169 | #define rcu_can_boost() 0 | 167 | #define rcu_can_boost() 0 |
170 | #endif /* #else #ifdef CONFIG_RCU_BOOST */ | 168 | #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ |
171 | 169 | ||
172 | static unsigned long boost_starttime; /* jiffies of next boost test start. */ | 170 | static unsigned long boost_starttime; /* jiffies of next boost test start. */ |
173 | DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ | 171 | DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ |
@@ -751,6 +749,7 @@ static int rcu_torture_boost(void *arg) | |||
751 | n_rcu_torture_boost_rterror++; | 749 | n_rcu_torture_boost_rterror++; |
752 | } | 750 | } |
753 | 751 | ||
752 | init_rcu_head_on_stack(&rbi.rcu); | ||
754 | /* Each pass through the following loop does one boost-test cycle. */ | 753 | /* Each pass through the following loop does one boost-test cycle. */ |
755 | do { | 754 | do { |
756 | /* Wait for the next test interval. */ | 755 | /* Wait for the next test interval. */ |
@@ -810,6 +809,7 @@ checkwait: rcu_stutter_wait("rcu_torture_boost"); | |||
810 | 809 | ||
811 | /* Clean up and exit. */ | 810 | /* Clean up and exit. */ |
812 | VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping"); | 811 | VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping"); |
812 | destroy_rcu_head_on_stack(&rbi.rcu); | ||
813 | rcutorture_shutdown_absorb("rcu_torture_boost"); | 813 | rcutorture_shutdown_absorb("rcu_torture_boost"); |
814 | while (!kthread_should_stop() || rbi.inflight) | 814 | while (!kthread_should_stop() || rbi.inflight) |
815 | schedule_timeout_uninterruptible(1); | 815 | schedule_timeout_uninterruptible(1); |
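The init_rcu_head_on_stack()/destroy_rcu_head_on_stack() pair added above tells CONFIG_DEBUG_OBJECTS_RCU_HEAD that rbi.rcu lives on the kthread's stack rather than in static or heap storage; without it, the debug-objects code would flag the callback as operating on an untracked object. The general pattern, as a sketch (example_cb and the inflight flag are illustrative, modeled on rbi.inflight above):

	struct example {
		struct rcu_head rcu;
		int inflight;			/* nonzero while a callback is pending */
	};

	static void example_cb(struct rcu_head *rhp)	/* illustrative callback */
	{
		container_of(rhp, struct example, rcu)->inflight = 0;
	}

	static void example_user(void)
	{
		struct example e = { .inflight = 1 };

		init_rcu_head_on_stack(&e.rcu);		/* before the first call_rcu() */
		call_rcu(&e.rcu, example_cb);
		while (e.inflight)			/* drain before the stack frame dies... */
			schedule_timeout_uninterruptible(1);
		destroy_rcu_head_on_stack(&e.rcu);	/* ...and before returning */
	}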
@@ -886,7 +886,7 @@ rcu_torture_writer(void *arg) | |||
886 | old_rp->rtort_pipe_count++; | 886 | old_rp->rtort_pipe_count++; |
887 | cur_ops->deferred_free(old_rp); | 887 | cur_ops->deferred_free(old_rp); |
888 | } | 888 | } |
889 | rcu_torture_current_version++; | 889 | rcutorture_record_progress(++rcu_torture_current_version); |
890 | oldbatch = cur_ops->completed(); | 890 | oldbatch = cur_ops->completed(); |
891 | rcu_stutter_wait("rcu_torture_writer"); | 891 | rcu_stutter_wait("rcu_torture_writer"); |
892 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 892 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); |
@@ -1066,8 +1066,8 @@ rcu_torture_printk(char *page) | |||
1066 | } | 1066 | } |
1067 | cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG); | 1067 | cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG); |
1068 | cnt += sprintf(&page[cnt], | 1068 | cnt += sprintf(&page[cnt], |
1069 | "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d " | 1069 | "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d " |
1070 | "rtmbe: %d rtbke: %ld rtbre: %ld rtbae: %ld rtbafe: %ld " | 1070 | "rtmbe: %d rtbke: %ld rtbre: %ld " |
1071 | "rtbf: %ld rtb: %ld nt: %ld", | 1071 | "rtbf: %ld rtb: %ld nt: %ld", |
1072 | rcu_torture_current, | 1072 | rcu_torture_current, |
1073 | rcu_torture_current_version, | 1073 | rcu_torture_current_version, |
@@ -1078,16 +1078,12 @@ rcu_torture_printk(char *page) | |||
1078 | atomic_read(&n_rcu_torture_mberror), | 1078 | atomic_read(&n_rcu_torture_mberror), |
1079 | n_rcu_torture_boost_ktrerror, | 1079 | n_rcu_torture_boost_ktrerror, |
1080 | n_rcu_torture_boost_rterror, | 1080 | n_rcu_torture_boost_rterror, |
1081 | n_rcu_torture_boost_allocerror, | ||
1082 | n_rcu_torture_boost_afferror, | ||
1083 | n_rcu_torture_boost_failure, | 1081 | n_rcu_torture_boost_failure, |
1084 | n_rcu_torture_boosts, | 1082 | n_rcu_torture_boosts, |
1085 | n_rcu_torture_timers); | 1083 | n_rcu_torture_timers); |
1086 | if (atomic_read(&n_rcu_torture_mberror) != 0 || | 1084 | if (atomic_read(&n_rcu_torture_mberror) != 0 || |
1087 | n_rcu_torture_boost_ktrerror != 0 || | 1085 | n_rcu_torture_boost_ktrerror != 0 || |
1088 | n_rcu_torture_boost_rterror != 0 || | 1086 | n_rcu_torture_boost_rterror != 0 || |
1089 | n_rcu_torture_boost_allocerror != 0 || | ||
1090 | n_rcu_torture_boost_afferror != 0 || | ||
1091 | n_rcu_torture_boost_failure != 0) | 1087 | n_rcu_torture_boost_failure != 0) |
1092 | cnt += sprintf(&page[cnt], " !!!"); | 1088 | cnt += sprintf(&page[cnt], " !!!"); |
1093 | cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); | 1089 | cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); |
@@ -1331,6 +1327,7 @@ rcu_torture_cleanup(void) | |||
1331 | int i; | 1327 | int i; |
1332 | 1328 | ||
1333 | mutex_lock(&fullstop_mutex); | 1329 | mutex_lock(&fullstop_mutex); |
1330 | rcutorture_record_test_transition(); | ||
1334 | if (fullstop == FULLSTOP_SHUTDOWN) { | 1331 | if (fullstop == FULLSTOP_SHUTDOWN) { |
1335 | printk(KERN_WARNING /* but going down anyway, so... */ | 1332 | printk(KERN_WARNING /* but going down anyway, so... */ |
1336 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); | 1333 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); |
@@ -1486,8 +1483,6 @@ rcu_torture_init(void) | |||
1486 | atomic_set(&n_rcu_torture_error, 0); | 1483 | atomic_set(&n_rcu_torture_error, 0); |
1487 | n_rcu_torture_boost_ktrerror = 0; | 1484 | n_rcu_torture_boost_ktrerror = 0; |
1488 | n_rcu_torture_boost_rterror = 0; | 1485 | n_rcu_torture_boost_rterror = 0; |
1489 | n_rcu_torture_boost_allocerror = 0; | ||
1490 | n_rcu_torture_boost_afferror = 0; | ||
1491 | n_rcu_torture_boost_failure = 0; | 1486 | n_rcu_torture_boost_failure = 0; |
1492 | n_rcu_torture_boosts = 0; | 1487 | n_rcu_torture_boosts = 0; |
1493 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) | 1488 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
@@ -1624,6 +1619,7 @@ rcu_torture_init(void) | |||
1624 | } | 1619 | } |
1625 | } | 1620 | } |
1626 | register_reboot_notifier(&rcutorture_shutdown_nb); | 1621 | register_reboot_notifier(&rcutorture_shutdown_nb); |
1622 | rcutorture_record_test_transition(); | ||
1627 | mutex_unlock(&fullstop_mutex); | 1623 | mutex_unlock(&fullstop_mutex); |
1628 | return 0; | 1624 | return 0; |
1629 | 1625 | ||
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index dd4aea806f8e..e486f7c3ffb8 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -47,6 +47,8 @@ | |||
47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
48 | #include <linux/time.h> | 48 | #include <linux/time.h> |
49 | #include <linux/kernel_stat.h> | 49 | #include <linux/kernel_stat.h> |
50 | #include <linux/wait.h> | ||
51 | #include <linux/kthread.h> | ||
50 | 52 | ||
51 | #include "rcutree.h" | 53 | #include "rcutree.h" |
52 | 54 | ||
@@ -79,10 +81,41 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); | |||
79 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | 81 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); |
80 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 82 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
81 | 83 | ||
84 | static struct rcu_state *rcu_state; | ||
85 | |||
82 | int rcu_scheduler_active __read_mostly; | 86 | int rcu_scheduler_active __read_mostly; |
83 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | 87 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); |
84 | 88 | ||
85 | /* | 89 | /* |
90 | * Control variables for per-CPU and per-rcu_node kthreads. These | ||
91 | * handle all flavors of RCU. | ||
92 | */ | ||
93 | static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); | ||
94 | DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); | ||
95 | DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu); | ||
96 | DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); | ||
97 | static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq); | ||
98 | DEFINE_PER_CPU(char, rcu_cpu_has_work); | ||
99 | static char rcu_kthreads_spawnable; | ||
100 | |||
101 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); | ||
102 | static void invoke_rcu_cpu_kthread(void); | ||
103 | |||
104 | #define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */ | ||
105 | |||
106 | /* | ||
107 | * Track the rcutorture test sequence number and the update version | ||
108 | * number within a given test. The rcutorture_testseq is incremented | ||
109 | * on every rcutorture module load and unload, so has an odd value | ||
110 | * when a test is running. The rcutorture_vernum is set to zero | ||
111 | * when rcutorture starts and is incremented on each rcutorture update. | ||
112 | * These variables enable correlating rcutorture output with the | ||
113 | * RCU tracing information. | ||
114 | */ | ||
115 | unsigned long rcutorture_testseq; | ||
116 | unsigned long rcutorture_vernum; | ||
117 | |||
118 | /* | ||
86 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s | 119 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s |
87 | * permit this function to be invoked without holding the root rcu_node | 120 | * permit this function to be invoked without holding the root rcu_node |
88 | * structure's ->lock, but of course results can be subject to change. | 121 | * structure's ->lock, but of course results can be subject to change. |
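The per-CPU control variables introduced above are manipulated through three access forms in the rest of this patch; for reference, their semantics (a sketch, not the implementations):

	DEFINE_PER_CPU(char, rcu_cpu_has_work);	/* one instance per possible CPU */

	per_cpu(rcu_cpu_has_work, cpu) = 0;	/* the copy belonging to CPU 'cpu' */
	__this_cpu_write(rcu_cpu_has_work, 1);	/* this CPU's copy; callers here have
						   already disabled interrupts */
	wake_up(&__get_cpu_var(rcu_cpu_wq));	/* lvalue form for this CPU's copy */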
@@ -124,6 +157,7 @@ void rcu_note_context_switch(int cpu) | |||
124 | rcu_sched_qs(cpu); | 157 | rcu_sched_qs(cpu); |
125 | rcu_preempt_note_context_switch(cpu); | 158 | rcu_preempt_note_context_switch(cpu); |
126 | } | 159 | } |
160 | EXPORT_SYMBOL_GPL(rcu_note_context_switch); | ||
127 | 161 | ||
128 | #ifdef CONFIG_NO_HZ | 162 | #ifdef CONFIG_NO_HZ |
129 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { | 163 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { |
@@ -140,10 +174,8 @@ module_param(blimit, int, 0); | |||
140 | module_param(qhimark, int, 0); | 174 | module_param(qhimark, int, 0); |
141 | module_param(qlowmark, int, 0); | 175 | module_param(qlowmark, int, 0); |
142 | 176 | ||
143 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 177 | int rcu_cpu_stall_suppress __read_mostly; |
144 | int rcu_cpu_stall_suppress __read_mostly = RCU_CPU_STALL_SUPPRESS_INIT; | ||
145 | module_param(rcu_cpu_stall_suppress, int, 0644); | 178 | module_param(rcu_cpu_stall_suppress, int, 0644); |
146 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
147 | 179 | ||
148 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); | 180 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); |
149 | static int rcu_pending(int cpu); | 181 | static int rcu_pending(int cpu); |
@@ -176,6 +208,31 @@ void rcu_bh_force_quiescent_state(void) | |||
176 | EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); | 208 | EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); |
177 | 209 | ||
178 | /* | 210 | /* |
211 | * Record the number of times rcutorture tests have been initiated and | ||
212 | * terminated. This information allows the debugfs tracing stats to be | ||
213 | * correlated to the rcutorture messages, even when the rcutorture module | ||
214 | * is being repeatedly loaded and unloaded. In other words, we cannot | ||
215 | * store this state in rcutorture itself. | ||
216 | */ | ||
217 | void rcutorture_record_test_transition(void) | ||
218 | { | ||
219 | rcutorture_testseq++; | ||
220 | rcutorture_vernum = 0; | ||
221 | } | ||
222 | EXPORT_SYMBOL_GPL(rcutorture_record_test_transition); | ||
223 | |||
224 | /* | ||
225 | * Record the number of writer passes through the current rcutorture test. | ||
226 | * This is also used to correlate debugfs tracing stats with the rcutorture | ||
227 | * messages. | ||
228 | */ | ||
229 | void rcutorture_record_progress(unsigned long vernum) | ||
230 | { | ||
231 | rcutorture_vernum++; | ||
232 | } | ||
233 | EXPORT_SYMBOL_GPL(rcutorture_record_progress); | ||
234 | |||
235 | /* | ||
179 | * Force a quiescent state for RCU-sched. | 236 | * Force a quiescent state for RCU-sched. |
180 | */ | 237 | */ |
181 | void rcu_sched_force_quiescent_state(void) | 238 | void rcu_sched_force_quiescent_state(void) |
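The even/odd convention for rcutorture_testseq can be traced by hand; note also that rcutorture_record_progress() ignores its vernum argument and simply increments the counter. An illustrative userspace rendering of the two record functions above:

	#include <stdio.h>

	static unsigned long testseq;	/* mirrors rcutorture_testseq */
	static unsigned long vernum;	/* mirrors rcutorture_vernum */

	static void record_test_transition(void)
	{
		testseq++;
		vernum = 0;
	}

	int main(void)
	{
		record_test_transition();	/* rcutorture module load */
		vernum++;			/* one writer pass */
		vernum++;			/* another */
		printf("testseq=%lu vernum=%lu (odd: test running)\n", testseq, vernum);
		record_test_transition();	/* rcutorture module unload */
		printf("testseq=%lu vernum=%lu (even: idle)\n", testseq, vernum);
		return 0;
	}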
@@ -234,8 +291,8 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp) | |||
234 | return 1; | 291 | return 1; |
235 | } | 292 | } |
236 | 293 | ||
237 | /* If preemptable RCU, no point in sending reschedule IPI. */ | 294 | /* If preemptible RCU, no point in sending reschedule IPI. */ |
238 | if (rdp->preemptable) | 295 | if (rdp->preemptible) |
239 | return 0; | 296 | return 0; |
240 | 297 | ||
241 | /* The CPU is online, so send it a reschedule IPI. */ | 298 | /* The CPU is online, so send it a reschedule IPI. */ |
@@ -450,8 +507,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
450 | 507 | ||
451 | #endif /* #else #ifdef CONFIG_NO_HZ */ | 508 | #endif /* #else #ifdef CONFIG_NO_HZ */ |
452 | 509 | ||
453 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
454 | |||
455 | int rcu_cpu_stall_suppress __read_mostly; | 510 | int rcu_cpu_stall_suppress __read_mostly; |
456 | 511 | ||
457 | static void record_gp_stall_check_time(struct rcu_state *rsp) | 512 | static void record_gp_stall_check_time(struct rcu_state *rsp) |
@@ -537,21 +592,24 @@ static void print_cpu_stall(struct rcu_state *rsp) | |||
537 | 592 | ||
538 | static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | 593 | static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) |
539 | { | 594 | { |
540 | long delta; | 595 | unsigned long j; |
596 | unsigned long js; | ||
541 | struct rcu_node *rnp; | 597 | struct rcu_node *rnp; |
542 | 598 | ||
543 | if (rcu_cpu_stall_suppress) | 599 | if (rcu_cpu_stall_suppress) |
544 | return; | 600 | return; |
545 | delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall); | 601 | j = ACCESS_ONCE(jiffies); |
602 | js = ACCESS_ONCE(rsp->jiffies_stall); | ||
546 | rnp = rdp->mynode; | 603 | rnp = rdp->mynode; |
547 | if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) { | 604 | if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) { |
548 | 605 | ||
549 | /* We haven't checked in, so go dump stack. */ | 606 | /* We haven't checked in, so go dump stack. */ |
550 | print_cpu_stall(rsp); | 607 | print_cpu_stall(rsp); |
551 | 608 | ||
552 | } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) { | 609 | } else if (rcu_gp_in_progress(rsp) && |
610 | ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) { | ||
553 | 611 | ||
554 | /* They had two time units to dump stack, so complain. */ | 612 | /* They had a few time units to dump stack, so complain. */ |
555 | print_other_cpu_stall(rsp); | 613 | print_other_cpu_stall(rsp); |
556 | } | 614 | } |
557 | } | 615 | } |
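Replacing the signed delta with ULONG_CMP_GE() makes the stall check tolerant of jiffies wraparound; the macro is defined in rcupdate.h of this era as ULONG_MAX / 2 >= (a) - (b), a modular "a is at or after b" test. A standalone demonstration near the wrap point:

	#include <stdio.h>
	#include <limits.h>

	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

	int main(void)
	{
		unsigned long js = ULONG_MAX - 5;	/* stall deadline set just before wrap */
		unsigned long j  = 10;			/* jiffies sampled after wrapping past 0 */

		/* A signed j - js >= 0 test would misfire here, but modular
		 * arithmetic still sees j as 16 ticks after js. */
		printf("%d\n", ULONG_CMP_GE(j, js));	/* prints 1 */
		return 0;
	}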
@@ -587,26 +645,6 @@ static void __init check_cpu_stall_init(void) | |||
587 | atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); | 645 | atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); |
588 | } | 646 | } |
589 | 647 | ||
590 | #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
591 | |||
592 | static void record_gp_stall_check_time(struct rcu_state *rsp) | ||
593 | { | ||
594 | } | ||
595 | |||
596 | static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | ||
597 | { | ||
598 | } | ||
599 | |||
600 | void rcu_cpu_stall_reset(void) | ||
601 | { | ||
602 | } | ||
603 | |||
604 | static void __init check_cpu_stall_init(void) | ||
605 | { | ||
606 | } | ||
607 | |||
608 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
609 | |||
610 | /* | 648 | /* |
611 | * Update CPU-local rcu_data state to record the newly noticed grace period. | 649 | * Update CPU-local rcu_data state to record the newly noticed grace period. |
612 | * This is used both when we started the grace period and when we notice | 650 | * This is used both when we started the grace period and when we notice |
@@ -809,6 +847,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
809 | rnp->completed = rsp->completed; | 847 | rnp->completed = rsp->completed; |
810 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ | 848 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ |
811 | rcu_start_gp_per_cpu(rsp, rnp, rdp); | 849 | rcu_start_gp_per_cpu(rsp, rnp, rdp); |
850 | rcu_preempt_boost_start_gp(rnp); | ||
812 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 851 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
813 | return; | 852 | return; |
814 | } | 853 | } |
@@ -844,6 +883,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
844 | rnp->completed = rsp->completed; | 883 | rnp->completed = rsp->completed; |
845 | if (rnp == rdp->mynode) | 884 | if (rnp == rdp->mynode) |
846 | rcu_start_gp_per_cpu(rsp, rnp, rdp); | 885 | rcu_start_gp_per_cpu(rsp, rnp, rdp); |
886 | rcu_preempt_boost_start_gp(rnp); | ||
847 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 887 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
848 | } | 888 | } |
849 | 889 | ||
@@ -864,7 +904,12 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
864 | static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) | 904 | static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) |
865 | __releases(rcu_get_root(rsp)->lock) | 905 | __releases(rcu_get_root(rsp)->lock) |
866 | { | 906 | { |
907 | unsigned long gp_duration; | ||
908 | |||
867 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); | 909 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); |
910 | gp_duration = jiffies - rsp->gp_start; | ||
911 | if (gp_duration > rsp->gp_max) | ||
912 | rsp->gp_max = gp_duration; | ||
868 | rsp->completed = rsp->gpnum; | 913 | rsp->completed = rsp->gpnum; |
869 | rsp->signaled = RCU_GP_IDLE; | 914 | rsp->signaled = RCU_GP_IDLE; |
870 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ | 915 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ |
@@ -894,7 +939,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, | |||
894 | return; | 939 | return; |
895 | } | 940 | } |
896 | rnp->qsmask &= ~mask; | 941 | rnp->qsmask &= ~mask; |
897 | if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) { | 942 | if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { |
898 | 943 | ||
899 | /* Other bits still set at this level, so done. */ | 944 | /* Other bits still set at this level, so done. */ |
900 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 945 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -1037,6 +1082,8 @@ static void rcu_send_cbs_to_online(struct rcu_state *rsp) | |||
1037 | /* | 1082 | /* |
1038 | * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy | 1083 | * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy |
1039 | * and move all callbacks from the outgoing CPU to the current one. | 1084 | * and move all callbacks from the outgoing CPU to the current one. |
1085 | * There can only be one CPU hotplug operation at a time, so no other | ||
1086 | * CPU can be attempting to update rcu_cpu_kthread_task. | ||
1040 | */ | 1087 | */ |
1041 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | 1088 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) |
1042 | { | 1089 | { |
@@ -1045,6 +1092,14 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
1045 | int need_report = 0; | 1092 | int need_report = 0; |
1046 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); | 1093 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
1047 | struct rcu_node *rnp; | 1094 | struct rcu_node *rnp; |
1095 | struct task_struct *t; | ||
1096 | |||
1097 | /* Stop the CPU's kthread. */ | ||
1098 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1099 | if (t != NULL) { | ||
1100 | per_cpu(rcu_cpu_kthread_task, cpu) = NULL; | ||
1101 | kthread_stop(t); | ||
1102 | } | ||
1048 | 1103 | ||
1049 | /* Exclude any attempts to start a new grace period. */ | 1104 | /* Exclude any attempts to start a new grace period. */ |
1050 | raw_spin_lock_irqsave(&rsp->onofflock, flags); | 1105 | raw_spin_lock_irqsave(&rsp->onofflock, flags); |
@@ -1082,6 +1137,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
1082 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1137 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1083 | if (need_report & RCU_OFL_TASKS_EXP_GP) | 1138 | if (need_report & RCU_OFL_TASKS_EXP_GP) |
1084 | rcu_report_exp_rnp(rsp, rnp); | 1139 | rcu_report_exp_rnp(rsp, rnp); |
1140 | rcu_node_kthread_setaffinity(rnp, -1); | ||
1085 | } | 1141 | } |
1086 | 1142 | ||
1087 | /* | 1143 | /* |
@@ -1143,7 +1199,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1143 | next = list->next; | 1199 | next = list->next; |
1144 | prefetch(next); | 1200 | prefetch(next); |
1145 | debug_rcu_head_unqueue(list); | 1201 | debug_rcu_head_unqueue(list); |
1146 | list->func(list); | 1202 | __rcu_reclaim(list); |
1147 | list = next; | 1203 | list = next; |
1148 | if (++count >= rdp->blimit) | 1204 | if (++count >= rdp->blimit) |
1149 | break; | 1205 | break; |
@@ -1179,7 +1235,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1179 | 1235 | ||
1180 | /* Re-raise the RCU softirq if there are callbacks remaining. */ | 1236 | /* Re-raise the RCU softirq if there are callbacks remaining. */ |
1181 | if (cpu_has_callbacks_ready_to_invoke(rdp)) | 1237 | if (cpu_has_callbacks_ready_to_invoke(rdp)) |
1182 | raise_softirq(RCU_SOFTIRQ); | 1238 | invoke_rcu_cpu_kthread(); |
1183 | } | 1239 | } |
1184 | 1240 | ||
1185 | /* | 1241 | /* |
@@ -1225,7 +1281,7 @@ void rcu_check_callbacks(int cpu, int user) | |||
1225 | } | 1281 | } |
1226 | rcu_preempt_check_callbacks(cpu); | 1282 | rcu_preempt_check_callbacks(cpu); |
1227 | if (rcu_pending(cpu)) | 1283 | if (rcu_pending(cpu)) |
1228 | raise_softirq(RCU_SOFTIRQ); | 1284 | invoke_rcu_cpu_kthread(); |
1229 | } | 1285 | } |
1230 | 1286 | ||
1231 | #ifdef CONFIG_SMP | 1287 | #ifdef CONFIG_SMP |
@@ -1233,6 +1289,8 @@ void rcu_check_callbacks(int cpu, int user) | |||
1233 | /* | 1289 | /* |
1234 | * Scan the leaf rcu_node structures, processing dyntick state for any that | 1290 | * Scan the leaf rcu_node structures, processing dyntick state for any that |
1235 | * have not yet encountered a quiescent state, using the function specified. | 1291 | * have not yet encountered a quiescent state, using the function specified. |
1292 | * Also initiate boosting for any threads blocked on the root rcu_node. | ||
1293 | * | ||
1236 | * The caller must have suppressed start of new grace periods. | 1294 | * The caller must have suppressed start of new grace periods. |
1237 | */ | 1295 | */ |
1238 | static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) | 1296 | static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) |
@@ -1251,7 +1309,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) | |||
1251 | return; | 1309 | return; |
1252 | } | 1310 | } |
1253 | if (rnp->qsmask == 0) { | 1311 | if (rnp->qsmask == 0) { |
1254 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1312 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ |
1255 | continue; | 1313 | continue; |
1256 | } | 1314 | } |
1257 | cpu = rnp->grplo; | 1315 | cpu = rnp->grplo; |
@@ -1269,6 +1327,11 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) | |||
1269 | } | 1327 | } |
1270 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1328 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1271 | } | 1329 | } |
1330 | rnp = rcu_get_root(rsp); | ||
1331 | if (rnp->qsmask == 0) { | ||
1332 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1333 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ | ||
1334 | } | ||
1272 | } | 1335 | } |
1273 | 1336 | ||
1274 | /* | 1337 | /* |
@@ -1389,7 +1452,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1389 | /* | 1452 | /* |
1390 | * Do softirq processing for the current CPU. | 1453 | * Do softirq processing for the current CPU. |
1391 | */ | 1454 | */ |
1392 | static void rcu_process_callbacks(struct softirq_action *unused) | 1455 | static void rcu_process_callbacks(void) |
1393 | { | 1456 | { |
1394 | /* | 1457 | /* |
1395 | * Memory references from any prior RCU read-side critical sections | 1458 | * Memory references from any prior RCU read-side critical sections |
@@ -1414,6 +1477,347 @@ static void rcu_process_callbacks(struct softirq_action *unused) | |||
1414 | rcu_needs_cpu_flush(); | 1477 | rcu_needs_cpu_flush(); |
1415 | } | 1478 | } |
1416 | 1479 | ||
1480 | /* | ||
1481 | * Wake up the current CPU's kthread. This replaces raise_softirq() | ||
1482 | * in earlier versions of RCU. Note that because we are running on | ||
1483 | * the current CPU with interrupts disabled, the rcu_cpu_kthread_task | ||
1484 | * cannot disappear out from under us. | ||
1485 | */ | ||
1486 | static void invoke_rcu_cpu_kthread(void) | ||
1487 | { | ||
1488 | unsigned long flags; | ||
1489 | |||
1490 | local_irq_save(flags); | ||
1491 | __this_cpu_write(rcu_cpu_has_work, 1); | ||
1492 | if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) { | ||
1493 | local_irq_restore(flags); | ||
1494 | return; | ||
1495 | } | ||
1496 | wake_up(&__get_cpu_var(rcu_cpu_wq)); | ||
1497 | local_irq_restore(flags); | ||
1498 | } | ||
1499 | |||
1500 | /* | ||
1501 | * Wake up the specified per-rcu_node-structure kthread. | ||
1502 | * Because the per-rcu_node kthreads are immortal, we don't need | ||
1503 | * to do anything to keep them alive. | ||
1504 | */ | ||
1505 | static void invoke_rcu_node_kthread(struct rcu_node *rnp) | ||
1506 | { | ||
1507 | struct task_struct *t; | ||
1508 | |||
1509 | t = rnp->node_kthread_task; | ||
1510 | if (t != NULL) | ||
1511 | wake_up_process(t); | ||
1512 | } | ||
1513 | |||
1514 | /* | ||
1515 | * Set the specified CPU's kthread to run RT or not, as specified by | ||
1516 | * the to_rt argument. The CPU-hotplug locks are held, so the task | ||
1517 | * is not going away. | ||
1518 | */ | ||
1519 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | ||
1520 | { | ||
1521 | int policy; | ||
1522 | struct sched_param sp; | ||
1523 | struct task_struct *t; | ||
1524 | |||
1525 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1526 | if (t == NULL) | ||
1527 | return; | ||
1528 | if (to_rt) { | ||
1529 | policy = SCHED_FIFO; | ||
1530 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1531 | } else { | ||
1532 | policy = SCHED_NORMAL; | ||
1533 | sp.sched_priority = 0; | ||
1534 | } | ||
1535 | sched_setscheduler_nocheck(t, policy, &sp); | ||
1536 | } | ||
1537 | |||
1538 | /* | ||
1539 | * Timer handler to initiate the waking up of per-CPU kthreads that | ||
1540 | * have yielded the CPU due to excess numbers of RCU callbacks. | ||
1541 | * We wake up the per-rcu_node kthread, which in turn will wake up | ||
1542 | * the booster kthread. | ||
1543 | */ | ||
1544 | static void rcu_cpu_kthread_timer(unsigned long arg) | ||
1545 | { | ||
1546 | unsigned long flags; | ||
1547 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); | ||
1548 | struct rcu_node *rnp = rdp->mynode; | ||
1549 | |||
1550 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1551 | rnp->wakemask |= rdp->grpmask; | ||
1552 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1553 | invoke_rcu_node_kthread(rnp); | ||
1554 | } | ||
1555 | |||
1556 | /* | ||
1557 | * Drop to non-real-time priority and yield, but only after posting a | ||
1558 | * timer that will cause us to regain our real-time priority if we | ||
1559 | * remain preempted. Either way, we restore our real-time priority | ||
1560 | * before returning. | ||
1561 | */ | ||
1562 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg) | ||
1563 | { | ||
1564 | struct sched_param sp; | ||
1565 | struct timer_list yield_timer; | ||
1566 | |||
1567 | setup_timer_on_stack(&yield_timer, f, arg); | ||
1568 | mod_timer(&yield_timer, jiffies + 2); | ||
1569 | sp.sched_priority = 0; | ||
1570 | sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); | ||
1571 | set_user_nice(current, 19); | ||
1572 | schedule(); | ||
1573 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1574 | sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); | ||
1575 | del_timer(&yield_timer); | ||
1576 | } | ||
1577 | |||
1578 | /* | ||
1579 | * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. | ||
1580 | * This can happen while the corresponding CPU is either coming online | ||
1581 | * or going offline. We cannot wait until the CPU is fully online | ||
1582 | * before starting the kthread, because the various notifier functions | ||
1583 | * can wait for RCU grace periods. So we park rcu_cpu_kthread() until | ||
1584 | * the corresponding CPU is online. | ||
1585 | * | ||
1586 | * Return 1 if the kthread needs to stop, 0 otherwise. | ||
1587 | * | ||
1588 | * Caller must disable bh. This function can momentarily enable it. | ||
1589 | */ | ||
1590 | static int rcu_cpu_kthread_should_stop(int cpu) | ||
1591 | { | ||
1592 | while (cpu_is_offline(cpu) || | ||
1593 | !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) || | ||
1594 | smp_processor_id() != cpu) { | ||
1595 | if (kthread_should_stop()) | ||
1596 | return 1; | ||
1597 | per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; | ||
1598 | per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); | ||
1599 | local_bh_enable(); | ||
1600 | schedule_timeout_uninterruptible(1); | ||
1601 | if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu))) | ||
1602 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | ||
1603 | local_bh_disable(); | ||
1604 | } | ||
1605 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
1606 | return 0; | ||
1607 | } | ||
1608 | |||
1609 | /* | ||
1610 | * Per-CPU kernel thread that invokes RCU callbacks. This replaces the | ||
1611 | * earlier RCU softirq. | ||
1612 | */ | ||
1613 | static int rcu_cpu_kthread(void *arg) | ||
1614 | { | ||
1615 | int cpu = (int)(long)arg; | ||
1616 | unsigned long flags; | ||
1617 | int spincnt = 0; | ||
1618 | unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); | ||
1619 | wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu); | ||
1620 | char work; | ||
1621 | char *workp = &per_cpu(rcu_cpu_has_work, cpu); | ||
1622 | |||
1623 | for (;;) { | ||
1624 | *statusp = RCU_KTHREAD_WAITING; | ||
1625 | wait_event_interruptible(*wqp, | ||
1626 | *workp != 0 || kthread_should_stop()); | ||
1627 | local_bh_disable(); | ||
1628 | if (rcu_cpu_kthread_should_stop(cpu)) { | ||
1629 | local_bh_enable(); | ||
1630 | break; | ||
1631 | } | ||
1632 | *statusp = RCU_KTHREAD_RUNNING; | ||
1633 | per_cpu(rcu_cpu_kthread_loops, cpu)++; | ||
1634 | local_irq_save(flags); | ||
1635 | work = *workp; | ||
1636 | *workp = 0; | ||
1637 | local_irq_restore(flags); | ||
1638 | if (work) | ||
1639 | rcu_process_callbacks(); | ||
1640 | local_bh_enable(); | ||
1641 | if (*workp != 0) | ||
1642 | spincnt++; | ||
1643 | else | ||
1644 | spincnt = 0; | ||
1645 | if (spincnt > 10) { | ||
1646 | *statusp = RCU_KTHREAD_YIELDING; | ||
1647 | rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); | ||
1648 | spincnt = 0; | ||
1649 | } | ||
1650 | } | ||
1651 | *statusp = RCU_KTHREAD_STOPPED; | ||
1652 | return 0; | ||
1653 | } | ||
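
The consumer loop above pairs with a producer whose tail is what appears truncated at the very top of this hunk: raise the per-CPU work flag, then wake the per-CPU wait queue. A hedged reconstruction of that wakeup path, for reading convenience only, assuming the per-CPU variables this patch introduces (rcu_cpu_has_work, rcu_cpu_kthread_task, rcu_cpu_wq):

static void invoke_rcu_cpu_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__get_cpu_var(rcu_cpu_has_work) = 1;
	if (__get_cpu_var(rcu_cpu_kthread_task) == NULL) {
		local_irq_restore(flags);
		return;			/* kthread not yet spawned */
	}
	wake_up(&__get_cpu_var(rcu_cpu_wq));
	local_irq_restore(flags);
}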
1654 | |||
1655 | /* | ||
1656 | * Spawn a per-CPU kthread, setting up affinity and priority. | ||
1657 | * Because the CPU hotplug lock is held, no other CPU will be attempting | ||
1658 | * to manipulate rcu_cpu_kthread_task. There might be another CPU | ||
1659 | * attempting to access it during boot, but the locking in kthread_bind() | ||
1660 | * will enforce sufficient ordering. | ||
1661 | */ | ||
1662 | static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) | ||
1663 | { | ||
1664 | struct sched_param sp; | ||
1665 | struct task_struct *t; | ||
1666 | |||
1667 | if (!rcu_kthreads_spawnable || | ||
1668 | per_cpu(rcu_cpu_kthread_task, cpu) != NULL) | ||
1669 | return 0; | ||
1670 | t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu); | ||
1671 | if (IS_ERR(t)) | ||
1672 | return PTR_ERR(t); | ||
1673 | kthread_bind(t, cpu); | ||
1674 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
1675 | WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); | ||
1676 | per_cpu(rcu_cpu_kthread_task, cpu) = t; | ||
1677 | wake_up_process(t); | ||
1678 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1679 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1680 | return 0; | ||
1681 | } | ||
1682 | |||
1683 | /* | ||
1684 | * Per-rcu_node kthread, which is in charge of waking up the per-CPU | ||
1685 | * kthreads when needed. We ignore requests to wake up kthreads | ||
1686 | * for offline CPUs, which is OK because force_quiescent_state() | ||
1687 | * takes care of this case. | ||
1688 | */ | ||
1689 | static int rcu_node_kthread(void *arg) | ||
1690 | { | ||
1691 | int cpu; | ||
1692 | unsigned long flags; | ||
1693 | unsigned long mask; | ||
1694 | struct rcu_node *rnp = (struct rcu_node *)arg; | ||
1695 | struct sched_param sp; | ||
1696 | struct task_struct *t; | ||
1697 | |||
1698 | for (;;) { | ||
1699 | rnp->node_kthread_status = RCU_KTHREAD_WAITING; | ||
1700 | wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0); | ||
1701 | rnp->node_kthread_status = RCU_KTHREAD_RUNNING; | ||
1702 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1703 | mask = rnp->wakemask; | ||
1704 | rnp->wakemask = 0; | ||
1705 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ | ||
1706 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { | ||
1707 | if ((mask & 0x1) == 0) | ||
1708 | continue; | ||
1709 | preempt_disable(); | ||
1710 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1711 | if (!cpu_online(cpu) || t == NULL) { | ||
1712 | preempt_enable(); | ||
1713 | continue; | ||
1714 | } | ||
1715 | per_cpu(rcu_cpu_has_work, cpu) = 1; | ||
1716 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1717 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1718 | preempt_enable(); | ||
1719 | } | ||
1720 | } | ||
1721 | /* NOTREACHED */ | ||
1722 | rnp->node_kthread_status = RCU_KTHREAD_STOPPED; | ||
1723 | return 0; | ||
1724 | } | ||
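
The open-coded `cpu++, mask >>= 1` walk above visits one candidate CPU per bit of the ->wakemask snapshot, with bit 0 mapping to CPU rnp->grplo. For readers unused to that idiom, an equivalent sketch (hypothetical helper) using for_each_set_bit() from <linux/bitops.h>:

static void wake_marked_cpus(struct rcu_node *rnp, unsigned long snap)
{
	int bit;

	/* snap is the ->wakemask value copied under rnp->lock. */
	for_each_set_bit(bit, &snap, BITS_PER_LONG) {
		int cpu = rnp->grplo + bit;	/* bit 0 is CPU rnp->grplo */

		/* ...same per-CPU flag-and-boost work as the loop above... */
	}
}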
1725 | |||
1726 | /* | ||
1727 | * Set the per-rcu_node kthread's affinity to cover all CPUs that are | ||
1728 | * served by the rcu_node in question. The CPU hotplug lock is still | ||
1729 | * held, so the value of rnp->qsmaskinit will be stable. | ||
1730 | * | ||
131 | * We don't include outgoingcpu in the affinity set; use -1 if there is | ||
1732 | * no outgoing CPU. If there are no CPUs left in the affinity set, | ||
1733 | * this function allows the kthread to execute on any CPU. | ||
1734 | */ | ||
1735 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | ||
1736 | { | ||
1737 | cpumask_var_t cm; | ||
1738 | int cpu; | ||
1739 | unsigned long mask = rnp->qsmaskinit; | ||
1740 | |||
1741 | if (rnp->node_kthread_task == NULL) | ||
1742 | return; | ||
1743 | if (!alloc_cpumask_var(&cm, GFP_KERNEL)) | ||
1744 | return; | ||
1745 | cpumask_clear(cm); | ||
1746 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) | ||
1747 | if ((mask & 0x1) && cpu != outgoingcpu) | ||
1748 | cpumask_set_cpu(cpu, cm); | ||
1749 | if (cpumask_weight(cm) == 0) { | ||
1750 | cpumask_setall(cm); | ||
1751 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) | ||
1752 | cpumask_clear_cpu(cpu, cm); | ||
1753 | WARN_ON_ONCE(cpumask_weight(cm) == 0); | ||
1754 | } | ||
1755 | set_cpus_allowed_ptr(rnp->node_kthread_task, cm); | ||
1756 | rcu_boost_kthread_setaffinity(rnp, cm); | ||
1757 | free_cpumask_var(cm); | ||
1758 | } | ||
1759 | |||
1760 | /* | ||
1761 | * Spawn a per-rcu_node kthread, setting priority and affinity. | ||
1762 | * Called during boot before online/offline can happen, or, if | ||
1763 | * during runtime, with the main CPU-hotplug locks held. So only | ||
1764 | * one of these can be executing at a time. | ||
1765 | */ | ||
1766 | static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, | ||
1767 | struct rcu_node *rnp) | ||
1768 | { | ||
1769 | unsigned long flags; | ||
1770 | int rnp_index = rnp - &rsp->node[0]; | ||
1771 | struct sched_param sp; | ||
1772 | struct task_struct *t; | ||
1773 | |||
1774 | if (!rcu_kthreads_spawnable || | ||
1775 | rnp->qsmaskinit == 0) | ||
1776 | return 0; | ||
1777 | if (rnp->node_kthread_task == NULL) { | ||
1778 | t = kthread_create(rcu_node_kthread, (void *)rnp, | ||
1779 | "rcun%d", rnp_index); | ||
1780 | if (IS_ERR(t)) | ||
1781 | return PTR_ERR(t); | ||
1782 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1783 | rnp->node_kthread_task = t; | ||
1784 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1785 | wake_up_process(t); | ||
1786 | sp.sched_priority = 99; | ||
1787 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1788 | } | ||
1789 | return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); | ||
1790 | } | ||
1791 | |||
1792 | /* | ||
1793 | * Spawn all kthreads -- called as soon as the scheduler is running. | ||
1794 | */ | ||
1795 | static int __init rcu_spawn_kthreads(void) | ||
1796 | { | ||
1797 | int cpu; | ||
1798 | struct rcu_node *rnp; | ||
1799 | |||
1800 | rcu_kthreads_spawnable = 1; | ||
1801 | for_each_possible_cpu(cpu) { | ||
1802 | init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu)); | ||
1803 | per_cpu(rcu_cpu_has_work, cpu) = 0; | ||
1804 | if (cpu_online(cpu)) | ||
1805 | (void)rcu_spawn_one_cpu_kthread(cpu); | ||
1806 | } | ||
1807 | rnp = rcu_get_root(rcu_state); | ||
1808 | init_waitqueue_head(&rnp->node_wq); | ||
1809 | rcu_init_boost_waitqueue(rnp); | ||
1810 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
1811 | if (NUM_RCU_NODES > 1) | ||
1812 | rcu_for_each_leaf_node(rcu_state, rnp) { | ||
1813 | init_waitqueue_head(&rnp->node_wq); | ||
1814 | rcu_init_boost_waitqueue(rnp); | ||
1815 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
1816 | } | ||
1817 | return 0; | ||
1818 | } | ||
1819 | early_initcall(rcu_spawn_kthreads); | ||
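
Note that early_initcall() entries run from do_pre_smp_initcalls(), before secondary CPUs are brought up, so the cpu_online() test above passes only for the boot CPU; every other CPU gets its kthread via the CPU_UP_PREPARE notifier further down. A skeleton of that split, name hypothetical:

static int __init my_early_setup(void)
{
	/* Only the boot CPU is online here; per-CPU resources for the
	 * remaining CPUs must come from the hotplug notifier later. */
	return 0;
}
early_initcall(my_early_setup);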
1820 | |||
1417 | static void | 1821 | static void |
1418 | __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | 1822 | __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), |
1419 | struct rcu_state *rsp) | 1823 | struct rcu_state *rsp) |
@@ -1439,6 +1843,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1439 | /* Add the callback to our list. */ | 1843 | /* Add the callback to our list. */ |
1440 | *rdp->nxttail[RCU_NEXT_TAIL] = head; | 1844 | *rdp->nxttail[RCU_NEXT_TAIL] = head; |
1441 | rdp->nxttail[RCU_NEXT_TAIL] = &head->next; | 1845 | rdp->nxttail[RCU_NEXT_TAIL] = &head->next; |
1846 | rdp->qlen++; | ||
1847 | |||
1848 | /* If interrupts were disabled, don't dive into RCU core. */ | ||
1849 | if (irqs_disabled_flags(flags)) { | ||
1850 | local_irq_restore(flags); | ||
1851 | return; | ||
1852 | } | ||
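
For context on how this queue is fed: a caller never touches rdp->nxttail directly. It embeds an rcu_head in its own structure and hands __call_rcu() a callback via call_rcu(). An illustrative (hypothetical) client:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	int data;
	struct rcu_head rcu;	/* ends up on rdp->nxttail[RCU_NEXT_TAIL] */
};

static void my_obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_obj, rcu));
}

/* Call after unlinking obj from all RCU-protected structures. */
static void my_obj_defer_free(struct my_obj *obj)
{
	call_rcu(&obj->rcu, my_obj_free_rcu);
}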
1442 | 1853 | ||
1443 | /* | 1854 | /* |
1444 | * Force the grace period if too many callbacks or too long waiting. | 1855 | * Force the grace period if too many callbacks or too long waiting. |
@@ -1447,7 +1858,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1447 | * invoking force_quiescent_state() if the newly enqueued callback | 1858 | * invoking force_quiescent_state() if the newly enqueued callback |
1448 | * is the only one waiting for a grace period to complete. | 1859 | * is the only one waiting for a grace period to complete. |
1449 | */ | 1860 | */ |
1450 | if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { | 1861 | if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { |
1451 | 1862 | ||
1452 | /* Are we ignoring a completed grace period? */ | 1863 | /* Are we ignoring a completed grace period? */ |
1453 | rcu_process_gp_end(rsp, rdp); | 1864 | rcu_process_gp_end(rsp, rdp); |
@@ -1583,7 +1994,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1583 | * or RCU-bh, force a local reschedule. | 1994 | * or RCU-bh, force a local reschedule. |
1584 | */ | 1995 | */ |
1585 | rdp->n_rp_qs_pending++; | 1996 | rdp->n_rp_qs_pending++; |
1586 | if (!rdp->preemptable && | 1997 | if (!rdp->preemptible && |
1587 | ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1, | 1998 | ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1, |
1588 | jiffies)) | 1999 | jiffies)) |
1589 | set_need_resched(); | 2000 | set_need_resched(); |
@@ -1760,7 +2171,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
1760 | * that this CPU cannot possibly have any RCU callbacks in flight yet. | 2171 | * that this CPU cannot possibly have any RCU callbacks in flight yet. |
1761 | */ | 2172 | */ |
1762 | static void __cpuinit | 2173 | static void __cpuinit |
1763 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | 2174 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) |
1764 | { | 2175 | { |
1765 | unsigned long flags; | 2176 | unsigned long flags; |
1766 | unsigned long mask; | 2177 | unsigned long mask; |
@@ -1772,7 +2183,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | |||
1772 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ | 2183 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ |
1773 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ | 2184 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ |
1774 | rdp->beenonline = 1; /* We have now been online. */ | 2185 | rdp->beenonline = 1; /* We have now been online. */ |
1775 | rdp->preemptable = preemptable; | 2186 | rdp->preemptible = preemptible; |
1776 | rdp->qlen_last_fqs_check = 0; | 2187 | rdp->qlen_last_fqs_check = 0; |
1777 | rdp->n_force_qs_snap = rsp->n_force_qs; | 2188 | rdp->n_force_qs_snap = rsp->n_force_qs; |
1778 | rdp->blimit = blimit; | 2189 | rdp->blimit = blimit; |
@@ -1813,6 +2224,19 @@ static void __cpuinit rcu_online_cpu(int cpu) | |||
1813 | rcu_preempt_init_percpu_data(cpu); | 2224 | rcu_preempt_init_percpu_data(cpu); |
1814 | } | 2225 | } |
1815 | 2226 | ||
2227 | static void __cpuinit rcu_online_kthreads(int cpu) | ||
2228 | { | ||
2229 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | ||
2230 | struct rcu_node *rnp = rdp->mynode; | ||
2231 | |||
2232 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ | ||
2233 | if (rcu_kthreads_spawnable) { | ||
2234 | (void)rcu_spawn_one_cpu_kthread(cpu); | ||
2235 | if (rnp->node_kthread_task == NULL) | ||
2236 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
2237 | } | ||
2238 | } | ||
2239 | |||
1816 | /* | 2240 | /* |
1817 | * Handle CPU online/offline notification events. | 2241 | * Handle CPU online/offline notification events. |
1818 | */ | 2242 | */ |
@@ -1820,11 +2244,23 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
1820 | unsigned long action, void *hcpu) | 2244 | unsigned long action, void *hcpu) |
1821 | { | 2245 | { |
1822 | long cpu = (long)hcpu; | 2246 | long cpu = (long)hcpu; |
2247 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | ||
2248 | struct rcu_node *rnp = rdp->mynode; | ||
1823 | 2249 | ||
1824 | switch (action) { | 2250 | switch (action) { |
1825 | case CPU_UP_PREPARE: | 2251 | case CPU_UP_PREPARE: |
1826 | case CPU_UP_PREPARE_FROZEN: | 2252 | case CPU_UP_PREPARE_FROZEN: |
1827 | rcu_online_cpu(cpu); | 2253 | rcu_online_cpu(cpu); |
2254 | rcu_online_kthreads(cpu); | ||
2255 | break; | ||
2256 | case CPU_ONLINE: | ||
2257 | case CPU_DOWN_FAILED: | ||
2258 | rcu_node_kthread_setaffinity(rnp, -1); | ||
2259 | rcu_cpu_kthread_setrt(cpu, 1); | ||
2260 | break; | ||
2261 | case CPU_DOWN_PREPARE: | ||
2262 | rcu_node_kthread_setaffinity(rnp, cpu); | ||
2263 | rcu_cpu_kthread_setrt(cpu, 0); | ||
1828 | break; | 2264 | break; |
1829 | case CPU_DYING: | 2265 | case CPU_DYING: |
1830 | case CPU_DYING_FROZEN: | 2266 | case CPU_DYING_FROZEN: |
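
These cases follow the standard CPU-hotplug notifier protocol: CPU_UP_PREPARE runs before the incoming CPU executes anything, CPU_ONLINE and CPU_DOWN_FAILED run once the CPU is (or remains) usable, and CPU_DOWN_PREPARE runs while the outgoing CPU is still online. A minimal sketch of that protocol, names hypothetical:

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit my_cpu_notify(struct notifier_block *self,
				   unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;		/* hcpu encodes the CPU number */

	pr_debug("hotplug action %lu on CPU %ld\n", action, cpu);
	switch (action) {
	case CPU_UP_PREPARE:		/* 'cpu' not yet running */
		/* allocate per-CPU state, spawn bound kthreads */
		break;
	case CPU_ONLINE:		/* 'cpu' fully up */
	case CPU_DOWN_FAILED:		/* offlining was aborted */
		/* widen affinity, raise kthreads to RT */
		break;
	case CPU_DOWN_PREPARE:		/* 'cpu' still online but leaving */
		/* move affinity off 'cpu', drop kthreads to SCHED_NORMAL */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_nb __cpuinitdata = {
	.notifier_call = my_cpu_notify,
};
/* registered during boot with register_cpu_notifier(&my_cpu_nb) */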
@@ -1943,10 +2379,7 @@ static void __init rcu_init_one(struct rcu_state *rsp, | |||
1943 | j / rsp->levelspread[i - 1]; | 2379 | j / rsp->levelspread[i - 1]; |
1944 | } | 2380 | } |
1945 | rnp->level = i; | 2381 | rnp->level = i; |
1946 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); | 2382 | INIT_LIST_HEAD(&rnp->blkd_tasks); |
1947 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); | ||
1948 | INIT_LIST_HEAD(&rnp->blocked_tasks[2]); | ||
1949 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); | ||
1950 | } | 2383 | } |
1951 | } | 2384 | } |
1952 | 2385 | ||
@@ -1968,7 +2401,6 @@ void __init rcu_init(void) | |||
1968 | rcu_init_one(&rcu_sched_state, &rcu_sched_data); | 2401 | rcu_init_one(&rcu_sched_state, &rcu_sched_data); |
1969 | rcu_init_one(&rcu_bh_state, &rcu_bh_data); | 2402 | rcu_init_one(&rcu_bh_state, &rcu_bh_data); |
1970 | __rcu_init_preempt(); | 2403 | __rcu_init_preempt(); |
1971 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | ||
1972 | 2404 | ||
1973 | /* | 2405 | /* |
1974 | * We don't need protection against CPU-hotplug here because | 2406 | * We don't need protection against CPU-hotplug here because |
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index e8f057e44e3e..257664815d5d 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
@@ -91,6 +91,14 @@ struct rcu_dynticks { | |||
91 | /* remains even for nmi from irq handler. */ | 91 | /* remains even for nmi from irq handler. */ |
92 | }; | 92 | }; |
93 | 93 | ||
94 | /* RCU's kthread states for tracing. */ | ||
95 | #define RCU_KTHREAD_STOPPED 0 | ||
96 | #define RCU_KTHREAD_RUNNING 1 | ||
97 | #define RCU_KTHREAD_WAITING 2 | ||
98 | #define RCU_KTHREAD_OFFCPU 3 | ||
99 | #define RCU_KTHREAD_YIELDING 4 | ||
100 | #define RCU_KTHREAD_MAX 4 | ||
101 | |||
94 | /* | 102 | /* |
95 | * Definition for node within the RCU grace-period-detection hierarchy. | 103 | * Definition for node within the RCU grace-period-detection hierarchy. |
96 | */ | 104 | */ |
@@ -109,10 +117,11 @@ struct rcu_node { | |||
109 | /* an rcu_data structure, otherwise, each */ | 117 | /* an rcu_data structure, otherwise, each */ |
110 | /* bit corresponds to a child rcu_node */ | 118 | /* bit corresponds to a child rcu_node */ |
111 | /* structure. */ | 119 | /* structure. */ |
112 | unsigned long expmask; /* Groups that have ->blocked_tasks[] */ | 120 | unsigned long expmask; /* Groups that have ->blkd_tasks */ |
113 | /* elements that need to drain to allow the */ | 121 | /* elements that need to drain to allow the */ |
114 | /* current expedited grace period to */ | 122 | /* current expedited grace period to */ |
115 | /* complete (only for TREE_PREEMPT_RCU). */ | 123 | /* complete (only for TREE_PREEMPT_RCU). */ |
124 | unsigned long wakemask; /* CPUs whose kthread needs to be awakened. */ | ||
116 | unsigned long qsmaskinit; | 125 | unsigned long qsmaskinit; |
117 | /* Per-GP initial value for qsmask & expmask. */ | 126 | /* Per-GP initial value for qsmask & expmask. */ |
118 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ | 127 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ |
@@ -122,11 +131,68 @@ struct rcu_node { | |||
122 | u8 grpnum; /* CPU/group number for next level up. */ | 131 | u8 grpnum; /* CPU/group number for next level up. */ |
123 | u8 level; /* root is at level 0. */ | 132 | u8 level; /* root is at level 0. */ |
124 | struct rcu_node *parent; | 133 | struct rcu_node *parent; |
125 | struct list_head blocked_tasks[4]; | 134 | struct list_head blkd_tasks; |
126 | /* Tasks blocked in RCU read-side critsect. */ | 135 | /* Tasks blocked in RCU read-side critical */ |
127 | /* Grace period number (->gpnum) x blocked */ | 136 | /* section. Tasks are placed at the head */ |
128 | /* by tasks on the (x & 0x1) element of the */ | 137 | /* of this list and age towards the tail. */ |
129 | /* blocked_tasks[] array. */ | 138 | struct list_head *gp_tasks; |
139 | /* Pointer to the first task blocking the */ | ||
140 | /* current grace period, or NULL if there */ | ||
141 | /* is no such task. */ | ||
142 | struct list_head *exp_tasks; | ||
143 | /* Pointer to the first task blocking the */ | ||
144 | /* current expedited grace period, or NULL */ | ||
145 | /* if there is no such task. If there */ | ||
146 | /* is no current expedited grace period, */ | ||
147 | /* then there can cannot be any such task. */ | ||
148 | #ifdef CONFIG_RCU_BOOST | ||
149 | struct list_head *boost_tasks; | ||
150 | /* Pointer to first task that needs to be */ | ||
151 | /* priority boosted, or NULL if no priority */ | ||
152 | /* boosting is needed for this rcu_node */ | ||
153 | /* structure. If there are no tasks */ | ||
154 | /* queued on this rcu_node structure that */ | ||
155 | /* are blocking the current grace period, */ | ||
156 | /* there can be no such task. */ | ||
157 | unsigned long boost_time; | ||
158 | /* When to start boosting (jiffies). */ | ||
159 | struct task_struct *boost_kthread_task; | ||
160 | /* kthread that takes care of priority */ | ||
161 | /* boosting for this rcu_node structure. */ | ||
162 | wait_queue_head_t boost_wq; | ||
163 | /* Wait queue on which to park the boost */ | ||
164 | /* kthread. */ | ||
165 | unsigned int boost_kthread_status; | ||
166 | /* State of boost_kthread_task for tracing. */ | ||
167 | unsigned long n_tasks_boosted; | ||
168 | /* Total number of tasks boosted. */ | ||
169 | unsigned long n_exp_boosts; | ||
170 | /* Number of tasks boosted for expedited GP. */ | ||
171 | unsigned long n_normal_boosts; | ||
172 | /* Number of tasks boosted for normal GP. */ | ||
173 | unsigned long n_balk_blkd_tasks; | ||
174 | /* Refused to boost: no blocked tasks. */ | ||
175 | unsigned long n_balk_exp_gp_tasks; | ||
176 | /* Refused to boost: nothing blocking GP. */ | ||
177 | unsigned long n_balk_boost_tasks; | ||
178 | /* Refused to boost: already boosting. */ | ||
179 | unsigned long n_balk_notblocked; | ||
180 | /* Refused to boost: RCU RS CS still running. */ | ||
181 | unsigned long n_balk_notyet; | ||
182 | /* Refused to boost: not yet time. */ | ||
183 | unsigned long n_balk_nos; | ||
184 | /* Refused to boost: not sure why, though. */ | ||
185 | /* This can happen due to race conditions. */ | ||
186 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
187 | struct task_struct *node_kthread_task; | ||
188 | /* kthread that takes care of this rcu_node */ | ||
189 | /* structure, for example, awakening the */ | ||
190 | /* per-CPU kthreads as needed. */ | ||
191 | wait_queue_head_t node_wq; | ||
192 | /* Wait queue on which to park the per-node */ | ||
193 | /* kthread. */ | ||
194 | unsigned int node_kthread_status; | ||
195 | /* State of node_kthread_task for tracing. */ | ||
130 | } ____cacheline_internodealigned_in_smp; | 196 | } ____cacheline_internodealigned_in_smp; |
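
The single ->blkd_tasks list replaces the old four-list scheme by exploiting age ordering: tasks enqueue at the head and age toward the tail, so each of the ->gp_tasks, ->exp_tasks, and ->boost_tasks pointers marks a suffix of the list. One possible snapshot, plus a hypothetical walk of the current grace period's blockers (caller holding rnp->lock):

/*
 *  head (newest)                                      tail (oldest)
 *    T5 -> T4 -> [gp_tasks = T3] -> T2 -> [exp_tasks = T1]
 *
 * Every task from ->gp_tasks through the tail blocks the current
 * grace period; every task from ->exp_tasks through the tail also
 * blocks the current expedited grace period.
 */
static void walk_gp_blockers(struct rcu_node *rnp)
{
	struct list_head *p;
	struct task_struct *t;

	if (rnp->gp_tasks == NULL)
		return;		/* nothing blocks the current GP */
	for (p = rnp->gp_tasks; p != &rnp->blkd_tasks; p = p->next) {
		t = list_entry(p, struct task_struct, rcu_node_entry);
		sched_show_task(t);	/* e.g., dump each blocked reader */
	}
}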
131 | 197 | ||
132 | /* | 198 | /* |
@@ -175,7 +241,7 @@ struct rcu_data { | |||
175 | bool passed_quiesc; /* User-mode/idle loop etc. */ | 241 | bool passed_quiesc; /* User-mode/idle loop etc. */ |
176 | bool qs_pending; /* Core waits for quiesc state. */ | 242 | bool qs_pending; /* Core waits for quiesc state. */ |
177 | bool beenonline; /* CPU online at least once. */ | 243 | bool beenonline; /* CPU online at least once. */ |
178 | bool preemptable; /* Preemptable RCU? */ | 244 | bool preemptible; /* Preemptible RCU? */ |
179 | struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ | 245 | struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ |
180 | unsigned long grpmask; /* Mask to apply to leaf qsmask. */ | 246 | unsigned long grpmask; /* Mask to apply to leaf qsmask. */ |
181 | 247 | ||
@@ -254,7 +320,6 @@ struct rcu_data { | |||
254 | #endif /* #else #ifdef CONFIG_NO_HZ */ | 320 | #endif /* #else #ifdef CONFIG_NO_HZ */ |
255 | 321 | ||
256 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ | 322 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ |
257 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
258 | 323 | ||
259 | #ifdef CONFIG_PROVE_RCU | 324 | #ifdef CONFIG_PROVE_RCU |
260 | #define RCU_STALL_DELAY_DELTA (5 * HZ) | 325 | #define RCU_STALL_DELAY_DELTA (5 * HZ) |
@@ -272,13 +337,6 @@ struct rcu_data { | |||
272 | /* scheduling clock irq */ | 337 | /* scheduling clock irq */ |
273 | /* before ratting on them. */ | 338 | /* before ratting on them. */ |
274 | 339 | ||
275 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR_RUNNABLE | ||
276 | #define RCU_CPU_STALL_SUPPRESS_INIT 0 | ||
277 | #else | ||
278 | #define RCU_CPU_STALL_SUPPRESS_INIT 1 | ||
279 | #endif | ||
280 | |||
281 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
282 | 340 | ||
283 | /* | 341 | /* |
284 | * RCU global state, including node hierarchy. This hierarchy is | 342 | * RCU global state, including node hierarchy. This hierarchy is |
@@ -325,12 +383,12 @@ struct rcu_state { | |||
325 | /* due to lock unavailable. */ | 383 | /* due to lock unavailable. */ |
326 | unsigned long n_force_qs_ngp; /* Number of calls leaving */ | 384 | unsigned long n_force_qs_ngp; /* Number of calls leaving */ |
327 | /* due to no GP active. */ | 385 | /* due to no GP active. */ |
328 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
329 | unsigned long gp_start; /* Time at which GP started, */ | 386 | unsigned long gp_start; /* Time at which GP started, */ |
330 | /* but in jiffies. */ | 387 | /* but in jiffies. */ |
331 | unsigned long jiffies_stall; /* Time at which to check */ | 388 | unsigned long jiffies_stall; /* Time at which to check */ |
332 | /* for CPU stalls. */ | 389 | /* for CPU stalls. */ |
333 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 390 | unsigned long gp_max; /* Maximum GP duration in */ |
391 | /* jiffies. */ | ||
334 | char *name; /* Name of structure. */ | 392 | char *name; /* Name of structure. */ |
335 | }; | 393 | }; |
336 | 394 | ||
@@ -361,16 +419,14 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); | |||
361 | static void rcu_bootup_announce(void); | 419 | static void rcu_bootup_announce(void); |
362 | long rcu_batches_completed(void); | 420 | long rcu_batches_completed(void); |
363 | static void rcu_preempt_note_context_switch(int cpu); | 421 | static void rcu_preempt_note_context_switch(int cpu); |
364 | static int rcu_preempted_readers(struct rcu_node *rnp); | 422 | static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); |
365 | #ifdef CONFIG_HOTPLUG_CPU | 423 | #ifdef CONFIG_HOTPLUG_CPU |
366 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, | 424 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, |
367 | unsigned long flags); | 425 | unsigned long flags); |
368 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 426 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
369 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
370 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); | 427 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); |
371 | static void rcu_print_task_stall(struct rcu_node *rnp); | 428 | static void rcu_print_task_stall(struct rcu_node *rnp); |
372 | static void rcu_preempt_stall_reset(void); | 429 | static void rcu_preempt_stall_reset(void); |
373 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
374 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); | 430 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); |
375 | #ifdef CONFIG_HOTPLUG_CPU | 431 | #ifdef CONFIG_HOTPLUG_CPU |
376 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | 432 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
@@ -390,5 +446,13 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu); | |||
390 | static void rcu_preempt_send_cbs_to_online(void); | 446 | static void rcu_preempt_send_cbs_to_online(void); |
391 | static void __init __rcu_init_preempt(void); | 447 | static void __init __rcu_init_preempt(void); |
392 | static void rcu_needs_cpu_flush(void); | 448 | static void rcu_needs_cpu_flush(void); |
449 | static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp); | ||
450 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); | ||
451 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, | ||
452 | cpumask_var_t cm); | ||
453 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); | ||
454 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | ||
455 | struct rcu_node *rnp, | ||
456 | int rnp_index); | ||
393 | 457 | ||
394 | #endif /* #ifndef RCU_TREE_NONCORE */ | 458 | #endif /* #ifndef RCU_TREE_NONCORE */ |
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index a3638710dc67..3f6559a5f5cd 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) | 2 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) |
3 | * Internal non-public definitions that provide either classic | 3 | * Internal non-public definitions that provide either classic |
4 | * or preemptable semantics. | 4 | * or preemptible semantics. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -54,10 +54,6 @@ static void __init rcu_bootup_announce_oddness(void) | |||
54 | #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE | 54 | #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE |
55 | printk(KERN_INFO "\tRCU torture testing starts during boot.\n"); | 55 | printk(KERN_INFO "\tRCU torture testing starts during boot.\n"); |
56 | #endif | 56 | #endif |
57 | #ifndef CONFIG_RCU_CPU_STALL_DETECTOR | ||
58 | printk(KERN_INFO | ||
59 | "\tRCU-based detection of stalled CPUs is disabled.\n"); | ||
60 | #endif | ||
61 | #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE) | 57 | #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE) |
62 | printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); | 58 | printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); |
63 | #endif | 59 | #endif |
@@ -70,6 +66,7 @@ static void __init rcu_bootup_announce_oddness(void) | |||
70 | 66 | ||
71 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); | 67 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); |
72 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); | 68 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); |
69 | static struct rcu_state *rcu_state = &rcu_preempt_state; | ||
73 | 70 | ||
74 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); | 71 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); |
75 | 72 | ||
@@ -78,7 +75,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp); | |||
78 | */ | 75 | */ |
79 | static void __init rcu_bootup_announce(void) | 76 | static void __init rcu_bootup_announce(void) |
80 | { | 77 | { |
81 | printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n"); | 78 | printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n"); |
82 | rcu_bootup_announce_oddness(); | 79 | rcu_bootup_announce_oddness(); |
83 | } | 80 | } |
84 | 81 | ||
@@ -111,7 +108,7 @@ void rcu_force_quiescent_state(void) | |||
111 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | 108 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); |
112 | 109 | ||
113 | /* | 110 | /* |
114 | * Record a preemptable-RCU quiescent state for the specified CPU. Note | 111 | * Record a preemptible-RCU quiescent state for the specified CPU. Note |
115 | * that this just means that the task currently running on the CPU is | 112 | * that this just means that the task currently running on the CPU is |
116 | * not in a quiescent state. There might be any number of tasks blocked | 113 | * not in a quiescent state. There might be any number of tasks blocked |
117 | * while in an RCU read-side critical section. | 114 | * while in an RCU read-side critical section. |
@@ -134,12 +131,12 @@ static void rcu_preempt_qs(int cpu) | |||
134 | * We have entered the scheduler, and the current task might soon be | 131 | * We have entered the scheduler, and the current task might soon be |
135 | * context-switched away from. If this task is in an RCU read-side | 132 | * context-switched away from. If this task is in an RCU read-side |
136 | * critical section, we will no longer be able to rely on the CPU to | 133 | * critical section, we will no longer be able to rely on the CPU to |
137 | * record that fact, so we enqueue the task on the appropriate entry | 134 | * record that fact, so we enqueue the task on the blkd_tasks list. |
138 | * of the blocked_tasks[] array. The task will dequeue itself when | 135 | * The task will dequeue itself when it exits the outermost enclosing |
139 | * it exits the outermost enclosing RCU read-side critical section. | 136 | * RCU read-side critical section. Therefore, the current grace period |
140 | * Therefore, the current grace period cannot be permitted to complete | 137 | * cannot be permitted to complete until the blkd_tasks list entries |
141 | * until the blocked_tasks[] entry indexed by the low-order bit of | 138 | * predating the current grace period drain, in other words, until |
142 | * rnp->gpnum empties. | 139 | * rnp->gp_tasks becomes NULL. |
143 | * | 140 | * |
144 | * Caller must disable preemption. | 141 | * Caller must disable preemption. |
145 | */ | 142 | */ |
@@ -147,7 +144,6 @@ static void rcu_preempt_note_context_switch(int cpu) | |||
147 | { | 144 | { |
148 | struct task_struct *t = current; | 145 | struct task_struct *t = current; |
149 | unsigned long flags; | 146 | unsigned long flags; |
150 | int phase; | ||
151 | struct rcu_data *rdp; | 147 | struct rcu_data *rdp; |
152 | struct rcu_node *rnp; | 148 | struct rcu_node *rnp; |
153 | 149 | ||
@@ -169,15 +165,30 @@ static void rcu_preempt_note_context_switch(int cpu) | |||
169 | * (i.e., this CPU has not yet passed through a quiescent | 165 | * (i.e., this CPU has not yet passed through a quiescent |
170 | * state for the current grace period), then as long | 166 | * state for the current grace period), then as long |
171 | * as that task remains queued, the current grace period | 167 | * as that task remains queued, the current grace period |
172 | * cannot end. | 168 | * cannot end. Note that there is some uncertainty as |
169 | * to exactly when the current grace period started. | ||
170 | * We take a conservative approach, which can result | ||
171 | * in unnecessarily waiting on tasks that started very | ||
172 | * slightly after the current grace period began. C'est | ||
173 | * la vie!!! | ||
173 | * | 174 | * |
174 | * But first, note that the current CPU must still be | 175 | * But first, note that the current CPU must still be |
175 | * on line! | 176 | * on line! |
176 | */ | 177 | */ |
177 | WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0); | 178 | WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0); |
178 | WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); | 179 | WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); |
179 | phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1; | 180 | if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) { |
180 | list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]); | 181 | list_add(&t->rcu_node_entry, rnp->gp_tasks->prev); |
182 | rnp->gp_tasks = &t->rcu_node_entry; | ||
183 | #ifdef CONFIG_RCU_BOOST | ||
184 | if (rnp->boost_tasks != NULL) | ||
185 | rnp->boost_tasks = rnp->gp_tasks; | ||
186 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
187 | } else { | ||
188 | list_add(&t->rcu_node_entry, &rnp->blkd_tasks); | ||
189 | if (rnp->qsmask & rdp->grpmask) | ||
190 | rnp->gp_tasks = &t->rcu_node_entry; | ||
191 | } | ||
181 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 192 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
182 | } | 193 | } |
183 | 194 | ||
@@ -196,7 +207,7 @@ static void rcu_preempt_note_context_switch(int cpu) | |||
196 | } | 207 | } |
197 | 208 | ||
198 | /* | 209 | /* |
199 | * Tree-preemptable RCU implementation for rcu_read_lock(). | 210 | * Tree-preemptible RCU implementation for rcu_read_lock(). |
200 | * Just increment ->rcu_read_lock_nesting, shared state will be updated | 211 | * Just increment ->rcu_read_lock_nesting, shared state will be updated |
201 | * if we block. | 212 | * if we block. |
202 | */ | 213 | */ |
@@ -212,12 +223,9 @@ EXPORT_SYMBOL_GPL(__rcu_read_lock); | |||
212 | * for the specified rcu_node structure. If the caller needs a reliable | 223 | * for the specified rcu_node structure. If the caller needs a reliable |
213 | * answer, it must hold the rcu_node's ->lock. | 224 | * answer, it must hold the rcu_node's ->lock. |
214 | */ | 225 | */ |
215 | static int rcu_preempted_readers(struct rcu_node *rnp) | 226 | static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) |
216 | { | 227 | { |
217 | int phase = rnp->gpnum & 0x1; | 228 | return rnp->gp_tasks != NULL; |
218 | |||
219 | return !list_empty(&rnp->blocked_tasks[phase]) || | ||
220 | !list_empty(&rnp->blocked_tasks[phase + 2]); | ||
221 | } | 229 | } |
222 | 230 | ||
223 | /* | 231 | /* |
@@ -233,7 +241,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) | |||
233 | unsigned long mask; | 241 | unsigned long mask; |
234 | struct rcu_node *rnp_p; | 242 | struct rcu_node *rnp_p; |
235 | 243 | ||
236 | if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) { | 244 | if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { |
237 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 245 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
238 | return; /* Still need more quiescent states! */ | 246 | return; /* Still need more quiescent states! */ |
239 | } | 247 | } |
@@ -257,6 +265,21 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) | |||
257 | } | 265 | } |
258 | 266 | ||
259 | /* | 267 | /* |
268 | * Advance a ->blkd_tasks-list pointer to the next entry, instead | ||
269 | * returning NULL if at the end of the list. | ||
270 | */ | ||
271 | static struct list_head *rcu_next_node_entry(struct task_struct *t, | ||
272 | struct rcu_node *rnp) | ||
273 | { | ||
274 | struct list_head *np; | ||
275 | |||
276 | np = t->rcu_node_entry.next; | ||
277 | if (np == &rnp->blkd_tasks) | ||
278 | np = NULL; | ||
279 | return np; | ||
280 | } | ||
281 | |||
282 | /* | ||
260 | * Handle special cases during rcu_read_unlock(), such as needing to | 283 | * Handle special cases during rcu_read_unlock(), such as needing to |
261 | * notify RCU core processing or task having blocked during the RCU | 284 | * notify RCU core processing or task having blocked during the RCU |
262 | * read-side critical section. | 285 | * read-side critical section. |
@@ -266,6 +289,7 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
266 | int empty; | 289 | int empty; |
267 | int empty_exp; | 290 | int empty_exp; |
268 | unsigned long flags; | 291 | unsigned long flags; |
292 | struct list_head *np; | ||
269 | struct rcu_node *rnp; | 293 | struct rcu_node *rnp; |
270 | int special; | 294 | int special; |
271 | 295 | ||
@@ -306,10 +330,19 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
306 | break; | 330 | break; |
307 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 331 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
308 | } | 332 | } |
309 | empty = !rcu_preempted_readers(rnp); | 333 | empty = !rcu_preempt_blocked_readers_cgp(rnp); |
310 | empty_exp = !rcu_preempted_readers_exp(rnp); | 334 | empty_exp = !rcu_preempted_readers_exp(rnp); |
311 | smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ | 335 | smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ |
336 | np = rcu_next_node_entry(t, rnp); | ||
312 | list_del_init(&t->rcu_node_entry); | 337 | list_del_init(&t->rcu_node_entry); |
338 | if (&t->rcu_node_entry == rnp->gp_tasks) | ||
339 | rnp->gp_tasks = np; | ||
340 | if (&t->rcu_node_entry == rnp->exp_tasks) | ||
341 | rnp->exp_tasks = np; | ||
342 | #ifdef CONFIG_RCU_BOOST | ||
343 | if (&t->rcu_node_entry == rnp->boost_tasks) | ||
344 | rnp->boost_tasks = np; | ||
345 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
313 | t->rcu_blocked_node = NULL; | 346 | t->rcu_blocked_node = NULL; |
314 | 347 | ||
315 | /* | 348 | /* |
@@ -322,6 +355,15 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
322 | else | 355 | else |
323 | rcu_report_unblock_qs_rnp(rnp, flags); | 356 | rcu_report_unblock_qs_rnp(rnp, flags); |
324 | 357 | ||
358 | #ifdef CONFIG_RCU_BOOST | ||
359 | /* Unboost if we were boosted. */ | ||
360 | if (special & RCU_READ_UNLOCK_BOOSTED) { | ||
361 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED; | ||
362 | rt_mutex_unlock(t->rcu_boost_mutex); | ||
363 | t->rcu_boost_mutex = NULL; | ||
364 | } | ||
365 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
366 | |||
325 | /* | 367 | /* |
326 | * If this was the last task on the expedited lists, | 368 | * If this was the last task on the expedited lists, |
327 | * then we need to report up the rcu_node hierarchy. | 369 | * then we need to report up the rcu_node hierarchy. |
@@ -334,7 +376,7 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
334 | } | 376 | } |
335 | 377 | ||
336 | /* | 378 | /* |
337 | * Tree-preemptable RCU implementation for rcu_read_unlock(). | 379 | * Tree-preemptible RCU implementation for rcu_read_unlock(). |
338 | * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost | 380 | * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost |
339 | * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then | 381 | * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then |
340 | * invoke rcu_read_unlock_special() to clean up after a context switch | 382 | * invoke rcu_read_unlock_special() to clean up after a context switch |
@@ -356,8 +398,6 @@ void __rcu_read_unlock(void) | |||
356 | } | 398 | } |
357 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); | 399 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); |
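
Tying the read side together, reusing struct my_obj from the call_rcu() sketch earlier (the global pointer name is hypothetical): a reader preempted between lock and unlock is exactly what lands on ->blkd_tasks above.

static struct my_obj __rcu *my_global_ptr;	/* assigned elsewhere with */
						/* rcu_assign_pointer() */
static int my_read_data(void)
{
	struct my_obj *p;
	int val = -1;

	rcu_read_lock();		/* ->rcu_read_lock_nesting++ */
	p = rcu_dereference(my_global_ptr);
	if (p != NULL)
		val = p->data;		/* preemption here enqueues the task */
					/* on ->blkd_tasks at context switch */
	rcu_read_unlock();		/* outermost unlock with special set */
					/* runs rcu_read_unlock_special() */
	return val;
}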
358 | 400 | ||
359 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
360 | |||
361 | #ifdef CONFIG_RCU_CPU_STALL_VERBOSE | 401 | #ifdef CONFIG_RCU_CPU_STALL_VERBOSE |
362 | 402 | ||
363 | /* | 403 | /* |
@@ -367,18 +407,16 @@ EXPORT_SYMBOL_GPL(__rcu_read_unlock); | |||
367 | static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) | 407 | static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) |
368 | { | 408 | { |
369 | unsigned long flags; | 409 | unsigned long flags; |
370 | struct list_head *lp; | ||
371 | int phase; | ||
372 | struct task_struct *t; | 410 | struct task_struct *t; |
373 | 411 | ||
374 | if (rcu_preempted_readers(rnp)) { | 412 | if (!rcu_preempt_blocked_readers_cgp(rnp)) |
375 | raw_spin_lock_irqsave(&rnp->lock, flags); | 413 | return; |
376 | phase = rnp->gpnum & 0x1; | 414 | raw_spin_lock_irqsave(&rnp->lock, flags); |
377 | lp = &rnp->blocked_tasks[phase]; | 415 | t = list_entry(rnp->gp_tasks, |
378 | list_for_each_entry(t, lp, rcu_node_entry) | 416 | struct task_struct, rcu_node_entry); |
379 | sched_show_task(t); | 417 | list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) |
380 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 418 | sched_show_task(t); |
381 | } | 419 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
382 | } | 420 | } |
383 | 421 | ||
384 | /* | 422 | /* |
@@ -408,16 +446,14 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp) | |||
408 | */ | 446 | */ |
409 | static void rcu_print_task_stall(struct rcu_node *rnp) | 447 | static void rcu_print_task_stall(struct rcu_node *rnp) |
410 | { | 448 | { |
411 | struct list_head *lp; | ||
412 | int phase; | ||
413 | struct task_struct *t; | 449 | struct task_struct *t; |
414 | 450 | ||
415 | if (rcu_preempted_readers(rnp)) { | 451 | if (!rcu_preempt_blocked_readers_cgp(rnp)) |
416 | phase = rnp->gpnum & 0x1; | 452 | return; |
417 | lp = &rnp->blocked_tasks[phase]; | 453 | t = list_entry(rnp->gp_tasks, |
418 | list_for_each_entry(t, lp, rcu_node_entry) | 454 | struct task_struct, rcu_node_entry); |
419 | printk(" P%d", t->pid); | 455 | list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) |
420 | } | 456 | printk(" P%d", t->pid); |
421 | } | 457 | } |
422 | 458 | ||
423 | /* | 459 | /* |
@@ -430,18 +466,21 @@ static void rcu_preempt_stall_reset(void) | |||
430 | rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2; | 466 | rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2; |
431 | } | 467 | } |
432 | 468 | ||
433 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
434 | |||
435 | /* | 469 | /* |
436 | * Check that the list of blocked tasks for the newly completed grace | 470 | * Check that the list of blocked tasks for the newly completed grace |
437 | * period is in fact empty. It is a serious bug to complete a grace | 471 | * period is in fact empty. It is a serious bug to complete a grace |
438 | * period that still has RCU readers blocked! This function must be | 472 | * period that still has RCU readers blocked! This function must be |
439 | * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock | 473 | * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock |
440 | * must be held by the caller. | 474 | * must be held by the caller. |
475 | * | ||
476 | * Also, if there are blocked tasks on the list, they automatically | ||
477 | * block the newly created grace period, so set up ->gp_tasks accordingly. | ||
441 | */ | 478 | */ |
442 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | 479 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) |
443 | { | 480 | { |
444 | WARN_ON_ONCE(rcu_preempted_readers(rnp)); | 481 | WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); |
482 | if (!list_empty(&rnp->blkd_tasks)) | ||
483 | rnp->gp_tasks = rnp->blkd_tasks.next; | ||
445 | WARN_ON_ONCE(rnp->qsmask); | 484 | WARN_ON_ONCE(rnp->qsmask); |
446 | } | 485 | } |
447 | 486 | ||
@@ -465,50 +504,68 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
465 | struct rcu_node *rnp, | 504 | struct rcu_node *rnp, |
466 | struct rcu_data *rdp) | 505 | struct rcu_data *rdp) |
467 | { | 506 | { |
468 | int i; | ||
469 | struct list_head *lp; | 507 | struct list_head *lp; |
470 | struct list_head *lp_root; | 508 | struct list_head *lp_root; |
471 | int retval = 0; | 509 | int retval = 0; |
472 | struct rcu_node *rnp_root = rcu_get_root(rsp); | 510 | struct rcu_node *rnp_root = rcu_get_root(rsp); |
473 | struct task_struct *tp; | 511 | struct task_struct *t; |
474 | 512 | ||
475 | if (rnp == rnp_root) { | 513 | if (rnp == rnp_root) { |
476 | WARN_ONCE(1, "Last CPU thought to be offlined?"); | 514 | WARN_ONCE(1, "Last CPU thought to be offlined?"); |
477 | return 0; /* Shouldn't happen: at least one CPU online. */ | 515 | return 0; /* Shouldn't happen: at least one CPU online. */ |
478 | } | 516 | } |
479 | WARN_ON_ONCE(rnp != rdp->mynode && | 517 | |
480 | (!list_empty(&rnp->blocked_tasks[0]) || | 518 | /* If we are on an internal node, complain bitterly. */ |
481 | !list_empty(&rnp->blocked_tasks[1]) || | 519 | WARN_ON_ONCE(rnp != rdp->mynode); |
482 | !list_empty(&rnp->blocked_tasks[2]) || | ||
483 | !list_empty(&rnp->blocked_tasks[3]))); | ||
484 | 520 | ||
485 | /* | 521 | /* |
486 | * Move tasks up to root rcu_node. Rely on the fact that the | 522 | * Move tasks up to root rcu_node. Don't try to get fancy for |
487 | * root rcu_node can be at most one ahead of the rest of the | 523 | * this corner-case operation -- just put this node's tasks |
488 | * rcu_nodes in terms of gp_num value. This fact allows us to | 524 | * at the head of the root node's list, and update the root node's |
489 | * move the blocked_tasks[] array directly, element by element. | 525 | * ->gp_tasks and ->exp_tasks pointers to those of this node's, |
526 | * if non-NULL. This might result in waiting for more tasks than | ||
527 | * absolutely necessary, but this is a good performance/complexity | ||
528 | * tradeoff. | ||
490 | */ | 529 | */ |
491 | if (rcu_preempted_readers(rnp)) | 530 | if (rcu_preempt_blocked_readers_cgp(rnp)) |
492 | retval |= RCU_OFL_TASKS_NORM_GP; | 531 | retval |= RCU_OFL_TASKS_NORM_GP; |
493 | if (rcu_preempted_readers_exp(rnp)) | 532 | if (rcu_preempted_readers_exp(rnp)) |
494 | retval |= RCU_OFL_TASKS_EXP_GP; | 533 | retval |= RCU_OFL_TASKS_EXP_GP; |
495 | for (i = 0; i < 4; i++) { | 534 | lp = &rnp->blkd_tasks; |
496 | lp = &rnp->blocked_tasks[i]; | 535 | lp_root = &rnp_root->blkd_tasks; |
497 | lp_root = &rnp_root->blocked_tasks[i]; | 536 | while (!list_empty(lp)) { |
498 | while (!list_empty(lp)) { | 537 | t = list_entry(lp->next, typeof(*t), rcu_node_entry); |
499 | tp = list_entry(lp->next, typeof(*tp), rcu_node_entry); | 538 | raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ |
500 | raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ | 539 | list_del(&t->rcu_node_entry); |
501 | list_del(&tp->rcu_node_entry); | 540 | t->rcu_blocked_node = rnp_root; |
502 | tp->rcu_blocked_node = rnp_root; | 541 | list_add(&t->rcu_node_entry, lp_root); |
503 | list_add(&tp->rcu_node_entry, lp_root); | 542 | if (&t->rcu_node_entry == rnp->gp_tasks) |
504 | raw_spin_unlock(&rnp_root->lock); /* irqs remain disabled */ | 543 | rnp_root->gp_tasks = rnp->gp_tasks; |
505 | } | 544 | if (&t->rcu_node_entry == rnp->exp_tasks) |
545 | rnp_root->exp_tasks = rnp->exp_tasks; | ||
546 | #ifdef CONFIG_RCU_BOOST | ||
547 | if (&t->rcu_node_entry == rnp->boost_tasks) | ||
548 | rnp_root->boost_tasks = rnp->boost_tasks; | ||
549 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
550 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ | ||
506 | } | 551 | } |
552 | |||
553 | #ifdef CONFIG_RCU_BOOST | ||
554 | /* In case root is being boosted and leaf is not. */ | ||
555 | raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ | ||
556 | if (rnp_root->boost_tasks != NULL && | ||
557 | rnp_root->boost_tasks != rnp_root->gp_tasks) | ||
558 | rnp_root->boost_tasks = rnp_root->gp_tasks; | ||
559 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ | ||
560 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
561 | |||
562 | rnp->gp_tasks = NULL; | ||
563 | rnp->exp_tasks = NULL; | ||
507 | return retval; | 564 | return retval; |
508 | } | 565 | } |
509 | 566 | ||
510 | /* | 567 | /* |
511 | * Do CPU-offline processing for preemptable RCU. | 568 | * Do CPU-offline processing for preemptible RCU. |
512 | */ | 569 | */ |
513 | static void rcu_preempt_offline_cpu(int cpu) | 570 | static void rcu_preempt_offline_cpu(int cpu) |
514 | { | 571 | { |
@@ -537,7 +594,7 @@ static void rcu_preempt_check_callbacks(int cpu) | |||
537 | } | 594 | } |
538 | 595 | ||
539 | /* | 596 | /* |
540 | * Process callbacks for preemptable RCU. | 597 | * Process callbacks for preemptible RCU. |
541 | */ | 598 | */ |
542 | static void rcu_preempt_process_callbacks(void) | 599 | static void rcu_preempt_process_callbacks(void) |
543 | { | 600 | { |
@@ -546,7 +603,7 @@ static void rcu_preempt_process_callbacks(void) | |||
546 | } | 603 | } |
547 | 604 | ||
548 | /* | 605 | /* |
549 | * Queue a preemptable-RCU callback for invocation after a grace period. | 606 | * Queue a preemptible-RCU callback for invocation after a grace period. |
550 | */ | 607 | */ |
551 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | 608 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) |
552 | { | 609 | { |
@@ -594,8 +651,7 @@ static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); | |||
594 | */ | 651 | */ |
595 | static int rcu_preempted_readers_exp(struct rcu_node *rnp) | 652 | static int rcu_preempted_readers_exp(struct rcu_node *rnp) |
596 | { | 653 | { |
597 | return !list_empty(&rnp->blocked_tasks[2]) || | 654 | return rnp->exp_tasks != NULL; |
598 | !list_empty(&rnp->blocked_tasks[3]); | ||
599 | } | 655 | } |
600 | 656 | ||
601 | /* | 657 | /* |
@@ -655,13 +711,17 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | |||
655 | static void | 711 | static void |
656 | sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) | 712 | sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) |
657 | { | 713 | { |
658 | int must_wait; | 714 | unsigned long flags; |
715 | int must_wait = 0; | ||
659 | 716 | ||
660 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ | 717 | raw_spin_lock_irqsave(&rnp->lock, flags); |
661 | list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]); | 718 | if (list_empty(&rnp->blkd_tasks)) |
662 | list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]); | 719 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
663 | must_wait = rcu_preempted_readers_exp(rnp); | 720 | else { |
664 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ | 721 | rnp->exp_tasks = rnp->blkd_tasks.next; |
722 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ | ||
723 | must_wait = 1; | ||
724 | } | ||
665 | if (!must_wait) | 725 | if (!must_wait) |
666 | rcu_report_exp_rnp(rsp, rnp); | 726 | rcu_report_exp_rnp(rsp, rnp); |
667 | } | 727 | } |
@@ -669,9 +729,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) | |||
669 | /* | 729 | /* |
670 | * Wait for an rcu-preempt grace period, but expedite it. The basic idea | 730 | * Wait for an rcu-preempt grace period, but expedite it. The basic idea |
671 | * is to invoke synchronize_sched_expedited() to push all the tasks to | 731 | * is to invoke synchronize_sched_expedited() to push all the tasks to |
672 | * the ->blocked_tasks[] lists, move all entries from the first set of | 732 | * the ->blkd_tasks lists and wait for this list to drain. |
673 | * ->blocked_tasks[] lists to the second set, and finally wait for this | ||
674 | * second set to drain. | ||
675 | */ | 733 | */ |
676 | void synchronize_rcu_expedited(void) | 734 | void synchronize_rcu_expedited(void) |
677 | { | 735 | { |
@@ -703,7 +761,7 @@ void synchronize_rcu_expedited(void) | |||
703 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | 761 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) |
704 | goto unlock_mb_ret; /* Others did our work for us. */ | 762 | goto unlock_mb_ret; /* Others did our work for us. */ |
705 | 763 | ||
706 | /* force all RCU readers onto blocked_tasks[]. */ | 764 | /* force all RCU readers onto ->blkd_tasks lists. */ |
707 | synchronize_sched_expedited(); | 765 | synchronize_sched_expedited(); |
708 | 766 | ||
709 | raw_spin_lock_irqsave(&rsp->onofflock, flags); | 767 | raw_spin_lock_irqsave(&rsp->onofflock, flags); |
@@ -715,7 +773,7 @@ void synchronize_rcu_expedited(void) | |||
715 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 773 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
716 | } | 774 | } |
717 | 775 | ||
718 | /* Snapshot current state of ->blocked_tasks[] lists. */ | 776 | /* Snapshot current state of ->blkd_tasks lists. */ |
719 | rcu_for_each_leaf_node(rsp, rnp) | 777 | rcu_for_each_leaf_node(rsp, rnp) |
720 | sync_rcu_preempt_exp_init(rsp, rnp); | 778 | sync_rcu_preempt_exp_init(rsp, rnp); |
721 | if (NUM_RCU_NODES > 1) | 779 | if (NUM_RCU_NODES > 1) |
@@ -723,7 +781,7 @@ void synchronize_rcu_expedited(void) | |||
723 | 781 | ||
724 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); | 782 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); |
725 | 783 | ||
726 | /* Wait for snapshotted ->blocked_tasks[] lists to drain. */ | 784 | /* Wait for snapshotted ->blkd_tasks lists to drain. */ |
727 | rnp = rcu_get_root(rsp); | 785 | rnp = rcu_get_root(rsp); |
728 | wait_event(sync_rcu_preempt_exp_wq, | 786 | wait_event(sync_rcu_preempt_exp_wq, |
729 | sync_rcu_preempt_exp_done(rnp)); | 787 | sync_rcu_preempt_exp_done(rnp)); |
@@ -739,7 +797,7 @@ mb_ret: | |||
739 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | 797 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); |
740 | 798 | ||
741 | /* | 799 | /* |
742 | * Check to see if there is any immediate preemptable-RCU-related work | 800 | * Check to see if there is any immediate preemptible-RCU-related work |
743 | * to be done. | 801 | * to be done. |
744 | */ | 802 | */ |
745 | static int rcu_preempt_pending(int cpu) | 803 | static int rcu_preempt_pending(int cpu) |
@@ -749,7 +807,7 @@ static int rcu_preempt_pending(int cpu) | |||
749 | } | 807 | } |
750 | 808 | ||
751 | /* | 809 | /* |
752 | * Does preemptable RCU need the CPU to stay out of dynticks mode? | 810 | * Does preemptible RCU need the CPU to stay out of dynticks mode? |
753 | */ | 811 | */ |
754 | static int rcu_preempt_needs_cpu(int cpu) | 812 | static int rcu_preempt_needs_cpu(int cpu) |
755 | { | 813 | { |
@@ -766,7 +824,7 @@ void rcu_barrier(void) | |||
766 | EXPORT_SYMBOL_GPL(rcu_barrier); | 824 | EXPORT_SYMBOL_GPL(rcu_barrier); |
767 | 825 | ||
768 | /* | 826 | /* |
769 | * Initialize preemptable RCU's per-CPU data. | 827 | * Initialize preemptible RCU's per-CPU data. |
770 | */ | 828 | */ |
771 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | 829 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) |
772 | { | 830 | { |
@@ -774,7 +832,7 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |||
774 | } | 832 | } |
775 | 833 | ||
776 | /* | 834 | /* |
777 | * Move preemptable RCU's callbacks from dying CPU to other online CPU. | 835 | * Move preemptible RCU's callbacks from dying CPU to other online CPU. |
778 | */ | 836 | */ |
779 | static void rcu_preempt_send_cbs_to_online(void) | 837 | static void rcu_preempt_send_cbs_to_online(void) |
780 | { | 838 | { |
@@ -782,7 +840,7 @@ static void rcu_preempt_send_cbs_to_online(void) | |||
782 | } | 840 | } |
783 | 841 | ||
784 | /* | 842 | /* |
785 | * Initialize preemptable RCU's state structures. | 843 | * Initialize preemptible RCU's state structures. |
786 | */ | 844 | */ |
787 | static void __init __rcu_init_preempt(void) | 845 | static void __init __rcu_init_preempt(void) |
788 | { | 846 | { |
@@ -790,7 +848,7 @@ static void __init __rcu_init_preempt(void) | |||
790 | } | 848 | } |
791 | 849 | ||
792 | /* | 850 | /* |
793 | * Check for a task exiting while in a preemptable-RCU read-side | 851 | * Check for a task exiting while in a preemptible-RCU read-side |
794 | * critical section; clean up if so. No need to issue warnings, | 852 | * critical section; clean up if so. No need to issue warnings, |
795 | * as debug_check_no_locks_held() already does this if lockdep | 853 | * as debug_check_no_locks_held() already does this if lockdep |
796 | * is enabled. | 854 | * is enabled. |
@@ -802,11 +860,13 @@ void exit_rcu(void) | |||
802 | if (t->rcu_read_lock_nesting == 0) | 860 | if (t->rcu_read_lock_nesting == 0) |
803 | return; | 861 | return; |
804 | t->rcu_read_lock_nesting = 1; | 862 | t->rcu_read_lock_nesting = 1; |
805 | rcu_read_unlock(); | 863 | __rcu_read_unlock(); |
806 | } | 864 | } |
807 | 865 | ||
808 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 866 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
809 | 867 | ||
868 | static struct rcu_state *rcu_state = &rcu_sched_state; | ||
869 | |||
810 | /* | 870 | /* |
811 | * Tell them what RCU they are running. | 871 | * Tell them what RCU they are running. |
812 | */ | 872 | */ |
@@ -836,7 +896,7 @@ void rcu_force_quiescent_state(void) | |||
836 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | 896 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); |
837 | 897 | ||
838 | /* | 898 | /* |
839 | * Because preemptable RCU does not exist, we never have to check for | 899 | * Because preemptible RCU does not exist, we never have to check for |
840 | * CPUs being in quiescent states. | 900 | * CPUs being in quiescent states. |
841 | */ | 901 | */ |
842 | static void rcu_preempt_note_context_switch(int cpu) | 902 | static void rcu_preempt_note_context_switch(int cpu) |
@@ -844,10 +904,10 @@ static void rcu_preempt_note_context_switch(int cpu) | |||
844 | } | 904 | } |
845 | 905 | ||
846 | /* | 906 | /* |
847 | * Because preemptable RCU does not exist, there are never any preempted | 907 | * Because preemptible RCU does not exist, there are never any preempted |
848 | * RCU readers. | 908 | * RCU readers. |
849 | */ | 909 | */ |
850 | static int rcu_preempted_readers(struct rcu_node *rnp) | 910 | static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) |
851 | { | 911 | { |
852 | return 0; | 912 | return 0; |
853 | } | 913 | } |
@@ -862,10 +922,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) | |||
862 | 922 | ||
863 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 923 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
864 | 924 | ||
865 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
866 | |||
867 | /* | 925 | /* |
868 | * Because preemptable RCU does not exist, we never have to check for | 926 | * Because preemptible RCU does not exist, we never have to check for |
869 | * tasks blocked within RCU read-side critical sections. | 927 | * tasks blocked within RCU read-side critical sections. |
870 | */ | 928 | */ |
871 | static void rcu_print_detail_task_stall(struct rcu_state *rsp) | 929 | static void rcu_print_detail_task_stall(struct rcu_state *rsp) |
@@ -873,7 +931,7 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp) | |||
873 | } | 931 | } |
874 | 932 | ||
875 | /* | 933 | /* |
876 | * Because preemptable RCU does not exist, we never have to check for | 934 | * Because preemptible RCU does not exist, we never have to check for |
877 | * tasks blocked within RCU read-side critical sections. | 935 | * tasks blocked within RCU read-side critical sections. |
878 | */ | 936 | */ |
879 | static void rcu_print_task_stall(struct rcu_node *rnp) | 937 | static void rcu_print_task_stall(struct rcu_node *rnp) |
@@ -888,10 +946,8 @@ static void rcu_preempt_stall_reset(void) | |||
888 | { | 946 | { |
889 | } | 947 | } |
890 | 948 | ||
891 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
892 | |||
893 | /* | 949 | /* |
894 | * Because there is no preemptable RCU, there can be no readers blocked, | 950 | * Because there is no preemptible RCU, there can be no readers blocked, |
895 | * so there is no need to check for blocked tasks. So check only for | 951 | * so there is no need to check for blocked tasks. So check only for |
896 | * bogus qsmask values. | 952 | * bogus qsmask values. |
897 | */ | 953 | */ |
@@ -903,7 +959,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |||
903 | #ifdef CONFIG_HOTPLUG_CPU | 959 | #ifdef CONFIG_HOTPLUG_CPU |
904 | 960 | ||
905 | /* | 961 | /* |
906 | * Because preemptable RCU does not exist, it never needs to migrate | 962 | * Because preemptible RCU does not exist, it never needs to migrate |
907 | * tasks that were blocked within RCU read-side critical sections, and | 963 | * tasks that were blocked within RCU read-side critical sections, and |
908 | * such non-existent tasks cannot possibly have been blocking the current | 964 | * such non-existent tasks cannot possibly have been blocking the current |
909 | * grace period. | 965 | * grace period. |
@@ -916,7 +972,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
916 | } | 972 | } |
917 | 973 | ||
918 | /* | 974 | /* |
919 | * Because preemptable RCU does not exist, it never needs CPU-offline | 975 | * Because preemptible RCU does not exist, it never needs CPU-offline |
920 | * processing. | 976 | * processing. |
921 | */ | 977 | */ |
922 | static void rcu_preempt_offline_cpu(int cpu) | 978 | static void rcu_preempt_offline_cpu(int cpu) |
@@ -926,7 +982,7 @@ static void rcu_preempt_offline_cpu(int cpu) | |||
926 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 982 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
927 | 983 | ||
928 | /* | 984 | /* |
929 | * Because preemptable RCU does not exist, it never has any callbacks | 985 | * Because preemptible RCU does not exist, it never has any callbacks |
930 | * to check. | 986 | * to check. |
931 | */ | 987 | */ |
932 | static void rcu_preempt_check_callbacks(int cpu) | 988 | static void rcu_preempt_check_callbacks(int cpu) |
@@ -934,7 +990,7 @@ static void rcu_preempt_check_callbacks(int cpu) | |||
934 | } | 990 | } |
935 | 991 | ||
936 | /* | 992 | /* |
937 | * Because preemptable RCU does not exist, it never has any callbacks | 993 | * Because preemptible RCU does not exist, it never has any callbacks |
938 | * to process. | 994 | * to process. |
939 | */ | 995 | */ |
940 | static void rcu_preempt_process_callbacks(void) | 996 | static void rcu_preempt_process_callbacks(void) |
@@ -943,7 +999,7 @@ static void rcu_preempt_process_callbacks(void) | |||
943 | 999 | ||
944 | /* | 1000 | /* |
945 | * Wait for an rcu-preempt grace period, but make it happen quickly. | 1001 | * Wait for an rcu-preempt grace period, but make it happen quickly. |
946 | * But because preemptable RCU does not exist, map to rcu-sched. | 1002 | * But because preemptible RCU does not exist, map to rcu-sched. |
947 | */ | 1003 | */ |
948 | void synchronize_rcu_expedited(void) | 1004 | void synchronize_rcu_expedited(void) |
949 | { | 1005 | { |
@@ -954,7 +1010,7 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | |||
954 | #ifdef CONFIG_HOTPLUG_CPU | 1010 | #ifdef CONFIG_HOTPLUG_CPU |
955 | 1011 | ||
956 | /* | 1012 | /* |
957 | * Because preemptable RCU does not exist, there is never any need to | 1013 | * Because preemptible RCU does not exist, there is never any need to |
958 | * report on tasks preempted in RCU read-side critical sections during | 1014 | * report on tasks preempted in RCU read-side critical sections during |
959 | * expedited RCU grace periods. | 1015 | * expedited RCU grace periods. |
960 | */ | 1016 | */ |
@@ -966,7 +1022,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | |||
966 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 1022 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
967 | 1023 | ||
968 | /* | 1024 | /* |
969 | * Because preemptable RCU does not exist, it never has any work to do. | 1025 | * Because preemptible RCU does not exist, it never has any work to do. |
970 | */ | 1026 | */ |
971 | static int rcu_preempt_pending(int cpu) | 1027 | static int rcu_preempt_pending(int cpu) |
972 | { | 1028 | { |
@@ -974,7 +1030,7 @@ static int rcu_preempt_pending(int cpu) | |||
974 | } | 1030 | } |
975 | 1031 | ||
976 | /* | 1032 | /* |
977 | * Because preemptable RCU does not exist, it never needs any CPU. | 1033 | * Because preemptible RCU does not exist, it never needs any CPU. |
978 | */ | 1034 | */ |
979 | static int rcu_preempt_needs_cpu(int cpu) | 1035 | static int rcu_preempt_needs_cpu(int cpu) |
980 | { | 1036 | { |
@@ -982,7 +1038,7 @@ static int rcu_preempt_needs_cpu(int cpu) | |||
982 | } | 1038 | } |
983 | 1039 | ||
984 | /* | 1040 | /* |
985 | * Because preemptable RCU does not exist, rcu_barrier() is just | 1041 | * Because preemptible RCU does not exist, rcu_barrier() is just |
986 | * another name for rcu_barrier_sched(). | 1042 | * another name for rcu_barrier_sched(). |
987 | */ | 1043 | */ |
988 | void rcu_barrier(void) | 1044 | void rcu_barrier(void) |
@@ -992,7 +1048,7 @@ void rcu_barrier(void) | |||
992 | EXPORT_SYMBOL_GPL(rcu_barrier); | 1048 | EXPORT_SYMBOL_GPL(rcu_barrier); |
993 | 1049 | ||
994 | /* | 1050 | /* |
995 | * Because preemptable RCU does not exist, there is no per-CPU | 1051 | * Because preemptible RCU does not exist, there is no per-CPU |
996 | * data to initialize. | 1052 | * data to initialize. |
997 | */ | 1053 | */ |
998 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | 1054 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) |
@@ -1000,14 +1056,14 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |||
1000 | } | 1056 | } |
1001 | 1057 | ||
1002 | /* | 1058 | /* |
1003 | * Because there is no preemptable RCU, there are no callbacks to move. | 1059 | * Because there is no preemptible RCU, there are no callbacks to move. |
1004 | */ | 1060 | */ |
1005 | static void rcu_preempt_send_cbs_to_online(void) | 1061 | static void rcu_preempt_send_cbs_to_online(void) |
1006 | { | 1062 | { |
1007 | } | 1063 | } |
1008 | 1064 | ||
1009 | /* | 1065 | /* |
1010 | * Because preemptable RCU does not exist, it need not be initialized. | 1066 | * Because preemptible RCU does not exist, it need not be initialized. |
1011 | */ | 1067 | */ |
1012 | static void __init __rcu_init_preempt(void) | 1068 | static void __init __rcu_init_preempt(void) |
1013 | { | 1069 | { |
@@ -1015,6 +1071,276 @@ static void __init __rcu_init_preempt(void) | |||
1015 | 1071 | ||
1016 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | 1072 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ |
1017 | 1073 | ||
1074 | #ifdef CONFIG_RCU_BOOST | ||
1075 | |||
1076 | #include "rtmutex_common.h" | ||
1077 | |||
1078 | #ifdef CONFIG_RCU_TRACE | ||
1079 | |||
1080 | static void rcu_initiate_boost_trace(struct rcu_node *rnp) | ||
1081 | { | ||
1082 | if (list_empty(&rnp->blkd_tasks)) | ||
1083 | rnp->n_balk_blkd_tasks++; | ||
1084 | else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) | ||
1085 | rnp->n_balk_exp_gp_tasks++; | ||
1086 | else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL) | ||
1087 | rnp->n_balk_boost_tasks++; | ||
1088 | else if (rnp->gp_tasks != NULL && rnp->qsmask != 0) | ||
1089 | rnp->n_balk_notblocked++; | ||
1090 | else if (rnp->gp_tasks != NULL && | ||
1091 | ULONG_CMP_LT(jiffies, rnp->boost_time)) | ||
1092 | rnp->n_balk_notyet++; | ||
1093 | else | ||
1094 | rnp->n_balk_nos++; | ||
1095 | } | ||
1096 | |||
1097 | #else /* #ifdef CONFIG_RCU_TRACE */ | ||
1098 | |||
1099 | static void rcu_initiate_boost_trace(struct rcu_node *rnp) | ||
1100 | { | ||
1101 | } | ||
1102 | |||
1103 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ | ||
1104 | |||
1105 | /* | ||
1106 | * Carry out RCU priority boosting on the task indicated by ->exp_tasks | ||
1107 | * or ->boost_tasks, advancing the pointer to the next task in the | ||
1108 | * ->blkd_tasks list. | ||
1109 | * | ||
1110 | * Note that irqs must be enabled: boosting the task can block. | ||
1111 | * Returns 1 if there are more tasks needing to be boosted. | ||
1112 | */ | ||
1113 | static int rcu_boost(struct rcu_node *rnp) | ||
1114 | { | ||
1115 | unsigned long flags; | ||
1116 | struct rt_mutex mtx; | ||
1117 | struct task_struct *t; | ||
1118 | struct list_head *tb; | ||
1119 | |||
1120 | if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) | ||
1121 | return 0; /* Nothing left to boost. */ | ||
1122 | |||
1123 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1124 | |||
1125 | /* | ||
1126 | * Recheck under the lock: all tasks in need of boosting | ||
1127 | * might exit their RCU read-side critical sections on their own. | ||
1128 | */ | ||
1129 | if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { | ||
1130 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1131 | return 0; | ||
1132 | } | ||
1133 | |||
1134 | /* | ||
1135 | * Preferentially boost tasks blocking expedited grace periods. | ||
1136 | * This cannot starve the normal grace periods because a second | ||
1137 | * expedited grace period must boost all blocked tasks, including | ||
1138 | * those blocking the pre-existing normal grace period. | ||
1139 | */ | ||
1140 | if (rnp->exp_tasks != NULL) { | ||
1141 | tb = rnp->exp_tasks; | ||
1142 | rnp->n_exp_boosts++; | ||
1143 | } else { | ||
1144 | tb = rnp->boost_tasks; | ||
1145 | rnp->n_normal_boosts++; | ||
1146 | } | ||
1147 | rnp->n_tasks_boosted++; | ||
1148 | |||
1149 | /* | ||
1150 | * We boost task t by manufacturing an rt_mutex that appears to | ||
1151 | * be held by task t. We leave a pointer to that rt_mutex where | ||
1152 | * task t can find it, and task t will release the mutex when it | ||
1153 | * exits its outermost RCU read-side critical section. Then | ||
1154 | * simply acquiring this artificial rt_mutex will boost task | ||
1155 | * t's priority. (Thanks to tglx for suggesting this approach!) | ||
1156 | * | ||
1157 | * Note that task t must acquire rnp->lock to remove itself from | ||
1158 | * the ->blkd_tasks list, which it will do from exit() if from | ||
1159 | * nowhere else. We therefore are guaranteed that task t will | ||
1160 | * stay around at least until we drop rnp->lock. Note that | ||
1161 | * rnp->lock also resolves races between our priority boosting | ||
1162 | * and task t's exiting its outermost RCU read-side critical | ||
1163 | * section. | ||
1164 | */ | ||
1165 | t = container_of(tb, struct task_struct, rcu_node_entry); | ||
1166 | rt_mutex_init_proxy_locked(&mtx, t); | ||
1167 | t->rcu_boost_mutex = &mtx; | ||
1168 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED; | ||
1169 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1170 | rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */ | ||
1171 | rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ | ||
1172 | |||
1173 | return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL; | ||
1174 | } | ||
1175 | |||
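The trick in rcu_boost() is that rt_mutex_init_proxy_locked() makes mtx appear to be held by the preempted reader, so when the high-priority booster kthread calls rt_mutex_lock(), ordinary priority inheritance lends the booster's priority to that reader until it releases the mutex in its outermost rcu_read_unlock(). The inheritance half of this can be seen in plain user space with a PTHREAD_PRIO_INHERIT mutex (a sketch of the analogous behavior, not the kernel mechanism itself):

	#include <pthread.h>

	static pthread_mutex_t boost_mtx;

	static void init_pi_mutex(void)
	{
		pthread_mutexattr_t attr;

		pthread_mutexattr_init(&attr);
		pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
		pthread_mutex_init(&boost_mtx, &attr);
	}

	/* Low-priority "reader": holds the lock across its critical section. */
	static void reader_section(void)
	{
		pthread_mutex_lock(&boost_mtx);
		/* ... runs at the booster's priority while contended ... */
		pthread_mutex_unlock(&boost_mtx);	/* boost ends here */
	}

	/* High-priority "booster": merely acquiring the lock boosts the holder. */
	static void booster(void)
	{
		pthread_mutex_lock(&boost_mtx);
		pthread_mutex_unlock(&boost_mtx);	/* holder was boosted while we waited */
	}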
1176 | /* | ||
1177 | * Timer handler to initiate wakeup of boost kthreads that | ||
1178 | * have yielded the CPU due to an excessive number of tasks to | ||
1179 | * boost. We wake up the per-rcu_node kthread, which in turn | ||
1180 | * will wake up the booster kthread. | ||
1181 | */ | ||
1182 | static void rcu_boost_kthread_timer(unsigned long arg) | ||
1183 | { | ||
1184 | invoke_rcu_node_kthread((struct rcu_node *)arg); | ||
1185 | } | ||
1186 | |||
1187 | /* | ||
1188 | * Priority-boosting kthread. One per leaf rcu_node and one for the | ||
1189 | * root rcu_node. | ||
1190 | */ | ||
1191 | static int rcu_boost_kthread(void *arg) | ||
1192 | { | ||
1193 | struct rcu_node *rnp = (struct rcu_node *)arg; | ||
1194 | int spincnt = 0; | ||
1195 | int more2boost; | ||
1196 | |||
1197 | for (;;) { | ||
1198 | rnp->boost_kthread_status = RCU_KTHREAD_WAITING; | ||
1199 | wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks || | ||
1200 | rnp->exp_tasks); | ||
1201 | rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; | ||
1202 | more2boost = rcu_boost(rnp); | ||
1203 | if (more2boost) | ||
1204 | spincnt++; | ||
1205 | else | ||
1206 | spincnt = 0; | ||
1207 | if (spincnt > 10) { | ||
1208 | rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp); | ||
1209 | spincnt = 0; | ||
1210 | } | ||
1211 | } | ||
1212 | /* NOTREACHED */ | ||
1213 | return 0; | ||
1214 | } | ||
1215 | |||
1216 | /* | ||
1217 | * Check to see if it is time to start boosting RCU readers that are | ||
1218 | * blocking the current grace period, and, if so, tell the per-rcu_node | ||
1219 | * kthread to start boosting them. If there is an expedited grace | ||
1220 | * period in progress, it is always time to boost. | ||
1221 | * | ||
1222 | * The caller must hold rnp->lock, which this function releases, | ||
1223 | * but irqs remain disabled. The ->boost_kthread_task is immortal, | ||
1224 | * so we don't need to worry about it going away. | ||
1225 | */ | ||
1226 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) | ||
1227 | { | ||
1228 | struct task_struct *t; | ||
1229 | |||
1230 | if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { | ||
1231 | rnp->n_balk_exp_gp_tasks++; | ||
1232 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1233 | return; | ||
1234 | } | ||
1235 | if (rnp->exp_tasks != NULL || | ||
1236 | (rnp->gp_tasks != NULL && | ||
1237 | rnp->boost_tasks == NULL && | ||
1238 | rnp->qsmask == 0 && | ||
1239 | ULONG_CMP_GE(jiffies, rnp->boost_time))) { | ||
1240 | if (rnp->exp_tasks == NULL) | ||
1241 | rnp->boost_tasks = rnp->gp_tasks; | ||
1242 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1243 | t = rnp->boost_kthread_task; | ||
1244 | if (t != NULL) | ||
1245 | wake_up_process(t); | ||
1246 | } else { | ||
1247 | rcu_initiate_boost_trace(rnp); | ||
1248 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1249 | } | ||
1250 | } | ||
1251 | |||
1252 | /* | ||
1253 | * Set the affinity of the boost kthread. The CPU-hotplug locks are | ||
1254 | * held, so no one should be messing with the existence of the boost | ||
1255 | * kthread. | ||
1256 | */ | ||
1257 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, | ||
1258 | cpumask_var_t cm) | ||
1259 | { | ||
1260 | struct task_struct *t; | ||
1261 | |||
1262 | t = rnp->boost_kthread_task; | ||
1263 | if (t != NULL) | ||
1264 | set_cpus_allowed_ptr(rnp->boost_kthread_task, cm); | ||
1265 | } | ||
1266 | |||
1267 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) | ||
1268 | |||
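For concreteness, CONFIG_RCU_BOOST_DELAY is expressed in milliseconds, so with, say, a configured delay of 500 and HZ=250 this macro yields DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies: boosting is not considered until a grace period has been blocked for roughly half a second.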
1269 | /* | ||
1270 | * Do priority-boost accounting for the start of a new grace period. | ||
1271 | */ | ||
1272 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) | ||
1273 | { | ||
1274 | rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; | ||
1275 | } | ||
1276 | |||
1277 | /* | ||
1278 | * Initialize the RCU-boost waitqueue. | ||
1279 | */ | ||
1280 | static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp) | ||
1281 | { | ||
1282 | init_waitqueue_head(&rnp->boost_wq); | ||
1283 | } | ||
1284 | |||
1285 | /* | ||
1286 | * Create an RCU-boost kthread for the specified node if one does not | ||
1287 | * already exist. We only create this kthread for preemptible RCU. | ||
1288 | * Returns zero if all is well, a negated errno otherwise. | ||
1289 | */ | ||
1290 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | ||
1291 | struct rcu_node *rnp, | ||
1292 | int rnp_index) | ||
1293 | { | ||
1294 | unsigned long flags; | ||
1295 | struct sched_param sp; | ||
1296 | struct task_struct *t; | ||
1297 | |||
1298 | if (&rcu_preempt_state != rsp) | ||
1299 | return 0; | ||
1300 | if (rnp->boost_kthread_task != NULL) | ||
1301 | return 0; | ||
1302 | t = kthread_create(rcu_boost_kthread, (void *)rnp, | ||
1303 | "rcub%d", rnp_index); | ||
1304 | if (IS_ERR(t)) | ||
1305 | return PTR_ERR(t); | ||
1306 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1307 | rnp->boost_kthread_task = t; | ||
1308 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1309 | wake_up_process(t); | ||
1310 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1311 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1312 | return 0; | ||
1313 | } | ||
1314 | |||
1315 | #else /* #ifdef CONFIG_RCU_BOOST */ | ||
1316 | |||
1317 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) | ||
1318 | { | ||
1319 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1320 | } | ||
1321 | |||
1322 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, | ||
1323 | cpumask_var_t cm) | ||
1324 | { | ||
1325 | } | ||
1326 | |||
1327 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) | ||
1328 | { | ||
1329 | } | ||
1330 | |||
1331 | static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp) | ||
1332 | { | ||
1333 | } | ||
1334 | |||
1335 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | ||
1336 | struct rcu_node *rnp, | ||
1337 | int rnp_index) | ||
1338 | { | ||
1339 | return 0; | ||
1340 | } | ||
1341 | |||
1342 | #endif /* #else #ifdef CONFIG_RCU_BOOST */ | ||
1343 | |||
1018 | #ifndef CONFIG_SMP | 1344 | #ifndef CONFIG_SMP |
1019 | 1345 | ||
1020 | void synchronize_sched_expedited(void) | 1346 | void synchronize_sched_expedited(void) |
@@ -1187,8 +1513,8 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); | |||
1187 | * | 1513 | * |
1188 | * Because it is not legal to invoke rcu_process_callbacks() with irqs | 1514 | * Because it is not legal to invoke rcu_process_callbacks() with irqs |
1189 | * disabled, we do one pass of force_quiescent_state(), then do a | 1515 | * disabled, we do one pass of force_quiescent_state(), then do a |
1190 | * raise_softirq() to cause rcu_process_callbacks() to be invoked later. | 1516 | * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked |
1191 | * The per-cpu rcu_dyntick_drain variable controls the sequencing. | 1517 | * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. |
1192 | */ | 1518 | */ |
1193 | int rcu_needs_cpu(int cpu) | 1519 | int rcu_needs_cpu(int cpu) |
1194 | { | 1520 | { |
@@ -1239,7 +1565,7 @@ int rcu_needs_cpu(int cpu) | |||
1239 | 1565 | ||
1240 | /* If RCU callbacks are still pending, RCU still needs this CPU. */ | 1566 | /* If RCU callbacks are still pending, RCU still needs this CPU. */ |
1241 | if (c) | 1567 | if (c) |
1242 | raise_softirq(RCU_SOFTIRQ); | 1568 | invoke_rcu_cpu_kthread(); |
1243 | return c; | 1569 | return c; |
1244 | } | 1570 | } |
1245 | 1571 | ||
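This hunk is part of the same series' move of RCU callback processing out of RCU_SOFTIRQ and into per-CPU kthreads, so the kick becomes a kthread wakeup rather than raise_softirq(). The general shape of such a kick helper is a per-CPU "work pending" flag plus wake_up_process(); a rough sketch (has_work and worker_task are illustrative, not the kernel's actual per-CPU variables):

	static DEFINE_PER_CPU(int, has_work);
	static DEFINE_PER_CPU(struct task_struct *, worker_task);

	static void invoke_worker(void)
	{
		unsigned long flags;
		struct task_struct *t;

		local_irq_save(flags);			/* stay on this CPU */
		per_cpu(has_work, smp_processor_id()) = 1;
		t = per_cpu(worker_task, smp_processor_id());
		if (t)
			wake_up_process(t);
		local_irq_restore(flags);
	}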
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index c8e97853b970..aa0fd72b4bc7 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c | |||
@@ -46,6 +46,18 @@ | |||
46 | #define RCU_TREE_NONCORE | 46 | #define RCU_TREE_NONCORE |
47 | #include "rcutree.h" | 47 | #include "rcutree.h" |
48 | 48 | ||
49 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); | ||
50 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu); | ||
51 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); | ||
52 | DECLARE_PER_CPU(char, rcu_cpu_has_work); | ||
53 | |||
54 | static char convert_kthread_status(unsigned int kthread_status) | ||
55 | { | ||
56 | if (kthread_status > RCU_KTHREAD_MAX) | ||
57 | return '?'; | ||
58 | return "SRWOY"[kthread_status]; | ||
59 | } | ||
60 | |||
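convert_kthread_status() and the qs=%c%c%c%c field below both rely on the same idiom: a string literal is an array, so subscripting it with a boolean or a small enum value picks out one status character. In isolation (a standalone example; the status value 2 is just illustrative):

	#include <stdio.h>

	int main(void)
	{
		int queued = 1;
		unsigned int status = 2;	/* third state in "SRWOY" */

		/* '.' when the flag is clear, 'N' when it is set */
		printf("%c\n", ".N"[queued]);		/* prints: N */

		/* map a small status number to one character */
		printf("%c\n", "SRWOY"[status]);	/* prints: W */
		return 0;
	}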
49 | static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | 61 | static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) |
50 | { | 62 | { |
51 | if (!rdp->beenonline) | 63 | if (!rdp->beenonline) |
@@ -64,7 +76,21 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | |||
64 | rdp->dynticks_fqs); | 76 | rdp->dynticks_fqs); |
65 | #endif /* #ifdef CONFIG_NO_HZ */ | 77 | #endif /* #ifdef CONFIG_NO_HZ */ |
66 | seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); | 78 | seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); |
67 | seq_printf(m, " ql=%ld b=%ld", rdp->qlen, rdp->blimit); | 79 | seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld", |
80 | rdp->qlen, | ||
81 | ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != | ||
82 | rdp->nxttail[RCU_NEXT_TAIL]], | ||
83 | ".R"[rdp->nxttail[RCU_WAIT_TAIL] != | ||
84 | rdp->nxttail[RCU_NEXT_READY_TAIL]], | ||
85 | ".W"[rdp->nxttail[RCU_DONE_TAIL] != | ||
86 | rdp->nxttail[RCU_WAIT_TAIL]], | ||
87 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], | ||
88 | per_cpu(rcu_cpu_has_work, rdp->cpu), | ||
89 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, | ||
90 | rdp->cpu)), | ||
91 | per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), | ||
92 | per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff, | ||
93 | rdp->blimit); | ||
68 | seq_printf(m, " ci=%lu co=%lu ca=%lu\n", | 94 | seq_printf(m, " ci=%lu co=%lu ca=%lu\n", |
69 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); | 95 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); |
70 | } | 96 | } |
@@ -121,7 +147,18 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) | |||
121 | rdp->dynticks_fqs); | 147 | rdp->dynticks_fqs); |
122 | #endif /* #ifdef CONFIG_NO_HZ */ | 148 | #endif /* #ifdef CONFIG_NO_HZ */ |
123 | seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); | 149 | seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); |
124 | seq_printf(m, ",%ld,%ld", rdp->qlen, rdp->blimit); | 150 | seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen, |
151 | ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != | ||
152 | rdp->nxttail[RCU_NEXT_TAIL]], | ||
153 | ".R"[rdp->nxttail[RCU_WAIT_TAIL] != | ||
154 | rdp->nxttail[RCU_NEXT_READY_TAIL]], | ||
155 | ".W"[rdp->nxttail[RCU_DONE_TAIL] != | ||
156 | rdp->nxttail[RCU_WAIT_TAIL]], | ||
157 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], | ||
158 | per_cpu(rcu_cpu_has_work, rdp->cpu), | ||
159 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, | ||
160 | rdp->cpu)), | ||
161 | rdp->blimit); | ||
125 | seq_printf(m, ",%lu,%lu,%lu\n", | 162 | seq_printf(m, ",%lu,%lu,%lu\n", |
126 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); | 163 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); |
127 | } | 164 | } |
@@ -157,11 +194,76 @@ static const struct file_operations rcudata_csv_fops = { | |||
157 | .release = single_release, | 194 | .release = single_release, |
158 | }; | 195 | }; |
159 | 196 | ||
197 | #ifdef CONFIG_RCU_BOOST | ||
198 | |||
199 | static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp) | ||
200 | { | ||
201 | seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu " | ||
202 | "j=%04x bt=%04x\n", | ||
203 | rnp->grplo, rnp->grphi, | ||
204 | "T."[list_empty(&rnp->blkd_tasks)], | ||
205 | "N."[!rnp->gp_tasks], | ||
206 | "E."[!rnp->exp_tasks], | ||
207 | "B."[!rnp->boost_tasks], | ||
208 | convert_kthread_status(rnp->boost_kthread_status), | ||
209 | rnp->n_tasks_boosted, rnp->n_exp_boosts, | ||
210 | rnp->n_normal_boosts, | ||
211 | (int)(jiffies & 0xffff), | ||
212 | (int)(rnp->boost_time & 0xffff)); | ||
213 | seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n", | ||
214 | " balk", | ||
215 | rnp->n_balk_blkd_tasks, | ||
216 | rnp->n_balk_exp_gp_tasks, | ||
217 | rnp->n_balk_boost_tasks, | ||
218 | rnp->n_balk_notblocked, | ||
219 | rnp->n_balk_notyet, | ||
220 | rnp->n_balk_nos); | ||
221 | } | ||
222 | |||
223 | static int show_rcu_node_boost(struct seq_file *m, void *unused) | ||
224 | { | ||
225 | struct rcu_node *rnp; | ||
226 | |||
227 | rcu_for_each_leaf_node(&rcu_preempt_state, rnp) | ||
228 | print_one_rcu_node_boost(m, rnp); | ||
229 | return 0; | ||
230 | } | ||
231 | |||
232 | static int rcu_node_boost_open(struct inode *inode, struct file *file) | ||
233 | { | ||
234 | return single_open(file, show_rcu_node_boost, NULL); | ||
235 | } | ||
236 | |||
237 | static const struct file_operations rcu_node_boost_fops = { | ||
238 | .owner = THIS_MODULE, | ||
239 | .open = rcu_node_boost_open, | ||
240 | .read = seq_read, | ||
241 | .llseek = seq_lseek, | ||
242 | .release = single_release, | ||
243 | }; | ||
244 | |||
245 | /* | ||
246 | * Create the rcuboost debugfs entry. Standard error return. | ||
247 | */ | ||
248 | static int rcu_boost_trace_create_file(struct dentry *rcudir) | ||
249 | { | ||
250 | return !debugfs_create_file("rcuboost", 0444, rcudir, NULL, | ||
251 | &rcu_node_boost_fops); | ||
252 | } | ||
253 | |||
254 | #else /* #ifdef CONFIG_RCU_BOOST */ | ||
255 | |||
256 | static int rcu_boost_trace_create_file(struct dentry *rcudir) | ||
257 | { | ||
258 | return 0; /* There cannot be an error if we didn't create it! */ | ||
259 | } | ||
260 | |||
261 | #endif /* #else #ifdef CONFIG_RCU_BOOST */ | ||
262 | |||
160 | static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | 263 | static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) |
161 | { | 264 | { |
162 | unsigned long gpnum; | 265 | unsigned long gpnum; |
163 | int level = 0; | 266 | int level = 0; |
164 | int phase; | ||
165 | struct rcu_node *rnp; | 267 | struct rcu_node *rnp; |
166 | 268 | ||
167 | gpnum = rsp->gpnum; | 269 | gpnum = rsp->gpnum; |
@@ -178,13 +280,11 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | |||
178 | seq_puts(m, "\n"); | 280 | seq_puts(m, "\n"); |
179 | level = rnp->level; | 281 | level = rnp->level; |
180 | } | 282 | } |
181 | phase = gpnum & 0x1; | 283 | seq_printf(m, "%lx/%lx %c%c>%c %d:%d ^%d ", |
182 | seq_printf(m, "%lx/%lx %c%c>%c%c %d:%d ^%d ", | ||
183 | rnp->qsmask, rnp->qsmaskinit, | 284 | rnp->qsmask, rnp->qsmaskinit, |
184 | "T."[list_empty(&rnp->blocked_tasks[phase])], | 285 | ".G"[rnp->gp_tasks != NULL], |
185 | "E."[list_empty(&rnp->blocked_tasks[phase + 2])], | 286 | ".E"[rnp->exp_tasks != NULL], |
186 | "T."[list_empty(&rnp->blocked_tasks[!phase])], | 287 | ".T"[!list_empty(&rnp->blkd_tasks)], |
187 | "E."[list_empty(&rnp->blocked_tasks[!phase + 2])], | ||
188 | rnp->grplo, rnp->grphi, rnp->grpnum); | 288 | rnp->grplo, rnp->grphi, rnp->grpnum); |
189 | } | 289 | } |
190 | seq_puts(m, "\n"); | 290 | seq_puts(m, "\n"); |
@@ -216,16 +316,35 @@ static const struct file_operations rcuhier_fops = { | |||
216 | .release = single_release, | 316 | .release = single_release, |
217 | }; | 317 | }; |
218 | 318 | ||
319 | static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp) | ||
320 | { | ||
321 | unsigned long flags; | ||
322 | unsigned long completed; | ||
323 | unsigned long gpnum; | ||
324 | unsigned long gpage; | ||
325 | unsigned long gpmax; | ||
326 | struct rcu_node *rnp = &rsp->node[0]; | ||
327 | |||
328 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
329 | completed = rsp->completed; | ||
330 | gpnum = rsp->gpnum; | ||
331 | if (rsp->completed == rsp->gpnum) | ||
332 | gpage = 0; | ||
333 | else | ||
334 | gpage = jiffies - rsp->gp_start; | ||
335 | gpmax = rsp->gp_max; | ||
336 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
337 | seq_printf(m, "%s: completed=%ld gpnum=%lu age=%ld max=%ld\n", | ||
338 | rsp->name, completed, gpnum, gpage, gpmax); | ||
339 | } | ||
340 | |||
219 | static int show_rcugp(struct seq_file *m, void *unused) | 341 | static int show_rcugp(struct seq_file *m, void *unused) |
220 | { | 342 | { |
221 | #ifdef CONFIG_TREE_PREEMPT_RCU | 343 | #ifdef CONFIG_TREE_PREEMPT_RCU |
222 | seq_printf(m, "rcu_preempt: completed=%ld gpnum=%lu\n", | 344 | show_one_rcugp(m, &rcu_preempt_state); |
223 | rcu_preempt_state.completed, rcu_preempt_state.gpnum); | ||
224 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 345 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
225 | seq_printf(m, "rcu_sched: completed=%ld gpnum=%lu\n", | 346 | show_one_rcugp(m, &rcu_sched_state); |
226 | rcu_sched_state.completed, rcu_sched_state.gpnum); | 347 | show_one_rcugp(m, &rcu_bh_state); |
227 | seq_printf(m, "rcu_bh: completed=%ld gpnum=%lu\n", | ||
228 | rcu_bh_state.completed, rcu_bh_state.gpnum); | ||
229 | return 0; | 348 | return 0; |
230 | } | 349 | } |
231 | 350 | ||
@@ -298,6 +417,29 @@ static const struct file_operations rcu_pending_fops = { | |||
298 | .release = single_release, | 417 | .release = single_release, |
299 | }; | 418 | }; |
300 | 419 | ||
420 | static int show_rcutorture(struct seq_file *m, void *unused) | ||
421 | { | ||
422 | seq_printf(m, "rcutorture test sequence: %lu %s\n", | ||
423 | rcutorture_testseq >> 1, | ||
424 | (rcutorture_testseq & 0x1) ? "(test in progress)" : ""); | ||
425 | seq_printf(m, "rcutorture update version number: %lu\n", | ||
426 | rcutorture_vernum); | ||
427 | return 0; | ||
428 | } | ||
429 | |||
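rcutorture_testseq packs an in-progress flag into bit 0 and a run counter into the remaining bits, which is exactly what the two expressions above unpack. With an illustrative raw value:

	unsigned long seq = 7;			/* example raw counter */
	unsigned long runs = seq >> 1;		/* 3 runs recorded */
	int in_progress = seq & 0x1;		/* 1: a test is running now */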
430 | static int rcutorture_open(struct inode *inode, struct file *file) | ||
431 | { | ||
432 | return single_open(file, show_rcutorture, NULL); | ||
433 | } | ||
434 | |||
435 | static const struct file_operations rcutorture_fops = { | ||
436 | .owner = THIS_MODULE, | ||
437 | .open = rcutorture_open, | ||
438 | .read = seq_read, | ||
439 | .llseek = seq_lseek, | ||
440 | .release = single_release, | ||
441 | }; | ||
442 | |||
301 | static struct dentry *rcudir; | 443 | static struct dentry *rcudir; |
302 | 444 | ||
303 | static int __init rcutree_trace_init(void) | 445 | static int __init rcutree_trace_init(void) |
@@ -318,6 +460,9 @@ static int __init rcutree_trace_init(void) | |||
318 | if (!retval) | 460 | if (!retval) |
319 | goto free_out; | 461 | goto free_out; |
320 | 462 | ||
463 | if (rcu_boost_trace_create_file(rcudir)) | ||
464 | goto free_out; | ||
465 | |||
321 | retval = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops); | 466 | retval = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops); |
322 | if (!retval) | 467 | if (!retval) |
323 | goto free_out; | 468 | goto free_out; |
@@ -331,6 +476,11 @@ static int __init rcutree_trace_init(void) | |||
331 | NULL, &rcu_pending_fops); | 476 | NULL, &rcu_pending_fops); |
332 | if (!retval) | 477 | if (!retval) |
333 | goto free_out; | 478 | goto free_out; |
479 | |||
480 | retval = debugfs_create_file("rcutorture", 0444, rcudir, | ||
481 | NULL, &rcutorture_fops); | ||
482 | if (!retval) | ||
483 | goto free_out; | ||
334 | return 0; | 484 | return 0; |
335 | free_out: | 485 | free_out: |
336 | debugfs_remove_recursive(rcudir); | 486 | debugfs_remove_recursive(rcudir); |
diff --git a/kernel/sched.c b/kernel/sched.c index 312f8b95c2d4..c62acf45d3b9 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -231,7 +231,7 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) | |||
231 | #endif | 231 | #endif |
232 | 232 | ||
233 | /* | 233 | /* |
234 | * sched_domains_mutex serializes calls to arch_init_sched_domains, | 234 | * sched_domains_mutex serializes calls to init_sched_domains, |
235 | * detach_destroy_domains and partition_sched_domains. | 235 | * detach_destroy_domains and partition_sched_domains. |
236 | */ | 236 | */ |
237 | static DEFINE_MUTEX(sched_domains_mutex); | 237 | static DEFINE_MUTEX(sched_domains_mutex); |
@@ -312,6 +312,9 @@ struct cfs_rq { | |||
312 | 312 | ||
313 | u64 exec_clock; | 313 | u64 exec_clock; |
314 | u64 min_vruntime; | 314 | u64 min_vruntime; |
315 | #ifndef CONFIG_64BIT | ||
316 | u64 min_vruntime_copy; | ||
317 | #endif | ||
315 | 318 | ||
316 | struct rb_root tasks_timeline; | 319 | struct rb_root tasks_timeline; |
317 | struct rb_node *rb_leftmost; | 320 | struct rb_node *rb_leftmost; |
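The new min_vruntime_copy exists because a 32-bit CPU cannot load a u64 atomically: a lockless reader pairs the two fields with barriers to detect a torn read and retry, much like a hand-rolled seqlock. A sketch of the protocol, assuming a single writer (names are illustrative; the real accessors live in the fair-scheduler code):

	u64 value, value_copy;		/* value_copy trails value */

	void writer_update(u64 val)
	{
		value = val;
		smp_wmb();		/* order the two stores */
		value_copy = val;
	}

	u64 reader_load(void)		/* lockless, safe on 32-bit */
	{
		u64 v, c;

		do {
			c = value_copy;
			smp_rmb();	/* pairs with the smp_wmb() above */
			v = value;
		} while (v != c);	/* copies disagree: torn read, retry */
		return v;
	}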
@@ -325,7 +328,9 @@ struct cfs_rq { | |||
325 | */ | 328 | */ |
326 | struct sched_entity *curr, *next, *last, *skip; | 329 | struct sched_entity *curr, *next, *last, *skip; |
327 | 330 | ||
331 | #ifdef CONFIG_SCHED_DEBUG | ||
328 | unsigned int nr_spread_over; | 332 | unsigned int nr_spread_over; |
333 | #endif | ||
329 | 334 | ||
330 | #ifdef CONFIG_FAIR_GROUP_SCHED | 335 | #ifdef CONFIG_FAIR_GROUP_SCHED |
331 | struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ | 336 | struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ |
@@ -417,6 +422,7 @@ struct rt_rq { | |||
417 | */ | 422 | */ |
418 | struct root_domain { | 423 | struct root_domain { |
419 | atomic_t refcount; | 424 | atomic_t refcount; |
425 | struct rcu_head rcu; | ||
420 | cpumask_var_t span; | 426 | cpumask_var_t span; |
421 | cpumask_var_t online; | 427 | cpumask_var_t online; |
422 | 428 | ||
@@ -460,7 +466,7 @@ struct rq { | |||
460 | u64 nohz_stamp; | 466 | u64 nohz_stamp; |
461 | unsigned char nohz_balance_kick; | 467 | unsigned char nohz_balance_kick; |
462 | #endif | 468 | #endif |
463 | unsigned int skip_clock_update; | 469 | int skip_clock_update; |
464 | 470 | ||
465 | /* capture load from *all* tasks on this cpu: */ | 471 | /* capture load from *all* tasks on this cpu: */ |
466 | struct load_weight load; | 472 | struct load_weight load; |
@@ -553,6 +559,10 @@ struct rq { | |||
553 | unsigned int ttwu_count; | 559 | unsigned int ttwu_count; |
554 | unsigned int ttwu_local; | 560 | unsigned int ttwu_local; |
555 | #endif | 561 | #endif |
562 | |||
563 | #ifdef CONFIG_SMP | ||
564 | struct task_struct *wake_list; | ||
565 | #endif | ||
556 | }; | 566 | }; |
557 | 567 | ||
558 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); | 568 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
@@ -571,7 +581,7 @@ static inline int cpu_of(struct rq *rq) | |||
571 | 581 | ||
572 | #define rcu_dereference_check_sched_domain(p) \ | 582 | #define rcu_dereference_check_sched_domain(p) \ |
573 | rcu_dereference_check((p), \ | 583 | rcu_dereference_check((p), \ |
574 | rcu_read_lock_sched_held() || \ | 584 | rcu_read_lock_held() || \ |
575 | lockdep_is_held(&sched_domains_mutex)) | 585 | lockdep_is_held(&sched_domains_mutex)) |
576 | 586 | ||
577 | /* | 587 | /* |
@@ -596,7 +606,7 @@ static inline int cpu_of(struct rq *rq) | |||
596 | * Return the group to which this task belongs. | 606 | * Return the group to which this task belongs. |
597 | * | 607 | * |
598 | * We use task_subsys_state_check() and extend the RCU verification | 608 | * We use task_subsys_state_check() and extend the RCU verification |
599 | * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach() | 609 | * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach() |
600 | * holds that lock for each task it moves into the cgroup. Therefore | 610 | * holds that lock for each task it moves into the cgroup. Therefore |
601 | * by holding that lock, we pin the task to the current cgroup. | 611 | * by holding that lock, we pin the task to the current cgroup. |
602 | */ | 612 | */ |
@@ -606,7 +616,7 @@ static inline struct task_group *task_group(struct task_struct *p) | |||
606 | struct cgroup_subsys_state *css; | 616 | struct cgroup_subsys_state *css; |
607 | 617 | ||
608 | css = task_subsys_state_check(p, cpu_cgroup_subsys_id, | 618 | css = task_subsys_state_check(p, cpu_cgroup_subsys_id, |
609 | lockdep_is_held(&task_rq(p)->lock)); | 619 | lockdep_is_held(&p->pi_lock)); |
610 | tg = container_of(css, struct task_group, css); | 620 | tg = container_of(css, struct task_group, css); |
611 | 621 | ||
612 | return autogroup_task_group(p, tg); | 622 | return autogroup_task_group(p, tg); |
@@ -642,7 +652,7 @@ static void update_rq_clock(struct rq *rq) | |||
642 | { | 652 | { |
643 | s64 delta; | 653 | s64 delta; |
644 | 654 | ||
645 | if (rq->skip_clock_update) | 655 | if (rq->skip_clock_update > 0) |
646 | return; | 656 | return; |
647 | 657 | ||
648 | delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; | 658 | delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; |
@@ -838,18 +848,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p) | |||
838 | return rq->curr == p; | 848 | return rq->curr == p; |
839 | } | 849 | } |
840 | 850 | ||
841 | #ifndef __ARCH_WANT_UNLOCKED_CTXSW | ||
842 | static inline int task_running(struct rq *rq, struct task_struct *p) | 851 | static inline int task_running(struct rq *rq, struct task_struct *p) |
843 | { | 852 | { |
853 | #ifdef CONFIG_SMP | ||
854 | return p->on_cpu; | ||
855 | #else | ||
844 | return task_current(rq, p); | 856 | return task_current(rq, p); |
857 | #endif | ||
845 | } | 858 | } |
846 | 859 | ||
860 | #ifndef __ARCH_WANT_UNLOCKED_CTXSW | ||
847 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) | 861 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) |
848 | { | 862 | { |
863 | #ifdef CONFIG_SMP | ||
864 | /* | ||
865 | * We can optimise this out completely for !SMP, because the | ||
866 | * SMP rebalancing from interrupt is the only thing that cares | ||
867 | * here. | ||
868 | */ | ||
869 | next->on_cpu = 1; | ||
870 | #endif | ||
849 | } | 871 | } |
850 | 872 | ||
851 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | 873 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) |
852 | { | 874 | { |
875 | #ifdef CONFIG_SMP | ||
876 | /* | ||
877 | * After ->on_cpu is cleared, the task can be moved to a different CPU. | ||
878 | * We must ensure this doesn't happen until the switch is completely | ||
879 | * finished. | ||
880 | */ | ||
881 | smp_wmb(); | ||
882 | prev->on_cpu = 0; | ||
883 | #endif | ||
853 | #ifdef CONFIG_DEBUG_SPINLOCK | 884 | #ifdef CONFIG_DEBUG_SPINLOCK |
854 | /* this is a valid case when another task releases the spinlock */ | 885 | /* this is a valid case when another task releases the spinlock */ |
855 | rq->lock.owner = current; | 886 | rq->lock.owner = current; |
@@ -865,15 +896,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | |||
865 | } | 896 | } |
866 | 897 | ||
867 | #else /* __ARCH_WANT_UNLOCKED_CTXSW */ | 898 | #else /* __ARCH_WANT_UNLOCKED_CTXSW */ |
868 | static inline int task_running(struct rq *rq, struct task_struct *p) | ||
869 | { | ||
870 | #ifdef CONFIG_SMP | ||
871 | return p->oncpu; | ||
872 | #else | ||
873 | return task_current(rq, p); | ||
874 | #endif | ||
875 | } | ||
876 | |||
877 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) | 899 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) |
878 | { | 900 | { |
879 | #ifdef CONFIG_SMP | 901 | #ifdef CONFIG_SMP |
@@ -882,7 +904,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) | |||
882 | * SMP rebalancing from interrupt is the only thing that cares | 904 | * SMP rebalancing from interrupt is the only thing that cares |
883 | * here. | 905 | * here. |
884 | */ | 906 | */ |
885 | next->oncpu = 1; | 907 | next->on_cpu = 1; |
886 | #endif | 908 | #endif |
887 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | 909 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
888 | raw_spin_unlock_irq(&rq->lock); | 910 | raw_spin_unlock_irq(&rq->lock); |
@@ -895,12 +917,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | |||
895 | { | 917 | { |
896 | #ifdef CONFIG_SMP | 918 | #ifdef CONFIG_SMP |
897 | /* | 919 | /* |
898 | * After ->oncpu is cleared, the task can be moved to a different CPU. | 920 | * After ->on_cpu is cleared, the task can be moved to a different CPU. |
899 | * We must ensure this doesn't happen until the switch is completely | 921 | * We must ensure this doesn't happen until the switch is completely |
900 | * finished. | 922 | * finished. |
901 | */ | 923 | */ |
902 | smp_wmb(); | 924 | smp_wmb(); |
903 | prev->oncpu = 0; | 925 | prev->on_cpu = 0; |
904 | #endif | 926 | #endif |
905 | #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW | 927 | #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
906 | local_irq_enable(); | 928 | local_irq_enable(); |
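In both variants, prev->on_cpu is cleared only after an smp_wmb(), and on SMP task_running() now reports ->on_cpu directly. The point of that ordering is to let a remote CPU wait for the context switch to truly finish before it touches the task; schematically (a sketch with illustrative names, not the scheduler's exact code paths):

	struct task {
		int on_cpu;
		/* ... state saved during the context switch ... */
	};

	void switch_out(struct task *prev)
	{
		/* ... store prev's context ... */
		smp_wmb();		/* context stores before the flag clear */
		prev->on_cpu = 0;	/* now visibly "off the CPU" */
	}

	void remote_side(struct task *p)
	{
		while (p->on_cpu)	/* like task_running(): still switching */
			cpu_relax();
		smp_rmb();		/* also observe the saved context */
		/* now safe to migrate p or complete its wakeup elsewhere */
	}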
@@ -909,23 +931,15 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | |||
909 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ | 931 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ |
910 | 932 | ||
911 | /* | 933 | /* |
912 | * Check whether the task is waking, we use this to synchronize ->cpus_allowed | 934 | * __task_rq_lock - lock the rq @p resides on. |
913 | * against ttwu(). | ||
914 | */ | ||
915 | static inline int task_is_waking(struct task_struct *p) | ||
916 | { | ||
917 | return unlikely(p->state == TASK_WAKING); | ||
918 | } | ||
919 | |||
920 | /* | ||
921 | * __task_rq_lock - lock the runqueue a given task resides on. | ||
922 | * Must be called interrupts disabled. | ||
923 | */ | 935 | */ |
924 | static inline struct rq *__task_rq_lock(struct task_struct *p) | 936 | static inline struct rq *__task_rq_lock(struct task_struct *p) |
925 | __acquires(rq->lock) | 937 | __acquires(rq->lock) |
926 | { | 938 | { |
927 | struct rq *rq; | 939 | struct rq *rq; |
928 | 940 | ||
941 | lockdep_assert_held(&p->pi_lock); | ||
942 | |||
929 | for (;;) { | 943 | for (;;) { |
930 | rq = task_rq(p); | 944 | rq = task_rq(p); |
931 | raw_spin_lock(&rq->lock); | 945 | raw_spin_lock(&rq->lock); |
@@ -936,22 +950,22 @@ static inline struct rq *__task_rq_lock(struct task_struct *p) | |||
936 | } | 950 | } |
937 | 951 | ||
938 | /* | 952 | /* |
939 | * task_rq_lock - lock the runqueue a given task resides on and disable | 953 | * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. |
940 | * interrupts. Note the ordering: we can safely lookup the task_rq without | ||
941 | * explicitly disabling preemption. | ||
942 | */ | 954 | */ |
943 | static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) | 955 | static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) |
956 | __acquires(p->pi_lock) | ||
944 | __acquires(rq->lock) | 957 | __acquires(rq->lock) |
945 | { | 958 | { |
946 | struct rq *rq; | 959 | struct rq *rq; |
947 | 960 | ||
948 | for (;;) { | 961 | for (;;) { |
949 | local_irq_save(*flags); | 962 | raw_spin_lock_irqsave(&p->pi_lock, *flags); |
950 | rq = task_rq(p); | 963 | rq = task_rq(p); |
951 | raw_spin_lock(&rq->lock); | 964 | raw_spin_lock(&rq->lock); |
952 | if (likely(rq == task_rq(p))) | 965 | if (likely(rq == task_rq(p))) |
953 | return rq; | 966 | return rq; |
954 | raw_spin_unlock_irqrestore(&rq->lock, *flags); | 967 | raw_spin_unlock(&rq->lock); |
968 | raw_spin_unlock_irqrestore(&p->pi_lock, *flags); | ||
955 | } | 969 | } |
956 | } | 970 | } |
957 | 971 | ||
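The retry loop is needed because task_rq(p) can change between the lookup and the raw_spin_lock(): lock, re-check, and loop if the task migrated meanwhile. Taking ->pi_lock first additionally pins p against concurrent wakeups choosing it a new CPU. A typical caller then looks like (illustrative usage):

	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);	/* p->pi_lock + rq->lock held */
	/* ... p cannot change runqueues or be woken underneath us ... */
	task_rq_unlock(rq, p, &flags);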
@@ -961,10 +975,13 @@ static void __task_rq_unlock(struct rq *rq) | |||
961 | raw_spin_unlock(&rq->lock); | 975 | raw_spin_unlock(&rq->lock); |
962 | } | 976 | } |
963 | 977 | ||
964 | static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) | 978 | static inline void |
979 | task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags) | ||
965 | __releases(rq->lock) | 980 | __releases(rq->lock) |
981 | __releases(p->pi_lock) | ||
966 | { | 982 | { |
967 | raw_spin_unlock_irqrestore(&rq->lock, *flags); | 983 | raw_spin_unlock(&rq->lock); |
984 | raw_spin_unlock_irqrestore(&p->pi_lock, *flags); | ||
968 | } | 985 | } |
969 | 986 | ||
970 | /* | 987 | /* |
@@ -1193,11 +1210,17 @@ int get_nohz_timer_target(void) | |||
1193 | int i; | 1210 | int i; |
1194 | struct sched_domain *sd; | 1211 | struct sched_domain *sd; |
1195 | 1212 | ||
1213 | rcu_read_lock(); | ||
1196 | for_each_domain(cpu, sd) { | 1214 | for_each_domain(cpu, sd) { |
1197 | for_each_cpu(i, sched_domain_span(sd)) | 1215 | for_each_cpu(i, sched_domain_span(sd)) { |
1198 | if (!idle_cpu(i)) | 1216 | if (!idle_cpu(i)) { |
1199 | return i; | 1217 | cpu = i; |
1218 | goto unlock; | ||
1219 | } | ||
1220 | } | ||
1200 | } | 1221 | } |
1222 | unlock: | ||
1223 | rcu_read_unlock(); | ||
1201 | return cpu; | 1224 | return cpu; |
1202 | } | 1225 | } |
1203 | /* | 1226 | /* |
@@ -1307,15 +1330,15 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, | |||
1307 | { | 1330 | { |
1308 | u64 tmp; | 1331 | u64 tmp; |
1309 | 1332 | ||
1333 | tmp = (u64)delta_exec * weight; | ||
1334 | |||
1310 | if (!lw->inv_weight) { | 1335 | if (!lw->inv_weight) { |
1311 | if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST)) | 1336 | if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST)) |
1312 | lw->inv_weight = 1; | 1337 | lw->inv_weight = 1; |
1313 | else | 1338 | else |
1314 | lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2) | 1339 | lw->inv_weight = WMULT_CONST / lw->weight; |
1315 | / (lw->weight+1); | ||
1316 | } | 1340 | } |
1317 | 1341 | ||
1318 | tmp = (u64)delta_exec * weight; | ||
1319 | /* | 1342 | /* |
1320 | * Check whether we'd overflow the 64-bit multiplication: | 1343 | * Check whether we'd overflow the 64-bit multiplication: |
1321 | */ | 1344 | */ |
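With the simplification, the cached reciprocal is just WMULT_CONST / lw->weight and the scaling becomes a multiply plus shift instead of a 64-bit divide. A worked example, assuming the 64-bit WMULT_CONST of 1ULL << 32 (all numbers illustrative):

	u64 delta_exec = 1000000;		/* 1 ms of wall time, in ns */
	unsigned long weight = 1024;		/* one nice-0 task's weight */
	unsigned long lw_weight = 2048;		/* queue: two nice-0 tasks */
	u32 inv_weight = (1ULL << 32) / lw_weight;	/* 2097152 */

	u64 tmp = delta_exec * weight;			/* 1024000000 */
	u64 share = (tmp * inv_weight) >> 32;		/* exactly 500000 */
	/* i.e. delta_exec * weight / lw_weight without a slow division */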
@@ -1773,7 +1796,6 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) | |||
1773 | update_rq_clock(rq); | 1796 | update_rq_clock(rq); |
1774 | sched_info_queued(p); | 1797 | sched_info_queued(p); |
1775 | p->sched_class->enqueue_task(rq, p, flags); | 1798 | p->sched_class->enqueue_task(rq, p, flags); |
1776 | p->se.on_rq = 1; | ||
1777 | } | 1799 | } |
1778 | 1800 | ||
1779 | static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) | 1801 | static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) |
@@ -1781,7 +1803,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) | |||
1781 | update_rq_clock(rq); | 1803 | update_rq_clock(rq); |
1782 | sched_info_dequeued(p); | 1804 | sched_info_dequeued(p); |
1783 | p->sched_class->dequeue_task(rq, p, flags); | 1805 | p->sched_class->dequeue_task(rq, p, flags); |
1784 | p->se.on_rq = 0; | ||
1785 | } | 1806 | } |
1786 | 1807 | ||
1787 | /* | 1808 | /* |
@@ -2116,7 +2137,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) | |||
2116 | * A queue event has occurred, and we're going to schedule. In | 2137 | * A queue event has occurred, and we're going to schedule. In |
2117 | * this case, we can save a useless back to back clock update. | 2138 | * this case, we can save a useless back to back clock update. |
2118 | */ | 2139 | */ |
2119 | if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) | 2140 | if (rq->curr->on_rq && test_tsk_need_resched(rq->curr)) |
2120 | rq->skip_clock_update = 1; | 2141 | rq->skip_clock_update = 1; |
2121 | } | 2142 | } |
2122 | 2143 | ||
@@ -2162,6 +2183,11 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
2162 | */ | 2183 | */ |
2163 | WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && | 2184 | WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && |
2164 | !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); | 2185 | !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); |
2186 | |||
2187 | #ifdef CONFIG_LOCKDEP | ||
2188 | WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || | ||
2189 | lockdep_is_held(&task_rq(p)->lock))); | ||
2190 | #endif | ||
2165 | #endif | 2191 | #endif |
2166 | 2192 | ||
2167 | trace_sched_migrate_task(p, new_cpu); | 2193 | trace_sched_migrate_task(p, new_cpu); |
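The new WARN_ON_ONCE turns set_task_cpu()'s locking contract (hold either p->pi_lock or the source runqueue lock) into an executable assertion. The single-lock form of the same pattern is simply (an illustrative function, not from this patch):

	static void update_protected_state(struct my_obj *obj)
	{
		lockdep_assert_held(&obj->lock);	/* splat if violated */
		/* ... modify fields guarded by obj->lock ... */
	}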
@@ -2182,19 +2208,6 @@ struct migration_arg { | |||
2182 | static int migration_cpu_stop(void *data); | 2208 | static int migration_cpu_stop(void *data); |
2183 | 2209 | ||
2184 | /* | 2210 | /* |
2185 | * The task's runqueue lock must be held. | ||
2186 | * Returns true if you have to wait for migration thread. | ||
2187 | */ | ||
2188 | static bool migrate_task(struct task_struct *p, struct rq *rq) | ||
2189 | { | ||
2190 | /* | ||
2191 | * If the task is not on a runqueue (and not running), then | ||
2192 | * the next wake-up will properly place the task. | ||
2193 | */ | ||
2194 | return p->se.on_rq || task_running(rq, p); | ||
2195 | } | ||
2196 | |||
2197 | /* | ||
2198 | * wait_task_inactive - wait for a thread to unschedule. | 2211 | * wait_task_inactive - wait for a thread to unschedule. |
2199 | * | 2212 | * |
2200 | * If @match_state is nonzero, it's the @p->state value just checked and | 2213 | * If @match_state is nonzero, it's the @p->state value just checked and |
@@ -2251,11 +2264,11 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) | |||
2251 | rq = task_rq_lock(p, &flags); | 2264 | rq = task_rq_lock(p, &flags); |
2252 | trace_sched_wait_task(p); | 2265 | trace_sched_wait_task(p); |
2253 | running = task_running(rq, p); | 2266 | running = task_running(rq, p); |
2254 | on_rq = p->se.on_rq; | 2267 | on_rq = p->on_rq; |
2255 | ncsw = 0; | 2268 | ncsw = 0; |
2256 | if (!match_state || p->state == match_state) | 2269 | if (!match_state || p->state == match_state) |
2257 | ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ | 2270 | ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ |
2258 | task_rq_unlock(rq, &flags); | 2271 | task_rq_unlock(rq, p, &flags); |
2259 | 2272 | ||
2260 | /* | 2273 | /* |
2261 | * If it changed from the expected state, bail out now. | 2274 | * If it changed from the expected state, bail out now. |
@@ -2330,7 +2343,7 @@ EXPORT_SYMBOL_GPL(kick_process); | |||
2330 | 2343 | ||
2331 | #ifdef CONFIG_SMP | 2344 | #ifdef CONFIG_SMP |
2332 | /* | 2345 | /* |
2333 | * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held. | 2346 | * ->cpus_allowed is protected by both rq->lock and p->pi_lock |
2334 | */ | 2347 | */ |
2335 | static int select_fallback_rq(int cpu, struct task_struct *p) | 2348 | static int select_fallback_rq(int cpu, struct task_struct *p) |
2336 | { | 2349 | { |
@@ -2363,12 +2376,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p) | |||
2363 | } | 2376 | } |
2364 | 2377 | ||
2365 | /* | 2378 | /* |
2366 | * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable. | 2379 | * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. |
2367 | */ | 2380 | */ |
2368 | static inline | 2381 | static inline |
2369 | int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags) | 2382 | int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) |
2370 | { | 2383 | { |
2371 | int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags); | 2384 | int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags); |
2372 | 2385 | ||
2373 | /* | 2386 | /* |
2374 | * In order not to call set_task_cpu() on a blocking task we need | 2387 | * In order not to call set_task_cpu() on a blocking task we need |
@@ -2394,27 +2407,62 @@ static void update_avg(u64 *avg, u64 sample) | |||
2394 | } | 2407 | } |
2395 | #endif | 2408 | #endif |
2396 | 2409 | ||
2397 | static inline void ttwu_activate(struct task_struct *p, struct rq *rq, | 2410 | static void |
2398 | bool is_sync, bool is_migrate, bool is_local, | 2411 | ttwu_stat(struct task_struct *p, int cpu, int wake_flags) |
2399 | unsigned long en_flags) | ||
2400 | { | 2412 | { |
2413 | #ifdef CONFIG_SCHEDSTATS | ||
2414 | struct rq *rq = this_rq(); | ||
2415 | |||
2416 | #ifdef CONFIG_SMP | ||
2417 | int this_cpu = smp_processor_id(); | ||
2418 | |||
2419 | if (cpu == this_cpu) { | ||
2420 | schedstat_inc(rq, ttwu_local); | ||
2421 | schedstat_inc(p, se.statistics.nr_wakeups_local); | ||
2422 | } else { | ||
2423 | struct sched_domain *sd; | ||
2424 | |||
2425 | schedstat_inc(p, se.statistics.nr_wakeups_remote); | ||
2426 | rcu_read_lock(); | ||
2427 | for_each_domain(this_cpu, sd) { | ||
2428 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { | ||
2429 | schedstat_inc(sd, ttwu_wake_remote); | ||
2430 | break; | ||
2431 | } | ||
2432 | } | ||
2433 | rcu_read_unlock(); | ||
2434 | } | ||
2435 | #endif /* CONFIG_SMP */ | ||
2436 | |||
2437 | schedstat_inc(rq, ttwu_count); | ||
2401 | schedstat_inc(p, se.statistics.nr_wakeups); | 2438 | schedstat_inc(p, se.statistics.nr_wakeups); |
2402 | if (is_sync) | 2439 | |
2440 | if (wake_flags & WF_SYNC) | ||
2403 | schedstat_inc(p, se.statistics.nr_wakeups_sync); | 2441 | schedstat_inc(p, se.statistics.nr_wakeups_sync); |
2404 | if (is_migrate) | 2442 | |
2443 | if (cpu != task_cpu(p)) | ||
2405 | schedstat_inc(p, se.statistics.nr_wakeups_migrate); | 2444 | schedstat_inc(p, se.statistics.nr_wakeups_migrate); |
2406 | if (is_local) | ||
2407 | schedstat_inc(p, se.statistics.nr_wakeups_local); | ||
2408 | else | ||
2409 | schedstat_inc(p, se.statistics.nr_wakeups_remote); | ||
2410 | 2445 | ||
2446 | #endif /* CONFIG_SCHEDSTATS */ | ||
2447 | } | ||
2448 | |||
2449 | static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) | ||
2450 | { | ||
2411 | activate_task(rq, p, en_flags); | 2451 | activate_task(rq, p, en_flags); |
2452 | p->on_rq = 1; | ||
2453 | |||
2454 | /* if a worker is waking up, notify workqueue */ | ||
2455 | if (p->flags & PF_WQ_WORKER) | ||
2456 | wq_worker_waking_up(p, cpu_of(rq)); | ||
2412 | } | 2457 | } |
2413 | 2458 | ||
2414 | static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, | 2459 | /* |
2415 | int wake_flags, bool success) | 2460 | * Mark the task runnable and perform wakeup-preemption. |
2461 | */ | ||
2462 | static void | ||
2463 | ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) | ||
2416 | { | 2464 | { |
2417 | trace_sched_wakeup(p, success); | 2465 | trace_sched_wakeup(p, true); |
2418 | check_preempt_curr(rq, p, wake_flags); | 2466 | check_preempt_curr(rq, p, wake_flags); |
2419 | 2467 | ||
2420 | p->state = TASK_RUNNING; | 2468 | p->state = TASK_RUNNING; |
@@ -2433,9 +2481,99 @@ static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, | |||
2433 | rq->idle_stamp = 0; | 2481 | rq->idle_stamp = 0; |
2434 | } | 2482 | } |
2435 | #endif | 2483 | #endif |
2436 | /* if a worker is waking up, notify workqueue */ | 2484 | } |
2437 | if ((p->flags & PF_WQ_WORKER) && success) | 2485 | |
2438 | wq_worker_waking_up(p, cpu_of(rq)); | 2486 | static void |
2487 | ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) | ||
2488 | { | ||
2489 | #ifdef CONFIG_SMP | ||
2490 | if (p->sched_contributes_to_load) | ||
2491 | rq->nr_uninterruptible--; | ||
2492 | #endif | ||
2493 | |||
2494 | ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); | ||
2495 | ttwu_do_wakeup(rq, p, wake_flags); | ||
2496 | } | ||
2497 | |||
2498 | /* | ||
2499 | * Called in case the task @p isn't fully descheduled from its runqueue; | ||
2500 | * in this case we must do a remote wakeup. It's a 'light' wakeup though, | ||
2501 | * since all we need to do is flip p->state to TASK_RUNNING, given | ||
2502 | * the task is still ->on_rq. | ||
2503 | */ | ||
2504 | static int ttwu_remote(struct task_struct *p, int wake_flags) | ||
2505 | { | ||
2506 | struct rq *rq; | ||
2507 | int ret = 0; | ||
2508 | |||
2509 | rq = __task_rq_lock(p); | ||
2510 | if (p->on_rq) { | ||
2511 | ttwu_do_wakeup(rq, p, wake_flags); | ||
2512 | ret = 1; | ||
2513 | } | ||
2514 | __task_rq_unlock(rq); | ||
2515 | |||
2516 | return ret; | ||
2517 | } | ||
2518 | |||
2519 | #ifdef CONFIG_SMP | ||
2520 | static void sched_ttwu_pending(void) | ||
2521 | { | ||
2522 | struct rq *rq = this_rq(); | ||
2523 | struct task_struct *list = xchg(&rq->wake_list, NULL); | ||
2524 | |||
2525 | if (!list) | ||
2526 | return; | ||
2527 | |||
2528 | raw_spin_lock(&rq->lock); | ||
2529 | |||
2530 | while (list) { | ||
2531 | struct task_struct *p = list; | ||
2532 | list = list->wake_entry; | ||
2533 | ttwu_do_activate(rq, p, 0); | ||
2534 | } | ||
2535 | |||
2536 | raw_spin_unlock(&rq->lock); | ||
2537 | } | ||
2538 | |||
2539 | void scheduler_ipi(void) | ||
2540 | { | ||
2541 | sched_ttwu_pending(); | ||
2542 | } | ||
2543 | |||
2544 | static void ttwu_queue_remote(struct task_struct *p, int cpu) | ||
2545 | { | ||
2546 | struct rq *rq = cpu_rq(cpu); | ||
2547 | struct task_struct *next = rq->wake_list; | ||
2548 | |||
2549 | for (;;) { | ||
2550 | struct task_struct *old = next; | ||
2551 | |||
2552 | p->wake_entry = next; | ||
2553 | next = cmpxchg(&rq->wake_list, old, p); | ||
2554 | if (next == old) | ||
2555 | break; | ||
2556 | } | ||
2557 | |||
2558 | if (!next) | ||
2559 | smp_send_reschedule(cpu); | ||
2560 | } | ||
2561 | #endif | ||
2562 | |||
2563 | static void ttwu_queue(struct task_struct *p, int cpu) | ||
2564 | { | ||
2565 | struct rq *rq = cpu_rq(cpu); | ||
2566 | |||
2567 | #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_TTWU_QUEUE) | ||
2568 | if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) { | ||
2569 | ttwu_queue_remote(p, cpu); | ||
2570 | return; | ||
2571 | } | ||
2572 | #endif | ||
2573 | |||
2574 | raw_spin_lock(&rq->lock); | ||
2575 | ttwu_do_activate(rq, p, 0); | ||
2576 | raw_spin_unlock(&rq->lock); | ||
2439 | } | 2577 | } |
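
ttwu_queue_remote() and sched_ttwu_pending() together form a lock-free multi-producer/single-consumer stack: wakers push the task onto rq->wake_list with the cmpxchg() loop and send the reschedule IPI only on the empty-to-non-empty transition, while the target CPU drains the whole list with a single xchg() and activates each task under its own rq->lock. Below, a minimal userspace analogue of the protocol in C11 atomics; wake_node, push() and drain() are illustrative names, not kernel API:

#include <stdatomic.h>
#include <stddef.h>

struct wake_node {
        struct wake_node *next;                 /* plays p->wake_entry */
};

static _Atomic(struct wake_node *) wake_list;   /* plays rq->wake_list */

/* Waker side: mirrors the cmpxchg() loop in ttwu_queue_remote(). */
static void push(struct wake_node *n)
{
        struct wake_node *old = atomic_load(&wake_list);

        do {
                n->next = old;
        } while (!atomic_compare_exchange_weak(&wake_list, &old, n));

        if (!old) {
                /* list was empty: this is the point where the kernel
                 * sends the IPI (smp_send_reschedule()) to the target */
        }
}

/* Target side: mirrors the xchg() drain in sched_ttwu_pending(). */
static void drain(void (*activate)(struct wake_node *))
{
        struct wake_node *n = atomic_exchange(&wake_list, NULL);

        while (n) {
                struct wake_node *next = n->next;

                activate(n);    /* ttwu_do_activate() under rq->lock */
                n = next;
        }
}

Since the IPI is sent only when the list was previously empty, a burst of remote wakeups aimed at one CPU costs a single interrupt; later pushes merely chain onto the already-pending list.
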
2440 | 2578 | ||
2441 | /** | 2579 | /** |
@@ -2453,92 +2591,64 @@ static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, | |||
2453 | * Returns %true if @p was woken up, %false if it was already running | 2591 | * Returns %true if @p was woken up, %false if it was already running |
2454 | * or @state didn't match @p's state. | 2592 | * or @state didn't match @p's state. |
2455 | */ | 2593 | */ |
2456 | static int try_to_wake_up(struct task_struct *p, unsigned int state, | 2594 | static int |
2457 | int wake_flags) | 2595 | try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) |
2458 | { | 2596 | { |
2459 | int cpu, orig_cpu, this_cpu, success = 0; | ||
2460 | unsigned long flags; | 2597 | unsigned long flags; |
2461 | unsigned long en_flags = ENQUEUE_WAKEUP; | 2598 | int cpu, success = 0; |
2462 | struct rq *rq; | ||
2463 | |||
2464 | this_cpu = get_cpu(); | ||
2465 | 2599 | ||
2466 | smp_wmb(); | 2600 | smp_wmb(); |
2467 | rq = task_rq_lock(p, &flags); | 2601 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
2468 | if (!(p->state & state)) | 2602 | if (!(p->state & state)) |
2469 | goto out; | 2603 | goto out; |
2470 | 2604 | ||
2471 | if (p->se.on_rq) | 2605 | success = 1; /* we're going to change ->state */ |
2472 | goto out_running; | ||
2473 | |||
2474 | cpu = task_cpu(p); | 2606 | cpu = task_cpu(p); |
2475 | orig_cpu = cpu; | ||
2476 | 2607 | ||
2477 | #ifdef CONFIG_SMP | 2608 | if (p->on_rq && ttwu_remote(p, wake_flags)) |
2478 | if (unlikely(task_running(rq, p))) | 2609 | goto stat; |
2479 | goto out_activate; | ||
2480 | 2610 | ||
2611 | #ifdef CONFIG_SMP | ||
2481 | /* | 2612 | /* |
2482 | * In order to handle concurrent wakeups and release the rq->lock | 2613 | * If the owning (remote) cpu is still in the middle of schedule() with |
2483 | * we put the task in TASK_WAKING state. | 2614 | * this task as prev, wait until it's done referencing the task. |
2484 | * | ||
2485 | * First fix up the nr_uninterruptible count: | ||
2486 | */ | 2615 | */ |
2487 | if (task_contributes_to_load(p)) { | 2616 | while (p->on_cpu) { |
2488 | if (likely(cpu_online(orig_cpu))) | 2617 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
2489 | rq->nr_uninterruptible--; | 2618 | /* |
2490 | else | 2619 | * If called from interrupt context we could have landed in the |
2491 | this_rq()->nr_uninterruptible--; | 2620 | * middle of schedule(), in this case we should take care not |
2492 | } | 2621 | * middle of schedule(); in this case we should take care not |
2493 | p->state = TASK_WAKING; | 2622 | * deadlock. |
2494 | 2623 | */ | |
2495 | if (p->sched_class->task_waking) { | 2624 | if (p == current) { |
2496 | p->sched_class->task_waking(rq, p); | 2625 | ttwu_queue(p, cpu); |
2497 | en_flags |= ENQUEUE_WAKING; | 2626 | goto stat; |
2627 | } | ||
2628 | #endif | ||
2629 | cpu_relax(); | ||
2498 | } | 2630 | } |
2499 | |||
2500 | cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags); | ||
2501 | if (cpu != orig_cpu) | ||
2502 | set_task_cpu(p, cpu); | ||
2503 | __task_rq_unlock(rq); | ||
2504 | |||
2505 | rq = cpu_rq(cpu); | ||
2506 | raw_spin_lock(&rq->lock); | ||
2507 | |||
2508 | /* | 2631 | /* |
2509 | * We migrated the task without holding either rq->lock, however | 2632 | * Pairs with the smp_wmb() in finish_lock_switch(). |
2510 | * since the task is not on the task list itself, nobody else | ||
2511 | * will try and migrate the task, hence the rq should match the | ||
2512 | * cpu we just moved it to. | ||
2513 | */ | 2633 | */ |
2514 | WARN_ON(task_cpu(p) != cpu); | 2634 | smp_rmb(); |
2515 | WARN_ON(p->state != TASK_WAKING); | ||
2516 | 2635 | ||
2517 | #ifdef CONFIG_SCHEDSTATS | 2636 | p->sched_contributes_to_load = !!task_contributes_to_load(p); |
2518 | schedstat_inc(rq, ttwu_count); | 2637 | p->state = TASK_WAKING; |
2519 | if (cpu == this_cpu) | 2638 | |
2520 | schedstat_inc(rq, ttwu_local); | 2639 | if (p->sched_class->task_waking) |
2521 | else { | 2640 | p->sched_class->task_waking(p); |
2522 | struct sched_domain *sd; | ||
2523 | for_each_domain(this_cpu, sd) { | ||
2524 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { | ||
2525 | schedstat_inc(sd, ttwu_wake_remote); | ||
2526 | break; | ||
2527 | } | ||
2528 | } | ||
2529 | } | ||
2530 | #endif /* CONFIG_SCHEDSTATS */ | ||
2531 | 2641 | ||
2532 | out_activate: | 2642 | cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); |
2643 | if (task_cpu(p) != cpu) | ||
2644 | set_task_cpu(p, cpu); | ||
2533 | #endif /* CONFIG_SMP */ | 2645 | #endif /* CONFIG_SMP */ |
2534 | ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu, | 2646 | |
2535 | cpu == this_cpu, en_flags); | 2647 | ttwu_queue(p, cpu); |
2536 | success = 1; | 2648 | stat: |
2537 | out_running: | 2649 | ttwu_stat(p, cpu, wake_flags); |
2538 | ttwu_post_activation(p, rq, wake_flags, success); | ||
2539 | out: | 2650 | out: |
2540 | task_rq_unlock(rq, &flags); | 2651 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
2541 | put_cpu(); | ||
2542 | 2652 | ||
2543 | return success; | 2653 | return success; |
2544 | } | 2654 | } |
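
The smp_rmb() above, as its comment says, pairs with the smp_wmb() in finish_lock_switch(): the CPU that deschedules the task completes all of its stores to the task before clearing ->on_cpu, and the waker spins until it observes the clear before touching the task's state. A minimal sketch of the same publish/observe idiom, using C11 release/acquire in place of the kernel's explicit barrier pair (names are illustrative):

#include <stdatomic.h>

static int task_state;                  /* stores done by the scheduler */
static atomic_int on_cpu = 1;           /* plays p->on_cpu              */

/* finish_lock_switch() side: publish all stores, then clear ->on_cpu. */
static void descheduler(void)
{
        task_state = 42;                        /* last stores to the task */
        atomic_store_explicit(&on_cpu, 0,       /* smp_wmb(); ->on_cpu = 0 */
                              memory_order_release);
}

/* try_to_wake_up() side: spin until ->on_cpu drops; only then may the
 * task's state be read and modified safely. */
static int waker(void)
{
        while (atomic_load_explicit(&on_cpu, memory_order_acquire))
                ;                               /* cpu_relax() */

        return task_state;                      /* guaranteed to observe 42 */
}
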
@@ -2547,31 +2657,34 @@ out: | |||
2547 | * try_to_wake_up_local - try to wake up a local task with rq lock held | 2657 | * try_to_wake_up_local - try to wake up a local task with rq lock held |
2548 | * @p: the thread to be awakened | 2658 | * @p: the thread to be awakened |
2549 | * | 2659 | * |
2550 | * Put @p on the run-queue if it's not already there. The caller must | 2660 | * Put @p on the run-queue if it's not already there. The caller must |
2551 | * ensure that this_rq() is locked, @p is bound to this_rq() and not | 2661 | * ensure that this_rq() is locked, @p is bound to this_rq() and not |
2552 | * the current task. this_rq() stays locked over invocation. | 2662 | * the current task. |
2553 | */ | 2663 | */ |
2554 | static void try_to_wake_up_local(struct task_struct *p) | 2664 | static void try_to_wake_up_local(struct task_struct *p) |
2555 | { | 2665 | { |
2556 | struct rq *rq = task_rq(p); | 2666 | struct rq *rq = task_rq(p); |
2557 | bool success = false; | ||
2558 | 2667 | ||
2559 | BUG_ON(rq != this_rq()); | 2668 | BUG_ON(rq != this_rq()); |
2560 | BUG_ON(p == current); | 2669 | BUG_ON(p == current); |
2561 | lockdep_assert_held(&rq->lock); | 2670 | lockdep_assert_held(&rq->lock); |
2562 | 2671 | ||
2672 | if (!raw_spin_trylock(&p->pi_lock)) { | ||
2673 | raw_spin_unlock(&rq->lock); | ||
2674 | raw_spin_lock(&p->pi_lock); | ||
2675 | raw_spin_lock(&rq->lock); | ||
2676 | } | ||
2677 | |||
2563 | if (!(p->state & TASK_NORMAL)) | 2678 | if (!(p->state & TASK_NORMAL)) |
2564 | return; | 2679 | goto out; |
2565 | 2680 | ||
2566 | if (!p->se.on_rq) { | 2681 | if (!p->on_rq) |
2567 | if (likely(!task_running(rq, p))) { | 2682 | ttwu_activate(rq, p, ENQUEUE_WAKEUP); |
2568 | schedstat_inc(rq, ttwu_count); | 2683 | |
2569 | schedstat_inc(rq, ttwu_local); | 2684 | ttwu_do_wakeup(rq, p, 0); |
2570 | } | 2685 | ttwu_stat(p, smp_processor_id(), 0); |
2571 | ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP); | 2686 | out: |
2572 | success = true; | 2687 | raw_spin_unlock(&p->pi_lock); |
2573 | } | ||
2574 | ttwu_post_activation(p, rq, 0, success); | ||
2575 | } | 2688 | } |
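
The raw_spin_trylock() sequence at the top of try_to_wake_up_local() is the standard way to acquire a lock that is ordered before one already held: p->pi_lock must nest outside rq->lock, yet the function is entered with rq->lock taken. A generic sketch of the pattern, with illustrative lock names:

/*
 * 'outer' must nest outside 'inner' (here: p->pi_lock outside
 * rq->lock), but we enter holding only 'inner'.
 */
static void lock_outer_while_holding_inner(raw_spinlock_t *outer,
                                           raw_spinlock_t *inner)
{
        if (!raw_spin_trylock(outer)) {
                /* Contended: back out, retake both in documented order. */
                raw_spin_unlock(inner);
                raw_spin_lock(outer);
                raw_spin_lock(inner);
        }
        /*
         * Both locks held. Anything guarded by 'inner' may have changed
         * while it was dropped and must be revalidated by the caller,
         * which is why try_to_wake_up_local() re-tests p->state after
         * the dance.
         */
}
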
2576 | 2689 | ||
2577 | /** | 2690 | /** |
@@ -2604,19 +2717,21 @@ int wake_up_state(struct task_struct *p, unsigned int state) | |||
2604 | */ | 2717 | */ |
2605 | static void __sched_fork(struct task_struct *p) | 2718 | static void __sched_fork(struct task_struct *p) |
2606 | { | 2719 | { |
2720 | p->on_rq = 0; | ||
2721 | |||
2722 | p->se.on_rq = 0; | ||
2607 | p->se.exec_start = 0; | 2723 | p->se.exec_start = 0; |
2608 | p->se.sum_exec_runtime = 0; | 2724 | p->se.sum_exec_runtime = 0; |
2609 | p->se.prev_sum_exec_runtime = 0; | 2725 | p->se.prev_sum_exec_runtime = 0; |
2610 | p->se.nr_migrations = 0; | 2726 | p->se.nr_migrations = 0; |
2611 | p->se.vruntime = 0; | 2727 | p->se.vruntime = 0; |
2728 | INIT_LIST_HEAD(&p->se.group_node); | ||
2612 | 2729 | ||
2613 | #ifdef CONFIG_SCHEDSTATS | 2730 | #ifdef CONFIG_SCHEDSTATS |
2614 | memset(&p->se.statistics, 0, sizeof(p->se.statistics)); | 2731 | memset(&p->se.statistics, 0, sizeof(p->se.statistics)); |
2615 | #endif | 2732 | #endif |
2616 | 2733 | ||
2617 | INIT_LIST_HEAD(&p->rt.run_list); | 2734 | INIT_LIST_HEAD(&p->rt.run_list); |
2618 | p->se.on_rq = 0; | ||
2619 | INIT_LIST_HEAD(&p->se.group_node); | ||
2620 | 2735 | ||
2621 | #ifdef CONFIG_PREEMPT_NOTIFIERS | 2736 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
2622 | INIT_HLIST_HEAD(&p->preempt_notifiers); | 2737 | INIT_HLIST_HEAD(&p->preempt_notifiers); |
@@ -2626,8 +2741,9 @@ static void __sched_fork(struct task_struct *p) | |||
2626 | /* | 2741 | /* |
2627 | * fork()/clone()-time setup: | 2742 | * fork()/clone()-time setup: |
2628 | */ | 2743 | */ |
2629 | void sched_fork(struct task_struct *p, int clone_flags) | 2744 | void sched_fork(struct task_struct *p) |
2630 | { | 2745 | { |
2746 | unsigned long flags; | ||
2631 | int cpu = get_cpu(); | 2747 | int cpu = get_cpu(); |
2632 | 2748 | ||
2633 | __sched_fork(p); | 2749 | __sched_fork(p); |
@@ -2678,16 +2794,16 @@ void sched_fork(struct task_struct *p, int clone_flags) | |||
2678 | * | 2794 | * |
2679 | * Silence PROVE_RCU. | 2795 | * Silence PROVE_RCU. |
2680 | */ | 2796 | */ |
2681 | rcu_read_lock(); | 2797 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
2682 | set_task_cpu(p, cpu); | 2798 | set_task_cpu(p, cpu); |
2683 | rcu_read_unlock(); | 2799 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
2684 | 2800 | ||
2685 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 2801 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
2686 | if (likely(sched_info_on())) | 2802 | if (likely(sched_info_on())) |
2687 | memset(&p->sched_info, 0, sizeof(p->sched_info)); | 2803 | memset(&p->sched_info, 0, sizeof(p->sched_info)); |
2688 | #endif | 2804 | #endif |
2689 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) | 2805 | #if defined(CONFIG_SMP) |
2690 | p->oncpu = 0; | 2806 | p->on_cpu = 0; |
2691 | #endif | 2807 | #endif |
2692 | #ifdef CONFIG_PREEMPT | 2808 | #ifdef CONFIG_PREEMPT |
2693 | /* Want to start with kernel preemption disabled. */ | 2809 | /* Want to start with kernel preemption disabled. */ |
@@ -2707,41 +2823,31 @@ void sched_fork(struct task_struct *p, int clone_flags) | |||
2707 | * that must be done for every newly created context, then puts the task | 2823 | * that must be done for every newly created context, then puts the task |
2708 | * on the runqueue and wakes it. | 2824 | * on the runqueue and wakes it. |
2709 | */ | 2825 | */ |
2710 | void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | 2826 | void wake_up_new_task(struct task_struct *p) |
2711 | { | 2827 | { |
2712 | unsigned long flags; | 2828 | unsigned long flags; |
2713 | struct rq *rq; | 2829 | struct rq *rq; |
2714 | int cpu __maybe_unused = get_cpu(); | ||
2715 | 2830 | ||
2831 | raw_spin_lock_irqsave(&p->pi_lock, flags); | ||
2716 | #ifdef CONFIG_SMP | 2832 | #ifdef CONFIG_SMP |
2717 | rq = task_rq_lock(p, &flags); | ||
2718 | p->state = TASK_WAKING; | ||
2719 | |||
2720 | /* | 2833 | /* |
2721 | * Fork balancing, do it here and not earlier because: | 2834 | * Fork balancing, do it here and not earlier because: |
2722 | * - cpus_allowed can change in the fork path | 2835 | * - cpus_allowed can change in the fork path |
2723 | * - any previously selected cpu might disappear through hotplug | 2836 | * - any previously selected cpu might disappear through hotplug |
2724 | * | ||
2725 | * We set TASK_WAKING so that select_task_rq() can drop rq->lock | ||
2726 | * without people poking at ->cpus_allowed. | ||
2727 | */ | 2837 | */ |
2728 | cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0); | 2838 | set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0)); |
2729 | set_task_cpu(p, cpu); | ||
2730 | |||
2731 | p->state = TASK_RUNNING; | ||
2732 | task_rq_unlock(rq, &flags); | ||
2733 | #endif | 2839 | #endif |
2734 | 2840 | ||
2735 | rq = task_rq_lock(p, &flags); | 2841 | rq = __task_rq_lock(p); |
2736 | activate_task(rq, p, 0); | 2842 | activate_task(rq, p, 0); |
2737 | trace_sched_wakeup_new(p, 1); | 2843 | p->on_rq = 1; |
2844 | trace_sched_wakeup_new(p, true); | ||
2738 | check_preempt_curr(rq, p, WF_FORK); | 2845 | check_preempt_curr(rq, p, WF_FORK); |
2739 | #ifdef CONFIG_SMP | 2846 | #ifdef CONFIG_SMP |
2740 | if (p->sched_class->task_woken) | 2847 | if (p->sched_class->task_woken) |
2741 | p->sched_class->task_woken(rq, p); | 2848 | p->sched_class->task_woken(rq, p); |
2742 | #endif | 2849 | #endif |
2743 | task_rq_unlock(rq, &flags); | 2850 | task_rq_unlock(rq, p, &flags); |
2744 | put_cpu(); | ||
2745 | } | 2851 | } |
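
With the clone_flags argument dropped from both signatures, what remains of the contract between these two hooks is ordering, sketched below; the copy_process()/do_fork() plumbing around them is elided and hypothetical:

/* Hypothetical condensation of the fork path around these two hooks. */
static void fork_path_sketch(struct task_struct *p)
{
        sched_fork(p);          /* task not yet visible or runnable     */

        /* ... finish constructing and publishing the new task ... */

        wake_up_new_task(p);    /* fork-balance, enqueue, maybe preempt */
}
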
2746 | 2852 | ||
2747 | #ifdef CONFIG_PREEMPT_NOTIFIERS | 2853 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
@@ -3450,27 +3556,22 @@ void sched_exec(void) | |||
3450 | { | 3556 | { |
3451 | struct task_struct *p = current; | 3557 | struct task_struct *p = current; |
3452 | unsigned long flags; | 3558 | unsigned long flags; |
3453 | struct rq *rq; | ||
3454 | int dest_cpu; | 3559 | int dest_cpu; |
3455 | 3560 | ||
3456 | rq = task_rq_lock(p, &flags); | 3561 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
3457 | dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0); | 3562 | dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0); |
3458 | if (dest_cpu == smp_processor_id()) | 3563 | if (dest_cpu == smp_processor_id()) |
3459 | goto unlock; | 3564 | goto unlock; |
3460 | 3565 | ||
3461 | /* | 3566 | if (likely(cpu_active(dest_cpu))) { |
3462 | * select_task_rq() can race against ->cpus_allowed | ||
3463 | */ | ||
3464 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) && | ||
3465 | likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) { | ||
3466 | struct migration_arg arg = { p, dest_cpu }; | 3567 | struct migration_arg arg = { p, dest_cpu }; |
3467 | 3568 | ||
3468 | task_rq_unlock(rq, &flags); | 3569 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
3469 | stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); | 3570 | stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); |
3470 | return; | 3571 | return; |
3471 | } | 3572 | } |
3472 | unlock: | 3573 | unlock: |
3473 | task_rq_unlock(rq, &flags); | 3574 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
3474 | } | 3575 | } |
3475 | 3576 | ||
3476 | #endif | 3577 | #endif |
@@ -3507,7 +3608,7 @@ unsigned long long task_delta_exec(struct task_struct *p) | |||
3507 | 3608 | ||
3508 | rq = task_rq_lock(p, &flags); | 3609 | rq = task_rq_lock(p, &flags); |
3509 | ns = do_task_delta_exec(p, rq); | 3610 | ns = do_task_delta_exec(p, rq); |
3510 | task_rq_unlock(rq, &flags); | 3611 | task_rq_unlock(rq, p, &flags); |
3511 | 3612 | ||
3512 | return ns; | 3613 | return ns; |
3513 | } | 3614 | } |
@@ -3525,7 +3626,7 @@ unsigned long long task_sched_runtime(struct task_struct *p) | |||
3525 | 3626 | ||
3526 | rq = task_rq_lock(p, &flags); | 3627 | rq = task_rq_lock(p, &flags); |
3527 | ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); | 3628 | ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); |
3528 | task_rq_unlock(rq, &flags); | 3629 | task_rq_unlock(rq, p, &flags); |
3529 | 3630 | ||
3530 | return ns; | 3631 | return ns; |
3531 | } | 3632 | } |
@@ -3549,7 +3650,7 @@ unsigned long long thread_group_sched_runtime(struct task_struct *p) | |||
3549 | rq = task_rq_lock(p, &flags); | 3650 | rq = task_rq_lock(p, &flags); |
3550 | thread_group_cputime(p, &totals); | 3651 | thread_group_cputime(p, &totals); |
3551 | ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); | 3652 | ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); |
3552 | task_rq_unlock(rq, &flags); | 3653 | task_rq_unlock(rq, p, &flags); |
3553 | 3654 | ||
3554 | return ns; | 3655 | return ns; |
3555 | } | 3656 | } |
@@ -3903,9 +4004,6 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | |||
3903 | /* | 4004 | /* |
3904 | * This function gets called by the timer code, with HZ frequency. | 4005 | * This function gets called by the timer code, with HZ frequency. |
3905 | * We call it with interrupts disabled. | 4006 | * We call it with interrupts disabled. |
3906 | * | ||
3907 | * It also gets called by the fork code, when changing the parent's | ||
3908 | * timeslices. | ||
3909 | */ | 4007 | */ |
3910 | void scheduler_tick(void) | 4008 | void scheduler_tick(void) |
3911 | { | 4009 | { |
@@ -4025,17 +4123,11 @@ static inline void schedule_debug(struct task_struct *prev) | |||
4025 | profile_hit(SCHED_PROFILING, __builtin_return_address(0)); | 4123 | profile_hit(SCHED_PROFILING, __builtin_return_address(0)); |
4026 | 4124 | ||
4027 | schedstat_inc(this_rq(), sched_count); | 4125 | schedstat_inc(this_rq(), sched_count); |
4028 | #ifdef CONFIG_SCHEDSTATS | ||
4029 | if (unlikely(prev->lock_depth >= 0)) { | ||
4030 | schedstat_inc(this_rq(), rq_sched_info.bkl_count); | ||
4031 | schedstat_inc(prev, sched_info.bkl_count); | ||
4032 | } | ||
4033 | #endif | ||
4034 | } | 4126 | } |
4035 | 4127 | ||
4036 | static void put_prev_task(struct rq *rq, struct task_struct *prev) | 4128 | static void put_prev_task(struct rq *rq, struct task_struct *prev) |
4037 | { | 4129 | { |
4038 | if (prev->se.on_rq) | 4130 | if (prev->on_rq || rq->skip_clock_update < 0) |
4039 | update_rq_clock(rq); | 4131 | update_rq_clock(rq); |
4040 | prev->sched_class->put_prev_task(rq, prev); | 4132 | prev->sched_class->put_prev_task(rq, prev); |
4041 | } | 4133 | } |
@@ -4097,11 +4189,13 @@ need_resched: | |||
4097 | if (unlikely(signal_pending_state(prev->state, prev))) { | 4189 | if (unlikely(signal_pending_state(prev->state, prev))) { |
4098 | prev->state = TASK_RUNNING; | 4190 | prev->state = TASK_RUNNING; |
4099 | } else { | 4191 | } else { |
4192 | deactivate_task(rq, prev, DEQUEUE_SLEEP); | ||
4193 | prev->on_rq = 0; | ||
4194 | |||
4100 | /* | 4195 | /* |
4101 | * If a worker is going to sleep, notify and | 4196 | * If a worker went to sleep, notify and ask workqueue |
4102 | * ask workqueue whether it wants to wake up a | 4197 | * whether it wants to wake up a task to maintain |
4103 | * task to maintain concurrency. If so, wake | 4198 | * concurrency. |
4104 | * up the task. | ||
4105 | */ | 4199 | */ |
4106 | if (prev->flags & PF_WQ_WORKER) { | 4200 | if (prev->flags & PF_WQ_WORKER) { |
4107 | struct task_struct *to_wakeup; | 4201 | struct task_struct *to_wakeup; |
@@ -4110,11 +4204,10 @@ need_resched: | |||
4110 | if (to_wakeup) | 4204 | if (to_wakeup) |
4111 | try_to_wake_up_local(to_wakeup); | 4205 | try_to_wake_up_local(to_wakeup); |
4112 | } | 4206 | } |
4113 | deactivate_task(rq, prev, DEQUEUE_SLEEP); | ||
4114 | 4207 | ||
4115 | /* | 4208 | /* |
4116 | * If we are going to sleep and we have plugged IO queued, make | 4209 | * If we are going to sleep and we have plugged IO |
4117 | * sure to submit it to avoid deadlocks. | 4210 | * queued, make sure to submit it to avoid deadlocks. |
4118 | */ | 4211 | */ |
4119 | if (blk_needs_flush_plug(prev)) { | 4212 | if (blk_needs_flush_plug(prev)) { |
4120 | raw_spin_unlock(&rq->lock); | 4213 | raw_spin_unlock(&rq->lock); |
@@ -4161,70 +4254,53 @@ need_resched: | |||
4161 | EXPORT_SYMBOL(schedule); | 4254 | EXPORT_SYMBOL(schedule); |
4162 | 4255 | ||
4163 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER | 4256 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
4164 | /* | ||
4165 | * Look out! "owner" is an entirely speculative pointer | ||
4166 | * access and not reliable. | ||
4167 | */ | ||
4168 | int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | ||
4169 | { | ||
4170 | unsigned int cpu; | ||
4171 | struct rq *rq; | ||
4172 | 4257 | ||
4173 | if (!sched_feat(OWNER_SPIN)) | 4258 | static inline bool owner_running(struct mutex *lock, struct task_struct *owner) |
4174 | return 0; | 4259 | { |
4260 | bool ret = false; | ||
4175 | 4261 | ||
4176 | #ifdef CONFIG_DEBUG_PAGEALLOC | 4262 | rcu_read_lock(); |
4177 | /* | 4263 | if (lock->owner != owner) |
4178 | * Need to access the cpu field knowing that | 4264 | goto fail; |
4179 | * DEBUG_PAGEALLOC could have unmapped it if | ||
4180 | * the mutex owner just released it and exited. | ||
4181 | */ | ||
4182 | if (probe_kernel_address(&owner->cpu, cpu)) | ||
4183 | return 0; | ||
4184 | #else | ||
4185 | cpu = owner->cpu; | ||
4186 | #endif | ||
4187 | 4265 | ||
4188 | /* | 4266 | /* |
4189 | * Even if the access succeeded (likely case), | 4267 | * Ensure we emit the owner->on_cpu, dereference _after_ checking |
4190 | * the cpu field may no longer be valid. | 4268 | * lock->owner still matches owner, if that fails, owner might |
4269 | * point to free()d memory, if it still matches, the rcu_read_lock() | ||
4270 | * ensures the memory stays valid. | ||
4191 | */ | 4271 | */ |
4192 | if (cpu >= nr_cpumask_bits) | 4272 | barrier(); |
4193 | return 0; | ||
4194 | 4273 | ||
4195 | /* | 4274 | ret = owner->on_cpu; |
4196 | * We need to validate that we can do a | 4275 | fail: |
4197 | * get_cpu() and that we have the percpu area. | 4276 | rcu_read_unlock(); |
4198 | */ | ||
4199 | if (!cpu_online(cpu)) | ||
4200 | return 0; | ||
4201 | 4277 | ||
4202 | rq = cpu_rq(cpu); | 4278 | return ret; |
4279 | } | ||
4203 | 4280 | ||
4204 | for (;;) { | 4281 | /* |
4205 | /* | 4282 | * Look out! "owner" is an entirely speculative pointer |
4206 | * Owner changed, break to re-assess state. | 4283 | * access and not reliable. |
4207 | */ | 4284 | */ |
4208 | if (lock->owner != owner) { | 4285 | int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) |
4209 | /* | 4286 | { |
4210 | * If the lock has switched to a different owner, | 4287 | if (!sched_feat(OWNER_SPIN)) |
4211 | * we likely have heavy contention. Return 0 to quit | 4288 | return 0; |
4212 | * optimistic spinning and not contend further: | ||
4213 | */ | ||
4214 | if (lock->owner) | ||
4215 | return 0; | ||
4216 | break; | ||
4217 | } | ||
4218 | 4289 | ||
4219 | /* | 4290 | while (owner_running(lock, owner)) { |
4220 | * Is that owner really running on that cpu? | 4291 | if (need_resched()) |
4221 | */ | ||
4222 | if (task_thread_info(rq->curr) != owner || need_resched()) | ||
4223 | return 0; | 4292 | return 0; |
4224 | 4293 | ||
4225 | arch_mutex_cpu_relax(); | 4294 | arch_mutex_cpu_relax(); |
4226 | } | 4295 | } |
4227 | 4296 | ||
4297 | /* | ||
4298 | * If the owner changed to another task there is likely | ||
4299 | * heavy contention; stop spinning. | ||
4300 | */ | ||
4301 | if (lock->owner) | ||
4302 | return 0; | ||
4303 | |||
4228 | return 1; | 4304 | return 1; |
4229 | } | 4305 | } |
4230 | #endif | 4306 | #endif |
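
owner_running() is an instance of the RCU validate-then-dereference idiom: enter an RCU read-side section, re-check that the published pointer still matches the caller's snapshot, stop the compiler from hoisting the field load above that check, and only then read through the pointer, relying on RCU to keep the memory valid even if the owner exits concurrently. The same idiom in generic form, a sketch with hypothetical names ('published', 'speculate'):

#include <linux/rcupdate.h>
#include <linux/types.h>

struct obj {
        int on_cpu;
};

static struct obj *published;   /* updated elsewhere, freed only via RCU */

static bool speculate(struct obj *snap)
{
        bool ret = false;

        rcu_read_lock();
        if (published != snap)          /* our snapshot may be stale ...  */
                goto out;
        barrier();                      /* ... so order the check before  */
        ret = snap->on_cpu;             /* the load; RCU keeps *snap valid */
out:
        rcu_read_unlock();
        return ret;
}
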
@@ -4684,19 +4760,18 @@ EXPORT_SYMBOL(sleep_on_timeout); | |||
4684 | */ | 4760 | */ |
4685 | void rt_mutex_setprio(struct task_struct *p, int prio) | 4761 | void rt_mutex_setprio(struct task_struct *p, int prio) |
4686 | { | 4762 | { |
4687 | unsigned long flags; | ||
4688 | int oldprio, on_rq, running; | 4763 | int oldprio, on_rq, running; |
4689 | struct rq *rq; | 4764 | struct rq *rq; |
4690 | const struct sched_class *prev_class; | 4765 | const struct sched_class *prev_class; |
4691 | 4766 | ||
4692 | BUG_ON(prio < 0 || prio > MAX_PRIO); | 4767 | BUG_ON(prio < 0 || prio > MAX_PRIO); |
4693 | 4768 | ||
4694 | rq = task_rq_lock(p, &flags); | 4769 | rq = __task_rq_lock(p); |
4695 | 4770 | ||
4696 | trace_sched_pi_setprio(p, prio); | 4771 | trace_sched_pi_setprio(p, prio); |
4697 | oldprio = p->prio; | 4772 | oldprio = p->prio; |
4698 | prev_class = p->sched_class; | 4773 | prev_class = p->sched_class; |
4699 | on_rq = p->se.on_rq; | 4774 | on_rq = p->on_rq; |
4700 | running = task_current(rq, p); | 4775 | running = task_current(rq, p); |
4701 | if (on_rq) | 4776 | if (on_rq) |
4702 | dequeue_task(rq, p, 0); | 4777 | dequeue_task(rq, p, 0); |
@@ -4716,7 +4791,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) | |||
4716 | enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0); | 4791 | enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0); |
4717 | 4792 | ||
4718 | check_class_changed(rq, p, prev_class, oldprio); | 4793 | check_class_changed(rq, p, prev_class, oldprio); |
4719 | task_rq_unlock(rq, &flags); | 4794 | __task_rq_unlock(rq); |
4720 | } | 4795 | } |
4721 | 4796 | ||
4722 | #endif | 4797 | #endif |
@@ -4744,7 +4819,7 @@ void set_user_nice(struct task_struct *p, long nice) | |||
4744 | p->static_prio = NICE_TO_PRIO(nice); | 4819 | p->static_prio = NICE_TO_PRIO(nice); |
4745 | goto out_unlock; | 4820 | goto out_unlock; |
4746 | } | 4821 | } |
4747 | on_rq = p->se.on_rq; | 4822 | on_rq = p->on_rq; |
4748 | if (on_rq) | 4823 | if (on_rq) |
4749 | dequeue_task(rq, p, 0); | 4824 | dequeue_task(rq, p, 0); |
4750 | 4825 | ||
@@ -4764,7 +4839,7 @@ void set_user_nice(struct task_struct *p, long nice) | |||
4764 | resched_task(rq->curr); | 4839 | resched_task(rq->curr); |
4765 | } | 4840 | } |
4766 | out_unlock: | 4841 | out_unlock: |
4767 | task_rq_unlock(rq, &flags); | 4842 | task_rq_unlock(rq, p, &flags); |
4768 | } | 4843 | } |
4769 | EXPORT_SYMBOL(set_user_nice); | 4844 | EXPORT_SYMBOL(set_user_nice); |
4770 | 4845 | ||
@@ -4878,8 +4953,6 @@ static struct task_struct *find_process_by_pid(pid_t pid) | |||
4878 | static void | 4953 | static void |
4879 | __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) | 4954 | __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) |
4880 | { | 4955 | { |
4881 | BUG_ON(p->se.on_rq); | ||
4882 | |||
4883 | p->policy = policy; | 4956 | p->policy = policy; |
4884 | p->rt_priority = prio; | 4957 | p->rt_priority = prio; |
4885 | p->normal_prio = normal_prio(p); | 4958 | p->normal_prio = normal_prio(p); |
@@ -4994,20 +5067,17 @@ recheck: | |||
4994 | /* | 5067 | /* |
4995 | * make sure no PI-waiters arrive (or leave) while we are | 5068 | * make sure no PI-waiters arrive (or leave) while we are |
4996 | * changing the priority of the task: | 5069 | * changing the priority of the task: |
4997 | */ | 5070 | * |
4998 | raw_spin_lock_irqsave(&p->pi_lock, flags); | ||
4999 | /* | ||
5000 | * To be able to change p->policy safely, the appropriate | 5071 | * To be able to change p->policy safely, the appropriate |
5001 | * runqueue lock must be held. | 5072 | * runqueue lock must be held. |
5002 | */ | 5073 | */ |
5003 | rq = __task_rq_lock(p); | 5074 | rq = task_rq_lock(p, &flags); |
5004 | 5075 | ||
5005 | /* | 5076 | /* |
5006 | * Changing the policy of the stop threads is a very bad idea | 5077 |
5007 | */ | 5078 | */ |
5008 | if (p == rq->stop) { | 5079 | if (p == rq->stop) { |
5009 | __task_rq_unlock(rq); | 5080 | task_rq_unlock(rq, p, &flags); |
5010 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); | ||
5011 | return -EINVAL; | 5081 | return -EINVAL; |
5012 | } | 5082 | } |
5013 | 5083 | ||
@@ -5031,8 +5101,7 @@ recheck: | |||
5031 | if (rt_bandwidth_enabled() && rt_policy(policy) && | 5101 | if (rt_bandwidth_enabled() && rt_policy(policy) && |
5032 | task_group(p)->rt_bandwidth.rt_runtime == 0 && | 5102 | task_group(p)->rt_bandwidth.rt_runtime == 0 && |
5033 | !task_group_is_autogroup(task_group(p))) { | 5103 | !task_group_is_autogroup(task_group(p))) { |
5034 | __task_rq_unlock(rq); | 5104 | task_rq_unlock(rq, p, &flags); |
5035 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); | ||
5036 | return -EPERM; | 5105 | return -EPERM; |
5037 | } | 5106 | } |
5038 | } | 5107 | } |
@@ -5041,11 +5110,10 @@ recheck: | |||
5041 | /* recheck policy now with rq lock held */ | 5110 | /* recheck policy now with rq lock held */ |
5042 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { | 5111 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
5043 | policy = oldpolicy = -1; | 5112 | policy = oldpolicy = -1; |
5044 | __task_rq_unlock(rq); | 5113 | task_rq_unlock(rq, p, &flags); |
5045 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); | ||
5046 | goto recheck; | 5114 | goto recheck; |
5047 | } | 5115 | } |
5048 | on_rq = p->se.on_rq; | 5116 | on_rq = p->on_rq; |
5049 | running = task_current(rq, p); | 5117 | running = task_current(rq, p); |
5050 | if (on_rq) | 5118 | if (on_rq) |
5051 | deactivate_task(rq, p, 0); | 5119 | deactivate_task(rq, p, 0); |
@@ -5064,8 +5132,7 @@ recheck: | |||
5064 | activate_task(rq, p, 0); | 5132 | activate_task(rq, p, 0); |
5065 | 5133 | ||
5066 | check_class_changed(rq, p, prev_class, oldprio); | 5134 | check_class_changed(rq, p, prev_class, oldprio); |
5067 | __task_rq_unlock(rq); | 5135 | task_rq_unlock(rq, p, &flags); |
5068 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); | ||
5069 | 5136 | ||
5070 | rt_mutex_adjust_pi(p); | 5137 | rt_mutex_adjust_pi(p); |
5071 | 5138 | ||
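
The task_rq_unlock(rq, p, &flags) conversions throughout these hunks follow from task_rq_lock(), redefined elsewhere in this patch, now taking p->pi_lock outside rq->lock; the unlock side needs @p to release both. A sketch of what the pair presumably looks like, reconstructed from these call sites rather than quoted from the patch:

static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(p->pi_lock)
        __acquires(rq->lock)
{
        struct rq *rq;

        for (;;) {
                raw_spin_lock_irqsave(&p->pi_lock, *flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))   /* no migration raced us */
                        return rq;
                raw_spin_unlock(&rq->lock);
                raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
        }
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
        __releases(rq->lock)
        __releases(p->pi_lock)
{
        raw_spin_unlock(&rq->lock);
        raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

Holding p->pi_lock across the retry loop is what makes ->cpus_allowed and ->state stable for the wakeup and fork paths above.
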
@@ -5316,7 +5383,6 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) | |||
5316 | { | 5383 | { |
5317 | struct task_struct *p; | 5384 | struct task_struct *p; |
5318 | unsigned long flags; | 5385 | unsigned long flags; |
5319 | struct rq *rq; | ||
5320 | int retval; | 5386 | int retval; |
5321 | 5387 | ||
5322 | get_online_cpus(); | 5388 | get_online_cpus(); |
@@ -5331,9 +5397,9 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) | |||
5331 | if (retval) | 5397 | if (retval) |
5332 | goto out_unlock; | 5398 | goto out_unlock; |
5333 | 5399 | ||
5334 | rq = task_rq_lock(p, &flags); | 5400 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
5335 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); | 5401 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
5336 | task_rq_unlock(rq, &flags); | 5402 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
5337 | 5403 | ||
5338 | out_unlock: | 5404 | out_unlock: |
5339 | rcu_read_unlock(); | 5405 | rcu_read_unlock(); |
@@ -5658,7 +5724,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | |||
5658 | 5724 | ||
5659 | rq = task_rq_lock(p, &flags); | 5725 | rq = task_rq_lock(p, &flags); |
5660 | time_slice = p->sched_class->get_rr_interval(rq, p); | 5726 | time_slice = p->sched_class->get_rr_interval(rq, p); |
5661 | task_rq_unlock(rq, &flags); | 5727 | task_rq_unlock(rq, p, &flags); |
5662 | 5728 | ||
5663 | rcu_read_unlock(); | 5729 | rcu_read_unlock(); |
5664 | jiffies_to_timespec(time_slice, &t); | 5730 | jiffies_to_timespec(time_slice, &t); |
@@ -5776,17 +5842,14 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5776 | rcu_read_unlock(); | 5842 | rcu_read_unlock(); |
5777 | 5843 | ||
5778 | rq->curr = rq->idle = idle; | 5844 | rq->curr = rq->idle = idle; |
5779 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) | 5845 | #if defined(CONFIG_SMP) |
5780 | idle->oncpu = 1; | 5846 | idle->on_cpu = 1; |
5781 | #endif | 5847 | #endif |
5782 | raw_spin_unlock_irqrestore(&rq->lock, flags); | 5848 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
5783 | 5849 | ||
5784 | /* Set the preempt count _outside_ the spinlocks! */ | 5850 | /* Set the preempt count _outside_ the spinlocks! */ |
5785 | #if defined(CONFIG_PREEMPT) | ||
5786 | task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); | ||
5787 | #else | ||
5788 | task_thread_info(idle)->preempt_count = 0; | 5851 | task_thread_info(idle)->preempt_count = 0; |
5789 | #endif | 5852 | |
5790 | /* | 5853 | /* |
5791 | * The idle tasks have their own, simple scheduling class: | 5854 | * The idle tasks have their own, simple scheduling class: |
5792 | */ | 5855 | */ |
@@ -5881,26 +5944,17 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) | |||
5881 | unsigned int dest_cpu; | 5944 | unsigned int dest_cpu; |
5882 | int ret = 0; | 5945 | int ret = 0; |
5883 | 5946 | ||
5884 | /* | ||
5885 | * Serialize against TASK_WAKING so that ttwu() and wunt() can | ||
5886 | * drop the rq->lock and still rely on ->cpus_allowed. | ||
5887 | */ | ||
5888 | again: | ||
5889 | while (task_is_waking(p)) | ||
5890 | cpu_relax(); | ||
5891 | rq = task_rq_lock(p, &flags); | 5947 | rq = task_rq_lock(p, &flags); |
5892 | if (task_is_waking(p)) { | 5948 | |
5893 | task_rq_unlock(rq, &flags); | 5949 | if (cpumask_equal(&p->cpus_allowed, new_mask)) |
5894 | goto again; | 5950 | goto out; |
5895 | } | ||
5896 | 5951 | ||
5897 | if (!cpumask_intersects(new_mask, cpu_active_mask)) { | 5952 | if (!cpumask_intersects(new_mask, cpu_active_mask)) { |
5898 | ret = -EINVAL; | 5953 | ret = -EINVAL; |
5899 | goto out; | 5954 | goto out; |
5900 | } | 5955 | } |
5901 | 5956 | ||
5902 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && | 5957 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) { |
5903 | !cpumask_equal(&p->cpus_allowed, new_mask))) { | ||
5904 | ret = -EINVAL; | 5958 | ret = -EINVAL; |
5905 | goto out; | 5959 | goto out; |
5906 | } | 5960 | } |
@@ -5917,16 +5971,16 @@ again: | |||
5917 | goto out; | 5971 | goto out; |
5918 | 5972 | ||
5919 | dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); | 5973 | dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); |
5920 | if (migrate_task(p, rq)) { | 5974 | if (p->on_rq) { |
5921 | struct migration_arg arg = { p, dest_cpu }; | 5975 | struct migration_arg arg = { p, dest_cpu }; |
5922 | /* Need help from migration thread: drop lock and wait. */ | 5976 | /* Need help from migration thread: drop lock and wait. */ |
5923 | task_rq_unlock(rq, &flags); | 5977 | task_rq_unlock(rq, p, &flags); |
5924 | stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); | 5978 | stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); |
5925 | tlb_migrate_finish(p->mm); | 5979 | tlb_migrate_finish(p->mm); |
5926 | return 0; | 5980 | return 0; |
5927 | } | 5981 | } |
5928 | out: | 5982 | out: |
5929 | task_rq_unlock(rq, &flags); | 5983 | task_rq_unlock(rq, p, &flags); |
5930 | 5984 | ||
5931 | return ret; | 5985 | return ret; |
5932 | } | 5986 | } |
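
For reference, a minimal caller of the simplified set_cpus_allowed_ptr(); the scenario is hypothetical, the interfaces are real:

/* Pin an existing kthread to one CPU; returns 0 or -EINVAL as above. */
static int pin_task_to_cpu(struct task_struct *tsk, int cpu)
{
        return set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
}
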
@@ -5954,6 +6008,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
5954 | rq_src = cpu_rq(src_cpu); | 6008 | rq_src = cpu_rq(src_cpu); |
5955 | rq_dest = cpu_rq(dest_cpu); | 6009 | rq_dest = cpu_rq(dest_cpu); |
5956 | 6010 | ||
6011 | raw_spin_lock(&p->pi_lock); | ||
5957 | double_rq_lock(rq_src, rq_dest); | 6012 | double_rq_lock(rq_src, rq_dest); |
5958 | /* Already moved. */ | 6013 | /* Already moved. */ |
5959 | if (task_cpu(p) != src_cpu) | 6014 | if (task_cpu(p) != src_cpu) |
@@ -5966,7 +6021,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
5966 | * If we're not on a rq, the next wake-up will ensure we're | 6021 | * If we're not on a rq, the next wake-up will ensure we're |
5967 | * placed properly. | 6022 | * placed properly. |
5968 | */ | 6023 | */ |
5969 | if (p->se.on_rq) { | 6024 | if (p->on_rq) { |
5970 | deactivate_task(rq_src, p, 0); | 6025 | deactivate_task(rq_src, p, 0); |
5971 | set_task_cpu(p, dest_cpu); | 6026 | set_task_cpu(p, dest_cpu); |
5972 | activate_task(rq_dest, p, 0); | 6027 | activate_task(rq_dest, p, 0); |
@@ -5976,6 +6031,7 @@ done: | |||
5976 | ret = 1; | 6031 | ret = 1; |
5977 | fail: | 6032 | fail: |
5978 | double_rq_unlock(rq_src, rq_dest); | 6033 | double_rq_unlock(rq_src, rq_dest); |
6034 | raw_spin_unlock(&p->pi_lock); | ||
5979 | return ret; | 6035 | return ret; |
5980 | } | 6036 | } |
5981 | 6037 | ||
@@ -6316,6 +6372,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6316 | 6372 | ||
6317 | #ifdef CONFIG_HOTPLUG_CPU | 6373 | #ifdef CONFIG_HOTPLUG_CPU |
6318 | case CPU_DYING: | 6374 | case CPU_DYING: |
6375 | sched_ttwu_pending(); | ||
6319 | /* Update our root-domain */ | 6376 | /* Update our root-domain */ |
6320 | raw_spin_lock_irqsave(&rq->lock, flags); | 6377 | raw_spin_lock_irqsave(&rq->lock, flags); |
6321 | if (rq->rd) { | 6378 | if (rq->rd) { |
@@ -6394,6 +6451,8 @@ early_initcall(migration_init); | |||
6394 | 6451 | ||
6395 | #ifdef CONFIG_SMP | 6452 | #ifdef CONFIG_SMP |
6396 | 6453 | ||
6454 | static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ | ||
6455 | |||
6397 | #ifdef CONFIG_SCHED_DEBUG | 6456 | #ifdef CONFIG_SCHED_DEBUG |
6398 | 6457 | ||
6399 | static __read_mostly int sched_domain_debug_enabled; | 6458 | static __read_mostly int sched_domain_debug_enabled; |
@@ -6489,7 +6548,6 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6489 | 6548 | ||
6490 | static void sched_domain_debug(struct sched_domain *sd, int cpu) | 6549 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
6491 | { | 6550 | { |
6492 | cpumask_var_t groupmask; | ||
6493 | int level = 0; | 6551 | int level = 0; |
6494 | 6552 | ||
6495 | if (!sched_domain_debug_enabled) | 6553 | if (!sched_domain_debug_enabled) |
@@ -6502,20 +6560,14 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6502 | 6560 | ||
6503 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); | 6561 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
6504 | 6562 | ||
6505 | if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { | ||
6506 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); | ||
6507 | return; | ||
6508 | } | ||
6509 | |||
6510 | for (;;) { | 6563 | for (;;) { |
6511 | if (sched_domain_debug_one(sd, cpu, level, groupmask)) | 6564 | if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) |
6512 | break; | 6565 | break; |
6513 | level++; | 6566 | level++; |
6514 | sd = sd->parent; | 6567 | sd = sd->parent; |
6515 | if (!sd) | 6568 | if (!sd) |
6516 | break; | 6569 | break; |
6517 | } | 6570 | } |
6518 | free_cpumask_var(groupmask); | ||
6519 | } | 6571 | } |
6520 | #else /* !CONFIG_SCHED_DEBUG */ | 6572 | #else /* !CONFIG_SCHED_DEBUG */ |
6521 | # define sched_domain_debug(sd, cpu) do { } while (0) | 6573 | # define sched_domain_debug(sd, cpu) do { } while (0) |
@@ -6572,12 +6624,11 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6572 | return 1; | 6624 | return 1; |
6573 | } | 6625 | } |
6574 | 6626 | ||
6575 | static void free_rootdomain(struct root_domain *rd) | 6627 | static void free_rootdomain(struct rcu_head *rcu) |
6576 | { | 6628 | { |
6577 | synchronize_sched(); | 6629 | struct root_domain *rd = container_of(rcu, struct root_domain, rcu); |
6578 | 6630 | ||
6579 | cpupri_cleanup(&rd->cpupri); | 6631 | cpupri_cleanup(&rd->cpupri); |
6580 | |||
6581 | free_cpumask_var(rd->rto_mask); | 6632 | free_cpumask_var(rd->rto_mask); |
6582 | free_cpumask_var(rd->online); | 6633 | free_cpumask_var(rd->online); |
6583 | free_cpumask_var(rd->span); | 6634 | free_cpumask_var(rd->span); |
@@ -6618,7 +6669,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
6618 | raw_spin_unlock_irqrestore(&rq->lock, flags); | 6669 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
6619 | 6670 | ||
6620 | if (old_rd) | 6671 | if (old_rd) |
6621 | free_rootdomain(old_rd); | 6672 | call_rcu_sched(&old_rd->rcu, free_rootdomain); |
6622 | } | 6673 | } |
6623 | 6674 | ||
6624 | static int init_rootdomain(struct root_domain *rd) | 6675 | static int init_rootdomain(struct root_domain *rd) |
@@ -6669,6 +6720,25 @@ static struct root_domain *alloc_rootdomain(void) | |||
6669 | return rd; | 6720 | return rd; |
6670 | } | 6721 | } |
6671 | 6722 | ||
6723 | static void free_sched_domain(struct rcu_head *rcu) | ||
6724 | { | ||
6725 | struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); | ||
6726 | if (atomic_dec_and_test(&sd->groups->ref)) | ||
6727 | kfree(sd->groups); | ||
6728 | kfree(sd); | ||
6729 | } | ||
6730 | |||
6731 | static void destroy_sched_domain(struct sched_domain *sd, int cpu) | ||
6732 | { | ||
6733 | call_rcu(&sd->rcu, free_sched_domain); | ||
6734 | } | ||
6735 | |||
6736 | static void destroy_sched_domains(struct sched_domain *sd, int cpu) | ||
6737 | { | ||
6738 | for (; sd; sd = sd->parent) | ||
6739 | destroy_sched_domain(sd, cpu); | ||
6740 | } | ||
6741 | |||
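
free_sched_domain() above and free_rootdomain() earlier in this series both move to the same deferred-destruction pattern: embed an rcu_head in the object, unpublish the object, hand call_rcu() a callback, and recover the object with container_of() once a grace period has passed. A minimal self-contained sketch with a hypothetical 'foo' object:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int payload;
        struct rcu_head rcu;            /* must be embedded in the object */
};

static void foo_free_rcu(struct rcu_head *rcu)
{
        /* Runs after a grace period: no reader can still hold a pointer. */
        struct foo *f = container_of(rcu, struct foo, rcu);

        kfree(f);
}

static void foo_destroy(struct foo *f)
{
        /* Unpublish f first (e.g. via rcu_assign_pointer()), then defer: */
        call_rcu(&f->rcu, foo_free_rcu);
}

This replaces the old synchronous synchronize_sched() wait in free_rootdomain(), so teardown no longer blocks on a grace period.
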
6672 | /* | 6742 | /* |
6673 | * Attach the domain 'sd' to 'cpu' as its base domain. Callers must | 6743 | * Attach the domain 'sd' to 'cpu' as its base domain. Callers must |
6674 | * hold the hotplug lock. | 6744 | * hold the hotplug lock. |
@@ -6679,9 +6749,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |||
6679 | struct rq *rq = cpu_rq(cpu); | 6749 | struct rq *rq = cpu_rq(cpu); |
6680 | struct sched_domain *tmp; | 6750 | struct sched_domain *tmp; |
6681 | 6751 | ||
6682 | for (tmp = sd; tmp; tmp = tmp->parent) | ||
6683 | tmp->span_weight = cpumask_weight(sched_domain_span(tmp)); | ||
6684 | |||
6685 | /* Remove the sched domains which do not contribute to scheduling. */ | 6752 | /* Remove the sched domains which do not contribute to scheduling. */ |
6686 | for (tmp = sd; tmp; ) { | 6753 | for (tmp = sd; tmp; ) { |
6687 | struct sched_domain *parent = tmp->parent; | 6754 | struct sched_domain *parent = tmp->parent; |
@@ -6692,12 +6759,15 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |||
6692 | tmp->parent = parent->parent; | 6759 | tmp->parent = parent->parent; |
6693 | if (parent->parent) | 6760 | if (parent->parent) |
6694 | parent->parent->child = tmp; | 6761 | parent->parent->child = tmp; |
6762 | destroy_sched_domain(parent, cpu); | ||
6695 | } else | 6763 | } else |
6696 | tmp = tmp->parent; | 6764 | tmp = tmp->parent; |
6697 | } | 6765 | } |
6698 | 6766 | ||
6699 | if (sd && sd_degenerate(sd)) { | 6767 | if (sd && sd_degenerate(sd)) { |
6768 | tmp = sd; | ||
6700 | sd = sd->parent; | 6769 | sd = sd->parent; |
6770 | destroy_sched_domain(tmp, cpu); | ||
6701 | if (sd) | 6771 | if (sd) |
6702 | sd->child = NULL; | 6772 | sd->child = NULL; |
6703 | } | 6773 | } |
@@ -6705,7 +6775,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |||
6705 | sched_domain_debug(sd, cpu); | 6775 | sched_domain_debug(sd, cpu); |
6706 | 6776 | ||
6707 | rq_attach_root(rq, rd); | 6777 | rq_attach_root(rq, rd); |
6778 | tmp = rq->sd; | ||
6708 | rcu_assign_pointer(rq->sd, sd); | 6779 | rcu_assign_pointer(rq->sd, sd); |
6780 | destroy_sched_domains(tmp, cpu); | ||
6709 | } | 6781 | } |
6710 | 6782 | ||
6711 | /* cpus with isolated domains */ | 6783 | /* cpus with isolated domains */ |
@@ -6721,56 +6793,6 @@ static int __init isolated_cpu_setup(char *str) | |||
6721 | 6793 | ||
6722 | __setup("isolcpus=", isolated_cpu_setup); | 6794 | __setup("isolcpus=", isolated_cpu_setup); |
6723 | 6795 | ||
6724 | /* | ||
6725 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer | ||
6726 | * to a function which identifies what group(along with sched group) a CPU | ||
6727 | * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids | ||
6728 | * (due to the fact that we keep track of groups covered with a struct cpumask). | ||
6729 | * | ||
6730 | * init_sched_build_groups will build a circular linked list of the groups | ||
6731 | * covered by the given span, and will set each group's ->cpumask correctly, | ||
6732 | * and ->cpu_power to 0. | ||
6733 | */ | ||
6734 | static void | ||
6735 | init_sched_build_groups(const struct cpumask *span, | ||
6736 | const struct cpumask *cpu_map, | ||
6737 | int (*group_fn)(int cpu, const struct cpumask *cpu_map, | ||
6738 | struct sched_group **sg, | ||
6739 | struct cpumask *tmpmask), | ||
6740 | struct cpumask *covered, struct cpumask *tmpmask) | ||
6741 | { | ||
6742 | struct sched_group *first = NULL, *last = NULL; | ||
6743 | int i; | ||
6744 | |||
6745 | cpumask_clear(covered); | ||
6746 | |||
6747 | for_each_cpu(i, span) { | ||
6748 | struct sched_group *sg; | ||
6749 | int group = group_fn(i, cpu_map, &sg, tmpmask); | ||
6750 | int j; | ||
6751 | |||
6752 | if (cpumask_test_cpu(i, covered)) | ||
6753 | continue; | ||
6754 | |||
6755 | cpumask_clear(sched_group_cpus(sg)); | ||
6756 | sg->cpu_power = 0; | ||
6757 | |||
6758 | for_each_cpu(j, span) { | ||
6759 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) | ||
6760 | continue; | ||
6761 | |||
6762 | cpumask_set_cpu(j, covered); | ||
6763 | cpumask_set_cpu(j, sched_group_cpus(sg)); | ||
6764 | } | ||
6765 | if (!first) | ||
6766 | first = sg; | ||
6767 | if (last) | ||
6768 | last->next = sg; | ||
6769 | last = sg; | ||
6770 | } | ||
6771 | last->next = first; | ||
6772 | } | ||
6773 | |||
6774 | #define SD_NODES_PER_DOMAIN 16 | 6796 | #define SD_NODES_PER_DOMAIN 16 |
6775 | 6797 | ||
6776 | #ifdef CONFIG_NUMA | 6798 | #ifdef CONFIG_NUMA |
@@ -6787,7 +6809,7 @@ init_sched_build_groups(const struct cpumask *span, | |||
6787 | */ | 6809 | */ |
6788 | static int find_next_best_node(int node, nodemask_t *used_nodes) | 6810 | static int find_next_best_node(int node, nodemask_t *used_nodes) |
6789 | { | 6811 | { |
6790 | int i, n, val, min_val, best_node = 0; | 6812 | int i, n, val, min_val, best_node = -1; |
6791 | 6813 | ||
6792 | min_val = INT_MAX; | 6814 | min_val = INT_MAX; |
6793 | 6815 | ||
@@ -6811,7 +6833,8 @@ static int find_next_best_node(int node, nodemask_t *used_nodes) | |||
6811 | } | 6833 | } |
6812 | } | 6834 | } |
6813 | 6835 | ||
6814 | node_set(best_node, *used_nodes); | 6836 | if (best_node != -1) |
6837 | node_set(best_node, *used_nodes); | ||
6815 | return best_node; | 6838 | return best_node; |
6816 | } | 6839 | } |
6817 | 6840 | ||
@@ -6837,315 +6860,130 @@ static void sched_domain_node_span(int node, struct cpumask *span) | |||
6837 | 6860 | ||
6838 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { | 6861 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { |
6839 | int next_node = find_next_best_node(node, &used_nodes); | 6862 | int next_node = find_next_best_node(node, &used_nodes); |
6840 | 6863 | if (next_node < 0) | |
6864 | break; | ||
6841 | cpumask_or(span, span, cpumask_of_node(next_node)); | 6865 | cpumask_or(span, span, cpumask_of_node(next_node)); |
6842 | } | 6866 | } |
6843 | } | 6867 | } |
6868 | |||
6869 | static const struct cpumask *cpu_node_mask(int cpu) | ||
6870 | { | ||
6871 | lockdep_assert_held(&sched_domains_mutex); | ||
6872 | |||
6873 | sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask); | ||
6874 | |||
6875 | return sched_domains_tmpmask; | ||
6876 | } | ||
6877 | |||
6878 | static const struct cpumask *cpu_allnodes_mask(int cpu) | ||
6879 | { | ||
6880 | return cpu_possible_mask; | ||
6881 | } | ||
6844 | #endif /* CONFIG_NUMA */ | 6882 | #endif /* CONFIG_NUMA */ |
6845 | 6883 | ||
6846 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; | 6884 | static const struct cpumask *cpu_cpu_mask(int cpu) |
6885 | { | ||
6886 | return cpumask_of_node(cpu_to_node(cpu)); | ||
6887 | } | ||
6847 | 6888 | ||
6848 | /* | 6889 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; |
6849 | * The cpus mask in sched_group and sched_domain hangs off the end. | ||
6850 | * | ||
6851 | * ( See the the comments in include/linux/sched.h:struct sched_group | ||
6852 | * and struct sched_domain. ) | ||
6853 | */ | ||
6854 | struct static_sched_group { | ||
6855 | struct sched_group sg; | ||
6856 | DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); | ||
6857 | }; | ||
6858 | 6890 | ||
6859 | struct static_sched_domain { | 6891 | struct sd_data { |
6860 | struct sched_domain sd; | 6892 | struct sched_domain **__percpu sd; |
6861 | DECLARE_BITMAP(span, CONFIG_NR_CPUS); | 6893 | struct sched_group **__percpu sg; |
6862 | }; | 6894 | }; |
6863 | 6895 | ||
6864 | struct s_data { | 6896 | struct s_data { |
6865 | #ifdef CONFIG_NUMA | 6897 | struct sched_domain ** __percpu sd; |
6866 | int sd_allnodes; | ||
6867 | cpumask_var_t domainspan; | ||
6868 | cpumask_var_t covered; | ||
6869 | cpumask_var_t notcovered; | ||
6870 | #endif | ||
6871 | cpumask_var_t nodemask; | ||
6872 | cpumask_var_t this_sibling_map; | ||
6873 | cpumask_var_t this_core_map; | ||
6874 | cpumask_var_t this_book_map; | ||
6875 | cpumask_var_t send_covered; | ||
6876 | cpumask_var_t tmpmask; | ||
6877 | struct sched_group **sched_group_nodes; | ||
6878 | struct root_domain *rd; | 6898 | struct root_domain *rd; |
6879 | }; | 6899 | }; |
6880 | 6900 | ||
6881 | enum s_alloc { | 6901 | enum s_alloc { |
6882 | sa_sched_groups = 0, | ||
6883 | sa_rootdomain, | 6902 | sa_rootdomain, |
6884 | sa_tmpmask, | 6903 | sa_sd, |
6885 | sa_send_covered, | 6904 | sa_sd_storage, |
6886 | sa_this_book_map, | ||
6887 | sa_this_core_map, | ||
6888 | sa_this_sibling_map, | ||
6889 | sa_nodemask, | ||
6890 | sa_sched_group_nodes, | ||
6891 | #ifdef CONFIG_NUMA | ||
6892 | sa_notcovered, | ||
6893 | sa_covered, | ||
6894 | sa_domainspan, | ||
6895 | #endif | ||
6896 | sa_none, | 6905 | sa_none, |
6897 | }; | 6906 | }; |
6898 | 6907 | ||
6899 | /* | 6908 | struct sched_domain_topology_level; |
6900 | * SMT sched-domains: | ||
6901 | */ | ||
6902 | #ifdef CONFIG_SCHED_SMT | ||
6903 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); | ||
6904 | static DEFINE_PER_CPU(struct static_sched_group, sched_groups); | ||
6905 | 6909 | ||
6906 | static int | 6910 | typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu); |
6907 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, | 6911 | typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); |
6908 | struct sched_group **sg, struct cpumask *unused) | ||
6909 | { | ||
6910 | if (sg) | ||
6911 | *sg = &per_cpu(sched_groups, cpu).sg; | ||
6912 | return cpu; | ||
6913 | } | ||
6914 | #endif /* CONFIG_SCHED_SMT */ | ||
6915 | 6912 | ||
6916 | /* | 6913 | struct sched_domain_topology_level { |
6917 | * multi-core sched-domains: | 6914 | sched_domain_init_f init; |
6918 | */ | 6915 | sched_domain_mask_f mask; |
6919 | #ifdef CONFIG_SCHED_MC | 6916 | struct sd_data data; |
6920 | static DEFINE_PER_CPU(struct static_sched_domain, core_domains); | 6917 | }; |
6921 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); | ||
6922 | |||
6923 | static int | ||
6924 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, | ||
6925 | struct sched_group **sg, struct cpumask *mask) | ||
6926 | { | ||
6927 | int group; | ||
6928 | #ifdef CONFIG_SCHED_SMT | ||
6929 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); | ||
6930 | group = cpumask_first(mask); | ||
6931 | #else | ||
6932 | group = cpu; | ||
6933 | #endif | ||
6934 | if (sg) | ||
6935 | *sg = &per_cpu(sched_group_core, group).sg; | ||
6936 | return group; | ||
6937 | } | ||
6938 | #endif /* CONFIG_SCHED_MC */ | ||
6939 | 6918 | ||
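
struct sched_domain_topology_level turns domain construction into a table walk: each level pairs an init function with a mask function (such as cpu_cpu_mask(), cpu_node_mask() and cpu_allnodes_mask() above) plus per-level sd_data storage. The table itself is defined outside this excerpt; presumably something along these lines, where the sd_init_*() names are assumptions and levels run from the smallest span to the largest:

static struct sched_domain_topology_level default_topology[] = {
        { sd_init_CPU, cpu_cpu_mask, },
#ifdef CONFIG_NUMA
        { sd_init_NODE, cpu_node_mask, },
        { sd_init_ALLNODES, cpu_allnodes_mask, },
#endif
        { NULL, },                      /* terminator */
};
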
6940 | /* | 6919 | /* |
6941 | * book sched-domains: | 6920 | * Assumes the sched_domain tree is fully constructed |
6942 | */ | 6921 | */ |
6943 | #ifdef CONFIG_SCHED_BOOK | 6922 | static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) |
6944 | static DEFINE_PER_CPU(struct static_sched_domain, book_domains); | ||
6945 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_book); | ||
6946 | |||
6947 | static int | ||
6948 | cpu_to_book_group(int cpu, const struct cpumask *cpu_map, | ||
6949 | struct sched_group **sg, struct cpumask *mask) | ||
6950 | { | 6923 | { |
6951 | int group = cpu; | 6924 | struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); |
6952 | #ifdef CONFIG_SCHED_MC | 6925 | struct sched_domain *child = sd->child; |
6953 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); | ||
6954 | group = cpumask_first(mask); | ||
6955 | #elif defined(CONFIG_SCHED_SMT) | ||
6956 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); | ||
6957 | group = cpumask_first(mask); | ||
6958 | #endif | ||
6959 | if (sg) | ||
6960 | *sg = &per_cpu(sched_group_book, group).sg; | ||
6961 | return group; | ||
6962 | } | ||
6963 | #endif /* CONFIG_SCHED_BOOK */ | ||
6964 | 6926 | ||
6965 | static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); | 6927 | if (child) |
6966 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); | 6928 | cpu = cpumask_first(sched_domain_span(child)); |
6967 | 6929 | ||
6968 | static int | ||
6969 | cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, | ||
6970 | struct sched_group **sg, struct cpumask *mask) | ||
6971 | { | ||
6972 | int group; | ||
6973 | #ifdef CONFIG_SCHED_BOOK | ||
6974 | cpumask_and(mask, cpu_book_mask(cpu), cpu_map); | ||
6975 | group = cpumask_first(mask); | ||
6976 | #elif defined(CONFIG_SCHED_MC) | ||
6977 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); | ||
6978 | group = cpumask_first(mask); | ||
6979 | #elif defined(CONFIG_SCHED_SMT) | ||
6980 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); | ||
6981 | group = cpumask_first(mask); | ||
6982 | #else | ||
6983 | group = cpu; | ||
6984 | #endif | ||
6985 | if (sg) | 6930 | if (sg) |
6986 | *sg = &per_cpu(sched_group_phys, group).sg; | 6931 | *sg = *per_cpu_ptr(sdd->sg, cpu); |
6987 | return group; | 6932 | |
6933 | return cpu; | ||
6988 | } | 6934 | } |
6989 | 6935 | ||
6990 | #ifdef CONFIG_NUMA | ||
6991 | /* | 6936 | /* |
6992 | * The init_sched_build_groups can't handle what we want to do with node | 6937 | * build_sched_groups takes the cpumask we wish to span, and a pointer |
6993 | * groups, so roll our own. Now each node has its own list of groups which | 6938 | * to a function which identifies what group (along with the sched group) a CPU |
6994 | * gets dynamically allocated. | 6939 | * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids |
6940 | * (since we track the CPUs already covered with a struct cpumask). | ||
6941 | * | ||
6942 | * build_sched_groups will build a circular linked list of the groups | ||
6943 | * covered by the given span, and will set each group's ->cpumask correctly, | ||
6944 | * and ->cpu_power to 0. | ||
6995 | */ | 6945 | */ |
6996 | static DEFINE_PER_CPU(struct static_sched_domain, node_domains); | 6946 | static void |
6997 | static struct sched_group ***sched_group_nodes_bycpu; | 6947 | build_sched_groups(struct sched_domain *sd) |
6998 | |||
6999 | static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains); | ||
7000 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); | ||
7001 | |||
7002 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, | ||
7003 | struct sched_group **sg, | ||
7004 | struct cpumask *nodemask) | ||
7005 | { | ||
7006 | int group; | ||
7007 | |||
7008 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); | ||
7009 | group = cpumask_first(nodemask); | ||
7010 | |||
7011 | if (sg) | ||
7012 | *sg = &per_cpu(sched_group_allnodes, group).sg; | ||
7013 | return group; | ||
7014 | } | ||
7015 | |||
7016 | static void init_numa_sched_groups_power(struct sched_group *group_head) | ||
7017 | { | ||
7018 | struct sched_group *sg = group_head; | ||
7019 | int j; | ||
7020 | |||
7021 | if (!sg) | ||
7022 | return; | ||
7023 | do { | ||
7024 | for_each_cpu(j, sched_group_cpus(sg)) { | ||
7025 | struct sched_domain *sd; | ||
7026 | |||
7027 | sd = &per_cpu(phys_domains, j).sd; | ||
7028 | if (j != group_first_cpu(sd->groups)) { | ||
7029 | /* | ||
7030 | * Only add "power" once for each | ||
7031 | * physical package. | ||
7032 | */ | ||
7033 | continue; | ||
7034 | } | ||
7035 | |||
7036 | sg->cpu_power += sd->groups->cpu_power; | ||
7037 | } | ||
7038 | sg = sg->next; | ||
7039 | } while (sg != group_head); | ||
7040 | } | ||
7041 | |||
7042 | static int build_numa_sched_groups(struct s_data *d, | ||
7043 | const struct cpumask *cpu_map, int num) | ||
7044 | { | 6948 | { |
7045 | struct sched_domain *sd; | 6949 | struct sched_group *first = NULL, *last = NULL; |
7046 | struct sched_group *sg, *prev; | 6950 | struct sd_data *sdd = sd->private; |
7047 | int n, j; | 6951 | const struct cpumask *span = sched_domain_span(sd); |
7048 | 6952 | struct cpumask *covered; | |
7049 | cpumask_clear(d->covered); | 6953 | int i; |
7050 | cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map); | ||
7051 | if (cpumask_empty(d->nodemask)) { | ||
7052 | d->sched_group_nodes[num] = NULL; | ||
7053 | goto out; | ||
7054 | } | ||
7055 | |||
7056 | sched_domain_node_span(num, d->domainspan); | ||
7057 | cpumask_and(d->domainspan, d->domainspan, cpu_map); | ||
7058 | |||
7059 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), | ||
7060 | GFP_KERNEL, num); | ||
7061 | if (!sg) { | ||
7062 | printk(KERN_WARNING "Can not alloc domain group for node %d\n", | ||
7063 | num); | ||
7064 | return -ENOMEM; | ||
7065 | } | ||
7066 | d->sched_group_nodes[num] = sg; | ||
7067 | |||
7068 | for_each_cpu(j, d->nodemask) { | ||
7069 | sd = &per_cpu(node_domains, j).sd; | ||
7070 | sd->groups = sg; | ||
7071 | } | ||
7072 | |||
7073 | sg->cpu_power = 0; | ||
7074 | cpumask_copy(sched_group_cpus(sg), d->nodemask); | ||
7075 | sg->next = sg; | ||
7076 | cpumask_or(d->covered, d->covered, d->nodemask); | ||
7077 | 6954 | ||
7078 | prev = sg; | 6955 | lockdep_assert_held(&sched_domains_mutex); |
7079 | for (j = 0; j < nr_node_ids; j++) { | 6956 | covered = sched_domains_tmpmask; |
7080 | n = (num + j) % nr_node_ids; | ||
7081 | cpumask_complement(d->notcovered, d->covered); | ||
7082 | cpumask_and(d->tmpmask, d->notcovered, cpu_map); | ||
7083 | cpumask_and(d->tmpmask, d->tmpmask, d->domainspan); | ||
7084 | if (cpumask_empty(d->tmpmask)) | ||
7085 | break; | ||
7086 | cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n)); | ||
7087 | if (cpumask_empty(d->tmpmask)) | ||
7088 | continue; | ||
7089 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), | ||
7090 | GFP_KERNEL, num); | ||
7091 | if (!sg) { | ||
7092 | printk(KERN_WARNING | ||
7093 | "Can not alloc domain group for node %d\n", j); | ||
7094 | return -ENOMEM; | ||
7095 | } | ||
7096 | sg->cpu_power = 0; | ||
7097 | cpumask_copy(sched_group_cpus(sg), d->tmpmask); | ||
7098 | sg->next = prev->next; | ||
7099 | cpumask_or(d->covered, d->covered, d->tmpmask); | ||
7100 | prev->next = sg; | ||
7101 | prev = sg; | ||
7102 | } | ||
7103 | out: | ||
7104 | return 0; | ||
7105 | } | ||
7106 | #endif /* CONFIG_NUMA */ | ||
7107 | 6957 | ||
7108 | #ifdef CONFIG_NUMA | 6958 | cpumask_clear(covered); |
7109 | /* Free memory allocated for various sched_group structures */ | ||
7110 | static void free_sched_groups(const struct cpumask *cpu_map, | ||
7111 | struct cpumask *nodemask) | ||
7112 | { | ||
7113 | int cpu, i; | ||
7114 | 6959 | ||
7115 | for_each_cpu(cpu, cpu_map) { | 6960 | for_each_cpu(i, span) { |
7116 | struct sched_group **sched_group_nodes | 6961 | struct sched_group *sg; |
7117 | = sched_group_nodes_bycpu[cpu]; | 6962 | int group = get_group(i, sdd, &sg); |
6963 | int j; | ||
7118 | 6964 | ||
7119 | if (!sched_group_nodes) | 6965 | if (cpumask_test_cpu(i, covered)) |
7120 | continue; | 6966 | continue; |
7121 | 6967 | ||
7122 | for (i = 0; i < nr_node_ids; i++) { | 6968 | cpumask_clear(sched_group_cpus(sg)); |
7123 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | 6969 | sg->cpu_power = 0; |
7124 | 6970 | ||
7125 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); | 6971 | for_each_cpu(j, span) { |
7126 | if (cpumask_empty(nodemask)) | 6972 | if (get_group(j, sdd, NULL) != group) |
7127 | continue; | 6973 | continue; |
7128 | 6974 | ||
7129 | if (sg == NULL) | 6975 | cpumask_set_cpu(j, covered); |
7130 | continue; | 6976 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
7131 | sg = sg->next; | ||
7132 | next_sg: | ||
7133 | oldsg = sg; | ||
7134 | sg = sg->next; | ||
7135 | kfree(oldsg); | ||
7136 | if (oldsg != sched_group_nodes[i]) | ||
7137 | goto next_sg; | ||
7138 | } | 6977 | } |
7139 | kfree(sched_group_nodes); | 6978 | |
7140 | sched_group_nodes_bycpu[cpu] = NULL; | 6979 | if (!first) |
6980 | first = sg; | ||
6981 | if (last) | ||
6982 | last->next = sg; | ||
6983 | last = sg; | ||
7141 | } | 6984 | } |
6985 | last->next = first; | ||
7142 | } | 6986 | } |
7143 | #else /* !CONFIG_NUMA */ | ||
7144 | static void free_sched_groups(const struct cpumask *cpu_map, | ||
7145 | struct cpumask *nodemask) | ||
7146 | { | ||
7147 | } | ||
7148 | #endif /* CONFIG_NUMA */ | ||
7149 | 6987 | ||
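
The new build_sched_groups() above replaces the per-level #ifdef helpers with a single covered-set sweep: walk the span, start a group at each CPU not yet covered, pull in every CPU that get_group() maps to the same representative, and stitch the groups into a circular list. A minimal user-space sketch of the same sweep, assuming toy types (a plain bitmask stands in for struct cpumask, and group_of() for get_group(); none of these names are kernel API):

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 8

struct group {
	unsigned long mask;		/* stands in for sched_group_cpus(sg) */
	struct group *next;		/* circular, like sg->next */
};

static int group_of(int cpu) { return cpu / 2; }	/* e.g. SMT pairs */

int main(void)
{
	unsigned long covered = 0;
	struct group *first = NULL, *last = NULL, *g;

	for (int i = 0; i < NCPUS; i++) {
		if (covered & (1UL << i))
			continue;		/* CPU already in some group */

		g = calloc(1, sizeof(*g));
		if (!g)
			return 1;
		for (int j = 0; j < NCPUS; j++) {
			if (group_of(j) != group_of(i))
				continue;
			covered |= 1UL << j;	/* mark and collect peers */
			g->mask |= 1UL << j;
		}
		if (!first)
			first = g;
		if (last)
			last->next = g;
		last = g;
	}
	last->next = first;			/* close the circle */

	g = first;
	do {
		printf("group mask: %#lx\n", g->mask);
		g = g->next;
	} while (g != first);
	return 0;
}

The circular ->next link is the point of the exercise: load balancing can start the walk at any group and visit every peer without needing a separate list head.
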
7150 | /* | 6988 | /* |
7151 | * Initialize sched groups cpu_power. | 6989 | * Initialize sched groups cpu_power. |
@@ -7159,11 +6997,6 @@ static void free_sched_groups(const struct cpumask *cpu_map, | |||
7159 | */ | 6997 | */ |
7160 | static void init_sched_groups_power(int cpu, struct sched_domain *sd) | 6998 | static void init_sched_groups_power(int cpu, struct sched_domain *sd) |
7161 | { | 6999 | { |
7162 | struct sched_domain *child; | ||
7163 | struct sched_group *group; | ||
7164 | long power; | ||
7165 | int weight; | ||
7166 | |||
7167 | WARN_ON(!sd || !sd->groups); | 7000 | WARN_ON(!sd || !sd->groups); |
7168 | 7001 | ||
7169 | if (cpu != group_first_cpu(sd->groups)) | 7002 | if (cpu != group_first_cpu(sd->groups)) |
@@ -7171,36 +7004,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
7171 | 7004 | ||
7172 | sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); | 7005 | sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); |
7173 | 7006 | ||
7174 | child = sd->child; | 7007 | update_group_power(sd, cpu); |
7175 | |||
7176 | sd->groups->cpu_power = 0; | ||
7177 | |||
7178 | if (!child) { | ||
7179 | power = SCHED_LOAD_SCALE; | ||
7180 | weight = cpumask_weight(sched_domain_span(sd)); | ||
7181 | /* | ||
7182 | * SMT siblings share the power of a single core. | ||
7183 | * Usually multiple threads get a better yield out of | ||
7184 | * that one core than a single thread would have, | ||
7185 | * reflect that in sd->smt_gain. | ||
7186 | */ | ||
7187 | if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { | ||
7188 | power *= sd->smt_gain; | ||
7189 | power /= weight; | ||
7190 | power >>= SCHED_LOAD_SHIFT; | ||
7191 | } | ||
7192 | sd->groups->cpu_power += power; | ||
7193 | return; | ||
7194 | } | ||
7195 | |||
7196 | /* | ||
7197 | * Add cpu_power of each child group to this groups cpu_power. | ||
7198 | */ | ||
7199 | group = child->groups; | ||
7200 | do { | ||
7201 | sd->groups->cpu_power += group->cpu_power; | ||
7202 | group = group->next; | ||
7203 | } while (group != child->groups); | ||
7204 | } | 7008 | } |
7205 | 7009 | ||
7206 | /* | 7010 | /* |
@@ -7214,15 +7018,15 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
7214 | # define SD_INIT_NAME(sd, type) do { } while (0) | 7018 | # define SD_INIT_NAME(sd, type) do { } while (0) |
7215 | #endif | 7019 | #endif |
7216 | 7020 | ||
7217 | #define SD_INIT(sd, type) sd_init_##type(sd) | 7021 | #define SD_INIT_FUNC(type) \ |
7218 | 7022 | static noinline struct sched_domain * \ | |
7219 | #define SD_INIT_FUNC(type) \ | 7023 | sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \ |
7220 | static noinline void sd_init_##type(struct sched_domain *sd) \ | 7024 | { \ |
7221 | { \ | 7025 | struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \ |
7222 | memset(sd, 0, sizeof(*sd)); \ | 7026 | *sd = SD_##type##_INIT; \ |
7223 | *sd = SD_##type##_INIT; \ | 7027 | SD_INIT_NAME(sd, type); \ |
7224 | sd->level = SD_LV_##type; \ | 7028 | sd->private = &tl->data; \ |
7225 | SD_INIT_NAME(sd, type); \ | 7029 | return sd; \ |
7226 | } | 7030 | } |
7227 | 7031 | ||
7228 | SD_INIT_FUNC(CPU) | 7032 | SD_INIT_FUNC(CPU) |
@@ -7241,13 +7045,14 @@ SD_INIT_FUNC(CPU) | |||
7241 | #endif | 7045 | #endif |
7242 | 7046 | ||
7243 | static int default_relax_domain_level = -1; | 7047 | static int default_relax_domain_level = -1; |
7048 | int sched_domain_level_max; | ||
7244 | 7049 | ||
7245 | static int __init setup_relax_domain_level(char *str) | 7050 | static int __init setup_relax_domain_level(char *str) |
7246 | { | 7051 | { |
7247 | unsigned long val; | 7052 | unsigned long val; |
7248 | 7053 | ||
7249 | val = simple_strtoul(str, NULL, 0); | 7054 | val = simple_strtoul(str, NULL, 0); |
7250 | if (val < SD_LV_MAX) | 7055 | if (val < sched_domain_level_max) |
7251 | default_relax_domain_level = val; | 7056 | default_relax_domain_level = val; |
7252 | 7057 | ||
7253 | return 1; | 7058 | return 1; |
@@ -7275,37 +7080,20 @@ static void set_domain_attribute(struct sched_domain *sd, | |||
7275 | } | 7080 | } |
7276 | } | 7081 | } |
7277 | 7082 | ||
7083 | static void __sdt_free(const struct cpumask *cpu_map); | ||
7084 | static int __sdt_alloc(const struct cpumask *cpu_map); | ||
7085 | |||
7278 | static void __free_domain_allocs(struct s_data *d, enum s_alloc what, | 7086 | static void __free_domain_allocs(struct s_data *d, enum s_alloc what, |
7279 | const struct cpumask *cpu_map) | 7087 | const struct cpumask *cpu_map) |
7280 | { | 7088 | { |
7281 | switch (what) { | 7089 | switch (what) { |
7282 | case sa_sched_groups: | ||
7283 | free_sched_groups(cpu_map, d->tmpmask); /* fall through */ | ||
7284 | d->sched_group_nodes = NULL; | ||
7285 | case sa_rootdomain: | 7090 | case sa_rootdomain: |
7286 | free_rootdomain(d->rd); /* fall through */ | 7091 | if (!atomic_read(&d->rd->refcount)) |
7287 | case sa_tmpmask: | 7092 | free_rootdomain(&d->rd->rcu); /* fall through */ |
7288 | free_cpumask_var(d->tmpmask); /* fall through */ | 7093 | case sa_sd: |
7289 | case sa_send_covered: | 7094 | free_percpu(d->sd); /* fall through */ |
7290 | free_cpumask_var(d->send_covered); /* fall through */ | 7095 | case sa_sd_storage: |
7291 | case sa_this_book_map: | 7096 | __sdt_free(cpu_map); /* fall through */ |
7292 | free_cpumask_var(d->this_book_map); /* fall through */ | ||
7293 | case sa_this_core_map: | ||
7294 | free_cpumask_var(d->this_core_map); /* fall through */ | ||
7295 | case sa_this_sibling_map: | ||
7296 | free_cpumask_var(d->this_sibling_map); /* fall through */ | ||
7297 | case sa_nodemask: | ||
7298 | free_cpumask_var(d->nodemask); /* fall through */ | ||
7299 | case sa_sched_group_nodes: | ||
7300 | #ifdef CONFIG_NUMA | ||
7301 | kfree(d->sched_group_nodes); /* fall through */ | ||
7302 | case sa_notcovered: | ||
7303 | free_cpumask_var(d->notcovered); /* fall through */ | ||
7304 | case sa_covered: | ||
7305 | free_cpumask_var(d->covered); /* fall through */ | ||
7306 | case sa_domainspan: | ||
7307 | free_cpumask_var(d->domainspan); /* fall through */ | ||
7308 | #endif | ||
7309 | case sa_none: | 7097 | case sa_none: |
7310 | break; | 7098 | break; |
7311 | } | 7099 | } |
@@ -7314,308 +7102,212 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what, | |||
7314 | static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, | 7102 | static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, |
7315 | const struct cpumask *cpu_map) | 7103 | const struct cpumask *cpu_map) |
7316 | { | 7104 | { |
7317 | #ifdef CONFIG_NUMA | 7105 | memset(d, 0, sizeof(*d)); |
7318 | if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL)) | 7106 | |
7319 | return sa_none; | 7107 | if (__sdt_alloc(cpu_map)) |
7320 | if (!alloc_cpumask_var(&d->covered, GFP_KERNEL)) | 7108 | return sa_sd_storage; |
7321 | return sa_domainspan; | 7109 | d->sd = alloc_percpu(struct sched_domain *); |
7322 | if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL)) | 7110 | if (!d->sd) |
7323 | return sa_covered; | 7111 | return sa_sd_storage; |
7324 | /* Allocate the per-node list of sched groups */ | ||
7325 | d->sched_group_nodes = kcalloc(nr_node_ids, | ||
7326 | sizeof(struct sched_group *), GFP_KERNEL); | ||
7327 | if (!d->sched_group_nodes) { | ||
7328 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | ||
7329 | return sa_notcovered; | ||
7330 | } | ||
7331 | sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes; | ||
7332 | #endif | ||
7333 | if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL)) | ||
7334 | return sa_sched_group_nodes; | ||
7335 | if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL)) | ||
7336 | return sa_nodemask; | ||
7337 | if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL)) | ||
7338 | return sa_this_sibling_map; | ||
7339 | if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL)) | ||
7340 | return sa_this_core_map; | ||
7341 | if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL)) | ||
7342 | return sa_this_book_map; | ||
7343 | if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL)) | ||
7344 | return sa_send_covered; | ||
7345 | d->rd = alloc_rootdomain(); | 7112 | d->rd = alloc_rootdomain(); |
7346 | if (!d->rd) { | 7113 | if (!d->rd) |
7347 | printk(KERN_WARNING "Cannot alloc root domain\n"); | 7114 | return sa_sd; |
7348 | return sa_tmpmask; | ||
7349 | } | ||
7350 | return sa_rootdomain; | 7115 | return sa_rootdomain; |
7351 | } | 7116 | } |
7352 | 7117 | ||
7353 | static struct sched_domain *__build_numa_sched_domains(struct s_data *d, | 7118 | /* |
7354 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i) | 7119 | * NULL the sd_data elements we've used to build the sched_domain and |
7120 | * sched_group structure so that the subsequent __free_domain_allocs() | ||
7121 | * will not free the data we're using. | ||
7122 | */ | ||
7123 | static void claim_allocations(int cpu, struct sched_domain *sd) | ||
7355 | { | 7124 | { |
7356 | struct sched_domain *sd = NULL; | 7125 | struct sd_data *sdd = sd->private; |
7357 | #ifdef CONFIG_NUMA | 7126 | struct sched_group *sg = sd->groups; |
7358 | struct sched_domain *parent; | ||
7359 | |||
7360 | d->sd_allnodes = 0; | ||
7361 | if (cpumask_weight(cpu_map) > | ||
7362 | SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) { | ||
7363 | sd = &per_cpu(allnodes_domains, i).sd; | ||
7364 | SD_INIT(sd, ALLNODES); | ||
7365 | set_domain_attribute(sd, attr); | ||
7366 | cpumask_copy(sched_domain_span(sd), cpu_map); | ||
7367 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask); | ||
7368 | d->sd_allnodes = 1; | ||
7369 | } | ||
7370 | parent = sd; | ||
7371 | |||
7372 | sd = &per_cpu(node_domains, i).sd; | ||
7373 | SD_INIT(sd, NODE); | ||
7374 | set_domain_attribute(sd, attr); | ||
7375 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); | ||
7376 | sd->parent = parent; | ||
7377 | if (parent) | ||
7378 | parent->child = sd; | ||
7379 | cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map); | ||
7380 | #endif | ||
7381 | return sd; | ||
7382 | } | ||
7383 | 7127 | ||
7384 | static struct sched_domain *__build_cpu_sched_domain(struct s_data *d, | 7128 | WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); |
7385 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, | 7129 | *per_cpu_ptr(sdd->sd, cpu) = NULL; |
7386 | struct sched_domain *parent, int i) | ||
7387 | { | ||
7388 | struct sched_domain *sd; | ||
7389 | sd = &per_cpu(phys_domains, i).sd; | ||
7390 | SD_INIT(sd, CPU); | ||
7391 | set_domain_attribute(sd, attr); | ||
7392 | cpumask_copy(sched_domain_span(sd), d->nodemask); | ||
7393 | sd->parent = parent; | ||
7394 | if (parent) | ||
7395 | parent->child = sd; | ||
7396 | cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask); | ||
7397 | return sd; | ||
7398 | } | ||
7399 | 7130 | ||
7400 | static struct sched_domain *__build_book_sched_domain(struct s_data *d, | 7131 | if (cpu == cpumask_first(sched_group_cpus(sg))) { |
7401 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, | 7132 | WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg); |
7402 | struct sched_domain *parent, int i) | 7133 | *per_cpu_ptr(sdd->sg, cpu) = NULL; |
7403 | { | 7134 | } |
7404 | struct sched_domain *sd = parent; | ||
7405 | #ifdef CONFIG_SCHED_BOOK | ||
7406 | sd = &per_cpu(book_domains, i).sd; | ||
7407 | SD_INIT(sd, BOOK); | ||
7408 | set_domain_attribute(sd, attr); | ||
7409 | cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i)); | ||
7410 | sd->parent = parent; | ||
7411 | parent->child = sd; | ||
7412 | cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask); | ||
7413 | #endif | ||
7414 | return sd; | ||
7415 | } | 7135 | } |
7416 | 7136 | ||
7417 | static struct sched_domain *__build_mc_sched_domain(struct s_data *d, | 7137 | #ifdef CONFIG_SCHED_SMT |
7418 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, | 7138 | static const struct cpumask *cpu_smt_mask(int cpu) |
7419 | struct sched_domain *parent, int i) | ||
7420 | { | 7139 | { |
7421 | struct sched_domain *sd = parent; | 7140 | return topology_thread_cpumask(cpu); |
7422 | #ifdef CONFIG_SCHED_MC | ||
7423 | sd = &per_cpu(core_domains, i).sd; | ||
7424 | SD_INIT(sd, MC); | ||
7425 | set_domain_attribute(sd, attr); | ||
7426 | cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i)); | ||
7427 | sd->parent = parent; | ||
7428 | parent->child = sd; | ||
7429 | cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask); | ||
7430 | #endif | ||
7431 | return sd; | ||
7432 | } | 7141 | } |
7433 | |||
7434 | static struct sched_domain *__build_smt_sched_domain(struct s_data *d, | ||
7435 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, | ||
7436 | struct sched_domain *parent, int i) | ||
7437 | { | ||
7438 | struct sched_domain *sd = parent; | ||
7439 | #ifdef CONFIG_SCHED_SMT | ||
7440 | sd = &per_cpu(cpu_domains, i).sd; | ||
7441 | SD_INIT(sd, SIBLING); | ||
7442 | set_domain_attribute(sd, attr); | ||
7443 | cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i)); | ||
7444 | sd->parent = parent; | ||
7445 | parent->child = sd; | ||
7446 | cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask); | ||
7447 | #endif | 7142 | #endif |
7448 | return sd; | ||
7449 | } | ||
7450 | 7143 | ||
7451 | static void build_sched_groups(struct s_data *d, enum sched_domain_level l, | 7144 | /* |
7452 | const struct cpumask *cpu_map, int cpu) | 7145 | * Topology list, bottom-up. |
7453 | { | 7146 | */ |
7454 | switch (l) { | 7147 | static struct sched_domain_topology_level default_topology[] = { |
7455 | #ifdef CONFIG_SCHED_SMT | 7148 | #ifdef CONFIG_SCHED_SMT |
7456 | case SD_LV_SIBLING: /* set up CPU (sibling) groups */ | 7149 | { sd_init_SIBLING, cpu_smt_mask, }, |
7457 | cpumask_and(d->this_sibling_map, cpu_map, | ||
7458 | topology_thread_cpumask(cpu)); | ||
7459 | if (cpu == cpumask_first(d->this_sibling_map)) | ||
7460 | init_sched_build_groups(d->this_sibling_map, cpu_map, | ||
7461 | &cpu_to_cpu_group, | ||
7462 | d->send_covered, d->tmpmask); | ||
7463 | break; | ||
7464 | #endif | 7150 | #endif |
7465 | #ifdef CONFIG_SCHED_MC | 7151 | #ifdef CONFIG_SCHED_MC |
7466 | case SD_LV_MC: /* set up multi-core groups */ | 7152 | { sd_init_MC, cpu_coregroup_mask, }, |
7467 | cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu)); | ||
7468 | if (cpu == cpumask_first(d->this_core_map)) | ||
7469 | init_sched_build_groups(d->this_core_map, cpu_map, | ||
7470 | &cpu_to_core_group, | ||
7471 | d->send_covered, d->tmpmask); | ||
7472 | break; | ||
7473 | #endif | 7153 | #endif |
7474 | #ifdef CONFIG_SCHED_BOOK | 7154 | #ifdef CONFIG_SCHED_BOOK |
7475 | case SD_LV_BOOK: /* set up book groups */ | 7155 | { sd_init_BOOK, cpu_book_mask, }, |
7476 | cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu)); | ||
7477 | if (cpu == cpumask_first(d->this_book_map)) | ||
7478 | init_sched_build_groups(d->this_book_map, cpu_map, | ||
7479 | &cpu_to_book_group, | ||
7480 | d->send_covered, d->tmpmask); | ||
7481 | break; | ||
7482 | #endif | 7156 | #endif |
7483 | case SD_LV_CPU: /* set up physical groups */ | 7157 | { sd_init_CPU, cpu_cpu_mask, }, |
7484 | cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map); | ||
7485 | if (!cpumask_empty(d->nodemask)) | ||
7486 | init_sched_build_groups(d->nodemask, cpu_map, | ||
7487 | &cpu_to_phys_group, | ||
7488 | d->send_covered, d->tmpmask); | ||
7489 | break; | ||
7490 | #ifdef CONFIG_NUMA | 7158 | #ifdef CONFIG_NUMA |
7491 | case SD_LV_ALLNODES: | 7159 | { sd_init_NODE, cpu_node_mask, }, |
7492 | init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group, | 7160 | { sd_init_ALLNODES, cpu_allnodes_mask, }, |
7493 | d->send_covered, d->tmpmask); | ||
7494 | break; | ||
7495 | #endif | 7161 | #endif |
7496 | default: | 7162 | { NULL, }, |
7497 | break; | 7163 | }; |
7164 | |||
7165 | static struct sched_domain_topology_level *sched_domain_topology = default_topology; | ||
7166 | |||
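
This table is the heart of the rewrite: each entry pairs an sd_init_*() constructor with a mask function, and the builder walks the array bottom-up until the NULL sentinel, so adding or dropping a level no longer means touching an #ifdef ladder in every helper. A hypothetical architecture-specific table (illustrative only; in this patch sched_domain_topology is still a file-static pointer, so an override hook is an assumption rather than an exported API) would follow the same shape:

/* Hypothetical arch table; the entry shape mirrors default_topology above. */
static struct sched_domain_topology_level arch_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ sd_init_SIBLING, cpu_smt_mask, },
#endif
	{ sd_init_CPU, cpu_cpu_mask, },
	{ NULL, },
};
/* ...and, hypothetically, early in arch setup:
 * sched_domain_topology = arch_topology; */
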
7167 | static int __sdt_alloc(const struct cpumask *cpu_map) | ||
7168 | { | ||
7169 | struct sched_domain_topology_level *tl; | ||
7170 | int j; | ||
7171 | |||
7172 | for (tl = sched_domain_topology; tl->init; tl++) { | ||
7173 | struct sd_data *sdd = &tl->data; | ||
7174 | |||
7175 | sdd->sd = alloc_percpu(struct sched_domain *); | ||
7176 | if (!sdd->sd) | ||
7177 | return -ENOMEM; | ||
7178 | |||
7179 | sdd->sg = alloc_percpu(struct sched_group *); | ||
7180 | if (!sdd->sg) | ||
7181 | return -ENOMEM; | ||
7182 | |||
7183 | for_each_cpu(j, cpu_map) { | ||
7184 | struct sched_domain *sd; | ||
7185 | struct sched_group *sg; | ||
7186 | |||
7187 | sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), | ||
7188 | GFP_KERNEL, cpu_to_node(j)); | ||
7189 | if (!sd) | ||
7190 | return -ENOMEM; | ||
7191 | |||
7192 | *per_cpu_ptr(sdd->sd, j) = sd; | ||
7193 | |||
7194 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), | ||
7195 | GFP_KERNEL, cpu_to_node(j)); | ||
7196 | if (!sg) | ||
7197 | return -ENOMEM; | ||
7198 | |||
7199 | *per_cpu_ptr(sdd->sg, j) = sg; | ||
7200 | } | ||
7201 | } | ||
7202 | |||
7203 | return 0; | ||
7204 | } | ||
7205 | |||
7206 | static void __sdt_free(const struct cpumask *cpu_map) | ||
7207 | { | ||
7208 | struct sched_domain_topology_level *tl; | ||
7209 | int j; | ||
7210 | |||
7211 | for (tl = sched_domain_topology; tl->init; tl++) { | ||
7212 | struct sd_data *sdd = &tl->data; | ||
7213 | |||
7214 | for_each_cpu(j, cpu_map) { | ||
7215 | kfree(*per_cpu_ptr(sdd->sd, j)); | ||
7216 | kfree(*per_cpu_ptr(sdd->sg, j)); | ||
7217 | } | ||
7218 | free_percpu(sdd->sd); | ||
7219 | free_percpu(sdd->sg); | ||
7220 | } | ||
7221 | } | ||
7222 | |||
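
__sdt_alloc() can fail partway through, and the error path simply calls __sdt_free() on whatever exists; that works because kfree() and free_percpu() are NULL-safe, and because claim_allocations() later NULLs the per-cpu slots a live domain tree keeps, so this free pass skips them. A compact sketch of the same alloc-all/free-all pattern, assuming toy types with calloc/free standing in for the kernel allocators:

#include <stdio.h>
#include <stdlib.h>

struct sdd_toy { void **sd; void **sg; };	/* per-cpu pointer arrays */

static void sdt_free_toy(struct sdd_toy *d, int ncpus)
{
	for (int j = 0; d->sd && j < ncpus; j++)
		free(d->sd[j]);			/* free(NULL) is a no-op */
	free(d->sd);
	free(d->sg);
}

static int sdt_alloc_toy(struct sdd_toy *d, int ncpus)
{
	d->sd = calloc(ncpus, sizeof(void *));
	d->sg = calloc(ncpus, sizeof(void *));
	if (!d->sd || !d->sg)
		return -1;			/* caller unwinds via sdt_free_toy() */
	for (int j = 0; j < ncpus; j++) {
		d->sd[j] = calloc(1, 64);	/* stand-in for a sched_domain */
		if (!d->sd[j])
			return -1;		/* a partial array is still freeable */
	}
	return 0;
}

int main(void)
{
	struct sdd_toy d = { 0 };

	if (sdt_alloc_toy(&d, 4) < 0)
		puts("alloc failed; unwinding is still safe");
	sdt_free_toy(&d, 4);	/* correct for full or partial success */
	return 0;
}
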
7223 | struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, | ||
7224 | struct s_data *d, const struct cpumask *cpu_map, | ||
7225 | struct sched_domain_attr *attr, struct sched_domain *child, | ||
7226 | int cpu) | ||
7227 | { | ||
7228 | struct sched_domain *sd = tl->init(tl, cpu); | ||
7229 | if (!sd) | ||
7230 | return child; | ||
7231 | |||
7232 | set_domain_attribute(sd, attr); | ||
7233 | cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); | ||
7234 | if (child) { | ||
7235 | sd->level = child->level + 1; | ||
7236 | sched_domain_level_max = max(sched_domain_level_max, sd->level); | ||
7237 | child->parent = sd; | ||
7498 | } | 7238 | } |
7239 | sd->child = child; | ||
7240 | |||
7241 | return sd; | ||
7499 | } | 7242 | } |
7500 | 7243 | ||
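
build_sched_domain() threads each new level onto the previous one: the fresh domain becomes the parent, the old one its child, and sd->level counts depth, feeding sched_domain_level_max; build_sched_domains() below then descends ->child to reach the base domain for each CPU. A self-contained sketch of that chaining, assuming toy domain structs rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct dom { struct dom *parent, *child; int level; };

/* Chain nlevels domains bottom-up and return the base (deepest child). */
static struct dom *chain_levels(struct dom **lv, int nlevels)
{
	struct dom *sd = NULL;

	for (int l = 0; l < nlevels; l++) {
		lv[l]->child = sd;		/* previous level becomes child */
		lv[l]->level = sd ? sd->level + 1 : 0;
		if (sd)
			sd->parent = lv[l];
		sd = lv[l];
	}
	while (sd->child)			/* mirrors the while (sd->child) walk */
		sd = sd->child;
	return sd;
}

int main(void)
{
	struct dom *lv[3];

	for (int i = 0; i < 3; i++) {
		lv[i] = calloc(1, sizeof(**lv));
		if (!lv[i])
			return 1;
	}
	printf("base level: %d\n", chain_levels(lv, 3)->level);	/* prints 0 */
	return 0;
}
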
7501 | /* | 7244 | /* |
7502 | * Build sched domains for a given set of cpus and attach the sched domains | 7245 | * Build sched domains for a given set of cpus and attach the sched domains |
7503 | * to the individual cpus | 7246 | * to the individual cpus |
7504 | */ | 7247 | */ |
7505 | static int __build_sched_domains(const struct cpumask *cpu_map, | 7248 | static int build_sched_domains(const struct cpumask *cpu_map, |
7506 | struct sched_domain_attr *attr) | 7249 | struct sched_domain_attr *attr) |
7507 | { | 7250 | { |
7508 | enum s_alloc alloc_state = sa_none; | 7251 | enum s_alloc alloc_state = sa_none; |
7509 | struct s_data d; | ||
7510 | struct sched_domain *sd; | 7252 | struct sched_domain *sd; |
7511 | int i; | 7253 | struct s_data d; |
7512 | #ifdef CONFIG_NUMA | 7254 | int i, ret = -ENOMEM; |
7513 | d.sd_allnodes = 0; | ||
7514 | #endif | ||
7515 | 7255 | ||
7516 | alloc_state = __visit_domain_allocation_hell(&d, cpu_map); | 7256 | alloc_state = __visit_domain_allocation_hell(&d, cpu_map); |
7517 | if (alloc_state != sa_rootdomain) | 7257 | if (alloc_state != sa_rootdomain) |
7518 | goto error; | 7258 | goto error; |
7519 | alloc_state = sa_sched_groups; | ||
7520 | 7259 | ||
7521 | /* | 7260 | /* Set up domains for cpus specified by the cpu_map. */ |
7522 | * Set up domains for cpus specified by the cpu_map. | ||
7523 | */ | ||
7524 | for_each_cpu(i, cpu_map) { | 7261 | for_each_cpu(i, cpu_map) { |
7525 | cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)), | 7262 | struct sched_domain_topology_level *tl; |
7526 | cpu_map); | ||
7527 | 7263 | ||
7528 | sd = __build_numa_sched_domains(&d, cpu_map, attr, i); | 7264 | sd = NULL; |
7529 | sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i); | 7265 | for (tl = sched_domain_topology; tl->init; tl++) |
7530 | sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i); | 7266 | sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i); |
7531 | sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i); | ||
7532 | sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i); | ||
7533 | } | ||
7534 | |||
7535 | for_each_cpu(i, cpu_map) { | ||
7536 | build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i); | ||
7537 | build_sched_groups(&d, SD_LV_BOOK, cpu_map, i); | ||
7538 | build_sched_groups(&d, SD_LV_MC, cpu_map, i); | ||
7539 | } | ||
7540 | |||
7541 | /* Set up physical groups */ | ||
7542 | for (i = 0; i < nr_node_ids; i++) | ||
7543 | build_sched_groups(&d, SD_LV_CPU, cpu_map, i); | ||
7544 | 7267 | ||
7545 | #ifdef CONFIG_NUMA | 7268 | while (sd->child) |
7546 | /* Set up node groups */ | 7269 | sd = sd->child; |
7547 | if (d.sd_allnodes) | ||
7548 | build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0); | ||
7549 | |||
7550 | for (i = 0; i < nr_node_ids; i++) | ||
7551 | if (build_numa_sched_groups(&d, cpu_map, i)) | ||
7552 | goto error; | ||
7553 | #endif | ||
7554 | 7270 | ||
7555 | /* Calculate CPU power for physical packages and nodes */ | 7271 | *per_cpu_ptr(d.sd, i) = sd; |
7556 | #ifdef CONFIG_SCHED_SMT | ||
7557 | for_each_cpu(i, cpu_map) { | ||
7558 | sd = &per_cpu(cpu_domains, i).sd; | ||
7559 | init_sched_groups_power(i, sd); | ||
7560 | } | ||
7561 | #endif | ||
7562 | #ifdef CONFIG_SCHED_MC | ||
7563 | for_each_cpu(i, cpu_map) { | ||
7564 | sd = &per_cpu(core_domains, i).sd; | ||
7565 | init_sched_groups_power(i, sd); | ||
7566 | } | 7272 | } |
7567 | #endif | ||
7568 | #ifdef CONFIG_SCHED_BOOK | ||
7569 | for_each_cpu(i, cpu_map) { | ||
7570 | sd = &per_cpu(book_domains, i).sd; | ||
7571 | init_sched_groups_power(i, sd); | ||
7572 | } | ||
7573 | #endif | ||
7574 | 7273 | ||
7274 | /* Build the groups for the domains */ | ||
7575 | for_each_cpu(i, cpu_map) { | 7275 | for_each_cpu(i, cpu_map) { |
7576 | sd = &per_cpu(phys_domains, i).sd; | 7276 | for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { |
7577 | init_sched_groups_power(i, sd); | 7277 | sd->span_weight = cpumask_weight(sched_domain_span(sd)); |
7578 | } | 7278 | get_group(i, sd->private, &sd->groups); |
7279 | atomic_inc(&sd->groups->ref); | ||
7579 | 7280 | ||
7580 | #ifdef CONFIG_NUMA | 7281 | if (i != cpumask_first(sched_domain_span(sd))) |
7581 | for (i = 0; i < nr_node_ids; i++) | 7282 | continue; |
7582 | init_numa_sched_groups_power(d.sched_group_nodes[i]); | ||
7583 | 7283 | ||
7584 | if (d.sd_allnodes) { | 7284 | build_sched_groups(sd); |
7585 | struct sched_group *sg; | 7285 | } |
7286 | } | ||
7586 | 7287 | ||
7587 | cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, | 7288 | /* Calculate CPU power for physical packages and nodes */ |
7588 | d.tmpmask); | 7289 | for (i = nr_cpumask_bits-1; i >= 0; i--) { |
7589 | init_numa_sched_groups_power(sg); | 7290 | if (!cpumask_test_cpu(i, cpu_map)) |
7291 | continue; | ||
7292 | |||
7293 | for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { | ||
7294 | claim_allocations(i, sd); | ||
7295 | init_sched_groups_power(i, sd); | ||
7296 | } | ||
7590 | } | 7297 | } |
7591 | #endif | ||
7592 | 7298 | ||
7593 | /* Attach the domains */ | 7299 | /* Attach the domains */ |
7300 | rcu_read_lock(); | ||
7594 | for_each_cpu(i, cpu_map) { | 7301 | for_each_cpu(i, cpu_map) { |
7595 | #ifdef CONFIG_SCHED_SMT | 7302 | sd = *per_cpu_ptr(d.sd, i); |
7596 | sd = &per_cpu(cpu_domains, i).sd; | ||
7597 | #elif defined(CONFIG_SCHED_MC) | ||
7598 | sd = &per_cpu(core_domains, i).sd; | ||
7599 | #elif defined(CONFIG_SCHED_BOOK) | ||
7600 | sd = &per_cpu(book_domains, i).sd; | ||
7601 | #else | ||
7602 | sd = &per_cpu(phys_domains, i).sd; | ||
7603 | #endif | ||
7604 | cpu_attach_domain(sd, d.rd, i); | 7303 | cpu_attach_domain(sd, d.rd, i); |
7605 | } | 7304 | } |
7305 | rcu_read_unlock(); | ||
7606 | 7306 | ||
7607 | d.sched_group_nodes = NULL; /* don't free this we still need it */ | 7307 | ret = 0; |
7608 | __free_domain_allocs(&d, sa_tmpmask, cpu_map); | ||
7609 | return 0; | ||
7610 | |||
7611 | error: | 7308 | error: |
7612 | __free_domain_allocs(&d, alloc_state, cpu_map); | 7309 | __free_domain_allocs(&d, alloc_state, cpu_map); |
7613 | return -ENOMEM; | 7310 | return ret; |
7614 | } | ||
7615 | |||
7616 | static int build_sched_domains(const struct cpumask *cpu_map) | ||
7617 | { | ||
7618 | return __build_sched_domains(cpu_map, NULL); | ||
7619 | } | 7311 | } |
7620 | 7312 | ||
7621 | static cpumask_var_t *doms_cur; /* current sched domains */ | 7313 | static cpumask_var_t *doms_cur; /* current sched domains */ |
@@ -7670,7 +7362,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) | |||
7670 | * For now this just excludes isolated cpus, but could be used to | 7362 | * For now this just excludes isolated cpus, but could be used to |
7671 | * exclude other special cases in the future. | 7363 | * exclude other special cases in the future. |
7672 | */ | 7364 | */ |
7673 | static int arch_init_sched_domains(const struct cpumask *cpu_map) | 7365 | static int init_sched_domains(const struct cpumask *cpu_map) |
7674 | { | 7366 | { |
7675 | int err; | 7367 | int err; |
7676 | 7368 | ||
@@ -7681,32 +7373,24 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map) | |||
7681 | doms_cur = &fallback_doms; | 7373 | doms_cur = &fallback_doms; |
7682 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); | 7374 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); |
7683 | dattr_cur = NULL; | 7375 | dattr_cur = NULL; |
7684 | err = build_sched_domains(doms_cur[0]); | 7376 | err = build_sched_domains(doms_cur[0], NULL); |
7685 | register_sched_domain_sysctl(); | 7377 | register_sched_domain_sysctl(); |
7686 | 7378 | ||
7687 | return err; | 7379 | return err; |
7688 | } | 7380 | } |
7689 | 7381 | ||
7690 | static void arch_destroy_sched_domains(const struct cpumask *cpu_map, | ||
7691 | struct cpumask *tmpmask) | ||
7692 | { | ||
7693 | free_sched_groups(cpu_map, tmpmask); | ||
7694 | } | ||
7695 | |||
7696 | /* | 7382 | /* |
7697 | * Detach sched domains from a group of cpus specified in cpu_map | 7383 | * Detach sched domains from a group of cpus specified in cpu_map |
7698 | * These cpus will now be attached to the NULL domain | 7384 | * These cpus will now be attached to the NULL domain |
7699 | */ | 7385 | */ |
7700 | static void detach_destroy_domains(const struct cpumask *cpu_map) | 7386 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
7701 | { | 7387 | { |
7702 | /* Save because hotplug lock held. */ | ||
7703 | static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); | ||
7704 | int i; | 7388 | int i; |
7705 | 7389 | ||
7390 | rcu_read_lock(); | ||
7706 | for_each_cpu(i, cpu_map) | 7391 | for_each_cpu(i, cpu_map) |
7707 | cpu_attach_domain(NULL, &def_root_domain, i); | 7392 | cpu_attach_domain(NULL, &def_root_domain, i); |
7708 | synchronize_sched(); | 7393 | rcu_read_unlock(); |
7709 | arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); | ||
7710 | } | 7394 | } |
7711 | 7395 | ||
7712 | /* handle null as "default" */ | 7396 | /* handle null as "default" */ |
@@ -7795,8 +7479,7 @@ match1: | |||
7795 | goto match2; | 7479 | goto match2; |
7796 | } | 7480 | } |
7797 | /* no match - add a new doms_new */ | 7481 | /* no match - add a new doms_new */ |
7798 | __build_sched_domains(doms_new[i], | 7482 | build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); |
7799 | dattr_new ? dattr_new + i : NULL); | ||
7800 | match2: | 7483 | match2: |
7801 | ; | 7484 | ; |
7802 | } | 7485 | } |
@@ -7815,7 +7498,7 @@ match2: | |||
7815 | } | 7498 | } |
7816 | 7499 | ||
7817 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 7500 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
7818 | static void arch_reinit_sched_domains(void) | 7501 | static void reinit_sched_domains(void) |
7819 | { | 7502 | { |
7820 | get_online_cpus(); | 7503 | get_online_cpus(); |
7821 | 7504 | ||
@@ -7848,7 +7531,7 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | |||
7848 | else | 7531 | else |
7849 | sched_mc_power_savings = level; | 7532 | sched_mc_power_savings = level; |
7850 | 7533 | ||
7851 | arch_reinit_sched_domains(); | 7534 | reinit_sched_domains(); |
7852 | 7535 | ||
7853 | return count; | 7536 | return count; |
7854 | } | 7537 | } |
@@ -7967,14 +7650,9 @@ void __init sched_init_smp(void) | |||
7967 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); | 7650 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); |
7968 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); | 7651 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); |
7969 | 7652 | ||
7970 | #if defined(CONFIG_NUMA) | ||
7971 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), | ||
7972 | GFP_KERNEL); | ||
7973 | BUG_ON(sched_group_nodes_bycpu == NULL); | ||
7974 | #endif | ||
7975 | get_online_cpus(); | 7653 | get_online_cpus(); |
7976 | mutex_lock(&sched_domains_mutex); | 7654 | mutex_lock(&sched_domains_mutex); |
7977 | arch_init_sched_domains(cpu_active_mask); | 7655 | init_sched_domains(cpu_active_mask); |
7978 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); | 7656 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
7979 | if (cpumask_empty(non_isolated_cpus)) | 7657 | if (cpumask_empty(non_isolated_cpus)) |
7980 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); | 7658 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
@@ -8281,6 +7959,7 @@ void __init sched_init(void) | |||
8281 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | 7959 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ |
8282 | zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); | 7960 | zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); |
8283 | #ifdef CONFIG_SMP | 7961 | #ifdef CONFIG_SMP |
7962 | zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); | ||
8284 | #ifdef CONFIG_NO_HZ | 7963 | #ifdef CONFIG_NO_HZ |
8285 | zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); | 7964 | zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); |
8286 | alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); | 7965 | alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); |
@@ -8340,7 +8019,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p) | |||
8340 | int old_prio = p->prio; | 8019 | int old_prio = p->prio; |
8341 | int on_rq; | 8020 | int on_rq; |
8342 | 8021 | ||
8343 | on_rq = p->se.on_rq; | 8022 | on_rq = p->on_rq; |
8344 | if (on_rq) | 8023 | if (on_rq) |
8345 | deactivate_task(rq, p, 0); | 8024 | deactivate_task(rq, p, 0); |
8346 | __setscheduler(rq, p, SCHED_NORMAL, 0); | 8025 | __setscheduler(rq, p, SCHED_NORMAL, 0); |
@@ -8553,7 +8232,6 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
8553 | { | 8232 | { |
8554 | struct rt_rq *rt_rq; | 8233 | struct rt_rq *rt_rq; |
8555 | struct sched_rt_entity *rt_se; | 8234 | struct sched_rt_entity *rt_se; |
8556 | struct rq *rq; | ||
8557 | int i; | 8235 | int i; |
8558 | 8236 | ||
8559 | tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); | 8237 | tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); |
@@ -8567,8 +8245,6 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
8567 | ktime_to_ns(def_rt_bandwidth.rt_period), 0); | 8245 | ktime_to_ns(def_rt_bandwidth.rt_period), 0); |
8568 | 8246 | ||
8569 | for_each_possible_cpu(i) { | 8247 | for_each_possible_cpu(i) { |
8570 | rq = cpu_rq(i); | ||
8571 | |||
8572 | rt_rq = kzalloc_node(sizeof(struct rt_rq), | 8248 | rt_rq = kzalloc_node(sizeof(struct rt_rq), |
8573 | GFP_KERNEL, cpu_to_node(i)); | 8249 | GFP_KERNEL, cpu_to_node(i)); |
8574 | if (!rt_rq) | 8250 | if (!rt_rq) |
@@ -8683,7 +8359,7 @@ void sched_move_task(struct task_struct *tsk) | |||
8683 | rq = task_rq_lock(tsk, &flags); | 8359 | rq = task_rq_lock(tsk, &flags); |
8684 | 8360 | ||
8685 | running = task_current(rq, tsk); | 8361 | running = task_current(rq, tsk); |
8686 | on_rq = tsk->se.on_rq; | 8362 | on_rq = tsk->on_rq; |
8687 | 8363 | ||
8688 | if (on_rq) | 8364 | if (on_rq) |
8689 | dequeue_task(rq, tsk, 0); | 8365 | dequeue_task(rq, tsk, 0); |
@@ -8702,7 +8378,7 @@ void sched_move_task(struct task_struct *tsk) | |||
8702 | if (on_rq) | 8378 | if (on_rq) |
8703 | enqueue_task(rq, tsk, 0); | 8379 | enqueue_task(rq, tsk, 0); |
8704 | 8380 | ||
8705 | task_rq_unlock(rq, &flags); | 8381 | task_rq_unlock(rq, tsk, &flags); |
8706 | } | 8382 | } |
8707 | #endif /* CONFIG_CGROUP_SCHED */ | 8383 | #endif /* CONFIG_CGROUP_SCHED */ |
8708 | 8384 | ||
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 7bacd83a4158..a6710a112b4f 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c | |||
@@ -152,7 +152,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) | |||
152 | read_lock_irqsave(&tasklist_lock, flags); | 152 | read_lock_irqsave(&tasklist_lock, flags); |
153 | 153 | ||
154 | do_each_thread(g, p) { | 154 | do_each_thread(g, p) { |
155 | if (!p->se.on_rq || task_cpu(p) != rq_cpu) | 155 | if (!p->on_rq || task_cpu(p) != rq_cpu) |
156 | continue; | 156 | continue; |
157 | 157 | ||
158 | print_task(m, rq, p); | 158 | print_task(m, rq, p); |
@@ -296,9 +296,6 @@ static void print_cpu(struct seq_file *m, int cpu) | |||
296 | P(ttwu_count); | 296 | P(ttwu_count); |
297 | P(ttwu_local); | 297 | P(ttwu_local); |
298 | 298 | ||
299 | SEQ_printf(m, " .%-30s: %d\n", "bkl_count", | ||
300 | rq->rq_sched_info.bkl_count); | ||
301 | |||
302 | #undef P | 299 | #undef P |
303 | #undef P64 | 300 | #undef P64 |
304 | #endif | 301 | #endif |
@@ -441,7 +438,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) | |||
441 | P(se.statistics.wait_count); | 438 | P(se.statistics.wait_count); |
442 | PN(se.statistics.iowait_sum); | 439 | PN(se.statistics.iowait_sum); |
443 | P(se.statistics.iowait_count); | 440 | P(se.statistics.iowait_count); |
444 | P(sched_info.bkl_count); | ||
445 | P(se.nr_migrations); | 441 | P(se.nr_migrations); |
446 | P(se.statistics.nr_migrations_cold); | 442 | P(se.statistics.nr_migrations_cold); |
447 | P(se.statistics.nr_failed_migrations_affine); | 443 | P(se.statistics.nr_failed_migrations_affine); |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 6fa833ab2cb8..37f22626225e 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -358,6 +358,10 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq) | |||
358 | } | 358 | } |
359 | 359 | ||
360 | cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); | 360 | cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); |
361 | #ifndef CONFIG_64BIT | ||
362 | smp_wmb(); | ||
363 | cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; | ||
364 | #endif | ||
361 | } | 365 | } |
362 | 366 | ||
363 | /* | 367 | /* |
@@ -1340,6 +1344,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) | |||
1340 | hrtick_update(rq); | 1344 | hrtick_update(rq); |
1341 | } | 1345 | } |
1342 | 1346 | ||
1347 | static void set_next_buddy(struct sched_entity *se); | ||
1348 | |||
1343 | /* | 1349 | /* |
1344 | * The dequeue_task method is called before nr_running is | 1350 | * The dequeue_task method is called before nr_running is |
1345 | * decreased. We remove the task from the rbtree and | 1351 | * decreased. We remove the task from the rbtree and |
@@ -1349,14 +1355,22 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) | |||
1349 | { | 1355 | { |
1350 | struct cfs_rq *cfs_rq; | 1356 | struct cfs_rq *cfs_rq; |
1351 | struct sched_entity *se = &p->se; | 1357 | struct sched_entity *se = &p->se; |
1358 | int task_sleep = flags & DEQUEUE_SLEEP; | ||
1352 | 1359 | ||
1353 | for_each_sched_entity(se) { | 1360 | for_each_sched_entity(se) { |
1354 | cfs_rq = cfs_rq_of(se); | 1361 | cfs_rq = cfs_rq_of(se); |
1355 | dequeue_entity(cfs_rq, se, flags); | 1362 | dequeue_entity(cfs_rq, se, flags); |
1356 | 1363 | ||
1357 | /* Don't dequeue parent if it has other entities besides us */ | 1364 | /* Don't dequeue parent if it has other entities besides us */ |
1358 | if (cfs_rq->load.weight) | 1365 | if (cfs_rq->load.weight) { |
1366 | /* | ||
1367 | * Bias pick_next to pick a task from this cfs_rq, as | ||
1368 | * p is sleeping when it is within its sched_slice. | ||
1369 | */ | ||
1370 | if (task_sleep && parent_entity(se)) | ||
1371 | set_next_buddy(parent_entity(se)); | ||
1359 | break; | 1372 | break; |
1373 | } | ||
1360 | flags |= DEQUEUE_SLEEP; | 1374 | flags |= DEQUEUE_SLEEP; |
1361 | } | 1375 | } |
1362 | 1376 | ||
@@ -1372,12 +1386,25 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) | |||
1372 | 1386 | ||
1373 | #ifdef CONFIG_SMP | 1387 | #ifdef CONFIG_SMP |
1374 | 1388 | ||
1375 | static void task_waking_fair(struct rq *rq, struct task_struct *p) | 1389 | static void task_waking_fair(struct task_struct *p) |
1376 | { | 1390 | { |
1377 | struct sched_entity *se = &p->se; | 1391 | struct sched_entity *se = &p->se; |
1378 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 1392 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
1393 | u64 min_vruntime; | ||
1379 | 1394 | ||
1380 | se->vruntime -= cfs_rq->min_vruntime; | 1395 | #ifndef CONFIG_64BIT |
1396 | u64 min_vruntime_copy; | ||
1397 | |||
1398 | do { | ||
1399 | min_vruntime_copy = cfs_rq->min_vruntime_copy; | ||
1400 | smp_rmb(); | ||
1401 | min_vruntime = cfs_rq->min_vruntime; | ||
1402 | } while (min_vruntime != min_vruntime_copy); | ||
1403 | #else | ||
1404 | min_vruntime = cfs_rq->min_vruntime; | ||
1405 | #endif | ||
1406 | |||
1407 | se->vruntime -= min_vruntime; | ||
1381 | } | 1408 | } |
1382 | 1409 | ||
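
The min_vruntime_copy pair introduced above is a hand-rolled seqcount for 32-bit kernels, where a 64-bit load can tear: the writer stores the value, issues smp_wmb(), then stores the copy; the reader loads the copy, issues smp_rmb(), then loads the value and retries until the two agree. A user-space sketch of the guard, with C11 fences as stand-ins for the kernel barriers (an illustration of the idea, not the kernel's code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t value, value_copy;	/* a plain u64 load may tear on 32-bit */

static void publish(uint64_t v)
{
	value = v;
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	value_copy = v;
}

static uint64_t read_consistent(void)
{
	uint64_t v, c;

	do {
		c = value_copy;
		atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
		v = value;
	} while (v != c);	/* retry if a writer raced with us */
	return v;
}

int main(void)
{
	publish(42);
	printf("%llu\n", (unsigned long long)read_consistent());
	return 0;
}

On 64-bit builds both halves compile away, matching the #ifndef CONFIG_64BIT guards in the patch.
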
1383 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1410 | #ifdef CONFIG_FAIR_GROUP_SCHED |
@@ -1622,6 +1649,7 @@ static int select_idle_sibling(struct task_struct *p, int target) | |||
1622 | /* | 1649 | /* |
1623 | * Otherwise, iterate the domains and find an eligible idle cpu. | 1650 | * Otherwise, iterate the domains and find an eligible idle cpu. |
1624 | */ | 1651 | */ |
1652 | rcu_read_lock(); | ||
1625 | for_each_domain(target, sd) { | 1653 | for_each_domain(target, sd) { |
1626 | if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) | 1654 | if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) |
1627 | break; | 1655 | break; |
@@ -1641,6 +1669,7 @@ static int select_idle_sibling(struct task_struct *p, int target) | |||
1641 | cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) | 1669 | cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) |
1642 | break; | 1670 | break; |
1643 | } | 1671 | } |
1672 | rcu_read_unlock(); | ||
1644 | 1673 | ||
1645 | return target; | 1674 | return target; |
1646 | } | 1675 | } |
@@ -1657,7 +1686,7 @@ static int select_idle_sibling(struct task_struct *p, int target) | |||
1657 | * preempt must be disabled. | 1686 | * preempt must be disabled. |
1658 | */ | 1687 | */ |
1659 | static int | 1688 | static int |
1660 | select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags) | 1689 | select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) |
1661 | { | 1690 | { |
1662 | struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; | 1691 | struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; |
1663 | int cpu = smp_processor_id(); | 1692 | int cpu = smp_processor_id(); |
@@ -1673,6 +1702,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_ | |||
1673 | new_cpu = prev_cpu; | 1702 | new_cpu = prev_cpu; |
1674 | } | 1703 | } |
1675 | 1704 | ||
1705 | rcu_read_lock(); | ||
1676 | for_each_domain(cpu, tmp) { | 1706 | for_each_domain(cpu, tmp) { |
1677 | if (!(tmp->flags & SD_LOAD_BALANCE)) | 1707 | if (!(tmp->flags & SD_LOAD_BALANCE)) |
1678 | continue; | 1708 | continue; |
@@ -1723,9 +1753,10 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_ | |||
1723 | 1753 | ||
1724 | if (affine_sd) { | 1754 | if (affine_sd) { |
1725 | if (cpu == prev_cpu || wake_affine(affine_sd, p, sync)) | 1755 | if (cpu == prev_cpu || wake_affine(affine_sd, p, sync)) |
1726 | return select_idle_sibling(p, cpu); | 1756 | prev_cpu = cpu; |
1727 | else | 1757 | |
1728 | return select_idle_sibling(p, prev_cpu); | 1758 | new_cpu = select_idle_sibling(p, prev_cpu); |
1759 | goto unlock; | ||
1729 | } | 1760 | } |
1730 | 1761 | ||
1731 | while (sd) { | 1762 | while (sd) { |
@@ -1766,6 +1797,8 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_ | |||
1766 | } | 1797 | } |
1767 | /* while loop will break here if sd == NULL */ | 1798 | /* while loop will break here if sd == NULL */ |
1768 | } | 1799 | } |
1800 | unlock: | ||
1801 | rcu_read_unlock(); | ||
1769 | 1802 | ||
1770 | return new_cpu; | 1803 | return new_cpu; |
1771 | } | 1804 | } |
@@ -1789,10 +1822,7 @@ wakeup_gran(struct sched_entity *curr, struct sched_entity *se) | |||
1789 | * This is especially important for buddies when the leftmost | 1822 | * This is especially important for buddies when the leftmost |
1790 | * task is higher priority than the buddy. | 1823 | * task is higher priority than the buddy. |
1791 | */ | 1824 | */ |
1792 | if (unlikely(se->load.weight != NICE_0_LOAD)) | 1825 | return calc_delta_fair(gran, se); |
1793 | gran = calc_delta_fair(gran, se); | ||
1794 | |||
1795 | return gran; | ||
1796 | } | 1826 | } |
1797 | 1827 | ||
1798 | /* | 1828 | /* |
@@ -1826,26 +1856,26 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) | |||
1826 | 1856 | ||
1827 | static void set_last_buddy(struct sched_entity *se) | 1857 | static void set_last_buddy(struct sched_entity *se) |
1828 | { | 1858 | { |
1829 | if (likely(task_of(se)->policy != SCHED_IDLE)) { | 1859 | if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) |
1830 | for_each_sched_entity(se) | 1860 | return; |
1831 | cfs_rq_of(se)->last = se; | 1861 | |
1832 | } | 1862 | for_each_sched_entity(se) |
1863 | cfs_rq_of(se)->last = se; | ||
1833 | } | 1864 | } |
1834 | 1865 | ||
1835 | static void set_next_buddy(struct sched_entity *se) | 1866 | static void set_next_buddy(struct sched_entity *se) |
1836 | { | 1867 | { |
1837 | if (likely(task_of(se)->policy != SCHED_IDLE)) { | 1868 | if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) |
1838 | for_each_sched_entity(se) | 1869 | return; |
1839 | cfs_rq_of(se)->next = se; | 1870 | |
1840 | } | 1871 | for_each_sched_entity(se) |
1872 | cfs_rq_of(se)->next = se; | ||
1841 | } | 1873 | } |
1842 | 1874 | ||
1843 | static void set_skip_buddy(struct sched_entity *se) | 1875 | static void set_skip_buddy(struct sched_entity *se) |
1844 | { | 1876 | { |
1845 | if (likely(task_of(se)->policy != SCHED_IDLE)) { | 1877 | for_each_sched_entity(se) |
1846 | for_each_sched_entity(se) | 1878 | cfs_rq_of(se)->skip = se; |
1847 | cfs_rq_of(se)->skip = se; | ||
1848 | } | ||
1849 | } | 1879 | } |
1850 | 1880 | ||
1851 | /* | 1881 | /* |
@@ -1857,12 +1887,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
1857 | struct sched_entity *se = &curr->se, *pse = &p->se; | 1887 | struct sched_entity *se = &curr->se, *pse = &p->se; |
1858 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 1888 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
1859 | int scale = cfs_rq->nr_running >= sched_nr_latency; | 1889 | int scale = cfs_rq->nr_running >= sched_nr_latency; |
1890 | int next_buddy_marked = 0; | ||
1860 | 1891 | ||
1861 | if (unlikely(se == pse)) | 1892 | if (unlikely(se == pse)) |
1862 | return; | 1893 | return; |
1863 | 1894 | ||
1864 | if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) | 1895 | if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { |
1865 | set_next_buddy(pse); | 1896 | set_next_buddy(pse); |
1897 | next_buddy_marked = 1; | ||
1898 | } | ||
1866 | 1899 | ||
1867 | /* | 1900 | /* |
1868 | * We can come here with TIF_NEED_RESCHED already set from new task | 1901 | * We can come here with TIF_NEED_RESCHED already set from new task |
@@ -1890,8 +1923,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
1890 | update_curr(cfs_rq); | 1923 | update_curr(cfs_rq); |
1891 | find_matching_se(&se, &pse); | 1924 | find_matching_se(&se, &pse); |
1892 | BUG_ON(!pse); | 1925 | BUG_ON(!pse); |
1893 | if (wakeup_preempt_entity(se, pse) == 1) | 1926 | if (wakeup_preempt_entity(se, pse) == 1) { |
1927 | /* | ||
1928 | * Bias pick_next to pick the sched entity that is | ||
1929 | * triggering this preemption. | ||
1930 | */ | ||
1931 | if (!next_buddy_marked) | ||
1932 | set_next_buddy(pse); | ||
1894 | goto preempt; | 1933 | goto preempt; |
1934 | } | ||
1895 | 1935 | ||
1896 | return; | 1936 | return; |
1897 | 1937 | ||
@@ -2102,7 +2142,7 @@ static unsigned long | |||
2102 | balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | 2142 | balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, |
2103 | unsigned long max_load_move, struct sched_domain *sd, | 2143 | unsigned long max_load_move, struct sched_domain *sd, |
2104 | enum cpu_idle_type idle, int *all_pinned, | 2144 | enum cpu_idle_type idle, int *all_pinned, |
2105 | int *this_best_prio, struct cfs_rq *busiest_cfs_rq) | 2145 | struct cfs_rq *busiest_cfs_rq) |
2106 | { | 2146 | { |
2107 | int loops = 0, pulled = 0; | 2147 | int loops = 0, pulled = 0; |
2108 | long rem_load_move = max_load_move; | 2148 | long rem_load_move = max_load_move; |
@@ -2140,9 +2180,6 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
2140 | */ | 2180 | */ |
2141 | if (rem_load_move <= 0) | 2181 | if (rem_load_move <= 0) |
2142 | break; | 2182 | break; |
2143 | |||
2144 | if (p->prio < *this_best_prio) | ||
2145 | *this_best_prio = p->prio; | ||
2146 | } | 2183 | } |
2147 | out: | 2184 | out: |
2148 | /* | 2185 | /* |
@@ -2202,7 +2239,7 @@ static unsigned long | |||
2202 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 2239 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, |
2203 | unsigned long max_load_move, | 2240 | unsigned long max_load_move, |
2204 | struct sched_domain *sd, enum cpu_idle_type idle, | 2241 | struct sched_domain *sd, enum cpu_idle_type idle, |
2205 | int *all_pinned, int *this_best_prio) | 2242 | int *all_pinned) |
2206 | { | 2243 | { |
2207 | long rem_load_move = max_load_move; | 2244 | long rem_load_move = max_load_move; |
2208 | int busiest_cpu = cpu_of(busiest); | 2245 | int busiest_cpu = cpu_of(busiest); |
@@ -2227,7 +2264,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
2227 | rem_load = div_u64(rem_load, busiest_h_load + 1); | 2264 | rem_load = div_u64(rem_load, busiest_h_load + 1); |
2228 | 2265 | ||
2229 | moved_load = balance_tasks(this_rq, this_cpu, busiest, | 2266 | moved_load = balance_tasks(this_rq, this_cpu, busiest, |
2230 | rem_load, sd, idle, all_pinned, this_best_prio, | 2267 | rem_load, sd, idle, all_pinned, |
2231 | busiest_cfs_rq); | 2268 | busiest_cfs_rq); |
2232 | 2269 | ||
2233 | if (!moved_load) | 2270 | if (!moved_load) |
@@ -2253,11 +2290,11 @@ static unsigned long | |||
2253 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 2290 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, |
2254 | unsigned long max_load_move, | 2291 | unsigned long max_load_move, |
2255 | struct sched_domain *sd, enum cpu_idle_type idle, | 2292 | struct sched_domain *sd, enum cpu_idle_type idle, |
2256 | int *all_pinned, int *this_best_prio) | 2293 | int *all_pinned) |
2257 | { | 2294 | { |
2258 | return balance_tasks(this_rq, this_cpu, busiest, | 2295 | return balance_tasks(this_rq, this_cpu, busiest, |
2259 | max_load_move, sd, idle, all_pinned, | 2296 | max_load_move, sd, idle, all_pinned, |
2260 | this_best_prio, &busiest->cfs); | 2297 | &busiest->cfs); |
2261 | } | 2298 | } |
2262 | #endif | 2299 | #endif |
2263 | 2300 | ||
@@ -2274,12 +2311,11 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
2274 | int *all_pinned) | 2311 | int *all_pinned) |
2275 | { | 2312 | { |
2276 | unsigned long total_load_moved = 0, load_moved; | 2313 | unsigned long total_load_moved = 0, load_moved; |
2277 | int this_best_prio = this_rq->curr->prio; | ||
2278 | 2314 | ||
2279 | do { | 2315 | do { |
2280 | load_moved = load_balance_fair(this_rq, this_cpu, busiest, | 2316 | load_moved = load_balance_fair(this_rq, this_cpu, busiest, |
2281 | max_load_move - total_load_moved, | 2317 | max_load_move - total_load_moved, |
2282 | sd, idle, all_pinned, &this_best_prio); | 2318 | sd, idle, all_pinned); |
2283 | 2319 | ||
2284 | total_load_moved += load_moved; | 2320 | total_load_moved += load_moved; |
2285 | 2321 | ||
@@ -2648,7 +2684,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) | |||
2648 | /* | 2684 | /* |
2649 | * Only siblings can have significantly less than SCHED_LOAD_SCALE | 2685 | * Only siblings can have significantly less than SCHED_LOAD_SCALE |
2650 | */ | 2686 | */ |
2651 | if (sd->level != SD_LV_SIBLING) | 2687 | if (!(sd->flags & SD_SHARE_CPUPOWER)) |
2652 | return 0; | 2688 | return 0; |
2653 | 2689 | ||
2654 | /* | 2690 | /* |
@@ -3465,6 +3501,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3465 | raw_spin_unlock(&this_rq->lock); | 3501 | raw_spin_unlock(&this_rq->lock); |
3466 | 3502 | ||
3467 | update_shares(this_cpu); | 3503 | update_shares(this_cpu); |
3504 | rcu_read_lock(); | ||
3468 | for_each_domain(this_cpu, sd) { | 3505 | for_each_domain(this_cpu, sd) { |
3469 | unsigned long interval; | 3506 | unsigned long interval; |
3470 | int balance = 1; | 3507 | int balance = 1; |
@@ -3486,6 +3523,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3486 | break; | 3523 | break; |
3487 | } | 3524 | } |
3488 | } | 3525 | } |
3526 | rcu_read_unlock(); | ||
3489 | 3527 | ||
3490 | raw_spin_lock(&this_rq->lock); | 3528 | raw_spin_lock(&this_rq->lock); |
3491 | 3529 | ||
@@ -3534,6 +3572,7 @@ static int active_load_balance_cpu_stop(void *data) | |||
3534 | double_lock_balance(busiest_rq, target_rq); | 3572 | double_lock_balance(busiest_rq, target_rq); |
3535 | 3573 | ||
3536 | /* Search for an sd spanning us and the target CPU. */ | 3574 | /* Search for an sd spanning us and the target CPU. */ |
3575 | rcu_read_lock(); | ||
3537 | for_each_domain(target_cpu, sd) { | 3576 | for_each_domain(target_cpu, sd) { |
3538 | if ((sd->flags & SD_LOAD_BALANCE) && | 3577 | if ((sd->flags & SD_LOAD_BALANCE) && |
3539 | cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) | 3578 | cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) |
@@ -3549,6 +3588,7 @@ static int active_load_balance_cpu_stop(void *data) | |||
3549 | else | 3588 | else |
3550 | schedstat_inc(sd, alb_failed); | 3589 | schedstat_inc(sd, alb_failed); |
3551 | } | 3590 | } |
3591 | rcu_read_unlock(); | ||
3552 | double_unlock_balance(busiest_rq, target_rq); | 3592 | double_unlock_balance(busiest_rq, target_rq); |
3553 | out_unlock: | 3593 | out_unlock: |
3554 | busiest_rq->active_balance = 0; | 3594 | busiest_rq->active_balance = 0; |
@@ -3675,6 +3715,7 @@ static int find_new_ilb(int cpu) | |||
3675 | { | 3715 | { |
3676 | struct sched_domain *sd; | 3716 | struct sched_domain *sd; |
3677 | struct sched_group *ilb_group; | 3717 | struct sched_group *ilb_group; |
3718 | int ilb = nr_cpu_ids; | ||
3678 | 3719 | ||
3679 | /* | 3720 | /* |
3680 | * Have idle load balancer selection from semi-idle packages only | 3721 | * Have idle load balancer selection from semi-idle packages only |
@@ -3690,20 +3731,25 @@ static int find_new_ilb(int cpu) | |||
3690 | if (cpumask_weight(nohz.idle_cpus_mask) < 2) | 3731 | if (cpumask_weight(nohz.idle_cpus_mask) < 2) |
3691 | goto out_done; | 3732 | goto out_done; |
3692 | 3733 | ||
3734 | rcu_read_lock(); | ||
3693 | for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { | 3735 | for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { |
3694 | ilb_group = sd->groups; | 3736 | ilb_group = sd->groups; |
3695 | 3737 | ||
3696 | do { | 3738 | do { |
3697 | if (is_semi_idle_group(ilb_group)) | 3739 | if (is_semi_idle_group(ilb_group)) { |
3698 | return cpumask_first(nohz.grp_idle_mask); | 3740 | ilb = cpumask_first(nohz.grp_idle_mask); |
3741 | goto unlock; | ||
3742 | } | ||
3699 | 3743 | ||
3700 | ilb_group = ilb_group->next; | 3744 | ilb_group = ilb_group->next; |
3701 | 3745 | ||
3702 | } while (ilb_group != sd->groups); | 3746 | } while (ilb_group != sd->groups); |
3703 | } | 3747 | } |
3748 | unlock: | ||
3749 | rcu_read_unlock(); | ||
3704 | 3750 | ||
3705 | out_done: | 3751 | out_done: |
3706 | return nr_cpu_ids; | 3752 | return ilb; |
3707 | } | 3753 | } |
3708 | #else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ | 3754 | #else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ |
3709 | static inline int find_new_ilb(int call_cpu) | 3755 | static inline int find_new_ilb(int call_cpu) |
@@ -3848,6 +3894,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
3848 | 3894 | ||
3849 | update_shares(cpu); | 3895 | update_shares(cpu); |
3850 | 3896 | ||
3897 | rcu_read_lock(); | ||
3851 | for_each_domain(cpu, sd) { | 3898 | for_each_domain(cpu, sd) { |
3852 | if (!(sd->flags & SD_LOAD_BALANCE)) | 3899 | if (!(sd->flags & SD_LOAD_BALANCE)) |
3853 | continue; | 3900 | continue; |
@@ -3893,6 +3940,7 @@ out: | |||
3893 | if (!balance) | 3940 | if (!balance) |
3894 | break; | 3941 | break; |
3895 | } | 3942 | } |
3943 | rcu_read_unlock(); | ||
3896 | 3944 | ||
3897 | /* | 3945 | /* |
3898 | * next_balance will be updated only when there is a need. | 3946 | * next_balance will be updated only when there is a need. |
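The rcu_read_lock()/rcu_read_unlock() pairs threaded through idle_balance(), active_load_balance_cpu_stop(), find_new_ilb() and rebalance_domains() above all guard the same thing: the sched-domain tree is now freed via RCU, so every for_each_domain() walk must sit inside a read-side critical section. A minimal user-space sketch of that reader idiom, using liburcu (link with -lurcu); the struct and the walk are illustrative stand-ins, not the kernel's types:

        #include <urcu.h>               /* userspace RCU, for illustration */
        #include <stdio.h>
        #include <stdlib.h>

        struct domain {
                int level;
                struct domain *parent;
        };

        static struct domain *root;     /* published with rcu_assign_pointer() */

        /* Reader side: the shape of the for_each_domain() walks above. */
        static void walk_domains(void)
        {
                struct domain *d;

                rcu_read_lock();
                for (d = rcu_dereference(root); d; d = d->parent)
                        printf("domain level %d\n", d->level);
                rcu_read_unlock();      /* d must not be used past this point */
        }

        int main(void)
        {
                struct domain *d = calloc(1, sizeof(*d));

                rcu_register_thread();
                rcu_assign_pointer(root, d);
                walk_domains();
                rcu_unregister_thread();
                free(d);
                return 0;
        }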
diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 68e69acc29b9..be40f7371ee1 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h | |||
@@ -64,3 +64,9 @@ SCHED_FEAT(OWNER_SPIN, 1) | |||
64 | * Decrement CPU power based on irq activity | 64 | * Decrement CPU power based on irq activity |
65 | */ | 65 | */ |
66 | SCHED_FEAT(NONIRQ_POWER, 1) | 66 | SCHED_FEAT(NONIRQ_POWER, 1) |
67 | |||
68 | /* | ||
69 | * Queue remote wakeups on the target CPU and process them | ||
70 | * using the scheduler IPI. Reduces rq->lock contention/bounces. | ||
71 | */ | ||
72 | SCHED_FEAT(TTWU_QUEUE, 1) | ||
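TTWU_QUEUE gates the try_to_wake_up() path the comment describes: rather than bouncing the remote runqueue lock on every cross-CPU wakeup, the waker pushes the task onto the target CPU's pending list and sends the scheduler IPI, and the target drains the list under its own lock. A rough user-space sketch of that handoff with C11 atomics; all names here are illustrative stand-ins, not the scheduler's:

        #include <stdatomic.h>
        #include <stddef.h>
        #include <stdio.h>

        struct task {
                int id;
                struct task *wake_next;
        };

        /* Per-CPU pending-wakeup list: pushed remotely, drained locally. */
        static _Atomic(struct task *) wake_list;

        /* Waker side: queue the task lock-free, then (in the kernel) send
         * the scheduler IPI to the target CPU. */
        static void ttwu_queue_remote(struct task *p)
        {
                p->wake_next = atomic_load(&wake_list);
                while (!atomic_compare_exchange_weak(&wake_list,
                                                     &p->wake_next, p))
                        ;
        }

        /* IPI side: take the whole list, wake under the local lock only. */
        static void scheduler_ipi(void)
        {
                struct task *p = atomic_exchange(&wake_list, NULL);

                while (p) {
                        struct task *next = p->wake_next;
                        printf("activating task %d locally\n", p->id);
                        p = next;
                }
        }

        int main(void)
        {
                struct task t = { .id = 1 };

                ttwu_queue_remote(&t);
                scheduler_ipi();
                return 0;
        }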
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c index a776a6396427..0a51882534ea 100644 --- a/kernel/sched_idletask.c +++ b/kernel/sched_idletask.c | |||
@@ -7,7 +7,7 @@ | |||
7 | 7 | ||
8 | #ifdef CONFIG_SMP | 8 | #ifdef CONFIG_SMP |
9 | static int | 9 | static int |
10 | select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags) | 10 | select_task_rq_idle(struct task_struct *p, int sd_flag, int flags) |
11 | { | 11 | { |
12 | return task_cpu(p); /* IDLE tasks are never migrated */ | 12 | return task_cpu(p); /* IDLE tasks are never migrated */ |
13 | } | 13 | } |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index e7cebdc65f82..64b2a37c07d0 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -183,6 +183,14 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq) | |||
183 | return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); | 183 | return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); |
184 | } | 184 | } |
185 | 185 | ||
186 | typedef struct task_group *rt_rq_iter_t; | ||
187 | |||
188 | #define for_each_rt_rq(rt_rq, iter, rq) \ | ||
189 | for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \ | ||
190 | (&iter->list != &task_groups) && \ | ||
191 | (rt_rq = iter->rt_rq[cpu_of(rq)]); \ | ||
192 | iter = list_entry_rcu(iter->list.next, typeof(*iter), list)) | ||
193 | |||
186 | static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) | 194 | static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) |
187 | { | 195 | { |
188 | list_add_rcu(&rt_rq->leaf_rt_rq_list, | 196 | list_add_rcu(&rt_rq->leaf_rt_rq_list, |
@@ -288,6 +296,11 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq) | |||
288 | return ktime_to_ns(def_rt_bandwidth.rt_period); | 296 | return ktime_to_ns(def_rt_bandwidth.rt_period); |
289 | } | 297 | } |
290 | 298 | ||
299 | typedef struct rt_rq *rt_rq_iter_t; | ||
300 | |||
301 | #define for_each_rt_rq(rt_rq, iter, rq) \ | ||
302 | for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) | ||
303 | |||
291 | static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) | 304 | static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) |
292 | { | 305 | { |
293 | } | 306 | } |
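The two for_each_rt_rq() definitions above give callers one iteration shape regardless of CONFIG_RT_GROUP_SCHED: the group build walks the task_groups list under RCU, while the flat build degenerates into a loop that visits rq->rt exactly once, with (void) iter keeping the otherwise-unused iterator referenced. A compilable illustration of the single-pass flavour:

        #include <stdio.h>

        struct rt_rq { int rt_nr_running; };
        struct rq { struct rt_rq rt; };

        typedef struct rt_rq *rt_rq_iter_t;

        /* The !CONFIG_RT_GROUP_SCHED flavour: runs the body exactly once. */
        #define for_each_rt_rq(rt_rq, iter, rq)                         \
                for ((void) iter, rt_rq = &(rq)->rt; rt_rq; rt_rq = NULL)

        int main(void)
        {
                struct rq rq = { .rt = { .rt_nr_running = 2 } };
                rt_rq_iter_t iter = NULL;
                struct rt_rq *rt_rq;

                for_each_rt_rq(rt_rq, iter, &rq)
                        printf("rt_nr_running = %d\n", rt_rq->rt_nr_running);
                return 0;
        }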
@@ -402,12 +415,13 @@ next: | |||
402 | static void __disable_runtime(struct rq *rq) | 415 | static void __disable_runtime(struct rq *rq) |
403 | { | 416 | { |
404 | struct root_domain *rd = rq->rd; | 417 | struct root_domain *rd = rq->rd; |
418 | rt_rq_iter_t iter; | ||
405 | struct rt_rq *rt_rq; | 419 | struct rt_rq *rt_rq; |
406 | 420 | ||
407 | if (unlikely(!scheduler_running)) | 421 | if (unlikely(!scheduler_running)) |
408 | return; | 422 | return; |
409 | 423 | ||
410 | for_each_leaf_rt_rq(rt_rq, rq) { | 424 | for_each_rt_rq(rt_rq, iter, rq) { |
411 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | 425 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); |
412 | s64 want; | 426 | s64 want; |
413 | int i; | 427 | int i; |
@@ -487,6 +501,7 @@ static void disable_runtime(struct rq *rq) | |||
487 | 501 | ||
488 | static void __enable_runtime(struct rq *rq) | 502 | static void __enable_runtime(struct rq *rq) |
489 | { | 503 | { |
504 | rt_rq_iter_t iter; | ||
490 | struct rt_rq *rt_rq; | 505 | struct rt_rq *rt_rq; |
491 | 506 | ||
492 | if (unlikely(!scheduler_running)) | 507 | if (unlikely(!scheduler_running)) |
@@ -495,7 +510,7 @@ static void __enable_runtime(struct rq *rq) | |||
495 | /* | 510 | /* |
496 | * Reset each runqueue's bandwidth settings | 511 | * Reset each runqueue's bandwidth settings |
497 | */ | 512 | */ |
498 | for_each_leaf_rt_rq(rt_rq, rq) { | 513 | for_each_rt_rq(rt_rq, iter, rq) { |
499 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | 514 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); |
500 | 515 | ||
501 | raw_spin_lock(&rt_b->rt_runtime_lock); | 516 | raw_spin_lock(&rt_b->rt_runtime_lock); |
@@ -562,6 +577,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) | |||
562 | if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { | 577 | if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { |
563 | rt_rq->rt_throttled = 0; | 578 | rt_rq->rt_throttled = 0; |
564 | enqueue = 1; | 579 | enqueue = 1; |
580 | |||
581 | /* | ||
582 | * Force a clock update if the CPU was idle, | ||
583 | * lest wakeup -> unthrottle time accumulate. | ||
584 | */ | ||
585 | if (rt_rq->rt_nr_running && rq->curr == rq->idle) | ||
586 | rq->skip_clock_update = -1; | ||
565 | } | 587 | } |
566 | if (rt_rq->rt_time || rt_rq->rt_nr_running) | 588 | if (rt_rq->rt_time || rt_rq->rt_nr_running) |
567 | idle = 0; | 589 | idle = 0; |
@@ -977,13 +999,23 @@ static void yield_task_rt(struct rq *rq) | |||
977 | static int find_lowest_rq(struct task_struct *task); | 999 | static int find_lowest_rq(struct task_struct *task); |
978 | 1000 | ||
979 | static int | 1001 | static int |
980 | select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags) | 1002 | select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) |
981 | { | 1003 | { |
1004 | struct task_struct *curr; | ||
1005 | struct rq *rq; | ||
1006 | int cpu; | ||
1007 | |||
982 | if (sd_flag != SD_BALANCE_WAKE) | 1008 | if (sd_flag != SD_BALANCE_WAKE) |
983 | return smp_processor_id(); | 1009 | return smp_processor_id(); |
984 | 1010 | ||
1011 | cpu = task_cpu(p); | ||
1012 | rq = cpu_rq(cpu); | ||
1013 | |||
1014 | rcu_read_lock(); | ||
1015 | curr = ACCESS_ONCE(rq->curr); /* unlocked access */ | ||
1016 | |||
985 | /* | 1017 | /* |
986 | * If the current task is an RT task, then | 1018 | * If the current task on @p's runqueue is an RT task, then |
987 | * try to see if we can wake this RT task up on another | 1019 | * try to see if we can wake this RT task up on another |
988 | * runqueue. Otherwise simply start this RT task | 1020 | * runqueue. Otherwise simply start this RT task |
989 | * on its current runqueue. | 1021 | * on its current runqueue. |
@@ -997,21 +1029,25 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags) | |||
997 | * lock? | 1029 | * lock? |
998 | * | 1030 | * |
999 | * For equal prio tasks, we just let the scheduler sort it out. | 1031 | * For equal prio tasks, we just let the scheduler sort it out. |
1032 | * | ||
1033 | * Otherwise, just let it ride on the affined RQ and the | ||
1034 | * post-schedule router will push the preempted task away | ||
1035 | * | ||
1036 | * This test is optimistic; if we get it wrong the load-balancer | ||
1037 | * will have to sort it out. | ||
1000 | */ | 1038 | */ |
1001 | if (unlikely(rt_task(rq->curr)) && | 1039 | if (curr && unlikely(rt_task(curr)) && |
1002 | (rq->curr->rt.nr_cpus_allowed < 2 || | 1040 | (curr->rt.nr_cpus_allowed < 2 || |
1003 | rq->curr->prio < p->prio) && | 1041 | curr->prio < p->prio) && |
1004 | (p->rt.nr_cpus_allowed > 1)) { | 1042 | (p->rt.nr_cpus_allowed > 1)) { |
1005 | int cpu = find_lowest_rq(p); | 1043 | int target = find_lowest_rq(p); |
1006 | 1044 | ||
1007 | return (cpu == -1) ? task_cpu(p) : cpu; | 1045 | if (target != -1) |
1046 | cpu = target; | ||
1008 | } | 1047 | } |
1048 | rcu_read_unlock(); | ||
1009 | 1049 | ||
1010 | /* | 1050 | return cpu; |
1011 | * Otherwise, just let it ride on the affined RQ and the | ||
1012 | * post-schedule router will push the preempted task away | ||
1013 | */ | ||
1014 | return task_cpu(p); | ||
1015 | } | 1051 | } |
1016 | 1052 | ||
1017 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | 1053 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) |
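select_task_rq_rt() now peeks at the remote rq->curr without taking the runqueue lock: ACCESS_ONCE() forces a single untorn load, the surrounding rcu_read_lock() keeps the task_struct from being freed while it is examined, and a stale answer is tolerated because, as the comment says, the load balancer sorts out bad placements. A small user-space rendering of the single-load part; the types are illustrative:

        #include <stdio.h>

        #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

        struct task { int prio; };
        struct rq { struct task *curr; };

        /* Optimistic peek: one untorn load, no lock, staleness accepted.
         * (In the kernel, an RCU read section also pins *curr itself.) */
        static int remote_curr_prio(struct rq *rq)
        {
                struct task *curr = ACCESS_ONCE(rq->curr);

                return curr ? curr->prio : -1;
        }

        int main(void)
        {
                struct task t = { .prio = 10 };
                struct rq rq = { .curr = &t };

                printf("remote prio: %d\n", remote_curr_prio(&rq));
                return 0;
        }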
@@ -1136,7 +1172,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) | |||
1136 | * The previous task needs to be made eligible for pushing | 1172 | * The previous task needs to be made eligible for pushing |
1137 | * if it is still active | 1173 | * if it is still active |
1138 | */ | 1174 | */ |
1139 | if (p->se.on_rq && p->rt.nr_cpus_allowed > 1) | 1175 | if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1) |
1140 | enqueue_pushable_task(rq, p); | 1176 | enqueue_pushable_task(rq, p); |
1141 | } | 1177 | } |
1142 | 1178 | ||
@@ -1287,7 +1323,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) | |||
1287 | !cpumask_test_cpu(lowest_rq->cpu, | 1323 | !cpumask_test_cpu(lowest_rq->cpu, |
1288 | &task->cpus_allowed) || | 1324 | &task->cpus_allowed) || |
1289 | task_running(rq, task) || | 1325 | task_running(rq, task) || |
1290 | !task->se.on_rq)) { | 1326 | !task->on_rq)) { |
1291 | 1327 | ||
1292 | raw_spin_unlock(&lowest_rq->lock); | 1328 | raw_spin_unlock(&lowest_rq->lock); |
1293 | lowest_rq = NULL; | 1329 | lowest_rq = NULL; |
@@ -1321,7 +1357,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq) | |||
1321 | BUG_ON(task_current(rq, p)); | 1357 | BUG_ON(task_current(rq, p)); |
1322 | BUG_ON(p->rt.nr_cpus_allowed <= 1); | 1358 | BUG_ON(p->rt.nr_cpus_allowed <= 1); |
1323 | 1359 | ||
1324 | BUG_ON(!p->se.on_rq); | 1360 | BUG_ON(!p->on_rq); |
1325 | BUG_ON(!rt_task(p)); | 1361 | BUG_ON(!rt_task(p)); |
1326 | 1362 | ||
1327 | return p; | 1363 | return p; |
@@ -1467,7 +1503,7 @@ static int pull_rt_task(struct rq *this_rq) | |||
1467 | */ | 1503 | */ |
1468 | if (p && (p->prio < this_rq->rt.highest_prio.curr)) { | 1504 | if (p && (p->prio < this_rq->rt.highest_prio.curr)) { |
1469 | WARN_ON(p == src_rq->curr); | 1505 | WARN_ON(p == src_rq->curr); |
1470 | WARN_ON(!p->se.on_rq); | 1506 | WARN_ON(!p->on_rq); |
1471 | 1507 | ||
1472 | /* | 1508 | /* |
1473 | * There's a chance that p is higher in priority | 1509 | * There's a chance that p is higher in priority |
@@ -1538,7 +1574,7 @@ static void set_cpus_allowed_rt(struct task_struct *p, | |||
1538 | * Update the migration status of the RQ if we have an RT task | 1574 | * Update the migration status of the RQ if we have an RT task |
1539 | * which is running AND changing its weight value. | 1575 | * which is running AND changing its weight value. |
1540 | */ | 1576 | */ |
1541 | if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { | 1577 | if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) { |
1542 | struct rq *rq = task_rq(p); | 1578 | struct rq *rq = task_rq(p); |
1543 | 1579 | ||
1544 | if (!task_current(rq, p)) { | 1580 | if (!task_current(rq, p)) { |
@@ -1608,7 +1644,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p) | |||
1608 | * we may need to handle the pulling of RT tasks | 1644 | * we may need to handle the pulling of RT tasks |
1609 | * now. | 1645 | * now. |
1610 | */ | 1646 | */ |
1611 | if (p->se.on_rq && !rq->rt.rt_nr_running) | 1647 | if (p->on_rq && !rq->rt.rt_nr_running) |
1612 | pull_rt_task(rq); | 1648 | pull_rt_task(rq); |
1613 | } | 1649 | } |
1614 | 1650 | ||
@@ -1638,7 +1674,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) | |||
1638 | * If that current running task is also an RT task | 1674 | * If that current running task is also an RT task |
1639 | * then see if we can move to another run queue. | 1675 | * then see if we can move to another run queue. |
1640 | */ | 1676 | */ |
1641 | if (p->se.on_rq && rq->curr != p) { | 1677 | if (p->on_rq && rq->curr != p) { |
1642 | #ifdef CONFIG_SMP | 1678 | #ifdef CONFIG_SMP |
1643 | if (rq->rt.overloaded && push_rt_task(rq) && | 1679 | if (rq->rt.overloaded && push_rt_task(rq) && |
1644 | /* Don't resched if we changed runqueues */ | 1680 | /* Don't resched if we changed runqueues */ |
@@ -1657,7 +1693,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) | |||
1657 | static void | 1693 | static void |
1658 | prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) | 1694 | prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) |
1659 | { | 1695 | { |
1660 | if (!p->se.on_rq) | 1696 | if (!p->on_rq) |
1661 | return; | 1697 | return; |
1662 | 1698 | ||
1663 | if (rq->curr == p) { | 1699 | if (rq->curr == p) { |
@@ -1796,10 +1832,11 @@ extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); | |||
1796 | 1832 | ||
1797 | static void print_rt_stats(struct seq_file *m, int cpu) | 1833 | static void print_rt_stats(struct seq_file *m, int cpu) |
1798 | { | 1834 | { |
1835 | rt_rq_iter_t iter; | ||
1799 | struct rt_rq *rt_rq; | 1836 | struct rt_rq *rt_rq; |
1800 | 1837 | ||
1801 | rcu_read_lock(); | 1838 | rcu_read_lock(); |
1802 | for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu)) | 1839 | for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) |
1803 | print_rt_rq(m, cpu, rt_rq); | 1840 | print_rt_rq(m, cpu, rt_rq); |
1804 | rcu_read_unlock(); | 1841 | rcu_read_unlock(); |
1805 | } | 1842 | } |
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c index 1ba2bd40fdac..6f437632afab 100644 --- a/kernel/sched_stoptask.c +++ b/kernel/sched_stoptask.c | |||
@@ -9,8 +9,7 @@ | |||
9 | 9 | ||
10 | #ifdef CONFIG_SMP | 10 | #ifdef CONFIG_SMP |
11 | static int | 11 | static int |
12 | select_task_rq_stop(struct rq *rq, struct task_struct *p, | 12 | select_task_rq_stop(struct task_struct *p, int sd_flag, int flags) |
13 | int sd_flag, int flags) | ||
14 | { | 13 | { |
15 | return task_cpu(p); /* stop tasks never migrate */ | 14 | return task_cpu(p); /* stop tasks never migrate */ |
16 | } | 15 | } |
@@ -26,7 +25,7 @@ static struct task_struct *pick_next_task_stop(struct rq *rq) | |||
26 | { | 25 | { |
27 | struct task_struct *stop = rq->stop; | 26 | struct task_struct *stop = rq->stop; |
28 | 27 | ||
29 | if (stop && stop->se.on_rq) | 28 | if (stop && stop->on_rq) |
30 | return stop; | 29 | return stop; |
31 | 30 | ||
32 | return NULL; | 31 | return NULL; |
diff --git a/kernel/softirq.c b/kernel/softirq.c index 174f976c2874..13960170cad4 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd); | |||
58 | 58 | ||
59 | char *softirq_to_name[NR_SOFTIRQS] = { | 59 | char *softirq_to_name[NR_SOFTIRQS] = { |
60 | "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", | 60 | "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", |
61 | "TASKLET", "SCHED", "HRTIMER", "RCU" | 61 | "TASKLET", "SCHED", "HRTIMER" |
62 | }; | 62 | }; |
63 | 63 | ||
64 | /* | 64 | /* |
diff --git a/kernel/sys.c b/kernel/sys.c index af468edf096a..e4128b278f23 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -314,8 +314,8 @@ void kernel_restart_prepare(char *cmd) | |||
314 | { | 314 | { |
315 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); | 315 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); |
316 | system_state = SYSTEM_RESTART; | 316 | system_state = SYSTEM_RESTART; |
317 | usermodehelper_disable(); | ||
317 | device_shutdown(); | 318 | device_shutdown(); |
318 | sysdev_shutdown(); | ||
319 | syscore_shutdown(); | 319 | syscore_shutdown(); |
320 | } | 320 | } |
321 | 321 | ||
@@ -344,6 +344,7 @@ static void kernel_shutdown_prepare(enum system_states state) | |||
344 | blocking_notifier_call_chain(&reboot_notifier_list, | 344 | blocking_notifier_call_chain(&reboot_notifier_list, |
345 | (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL); | 345 | (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL); |
346 | system_state = state; | 346 | system_state = state; |
347 | usermodehelper_disable(); | ||
347 | device_shutdown(); | 348 | device_shutdown(); |
348 | } | 349 | } |
349 | /** | 350 | /** |
@@ -354,7 +355,6 @@ static void kernel_shutdown_prepare(enum system_states state) | |||
354 | void kernel_halt(void) | 355 | void kernel_halt(void) |
355 | { | 356 | { |
356 | kernel_shutdown_prepare(SYSTEM_HALT); | 357 | kernel_shutdown_prepare(SYSTEM_HALT); |
357 | sysdev_shutdown(); | ||
358 | syscore_shutdown(); | 358 | syscore_shutdown(); |
359 | printk(KERN_EMERG "System halted.\n"); | 359 | printk(KERN_EMERG "System halted.\n"); |
360 | kmsg_dump(KMSG_DUMP_HALT); | 360 | kmsg_dump(KMSG_DUMP_HALT); |
@@ -374,7 +374,6 @@ void kernel_power_off(void) | |||
374 | if (pm_power_off_prepare) | 374 | if (pm_power_off_prepare) |
375 | pm_power_off_prepare(); | 375 | pm_power_off_prepare(); |
376 | disable_nonboot_cpus(); | 376 | disable_nonboot_cpus(); |
377 | sysdev_shutdown(); | ||
378 | syscore_shutdown(); | 377 | syscore_shutdown(); |
379 | printk(KERN_EMERG "Power down.\n"); | 378 | printk(KERN_EMERG "Power down.\n"); |
380 | kmsg_dump(KMSG_DUMP_POWEROFF); | 379 | kmsg_dump(KMSG_DUMP_POWEROFF); |
diff --git a/kernel/time/Makefile b/kernel/time/Makefile index b0425991e9ac..e2fd74b8e8c2 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o | 1 | obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o |
2 | obj-y += timeconv.o posix-clock.o | 2 | obj-y += timeconv.o posix-clock.o alarmtimer.o |
3 | 3 | ||
4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o | 4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o |
5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o | 5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o |
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c new file mode 100644 index 000000000000..9265014cb4db --- /dev/null +++ b/kernel/time/alarmtimer.c | |||
@@ -0,0 +1,694 @@ | |||
1 | /* | ||
2 | * Alarmtimer interface | ||
3 | * | ||
4 | * This interface provides a timer which is similar to hrtimers, | ||
5 | * but triggers an RTC alarm if the box is suspended. | ||
6 | * | ||
7 | * This interface is influenced by the Android RTC Alarm timer | ||
8 | * interface. | ||
9 | * | ||
10 | * Copyright (C) 2010 IBM Corporation | ||
11 | * | ||
12 | * Author: John Stultz <john.stultz@linaro.org> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License version 2 as | ||
16 | * published by the Free Software Foundation. | ||
17 | */ | ||
18 | #include <linux/time.h> | ||
19 | #include <linux/hrtimer.h> | ||
20 | #include <linux/timerqueue.h> | ||
21 | #include <linux/rtc.h> | ||
22 | #include <linux/alarmtimer.h> | ||
23 | #include <linux/mutex.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/posix-timers.h> | ||
26 | #include <linux/workqueue.h> | ||
27 | #include <linux/freezer.h> | ||
28 | |||
29 | /** | ||
30 | * struct alarm_base - Alarm timer bases | ||
31 | * @lock: Lock for synchronized access to the base | ||
32 | * @timerqueue: Timerqueue head managing the list of events | ||
33 | * @timer: hrtimer used to schedule events while running | ||
34 | * @gettime: Function to read the time correlating to the base | ||
35 | * @base_clockid: clockid for the base | ||
36 | */ | ||
37 | static struct alarm_base { | ||
38 | spinlock_t lock; | ||
39 | struct timerqueue_head timerqueue; | ||
40 | struct hrtimer timer; | ||
41 | ktime_t (*gettime)(void); | ||
42 | clockid_t base_clockid; | ||
43 | } alarm_bases[ALARM_NUMTYPE]; | ||
44 | |||
45 | #ifdef CONFIG_RTC_CLASS | ||
46 | /* rtc timer and device for setting alarm wakeups at suspend */ | ||
47 | static struct rtc_timer rtctimer; | ||
48 | static struct rtc_device *rtcdev; | ||
49 | #endif | ||
50 | |||
51 | /* freezer delta & lock used to handle clock_nanosleep triggered wakeups */ | ||
52 | static ktime_t freezer_delta; | ||
53 | static DEFINE_SPINLOCK(freezer_delta_lock); | ||
54 | |||
55 | |||
56 | /** | ||
57 | * alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue | ||
58 | * @base: pointer to the base where the timer is being run | ||
59 | * @alarm: pointer to alarm being enqueued. | ||
60 | * | ||
61 | * Adds the alarm to an alarm_base timerqueue and if necessary sets | ||
62 | * an hrtimer to run. | ||
63 | * | ||
64 | * Must hold base->lock when calling. | ||
65 | */ | ||
66 | static void alarmtimer_enqueue(struct alarm_base *base, struct alarm *alarm) | ||
67 | { | ||
68 | timerqueue_add(&base->timerqueue, &alarm->node); | ||
69 | if (&alarm->node == timerqueue_getnext(&base->timerqueue)) { | ||
70 | hrtimer_try_to_cancel(&base->timer); | ||
71 | hrtimer_start(&base->timer, alarm->node.expires, | ||
72 | HRTIMER_MODE_ABS); | ||
73 | } | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * alarmtimer_remove - Removes an alarm timer from an alarm_base timerqueue | ||
78 | * @base: pointer to the base where the timer is running | ||
79 | * @alarm: pointer to alarm being removed | ||
80 | * | ||
81 | * Removes the alarm from an alarm_base timerqueue and if necessary sets | ||
82 | * a new timer to run. | ||
83 | * | ||
84 | * Must hold base->lock when calling. | ||
85 | */ | ||
86 | static void alarmtimer_remove(struct alarm_base *base, struct alarm *alarm) | ||
87 | { | ||
88 | struct timerqueue_node *next = timerqueue_getnext(&base->timerqueue); | ||
89 | |||
90 | timerqueue_del(&base->timerqueue, &alarm->node); | ||
91 | if (next == &alarm->node) { | ||
92 | hrtimer_try_to_cancel(&base->timer); | ||
93 | next = timerqueue_getnext(&base->timerqueue); | ||
94 | if (!next) | ||
95 | return; | ||
96 | hrtimer_start(&base->timer, next->expires, HRTIMER_MODE_ABS); | ||
97 | } | ||
98 | } | ||
99 | |||
100 | |||
101 | /** | ||
102 | * alarmtimer_fired - Handles alarm hrtimer being fired. | ||
103 | * @timer: pointer to hrtimer being run | ||
104 | * | ||
105 | * When an alarm timer fires, this runs through the timerqueue to | ||
106 | * see which alarms expired, and runs those. If there are more alarm | ||
107 | * timers queued for the future, we set the hrtimer to fire | ||
108 | * when the next future alarm timer expires. | ||
109 | */ | ||
110 | static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer) | ||
111 | { | ||
112 | struct alarm_base *base = container_of(timer, struct alarm_base, timer); | ||
113 | struct timerqueue_node *next; | ||
114 | unsigned long flags; | ||
115 | ktime_t now; | ||
116 | int ret = HRTIMER_NORESTART; | ||
117 | |||
118 | spin_lock_irqsave(&base->lock, flags); | ||
119 | now = base->gettime(); | ||
120 | while ((next = timerqueue_getnext(&base->timerqueue))) { | ||
121 | struct alarm *alarm; | ||
122 | ktime_t expired = next->expires; | ||
123 | |||
124 | if (expired.tv64 >= now.tv64) | ||
125 | break; | ||
126 | |||
127 | alarm = container_of(next, struct alarm, node); | ||
128 | |||
129 | timerqueue_del(&base->timerqueue, &alarm->node); | ||
130 | alarm->enabled = 0; | ||
131 | /* Re-add periodic timers */ | ||
132 | if (alarm->period.tv64) { | ||
133 | alarm->node.expires = ktime_add(expired, alarm->period); | ||
134 | timerqueue_add(&base->timerqueue, &alarm->node); | ||
135 | alarm->enabled = 1; | ||
136 | } | ||
137 | spin_unlock_irqrestore(&base->lock, flags); | ||
138 | if (alarm->function) | ||
139 | alarm->function(alarm); | ||
140 | spin_lock_irqsave(&base->lock, flags); | ||
141 | } | ||
142 | |||
143 | if (next) { | ||
144 | hrtimer_set_expires(&base->timer, next->expires); | ||
145 | ret = HRTIMER_RESTART; | ||
146 | } | ||
147 | spin_unlock_irqrestore(&base->lock, flags); | ||
148 | |||
149 | return ret; | ||
150 | |||
151 | } | ||
152 | |||
153 | #ifdef CONFIG_RTC_CLASS | ||
154 | /** | ||
155 | * alarmtimer_suspend - Suspend time callback | ||
156 | * @dev: unused | ||
158 | * | ||
159 | * When we are going into suspend, we look through the bases | ||
160 | * to see which is the soonest timer to expire. We then | ||
161 | * set an rtc timer to fire that far into the future, which | ||
162 | * will wake us from suspend. | ||
163 | */ | ||
164 | static int alarmtimer_suspend(struct device *dev) | ||
165 | { | ||
166 | struct rtc_time tm; | ||
167 | ktime_t min, now; | ||
168 | unsigned long flags; | ||
169 | int i; | ||
170 | |||
171 | spin_lock_irqsave(&freezer_delta_lock, flags); | ||
172 | min = freezer_delta; | ||
173 | freezer_delta = ktime_set(0, 0); | ||
174 | spin_unlock_irqrestore(&freezer_delta_lock, flags); | ||
175 | |||
176 | /* If we have no rtcdev, just return */ | ||
177 | if (!rtcdev) | ||
178 | return 0; | ||
179 | |||
180 | /* Find the soonest timer to expire */ | ||
181 | for (i = 0; i < ALARM_NUMTYPE; i++) { | ||
182 | struct alarm_base *base = &alarm_bases[i]; | ||
183 | struct timerqueue_node *next; | ||
184 | ktime_t delta; | ||
185 | |||
186 | spin_lock_irqsave(&base->lock, flags); | ||
187 | next = timerqueue_getnext(&base->timerqueue); | ||
188 | spin_unlock_irqrestore(&base->lock, flags); | ||
189 | if (!next) | ||
190 | continue; | ||
191 | delta = ktime_sub(next->expires, base->gettime()); | ||
192 | if (!min.tv64 || (delta.tv64 < min.tv64)) | ||
193 | min = delta; | ||
194 | } | ||
195 | if (min.tv64 == 0) | ||
196 | return 0; | ||
197 | |||
198 | /* XXX - Should we enforce a minimum sleep time? */ | ||
199 | WARN_ON(min.tv64 < NSEC_PER_SEC); | ||
200 | |||
201 | /* Setup an rtc timer to fire that far in the future */ | ||
202 | rtc_timer_cancel(rtcdev, &rtctimer); | ||
203 | rtc_read_time(rtcdev, &tm); | ||
204 | now = rtc_tm_to_ktime(tm); | ||
205 | now = ktime_add(now, min); | ||
206 | |||
207 | rtc_timer_start(rtcdev, &rtctimer, now, ktime_set(0, 0)); | ||
208 | |||
209 | return 0; | ||
210 | } | ||
211 | #else | ||
212 | static int alarmtimer_suspend(struct device *dev) | ||
213 | { | ||
214 | return 0; | ||
215 | } | ||
216 | #endif | ||
217 | |||
218 | static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type) | ||
219 | { | ||
220 | ktime_t delta; | ||
221 | unsigned long flags; | ||
222 | struct alarm_base *base = &alarm_bases[type]; | ||
223 | |||
224 | delta = ktime_sub(absexp, base->gettime()); | ||
225 | |||
226 | spin_lock_irqsave(&freezer_delta_lock, flags); | ||
227 | if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64)) | ||
228 | freezer_delta = delta; | ||
229 | spin_unlock_irqrestore(&freezer_delta_lock, flags); | ||
230 | } | ||
231 | |||
232 | |||
233 | /** | ||
234 | * alarm_init - Initialize an alarm structure | ||
235 | * @alarm: ptr to alarm to be initialized | ||
236 | * @type: the type of the alarm | ||
237 | * @function: callback that is run when the alarm fires | ||
238 | */ | ||
239 | void alarm_init(struct alarm *alarm, enum alarmtimer_type type, | ||
240 | void (*function)(struct alarm *)) | ||
241 | { | ||
242 | timerqueue_init(&alarm->node); | ||
243 | alarm->period = ktime_set(0, 0); | ||
244 | alarm->function = function; | ||
245 | alarm->type = type; | ||
246 | alarm->enabled = 0; | ||
247 | } | ||
248 | |||
249 | /** | ||
250 | * alarm_start - Sets an alarm to fire | ||
251 | * @alarm: ptr to alarm to set | ||
252 | * @start: time to run the alarm | ||
253 | * @period: period at which the alarm will recur | ||
254 | */ | ||
255 | void alarm_start(struct alarm *alarm, ktime_t start, ktime_t period) | ||
256 | { | ||
257 | struct alarm_base *base = &alarm_bases[alarm->type]; | ||
258 | unsigned long flags; | ||
259 | |||
260 | spin_lock_irqsave(&base->lock, flags); | ||
261 | if (alarm->enabled) | ||
262 | alarmtimer_remove(base, alarm); | ||
263 | alarm->node.expires = start; | ||
264 | alarm->period = period; | ||
265 | alarmtimer_enqueue(base, alarm); | ||
266 | alarm->enabled = 1; | ||
267 | spin_unlock_irqrestore(&base->lock, flags); | ||
268 | } | ||
269 | |||
270 | /** | ||
271 | * alarm_cancel - Tries to cancel an alarm timer | ||
272 | * @alarm: ptr to alarm to be canceled | ||
273 | */ | ||
274 | void alarm_cancel(struct alarm *alarm) | ||
275 | { | ||
276 | struct alarm_base *base = &alarm_bases[alarm->type]; | ||
277 | unsigned long flags; | ||
278 | |||
279 | spin_lock_irqsave(&base->lock, flags); | ||
280 | if (alarm->enabled) | ||
281 | alarmtimer_remove(base, alarm); | ||
282 | alarm->enabled = 0; | ||
283 | spin_unlock_irqrestore(&base->lock, flags); | ||
284 | } | ||
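alarm_init(), alarm_start() and alarm_cancel() together form the in-kernel side of this interface. A hypothetical kernel user arming a repeating five-second boottime alarm might look like the sketch below; my_alarm, my_alarm_fn and my_arm are invented names, and ktime_get_boottime() stands in for the base's gettime hook:

        static struct alarm my_alarm;

        static void my_alarm_fn(struct alarm *alarm)
        {
                pr_info("alarm fired\n");       /* runs from the hrtimer path */
        }

        static void my_arm(void)
        {
                ktime_t now = ktime_get_boottime();

                alarm_init(&my_alarm, ALARM_BOOTTIME, my_alarm_fn);
                /* first expiry 5 s from now, then recur every 5 s */
                alarm_start(&my_alarm, ktime_add(now, ktime_set(5, 0)),
                            ktime_set(5, 0));
        }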
285 | |||
286 | |||
287 | /** | ||
288 | * clock2alarm - helper that converts from clockid to alarmtypes | ||
289 | * @clockid: clockid. | ||
290 | */ | ||
291 | static enum alarmtimer_type clock2alarm(clockid_t clockid) | ||
292 | { | ||
293 | if (clockid == CLOCK_REALTIME_ALARM) | ||
294 | return ALARM_REALTIME; | ||
295 | if (clockid == CLOCK_BOOTTIME_ALARM) | ||
296 | return ALARM_BOOTTIME; | ||
297 | return -1; | ||
298 | } | ||
299 | |||
300 | /** | ||
301 | * alarm_handle_timer - Callback for posix timers | ||
302 | * @alarm: alarm that fired | ||
303 | * | ||
304 | * Posix timer callback for expired alarm timers. | ||
305 | */ | ||
306 | static void alarm_handle_timer(struct alarm *alarm) | ||
307 | { | ||
308 | struct k_itimer *ptr = container_of(alarm, struct k_itimer, | ||
309 | it.alarmtimer); | ||
310 | if (posix_timer_event(ptr, 0) != 0) | ||
311 | ptr->it_overrun++; | ||
312 | } | ||
313 | |||
314 | /** | ||
315 | * alarm_clock_getres - posix getres interface | ||
316 | * @which_clock: clockid | ||
317 | * @tp: timespec to fill | ||
318 | * | ||
319 | * Returns the granularity of underlying alarm base clock | ||
320 | */ | ||
321 | static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp) | ||
322 | { | ||
323 | clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid; | ||
324 | |||
325 | return hrtimer_get_res(baseid, tp); | ||
326 | } | ||
327 | |||
328 | /** | ||
329 | * alarm_clock_get - posix clock_get interface | ||
330 | * @which_clock: clockid | ||
331 | * @tp: timespec to fill. | ||
332 | * | ||
333 | * Provides the underlying alarm base time. | ||
334 | */ | ||
335 | static int alarm_clock_get(clockid_t which_clock, struct timespec *tp) | ||
336 | { | ||
337 | struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)]; | ||
338 | |||
339 | *tp = ktime_to_timespec(base->gettime()); | ||
340 | return 0; | ||
341 | } | ||
342 | |||
343 | /** | ||
344 | * alarm_timer_create - posix timer_create interface | ||
345 | * @new_timer: k_itimer pointer to manage | ||
346 | * | ||
347 | * Initializes the k_itimer structure. | ||
348 | */ | ||
349 | static int alarm_timer_create(struct k_itimer *new_timer) | ||
350 | { | ||
351 | enum alarmtimer_type type; | ||
352 | struct alarm_base *base; | ||
353 | |||
354 | if (!capable(CAP_WAKE_ALARM)) | ||
355 | return -EPERM; | ||
356 | |||
357 | type = clock2alarm(new_timer->it_clock); | ||
358 | base = &alarm_bases[type]; | ||
359 | alarm_init(&new_timer->it.alarmtimer, type, alarm_handle_timer); | ||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | /** | ||
364 | * alarm_timer_get - posix timer_get interface | ||
365 | * @timr: k_itimer pointer | ||
366 | * @cur_setting: itimerspec data to fill | ||
367 | * | ||
368 | * Copies the itimerspec data out from the k_itimer | ||
369 | */ | ||
370 | static void alarm_timer_get(struct k_itimer *timr, | ||
371 | struct itimerspec *cur_setting) | ||
372 | { | ||
373 | cur_setting->it_interval = | ||
374 | ktime_to_timespec(timr->it.alarmtimer.period); | ||
375 | cur_setting->it_value = | ||
376 | ktime_to_timespec(timr->it.alarmtimer.node.expires); | ||
377 | return; | ||
378 | } | ||
379 | |||
380 | /** | ||
381 | * alarm_timer_del - posix timer_del interface | ||
382 | * @timr: k_itimer pointer to be deleted | ||
383 | * | ||
384 | * Cancels any programmed alarms for the given timer. | ||
385 | */ | ||
386 | static int alarm_timer_del(struct k_itimer *timr) | ||
387 | { | ||
388 | alarm_cancel(&timr->it.alarmtimer); | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | /** | ||
393 | * alarm_timer_set - posix timer_set interface | ||
394 | * @timr: k_itimer pointer to be set | ||
395 | * @flags: timer flags | ||
396 | * @new_setting: itimerspec to be used | ||
397 | * @old_setting: itimerspec being replaced | ||
398 | * | ||
399 | * Sets the timer to new_setting, and starts the timer. | ||
400 | */ | ||
401 | static int alarm_timer_set(struct k_itimer *timr, int flags, | ||
402 | struct itimerspec *new_setting, | ||
403 | struct itimerspec *old_setting) | ||
404 | { | ||
405 | /* Save old values */ | ||
406 | old_setting->it_interval = | ||
407 | ktime_to_timespec(timr->it.alarmtimer.period); | ||
408 | old_setting->it_value = | ||
409 | ktime_to_timespec(timr->it.alarmtimer.node.expires); | ||
410 | |||
411 | /* If the timer was already set, cancel it */ | ||
412 | alarm_cancel(&timr->it.alarmtimer); | ||
413 | |||
414 | /* start the timer */ | ||
415 | alarm_start(&timr->it.alarmtimer, | ||
416 | timespec_to_ktime(new_setting->it_value), | ||
417 | timespec_to_ktime(new_setting->it_interval)); | ||
418 | return 0; | ||
419 | } | ||
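Via the k_clock methods above these alarms surface as ordinary POSIX timers. A user-space sketch follows (build with -lrt); the clockid constant is spelled out by hand since libcs of this era may not export it, and timer_create() fails with EPERM without CAP_WAKE_ALARM:

        #include <signal.h>
        #include <stdio.h>
        #include <time.h>
        #include <unistd.h>

        #ifndef CLOCK_REALTIME_ALARM
        #define CLOCK_REALTIME_ALARM 8          /* from include/linux/time.h */
        #endif

        static void on_alarm(int sig) { (void)sig; }

        int main(void)
        {
                timer_t id;
                struct sigevent sev = {
                        .sigev_notify = SIGEV_SIGNAL,
                        .sigev_signo  = SIGALRM,
                };
                struct itimerspec its = { .it_value = { .tv_sec = 30 } };

                signal(SIGALRM, on_alarm);
                if (timer_create(CLOCK_REALTIME_ALARM, &sev, &id)) {
                        perror("timer_create"); /* EPERM without CAP_WAKE_ALARM */
                        return 1;
                }
                timer_settime(id, 0, &its, NULL);
                pause();        /* the alarm can pull the box out of suspend */
                return 0;
        }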
420 | |||
421 | /** | ||
422 | * alarmtimer_nsleep_wakeup - Wakeup function for alarm_timer_nsleep | ||
423 | * @alarm: ptr to alarm that fired | ||
424 | * | ||
425 | * Wakes up the task that set the alarmtimer | ||
426 | */ | ||
427 | static void alarmtimer_nsleep_wakeup(struct alarm *alarm) | ||
428 | { | ||
429 | struct task_struct *task = (struct task_struct *)alarm->data; | ||
430 | |||
431 | alarm->data = NULL; | ||
432 | if (task) | ||
433 | wake_up_process(task); | ||
434 | } | ||
435 | |||
436 | /** | ||
437 | * alarmtimer_do_nsleep - Internal alarmtimer nsleep implementation | ||
438 | * @alarm: ptr to alarmtimer | ||
439 | * @absexp: absolute expiration time | ||
440 | * | ||
441 | * Sets the alarm timer and sleeps until it is fired or interrupted. | ||
442 | */ | ||
443 | static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp) | ||
444 | { | ||
445 | alarm->data = (void *)current; | ||
446 | do { | ||
447 | set_current_state(TASK_INTERRUPTIBLE); | ||
448 | alarm_start(alarm, absexp, ktime_set(0, 0)); | ||
449 | if (likely(alarm->data)) | ||
450 | schedule(); | ||
451 | |||
452 | alarm_cancel(alarm); | ||
453 | } while (alarm->data && !signal_pending(current)); | ||
454 | |||
455 | __set_current_state(TASK_RUNNING); | ||
456 | |||
457 | return (alarm->data == NULL); | ||
458 | } | ||
459 | |||
460 | |||
461 | /** | ||
462 | * update_rmtp - Update remaining timespec value | ||
463 | * @exp: expiration time | ||
464 | * @type: timer type | ||
465 | * @rmtp: user pointer to remaining timespec value | ||
466 | * | ||
467 | * Helper function that fills in rmtp value with time between | ||
468 | * now and the exp value | ||
469 | */ | ||
470 | static int update_rmtp(ktime_t exp, enum alarmtimer_type type, | ||
471 | struct timespec __user *rmtp) | ||
472 | { | ||
473 | struct timespec rmt; | ||
474 | ktime_t rem; | ||
475 | |||
476 | rem = ktime_sub(exp, alarm_bases[type].gettime()); | ||
477 | |||
478 | if (rem.tv64 <= 0) | ||
479 | return 0; | ||
480 | rmt = ktime_to_timespec(rem); | ||
481 | |||
482 | if (copy_to_user(rmtp, &rmt, sizeof(*rmtp))) | ||
483 | return -EFAULT; | ||
484 | |||
485 | return 1; | ||
486 | |||
487 | } | ||
488 | |||
489 | /** | ||
490 | * alarm_timer_nsleep_restart - restartblock alarmtimer nsleep | ||
491 | * @restart: ptr to restart block | ||
492 | * | ||
493 | * Handles restarted clock_nanosleep calls | ||
494 | */ | ||
495 | static long __sched alarm_timer_nsleep_restart(struct restart_block *restart) | ||
496 | { | ||
497 | enum alarmtimer_type type = restart->nanosleep.index; | ||
498 | ktime_t exp; | ||
499 | struct timespec __user *rmtp; | ||
500 | struct alarm alarm; | ||
501 | int ret = 0; | ||
502 | |||
503 | exp.tv64 = restart->nanosleep.expires; | ||
504 | alarm_init(&alarm, type, alarmtimer_nsleep_wakeup); | ||
505 | |||
506 | if (alarmtimer_do_nsleep(&alarm, exp)) | ||
507 | goto out; | ||
508 | |||
509 | if (freezing(current)) | ||
510 | alarmtimer_freezerset(exp, type); | ||
511 | |||
512 | rmtp = restart->nanosleep.rmtp; | ||
513 | if (rmtp) { | ||
514 | ret = update_rmtp(exp, type, rmtp); | ||
515 | if (ret <= 0) | ||
516 | goto out; | ||
517 | } | ||
518 | |||
519 | |||
520 | /* The other values in restart are already filled in */ | ||
521 | ret = -ERESTART_RESTARTBLOCK; | ||
522 | out: | ||
523 | return ret; | ||
524 | } | ||
525 | |||
526 | /** | ||
527 | * alarm_timer_nsleep - alarmtimer nanosleep | ||
528 | * @which_clock: clockid | ||
529 | * @flags: determines abstime or relative | ||
530 | * @tsreq: requested sleep time (abs or rel) | ||
531 | * @rmtp: remaining sleep time saved | ||
532 | * | ||
533 | * Handles clock_nanosleep calls against _ALARM clockids | ||
534 | */ | ||
535 | static int alarm_timer_nsleep(const clockid_t which_clock, int flags, | ||
536 | struct timespec *tsreq, struct timespec __user *rmtp) | ||
537 | { | ||
538 | enum alarmtimer_type type = clock2alarm(which_clock); | ||
539 | struct alarm alarm; | ||
540 | ktime_t exp; | ||
541 | int ret = 0; | ||
542 | struct restart_block *restart; | ||
543 | |||
544 | if (!capable(CAP_WAKE_ALARM)) | ||
545 | return -EPERM; | ||
546 | |||
547 | alarm_init(&alarm, type, alarmtimer_nsleep_wakeup); | ||
548 | |||
549 | exp = timespec_to_ktime(*tsreq); | ||
550 | /* Convert (if necessary) to absolute time */ | ||
551 | if (flags != TIMER_ABSTIME) { | ||
552 | ktime_t now = alarm_bases[type].gettime(); | ||
553 | exp = ktime_add(now, exp); | ||
554 | } | ||
555 | |||
556 | if (alarmtimer_do_nsleep(&alarm, exp)) | ||
557 | goto out; | ||
558 | |||
559 | if (freezing(current)) | ||
560 | alarmtimer_freezerset(exp, type); | ||
561 | |||
562 | /* abs timers don't set remaining time or restart */ | ||
563 | if (flags == TIMER_ABSTIME) { | ||
564 | ret = -ERESTARTNOHAND; | ||
565 | goto out; | ||
566 | } | ||
567 | |||
568 | if (rmtp) { | ||
569 | ret = update_rmtp(exp, type, rmtp); | ||
570 | if (ret <= 0) | ||
571 | goto out; | ||
572 | } | ||
573 | |||
574 | restart = &current_thread_info()->restart_block; | ||
575 | restart->fn = alarm_timer_nsleep_restart; | ||
576 | restart->nanosleep.index = type; | ||
577 | restart->nanosleep.expires = exp.tv64; | ||
578 | restart->nanosleep.rmtp = rmtp; | ||
579 | ret = -ERESTART_RESTARTBLOCK; | ||
580 | |||
581 | out: | ||
582 | return ret; | ||
583 | } | ||
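The nsleep path is reachable from user space through clock_nanosleep() on the _ALARM clockids; a relative sleep both suspends the task and, if the machine suspends, programs an RTC wakeup. A brief sketch, again defining the clockid by hand and requiring CAP_WAKE_ALARM:

        #include <time.h>

        #ifndef CLOCK_BOOTTIME_ALARM
        #define CLOCK_BOOTTIME_ALARM 9          /* from include/linux/time.h */
        #endif

        /* Sleep 60 s of boot time; wakes the box from suspend if need be. */
        int alarm_sleep_60s(void)
        {
                struct timespec req = { .tv_sec = 60, .tv_nsec = 0 };

                return clock_nanosleep(CLOCK_BOOTTIME_ALARM, 0, &req, NULL);
        }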
584 | |||
585 | |||
586 | /* Suspend hook structures */ | ||
587 | static const struct dev_pm_ops alarmtimer_pm_ops = { | ||
588 | .suspend = alarmtimer_suspend, | ||
589 | }; | ||
590 | |||
591 | static struct platform_driver alarmtimer_driver = { | ||
592 | .driver = { | ||
593 | .name = "alarmtimer", | ||
594 | .pm = &alarmtimer_pm_ops, | ||
595 | } | ||
596 | }; | ||
597 | |||
598 | /** | ||
599 | * alarmtimer_init - Initialize alarm timer code | ||
600 | * | ||
601 | * This function initializes the alarm bases and registers | ||
602 | * the posix clock ids. | ||
603 | */ | ||
604 | static int __init alarmtimer_init(void) | ||
605 | { | ||
606 | int error = 0; | ||
607 | int i; | ||
608 | struct k_clock alarm_clock = { | ||
609 | .clock_getres = alarm_clock_getres, | ||
610 | .clock_get = alarm_clock_get, | ||
611 | .timer_create = alarm_timer_create, | ||
612 | .timer_set = alarm_timer_set, | ||
613 | .timer_del = alarm_timer_del, | ||
614 | .timer_get = alarm_timer_get, | ||
615 | .nsleep = alarm_timer_nsleep, | ||
616 | }; | ||
617 | |||
618 | posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock); | ||
619 | posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock); | ||
620 | |||
621 | /* Initialize alarm bases */ | ||
622 | alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME; | ||
623 | alarm_bases[ALARM_REALTIME].gettime = &ktime_get_real; | ||
624 | alarm_bases[ALARM_BOOTTIME].base_clockid = CLOCK_BOOTTIME; | ||
625 | alarm_bases[ALARM_BOOTTIME].gettime = &ktime_get_boottime; | ||
626 | for (i = 0; i < ALARM_NUMTYPE; i++) { | ||
627 | timerqueue_init_head(&alarm_bases[i].timerqueue); | ||
628 | spin_lock_init(&alarm_bases[i].lock); | ||
629 | hrtimer_init(&alarm_bases[i].timer, | ||
630 | alarm_bases[i].base_clockid, | ||
631 | HRTIMER_MODE_ABS); | ||
632 | alarm_bases[i].timer.function = alarmtimer_fired; | ||
633 | } | ||
634 | error = platform_driver_register(&alarmtimer_driver); | ||
635 | platform_device_register_simple("alarmtimer", -1, NULL, 0); | ||
636 | |||
637 | return error; | ||
638 | } | ||
639 | device_initcall(alarmtimer_init); | ||
640 | |||
641 | #ifdef CONFIG_RTC_CLASS | ||
642 | /** | ||
643 | * has_wakealarm - check rtc device has wakealarm ability | ||
644 | * @dev: current device | ||
645 | * @name_ptr: name to be returned | ||
646 | * | ||
647 | * This helper function checks to see if the rtc device can wake | ||
648 | * from suspend. | ||
649 | */ | ||
650 | static int __init has_wakealarm(struct device *dev, void *name_ptr) | ||
651 | { | ||
652 | struct rtc_device *candidate = to_rtc_device(dev); | ||
653 | |||
654 | if (!candidate->ops->set_alarm) | ||
655 | return 0; | ||
656 | if (!device_may_wakeup(candidate->dev.parent)) | ||
657 | return 0; | ||
658 | |||
659 | *(const char **)name_ptr = dev_name(dev); | ||
660 | return 1; | ||
661 | } | ||
662 | |||
663 | /** | ||
664 | * alarmtimer_init_late - Late initializing of alarmtimer code | ||
665 | * | ||
666 | * This function locates an rtc device to use for wakealarms. | ||
667 | * Run as late_initcall to make sure rtc devices have been | ||
668 | * registered. | ||
669 | */ | ||
670 | static int __init alarmtimer_init_late(void) | ||
671 | { | ||
672 | char *str; | ||
673 | |||
674 | /* Find an rtc device and init the rtc_timer */ | ||
675 | class_find_device(rtc_class, NULL, &str, has_wakealarm); | ||
676 | if (str) | ||
677 | rtcdev = rtc_class_open(str); | ||
678 | if (!rtcdev) { | ||
679 | printk(KERN_WARNING "No RTC device found, ALARM timers will" | ||
680 | " not wake from suspend"); | ||
681 | } | ||
682 | rtc_timer_init(&rtctimer, NULL, NULL); | ||
683 | |||
684 | return 0; | ||
685 | } | ||
686 | #else | ||
687 | static int __init alarmtimer_init_late(void) | ||
688 | { | ||
689 | printk(KERN_WARNING "Kernel not built with RTC support, ALARM timers" | ||
690 | " will not wake from suspend"); | ||
691 | return 0; | ||
692 | } | ||
693 | #endif | ||
694 | late_initcall(alarmtimer_init_late); | ||
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 0d74b9ba90c8..22a9da9a9c96 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
@@ -194,6 +194,70 @@ void clockevents_register_device(struct clock_event_device *dev) | |||
194 | } | 194 | } |
195 | EXPORT_SYMBOL_GPL(clockevents_register_device); | 195 | EXPORT_SYMBOL_GPL(clockevents_register_device); |
196 | 196 | ||
197 | static void clockevents_config(struct clock_event_device *dev, | ||
198 | u32 freq) | ||
199 | { | ||
200 | unsigned long sec; | ||
201 | |||
202 | if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) | ||
203 | return; | ||
204 | |||
205 | /* | ||
206 | * Calculate the maximum number of seconds we can sleep. Limit | ||
207 | * to 10 minutes for hardware which can program more than | ||
208 | * 32bit ticks so we still get reasonable conversion values. | ||
209 | */ | ||
210 | sec = dev->max_delta_ticks; | ||
211 | do_div(sec, freq); | ||
212 | if (!sec) | ||
213 | sec = 1; | ||
214 | else if (sec > 600 && dev->max_delta_ticks > UINT_MAX) | ||
215 | sec = 600; | ||
216 | |||
217 | clockevents_calc_mult_shift(dev, freq, sec); | ||
218 | dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev); | ||
219 | dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev); | ||
220 | } | ||
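Two worked instances of the clamp in clockevents_config(), with made-up hardware: a 32-bit counter at 100 MHz wraps after about 42 s, which is kept because max_delta_ticks does not exceed UINT_MAX; a 48-bit counter at 10 MHz could sleep for roughly 28 million seconds, so it is cut back to 600 s to keep the mult/shift conversion precise:

        #include <stdio.h>

        int main(void)
        {
                /* Illustrative devices, not taken from the patch. */
                unsigned long long ticks32 = 0xffffffffULL;     /* 32-bit counter */
                unsigned long long ticks48 = 1ULL << 48;        /* 48-bit counter */

                printf("%llu\n", ticks32 / 100000000ULL);       /* 42 -> kept */
                printf("%llu\n", ticks48 / 10000000ULL);        /* 28147497 -> 600 */
                return 0;
        }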
221 | |||
222 | /** | ||
223 | * clockevents_config_and_register - Configure and register a clock event device | ||
224 | * @dev: device to register | ||
225 | * @freq: The clock frequency | ||
226 | * @min_delta: The minimum clock ticks to program in oneshot mode | ||
227 | * @max_delta: The maximum clock ticks to program in oneshot mode | ||
228 | * | ||
229 | * min/max_delta can be 0 for devices which do not support oneshot mode. | ||
230 | */ | ||
231 | void clockevents_config_and_register(struct clock_event_device *dev, | ||
232 | u32 freq, unsigned long min_delta, | ||
233 | unsigned long max_delta) | ||
234 | { | ||
235 | dev->min_delta_ticks = min_delta; | ||
236 | dev->max_delta_ticks = max_delta; | ||
237 | clockevents_config(dev, freq); | ||
238 | clockevents_register_device(dev); | ||
239 | } | ||
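For a driver, the new helper folds the mult/shift math, the delta conversion and the registration into one call. A hypothetical timer driver would use it as sketched below; my_evt, my_set_next_event and my_set_mode are invented names standing in for the driver's own callbacks:

        static struct clock_event_device my_evt = {
                .name           = "my-timer",
                .features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
                .set_next_event = my_set_next_event,
                .set_mode       = my_set_mode,
        };

        static void __init my_timer_init(u32 timer_hz)
        {
                /* replaces open-coded clockevents_calc_mult_shift() +
                 * clockevent_delta2ns() + clockevents_register_device() */
                clockevents_config_and_register(&my_evt, timer_hz, 2, 0xffffffff);
        }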
240 | |||
241 | /** | ||
242 | * clockevents_update_freq - Update frequency and reprogram a clock event device. | ||
243 | * @dev: device to modify | ||
244 | * @freq: new device frequency | ||
245 | * | ||
246 | * Reconfigure and reprogram a clock event device in oneshot | ||
247 | * mode. Must be called on the cpu for which the device delivers per | ||
248 | * cpu timer events with interrupts disabled! Returns 0 on success, | ||
249 | * -ETIME when the event is in the past. | ||
250 | */ | ||
251 | int clockevents_update_freq(struct clock_event_device *dev, u32 freq) | ||
252 | { | ||
253 | clockevents_config(dev, freq); | ||
254 | |||
255 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) | ||
256 | return 0; | ||
257 | |||
258 | return clockevents_program_event(dev, dev->next_event, ktime_get()); | ||
259 | } | ||
260 | |||
197 | /* | 261 | /* |
198 | * Noop handler when we shut down an event device | 262 | * Noop handler when we shut down an event device |
199 | */ | 263 | */ |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 6519cf62d9cd..d9d5f8c885f6 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -626,19 +626,6 @@ static void clocksource_enqueue(struct clocksource *cs) | |||
626 | list_add(&cs->list, entry); | 626 | list_add(&cs->list, entry); |
627 | } | 627 | } |
628 | 628 | ||
629 | |||
630 | /* | ||
631 | * Maximum time we expect to go between ticks. This includes idle | ||
632 | * tickless time. It provides the trade off between selecting a | ||
633 | * mult/shift pair that is very precise but can only handle a short | ||
634 | * period of time, vs. a mult/shift pair that can handle long periods | ||
635 | * of time but isn't as precise. | ||
636 | * | ||
637 | * This is a subsystem constant, and actual hardware limitations | ||
638 | * may override it (ie: clocksources that wrap every 3 seconds). | ||
639 | */ | ||
640 | #define MAX_UPDATE_LENGTH 5 /* Seconds */ | ||
641 | |||
642 | /** | 629 | /** |
643 | * __clocksource_updatefreq_scale - Used to update clocksource with new freq | 630 | * __clocksource_updatefreq_scale - Used to update clocksource with new freq |
644 | * @t: clocksource to be registered | 631 | * @t: clocksource to be registered |
@@ -652,15 +639,28 @@ static void clocksource_enqueue(struct clocksource *cs) | |||
652 | */ | 639 | */ |
653 | void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) | 640 | void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) |
654 | { | 641 | { |
642 | unsigned long sec; | ||
643 | |||
655 | /* | 644 | /* |
656 | * Ideally we want to use some of the limits used in | 645 | * Calc the maximum number of seconds which we can run before |
657 | * clocksource_max_deferment, to provide a more informed | 646 | * wrapping around. For clocksources which have a mask > 32bit |
658 | * MAX_UPDATE_LENGTH. But for now this just gets the | 647 | * we need to limit the max sleep time to have a good |
659 | * register interface working properly. | 648 | * conversion precision. 10 minutes is still a reasonable |
649 | * amount. That results in a shift value of 24 for a | ||
650 | * clocksource with mask >= 40bit and f >= 4GHz. That maps to | ||
651 | * ~ 0.06ppm granularity for NTP. We apply the same 12.5% | ||
652 | * margin as we do in clocksource_max_deferment() | ||
660 | */ | 653 | */ |
654 | sec = (cs->mask - (cs->mask >> 5)); | ||
655 | do_div(sec, freq); | ||
656 | do_div(sec, scale); | ||
657 | if (!sec) | ||
658 | sec = 1; | ||
659 | else if (sec > 600 && cs->mask > UINT_MAX) | ||
660 | sec = 600; | ||
661 | |||
661 | clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, | 662 | clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, |
662 | NSEC_PER_SEC/scale, | 663 | NSEC_PER_SEC / scale, sec * scale); |
663 | MAX_UPDATE_LENGTH*scale); | ||
664 | cs->max_idle_ns = clocksource_max_deferment(cs); | 664 | cs->max_idle_ns = clocksource_max_deferment(cs); |
665 | } | 665 | } |
666 | EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); | 666 | EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); |
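The same 600 s clamp at work in __clocksource_updatefreq_scale(), on made-up clocksources: a 32-bit counter at 19.2 MHz with scale 1 yields sec = (mask - mask/32) / freq ≈ 216 and is kept as-is, while a mask wider than 32 bits that computes past 600 s is clamped:

        #include <stdio.h>

        int main(void)
        {
                /* Illustrative clocksources, not taken from the patch. */
                unsigned long long mask32 = 0xffffffffULL;
                unsigned long long mask56 = (1ULL << 56) - 1;

                /* wrap margin as in the code above: mask - (mask >> 5) */
                unsigned long long sec32 = (mask32 - (mask32 >> 5)) / 19200000ULL;
                unsigned long long sec56 = (mask56 - (mask56 >> 5)) / 1000000ULL;

                printf("%llu\n", sec32);        /* 216 -> kept, below 600 */
                printf("%llu\n", sec56);        /* ~7e10 -> clamped to 600 */
                return 0;
        }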
@@ -685,8 +685,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) | |||
685 | /* Add clocksource to the clocksource list */ | 685 | /* Add clocksource to the clocksource list */ |
686 | mutex_lock(&clocksource_mutex); | 686 | mutex_lock(&clocksource_mutex); |
687 | clocksource_enqueue(cs); | 687 | clocksource_enqueue(cs); |
688 | clocksource_select(); | ||
689 | clocksource_enqueue_watchdog(cs); | 688 | clocksource_enqueue_watchdog(cs); |
689 | clocksource_select(); | ||
690 | mutex_unlock(&clocksource_mutex); | 690 | mutex_unlock(&clocksource_mutex); |
691 | return 0; | 691 | return 0; |
692 | } | 692 | } |
@@ -706,8 +706,8 @@ int clocksource_register(struct clocksource *cs) | |||
706 | 706 | ||
707 | mutex_lock(&clocksource_mutex); | 707 | mutex_lock(&clocksource_mutex); |
708 | clocksource_enqueue(cs); | 708 | clocksource_enqueue(cs); |
709 | clocksource_select(); | ||
710 | clocksource_enqueue_watchdog(cs); | 709 | clocksource_enqueue_watchdog(cs); |
710 | clocksource_select(); | ||
711 | mutex_unlock(&clocksource_mutex); | 711 | mutex_unlock(&clocksource_mutex); |
712 | return 0; | 712 | return 0; |
713 | } | 713 | } |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index da800ffa810c..723c7637e55a 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -522,10 +522,11 @@ static void tick_broadcast_init_next_event(struct cpumask *mask, | |||
522 | */ | 522 | */ |
523 | void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | 523 | void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
524 | { | 524 | { |
525 | int cpu = smp_processor_id(); | ||
526 | |||
525 | /* Set it up only once ! */ | 527 | /* Set it up only once ! */ |
526 | if (bc->event_handler != tick_handle_oneshot_broadcast) { | 528 | if (bc->event_handler != tick_handle_oneshot_broadcast) { |
527 | int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; | 529 | int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; |
528 | int cpu = smp_processor_id(); | ||
529 | 530 | ||
530 | bc->event_handler = tick_handle_oneshot_broadcast; | 531 | bc->event_handler = tick_handle_oneshot_broadcast; |
531 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); | 532 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); |
@@ -551,6 +552,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
551 | tick_broadcast_set_event(tick_next_period, 1); | 552 | tick_broadcast_set_event(tick_next_period, 1); |
552 | } else | 553 | } else |
553 | bc->next_event.tv64 = KTIME_MAX; | 554 | bc->next_event.tv64 = KTIME_MAX; |
555 | } else { | ||
556 | /* | ||
557 | * The first cpu which switches to oneshot mode sets | ||
558 | * the bit for all other cpus which are in the general | ||
559 | * (periodic) broadcast mask. So the bit is already set | ||
560 | * and would prevent the first broadcast enter after this | ||
561 | * from programming the bc device. | ||
562 | */ | ||
563 | tick_broadcast_clear_oneshot(cpu); | ||
554 | } | 564 | } |
555 | } | 565 | } |
556 | 566 | ||
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 8ad5d576755e..8e6a05a5915a 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -596,6 +596,58 @@ void __init timekeeping_init(void) | |||
596 | static struct timespec timekeeping_suspend_time; | 596 | static struct timespec timekeeping_suspend_time; |
597 | 597 | ||
598 | /** | 598 | /** |
599 | * __timekeeping_inject_sleeptime - Internal function to add sleep interval | ||
600 | * @delta: pointer to a timespec delta value | ||
601 | * | ||
602 | * Takes a timespec offset measuring a suspend interval and properly | ||
603 | * adds the sleep offset to the timekeeping variables. | ||
604 | */ | ||
605 | static void __timekeeping_inject_sleeptime(struct timespec *delta) | ||
606 | { | ||
607 | xtime = timespec_add(xtime, *delta); | ||
608 | wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta); | ||
609 | total_sleep_time = timespec_add(total_sleep_time, *delta); | ||
610 | } | ||
611 | |||
612 | |||
613 | /** | ||
614 | * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values | ||
615 | * @delta: pointer to a timespec delta value | ||
616 | * | ||
617 | * This hook is for architectures that cannot support read_persistent_clock | ||
618 | * because their RTC/persistent clock is only accessible when irqs are enabled. | ||
619 | * | ||
620 | * This function should only be called by rtc_resume(), and allows | ||
621 | * a suspend offset to be injected into the timekeeping values. | ||
622 | */ | ||
623 | void timekeeping_inject_sleeptime(struct timespec *delta) | ||
624 | { | ||
625 | unsigned long flags; | ||
626 | struct timespec ts; | ||
627 | |||
628 | /* Make sure we don't set the clock twice */ | ||
629 | read_persistent_clock(&ts); | ||
630 | if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) | ||
631 | return; | ||
632 | |||
633 | write_seqlock_irqsave(&xtime_lock, flags); | ||
634 | timekeeping_forward_now(); | ||
635 | |||
636 | __timekeeping_inject_sleeptime(delta); | ||
637 | |||
638 | timekeeper.ntp_error = 0; | ||
639 | ntp_clear(); | ||
640 | update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock, | ||
641 | timekeeper.mult); | ||
642 | |||
643 | write_sequnlock_irqrestore(&xtime_lock, flags); | ||
644 | |||
645 | /* signal hrtimers about time change */ | ||
646 | clock_was_set(); | ||
647 | } | ||
648 | |||
649 | |||
650 | /** | ||
599 | * timekeeping_resume - Resumes the generic timekeeping subsystem. | 651 | * timekeeping_resume - Resumes the generic timekeeping subsystem. |
600 | * | 652 | * |
601 | * This is for the generic clocksource timekeeping. | 653 | * This is for the generic clocksource timekeeping. |
@@ -615,9 +667,7 @@ static void timekeeping_resume(void) | |||
615 | 667 | ||
616 | if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { | 668 | if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { |
617 | ts = timespec_sub(ts, timekeeping_suspend_time); | 669 | ts = timespec_sub(ts, timekeeping_suspend_time); |
618 | xtime = timespec_add(xtime, ts); | 670 | __timekeeping_inject_sleeptime(&ts); |
619 | wall_to_monotonic = timespec_sub(wall_to_monotonic, ts); | ||
620 | total_sleep_time = timespec_add(total_sleep_time, ts); | ||
621 | } | 671 | } |
622 | /* re-base the last cycle value */ | 672 | /* re-base the last cycle value */ |
623 | timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); | 673 | timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); |
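For orientation, a minimal sketch of how an RTC driver's resume path might consume the new hook. This is illustrative only: example_rtc_read_time() and the suspend-time bookkeeping are assumptions, not part of the patch; the one behaviour guaranteed above is that timekeeping_inject_sleeptime() bails out when read_persistent_clock() already reports a non-zero time.

static struct timespec example_suspend_ts;	/* sampled in the suspend path */

static int example_rtc_resume(struct device *dev)
{
	struct timespec now, delta;

	example_rtc_read_time(&now);	/* assumed helper; irqs are enabled here */
	delta = timespec_sub(now, example_suspend_ts);

	/* Credit the slept interval to the timekeeping variables exactly once. */
	timekeeping_inject_sleeptime(&delta);
	return 0;
}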
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 35d55a386145..f925c45f0afa 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -53,7 +53,6 @@ const char *reserved_field_names[] = { | |||
53 | "common_preempt_count", | 53 | "common_preempt_count", |
54 | "common_pid", | 54 | "common_pid", |
55 | "common_tgid", | 55 | "common_tgid", |
56 | "common_lock_depth", | ||
57 | FIELD_STRING_IP, | 56 | FIELD_STRING_IP, |
58 | FIELD_STRING_RETIP, | 57 | FIELD_STRING_RETIP, |
59 | FIELD_STRING_FUNC, | 58 | FIELD_STRING_FUNC, |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index c768bcdda1b7..10ef61981149 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -238,6 +238,21 @@ config DETECT_HUNG_TASK | |||
238 | enabled then all held locks will also be reported. This | 238 | enabled then all held locks will also be reported. This |
239 | feature has negligible overhead. | 239 | feature has negligible overhead. |
240 | 240 | ||
241 | config DEFAULT_HUNG_TASK_TIMEOUT | ||
242 | int "Default timeout for hung task detection (in seconds)" | ||
243 | depends on DETECT_HUNG_TASK | ||
244 | default 120 | ||
245 | help | ||
246 | This option controls the default timeout (in seconds) used | ||
247 | to determine when a task has become non-responsive and should | ||
248 | be considered hung. | ||
249 | |||
250 | It can be adjusted at runtime via the kernel.hung_task_timeout_secs | ||
251 | sysctl or by writing a value to /proc/sys/kernel/hung_task_timeout_secs. | ||
252 | |||
253 | A timeout of 0 disables the check. The default is two minutes. | ||
254 | Keeping the default should be fine in most cases. | ||
255 | |||
241 | config BOOTPARAM_HUNG_TASK_PANIC | 256 | config BOOTPARAM_HUNG_TASK_PANIC |
242 | bool "Panic (Reboot) On Hung Tasks" | 257 | bool "Panic (Reboot) On Hung Tasks" |
243 | depends on DETECT_HUNG_TASK | 258 | depends on DETECT_HUNG_TASK |
@@ -337,7 +352,7 @@ config DEBUG_OBJECTS_WORK | |||
337 | 352 | ||
338 | config DEBUG_OBJECTS_RCU_HEAD | 353 | config DEBUG_OBJECTS_RCU_HEAD |
339 | bool "Debug RCU callbacks objects" | 354 | bool "Debug RCU callbacks objects" |
340 | depends on DEBUG_OBJECTS && PREEMPT | 355 | depends on DEBUG_OBJECTS |
341 | help | 356 | help |
342 | Enable this to turn on debugging of RCU list heads (call_rcu() usage). | 357 | Enable this to turn on debugging of RCU list heads (call_rcu() usage). |
343 | 358 | ||
@@ -398,9 +413,9 @@ config SLUB_STATS | |||
398 | config DEBUG_KMEMLEAK | 413 | config DEBUG_KMEMLEAK |
399 | bool "Kernel memory leak detector" | 414 | bool "Kernel memory leak detector" |
400 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ | 415 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ |
401 | (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE) | 416 | (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE) |
402 | 417 | ||
403 | select DEBUG_FS if SYSFS | 418 | select DEBUG_FS |
404 | select STACKTRACE if STACKTRACE_SUPPORT | 419 | select STACKTRACE if STACKTRACE_SUPPORT |
405 | select KALLSYMS | 420 | select KALLSYMS |
406 | select CRC32 | 421 | select CRC32 |
@@ -875,22 +890,9 @@ config RCU_TORTURE_TEST_RUNNABLE | |||
875 | Say N here if you want the RCU torture tests to start only | 890 | Say N here if you want the RCU torture tests to start only |
876 | after being manually enabled via /proc. | 891 | after being manually enabled via /proc. |
877 | 892 | ||
878 | config RCU_CPU_STALL_DETECTOR | ||
879 | bool "Check for stalled CPUs delaying RCU grace periods" | ||
880 | depends on TREE_RCU || TREE_PREEMPT_RCU | ||
881 | default y | ||
882 | help | ||
883 | This option causes RCU to printk information on which | ||
884 | CPUs are delaying the current grace period, but only when | ||
885 | the grace period extends for excessive time periods. | ||
886 | |||
887 | Say N if you want to disable such checks. | ||
888 | |||
889 | Say Y if you are unsure. | ||
890 | |||
891 | config RCU_CPU_STALL_TIMEOUT | 893 | config RCU_CPU_STALL_TIMEOUT |
892 | int "RCU CPU stall timeout in seconds" | 894 | int "RCU CPU stall timeout in seconds" |
893 | depends on RCU_CPU_STALL_DETECTOR | 895 | depends on TREE_RCU || TREE_PREEMPT_RCU |
894 | range 3 300 | 896 | range 3 300 |
895 | default 60 | 897 | default 60 |
896 | help | 898 | help |
@@ -899,22 +901,9 @@ config RCU_CPU_STALL_TIMEOUT | |||
899 | RCU grace period persists, additional CPU stall warnings are | 901 | RCU grace period persists, additional CPU stall warnings are |
900 | printed at more widely spaced intervals. | 902 | printed at more widely spaced intervals. |
901 | 903 | ||
902 | config RCU_CPU_STALL_DETECTOR_RUNNABLE | ||
903 | bool "RCU CPU stall checking starts automatically at boot" | ||
904 | depends on RCU_CPU_STALL_DETECTOR | ||
905 | default y | ||
906 | help | ||
907 | If set, start checking for RCU CPU stalls immediately on | ||
908 | boot. Otherwise, RCU CPU stall checking must be manually | ||
909 | enabled. | ||
910 | |||
911 | Say Y if you are unsure. | ||
912 | |||
913 | Say N if you wish to suppress RCU CPU stall checking during boot. | ||
914 | |||
915 | config RCU_CPU_STALL_VERBOSE | 904 | config RCU_CPU_STALL_VERBOSE |
916 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" | 905 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" |
917 | depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU | 906 | depends on TREE_PREEMPT_RCU |
918 | default y | 907 | default y |
919 | help | 908 | help |
920 | This option causes RCU to printk detailed per-task information | 909 | This option causes RCU to printk detailed per-task information |
diff --git a/lib/Makefile b/lib/Makefile index ef0f28571156..4b49a249064b 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -21,7 +21,8 @@ lib-y += kobject.o kref.o klist.o | |||
21 | 21 | ||
22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
24 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o | 24 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ |
25 | bsearch.o | ||
25 | obj-y += kstrtox.o | 26 | obj-y += kstrtox.o |
26 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | 27 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o |
27 | 28 | ||
diff --git a/lib/bsearch.c b/lib/bsearch.c new file mode 100644 index 000000000000..5b54758e2afb --- /dev/null +++ b/lib/bsearch.c | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * A generic implementation of binary search for the Linux kernel | ||
3 | * | ||
4 | * Copyright (C) 2008-2009 Ksplice, Inc. | ||
5 | * Author: Tim Abbott <tabbott@ksplice.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License as | ||
9 | * published by the Free Software Foundation; version 2. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/bsearch.h> | ||
14 | |||
15 | /* | ||
16 | * bsearch - binary search an array of elements | ||
17 | * @key: pointer to item being searched for | ||
18 | * @base: pointer to first element to search | ||
19 | * @num: number of elements | ||
20 | * @size: size of each element | ||
21 | * @cmp: pointer to comparison function | ||
22 | * | ||
23 | * This function does a binary search on the given array. The | ||
24 | * contents of the array should already be in ascending sorted order | ||
25 | * under the provided comparison function. | ||
26 | * | ||
27 | * Note that the key need not have the same type as the elements in | ||
28 | * the array, e.g. key could be a string and the comparison function | ||
29 | * could compare the string with the struct's name field. However, if | ||
30 | * the key and elements in the array are of the same type, you can use | ||
31 | * the same comparison function for both sort() and bsearch(). | ||
32 | */ | ||
33 | void *bsearch(const void *key, const void *base, size_t num, size_t size, | ||
34 | int (*cmp)(const void *key, const void *elt)) | ||
35 | { | ||
36 | size_t start = 0, end = num; | ||
37 | int result; | ||
38 | |||
39 | while (start < end) { | ||
40 | size_t mid = start + (end - start) / 2; | ||
41 | |||
42 | result = cmp(key, base + mid * size); | ||
43 | if (result < 0) | ||
44 | end = mid; | ||
45 | else if (result > 0) | ||
46 | start = mid + 1; | ||
47 | else | ||
48 | return (void *)base + mid * size; | ||
49 | } | ||
50 | |||
51 | return NULL; | ||
52 | } | ||
53 | EXPORT_SYMBOL(bsearch); | ||
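To make the comparator contract concrete, a usage sketch (the table and helper names are invented for illustration). Key and elements share a type here, so the same comparison function could also drive sort().

#include <linux/bsearch.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */

static int cmp_int(const void *key, const void *elt)
{
	int k = *(const int *)key;
	int e = *(const int *)elt;

	return (k > e) - (k < e);	/* negative, zero, or positive */
}

/* Must already be sorted ascending under cmp_int. */
static const int primes[] = { 2, 3, 5, 7, 11, 13 };

static bool is_small_prime(int val)
{
	return bsearch(&val, primes, ARRAY_SIZE(primes),
		       sizeof(primes[0]), cmp_int) != NULL;
}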
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 4bfb0471f106..db07bfd9298e 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
@@ -649,7 +649,7 @@ out_err: | |||
649 | return -ENOMEM; | 649 | return -ENOMEM; |
650 | } | 650 | } |
651 | 651 | ||
652 | static int device_dma_allocations(struct device *dev) | 652 | static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) |
653 | { | 653 | { |
654 | struct dma_debug_entry *entry; | 654 | struct dma_debug_entry *entry; |
655 | unsigned long flags; | 655 | unsigned long flags; |
@@ -660,8 +660,10 @@ static int device_dma_allocations(struct device *dev) | |||
660 | for (i = 0; i < HASH_SIZE; ++i) { | 660 | for (i = 0; i < HASH_SIZE; ++i) { |
661 | spin_lock(&dma_entry_hash[i].lock); | 661 | spin_lock(&dma_entry_hash[i].lock); |
662 | list_for_each_entry(entry, &dma_entry_hash[i].list, list) { | 662 | list_for_each_entry(entry, &dma_entry_hash[i].list, list) { |
663 | if (entry->dev == dev) | 663 | if (entry->dev == dev) { |
664 | count += 1; | 664 | count += 1; |
665 | *out_entry = entry; | ||
666 | } | ||
665 | } | 667 | } |
666 | spin_unlock(&dma_entry_hash[i].lock); | 668 | spin_unlock(&dma_entry_hash[i].lock); |
667 | } | 669 | } |
@@ -674,6 +676,7 @@ static int device_dma_allocations(struct device *dev) | |||
674 | static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) | 676 | static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) |
675 | { | 677 | { |
676 | struct device *dev = data; | 678 | struct device *dev = data; |
679 | struct dma_debug_entry *uninitialized_var(entry); | ||
677 | int count; | 680 | int count; |
678 | 681 | ||
679 | if (global_disable) | 682 | if (global_disable) |
@@ -681,12 +684,17 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti | |||
681 | 684 | ||
682 | switch (action) { | 685 | switch (action) { |
683 | case BUS_NOTIFY_UNBOUND_DRIVER: | 686 | case BUS_NOTIFY_UNBOUND_DRIVER: |
684 | count = device_dma_allocations(dev); | 687 | count = device_dma_allocations(dev, &entry); |
685 | if (count == 0) | 688 | if (count == 0) |
686 | break; | 689 | break; |
687 | err_printk(dev, NULL, "DMA-API: device driver has pending " | 690 | err_printk(dev, entry, "DMA-API: device driver has pending " |
688 | "DMA allocations while released from device " | 691 | "DMA allocations while released from device " |
689 | "[count=%d]\n", count); | 692 | "[count=%d]\n" |
693 | "One of leaked entries details: " | ||
694 | "[device address=0x%016llx] [size=%llu bytes] " | ||
695 | "[mapped with %s] [mapped as %s]\n", | ||
696 | count, entry->dev_addr, entry->size, | ||
697 | dir2name[entry->direction], type2name[entry->type]); | ||
690 | break; | 698 | break; |
691 | default: | 699 | default: |
692 | break; | 700 | break; |
diff --git a/lib/string.c b/lib/string.c index f71bead1be3e..01fad9b203e1 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -535,6 +535,35 @@ bool sysfs_streq(const char *s1, const char *s2) | |||
535 | } | 535 | } |
536 | EXPORT_SYMBOL(sysfs_streq); | 536 | EXPORT_SYMBOL(sysfs_streq); |
537 | 537 | ||
538 | /** | ||
539 | * strtobool - convert common user inputs into boolean values | ||
540 | * @s: input string | ||
541 | * @res: result | ||
542 | * | ||
543 | * This routine returns 0 iff the first character is one of 'Yy1Nn0'. | ||
544 | * Otherwise it will return -EINVAL. The value pointed to by @res is | ||
545 | * updated only upon finding a match. | ||
546 | */ | ||
547 | int strtobool(const char *s, bool *res) | ||
548 | { | ||
549 | switch (s[0]) { | ||
550 | case 'y': | ||
551 | case 'Y': | ||
552 | case '1': | ||
553 | *res = true; | ||
554 | break; | ||
555 | case 'n': | ||
556 | case 'N': | ||
557 | case '0': | ||
558 | *res = false; | ||
559 | break; | ||
560 | default: | ||
561 | return -EINVAL; | ||
562 | } | ||
563 | return 0; | ||
564 | } | ||
565 | EXPORT_SYMBOL(strtobool); | ||
566 | |||
538 | #ifndef __HAVE_ARCH_MEMSET | 567 | #ifndef __HAVE_ARCH_MEMSET |
539 | /** | 568 | /** |
540 | * memset - Fill a region of memory with the given value | 569 | * memset - Fill a region of memory with the given value |
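A plausible caller of the new helper, sketched as a sysfs store method that accepts the usual boolean spellings. The attribute and example_set_enabled() are hypothetical, not from this patch.

static ssize_t enable_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool enable;

	/* "y", "Y", "1" enable; "n", "N", "0" disable; anything else errors. */
	if (strtobool(buf, &enable))
		return -EINVAL;

	example_set_enabled(enable);	/* assumed driver hook */
	return count;
}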
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index bc0ac6b333dc..dfd60192bc2e 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -797,7 +797,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr, | |||
797 | return string(buf, end, uuid, spec); | 797 | return string(buf, end, uuid, spec); |
798 | } | 798 | } |
799 | 799 | ||
800 | int kptr_restrict = 1; | 800 | int kptr_restrict __read_mostly; |
801 | 801 | ||
802 | /* | 802 | /* |
803 | * Show a '%p' thing. A kernel extension is that the '%p' is followed | 803 | * Show a '%p' thing. A kernel extension is that the '%p' is followed |
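For context, %pK is the pointer-format extension this sysctl gates; a one-line sketch (the surrounding function is invented):

static void example_dump(struct timer_list *t)
{
	/* Prints the real address while kptr_restrict == 0 (the new default);
	 * unprivileged readers see zeros once the sysctl is raised to 1. */
	pr_info("timer at %pK\n", t);
}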
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index c1d5867543e4..aacee45616fc 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -1414,9 +1414,12 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1414 | ++(*pos); | 1414 | ++(*pos); |
1415 | 1415 | ||
1416 | list_for_each_continue_rcu(n, &object_list) { | 1416 | list_for_each_continue_rcu(n, &object_list) { |
1417 | next_obj = list_entry(n, struct kmemleak_object, object_list); | 1417 | struct kmemleak_object *obj = |
1418 | if (get_object(next_obj)) | 1418 | list_entry(n, struct kmemleak_object, object_list); |
1419 | if (get_object(obj)) { | ||
1420 | next_obj = obj; | ||
1419 | break; | 1421 | break; |
1422 | } | ||
1420 | } | 1423 | } |
1421 | 1424 | ||
1422 | put_object(prev_obj); | 1425 | put_object(prev_obj); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9f8a97b9a350..3f8bce264df6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2317,6 +2317,21 @@ void free_pages(unsigned long addr, unsigned int order) | |||
2317 | 2317 | ||
2318 | EXPORT_SYMBOL(free_pages); | 2318 | EXPORT_SYMBOL(free_pages); |
2319 | 2319 | ||
2320 | static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) | ||
2321 | { | ||
2322 | if (addr) { | ||
2323 | unsigned long alloc_end = addr + (PAGE_SIZE << order); | ||
2324 | unsigned long used = addr + PAGE_ALIGN(size); | ||
2325 | |||
2326 | split_page(virt_to_page((void *)addr), order); | ||
2327 | while (used < alloc_end) { | ||
2328 | free_page(used); | ||
2329 | used += PAGE_SIZE; | ||
2330 | } | ||
2331 | } | ||
2332 | return (void *)addr; | ||
2333 | } | ||
2334 | |||
2320 | /** | 2335 | /** |
2321 | * alloc_pages_exact - allocate an exact number of physically-contiguous pages. | 2336 | * alloc_pages_exact - allocate an exact number of physically-contiguous pages. |
2322 | * @size: the number of bytes to allocate | 2337 | * @size: the number of bytes to allocate |
@@ -2336,22 +2351,33 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) | |||
2336 | unsigned long addr; | 2351 | unsigned long addr; |
2337 | 2352 | ||
2338 | addr = __get_free_pages(gfp_mask, order); | 2353 | addr = __get_free_pages(gfp_mask, order); |
2339 | if (addr) { | 2354 | return make_alloc_exact(addr, order, size); |
2340 | unsigned long alloc_end = addr + (PAGE_SIZE << order); | ||
2341 | unsigned long used = addr + PAGE_ALIGN(size); | ||
2342 | |||
2343 | split_page(virt_to_page((void *)addr), order); | ||
2344 | while (used < alloc_end) { | ||
2345 | free_page(used); | ||
2346 | used += PAGE_SIZE; | ||
2347 | } | ||
2348 | } | ||
2349 | |||
2350 | return (void *)addr; | ||
2351 | } | 2355 | } |
2352 | EXPORT_SYMBOL(alloc_pages_exact); | 2356 | EXPORT_SYMBOL(alloc_pages_exact); |
2353 | 2357 | ||
2354 | /** | 2358 | /** |
2359 | * alloc_pages_exact_nid - allocate an exact number of physically-contiguous | ||
2360 | * pages on a node. | ||
2361 | * @nid: the preferred node ID where memory should be allocated | ||
2362 | * @size: the number of bytes to allocate | ||
2363 | * @gfp_mask: GFP flags for the allocation | ||
2364 | * | ||
2365 | * Like alloc_pages_exact(), but try to allocate on node nid first before falling | ||
2366 | * back. | ||
2367 | * Note this is not alloc_pages_exact_node(), which allocates on a specific | ||
2368 | * node but is not exact in size. | ||
2369 | */ | ||
2370 | void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) | ||
2371 | { | ||
2372 | unsigned order = get_order(size); | ||
2373 | struct page *p = alloc_pages_node(nid, gfp_mask, order); | ||
2374 | if (!p) | ||
2375 | return NULL; | ||
2376 | return make_alloc_exact((unsigned long)page_address(p), order, size); | ||
2377 | } | ||
2378 | EXPORT_SYMBOL(alloc_pages_exact_nid); | ||
2379 | |||
2380 | /** | ||
2355 | * free_pages_exact - release memory allocated via alloc_pages_exact() | 2381 | * free_pages_exact - release memory allocated via alloc_pages_exact() |
2356 | * @virt: the value returned by alloc_pages_exact. | 2382 | * @virt: the value returned by alloc_pages_exact. |
2357 | * @size: size of allocation, same value as passed to alloc_pages_exact(). | 2383 | * @size: size of allocation, same value as passed to alloc_pages_exact(). |
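A usage sketch under stated assumptions (the sizes and fallback policy are invented; the page_cgroup.c conversion later in this series follows the same shape): prefer an exactly-sized, node-local buffer, and release it through the common free_pages_exact().

static void *example_alloc_table(int nid, size_t bytes)
{
	void *buf;

	/* Try the preferred node first, without warning on failure. */
	buf = alloc_pages_exact_nid(nid, bytes, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)	/* fall back to any node */
		buf = alloc_pages_exact(bytes, GFP_KERNEL);
	return buf;
}

static void example_free_table(void *buf, size_t bytes)
{
	if (buf)
		free_pages_exact(buf, bytes);
}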
@@ -3564,7 +3590,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) | |||
3564 | 3590 | ||
3565 | if (!slab_is_available()) { | 3591 | if (!slab_is_available()) { |
3566 | zone->wait_table = (wait_queue_head_t *) | 3592 | zone->wait_table = (wait_queue_head_t *) |
3567 | alloc_bootmem_node(pgdat, alloc_size); | 3593 | alloc_bootmem_node_nopanic(pgdat, alloc_size); |
3568 | } else { | 3594 | } else { |
3569 | /* | 3595 | /* |
3570 | * This case means that a zone whose size was 0 gets new memory | 3596 | * This case means that a zone whose size was 0 gets new memory |
@@ -4141,7 +4167,8 @@ static void __init setup_usemap(struct pglist_data *pgdat, | |||
4141 | unsigned long usemapsize = usemap_size(zonesize); | 4167 | unsigned long usemapsize = usemap_size(zonesize); |
4142 | zone->pageblock_flags = NULL; | 4168 | zone->pageblock_flags = NULL; |
4143 | if (usemapsize) | 4169 | if (usemapsize) |
4144 | zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); | 4170 | zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, |
4171 | usemapsize); | ||
4145 | } | 4172 | } |
4146 | #else | 4173 | #else |
4147 | static inline void setup_usemap(struct pglist_data *pgdat, | 4174 | static inline void setup_usemap(struct pglist_data *pgdat, |
@@ -4307,7 +4334,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) | |||
4307 | size = (end - start) * sizeof(struct page); | 4334 | size = (end - start) * sizeof(struct page); |
4308 | map = alloc_remap(pgdat->node_id, size); | 4335 | map = alloc_remap(pgdat->node_id, size); |
4309 | if (!map) | 4336 | if (!map) |
4310 | map = alloc_bootmem_node(pgdat, size); | 4337 | map = alloc_bootmem_node_nopanic(pgdat, size); |
4311 | pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); | 4338 | pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); |
4312 | } | 4339 | } |
4313 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 4340 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 99055010cece..2daadc322ba6 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c | |||
@@ -134,7 +134,7 @@ static void *__init_refok alloc_page_cgroup(size_t size, int nid) | |||
134 | { | 134 | { |
135 | void *addr = NULL; | 135 | void *addr = NULL; |
136 | 136 | ||
137 | addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN); | 137 | addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN); |
138 | if (addr) | 138 | if (addr) |
139 | return addr; | 139 | return addr; |
140 | 140 | ||
diff --git a/mm/shmem.c b/mm/shmem.c index 8fa27e4e582a..dfc7069102ee 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -852,7 +852,7 @@ static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_ | |||
852 | 852 | ||
853 | static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) | 853 | static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) |
854 | { | 854 | { |
855 | struct inode *inode; | 855 | struct address_space *mapping; |
856 | unsigned long idx; | 856 | unsigned long idx; |
857 | unsigned long size; | 857 | unsigned long size; |
858 | unsigned long limit; | 858 | unsigned long limit; |
@@ -875,8 +875,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s | |||
875 | if (size > SHMEM_NR_DIRECT) | 875 | if (size > SHMEM_NR_DIRECT) |
876 | size = SHMEM_NR_DIRECT; | 876 | size = SHMEM_NR_DIRECT; |
877 | offset = shmem_find_swp(entry, ptr, ptr+size); | 877 | offset = shmem_find_swp(entry, ptr, ptr+size); |
878 | if (offset >= 0) | 878 | if (offset >= 0) { |
879 | shmem_swp_balance_unmap(); | ||
879 | goto found; | 880 | goto found; |
881 | } | ||
880 | if (!info->i_indirect) | 882 | if (!info->i_indirect) |
881 | goto lost2; | 883 | goto lost2; |
882 | 884 | ||
@@ -914,11 +916,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s | |||
914 | if (size > ENTRIES_PER_PAGE) | 916 | if (size > ENTRIES_PER_PAGE) |
915 | size = ENTRIES_PER_PAGE; | 917 | size = ENTRIES_PER_PAGE; |
916 | offset = shmem_find_swp(entry, ptr, ptr+size); | 918 | offset = shmem_find_swp(entry, ptr, ptr+size); |
917 | shmem_swp_unmap(ptr); | ||
918 | if (offset >= 0) { | 919 | if (offset >= 0) { |
919 | shmem_dir_unmap(dir); | 920 | shmem_dir_unmap(dir); |
920 | goto found; | 921 | goto found; |
921 | } | 922 | } |
923 | shmem_swp_unmap(ptr); | ||
922 | } | 924 | } |
923 | } | 925 | } |
924 | lost1: | 926 | lost1: |
@@ -928,8 +930,7 @@ lost2: | |||
928 | return 0; | 930 | return 0; |
929 | found: | 931 | found: |
930 | idx += offset; | 932 | idx += offset; |
931 | inode = igrab(&info->vfs_inode); | 933 | ptr += offset; |
932 | spin_unlock(&info->lock); | ||
933 | 934 | ||
934 | /* | 935 | /* |
935 | * Move _head_ to start search for next from here. | 936 | * Move _head_ to start search for next from here. |
@@ -940,37 +941,18 @@ found: | |||
940 | */ | 941 | */ |
941 | if (shmem_swaplist.next != &info->swaplist) | 942 | if (shmem_swaplist.next != &info->swaplist) |
942 | list_move_tail(&shmem_swaplist, &info->swaplist); | 943 | list_move_tail(&shmem_swaplist, &info->swaplist); |
943 | mutex_unlock(&shmem_swaplist_mutex); | ||
944 | 944 | ||
945 | error = 1; | ||
946 | if (!inode) | ||
947 | goto out; | ||
948 | /* | 945 | /* |
949 | * Charge page using GFP_KERNEL while we can wait. | 946 | * We rely on shmem_swaplist_mutex, not only to protect the swaplist, |
950 | * Charged back to the user(not to caller) when swap account is used. | 947 | * but also to hold up shmem_evict_inode(): so inode cannot be freed |
951 | * add_to_page_cache() will be called with GFP_NOWAIT. | 948 | * beneath us (pagelock doesn't help until the page is in pagecache). |
952 | */ | 949 | */ |
953 | error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); | 950 | mapping = info->vfs_inode.i_mapping; |
954 | if (error) | 951 | error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT); |
955 | goto out; | 952 | /* which does mem_cgroup_uncharge_cache_page on error */ |
956 | error = radix_tree_preload(GFP_KERNEL); | ||
957 | if (error) { | ||
958 | mem_cgroup_uncharge_cache_page(page); | ||
959 | goto out; | ||
960 | } | ||
961 | error = 1; | ||
962 | |||
963 | spin_lock(&info->lock); | ||
964 | ptr = shmem_swp_entry(info, idx, NULL); | ||
965 | if (ptr && ptr->val == entry.val) { | ||
966 | error = add_to_page_cache_locked(page, inode->i_mapping, | ||
967 | idx, GFP_NOWAIT); | ||
968 | /* does mem_cgroup_uncharge_cache_page on error */ | ||
969 | } else /* we must compensate for our precharge above */ | ||
970 | mem_cgroup_uncharge_cache_page(page); | ||
971 | 953 | ||
972 | if (error == -EEXIST) { | 954 | if (error == -EEXIST) { |
973 | struct page *filepage = find_get_page(inode->i_mapping, idx); | 955 | struct page *filepage = find_get_page(mapping, idx); |
974 | error = 1; | 956 | error = 1; |
975 | if (filepage) { | 957 | if (filepage) { |
976 | /* | 958 | /* |
@@ -990,14 +972,8 @@ found: | |||
990 | swap_free(entry); | 972 | swap_free(entry); |
991 | error = 1; /* not an error, but entry was found */ | 973 | error = 1; /* not an error, but entry was found */ |
992 | } | 974 | } |
993 | if (ptr) | 975 | shmem_swp_unmap(ptr); |
994 | shmem_swp_unmap(ptr); | ||
995 | spin_unlock(&info->lock); | 976 | spin_unlock(&info->lock); |
996 | radix_tree_preload_end(); | ||
997 | out: | ||
998 | unlock_page(page); | ||
999 | page_cache_release(page); | ||
1000 | iput(inode); /* allows for NULL */ | ||
1001 | return error; | 977 | return error; |
1002 | } | 978 | } |
1003 | 979 | ||
@@ -1009,6 +985,26 @@ int shmem_unuse(swp_entry_t entry, struct page *page) | |||
1009 | struct list_head *p, *next; | 985 | struct list_head *p, *next; |
1010 | struct shmem_inode_info *info; | 986 | struct shmem_inode_info *info; |
1011 | int found = 0; | 987 | int found = 0; |
988 | int error; | ||
989 | |||
990 | /* | ||
991 | * Charge page using GFP_KERNEL while we can wait, before taking | ||
992 | * the shmem_swaplist_mutex which might hold up shmem_writepage(). | ||
993 | * Charged back to the user (not to caller) when swap account is used. | ||
994 | * add_to_page_cache() will be called with GFP_NOWAIT. | ||
995 | */ | ||
996 | error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); | ||
997 | if (error) | ||
998 | goto out; | ||
999 | /* | ||
1000 | * Try to preload while we can wait, to not make a habit of | ||
1001 | * draining atomic reserves; but don't latch on to this cpu, | ||
1002 | * it's okay if sometimes we get rescheduled after this. | ||
1003 | */ | ||
1004 | error = radix_tree_preload(GFP_KERNEL); | ||
1005 | if (error) | ||
1006 | goto uncharge; | ||
1007 | radix_tree_preload_end(); | ||
1012 | 1008 | ||
1013 | mutex_lock(&shmem_swaplist_mutex); | 1009 | mutex_lock(&shmem_swaplist_mutex); |
1014 | list_for_each_safe(p, next, &shmem_swaplist) { | 1010 | list_for_each_safe(p, next, &shmem_swaplist) { |
@@ -1016,17 +1012,19 @@ int shmem_unuse(swp_entry_t entry, struct page *page) | |||
1016 | found = shmem_unuse_inode(info, entry, page); | 1012 | found = shmem_unuse_inode(info, entry, page); |
1017 | cond_resched(); | 1013 | cond_resched(); |
1018 | if (found) | 1014 | if (found) |
1019 | goto out; | 1015 | break; |
1020 | } | 1016 | } |
1021 | mutex_unlock(&shmem_swaplist_mutex); | 1017 | mutex_unlock(&shmem_swaplist_mutex); |
1022 | /* | 1018 | |
1023 | * Can some race bring us here? We've been holding page lock, | 1019 | uncharge: |
1024 | * so I think not; but would rather try again later than BUG() | 1020 | if (!found) |
1025 | */ | 1021 | mem_cgroup_uncharge_cache_page(page); |
1022 | if (found < 0) | ||
1023 | error = found; | ||
1024 | out: | ||
1026 | unlock_page(page); | 1025 | unlock_page(page); |
1027 | page_cache_release(page); | 1026 | page_cache_release(page); |
1028 | out: | 1027 | return error; |
1029 | return (found < 0) ? found : 0; | ||
1030 | } | 1028 | } |
1031 | 1029 | ||
1032 | /* | 1030 | /* |
@@ -1064,7 +1062,25 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) | |||
1064 | else | 1062 | else |
1065 | swap.val = 0; | 1063 | swap.val = 0; |
1066 | 1064 | ||
1065 | /* | ||
1066 | * Add inode to shmem_unuse()'s list of swapped-out inodes, | ||
1067 | * if it's not already there. Do it now because we cannot take | ||
1068 | * mutex while holding spinlock, and must do so before the page | ||
1069 | * is moved to swap cache, when its pagelock no longer protects | ||
1070 | * the inode from eviction. But don't unlock the mutex until | ||
1071 | * we've taken the spinlock, because shmem_unuse_inode() will | ||
1072 | * prune a !swapped inode from the swaplist under both locks. | ||
1073 | */ | ||
1074 | if (swap.val) { | ||
1075 | mutex_lock(&shmem_swaplist_mutex); | ||
1076 | if (list_empty(&info->swaplist)) | ||
1077 | list_add_tail(&info->swaplist, &shmem_swaplist); | ||
1078 | } | ||
1079 | |||
1067 | spin_lock(&info->lock); | 1080 | spin_lock(&info->lock); |
1081 | if (swap.val) | ||
1082 | mutex_unlock(&shmem_swaplist_mutex); | ||
1083 | |||
1068 | if (index >= info->next_index) { | 1084 | if (index >= info->next_index) { |
1069 | BUG_ON(!(info->flags & SHMEM_TRUNCATE)); | 1085 | BUG_ON(!(info->flags & SHMEM_TRUNCATE)); |
1070 | goto unlock; | 1086 | goto unlock; |
@@ -1084,21 +1100,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) | |||
1084 | delete_from_page_cache(page); | 1100 | delete_from_page_cache(page); |
1085 | shmem_swp_set(info, entry, swap.val); | 1101 | shmem_swp_set(info, entry, swap.val); |
1086 | shmem_swp_unmap(entry); | 1102 | shmem_swp_unmap(entry); |
1087 | if (list_empty(&info->swaplist)) | ||
1088 | inode = igrab(inode); | ||
1089 | else | ||
1090 | inode = NULL; | ||
1091 | spin_unlock(&info->lock); | 1103 | spin_unlock(&info->lock); |
1092 | swap_shmem_alloc(swap); | 1104 | swap_shmem_alloc(swap); |
1093 | BUG_ON(page_mapped(page)); | 1105 | BUG_ON(page_mapped(page)); |
1094 | swap_writepage(page, wbc); | 1106 | swap_writepage(page, wbc); |
1095 | if (inode) { | ||
1096 | mutex_lock(&shmem_swaplist_mutex); | ||
1097 | /* move instead of add in case we're racing */ | ||
1098 | list_move_tail(&info->swaplist, &shmem_swaplist); | ||
1099 | mutex_unlock(&shmem_swaplist_mutex); | ||
1100 | iput(inode); | ||
1101 | } | ||
1102 | return 0; | 1107 | return 0; |
1103 | } | 1108 | } |
1104 | 1109 | ||
@@ -1400,20 +1405,14 @@ repeat: | |||
1400 | if (sbinfo->max_blocks) { | 1405 | if (sbinfo->max_blocks) { |
1401 | if (percpu_counter_compare(&sbinfo->used_blocks, | 1406 | if (percpu_counter_compare(&sbinfo->used_blocks, |
1402 | sbinfo->max_blocks) >= 0 || | 1407 | sbinfo->max_blocks) >= 0 || |
1403 | shmem_acct_block(info->flags)) { | 1408 | shmem_acct_block(info->flags)) |
1404 | spin_unlock(&info->lock); | 1409 | goto nospace; |
1405 | error = -ENOSPC; | ||
1406 | goto failed; | ||
1407 | } | ||
1408 | percpu_counter_inc(&sbinfo->used_blocks); | 1410 | percpu_counter_inc(&sbinfo->used_blocks); |
1409 | spin_lock(&inode->i_lock); | 1411 | spin_lock(&inode->i_lock); |
1410 | inode->i_blocks += BLOCKS_PER_PAGE; | 1412 | inode->i_blocks += BLOCKS_PER_PAGE; |
1411 | spin_unlock(&inode->i_lock); | 1413 | spin_unlock(&inode->i_lock); |
1412 | } else if (shmem_acct_block(info->flags)) { | 1414 | } else if (shmem_acct_block(info->flags)) |
1413 | spin_unlock(&info->lock); | 1415 | goto nospace; |
1414 | error = -ENOSPC; | ||
1415 | goto failed; | ||
1416 | } | ||
1417 | 1416 | ||
1418 | if (!filepage) { | 1417 | if (!filepage) { |
1419 | int ret; | 1418 | int ret; |
@@ -1493,6 +1492,24 @@ done: | |||
1493 | error = 0; | 1492 | error = 0; |
1494 | goto out; | 1493 | goto out; |
1495 | 1494 | ||
1495 | nospace: | ||
1496 | /* | ||
1497 | * Perhaps the page was brought in from swap between find_lock_page | ||
1498 | * and taking info->lock? We allow for that at add_to_page_cache_lru, | ||
1499 | * but must also avoid reporting a spurious ENOSPC while working on a | ||
1500 | * full tmpfs. (When filepage has been passed in to shmem_getpage, it | ||
1501 | * is already in page cache, which prevents this race from occurring.) | ||
1502 | */ | ||
1503 | if (!filepage) { | ||
1504 | struct page *page = find_get_page(mapping, idx); | ||
1505 | if (page) { | ||
1506 | spin_unlock(&info->lock); | ||
1507 | page_cache_release(page); | ||
1508 | goto repeat; | ||
1509 | } | ||
1510 | } | ||
1511 | spin_unlock(&info->lock); | ||
1512 | error = -ENOSPC; | ||
1496 | failed: | 1513 | failed: |
1497 | if (*pagep != filepage) { | 1514 | if (*pagep != filepage) { |
1498 | unlock_page(filepage); | 1515 | unlock_page(filepage); |
@@ -396,6 +396,9 @@ static void lru_deactivate_fn(struct page *page, void *arg) | |||
396 | if (!PageLRU(page)) | 396 | if (!PageLRU(page)) |
397 | return; | 397 | return; |
398 | 398 | ||
399 | if (PageUnevictable(page)) | ||
400 | return; | ||
401 | |||
399 | /* Some processes are using the page */ | 402 | /* Some processes are using the page */ |
400 | if (page_mapped(page)) | 403 | if (page_mapped(page)) |
401 | return; | 404 | return; |
diff --git a/mm/vmscan.c b/mm/vmscan.c index f6b435c80079..8bfd45050a61 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -937,7 +937,7 @@ keep_lumpy: | |||
937 | * back off and wait for congestion to clear because further reclaim | 937 | * back off and wait for congestion to clear because further reclaim |
938 | * will encounter the same problem | 938 | * will encounter the same problem |
939 | */ | 939 | */ |
940 | if (nr_dirty == nr_congested && nr_dirty != 0) | 940 | if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc)) |
941 | zone_set_flag(zone, ZONE_CONGESTED); | 941 | zone_set_flag(zone, ZONE_CONGESTED); |
942 | 942 | ||
943 | free_page_list(&free_pages); | 943 | free_page_list(&free_pages); |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 7850412f52b7..0eb1a886b370 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -124,6 +124,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) | |||
124 | 124 | ||
125 | grp->nr_vlans--; | 125 | grp->nr_vlans--; |
126 | 126 | ||
127 | if (vlan->flags & VLAN_FLAG_GVRP) | ||
128 | vlan_gvrp_request_leave(dev); | ||
129 | |||
127 | vlan_group_set_device(grp, vlan_id, NULL); | 130 | vlan_group_set_device(grp, vlan_id, NULL); |
128 | if (!grp->killall) | 131 | if (!grp->killall) |
129 | synchronize_net(); | 132 | synchronize_net(); |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index e34ea9e5e28b..b2ff6c8d3603 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -487,9 +487,6 @@ static int vlan_dev_stop(struct net_device *dev) | |||
487 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | 487 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
488 | struct net_device *real_dev = vlan->real_dev; | 488 | struct net_device *real_dev = vlan->real_dev; |
489 | 489 | ||
490 | if (vlan->flags & VLAN_FLAG_GVRP) | ||
491 | vlan_gvrp_request_leave(dev); | ||
492 | |||
493 | dev_mc_unsync(real_dev, dev); | 490 | dev_mc_unsync(real_dev, dev); |
494 | dev_uc_unsync(real_dev, dev); | 491 | dev_uc_unsync(real_dev, dev); |
495 | if (dev->flags & IFF_ALLMULTI) | 492 | if (dev->flags & IFF_ALLMULTI) |
diff --git a/net/9p/client.c b/net/9p/client.c index 77367745be9b..a9aa2dd66482 100644 --- a/net/9p/client.c +++ b/net/9p/client.c | |||
@@ -614,7 +614,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) | |||
614 | 614 | ||
615 | err = c->trans_mod->request(c, req); | 615 | err = c->trans_mod->request(c, req); |
616 | if (err < 0) { | 616 | if (err < 0) { |
617 | if (err != -ERESTARTSYS) | 617 | if (err != -ERESTARTSYS && err != -EFAULT) |
618 | c->status = Disconnected; | 618 | c->status = Disconnected; |
619 | goto reterr; | 619 | goto reterr; |
620 | } | 620 | } |
diff --git a/net/9p/protocol.c b/net/9p/protocol.c index b58a501cf3d1..a873277cb996 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c | |||
@@ -674,6 +674,7 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent, | |||
674 | } | 674 | } |
675 | 675 | ||
676 | strcpy(dirent->d_name, nameptr); | 676 | strcpy(dirent->d_name, nameptr); |
677 | kfree(nameptr); | ||
677 | 678 | ||
678 | out: | 679 | out: |
679 | return fake_pdu.offset; | 680 | return fake_pdu.offset; |
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c index e883172f9aa2..9a70ebdec56e 100644 --- a/net/9p/trans_common.c +++ b/net/9p/trans_common.c | |||
@@ -63,7 +63,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, | |||
63 | int nr_pages, u8 rw) | 63 | int nr_pages, u8 rw) |
64 | { | 64 | { |
65 | uint32_t first_page_bytes = 0; | 65 | uint32_t first_page_bytes = 0; |
66 | uint32_t pdata_mapped_pages; | 66 | int32_t pdata_mapped_pages; |
67 | struct trans_rpage_info *rpinfo; | 67 | struct trans_rpage_info *rpinfo; |
68 | 68 | ||
69 | *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); | 69 | *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); |
@@ -75,14 +75,9 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, | |||
75 | rpinfo = req->tc->private; | 75 | rpinfo = req->tc->private; |
76 | pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, | 76 | pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, |
77 | nr_pages, rw, &rpinfo->rp_data[0]); | 77 | nr_pages, rw, &rpinfo->rp_data[0]); |
78 | if (pdata_mapped_pages <= 0) | ||
79 | return pdata_mapped_pages; | ||
78 | 80 | ||
79 | if (pdata_mapped_pages < 0) { | ||
80 | printk(KERN_ERR "get_user_pages_fast failed:%d udata:%p" | ||
81 | "nr_pages:%d\n", pdata_mapped_pages, | ||
82 | req->tc->pubuf, nr_pages); | ||
83 | pdata_mapped_pages = 0; | ||
84 | return -EIO; | ||
85 | } | ||
86 | rpinfo->rp_nr_pages = pdata_mapped_pages; | 81 | rpinfo->rp_nr_pages = pdata_mapped_pages; |
87 | if (*pdata_off) { | 82 | if (*pdata_off) { |
88 | *pdata_len = first_page_bytes; | 83 | *pdata_len = first_page_bytes; |
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 3cc43558cf9c..150b6ce23df3 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c | |||
@@ -28,18 +28,10 @@ | |||
28 | #include <linux/udp.h> | 28 | #include <linux/udp.h> |
29 | #include <linux/if_vlan.h> | 29 | #include <linux/if_vlan.h> |
30 | 30 | ||
31 | static void gw_node_free_rcu(struct rcu_head *rcu) | ||
32 | { | ||
33 | struct gw_node *gw_node; | ||
34 | |||
35 | gw_node = container_of(rcu, struct gw_node, rcu); | ||
36 | kfree(gw_node); | ||
37 | } | ||
38 | |||
39 | static void gw_node_free_ref(struct gw_node *gw_node) | 31 | static void gw_node_free_ref(struct gw_node *gw_node) |
40 | { | 32 | { |
41 | if (atomic_dec_and_test(&gw_node->refcount)) | 33 | if (atomic_dec_and_test(&gw_node->refcount)) |
42 | call_rcu(&gw_node->rcu, gw_node_free_rcu); | 34 | kfree_rcu(gw_node, rcu); |
43 | } | 35 | } |
44 | 36 | ||
45 | void *gw_get_selected(struct bat_priv *bat_priv) | 37 | void *gw_get_selected(struct bat_priv *bat_priv) |
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 0b9133022d2d..ed23a5895d6c 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c | |||
@@ -56,18 +56,10 @@ err: | |||
56 | return 0; | 56 | return 0; |
57 | } | 57 | } |
58 | 58 | ||
59 | static void neigh_node_free_rcu(struct rcu_head *rcu) | ||
60 | { | ||
61 | struct neigh_node *neigh_node; | ||
62 | |||
63 | neigh_node = container_of(rcu, struct neigh_node, rcu); | ||
64 | kfree(neigh_node); | ||
65 | } | ||
66 | |||
67 | void neigh_node_free_ref(struct neigh_node *neigh_node) | 59 | void neigh_node_free_ref(struct neigh_node *neigh_node) |
68 | { | 60 | { |
69 | if (atomic_dec_and_test(&neigh_node->refcount)) | 61 | if (atomic_dec_and_test(&neigh_node->refcount)) |
70 | call_rcu(&neigh_node->rcu, neigh_node_free_rcu); | 62 | kfree_rcu(neigh_node, rcu); |
71 | } | 63 | } |
72 | 64 | ||
73 | struct neigh_node *create_neighbor(struct orig_node *orig_node, | 65 | struct neigh_node *create_neighbor(struct orig_node *orig_node, |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 824e1f6e50f2..04efe022c13b 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -76,18 +76,10 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len) | |||
76 | return 0; | 76 | return 0; |
77 | } | 77 | } |
78 | 78 | ||
79 | static void softif_neigh_free_rcu(struct rcu_head *rcu) | ||
80 | { | ||
81 | struct softif_neigh *softif_neigh; | ||
82 | |||
83 | softif_neigh = container_of(rcu, struct softif_neigh, rcu); | ||
84 | kfree(softif_neigh); | ||
85 | } | ||
86 | |||
87 | static void softif_neigh_free_ref(struct softif_neigh *softif_neigh) | 79 | static void softif_neigh_free_ref(struct softif_neigh *softif_neigh) |
88 | { | 80 | { |
89 | if (atomic_dec_and_test(&softif_neigh->refcount)) | 81 | if (atomic_dec_and_test(&softif_neigh->refcount)) |
90 | call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu); | 82 | kfree_rcu(softif_neigh, rcu); |
91 | } | 83 | } |
92 | 84 | ||
93 | void softif_neigh_purge(struct bat_priv *bat_priv) | 85 | void softif_neigh_purge(struct bat_priv *bat_priv) |
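The three batman-adv hunks above, and the net/core conversions further down, all apply one mechanical rewrite. Schematically, for any structure whose RCU callback only called kfree() (the names here are illustrative):

struct example_node {
	int data;
	struct rcu_head rcu;
};

/* Before: a wrapper existed solely to locate and free the container. */
static void example_node_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_node, rcu));
}

static void example_put_before(struct example_node *node)
{
	call_rcu(&node->rcu, example_node_free_rcu);
}

/* After: kfree_rcu() names the object and its rcu_head member, so the
 * wrapper disappears; a grace period still elapses before the kfree(). */
static void example_put_after(struct example_node *node)
{
	kfree_rcu(node, rcu);
}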
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 94954c74f6ae..42fdffd1d76c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -369,15 +369,6 @@ static void __sco_sock_close(struct sock *sk) | |||
369 | 369 | ||
370 | case BT_CONNECTED: | 370 | case BT_CONNECTED: |
371 | case BT_CONFIG: | 371 | case BT_CONFIG: |
372 | if (sco_pi(sk)->conn) { | ||
373 | sk->sk_state = BT_DISCONN; | ||
374 | sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); | ||
375 | hci_conn_put(sco_pi(sk)->conn->hcon); | ||
376 | sco_pi(sk)->conn = NULL; | ||
377 | } else | ||
378 | sco_chan_del(sk, ECONNRESET); | ||
379 | break; | ||
380 | |||
381 | case BT_CONNECT: | 372 | case BT_CONNECT: |
382 | case BT_DISCONN: | 373 | case BT_DISCONN: |
383 | sco_chan_del(sk, ECONNRESET); | 374 | sco_chan_del(sk, ECONNRESET); |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index f3bc322c5891..74ef4d4846a4 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
@@ -737,7 +737,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, | |||
737 | nf_bridge->mask |= BRNF_PKT_TYPE; | 737 | nf_bridge->mask |= BRNF_PKT_TYPE; |
738 | } | 738 | } |
739 | 739 | ||
740 | if (br_parse_ip_options(skb)) | 740 | if (pf == PF_INET && br_parse_ip_options(skb)) |
741 | return NF_DROP; | 741 | return NF_DROP; |
742 | 742 | ||
743 | /* The physdev module checks on this */ | 743 | /* The physdev module checks on this */ |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 893669caa8de..1a92b369c820 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1766,7 +1766,7 @@ static int compat_table_info(const struct ebt_table_info *info, | |||
1766 | 1766 | ||
1767 | newinfo->entries_size = size; | 1767 | newinfo->entries_size = size; |
1768 | 1768 | ||
1769 | xt_compat_init_offsets(AF_INET, info->nentries); | 1769 | xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries); |
1770 | return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, | 1770 | return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, |
1771 | entries, newinfo); | 1771 | entries, newinfo); |
1772 | } | 1772 | } |
@@ -1882,7 +1882,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, | |||
1882 | struct xt_match *match; | 1882 | struct xt_match *match; |
1883 | struct xt_target *wt; | 1883 | struct xt_target *wt; |
1884 | void *dst = NULL; | 1884 | void *dst = NULL; |
1885 | int off, pad = 0, ret = 0; | 1885 | int off, pad = 0; |
1886 | unsigned int size_kern, entry_offset, match_size = mwt->match_size; | 1886 | unsigned int size_kern, entry_offset, match_size = mwt->match_size; |
1887 | 1887 | ||
1888 | strlcpy(name, mwt->u.name, sizeof(name)); | 1888 | strlcpy(name, mwt->u.name, sizeof(name)); |
@@ -1935,13 +1935,6 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, | |||
1935 | break; | 1935 | break; |
1936 | } | 1936 | } |
1937 | 1937 | ||
1938 | if (!dst) { | ||
1939 | ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, | ||
1940 | off + ebt_compat_entry_padsize()); | ||
1941 | if (ret < 0) | ||
1942 | return ret; | ||
1943 | } | ||
1944 | |||
1945 | state->buf_kern_offset += match_size + off; | 1938 | state->buf_kern_offset += match_size + off; |
1946 | state->buf_user_offset += match_size; | 1939 | state->buf_user_offset += match_size; |
1947 | pad = XT_ALIGN(size_kern) - size_kern; | 1940 | pad = XT_ALIGN(size_kern) - size_kern; |
@@ -2016,50 +2009,6 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, | |||
2016 | return growth; | 2009 | return growth; |
2017 | } | 2010 | } |
2018 | 2011 | ||
2019 | #define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \ | ||
2020 | ({ \ | ||
2021 | unsigned int __i; \ | ||
2022 | int __ret = 0; \ | ||
2023 | struct compat_ebt_entry_mwt *__watcher; \ | ||
2024 | \ | ||
2025 | for (__i = e->watchers_offset; \ | ||
2026 | __i < (e)->target_offset; \ | ||
2027 | __i += __watcher->watcher_size + \ | ||
2028 | sizeof(struct compat_ebt_entry_mwt)) { \ | ||
2029 | __watcher = (void *)(e) + __i; \ | ||
2030 | __ret = fn(__watcher , ## args); \ | ||
2031 | if (__ret != 0) \ | ||
2032 | break; \ | ||
2033 | } \ | ||
2034 | if (__ret == 0) { \ | ||
2035 | if (__i != (e)->target_offset) \ | ||
2036 | __ret = -EINVAL; \ | ||
2037 | } \ | ||
2038 | __ret; \ | ||
2039 | }) | ||
2040 | |||
2041 | #define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \ | ||
2042 | ({ \ | ||
2043 | unsigned int __i; \ | ||
2044 | int __ret = 0; \ | ||
2045 | struct compat_ebt_entry_mwt *__match; \ | ||
2046 | \ | ||
2047 | for (__i = sizeof(struct ebt_entry); \ | ||
2048 | __i < (e)->watchers_offset; \ | ||
2049 | __i += __match->match_size + \ | ||
2050 | sizeof(struct compat_ebt_entry_mwt)) { \ | ||
2051 | __match = (void *)(e) + __i; \ | ||
2052 | __ret = fn(__match , ## args); \ | ||
2053 | if (__ret != 0) \ | ||
2054 | break; \ | ||
2055 | } \ | ||
2056 | if (__ret == 0) { \ | ||
2057 | if (__i != (e)->watchers_offset) \ | ||
2058 | __ret = -EINVAL; \ | ||
2059 | } \ | ||
2060 | __ret; \ | ||
2061 | }) | ||
2062 | |||
2063 | /* called for all ebt_entry structures. */ | 2012 | /* called for all ebt_entry structures. */ |
2064 | static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, | 2013 | static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, |
2065 | unsigned int *total, | 2014 | unsigned int *total, |
@@ -2132,6 +2081,14 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, | |||
2132 | } | 2081 | } |
2133 | } | 2082 | } |
2134 | 2083 | ||
2084 | if (state->buf_kern_start == NULL) { | ||
2085 | unsigned int offset = buf_start - (char *) base; | ||
2086 | |||
2087 | ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset); | ||
2088 | if (ret < 0) | ||
2089 | return ret; | ||
2090 | } | ||
2091 | |||
2135 | startoff = state->buf_user_offset - startoff; | 2092 | startoff = state->buf_user_offset - startoff; |
2136 | 2093 | ||
2137 | BUG_ON(*total < startoff); | 2094 | BUG_ON(*total < startoff); |
@@ -2240,6 +2197,7 @@ static int compat_do_replace(struct net *net, void __user *user, | |||
2240 | 2197 | ||
2241 | xt_compat_lock(NFPROTO_BRIDGE); | 2198 | xt_compat_lock(NFPROTO_BRIDGE); |
2242 | 2199 | ||
2200 | xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); | ||
2243 | ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); | 2201 | ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); |
2244 | if (ret < 0) | 2202 | if (ret < 0) |
2245 | goto out_unlock; | 2203 | goto out_unlock; |
diff --git a/net/core/dev.c b/net/core/dev.c index 856b6ee9a1d5..b624fe4d9bd7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1284,11 +1284,13 @@ static int dev_close_many(struct list_head *head) | |||
1284 | */ | 1284 | */ |
1285 | int dev_close(struct net_device *dev) | 1285 | int dev_close(struct net_device *dev) |
1286 | { | 1286 | { |
1287 | LIST_HEAD(single); | 1287 | if (dev->flags & IFF_UP) { |
1288 | LIST_HEAD(single); | ||
1288 | 1289 | ||
1289 | list_add(&dev->unreg_list, &single); | 1290 | list_add(&dev->unreg_list, &single); |
1290 | dev_close_many(&single); | 1291 | dev_close_many(&single); |
1291 | list_del(&single); | 1292 | list_del(&single); |
1293 | } | ||
1292 | return 0; | 1294 | return 0; |
1293 | } | 1295 | } |
1294 | EXPORT_SYMBOL(dev_close); | 1296 | EXPORT_SYMBOL(dev_close); |
@@ -5184,27 +5186,27 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) | |||
5184 | /* Fix illegal checksum combinations */ | 5186 | /* Fix illegal checksum combinations */ |
5185 | if ((features & NETIF_F_HW_CSUM) && | 5187 | if ((features & NETIF_F_HW_CSUM) && |
5186 | (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | 5188 | (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { |
5187 | netdev_info(dev, "mixed HW and IP checksum settings.\n"); | 5189 | netdev_warn(dev, "mixed HW and IP checksum settings.\n"); |
5188 | features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); | 5190 | features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); |
5189 | } | 5191 | } |
5190 | 5192 | ||
5191 | if ((features & NETIF_F_NO_CSUM) && | 5193 | if ((features & NETIF_F_NO_CSUM) && |
5192 | (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | 5194 | (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { |
5193 | netdev_info(dev, "mixed no checksumming and other settings.\n"); | 5195 | netdev_warn(dev, "mixed no checksumming and other settings.\n"); |
5194 | features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); | 5196 | features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); |
5195 | } | 5197 | } |
5196 | 5198 | ||
5197 | /* Fix illegal SG+CSUM combinations. */ | 5199 | /* Fix illegal SG+CSUM combinations. */ |
5198 | if ((features & NETIF_F_SG) && | 5200 | if ((features & NETIF_F_SG) && |
5199 | !(features & NETIF_F_ALL_CSUM)) { | 5201 | !(features & NETIF_F_ALL_CSUM)) { |
5200 | netdev_info(dev, | 5202 | netdev_dbg(dev, |
5201 | "Dropping NETIF_F_SG since no checksum feature.\n"); | 5203 | "Dropping NETIF_F_SG since no checksum feature.\n"); |
5202 | features &= ~NETIF_F_SG; | 5204 | features &= ~NETIF_F_SG; |
5203 | } | 5205 | } |
5204 | 5206 | ||
5205 | /* TSO requires that SG is present as well. */ | 5207 | /* TSO requires that SG is present as well. */ |
5206 | if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { | 5208 | if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { |
5207 | netdev_info(dev, "Dropping TSO features since no SG feature.\n"); | 5209 | netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); |
5208 | features &= ~NETIF_F_ALL_TSO; | 5210 | features &= ~NETIF_F_ALL_TSO; |
5209 | } | 5211 | } |
5210 | 5212 | ||
@@ -5214,7 +5216,7 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) | |||
5214 | 5216 | ||
5215 | /* Software GSO depends on SG. */ | 5217 | /* Software GSO depends on SG. */ |
5216 | if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { | 5218 | if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { |
5217 | netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); | 5219 | netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); |
5218 | features &= ~NETIF_F_GSO; | 5220 | features &= ~NETIF_F_GSO; |
5219 | } | 5221 | } |
5220 | 5222 | ||
@@ -5224,13 +5226,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) | |||
5224 | if (!((features & NETIF_F_GEN_CSUM) || | 5226 | if (!((features & NETIF_F_GEN_CSUM) || |
5225 | (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) | 5227 | (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) |
5226 | == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | 5228 | == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { |
5227 | netdev_info(dev, | 5229 | netdev_dbg(dev, |
5228 | "Dropping NETIF_F_UFO since no checksum offload features.\n"); | 5230 | "Dropping NETIF_F_UFO since no checksum offload features.\n"); |
5229 | features &= ~NETIF_F_UFO; | 5231 | features &= ~NETIF_F_UFO; |
5230 | } | 5232 | } |
5231 | 5233 | ||
5232 | if (!(features & NETIF_F_SG)) { | 5234 | if (!(features & NETIF_F_SG)) { |
5233 | netdev_info(dev, | 5235 | netdev_dbg(dev, |
5234 | "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); | 5236 | "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); |
5235 | features &= ~NETIF_F_UFO; | 5237 | features &= ~NETIF_F_UFO; |
5236 | } | 5238 | } |
@@ -5412,12 +5414,6 @@ int register_netdevice(struct net_device *dev) | |||
5412 | dev->features |= NETIF_F_SOFT_FEATURES; | 5414 | dev->features |= NETIF_F_SOFT_FEATURES; |
5413 | dev->wanted_features = dev->features & dev->hw_features; | 5415 | dev->wanted_features = dev->features & dev->hw_features; |
5414 | 5416 | ||
5415 | /* Avoid warning from netdev_fix_features() for GSO without SG */ | ||
5416 | if (!(dev->wanted_features & NETIF_F_SG)) { | ||
5417 | dev->wanted_features &= ~NETIF_F_GSO; | ||
5418 | dev->features &= ~NETIF_F_GSO; | ||
5419 | } | ||
5420 | |||
5421 | /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, | 5417 | /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, |
5422 | * vlan_dev_init() will do the dev->features check, so these features | 5418 | * vlan_dev_init() will do the dev->features check, so these features |
5423 | * are enabled only if supported by underlying device. | 5419 | * are enabled only if supported by underlying device. |
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 7b39f3ed2fda..e2e66939ed00 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c | |||
@@ -68,14 +68,6 @@ static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, | |||
68 | return __hw_addr_add_ex(list, addr, addr_len, addr_type, false); | 68 | return __hw_addr_add_ex(list, addr, addr_len, addr_type, false); |
69 | } | 69 | } |
70 | 70 | ||
71 | static void ha_rcu_free(struct rcu_head *head) | ||
72 | { | ||
73 | struct netdev_hw_addr *ha; | ||
74 | |||
75 | ha = container_of(head, struct netdev_hw_addr, rcu_head); | ||
76 | kfree(ha); | ||
77 | } | ||
78 | |||
79 | static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, | 71 | static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, |
80 | unsigned char *addr, int addr_len, | 72 | unsigned char *addr, int addr_len, |
81 | unsigned char addr_type, bool global) | 73 | unsigned char addr_type, bool global) |
@@ -94,7 +86,7 @@ static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, | |||
94 | if (--ha->refcount) | 86 | if (--ha->refcount) |
95 | return 0; | 87 | return 0; |
96 | list_del_rcu(&ha->list); | 88 | list_del_rcu(&ha->list); |
97 | call_rcu(&ha->rcu_head, ha_rcu_free); | 89 | kfree_rcu(ha, rcu_head); |
98 | list->count--; | 90 | list->count--; |
99 | return 0; | 91 | return 0; |
100 | } | 92 | } |
@@ -197,7 +189,7 @@ void __hw_addr_flush(struct netdev_hw_addr_list *list) | |||
197 | 189 | ||
198 | list_for_each_entry_safe(ha, tmp, &list->list, list) { | 190 | list_for_each_entry_safe(ha, tmp, &list->list, list) { |
199 | list_del_rcu(&ha->list); | 191 | list_del_rcu(&ha->list); |
200 | call_rcu(&ha->rcu_head, ha_rcu_free); | 192 | kfree_rcu(ha, rcu_head); |
201 | } | 193 | } |
202 | list->count = 0; | 194 | list->count = 0; |
203 | } | 195 | } |
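
This is the first of many conversions in this series from an open-coded call_rcu() callback to kfree_rcu(). The substitution is mechanical: whenever the RCU callback does nothing but kfree(container_of(...)), kfree_rcu(ptr, member) replaces it, the second argument naming the struct's rcu_head field so the offset can be encoded without a helper function. A kernel-style sketch with an illustrative struct:

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct item {
            struct list_head list;
            struct rcu_head  rcu;   /* freeing deferred past a grace period */
    };

    /* Before: a dedicated callback exists only to locate and free the object. */
    static void item_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct item, rcu));
    }

    static void item_del_old(struct item *it)
    {
            list_del_rcu(&it->list);
            call_rcu(&it->rcu, item_free_rcu);
    }

    /* After: kfree_rcu() derives the offset from the member name itself. */
    static void item_del_new(struct item *it)
    {
            list_del_rcu(&it->list);
            kfree_rcu(it, rcu);
    }

The same substitution accounts for the drop_monitor, gen_estimator, net-sysfs, net_namespace, decnet, fib, igmp, addrconf, mcast, sit and mac80211 hunks that follow.
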
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 706502ff64aa..7f36b38e060f 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c | |||
@@ -207,14 +207,6 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi) | |||
207 | rcu_read_unlock(); | 207 | rcu_read_unlock(); |
208 | } | 208 | } |
209 | 209 | ||
210 | |||
211 | static void free_dm_hw_stat(struct rcu_head *head) | ||
212 | { | ||
213 | struct dm_hw_stat_delta *n; | ||
214 | n = container_of(head, struct dm_hw_stat_delta, rcu); | ||
215 | kfree(n); | ||
216 | } | ||
217 | |||
218 | static int set_all_monitor_traces(int state) | 210 | static int set_all_monitor_traces(int state) |
219 | { | 211 | { |
220 | int rc = 0; | 212 | int rc = 0; |
@@ -245,7 +237,7 @@ static int set_all_monitor_traces(int state) | |||
245 | list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) { | 237 | list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) { |
246 | if (new_stat->dev == NULL) { | 238 | if (new_stat->dev == NULL) { |
247 | list_del_rcu(&new_stat->list); | 239 | list_del_rcu(&new_stat->list); |
248 | call_rcu(&new_stat->rcu, free_dm_hw_stat); | 240 | kfree_rcu(new_stat, rcu); |
249 | } | 241 | } |
250 | } | 242 | } |
251 | break; | 243 | break; |
@@ -314,7 +306,7 @@ static int dropmon_net_event(struct notifier_block *ev_block, | |||
314 | new_stat->dev = NULL; | 306 | new_stat->dev = NULL; |
315 | if (trace_state == TRACE_OFF) { | 307 | if (trace_state == TRACE_OFF) { |
316 | list_del_rcu(&new_stat->list); | 308 | list_del_rcu(&new_stat->list); |
317 | call_rcu(&new_stat->rcu, free_dm_hw_stat); | 309 | kfree_rcu(new_stat, rcu); |
318 | break; | 310 | break; |
319 | } | 311 | } |
320 | } | 312 | } |
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 7c2373321b74..43b03dd71e85 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c | |||
@@ -249,13 +249,6 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | |||
249 | } | 249 | } |
250 | EXPORT_SYMBOL(gen_new_estimator); | 250 | EXPORT_SYMBOL(gen_new_estimator); |
251 | 251 | ||
252 | static void __gen_kill_estimator(struct rcu_head *head) | ||
253 | { | ||
254 | struct gen_estimator *e = container_of(head, | ||
255 | struct gen_estimator, e_rcu); | ||
256 | kfree(e); | ||
257 | } | ||
258 | |||
259 | /** | 252 | /** |
260 | * gen_kill_estimator - remove a rate estimator | 253 | * gen_kill_estimator - remove a rate estimator |
261 | * @bstats: basic statistics | 254 | * @bstats: basic statistics |
@@ -279,7 +272,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, | |||
279 | write_unlock(&est_lock); | 272 | write_unlock(&est_lock); |
280 | 273 | ||
281 | list_del_rcu(&e->list); | 274 | list_del_rcu(&e->list); |
282 | call_rcu(&e->e_rcu, __gen_kill_estimator); | 275 | kfree_rcu(e, e_rcu); |
283 | } | 276 | } |
284 | spin_unlock_bh(&est_tree_lock); | 277 | spin_unlock_bh(&est_tree_lock); |
285 | } | 278 | } |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 5ceb257e860c..80b2aad3b73d 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -565,13 +565,6 @@ static ssize_t show_rps_map(struct netdev_rx_queue *queue, | |||
565 | return len; | 565 | return len; |
566 | } | 566 | } |
567 | 567 | ||
568 | static void rps_map_release(struct rcu_head *rcu) | ||
569 | { | ||
570 | struct rps_map *map = container_of(rcu, struct rps_map, rcu); | ||
571 | |||
572 | kfree(map); | ||
573 | } | ||
574 | |||
575 | static ssize_t store_rps_map(struct netdev_rx_queue *queue, | 568 | static ssize_t store_rps_map(struct netdev_rx_queue *queue, |
576 | struct rx_queue_attribute *attribute, | 569 | struct rx_queue_attribute *attribute, |
577 | const char *buf, size_t len) | 570 | const char *buf, size_t len) |
@@ -619,7 +612,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, | |||
619 | spin_unlock(&rps_map_lock); | 612 | spin_unlock(&rps_map_lock); |
620 | 613 | ||
621 | if (old_map) | 614 | if (old_map) |
622 | call_rcu(&old_map->rcu, rps_map_release); | 615 | kfree_rcu(old_map, rcu); |
623 | 616 | ||
624 | free_cpumask_var(mask); | 617 | free_cpumask_var(mask); |
625 | return len; | 618 | return len; |
@@ -728,7 +721,7 @@ static void rx_queue_release(struct kobject *kobj) | |||
728 | map = rcu_dereference_raw(queue->rps_map); | 721 | map = rcu_dereference_raw(queue->rps_map); |
729 | if (map) { | 722 | if (map) { |
730 | RCU_INIT_POINTER(queue->rps_map, NULL); | 723 | RCU_INIT_POINTER(queue->rps_map, NULL); |
731 | call_rcu(&map->rcu, rps_map_release); | 724 | kfree_rcu(map, rcu); |
732 | } | 725 | } |
733 | 726 | ||
734 | flow_table = rcu_dereference_raw(queue->rps_flow_table); | 727 | flow_table = rcu_dereference_raw(queue->rps_flow_table); |
@@ -898,21 +891,6 @@ static ssize_t show_xps_map(struct netdev_queue *queue, | |||
898 | return len; | 891 | return len; |
899 | } | 892 | } |
900 | 893 | ||
901 | static void xps_map_release(struct rcu_head *rcu) | ||
902 | { | ||
903 | struct xps_map *map = container_of(rcu, struct xps_map, rcu); | ||
904 | |||
905 | kfree(map); | ||
906 | } | ||
907 | |||
908 | static void xps_dev_maps_release(struct rcu_head *rcu) | ||
909 | { | ||
910 | struct xps_dev_maps *dev_maps = | ||
911 | container_of(rcu, struct xps_dev_maps, rcu); | ||
912 | |||
913 | kfree(dev_maps); | ||
914 | } | ||
915 | |||
916 | static DEFINE_MUTEX(xps_map_mutex); | 894 | static DEFINE_MUTEX(xps_map_mutex); |
917 | #define xmap_dereference(P) \ | 895 | #define xmap_dereference(P) \ |
918 | rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) | 896 | rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) |
@@ -1009,7 +987,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue, | |||
1009 | map = dev_maps ? | 987 | map = dev_maps ? |
1010 | xmap_dereference(dev_maps->cpu_map[cpu]) : NULL; | 988 | xmap_dereference(dev_maps->cpu_map[cpu]) : NULL; |
1011 | if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map) | 989 | if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map) |
1012 | call_rcu(&map->rcu, xps_map_release); | 990 | kfree_rcu(map, rcu); |
1013 | if (new_dev_maps->cpu_map[cpu]) | 991 | if (new_dev_maps->cpu_map[cpu]) |
1014 | nonempty = 1; | 992 | nonempty = 1; |
1015 | } | 993 | } |
@@ -1022,7 +1000,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue, | |||
1022 | } | 1000 | } |
1023 | 1001 | ||
1024 | if (dev_maps) | 1002 | if (dev_maps) |
1025 | call_rcu(&dev_maps->rcu, xps_dev_maps_release); | 1003 | kfree_rcu(dev_maps, rcu); |
1026 | 1004 | ||
1027 | netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node : | 1005 | netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node : |
1028 | NUMA_NO_NODE); | 1006 | NUMA_NO_NODE); |
@@ -1084,7 +1062,7 @@ static void netdev_queue_release(struct kobject *kobj) | |||
1084 | else { | 1062 | else { |
1085 | RCU_INIT_POINTER(dev_maps->cpu_map[i], | 1063 | RCU_INIT_POINTER(dev_maps->cpu_map[i], |
1086 | NULL); | 1064 | NULL); |
1087 | call_rcu(&map->rcu, xps_map_release); | 1065 | kfree_rcu(map, rcu); |
1088 | map = NULL; | 1066 | map = NULL; |
1089 | } | 1067 | } |
1090 | } | 1068 | } |
@@ -1094,7 +1072,7 @@ static void netdev_queue_release(struct kobject *kobj) | |||
1094 | 1072 | ||
1095 | if (!nonempty) { | 1073 | if (!nonempty) { |
1096 | RCU_INIT_POINTER(dev->xps_maps, NULL); | 1074 | RCU_INIT_POINTER(dev->xps_maps, NULL); |
1097 | call_rcu(&dev_maps->rcu, xps_dev_maps_release); | 1075 | kfree_rcu(dev_maps, rcu); |
1098 | } | 1076 | } |
1099 | } | 1077 | } |
1100 | 1078 | ||
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 3f860261c5ee..297bb9272240 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -27,14 +27,6 @@ EXPORT_SYMBOL(init_net); | |||
27 | 27 | ||
28 | #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ | 28 | #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ |
29 | 29 | ||
30 | static void net_generic_release(struct rcu_head *rcu) | ||
31 | { | ||
32 | struct net_generic *ng; | ||
33 | |||
34 | ng = container_of(rcu, struct net_generic, rcu); | ||
35 | kfree(ng); | ||
36 | } | ||
37 | |||
38 | static int net_assign_generic(struct net *net, int id, void *data) | 30 | static int net_assign_generic(struct net *net, int id, void *data) |
39 | { | 31 | { |
40 | struct net_generic *ng, *old_ng; | 32 | struct net_generic *ng, *old_ng; |
@@ -68,7 +60,7 @@ static int net_assign_generic(struct net *net, int id, void *data) | |||
68 | memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*)); | 60 | memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*)); |
69 | 61 | ||
70 | rcu_assign_pointer(net->gen, ng); | 62 | rcu_assign_pointer(net->gen, ng); |
71 | call_rcu(&old_ng->rcu, net_generic_release); | 63 | kfree_rcu(old_ng, rcu); |
72 | assign: | 64 | assign: |
73 | ng->ptr[id - 1] = data; | 65 | ng->ptr[id - 1] = data; |
74 | return 0; | 66 | return 0; |
diff --git a/net/dccp/options.c b/net/dccp/options.c index f06ffcfc8d71..4b2ab657ac8e 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
@@ -123,6 +123,8 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, | |||
123 | case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R: | 123 | case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R: |
124 | if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */ | 124 | if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */ |
125 | break; | 125 | break; |
126 | if (len == 0) | ||
127 | goto out_invalid_option; | ||
126 | rc = dccp_feat_parse_options(sk, dreq, mandatory, opt, | 128 | rc = dccp_feat_parse_options(sk, dreq, mandatory, opt, |
127 | *value, value + 1, len - 1); | 129 | *value, value + 1, len - 1); |
128 | if (rc) | 130 | if (rc) |
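
The added length check rejects a zero-length CHANGE/CONFIRM option before the feature parser is handed len - 1: with len == 0 the parser would be pointed one byte past the option with a negative length (or an enormous one if widened unsigned), reading outside the packet. A hypothetical sketch of the guard, with handle_feature() standing in for the real sub-parser:

    /* value points at the option payload, len is its length in bytes */
    extern int handle_feature(unsigned char feat,
                              const unsigned char *val, int len);

    static int parse_feat_option(const unsigned char *value, int len)
    {
            if (len == 0)           /* feature number missing entirely */
                    return -1;      /* caller treats this as invalid */
            /* value[0] is the feature number, the rest its value list */
            return handle_feature(value[0], value + 1, len - 1);
    }
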
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 0dcaa903e00e..4c27615340dc 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c | |||
@@ -332,14 +332,9 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void) | |||
332 | return ifa; | 332 | return ifa; |
333 | } | 333 | } |
334 | 334 | ||
335 | static void dn_dev_free_ifa_rcu(struct rcu_head *head) | ||
336 | { | ||
337 | kfree(container_of(head, struct dn_ifaddr, rcu)); | ||
338 | } | ||
339 | |||
340 | static void dn_dev_free_ifa(struct dn_ifaddr *ifa) | 335 | static void dn_dev_free_ifa(struct dn_ifaddr *ifa) |
341 | { | 336 | { |
342 | call_rcu(&ifa->rcu, dn_dev_free_ifa_rcu); | 337 | kfree_rcu(ifa, rcu); |
343 | } | 338 | } |
344 | 339 | ||
345 | static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy) | 340 | static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy) |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 641a5a2a9f9c..33e2c35b74b7 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -141,18 +141,8 @@ const struct fib_prop fib_props[RTN_MAX + 1] = { | |||
141 | }, | 141 | }, |
142 | }; | 142 | }; |
143 | 143 | ||
144 | |||
145 | /* Release a nexthop info record */ | 144 | /* Release a nexthop info record */ |
146 | 145 | ||
147 | static void free_fib_info_rcu(struct rcu_head *head) | ||
148 | { | ||
149 | struct fib_info *fi = container_of(head, struct fib_info, rcu); | ||
150 | |||
151 | if (fi->fib_metrics != (u32 *) dst_default_metrics) | ||
152 | kfree(fi->fib_metrics); | ||
153 | kfree(fi); | ||
154 | } | ||
155 | |||
156 | void free_fib_info(struct fib_info *fi) | 146 | void free_fib_info(struct fib_info *fi) |
157 | { | 147 | { |
158 | if (fi->fib_dead == 0) { | 148 | if (fi->fib_dead == 0) { |
@@ -166,7 +156,7 @@ void free_fib_info(struct fib_info *fi) | |||
166 | } endfor_nexthops(fi); | 156 | } endfor_nexthops(fi); |
167 | fib_info_cnt--; | 157 | fib_info_cnt--; |
168 | release_net(fi->fib_net); | 158 | release_net(fi->fib_net); |
169 | call_rcu(&fi->rcu, free_fib_info_rcu); | 159 | kfree_rcu(fi, rcu); |
170 | } | 160 | } |
171 | 161 | ||
172 | void fib_release_info(struct fib_info *fi) | 162 | void fib_release_info(struct fib_info *fi) |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 5fe9b8b41df3..11d4d28190bd 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -350,14 +350,9 @@ static inline void free_leaf(struct leaf *l) | |||
350 | call_rcu_bh(&l->rcu, __leaf_free_rcu); | 350 | call_rcu_bh(&l->rcu, __leaf_free_rcu); |
351 | } | 351 | } |
352 | 352 | ||
353 | static void __leaf_info_free_rcu(struct rcu_head *head) | ||
354 | { | ||
355 | kfree(container_of(head, struct leaf_info, rcu)); | ||
356 | } | ||
357 | |||
358 | static inline void free_leaf_info(struct leaf_info *leaf) | 353 | static inline void free_leaf_info(struct leaf_info *leaf) |
359 | { | 354 | { |
360 | call_rcu(&leaf->rcu, __leaf_info_free_rcu); | 355 | kfree_rcu(leaf, rcu); |
361 | } | 356 | } |
362 | 357 | ||
363 | static struct tnode *tnode_alloc(size_t size) | 358 | static struct tnode *tnode_alloc(size_t size) |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 1fd3d9ce8398..8f62d66d0857 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -149,17 +149,11 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc); | |||
149 | static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, | 149 | static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, |
150 | int sfcount, __be32 *psfsrc, int delta); | 150 | int sfcount, __be32 *psfsrc, int delta); |
151 | 151 | ||
152 | |||
153 | static void ip_mc_list_reclaim(struct rcu_head *head) | ||
154 | { | ||
155 | kfree(container_of(head, struct ip_mc_list, rcu)); | ||
156 | } | ||
157 | |||
158 | static void ip_ma_put(struct ip_mc_list *im) | 152 | static void ip_ma_put(struct ip_mc_list *im) |
159 | { | 153 | { |
160 | if (atomic_dec_and_test(&im->refcnt)) { | 154 | if (atomic_dec_and_test(&im->refcnt)) { |
161 | in_dev_put(im->interface); | 155 | in_dev_put(im->interface); |
162 | call_rcu(&im->rcu, ip_mc_list_reclaim); | 156 | kfree_rcu(im, rcu); |
163 | } | 157 | } |
164 | } | 158 | } |
165 | 159 | ||
@@ -1836,12 +1830,6 @@ done: | |||
1836 | } | 1830 | } |
1837 | EXPORT_SYMBOL(ip_mc_join_group); | 1831 | EXPORT_SYMBOL(ip_mc_join_group); |
1838 | 1832 | ||
1839 | static void ip_sf_socklist_reclaim(struct rcu_head *rp) | ||
1840 | { | ||
1841 | kfree(container_of(rp, struct ip_sf_socklist, rcu)); | ||
1842 | /* sk_omem_alloc should have been decreased by the caller*/ | ||
1843 | } | ||
1844 | |||
1845 | static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, | 1833 | static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, |
1846 | struct in_device *in_dev) | 1834 | struct in_device *in_dev) |
1847 | { | 1835 | { |
@@ -1858,18 +1846,10 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, | |||
1858 | rcu_assign_pointer(iml->sflist, NULL); | 1846 | rcu_assign_pointer(iml->sflist, NULL); |
1859 | /* decrease mem now to avoid the memleak warning */ | 1847 | /* decrease mem now to avoid the memleak warning */ |
1860 | atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc); | 1848 | atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc); |
1861 | call_rcu(&psf->rcu, ip_sf_socklist_reclaim); | 1849 | kfree_rcu(psf, rcu); |
1862 | return err; | 1850 | return err; |
1863 | } | 1851 | } |
1864 | 1852 | ||
1865 | |||
1866 | static void ip_mc_socklist_reclaim(struct rcu_head *rp) | ||
1867 | { | ||
1868 | kfree(container_of(rp, struct ip_mc_socklist, rcu)); | ||
1869 | /* sk_omem_alloc should have been decreased by the caller*/ | ||
1870 | } | ||
1871 | |||
1872 | |||
1873 | /* | 1853 | /* |
1874 | * Ask a socket to leave a group. | 1854 | * Ask a socket to leave a group. |
1875 | */ | 1855 | */ |
@@ -1909,7 +1889,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) | |||
1909 | rtnl_unlock(); | 1889 | rtnl_unlock(); |
1910 | /* decrease mem now to avoid the memleak warning */ | 1890 | /* decrease mem now to avoid the memleak warning */ |
1911 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); | 1891 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); |
1912 | call_rcu(&iml->rcu, ip_mc_socklist_reclaim); | 1892 | kfree_rcu(iml, rcu); |
1913 | return 0; | 1893 | return 0; |
1914 | } | 1894 | } |
1915 | if (!in_dev) | 1895 | if (!in_dev) |
@@ -2026,7 +2006,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct | |||
2026 | newpsl->sl_addr[i] = psl->sl_addr[i]; | 2006 | newpsl->sl_addr[i] = psl->sl_addr[i]; |
2027 | /* decrease mem now to avoid the memleak warning */ | 2007 | /* decrease mem now to avoid the memleak warning */ |
2028 | atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc); | 2008 | atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc); |
2029 | call_rcu(&psl->rcu, ip_sf_socklist_reclaim); | 2009 | kfree_rcu(psl, rcu); |
2030 | } | 2010 | } |
2031 | rcu_assign_pointer(pmc->sflist, newpsl); | 2011 | rcu_assign_pointer(pmc->sflist, newpsl); |
2032 | psl = newpsl; | 2012 | psl = newpsl; |
@@ -2127,7 +2107,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) | |||
2127 | psl->sl_count, psl->sl_addr, 0); | 2107 | psl->sl_count, psl->sl_addr, 0); |
2128 | /* decrease mem now to avoid the memleak warning */ | 2108 | /* decrease mem now to avoid the memleak warning */ |
2129 | atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc); | 2109 | atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc); |
2130 | call_rcu(&psl->rcu, ip_sf_socklist_reclaim); | 2110 | kfree_rcu(psl, rcu); |
2131 | } else | 2111 | } else |
2132 | (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, | 2112 | (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, |
2133 | 0, NULL, 0); | 2113 | 0, NULL, 0); |
@@ -2324,7 +2304,7 @@ void ip_mc_drop_socket(struct sock *sk) | |||
2324 | ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); | 2304 | ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); |
2325 | /* decrease mem now to avoid the memleak warning */ | 2305 | /* decrease mem now to avoid the memleak warning */ |
2326 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); | 2306 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); |
2327 | call_rcu(&iml->rcu, ip_mc_socklist_reclaim); | 2307 | kfree_rcu(iml, rcu); |
2328 | } | 2308 | } |
2329 | rtnl_unlock(); | 2309 | rtnl_unlock(); |
2330 | } | 2310 | } |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index a1151b8adf3c..b1d282f11be7 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -223,31 +223,30 @@ static void ip_expire(unsigned long arg) | |||
223 | 223 | ||
224 | if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { | 224 | if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { |
225 | struct sk_buff *head = qp->q.fragments; | 225 | struct sk_buff *head = qp->q.fragments; |
226 | const struct iphdr *iph; | ||
227 | int err; | ||
226 | 228 | ||
227 | rcu_read_lock(); | 229 | rcu_read_lock(); |
228 | head->dev = dev_get_by_index_rcu(net, qp->iif); | 230 | head->dev = dev_get_by_index_rcu(net, qp->iif); |
229 | if (!head->dev) | 231 | if (!head->dev) |
230 | goto out_rcu_unlock; | 232 | goto out_rcu_unlock; |
231 | 233 | ||
234 | /* skb dst is stale, drop it, and perform route lookup again */ | ||
235 | skb_dst_drop(head); | ||
236 | iph = ip_hdr(head); | ||
237 | err = ip_route_input_noref(head, iph->daddr, iph->saddr, | ||
238 | iph->tos, head->dev); | ||
239 | if (err) | ||
240 | goto out_rcu_unlock; | ||
241 | |||
232 | /* | 242 | /* |
233 | * Only search router table for the head fragment, | 243 | * Only an end host needs to send an ICMP |
234 | * when defraging timeout at PRE_ROUTING HOOK. | 244 | * "Fragment Reassembly Timeout" message, per RFC792. |
235 | */ | 245 | */ |
236 | if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) { | 246 | if (qp->user == IP_DEFRAG_CONNTRACK_IN && |
237 | const struct iphdr *iph = ip_hdr(head); | 247 | skb_rtable(head)->rt_type != RTN_LOCAL) |
238 | int err = ip_route_input(head, iph->daddr, iph->saddr, | 248 | goto out_rcu_unlock; |
239 | iph->tos, head->dev); | ||
240 | if (unlikely(err)) | ||
241 | goto out_rcu_unlock; | ||
242 | |||
243 | /* | ||
244 | * Only an end host needs to send an ICMP | ||
245 | * "Fragment Reassembly Timeout" message, per RFC792. | ||
246 | */ | ||
247 | if (skb_rtable(head)->rt_type != RTN_LOCAL) | ||
248 | goto out_rcu_unlock; | ||
249 | 249 | ||
250 | } | ||
251 | 250 | ||
252 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ | 251 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ |
253 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); | 252 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); |
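
ip_expire() now drops the head fragment's stale dst and re-routes it unconditionally, instead of looking a route up only for the conntrack PRE_ROUTING user; the RFC 792 rule, that only an end host may answer with a "Fragment Reassembly Timeout", is then checked against the fresh route. Condensed, the new flow reads (error labels elided; this paraphrases the hunk above):

    skb_dst_drop(head);                     /* cached route is stale */
    if (ip_route_input_noref(head, iph->daddr, iph->saddr,
                             iph->tos, head->dev))
            goto out;                       /* no route back: stay silent */

    if (qp->user == IP_DEFRAG_CONNTRACK_IN &&
        skb_rtable(head)->rt_type != RTN_LOCAL)
            goto out;                       /* forwarded traffic: no ICMP */

    icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
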
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 34340c9c95fa..f376b05cca81 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c | |||
@@ -93,6 +93,7 @@ struct bictcp { | |||
93 | u32 ack_cnt; /* number of acks */ | 93 | u32 ack_cnt; /* number of acks */ |
94 | u32 tcp_cwnd; /* estimated tcp cwnd */ | 94 | u32 tcp_cwnd; /* estimated tcp cwnd */ |
95 | #define ACK_RATIO_SHIFT 4 | 95 | #define ACK_RATIO_SHIFT 4 |
96 | #define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT) | ||
96 | u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ | 97 | u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ |
97 | u8 sample_cnt; /* number of samples to decide curr_rtt */ | 98 | u8 sample_cnt; /* number of samples to decide curr_rtt */ |
98 | u8 found; /* the exit point is found? */ | 99 | u8 found; /* the exit point is found? */ |
@@ -398,8 +399,12 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) | |||
398 | u32 delay; | 399 | u32 delay; |
399 | 400 | ||
400 | if (icsk->icsk_ca_state == TCP_CA_Open) { | 401 | if (icsk->icsk_ca_state == TCP_CA_Open) { |
401 | cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; | 402 | u32 ratio = ca->delayed_ack; |
402 | ca->delayed_ack += cnt; | 403 | |
404 | ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT; | ||
405 | ratio += cnt; | ||
406 | |||
407 | ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT); | ||
403 | } | 408 | } |
404 | 409 | ||
405 | /* Some calls are for duplicates without timestamps */ | 410 |
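
delayed_ack is a 16-bit packets-per-ACK estimate kept in 4-bit fixed point and maintained as an exponential moving average (shed 1/16 of the old value, add the new sample). The new ACK_RATIO_LIMIT (32 << 4) caps it because the value later appears as a divisor in bictcp_update(); a burst of very large cnt samples could otherwise wrap the u16 toward zero. A userspace sketch of the clamped update:

    #include <stdio.h>

    #define ACK_RATIO_SHIFT 4
    #define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)   /* 512 */

    static unsigned short update(unsigned short delayed_ack, unsigned cnt)
    {
            unsigned ratio = delayed_ack;

            ratio -= delayed_ack >> ACK_RATIO_SHIFT;   /* decay by 1/16 */
            ratio += cnt;                              /* add the sample */
            return ratio < ACK_RATIO_LIMIT ? ratio : ACK_RATIO_LIMIT;
    }

    int main(void)
    {
            unsigned short r = 16 << ACK_RATIO_SHIFT;  /* start: 16 pkts/ACK */
            for (int i = 0; i < 100; i++)
                    r = update(r, 100);                /* stretched-ACK bursts */
            printf("%u\n", r);                         /* pinned at 512 */
            return 0;
    }
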
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 571aa96a175c..2d51840e53a1 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) | |||
69 | } | 69 | } |
70 | EXPORT_SYMBOL(xfrm4_prepare_output); | 70 | EXPORT_SYMBOL(xfrm4_prepare_output); |
71 | 71 | ||
72 | static int xfrm4_output_finish(struct sk_buff *skb) | 72 | int xfrm4_output_finish(struct sk_buff *skb) |
73 | { | 73 | { |
74 | #ifdef CONFIG_NETFILTER | 74 | #ifdef CONFIG_NETFILTER |
75 | if (!skb_dst(skb)->xfrm) { | 75 | if (!skb_dst(skb)->xfrm) { |
@@ -86,7 +86,11 @@ static int xfrm4_output_finish(struct sk_buff *skb) | |||
86 | 86 | ||
87 | int xfrm4_output(struct sk_buff *skb) | 87 | int xfrm4_output(struct sk_buff *skb) |
88 | { | 88 | { |
89 | struct dst_entry *dst = skb_dst(skb); | ||
90 | struct xfrm_state *x = dst->xfrm; | ||
91 | |||
89 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, | 92 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, |
90 | NULL, skb_dst(skb)->dev, xfrm4_output_finish, | 93 | NULL, dst->dev, |
94 | x->outer_mode->afinfo->output_finish, | ||
91 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 95 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
92 | } | 96 | } |
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c index 1717c64628d1..805d63ef4340 100644 --- a/net/ipv4/xfrm4_state.c +++ b/net/ipv4/xfrm4_state.c | |||
@@ -78,6 +78,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = { | |||
78 | .init_tempsel = __xfrm4_init_tempsel, | 78 | .init_tempsel = __xfrm4_init_tempsel, |
79 | .init_temprop = xfrm4_init_temprop, | 79 | .init_temprop = xfrm4_init_temprop, |
80 | .output = xfrm4_output, | 80 | .output = xfrm4_output, |
81 | .output_finish = xfrm4_output_finish, | ||
81 | .extract_input = xfrm4_extract_input, | 82 | .extract_input = xfrm4_extract_input, |
82 | .extract_output = xfrm4_extract_output, | 83 | .extract_output = xfrm4_extract_output, |
83 | .transport_finish = xfrm4_transport_finish, | 84 | .transport_finish = xfrm4_transport_finish, |
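
Un-static'ing xfrm4_output_finish() and adding an .output_finish hook to xfrm_state_afinfo (mirrored for IPv6 further down) lets output completion be dispatched through the state's outer mode, so an inter-family tunnel, say IPv6 carried over IPv4, finishes with the outer family's handler instead of a hard-coded one. Sketched, with the struct abridged and the demux helper purely illustrative:

    struct xfrm_state_afinfo {
            unsigned int family;
            int (*output)(struct sk_buff *skb);
            int (*output_finish)(struct sk_buff *skb);  /* new hook */
            /* ... many more ops ... */
    };

    /* A v6-in-v4 tunnel state has an IPv4 outer mode, so this resolves
     * to xfrm4_output_finish() even when the inner packet is IPv6. */
    static int xfrm_output_demux(struct sk_buff *skb)
    {
            struct xfrm_state *x = skb_dst(skb)->xfrm;

            return x->outer_mode->afinfo->output_finish(skb);
    }
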
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index a7bda0757053..8f13d88d7dba 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -317,12 +317,6 @@ static void snmp6_free_dev(struct inet6_dev *idev) | |||
317 | 317 | ||
318 | /* Nobody refers to this device, we may destroy it. */ | 318 | /* Nobody refers to this device, we may destroy it. */ |
319 | 319 | ||
320 | static void in6_dev_finish_destroy_rcu(struct rcu_head *head) | ||
321 | { | ||
322 | struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu); | ||
323 | kfree(idev); | ||
324 | } | ||
325 | |||
326 | void in6_dev_finish_destroy(struct inet6_dev *idev) | 320 | void in6_dev_finish_destroy(struct inet6_dev *idev) |
327 | { | 321 | { |
328 | struct net_device *dev = idev->dev; | 322 | struct net_device *dev = idev->dev; |
@@ -339,7 +333,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) | |||
339 | return; | 333 | return; |
340 | } | 334 | } |
341 | snmp6_free_dev(idev); | 335 | snmp6_free_dev(idev); |
342 | call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu); | 336 | kfree_rcu(idev, rcu); |
343 | } | 337 | } |
344 | 338 | ||
345 | EXPORT_SYMBOL(in6_dev_finish_destroy); | 339 | EXPORT_SYMBOL(in6_dev_finish_destroy); |
@@ -535,12 +529,6 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) | |||
535 | } | 529 | } |
536 | #endif | 530 | #endif |
537 | 531 | ||
538 | static void inet6_ifa_finish_destroy_rcu(struct rcu_head *head) | ||
539 | { | ||
540 | struct inet6_ifaddr *ifp = container_of(head, struct inet6_ifaddr, rcu); | ||
541 | kfree(ifp); | ||
542 | } | ||
543 | |||
544 | /* Nobody refers to this ifaddr, destroy it */ | 532 | /* Nobody refers to this ifaddr, destroy it */ |
545 | void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | 533 | void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) |
546 | { | 534 | { |
@@ -561,7 +549,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | |||
561 | } | 549 | } |
562 | dst_release(&ifp->rt->dst); | 550 | dst_release(&ifp->rt->dst); |
563 | 551 | ||
564 | call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu); | 552 | kfree_rcu(ifp, rcu); |
565 | } | 553 | } |
566 | 554 | ||
567 | static void | 555 | static void |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 76b893771e6e..f2d98ca7588a 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -201,10 +201,6 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
201 | return 0; | 201 | return 0; |
202 | } | 202 | } |
203 | 203 | ||
204 | static void ipv6_mc_socklist_reclaim(struct rcu_head *head) | ||
205 | { | ||
206 | kfree(container_of(head, struct ipv6_mc_socklist, rcu)); | ||
207 | } | ||
208 | /* | 204 | /* |
209 | * socket leave on multicast group | 205 | * socket leave on multicast group |
210 | */ | 206 | */ |
@@ -239,7 +235,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
239 | (void) ip6_mc_leave_src(sk, mc_lst, NULL); | 235 | (void) ip6_mc_leave_src(sk, mc_lst, NULL); |
240 | rcu_read_unlock(); | 236 | rcu_read_unlock(); |
241 | atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); | 237 | atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); |
242 | call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim); | 238 | kfree_rcu(mc_lst, rcu); |
243 | return 0; | 239 | return 0; |
244 | } | 240 | } |
245 | } | 241 | } |
@@ -307,7 +303,7 @@ void ipv6_sock_mc_close(struct sock *sk) | |||
307 | rcu_read_unlock(); | 303 | rcu_read_unlock(); |
308 | 304 | ||
309 | atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); | 305 | atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); |
310 | call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim); | 306 | kfree_rcu(mc_lst, rcu); |
311 | 307 | ||
312 | spin_lock(&ipv6_sk_mc_lock); | 308 | spin_lock(&ipv6_sk_mc_lock); |
313 | } | 309 | } |
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index 28e74488a329..a5a4c5dd5396 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c | |||
@@ -45,6 +45,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) | |||
45 | int tcphoff, needs_ack; | 45 | int tcphoff, needs_ack; |
46 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); | 46 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); |
47 | struct ipv6hdr *ip6h; | 47 | struct ipv6hdr *ip6h; |
48 | #define DEFAULT_TOS_VALUE 0x0U | ||
49 | const __u8 tclass = DEFAULT_TOS_VALUE; | ||
48 | struct dst_entry *dst = NULL; | 50 | struct dst_entry *dst = NULL; |
49 | u8 proto; | 51 | u8 proto; |
50 | struct flowi6 fl6; | 52 | struct flowi6 fl6; |
@@ -124,7 +126,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) | |||
124 | skb_put(nskb, sizeof(struct ipv6hdr)); | 126 | skb_put(nskb, sizeof(struct ipv6hdr)); |
125 | skb_reset_network_header(nskb); | 127 | skb_reset_network_header(nskb); |
126 | ip6h = ipv6_hdr(nskb); | 128 | ip6h = ipv6_hdr(nskb); |
127 | ip6h->version = 6; | 129 | *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20)); |
128 | ip6h->hop_limit = ip6_dst_hoplimit(dst); | 130 | ip6h->hop_limit = ip6_dst_hoplimit(dst); |
129 | ip6h->nexthdr = IPPROTO_TCP; | 131 | ip6h->nexthdr = IPPROTO_TCP; |
130 | ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr); | 132 | ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr); |
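
Writing *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20)) fills the entire first word of the reply's IPv6 header at once, instead of setting only ip6h->version and leaving traffic class and flow label uninitialized: bits 31..28 carry the version, bits 27..20 the traffic class, bits 19..0 the flow label, so 0x60000000 is exactly version 6 with everything else zero. A userspace sketch of the composition:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    static uint32_t ip6_first_word(uint8_t tclass, uint32_t flowlabel)
    {
            /* version(4) | traffic class(8) | flow label(20) */
            return htonl((6u << 28) | ((uint32_t)tclass << 20) |
                         (flowlabel & 0xFFFFFu));
    }

    int main(void)
    {
            /* tclass 0, no flow label: the 0x60000000 from the patch */
            printf("%#x\n", (unsigned)ntohl(ip6_first_word(0, 0)));
            return 0;
    }
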
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 43b33373adb2..5f35d595e4a5 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -401,11 +401,6 @@ out: | |||
401 | return err; | 401 | return err; |
402 | } | 402 | } |
403 | 403 | ||
404 | static void prl_entry_destroy_rcu(struct rcu_head *head) | ||
405 | { | ||
406 | kfree(container_of(head, struct ip_tunnel_prl_entry, rcu_head)); | ||
407 | } | ||
408 | |||
409 | static void prl_list_destroy_rcu(struct rcu_head *head) | 404 | static void prl_list_destroy_rcu(struct rcu_head *head) |
410 | { | 405 | { |
411 | struct ip_tunnel_prl_entry *p, *n; | 406 | struct ip_tunnel_prl_entry *p, *n; |
@@ -433,7 +428,7 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) | |||
433 | p = &x->next) { | 428 | p = &x->next) { |
434 | if (x->addr == a->addr) { | 429 | if (x->addr == a->addr) { |
435 | *p = x->next; | 430 | *p = x->next; |
436 | call_rcu(&x->rcu_head, prl_entry_destroy_rcu); | 431 | kfree_rcu(x, rcu_head); |
437 | t->prl_count--; | 432 | t->prl_count--; |
438 | goto out; | 433 | goto out; |
439 | } | 434 | } |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 8e688b3de9ab..49a91c5f5623 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -79,7 +79,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) | |||
79 | } | 79 | } |
80 | EXPORT_SYMBOL(xfrm6_prepare_output); | 80 | EXPORT_SYMBOL(xfrm6_prepare_output); |
81 | 81 | ||
82 | static int xfrm6_output_finish(struct sk_buff *skb) | 82 | int xfrm6_output_finish(struct sk_buff *skb) |
83 | { | 83 | { |
84 | #ifdef CONFIG_NETFILTER | 84 | #ifdef CONFIG_NETFILTER |
85 | IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; | 85 | IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; |
@@ -97,9 +97,9 @@ static int __xfrm6_output(struct sk_buff *skb) | |||
97 | if ((x && x->props.mode == XFRM_MODE_TUNNEL) && | 97 | if ((x && x->props.mode == XFRM_MODE_TUNNEL) && |
98 | ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || | 98 | ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || |
99 | dst_allfrag(skb_dst(skb)))) { | 99 | dst_allfrag(skb_dst(skb)))) { |
100 | return ip6_fragment(skb, xfrm6_output_finish); | 100 | return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); |
101 | } | 101 | } |
102 | return xfrm6_output_finish(skb); | 102 | return x->outer_mode->afinfo->output_finish(skb); |
103 | } | 103 | } |
104 | 104 | ||
105 | int xfrm6_output(struct sk_buff *skb) | 105 | int xfrm6_output(struct sk_buff *skb) |
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index afe941e9415c..248f0b2a7ee9 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c | |||
@@ -178,6 +178,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = { | |||
178 | .tmpl_sort = __xfrm6_tmpl_sort, | 178 | .tmpl_sort = __xfrm6_tmpl_sort, |
179 | .state_sort = __xfrm6_state_sort, | 179 | .state_sort = __xfrm6_state_sort, |
180 | .output = xfrm6_output, | 180 | .output = xfrm6_output, |
181 | .output_finish = xfrm6_output_finish, | ||
181 | .extract_input = xfrm6_extract_input, | 182 | .extract_input = xfrm6_extract_input, |
182 | .extract_output = xfrm6_extract_output, | 183 | .extract_output = xfrm6_extract_output, |
183 | .transport_finish = xfrm6_transport_finish, | 184 | .transport_finish = xfrm6_transport_finish, |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 63d852cb4ca2..53defafb9aae 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -136,14 +136,6 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1 | |||
136 | ieee80211_tx_skb(sdata, skb); | 136 | ieee80211_tx_skb(sdata, skb); |
137 | } | 137 | } |
138 | 138 | ||
139 | static void kfree_tid_tx(struct rcu_head *rcu_head) | ||
140 | { | ||
141 | struct tid_ampdu_tx *tid_tx = | ||
142 | container_of(rcu_head, struct tid_ampdu_tx, rcu_head); | ||
143 | |||
144 | kfree(tid_tx); | ||
145 | } | ||
146 | |||
147 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | 139 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, |
148 | enum ieee80211_back_parties initiator, | 140 | enum ieee80211_back_parties initiator, |
149 | bool tx) | 141 | bool tx) |
@@ -163,7 +155,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
163 | /* not even started yet! */ | 155 | /* not even started yet! */ |
164 | rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); | 156 | rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); |
165 | spin_unlock_bh(&sta->lock); | 157 | spin_unlock_bh(&sta->lock); |
166 | call_rcu(&tid_tx->rcu_head, kfree_tid_tx); | 158 | kfree_rcu(tid_tx, rcu_head); |
167 | return 0; | 159 | return 0; |
168 | } | 160 | } |
169 | 161 | ||
@@ -322,7 +314,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) | |||
322 | spin_unlock_bh(&sta->lock); | 314 | spin_unlock_bh(&sta->lock); |
323 | 315 | ||
324 | ieee80211_wake_queue_agg(local, tid); | 316 | ieee80211_wake_queue_agg(local, tid); |
325 | call_rcu(&tid_tx->rcu_head, kfree_tid_tx); | 317 | kfree_rcu(tid_tx, rcu_head); |
326 | return; | 318 | return; |
327 | } | 319 | } |
328 | 320 | ||
@@ -701,7 +693,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) | |||
701 | 693 | ||
702 | ieee80211_agg_splice_finish(local, tid); | 694 | ieee80211_agg_splice_finish(local, tid); |
703 | 695 | ||
704 | call_rcu(&tid_tx->rcu_head, kfree_tid_tx); | 696 | kfree_rcu(tid_tx, rcu_head); |
705 | 697 | ||
706 | unlock_sta: | 698 | unlock_sta: |
707 | spin_unlock_bh(&sta->lock); | 699 | spin_unlock_bh(&sta->lock); |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ce4596ed1268..bd1224fd216a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -237,6 +237,10 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) | |||
237 | &local->dynamic_ps_disable_work); | 237 | &local->dynamic_ps_disable_work); |
238 | } | 238 | } |
239 | 239 | ||
240 | /* Don't restart the timer if we're not associated */ | ||
241 | if (!ifmgd->associated) | ||
242 | return TX_CONTINUE; | ||
243 | |||
240 | mod_timer(&local->dynamic_ps_timer, jiffies + | 244 | mod_timer(&local->dynamic_ps_timer, jiffies + |
241 | msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); | 245 | msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); |
242 | 246 | ||
diff --git a/net/mac80211/work.c b/net/mac80211/work.c index e73c8cae036b..ac3549690b8e 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c | |||
@@ -65,17 +65,9 @@ static void run_again(struct ieee80211_local *local, | |||
65 | mod_timer(&local->work_timer, timeout); | 65 | mod_timer(&local->work_timer, timeout); |
66 | } | 66 | } |
67 | 67 | ||
68 | static void work_free_rcu(struct rcu_head *head) | ||
69 | { | ||
70 | struct ieee80211_work *wk = | ||
71 | container_of(head, struct ieee80211_work, rcu_head); | ||
72 | |||
73 | kfree(wk); | ||
74 | } | ||
75 | |||
76 | void free_work(struct ieee80211_work *wk) | 68 | void free_work(struct ieee80211_work *wk) |
77 | { | 69 | { |
78 | call_rcu(&wk->rcu_head, work_free_rcu); | 70 | kfree_rcu(wk, rcu_head); |
79 | } | 71 | } |
80 | 72 | ||
81 | static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len, | 73 | static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len, |
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c index 2dc6de13ac18..059af3120be7 100644 --- a/net/netfilter/ipvs/ip_vs_app.c +++ b/net/netfilter/ipvs/ip_vs_app.c | |||
@@ -572,11 +572,11 @@ static const struct file_operations ip_vs_app_fops = { | |||
572 | .open = ip_vs_app_open, | 572 | .open = ip_vs_app_open, |
573 | .read = seq_read, | 573 | .read = seq_read, |
574 | .llseek = seq_lseek, | 574 | .llseek = seq_lseek, |
575 | .release = seq_release, | 575 | .release = seq_release_net, |
576 | }; | 576 | }; |
577 | #endif | 577 | #endif |
578 | 578 | ||
579 | static int __net_init __ip_vs_app_init(struct net *net) | 579 | int __net_init __ip_vs_app_init(struct net *net) |
580 | { | 580 | { |
581 | struct netns_ipvs *ipvs = net_ipvs(net); | 581 | struct netns_ipvs *ipvs = net_ipvs(net); |
582 | 582 | ||
@@ -585,26 +585,17 @@ static int __net_init __ip_vs_app_init(struct net *net) | |||
585 | return 0; | 585 | return 0; |
586 | } | 586 | } |
587 | 587 | ||
588 | static void __net_exit __ip_vs_app_cleanup(struct net *net) | 588 | void __net_exit __ip_vs_app_cleanup(struct net *net) |
589 | { | 589 | { |
590 | proc_net_remove(net, "ip_vs_app"); | 590 | proc_net_remove(net, "ip_vs_app"); |
591 | } | 591 | } |
592 | 592 | ||
593 | static struct pernet_operations ip_vs_app_ops = { | ||
594 | .init = __ip_vs_app_init, | ||
595 | .exit = __ip_vs_app_cleanup, | ||
596 | }; | ||
597 | |||
598 | int __init ip_vs_app_init(void) | 593 | int __init ip_vs_app_init(void) |
599 | { | 594 | { |
600 | int rv; | 595 | return 0; |
601 | |||
602 | rv = register_pernet_subsys(&ip_vs_app_ops); | ||
603 | return rv; | ||
604 | } | 596 | } |
605 | 597 | ||
606 | 598 | ||
607 | void ip_vs_app_cleanup(void) | 599 | void ip_vs_app_cleanup(void) |
608 | { | 600 | { |
609 | unregister_pernet_subsys(&ip_vs_app_ops); | ||
610 | } | 601 | } |
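
Two changes meet in ip_vs_app.c: the proc file's release hook becomes seq_release_net(), pairing with an open that took a struct net reference, and the per-net init/cleanup functions shed their private pernet_operations because ip_vs_core now calls them directly in a controlled order (see the __ip_vs_init() hunk below). The open/release pairing, sketched with illustrative names:

    /* seq_open_net() takes a reference on the inode's struct net;
     * seq_release_net() is the release hook that drops it again. */
    static int foo_open(struct inode *inode, struct file *file)
    {
            return seq_open_net(inode, file, &foo_seq_ops,
                                sizeof(struct seq_net_private));
    }

    static const struct file_operations foo_fops = {
            .open    = foo_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release_net,   /* not plain seq_release */
    };

Pairing a net-aware open with plain seq_release() leaks a netns reference, which is what the .release changes in this file, in ip_vs_conn.c and in ip_vs_ctl.c below are fixing.
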
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index c97bd45975be..bf28ac2fc99b 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
@@ -1046,7 +1046,7 @@ static const struct file_operations ip_vs_conn_fops = { | |||
1046 | .open = ip_vs_conn_open, | 1046 | .open = ip_vs_conn_open, |
1047 | .read = seq_read, | 1047 | .read = seq_read, |
1048 | .llseek = seq_lseek, | 1048 | .llseek = seq_lseek, |
1049 | .release = seq_release, | 1049 | .release = seq_release_net, |
1050 | }; | 1050 | }; |
1051 | 1051 | ||
1052 | static const char *ip_vs_origin_name(unsigned flags) | 1052 | static const char *ip_vs_origin_name(unsigned flags) |
@@ -1114,7 +1114,7 @@ static const struct file_operations ip_vs_conn_sync_fops = { | |||
1114 | .open = ip_vs_conn_sync_open, | 1114 | .open = ip_vs_conn_sync_open, |
1115 | .read = seq_read, | 1115 | .read = seq_read, |
1116 | .llseek = seq_lseek, | 1116 | .llseek = seq_lseek, |
1117 | .release = seq_release, | 1117 | .release = seq_release_net, |
1118 | }; | 1118 | }; |
1119 | 1119 | ||
1120 | #endif | 1120 | #endif |
@@ -1258,22 +1258,17 @@ int __net_init __ip_vs_conn_init(struct net *net) | |||
1258 | return 0; | 1258 | return 0; |
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | static void __net_exit __ip_vs_conn_cleanup(struct net *net) | 1261 | void __net_exit __ip_vs_conn_cleanup(struct net *net) |
1262 | { | 1262 | { |
1263 | /* flush all the connection entries first */ | 1263 | /* flush all the connection entries first */ |
1264 | ip_vs_conn_flush(net); | 1264 | ip_vs_conn_flush(net); |
1265 | proc_net_remove(net, "ip_vs_conn"); | 1265 | proc_net_remove(net, "ip_vs_conn"); |
1266 | proc_net_remove(net, "ip_vs_conn_sync"); | 1266 | proc_net_remove(net, "ip_vs_conn_sync"); |
1267 | } | 1267 | } |
1268 | static struct pernet_operations ipvs_conn_ops = { | ||
1269 | .init = __ip_vs_conn_init, | ||
1270 | .exit = __ip_vs_conn_cleanup, | ||
1271 | }; | ||
1272 | 1268 | ||
1273 | int __init ip_vs_conn_init(void) | 1269 | int __init ip_vs_conn_init(void) |
1274 | { | 1270 | { |
1275 | int idx; | 1271 | int idx; |
1276 | int retc; | ||
1277 | 1272 | ||
1278 | /* Compute size and mask */ | 1273 | /* Compute size and mask */ |
1279 | ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; | 1274 | ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; |
@@ -1309,17 +1304,14 @@ int __init ip_vs_conn_init(void) | |||
1309 | rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); | 1304 | rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); |
1310 | } | 1305 | } |
1311 | 1306 | ||
1312 | retc = register_pernet_subsys(&ipvs_conn_ops); | ||
1313 | |||
1314 | /* calculate the random value for connection hash */ | 1307 | /* calculate the random value for connection hash */ |
1315 | get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd)); | 1308 | get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd)); |
1316 | 1309 | ||
1317 | return retc; | 1310 | return 0; |
1318 | } | 1311 | } |
1319 | 1312 | ||
1320 | void ip_vs_conn_cleanup(void) | 1313 | void ip_vs_conn_cleanup(void) |
1321 | { | 1314 | { |
1322 | unregister_pernet_subsys(&ipvs_conn_ops); | ||
1323 | /* Release the empty cache */ | 1315 | /* Release the empty cache */ |
1324 | kmem_cache_destroy(ip_vs_conn_cachep); | 1316 | kmem_cache_destroy(ip_vs_conn_cachep); |
1325 | vfree(ip_vs_conn_tab); | 1317 | vfree(ip_vs_conn_tab); |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 07accf6b2401..a74dae6c5dbc 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -1113,6 +1113,9 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1113 | return NF_ACCEPT; | 1113 | return NF_ACCEPT; |
1114 | 1114 | ||
1115 | net = skb_net(skb); | 1115 | net = skb_net(skb); |
1116 | if (!net_ipvs(net)->enable) | ||
1117 | return NF_ACCEPT; | ||
1118 | |||
1116 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 1119 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
1117 | #ifdef CONFIG_IP_VS_IPV6 | 1120 | #ifdef CONFIG_IP_VS_IPV6 |
1118 | if (af == AF_INET6) { | 1121 | if (af == AF_INET6) { |
@@ -1343,6 +1346,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
1343 | return NF_ACCEPT; /* The packet looks wrong, ignore */ | 1346 | return NF_ACCEPT; /* The packet looks wrong, ignore */ |
1344 | 1347 | ||
1345 | net = skb_net(skb); | 1348 | net = skb_net(skb); |
1349 | |||
1346 | pd = ip_vs_proto_data_get(net, cih->protocol); | 1350 | pd = ip_vs_proto_data_get(net, cih->protocol); |
1347 | if (!pd) | 1351 | if (!pd) |
1348 | return NF_ACCEPT; | 1352 | return NF_ACCEPT; |
@@ -1529,6 +1533,11 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1529 | IP_VS_DBG_ADDR(af, &iph.daddr), hooknum); | 1533 | IP_VS_DBG_ADDR(af, &iph.daddr), hooknum); |
1530 | return NF_ACCEPT; | 1534 | return NF_ACCEPT; |
1531 | } | 1535 | } |
1536 | /* ipvs enabled in this netns ? */ | ||
1537 | net = skb_net(skb); | ||
1538 | if (!net_ipvs(net)->enable) | ||
1539 | return NF_ACCEPT; | ||
1540 | |||
1532 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 1541 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
1533 | 1542 | ||
1534 | /* Bad... Do not break raw sockets */ | 1543 | /* Bad... Do not break raw sockets */ |
@@ -1562,7 +1571,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1562 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 1571 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
1563 | } | 1572 | } |
1564 | 1573 | ||
1565 | net = skb_net(skb); | ||
1566 | /* Protocol supported? */ | 1574 | /* Protocol supported? */ |
1567 | pd = ip_vs_proto_data_get(net, iph.protocol); | 1575 | pd = ip_vs_proto_data_get(net, iph.protocol); |
1568 | if (unlikely(!pd)) | 1576 | if (unlikely(!pd)) |
@@ -1588,7 +1596,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1588 | } | 1596 | } |
1589 | 1597 | ||
1590 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); | 1598 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); |
1591 | net = skb_net(skb); | ||
1592 | ipvs = net_ipvs(net); | 1599 | ipvs = net_ipvs(net); |
1593 | /* Check the server status */ | 1600 | /* Check the server status */ |
1594 | if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 1601 | if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
@@ -1743,10 +1750,16 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, | |||
1743 | int (*okfn)(struct sk_buff *)) | 1750 | int (*okfn)(struct sk_buff *)) |
1744 | { | 1751 | { |
1745 | int r; | 1752 | int r; |
1753 | struct net *net; | ||
1746 | 1754 | ||
1747 | if (ip_hdr(skb)->protocol != IPPROTO_ICMP) | 1755 | if (ip_hdr(skb)->protocol != IPPROTO_ICMP) |
1748 | return NF_ACCEPT; | 1756 | return NF_ACCEPT; |
1749 | 1757 | ||
1758 | /* ipvs enabled in this netns ? */ | ||
1759 | net = skb_net(skb); | ||
1760 | if (!net_ipvs(net)->enable) | ||
1761 | return NF_ACCEPT; | ||
1762 | |||
1750 | return ip_vs_in_icmp(skb, &r, hooknum); | 1763 | return ip_vs_in_icmp(skb, &r, hooknum); |
1751 | } | 1764 | } |
1752 | 1765 | ||
@@ -1757,10 +1770,16 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | |||
1757 | int (*okfn)(struct sk_buff *)) | 1770 | int (*okfn)(struct sk_buff *)) |
1758 | { | 1771 | { |
1759 | int r; | 1772 | int r; |
1773 | struct net *net; | ||
1760 | 1774 | ||
1761 | if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) | 1775 | if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) |
1762 | return NF_ACCEPT; | 1776 | return NF_ACCEPT; |
1763 | 1777 | ||
1778 | /* ipvs enabled in this netns ? */ | ||
1779 | net = skb_net(skb); | ||
1780 | if (!net_ipvs(net)->enable) | ||
1781 | return NF_ACCEPT; | ||
1782 | |||
1764 | return ip_vs_in_icmp_v6(skb, &r, hooknum); | 1783 | return ip_vs_in_icmp_v6(skb, &r, hooknum); |
1765 | } | 1784 | } |
1766 | #endif | 1785 | #endif |
@@ -1884,19 +1903,70 @@ static int __net_init __ip_vs_init(struct net *net) | |||
1884 | pr_err("%s(): no memory.\n", __func__); | 1903 | pr_err("%s(): no memory.\n", __func__); |
1885 | return -ENOMEM; | 1904 | return -ENOMEM; |
1886 | } | 1905 | } |
1906 | /* Hold the beast until a service is registered */ | ||
1907 | ipvs->enable = 0; | ||
1887 | ipvs->net = net; | 1908 | ipvs->net = net; |
1888 | /* Counters used for creating unique names */ | 1909 | /* Counters used for creating unique names */ |
1889 | ipvs->gen = atomic_read(&ipvs_netns_cnt); | 1910 | ipvs->gen = atomic_read(&ipvs_netns_cnt); |
1890 | atomic_inc(&ipvs_netns_cnt); | 1911 | atomic_inc(&ipvs_netns_cnt); |
1891 | net->ipvs = ipvs; | 1912 | net->ipvs = ipvs; |
1913 | |||
1914 | if (__ip_vs_estimator_init(net) < 0) | ||
1915 | goto estimator_fail; | ||
1916 | |||
1917 | if (__ip_vs_control_init(net) < 0) | ||
1918 | goto control_fail; | ||
1919 | |||
1920 | if (__ip_vs_protocol_init(net) < 0) | ||
1921 | goto protocol_fail; | ||
1922 | |||
1923 | if (__ip_vs_app_init(net) < 0) | ||
1924 | goto app_fail; | ||
1925 | |||
1926 | if (__ip_vs_conn_init(net) < 0) | ||
1927 | goto conn_fail; | ||
1928 | |||
1929 | if (__ip_vs_sync_init(net) < 0) | ||
1930 | goto sync_fail; | ||
1931 | |||
1892 | printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n", | 1932 | printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n", |
1893 | sizeof(struct netns_ipvs), ipvs->gen); | 1933 | sizeof(struct netns_ipvs), ipvs->gen); |
1894 | return 0; | 1934 | return 0; |
1935 | /* | ||
1936 | * Error handling | ||
1937 | */ | ||
1938 | |||
1939 | sync_fail: | ||
1940 | __ip_vs_conn_cleanup(net); | ||
1941 | conn_fail: | ||
1942 | __ip_vs_app_cleanup(net); | ||
1943 | app_fail: | ||
1944 | __ip_vs_protocol_cleanup(net); | ||
1945 | protocol_fail: | ||
1946 | __ip_vs_control_cleanup(net); | ||
1947 | control_fail: | ||
1948 | __ip_vs_estimator_cleanup(net); | ||
1949 | estimator_fail: | ||
1950 | return -ENOMEM; | ||
1895 | } | 1951 | } |
1896 | 1952 | ||
1897 | static void __net_exit __ip_vs_cleanup(struct net *net) | 1953 | static void __net_exit __ip_vs_cleanup(struct net *net) |
1898 | { | 1954 | { |
1899 | IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen); | 1955 | __ip_vs_service_cleanup(net); /* ip_vs_flush() with locks */ |
1956 | __ip_vs_conn_cleanup(net); | ||
1957 | __ip_vs_app_cleanup(net); | ||
1958 | __ip_vs_protocol_cleanup(net); | ||
1959 | __ip_vs_control_cleanup(net); | ||
1960 | __ip_vs_estimator_cleanup(net); | ||
1961 | IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen); | ||
1962 | } | ||
1963 | |||
1964 | static void __net_exit __ip_vs_dev_cleanup(struct net *net) | ||
1965 | { | ||
1966 | EnterFunction(2); | ||
1967 | net_ipvs(net)->enable = 0; /* Disable packet reception */ | ||
1968 | __ip_vs_sync_cleanup(net); | ||
1969 | LeaveFunction(2); | ||
1900 | } | 1970 | } |
1901 | 1971 | ||
1902 | static struct pernet_operations ipvs_core_ops = { | 1972 | static struct pernet_operations ipvs_core_ops = { |
@@ -1906,6 +1976,10 @@ static struct pernet_operations ipvs_core_ops = { | |||
1906 | .size = sizeof(struct netns_ipvs), | 1976 | .size = sizeof(struct netns_ipvs), |
1907 | }; | 1977 | }; |
1908 | 1978 | ||
1979 | static struct pernet_operations ipvs_core_dev_ops = { | ||
1980 | .exit = __ip_vs_dev_cleanup, | ||
1981 | }; | ||
1982 | |||
1909 | /* | 1983 | /* |
1910 | * Initialize IP Virtual Server | 1984 | * Initialize IP Virtual Server |
1911 | */ | 1985 | */ |
@@ -1913,10 +1987,6 @@ static int __init ip_vs_init(void) | |||
1913 | { | 1987 | { |
1914 | int ret; | 1988 | int ret; |
1915 | 1989 | ||
1916 | ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ | ||
1917 | if (ret < 0) | ||
1918 | return ret; | ||
1919 | |||
1920 | ip_vs_estimator_init(); | 1990 | ip_vs_estimator_init(); |
1921 | ret = ip_vs_control_init(); | 1991 | ret = ip_vs_control_init(); |
1922 | if (ret < 0) { | 1992 | if (ret < 0) { |
@@ -1944,15 +2014,28 @@ static int __init ip_vs_init(void) | |||
1944 | goto cleanup_conn; | 2014 | goto cleanup_conn; |
1945 | } | 2015 | } |
1946 | 2016 | ||
2017 | ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ | ||
2018 | if (ret < 0) | ||
2019 | goto cleanup_sync; | ||
2020 | |||
2021 | ret = register_pernet_device(&ipvs_core_dev_ops); | ||
2022 | if (ret < 0) | ||
2023 | goto cleanup_sub; | ||
2024 | |||
1947 | ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); | 2025 | ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); |
1948 | if (ret < 0) { | 2026 | if (ret < 0) { |
1949 | pr_err("can't register hooks.\n"); | 2027 | pr_err("can't register hooks.\n"); |
1950 | goto cleanup_sync; | 2028 | goto cleanup_dev; |
1951 | } | 2029 | } |
1952 | 2030 | ||
1953 | pr_info("ipvs loaded.\n"); | 2031 | pr_info("ipvs loaded.\n"); |
2032 | |||
1954 | return ret; | 2033 | return ret; |
1955 | 2034 | ||
2035 | cleanup_dev: | ||
2036 | unregister_pernet_device(&ipvs_core_dev_ops); | ||
2037 | cleanup_sub: | ||
2038 | unregister_pernet_subsys(&ipvs_core_ops); | ||
1956 | cleanup_sync: | 2039 | cleanup_sync: |
1957 | ip_vs_sync_cleanup(); | 2040 | ip_vs_sync_cleanup(); |
1958 | cleanup_conn: | 2041 | cleanup_conn: |
@@ -1964,20 +2047,20 @@ cleanup_sync: | |||
1964 | ip_vs_control_cleanup(); | 2047 | ip_vs_control_cleanup(); |
1965 | cleanup_estimator: | 2048 | cleanup_estimator: |
1966 | ip_vs_estimator_cleanup(); | 2049 | ip_vs_estimator_cleanup(); |
1967 | unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ | ||
1968 | return ret; | 2050 | return ret; |
1969 | } | 2051 | } |
1970 | 2052 | ||
1971 | static void __exit ip_vs_cleanup(void) | 2053 | static void __exit ip_vs_cleanup(void) |
1972 | { | 2054 | { |
1973 | nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); | 2055 | nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); |
2056 | unregister_pernet_device(&ipvs_core_dev_ops); | ||
2057 | unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ | ||
1974 | ip_vs_sync_cleanup(); | 2058 | ip_vs_sync_cleanup(); |
1975 | ip_vs_conn_cleanup(); | 2059 | ip_vs_conn_cleanup(); |
1976 | ip_vs_app_cleanup(); | 2060 | ip_vs_app_cleanup(); |
1977 | ip_vs_protocol_cleanup(); | 2061 | ip_vs_protocol_cleanup(); |
1978 | ip_vs_control_cleanup(); | 2062 | ip_vs_control_cleanup(); |
1979 | ip_vs_estimator_cleanup(); | 2063 | ip_vs_estimator_cleanup(); |
1980 | unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ | ||
1981 | pr_info("ipvs unloaded.\n"); | 2064 | pr_info("ipvs unloaded.\n"); |
1982 | } | 2065 | } |
1983 | 2066 | ||
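
The restructuring above splits netns handling in two. __ip_vs_init() now brings the per-net subsystems up in a fixed order with goto-style unwinding, and a second, device-scoped pernet_operations (ipvs_core_dev_ops) is registered so that its exit hook runs earlier during namespace teardown: packet reception is switched off and the sync threads stopped while connection tables and control state are still alive for the subsys-level exit to flush. A condensed sketch of the bring-up/unwind shape, with hypothetical helper names:

    static int __net_init ipvs_net_init(struct net *net)
    {
            if (estimator_init(net) < 0)
                    goto err_est;
            if (control_init(net) < 0)
                    goto err_ctl;
            if (protocol_init(net) < 0)
                    goto err_proto;
            if (app_init(net) < 0)
                    goto err_app;
            if (conn_init(net) < 0)
                    goto err_conn;
            if (sync_init(net) < 0)
                    goto err_sync;
            return 0;

    err_sync:                       /* unwind in strict reverse order */
            conn_cleanup(net);
    err_conn:
            app_cleanup(net);
    err_app:
            protocol_cleanup(net);
    err_proto:
            control_cleanup(net);
    err_ctl:
            estimator_cleanup(net);
    err_est:
            return -ENOMEM;
    }

Note also the ipvs->enable gate introduced here: every packet hook now bails out with NF_ACCEPT until the first service is configured (see the ip_vs_add_service() hunk below), so an idle netns pays no IPVS cost.
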
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index ae47090bf45f..37890f228b19 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -69,6 +69,11 @@ int ip_vs_get_debug_level(void) | |||
69 | } | 69 | } |
70 | #endif | 70 | #endif |
71 | 71 | ||
72 | |||
73 | /* Protos */ | ||
74 | static void __ip_vs_del_service(struct ip_vs_service *svc); | ||
75 | |||
76 | |||
72 | #ifdef CONFIG_IP_VS_IPV6 | 77 | #ifdef CONFIG_IP_VS_IPV6 |
73 | /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ | 78 | /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ |
74 | static int __ip_vs_addr_is_local_v6(struct net *net, | 79 | static int __ip_vs_addr_is_local_v6(struct net *net, |
@@ -1214,6 +1219,8 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, | |||
1214 | write_unlock_bh(&__ip_vs_svc_lock); | 1219 | write_unlock_bh(&__ip_vs_svc_lock); |
1215 | 1220 | ||
1216 | *svc_p = svc; | 1221 | *svc_p = svc; |
1222 | /* Now there is a service - full throttle */ | ||
1223 | ipvs->enable = 1; | ||
1217 | return 0; | 1224 | return 0; |
1218 | 1225 | ||
1219 | 1226 | ||
@@ -1472,6 +1479,84 @@ static int ip_vs_flush(struct net *net) | |||
1472 | return 0; | 1479 | return 0; |
1473 | } | 1480 | } |
1474 | 1481 | ||
1482 | /* | ||
1483 | * Delete all services belonging to a netns from the service table. | ||
1484 | * Called by __ip_vs_cleanup() | ||
1485 | */ | ||
1486 | void __ip_vs_service_cleanup(struct net *net) | ||
1487 | { | ||
1488 | EnterFunction(2); | ||
1489 | /* Flush all the service entries in this netns */ | ||
1490 | mutex_lock(&__ip_vs_mutex); | ||
1491 | ip_vs_flush(net); | ||
1492 | mutex_unlock(&__ip_vs_mutex); | ||
1493 | LeaveFunction(2); | ||
1494 | } | ||
1495 | /* | ||
1496 | * Release the dst held by dst_cache | ||
1497 | */ | ||
1498 | static inline void | ||
1499 | __ip_vs_dev_reset(struct ip_vs_dest *dest, struct net_device *dev) | ||
1500 | { | ||
1501 | spin_lock_bh(&dest->dst_lock); | ||
1502 | if (dest->dst_cache && dest->dst_cache->dev == dev) { | ||
1503 | IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n", | ||
1504 | dev->name, | ||
1505 | IP_VS_DBG_ADDR(dest->af, &dest->addr), | ||
1506 | ntohs(dest->port), | ||
1507 | atomic_read(&dest->refcnt)); | ||
1508 | ip_vs_dst_reset(dest); | ||
1509 | } | ||
1510 | spin_unlock_bh(&dest->dst_lock); | ||
1511 | |||
1512 | } | ||
1513 | /* | ||
1514 | * Netdev event receiver | ||
1515 | * Currently only NETDEV_UNREGISTER is handled, i.e. if we hold a reference | ||
1516 | * to a device that is being unregistered, it must be released. | ||
1517 | */ | ||
1518 | static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, | ||
1519 | void *ptr) | ||
1520 | { | ||
1521 | struct net_device *dev = ptr; | ||
1522 | struct net *net = dev_net(dev); | ||
1523 | struct ip_vs_service *svc; | ||
1524 | struct ip_vs_dest *dest; | ||
1525 | unsigned int idx; | ||
1526 | |||
1527 | if (event != NETDEV_UNREGISTER) | ||
1528 | return NOTIFY_DONE; | ||
1529 | IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); | ||
1530 | EnterFunction(2); | ||
1531 | mutex_lock(&__ip_vs_mutex); | ||
1532 | for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { | ||
1533 | list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { | ||
1534 | if (net_eq(svc->net, net)) { | ||
1535 | list_for_each_entry(dest, &svc->destinations, | ||
1536 | n_list) { | ||
1537 | __ip_vs_dev_reset(dest, dev); | ||
1538 | } | ||
1539 | } | ||
1540 | } | ||
1541 | |||
1542 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { | ||
1543 | if (net_eq(svc->net, net)) { | ||
1544 | list_for_each_entry(dest, &svc->destinations, | ||
1545 | n_list) { | ||
1546 | __ip_vs_dev_reset(dest, dev); | ||
1547 | } | ||
1548 | } | ||
1549 | |||
1550 | } | ||
1551 | } | ||
1552 | |||
1553 | list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) { | ||
1554 | __ip_vs_dev_reset(dest, dev); | ||
1555 | } | ||
1556 | mutex_unlock(&__ip_vs_mutex); | ||
1557 | LeaveFunction(2); | ||
1558 | return NOTIFY_DONE; | ||
1559 | } | ||
1475 | 1560 | ||
1476 | /* | 1561 | /* |
1477 | * Zero counters in a service or all services | 1562 | * Zero counters in a service or all services |
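The notifier added above exists so that destination dst_cache entries pinned to a disappearing device are dropped; without it the device's reference count could never reach zero and namespace teardown would stall waiting for it. A skeleton of the pattern, assuming (as the comment says) that only NETDEV_UNREGISTER needs handling:

    static int example_dst_event(struct notifier_block *this,
                                 unsigned long event, void *ptr)
    {
            struct net_device *dev = ptr;

            if (event != NETDEV_UNREGISTER)
                    return NOTIFY_DONE;
            /* walk private caches here and drop references to @dev */
            return NOTIFY_DONE;
    }

    static struct notifier_block example_dst_notifier = {
            .notifier_call = example_dst_event,
    };

The block is registered once from module init with register_netdevice_notifier(), as the ip_vs_control_init() hunk below does for the real ip_vs_dst_notifier.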
@@ -1981,7 +2066,7 @@ static const struct file_operations ip_vs_info_fops = { | |||
1981 | .open = ip_vs_info_open, | 2066 | .open = ip_vs_info_open, |
1982 | .read = seq_read, | 2067 | .read = seq_read, |
1983 | .llseek = seq_lseek, | 2068 | .llseek = seq_lseek, |
1984 | .release = seq_release_private, | 2069 | .release = seq_release_net, |
1985 | }; | 2070 | }; |
1986 | 2071 | ||
1987 | #endif | 2072 | #endif |
@@ -2024,7 +2109,7 @@ static const struct file_operations ip_vs_stats_fops = { | |||
2024 | .open = ip_vs_stats_seq_open, | 2109 | .open = ip_vs_stats_seq_open, |
2025 | .read = seq_read, | 2110 | .read = seq_read, |
2026 | .llseek = seq_lseek, | 2111 | .llseek = seq_lseek, |
2027 | .release = single_release, | 2112 | .release = single_release_net, |
2028 | }; | 2113 | }; |
2029 | 2114 | ||
2030 | static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) | 2115 | static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) |
@@ -2093,7 +2178,7 @@ static const struct file_operations ip_vs_stats_percpu_fops = { | |||
2093 | .open = ip_vs_stats_percpu_seq_open, | 2178 | .open = ip_vs_stats_percpu_seq_open, |
2094 | .read = seq_read, | 2179 | .read = seq_read, |
2095 | .llseek = seq_lseek, | 2180 | .llseek = seq_lseek, |
2096 | .release = single_release, | 2181 | .release = single_release_net, |
2097 | }; | 2182 | }; |
2098 | #endif | 2183 | #endif |
2099 | 2184 | ||
@@ -3588,6 +3673,10 @@ void __net_init __ip_vs_control_cleanup_sysctl(struct net *net) { } | |||
3588 | 3673 | ||
3589 | #endif | 3674 | #endif |
3590 | 3675 | ||
3676 | static struct notifier_block ip_vs_dst_notifier = { | ||
3677 | .notifier_call = ip_vs_dst_event, | ||
3678 | }; | ||
3679 | |||
3591 | int __net_init __ip_vs_control_init(struct net *net) | 3680 | int __net_init __ip_vs_control_init(struct net *net) |
3592 | { | 3681 | { |
3593 | int idx; | 3682 | int idx; |
@@ -3626,7 +3715,7 @@ err: | |||
3626 | return -ENOMEM; | 3715 | return -ENOMEM; |
3627 | } | 3716 | } |
3628 | 3717 | ||
3629 | static void __net_exit __ip_vs_control_cleanup(struct net *net) | 3718 | void __net_exit __ip_vs_control_cleanup(struct net *net) |
3630 | { | 3719 | { |
3631 | struct netns_ipvs *ipvs = net_ipvs(net); | 3720 | struct netns_ipvs *ipvs = net_ipvs(net); |
3632 | 3721 | ||
@@ -3639,11 +3728,6 @@ static void __net_exit __ip_vs_control_cleanup(struct net *net) | |||
3639 | free_percpu(ipvs->tot_stats.cpustats); | 3728 | free_percpu(ipvs->tot_stats.cpustats); |
3640 | } | 3729 | } |
3641 | 3730 | ||
3642 | static struct pernet_operations ipvs_control_ops = { | ||
3643 | .init = __ip_vs_control_init, | ||
3644 | .exit = __ip_vs_control_cleanup, | ||
3645 | }; | ||
3646 | |||
3647 | int __init ip_vs_control_init(void) | 3731 | int __init ip_vs_control_init(void) |
3648 | { | 3732 | { |
3649 | int idx; | 3733 | int idx; |
@@ -3657,33 +3741,32 @@ int __init ip_vs_control_init(void) | |||
3657 | INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); | 3741 | INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); |
3658 | } | 3742 | } |
3659 | 3743 | ||
3660 | ret = register_pernet_subsys(&ipvs_control_ops); | ||
3661 | if (ret) { | ||
3662 | pr_err("cannot register namespace.\n"); | ||
3663 | goto err; | ||
3664 | } | ||
3665 | |||
3666 | smp_wmb(); /* Do we really need it now ? */ | 3744 | smp_wmb(); /* Do we really need it now ? */ |
3667 | 3745 | ||
3668 | ret = nf_register_sockopt(&ip_vs_sockopts); | 3746 | ret = nf_register_sockopt(&ip_vs_sockopts); |
3669 | if (ret) { | 3747 | if (ret) { |
3670 | pr_err("cannot register sockopt.\n"); | 3748 | pr_err("cannot register sockopt.\n"); |
3671 | goto err_net; | 3749 | goto err_sock; |
3672 | } | 3750 | } |
3673 | 3751 | ||
3674 | ret = ip_vs_genl_register(); | 3752 | ret = ip_vs_genl_register(); |
3675 | if (ret) { | 3753 | if (ret) { |
3676 | pr_err("cannot register Generic Netlink interface.\n"); | 3754 | pr_err("cannot register Generic Netlink interface.\n"); |
3677 | nf_unregister_sockopt(&ip_vs_sockopts); | 3755 | goto err_genl; |
3678 | goto err_net; | ||
3679 | } | 3756 | } |
3680 | 3757 | ||
3758 | ret = register_netdevice_notifier(&ip_vs_dst_notifier); | ||
3759 | if (ret < 0) | ||
3760 | goto err_notf; | ||
3761 | |||
3681 | LeaveFunction(2); | 3762 | LeaveFunction(2); |
3682 | return 0; | 3763 | return 0; |
3683 | 3764 | ||
3684 | err_net: | 3765 | err_notf: |
3685 | unregister_pernet_subsys(&ipvs_control_ops); | 3766 | ip_vs_genl_unregister(); |
3686 | err: | 3767 | err_genl: |
3768 | nf_unregister_sockopt(&ip_vs_sockopts); | ||
3769 | err_sock: | ||
3687 | return ret; | 3770 | return ret; |
3688 | } | 3771 | } |
3689 | 3772 | ||
@@ -3691,7 +3774,6 @@ err: | |||
3691 | void ip_vs_control_cleanup(void) | 3774 | void ip_vs_control_cleanup(void) |
3692 | { | 3775 | { |
3693 | EnterFunction(2); | 3776 | EnterFunction(2); |
3694 | unregister_pernet_subsys(&ipvs_control_ops); | ||
3695 | ip_vs_genl_unregister(); | 3777 | ip_vs_genl_unregister(); |
3696 | nf_unregister_sockopt(&ip_vs_sockopts); | 3778 | nf_unregister_sockopt(&ip_vs_sockopts); |
3697 | LeaveFunction(2); | 3779 | LeaveFunction(2); |
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c index 8c8766ca56ad..508cce98777c 100644 --- a/net/netfilter/ipvs/ip_vs_est.c +++ b/net/netfilter/ipvs/ip_vs_est.c | |||
@@ -192,7 +192,7 @@ void ip_vs_read_estimator(struct ip_vs_stats_user *dst, | |||
192 | dst->outbps = (e->outbps + 0xF) >> 5; | 192 | dst->outbps = (e->outbps + 0xF) >> 5; |
193 | } | 193 | } |
194 | 194 | ||
195 | static int __net_init __ip_vs_estimator_init(struct net *net) | 195 | int __net_init __ip_vs_estimator_init(struct net *net) |
196 | { | 196 | { |
197 | struct netns_ipvs *ipvs = net_ipvs(net); | 197 | struct netns_ipvs *ipvs = net_ipvs(net); |
198 | 198 | ||
@@ -203,24 +203,16 @@ static int __net_init __ip_vs_estimator_init(struct net *net) | |||
203 | return 0; | 203 | return 0; |
204 | } | 204 | } |
205 | 205 | ||
206 | static void __net_exit __ip_vs_estimator_exit(struct net *net) | 206 | void __net_exit __ip_vs_estimator_cleanup(struct net *net) |
207 | { | 207 | { |
208 | del_timer_sync(&net_ipvs(net)->est_timer); | 208 | del_timer_sync(&net_ipvs(net)->est_timer); |
209 | } | 209 | } |
210 | static struct pernet_operations ip_vs_app_ops = { | ||
211 | .init = __ip_vs_estimator_init, | ||
212 | .exit = __ip_vs_estimator_exit, | ||
213 | }; | ||
214 | 210 | ||
215 | int __init ip_vs_estimator_init(void) | 211 | int __init ip_vs_estimator_init(void) |
216 | { | 212 | { |
217 | int rv; | 213 | return 0; |
218 | |||
219 | rv = register_pernet_subsys(&ip_vs_app_ops); | ||
220 | return rv; | ||
221 | } | 214 | } |
222 | 215 | ||
223 | void ip_vs_estimator_cleanup(void) | 216 | void ip_vs_estimator_cleanup(void) |
224 | { | 217 | { |
225 | unregister_pernet_subsys(&ip_vs_app_ops); | ||
226 | } | 218 | } |
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index 17484a4416ef..eb86028536fc 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c | |||
@@ -316,7 +316,7 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, | |||
316 | /* | 316 | /* |
317 | * per network name-space init | 317 | * per network name-space init |
318 | */ | 318 | */ |
319 | static int __net_init __ip_vs_protocol_init(struct net *net) | 319 | int __net_init __ip_vs_protocol_init(struct net *net) |
320 | { | 320 | { |
321 | #ifdef CONFIG_IP_VS_PROTO_TCP | 321 | #ifdef CONFIG_IP_VS_PROTO_TCP |
322 | register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); | 322 | register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); |
@@ -336,7 +336,7 @@ static int __net_init __ip_vs_protocol_init(struct net *net) | |||
336 | return 0; | 336 | return 0; |
337 | } | 337 | } |
338 | 338 | ||
339 | static void __net_exit __ip_vs_protocol_cleanup(struct net *net) | 339 | void __net_exit __ip_vs_protocol_cleanup(struct net *net) |
340 | { | 340 | { |
341 | struct netns_ipvs *ipvs = net_ipvs(net); | 341 | struct netns_ipvs *ipvs = net_ipvs(net); |
342 | struct ip_vs_proto_data *pd; | 342 | struct ip_vs_proto_data *pd; |
@@ -349,11 +349,6 @@ static void __net_exit __ip_vs_protocol_cleanup(struct net *net) | |||
349 | } | 349 | } |
350 | } | 350 | } |
351 | 351 | ||
352 | static struct pernet_operations ipvs_proto_ops = { | ||
353 | .init = __ip_vs_protocol_init, | ||
354 | .exit = __ip_vs_protocol_cleanup, | ||
355 | }; | ||
356 | |||
357 | int __init ip_vs_protocol_init(void) | 352 | int __init ip_vs_protocol_init(void) |
358 | { | 353 | { |
359 | char protocols[64]; | 354 | char protocols[64]; |
@@ -382,7 +377,6 @@ int __init ip_vs_protocol_init(void) | |||
382 | REGISTER_PROTOCOL(&ip_vs_protocol_esp); | 377 | REGISTER_PROTOCOL(&ip_vs_protocol_esp); |
383 | #endif | 378 | #endif |
384 | pr_info("Registered protocols (%s)\n", &protocols[2]); | 379 | pr_info("Registered protocols (%s)\n", &protocols[2]); |
385 | return register_pernet_subsys(&ipvs_proto_ops); | ||
386 | 380 | ||
387 | return 0; | 381 | return 0; |
388 | } | 382 | } |
@@ -393,7 +387,6 @@ void ip_vs_protocol_cleanup(void) | |||
393 | struct ip_vs_protocol *pp; | 387 | struct ip_vs_protocol *pp; |
394 | int i; | 388 | int i; |
395 | 389 | ||
396 | unregister_pernet_subsys(&ipvs_proto_ops); | ||
397 | /* unregister all the ipvs protocols */ | 390 | /* unregister all the ipvs protocols */ |
398 | for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { | 391 | for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { |
399 | while ((pp = ip_vs_proto_table[i]) != NULL) | 392 | while ((pp = ip_vs_proto_table[i]) != NULL) |
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 3e7961e85e9c..e292e5bddc70 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -1303,13 +1303,18 @@ static struct socket *make_send_sock(struct net *net) | |||
1303 | struct socket *sock; | 1303 | struct socket *sock; |
1304 | int result; | 1304 | int result; |
1305 | 1305 | ||
1306 | /* First create a socket */ | 1306 | /* First create a socket, then move it to the right namespace later */ |
1307 | result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); | 1307 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); |
1308 | if (result < 0) { | 1308 | if (result < 0) { |
1309 | pr_err("Error during creation of socket; terminating\n"); | 1309 | pr_err("Error during creation of socket; terminating\n"); |
1310 | return ERR_PTR(result); | 1310 | return ERR_PTR(result); |
1311 | } | 1311 | } |
1312 | 1312 | /* | |
1313 | * Kernel sockets that are part of a namespace should not hold a | ||
1314 | * reference to the namespace, so that the namespace can be stopped. | ||
1315 | * After sk_change_net() the socket must be released with sk_release_kernel(). | ||
1316 | */ | ||
1317 | sk_change_net(sock->sk, net); | ||
1313 | result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); | 1318 | result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); |
1314 | if (result < 0) { | 1319 | if (result < 0) { |
1315 | pr_err("Error setting outbound mcast interface\n"); | 1320 | pr_err("Error setting outbound mcast interface\n"); |
@@ -1334,8 +1339,8 @@ static struct socket *make_send_sock(struct net *net) | |||
1334 | 1339 | ||
1335 | return sock; | 1340 | return sock; |
1336 | 1341 | ||
1337 | error: | 1342 | error: |
1338 | sock_release(sock); | 1343 | sk_release_kernel(sock->sk); |
1339 | return ERR_PTR(result); | 1344 | return ERR_PTR(result); |
1340 | } | 1345 | } |
1341 | 1346 | ||
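Both socket constructors in ip_vs_sync.c now follow the same two-step pattern described in the new comments: create the socket without binding its lifetime to the target namespace, then re-home it. A condensed sketch (error handling trimmed; an illustration, not the exact IPVS code):

    static struct socket *example_make_sock(struct net *net)
    {
            struct socket *sock;
            int err;

            /* create without taking a namespace reference */
            err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
            if (err < 0)
                    return ERR_PTR(err);
            /* move into @net; still no namespace reference held */
            sk_change_net(sock->sk, net);
            return sock;
    }

    static void example_release_sock(struct socket *sock)
    {
            /* pairs with sk_change_net(); plain sock_release() would
             * drop a namespace reference this socket never took */
            sk_release_kernel(sock->sk);
    }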
@@ -1350,12 +1355,17 @@ static struct socket *make_receive_sock(struct net *net) | |||
1350 | int result; | 1355 | int result; |
1351 | 1356 | ||
1352 | /* First create a socket */ | 1357 | /* First create a socket */ |
1353 | result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); | 1358 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); |
1354 | if (result < 0) { | 1359 | if (result < 0) { |
1355 | pr_err("Error during creation of socket; terminating\n"); | 1360 | pr_err("Error during creation of socket; terminating\n"); |
1356 | return ERR_PTR(result); | 1361 | return ERR_PTR(result); |
1357 | } | 1362 | } |
1358 | 1363 | /* | |
1364 | * Kernel sockets that are part of a namespace should not hold a | ||
1365 | * reference to the namespace, so that the namespace can be stopped. | ||
1366 | * After sk_change_net() the socket must be released with sk_release_kernel(). | ||
1367 | */ | ||
1368 | sk_change_net(sock->sk, net); | ||
1359 | /* it is equivalent to the REUSEADDR option in user-space */ | 1369 | /* it is equivalent to the REUSEADDR option in user-space */ |
1360 | sock->sk->sk_reuse = 1; | 1370 | sock->sk->sk_reuse = 1; |
1361 | 1371 | ||
@@ -1377,8 +1387,8 @@ static struct socket *make_receive_sock(struct net *net) | |||
1377 | 1387 | ||
1378 | return sock; | 1388 | return sock; |
1379 | 1389 | ||
1380 | error: | 1390 | error: |
1381 | sock_release(sock); | 1391 | sk_release_kernel(sock->sk); |
1382 | return ERR_PTR(result); | 1392 | return ERR_PTR(result); |
1383 | } | 1393 | } |
1384 | 1394 | ||
@@ -1473,7 +1483,7 @@ static int sync_thread_master(void *data) | |||
1473 | ip_vs_sync_buff_release(sb); | 1483 | ip_vs_sync_buff_release(sb); |
1474 | 1484 | ||
1475 | /* release the sending multicast socket */ | 1485 | /* release the sending multicast socket */ |
1476 | sock_release(tinfo->sock); | 1486 | sk_release_kernel(tinfo->sock->sk); |
1477 | kfree(tinfo); | 1487 | kfree(tinfo); |
1478 | 1488 | ||
1479 | return 0; | 1489 | return 0; |
@@ -1513,7 +1523,7 @@ static int sync_thread_backup(void *data) | |||
1513 | } | 1523 | } |
1514 | 1524 | ||
1515 | /* release the sending multicast socket */ | 1525 | /* release the sending multicast socket */ |
1516 | sock_release(tinfo->sock); | 1526 | sk_release_kernel(tinfo->sock->sk); |
1517 | kfree(tinfo->buf); | 1527 | kfree(tinfo->buf); |
1518 | kfree(tinfo); | 1528 | kfree(tinfo); |
1519 | 1529 | ||
@@ -1601,7 +1611,7 @@ outtinfo: | |||
1601 | outbuf: | 1611 | outbuf: |
1602 | kfree(buf); | 1612 | kfree(buf); |
1603 | outsocket: | 1613 | outsocket: |
1604 | sock_release(sock); | 1614 | sk_release_kernel(sock->sk); |
1605 | out: | 1615 | out: |
1606 | return result; | 1616 | return result; |
1607 | } | 1617 | } |
@@ -1610,6 +1620,7 @@ out: | |||
1610 | int stop_sync_thread(struct net *net, int state) | 1620 | int stop_sync_thread(struct net *net, int state) |
1611 | { | 1621 | { |
1612 | struct netns_ipvs *ipvs = net_ipvs(net); | 1622 | struct netns_ipvs *ipvs = net_ipvs(net); |
1623 | int retc = -EINVAL; | ||
1613 | 1624 | ||
1614 | IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); | 1625 | IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); |
1615 | 1626 | ||
@@ -1629,7 +1640,7 @@ int stop_sync_thread(struct net *net, int state) | |||
1629 | spin_lock_bh(&ipvs->sync_lock); | 1640 | spin_lock_bh(&ipvs->sync_lock); |
1630 | ipvs->sync_state &= ~IP_VS_STATE_MASTER; | 1641 | ipvs->sync_state &= ~IP_VS_STATE_MASTER; |
1631 | spin_unlock_bh(&ipvs->sync_lock); | 1642 | spin_unlock_bh(&ipvs->sync_lock); |
1632 | kthread_stop(ipvs->master_thread); | 1643 | retc = kthread_stop(ipvs->master_thread); |
1633 | ipvs->master_thread = NULL; | 1644 | ipvs->master_thread = NULL; |
1634 | } else if (state == IP_VS_STATE_BACKUP) { | 1645 | } else if (state == IP_VS_STATE_BACKUP) { |
1635 | if (!ipvs->backup_thread) | 1646 | if (!ipvs->backup_thread) |
@@ -1639,22 +1650,20 @@ int stop_sync_thread(struct net *net, int state) | |||
1639 | task_pid_nr(ipvs->backup_thread)); | 1650 | task_pid_nr(ipvs->backup_thread)); |
1640 | 1651 | ||
1641 | ipvs->sync_state &= ~IP_VS_STATE_BACKUP; | 1652 | ipvs->sync_state &= ~IP_VS_STATE_BACKUP; |
1642 | kthread_stop(ipvs->backup_thread); | 1653 | retc = kthread_stop(ipvs->backup_thread); |
1643 | ipvs->backup_thread = NULL; | 1654 | ipvs->backup_thread = NULL; |
1644 | } else { | ||
1645 | return -EINVAL; | ||
1646 | } | 1655 | } |
1647 | 1656 | ||
1648 | /* decrease the module use count */ | 1657 | /* decrease the module use count */ |
1649 | ip_vs_use_count_dec(); | 1658 | ip_vs_use_count_dec(); |
1650 | 1659 | ||
1651 | return 0; | 1660 | return retc; |
1652 | } | 1661 | } |
1653 | 1662 | ||
1654 | /* | 1663 | /* |
1655 | * Initialize data struct for each netns | 1664 | * Initialize data struct for each netns |
1656 | */ | 1665 | */ |
1657 | static int __net_init __ip_vs_sync_init(struct net *net) | 1666 | int __net_init __ip_vs_sync_init(struct net *net) |
1658 | { | 1667 | { |
1659 | struct netns_ipvs *ipvs = net_ipvs(net); | 1668 | struct netns_ipvs *ipvs = net_ipvs(net); |
1660 | 1669 | ||
@@ -1668,24 +1677,24 @@ static int __net_init __ip_vs_sync_init(struct net *net) | |||
1668 | return 0; | 1677 | return 0; |
1669 | } | 1678 | } |
1670 | 1679 | ||
1671 | static void __ip_vs_sync_cleanup(struct net *net) | 1680 | void __ip_vs_sync_cleanup(struct net *net) |
1672 | { | 1681 | { |
1673 | stop_sync_thread(net, IP_VS_STATE_MASTER); | 1682 | int retc; |
1674 | stop_sync_thread(net, IP_VS_STATE_BACKUP); | ||
1675 | } | ||
1676 | 1683 | ||
1677 | static struct pernet_operations ipvs_sync_ops = { | 1684 | retc = stop_sync_thread(net, IP_VS_STATE_MASTER); |
1678 | .init = __ip_vs_sync_init, | 1685 | if (retc && retc != -ESRCH) |
1679 | .exit = __ip_vs_sync_cleanup, | 1686 | pr_err("Failed to stop Master Daemon\n"); |
1680 | }; | ||
1681 | 1687 | ||
1688 | retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); | ||
1689 | if (retc && retc != -ESRCH) | ||
1690 | pr_err("Failed to stop Backup Daemon\n"); | ||
1691 | } | ||
1682 | 1692 | ||
1683 | int __init ip_vs_sync_init(void) | 1693 | int __init ip_vs_sync_init(void) |
1684 | { | 1694 | { |
1685 | return register_pernet_subsys(&ipvs_sync_ops); | 1695 | return 0; |
1686 | } | 1696 | } |
1687 | 1697 | ||
1688 | void ip_vs_sync_cleanup(void) | 1698 | void ip_vs_sync_cleanup(void) |
1689 | { | 1699 | { |
1690 | unregister_pernet_subsys(&ipvs_sync_ops); | ||
1691 | } | 1700 | } |
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 80a23ed62bb0..05ecdc281a53 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c | |||
@@ -68,12 +68,6 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp) | |||
68 | return (void *)(*ext) + off; | 68 | return (void *)(*ext) + off; |
69 | } | 69 | } |
70 | 70 | ||
71 | static void __nf_ct_ext_free_rcu(struct rcu_head *head) | ||
72 | { | ||
73 | struct nf_ct_ext *ext = container_of(head, struct nf_ct_ext, rcu); | ||
74 | kfree(ext); | ||
75 | } | ||
76 | |||
77 | void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) | 71 | void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) |
78 | { | 72 | { |
79 | struct nf_ct_ext *old, *new; | 73 | struct nf_ct_ext *old, *new; |
@@ -114,7 +108,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) | |||
114 | (void *)old + old->offset[i]); | 108 | (void *)old + old->offset[i]); |
115 | rcu_read_unlock(); | 109 | rcu_read_unlock(); |
116 | } | 110 | } |
117 | call_rcu(&old->rcu, __nf_ct_ext_free_rcu); | 111 | kfree_rcu(old, rcu); |
118 | ct->ext = new; | 112 | ct->ext = new; |
119 | } | 113 | } |
120 | 114 | ||
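This hunk is the first of many in this series applying the same conversion: an RCU free callback whose body is only kfree(container_of(...)) is replaced by kfree_rcu(), which takes the object and the name of its rcu_head member. A minimal sketch of the transformation, on an illustrative struct:

    struct example {
            long payload;
            struct rcu_head rcu;
    };

    static void example_del(struct example *e)
    {
            /* was: call_rcu(&e->rcu, cb) with a cb that only kfree()d;
             * kfree_rcu() frees the object after a grace period with
             * no callback function needed */
            kfree_rcu(e, rcu);
    }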
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 30bf8a167fc8..482e90c61850 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -1334,6 +1334,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, | |||
1334 | struct nf_conn *ct; | 1334 | struct nf_conn *ct; |
1335 | int err = -EINVAL; | 1335 | int err = -EINVAL; |
1336 | struct nf_conntrack_helper *helper; | 1336 | struct nf_conntrack_helper *helper; |
1337 | struct nf_conn_tstamp *tstamp; | ||
1337 | 1338 | ||
1338 | ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); | 1339 | ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); |
1339 | if (IS_ERR(ct)) | 1340 | if (IS_ERR(ct)) |
@@ -1451,6 +1452,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, | |||
1451 | __set_bit(IPS_EXPECTED_BIT, &ct->status); | 1452 | __set_bit(IPS_EXPECTED_BIT, &ct->status); |
1452 | ct->master = master_ct; | 1453 | ct->master = master_ct; |
1453 | } | 1454 | } |
1455 | tstamp = nf_conn_tstamp_find(ct); | ||
1456 | if (tstamp) | ||
1457 | tstamp->start = ktime_to_ns(ktime_get_real()); | ||
1454 | 1458 | ||
1455 | add_timer(&ct->timeout); | 1459 | add_timer(&ct->timeout); |
1456 | nf_conntrack_hash_insert(ct); | 1460 | nf_conntrack_hash_insert(ct); |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index a9adf4c6b299..8a025a585d2f 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -455,6 +455,7 @@ void xt_compat_flush_offsets(u_int8_t af) | |||
455 | vfree(xt[af].compat_tab); | 455 | vfree(xt[af].compat_tab); |
456 | xt[af].compat_tab = NULL; | 456 | xt[af].compat_tab = NULL; |
457 | xt[af].number = 0; | 457 | xt[af].number = 0; |
458 | xt[af].cur = 0; | ||
458 | } | 459 | } |
459 | } | 460 | } |
460 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); | 461 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); |
@@ -473,8 +474,7 @@ int xt_compat_calc_jump(u_int8_t af, unsigned int offset) | |||
473 | else | 474 | else |
474 | return mid ? tmp[mid - 1].delta : 0; | 475 | return mid ? tmp[mid - 1].delta : 0; |
475 | } | 476 | } |
476 | WARN_ON_ONCE(1); | 477 | return left ? tmp[left - 1].delta : 0; |
477 | return 0; | ||
478 | } | 478 | } |
479 | EXPORT_SYMBOL_GPL(xt_compat_calc_jump); | 479 | EXPORT_SYMBOL_GPL(xt_compat_calc_jump); |
480 | 480 | ||
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c index 0a229191e55b..ae8271652efa 100644 --- a/net/netfilter/xt_DSCP.c +++ b/net/netfilter/xt_DSCP.c | |||
@@ -99,7 +99,7 @@ tos_tg6(struct sk_buff *skb, const struct xt_action_param *par) | |||
99 | u_int8_t orig, nv; | 99 | u_int8_t orig, nv; |
100 | 100 | ||
101 | orig = ipv6_get_dsfield(iph); | 101 | orig = ipv6_get_dsfield(iph); |
102 | nv = (orig & info->tos_mask) ^ info->tos_value; | 102 | nv = (orig & ~info->tos_mask) ^ info->tos_value; |
103 | 103 | ||
104 | if (orig != nv) { | 104 | if (orig != nv) { |
105 | if (!skb_make_writable(skb, sizeof(struct iphdr))) | 105 | if (!skb_make_writable(skb, sizeof(struct iphdr))) |
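The one-character fix above is the whole bug: tos_mask selects the bits the target owns, so the bits outside the mask must be preserved before the new value is xor-ed in. A worked example with illustrative numbers:

    static unsigned char tos_apply(unsigned char orig,
                                   unsigned char mask, unsigned char value)
    {
            /* clear the owned bits, then xor in the new value */
            return (orig & ~mask) ^ value;
    }

    /* tos_apply(0xff, 0x3f, 0x12) == 0xd2: the two bits outside the
     * mask survive.  The old code computed (0xff & 0x3f) ^ 0x12 ==
     * 0x2d, clobbering them. */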
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 481a86fdc409..61805d7b38aa 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c | |||
@@ -272,11 +272,6 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par) | |||
272 | { | 272 | { |
273 | int ret; | 273 | int ret; |
274 | 274 | ||
275 | if (strcmp(par->table, "raw") == 0) { | ||
276 | pr_info("state is undetermined at the time of raw table\n"); | ||
277 | return -EINVAL; | ||
278 | } | ||
279 | |||
280 | ret = nf_ct_l3proto_try_module_get(par->family); | 275 | ret = nf_ct_l3proto_try_module_get(par->family); |
281 | if (ret < 0) | 276 | if (ret < 0) |
282 | pr_info("cannot load conntrack support for proto=%u\n", | 277 | pr_info("cannot load conntrack support for proto=%u\n", |
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c index 4327e101c047..846f895cb656 100644 --- a/net/netfilter/xt_osf.c +++ b/net/netfilter/xt_osf.c | |||
@@ -62,13 +62,6 @@ static const struct nla_policy xt_osf_policy[OSF_ATTR_MAX + 1] = { | |||
62 | [OSF_ATTR_FINGER] = { .len = sizeof(struct xt_osf_user_finger) }, | 62 | [OSF_ATTR_FINGER] = { .len = sizeof(struct xt_osf_user_finger) }, |
63 | }; | 63 | }; |
64 | 64 | ||
65 | static void xt_osf_finger_free_rcu(struct rcu_head *rcu_head) | ||
66 | { | ||
67 | struct xt_osf_finger *f = container_of(rcu_head, struct xt_osf_finger, rcu_head); | ||
68 | |||
69 | kfree(f); | ||
70 | } | ||
71 | |||
72 | static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb, | 65 | static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb, |
73 | const struct nlmsghdr *nlh, | 66 | const struct nlmsghdr *nlh, |
74 | const struct nlattr * const osf_attrs[]) | 67 | const struct nlattr * const osf_attrs[]) |
@@ -133,7 +126,7 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb, | |||
133 | * We are protected by nfnl mutex. | 126 | * We are protected by nfnl mutex. |
134 | */ | 127 | */ |
135 | list_del_rcu(&sf->finger_entry); | 128 | list_del_rcu(&sf->finger_entry); |
136 | call_rcu(&sf->rcu_head, xt_osf_finger_free_rcu); | 129 | kfree_rcu(sf, rcu_head); |
137 | 130 | ||
138 | err = 0; | 131 | err = 0; |
139 | break; | 132 | break; |
@@ -414,7 +407,7 @@ static void __exit xt_osf_fini(void) | |||
414 | 407 | ||
415 | list_for_each_entry_rcu(f, &xt_osf_fingers[i], finger_entry) { | 408 | list_for_each_entry_rcu(f, &xt_osf_fingers[i], finger_entry) { |
416 | list_del_rcu(&f->finger_entry); | 409 | list_del_rcu(&f->finger_entry); |
417 | call_rcu(&f->rcu_head, xt_osf_finger_free_rcu); | 410 | kfree_rcu(f, rcu_head); |
418 | } | 411 | } |
419 | } | 412 | } |
420 | rcu_read_unlock(); | 413 | rcu_read_unlock(); |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index e2b0a680dd56..9c38658fba8b 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -154,44 +154,6 @@ static const struct nla_policy netlbl_unlabel_genl_policy[NLBL_UNLABEL_A_MAX + 1 | |||
154 | */ | 154 | */ |
155 | 155 | ||
156 | /** | 156 | /** |
157 | * netlbl_unlhsh_free_addr4 - Frees an IPv4 address entry from the hash table | ||
158 | * @entry: the entry's RCU field | ||
159 | * | ||
160 | * Description: | ||
161 | * This function is designed to be used as a callback to the call_rcu() | ||
162 | * function so that memory allocated to a hash table address entry can be | ||
163 | * released safely. | ||
164 | * | ||
165 | */ | ||
166 | static void netlbl_unlhsh_free_addr4(struct rcu_head *entry) | ||
167 | { | ||
168 | struct netlbl_unlhsh_addr4 *ptr; | ||
169 | |||
170 | ptr = container_of(entry, struct netlbl_unlhsh_addr4, rcu); | ||
171 | kfree(ptr); | ||
172 | } | ||
173 | |||
174 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
175 | /** | ||
176 | * netlbl_unlhsh_free_addr6 - Frees an IPv6 address entry from the hash table | ||
177 | * @entry: the entry's RCU field | ||
178 | * | ||
179 | * Description: | ||
180 | * This function is designed to be used as a callback to the call_rcu() | ||
181 | * function so that memory allocated to a hash table address entry can be | ||
182 | * released safely. | ||
183 | * | ||
184 | */ | ||
185 | static void netlbl_unlhsh_free_addr6(struct rcu_head *entry) | ||
186 | { | ||
187 | struct netlbl_unlhsh_addr6 *ptr; | ||
188 | |||
189 | ptr = container_of(entry, struct netlbl_unlhsh_addr6, rcu); | ||
190 | kfree(ptr); | ||
191 | } | ||
192 | #endif /* IPv6 */ | ||
193 | |||
194 | /** | ||
195 | * netlbl_unlhsh_free_iface - Frees an interface entry from the hash table | 157 | * netlbl_unlhsh_free_iface - Frees an interface entry from the hash table |
196 | * @entry: the entry's RCU field | 158 | * @entry: the entry's RCU field |
197 | * | 159 | * |
@@ -568,7 +530,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net, | |||
568 | if (entry == NULL) | 530 | if (entry == NULL) |
569 | return -ENOENT; | 531 | return -ENOENT; |
570 | 532 | ||
571 | call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4); | 533 | kfree_rcu(entry, rcu); |
572 | return 0; | 534 | return 0; |
573 | } | 535 | } |
574 | 536 | ||
@@ -629,7 +591,7 @@ static int netlbl_unlhsh_remove_addr6(struct net *net, | |||
629 | if (entry == NULL) | 591 | if (entry == NULL) |
630 | return -ENOENT; | 592 | return -ENOENT; |
631 | 593 | ||
632 | call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6); | 594 | kfree_rcu(entry, rcu); |
633 | return 0; | 595 | return 0; |
634 | } | 596 | } |
635 | #endif /* IPv6 */ | 597 | #endif /* IPv6 */ |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index c8f35b5d2ee9..5fe4f3b04ed3 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1566,12 +1566,6 @@ netlink_kernel_release(struct sock *sk) | |||
1566 | } | 1566 | } |
1567 | EXPORT_SYMBOL(netlink_kernel_release); | 1567 | EXPORT_SYMBOL(netlink_kernel_release); |
1568 | 1568 | ||
1569 | |||
1570 | static void listeners_free_rcu(struct rcu_head *head) | ||
1571 | { | ||
1572 | kfree(container_of(head, struct listeners, rcu)); | ||
1573 | } | ||
1574 | |||
1575 | int __netlink_change_ngroups(struct sock *sk, unsigned int groups) | 1569 | int __netlink_change_ngroups(struct sock *sk, unsigned int groups) |
1576 | { | 1570 | { |
1577 | struct listeners *new, *old; | 1571 | struct listeners *new, *old; |
@@ -1588,7 +1582,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups) | |||
1588 | memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); | 1582 | memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); |
1589 | rcu_assign_pointer(tbl->listeners, new); | 1583 | rcu_assign_pointer(tbl->listeners, new); |
1590 | 1584 | ||
1591 | call_rcu(&old->rcu, listeners_free_rcu); | 1585 | kfree_rcu(old, rcu); |
1592 | } | 1586 | } |
1593 | tbl->groups = groups; | 1587 | tbl->groups = groups; |
1594 | 1588 | ||
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 947038ddd04c..1566672235dd 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c | |||
@@ -162,14 +162,6 @@ int phonet_address_add(struct net_device *dev, u8 addr) | |||
162 | return err; | 162 | return err; |
163 | } | 163 | } |
164 | 164 | ||
165 | static void phonet_device_rcu_free(struct rcu_head *head) | ||
166 | { | ||
167 | struct phonet_device *pnd; | ||
168 | |||
169 | pnd = container_of(head, struct phonet_device, rcu); | ||
170 | kfree(pnd); | ||
171 | } | ||
172 | |||
173 | int phonet_address_del(struct net_device *dev, u8 addr) | 165 | int phonet_address_del(struct net_device *dev, u8 addr) |
174 | { | 166 | { |
175 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | 167 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); |
@@ -188,7 +180,7 @@ int phonet_address_del(struct net_device *dev, u8 addr) | |||
188 | mutex_unlock(&pndevs->lock); | 180 | mutex_unlock(&pndevs->lock); |
189 | 181 | ||
190 | if (pnd) | 182 | if (pnd) |
191 | call_rcu(&pnd->rcu, phonet_device_rcu_free); | 183 | kfree_rcu(pnd, rcu); |
192 | 184 | ||
193 | return err; | 185 | return err; |
194 | } | 186 | } |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 14b42f4ad791..a606025814a1 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -26,11 +26,6 @@ | |||
26 | #include <net/act_api.h> | 26 | #include <net/act_api.h> |
27 | #include <net/netlink.h> | 27 | #include <net/netlink.h> |
28 | 28 | ||
29 | static void tcf_common_free_rcu(struct rcu_head *head) | ||
30 | { | ||
31 | kfree(container_of(head, struct tcf_common, tcfc_rcu)); | ||
32 | } | ||
33 | |||
34 | void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) | 29 | void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) |
35 | { | 30 | { |
36 | unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); | 31 | unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); |
@@ -47,7 +42,7 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) | |||
47 | * gen_estimator est_timer() might access p->tcfc_lock | 42 | * gen_estimator est_timer() might access p->tcfc_lock |
48 | * or bstats, wait a RCU grace period before freeing p | 43 | * or bstats, wait a RCU grace period before freeing p |
49 | */ | 44 | */ |
50 | call_rcu(&p->tcfc_rcu, tcf_common_free_rcu); | 45 | kfree_rcu(p, tcfc_rcu); |
51 | return; | 46 | return; |
52 | } | 47 | } |
53 | } | 48 | } |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 8a1630774fd6..b3b9b32f4e00 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -96,11 +96,6 @@ nla_put_failure: | |||
96 | goto done; | 96 | goto done; |
97 | } | 97 | } |
98 | 98 | ||
99 | static void tcf_police_free_rcu(struct rcu_head *head) | ||
100 | { | ||
101 | kfree(container_of(head, struct tcf_police, tcf_rcu)); | ||
102 | } | ||
103 | |||
104 | static void tcf_police_destroy(struct tcf_police *p) | 99 | static void tcf_police_destroy(struct tcf_police *p) |
105 | { | 100 | { |
106 | unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK); | 101 | unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK); |
@@ -121,7 +116,7 @@ static void tcf_police_destroy(struct tcf_police *p) | |||
121 | * gen_estimator est_timer() might access p->tcf_lock | 116 | * gen_estimator est_timer() might access p->tcf_lock |
122 | * or bstats, wait a RCU grace period before freeing p | 117 | * or bstats, wait a RCU grace period before freeing p |
123 | */ | 118 | */ |
124 | call_rcu(&p->tcf_rcu, tcf_police_free_rcu); | 119 | kfree_rcu(p, tcf_rcu); |
125 | return; | 120 | return; |
126 | } | 121 | } |
127 | } | 122 | } |
@@ -401,7 +396,6 @@ static void __exit | |||
401 | police_cleanup_module(void) | 396 | police_cleanup_module(void) |
402 | { | 397 | { |
403 | tcf_unregister_action(&act_police_ops); | 398 | tcf_unregister_action(&act_police_ops); |
404 | rcu_barrier(); /* Wait for completion of call_rcu()'s (tcf_police_free_rcu) */ | ||
405 | } | 399 | } |
406 | 400 | ||
407 | module_init(police_init_module); | 401 | module_init(police_init_module); |
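The rcu_barrier() removal in this hunk follows from the kfree_rcu() conversion above it: once the only deferred work is core-kernel kfree(), no pending RCU callback can reference this module's code, so unload need not wait for callbacks to drain. A hedged sketch of the resulting exit path (illustrative names):

    static void __exit example_cleanup_module(void)
    {
            tcf_unregister_action(&act_example_ops);
            /* no rcu_barrier(): pending kfree_rcu() work runs kfree()
             * in core kernel, never module code */
    }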
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index faf71d179e46..3c06c87cd280 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c | |||
@@ -219,7 +219,7 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) | |||
219 | } | 219 | } |
220 | 220 | ||
221 | if (found) { | 221 | if (found) { |
222 | call_rcu(&addr->rcu, sctp_local_addr_free); | 222 | kfree_rcu(addr, rcu); |
223 | SCTP_DBG_OBJCNT_DEC(addr); | 223 | SCTP_DBG_OBJCNT_DEC(addr); |
224 | return 0; | 224 | return 0; |
225 | } | 225 | } |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 865ce7ba4e14..185fe058db11 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -123,7 +123,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, | |||
123 | } | 123 | } |
124 | spin_unlock_bh(&sctp_local_addr_lock); | 124 | spin_unlock_bh(&sctp_local_addr_lock); |
125 | if (found) | 125 | if (found) |
126 | call_rcu(&addr->rcu, sctp_local_addr_free); | 126 | kfree_rcu(addr, rcu); |
127 | break; | 127 | break; |
128 | } | 128 | } |
129 | 129 | ||
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index d5bf91d04f63..065d99958ced 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -230,13 +230,6 @@ static void sctp_free_local_addr_list(void) | |||
230 | } | 230 | } |
231 | } | 231 | } |
232 | 232 | ||
233 | void sctp_local_addr_free(struct rcu_head *head) | ||
234 | { | ||
235 | struct sctp_sockaddr_entry *e = container_of(head, | ||
236 | struct sctp_sockaddr_entry, rcu); | ||
237 | kfree(e); | ||
238 | } | ||
239 | |||
240 | /* Copy the local addresses which are valid for 'scope' into 'bp'. */ | 233 | /* Copy the local addresses which are valid for 'scope' into 'bp'. */ |
241 | int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, | 234 | int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, |
242 | gfp_t gfp, int copy_flags) | 235 | gfp_t gfp, int copy_flags) |
@@ -681,7 +674,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, | |||
681 | } | 674 | } |
682 | spin_unlock_bh(&sctp_local_addr_lock); | 675 | spin_unlock_bh(&sctp_local_addr_lock); |
683 | if (found) | 676 | if (found) |
684 | call_rcu(&addr->rcu, sctp_local_addr_free); | 677 | kfree_rcu(addr, rcu); |
685 | break; | 678 | break; |
686 | } | 679 | } |
687 | 680 | ||
diff --git a/net/socket.c b/net/socket.c index 310d16b1b3c9..c2ed7c95ce87 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -263,15 +263,6 @@ static struct inode *sock_alloc_inode(struct super_block *sb) | |||
263 | return &ei->vfs_inode; | 263 | return &ei->vfs_inode; |
264 | } | 264 | } |
265 | 265 | ||
266 | |||
267 | |||
268 | static void wq_free_rcu(struct rcu_head *head) | ||
269 | { | ||
270 | struct socket_wq *wq = container_of(head, struct socket_wq, rcu); | ||
271 | |||
272 | kfree(wq); | ||
273 | } | ||
274 | |||
275 | static void sock_destroy_inode(struct inode *inode) | 266 | static void sock_destroy_inode(struct inode *inode) |
276 | { | 267 | { |
277 | struct socket_alloc *ei; | 268 | struct socket_alloc *ei; |
@@ -279,7 +270,7 @@ static void sock_destroy_inode(struct inode *inode) | |||
279 | 270 | ||
280 | ei = container_of(inode, struct socket_alloc, vfs_inode); | 271 | ei = container_of(inode, struct socket_alloc, vfs_inode); |
281 | wq = rcu_dereference_protected(ei->socket.wq, 1); | 272 | wq = rcu_dereference_protected(ei->socket.wq, 1); |
282 | call_rcu(&wq->rcu, wq_free_rcu); | 273 | kfree_rcu(wq, rcu); |
283 | kmem_cache_free(sock_inode_cachep, ei); | 274 | kmem_cache_free(sock_inode_cachep, ei); |
284 | } | 275 | } |
285 | 276 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 15792d8b6272..b4d745ea8ee1 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1406,6 +1406,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1406 | struct net *net = xp_net(policy); | 1406 | struct net *net = xp_net(policy); |
1407 | unsigned long now = jiffies; | 1407 | unsigned long now = jiffies; |
1408 | struct net_device *dev; | 1408 | struct net_device *dev; |
1409 | struct xfrm_mode *inner_mode; | ||
1409 | struct dst_entry *dst_prev = NULL; | 1410 | struct dst_entry *dst_prev = NULL; |
1410 | struct dst_entry *dst0 = NULL; | 1411 | struct dst_entry *dst0 = NULL; |
1411 | int i = 0; | 1412 | int i = 0; |
@@ -1436,6 +1437,17 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1436 | goto put_states; | 1437 | goto put_states; |
1437 | } | 1438 | } |
1438 | 1439 | ||
1440 | if (xfrm[i]->sel.family == AF_UNSPEC) { | ||
1441 | inner_mode = xfrm_ip2inner_mode(xfrm[i], | ||
1442 | xfrm_af2proto(family)); | ||
1443 | if (!inner_mode) { | ||
1444 | err = -EAFNOSUPPORT; | ||
1445 | dst_release(dst); | ||
1446 | goto put_states; | ||
1447 | } | ||
1448 | } else | ||
1449 | inner_mode = xfrm[i]->inner_mode; | ||
1450 | |||
1439 | if (!dst_prev) | 1451 | if (!dst_prev) |
1440 | dst0 = dst1; | 1452 | dst0 = dst1; |
1441 | else { | 1453 | else { |
@@ -1464,7 +1476,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1464 | dst1->lastuse = now; | 1476 | dst1->lastuse = now; |
1465 | 1477 | ||
1466 | dst1->input = dst_discard; | 1478 | dst1->input = dst_discard; |
1467 | dst1->output = xfrm[i]->outer_mode->afinfo->output; | 1479 | dst1->output = inner_mode->afinfo->output; |
1468 | 1480 | ||
1469 | dst1->next = dst_prev; | 1481 | dst1->next = dst_prev; |
1470 | dst_prev = dst1; | 1482 | dst_prev = dst1; |
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index e8a781422feb..47f1b8638df9 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
@@ -535,6 +535,9 @@ int xfrm_init_replay(struct xfrm_state *x) | |||
535 | replay_esn->bmp_len * sizeof(__u32) * 8) | 535 | replay_esn->bmp_len * sizeof(__u32) * 8) |
536 | return -EINVAL; | 536 | return -EINVAL; |
537 | 537 | ||
538 | if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0) | ||
539 | return -EINVAL; | ||
540 | |||
538 | if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn) | 541 | if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn) |
539 | x->repl = &xfrm_replay_esn; | 542 | x->repl = &xfrm_replay_esn; |
540 | else | 543 | else |
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index cd104afcc5f2..413c53693e62 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c | |||
@@ -420,11 +420,10 @@ static int parse_elf(struct elf_info *info, const char *filename) | |||
420 | return 0; | 420 | return 0; |
421 | } | 421 | } |
422 | 422 | ||
423 | if (hdr->e_shnum == 0) { | 423 | if (hdr->e_shnum == SHN_UNDEF) { |
424 | /* | 424 | /* |
425 | * There are more than 64k sections, | 425 | * There are more than 64k sections, |
426 | * read count from .sh_size. | 426 | * read count from .sh_size. |
427 | * note: it doesn't need shndx2secindex() | ||
428 | */ | 427 | */ |
429 | info->num_sections = TO_NATIVE(sechdrs[0].sh_size); | 428 | info->num_sections = TO_NATIVE(sechdrs[0].sh_size); |
430 | } | 429 | } |
@@ -432,8 +431,7 @@ static int parse_elf(struct elf_info *info, const char *filename) | |||
432 | info->num_sections = hdr->e_shnum; | 431 | info->num_sections = hdr->e_shnum; |
433 | } | 432 | } |
434 | if (hdr->e_shstrndx == SHN_XINDEX) { | 433 | if (hdr->e_shstrndx == SHN_XINDEX) { |
435 | info->secindex_strings = | 434 | info->secindex_strings = TO_NATIVE(sechdrs[0].sh_link); |
436 | shndx2secindex(TO_NATIVE(sechdrs[0].sh_link)); | ||
437 | } | 435 | } |
438 | else { | 436 | else { |
439 | info->secindex_strings = hdr->e_shstrndx; | 437 | info->secindex_strings = hdr->e_shstrndx; |
@@ -489,7 +487,7 @@ static int parse_elf(struct elf_info *info, const char *filename) | |||
489 | sechdrs[i].sh_offset; | 487 | sechdrs[i].sh_offset; |
490 | info->symtab_stop = (void *)hdr + | 488 | info->symtab_stop = (void *)hdr + |
491 | sechdrs[i].sh_offset + sechdrs[i].sh_size; | 489 | sechdrs[i].sh_offset + sechdrs[i].sh_size; |
492 | sh_link_idx = shndx2secindex(sechdrs[i].sh_link); | 490 | sh_link_idx = sechdrs[i].sh_link; |
493 | info->strtab = (void *)hdr + | 491 | info->strtab = (void *)hdr + |
494 | sechdrs[sh_link_idx].sh_offset; | 492 | sechdrs[sh_link_idx].sh_offset; |
495 | } | 493 | } |
@@ -516,11 +514,9 @@ static int parse_elf(struct elf_info *info, const char *filename) | |||
516 | 514 | ||
517 | if (symtab_shndx_idx != ~0U) { | 515 | if (symtab_shndx_idx != ~0U) { |
518 | Elf32_Word *p; | 516 | Elf32_Word *p; |
519 | if (symtab_idx != | 517 | if (symtab_idx != sechdrs[symtab_shndx_idx].sh_link) |
520 | shndx2secindex(sechdrs[symtab_shndx_idx].sh_link)) | ||
521 | fatal("%s: SYMTAB_SHNDX has bad sh_link: %u!=%u\n", | 518 | fatal("%s: SYMTAB_SHNDX has bad sh_link: %u!=%u\n", |
522 | filename, | 519 | filename, sechdrs[symtab_shndx_idx].sh_link, |
523 | shndx2secindex(sechdrs[symtab_shndx_idx].sh_link), | ||
524 | symtab_idx); | 520 | symtab_idx); |
525 | /* Fix endianness */ | 521 | /* Fix endianness */ |
526 | for (p = info->symtab_shndx_start; p < info->symtab_shndx_stop; | 522 | for (p = info->symtab_shndx_start; p < info->symtab_shndx_stop; |
@@ -1446,7 +1442,7 @@ static unsigned int *reloc_location(struct elf_info *elf, | |||
1446 | Elf_Shdr *sechdr, Elf_Rela *r) | 1442 | Elf_Shdr *sechdr, Elf_Rela *r) |
1447 | { | 1443 | { |
1448 | Elf_Shdr *sechdrs = elf->sechdrs; | 1444 | Elf_Shdr *sechdrs = elf->sechdrs; |
1449 | int section = shndx2secindex(sechdr->sh_info); | 1445 | int section = sechdr->sh_info; |
1450 | 1446 | ||
1451 | return (void *)elf->hdr + sechdrs[section].sh_offset + | 1447 | return (void *)elf->hdr + sechdrs[section].sh_offset + |
1452 | r->r_offset; | 1448 | r->r_offset; |
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h index 0388cfccac8d..2031119080dc 100644 --- a/scripts/mod/modpost.h +++ b/scripts/mod/modpost.h | |||
@@ -145,33 +145,22 @@ static inline int is_shndx_special(unsigned int i) | |||
145 | return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE; | 145 | return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE; |
146 | } | 146 | } |
147 | 147 | ||
148 | /* shndx is in [0..SHN_LORESERVE) U (SHN_HIRESERVE, 0xfffffff], thus: | 148 | /* |
149 | * shndx == 0 <=> sechdrs[0] | 149 | * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of |
150 | * ...... | 150 | * the way to -256..-1, to avoid conflicting with real section |
151 | * shndx == SHN_LORESERVE-1 <=> sechdrs[SHN_LORESERVE-1] | 151 | * indices. |
152 | * shndx == SHN_HIRESERVE+1 <=> sechdrs[SHN_LORESERVE] | ||
153 | * shndx == SHN_HIRESERVE+2 <=> sechdrs[SHN_LORESERVE+1] | ||
154 | * ...... | ||
155 | * fyi: sym->st_shndx is uint16, SHN_LORESERVE = ff00, SHN_HIRESERVE = ffff, | ||
156 | * so basically we map 0000..feff -> 0000..feff | ||
157 | * ff00..ffff -> (you are a bad boy, dont do it) | ||
158 | * 10000..xxxx -> ff00..(xxxx-0x100) | ||
159 | */ | 152 | */ |
160 | static inline unsigned int shndx2secindex(unsigned int i) | 153 | #define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1)) |
161 | { | ||
162 | if (i <= SHN_HIRESERVE) | ||
163 | return i; | ||
164 | return i - (SHN_HIRESERVE + 1 - SHN_LORESERVE); | ||
165 | } | ||
166 | 154 | ||
167 | /* Accessor for sym->st_shndx, hides ugliness of "64k sections" */ | 155 | /* Accessor for sym->st_shndx, hides ugliness of "64k sections" */ |
168 | static inline unsigned int get_secindex(const struct elf_info *info, | 156 | static inline unsigned int get_secindex(const struct elf_info *info, |
169 | const Elf_Sym *sym) | 157 | const Elf_Sym *sym) |
170 | { | 158 | { |
159 | if (is_shndx_special(sym->st_shndx)) | ||
160 | return SPECIAL(sym->st_shndx); | ||
171 | if (sym->st_shndx != SHN_XINDEX) | 161 | if (sym->st_shndx != SHN_XINDEX) |
172 | return sym->st_shndx; | 162 | return sym->st_shndx; |
173 | return shndx2secindex(info->symtab_shndx_start[sym - | 163 | return info->symtab_shndx_start[sym - info->symtab_start]; |
174 | info->symtab_start]); | ||
175 | } | 164 | } |
176 | 165 | ||
177 | /* file2alias.c */ | 166 | /* file2alias.c */ |
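The SPECIAL() mapping replaces the old shndx2secindex() compaction: instead of shifting indices above SHN_HIRESERVE down, reserved indices are pushed below zero, where no real section number can collide with them. A worked example with the standard ELF constant SHN_ABS (0xfff1):

    /* SHN_HIRESERVE == 0xffff, so SPECIAL(i) == i - 0x10000.        */
    /* SPECIAL(SHN_ABS) == 0xfff1 - 0x10000 == -15, i.e. 0xfffffff1  */
    /* as an unsigned int -- far outside any real section index.     */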
diff --git a/scripts/module-common.lds b/scripts/module-common.lds index 47a1f9ae0ede..0865b3e752be 100644 --- a/scripts/module-common.lds +++ b/scripts/module-common.lds | |||
@@ -5,4 +5,15 @@ | |||
5 | */ | 5 | */ |
6 | SECTIONS { | 6 | SECTIONS { |
7 | /DISCARD/ : { *(.discard) } | 7 | /DISCARD/ : { *(.discard) } |
8 | |||
9 | __ksymtab : { *(SORT(___ksymtab+*)) } | ||
10 | __ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) } | ||
11 | __ksymtab_unused : { *(SORT(___ksymtab_unused+*)) } | ||
12 | __ksymtab_unused_gpl : { *(SORT(___ksymtab_unused_gpl+*)) } | ||
13 | __ksymtab_gpl_future : { *(SORT(___ksymtab_gpl_future+*)) } | ||
14 | __kcrctab : { *(SORT(___kcrctab+*)) } | ||
15 | __kcrctab_gpl : { *(SORT(___kcrctab_gpl+*)) } | ||
16 | __kcrctab_unused : { *(SORT(___kcrctab_unused+*)) } | ||
17 | __kcrctab_unused_gpl : { *(SORT(___kcrctab_unused_gpl+*)) } | ||
18 | __kcrctab_gpl_future : { *(SORT(___kcrctab_gpl_future+*)) } | ||
8 | } | 19 | } |
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index c6ca8662a468..f66baf44f32d 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c | |||
@@ -69,18 +69,6 @@ error: | |||
69 | EXPORT_SYMBOL_GPL(user_instantiate); | 69 | EXPORT_SYMBOL_GPL(user_instantiate); |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * dispose of the old data from an updated user defined key | ||
73 | */ | ||
74 | static void user_update_rcu_disposal(struct rcu_head *rcu) | ||
75 | { | ||
76 | struct user_key_payload *upayload; | ||
77 | |||
78 | upayload = container_of(rcu, struct user_key_payload, rcu); | ||
79 | |||
80 | kfree(upayload); | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * update a user defined key | 72 | * update a user defined key |
85 | * - the key's semaphore is write-locked | 73 | * - the key's semaphore is write-locked |
86 | */ | 74 | */ |
@@ -114,7 +102,7 @@ int user_update(struct key *key, const void *data, size_t datalen) | |||
114 | key->expiry = 0; | 102 | key->expiry = 0; |
115 | } | 103 | } |
116 | 104 | ||
117 | call_rcu(&zap->rcu, user_update_rcu_disposal); | 105 | kfree_rcu(zap, rcu); |
118 | 106 | ||
119 | error: | 107 | error: |
120 | return ret; | 108 | return ret; |
@@ -145,7 +133,7 @@ void user_revoke(struct key *key) | |||
145 | 133 | ||
146 | if (upayload) { | 134 | if (upayload) { |
147 | rcu_assign_pointer(key->payload.data, NULL); | 135 | rcu_assign_pointer(key->payload.data, NULL); |
148 | call_rcu(&upayload->rcu, user_update_rcu_disposal); | 136 | kfree_rcu(upayload, rcu); |
149 | } | 137 | } |
150 | } | 138 | } |
151 | 139 | ||
diff --git a/security/selinux/avc.c b/security/selinux/avc.c index 1d027e29ce8d..3d2715fd35ea 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c | |||
@@ -38,11 +38,7 @@ | |||
38 | #define AVC_CACHE_RECLAIM 16 | 38 | #define AVC_CACHE_RECLAIM 16 |
39 | 39 | ||
40 | #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS | 40 | #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS |
41 | #define avc_cache_stats_incr(field) \ | 41 | #define avc_cache_stats_incr(field) this_cpu_inc(avc_cache_stats.field) |
42 | do { \ | ||
43 | per_cpu(avc_cache_stats, get_cpu()).field++; \ | ||
44 | put_cpu(); \ | ||
45 | } while (0) | ||
46 | #else | 42 | #else |
47 | #define avc_cache_stats_incr(field) do {} while (0) | 43 | #define avc_cache_stats_incr(field) do {} while (0) |
48 | #endif | 44 | #endif |
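this_cpu_inc() folds the whole get_cpu()/increment/put_cpu() sequence into a single preemption-safe per-cpu operation (on x86, one segment-relative add). A sketch of what the old macro expanded to versus the new one, for the lookups field:

    static void example_count_lookup(void)
    {
            /* old expansion, roughly:
             *     per_cpu(avc_cache_stats, get_cpu()).lookups++;
             *     put_cpu();
             * new: */
            this_cpu_inc(avc_cache_stats.lookups);
    }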
@@ -347,11 +343,10 @@ static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass) | |||
347 | node = avc_search_node(ssid, tsid, tclass); | 343 | node = avc_search_node(ssid, tsid, tclass); |
348 | 344 | ||
349 | if (node) | 345 | if (node) |
350 | avc_cache_stats_incr(hits); | 346 | return node; |
351 | else | ||
352 | avc_cache_stats_incr(misses); | ||
353 | 347 | ||
354 | return node; | 348 | avc_cache_stats_incr(misses); |
349 | return NULL; | ||
355 | } | 350 | } |
356 | 351 | ||
357 | static int avc_latest_notif_update(int seqno, int is_insert) | 352 | static int avc_latest_notif_update(int seqno, int is_insert) |
@@ -769,7 +764,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, | |||
769 | rcu_read_lock(); | 764 | rcu_read_lock(); |
770 | 765 | ||
771 | node = avc_lookup(ssid, tsid, tclass); | 766 | node = avc_lookup(ssid, tsid, tclass); |
772 | if (!node) { | 767 | if (unlikely(!node)) { |
773 | rcu_read_unlock(); | 768 | rcu_read_unlock(); |
774 | 769 | ||
775 | if (in_avd) | 770 | if (in_avd) |
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h index e77b2ac2908b..47fda963495d 100644 --- a/security/selinux/include/avc.h +++ b/security/selinux/include/avc.h | |||
@@ -41,7 +41,6 @@ struct sk_buff; | |||
41 | */ | 41 | */ |
42 | struct avc_cache_stats { | 42 | struct avc_cache_stats { |
43 | unsigned int lookups; | 43 | unsigned int lookups; |
44 | unsigned int hits; | ||
45 | unsigned int misses; | 44 | unsigned int misses; |
46 | unsigned int allocations; | 45 | unsigned int allocations; |
47 | unsigned int reclaims; | 46 | unsigned int reclaims; |
diff --git a/security/selinux/netif.c b/security/selinux/netif.c index d6095d63d831..58cc481c93d5 100644 --- a/security/selinux/netif.c +++ b/security/selinux/netif.c | |||
@@ -104,22 +104,6 @@ static int sel_netif_insert(struct sel_netif *netif) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | /** | 106 | /** |
107 | * sel_netif_free - Frees an interface entry | ||
108 | * @p: the entry's RCU field | ||
109 | * | ||
110 | * Description: | ||
111 | * This function is designed to be used as a callback to the call_rcu() | ||
112 | * function so that memory allocated to a hash table interface entry can be | ||
113 | * released safely. | ||
114 | * | ||
115 | */ | ||
116 | static void sel_netif_free(struct rcu_head *p) | ||
117 | { | ||
118 | struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head); | ||
119 | kfree(netif); | ||
120 | } | ||
121 | |||
122 | /** | ||
123 | * sel_netif_destroy - Remove an interface record from the table | 107 | * sel_netif_destroy - Remove an interface record from the table |
124 | * @netif: the existing interface record | 108 | * @netif: the existing interface record |
125 | * | 109 | * |
@@ -131,7 +115,7 @@ static void sel_netif_destroy(struct sel_netif *netif) | |||
131 | { | 115 | { |
132 | list_del_rcu(&netif->list); | 116 | list_del_rcu(&netif->list); |
133 | sel_netif_total--; | 117 | sel_netif_total--; |
134 | call_rcu(&netif->rcu_head, sel_netif_free); | 118 | kfree_rcu(netif, rcu_head); |
135 | } | 119 | } |
136 | 120 | ||
137 | /** | 121 | /** |
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index ea39cb742ae5..c0e1a0f52462 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c | |||
@@ -1380,10 +1380,14 @@ static int sel_avc_stats_seq_show(struct seq_file *seq, void *v) | |||
1380 | if (v == SEQ_START_TOKEN) | 1380 | if (v == SEQ_START_TOKEN) |
1381 | seq_printf(seq, "lookups hits misses allocations reclaims " | 1381 | seq_printf(seq, "lookups hits misses allocations reclaims " |
1382 | "frees\n"); | 1382 | "frees\n"); |
1383 | else | 1383 | else { |
1384 | seq_printf(seq, "%u %u %u %u %u %u\n", st->lookups, | 1384 | unsigned int lookups = st->lookups; |
1385 | st->hits, st->misses, st->allocations, | 1385 | unsigned int misses = st->misses; |
1386 | unsigned int hits = lookups - misses; | ||
1387 | seq_printf(seq, "%u %u %u %u %u %u\n", lookups, | ||
1388 | hits, misses, st->allocations, | ||
1386 | st->reclaims, st->frees); | 1389 | st->reclaims, st->frees); |
1390 | } | ||
1387 | return 0; | 1391 | return 0; |
1388 | } | 1392 | } |
1389 | 1393 | ||
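With the hits counter dropped from struct avc_cache_stats in the avc.h hunk above, the value reported to userspace is now derived from the invariant that every lookup is either a hit or a miss:

    hits = lookups - misses;   /* e.g. 1000 lookups, 120 misses -> 880 hits */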
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index e6e7ce0d3d55..7102457661d6 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c | |||
@@ -1819,8 +1819,6 @@ static int filename_trans_read(struct policydb *p, void *fp) | |||
1819 | goto out; | 1819 | goto out; |
1820 | nel = le32_to_cpu(buf[0]); | 1820 | nel = le32_to_cpu(buf[0]); |
1821 | 1821 | ||
1822 | printk(KERN_ERR "%s: nel=%d\n", __func__, nel); | ||
1823 | |||
1824 | last = p->filename_trans; | 1822 | last = p->filename_trans; |
1825 | while (last && last->next) | 1823 | while (last && last->next) |
1826 | last = last->next; | 1824 | last = last->next; |
@@ -1857,8 +1855,6 @@ static int filename_trans_read(struct policydb *p, void *fp) | |||
1857 | goto out; | 1855 | goto out; |
1858 | name[len] = 0; | 1856 | name[len] = 0; |
1859 | 1857 | ||
1860 | printk(KERN_ERR "%s: ft=%p ft->name=%p ft->name=%s\n", __func__, ft, ft->name, ft->name); | ||
1861 | |||
1862 | rc = next_entry(buf, fp, sizeof(u32) * 4); | 1858 | rc = next_entry(buf, fp, sizeof(u32) * 4); |
1863 | if (rc) | 1859 | if (rc) |
1864 | goto out; | 1860 | goto out; |
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c index 2727befd158e..b04d28039c16 100644 --- a/sound/soc/codecs/ssm2602.c +++ b/sound/soc/codecs/ssm2602.c | |||
@@ -139,7 +139,7 @@ SOC_DOUBLE_R("Capture Volume", SSM2602_LINVOL, SSM2602_RINVOL, 0, 31, 0), | |||
139 | SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1), | 139 | SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1), |
140 | 140 | ||
141 | SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0), | 141 | SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0), |
142 | SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0), | 142 | SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0), |
143 | SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1), | 143 | SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1), |
144 | 144 | ||
145 | SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1), | 145 | SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1), |
@@ -602,7 +602,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = { | |||
602 | .read = ssm2602_read_reg_cache, | 602 | .read = ssm2602_read_reg_cache, |
603 | .write = ssm2602_write, | 603 | .write = ssm2602_write, |
604 | .set_bias_level = ssm2602_set_bias_level, | 604 | .set_bias_level = ssm2602_set_bias_level, |
605 | .reg_cache_size = sizeof(ssm2602_reg), | 605 | .reg_cache_size = ARRAY_SIZE(ssm2602_reg), |
606 | .reg_word_size = sizeof(u16), | 606 | .reg_word_size = sizeof(u16), |
607 | .reg_cache_default = ssm2602_reg, | 607 | .reg_cache_default = ssm2602_reg, |
608 | }; | 608 | }; |
@@ -614,7 +614,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = { | |||
614 | * low = 0x1a | 614 | * low = 0x1a |
615 | * high = 0x1b | 615 | * high = 0x1b |
616 | */ | 616 | */ |
617 | static int ssm2602_i2c_probe(struct i2c_client *i2c, | 617 | static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c, |
618 | const struct i2c_device_id *id) | 618 | const struct i2c_device_id *id) |
619 | { | 619 | { |
620 | struct ssm2602_priv *ssm2602; | 620 | struct ssm2602_priv *ssm2602; |
@@ -635,7 +635,7 @@ static int ssm2602_i2c_probe(struct i2c_client *i2c, | |||
635 | return ret; | 635 | return ret; |
636 | } | 636 | } |
637 | 637 | ||
638 | static int ssm2602_i2c_remove(struct i2c_client *client) | 638 | static int __devexit ssm2602_i2c_remove(struct i2c_client *client) |
639 | { | 639 | { |
640 | snd_soc_unregister_codec(&client->dev); | 640 | snd_soc_unregister_codec(&client->dev); |
641 | kfree(i2c_get_clientdata(client)); | 641 | kfree(i2c_get_clientdata(client)); |
@@ -655,7 +655,7 @@ static struct i2c_driver ssm2602_i2c_driver = { | |||
655 | .owner = THIS_MODULE, | 655 | .owner = THIS_MODULE, |
656 | }, | 656 | }, |
657 | .probe = ssm2602_i2c_probe, | 657 | .probe = ssm2602_i2c_probe, |
658 | .remove = ssm2602_i2c_remove, | 658 | .remove = __devexit_p(ssm2602_i2c_remove), |
659 | .id_table = ssm2602_i2c_id, | 659 | .id_table = ssm2602_i2c_id, |
660 | }; | 660 | }; |
661 | #endif | 661 | #endif |
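
Three distinct fixes land in the ssm2602 hunks: the Mic Boost2 control moves to the register bit it actually occupies, probe/remove gain hotplug section annotations (__devexit_p() degrades to NULL when the exit section is discarded), and reg_cache_size is corrected from a byte count to a register count. The last is the classic sizeof-vs-ARRAY_SIZE slip on a u16 array, sketched here with the array contents elided:

    static const u16 ssm2602_reg[] = { /* power-on register defaults */ };

    /* sizeof(ssm2602_reg)     == element count * sizeof(u16), in bytes
     * ARRAY_SIZE(ssm2602_reg) == sizeof(arr) / sizeof((arr)[0])
     *                         == element count, which is what
     *                            .reg_cache_size expects */
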
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c index 48ffd406a71d..a7b8f301bad3 100644 --- a/sound/soc/codecs/uda134x.c +++ b/sound/soc/codecs/uda134x.c | |||
@@ -601,9 +601,7 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = { | |||
601 | .reg_cache_step = 1, | 601 | .reg_cache_step = 1, |
602 | .read = uda134x_read_reg_cache, | 602 | .read = uda134x_read_reg_cache, |
603 | .write = uda134x_write, | 603 | .write = uda134x_write, |
604 | #ifdef POWER_OFF_ON_STANDBY | ||
605 | .set_bias_level = uda134x_set_bias_level, | 604 | .set_bias_level = uda134x_set_bias_level, |
606 | #endif | ||
607 | }; | 605 | }; |
608 | 606 | ||
609 | static int __devinit uda134x_codec_probe(struct platform_device *pdev) | 607 | static int __devinit uda134x_codec_probe(struct platform_device *pdev) |
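
The #ifdef dropped here is the whole bug: with no code defining POWER_OFF_ON_STANDBY, the guarded member was compiled out and the codec never registered a bias-level handler. In miniature:

    /* With the guard macro never defined, .set_bias_level stays NULL
     * and the ASoC core skips bias management for this codec. */
    #ifdef POWER_OFF_ON_STANDBY
            .set_bias_level = uda134x_set_bias_level,
    #endif

Registering the callback unconditionally restores standby power handling.
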
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index f52b623bb692..824d1c8c8a35 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c | |||
@@ -692,7 +692,7 @@ SOC_ENUM("DRC Smoothing Threshold", drc_smoothing), | |||
692 | SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup), | 692 | SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup), |
693 | 693 | ||
694 | SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT, | 694 | SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT, |
695 | WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 96, 0, digital_tlv), | 695 | WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv), |
696 | SOC_ENUM("ADC Companding Mode", adc_companding), | 696 | SOC_ENUM("ADC Companding Mode", adc_companding), |
697 | SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0), | 697 | SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0), |
698 | 698 | ||
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c index 419bf4f5534a..cd22a54b2f14 100644 --- a/sound/soc/jz4740/jz4740-i2s.c +++ b/sound/soc/jz4740/jz4740-i2s.c | |||
@@ -133,7 +133,7 @@ static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream, | |||
133 | struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); | 133 | struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); |
134 | uint32_t conf; | 134 | uint32_t conf; |
135 | 135 | ||
136 | if (!dai->active) | 136 | if (dai->active) |
137 | return; | 137 | return; |
138 | 138 | ||
139 | conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); | 139 | conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); |
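
The jz4740 fix inverts a guard in the shutdown path. shutdown() runs per substream, and dai->active still being set means another stream is using the interface, which is precisely when the controller must not be switched off; the old test had it backwards, disabling a live controller and never disabling an idle one. The intended shape (condensed; the register write is elided):

    static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream,
                                    struct snd_soc_dai *dai)
    {
            /* Another substream still keeps the DAI busy:
             * leave the controller enabled. */
            if (dai->active)
                    return;

            /* Last user gone: read AIC_CONF, clear the enable bit,
             * write it back. */
    }
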
diff --git a/sound/soc/mid-x86/sst_platform.c b/sound/soc/mid-x86/sst_platform.c index d567c322a2fb..6b1f9d3bf34e 100644 --- a/sound/soc/mid-x86/sst_platform.c +++ b/sound/soc/mid-x86/sst_platform.c | |||
@@ -376,6 +376,11 @@ static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream, | |||
376 | return 0; | 376 | return 0; |
377 | } | 377 | } |
378 | 378 | ||
379 | static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream) | ||
380 | { | ||
381 | return snd_pcm_lib_free_pages(substream); | ||
382 | } | ||
383 | |||
379 | static struct snd_pcm_ops sst_platform_ops = { | 384 | static struct snd_pcm_ops sst_platform_ops = { |
380 | .open = sst_platform_open, | 385 | .open = sst_platform_open, |
381 | .close = sst_platform_close, | 386 | .close = sst_platform_close, |
@@ -384,6 +389,7 @@ static struct snd_pcm_ops sst_platform_ops = { | |||
384 | .trigger = sst_platform_pcm_trigger, | 389 | .trigger = sst_platform_pcm_trigger, |
385 | .pointer = sst_platform_pcm_pointer, | 390 | .pointer = sst_platform_pcm_pointer, |
386 | .hw_params = sst_platform_pcm_hw_params, | 391 | .hw_params = sst_platform_pcm_hw_params, |
392 | .hw_free = sst_platform_pcm_hw_free, | ||
387 | }; | 393 | }; |
388 | 394 | ||
389 | static void sst_pcm_free(struct snd_pcm *pcm) | 395 | static void sst_pcm_free(struct snd_pcm *pcm) |
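
The added hw_free closes an alloc/release pairing: pages obtained through the snd_pcm_lib allocator in hw_params must be handed back in hw_free, or the DMA buffer leaks each time userspace renegotiates hardware parameters. The canonical pairing looks like this (the hw_params body is assumed; only hw_free is taken from the diff):

    static int foo_pcm_hw_params(struct snd_pcm_substream *substream,
                                 struct snd_pcm_hw_params *params)
    {
            /* Allocate or resize the DMA buffer for this configuration. */
            return snd_pcm_lib_malloc_pages(substream,
                                            params_buffer_bytes(params));
    }

    static int foo_pcm_hw_free(struct snd_pcm_substream *substream)
    {
            /* Return what hw_params allocated. */
            return snd_pcm_lib_free_pages(substream);
    }
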
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index d8562ce4de7a..dd55d1069468 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -3291,6 +3291,8 @@ int snd_soc_register_card(struct snd_soc_card *card) | |||
3291 | if (!card->name || !card->dev) | 3291 | if (!card->name || !card->dev) |
3292 | return -EINVAL; | 3292 | return -EINVAL; |
3293 | 3293 | ||
3294 | dev_set_drvdata(card->dev, card); | ||
3295 | |||
3294 | snd_soc_initialize_card_lists(card); | 3296 | snd_soc_initialize_card_lists(card); |
3295 | 3297 | ||
3296 | soc_init_card_debugfs(card); | 3298 | soc_init_card_debugfs(card); |
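
Storing the card in drvdata before any other registration work means every later dev_get_drvdata(card->dev) consumer, PM callbacks in particular, sees a valid pointer regardless of how far registration has progressed. A hedged sketch of the kind of consumer this ordering protects (the suspend handler here is illustrative):

    static int foo_suspend(struct device *dev)
    {
            /* Valid as soon as snd_soc_register_card() has run,
             * because drvdata is set before anything else. */
            struct snd_soc_card *card = dev_get_drvdata(dev);

            dev_dbg(dev, "suspending card %s\n", card->name);
            return 0;
    }
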
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt index 5bb41e55a3ac..3152cca15501 100644 --- a/tools/perf/Documentation/perf-script-perl.txt +++ b/tools/perf/Documentation/perf-script-perl.txt | |||
@@ -63,7 +63,6 @@ The format file for the sched_wakep event defines the following fields | |||
63 | field:unsigned char common_flags; | 63 | field:unsigned char common_flags; |
64 | field:unsigned char common_preempt_count; | 64 | field:unsigned char common_preempt_count; |
65 | field:int common_pid; | 65 | field:int common_pid; |
66 | field:int common_lock_depth; | ||
67 | 66 | ||
68 | field:char comm[TASK_COMM_LEN]; | 67 | field:char comm[TASK_COMM_LEN]; |
69 | field:pid_t pid; | 68 | field:pid_t pid; |
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt index 36b38277422c..471022069119 100644 --- a/tools/perf/Documentation/perf-script-python.txt +++ b/tools/perf/Documentation/perf-script-python.txt | |||
@@ -463,7 +463,6 @@ The format file for the sched_wakep event defines the following fields | |||
463 | field:unsigned char common_flags; | 463 | field:unsigned char common_flags; |
464 | field:unsigned char common_preempt_count; | 464 | field:unsigned char common_preempt_count; |
465 | field:int common_pid; | 465 | field:int common_pid; |
466 | field:int common_lock_depth; | ||
467 | 466 | ||
468 | field:char comm[TASK_COMM_LEN]; | 467 | field:char comm[TASK_COMM_LEN]; |
469 | field:pid_t pid; | 468 | field:pid_t pid; |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 416538248a4b..0974f957b8fa 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -427,7 +427,7 @@ static void mmap_read_all(void) | |||
427 | { | 427 | { |
428 | int i; | 428 | int i; |
429 | 429 | ||
430 | for (i = 0; i < evsel_list->cpus->nr; i++) { | 430 | for (i = 0; i < evsel_list->nr_mmaps; i++) { |
431 | if (evsel_list->mmap[i].base) | 431 | if (evsel_list->mmap[i].base) |
432 | mmap_read(&evsel_list->mmap[i]); | 432 | mmap_read(&evsel_list->mmap[i]); |
433 | } | 433 | } |
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 11e3c8458362..2f9a337b182f 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c | |||
@@ -549,7 +549,7 @@ static int test__basic_mmap(void) | |||
549 | ++foo; | 549 | ++foo; |
550 | } | 550 | } |
551 | 551 | ||
552 | while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) { | 552 | while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { |
553 | struct perf_sample sample; | 553 | struct perf_sample sample; |
554 | 554 | ||
555 | if (event->header.type != PERF_RECORD_SAMPLE) { | 555 | if (event->header.type != PERF_RECORD_SAMPLE) { |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 7e3d6e310bf8..ebfc7cf5f63b 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -801,12 +801,12 @@ static void perf_event__process_sample(const union perf_event *event, | |||
801 | } | 801 | } |
802 | } | 802 | } |
803 | 803 | ||
804 | static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu) | 804 | static void perf_session__mmap_read_idx(struct perf_session *self, int idx) |
805 | { | 805 | { |
806 | struct perf_sample sample; | 806 | struct perf_sample sample; |
807 | union perf_event *event; | 807 | union perf_event *event; |
808 | 808 | ||
809 | while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) { | 809 | while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) { |
810 | perf_session__parse_sample(self, event, &sample); | 810 | perf_session__parse_sample(self, event, &sample); |
811 | 811 | ||
812 | if (event->header.type == PERF_RECORD_SAMPLE) | 812 | if (event->header.type == PERF_RECORD_SAMPLE) |
@@ -820,8 +820,8 @@ static void perf_session__mmap_read(struct perf_session *self) | |||
820 | { | 820 | { |
821 | int i; | 821 | int i; |
822 | 822 | ||
823 | for (i = 0; i < top.evlist->cpus->nr; i++) | 823 | for (i = 0; i < top.evlist->nr_mmaps; i++) |
824 | perf_session__mmap_read_cpu(self, i); | 824 | perf_session__mmap_read_idx(self, i); |
825 | } | 825 | } |
826 | 826 | ||
827 | static void start_counters(struct perf_evlist *evlist) | 827 | static void start_counters(struct perf_evlist *evlist) |
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 45da8d186b49..23eb22b05d27 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -166,11 +166,11 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) | |||
166 | return NULL; | 166 | return NULL; |
167 | } | 167 | } |
168 | 168 | ||
169 | union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu) | 169 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) |
170 | { | 170 | { |
171 | /* XXX Move this to perf.c, making it generally available */ | 171 | /* XXX Move this to perf.c, making it generally available */ |
172 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); | 172 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); |
173 | struct perf_mmap *md = &evlist->mmap[cpu]; | 173 | struct perf_mmap *md = &evlist->mmap[idx]; |
174 | unsigned int head = perf_mmap__read_head(md); | 174 | unsigned int head = perf_mmap__read_head(md); |
175 | unsigned int old = md->prev; | 175 | unsigned int old = md->prev; |
176 | unsigned char *data = md->base + page_size; | 176 | unsigned char *data = md->base + page_size; |
@@ -235,31 +235,37 @@ union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu) | |||
235 | 235 | ||
236 | void perf_evlist__munmap(struct perf_evlist *evlist) | 236 | void perf_evlist__munmap(struct perf_evlist *evlist) |
237 | { | 237 | { |
238 | int cpu; | 238 | int i; |
239 | 239 | ||
240 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | 240 | for (i = 0; i < evlist->nr_mmaps; i++) { |
241 | if (evlist->mmap[cpu].base != NULL) { | 241 | if (evlist->mmap[i].base != NULL) { |
242 | munmap(evlist->mmap[cpu].base, evlist->mmap_len); | 242 | munmap(evlist->mmap[i].base, evlist->mmap_len); |
243 | evlist->mmap[cpu].base = NULL; | 243 | evlist->mmap[i].base = NULL; |
244 | } | 244 | } |
245 | } | 245 | } |
246 | |||
247 | free(evlist->mmap); | ||
248 | evlist->mmap = NULL; | ||
246 | } | 249 | } |
247 | 250 | ||
248 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist) | 251 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist) |
249 | { | 252 | { |
250 | evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap)); | 253 | evlist->nr_mmaps = evlist->cpus->nr; |
254 | if (evlist->cpus->map[0] == -1) | ||
255 | evlist->nr_mmaps = evlist->threads->nr; | ||
256 | evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); | ||
251 | return evlist->mmap != NULL ? 0 : -ENOMEM; | 257 | return evlist->mmap != NULL ? 0 : -ENOMEM; |
252 | } | 258 | } |
253 | 259 | ||
254 | static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel, | 260 | static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel, |
255 | int cpu, int prot, int mask, int fd) | 261 | int idx, int prot, int mask, int fd) |
256 | { | 262 | { |
257 | evlist->mmap[cpu].prev = 0; | 263 | evlist->mmap[idx].prev = 0; |
258 | evlist->mmap[cpu].mask = mask; | 264 | evlist->mmap[idx].mask = mask; |
259 | evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot, | 265 | evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, |
260 | MAP_SHARED, fd, 0); | 266 | MAP_SHARED, fd, 0); |
261 | if (evlist->mmap[cpu].base == MAP_FAILED) { | 267 | if (evlist->mmap[idx].base == MAP_FAILED) { |
262 | if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit) | 268 | if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit) |
263 | ui__warning("Inherit is not allowed on per-task " | 269 | ui__warning("Inherit is not allowed on per-task " |
264 | "events using mmap.\n"); | 270 | "events using mmap.\n"); |
265 | return -1; | 271 | return -1; |
@@ -269,6 +275,86 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev | |||
269 | return 0; | 275 | return 0; |
270 | } | 276 | } |
271 | 277 | ||
278 | static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask) | ||
279 | { | ||
280 | struct perf_evsel *evsel; | ||
281 | int cpu, thread; | ||
282 | |||
283 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | ||
284 | int output = -1; | ||
285 | |||
286 | for (thread = 0; thread < evlist->threads->nr; thread++) { | ||
287 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
288 | int fd = FD(evsel, cpu, thread); | ||
289 | |||
290 | if (output == -1) { | ||
291 | output = fd; | ||
292 | if (__perf_evlist__mmap(evlist, evsel, cpu, | ||
293 | prot, mask, output) < 0) | ||
294 | goto out_unmap; | ||
295 | } else { | ||
296 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) | ||
297 | goto out_unmap; | ||
298 | } | ||
299 | |||
300 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | ||
301 | perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) | ||
302 | goto out_unmap; | ||
303 | } | ||
304 | } | ||
305 | } | ||
306 | |||
307 | return 0; | ||
308 | |||
309 | out_unmap: | ||
310 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | ||
311 | if (evlist->mmap[cpu].base != NULL) { | ||
312 | munmap(evlist->mmap[cpu].base, evlist->mmap_len); | ||
313 | evlist->mmap[cpu].base = NULL; | ||
314 | } | ||
315 | } | ||
316 | return -1; | ||
317 | } | ||
318 | |||
319 | static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask) | ||
320 | { | ||
321 | struct perf_evsel *evsel; | ||
322 | int thread; | ||
323 | |||
324 | for (thread = 0; thread < evlist->threads->nr; thread++) { | ||
325 | int output = -1; | ||
326 | |||
327 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
328 | int fd = FD(evsel, 0, thread); | ||
329 | |||
330 | if (output == -1) { | ||
331 | output = fd; | ||
332 | if (__perf_evlist__mmap(evlist, evsel, thread, | ||
333 | prot, mask, output) < 0) | ||
334 | goto out_unmap; | ||
335 | } else { | ||
336 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) | ||
337 | goto out_unmap; | ||
338 | } | ||
339 | |||
340 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | ||
341 | perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0) | ||
342 | goto out_unmap; | ||
343 | } | ||
344 | } | ||
345 | |||
346 | return 0; | ||
347 | |||
348 | out_unmap: | ||
349 | for (thread = 0; thread < evlist->threads->nr; thread++) { | ||
350 | if (evlist->mmap[thread].base != NULL) { | ||
351 | munmap(evlist->mmap[thread].base, evlist->mmap_len); | ||
352 | evlist->mmap[thread].base = NULL; | ||
353 | } | ||
354 | } | ||
355 | return -1; | ||
356 | } | ||
357 | |||
272 | /** perf_evlist__mmap - Create per cpu maps to receive events | 358 | /** perf_evlist__mmap - Create per cpu maps to receive events |
273 | * | 359 | * |
274 | * @evlist - list of events | 360 | * @evlist - list of events |
@@ -287,11 +373,11 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev | |||
287 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) | 373 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) |
288 | { | 374 | { |
289 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); | 375 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); |
290 | int mask = pages * page_size - 1, cpu; | 376 | int mask = pages * page_size - 1; |
291 | struct perf_evsel *first_evsel, *evsel; | 377 | struct perf_evsel *evsel; |
292 | const struct cpu_map *cpus = evlist->cpus; | 378 | const struct cpu_map *cpus = evlist->cpus; |
293 | const struct thread_map *threads = evlist->threads; | 379 | const struct thread_map *threads = evlist->threads; |
294 | int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); | 380 | int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); |
295 | 381 | ||
296 | if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) | 382 | if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) |
297 | return -ENOMEM; | 383 | return -ENOMEM; |
@@ -301,43 +387,18 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) | |||
301 | 387 | ||
302 | evlist->overwrite = overwrite; | 388 | evlist->overwrite = overwrite; |
303 | evlist->mmap_len = (pages + 1) * page_size; | 389 | evlist->mmap_len = (pages + 1) * page_size; |
304 | first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
305 | 390 | ||
306 | list_for_each_entry(evsel, &evlist->entries, node) { | 391 | list_for_each_entry(evsel, &evlist->entries, node) { |
307 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | 392 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && |
308 | evsel->sample_id == NULL && | 393 | evsel->sample_id == NULL && |
309 | perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) | 394 | perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) |
310 | return -ENOMEM; | 395 | return -ENOMEM; |
311 | |||
312 | for (cpu = 0; cpu < cpus->nr; cpu++) { | ||
313 | for (thread = 0; thread < threads->nr; thread++) { | ||
314 | int fd = FD(evsel, cpu, thread); | ||
315 | |||
316 | if (evsel->idx || thread) { | ||
317 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, | ||
318 | FD(first_evsel, cpu, 0)) != 0) | ||
319 | goto out_unmap; | ||
320 | } else if (__perf_evlist__mmap(evlist, evsel, cpu, | ||
321 | prot, mask, fd) < 0) | ||
322 | goto out_unmap; | ||
323 | |||
324 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | ||
325 | perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) | ||
326 | goto out_unmap; | ||
327 | } | ||
328 | } | ||
329 | } | 396 | } |
330 | 397 | ||
331 | return 0; | 398 | if (evlist->cpus->map[0] == -1) |
399 | return perf_evlist__mmap_per_thread(evlist, prot, mask); | ||
332 | 400 | ||
333 | out_unmap: | 401 | return perf_evlist__mmap_per_cpu(evlist, prot, mask); |
334 | for (cpu = 0; cpu < cpus->nr; cpu++) { | ||
335 | if (evlist->mmap[cpu].base != NULL) { | ||
336 | munmap(evlist->mmap[cpu].base, evlist->mmap_len); | ||
337 | evlist->mmap[cpu].base = NULL; | ||
338 | } | ||
339 | } | ||
340 | return -1; | ||
341 | } | 402 | } |
342 | 403 | ||
343 | int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, | 404 | int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, |
@@ -348,7 +409,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, | |||
348 | if (evlist->threads == NULL) | 409 | if (evlist->threads == NULL) |
349 | return -1; | 410 | return -1; |
350 | 411 | ||
351 | if (target_tid != -1) | 412 | if (cpu_list == NULL && target_tid != -1) |
352 | evlist->cpus = cpu_map__dummy_new(); | 413 | evlist->cpus = cpu_map__dummy_new(); |
353 | else | 414 | else |
354 | evlist->cpus = cpu_map__new(cpu_list); | 415 | evlist->cpus = cpu_map__new(cpu_list); |
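
The evlist rework threads one discriminator through record, top, test and the python binding: a dummy cpu map (single entry of -1, which per-task sessions without a CPU list receive) selects per-thread rings, anything else selects per-cpu rings, and nr_mmaps records how many were created so readers stop iterating cpus->nr unconditionally. Within one ring, only the first fd is mmap'ed; every other event fd is redirected into it via PERF_EVENT_IOC_SET_OUTPUT. Condensed from the hunks above (not a drop-in):

    /* Ring count: per CPU normally, per thread for the dummy map. */
    evlist->nr_mmaps = evlist->cpus->nr;
    if (evlist->cpus->map[0] == -1)
            evlist->nr_mmaps = evlist->threads->nr;

    /* Ring sharing: the first event owns the buffer, the rest join it. */
    if (output == -1) {
            output = fd;
            if (__perf_evlist__mmap(evlist, evsel, idx, prot, mask, output) < 0)
                    goto out_unmap;
    } else if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) {
            goto out_unmap;
    }
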
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 8b1cb7a4c5f1..7109d7add14e 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h | |||
@@ -17,6 +17,7 @@ struct perf_evlist { | |||
17 | struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; | 17 | struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; |
18 | int nr_entries; | 18 | int nr_entries; |
19 | int nr_fds; | 19 | int nr_fds; |
20 | int nr_mmaps; | ||
20 | int mmap_len; | 21 | int mmap_len; |
21 | bool overwrite; | 22 | bool overwrite; |
22 | union perf_event event_copy; | 23 | union perf_event event_copy; |
@@ -46,7 +47,7 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); | |||
46 | 47 | ||
47 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); | 48 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); |
48 | 49 | ||
49 | union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu); | 50 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); |
50 | 51 | ||
51 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist); | 52 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist); |
52 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); | 53 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); |
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 8b0eff8b8283..b5c7d818001c 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c | |||
@@ -680,7 +680,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, | |||
680 | &cpu, &sample_id_all)) | 680 | &cpu, &sample_id_all)) |
681 | return NULL; | 681 | return NULL; |
682 | 682 | ||
683 | event = perf_evlist__read_on_cpu(evlist, cpu); | 683 | event = perf_evlist__mmap_read(evlist, cpu); |
684 | if (event != NULL) { | 684 | if (event != NULL) { |
685 | struct perf_evsel *first; | 685 | struct perf_evsel *first; |
686 | PyObject *pyevent = pyrf_event__new(event); | 686 | PyObject *pyevent = pyrf_event__new(event); |
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 0a7ed5b5e281..1e88485c16a0 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c | |||
@@ -2187,7 +2187,6 @@ static const struct flag flags[] = { | |||
2187 | { "TASKLET_SOFTIRQ", 6 }, | 2187 | { "TASKLET_SOFTIRQ", 6 }, |
2188 | { "SCHED_SOFTIRQ", 7 }, | 2188 | { "SCHED_SOFTIRQ", 7 }, |
2189 | { "HRTIMER_SOFTIRQ", 8 }, | 2189 | { "HRTIMER_SOFTIRQ", 8 }, |
2190 | { "RCU_SOFTIRQ", 9 }, | ||
2191 | 2190 | ||
2192 | { "HRTIMER_NORESTART", 0 }, | 2191 | { "HRTIMER_NORESTART", 0 }, |
2193 | { "HRTIMER_RESTART", 1 }, | 2192 | { "HRTIMER_RESTART", 1 }, |